/*
 * Broadcom BCM7xxx System Port Ethernet MAC driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>

#include "bcmsysport.h"
/* I/O accessors register helpers */
#define BCM_SYSPORT_IO_MACRO(name, offset)				\
static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off)	\
{									\
	u32 reg = __raw_readl(priv->base + offset + off);		\
	return reg;							\
}									\
static inline void name##_writel(struct bcm_sysport_priv *priv,	\
				 u32 val, u32 off)			\
{									\
	__raw_writel(val, priv->base + offset + off);			\
}
BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
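/* For illustration (editorial sketch, not part of the original file): after
 * expansion each accessor pair simply offsets into the mapped register
 * window, e.g.:
 *
 *	u32 reg = umac_readl(priv, UMAC_CMD);
 *	// == __raw_readl(priv->base + SYS_PORT_UMAC_OFFSET + UMAC_CMD)
 *	umac_writel(priv, reg, UMAC_CMD);
 *	// == __raw_writel(reg, priv->base + SYS_PORT_UMAC_OFFSET + UMAC_CMD)
 */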
/* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact
 * same layout, except it has been moved up by 4 bytes, *sigh*
 */
static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off)
{
	if (priv->is_lite && off >= RDMA_STATUS)
		off += 4;
	return __raw_readl(priv->base + SYS_PORT_RDMA_OFFSET + off);
}
static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off)
{
	if (priv->is_lite && off >= RDMA_STATUS)
		off += 4;
	__raw_writel(val, priv->base + SYS_PORT_RDMA_OFFSET + off);
}
static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit)
{
	if (!priv->is_lite)
		return BIT(bit);
	else
		return BIT(bit + 1);
}
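/* Worked example (editorial note, assuming the TSB_EN definition from
 * bcmsysport.h): on full SYSTEMPORT, tdma_control_bit(priv, TSB_EN)
 * evaluates to BIT(TSB_EN); on SYSTEMPORT Lite the TDMA_CONTROL bit layout
 * is shifted up by one, so the same call yields BIT(TSB_EN + 1). Callers
 * can therefore stay layout-agnostic:
 *
 *	reg = tdma_readl(priv, TDMA_CONTROL);
 *	reg |= tdma_control_bit(priv, TSB_EN);
 *	tdma_writel(priv, reg, TDMA_CONTROL);
 */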
/* L2-interrupt masking/unmasking helpers that automatically save the applied
 * mask in a software copy to avoid CPU_MASK_STATUS reads in hot-paths.
 */
#define BCM_SYSPORT_INTR_L2(which)	\
static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	priv->irq##which##_mask &= ~(mask);				\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);	\
}									\
static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET);	\
	priv->irq##which##_mask |= (mask);				\
}

BCM_SYSPORT_INTR_L2(0)
BCM_SYSPORT_INTR_L2(1)
/* Register accesses to GISB/RBUS registers are expensive (few hundred
 * nanoseconds), so keep the check for 64-bits explicit here to save
 * one register write per packet on 32-bit platforms.
 */
static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
				     void __iomem *d,
				     dma_addr_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	__raw_writel(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
		     d + DESC_ADDR_HI_STATUS_LEN);
#endif
	__raw_writel(lower_32_bits(addr), d + DESC_ADDR_LO);
}
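/* Editorial note: with CONFIG_PHYS_ADDR_T_64BIT unset, the preprocessor
 * drops the upper-word write entirely, so posting a buffer costs a single
 * expensive GISB/RBUS write instead of two; the check is made at compile
 * time rather than per packet for exactly that reason.
 */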
static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
					     struct dma_desc *desc,
					     unsigned int port)
{
	/* Ports are latched, so write upper address first */
	tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
	tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
}
/* Ethtool operations */
static int bcm_sysport_set_rx_csum(struct net_device *dev,
				   netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	if (priv->rx_chk_en)
		reg |= RXCHK_EN;
	else
		reg &= ~RXCHK_EN;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (priv->rx_chk_en && priv->crc_fwd)
		reg |= RXCHK_SKIP_FCS;
	else
		reg &= ~RXCHK_SKIP_FCS;

	/* If Broadcom tags are enabled (e.g. using a switch), make
	 * sure we tell the RXCHK hardware to expect a 4-byte Broadcom
	 * tag after the Ethernet MAC Source Address.
	 */
	if (netdev_uses_dsa(dev))
		reg |= RXCHK_BRCM_TAG_EN;
	else
		reg &= ~RXCHK_BRCM_TAG_EN;

	rxchk_writel(priv, reg, RXCHK_CONTROL);

	return 0;
}
static int bcm_sysport_set_tx_csum(struct net_device *dev,
				   netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	/* Hardware transmit checksum requires us to enable the Transmit status
	 * block prepended to the packet contents
	 */
	priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->tsb_en)
		reg |= tdma_control_bit(priv, TSB_EN);
	else
		reg &= ~tdma_control_bit(priv, TSB_EN);
	tdma_writel(priv, reg, TDMA_CONTROL);

	return 0;
}
static int bcm_sysport_set_features(struct net_device *dev,
				    netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	netdev_features_t wanted = dev->wanted_features;
	int ret = 0;

	if (changed & NETIF_F_RXCSUM)
		ret = bcm_sysport_set_rx_csum(dev, wanted);
	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		ret = bcm_sysport_set_tx_csum(dev, wanted);

	return ret;
}
/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
	STAT_NETDEV64(rx_packets),
	STAT_NETDEV64(tx_packets),
	STAT_NETDEV64(rx_bytes),
	STAT_NETDEV64(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_MIB_RX("rx_control", mib.rx.cf),
	STAT_MIB_RX("rx_pause", mib.rx.pf),
	STAT_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_MIB_RX("rx_align", mib.rx.aln),
	STAT_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_MIB_RX("rx_code", mib.rx.cde),
	STAT_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_MIB_TX("tx_pause", mib.tx.pf),
	STAT_MIB_TX("tx_control", mib.tx.cf),
	STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_MIB_TX("tx_defer", mib.tx.drf),
	STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_MIB_TX("tx_frags", mib.tx.frg),
	STAT_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* RXCHK misc statistics */
	STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
	STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
		   RXCHK_OTHER_DISC_CNTR),
	/* RBUF misc statistics */
	STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
	STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
	STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
	STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
	/* Per TX-queue statistics are dynamically appended */
};

#define BCM_SYSPORT_STATS_LEN	ARRAY_SIZE(bcm_sysport_gstrings_stats)
static void bcm_sysport_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, "0.1", sizeof(info->version));
	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
}
static u32 bcm_sysport_get_msglvl(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}
static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->msg_enable = enable;
}
static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type)
{
	switch (type) {
	case BCM_SYSPORT_STAT_NETDEV:
	case BCM_SYSPORT_STAT_NETDEV64:
	case BCM_SYSPORT_STAT_RXCHK:
	case BCM_SYSPORT_STAT_RBUF:
	case BCM_SYSPORT_STAT_SOFT:
		return true;
	default:
		return false;
	}
}
static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	const struct bcm_sysport_stats *s;
	unsigned int i, j;

	switch (string_set) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			s = &bcm_sysport_gstrings_stats[i];
			if (priv->is_lite &&
			    !bcm_sysport_lite_stat_valid(s->type))
				continue;
			j++;
		}
		/* Include per-queue statistics */
		return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
	default:
		return -EOPNOTSUPP;
	}
}
static void bcm_sysport_get_strings(struct net_device *dev,
				    u32 stringset, u8 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	const struct bcm_sysport_stats *s;
	char buf[128];
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			s = &bcm_sysport_gstrings_stats[i];
			if (priv->is_lite &&
			    !bcm_sysport_lite_stat_valid(s->type))
				continue;

			memcpy(data + j * ETH_GSTRING_LEN, s->stat_string,
			       ETH_GSTRING_LEN);
			j++;
		}

		for (i = 0; i < dev->num_tx_queues; i++) {
			snprintf(buf, sizeof(buf), "txq%d_packets", i);
			memcpy(data + j * ETH_GSTRING_LEN, buf,
			       ETH_GSTRING_LEN);
			j++;

			snprintf(buf, sizeof(buf), "txq%d_bytes", i);
			memcpy(data + j * ETH_GSTRING_LEN, buf,
			       ETH_GSTRING_LEN);
			j++;
		}
		break;
	default:
		break;
	}
}
static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		switch (s->type) {
		case BCM_SYSPORT_STAT_NETDEV:
		case BCM_SYSPORT_STAT_NETDEV64:
		case BCM_SYSPORT_STAT_SOFT:
			continue;
		case BCM_SYSPORT_STAT_MIB_RX:
		case BCM_SYSPORT_STAT_MIB_TX:
		case BCM_SYSPORT_STAT_RUNT:
			if (priv->is_lite)
				continue;

			if (s->type != BCM_SYSPORT_STAT_MIB_RX)
				offset = UMAC_MIB_STAT_OFFSET;
			val = umac_readl(priv, UMAC_MIB_START + j + offset);
			break;
		case BCM_SYSPORT_STAT_RXCHK:
			val = rxchk_readl(priv, s->reg_offset);
			if (val == ~0)
				rxchk_writel(priv, 0, s->reg_offset);
			break;
		case BCM_SYSPORT_STAT_RBUF:
			val = rbuf_readl(priv, s->reg_offset);
			if (val == ~0)
				rbuf_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}

	netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
}
static void bcm_sysport_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct u64_stats_sync *syncp = &priv->syncp;
	struct bcm_sysport_tx_ring *ring;
	unsigned int start;
	int i, j;

	if (netif_running(dev))
		bcm_sysport_update_mib_counters(priv);

	for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		if (s->type == BCM_SYSPORT_STAT_NETDEV)
			p = (char *)&dev->stats;
		else if (s->type == BCM_SYSPORT_STAT_NETDEV64)
			p = (char *)stats64;
		else
			p = (char *)priv;

		if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
			continue;
		p += s->stat_offset;

		if (s->stat_sizeof == sizeof(u64))
			do {
				start = u64_stats_fetch_begin_irq(syncp);
				data[i] = *(u64 *)p;
			} while (u64_stats_fetch_retry_irq(syncp, start));
		else
			data[i] = *(u32 *)p;
		j++;
	}

	/* For SYSTEMPORT Lite since we have holes in our statistics, j would
	 * be equal to BCM_SYSPORT_STATS_LEN at the end of the loop, but it
	 * needs to point to how many total statistics we have minus the
	 * number of per TX queue statistics
	 */
	j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) -
	    dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;

	for (i = 0; i < dev->num_tx_queues; i++) {
		ring = &priv->tx_rings[i];
		data[j] = ring->packets;
		j++;
		data[j] = ring->bytes;
		j++;
	}
}
static void bcm_sysport_get_wol(struct net_device *dev,
				struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;
	wol->wolopts = priv->wolopts;

	if (!(priv->wolopts & WAKE_MAGICSECURE))
		return;

	/* Return the programmed SecureOn password */
	reg = umac_readl(priv, UMAC_PSW_MS);
	put_unaligned_be16(reg, &wol->sopass[0]);
	reg = umac_readl(priv, UMAC_PSW_LS);
	put_unaligned_be32(reg, &wol->sopass[2]);
}
static int bcm_sysport_set_wol(struct net_device *dev,
			       struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE;

	if (!device_can_wakeup(kdev))
		return -ENOTSUPP;

	if (wol->wolopts & ~supported)
		return -EINVAL;

	/* Program the SecureOn password */
	if (wol->wolopts & WAKE_MAGICSECURE) {
		umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
			    UMAC_PSW_MS);
		umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
			    UMAC_PSW_LS);
	}

	/* Flag the device and relevant IRQ as wakeup capable */
	if (wol->wolopts) {
		device_set_wakeup_enable(kdev, 1);
		if (priv->wol_irq_disabled)
			enable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 0;
	} else {
		device_set_wakeup_enable(kdev, 0);
		/* Avoid unbalanced disable_irq_wake calls */
		if (!priv->wol_irq_disabled)
			disable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 1;
	}

	priv->wolopts = wol->wolopts;

	return 0;
}
static int bcm_sysport_get_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));

	ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);

	ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;

	return 0;
}
static int bcm_sysport_set_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg;

	/* Base system clock is 125 MHz, DMA timeout is this reference clock
	 * divided by 1024, which yields roughly 8.192 us; our maximum value
	 * has to fit in the RING_TIMEOUT_MASK (16 bits).
	 */
	if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
	    ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
	    ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
	    ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
	    (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	for (i = 0; i < dev->num_tx_queues; i++) {
		reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(i));
		reg &= ~(RING_INTR_THRESH_MASK |
			 RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
		reg |= ec->tx_max_coalesced_frames;
		reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
			RING_TIMEOUT_SHIFT;
		tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(i));
	}

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);
	reg &= ~(RDMA_INTR_THRESH_MASK |
		 RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
	reg |= ec->rx_max_coalesced_frames;
	reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192) <<
		RDMA_TIMEOUT_SHIFT;
	rdma_writel(priv, reg, RDMA_MBDONE_INTR);

	return 0;
}
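/* Worked example (editorial, not from the original driver): one DMA timeout
 * tick is 1024 / 125 MHz = 8.192 us, so the conversion above rounds a
 * microsecond request up against that granularity:
 *
 *	ticks = DIV_ROUND_UP(usecs * 1000, 8192);
 *
 * e.g. ec->tx_coalesce_usecs = 100 gives DIV_ROUND_UP(100000, 8192) = 13
 * ticks, i.e. an effective hardware timeout of roughly 106.5 us.
 */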
static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
	dev_kfree_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}
static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;
	struct sk_buff *skb, *rx_skb;
	dma_addr_t mapping;

	/* Allocate a new SKB for a new packet */
	skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
	if (!skb) {
		priv->mib.alloc_rx_buff_failed++;
		netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
		return NULL;
	}

	mapping = dma_map_single(kdev, skb->data,
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.rx_dma_failed++;
		dev_kfree_skb_any(skb);
		netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
		return NULL;
	}

	/* Grab the current SKB on the ring */
	rx_skb = cb->skb;
	if (likely(rx_skb))
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);

	/* Put the new SKB on the ring */
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_desc_set_addr(priv, cb->bd_addr, mapping);

	netif_dbg(priv, rx_status, ndev, "RX refill\n");

	/* Return the current SKB to the caller */
	return rx_skb;
}
static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		skb = bcm_sysport_rx_refill(priv, cb);
		if (skb)
			dev_kfree_skb(skb);
		if (!cb->skb)
			return -ENOMEM;
	}

	return 0;
}
/* Poll the hardware for up to budget packets to process */
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
					unsigned int budget)
{
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct net_device *ndev = priv->netdev;
	unsigned int processed = 0, to_process;
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int p_index;
	u16 len, status;
	struct bcm_rsb *rsb;

	/* Clear status before servicing to reduce spurious interrupts */
	intrl2_0_writel(priv, INTRL2_0_RDMA_MBDONE, INTRL2_CPU_CLEAR);

	/* Determine how much we should process since last call, SYSTEMPORT Lite
	 * groups the producer and consumer indexes into the same 32-bit
	 * register, which we access using RDMA_CONS_INDEX
	 */
	if (!priv->is_lite)
		p_index = rdma_readl(priv, RDMA_PROD_INDEX);
	else
		p_index = rdma_readl(priv, RDMA_CONS_INDEX);
	p_index &= RDMA_PROD_INDEX_MASK;

	to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK;

	netif_dbg(priv, rx_status, ndev,
		  "p_index=%d rx_c_index=%d to_process=%d\n",
		  p_index, priv->rx_c_index, to_process);

	while ((processed < to_process) && (processed < budget)) {
		cb = &priv->rx_cbs[priv->rx_read_ptr];
		skb = bcm_sysport_rx_refill(priv, cb);

		/* We do not have a backing SKB, so we do not have a
		 * corresponding DMA mapping for this incoming packet since
		 * bcm_sysport_rx_refill always either has both skb and mapping
		 * or none.
		 */
		if (unlikely(!skb)) {
			netif_err(priv, rx_err, ndev, "out of memory!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			goto next;
		}

		/* Extract the Receive Status Block prepended */
		rsb = (struct bcm_rsb *)skb->data;
		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
			 DESC_STATUS_MASK;

		netif_dbg(priv, rx_status, ndev,
			  "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
			  p_index, priv->rx_c_index, priv->rx_read_ptr,
			  len, status);

		if (unlikely(len > RX_BUF_LENGTH)) {
			netif_err(priv, rx_status, ndev, "oversized packet\n");
			ndev->stats.rx_length_errors++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
			netif_err(priv, rx_err, ndev, "error packet\n");
			if (status & RX_STATUS_OVFLOW)
				ndev->stats.rx_over_errors++;
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		skb_put(skb, len);

		/* Hardware validated our checksum */
		if (likely(status & DESC_L4_CSUM))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* Hardware pre-pends packets with 2 bytes before the Ethernet
		 * header, plus we have the Receive Status Block; strip off all
		 * of this from the SKB.
		 */
		skb_pull(skb, sizeof(*rsb) + 2);
		len -= (sizeof(*rsb) + 2);

		/* UniMAC may forward CRC */
		if (priv->crc_fwd) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		skb->protocol = eth_type_trans(skb, ndev);
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;
		u64_stats_update_begin(&priv->syncp);
		stats64->rx_packets++;
		stats64->rx_bytes += len;
		u64_stats_update_end(&priv->syncp);

		napi_gro_receive(&priv->napi, skb);
next:
		processed++;
		priv->rx_read_ptr++;

		if (priv->rx_read_ptr == priv->num_rx_bds)
			priv->rx_read_ptr = 0;
	}

	return processed;
}
static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
				       struct bcm_sysport_cb *cb,
				       unsigned int *bytes_compl,
				       unsigned int *pkts_compl)
{
	struct bcm_sysport_priv *priv = ring->priv;
	struct device *kdev = &priv->pdev->dev;

	if (cb->skb) {
		*bytes_compl += cb->skb->len;
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 dma_unmap_len(cb, dma_len),
				 DMA_TO_DEVICE);
		(*pkts_compl)++;
		bcm_sysport_free_cb(cb);
	/* SKB fragment */
	} else if (dma_unmap_addr(cb, dma_addr)) {
		*bytes_compl += dma_unmap_len(cb, dma_len);
		dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
			       dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}
}
/* Reclaim queued SKBs for transmission completion, lockless version */
static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_tx_ring *ring)
{
	unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct net_device *ndev = priv->netdev;
	struct bcm_sysport_cb *cb;
	u32 hw_ind;

	/* Clear status before servicing to reduce spurious interrupts */
	if (!ring->priv->is_lite)
		intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR);
	else
		intrl2_0_writel(ring->priv, BIT(ring->index +
				INTRL2_0_TDMA_MBDONE_SHIFT), INTRL2_CPU_CLEAR);

	/* Compute how many descriptors have been processed since last call */
	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
	ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);

	last_c_index = ring->c_index;
	num_tx_cbs = ring->size;

	c_index &= (num_tx_cbs - 1);

	if (c_index >= last_c_index)
		last_tx_cn = c_index - last_c_index;
	else
		last_tx_cn = num_tx_cbs - last_c_index + c_index;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
		  ring->index, c_index, last_tx_cn, last_c_index);

	while (last_tx_cn-- > 0) {
		cb = ring->cbs + last_c_index;
		bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);

		ring->desc_count++;
		last_c_index++;
		last_c_index &= (num_tx_cbs - 1);
	}

	u64_stats_update_begin(&priv->syncp);
	ring->packets += pkts_compl;
	ring->bytes += bytes_compl;
	u64_stats_update_end(&priv->syncp);

	ring->c_index = c_index;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
		  ring->index, ring->c_index, pkts_compl, bytes_compl);

	return pkts_compl;
}
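/* Worked example (editorial): the last_tx_cn computation above is a modular
 * subtraction over the power-of-two ring size. With num_tx_cbs = 256,
 * last_c_index = 250 and a hardware c_index of 4, the wrapped branch gives
 * 256 - 250 + 4 = 10 completed descriptors, which is exactly
 * (4 - 250) mod 256.
 */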
/* Locked version of the per-ring TX reclaim routine */
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					   struct bcm_sysport_tx_ring *ring)
{
	struct netdev_queue *txq;
	unsigned int released;
	unsigned long flags;

	txq = netdev_get_tx_queue(priv->netdev, ring->index);

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcm_sysport_tx_reclaim(priv, ring);
	if (released)
		netif_tx_wake_queue(txq);

	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}
/* Locked version of the per-ring TX reclaim, but does not wake the queue */
static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
				 struct bcm_sysport_tx_ring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	__bcm_sysport_tx_reclaim(priv, ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}
static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_tx_ring *ring =
		container_of(napi, struct bcm_sysport_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_tx_reclaim(ring->priv, ring);

	if (work_done == 0) {
		napi_complete(napi);
		/* re-enable TX interrupt */
		if (!ring->priv->is_lite)
			intrl2_1_mask_clear(ring->priv, BIT(ring->index));
		else
			intrl2_0_mask_clear(ring->priv, BIT(ring->index +
					    INTRL2_0_TDMA_MBDONE_SHIFT));

		return 0;
	}

	return budget;
}
static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
{
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++)
		bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
}
static int bcm_sysport_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_priv *priv =
		container_of(napi, struct bcm_sysport_priv, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_desc_rx(priv, budget);

	priv->rx_c_index += work_done;
	priv->rx_c_index &= RDMA_CONS_INDEX_MASK;

	/* SYSTEMPORT Lite groups the producer/consumer index; the producer is
	 * maintained by HW, but writes to it will be ignored while RDMA
	 * is still active.
	 */
	if (!priv->is_lite)
		rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
	else
		rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* re-enable RX interrupts */
		intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
	}

	return work_done;
}
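/* Editorial note: on SYSTEMPORT Lite the consumer index lives in the upper
 * half-word of RDMA_CONS_INDEX (hence the << 16 above) while the
 * hardware-maintained producer index sits in the lower half-word, which is
 * also why bcm_sysport_desc_rx() masks the same register with
 * RDMA_PROD_INDEX_MASK when reading. Full SYSTEMPORT keeps the consumer
 * index in its own register and writes it unshifted.
 */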
static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
	u32 reg;

	/* Stop monitoring MPD interrupt */
	intrl2_0_mask_set(priv, INTRL2_0_MPD);

	/* Clear the MagicPacket detection logic */
	reg = umac_readl(priv, UMAC_MPD_CTRL);
	reg &= ~MPD_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}
/* RX and misc interrupt routine */
static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring, ring_bit;

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq0_stat == 0)) {
		netdev_warn(priv->netdev, "spurious RX interrupt\n");
		return IRQ_NONE;
	}

	if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			/* disable RX interrupts */
			intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
			__napi_schedule_irqoff(&priv->napi);
		}
	}

	/* TX ring is full, perform a full reclaim since we do not know
	 * which one would trigger this interrupt
	 */
	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
		bcm_sysport_tx_reclaim_all(priv);

	if (priv->irq0_stat & INTRL2_0_MPD) {
		netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n");
		bcm_sysport_resume_from_wol(priv);
	}

	if (!priv->is_lite)
		goto out;

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT);
		if (!(priv->irq0_stat & ring_bit))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_0_mask_set(priv, ring_bit);
			__napi_schedule(&txr->napi);
		}
	}
out:
	return IRQ_HANDLED;
}
/* TX interrupt service routine */
static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq1_stat == 0)) {
		netdev_warn(priv->netdev, "spurious TX interrupt\n");
		return IRQ_NONE;
	}

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		if (!(priv->irq1_stat & BIT(ring)))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_1_mask_set(priv, BIT(ring));
			__napi_schedule_irqoff(&txr->napi);
		}
	}

	return IRQ_HANDLED;
}
static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
{
	struct bcm_sysport_priv *priv = dev_id;

	pm_wakeup_event(&priv->pdev->dev, 0);

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void bcm_sysport_poll_controller(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	disable_irq(priv->irq0);
	bcm_sysport_rx_isr(priv->irq0, priv);
	enable_irq(priv->irq0);

	if (!priv->is_lite) {
		disable_irq(priv->irq1);
		bcm_sysport_tx_isr(priv->irq1, priv);
		enable_irq(priv->irq1);
	}
}
#endif
static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
					      struct net_device *dev)
{
	struct sk_buff *nskb;
	struct bcm_tsb *tsb;
	u32 csum_info;
	u8 ip_proto;
	u16 csum_start;
	u16 ip_ver;

	/* Re-allocate SKB if needed */
	if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
		nskb = skb_realloc_headroom(skb, sizeof(*tsb));
		if (!nskb) {
			dev_kfree_skb_any(skb);
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;
			return NULL;
		}
		dev_consume_skb_any(skb);
		skb = nskb;
	}

	tsb = skb_push(skb, sizeof(*tsb));
	/* Zero-out TSB by default */
	memset(tsb, 0, sizeof(*tsb));

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = htons(skb->protocol);
		switch (ip_ver) {
		case ETH_P_IP:
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case ETH_P_IPV6:
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return skb;
		}

		/* Get the checksum offset and the L4 (transport) offset */
		csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
		csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
		csum_info |= (csum_start << L4_PTR_SHIFT);

		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			csum_info |= L4_LENGTH_VALID;
			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
				csum_info |= L4_UDP;
		} else {
			csum_info = 0;
		}

		tsb->l4_ptr_dest_map = csum_info;
	}

	return skb;
}
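/* Worked example (editorial sketch): for an IPv4/UDP skb whose checksum
 * field begins csum_start bytes into the TSB-adjusted packet, the word
 * written to tsb->l4_ptr_dest_map packs three things:
 *
 *	csum_info  = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
 *	csum_info |= csum_start << L4_PTR_SHIFT;
 *	csum_info |= L4_LENGTH_VALID | L4_UDP;
 *
 * i.e. the absolute offset of the checksum field in the low bits, the L4
 * header offset above it, and flags telling the hardware that the length
 * is valid and UDP checksum rules apply.
 */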
static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct bcm_sysport_tx_ring *ring;
	struct bcm_sysport_cb *cb;
	struct netdev_queue *txq;
	struct dma_desc *desc;
	unsigned int skb_len;
	unsigned long flags;
	dma_addr_t mapping;
	u32 len_status;
	u16 queue;
	int ret;

	queue = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, queue);
	ring = &priv->tx_rings[queue];

	/* lock against tx reclaim in BH context and TX ring full interrupt */
	spin_lock_irqsave(&ring->lock, flags);
	if (unlikely(ring->desc_count == 0)) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "queue %d awake and ring full!\n", queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* The Ethernet switch we are interfaced with needs packets to be at
	 * least 64 bytes (including FCS) otherwise they will be discarded when
	 * they enter the switch port logic. When Broadcom tags are enabled, we
	 * need to make sure that packets are at least 68 bytes
	 * (including FCS and tag) because the length verification is done after
	 * the Broadcom tag is stripped off the ingress packet.
	 */
	if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* Insert TSB and checksum infos */
	if (priv->tsb_en) {
		skb = bcm_sysport_insert_tsb(skb, dev);
		if (!skb) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	skb_len = skb->len;

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
			  skb->data, skb_len);
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* Remember the SKB for future freeing */
	cb = &ring->cbs[ring->curr_desc];
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_unmap_len_set(cb, dma_len, skb_len);

	/* Fetch a descriptor entry from our pool */
	desc = ring->desc_cpu;

	desc->addr_lo = lower_32_bits(mapping);
	len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
	len_status |= (skb_len << DESC_LEN_SHIFT);
	len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
		       DESC_STATUS_SHIFT;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);

	ring->curr_desc++;
	if (ring->curr_desc == ring->size)
		ring->curr_desc = 0;
	ring->desc_count--;

	/* Ensure write completion of the descriptor status/length
	 * in DRAM before the System Port WRITE_PORT register latches
	 * the value
	 */
	wmb();
	desc->addr_status_len = len_status;
	wmb();

	/* Write this descriptor address to the RING write port */
	tdma_port_write_desc_addr(priv, desc, ring->index);

	/* Check ring space and update SW control flow */
	if (ring->desc_count == 0)
		netif_tx_stop_queue(txq);

	netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
		  ring->index, ring->desc_count, ring->curr_desc);

	ret = NETDEV_TX_OK;
out:
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}
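/* Worked example (editorial): for a 64-byte single-fragment packet mapped at
 * a 32-bit DMA address, the len_status word built above reduces to
 *
 *	len_status = (64 << DESC_LEN_SHIFT) |
 *		     ((DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
 *		      DESC_STATUS_SHIFT);
 *
 * since upper_32_bits(mapping) is zero: SOP/EOP mark a complete frame in one
 * descriptor and TX_STATUS_APP_CRC asks the hardware to append the FCS.
 */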
static void bcm_sysport_tx_timeout(struct net_device *dev)
{
	netdev_warn(dev, "transmit timeout!\n");

	netif_trans_update(dev);
	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}
/* phylib adjust link callback */
static void bcm_sysport_adj_link(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned int changed = 0;
	u32 cmd_bits = 0, reg;

	if (priv->old_link != phydev->link) {
		changed = 1;
		priv->old_link = phydev->link;
	}

	if (priv->old_duplex != phydev->duplex) {
		changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	if (priv->is_lite)
		goto out;

	switch (phydev->speed) {
	case SPEED_2500:
		cmd_bits = CMD_SPEED_2500;
		break;
	case SPEED_1000:
		cmd_bits = CMD_SPEED_1000;
		break;
	case SPEED_100:
		cmd_bits = CMD_SPEED_100;
		break;
	case SPEED_10:
		cmd_bits = CMD_SPEED_10;
		break;
	default:
		break;
	}
	cmd_bits <<= CMD_SPEED_SHIFT;

	if (phydev->duplex == DUPLEX_HALF)
		cmd_bits |= CMD_HD_EN;

	if (priv->old_pause != phydev->pause) {
		changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (!phydev->pause)
		cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;

	if (!changed)
		return;

	if (phydev->link) {
		reg = umac_readl(priv, UMAC_CMD);
		reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
			CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
			CMD_TX_PAUSE_IGNORE);
		reg |= cmd_bits;
		umac_writel(priv, reg, UMAC_CMD);
	}
out:
	if (changed)
		phy_print_status(phydev);
}
static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
				    unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	size_t size;
	void *p;
	u32 reg;

	/* Simple descriptors partitioning for now */
	size = 256;

	/* We just need one DMA descriptor which is DMA-able, since writing to
	 * the port will allocate a new descriptor in its internal linked-list
	 */
	p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
				GFP_KERNEL);
	if (!p) {
		netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
		return -ENOMEM;
	}

	ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
	if (!ring->cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	/* Initialize SW view of the ring */
	spin_lock_init(&ring->lock);
	ring->priv = priv;
	netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
	ring->index = index;
	ring->size = size;
	ring->alloc_size = ring->size;
	ring->desc_cpu = p;
	ring->desc_count = ring->size;
	ring->curr_desc = 0;

	/* Initialize HW ring */
	tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
	tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
	tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));

	/* Program the number of descriptors as MAX_THRESHOLD and half of
	 * its size for the hysteresis trigger
	 */
	tdma_writel(priv, ring->size |
			1 << RING_HYST_THRESH_SHIFT,
			TDMA_DESC_RING_MAX_HYST(index));

	/* Enable the ring queue in the arbiter */
	reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
	reg |= (1 << index);
	tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);

	napi_enable(&ring->napi);

	netif_dbg(priv, hw, priv->netdev,
		  "TDMA cfg, size=%d, desc_cpu=%p\n",
		  ring->size, ring->desc_cpu);

	return 0;
}
static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
				     unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	u32 reg;

	/* Caller should stop the TDMA engine */
	reg = tdma_readl(priv, TDMA_STATUS);
	if (!(reg & TDMA_DISABLED))
		netdev_warn(priv->netdev, "TDMA not stopped!\n");

	/* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
	 * fail, so by checking this pointer we know whether the TX ring was
	 * fully initialized or not.
	 */
	if (!ring->cbs)
		return;

	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);

	bcm_sysport_tx_clean(priv, ring);

	kfree(ring->cbs);
	ring->cbs = NULL;

	if (ring->desc_dma) {
		dma_free_coherent(kdev, sizeof(struct dma_desc),
				  ring->desc_cpu, ring->desc_dma);
		ring->desc_dma = 0;
	}
	ring->size = 0;
	ring->alloc_size = 0;

	netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
}
/* RDMA helper */
static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = rdma_readl(priv, RDMA_CONTROL);
	if (enable)
		reg |= RDMA_EN;
	else
		reg &= ~RDMA_EN;
	rdma_writel(priv, reg, RDMA_CONTROL);

	/* Poll for RDMA disabling completion */
	do {
		reg = rdma_readl(priv, RDMA_STATUS);
		if (!!(reg & RDMA_DISABLED) == !enable)
			return 0;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");

	return -ETIMEDOUT;
}
/* TDMA helper */
static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = tdma_readl(priv, TDMA_CONTROL);
	if (enable)
		reg |= tdma_control_bit(priv, TDMA_EN);
	else
		reg &= ~tdma_control_bit(priv, TDMA_EN);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Poll for TDMA disabling completion */
	do {
		reg = tdma_readl(priv, TDMA_STATUS);
		if (!!(reg & TDMA_DISABLED) == !enable)
			return 0;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");

	return -ETIMEDOUT;
}
static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	unsigned int i;
	int ret;
	u32 reg;

	/* Initialize SW view of the RX ring */
	priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC;
	priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
	priv->rx_c_index = 0;
	priv->rx_read_ptr = 0;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
			       GFP_KERNEL);
	if (!priv->rx_cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = priv->rx_cbs + i;
		cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
	}

	ret = bcm_sysport_alloc_rx_bufs(priv);
	if (ret) {
		netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
		return ret;
	}

	/* Initialize HW, ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		rdma_enable_set(priv, 0);

	rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
	rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
	rdma_writel(priv, 0, RDMA_PROD_INDEX);
	rdma_writel(priv, 0, RDMA_CONS_INDEX);
	rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
			  RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
	/* Operate the queue in ring mode */
	rdma_writel(priv, 0, RDMA_START_ADDR_HI);
	rdma_writel(priv, 0, RDMA_START_ADDR_LO);
	rdma_writel(priv, 0, RDMA_END_ADDR_HI);
	rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO);

	rdma_writel(priv, 1, RDMA_MBDONE_INTR);

	netif_dbg(priv, hw, priv->netdev,
		  "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
		  priv->num_rx_bds, priv->rx_bds);

	return 0;
}
static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	unsigned int i;
	u32 reg;

	/* Caller should ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		netdev_warn(priv->netdev, "RDMA not stopped!\n");

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		if (dma_unmap_addr(cb, dma_addr))
			dma_unmap_single(&priv->pdev->dev,
					 dma_unmap_addr(cb, dma_addr),
					 RX_BUF_LENGTH, DMA_FROM_DEVICE);
		bcm_sysport_free_cb(cb);
	}

	kfree(priv->rx_cbs);
	priv->rx_cbs = NULL;

	netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
}
static void bcm_sysport_set_rx_mode(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	reg = umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC)
		reg |= CMD_PROMISC;
	else
		reg &= ~CMD_PROMISC;
	umac_writel(priv, reg, UMAC_CMD);

	/* No support for ALLMULTI */
	if (dev->flags & IFF_ALLMULTI)
		return;
}
static inline void umac_enable_set(struct bcm_sysport_priv *priv,
				   u32 mask, unsigned int enable)
{
	u32 reg;

	if (!priv->is_lite) {
		reg = umac_readl(priv, UMAC_CMD);
		if (enable)
			reg |= mask;
		else
			reg &= ~mask;
		umac_writel(priv, reg, UMAC_CMD);
	} else {
		reg = gib_readl(priv, GIB_CONTROL);
		if (enable)
			reg |= mask;
		else
			reg &= ~mask;
		gib_writel(priv, reg, GIB_CONTROL);
	}

	/* UniMAC stops on a packet boundary, wait for a full-sized packet
	 * to be processed (1 msec).
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}
static inline void umac_reset(struct bcm_sysport_priv *priv)
{
	u32 reg;

	if (priv->is_lite)
		return;

	reg = umac_readl(priv, UMAC_CMD);
	reg |= CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
	udelay(10);
	reg = umac_readl(priv, UMAC_CMD);
	reg &= ~CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
}
static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
			     unsigned char *addr)
{
	u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
		    addr[3];
	u32 mac1 = (addr[4] << 8) | addr[5];

	if (!priv->is_lite) {
		umac_writel(priv, mac0, UMAC_MAC0);
		umac_writel(priv, mac1, UMAC_MAC1);
	} else {
		gib_writel(priv, mac0, GIB_MAC0);
		gib_writel(priv, mac1, GIB_MAC1);
	}
}
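/* Worked example (editorial): for the address 00:11:22:33:44:55 the packing
 * above yields mac0 = 0x00112233 (first four octets) and mac1 = 0x4455
 * (last two octets), written to UMAC_MAC0/UMAC_MAC1 or GIB_MAC0/GIB_MAC1
 * depending on the hardware variant.
 */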
static void topctrl_flush(struct bcm_sysport_priv *priv)
{
	topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
	mdelay(1);
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);
}
static int bcm_sysport_change_mac(struct net_device *dev, void *p)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	/* interface is disabled, changes to MAC will be reflected on next
	 * open call
	 */
	if (!netif_running(dev))
		return 0;

	umac_set_hw_addr(priv, dev->dev_addr);

	return 0;
}
static void bcm_sysport_get_stats64(struct net_device *dev,
				    struct rtnl_link_stats64 *stats)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct bcm_sysport_tx_ring *ring;
	u64 tx_packets = 0, tx_bytes = 0;
	unsigned int start;
	unsigned int q;

	netdev_stats_to_stats64(stats, &dev->stats);

	for (q = 0; q < dev->num_tx_queues; q++) {
		ring = &priv->tx_rings[q];
		do {
			start = u64_stats_fetch_begin_irq(&priv->syncp);
			tx_bytes = ring->bytes;
			tx_packets = ring->packets;
		} while (u64_stats_fetch_retry_irq(&priv->syncp, start));

		stats->tx_bytes += tx_bytes;
		stats->tx_packets += tx_packets;
	}

	/* lockless update tx_bytes and tx_packets */
	u64_stats_update_begin(&priv->syncp);
	stats64->tx_bytes = stats->tx_bytes;
	stats64->tx_packets = stats->tx_packets;
	u64_stats_update_end(&priv->syncp);

	do {
		start = u64_stats_fetch_begin_irq(&priv->syncp);
		stats->rx_packets = stats64->rx_packets;
		stats->rx_bytes = stats64->rx_bytes;
	} while (u64_stats_fetch_retry_irq(&priv->syncp, start));
}
static void bcm_sysport_netif_start(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* Enable NAPI */
	napi_enable(&priv->napi);

	/* Enable RX interrupt and TX ring full interrupt */
	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);

	phy_start(dev->phydev);

	/* Enable TX interrupts for the TXQs */
	if (!priv->is_lite)
		intrl2_1_mask_clear(priv, 0xffffffff);
	else
		intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);

	/* Last call before we start the real business */
	netif_tx_start_all_queues(dev);
}
static void rbuf_init(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = rbuf_readl(priv, RBUF_CONTROL);
	reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
	/* Set a correct RSB format on SYSTEMPORT Lite */
	if (priv->is_lite) {
		reg &= ~RBUF_RSB_SWAP1;
		reg |= RBUF_RSB_SWAP0;
	}
	rbuf_writel(priv, reg, RBUF_CONTROL);
}
static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv)
{
	intrl2_0_mask_set(priv, 0xffffffff);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	if (!priv->is_lite) {
		intrl2_1_mask_set(priv, 0xffffffff);
		intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	}
}
static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv)
{
	u32 __maybe_unused reg;

	/* Include Broadcom tag in pad extension */
	if (netdev_uses_dsa(priv->netdev)) {
		reg = gib_readl(priv, GIB_CONTROL);
		reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT);
		reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT;
		gib_writel(priv, reg, GIB_CONTROL);
	}
}
static int bcm_sysport_open(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	unsigned int i;
	int ret;

	/* Reset UniMAC */
	umac_reset(priv);

	/* Flush TX and RX FIFOs at TOPCTRL level */
	topctrl_flush(priv);

	/* Disable the UniMAC RX/TX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);

	/* Enable RBUF 2bytes alignment and Receive Status Block */
	rbuf_init(priv);

	/* Set maximum frame length */
	if (!priv->is_lite)
		umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
	else
		gib_set_pad_extension(priv);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	/* Read CRC forward */
	if (!priv->is_lite)
		priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
	else
		priv->crc_fwd = !!(gib_readl(priv, GIB_CONTROL) &
				   GIB_FCS_STRIP);

	phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
				0, priv->phy_interface);
	if (!phydev) {
		netdev_err(dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	/* Reset house keeping link status */
	priv->old_duplex = -1;
	priv->old_link = -1;
	priv->old_pause = -1;

	/* mask all interrupts and request them */
	bcm_sysport_mask_all_intrs(priv);

	ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
	if (ret) {
		netdev_err(dev, "failed to request RX interrupt\n");
		goto out_phy_disconnect;
	}

	if (!priv->is_lite) {
		ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0,
				  dev->name, dev);
		if (ret) {
			netdev_err(dev, "failed to request TX interrupt\n");
			goto out_free_irq0;
		}
	}

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_ring;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	/* Turn on RDMA */
	ret = rdma_enable_set(priv, 1);
	if (ret)
		goto out_free_rx_ring;

	/* Turn on TDMA */
	ret = tdma_enable_set(priv, 1);
	if (ret)
		goto out_clear_rx_int;

	/* Turn on UniMAC TX/RX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);

	bcm_sysport_netif_start(dev);

	return 0;

out_clear_rx_int:
	intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_ring:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	if (!priv->is_lite)
		free_irq(priv->irq1, dev);
out_free_irq0:
	free_irq(priv->irq0, dev);
out_phy_disconnect:
	phy_disconnect(phydev);
	return ret;
}
static void bcm_sysport_netif_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* stop all software from updating hardware */
	netif_tx_stop_all_queues(dev);
	napi_disable(&priv->napi);
	phy_stop(dev->phydev);

	/* mask all interrupts */
	bcm_sysport_mask_all_intrs(priv);
}
static int bcm_sysport_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	bcm_sysport_netif_stop(dev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling TDMA\n");
		return ret;
	}

	/* Wait for a maximum packet size to be drained */
	usleep_range(2000, 3000);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling RDMA\n");
		return ret;
	}

	/* Disable UniMAC TX */
	umac_enable_set(priv, CMD_TX_EN, 0);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	free_irq(priv->irq0, dev);
	if (!priv->is_lite)
		free_irq(priv->irq1, dev);

	/* Disconnect from PHY */
	phy_disconnect(dev->phydev);

	return 0;
}
static const struct ethtool_ops bcm_sysport_ethtool_ops = {
	.get_drvinfo		= bcm_sysport_get_drvinfo,
	.get_msglevel		= bcm_sysport_get_msglvl,
	.set_msglevel		= bcm_sysport_set_msglvl,
	.get_link		= ethtool_op_get_link,
	.get_strings		= bcm_sysport_get_strings,
	.get_ethtool_stats	= bcm_sysport_get_stats,
	.get_sset_count		= bcm_sysport_get_sset_count,
	.get_wol		= bcm_sysport_get_wol,
	.set_wol		= bcm_sysport_set_wol,
	.get_coalesce		= bcm_sysport_get_coalesce,
	.set_coalesce		= bcm_sysport_set_coalesce,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
static const struct net_device_ops bcm_sysport_netdev_ops = {
	.ndo_start_xmit		= bcm_sysport_xmit,
	.ndo_tx_timeout		= bcm_sysport_tx_timeout,
	.ndo_open		= bcm_sysport_open,
	.ndo_stop		= bcm_sysport_stop,
	.ndo_set_features	= bcm_sysport_set_features,
	.ndo_set_rx_mode	= bcm_sysport_set_rx_mode,
	.ndo_set_mac_address	= bcm_sysport_change_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bcm_sysport_poll_controller,
#endif
	.ndo_get_stats64	= bcm_sysport_get_stats64,
};

#define REV_FMT	"v%2x.%02x"
static const struct bcm_sysport_hw_params bcm_sysport_params[] = {
	[SYSTEMPORT] = {
		.is_lite = false,
		.num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS,
	},
	[SYSTEMPORT_LITE] = {
		.is_lite = true,
		.num_rx_desc_words = SP_LT_NUM_HW_RX_DESC_WORDS,
	},
};

static const struct of_device_id bcm_sysport_of_match[] = {
	{ .compatible = "brcm,systemportlite-v1.00",
	  .data = &bcm_sysport_params[SYSTEMPORT_LITE] },
	{ .compatible = "brcm,systemport-v1.00",
	  .data = &bcm_sysport_params[SYSTEMPORT] },
	{ .compatible = "brcm,systemport",
	  .data = &bcm_sysport_params[SYSTEMPORT] },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);
static int bcm_sysport_probe(struct platform_device *pdev)
{
	const struct bcm_sysport_hw_params *params;
	const struct of_device_id *of_id = NULL;
	struct bcm_sysport_priv *priv;
	struct device_node *dn;
	struct net_device *dev;
	const void *macaddr;
	struct resource *r;
	u32 txq, rxq;
	int ret;

	dn = pdev->dev.of_node;
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	of_id = of_match_node(bcm_sysport_of_match, dn);
	if (!of_id || !of_id->data)
		return -EINVAL;

	/* Fairly quickly we need to know the type of adapter we have */
	params = of_id->data;

	/* Read the Transmit/Receive Queue properties */
	if (of_property_read_u32(dn, "systemport,num-txq", &txq))
		txq = TDMA_NUM_RINGS;
	if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
		rxq = 1;

	/* Sanity check the number of transmit queues */
	if (!txq || txq > TDMA_NUM_RINGS)
		return -EINVAL;

	dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
	if (!dev)
		return -ENOMEM;

	/* Initialize private members */
	priv = netdev_priv(dev);

	/* Allocate number of TX rings */
	priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
				      sizeof(struct bcm_sysport_tx_ring),
				      GFP_KERNEL);
	if (!priv->tx_rings)
		return -ENOMEM;

	priv->is_lite = params->is_lite;
	priv->num_rx_desc_words = params->num_rx_desc_words;

	priv->irq0 = platform_get_irq(pdev, 0);
	if (!priv->is_lite) {
		priv->irq1 = platform_get_irq(pdev, 1);
		priv->wol_irq = platform_get_irq(pdev, 2);
	} else {
		priv->wol_irq = platform_get_irq(pdev, 1);
	}
	if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
		dev_err(&pdev->dev, "invalid interrupts\n");
		ret = -EINVAL;
		goto err_free_netdev;
	}

	priv->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto err_free_netdev;
	}

	priv->netdev = dev;
	priv->pdev = pdev;

	priv->phy_interface = of_get_phy_mode(dn);
	/* Default to GMII interface mode */
	if (priv->phy_interface < 0)
		priv->phy_interface = PHY_INTERFACE_MODE_GMII;

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (of_phy_is_fixed_link(dn)) {
		ret = of_phy_register_fixed_link(dn);
		if (ret) {
			dev_err(&pdev->dev, "failed to register fixed PHY\n");
			goto err_free_netdev;
		}

		priv->phy_dn = dn;
	}

	/* Initialize netdevice members */
	macaddr = of_get_mac_address(dn);
	if (!macaddr || !is_valid_ether_addr(macaddr)) {
		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
		eth_hw_addr_random(dev);
	} else {
		ether_addr_copy(dev->dev_addr, macaddr);
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	dev->ethtool_ops = &bcm_sysport_ethtool_ops;
	dev->netdev_ops = &bcm_sysport_netdev_ops;
	netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);

	/* HW supported features, none enabled by default */
	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* Request the WOL interrupt and advertise suspend if available */
	priv->wol_irq_disabled = 1;
	ret = devm_request_irq(&pdev->dev, priv->wol_irq,
			       bcm_sysport_wol_isr, 0, dev->name, priv);
	if (!ret)
		device_set_wakeup_capable(&pdev->dev, 1);

	/* Set the needed headroom once and for all */
	BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
	dev->needed_headroom += sizeof(struct bcm_tsb);

	/* libphy will adjust the link state accordingly */
	netif_carrier_off(dev);

	u64_stats_init(&priv->syncp);

	ret = register_netdev(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register net_device\n");
		goto err_deregister_fixed_link;
	}

	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
	dev_info(&pdev->dev,
		 "Broadcom SYSTEMPORT%s" REV_FMT
		 " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
		 priv->is_lite ? " Lite" : "",
		 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
		 priv->base, priv->irq0, priv->irq1, txq, rxq);

	return 0;

err_deregister_fixed_link:
	if (of_phy_is_fixed_link(dn))
		of_phy_deregister_fixed_link(dn);
err_free_netdev:
	free_netdev(dev);
	return ret;
}
static int bcm_sysport_remove(struct platform_device *pdev)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);
	struct device_node *dn = pdev->dev.of_node;

	/* Not much to do, ndo_close has been called
	 * and we use managed allocations
	 */
	unregister_netdev(dev);
	if (of_phy_is_fixed_link(dn))
		of_phy_deregister_fixed_link(dn);
	free_netdev(dev);
	dev_set_drvdata(&pdev->dev, NULL);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	unsigned int timeout = 1000;
	u32 reg;

	/* Password has already been programmed */
	reg = umac_readl(priv, UMAC_MPD_CTRL);
	reg |= MPD_EN;
	reg &= ~PSW_EN;
	if (priv->wolopts & WAKE_MAGICSECURE)
		reg |= PSW_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	/* Make sure RBUF entered WoL mode as result */
	do {
		reg = rbuf_readl(priv, RBUF_STATUS);
		if (reg & RBUF_WOL_MODE)
			break;

		udelay(10);
	} while (timeout-- > 0);

	/* Do not leave the UniMAC RBUF matching only MPD packets */
	if (!timeout) {
		reg = umac_readl(priv, UMAC_MPD_CTRL);
		reg &= ~MPD_EN;
		umac_writel(priv, reg, UMAC_MPD_CTRL);
		netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
		return -ETIMEDOUT;
	}

	/* UniMAC receive needs to be turned on */
	umac_enable_set(priv, CMD_RX_EN, 1);

	/* Enable the interrupt wake-up source */
	intrl2_0_mask_clear(priv, INTRL2_0_MPD);

	netif_dbg(priv, wol, ndev, "entered WOL mode\n");

	return 0;
}
static int bcm_sysport_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret = 0;
	u32 reg;

	if (!netif_running(dev))
		return 0;

	bcm_sysport_netif_stop(dev);

	phy_suspend(dev->phydev);

	netif_device_detach(dev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "RDMA timeout!\n");
		return ret;
	}

	/* Disable RXCHK if enabled */
	if (priv->rx_chk_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg &= ~RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	/* Flush RX pipe */
	if (!priv->wolopts)
		topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		return ret;
	}

	/* Wait for a packet boundary */
	usleep_range(2000, 3000);

	umac_enable_set(priv, CMD_TX_EN, 0);

	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	/* Get prepared for Wake-on-LAN */
	if (device_may_wakeup(d) && priv->wolopts)
		ret = bcm_sysport_suspend_to_wol(priv);

	return ret;
}
static int bcm_sysport_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg;
	int ret;

	if (!netif_running(dev))
		return 0;

	umac_reset(priv);

	/* We may have been suspended and never received a WOL event that
	 * would turn off MPD detection, take care of that now
	 */
	bcm_sysport_resume_from_wol(priv);

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_rings;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	netif_device_attach(dev);

	/* RX pipe enable */
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);

	ret = rdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "failed to enable RDMA\n");
		goto out_free_rx_ring;
	}

	/* Restore RXCHK if it was enabled */
	if (priv->rx_chk_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg |= RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	rbuf_init(priv);

	/* Set maximum frame length */
	if (!priv->is_lite)
		umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
	else
		gib_set_pad_extension(priv);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	umac_enable_set(priv, CMD_RX_EN, 1);

	/* TX pipe enable */
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);

	umac_enable_set(priv, CMD_TX_EN, 1);

	ret = tdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		goto out_free_rx_ring;
	}

	phy_resume(dev->phydev);

	bcm_sysport_netif_start(dev);

	return 0;

out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_rings:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	return ret;
}
#endif
static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
		bcm_sysport_suspend, bcm_sysport_resume);

static struct platform_driver bcm_sysport_driver = {
	.probe	= bcm_sysport_probe,
	.remove	= bcm_sysport_remove,
	.driver = {
		.name = "brcm-systemport",
		.of_match_table = bcm_sysport_of_match,
		.pm = &bcm_sysport_pm_ops,
	},
};
module_platform_driver(bcm_sysport_driver);
MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
MODULE_ALIAS("platform:brcm-systemport");
MODULE_LICENSE("GPL");