// SPDX-License-Identifier: GPL-2.0-only
/* Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk-provider.h>
#include <linux/crc32.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phylink.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
/* This structure is only used for MACB on SiFive FU540 devices */
struct sifive_fu540_macb_mgmt {
	void __iomem *reg;
	unsigned long rate;
	struct clk_hw hw;
};
#define MACB_RX_BUFFER_SIZE	128
#define RX_BUFFER_MULTIPLE	64 /* bytes */

#define DEFAULT_RX_RING_SIZE	512 /* must be power of 2 */
#define MIN_RX_RING_SIZE	64
#define MAX_RX_RING_SIZE	8192
#define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->rx_ring_size)

#define DEFAULT_TX_RING_SIZE	512 /* must be power of 2 */
#define MIN_TX_RING_SIZE	64
#define MAX_TX_RING_SIZE	4096
#define TX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->tx_ring_size)

/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)
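/* Worked example (added, not in the original source): with the default
 * tx_ring_size of 512, MACB_TX_WAKEUP_THRESH(bp) = 3 * 512 / 4 = 384,
 * i.e. a stopped subqueue is woken once at most 384 descriptors are
 * still in flight (see macb_tx_interrupt() below).
 */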
#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)	\
					| MACB_BIT(ISR_RLE)	\
					| MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)	\
					| MACB_BIT(TXUBR))

/* Max length of transmit frame must be a multiple of 8 bytes */
#define MACB_TX_LEN_ALIGN	8
#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
/* Limit maximum TX length as per Cadence TSO errata. This is to avoid a
 * false amba_error in TX path from the DMA assuming there is not enough
 * space in the SRAM (16KB) even when there is.
 */
#define GEM_MAX_TX_LEN		(unsigned int)(0x3FC0)

#define GEM_MTU_MIN_SIZE	ETH_MIN_MTU
#define MACB_NETIF_LSO		NETIF_F_TSO
#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define MACB_WOL_ENABLED		(0x1 << 1)

/* Graceful stop timeouts in us. We should allow up to
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 */
#define MACB_HALT_TIMEOUT	1230
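/* Sanity check on 1230 us (added example): a maximum-length frame of
 * 1518 bytes takes 1518 * 8 / 10 = ~1214 us on a 10 Mbit/s link, so
 * one full frame time fits within MACB_HALT_TIMEOUT.
 */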
#define MACB_PM_TIMEOUT  100 /* ms */

#define MACB_MDIO_TIMEOUT	1000000 /* in usecs */

/* The DMA buffer descriptor size differs depending on the hardware
 * configuration:
 *
 * 1. dma address width 32 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *
 * 2. dma address width 64 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *
 * 3. dma address width 32 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: timestamp word 1
 *    word 4: timestamp word 2
 *
 * 4. dma address width 64 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *    word 5: timestamp word 1
 *    word 6: timestamp word 2
 */
static unsigned int macb_dma_desc_get_size(struct macb *bp)
{
#ifdef MACB_EXT_DESC
	unsigned int desc_size;

	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64);
		break;
	case HW_DMA_CAP_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	default:
		desc_size = sizeof(struct macb_dma_desc);
	}
	return desc_size;
#endif
	return sizeof(struct macb_dma_desc);
}
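/* Illustrative sizes (added sketch, assuming the usual two-word, 8-byte
 * base descriptor and 8-byte extensions): plain 32-bit DMA -> 8 bytes,
 * 64-bit DMA or PTP -> 16 bytes, 64-bit DMA plus PTP -> 24 bytes,
 * matching layouts 1-4 in the comment above.
 */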
static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx)
{
#ifdef MACB_EXT_DESC
	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
	case HW_DMA_CAP_PTP:
		desc_idx <<= 1;
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_idx *= 3;
		break;
	default:
		break;
	}
#endif
	return desc_idx;
}
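/* Example (added): a 16-byte descriptor occupies two 8-byte ring slots,
 * so logical ring index 3 maps to slot 6 (desc_idx <<= 1); a 24-byte
 * descriptor occupies three slots, mapping index 3 to slot 9.
 */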
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
{
	return (struct macb_dma_desc_64 *)((void *)desc
		+ sizeof(struct macb_dma_desc));
}
#endif

/* Ring buffer accessors */
static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->tx_ring_size - 1);
}
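/* Example (added): ring sizes are powers of two, so masking replaces a
 * modulo; with tx_ring_size 512, index 515 wraps to 515 & 511 = 3.
 */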
static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
					  unsigned int index)
{
	index = macb_tx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->tx_ring[index];
}

static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
				       unsigned int index)
{
	return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
}

static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
{
	dma_addr_t offset;

	offset = macb_tx_ring_wrap(queue->bp, index) *
		 macb_dma_desc_get_size(queue->bp);

	return queue->tx_ring_dma + offset;
}

static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->rx_ring_size - 1);
}

static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index)
{
	index = macb_rx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->rx_ring[index];
}

static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index)
{
	return queue->rx_buffers + queue->bp->rx_buffer_size *
	       macb_rx_ring_wrap(queue->bp, index);
}
static u32 hw_readl_native(struct macb *bp, int offset)
{
	return __raw_readl(bp->regs + offset);
}

static void hw_writel_native(struct macb *bp, int offset, u32 value)
{
	__raw_writel(value, bp->regs + offset);
}

static u32 hw_readl(struct macb *bp, int offset)
{
	return readl_relaxed(bp->regs + offset);
}

static void hw_writel(struct macb *bp, int offset, u32 value)
{
	writel_relaxed(value, bp->regs + offset);
}
/* Find the CPU endianness by using the loopback bit of NCR register. When the
 * CPU is in big endian we need to program swapped mode for management
 * descriptor access.
 */
static bool hw_is_native_io(void __iomem *addr)
{
	u32 value = MACB_BIT(LLB);

	__raw_writel(value, addr + MACB_NCR);
	value = __raw_readl(addr + MACB_NCR);

	/* Write 0 back to disable everything */
	__raw_writel(0, addr + MACB_NCR);

	return value == MACB_BIT(LLB);
}

static bool hw_is_gem(void __iomem *addr, bool native_io)
{
	u32 id;

	if (native_io)
		id = __raw_readl(addr + MACB_MID);
	else
		id = readl_relaxed(addr + MACB_MID);

	return MACB_BFEXT(IDNUM, id) >= 0x2;
}
static void macb_set_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;

	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
	macb_or_gem_writel(bp, SA1B, bottom);
	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
	macb_or_gem_writel(bp, SA1T, top);

	/* Clear unused address register sets */
	macb_or_gem_writel(bp, SA2B, 0);
	macb_or_gem_writel(bp, SA2T, 0);
	macb_or_gem_writel(bp, SA3B, 0);
	macb_or_gem_writel(bp, SA3T, 0);
	macb_or_gem_writel(bp, SA4B, 0);
	macb_or_gem_writel(bp, SA4T, 0);
}

static void macb_get_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;
	u8 addr[6];
	int i;

	/* Check all 4 address registers for a valid address */
	for (i = 0; i < 4; i++) {
		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
		top = macb_or_gem_readl(bp, SA1T + i * 8);

		addr[0] = bottom & 0xff;
		addr[1] = (bottom >> 8) & 0xff;
		addr[2] = (bottom >> 16) & 0xff;
		addr[3] = (bottom >> 24) & 0xff;
		addr[4] = top & 0xff;
		addr[5] = (top >> 8) & 0xff;

		if (is_valid_ether_addr(addr)) {
			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
			return;
		}
	}

	dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
	eth_hw_addr_random(bp->dev);
}
static int macb_mdio_wait_for_idle(struct macb *bp)
{
	u32 val;

	return readx_poll_timeout(MACB_READ_NSR, bp, val, val & MACB_BIT(IDLE),
				  1, MACB_MDIO_TIMEOUT);
}
static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct macb *bp = bus->priv;
	int status;

	status = pm_runtime_get_sync(&bp->pdev->dev);
	if (status < 0) {
		pm_runtime_put_noidle(&bp->pdev->dev);
		goto mdio_pm_exit;
	}

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_read_exit;

	if (regnum & MII_ADDR_C45) {
		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
			    | MACB_BF(RW, MACB_MAN_C45_ADDR)
			    | MACB_BF(PHYA, mii_id)
			    | MACB_BF(REGA, (regnum >> 16) & 0x1F)
			    | MACB_BF(DATA, regnum & 0xFFFF)
			    | MACB_BF(CODE, MACB_MAN_C45_CODE)));

		status = macb_mdio_wait_for_idle(bp);
		if (status < 0)
			goto mdio_read_exit;

		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
			    | MACB_BF(RW, MACB_MAN_C45_READ)
			    | MACB_BF(PHYA, mii_id)
			    | MACB_BF(REGA, (regnum >> 16) & 0x1F)
			    | MACB_BF(CODE, MACB_MAN_C45_CODE)));
	} else {
		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
				| MACB_BF(RW, MACB_MAN_C22_READ)
				| MACB_BF(PHYA, mii_id)
				| MACB_BF(REGA, regnum)
				| MACB_BF(CODE, MACB_MAN_C22_CODE)));
	}

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_read_exit;

	status = MACB_BFEXT(DATA, macb_readl(bp, MAN));

mdio_read_exit:
	pm_runtime_mark_last_busy(&bp->pdev->dev);
	pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
	return status;
}
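/* Added illustration (not part of the driver): how a Clause 22 read
 * frame for the MAN register is composed from the same MACB_BF()
 * helpers used above; "phya" and "rega" are hypothetical names.
 */
static inline u32 __maybe_unused macb_example_c22_read_frame(int phya, int rega)
{
	return MACB_BF(SOF, MACB_MAN_C22_SOF)		/* start-of-frame bits */
	       | MACB_BF(RW, MACB_MAN_C22_READ)		/* read opcode */
	       | MACB_BF(PHYA, phya)			/* PHY address */
	       | MACB_BF(REGA, rega)			/* register address */
	       | MACB_BF(CODE, MACB_MAN_C22_CODE);	/* fixed code bits */
}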
static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct macb *bp = bus->priv;
	int status;

	status = pm_runtime_get_sync(&bp->pdev->dev);
	if (status < 0) {
		pm_runtime_put_noidle(&bp->pdev->dev);
		goto mdio_pm_exit;
	}

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_write_exit;

	if (regnum & MII_ADDR_C45) {
		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
			    | MACB_BF(RW, MACB_MAN_C45_ADDR)
			    | MACB_BF(PHYA, mii_id)
			    | MACB_BF(REGA, (regnum >> 16) & 0x1F)
			    | MACB_BF(DATA, regnum & 0xFFFF)
			    | MACB_BF(CODE, MACB_MAN_C45_CODE)));

		status = macb_mdio_wait_for_idle(bp);
		if (status < 0)
			goto mdio_write_exit;

		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
			    | MACB_BF(RW, MACB_MAN_C45_WRITE)
			    | MACB_BF(PHYA, mii_id)
			    | MACB_BF(REGA, (regnum >> 16) & 0x1F)
			    | MACB_BF(CODE, MACB_MAN_C45_CODE)
			    | MACB_BF(DATA, value)));
	} else {
		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
				| MACB_BF(RW, MACB_MAN_C22_WRITE)
				| MACB_BF(PHYA, mii_id)
				| MACB_BF(REGA, regnum)
				| MACB_BF(CODE, MACB_MAN_C22_CODE)
				| MACB_BF(DATA, value)));
	}

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_write_exit;

mdio_write_exit:
	pm_runtime_mark_last_busy(&bp->pdev->dev);
	pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
	return status;
}
static void macb_init_buffers(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, RBQPH,
				     upper_32_bits(queue->rx_ring_dma));
#endif
		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, TBQPH,
				     upper_32_bits(queue->tx_ring_dma));
#endif
	}
}
/**
 * macb_set_tx_clk() - Set a clock to a new frequency
 * @clk: Pointer to the clock to change
 * @speed: New link speed in Mbit/s, used to pick the frequency in Hz
 * @dev: Pointer to the struct net_device
 */
static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
{
	long ferr, rate, rate_rounded;

	if (!clk)
		return;

	switch (speed) {
	case SPEED_10:
		rate = 2500000;
		break;
	case SPEED_100:
		rate = 25000000;
		break;
	case SPEED_1000:
		rate = 125000000;
		break;
	default:
		return;
	}

	rate_rounded = clk_round_rate(clk, rate);
	if (rate_rounded < 0)
		return;

	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
	 * is not satisfied.
	 */
	ferr = abs(rate_rounded - rate);
	ferr = DIV_ROUND_UP(ferr, rate / 100000);
	if (ferr > 5)
		netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
			    rate);

	if (clk_set_rate(clk, rate_rounded))
		netdev_err(dev, "adjusting tx_clk failed.\n");
}
static void macb_validate(struct phylink_config *config,
			  unsigned long *supported,
			  struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	struct macb *bp = netdev_priv(ndev);

	/* We only support MII, RMII, GMII, RGMII & SGMII. */
	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_RMII &&
	    state->interface != PHY_INTERFACE_MODE_GMII &&
	    state->interface != PHY_INTERFACE_MODE_SGMII &&
	    !phy_interface_mode_is_rgmii(state->interface)) {
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		return;
	}

	if (!macb_is_gem(bp) &&
	    (state->interface == PHY_INTERFACE_MODE_GMII ||
	     phy_interface_mode_is_rgmii(state->interface))) {
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		return;
	}

	phylink_set_port_modes(mask);
	phylink_set(mask, Autoneg);
	phylink_set(mask, Asym_Pause);

	phylink_set(mask, 10baseT_Half);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Half);
	phylink_set(mask, 100baseT_Full);

	if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE &&
	    (state->interface == PHY_INTERFACE_MODE_NA ||
	     state->interface == PHY_INTERFACE_MODE_GMII ||
	     state->interface == PHY_INTERFACE_MODE_SGMII ||
	     phy_interface_mode_is_rgmii(state->interface))) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);

		if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF))
			phylink_set(mask, 1000baseT_Half);
	}

	bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}
static void macb_mac_pcs_get_state(struct phylink_config *config,
				   struct phylink_link_state *state)
{
	state->link = 0;
}

static void macb_mac_an_restart(struct phylink_config *config)
{
	/* Not supported */
}
static void macb_mac_config(struct phylink_config *config, unsigned int mode,
			    const struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct macb *bp = netdev_priv(ndev);
	unsigned long flags;
	u32 old_ctrl, ctrl;

	spin_lock_irqsave(&bp->lock, flags);

	old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR);

	if (bp->caps & MACB_CAPS_MACB_IS_EMAC) {
		if (state->interface == PHY_INTERFACE_MODE_RMII)
			ctrl |= MACB_BIT(RM9200_RMII);
	} else {
		ctrl &= ~(GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL));

		if (state->interface == PHY_INTERFACE_MODE_SGMII)
			ctrl |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
	}

	/* Apply the new configuration, if any */
	if (old_ctrl ^ ctrl)
		macb_or_gem_writel(bp, NCFGR, ctrl);

	spin_unlock_irqrestore(&bp->lock, flags);
}
static void macb_mac_link_down(struct phylink_config *config, unsigned int mode,
			       phy_interface_t interface)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct macb *bp = netdev_priv(ndev);
	struct macb_queue *queue;
	unsigned int q;
	u32 ctrl;

	if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
			queue_writel(queue, IDR,
				     bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));

	/* Disable Rx and Tx */
	ctrl = macb_readl(bp, NCR) & ~(MACB_BIT(RE) | MACB_BIT(TE));
	macb_writel(bp, NCR, ctrl);

	netif_tx_stop_all_queues(ndev);
}
static void macb_mac_link_up(struct phylink_config *config,
			     struct phy_device *phy,
			     unsigned int mode, phy_interface_t interface,
			     int speed, int duplex,
			     bool tx_pause, bool rx_pause)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct macb *bp = netdev_priv(ndev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;
	u32 ctrl;

	spin_lock_irqsave(&bp->lock, flags);

	ctrl = macb_or_gem_readl(bp, NCFGR);

	ctrl &= ~(MACB_BIT(SPD) | MACB_BIT(FD));

	if (speed == SPEED_100)
		ctrl |= MACB_BIT(SPD);

	if (duplex)
		ctrl |= MACB_BIT(FD);

	if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) {
		ctrl &= ~(GEM_BIT(GBE) | MACB_BIT(PAE));

		if (speed == SPEED_1000)
			ctrl |= GEM_BIT(GBE);

		/* We do not support MLO_PAUSE_RX yet */
		if (tx_pause)
			ctrl |= MACB_BIT(PAE);

		macb_set_tx_clk(bp->tx_clk, speed, ndev);

		/* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
		 * cleared the pipeline and control registers.
		 */
		bp->macbgem_ops.mog_init_rings(bp);
		macb_init_buffers(bp);

		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
			queue_writel(queue, IER,
				     bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
	}

	macb_or_gem_writel(bp, NCFGR, ctrl);

	spin_unlock_irqrestore(&bp->lock, flags);

	/* Enable Rx and Tx */
	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));

	netif_tx_wake_all_queues(ndev);
}
static const struct phylink_mac_ops macb_phylink_ops = {
	.validate = macb_validate,
	.mac_pcs_get_state = macb_mac_pcs_get_state,
	.mac_an_restart = macb_mac_an_restart,
	.mac_config = macb_mac_config,
	.mac_link_down = macb_mac_link_down,
	.mac_link_up = macb_mac_link_up,
};
static bool macb_phy_handle_exists(struct device_node *dn)
{
	dn = of_parse_phandle(dn, "phy-handle", 0);
	of_node_put(dn);
	return dn != NULL;
}

static int macb_phylink_connect(struct macb *bp)
{
	struct device_node *dn = bp->pdev->dev.of_node;
	struct net_device *dev = bp->dev;
	struct phy_device *phydev;
	int ret;

	if (dn)
		ret = phylink_of_phy_connect(bp->phylink, dn, 0);

	if (!dn || (ret && !macb_phy_handle_exists(dn))) {
		phydev = phy_find_first(bp->mii_bus);
		if (!phydev) {
			netdev_err(dev, "no PHY found\n");
			return -ENXIO;
		}

		/* attach the mac to the phy */
		ret = phylink_connect_phy(bp->phylink, phydev);
	}

	if (ret) {
		netdev_err(dev, "Could not attach PHY (%d)\n", ret);
		return ret;
	}

	phylink_start(bp->phylink);

	return 0;
}
/* based on au1000_eth.c */
static int macb_mii_probe(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);

	bp->phylink_config.dev = &dev->dev;
	bp->phylink_config.type = PHYLINK_NETDEV;

	bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode,
				     bp->phy_interface, &macb_phylink_ops);
	if (IS_ERR(bp->phylink)) {
		netdev_err(dev, "Could not create a phylink instance (%ld)\n",
			   PTR_ERR(bp->phylink));
		return PTR_ERR(bp->phylink);
	}

	return 0;
}
static int macb_mdiobus_register(struct macb *bp)
{
	struct device_node *child, *np = bp->pdev->dev.of_node;

	if (of_phy_is_fixed_link(np))
		return mdiobus_register(bp->mii_bus);

	/* Only create the PHY from the device tree if at least one PHY is
	 * described. Otherwise scan the entire MDIO bus. We do this to support
	 * old device trees that did not follow the best practices and did not
	 * describe their network PHYs.
	 */
	for_each_available_child_of_node(np, child)
		if (of_mdiobus_child_is_phy(child)) {
			/* The loop increments the child refcount,
			 * decrement it before returning.
			 */
			of_node_put(child);

			return of_mdiobus_register(bp->mii_bus, np);
		}

	return mdiobus_register(bp->mii_bus);
}
static int macb_mii_init(struct macb *bp)
{
	int err = -ENXIO;

	/* Enable management port */
	macb_writel(bp, NCR, MACB_BIT(MPE));

	bp->mii_bus = mdiobus_alloc();
	if (!bp->mii_bus) {
		err = -ENOMEM;
		goto err_out;
	}

	bp->mii_bus->name = "MACB_mii_bus";
	bp->mii_bus->read = &macb_mdio_read;
	bp->mii_bus->write = &macb_mdio_write;
	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 bp->pdev->name, bp->pdev->id);
	bp->mii_bus->priv = bp;
	bp->mii_bus->parent = &bp->pdev->dev;

	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);

	err = macb_mdiobus_register(bp);
	if (err)
		goto err_out_free_mdiobus;

	err = macb_mii_probe(bp->dev);
	if (err)
		goto err_out_unregister_bus;

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(bp->mii_bus);
err_out_free_mdiobus:
	mdiobus_free(bp->mii_bus);
err_out:
	return err;
}
static void macb_update_stats(struct macb *bp)
{
	u32 *p = &bp->hw_stats.macb.rx_pause_frames;
	u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
	int offset = MACB_PFR;

	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);

	for (; p < end; p++, offset += 4)
		*p += bp->macb_reg_readl(bp, offset);
}
static int macb_halt_tx(struct macb *bp)
{
	unsigned long halt_time, timeout;
	u32 status;

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));

	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
	do {
		halt_time = jiffies;
		status = macb_readl(bp, TSR);
		if (!(status & MACB_BIT(TGO)))
			return 0;

		udelay(250);
	} while (time_before(halt_time, timeout));

	return -ETIMEDOUT;
}
static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
{
	if (tx_skb->mapping) {
		if (tx_skb->mapped_as_page)
			dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
				       tx_skb->size, DMA_TO_DEVICE);
		else
			dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
					 tx_skb->size, DMA_TO_DEVICE);
		tx_skb->mapping = 0;
	}

	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
	}
}
static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	struct macb_dma_desc_64 *desc_64;

	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
		desc_64 = macb_64b_desc(bp, desc);
		desc_64->addrh = upper_32_bits(addr);
		/* The low bits of RX address contain the RX_USED bit, clearing
		 * of which allows packet RX. Make sure the high bits are also
		 * visible to HW at that point.
		 */
		dma_wmb();
	}
#endif
	desc->addr = lower_32_bits(addr);
}

static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
{
	dma_addr_t addr = 0;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	struct macb_dma_desc_64 *desc_64;

	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
		desc_64 = macb_64b_desc(bp, desc);
		addr = ((u64)(desc_64->addrh) << 32);
	}
#endif
	addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
	return addr;
}
static void macb_tx_error_task(struct work_struct *work)
{
	struct macb_queue	*queue = container_of(work, struct macb_queue,
						      tx_error_task);
	struct macb		*bp = queue->bp;
	struct macb_tx_skb	*tx_skb;
	struct macb_dma_desc	*desc;
	struct sk_buff		*skb;
	unsigned int		tail;
	unsigned long		flags;

	netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
		    (unsigned int)(queue - bp->queues),
		    queue->tx_tail, queue->tx_head);

	/* Prevent the queue IRQ handlers from running: each of them may call
	 * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
	 * As explained below, we have to halt the transmission before updating
	 * TBQP registers so we call netif_tx_stop_all_queues() to notify the
	 * network engine about the macb/gem being halted.
	 */
	spin_lock_irqsave(&bp->lock, flags);

	/* Make sure nobody is trying to queue up new packets */
	netif_tx_stop_all_queues(bp->dev);

	/* Stop transmission now
	 * (in case we have just queued new packets)
	 * macb/gem must be halted to write TBQP register
	 */
	if (macb_halt_tx(bp))
		/* Just complain for now, reinitializing TX path can be good */
		netdev_err(bp->dev, "BUG: halt tx timed out\n");

	/* Treat frames in TX queue including the ones that caused the error.
	 * Free transmit buffers in upper layer.
	 */
	for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
		u32	ctrl;

		desc = macb_tx_desc(queue, tail);
		ctrl = desc->ctrl;
		tx_skb = macb_tx_skb(queue, tail);
		skb = tx_skb->skb;

		if (ctrl & MACB_BIT(TX_USED)) {
			/* skb is set for the last buffer of the frame */
			while (!skb) {
				macb_tx_unmap(bp, tx_skb);
				tail++;
				tx_skb = macb_tx_skb(queue, tail);
				skb = tx_skb->skb;
			}

			/* ctrl still refers to the first buffer descriptor
			 * since it's the only one written back by the hardware
			 */
			if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
				netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(bp, tail),
					    skb->data);
				bp->dev->stats.tx_packets++;
				queue->stats.tx_packets++;
				bp->dev->stats.tx_bytes += skb->len;
				queue->stats.tx_bytes += skb->len;
			}
		} else {
			/* "Buffers exhausted mid-frame" errors may only happen
			 * if the driver is buggy, so complain loudly about
			 * those. Statistics are updated by hardware.
			 */
			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
				netdev_err(bp->dev,
					   "BUG: TX buffers exhausted mid-frame\n");

			desc->ctrl = ctrl | MACB_BIT(TX_USED);
		}

		macb_tx_unmap(bp, tx_skb);
	}

	/* Set end of TX queue */
	desc = macb_tx_desc(queue, 0);
	macb_set_addr(bp, desc, 0);
	desc->ctrl = MACB_BIT(TX_USED);

	/* Make descriptor updates visible to hardware */
	wmb();

	/* Reinitialize the TX desc queue */
	queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
		queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
#endif
	/* Make TX ring reflect state of hardware */
	queue->tx_head = 0;
	queue->tx_tail = 0;

	/* Housework before enabling TX IRQ */
	macb_writel(bp, TSR, macb_readl(bp, TSR));
	queue_writel(queue, IER, MACB_TX_INT_FLAGS);

	/* Now we are ready to start transmission again */
	netif_tx_start_all_queues(bp->dev);
	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	spin_unlock_irqrestore(&bp->lock, flags);
}
static void macb_tx_interrupt(struct macb_queue *queue)
{
	unsigned int tail;
	unsigned int head;
	u32 status;
	struct macb *bp = queue->bp;
	u16 queue_index = queue - bp->queues;

	status = macb_readl(bp, TSR);
	macb_writel(bp, TSR, status);

	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
		queue_writel(queue, ISR, MACB_BIT(TCOMP));

	netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
		    (unsigned long)status);

	head = queue->tx_head;
	for (tail = queue->tx_tail; tail != head; tail++) {
		struct macb_tx_skb	*tx_skb;
		struct sk_buff		*skb;
		struct macb_dma_desc	*desc;
		u32			ctrl;

		desc = macb_tx_desc(queue, tail);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		ctrl = desc->ctrl;

		/* TX_USED bit is only set by hardware on the very first buffer
		 * descriptor of the transmitted frame.
		 */
		if (!(ctrl & MACB_BIT(TX_USED)))
			break;

		/* Process all buffers of the current transmitted frame */
		for (;; tail++) {
			tx_skb = macb_tx_skb(queue, tail);
			skb = tx_skb->skb;

			/* First, update TX stats if needed */
			if (skb) {
				if (unlikely(skb_shinfo(skb)->tx_flags &
					     SKBTX_HW_TSTAMP) &&
				    gem_ptp_do_txstamp(queue, skb, desc) == 0) {
					/* skb now belongs to timestamp buffer
					 * and will be removed later
					 */
					tx_skb->skb = NULL;
				}
				netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(bp, tail),
					    skb->data);
				bp->dev->stats.tx_packets++;
				queue->stats.tx_packets++;
				bp->dev->stats.tx_bytes += skb->len;
				queue->stats.tx_bytes += skb->len;
			}

			/* Now we can safely release resources */
			macb_tx_unmap(bp, tx_skb);

			/* skb is set only for the last buffer of the frame.
			 * WARNING: at this point skb has been freed by
			 * macb_tx_unmap().
			 */
			if (skb)
				break;
		}
	}

	queue->tx_tail = tail;
	if (__netif_subqueue_stopped(bp->dev, queue_index) &&
	    CIRC_CNT(queue->tx_head, queue->tx_tail,
		     bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
		netif_wake_subqueue(bp->dev, queue_index);
}
static void gem_rx_refill(struct macb_queue *queue)
{
	unsigned int		entry;
	struct sk_buff		*skb;
	dma_addr_t		paddr;
	struct macb *bp = queue->bp;
	struct macb_dma_desc *desc;

	while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
			  bp->rx_ring_size) > 0) {
		entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		queue->rx_prepared_head++;
		desc = macb_rx_desc(queue, entry);

		if (!queue->rx_skbuff[entry]) {
			/* allocate sk_buff for this free entry in ring */
			skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
			if (unlikely(!skb)) {
				netdev_err(bp->dev,
					   "Unable to allocate sk_buff\n");
				break;
			}

			/* now fill corresponding descriptor entry */
			paddr = dma_map_single(&bp->pdev->dev, skb->data,
					       bp->rx_buffer_size,
					       DMA_FROM_DEVICE);
			if (dma_mapping_error(&bp->pdev->dev, paddr)) {
				dev_kfree_skb(skb);
				break;
			}

			queue->rx_skbuff[entry] = skb;

			if (entry == bp->rx_ring_size - 1)
				paddr |= MACB_BIT(RX_WRAP);
			desc->ctrl = 0;
			/* Setting addr clears RX_USED and allows reception,
			 * make sure ctrl is cleared first to avoid a race.
			 */
			dma_wmb();
			macb_set_addr(bp, desc, paddr);

			/* properly align Ethernet header */
			skb_reserve(skb, NET_IP_ALIGN);
		} else {
			desc->ctrl = 0;
			dma_wmb();
			desc->addr &= ~MACB_BIT(RX_USED);
		}
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
		    queue, queue->rx_prepared_head, queue->rx_tail);
}
/* Mark DMA descriptors from begin up to and not including end as unused */
static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
				  unsigned int end)
{
	unsigned int frag;

	for (frag = begin; frag != end; frag++) {
		struct macb_dma_desc *desc = macb_rx_desc(queue, frag);

		desc->addr &= ~MACB_BIT(RX_USED);
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	/* When this happens, the hardware stats registers for
	 * whatever caused this is updated, so we don't have to record
	 * anything.
	 */
}
static int gem_rx(struct macb_queue *queue, struct napi_struct *napi,
		  int budget)
{
	struct macb *bp = queue->bp;
	unsigned int		len;
	unsigned int		entry;
	struct sk_buff		*skb;
	struct macb_dma_desc	*desc;
	int			count = 0;

	while (count < budget) {
		u32 ctrl;
		dma_addr_t addr;
		bool rxused;

		entry = macb_rx_ring_wrap(bp, queue->rx_tail);
		desc = macb_rx_desc(queue, entry);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
		addr = macb_get_addr(bp, desc);

		if (!rxused)
			break;

		/* Ensure ctrl is at least as up-to-date as rxused */
		dma_rmb();

		ctrl = desc->ctrl;

		queue->rx_tail++;
		count++;

		if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
			netdev_err(bp->dev,
				   "not whole frame pointed by descriptor\n");
			bp->dev->stats.rx_dropped++;
			queue->stats.rx_dropped++;
			break;
		}
		skb = queue->rx_skbuff[entry];
		if (unlikely(!skb)) {
			netdev_err(bp->dev,
				   "inconsistent Rx descriptor chain\n");
			bp->dev->stats.rx_dropped++;
			queue->stats.rx_dropped++;
			break;
		}
		/* now everything is ready for receiving packet */
		queue->rx_skbuff[entry] = NULL;
		len = ctrl & bp->rx_frm_len_mask;

		netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);

		skb_put(skb, len);
		dma_unmap_single(&bp->pdev->dev, addr,
				 bp->rx_buffer_size, DMA_FROM_DEVICE);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb_checksum_none_assert(skb);
		if (bp->dev->features & NETIF_F_RXCSUM &&
		    !(bp->dev->flags & IFF_PROMISC) &&
		    GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		bp->dev->stats.rx_packets++;
		queue->stats.rx_packets++;
		bp->dev->stats.rx_bytes += skb->len;
		queue->stats.rx_bytes += skb->len;

		gem_ptp_do_rxstamp(bp, skb, desc);

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
		netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
			    skb->len, skb->csum);
		print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb_mac_header(skb), 16, true);
		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb->data, 32, true);
#endif

		napi_gro_receive(napi, skb);
	}

	gem_rx_refill(queue);

	return count;
}
static int macb_rx_frame(struct macb_queue *queue, struct napi_struct *napi,
			 unsigned int first_frag, unsigned int last_frag)
{
	unsigned int len;
	unsigned int frag;
	unsigned int offset;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;
	struct macb *bp = queue->bp;

	desc = macb_rx_desc(queue, last_frag);
	len = desc->ctrl & bp->rx_frm_len_mask;

	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
		    macb_rx_ring_wrap(bp, first_frag),
		    macb_rx_ring_wrap(bp, last_frag), len);

	/* The ethernet header starts NET_IP_ALIGN bytes into the
	 * first buffer. Since the header is 14 bytes, this makes the
	 * payload word-aligned.
	 *
	 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
	 * the two padding bytes into the skb so that we avoid hitting
	 * the slowpath in memcpy(), and pull them off afterwards.
	 */
	skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
	if (!skb) {
		bp->dev->stats.rx_dropped++;
		for (frag = first_frag; ; frag++) {
			desc = macb_rx_desc(queue, frag);
			desc->addr &= ~MACB_BIT(RX_USED);
			if (frag == last_frag)
				break;
		}

		/* Make descriptor updates visible to hardware */
		wmb();

		return 1;
	}

	offset = 0;
	len += NET_IP_ALIGN;
	skb_checksum_none_assert(skb);
	skb_put(skb, len);

	for (frag = first_frag; ; frag++) {
		unsigned int frag_len = bp->rx_buffer_size;

		if (offset + frag_len > len) {
			if (unlikely(frag != last_frag)) {
				dev_kfree_skb_any(skb);
				return -1;
			}
			frag_len = len - offset;
		}
		skb_copy_to_linear_data_offset(skb, offset,
					       macb_rx_buffer(queue, frag),
					       frag_len);
		offset += bp->rx_buffer_size;
		desc = macb_rx_desc(queue, frag);
		desc->addr &= ~MACB_BIT(RX_USED);

		if (frag == last_frag)
			break;
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	__skb_pull(skb, NET_IP_ALIGN);
	skb->protocol = eth_type_trans(skb, bp->dev);

	bp->dev->stats.rx_packets++;
	bp->dev->stats.rx_bytes += skb->len;
	netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
		    skb->len, skb->csum);
	napi_gro_receive(napi, skb);

	return 0;
}
static inline void macb_init_rx_ring(struct macb_queue *queue)
{
	struct macb *bp = queue->bp;
	dma_addr_t addr;
	struct macb_dma_desc *desc = NULL;
	int i;

	addr = queue->rx_buffers_dma;
	for (i = 0; i < bp->rx_ring_size; i++) {
		desc = macb_rx_desc(queue, i);
		macb_set_addr(bp, desc, addr);
		desc->ctrl = 0;
		addr += bp->rx_buffer_size;
	}
	desc->addr |= MACB_BIT(RX_WRAP);
	queue->rx_tail = 0;
}
static int macb_rx(struct macb_queue *queue, struct napi_struct *napi,
		   int budget)
{
	struct macb *bp = queue->bp;
	bool reset_rx_queue = false;
	int received = 0;
	unsigned int tail;
	int first_frag = -1;

	for (tail = queue->rx_tail; budget > 0; tail++) {
		struct macb_dma_desc *desc = macb_rx_desc(queue, tail);
		u32 ctrl;

		/* Make hw descriptor updates visible to CPU */
		rmb();

		if (!(desc->addr & MACB_BIT(RX_USED)))
			break;

		/* Ensure ctrl is at least as up-to-date as addr */
		dma_rmb();

		ctrl = desc->ctrl;

		if (ctrl & MACB_BIT(RX_SOF)) {
			if (first_frag != -1)
				discard_partial_frame(queue, first_frag, tail);
			first_frag = tail;
		}

		if (ctrl & MACB_BIT(RX_EOF)) {
			int dropped;

			if (unlikely(first_frag == -1)) {
				reset_rx_queue = true;
				continue;
			}

			dropped = macb_rx_frame(queue, napi, first_frag, tail);
			first_frag = -1;
			if (unlikely(dropped < 0)) {
				reset_rx_queue = true;
				continue;
			}
			if (!dropped) {
				received++;
				budget--;
			}
		}
	}

	if (unlikely(reset_rx_queue)) {
		unsigned long flags;
		u32 ctrl;

		netdev_err(bp->dev, "RX queue corruption: reset it\n");

		spin_lock_irqsave(&bp->lock, flags);

		ctrl = macb_readl(bp, NCR);
		macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));

		macb_init_rx_ring(queue);
		queue_writel(queue, RBQP, queue->rx_ring_dma);

		macb_writel(bp, NCR, ctrl | MACB_BIT(RE));

		spin_unlock_irqrestore(&bp->lock, flags);
		return received;
	}

	if (first_frag != -1)
		queue->rx_tail = first_frag;
	else
		queue->rx_tail = tail;

	return received;
}
static int macb_poll(struct napi_struct *napi, int budget)
{
	struct macb_queue *queue = container_of(napi, struct macb_queue, napi);
	struct macb *bp = queue->bp;
	int work_done;
	u32 status;

	status = macb_readl(bp, RSR);
	macb_writel(bp, RSR, status);

	netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
		    (unsigned long)status, budget);

	work_done = bp->macbgem_ops.mog_rx(queue, napi, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		/* Packets received while interrupts were disabled */
		status = macb_readl(bp, RSR);
		if (status) {
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RCOMP));
			napi_reschedule(napi);
		} else {
			queue_writel(queue, IER, bp->rx_intr_mask);
		}
	}

	/* TODO: Handle errors */

	return work_done;
}
static void macb_hresp_error_task(unsigned long data)
{
	struct macb *bp = (struct macb *)data;
	struct net_device *dev = bp->dev;
	struct macb_queue *queue = bp->queues;
	unsigned int q;
	u32 ctrl;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, IDR, bp->rx_intr_mask |
					 MACB_TX_INT_FLAGS |
					 MACB_BIT(HRESP));
	}
	ctrl = macb_readl(bp, NCR);
	ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
	macb_writel(bp, NCR, ctrl);

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);

	bp->macbgem_ops.mog_init_rings(bp);

	/* Initialize TX and RX buffers */
	macb_init_buffers(bp);

	/* Enable interrupts */
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		queue_writel(queue, IER,
			     bp->rx_intr_mask |
			     MACB_TX_INT_FLAGS |
			     MACB_BIT(HRESP));

	ctrl |= MACB_BIT(RE) | MACB_BIT(TE);
	macb_writel(bp, NCR, ctrl);

	netif_carrier_on(dev);
	netif_tx_start_all_queues(dev);
}
static void macb_tx_restart(struct macb_queue *queue)
{
	unsigned int head = queue->tx_head;
	unsigned int tail = queue->tx_tail;
	struct macb *bp = queue->bp;

	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
		queue_writel(queue, ISR, MACB_BIT(TXUBR));

	if (head == tail)
		return;

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
}
static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
	struct macb_queue *queue = dev_id;
	struct macb *bp = queue->bp;
	struct net_device *dev = bp->dev;
	u32 status, ctrl;

	status = queue_readl(queue, ISR);

	if (unlikely(!status))
		return IRQ_NONE;

	spin_lock(&bp->lock);

	while (status) {
		/* close possible race with dev_close */
		if (unlikely(!netif_running(dev))) {
			queue_writel(queue, IDR, -1);
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, -1);
			break;
		}

		netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
			    (unsigned int)(queue - bp->queues),
			    (unsigned long)status);

		if (status & bp->rx_intr_mask) {
			/* There's no point taking any more interrupts
			 * until we have processed the buffers. The
			 * scheduling call may fail if the poll routine
			 * is already scheduled, so disable interrupts
			 * now.
			 */
			queue_writel(queue, IDR, bp->rx_intr_mask);
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RCOMP));

			if (napi_schedule_prep(&queue->napi)) {
				netdev_vdbg(bp->dev, "scheduling RX softirq\n");
				__napi_schedule(&queue->napi);
			}
		}

		if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
			queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
			schedule_work(&queue->tx_error_task);

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);

			break;
		}

		if (status & MACB_BIT(TCOMP))
			macb_tx_interrupt(queue);

		if (status & MACB_BIT(TXUBR))
			macb_tx_restart(queue);

		/* Link change detection isn't possible with RMII, so we'll
		 * add that if/when we get our hands on a full-blown MII PHY.
		 */

		/* There is a hardware issue under heavy load where DMA can
		 * stop, this causes endless "used buffer descriptor read"
		 * interrupts but it can be cleared by re-enabling RX. See
		 * the at91rm9200 manual, section 41.3.1 or the Zynq manual
		 * section 16.7.4 for details. RXUBR is only enabled for
		 * these two versions.
		 */
		if (status & MACB_BIT(RXUBR)) {
			ctrl = macb_readl(bp, NCR);
			macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
			wmb();
			macb_writel(bp, NCR, ctrl | MACB_BIT(RE));

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RXUBR));
		}

		if (status & MACB_BIT(ISR_ROVR)) {
			/* We missed at least one packet */
			if (macb_is_gem(bp))
				bp->hw_stats.gem.rx_overruns++;
			else
				bp->hw_stats.macb.rx_overruns++;

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
		}

		if (status & MACB_BIT(HRESP)) {
			tasklet_schedule(&bp->hresp_err_tasklet);
			netdev_err(dev, "DMA bus error: HRESP not OK\n");

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(HRESP));
		}
		status = queue_readl(queue, ISR);
	}

	spin_unlock(&bp->lock);

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void macb_poll_controller(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;

	local_irq_save(flags);
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		macb_interrupt(dev->irq, queue);
	local_irq_restore(flags);
}
#endif
static unsigned int macb_tx_map(struct macb *bp,
				struct macb_queue *queue,
				struct sk_buff *skb,
				unsigned int hdrlen)
{
	dma_addr_t mapping;
	unsigned int len, entry, i, tx_head = queue->tx_head;
	struct macb_tx_skb *tx_skb = NULL;
	struct macb_dma_desc *desc;
	unsigned int offset, size, count = 0;
	unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int eof = 1, mss_mfs = 0;
	u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;

	/* LSO */
	if (skb_shinfo(skb)->gso_size != 0) {
		if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			/* UDP - UFO */
			lso_ctrl = MACB_LSO_UFO_ENABLE;
		else
			/* TCP - TSO */
			lso_ctrl = MACB_LSO_TSO_ENABLE;
	}

	/* First, map non-paged data */
	len = skb_headlen(skb);

	/* first buffer length */
	size = hdrlen;

	offset = 0;
	while (len) {
		entry = macb_tx_ring_wrap(bp, tx_head);
		tx_skb = &queue->tx_skb[entry];

		mapping = dma_map_single(&bp->pdev->dev,
					 skb->data + offset,
					 size, DMA_TO_DEVICE);
		if (dma_mapping_error(&bp->pdev->dev, mapping))
			goto dma_error;

		/* Save info to properly release resources */
		tx_skb->skb = NULL;
		tx_skb->mapping = mapping;
		tx_skb->size = size;
		tx_skb->mapped_as_page = false;

		len -= size;
		offset += size;
		count++;
		tx_head++;

		size = min(len, bp->max_tx_length);
	}

	/* Then, map paged data from fragments */
	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		len = skb_frag_size(frag);
		offset = 0;
		while (len) {
			size = min(len, bp->max_tx_length);
			entry = macb_tx_ring_wrap(bp, tx_head);
			tx_skb = &queue->tx_skb[entry];

			mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
						   offset, size, DMA_TO_DEVICE);
			if (dma_mapping_error(&bp->pdev->dev, mapping))
				goto dma_error;

			/* Save info to properly release resources */
			tx_skb->skb = NULL;
			tx_skb->mapping = mapping;
			tx_skb->size = size;
			tx_skb->mapped_as_page = true;

			len -= size;
			offset += size;
			count++;
			tx_head++;
		}
	}

	/* Should never happen */
	if (unlikely(!tx_skb)) {
		netdev_err(bp->dev, "BUG! empty skb!\n");
		return 0;
	}

	/* This is the last buffer of the frame: save socket buffer */
	tx_skb->skb = skb;

	/* Update TX ring: update buffer descriptors in reverse order
	 * to avoid race condition
	 */

	/* Set 'TX_USED' bit in buffer descriptor at tx_head position
	 * to set the end of TX queue
	 */
	i = tx_head;
	entry = macb_tx_ring_wrap(bp, i);
	ctrl = MACB_BIT(TX_USED);
	desc = macb_tx_desc(queue, entry);
	desc->ctrl = ctrl;

	if (lso_ctrl) {
		if (lso_ctrl == MACB_LSO_UFO_ENABLE)
			/* include header and FCS in value given to h/w */
			mss_mfs = skb_shinfo(skb)->gso_size +
				  skb_transport_offset(skb) +
				  ETH_FCS_LEN;
		else /* TSO */ {
			mss_mfs = skb_shinfo(skb)->gso_size;
			/* TCP Sequence Number Source Select
			 * can be set only for TSO
			 */
			seq_ctrl = 0;
		}
	}

	do {
		i--;
		entry = macb_tx_ring_wrap(bp, i);
		tx_skb = &queue->tx_skb[entry];
		desc = macb_tx_desc(queue, entry);

		ctrl = (u32)tx_skb->size;
		if (eof) {
			ctrl |= MACB_BIT(TX_LAST);
			eof = 0;
		}
		if (unlikely(entry == (bp->tx_ring_size - 1)))
			ctrl |= MACB_BIT(TX_WRAP);

		/* First descriptor is header descriptor */
		if (i == queue->tx_head) {
			ctrl |= MACB_BF(TX_LSO, lso_ctrl);
			ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
			if ((bp->dev->features & NETIF_F_HW_CSUM) &&
			    skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl)
				ctrl |= MACB_BIT(TX_NOCRC);
		} else
			/* Only set MSS/MFS on payload descriptors
			 * (second or later descriptor)
			 */
			ctrl |= MACB_BF(MSS_MFS, mss_mfs);

		/* Set TX buffer descriptor */
		macb_set_addr(bp, desc, tx_skb->mapping);
		/* desc->addr must be visible to hardware before clearing
		 * 'TX_USED' bit in desc->ctrl.
		 */
		wmb();
		desc->ctrl = ctrl;
	} while (i != queue->tx_head);

	queue->tx_head = tx_head;

	return count;

dma_error:
	netdev_err(bp->dev, "TX DMA map failed\n");

	for (i = queue->tx_head; i != tx_head; i++) {
		tx_skb = macb_tx_skb(queue, i);

		macb_tx_unmap(bp, tx_skb);
	}

	return 0;
}
static netdev_features_t macb_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	unsigned int nr_frags, f;
	unsigned int hdrlen;

	/* Validate LSO compatibility */

	/* there is only one buffer or protocol is not UDP */
	if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP))
		return features;

	/* length of header */
	hdrlen = skb_transport_offset(skb);

	/* For UFO only:
	 * When software supplies two or more payload buffers all payload buffers
	 * apart from the last must be a multiple of 8 bytes in size.
	 */
	if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN))
		return features & ~MACB_NETIF_LSO;

	nr_frags = skb_shinfo(skb)->nr_frags;
	/* No need to check last fragment */
	nr_frags--;
	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN))
			return features & ~MACB_NETIF_LSO;
	}
	return features;
}
static inline int macb_clear_csum(struct sk_buff *skb)
{
	/* no change for packets without checksum offloading */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	/* make sure we can modify the header */
	if (unlikely(skb_cow_head(skb, 0)))
		return -1;

	/* initialize checksum field
	 * This is required - at least for Zynq, which otherwise calculates
	 * wrong UDP header checksums for UDP packets with UDP data len <= 2
	 */
	*(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
	return 0;
}
static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
{
	bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb);
	int padlen = ETH_ZLEN - (*skb)->len;
	int headroom = skb_headroom(*skb);
	int tailroom = skb_tailroom(*skb);
	struct sk_buff *nskb;
	u32 fcs;

	if (!(ndev->features & NETIF_F_HW_CSUM) ||
	    !((*skb)->ip_summed != CHECKSUM_PARTIAL) ||
	    skb_shinfo(*skb)->gso_size)	/* Not available for GSO */
		return 0;

	if (padlen <= 0) {
		/* FCS could be appended to tailroom. */
		if (tailroom >= ETH_FCS_LEN)
			goto add_fcs;
		/* FCS could be appended by moving data to headroom. */
		else if (!cloned && headroom + tailroom >= ETH_FCS_LEN)
			padlen = 0;
		/* No room for FCS, need to reallocate skb. */
		else
			padlen = ETH_FCS_LEN;
	} else {
		/* Add room for FCS. */
		padlen += ETH_FCS_LEN;
	}

	if (!cloned && headroom + tailroom >= padlen) {
		(*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len);
		skb_set_tail_pointer(*skb, (*skb)->len);
	} else {
		nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC);
		if (!nskb)
			return -ENOMEM;

		dev_consume_skb_any(*skb);
		*skb = nskb;
	}

	if (padlen > ETH_FCS_LEN)
		skb_put_zero(*skb, padlen - ETH_FCS_LEN);

add_fcs:
	/* set FCS to packet */
	fcs = crc32_le(~0, (*skb)->data, (*skb)->len);
	fcs = ~fcs;

	skb_put_u8(*skb, fcs & 0xff);
	skb_put_u8(*skb, (fcs >> 8) & 0xff);
	skb_put_u8(*skb, (fcs >> 16) & 0xff);
	skb_put_u8(*skb, (fcs >> 24) & 0xff);

	return 0;
}
static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue = &bp->queues[queue_index];
	unsigned long flags;
	unsigned int desc_cnt, nr_frags, frag_size, f;
	unsigned int hdrlen;
	bool is_lso, is_udp = 0;
	netdev_tx_t ret = NETDEV_TX_OK;

	if (macb_clear_csum(skb)) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	if (macb_pad_and_fcs(&skb, dev)) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	is_lso = (skb_shinfo(skb)->gso_size != 0);

	if (is_lso) {
		is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP);

		/* length of headers */
		if (is_udp)
			/* only queue eth + ip headers separately for UDP */
			hdrlen = skb_transport_offset(skb);
		else
			hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
		if (skb_headlen(skb) < hdrlen) {
			netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
			/* if this is required, would need to copy to single buffer */
			return NETDEV_TX_BUSY;
		}
	} else {
		hdrlen = min(skb_headlen(skb), bp->max_tx_length);
	}

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
	netdev_vdbg(bp->dev,
		    "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
		    queue_index, skb->len, skb->head, skb->data,
		    skb_tail_pointer(skb), skb_end_pointer(skb));
	print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
		       skb->data, 16, true);
#endif

	/* Count how many TX buffer descriptors are needed to send this
	 * socket buffer: skb fragments of jumbo frames may need to be
	 * split into many buffer descriptors.
	 */
	if (is_lso && (skb_headlen(skb) > hdrlen))
		/* extra header descriptor if also payload in first buffer */
		desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1;
	else
		desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++) {
		frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
		desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
	}

	spin_lock_irqsave(&bp->lock, flags);

	/* This is a hard error, log it. */
	if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
		       bp->tx_ring_size) < desc_cnt) {
		netif_stop_subqueue(dev, queue_index);
		spin_unlock_irqrestore(&bp->lock, flags);
		netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
			   queue->tx_head, queue->tx_tail);
		return NETDEV_TX_BUSY;
	}

	/* Map socket buffer for DMA transfer */
	if (!macb_tx_map(bp, queue, skb, hdrlen)) {
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	/* Make newly initialized descriptor visible to hardware */
	wmb();
	skb_tx_timestamp(skb);

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
		netif_stop_subqueue(dev, queue_index);

unlock:
	spin_unlock_irqrestore(&bp->lock, flags);

	return ret;
}
static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
{
	if (!macb_is_gem(bp)) {
		bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
	} else {
		bp->rx_buffer_size = size;

		if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
			netdev_dbg(bp->dev,
				   "RX buffer must be multiple of %d bytes, expanding\n",
				   RX_BUFFER_MULTIPLE);
			bp->rx_buffer_size =
				roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
		}
	}

	netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
		   bp->dev->mtu, bp->rx_buffer_size);
}
static void gem_free_rx_buffers(struct macb *bp)
{
	struct sk_buff *skb;
	struct macb_dma_desc *desc;
	struct macb_queue *queue;
	dma_addr_t addr;
	unsigned int q;
	int i;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		if (!queue->rx_skbuff)
			continue;

		for (i = 0; i < bp->rx_ring_size; i++) {
			skb = queue->rx_skbuff[i];

			if (!skb)
				continue;

			desc = macb_rx_desc(queue, i);
			addr = macb_get_addr(bp, desc);

			dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			skb = NULL;
		}

		kfree(queue->rx_skbuff);
		queue->rx_skbuff = NULL;
	}
}

static void macb_free_rx_buffers(struct macb *bp)
{
	struct macb_queue *queue = &bp->queues[0];

	if (queue->rx_buffers) {
		dma_free_coherent(&bp->pdev->dev,
				  bp->rx_ring_size * bp->rx_buffer_size,
				  queue->rx_buffers, queue->rx_buffers_dma);
		queue->rx_buffers = NULL;
	}
}
static void macb_free_consistent(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	int size;

	bp->macbgem_ops.mog_free_rx_buffers(bp);

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		kfree(queue->tx_skb);
		queue->tx_skb = NULL;
		if (queue->tx_ring) {
			size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
			dma_free_coherent(&bp->pdev->dev, size,
					  queue->tx_ring, queue->tx_ring_dma);
			queue->tx_ring = NULL;
		}
		if (queue->rx_ring) {
			size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
			dma_free_coherent(&bp->pdev->dev, size,
					  queue->rx_ring, queue->rx_ring_dma);
			queue->rx_ring = NULL;
		}
	}
}
static int gem_alloc_rx_buffers(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	int size;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		size = bp->rx_ring_size * sizeof(struct sk_buff *);
		queue->rx_skbuff = kzalloc(size, GFP_KERNEL);
		if (!queue->rx_skbuff)
			return -ENOMEM;
		else
			netdev_dbg(bp->dev,
				   "Allocated %d RX struct sk_buff entries at %p\n",
				   bp->rx_ring_size, queue->rx_skbuff);
	}
	return 0;
}

static int macb_alloc_rx_buffers(struct macb *bp)
{
	struct macb_queue *queue = &bp->queues[0];
	int size;

	size = bp->rx_ring_size * bp->rx_buffer_size;
	queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
					       &queue->rx_buffers_dma, GFP_KERNEL);
	if (!queue->rx_buffers)
		return -ENOMEM;

	netdev_dbg(bp->dev,
		   "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
		   size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers);
	return 0;
}
static int macb_alloc_consistent(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	int size;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
		queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
						    &queue->tx_ring_dma,
						    GFP_KERNEL);
		if (!queue->tx_ring)
			goto out_err;
		netdev_dbg(bp->dev,
			   "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
			   q, size, (unsigned long)queue->tx_ring_dma,
			   queue->tx_ring);

		size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
		queue->tx_skb = kmalloc(size, GFP_KERNEL);
		if (!queue->tx_skb)
			goto out_err;

		size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
		queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
						    &queue->rx_ring_dma, GFP_KERNEL);
		if (!queue->rx_ring)
			goto out_err;
		netdev_dbg(bp->dev,
			   "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
			   size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
	}
	if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
		goto out_err;

	return 0;

out_err:
	macb_free_consistent(bp);
	return -ENOMEM;
}
static void gem_init_rings(struct macb *bp)
{
	struct macb_queue *queue;
	struct macb_dma_desc *desc = NULL;
	unsigned int q;
	int i;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		for (i = 0; i < bp->tx_ring_size; i++) {
			desc = macb_tx_desc(queue, i);
			macb_set_addr(bp, desc, 0);
			desc->ctrl = MACB_BIT(TX_USED);
		}
		desc->ctrl |= MACB_BIT(TX_WRAP);
		queue->tx_head = 0;
		queue->tx_tail = 0;

		queue->rx_tail = 0;
		queue->rx_prepared_head = 0;

		gem_rx_refill(queue);
	}
}

static void macb_init_rings(struct macb *bp)
{
	int i;
	struct macb_dma_desc *desc = NULL;

	macb_init_rx_ring(&bp->queues[0]);

	for (i = 0; i < bp->tx_ring_size; i++) {
		desc = macb_tx_desc(&bp->queues[0], i);
		macb_set_addr(bp, desc, 0);
		desc->ctrl = MACB_BIT(TX_USED);
	}
	bp->queues[0].tx_head = 0;
	bp->queues[0].tx_tail = 0;
	desc->ctrl |= MACB_BIT(TX_WRAP);
}
static void macb_reset_hw(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	u32 ctrl = macb_readl(bp, NCR);

	/* Disable RX and TX (XXX: Should we halt the transmission
	 * more gracefully?)
	 */
	ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));

	/* Clear the stats registers (XXX: Update stats first?) */
	ctrl |= MACB_BIT(CLRSTAT);

	macb_writel(bp, NCR, ctrl);

	/* Clear all status flags */
	macb_writel(bp, TSR, -1);
	macb_writel(bp, RSR, -1);

	/* Disable all interrupts */
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, IDR, -1);
		queue_readl(queue, ISR);
		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
			queue_writel(queue, ISR, -1);
	}
}
static u32 gem_mdc_clk_div(struct macb *bp)
{
	u32 config;
	unsigned long pclk_hz = clk_get_rate(bp->pclk);

	if (pclk_hz <= 20000000)
		config = GEM_BF(CLK, GEM_CLK_DIV8);
	else if (pclk_hz <= 40000000)
		config = GEM_BF(CLK, GEM_CLK_DIV16);
	else if (pclk_hz <= 80000000)
		config = GEM_BF(CLK, GEM_CLK_DIV32);
	else if (pclk_hz <= 120000000)
		config = GEM_BF(CLK, GEM_CLK_DIV48);
	else if (pclk_hz <= 160000000)
		config = GEM_BF(CLK, GEM_CLK_DIV64);
	else
		config = GEM_BF(CLK, GEM_CLK_DIV96);

	return config;
}
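/* Example (added): with pclk at 150 MHz the "<= 160000000" case picks
 * GEM_CLK_DIV64, giving an MDC of 150 / 64 ~= 2.34 MHz, under the
 * 2.5 MHz ceiling for IEEE 802.3 Clause 22 management frames.
 */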
static u32 macb_mdc_clk_div(struct macb *bp)
{
	u32 config;
	unsigned long pclk_hz;

	if (macb_is_gem(bp))
		return gem_mdc_clk_div(bp);

	pclk_hz = clk_get_rate(bp->pclk);
	if (pclk_hz <= 20000000)
		config = MACB_BF(CLK, MACB_CLK_DIV8);
	else if (pclk_hz <= 40000000)
		config = MACB_BF(CLK, MACB_CLK_DIV16);
	else if (pclk_hz <= 80000000)
		config = MACB_BF(CLK, MACB_CLK_DIV32);
	else
		config = MACB_BF(CLK, MACB_CLK_DIV64);

	return config;
}

/* Get the DMA bus width field of the network configuration register that we
 * should program. We find the width from decoding the design configuration
 * register to find the maximum supported data bus width.
 */
static u32 macb_dbw(struct macb *bp)
{
	if (!macb_is_gem(bp))
		return 0;

	switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
	case 4:
		return GEM_BF(DBW, GEM_DBW128);
	case 2:
		return GEM_BF(DBW, GEM_DBW64);
	case 1:
	default:
		return GEM_BF(DBW, GEM_DBW32);
	}
}
/* Configure the receive DMA engine
 * - use the correct receive buffer size
 * - set best burst length for DMA operations
 *   (if not supported by FIFO, it will fallback to default)
 * - set both rx/tx packet buffers to full memory size
 * These are configurable parameters for GEM.
 */
static void macb_configure_dma(struct macb *bp)
{
	struct macb_queue *queue;
	u32 buffer_size;
	unsigned int q;
	u32 dmacfg;

	buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE;
	if (macb_is_gem(bp)) {
		dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
			if (q)
				queue_writel(queue, RBQS, buffer_size);
			else
				dmacfg |= GEM_BF(RXBS, buffer_size);
		}
		if (bp->dma_burst_length)
			dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
		dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
		dmacfg &= ~GEM_BIT(ENDIA_PKT);

		if (bp->native_io)
			dmacfg &= ~GEM_BIT(ENDIA_DESC);
		else
			dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */

		if (bp->dev->features & NETIF_F_HW_CSUM)
			dmacfg |= GEM_BIT(TXCOEN);
		else
			dmacfg &= ~GEM_BIT(TXCOEN);

		dmacfg &= ~GEM_BIT(ADDR64);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			dmacfg |= GEM_BIT(ADDR64);
#endif
#ifdef CONFIG_MACB_USE_HWSTAMP
		if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
			dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT);
#endif
		netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
			   dmacfg);
		gem_writel(bp, DMACFG, dmacfg);
	}
}
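/* Worked example (illustrative): with bp->rx_buffer_size set to 1536 bytes,
 * buffer_size above is 1536 / RX_BUFFER_MULTIPLE = 24; that unit count (in
 * multiples of 64 bytes) is what lands in the RXBS field of DMACFG for
 * queue 0 and in the per-queue RBQS registers for the others.
 */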
static void macb_init_hw(struct macb *bp)
{
	u32 config;

	macb_reset_hw(bp);
	macb_set_hwaddr(bp);

	config = macb_mdc_clk_div(bp);
	config |= MACB_BF(RBOF, NET_IP_ALIGN);	/* Make eth data aligned */
	config |= MACB_BIT(DRFCS);		/* Discard Rx FCS */
	if (bp->caps & MACB_CAPS_JUMBO)
		config |= MACB_BIT(JFRAME);	/* Enable jumbo frames */
	else
		config |= MACB_BIT(BIG);	/* Receive oversized frames */
	if (bp->dev->flags & IFF_PROMISC)
		config |= MACB_BIT(CAF);	/* Copy All Frames */
	else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
		config |= GEM_BIT(RXCOEN);
	if (!(bp->dev->flags & IFF_BROADCAST))
		config |= MACB_BIT(NBC);	/* No BroadCast */
	config |= macb_dbw(bp);
	macb_writel(bp, NCFGR, config);
	if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
		gem_writel(bp, JML, bp->jumbo_max_len);
	bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
	if (bp->caps & MACB_CAPS_JUMBO)
		bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;

	macb_configure_dma(bp);
}
/* The hash address register is 64 bits long and takes up two
 * locations in the memory map. The least significant bits are stored
 * in EMAC_HSL and the most significant bits in EMAC_HSH.
 *
 * The unicast hash enable and the multicast hash enable bits in the
 * network configuration register enable the reception of hash matched
 * frames. The destination address is reduced to a 6 bit index into
 * the 64 bit hash register using the following hash function. The
 * hash function is an exclusive or of every sixth bit of the
 * destination address.
 *
 * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
 * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
 * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
 * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
 * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
 *
 * da[0] represents the least significant bit of the first byte
 * received, that is, the multicast/unicast indicator, and da[47]
 * represents the most significant bit of the last byte received. If
 * the hash index, hi[n], points to a bit that is set in the hash
 * register then the frame will be matched according to whether the
 * frame is multicast or unicast. A multicast match will be signalled
 * if the multicast hash enable bit is set, da[0] is 1 and the hash
 * index points to a bit set in the hash register. A unicast match
 * will be signalled if the unicast hash enable bit is set, da[0] is 0
 * and the hash index points to a bit set in the hash register. To
 * receive all multicast frames, the hash register should be set with
 * all ones and the multicast hash enable bit should be set in the
 * network configuration register.
 */

static inline int hash_bit_value(int bitnr, __u8 *addr)
{
	if (addr[bitnr / 8] & (1 << (bitnr % 8)))
		return 1;
	return 0;
}

/* Return the hash index value for the specified address. */
static int hash_get_index(__u8 *addr)
{
	int i, j, bitval;
	int hash_index = 0;

	for (j = 0; j < 6; j++) {
		for (i = 0, bitval = 0; i < 8; i++)
			bitval ^= hash_bit_value(i * 6 + j, addr);

		hash_index |= (bitval << j);
	}

	return hash_index;
}
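/* Worked example (illustrative): for the multicast address 01:00:5e:00:00:01
 * the set bits are da[0] (first byte 0x01), da[17], da[18], da[19], da[20]
 * and da[22] (third byte 0x5e), and da[40] (last byte 0x01). Folding every
 * sixth bit as above gives hi[5..0] = 100110b, i.e. hash index 38, so the
 * address selects bit 6 of the upper hash register (HRT) below.
 */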
/* Add multicast addresses to the internal multicast-hash table. */
static void macb_sethashtable(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	unsigned long mc_filter[2];
	unsigned int bitnr;
	struct macb *bp = netdev_priv(dev);

	mc_filter[0] = 0;
	mc_filter[1] = 0;

	netdev_for_each_mc_addr(ha, dev) {
		bitnr = hash_get_index(ha->addr);
		mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
	}

	macb_or_gem_writel(bp, HRB, mc_filter[0]);
	macb_or_gem_writel(bp, HRT, mc_filter[1]);
}
/* Enable/Disable promiscuous and multicast modes. */
static void macb_set_rx_mode(struct net_device *dev)
{
	unsigned long cfg;
	struct macb *bp = netdev_priv(dev);

	cfg = macb_readl(bp, NCFGR);

	if (dev->flags & IFF_PROMISC) {
		/* Enable promiscuous mode */
		cfg |= MACB_BIT(CAF);

		/* Disable RX checksum offload */
		if (macb_is_gem(bp))
			cfg &= ~GEM_BIT(RXCOEN);
	} else {
		/* Disable promiscuous mode */
		cfg &= ~MACB_BIT(CAF);

		/* Enable RX checksum offload only if requested */
		if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
			cfg |= GEM_BIT(RXCOEN);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Enable all multicast mode */
		macb_or_gem_writel(bp, HRB, -1);
		macb_or_gem_writel(bp, HRT, -1);
		cfg |= MACB_BIT(NCFGR_MTI);
	} else if (!netdev_mc_empty(dev)) {
		/* Enable specific multicasts */
		macb_sethashtable(dev);
		cfg |= MACB_BIT(NCFGR_MTI);
	} else if (dev->flags & (~IFF_ALLMULTI)) {
		/* Disable all multicast mode */
		macb_or_gem_writel(bp, HRB, 0);
		macb_or_gem_writel(bp, HRT, 0);
		cfg &= ~MACB_BIT(NCFGR_MTI);
	}

	macb_writel(bp, NCFGR, cfg);
}
static int macb_open(struct net_device *dev)
{
	size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned int q;
	int err;

	netdev_dbg(bp->dev, "open\n");

	err = pm_runtime_get_sync(&bp->pdev->dev);
	if (err < 0)
		goto pm_exit;

	/* RX buffers initialization */
	macb_init_rx_buffer_size(bp, bufsz);

	err = macb_alloc_consistent(bp);
	if (err) {
		netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
			   err);
		goto pm_exit;
	}

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		napi_enable(&queue->napi);

	macb_init_hw(bp);

	err = macb_phylink_connect(bp);
	if (err)
		goto pm_exit;

	netif_tx_start_all_queues(dev);

	if (bp->ptp_info)
		bp->ptp_info->ptp_init(dev);

	return 0;

pm_exit:
	pm_runtime_put_sync(&bp->pdev->dev);
	return err;
}
static int macb_close(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;

	netif_tx_stop_all_queues(dev);

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		napi_disable(&queue->napi);

	phylink_stop(bp->phylink);
	phylink_disconnect_phy(bp->phylink);

	spin_lock_irqsave(&bp->lock, flags);
	macb_reset_hw(bp);
	netif_carrier_off(dev);
	spin_unlock_irqrestore(&bp->lock, flags);

	macb_free_consistent(bp);

	if (bp->ptp_info)
		bp->ptp_info->ptp_remove(dev);

	pm_runtime_put(&bp->pdev->dev);

	return 0;
}
static int macb_change_mtu(struct net_device *dev, int new_mtu)
{
	if (netif_running(dev))
		return -EBUSY;

	dev->mtu = new_mtu;

	return 0;
}
static void gem_update_stats(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int i, q, idx;
	unsigned long *stat;

	u32 *p = &bp->hw_stats.gem.tx_octets_31_0;

	for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
		u32 offset = gem_statistics[i].offset;
		u64 val = bp->macb_reg_readl(bp, offset);

		bp->ethtool_stats[i] += val;
		*p += val;

		if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
			/* Add GEM_OCTTXH, GEM_OCTRXH */
			val = bp->macb_reg_readl(bp, offset + 4);
			bp->ethtool_stats[i] += ((u64)val) << 32;
			*(++p) += val;
		}
	}

	idx = GEM_STATS_LEN;
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat)
			bp->ethtool_stats[idx++] = *stat;
}
static struct net_device_stats *gem_get_stats(struct macb *bp)
{
	struct gem_stats *hwstat = &bp->hw_stats.gem;
	struct net_device_stats *nstat = &bp->dev->stats;

	gem_update_stats(bp);

	nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
			    hwstat->rx_alignment_errors +
			    hwstat->rx_resource_errors +
			    hwstat->rx_overruns +
			    hwstat->rx_oversize_frames +
			    hwstat->rx_jabbers +
			    hwstat->rx_undersized_frames +
			    hwstat->rx_length_field_frame_errors);
	nstat->tx_errors = (hwstat->tx_late_collisions +
			    hwstat->tx_excessive_collisions +
			    hwstat->tx_underrun +
			    hwstat->tx_carrier_sense_errors);
	nstat->multicast = hwstat->rx_multicast_frames;
	nstat->collisions = (hwstat->tx_single_collision_frames +
			     hwstat->tx_multiple_collision_frames +
			     hwstat->tx_excessive_collisions);
	nstat->rx_length_errors = (hwstat->rx_oversize_frames +
				   hwstat->rx_jabbers +
				   hwstat->rx_undersized_frames +
				   hwstat->rx_length_field_frame_errors);
	nstat->rx_over_errors = hwstat->rx_resource_errors;
	nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
	nstat->rx_frame_errors = hwstat->rx_alignment_errors;
	nstat->rx_fifo_errors = hwstat->rx_overruns;
	nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
	nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
	nstat->tx_fifo_errors = hwstat->tx_underrun;

	return nstat;
}
static void gem_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct macb *bp;

	bp = netdev_priv(dev);
	gem_update_stats(bp);
	memcpy(data, &bp->ethtool_stats, sizeof(u64)
			* (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
}

static int gem_get_sset_count(struct net_device *dev, int sset)
{
	struct macb *bp = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
{
	char stat_string[ETH_GSTRING_LEN];
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned int i;
	unsigned int q;

	switch (sset) {
	case ETH_SS_STATS:
		for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
			memcpy(p, gem_statistics[i].stat_string,
			       ETH_GSTRING_LEN);

		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
			for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) {
				snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s",
						q, queue_statistics[i].stat_string);
				memcpy(p, stat_string, ETH_GSTRING_LEN);
			}
		}
		break;
	}
}
static struct net_device_stats *macb_get_stats(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &bp->dev->stats;
	struct macb_stats *hwstat = &bp->hw_stats.macb;

	if (macb_is_gem(bp))
		return gem_get_stats(bp);

	/* read stats from hardware */
	macb_update_stats(bp);

	/* Convert HW stats into netdevice stats */
	nstat->rx_errors = (hwstat->rx_fcs_errors +
			    hwstat->rx_align_errors +
			    hwstat->rx_resource_errors +
			    hwstat->rx_overruns +
			    hwstat->rx_oversize_pkts +
			    hwstat->rx_jabbers +
			    hwstat->rx_undersize_pkts +
			    hwstat->rx_length_mismatch);
	nstat->tx_errors = (hwstat->tx_late_cols +
			    hwstat->tx_excessive_cols +
			    hwstat->tx_underruns +
			    hwstat->tx_carrier_errors +
			    hwstat->sqe_test_errors);
	nstat->collisions = (hwstat->tx_single_cols +
			     hwstat->tx_multiple_cols +
			     hwstat->tx_excessive_cols);
	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
				   hwstat->rx_jabbers +
				   hwstat->rx_undersize_pkts +
				   hwstat->rx_length_mismatch);
	nstat->rx_over_errors = hwstat->rx_resource_errors +
				hwstat->rx_overruns;
	nstat->rx_crc_errors = hwstat->rx_fcs_errors;
	nstat->rx_frame_errors = hwstat->rx_align_errors;
	nstat->rx_fifo_errors = hwstat->rx_overruns;
	/* XXX: What does "missed" mean? */
	nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
	nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
	nstat->tx_fifo_errors = hwstat->tx_underruns;
	/* Don't know about heartbeat or window errors... */

	return nstat;
}
static int macb_get_regs_len(struct net_device *netdev)
{
	return MACB_GREGS_NBR * sizeof(u32);
}

static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *p)
{
	struct macb *bp = netdev_priv(dev);
	unsigned int tail, head;
	u32 *regs_buff = p;

	regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
			| MACB_GREGS_VERSION;

	tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail);
	head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head);

	regs_buff[0]  = macb_readl(bp, NCR);
	regs_buff[1]  = macb_or_gem_readl(bp, NCFGR);
	regs_buff[2]  = macb_readl(bp, NSR);
	regs_buff[3]  = macb_readl(bp, TSR);
	regs_buff[4]  = macb_readl(bp, RBQP);
	regs_buff[5]  = macb_readl(bp, TBQP);
	regs_buff[6]  = macb_readl(bp, RSR);
	regs_buff[7]  = macb_readl(bp, IMR);

	regs_buff[8]  = tail;
	regs_buff[9]  = head;
	regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
	regs_buff[11] = macb_tx_dma(&bp->queues[0], head);

	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
		regs_buff[12] = macb_or_gem_readl(bp, USRIO);
	if (macb_is_gem(bp))
		regs_buff[13] = gem_readl(bp, DMACFG);
}
static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct macb *bp = netdev_priv(netdev);

	wol->supported = 0;
	wol->wolopts = 0;

	if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
		phylink_ethtool_get_wol(bp->phylink, wol);
		wol->supported |= WAKE_MAGIC;

		if (bp->wol & MACB_WOL_ENABLED)
			wol->wolopts |= WAKE_MAGIC;
	}
}

static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct macb *bp = netdev_priv(netdev);
	int ret;

	/* Pass the order to phylink layer first */
	ret = phylink_ethtool_set_wol(bp->phylink, wol);
	if (!ret)
		return 0;

	if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
	    (wol->wolopts & ~WAKE_MAGIC))
		return -EOPNOTSUPP;

	if (wol->wolopts & WAKE_MAGIC)
		bp->wol |= MACB_WOL_ENABLED;
	else
		bp->wol &= ~MACB_WOL_ENABLED;

	device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);

	return 0;
}
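/* From user space, magic-packet wake-up is typically toggled with ethtool,
 * e.g. "ethtool -s eth0 wol g" to enable and "ethtool -s eth0 wol d" to
 * disable (the interface name is illustrative); the request arrives here via
 * .set_wol after the PHY, through phylink, has had the first chance to
 * handle it.
 */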
static int macb_get_link_ksettings(struct net_device *netdev,
				   struct ethtool_link_ksettings *kset)
{
	struct macb *bp = netdev_priv(netdev);

	return phylink_ethtool_ksettings_get(bp->phylink, kset);
}

static int macb_set_link_ksettings(struct net_device *netdev,
				   const struct ethtool_link_ksettings *kset)
{
	struct macb *bp = netdev_priv(netdev);

	return phylink_ethtool_ksettings_set(bp->phylink, kset);
}
static void macb_get_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct macb *bp = netdev_priv(netdev);

	ring->rx_max_pending = MAX_RX_RING_SIZE;
	ring->tx_max_pending = MAX_TX_RING_SIZE;

	ring->rx_pending = bp->rx_ring_size;
	ring->tx_pending = bp->tx_ring_size;
}

static int macb_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring)
{
	struct macb *bp = netdev_priv(netdev);
	u32 new_rx_size, new_tx_size;
	unsigned int reset = 0;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_rx_size = clamp_t(u32, ring->rx_pending,
			      MIN_RX_RING_SIZE, MAX_RX_RING_SIZE);
	new_rx_size = roundup_pow_of_two(new_rx_size);

	new_tx_size = clamp_t(u32, ring->tx_pending,
			      MIN_TX_RING_SIZE, MAX_TX_RING_SIZE);
	new_tx_size = roundup_pow_of_two(new_tx_size);

	if ((new_tx_size == bp->tx_ring_size) &&
	    (new_rx_size == bp->rx_ring_size)) {
		/* nothing to do */
		return 0;
	}

	if (netif_running(bp->dev)) {
		reset = 1;
		macb_close(bp->dev);
	}

	bp->rx_ring_size = new_rx_size;
	bp->tx_ring_size = new_tx_size;

	if (reset)
		macb_open(bp->dev);

	return 0;
}
#ifdef CONFIG_MACB_USE_HWSTAMP
static unsigned int gem_get_tsu_rate(struct macb *bp)
{
	struct clk *tsu_clk;
	unsigned int tsu_rate;

	tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk");
	if (!IS_ERR(tsu_clk))
		tsu_rate = clk_get_rate(tsu_clk);
	/* try pclk instead */
	else if (!IS_ERR(bp->pclk)) {
		tsu_clk = bp->pclk;
		tsu_rate = clk_get_rate(tsu_clk);
	} else
		return 0;
	return tsu_rate;
}

static s32 gem_get_ptp_max_adj(void)
{
	return 64000000;
}

static int gem_get_ts_info(struct net_device *dev,
			   struct ethtool_ts_info *info)
{
	struct macb *bp = netdev_priv(dev);

	if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) {
		ethtool_op_get_ts_info(dev, info);
		return 0;
	}

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE |
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types =
		(1 << HWTSTAMP_TX_ONESTEP_SYNC) |
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_ALL);

	info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1;

	return 0;
}
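/* The capabilities reported above can be inspected from user space with
 * "ethtool -T eth0" (interface name illustrative), which lists the
 * SOF_TIMESTAMPING_* flags, the supported tx/rx filters and the PTP hardware
 * clock index returned here.
 */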
static struct macb_ptp_info gem_ptp_info = {
	.ptp_init	 = gem_ptp_init,
	.ptp_remove	 = gem_ptp_remove,
	.get_ptp_max_adj = gem_get_ptp_max_adj,
	.get_tsu_rate	 = gem_get_tsu_rate,
	.get_ts_info	 = gem_get_ts_info,
	.get_hwtst	 = gem_get_hwtst,
	.set_hwtst	 = gem_set_hwtst,
};
#endif

static int macb_get_ts_info(struct net_device *netdev,
			    struct ethtool_ts_info *info)
{
	struct macb *bp = netdev_priv(netdev);

	if (bp->ptp_info)
		return bp->ptp_info->get_ts_info(netdev, info);

	return ethtool_op_get_ts_info(netdev, info);
}
static void gem_enable_flow_filters(struct macb *bp, bool enable)
{
	struct net_device *netdev = bp->dev;
	struct ethtool_rx_fs_item *item;
	u32 t2_scr;
	int num_t2_scr;

	if (!(netdev->features & NETIF_F_NTUPLE))
		return;

	num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8));

	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
		struct ethtool_rx_flow_spec *fs = &item->fs;
		struct ethtool_tcpip4_spec *tp4sp_m;

		if (fs->location >= num_t2_scr)
			continue;

		t2_scr = gem_readl_n(bp, SCRT2, fs->location);

		/* enable/disable screener regs for the flow entry */
		t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr);

		/* only enable fields with no masking */
		tp4sp_m = &(fs->m_u.tcp_ip4_spec);

		if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF))
			t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr);
		else
			t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr);

		if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF))
			t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr);
		else
			t2_scr = GEM_BFINS(CMPBEN, 0, t2_scr);

		if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)))
			t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr);
		else
			t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr);

		gem_writel_n(bp, SCRT2, fs->location, t2_scr);
	}
}
static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m;
	uint16_t index = fs->location;
	u32 w0, w1, t2_scr;
	bool cmp_a = false;
	bool cmp_b = false;
	bool cmp_c = false;

	tp4sp_v = &(fs->h_u.tcp_ip4_spec);
	tp4sp_m = &(fs->m_u.tcp_ip4_spec);

	/* ignore field if any masking set */
	if (tp4sp_m->ip4src == 0xFFFFFFFF) {
		/* 1st compare reg - IP source address */
		w0 = 0;
		w1 = 0;
		w0 = tp4sp_v->ip4src;
		w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
		w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
		w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1);
		gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0);
		gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1);
		cmp_a = true;
	}

	/* ignore field if any masking set */
	if (tp4sp_m->ip4dst == 0xFFFFFFFF) {
		/* 2nd compare reg - IP destination address */
		w0 = 0;
		w1 = 0;
		w0 = tp4sp_v->ip4dst;
		w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
		w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
		w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1);
		gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0);
		gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1);
		cmp_b = true;
	}

	/* ignore both port fields if masking set in both */
	if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) {
		/* 3rd compare reg - source port, destination port */
		w0 = 0;
		w1 = 0;
		w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1);
		if (tp4sp_m->psrc == tp4sp_m->pdst) {
			w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0);
			w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
			w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
			w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
		} else {
			/* only one port definition */
			w1 = GEM_BFINS(T2DISMSK, 0, w1); /* 16-bit compare */
			w0 = GEM_BFINS(T2MASK, 0xFFFF, w0);
			if (tp4sp_m->psrc == 0xFFFF) { /* src port */
				w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0);
				w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
			} else { /* dst port */
				w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
				w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1);
			}
		}
		gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0);
		gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1);
		cmp_c = true;
	}

	t2_scr = 0;
	t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr);
	t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr);
	if (cmp_a)
		t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr);
	if (cmp_b)
		t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr);
	if (cmp_c)
		t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr);
	gem_writel_n(bp, SCRT2, index, t2_scr);
}
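/* Example rule that exercises the compare registers above, steering TCP/IPv4
 * traffic for one destination port to queue 1 (names and numbers
 * illustrative):
 *
 *	ethtool -N eth0 flow-type tcp4 dst-port 5001 action 1 loc 0
 *
 * Only fully-masked fields (0xFFFFFFFF addresses, 0xFFFF ports) are
 * programmed into the type-2 comparers; everything else is ignored by the
 * screener.
 */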
static int gem_add_flow_filter(struct net_device *netdev,
		struct ethtool_rxnfc *cmd)
{
	struct macb *bp = netdev_priv(netdev);
	struct ethtool_rx_flow_spec *fs = &cmd->fs;
	struct ethtool_rx_fs_item *item, *newfs;
	unsigned long flags;
	int ret = -EINVAL;
	bool added = false;

	newfs = kmalloc(sizeof(*newfs), GFP_KERNEL);
	if (newfs == NULL)
		return -ENOMEM;
	memcpy(&newfs->fs, fs, sizeof(newfs->fs));

	netdev_dbg(netdev,
			"Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
			fs->flow_type, (int)fs->ring_cookie, fs->location,
			htonl(fs->h_u.tcp_ip4_spec.ip4src),
			htonl(fs->h_u.tcp_ip4_spec.ip4dst),
			htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst));

	spin_lock_irqsave(&bp->rx_fs_lock, flags);

	/* find correct place to add in list */
	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
		if (item->fs.location > newfs->fs.location) {
			list_add_tail(&newfs->list, &item->list);
			added = true;
			break;
		} else if (item->fs.location == fs->location) {
			netdev_err(netdev, "Rule not added: location %d not free!\n",
					fs->location);
			ret = -EBUSY;
			goto err;
		}
	}
	if (!added)
		list_add_tail(&newfs->list, &bp->rx_fs_list.list);

	gem_prog_cmp_regs(bp, fs);
	bp->rx_fs_list.count++;
	/* enable filtering if NTUPLE on */
	gem_enable_flow_filters(bp, 1);

	spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
	return 0;

err:
	spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
	kfree(newfs);
	return ret;
}
static int gem_del_flow_filter(struct net_device *netdev,
		struct ethtool_rxnfc *cmd)
{
	struct macb *bp = netdev_priv(netdev);
	struct ethtool_rx_fs_item *item;
	struct ethtool_rx_flow_spec *fs;
	unsigned long flags;

	spin_lock_irqsave(&bp->rx_fs_lock, flags);

	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
		if (item->fs.location == cmd->fs.location) {
			/* disable screener regs for the flow entry */
			fs = &(item->fs);
			netdev_dbg(netdev,
					"Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
					fs->flow_type, (int)fs->ring_cookie, fs->location,
					htonl(fs->h_u.tcp_ip4_spec.ip4src),
					htonl(fs->h_u.tcp_ip4_spec.ip4dst),
					htons(fs->h_u.tcp_ip4_spec.psrc),
					htons(fs->h_u.tcp_ip4_spec.pdst));

			gem_writel_n(bp, SCRT2, fs->location, 0);

			list_del(&item->list);
			bp->rx_fs_list.count--;
			spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
			kfree(item);
			return 0;
		}
	}

	spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
	return -EINVAL;
}
static int gem_get_flow_entry(struct net_device *netdev,
		struct ethtool_rxnfc *cmd)
{
	struct macb *bp = netdev_priv(netdev);
	struct ethtool_rx_fs_item *item;

	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
		if (item->fs.location == cmd->fs.location) {
			memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs));
			return 0;
		}
	}
	return -EINVAL;
}

static int gem_get_all_flow_entries(struct net_device *netdev,
		struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct macb *bp = netdev_priv(netdev);
	struct ethtool_rx_fs_item *item;
	uint32_t cnt = 0;

	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
		if (cnt == cmd->rule_cnt)
			return -EMSGSIZE;
		rule_locs[cnt] = item->fs.location;
		cnt++;
	}
	cmd->data = bp->max_tuples;
	cmd->rule_cnt = cnt;

	return 0;
}
static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
		u32 *rule_locs)
{
	struct macb *bp = netdev_priv(netdev);
	int ret = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = bp->num_queues;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = bp->rx_fs_list.count;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = gem_get_flow_entry(netdev, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = gem_get_all_flow_entries(netdev, cmd, rule_locs);
		break;
	default:
		netdev_err(netdev,
			   "Command parameter %d is not supported\n", cmd->cmd);
		ret = -EOPNOTSUPP;
	}

	return ret;
}

static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
	struct macb *bp = netdev_priv(netdev);
	int ret;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		if ((cmd->fs.location >= bp->max_tuples)
				|| (cmd->fs.ring_cookie >= bp->num_queues)) {
			ret = -EINVAL;
			break;
		}
		ret = gem_add_flow_filter(netdev, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = gem_del_flow_filter(netdev, cmd);
		break;
	default:
		netdev_err(netdev,
			   "Command parameter %d is not supported\n", cmd->cmd);
		ret = -EOPNOTSUPP;
	}

	return ret;
}
static const struct ethtool_ops macb_ethtool_ops = {
	.get_regs_len		= macb_get_regs_len,
	.get_regs		= macb_get_regs,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_wol		= macb_get_wol,
	.set_wol		= macb_set_wol,
	.get_link_ksettings	= macb_get_link_ksettings,
	.set_link_ksettings	= macb_set_link_ksettings,
	.get_ringparam		= macb_get_ringparam,
	.set_ringparam		= macb_set_ringparam,
};

static const struct ethtool_ops gem_ethtool_ops = {
	.get_regs_len		= macb_get_regs_len,
	.get_regs		= macb_get_regs,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= macb_get_ts_info,
	.get_ethtool_stats	= gem_get_ethtool_stats,
	.get_strings		= gem_get_ethtool_strings,
	.get_sset_count		= gem_get_sset_count,
	.get_link_ksettings	= macb_get_link_ksettings,
	.set_link_ksettings	= macb_set_link_ksettings,
	.get_ringparam		= macb_get_ringparam,
	.set_ringparam		= macb_set_ringparam,
	.get_rxnfc		= gem_get_rxnfc,
	.set_rxnfc		= gem_set_rxnfc,
};
static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct macb *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (bp->ptp_info) {
		switch (cmd) {
		case SIOCSHWTSTAMP:
			return bp->ptp_info->set_hwtst(dev, rq, cmd);
		case SIOCGHWTSTAMP:
			return bp->ptp_info->get_hwtst(dev, rq);
		}
	}

	return phylink_mii_ioctl(bp->phylink, rq, cmd);
}
static inline void macb_set_txcsum_feature(struct macb *bp,
					   netdev_features_t features)
{
	u32 val;

	if (!macb_is_gem(bp))
		return;

	val = gem_readl(bp, DMACFG);
	if (features & NETIF_F_HW_CSUM)
		val |= GEM_BIT(TXCOEN);
	else
		val &= ~GEM_BIT(TXCOEN);

	gem_writel(bp, DMACFG, val);
}

static inline void macb_set_rxcsum_feature(struct macb *bp,
					   netdev_features_t features)
{
	struct net_device *netdev = bp->dev;
	u32 val;

	if (!macb_is_gem(bp))
		return;

	val = gem_readl(bp, NCFGR);
	if ((features & NETIF_F_RXCSUM) && !(netdev->flags & IFF_PROMISC))
		val |= GEM_BIT(RXCOEN);
	else
		val &= ~GEM_BIT(RXCOEN);

	gem_writel(bp, NCFGR, val);
}

static inline void macb_set_rxflow_feature(struct macb *bp,
					   netdev_features_t features)
{
	if (!macb_is_gem(bp))
		return;

	gem_enable_flow_filters(bp, !!(features & NETIF_F_NTUPLE));
}

static int macb_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct macb *bp = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	/* TX checksum offload */
	if (changed & NETIF_F_HW_CSUM)
		macb_set_txcsum_feature(bp, features);

	/* RX checksum offload */
	if (changed & NETIF_F_RXCSUM)
		macb_set_rxcsum_feature(bp, features);

	/* RX Flow Filters */
	if (changed & NETIF_F_NTUPLE)
		macb_set_rxflow_feature(bp, features);

	return 0;
}
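/* These helpers are reached through .ndo_set_features, e.g. when user space
 * runs "ethtool -K eth0 tx off rx on ntuple on" (interface name
 * illustrative); only the feature bits that actually changed are
 * re-programmed into the hardware.
 */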
static void macb_restore_features(struct macb *bp)
{
	struct net_device *netdev = bp->dev;
	netdev_features_t features = netdev->features;

	/* TX checksum offload */
	macb_set_txcsum_feature(bp, features);

	/* RX checksum offload */
	macb_set_rxcsum_feature(bp, features);

	/* RX Flow Filters */
	macb_set_rxflow_feature(bp, features);
}
static const struct net_device_ops macb_netdev_ops = {
	.ndo_open		= macb_open,
	.ndo_stop		= macb_close,
	.ndo_start_xmit		= macb_start_xmit,
	.ndo_set_rx_mode	= macb_set_rx_mode,
	.ndo_get_stats		= macb_get_stats,
	.ndo_do_ioctl		= macb_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= macb_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= macb_poll_controller,
#endif
	.ndo_set_features	= macb_set_features,
	.ndo_features_check	= macb_features_check,
};
/* Configure peripheral capabilities according to device tree
 * and integration options used
 */
static void macb_configure_caps(struct macb *bp,
				const struct macb_config *dt_conf)
{
	u32 dcfg;

	if (dt_conf)
		bp->caps = dt_conf->caps;

	if (hw_is_gem(bp->regs, bp->native_io)) {
		bp->caps |= MACB_CAPS_MACB_IS_GEM;

		dcfg = gem_readl(bp, DCFG1);
		if (GEM_BFEXT(IRQCOR, dcfg) == 0)
			bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
		dcfg = gem_readl(bp, DCFG2);
		if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
			bp->caps |= MACB_CAPS_FIFO_MODE;
#ifdef CONFIG_MACB_USE_HWSTAMP
		if (gem_has_ptp(bp)) {
			if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5)))
				dev_err(&bp->pdev->dev,
					"GEM doesn't support hardware ptp.\n");
			else {
				bp->hw_dma_cap |= HW_DMA_CAP_PTP;
				bp->ptp_info = &gem_ptp_info;
			}
		}
#endif
	}

	dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
}
static void macb_probe_queues(void __iomem *mem,
			      bool native_io,
			      unsigned int *queue_mask,
			      unsigned int *num_queues)
{
	unsigned int hw_q;

	*queue_mask = 0x1;
	*num_queues = 1;

	/* is it macb or gem ?
	 *
	 * We need to read directly from the hardware here because
	 * we are early in the probe process and don't have the
	 * MACB_CAPS_MACB_IS_GEM flag positioned
	 */
	if (!hw_is_gem(mem, native_io))
		return;

	/* bit 0 is never set but queue 0 always exists */
	*queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff;
	*queue_mask |= 0x1;

	for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
		if (*queue_mask & (1 << hw_q))
			(*num_queues)++;
}
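/* Worked example (illustrative): a GEM whose DCFG6 reads 0x06 in its low
 * byte advertises queues 1 and 2; OR-ing in bit 0 for the always-present
 * queue 0 gives a queue_mask of 0x07 and num_queues = 3.
 */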
static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
			 struct clk **hclk, struct clk **tx_clk,
			 struct clk **rx_clk, struct clk **tsu_clk)
{
	struct macb_platform_data *pdata;
	int err;

	pdata = dev_get_platdata(&pdev->dev);
	if (pdata) {
		*pclk = pdata->pclk;
		*hclk = pdata->hclk;
	} else {
		*pclk = devm_clk_get(&pdev->dev, "pclk");
		*hclk = devm_clk_get(&pdev->dev, "hclk");
	}

	if (IS_ERR_OR_NULL(*pclk)) {
		err = PTR_ERR(*pclk);
		if (!err)
			err = -ENODEV;

		dev_err(&pdev->dev, "failed to get macb_clk (%d)\n", err);
		return err;
	}

	if (IS_ERR_OR_NULL(*hclk)) {
		err = PTR_ERR(*hclk);
		if (!err)
			err = -ENODEV;

		dev_err(&pdev->dev, "failed to get hclk (%d)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk");
	if (IS_ERR(*tx_clk))
		return PTR_ERR(*tx_clk);

	*rx_clk = devm_clk_get_optional(&pdev->dev, "rx_clk");
	if (IS_ERR(*rx_clk))
		return PTR_ERR(*rx_clk);

	*tsu_clk = devm_clk_get_optional(&pdev->dev, "tsu_clk");
	if (IS_ERR(*tsu_clk))
		return PTR_ERR(*tsu_clk);

	err = clk_prepare_enable(*pclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*hclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err);
		goto err_disable_pclk;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_hclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	err = clk_prepare_enable(*tsu_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tsu_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);

err_disable_txclk:
	clk_disable_unprepare(*tx_clk);

err_disable_hclk:
	clk_disable_unprepare(*hclk);

err_disable_pclk:
	clk_disable_unprepare(*pclk);

	return err;
}
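/* A device-tree node for this binding typically names the clocks in the
 * order probed above, for instance (values illustrative):
 *
 *	clocks = <&pclk>, <&hclk>, <&tx_clk>, <&rx_clk>, <&tsu_clk>;
 *	clock-names = "pclk", "hclk", "tx_clk", "rx_clk", "tsu_clk";
 *
 * Only "pclk" and "hclk" are mandatory; the other three are requested with
 * devm_clk_get_optional() and may be absent.
 */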
static int macb_init(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	unsigned int hw_q, q;
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	int err;
	u32 val, reg;

	bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
	bp->rx_ring_size = DEFAULT_RX_RING_SIZE;

	/* set the queue register mapping once for all: queue0 has a special
	 * register mapping but we don't want to test the queue index then
	 * compute the corresponding register offset at run time.
	 */
	for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
		if (!(bp->queue_mask & (1 << hw_q)))
			continue;

		queue = &bp->queues[q];
		queue->bp = bp;
		netif_napi_add(dev, &queue->napi, macb_poll, NAPI_POLL_WEIGHT);
		if (hw_q) {
			queue->ISR  = GEM_ISR(hw_q - 1);
			queue->IER  = GEM_IER(hw_q - 1);
			queue->IDR  = GEM_IDR(hw_q - 1);
			queue->IMR  = GEM_IMR(hw_q - 1);
			queue->TBQP = GEM_TBQP(hw_q - 1);
			queue->RBQP = GEM_RBQP(hw_q - 1);
			queue->RBQS = GEM_RBQS(hw_q - 1);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
			if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
				queue->TBQPH = GEM_TBQPH(hw_q - 1);
				queue->RBQPH = GEM_RBQPH(hw_q - 1);
			}
#endif
		} else {
			/* queue0 uses legacy registers */
			queue->ISR  = MACB_ISR;
			queue->IER  = MACB_IER;
			queue->IDR  = MACB_IDR;
			queue->IMR  = MACB_IMR;
			queue->TBQP = MACB_TBQP;
			queue->RBQP = MACB_RBQP;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
			if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
				queue->TBQPH = MACB_TBQPH;
				queue->RBQPH = MACB_RBQPH;
			}
#endif
		}

		/* get irq: here we use the linux queue index, not the hardware
		 * queue index. the queue irq definitions in the device tree
		 * must remove the optional gaps that could exist in the
		 * hardware queue mask.
		 */
		queue->irq = platform_get_irq(pdev, q);
		err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
				       IRQF_SHARED, dev->name, queue);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to request IRQ %d (error %d)\n",
				queue->irq, err);
			return err;
		}

		INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
		q++;
	}

	dev->netdev_ops = &macb_netdev_ops;

	/* setup appropriated routines according to adapter type */
	if (macb_is_gem(bp)) {
		bp->max_tx_length = GEM_MAX_TX_LEN;
		bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
		bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
		bp->macbgem_ops.mog_init_rings = gem_init_rings;
		bp->macbgem_ops.mog_rx = gem_rx;
		dev->ethtool_ops = &gem_ethtool_ops;
	} else {
		bp->max_tx_length = MACB_MAX_TX_LEN;
		bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
		bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
		bp->macbgem_ops.mog_init_rings = macb_init_rings;
		bp->macbgem_ops.mog_rx = macb_rx;
		dev->ethtool_ops = &macb_ethtool_ops;
	}

	/* Set features */
	dev->hw_features = NETIF_F_SG;

	/* Check LSO capability */
	if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
		dev->hw_features |= MACB_NETIF_LSO;

	/* Checksum offload is only available on gem with packet buffer */
	if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	if (bp->caps & MACB_CAPS_SG_DISABLED)
		dev->hw_features &= ~NETIF_F_SG;
	dev->features = dev->hw_features;

	/* Check RX Flow Filters support.
	 * Max Rx flows set by availability of screeners & compare regs:
	 * each 4-tuple define requires 1 T2 screener reg + 3 compare regs
	 */
	reg = gem_readl(bp, DCFG8);
	bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
			GEM_BFEXT(T2SCR, reg));
	if (bp->max_tuples > 0) {
		/* also needs one ethtype match to check IPv4 */
		if (GEM_BFEXT(SCR2ETH, reg) > 0) {
			/* program this reg now */
			reg = 0;
			reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg);
			gem_writel_n(bp, ETHT, SCRT2_ETHT, reg);
			/* Filtering is supported in hw but don't enable it in kernel now */
			dev->hw_features |= NETIF_F_NTUPLE;
			/* init Rx flow definitions */
			INIT_LIST_HEAD(&bp->rx_fs_list.list);
			bp->rx_fs_list.count = 0;
			spin_lock_init(&bp->rx_fs_lock);
		} else
			bp->max_tuples = 0;
	}

	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
		val = 0;
		if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
			val = GEM_BIT(RGMII);
		else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
			 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
			val = MACB_BIT(RMII);
		else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
			val = MACB_BIT(MII);

		if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
			val |= MACB_BIT(CLKEN);

		macb_or_gem_writel(bp, USRIO, val);
	}

	/* Set MII management clock divider */
	val = macb_mdc_clk_div(bp);
	val |= macb_dbw(bp);
	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
	macb_writel(bp, NCFGR, val);

	return 0;
}
#if defined(CONFIG_OF)
/* 1518 rounded up */
#define AT91ETHER_MAX_RBUFF_SZ	0x600
/* max number of receive buffers */
#define AT91ETHER_MAX_RX_DESCR	9

static struct sifive_fu540_macb_mgmt *mgmt;

/* Initialize and start the Receiver and Transmit subsystems */
static int at91ether_start(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	struct macb_queue *q = &lp->queues[0];
	struct macb_dma_desc *desc;
	dma_addr_t addr;
	u32 ctl;
	int i;

	q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
					(AT91ETHER_MAX_RX_DESCR *
					 macb_dma_desc_get_size(lp)),
					&q->rx_ring_dma, GFP_KERNEL);
	if (!q->rx_ring)
		return -ENOMEM;

	q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
					   AT91ETHER_MAX_RX_DESCR *
					   AT91ETHER_MAX_RBUFF_SZ,
					   &q->rx_buffers_dma, GFP_KERNEL);
	if (!q->rx_buffers) {
		dma_free_coherent(&lp->pdev->dev,
				  AT91ETHER_MAX_RX_DESCR *
				  macb_dma_desc_get_size(lp),
				  q->rx_ring, q->rx_ring_dma);
		q->rx_ring = NULL;
		return -ENOMEM;
	}

	addr = q->rx_buffers_dma;
	for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
		desc = macb_rx_desc(q, i);
		macb_set_addr(lp, desc, addr);
		desc->ctrl = 0;
		addr += AT91ETHER_MAX_RBUFF_SZ;
	}

	/* Set the Wrap bit on the last descriptor */
	desc->addr |= MACB_BIT(RX_WRAP);

	/* Reset buffer index */
	q->rx_tail = 0;

	/* Program address of descriptor list in Rx Buffer Queue register */
	macb_writel(lp, RBQP, q->rx_ring_dma);

	/* Enable Receive and Transmit */
	ctl = macb_readl(lp, NCR);
	macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));

	return 0;
}
/* Open the ethernet interface */
static int at91ether_open(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	u32 ctl;
	int ret;

	ret = pm_runtime_get_sync(&lp->pdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(&lp->pdev->dev);
		return ret;
	}

	/* Clear internal statistics */
	ctl = macb_readl(lp, NCR);
	macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));

	macb_set_hwaddr(lp);

	ret = at91ether_start(dev);
	if (ret)
		return ret;

	/* Enable MAC interrupts */
	macb_writel(lp, IER, MACB_BIT(RCOMP)	|
			     MACB_BIT(RXUBR)	|
			     MACB_BIT(ISR_TUND)	|
			     MACB_BIT(ISR_RLE)	|
			     MACB_BIT(TCOMP)	|
			     MACB_BIT(ISR_ROVR)	|
			     MACB_BIT(HRESP));

	ret = macb_phylink_connect(lp);
	if (ret)
		return ret;

	netif_start_queue(dev);

	return 0;
}
/* Close the interface */
static int at91ether_close(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	struct macb_queue *q = &lp->queues[0];
	u32 ctl;

	/* Disable Receiver and Transmitter */
	ctl = macb_readl(lp, NCR);
	macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));

	/* Disable MAC interrupts */
	macb_writel(lp, IDR, MACB_BIT(RCOMP)	|
			     MACB_BIT(RXUBR)	|
			     MACB_BIT(ISR_TUND)	|
			     MACB_BIT(ISR_RLE)	|
			     MACB_BIT(TCOMP)	|
			     MACB_BIT(ISR_ROVR)	|
			     MACB_BIT(HRESP));

	netif_stop_queue(dev);

	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);

	dma_free_coherent(&lp->pdev->dev,
			  AT91ETHER_MAX_RX_DESCR *
			  macb_dma_desc_get_size(lp),
			  q->rx_ring, q->rx_ring_dma);
	q->rx_ring = NULL;

	dma_free_coherent(&lp->pdev->dev,
			  AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
			  q->rx_buffers, q->rx_buffers_dma);
	q->rx_buffers = NULL;

	return pm_runtime_put(&lp->pdev->dev);
}
/* Transmit packet */
static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);

	if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
		netif_stop_queue(dev);

		/* Store packet information (to free when Tx completed) */
		lp->skb = skb;
		lp->skb_length = skb->len;
		lp->skb_physaddr = dma_map_single(&lp->pdev->dev, skb->data,
						  skb->len, DMA_TO_DEVICE);
		if (dma_mapping_error(&lp->pdev->dev, lp->skb_physaddr)) {
			dev_kfree_skb_any(skb);
			dev->stats.tx_dropped++;
			netdev_err(dev, "%s: DMA mapping error\n", __func__);
			return NETDEV_TX_OK;
		}

		/* Set address of the data in the Transmit Address register */
		macb_writel(lp, TAR, lp->skb_physaddr);
		/* Set length of the packet in the Transmit Control register */
		macb_writel(lp, TCR, skb->len);

	} else {
		netdev_err(dev, "%s called, but device is busy!\n", __func__);
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}
/* Extract received frame from buffer descriptors and sent to upper layers.
 * (Called from interrupt context)
 */
static void at91ether_rx(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	struct macb_queue *q = &lp->queues[0];
	struct macb_dma_desc *desc;
	unsigned char *p_recv;
	struct sk_buff *skb;
	unsigned int pktlen;

	desc = macb_rx_desc(q, q->rx_tail);
	while (desc->addr & MACB_BIT(RX_USED)) {
		p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
		pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
		skb = netdev_alloc_skb(dev, pktlen + 2);
		if (skb) {
			skb_reserve(skb, 2);
			skb_put_data(skb, p_recv, pktlen);

			skb->protocol = eth_type_trans(skb, dev);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pktlen;
			netif_rx(skb);
		} else {
			dev->stats.rx_dropped++;
		}

		if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
			dev->stats.multicast++;

		/* reset ownership bit */
		desc->addr &= ~MACB_BIT(RX_USED);

		/* wrap after last buffer */
		if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
			q->rx_tail = 0;
		else
			q->rx_tail++;

		desc = macb_rx_desc(q, q->rx_tail);
	}
}
/* MAC interrupt handler */
static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct macb *lp = netdev_priv(dev);
	u32 intstatus, ctl;

	/* MAC Interrupt Status register indicates what interrupts are pending.
	 * It is automatically cleared once read.
	 */
	intstatus = macb_readl(lp, ISR);

	/* Receive complete */
	if (intstatus & MACB_BIT(RCOMP))
		at91ether_rx(dev);

	/* Transmit complete */
	if (intstatus & MACB_BIT(TCOMP)) {
		/* The TCOM bit is set even if the transmission failed */
		if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
			dev->stats.tx_errors++;

		if (lp->skb) {
			dev_consume_skb_irq(lp->skb);
			lp->skb = NULL;
			dma_unmap_single(&lp->pdev->dev, lp->skb_physaddr,
					 lp->skb_length, DMA_TO_DEVICE);
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += lp->skb_length;
		}
		netif_wake_queue(dev);
	}

	/* Work-around for EMAC Errata section 41.3.1 */
	if (intstatus & MACB_BIT(RXUBR)) {
		ctl = macb_readl(lp, NCR);
		macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
		wmb();
		macb_writel(lp, NCR, ctl | MACB_BIT(RE));
	}

	if (intstatus & MACB_BIT(ISR_ROVR))
		netdev_err(dev, "ROVR error\n");

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void at91ether_poll_controller(struct net_device *dev)
{
	unsigned long flags;

	local_irq_save(flags);
	at91ether_interrupt(dev->irq, dev);
	local_irq_restore(flags);
}
#endif

static const struct net_device_ops at91ether_netdev_ops = {
	.ndo_open		= at91ether_open,
	.ndo_stop		= at91ether_close,
	.ndo_start_xmit		= at91ether_start_xmit,
	.ndo_get_stats		= macb_get_stats,
	.ndo_set_rx_mode	= macb_set_rx_mode,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_do_ioctl		= macb_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= at91ether_poll_controller,
#endif
};
static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
			      struct clk **hclk, struct clk **tx_clk,
			      struct clk **rx_clk, struct clk **tsu_clk)
{
	int err;

	*hclk = NULL;
	*tx_clk = NULL;
	*rx_clk = NULL;
	*tsu_clk = NULL;

	*pclk = devm_clk_get(&pdev->dev, "ether_clk");
	if (IS_ERR(*pclk))
		return PTR_ERR(*pclk);

	err = clk_prepare_enable(*pclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
		return err;
	}

	return 0;
}
static int at91ether_init(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(dev);
	int err;

	bp->queues[0].bp = bp;

	dev->netdev_ops = &at91ether_netdev_ops;
	dev->ethtool_ops = &macb_ethtool_ops;

	err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
			       0, dev->name, dev);
	if (err)
		return err;

	macb_writel(bp, NCR, 0);

	macb_writel(bp, NCFGR, MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG));

	return 0;
}
static unsigned long fu540_macb_tx_recalc_rate(struct clk_hw *hw,
					       unsigned long parent_rate)
{
	return mgmt->rate;
}

static long fu540_macb_tx_round_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long *parent_rate)
{
	if (WARN_ON(rate < 2500000))
		return 2500000;
	else if (rate == 2500000)
		return 2500000;
	else if (WARN_ON(rate < 13750000))
		return 2500000;
	else if (WARN_ON(rate < 25000000))
		return 25000000;
	else if (rate == 25000000)
		return 25000000;
	else if (WARN_ON(rate < 75000000))
		return 25000000;
	else if (WARN_ON(rate < 125000000))
		return 125000000;
	else if (rate == 125000000)
		return 125000000;

	WARN_ON(rate > 125000000);

	return 125000000;
}

static int fu540_macb_tx_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	rate = fu540_macb_tx_round_rate(hw, rate, &parent_rate);
	if (rate != 125000000)
		iowrite32(1, mgmt->reg);
	else
		iowrite32(0, mgmt->reg);
	mgmt->rate = rate;

	return 0;
}

static const struct clk_ops fu540_c000_ops = {
	.recalc_rate = fu540_macb_tx_recalc_rate,
	.round_rate = fu540_macb_tx_round_rate,
	.set_rate = fu540_macb_tx_set_rate,
};
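/* Worked example (illustrative): the GEMGXL management block only supports
 * 2.5, 25 and 125 MHz TX clock rates, so a requested rate of 50 MHz rounds
 * (with a WARN_ON, since it is not an exact match) to 25 MHz, while exactly
 * 125000000 passes through untouched and makes set_rate write 0 to the
 * management register.
 */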
static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
			       struct clk **hclk, struct clk **tx_clk,
			       struct clk **rx_clk, struct clk **tsu_clk)
{
	struct clk_init_data init;
	int err = 0;

	err = macb_clk_init(pdev, pclk, hclk, tx_clk, rx_clk, tsu_clk);
	if (err)
		return err;

	mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL);
	if (!mgmt)
		return -ENOMEM;

	init.name = "sifive-gemgxl-mgmt";
	init.ops = &fu540_c000_ops;
	init.flags = 0;
	init.num_parents = 0;

	mgmt->rate = 0;
	mgmt->hw.init = &init;

	*tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw);
	if (IS_ERR(*tx_clk))
		return PTR_ERR(*tx_clk);

	err = clk_prepare_enable(*tx_clk);
	if (err)
		dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
	else
		dev_info(&pdev->dev, "Registered clk switch '%s'\n", init.name);

	return 0;
}

static int fu540_c000_init(struct platform_device *pdev)
{
	mgmt->reg = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(mgmt->reg))
		return PTR_ERR(mgmt->reg);

	return macb_init(pdev);
}
static const struct macb_config fu540_c000_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
		MACB_CAPS_GEM_HAS_PTP,
	.dma_burst_length = 16,
	.clk_init = fu540_c000_clk_init,
	.init = fu540_c000_init,
	.jumbo_max_len = 10240,
};

static const struct macb_config at91sam9260_config = {
	.caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d3macb_config = {
	.caps = MACB_CAPS_SG_DISABLED
	      | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config pc302gem_config = {
	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d2_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d3_config = {
	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
	      | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
};

static const struct macb_config sama5d4_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.dma_burst_length = 4,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config emac_config = {
	.caps = MACB_CAPS_NEEDS_RSTONUBR | MACB_CAPS_MACB_IS_EMAC,
	.clk_init = at91ether_clk_init,
	.init = at91ether_init,
};

static const struct macb_config np4_config = {
	.caps = MACB_CAPS_USRIO_DISABLED,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config zynqmp_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
		MACB_CAPS_JUMBO |
		MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
};

static const struct macb_config zynq_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF |
		MACB_CAPS_NEEDS_RSTONUBR,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};
static const struct of_device_id macb_dt_ids[] = {
	{ .compatible = "cdns,at32ap7000-macb" },
	{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
	{ .compatible = "cdns,macb" },
	{ .compatible = "cdns,np4-macb", .data = &np4_config },
	{ .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
	{ .compatible = "cdns,gem", .data = &pc302gem_config },
	{ .compatible = "cdns,sam9x60-macb", .data = &at91sam9260_config },
	{ .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
	{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
	{ .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
	{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
	{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
	{ .compatible = "cdns,emac", .data = &emac_config },
	{ .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config },
	{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
	{ .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, macb_dt_ids);
#endif /* CONFIG_OF */

static const struct macb_config default_gem_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
		MACB_CAPS_JUMBO |
		MACB_CAPS_GEM_HAS_PTP,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
};
4302 static int macb_probe(struct platform_device
*pdev
)
4304 const struct macb_config
*macb_config
= &default_gem_config
;
4305 int (*clk_init
)(struct platform_device
*, struct clk
**,
4306 struct clk
**, struct clk
**, struct clk
**,
4307 struct clk
**) = macb_config
->clk_init
;
4308 int (*init
)(struct platform_device
*) = macb_config
->init
;
4309 struct device_node
*np
= pdev
->dev
.of_node
;
4310 struct clk
*pclk
, *hclk
= NULL
, *tx_clk
= NULL
, *rx_clk
= NULL
;
4311 struct clk
*tsu_clk
= NULL
;
4312 unsigned int queue_mask
, num_queues
;
4314 phy_interface_t interface
;
4315 struct net_device
*dev
;
4316 struct resource
*regs
;
4322 regs
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
4323 mem
= devm_ioremap_resource(&pdev
->dev
, regs
);
4325 return PTR_ERR(mem
);
4328 const struct of_device_id
*match
;
4330 match
= of_match_node(macb_dt_ids
, np
);
4331 if (match
&& match
->data
) {
4332 macb_config
= match
->data
;
4333 clk_init
= macb_config
->clk_init
;
4334 init
= macb_config
->init
;
4338 err
= clk_init(pdev
, &pclk
, &hclk
, &tx_clk
, &rx_clk
, &tsu_clk
);
4342 pm_runtime_set_autosuspend_delay(&pdev
->dev
, MACB_PM_TIMEOUT
);
4343 pm_runtime_use_autosuspend(&pdev
->dev
);
4344 pm_runtime_get_noresume(&pdev
->dev
);
4345 pm_runtime_set_active(&pdev
->dev
);
4346 pm_runtime_enable(&pdev
->dev
);
4347 native_io
= hw_is_native_io(mem
);
4349 macb_probe_queues(mem
, native_io
, &queue_mask
, &num_queues
);
4350 dev
= alloc_etherdev_mq(sizeof(*bp
), num_queues
);
4353 goto err_disable_clocks
;
4356 dev
->base_addr
= regs
->start
;
4358 SET_NETDEV_DEV(dev
, &pdev
->dev
);
4360 bp
= netdev_priv(dev
);
4364 bp
->native_io
= native_io
;
4366 bp
->macb_reg_readl
= hw_readl_native
;
4367 bp
->macb_reg_writel
= hw_writel_native
;
4369 bp
->macb_reg_readl
= hw_readl
;
4370 bp
->macb_reg_writel
= hw_writel
;
4372 bp
->num_queues
= num_queues
;
4373 bp
->queue_mask
= queue_mask
;
4375 bp
->dma_burst_length
= macb_config
->dma_burst_length
;
4378 bp
->tx_clk
= tx_clk
;
4379 bp
->rx_clk
= rx_clk
;
4380 bp
->tsu_clk
= tsu_clk
;
4382 bp
->jumbo_max_len
= macb_config
->jumbo_max_len
;
4385 if (of_get_property(np
, "magic-packet", NULL
))
4386 bp
->wol
|= MACB_WOL_HAS_MAGIC_PACKET
;
4387 device_init_wakeup(&pdev
->dev
, bp
->wol
& MACB_WOL_HAS_MAGIC_PACKET
);
4389 spin_lock_init(&bp
->lock
);
4391 /* setup capabilities */
4392 macb_configure_caps(bp
, macb_config
);
4394 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
4395 if (GEM_BFEXT(DAW64
, gem_readl(bp
, DCFG6
))) {
4396 dma_set_mask(&pdev
->dev
, DMA_BIT_MASK(44));
4397 bp
->hw_dma_cap
|= HW_DMA_CAP_64B
;
4400 platform_set_drvdata(pdev
, dev
);
4402 dev
->irq
= platform_get_irq(pdev
, 0);
4405 goto err_out_free_netdev
;
4408 /* MTU range: 68 - 1500 or 10240 */
4409 dev
->min_mtu
= GEM_MTU_MIN_SIZE
;
4410 if (bp
->caps
& MACB_CAPS_JUMBO
)
4411 dev
->max_mtu
= gem_readl(bp
, JML
) - ETH_HLEN
- ETH_FCS_LEN
;
4413 dev
->max_mtu
= ETH_DATA_LEN
;
4415 if (bp
->caps
& MACB_CAPS_BD_RD_PREFETCH
) {
4416 val
= GEM_BFEXT(RXBD_RDBUFF
, gem_readl(bp
, DCFG10
));
4418 bp
->rx_bd_rd_prefetch
= (2 << (val
- 1)) *
4419 macb_dma_desc_get_size(bp
);
4421 val
= GEM_BFEXT(TXBD_RDBUFF
, gem_readl(bp
, DCFG10
));
4423 bp
->tx_bd_rd_prefetch
= (2 << (val
- 1)) *
4424 macb_dma_desc_get_size(bp
);
4427 bp
->rx_intr_mask
= MACB_RX_INT_FLAGS
;
4428 if (bp
->caps
& MACB_CAPS_NEEDS_RSTONUBR
)
4429 bp
->rx_intr_mask
|= MACB_BIT(RXUBR
);
4431 mac
= of_get_mac_address(np
);
4432 if (PTR_ERR(mac
) == -EPROBE_DEFER
) {
4433 err
= -EPROBE_DEFER
;
4434 goto err_out_free_netdev
;
4435 } else if (!IS_ERR_OR_NULL(mac
)) {
4436 ether_addr_copy(bp
->dev
->dev_addr
, mac
);
4438 macb_get_hwaddr(bp
);
4441 err
= of_get_phy_mode(np
, &interface
);
4443 /* not found in DT, MII by default */
4444 bp
->phy_interface
= PHY_INTERFACE_MODE_MII
;
4446 bp
->phy_interface
= interface
;
4448 /* IP specific init */
4451 goto err_out_free_netdev
;
4453 err
= macb_mii_init(bp
);
4455 goto err_out_free_netdev
;
4457 netif_carrier_off(dev
);
4459 err
= register_netdev(dev
);
4461 dev_err(&pdev
->dev
, "Cannot register net device, aborting.\n");
4462 goto err_out_unregister_mdio
;
4465 tasklet_init(&bp
->hresp_err_tasklet
, macb_hresp_error_task
,
4468 netdev_info(dev
, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
4469 macb_is_gem(bp
) ? "GEM" : "MACB", macb_readl(bp
, MID
),
4470 dev
->base_addr
, dev
->irq
, dev
->dev_addr
);
4472 pm_runtime_mark_last_busy(&bp
->pdev
->dev
);
4473 pm_runtime_put_autosuspend(&bp
->pdev
->dev
);
err_out_unregister_mdio:
	mdiobus_unregister(bp->mii_bus);
	mdiobus_free(bp->mii_bus);

err_out_free_netdev:
	free_netdev(dev);

err_disable_clocks:
	clk_disable_unprepare(tx_clk);
	clk_disable_unprepare(hclk);
	clk_disable_unprepare(pclk);
	clk_disable_unprepare(rx_clk);
	clk_disable_unprepare(tsu_clk);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);

	return err;
}

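/* Unwind macb_probe(): unregister the MDIO bus and net device, stop the
 * HRESP error tasklet, and release the clocks unless runtime PM has
 * already gated them.
 */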
static int macb_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct macb *bp;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		mdiobus_unregister(bp->mii_bus);
		mdiobus_free(bp->mii_bus);

		unregister_netdev(dev);
		tasklet_kill(&bp->hresp_err_tasklet);
		pm_runtime_disable(&pdev->dev);
		pm_runtime_dont_use_autosuspend(&pdev->dev);
		if (!pm_runtime_suspended(&pdev->dev)) {
			clk_disable_unprepare(bp->tx_clk);
			clk_disable_unprepare(bp->hclk);
			clk_disable_unprepare(bp->pclk);
			clk_disable_unprepare(bp->rx_clk);
			clk_disable_unprepare(bp->tsu_clk);
			pm_runtime_set_suspended(&pdev->dev);
		}
		phylink_destroy(bp->phylink);
		free_netdev(dev);
	}

	return 0;
}

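/* System suspend: if Wake-on-LAN is armed, keep the MAC powered and
 * enable the WOL interrupt for magic packets; otherwise quiesce NAPI
 * and phylink, reset the MAC, and save state (USRIO, RX flow steering)
 * that may be lost while suspended.
 */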
static int __maybe_unused macb_suspend(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);
	struct macb_queue *queue = bp->queues;
	unsigned long flags;
	unsigned int q;

	if (!netif_running(netdev))
		return 0;

	if (bp->wol & MACB_WOL_ENABLED) {
		macb_writel(bp, IER, MACB_BIT(WOL));
		macb_writel(bp, WOL, MACB_BIT(MAG));
		enable_irq_wake(bp->queues[0].irq);
		netif_device_detach(netdev);
	} else {
		netif_device_detach(netdev);
		for (q = 0, queue = bp->queues; q < bp->num_queues;
		     ++q, ++queue)
			napi_disable(&queue->napi);
		rtnl_lock();
		phylink_stop(bp->phylink);
		rtnl_unlock();
		spin_lock_irqsave(&bp->lock, flags);
		macb_reset_hw(bp);
		spin_unlock_irqrestore(&bp->lock, flags);

		if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
			bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO);

		if (netdev->hw_features & NETIF_F_NTUPLE)
			bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT);
	}

	netif_carrier_off(netdev);
	if (bp->ptp_info)
		bp->ptp_info->ptp_remove(netdev);
	pm_runtime_force_suspend(dev);

	return 0;
}

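/* System resume: the mirror of macb_suspend(): either disarm the WOL
 * wake source, or restore the saved state and restart NAPI, phylink
 * and the hardware.
 */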
static int __maybe_unused macb_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);
	struct macb_queue *queue = bp->queues;
	unsigned int q;

	if (!netif_running(netdev))
		return 0;

	pm_runtime_force_resume(dev);

	if (bp->wol & MACB_WOL_ENABLED) {
		macb_writel(bp, IDR, MACB_BIT(WOL));
		macb_writel(bp, WOL, 0);
		disable_irq_wake(bp->queues[0].irq);
	} else {
		macb_writel(bp, NCR, MACB_BIT(MPE));

		if (netdev->hw_features & NETIF_F_NTUPLE)
			gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2);

		if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
			macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio);

		for (q = 0, queue = bp->queues; q < bp->num_queues;
		     ++q, ++queue)
			napi_enable(&queue->napi);
		rtnl_lock();
		phylink_start(bp->phylink);
		rtnl_unlock();
	}

	macb_init_hw(bp);
	macb_set_rx_mode(netdev);
	macb_restore_features(bp);
	netif_device_attach(netdev);
	if (bp->ptp_info)
		bp->ptp_info->ptp_init(netdev);

	return 0;
}

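/* Runtime PM: gate the bus and packet clocks while the interface is
 * idle. The TSU clock is stopped unconditionally; the remaining clocks
 * must stay up when the device is armed as a wakeup source.
 */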
static int __maybe_unused macb_runtime_suspend(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);

	/* Wakeup capability is configured on the platform device, not on
	 * the net_device's embedded struct device, so test "dev" here.
	 */
	if (!(device_may_wakeup(dev))) {
		clk_disable_unprepare(bp->tx_clk);
		clk_disable_unprepare(bp->hclk);
		clk_disable_unprepare(bp->pclk);
		clk_disable_unprepare(bp->rx_clk);
	}
	clk_disable_unprepare(bp->tsu_clk);

	return 0;
}

static int __maybe_unused macb_runtime_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);

	if (!(device_may_wakeup(dev))) {
		clk_prepare_enable(bp->pclk);
		clk_prepare_enable(bp->hclk);
		clk_prepare_enable(bp->tx_clk);
		clk_prepare_enable(bp->rx_clk);
	}
	clk_prepare_enable(bp->tsu_clk);

	return 0;
}

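/* Wire both the system sleep and runtime PM callbacks into a single
 * dev_pm_ops instance for the platform driver below.
 */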
static const struct dev_pm_ops macb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(macb_suspend, macb_resume)
	SET_RUNTIME_PM_OPS(macb_runtime_suspend, macb_runtime_resume, NULL)
};

static struct platform_driver macb_driver = {
	.probe		= macb_probe,
	.remove		= macb_remove,
	.driver		= {
		.name		= "macb",
		.of_match_table	= of_match_ptr(macb_dt_ids),
		.pm		= &macb_pm_ops,
	},
};

module_platform_driver(macb_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");