/* Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/crc32.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
#include "macb.h"
#define MACB_RX_BUFFER_SIZE	128
#define RX_BUFFER_MULTIPLE	64	/* bytes */

#define DEFAULT_RX_RING_SIZE	512	/* must be power of 2 */
#define MIN_RX_RING_SIZE	64
#define MAX_RX_RING_SIZE	8192
#define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->rx_ring_size)

#define DEFAULT_TX_RING_SIZE	512	/* must be power of 2 */
#define MIN_TX_RING_SIZE	64
#define MAX_TX_RING_SIZE	4096
#define TX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->tx_ring_size)

/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)
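/* Worked example of the threshold: with the default tx_ring_size of 512, a
 * stopped queue is woken again once no more than 3 * 512 / 4 = 384
 * descriptors remain in use, i.e. once at least a quarter of the ring has
 * drained.
 */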
#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
					| MACB_BIT(ISR_RLE)		\
					| MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)	\
					| MACB_BIT(TXUBR))
/* Max length of transmit frame must be a multiple of 8 bytes */
#define MACB_TX_LEN_ALIGN	8
#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
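/* For illustration, assuming the frame-length field widths defined in macb.h
 * (11 bits for MACB, 14 bits for GEM): MACB_MAX_TX_LEN = 2047 & ~7 = 2040
 * bytes and GEM_MAX_TX_LEN = 16383 & ~7 = 16376 bytes, i.e. the largest
 * per-descriptor buffer lengths rounded down to the 8 byte alignment.
 */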
#define GEM_MTU_MIN_SIZE	ETH_MIN_MTU
#define MACB_NETIF_LSO		NETIF_F_TSO

#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define MACB_WOL_ENABLED		(0x1 << 1)
/* Graceful stop timeouts in us. We should allow up to
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 */
#define MACB_HALT_TIMEOUT	1230
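/* Sanity check of the 1230 us figure: a maximum-length frame of 1518 bytes
 * is 12144 bits, which takes about 1214 us to drain at 10 Mbit/s, so the
 * timeout covers slightly more than one worst-case frame time.
 */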
#define MACB_PM_TIMEOUT		100 /* ms */

#define MACB_MDIO_TIMEOUT	1000000 /* in usecs */
/* A DMA buffer descriptor may differ in size depending on the hardware
 * configuration:
 *
 * 1. dma address width 32 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *
 * 2. dma address width 64 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *
 * 3. dma address width 32 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: timestamp word 1
 *    word 4: timestamp word 2
 *
 * 4. dma address width 64 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *    word 5: timestamp word 1
 *    word 6: timestamp word 2
 */
static unsigned int macb_dma_desc_get_size(struct macb *bp)
{
#ifdef MACB_EXT_DESC
	unsigned int desc_size;

	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64);
		break;
	case HW_DMA_CAP_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	default:
		desc_size = sizeof(struct macb_dma_desc);
	}
	return desc_size;
#endif
	return sizeof(struct macb_dma_desc);
}
static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx)
{
#ifdef MACB_EXT_DESC
	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
	case HW_DMA_CAP_PTP:
		desc_idx <<= 1;
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_idx *= 3;
		break;
	default:
		break;
	}
#endif
	return desc_idx;
}
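/* Example of the index adjustment: with HW_DMA_CAP_64B the descriptor is two
 * base-descriptor sizes long, so logical ring index i lands at hardware slot
 * 2 * i; with HW_DMA_CAP_64B_PTP it is three base sizes long, hence the
 * multiplication by 3.
 */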
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
{
	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
		return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
	return NULL;
}
#endif
/* Ring buffer accessors */
static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->tx_ring_size - 1);
}

static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
					  unsigned int index)
{
	index = macb_tx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->tx_ring[index];
}

static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
				       unsigned int index)
{
	return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
}

static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
{
	dma_addr_t offset;

	offset = macb_tx_ring_wrap(queue->bp, index) *
			macb_dma_desc_get_size(queue->bp);

	return queue->tx_ring_dma + offset;
}

static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->rx_ring_size - 1);
}

static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index)
{
	index = macb_rx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->rx_ring[index];
}

static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index)
{
	return queue->rx_buffers + queue->bp->rx_buffer_size *
	       macb_rx_ring_wrap(queue->bp, index);
}
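/* Because the ring sizes are powers of two, "index & (size - 1)" is a cheap
 * modulo. Example: with tx_ring_size = 512, a monotonically increasing
 * producer index of 515 maps to ring slot 515 & 511 = 3.
 */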
static u32 hw_readl_native(struct macb *bp, int offset)
{
	return __raw_readl(bp->regs + offset);
}

static void hw_writel_native(struct macb *bp, int offset, u32 value)
{
	__raw_writel(value, bp->regs + offset);
}

static u32 hw_readl(struct macb *bp, int offset)
{
	return readl_relaxed(bp->regs + offset);
}

static void hw_writel(struct macb *bp, int offset, u32 value)
{
	writel_relaxed(value, bp->regs + offset);
}
/* Find the CPU endianness by using the loopback bit of NCR register. When the
 * CPU is in big endian we need to program swapped mode for management
 * descriptor access.
 */
static bool hw_is_native_io(void __iomem *addr)
{
	u32 value = MACB_BIT(LLB);

	__raw_writel(value, addr + MACB_NCR);
	value = __raw_readl(addr + MACB_NCR);

	/* Write 0 back to disable everything */
	__raw_writel(0, addr + MACB_NCR);

	return value == MACB_BIT(LLB);
}
static bool hw_is_gem(void __iomem *addr, bool native_io)
{
	u32 id;

	if (native_io)
		id = __raw_readl(addr + MACB_MID);
	else
		id = readl_relaxed(addr + MACB_MID);

	return MACB_BFEXT(IDNUM, id) >= 0x2;
}
static void macb_set_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;

	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
	macb_or_gem_writel(bp, SA1B, bottom);
	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
	macb_or_gem_writel(bp, SA1T, top);

	/* Clear unused address register sets */
	macb_or_gem_writel(bp, SA2B, 0);
	macb_or_gem_writel(bp, SA2T, 0);
	macb_or_gem_writel(bp, SA3B, 0);
	macb_or_gem_writel(bp, SA3T, 0);
	macb_or_gem_writel(bp, SA4B, 0);
	macb_or_gem_writel(bp, SA4T, 0);
}
static void macb_get_hwaddr(struct macb *bp)
{
	struct macb_platform_data *pdata;
	u32 bottom;
	u16 top;
	u8 addr[6];
	int i;

	pdata = dev_get_platdata(&bp->pdev->dev);

	/* Check all 4 address registers for a valid address */
	for (i = 0; i < 4; i++) {
		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
		top = macb_or_gem_readl(bp, SA1T + i * 8);

		if (pdata && pdata->rev_eth_addr) {
			addr[5] = bottom & 0xff;
			addr[4] = (bottom >> 8) & 0xff;
			addr[3] = (bottom >> 16) & 0xff;
			addr[2] = (bottom >> 24) & 0xff;
			addr[1] = top & 0xff;
			addr[0] = (top & 0xff00) >> 8;
		} else {
			addr[0] = bottom & 0xff;
			addr[1] = (bottom >> 8) & 0xff;
			addr[2] = (bottom >> 16) & 0xff;
			addr[3] = (bottom >> 24) & 0xff;
			addr[4] = top & 0xff;
			addr[5] = (top >> 8) & 0xff;
		}

		if (is_valid_ether_addr(addr)) {
			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
			return;
		}
	}

	dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
	eth_hw_addr_random(bp->dev);
}
static int macb_mdio_wait_for_idle(struct macb *bp)
{
	u32 val;

	return readx_poll_timeout(MACB_READ_NSR, bp, val, val & MACB_BIT(IDLE),
				  1, MACB_MDIO_TIMEOUT);
}
static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct macb *bp = bus->priv;
	int status;

	status = pm_runtime_get_sync(&bp->pdev->dev);
	if (status < 0)
		goto mdio_pm_exit;

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_read_exit;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_READ)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)));

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_read_exit;

	status = MACB_BFEXT(DATA, macb_readl(bp, MAN));

mdio_read_exit:
	pm_runtime_mark_last_busy(&bp->pdev->dev);
	pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
	return status;
}
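/* The MAN write above composes an IEEE 802.3 Clause 22 management frame:
 * start-of-frame, read/write opcode, PHY address, register address and the
 * mandatory code bits, with the 16 bit payload carried in (read) or supplied
 * via (write) the DATA field once the interface reports idle again.
 */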
static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct macb *bp = bus->priv;
	int status;

	status = pm_runtime_get_sync(&bp->pdev->dev);
	if (status < 0)
		goto mdio_pm_exit;

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_write_exit;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_WRITE)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)
			      | MACB_BF(DATA, value)));

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_write_exit;

mdio_write_exit:
	pm_runtime_mark_last_busy(&bp->pdev->dev);
	pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
	return status;
}
/**
 * macb_set_tx_clk() - Set a clock to a new frequency
 * @clk:	Pointer to the clock to change
 * @speed:	New frequency in Hz
 * @dev:	Pointer to the struct net_device
 */
static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
{
	long ferr, rate, rate_rounded;

	if (!clk)
		return;

	switch (speed) {
	case SPEED_10:
		rate = 2500000;
		break;
	case SPEED_100:
		rate = 25000000;
		break;
	case SPEED_1000:
		rate = 125000000;
		break;
	default:
		return;
	}

	rate_rounded = clk_round_rate(clk, rate);
	if (rate_rounded < 0)
		return;

	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
	 * is not satisfied.
	 */
	ferr = abs(rate_rounded - rate);
	ferr = DIV_ROUND_UP(ferr, rate / 100000);
	if (ferr > 5)
		netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
			    rate);

	if (clk_set_rate(clk, rate_rounded))
		netdev_err(dev, "adjusting tx_clk failed.\n");
}
static void macb_handle_link_change(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned long flags;
	int status_change = 0;

	spin_lock_irqsave(&bp->lock, flags);

	if (phydev->link) {
		if ((bp->speed != phydev->speed) ||
		    (bp->duplex != phydev->duplex)) {
			u32 reg;

			reg = macb_readl(bp, NCFGR);
			reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
			if (macb_is_gem(bp))
				reg &= ~GEM_BIT(GBE);

			if (phydev->duplex)
				reg |= MACB_BIT(FD);
			if (phydev->speed == SPEED_100)
				reg |= MACB_BIT(SPD);
			if (phydev->speed == SPEED_1000 &&
			    bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
				reg |= GEM_BIT(GBE);

			macb_or_gem_writel(bp, NCFGR, reg);

			bp->speed = phydev->speed;
			bp->duplex = phydev->duplex;
			status_change = 1;
		}
	}

	if (phydev->link != bp->link) {
		if (!phydev->link) {
			bp->speed = 0;
			bp->duplex = -1;
		}
		bp->link = phydev->link;

		status_change = 1;
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (status_change) {
		if (phydev->link) {
			/* Update the TX clock rate if and only if the link is
			 * up and there has been a link change.
			 */
			macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);

			netif_carrier_on(dev);
			netdev_info(dev, "link up (%d/%s)\n",
				    phydev->speed,
				    phydev->duplex == DUPLEX_FULL ?
				    "Full" : "Half");
		} else {
			netif_carrier_off(dev);
			netdev_info(dev, "link down\n");
		}
	}
}
/* based on au1000_eth.c */
static int macb_mii_probe(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_platform_data *pdata;
	struct phy_device *phydev;
	struct device_node *np;
	int phy_irq, ret, i;

	pdata = dev_get_platdata(&bp->pdev->dev);
	np = bp->pdev->dev.of_node;
	ret = 0;

	if (np) {
		if (of_phy_is_fixed_link(np)) {
			bp->phy_node = of_node_get(np);
		} else {
			bp->phy_node = of_parse_phandle(np, "phy-handle", 0);
			/* fallback to standard phy registration if no
			 * phy-handle was found nor any phy found during
			 * dt phy registration
			 */
			if (!bp->phy_node && !phy_find_first(bp->mii_bus)) {
				for (i = 0; i < PHY_MAX_ADDR; i++) {
					struct phy_device *phydev;

					phydev = mdiobus_scan(bp->mii_bus, i);
					if (IS_ERR(phydev) &&
					    PTR_ERR(phydev) != -ENODEV) {
						ret = PTR_ERR(phydev);
						break;
					}
				}

				if (ret)
					return -ENODEV;
			}
		}
	}

	if (bp->phy_node) {
		phydev = of_phy_connect(dev, bp->phy_node,
					&macb_handle_link_change, 0,
					bp->phy_interface);
		if (!phydev)
			return -ENODEV;
	} else {
		phydev = phy_find_first(bp->mii_bus);
		if (!phydev) {
			netdev_err(dev, "no PHY found\n");
			return -ENXIO;
		}

		if (pdata) {
			if (gpio_is_valid(pdata->phy_irq_pin)) {
				ret = devm_gpio_request(&bp->pdev->dev,
							pdata->phy_irq_pin, "phy int");
				if (!ret) {
					phy_irq = gpio_to_irq(pdata->phy_irq_pin);
					phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
				}
			} else {
				phydev->irq = PHY_POLL;
			}
		}

		/* attach the mac to the phy */
		ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
					 bp->phy_interface);
		if (ret) {
			netdev_err(dev, "Could not attach to PHY\n");
			return ret;
		}
	}

	/* mask with MAC supported features */
	if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
		phy_set_max_speed(phydev, SPEED_1000);
	else
		phy_set_max_speed(phydev, SPEED_100);

	if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
		phy_remove_link_mode(phydev,
				     ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

	bp->link = 0;
	bp->speed = 0;
	bp->duplex = -1;

	return 0;
}
static int macb_mii_init(struct macb *bp)
{
	struct macb_platform_data *pdata;
	struct device_node *np;
	int err = -ENXIO;

	/* Enable management port */
	macb_writel(bp, NCR, MACB_BIT(MPE));

	bp->mii_bus = mdiobus_alloc();
	if (!bp->mii_bus) {
		err = -ENOMEM;
		goto err_out;
	}

	bp->mii_bus->name = "MACB_mii_bus";
	bp->mii_bus->read = &macb_mdio_read;
	bp->mii_bus->write = &macb_mdio_write;
	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 bp->pdev->name, bp->pdev->id);
	bp->mii_bus->priv = bp;
	bp->mii_bus->parent = &bp->pdev->dev;
	pdata = dev_get_platdata(&bp->pdev->dev);

	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);

	np = bp->pdev->dev.of_node;
	if (np && of_phy_is_fixed_link(np)) {
		if (of_phy_register_fixed_link(np) < 0) {
			dev_err(&bp->pdev->dev,
				"broken fixed-link specification %pOF\n", np);
			goto err_out_free_mdiobus;
		}

		err = mdiobus_register(bp->mii_bus);
	} else {
		if (pdata)
			bp->mii_bus->phy_mask = pdata->phy_mask;

		err = of_mdiobus_register(bp->mii_bus, np);
	}

	if (err)
		goto err_out_free_fixed_link;

	err = macb_mii_probe(bp->dev);
	if (err)
		goto err_out_unregister_bus;

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(bp->mii_bus);
err_out_free_fixed_link:
	if (np && of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
err_out_free_mdiobus:
	of_node_put(bp->phy_node);
	mdiobus_free(bp->mii_bus);
err_out:
	return err;
}
static void macb_update_stats(struct macb *bp)
{
	u32 *p = &bp->hw_stats.macb.rx_pause_frames;
	u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
	int offset = MACB_PFR;

	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);

	for (; p < end; p++, offset += 4)
		*p += bp->macb_reg_readl(bp, offset);
}
static int macb_halt_tx(struct macb *bp)
{
	unsigned long halt_time, timeout;
	u32 status;

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));

	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
	do {
		halt_time = jiffies;
		status = macb_readl(bp, TSR);
		if (!(status & MACB_BIT(TGO)))
			return 0;

		udelay(250);
	} while (time_before(halt_time, timeout));

	return -ETIMEDOUT;
}
static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
{
	if (tx_skb->mapping) {
		if (tx_skb->mapped_as_page)
			dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
				       tx_skb->size, DMA_TO_DEVICE);
		else
			dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
					 tx_skb->size, DMA_TO_DEVICE);
		tx_skb->mapping = 0;
	}

	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
	}
}
static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	struct macb_dma_desc_64 *desc_64;

	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
		desc_64 = macb_64b_desc(bp, desc);
		desc_64->addrh = upper_32_bits(addr);
		/* The low bits of RX address contain the RX_USED bit, clearing
		 * of which allows packet RX. Make sure the high bits are also
		 * visible to HW at that point.
		 */
		dma_wmb();
	}
#endif
	desc->addr = lower_32_bits(addr);
}
static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
{
	dma_addr_t addr = 0;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	struct macb_dma_desc_64 *desc_64;

	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
		desc_64 = macb_64b_desc(bp, desc);
		addr = ((u64)(desc_64->addrh) << 32);
	}
#endif
	addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
	return addr;
}
static void macb_tx_error_task(struct work_struct *work)
{
	struct macb_queue	*queue = container_of(work, struct macb_queue,
						      tx_error_task);
	struct macb		*bp = queue->bp;
	struct macb_tx_skb	*tx_skb;
	struct macb_dma_desc	*desc;
	struct sk_buff		*skb;
	unsigned int		tail;
	unsigned long		flags;

	netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
		    (unsigned int)(queue - bp->queues),
		    queue->tx_tail, queue->tx_head);

	/* Prevent the queue IRQ handlers from running: each of them may call
	 * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
	 * As explained below, we have to halt the transmission before updating
	 * TBQP registers so we call netif_tx_stop_all_queues() to notify the
	 * network engine about the macb/gem being halted.
	 */
	spin_lock_irqsave(&bp->lock, flags);

	/* Make sure nobody is trying to queue up new packets */
	netif_tx_stop_all_queues(bp->dev);

	/* Stop transmission now
	 * (in case we have just queued new packets)
	 * macb/gem must be halted to write TBQP register
	 */
	if (macb_halt_tx(bp))
		/* Just complain for now, reinitializing TX path can be good */
		netdev_err(bp->dev, "BUG: halt tx timed out\n");

	/* Treat frames in TX queue including the ones that caused the error.
	 * Free transmit buffers in upper layer.
	 */
	for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
		u32	ctrl;

		desc = macb_tx_desc(queue, tail);
		ctrl = desc->ctrl;
		tx_skb = macb_tx_skb(queue, tail);
		skb = tx_skb->skb;

		if (ctrl & MACB_BIT(TX_USED)) {
			/* skb is set for the last buffer of the frame */
			while (!skb) {
				macb_tx_unmap(bp, tx_skb);
				tail++;
				tx_skb = macb_tx_skb(queue, tail);
				skb = tx_skb->skb;
			}

			/* ctrl still refers to the first buffer descriptor
			 * since it's the only one written back by the hardware
			 */
			if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
				netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(bp, tail),
					    skb->data);
				bp->dev->stats.tx_packets++;
				queue->stats.tx_packets++;
				bp->dev->stats.tx_bytes += skb->len;
				queue->stats.tx_bytes += skb->len;
			}
		} else {
			/* "Buffers exhausted mid-frame" errors may only happen
			 * if the driver is buggy, so complain loudly about
			 * those. Statistics are updated by hardware.
			 */
			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
				netdev_err(bp->dev,
					   "BUG: TX buffers exhausted mid-frame\n");

			desc->ctrl = ctrl | MACB_BIT(TX_USED);
		}

		macb_tx_unmap(bp, tx_skb);
	}

	/* Set end of TX queue */
	desc = macb_tx_desc(queue, 0);
	macb_set_addr(bp, desc, 0);
	desc->ctrl = MACB_BIT(TX_USED);

	/* Make descriptor updates visible to hardware */
	wmb();

	/* Reinitialize the TX desc queue */
	queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
		queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
#endif
	/* Make TX ring reflect state of hardware */
	queue->tx_head = 0;
	queue->tx_tail = 0;

	/* Housework before enabling TX IRQ */
	macb_writel(bp, TSR, macb_readl(bp, TSR));
	queue_writel(queue, IER, MACB_TX_INT_FLAGS);

	/* Now we are ready to start transmission again */
	netif_tx_start_all_queues(bp->dev);
	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	spin_unlock_irqrestore(&bp->lock, flags);
}
static void macb_tx_interrupt(struct macb_queue *queue)
{
	unsigned int tail;
	unsigned int head;
	u32 status;
	struct macb *bp = queue->bp;
	u16 queue_index = queue - bp->queues;

	status = macb_readl(bp, TSR);
	macb_writel(bp, TSR, status);

	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
		queue_writel(queue, ISR, MACB_BIT(TCOMP));

	netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
		    (unsigned long)status);

	head = queue->tx_head;
	for (tail = queue->tx_tail; tail != head; tail++) {
		struct macb_tx_skb	*tx_skb;
		struct sk_buff		*skb;
		struct macb_dma_desc	*desc;
		u32			ctrl;

		desc = macb_tx_desc(queue, tail);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		ctrl = desc->ctrl;

		/* TX_USED bit is only set by hardware on the very first buffer
		 * descriptor of the transmitted frame.
		 */
		if (!(ctrl & MACB_BIT(TX_USED)))
			break;

		/* Process all buffers of the current transmitted frame */
		for (;; tail++) {
			tx_skb = macb_tx_skb(queue, tail);
			skb = tx_skb->skb;

			/* First, update TX stats if needed */
			if (skb) {
				if (unlikely(skb_shinfo(skb)->tx_flags &
					     SKBTX_HW_TSTAMP) &&
				    gem_ptp_do_txstamp(queue, skb, desc) == 0) {
					/* skb now belongs to timestamp buffer
					 * and will be removed later
					 */
					tx_skb->skb = NULL;
				}
				netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(bp, tail),
					    skb->data);
				bp->dev->stats.tx_packets++;
				queue->stats.tx_packets++;
				bp->dev->stats.tx_bytes += skb->len;
				queue->stats.tx_bytes += skb->len;
			}

			/* Now we can safely release resources */
			macb_tx_unmap(bp, tx_skb);

			/* skb is set only for the last buffer of the frame.
			 * WARNING: at this point skb has been freed by
			 * macb_tx_unmap().
			 */
			if (skb)
				break;
		}
	}

	queue->tx_tail = tail;
	if (__netif_subqueue_stopped(bp->dev, queue_index) &&
	    CIRC_CNT(queue->tx_head, queue->tx_tail,
		     bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
		netif_wake_subqueue(bp->dev, queue_index);
}
static void gem_rx_refill(struct macb_queue *queue)
{
	unsigned int		entry;
	struct sk_buff		*skb;
	dma_addr_t		paddr;
	struct macb *bp = queue->bp;
	struct macb_dma_desc *desc;

	while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
			  bp->rx_ring_size) > 0) {
		entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		queue->rx_prepared_head++;
		desc = macb_rx_desc(queue, entry);

		if (!queue->rx_skbuff[entry]) {
			/* allocate sk_buff for this free entry in ring */
			skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
			if (unlikely(!skb)) {
				netdev_err(bp->dev,
					   "Unable to allocate sk_buff\n");
				break;
			}

			/* now fill corresponding descriptor entry */
			paddr = dma_map_single(&bp->pdev->dev, skb->data,
					       bp->rx_buffer_size,
					       DMA_FROM_DEVICE);
			if (dma_mapping_error(&bp->pdev->dev, paddr)) {
				dev_kfree_skb(skb);
				break;
			}

			queue->rx_skbuff[entry] = skb;

			if (entry == bp->rx_ring_size - 1)
				paddr |= MACB_BIT(RX_WRAP);
			desc->ctrl = 0;
			/* Setting addr clears RX_USED and allows reception,
			 * make sure ctrl is cleared first to avoid a race.
			 */
			dma_wmb();
			macb_set_addr(bp, desc, paddr);

			/* properly align Ethernet header */
			skb_reserve(skb, NET_IP_ALIGN);
		} else {
			desc->ctrl = 0;
			dma_wmb();
			desc->addr &= ~MACB_BIT(RX_USED);
		}
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
		    queue, queue->rx_prepared_head, queue->rx_tail);
}
/* Mark DMA descriptors from begin up to and not including end as unused */
static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
				  unsigned int end)
{
	unsigned int frag;

	for (frag = begin; frag != end; frag++) {
		struct macb_dma_desc *desc = macb_rx_desc(queue, frag);

		desc->addr &= ~MACB_BIT(RX_USED);
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	/* When this happens, the hardware stats registers for
	 * whatever caused this is updated, so we don't have to record
	 * anything.
	 */
}
static int gem_rx(struct macb_queue *queue, int budget)
{
	struct macb *bp = queue->bp;
	unsigned int		len;
	unsigned int		entry;
	struct sk_buff		*skb;
	struct macb_dma_desc	*desc;
	int			count = 0;

	while (count < budget) {
		u32 ctrl;
		dma_addr_t addr;
		bool rxused;

		entry = macb_rx_ring_wrap(bp, queue->rx_tail);
		desc = macb_rx_desc(queue, entry);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
		addr = macb_get_addr(bp, desc);

		if (!rxused)
			break;

		/* Ensure ctrl is at least as up-to-date as rxused */
		dma_rmb();

		ctrl = desc->ctrl;

		queue->rx_tail++;
		count++;

		if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
			netdev_err(bp->dev,
				   "not whole frame pointed by descriptor\n");
			bp->dev->stats.rx_dropped++;
			queue->stats.rx_dropped++;
			break;
		}
		skb = queue->rx_skbuff[entry];
		if (unlikely(!skb)) {
			netdev_err(bp->dev,
				   "inconsistent Rx descriptor chain\n");
			bp->dev->stats.rx_dropped++;
			queue->stats.rx_dropped++;
			break;
		}
		/* now everything is ready for receiving packet */
		queue->rx_skbuff[entry] = NULL;
		len = ctrl & bp->rx_frm_len_mask;

		netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);

		skb_put(skb, len);
		dma_unmap_single(&bp->pdev->dev, addr,
				 bp->rx_buffer_size, DMA_FROM_DEVICE);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb_checksum_none_assert(skb);
		if (bp->dev->features & NETIF_F_RXCSUM &&
		    !(bp->dev->flags & IFF_PROMISC) &&
		    GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		bp->dev->stats.rx_packets++;
		queue->stats.rx_packets++;
		bp->dev->stats.rx_bytes += skb->len;
		queue->stats.rx_bytes += skb->len;

		gem_ptp_do_rxstamp(bp, skb, desc);

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
		netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
			    skb->len, skb->csum);
		print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb_mac_header(skb), 16, true);
		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb->data, 32, true);
#endif

		netif_receive_skb(skb);
	}

	gem_rx_refill(queue);

	return count;
}
static int macb_rx_frame(struct macb_queue *queue, unsigned int first_frag,
			 unsigned int last_frag)
{
	unsigned int len;
	unsigned int frag;
	unsigned int offset;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;
	struct macb *bp = queue->bp;

	desc = macb_rx_desc(queue, last_frag);
	len = desc->ctrl & bp->rx_frm_len_mask;

	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
		    macb_rx_ring_wrap(bp, first_frag),
		    macb_rx_ring_wrap(bp, last_frag), len);

	/* The ethernet header starts NET_IP_ALIGN bytes into the
	 * first buffer. Since the header is 14 bytes, this makes the
	 * payload word-aligned.
	 *
	 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
	 * the two padding bytes into the skb so that we avoid hitting
	 * the slowpath in memcpy(), and pull them off afterwards.
	 */
	skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
	if (!skb) {
		bp->dev->stats.rx_dropped++;
		for (frag = first_frag; ; frag++) {
			desc = macb_rx_desc(queue, frag);
			desc->addr &= ~MACB_BIT(RX_USED);
			if (frag == last_frag)
				break;
		}

		/* Make descriptor updates visible to hardware */
		wmb();

		return 1;
	}

	offset = 0;
	len += NET_IP_ALIGN;
	skb_checksum_none_assert(skb);
	skb_put(skb, len);

	for (frag = first_frag; ; frag++) {
		unsigned int frag_len = bp->rx_buffer_size;

		if (offset + frag_len > len) {
			if (unlikely(frag != last_frag)) {
				dev_kfree_skb_any(skb);
				return -1;
			}
			frag_len = len - offset;
		}
		skb_copy_to_linear_data_offset(skb, offset,
					       macb_rx_buffer(queue, frag),
					       frag_len);
		offset += bp->rx_buffer_size;
		desc = macb_rx_desc(queue, frag);
		desc->addr &= ~MACB_BIT(RX_USED);

		if (frag == last_frag)
			break;
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	__skb_pull(skb, NET_IP_ALIGN);
	skb->protocol = eth_type_trans(skb, bp->dev);

	bp->dev->stats.rx_packets++;
	bp->dev->stats.rx_bytes += skb->len;
	netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
		    skb->len, skb->csum);
	netif_receive_skb(skb);

	return 0;
}
static inline void macb_init_rx_ring(struct macb_queue *queue)
{
	struct macb *bp = queue->bp;
	dma_addr_t addr;
	struct macb_dma_desc *desc = NULL;
	int i;

	addr = queue->rx_buffers_dma;
	for (i = 0; i < bp->rx_ring_size; i++) {
		desc = macb_rx_desc(queue, i);
		macb_set_addr(bp, desc, addr);
		desc->ctrl = 0;
		addr += bp->rx_buffer_size;
	}
	desc->addr |= MACB_BIT(RX_WRAP);
	queue->rx_tail = 0;
}
static int macb_rx(struct macb_queue *queue, int budget)
{
	struct macb *bp = queue->bp;
	bool reset_rx_queue = false;
	int received = 0;
	unsigned int tail;
	int first_frag = -1;

	for (tail = queue->rx_tail; budget > 0; tail++) {
		struct macb_dma_desc *desc = macb_rx_desc(queue, tail);
		u32 ctrl;

		/* Make hw descriptor updates visible to CPU */
		rmb();

		if (!(desc->addr & MACB_BIT(RX_USED)))
			break;

		/* Ensure ctrl is at least as up-to-date as addr */
		dma_rmb();

		ctrl = desc->ctrl;

		if (ctrl & MACB_BIT(RX_SOF)) {
			if (first_frag != -1)
				discard_partial_frame(queue, first_frag, tail);
			first_frag = tail;
		}

		if (ctrl & MACB_BIT(RX_EOF)) {
			int dropped;

			if (unlikely(first_frag == -1)) {
				reset_rx_queue = true;
				continue;
			}

			dropped = macb_rx_frame(queue, first_frag, tail);
			first_frag = -1;
			if (unlikely(dropped < 0)) {
				reset_rx_queue = true;
				continue;
			}
			if (!dropped) {
				received++;
				budget--;
			}
		}
	}

	if (unlikely(reset_rx_queue)) {
		unsigned long flags;
		u32 ctrl;

		netdev_err(bp->dev, "RX queue corruption: reset it\n");

		spin_lock_irqsave(&bp->lock, flags);

		ctrl = macb_readl(bp, NCR);
		macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));

		macb_init_rx_ring(queue);
		queue_writel(queue, RBQP, queue->rx_ring_dma);

		macb_writel(bp, NCR, ctrl | MACB_BIT(RE));

		spin_unlock_irqrestore(&bp->lock, flags);
		return received;
	}

	if (first_frag != -1)
		queue->rx_tail = first_frag;
	else
		queue->rx_tail = tail;

	return received;
}
static int macb_poll(struct napi_struct *napi, int budget)
{
	struct macb_queue *queue = container_of(napi, struct macb_queue, napi);
	struct macb *bp = queue->bp;
	int work_done;
	u32 status;

	status = macb_readl(bp, RSR);
	macb_writel(bp, RSR, status);

	netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
		    (unsigned long)status, budget);

	work_done = bp->macbgem_ops.mog_rx(queue, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		/* Packets received while interrupts were disabled */
		status = macb_readl(bp, RSR);
		if (status) {
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RCOMP));
			napi_reschedule(napi);
		} else {
			queue_writel(queue, IER, bp->rx_intr_mask);
		}
	}

	/* TODO: Handle errors */

	return work_done;
}
static void macb_hresp_error_task(unsigned long data)
{
	struct macb *bp = (struct macb *)data;
	struct net_device *dev = bp->dev;
	struct macb_queue *queue = bp->queues;
	unsigned int q;
	u32 ctrl;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, IDR, bp->rx_intr_mask |
					 MACB_TX_INT_FLAGS |
					 MACB_BIT(HRESP));
	}
	ctrl = macb_readl(bp, NCR);
	ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
	macb_writel(bp, NCR, ctrl);

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);

	bp->macbgem_ops.mog_init_rings(bp);

	/* Initialize TX and RX buffers */
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, RBQPH,
				     upper_32_bits(queue->rx_ring_dma));
#endif
		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, TBQPH,
				     upper_32_bits(queue->tx_ring_dma));
#endif
		/* Enable interrupts */
		queue_writel(queue, IER,
			     bp->rx_intr_mask |
			     MACB_TX_INT_FLAGS |
			     MACB_BIT(HRESP));
	}

	ctrl |= MACB_BIT(RE) | MACB_BIT(TE);
	macb_writel(bp, NCR, ctrl);

	netif_carrier_on(dev);
	netif_tx_start_all_queues(dev);
}
static void macb_tx_restart(struct macb_queue *queue)
{
	unsigned int head = queue->tx_head;
	unsigned int tail = queue->tx_tail;
	struct macb *bp = queue->bp;

	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
		queue_writel(queue, ISR, MACB_BIT(TXUBR));

	if (head == tail)
		return;

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
}
static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
	struct macb_queue *queue = dev_id;
	struct macb *bp = queue->bp;
	struct net_device *dev = bp->dev;
	u32 status, ctrl;

	status = queue_readl(queue, ISR);

	if (unlikely(!status))
		return IRQ_NONE;

	spin_lock(&bp->lock);

	while (status) {
		/* close possible race with dev_close */
		if (unlikely(!netif_running(dev))) {
			queue_writel(queue, IDR, -1);
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, -1);
			break;
		}

		netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
			    (unsigned int)(queue - bp->queues),
			    (unsigned long)status);

		if (status & bp->rx_intr_mask) {
			/* There's no point taking any more interrupts
			 * until we have processed the buffers. The
			 * scheduling call may fail if the poll routine
			 * is already scheduled, so disable interrupts
			 * now.
			 */
			queue_writel(queue, IDR, bp->rx_intr_mask);
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RCOMP));

			if (napi_schedule_prep(&queue->napi)) {
				netdev_vdbg(bp->dev, "scheduling RX softirq\n");
				__napi_schedule(&queue->napi);
			}
		}

		if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
			queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
			schedule_work(&queue->tx_error_task);

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);

			break;
		}

		if (status & MACB_BIT(TCOMP))
			macb_tx_interrupt(queue);

		if (status & MACB_BIT(TXUBR))
			macb_tx_restart(queue);

		/* Link change detection isn't possible with RMII, so we'll
		 * add that if/when we get our hands on a full-blown MII PHY.
		 */

		/* There is a hardware issue under heavy load where DMA can
		 * stop, this causes endless "used buffer descriptor read"
		 * interrupts but it can be cleared by re-enabling RX. See
		 * the at91rm9200 manual, section 41.3.1 or the Zynq manual
		 * section 16.7.4 for details. RXUBR is only enabled for
		 * these two versions.
		 */
		if (status & MACB_BIT(RXUBR)) {
			ctrl = macb_readl(bp, NCR);
			macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
			wmb();
			macb_writel(bp, NCR, ctrl | MACB_BIT(RE));

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RXUBR));
		}

		if (status & MACB_BIT(ISR_ROVR)) {
			/* We missed at least one packet */
			if (macb_is_gem(bp))
				bp->hw_stats.gem.rx_overruns++;
			else
				bp->hw_stats.macb.rx_overruns++;

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
		}

		if (status & MACB_BIT(HRESP)) {
			tasklet_schedule(&bp->hresp_err_tasklet);
			netdev_err(dev, "DMA bus error: HRESP not OK\n");

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(HRESP));
		}
		status = queue_readl(queue, ISR);
	}

	spin_unlock(&bp->lock);

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void macb_poll_controller(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;

	local_irq_save(flags);
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		macb_interrupt(dev->irq, queue);
	local_irq_restore(flags);
}
#endif
static unsigned int macb_tx_map(struct macb *bp,
				struct macb_queue *queue,
				struct sk_buff *skb,
				unsigned int hdrlen)
{
	dma_addr_t mapping;
	unsigned int len, entry, i, tx_head = queue->tx_head;
	struct macb_tx_skb *tx_skb = NULL;
	struct macb_dma_desc *desc;
	unsigned int offset, size, count = 0;
	unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int eof = 1, mss_mfs = 0;
	u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;

	/* LSO */
	if (skb_shinfo(skb)->gso_size != 0) {
		if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			/* UDP - UFO */
			lso_ctrl = MACB_LSO_UFO_ENABLE;
		else
			/* TCP - TSO */
			lso_ctrl = MACB_LSO_TSO_ENABLE;
	}

	/* First, map non-paged data */
	len = skb_headlen(skb);

	/* first buffer length */
	size = hdrlen;

	offset = 0;
	while (len) {
		entry = macb_tx_ring_wrap(bp, tx_head);
		tx_skb = &queue->tx_skb[entry];

		mapping = dma_map_single(&bp->pdev->dev,
					 skb->data + offset,
					 size, DMA_TO_DEVICE);
		if (dma_mapping_error(&bp->pdev->dev, mapping))
			goto dma_error;

		/* Save info to properly release resources */
		tx_skb->skb = NULL;
		tx_skb->mapping = mapping;
		tx_skb->size = size;
		tx_skb->mapped_as_page = false;

		len -= size;
		offset += size;
		count++;
		tx_head++;

		size = min(len, bp->max_tx_length);
	}

	/* Then, map paged data from fragments */
	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		len = skb_frag_size(frag);
		offset = 0;
		while (len) {
			size = min(len, bp->max_tx_length);
			entry = macb_tx_ring_wrap(bp, tx_head);
			tx_skb = &queue->tx_skb[entry];

			mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
						   offset, size, DMA_TO_DEVICE);
			if (dma_mapping_error(&bp->pdev->dev, mapping))
				goto dma_error;

			/* Save info to properly release resources */
			tx_skb->skb = NULL;
			tx_skb->mapping = mapping;
			tx_skb->size = size;
			tx_skb->mapped_as_page = true;

			len -= size;
			offset += size;
			count++;
			tx_head++;
		}
	}

	/* Should never happen */
	if (unlikely(!tx_skb)) {
		netdev_err(bp->dev, "BUG! empty skb!\n");
		return 0;
	}

	/* This is the last buffer of the frame: save socket buffer */
	tx_skb->skb = skb;

	/* Update TX ring: update buffer descriptors in reverse order
	 * to avoid race condition
	 */

	/* Set 'TX_USED' bit in buffer descriptor at tx_head position
	 * to set the end of TX queue
	 */
	i = tx_head;
	entry = macb_tx_ring_wrap(bp, i);
	ctrl = MACB_BIT(TX_USED);
	desc = macb_tx_desc(queue, entry);
	desc->ctrl = ctrl;

	if (lso_ctrl) {
		if (lso_ctrl == MACB_LSO_UFO_ENABLE)
			/* include header and FCS in value given to h/w */
			mss_mfs = skb_shinfo(skb)->gso_size +
					skb_transport_offset(skb) +
					ETH_FCS_LEN;
		else /* TSO */ {
			mss_mfs = skb_shinfo(skb)->gso_size;
			/* TCP Sequence Number Source Select
			 * can be set only for TSO
			 */
			seq_ctrl = 0;
		}
	}

	do {
		i--;
		entry = macb_tx_ring_wrap(bp, i);
		tx_skb = &queue->tx_skb[entry];
		desc = macb_tx_desc(queue, entry);

		ctrl = (u32)tx_skb->size;
		if (eof) {
			ctrl |= MACB_BIT(TX_LAST);
			eof = 0;
		}
		if (unlikely(entry == (bp->tx_ring_size - 1)))
			ctrl |= MACB_BIT(TX_WRAP);

		/* First descriptor is header descriptor */
		if (i == queue->tx_head) {
			ctrl |= MACB_BF(TX_LSO, lso_ctrl);
			ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
			if ((bp->dev->features & NETIF_F_HW_CSUM) &&
			    skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl)
				ctrl |= MACB_BIT(TX_NOCRC);
		} else
			/* Only set MSS/MFS on payload descriptors
			 * (second or later descriptor)
			 */
			ctrl |= MACB_BF(MSS_MFS, mss_mfs);

		/* Set TX buffer descriptor */
		macb_set_addr(bp, desc, tx_skb->mapping);
		/* desc->addr must be visible to hardware before clearing
		 * 'TX_USED' bit in desc->ctrl.
		 */
		wmb();
		desc->ctrl = ctrl;
	} while (i != queue->tx_head);

	queue->tx_head = tx_head;

	return count;

dma_error:
	netdev_err(bp->dev, "TX DMA map failed\n");

	for (i = queue->tx_head; i != tx_head; i++) {
		tx_skb = macb_tx_skb(queue, i);

		macb_tx_unmap(bp, tx_skb);
	}

	return 0;
}
static netdev_features_t macb_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	unsigned int nr_frags, f;
	unsigned int hdrlen;

	/* Validate LSO compatibility */

	/* there is only one buffer */
	if (!skb_is_nonlinear(skb))
		return features;

	/* length of header */
	hdrlen = skb_transport_offset(skb);
	if (ip_hdr(skb)->protocol == IPPROTO_TCP)
		hdrlen += tcp_hdrlen(skb);

	/* For LSO:
	 * When software supplies two or more payload buffers all payload buffers
	 * apart from the last must be a multiple of 8 bytes in size.
	 */
	if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN))
		return features & ~MACB_NETIF_LSO;

	nr_frags = skb_shinfo(skb)->nr_frags;
	/* No need to check last fragment */
	nr_frags--;
	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN))
			return features & ~MACB_NETIF_LSO;
	}
	return features;
}
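/* Example of the alignment rule: a TSO skb whose linear area carries a
 * 54 byte header plus 1446 payload bytes fails IS_ALIGNED(1446, 8) and is
 * transmitted with LSO stripped from its features; with 1448 payload bytes
 * (a multiple of 8) LSO is kept.
 */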
static inline int macb_clear_csum(struct sk_buff *skb)
{
	/* no change for packets without checksum offloading */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	/* make sure we can modify the header */
	if (unlikely(skb_cow_head(skb, 0)))
		return -1;

	/* initialize checksum field
	 * This is required - at least for Zynq, which otherwise calculates
	 * wrong UDP header checksums for UDP packets with UDP data len <= 2
	 */
	*(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
	return 0;
}
static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
{
	bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb);
	int padlen = ETH_ZLEN - (*skb)->len;
	int headroom = skb_headroom(*skb);
	int tailroom = skb_tailroom(*skb);
	struct sk_buff *nskb;
	u32 fcs;

	if (!(ndev->features & NETIF_F_HW_CSUM) ||
	    !((*skb)->ip_summed != CHECKSUM_PARTIAL) ||
	    skb_shinfo(*skb)->gso_size)	/* Not available for GSO */
		return 0;

	if (padlen <= 0) {
		/* FCS could be appended to tailroom. */
		if (tailroom >= ETH_FCS_LEN)
			goto add_fcs;
		/* FCS could be appended by moving data to headroom. */
		else if (!cloned && headroom + tailroom >= ETH_FCS_LEN)
			padlen = 0;
		/* No room for FCS, need to reallocate skb. */
		else
			padlen = ETH_FCS_LEN;
	} else {
		/* Add room for FCS. */
		padlen += ETH_FCS_LEN;
	}

	if (!cloned && headroom + tailroom >= padlen) {
		(*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len);
		skb_set_tail_pointer(*skb, (*skb)->len);
	} else {
		nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC);
		if (!nskb)
			return -ENOMEM;

		dev_consume_skb_any(*skb);
		*skb = nskb;
	}

	if (padlen > ETH_FCS_LEN)
		skb_put_zero(*skb, padlen - ETH_FCS_LEN);

add_fcs:
	/* set FCS to packet */
	fcs = crc32_le(~0, (*skb)->data, (*skb)->len);
	fcs = ~fcs;

	skb_put_u8(*skb, fcs & 0xff);
	skb_put_u8(*skb, (fcs >> 8) & 0xff);
	skb_put_u8(*skb, (fcs >> 16) & 0xff);
	skb_put_u8(*skb, (fcs >> 24) & 0xff);

	return 0;
}
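/* Worked example of the pad arithmetic: a 42 byte ARP frame gives
 * padlen = ETH_ZLEN - 42 = 18, raised to 22 once ETH_FCS_LEN is added, so
 * 18 zero bytes of padding are appended before the 4 byte FCS, yielding the
 * minimum 64 byte frame on the wire.
 */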
static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue = &bp->queues[queue_index];
	unsigned long flags;
	unsigned int desc_cnt, nr_frags, frag_size, f;
	unsigned int hdrlen;
	bool is_lso, is_udp = 0;
	netdev_tx_t ret = NETDEV_TX_OK;

	if (macb_clear_csum(skb)) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	if (macb_pad_and_fcs(&skb, dev)) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	is_lso = (skb_shinfo(skb)->gso_size != 0);

	if (is_lso) {
		is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP);

		/* length of headers */
		if (is_udp)
			/* only queue eth + ip headers separately for UDP */
			hdrlen = skb_transport_offset(skb);
		else
			hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
		if (skb_headlen(skb) < hdrlen) {
			netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
			/* if this is required, would need to copy to single buffer */
			return NETDEV_TX_BUSY;
		}
	} else {
		hdrlen = min(skb_headlen(skb), bp->max_tx_length);
	}

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
	netdev_vdbg(bp->dev,
		    "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
		    queue_index, skb->len, skb->head, skb->data,
		    skb_tail_pointer(skb), skb_end_pointer(skb));
	print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
		       skb->data, 16, true);
#endif

	/* Count how many TX buffer descriptors are needed to send this
	 * socket buffer: skb fragments of jumbo frames may need to be
	 * split into many buffer descriptors.
	 */
	if (is_lso && (skb_headlen(skb) > hdrlen))
		/* extra header descriptor if also payload in first buffer */
		desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1;
	else
		desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++) {
		frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
		desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
	}

	spin_lock_irqsave(&bp->lock, flags);

	/* This is a hard error, log it. */
	if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
		       bp->tx_ring_size) < desc_cnt) {
		netif_stop_subqueue(dev, queue_index);
		spin_unlock_irqrestore(&bp->lock, flags);
		netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
			   queue->tx_head, queue->tx_tail);
		return NETDEV_TX_BUSY;
	}

	/* Map socket buffer for DMA transfer */
	if (!macb_tx_map(bp, queue, skb, hdrlen)) {
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	/* Make newly initialized descriptor visible to hardware */
	wmb();
	skb_tx_timestamp(skb);

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
		netif_stop_subqueue(dev, queue_index);

unlock:
	spin_unlock_irqrestore(&bp->lock, flags);

	return ret;
}
static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
{
	if (!macb_is_gem(bp)) {
		bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
	} else {
		bp->rx_buffer_size = size;

		if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
			netdev_dbg(bp->dev,
				   "RX buffer must be multiple of %d bytes, expanding\n",
				   RX_BUFFER_MULTIPLE);
			bp->rx_buffer_size =
				roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
		}
	}

	netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
		   bp->dev->mtu, bp->rx_buffer_size);
}
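/* Example: with the default MTU of 1500 and NET_IP_ALIGN == 2 (the usual
 * value on architectures without efficient unaligned access), macb_open()
 * requests 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) + 2 = 1520 bytes, which
 * is not a multiple of 64 and is therefore rounded up here to 1536.
 */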
static void gem_free_rx_buffers(struct macb *bp)
{
	struct sk_buff		*skb;
	struct macb_dma_desc	*desc;
	struct macb_queue	*queue;
	dma_addr_t		addr;
	unsigned int q;
	int i;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		if (!queue->rx_skbuff)
			continue;

		for (i = 0; i < bp->rx_ring_size; i++) {
			skb = queue->rx_skbuff[i];

			if (!skb)
				continue;

			desc = macb_rx_desc(queue, i);
			addr = macb_get_addr(bp, desc);

			dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			skb = NULL;
		}

		kfree(queue->rx_skbuff);
		queue->rx_skbuff = NULL;
	}
}
static void macb_free_rx_buffers(struct macb *bp)
{
	struct macb_queue *queue = &bp->queues[0];

	if (queue->rx_buffers) {
		dma_free_coherent(&bp->pdev->dev,
				  bp->rx_ring_size * bp->rx_buffer_size,
				  queue->rx_buffers, queue->rx_buffers_dma);
		queue->rx_buffers = NULL;
	}
}
static void macb_free_consistent(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	int size;

	bp->macbgem_ops.mog_free_rx_buffers(bp);

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		kfree(queue->tx_skb);
		queue->tx_skb = NULL;
		if (queue->tx_ring) {
			size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
			dma_free_coherent(&bp->pdev->dev, size,
					  queue->tx_ring, queue->tx_ring_dma);
			queue->tx_ring = NULL;
		}
		if (queue->rx_ring) {
			size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
			dma_free_coherent(&bp->pdev->dev, size,
					  queue->rx_ring, queue->rx_ring_dma);
			queue->rx_ring = NULL;
		}
	}
}
static int gem_alloc_rx_buffers(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	int size;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		size = bp->rx_ring_size * sizeof(struct sk_buff *);
		queue->rx_skbuff = kzalloc(size, GFP_KERNEL);
		if (!queue->rx_skbuff)
			return -ENOMEM;
		else
			netdev_dbg(bp->dev,
				   "Allocated %d RX struct sk_buff entries at %p\n",
				   bp->rx_ring_size, queue->rx_skbuff);
	}
	return 0;
}
static int macb_alloc_rx_buffers(struct macb *bp)
{
	struct macb_queue *queue = &bp->queues[0];
	int size;

	size = bp->rx_ring_size * bp->rx_buffer_size;
	queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
					       &queue->rx_buffers_dma, GFP_KERNEL);
	if (!queue->rx_buffers)
		return -ENOMEM;

	netdev_dbg(bp->dev,
		   "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
		   size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers);
	return 0;
}
static int macb_alloc_consistent(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	int size;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
		queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
						    &queue->tx_ring_dma,
						    GFP_KERNEL);
		if (!queue->tx_ring)
			goto out_err;
		netdev_dbg(bp->dev,
			   "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
			   q, size, (unsigned long)queue->tx_ring_dma,
			   queue->tx_ring);

		size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
		queue->tx_skb = kmalloc(size, GFP_KERNEL);
		if (!queue->tx_skb)
			goto out_err;

		size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
		queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
						    &queue->rx_ring_dma, GFP_KERNEL);
		if (!queue->rx_ring)
			goto out_err;
		netdev_dbg(bp->dev,
			   "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
			   size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
	}
	if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
		goto out_err;

	return 0;

out_err:
	macb_free_consistent(bp);
	return -ENOMEM;
}
static void gem_init_rings(struct macb *bp)
{
	struct macb_queue *queue;
	struct macb_dma_desc *desc = NULL;
	unsigned int q;
	int i;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		for (i = 0; i < bp->tx_ring_size; i++) {
			desc = macb_tx_desc(queue, i);
			macb_set_addr(bp, desc, 0);
			desc->ctrl = MACB_BIT(TX_USED);
		}
		desc->ctrl |= MACB_BIT(TX_WRAP);
		queue->tx_head = 0;
		queue->tx_tail = 0;

		queue->rx_tail = 0;
		queue->rx_prepared_head = 0;

		gem_rx_refill(queue);
	}
}
static void macb_init_rings(struct macb *bp)
{
	int i;
	struct macb_dma_desc *desc = NULL;

	macb_init_rx_ring(&bp->queues[0]);

	for (i = 0; i < bp->tx_ring_size; i++) {
		desc = macb_tx_desc(&bp->queues[0], i);
		macb_set_addr(bp, desc, 0);
		desc->ctrl = MACB_BIT(TX_USED);
	}
	bp->queues[0].tx_head = 0;
	bp->queues[0].tx_tail = 0;
	desc->ctrl |= MACB_BIT(TX_WRAP);
}
static void macb_reset_hw(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	u32 ctrl = macb_readl(bp, NCR);

	/* Disable RX and TX (XXX: Should we halt the transmission
	 * more gracefully?)
	 */
	ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));

	/* Clear the stats registers (XXX: Update stats first?) */
	ctrl |= MACB_BIT(CLRSTAT);

	macb_writel(bp, NCR, ctrl);

	/* Clear all status flags */
	macb_writel(bp, TSR, -1);
	macb_writel(bp, RSR, -1);

	/* Disable all interrupts */
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, IDR, -1);
		queue_readl(queue, ISR);
		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
			queue_writel(queue, ISR, -1);
	}
}
static u32 gem_mdc_clk_div(struct macb *bp)
{
	u32 config;
	unsigned long pclk_hz = clk_get_rate(bp->pclk);

	if (pclk_hz <= 20000000)
		config = GEM_BF(CLK, GEM_CLK_DIV8);
	else if (pclk_hz <= 40000000)
		config = GEM_BF(CLK, GEM_CLK_DIV16);
	else if (pclk_hz <= 80000000)
		config = GEM_BF(CLK, GEM_CLK_DIV32);
	else if (pclk_hz <= 120000000)
		config = GEM_BF(CLK, GEM_CLK_DIV48);
	else if (pclk_hz <= 160000000)
		config = GEM_BF(CLK, GEM_CLK_DIV64);
	else
		config = GEM_BF(CLK, GEM_CLK_DIV96);

	return config;
}
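/* Example: a 150 MHz pclk falls in the "<= 160 MHz" bucket, so MDC runs at
 * 150 MHz / 64 ≈ 2.34 MHz, just under the 2.5 MHz ceiling that IEEE 802.3
 * allows for the MDIO management clock.
 */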
static u32 macb_mdc_clk_div(struct macb *bp)
{
	u32 config;
	unsigned long pclk_hz;

	if (macb_is_gem(bp))
		return gem_mdc_clk_div(bp);

	pclk_hz = clk_get_rate(bp->pclk);
	if (pclk_hz <= 20000000)
		config = MACB_BF(CLK, MACB_CLK_DIV8);
	else if (pclk_hz <= 40000000)
		config = MACB_BF(CLK, MACB_CLK_DIV16);
	else if (pclk_hz <= 80000000)
		config = MACB_BF(CLK, MACB_CLK_DIV32);
	else
		config = MACB_BF(CLK, MACB_CLK_DIV64);

	return config;
}
/* Get the DMA bus width field of the network configuration register that we
 * should program. We find the width from decoding the design configuration
 * register to find the maximum supported data bus width.
 */
static u32 macb_dbw(struct macb *bp)
{
	if (!macb_is_gem(bp))
		return 0;

	switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
	case 4:
		return GEM_BF(DBW, GEM_DBW128);
	case 2:
		return GEM_BF(DBW, GEM_DBW64);
	case 1:
	default:
		return GEM_BF(DBW, GEM_DBW32);
	}
}
/* Configure the receive DMA engine
 * - use the correct receive buffer size
 * - set best burst length for DMA operations
 *   (if not supported by FIFO, it will fallback to default)
 * - set both rx/tx packet buffers to full memory size
 * These are configurable parameters for GEM.
 */
static void macb_configure_dma(struct macb *bp)
{
	struct macb_queue *queue;
	u32 buffer_size;
	unsigned int q;
	u32 dmacfg;

	buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE;
	if (macb_is_gem(bp)) {
		dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
			if (q)
				queue_writel(queue, RBQS, buffer_size);
			else
				dmacfg |= GEM_BF(RXBS, buffer_size);
		}
		if (bp->dma_burst_length)
			dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
		dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
		dmacfg &= ~GEM_BIT(ENDIA_PKT);

		if (bp->native_io)
			dmacfg &= ~GEM_BIT(ENDIA_DESC);
		else
			dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */

		if (bp->dev->features & NETIF_F_HW_CSUM)
			dmacfg |= GEM_BIT(TXCOEN);
		else
			dmacfg &= ~GEM_BIT(TXCOEN);

		dmacfg &= ~GEM_BIT(ADDR64);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			dmacfg |= GEM_BIT(ADDR64);
#endif
#ifdef CONFIG_MACB_USE_HWSTAMP
		if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
			dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT);
#endif
		netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
			   dmacfg);
		gem_writel(bp, DMACFG, dmacfg);
	}
}
static void macb_init_hw(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	u32 config;

	macb_reset_hw(bp);
	macb_set_hwaddr(bp);

	config = macb_mdc_clk_div(bp);
	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
		config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
	config |= MACB_BF(RBOF, NET_IP_ALIGN);	/* Make eth data aligned */
	config |= MACB_BIT(PAE);		/* PAuse Enable */
	config |= MACB_BIT(DRFCS);		/* Discard Rx FCS */
	if (bp->caps & MACB_CAPS_JUMBO)
		config |= MACB_BIT(JFRAME);	/* Enable jumbo frames */
	else
		config |= MACB_BIT(BIG);	/* Receive oversized frames */
	if (bp->dev->flags & IFF_PROMISC)
		config |= MACB_BIT(CAF);	/* Copy All Frames */
	else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
		config |= GEM_BIT(RXCOEN);
	if (!(bp->dev->flags & IFF_BROADCAST))
		config |= MACB_BIT(NBC);	/* No BroadCast */
	config |= macb_dbw(bp);
	macb_writel(bp, NCFGR, config);
	if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
		gem_writel(bp, JML, bp->jumbo_max_len);
	bp->speed = SPEED_10;
	bp->duplex = DUPLEX_HALF;
	bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
	if (bp->caps & MACB_CAPS_JUMBO)
		bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;

	macb_configure_dma(bp);

	/* Initialize TX and RX buffers */
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, RBQPH, upper_32_bits(queue->rx_ring_dma));
#endif
		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
#endif

		/* Enable interrupts */
		queue_writel(queue, IER,
			     bp->rx_intr_mask |
			     MACB_TX_INT_FLAGS |
			     MACB_BIT(HRESP));
	}

	/* Enable TX and RX */
	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
}
/* The hash address register is 64 bits long and takes up two
 * locations in the memory map. The least significant bits are stored
 * in EMAC_HSL and the most significant bits in EMAC_HSH.
 *
 * The unicast hash enable and the multicast hash enable bits in the
 * network configuration register enable the reception of hash matched
 * frames. The destination address is reduced to a 6 bit index into
 * the 64 bit hash register using the following hash function. The
 * hash function is an exclusive or of every sixth bit of the
 * destination address.
 *
 * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
 * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
 * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
 * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
 * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
 *
 * da[0] represents the least significant bit of the first byte
 * received, that is, the multicast/unicast indicator, and da[47]
 * represents the most significant bit of the last byte received. If
 * the hash index, hi[n], points to a bit that is set in the hash
 * register then the frame will be matched according to whether the
 * frame is multicast or unicast. A multicast match will be signalled
 * if the multicast hash enable bit is set, da[0] is 1 and the hash
 * index points to a bit set in the hash register. A unicast match
 * will be signalled if the unicast hash enable bit is set, da[0] is 0
 * and the hash index points to a bit set in the hash register. To
 * receive all multicast frames, the hash register should be set with
 * all ones and the multicast hash enable bit should be set in the
 * network configuration register.
 */
static inline int hash_bit_value(int bitnr, __u8 *addr)
{
	if (addr[bitnr / 8] & (1 << (bitnr % 8)))
		return 1;
	return 0;
}
/* Return the hash index value for the specified address. */
static int hash_get_index(__u8 *addr)
{
	int i, j, bitval;
	int hash_index = 0;

	for (j = 0; j < 6; j++) {
		for (i = 0, bitval = 0; i < 8; i++)
			bitval ^= hash_bit_value(i * 6 + j, addr);

		hash_index |= (bitval << j);
	}

	return hash_index;
}
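/* Worked example (illustration only, not part of the driver): for the
 * broadcast address ff:ff:ff:ff:ff:ff every da[n] is 1, so each hi[j]
 * above is the XOR of eight ones, i.e. 0, and hash_get_index() returns
 * 0 - broadcast maps to bit 0 of the 64-bit hash register.  A register
 * with bit 0 set, together with the multicast hash enable bit, would
 * therefore match it.
 */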
/* Add multicast addresses to the internal multicast-hash table. */
static void macb_sethashtable(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	unsigned long mc_filter[2];
	unsigned int bitnr;
	struct macb *bp = netdev_priv(dev);

	mc_filter[0] = 0;
	mc_filter[1] = 0;

	netdev_for_each_mc_addr(ha, dev) {
		bitnr = hash_get_index(ha->addr);
		mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
	}

	macb_or_gem_writel(bp, HRB, mc_filter[0]);
	macb_or_gem_writel(bp, HRT, mc_filter[1]);
}
/* Enable/Disable promiscuous and multicast modes. */
static void macb_set_rx_mode(struct net_device *dev)
{
	unsigned long cfg;
	struct macb *bp = netdev_priv(dev);

	cfg = macb_readl(bp, NCFGR);

	if (dev->flags & IFF_PROMISC) {
		/* Enable promiscuous mode */
		cfg |= MACB_BIT(CAF);

		/* Disable RX checksum offload */
		if (macb_is_gem(bp))
			cfg &= ~GEM_BIT(RXCOEN);
	} else {
		/* Disable promiscuous mode */
		cfg &= ~MACB_BIT(CAF);

		/* Enable RX checksum offload only if requested */
		if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
			cfg |= GEM_BIT(RXCOEN);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Enable all multicast mode */
		macb_or_gem_writel(bp, HRB, -1);
		macb_or_gem_writel(bp, HRT, -1);
		cfg |= MACB_BIT(NCFGR_MTI);
	} else if (!netdev_mc_empty(dev)) {
		/* Enable specific multicasts */
		macb_sethashtable(dev);
		cfg |= MACB_BIT(NCFGR_MTI);
	} else if (dev->flags & (~IFF_ALLMULTI)) {
		/* Disable all multicast mode */
		macb_or_gem_writel(bp, HRB, 0);
		macb_or_gem_writel(bp, HRT, 0);
		cfg &= ~MACB_BIT(NCFGR_MTI);
	}

	macb_writel(bp, NCFGR, cfg);
}
static int macb_open(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
	struct macb_queue *queue;
	unsigned int q;
	int err;

	netdev_dbg(bp->dev, "open\n");

	err = pm_runtime_get_sync(&bp->pdev->dev);
	if (err < 0)
		goto pm_exit;

	/* carrier starts down */
	netif_carrier_off(dev);

	/* if the phy is not yet registered, retry later */
	if (!dev->phydev) {
		err = -EAGAIN;
		goto pm_exit;
	}

	/* RX buffers initialization */
	macb_init_rx_buffer_size(bp, bufsz);

	err = macb_alloc_consistent(bp);
	if (err) {
		netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
			   err);
		goto pm_exit;
	}

	bp->macbgem_ops.mog_init_rings(bp);
	macb_init_hw(bp);

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		napi_enable(&queue->napi);

	/* schedule a link state check */
	phy_start(dev->phydev);

	netif_tx_start_all_queues(dev);

	if (bp->ptp_info)
		bp->ptp_info->ptp_init(dev);

pm_exit:
	if (err) {
		pm_runtime_put_sync(&bp->pdev->dev);
		return err;
	}
	return 0;
}
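/* Sizing note (illustration, assuming NET_IP_ALIGN == 2 as on most
 * architectures): with the default MTU of 1500, bufsz above is
 * 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) + 2 = 1520 bytes;
 * macb_init_rx_buffer_size() then rounds this up to a multiple of
 * RX_BUFFER_MULTIPLE (64), giving 1536-byte RX buffers on GEM.
 */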
static int macb_close(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;

	netif_tx_stop_all_queues(dev);

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		napi_disable(&queue->napi);

	if (dev->phydev)
		phy_stop(dev->phydev);

	spin_lock_irqsave(&bp->lock, flags);
	macb_reset_hw(bp);
	netif_carrier_off(dev);
	spin_unlock_irqrestore(&bp->lock, flags);

	macb_free_consistent(bp);

	if (bp->ptp_info)
		bp->ptp_info->ptp_remove(dev);

	pm_runtime_put(&bp->pdev->dev);

	return 0;
}
static int macb_change_mtu(struct net_device *dev, int new_mtu)
{
	if (netif_running(dev))
		return -EBUSY;

	dev->mtu = new_mtu;

	return 0;
}
static void gem_update_stats(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int i, q, idx;
	unsigned long *stat;

	u32 *p = &bp->hw_stats.gem.tx_octets_31_0;

	for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
		u32 offset = gem_statistics[i].offset;
		u64 val = bp->macb_reg_readl(bp, offset);

		bp->ethtool_stats[i] += val;
		*p += val;

		if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
			/* Add GEM_OCTTXH, GEM_OCTRXH */
			val = bp->macb_reg_readl(bp, offset + 4);
			bp->ethtool_stats[i] += ((u64)val) << 32;
			*(++p) += val;
		}
	}

	idx = GEM_STATS_LEN;
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat)
			bp->ethtool_stats[idx++] = *stat;
}
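/* Illustration of the 64-bit octet counters handled above: the hardware
 * splits them into a low/high 32-bit register pair (e.g. GEM_OCTTXL at
 * `offset` and GEM_OCTTXH at `offset + 4`).  If the low word reads
 * 0x00000010 and the high word reads 0x00000002, the accumulated value
 * is (0x2ULL << 32) | 0x10 = 0x200000010 octets.
 */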
static struct net_device_stats *gem_get_stats(struct macb *bp)
{
	struct gem_stats *hwstat = &bp->hw_stats.gem;
	struct net_device_stats *nstat = &bp->dev->stats;

	gem_update_stats(bp);

	nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
			    hwstat->rx_alignment_errors +
			    hwstat->rx_resource_errors +
			    hwstat->rx_overruns +
			    hwstat->rx_oversize_frames +
			    hwstat->rx_jabbers +
			    hwstat->rx_undersized_frames +
			    hwstat->rx_length_field_frame_errors);
	nstat->tx_errors = (hwstat->tx_late_collisions +
			    hwstat->tx_excessive_collisions +
			    hwstat->tx_underrun +
			    hwstat->tx_carrier_sense_errors);
	nstat->multicast = hwstat->rx_multicast_frames;
	nstat->collisions = (hwstat->tx_single_collision_frames +
			     hwstat->tx_multiple_collision_frames +
			     hwstat->tx_excessive_collisions);
	nstat->rx_length_errors = (hwstat->rx_oversize_frames +
				   hwstat->rx_jabbers +
				   hwstat->rx_undersized_frames +
				   hwstat->rx_length_field_frame_errors);
	nstat->rx_over_errors = hwstat->rx_resource_errors;
	nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
	nstat->rx_frame_errors = hwstat->rx_alignment_errors;
	nstat->rx_fifo_errors = hwstat->rx_overruns;
	nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
	nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
	nstat->tx_fifo_errors = hwstat->tx_underrun;

	return nstat;
}
static void gem_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct macb *bp;

	bp = netdev_priv(dev);
	gem_update_stats(bp);
	memcpy(data, &bp->ethtool_stats, sizeof(u64)
			* (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
}
static int gem_get_sset_count(struct net_device *dev, int sset)
{
	struct macb *bp = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
{
	char stat_string[ETH_GSTRING_LEN];
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned int i;
	unsigned int q;

	switch (sset) {
	case ETH_SS_STATS:
		for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
			memcpy(p, gem_statistics[i].stat_string,
			       ETH_GSTRING_LEN);

		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
			for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) {
				snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s",
						q, queue_statistics[i].stat_string);
				memcpy(p, stat_string, ETH_GSTRING_LEN);
			}
		}
		break;
	}
}
static struct net_device_stats *macb_get_stats(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &bp->dev->stats;
	struct macb_stats *hwstat = &bp->hw_stats.macb;

	if (macb_is_gem(bp))
		return gem_get_stats(bp);

	/* read stats from hardware */
	macb_update_stats(bp);

	/* Convert HW stats into netdevice stats */
	nstat->rx_errors = (hwstat->rx_fcs_errors +
			    hwstat->rx_align_errors +
			    hwstat->rx_resource_errors +
			    hwstat->rx_overruns +
			    hwstat->rx_oversize_pkts +
			    hwstat->rx_jabbers +
			    hwstat->rx_undersize_pkts +
			    hwstat->rx_length_mismatch);
	nstat->tx_errors = (hwstat->tx_late_cols +
			    hwstat->tx_excessive_cols +
			    hwstat->tx_underruns +
			    hwstat->tx_carrier_errors +
			    hwstat->sqe_test_errors);
	nstat->collisions = (hwstat->tx_single_cols +
			     hwstat->tx_multiple_cols +
			     hwstat->tx_excessive_cols);
	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
				   hwstat->rx_jabbers +
				   hwstat->rx_undersize_pkts +
				   hwstat->rx_length_mismatch);
	nstat->rx_over_errors = hwstat->rx_resource_errors +
				hwstat->rx_overruns;
	nstat->rx_crc_errors = hwstat->rx_fcs_errors;
	nstat->rx_frame_errors = hwstat->rx_align_errors;
	nstat->rx_fifo_errors = hwstat->rx_overruns;
	/* XXX: What does "missed" mean? */
	nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
	nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
	nstat->tx_fifo_errors = hwstat->tx_underruns;
	/* Don't know about heartbeat or window errors... */

	return nstat;
}
static int macb_get_regs_len(struct net_device *netdev)
{
	return MACB_GREGS_NBR * sizeof(u32);
}
static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *p)
{
	struct macb *bp = netdev_priv(dev);
	unsigned int tail, head;
	u32 *regs_buff = p;

	regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
			| MACB_GREGS_VERSION;

	tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail);
	head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head);

	regs_buff[0]  = macb_readl(bp, NCR);
	regs_buff[1]  = macb_or_gem_readl(bp, NCFGR);
	regs_buff[2]  = macb_readl(bp, NSR);
	regs_buff[3]  = macb_readl(bp, TSR);
	regs_buff[4]  = macb_readl(bp, RBQP);
	regs_buff[5]  = macb_readl(bp, TBQP);
	regs_buff[6]  = macb_readl(bp, RSR);
	regs_buff[7]  = macb_readl(bp, IMR);

	regs_buff[8]  = tail;
	regs_buff[9]  = head;
	regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
	regs_buff[11] = macb_tx_dma(&bp->queues[0], head);

	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
		regs_buff[12] = macb_or_gem_readl(bp, USRIO);
	if (macb_is_gem(bp))
		regs_buff[13] = gem_readl(bp, DMACFG);
}
static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct macb *bp = netdev_priv(netdev);

	wol->supported = 0;
	wol->wolopts = 0;

	if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
		wol->supported = WAKE_MAGIC;

		if (bp->wol & MACB_WOL_ENABLED)
			wol->wolopts |= WAKE_MAGIC;
	}
}
static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct macb *bp = netdev_priv(netdev);

	if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
	    (wol->wolopts & ~WAKE_MAGIC))
		return -EOPNOTSUPP;

	if (wol->wolopts & WAKE_MAGIC)
		bp->wol |= MACB_WOL_ENABLED;
	else
		bp->wol &= ~MACB_WOL_ENABLED;

	device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);

	return 0;
}
static void macb_get_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct macb *bp = netdev_priv(netdev);

	ring->rx_max_pending = MAX_RX_RING_SIZE;
	ring->tx_max_pending = MAX_TX_RING_SIZE;

	ring->rx_pending = bp->rx_ring_size;
	ring->tx_pending = bp->tx_ring_size;
}
static int macb_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring)
{
	struct macb *bp = netdev_priv(netdev);
	u32 new_rx_size, new_tx_size;
	unsigned int reset = 0;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_rx_size = clamp_t(u32, ring->rx_pending,
			      MIN_RX_RING_SIZE, MAX_RX_RING_SIZE);
	new_rx_size = roundup_pow_of_two(new_rx_size);

	new_tx_size = clamp_t(u32, ring->tx_pending,
			      MIN_TX_RING_SIZE, MAX_TX_RING_SIZE);
	new_tx_size = roundup_pow_of_two(new_tx_size);

	if ((new_tx_size == bp->tx_ring_size) &&
	    (new_rx_size == bp->rx_ring_size)) {
		/* nothing to do */
		return 0;
	}

	if (netif_running(bp->dev)) {
		reset = 1;
		macb_close(bp->dev);
	}

	bp->rx_ring_size = new_rx_size;
	bp->tx_ring_size = new_tx_size;

	if (reset)
		macb_open(bp->dev);

	return 0;
}
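/* Worked example (illustration only): a request of ring->rx_pending ==
 * 600 is clamped to [MIN_RX_RING_SIZE, MAX_RX_RING_SIZE] = [64, 8192]
 * (unchanged here) and then rounded by roundup_pow_of_two() to 1024,
 * since the rings must be powers of two.  From user space this would
 * come in via e.g. "ethtool -G eth0 rx 600" (assuming the interface is
 * named eth0).
 */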
#ifdef CONFIG_MACB_USE_HWSTAMP
static unsigned int gem_get_tsu_rate(struct macb *bp)
{
	struct clk *tsu_clk;
	unsigned int tsu_rate;

	tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk");
	if (!IS_ERR(tsu_clk))
		tsu_rate = clk_get_rate(tsu_clk);
	/* try pclk instead */
	else if (!IS_ERR(bp->pclk)) {
		tsu_clk = bp->pclk;
		tsu_rate = clk_get_rate(tsu_clk);
	} else
		return -ENOTSUPP;
	return tsu_rate;
}
static s32 gem_get_ptp_max_adj(void)
{
	return 64000000;
}
static int gem_get_ts_info(struct net_device *dev,
			   struct ethtool_ts_info *info)
{
	struct macb *bp = netdev_priv(dev);

	if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) {
		ethtool_op_get_ts_info(dev, info);
		return 0;
	}

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE |
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types =
		(1 << HWTSTAMP_TX_ONESTEP_SYNC) |
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_ALL);

	info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1;

	return 0;
}
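/* Usage note (illustration): the capabilities reported here are what
 * user space sees via "ethtool -T <iface>"; on hardware without the
 * PTP DMA capability the function falls back to the software-only
 * defaults from ethtool_op_get_ts_info() above.
 */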
static struct macb_ptp_info gem_ptp_info = {
	.ptp_init	 = gem_ptp_init,
	.ptp_remove	 = gem_ptp_remove,
	.get_ptp_max_adj = gem_get_ptp_max_adj,
	.get_tsu_rate	 = gem_get_tsu_rate,
	.get_ts_info	 = gem_get_ts_info,
	.get_hwtst	 = gem_get_hwtst,
	.set_hwtst	 = gem_set_hwtst,
};
#endif
static int macb_get_ts_info(struct net_device *netdev,
			    struct ethtool_ts_info *info)
{
	struct macb *bp = netdev_priv(netdev);

	if (bp->ptp_info)
		return bp->ptp_info->get_ts_info(netdev, info);

	return ethtool_op_get_ts_info(netdev, info);
}
static void gem_enable_flow_filters(struct macb *bp, bool enable)
{
	struct ethtool_rx_fs_item *item;
	u32 t2_scr;
	int num_t2_scr;

	num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8));

	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
		struct ethtool_rx_flow_spec *fs = &item->fs;
		struct ethtool_tcpip4_spec *tp4sp_m;

		if (fs->location >= num_t2_scr)
			continue;

		t2_scr = gem_readl_n(bp, SCRT2, fs->location);

		/* enable/disable screener regs for the flow entry */
		t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr);

		/* only enable fields with no masking */
		tp4sp_m = &(fs->m_u.tcp_ip4_spec);

		if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF))
			t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr);
		else
			t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr);

		if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF))
			t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr);
		else
			t2_scr = GEM_BFINS(CMPBEN, 0, t2_scr);

		if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)))
			t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr);
		else
			t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr);

		gem_writel_n(bp, SCRT2, fs->location, t2_scr);
	}
}
static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m;
	uint16_t index = fs->location;
	u32 w0, w1, t2_scr;
	bool cmp_a = false;
	bool cmp_b = false;
	bool cmp_c = false;

	tp4sp_v = &(fs->h_u.tcp_ip4_spec);
	tp4sp_m = &(fs->m_u.tcp_ip4_spec);

	/* ignore field if any masking set */
	if (tp4sp_m->ip4src == 0xFFFFFFFF) {
		/* 1st compare reg - IP source address */
		w0 = 0;
		w1 = 0;
		w0 = tp4sp_v->ip4src;
		w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
		w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
		w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1);
		gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0);
		gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1);
		cmp_a = true;
	}

	/* ignore field if any masking set */
	if (tp4sp_m->ip4dst == 0xFFFFFFFF) {
		/* 2nd compare reg - IP destination address */
		w0 = 0;
		w1 = 0;
		w0 = tp4sp_v->ip4dst;
		w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
		w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
		w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1);
		gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0);
		gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1);
		cmp_b = true;
	}

	/* ignore both port fields if masking set in both */
	if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) {
		/* 3rd compare reg - source port, destination port */
		w0 = 0;
		w1 = 0;
		w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1);
		if (tp4sp_m->psrc == tp4sp_m->pdst) {
			w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0);
			w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
			w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
			w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
		} else {
			/* only one port definition */
			w1 = GEM_BFINS(T2DISMSK, 0, w1); /* 16-bit compare */
			w0 = GEM_BFINS(T2MASK, 0xFFFF, w0);
			if (tp4sp_m->psrc == 0xFFFF) { /* src port */
				w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0);
				w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
			} else { /* dst port */
				w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
				w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1);
			}
		}
		gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0);
		gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1);
		cmp_c = true;
	}

	/* add filter to screener */
	t2_scr = 0;
	t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr);
	t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr);
	if (cmp_a)
		t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr);
	if (cmp_b)
		t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr);
	if (cmp_c)
		t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr);
	gem_writel_n(bp, SCRT2, index, t2_scr);
}
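/* Illustration (assumed user-space invocation, not part of the driver):
 * a rule such as
 *
 *   ethtool -N eth0 flow-type tcp4 src-ip 192.168.1.2 dst-port 5001 \
 *           action 1 loc 0
 *
 * arrives here with fully-set masks for the specified fields, so the
 * code above programs one IP-source compare register and one port
 * compare register, then points screener 0 at them and steers matching
 * frames to queue 1 via the ring_cookie.
 */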
static int gem_add_flow_filter(struct net_device *netdev,
		struct ethtool_rxnfc *cmd)
{
	struct macb *bp = netdev_priv(netdev);
	struct ethtool_rx_flow_spec *fs = &cmd->fs;
	struct ethtool_rx_fs_item *item, *newfs;
	unsigned long flags;
	int ret = -EINVAL;
	bool added = false;

	newfs = kmalloc(sizeof(*newfs), GFP_KERNEL);
	if (newfs == NULL)
		return -ENOMEM;
	memcpy(&newfs->fs, fs, sizeof(newfs->fs));

	netdev_dbg(netdev,
			"Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
			fs->flow_type, (int)fs->ring_cookie, fs->location,
			htonl(fs->h_u.tcp_ip4_spec.ip4src),
			htonl(fs->h_u.tcp_ip4_spec.ip4dst),
			htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst));

	spin_lock_irqsave(&bp->rx_fs_lock, flags);

	/* find correct place to add in list */
	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
		if (item->fs.location > newfs->fs.location) {
			list_add_tail(&newfs->list, &item->list);
			added = true;
			break;
		} else if (item->fs.location == fs->location) {
			netdev_err(netdev, "Rule not added: location %d not free!\n",
					fs->location);
			ret = -EBUSY;
			goto err;
		}
	}
	if (!added)
		list_add_tail(&newfs->list, &bp->rx_fs_list.list);

	gem_prog_cmp_regs(bp, fs);
	bp->rx_fs_list.count++;
	/* enable filtering if NTUPLE on */
	if (netdev->features & NETIF_F_NTUPLE)
		gem_enable_flow_filters(bp, 1);

	spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
	return 0;

err:
	spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
	kfree(newfs);
	return ret;
}
static int gem_del_flow_filter(struct net_device *netdev,
		struct ethtool_rxnfc *cmd)
{
	struct macb *bp = netdev_priv(netdev);
	struct ethtool_rx_fs_item *item;
	struct ethtool_rx_flow_spec *fs;
	unsigned long flags;

	spin_lock_irqsave(&bp->rx_fs_lock, flags);

	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
		if (item->fs.location == cmd->fs.location) {
			/* disable screener regs for the flow entry */
			fs = &(item->fs);
			netdev_dbg(netdev,
					"Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
					fs->flow_type, (int)fs->ring_cookie, fs->location,
					htonl(fs->h_u.tcp_ip4_spec.ip4src),
					htonl(fs->h_u.tcp_ip4_spec.ip4dst),
					htons(fs->h_u.tcp_ip4_spec.psrc),
					htons(fs->h_u.tcp_ip4_spec.pdst));

			gem_writel_n(bp, SCRT2, fs->location, 0);

			list_del(&item->list);
			bp->rx_fs_list.count--;
			spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
			kfree(item);
			return 0;
		}
	}

	spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
	return -EINVAL;
}
static int gem_get_flow_entry(struct net_device *netdev,
		struct ethtool_rxnfc *cmd)
{
	struct macb *bp = netdev_priv(netdev);
	struct ethtool_rx_fs_item *item;

	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
		if (item->fs.location == cmd->fs.location) {
			memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs));
			return 0;
		}
	}
	return -EINVAL;
}
static int gem_get_all_flow_entries(struct net_device *netdev,
		struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct macb *bp = netdev_priv(netdev);
	struct ethtool_rx_fs_item *item;
	uint32_t cnt = 0;

	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
		if (cnt == cmd->rule_cnt)
			return -EMSGSIZE;
		rule_locs[cnt] = item->fs.location;
		cnt++;
	}
	cmd->data = bp->max_tuples;
	cmd->rule_cnt = cnt;

	return 0;
}
static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
		u32 *rule_locs)
{
	struct macb *bp = netdev_priv(netdev);
	int ret = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = bp->num_queues;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = bp->rx_fs_list.count;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = gem_get_flow_entry(netdev, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = gem_get_all_flow_entries(netdev, cmd, rule_locs);
		break;
	default:
		netdev_err(netdev,
			  "Command parameter %d is not supported\n", cmd->cmd);
		ret = -EOPNOTSUPP;
	}

	return ret;
}
static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
	struct macb *bp = netdev_priv(netdev);
	int ret;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		if ((cmd->fs.location >= bp->max_tuples)
				|| (cmd->fs.ring_cookie >= bp->num_queues)) {
			ret = -EINVAL;
			break;
		}
		ret = gem_add_flow_filter(netdev, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = gem_del_flow_filter(netdev, cmd);
		break;
	default:
		netdev_err(netdev,
			  "Command parameter %d is not supported\n", cmd->cmd);
		ret = -EOPNOTSUPP;
	}

	return ret;
}
static const struct ethtool_ops macb_ethtool_ops = {
	.get_regs_len		= macb_get_regs_len,
	.get_regs		= macb_get_regs,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_wol		= macb_get_wol,
	.set_wol		= macb_set_wol,
	.get_link_ksettings     = phy_ethtool_get_link_ksettings,
	.set_link_ksettings     = phy_ethtool_set_link_ksettings,
	.get_ringparam		= macb_get_ringparam,
	.set_ringparam		= macb_set_ringparam,
};
static const struct ethtool_ops gem_ethtool_ops = {
	.get_regs_len		= macb_get_regs_len,
	.get_regs		= macb_get_regs,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= macb_get_ts_info,
	.get_ethtool_stats	= gem_get_ethtool_stats,
	.get_strings		= gem_get_ethtool_strings,
	.get_sset_count		= gem_get_sset_count,
	.get_link_ksettings     = phy_ethtool_get_link_ksettings,
	.set_link_ksettings     = phy_ethtool_set_link_ksettings,
	.get_ringparam		= macb_get_ringparam,
	.set_ringparam		= macb_set_ringparam,
	.get_rxnfc			= gem_get_rxnfc,
	.set_rxnfc			= gem_set_rxnfc,
};
static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct phy_device *phydev = dev->phydev;
	struct macb *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	if (!bp->ptp_info)
		return phy_mii_ioctl(phydev, rq, cmd);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return bp->ptp_info->set_hwtst(dev, rq, cmd);
	case SIOCGHWTSTAMP:
		return bp->ptp_info->get_hwtst(dev, rq);
	default:
		return phy_mii_ioctl(phydev, rq, cmd);
	}
}
static int macb_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct macb *bp = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	/* TX checksum offload */
	if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) {
		u32 dmacfg;

		dmacfg = gem_readl(bp, DMACFG);
		if (features & NETIF_F_HW_CSUM)
			dmacfg |= GEM_BIT(TXCOEN);
		else
			dmacfg &= ~GEM_BIT(TXCOEN);
		gem_writel(bp, DMACFG, dmacfg);
	}

	/* RX checksum offload */
	if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) {
		u32 netcfg;

		netcfg = gem_readl(bp, NCFGR);
		if (features & NETIF_F_RXCSUM &&
		    !(netdev->flags & IFF_PROMISC))
			netcfg |= GEM_BIT(RXCOEN);
		else
			netcfg &= ~GEM_BIT(RXCOEN);
		gem_writel(bp, NCFGR, netcfg);
	}

	/* RX Flow Filters */
	if ((changed & NETIF_F_NTUPLE) && macb_is_gem(bp)) {
		bool turn_on = features & NETIF_F_NTUPLE;

		gem_enable_flow_filters(bp, turn_on);
	}
	return 0;
}
static const struct net_device_ops macb_netdev_ops = {
	.ndo_open		= macb_open,
	.ndo_stop		= macb_close,
	.ndo_start_xmit		= macb_start_xmit,
	.ndo_set_rx_mode	= macb_set_rx_mode,
	.ndo_get_stats		= macb_get_stats,
	.ndo_do_ioctl		= macb_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= macb_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= macb_poll_controller,
#endif
	.ndo_set_features	= macb_set_features,
	.ndo_features_check	= macb_features_check,
};
/* Configure peripheral capabilities according to device tree
 * and integration options used
 */
static void macb_configure_caps(struct macb *bp,
				const struct macb_config *dt_conf)
{
	u32 dcfg;

	if (dt_conf)
		bp->caps = dt_conf->caps;

	if (hw_is_gem(bp->regs, bp->native_io)) {
		bp->caps |= MACB_CAPS_MACB_IS_GEM;

		dcfg = gem_readl(bp, DCFG1);
		if (GEM_BFEXT(IRQCOR, dcfg) == 0)
			bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
		dcfg = gem_readl(bp, DCFG2);
		if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
			bp->caps |= MACB_CAPS_FIFO_MODE;
#ifdef CONFIG_MACB_USE_HWSTAMP
		if (gem_has_ptp(bp)) {
			if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5)))
				pr_err("GEM doesn't support hardware ptp.\n");
			else {
				bp->hw_dma_cap |= HW_DMA_CAP_PTP;
				bp->ptp_info = &gem_ptp_info;
			}
		}
#endif
	}

	dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
}
static void macb_probe_queues(void __iomem *mem,
			      bool native_io,
			      unsigned int *queue_mask,
			      unsigned int *num_queues)
{
	unsigned int hw_q;

	*queue_mask = 0x1;
	*num_queues = 1;

	/* is it macb or gem ?
	 *
	 * We need to read directly from the hardware here because
	 * we are early in the probe process and don't have the
	 * MACB_CAPS_MACB_IS_GEM flag positioned
	 */
	if (!hw_is_gem(mem, native_io))
		return;

	/* bit 0 is never set but queue 0 always exists */
	*queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff;

	*queue_mask |= 0x1;

	for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
		if (*queue_mask & (1 << hw_q))
			(*num_queues)++;
}
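/* Worked example (illustration only): if GEM_DCFG6 reads 0x06, the mask
 * becomes 0x06 | 0x01 = 0x07 after forcing queue 0, and the loop counts
 * bits 1 and 2 on top of the initial queue, giving *num_queues == 3.
 */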
static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
			 struct clk **hclk, struct clk **tx_clk,
			 struct clk **rx_clk, struct clk **tsu_clk)
{
	struct macb_platform_data *pdata;
	int err;

	pdata = dev_get_platdata(&pdev->dev);
	if (pdata) {
		*pclk = pdata->pclk;
		*hclk = pdata->hclk;
	} else {
		*pclk = devm_clk_get(&pdev->dev, "pclk");
		*hclk = devm_clk_get(&pdev->dev, "hclk");
	}

	if (IS_ERR_OR_NULL(*pclk)) {
		err = PTR_ERR(*pclk);
		if (!err)
			err = -ENODEV;

		dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
		return err;
	}

	if (IS_ERR_OR_NULL(*hclk)) {
		err = PTR_ERR(*hclk);
		if (!err)
			err = -ENODEV;

		dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "rx_clk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	*tsu_clk = devm_clk_get(&pdev->dev, "tsu_clk");
	if (IS_ERR(*tsu_clk))
		*tsu_clk = NULL;

	err = clk_prepare_enable(*pclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
		return err;
	}

	err = clk_prepare_enable(*hclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err);
		goto err_disable_pclk;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
		goto err_disable_hclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err);
		goto err_disable_txclk;
	}

	err = clk_prepare_enable(*tsu_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tsu_clk (%u)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);

err_disable_txclk:
	clk_disable_unprepare(*tx_clk);

err_disable_hclk:
	clk_disable_unprepare(*hclk);

err_disable_pclk:
	clk_disable_unprepare(*pclk);

	return err;
}
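/* Device-tree illustration (hypothetical node, for reference only):
 * the names looked up above correspond to clock-names entries such as
 *
 *	ethernet@e000b000 {
 *		compatible = "cdns,gem";
 *		clock-names = "pclk", "hclk", "tx_clk", "rx_clk", "tsu_clk";
 *		...
 *	};
 *
 * tx_clk, rx_clk and tsu_clk are optional; a missing entry simply
 * leaves the corresponding pointer NULL, and clk_prepare_enable(NULL)
 * succeeds as a no-op.
 */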
static int macb_init(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	unsigned int hw_q, q;
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	int err;
	u32 val, reg;

	bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
	bp->rx_ring_size = DEFAULT_RX_RING_SIZE;

	/* set the queue register mapping once for all: queue0 has a special
	 * register mapping but we don't want to test the queue index then
	 * compute the corresponding register offset at run time.
	 */
	for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
		if (!(bp->queue_mask & (1 << hw_q)))
			continue;

		queue = &bp->queues[q];
		queue->bp = bp;
		netif_napi_add(dev, &queue->napi, macb_poll, 64);
		if (hw_q) {
			queue->ISR  = GEM_ISR(hw_q - 1);
			queue->IER  = GEM_IER(hw_q - 1);
			queue->IDR  = GEM_IDR(hw_q - 1);
			queue->IMR  = GEM_IMR(hw_q - 1);
			queue->TBQP = GEM_TBQP(hw_q - 1);
			queue->RBQP = GEM_RBQP(hw_q - 1);
			queue->RBQS = GEM_RBQS(hw_q - 1);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
			if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
				queue->TBQPH = GEM_TBQPH(hw_q - 1);
				queue->RBQPH = GEM_RBQPH(hw_q - 1);
			}
#endif
		} else {
			/* queue0 uses legacy registers */
			queue->ISR  = MACB_ISR;
			queue->IER  = MACB_IER;
			queue->IDR  = MACB_IDR;
			queue->IMR  = MACB_IMR;
			queue->TBQP = MACB_TBQP;
			queue->RBQP = MACB_RBQP;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
			if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
				queue->TBQPH = MACB_TBQPH;
				queue->RBQPH = MACB_RBQPH;
			}
#endif
		}

		/* get irq: here we use the linux queue index, not the hardware
		 * queue index. the queue irq definitions in the device tree
		 * must remove the optional gaps that could exist in the
		 * hardware queue mask.
		 */
		queue->irq = platform_get_irq(pdev, q);
		err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
				       IRQF_SHARED, dev->name, queue);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to request IRQ %d (error %d)\n",
				queue->irq, err);
			return err;
		}

		INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
		q++;
	}

	dev->netdev_ops = &macb_netdev_ops;

	/* set up appropriate routines according to adapter type */
	if (macb_is_gem(bp)) {
		bp->max_tx_length = GEM_MAX_TX_LEN;
		bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
		bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
		bp->macbgem_ops.mog_init_rings = gem_init_rings;
		bp->macbgem_ops.mog_rx = gem_rx;
		dev->ethtool_ops = &gem_ethtool_ops;
	} else {
		bp->max_tx_length = MACB_MAX_TX_LEN;
		bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
		bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
		bp->macbgem_ops.mog_init_rings = macb_init_rings;
		bp->macbgem_ops.mog_rx = macb_rx;
		dev->ethtool_ops = &macb_ethtool_ops;
	}

	/* Set features */
	dev->hw_features = NETIF_F_SG;

	/* Check LSO capability */
	if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
		dev->hw_features |= MACB_NETIF_LSO;

	/* Checksum offload is only available on gem with packet buffer */
	if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	if (bp->caps & MACB_CAPS_SG_DISABLED)
		dev->hw_features &= ~NETIF_F_SG;
	dev->features = dev->hw_features;

	/* Check RX Flow Filters support.
	 * Max Rx flows set by availability of screeners & compare regs:
	 * each 4-tuple define requires 1 T2 screener reg + 3 compare regs
	 */
	reg = gem_readl(bp, DCFG8);
	bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
			GEM_BFEXT(T2SCR, reg));
	if (bp->max_tuples > 0) {
		/* also needs one ethtype match to check IPv4 */
		if (GEM_BFEXT(SCR2ETH, reg) > 0) {
			/* program this reg now */
			reg = 0;
			reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg);
			gem_writel_n(bp, ETHT, SCRT2_ETHT, reg);
			/* Filtering is supported in hw but don't enable it in kernel now */
			dev->hw_features |= NETIF_F_NTUPLE;
			/* init Rx flow definitions */
			INIT_LIST_HEAD(&bp->rx_fs_list.list);
			bp->rx_fs_list.count = 0;
			spin_lock_init(&bp->rx_fs_lock);
		} else
			bp->max_tuples = 0;
	}

	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
		val = 0;
		if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
			val = GEM_BIT(RGMII);
		else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
			 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
			val = MACB_BIT(RMII);
		else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
			val = MACB_BIT(MII);

		if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
			val |= MACB_BIT(CLKEN);

		macb_or_gem_writel(bp, USRIO, val);
	}

	/* Set MII management clock divider */
	val = macb_mdc_clk_div(bp);
	val |= macb_dbw(bp);
	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
	macb_writel(bp, NCFGR, val);

	return 0;
}
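/* Worked example for the flow-filter sizing above (illustration only):
 * a DCFG8 value with SCR2CMP == 12 and T2SCR == 8 yields
 * max_tuples = min(12 / 3, 8) = 4, i.e. four 4-tuple rules, each
 * consuming one type-2 screener register and three compare registers.
 */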
#if defined(CONFIG_OF)
/* 1518 rounded up */
#define AT91ETHER_MAX_RBUFF_SZ	0x600
/* max number of receive buffers */
#define AT91ETHER_MAX_RX_DESCR	9

/* Initialize and start the Receiver and Transmit subsystems */
static int at91ether_start(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	struct macb_queue *q = &lp->queues[0];
	struct macb_dma_desc *desc;
	dma_addr_t addr;
	u32 ctl;
	int i;

	q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
					 (AT91ETHER_MAX_RX_DESCR *
					  macb_dma_desc_get_size(lp)),
					 &q->rx_ring_dma, GFP_KERNEL);
	if (!q->rx_ring)
		return -ENOMEM;

	q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
					    AT91ETHER_MAX_RX_DESCR *
					    AT91ETHER_MAX_RBUFF_SZ,
					    &q->rx_buffers_dma, GFP_KERNEL);
	if (!q->rx_buffers) {
		dma_free_coherent(&lp->pdev->dev,
				  AT91ETHER_MAX_RX_DESCR *
				  macb_dma_desc_get_size(lp),
				  q->rx_ring, q->rx_ring_dma);
		q->rx_ring = NULL;
		return -ENOMEM;
	}

	addr = q->rx_buffers_dma;
	for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
		desc = macb_rx_desc(q, i);
		macb_set_addr(lp, desc, addr);
		desc->ctrl = 0;
		addr += AT91ETHER_MAX_RBUFF_SZ;
	}

	/* Set the Wrap bit on the last descriptor */
	desc->addr |= MACB_BIT(RX_WRAP);

	/* Reset buffer index */
	q->rx_tail = 0;

	/* Program address of descriptor list in Rx Buffer Queue register */
	macb_writel(lp, RBQP, q->rx_ring_dma);

	/* Enable Receive and Transmit */
	ctl = macb_readl(lp, NCR);
	macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));

	return 0;
}
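/* Layout note (illustration): the receive area set up above is a fixed
 * array of AT91ETHER_MAX_RX_DESCR (9) descriptors, each owning one
 * AT91ETHER_MAX_RBUFF_SZ (0x600 = 1536) byte buffer, i.e. 9 * 1536 =
 * 13824 bytes of coherent DMA memory, with the RX_WRAP bit closing the
 * ring on the last descriptor.
 */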
/* Open the ethernet interface */
static int at91ether_open(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	u32 ctl;
	int ret;

	/* Clear internal statistics */
	ctl = macb_readl(lp, NCR);
	macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));

	macb_set_hwaddr(lp);

	ret = at91ether_start(dev);
	if (ret)
		return ret;

	/* Enable MAC interrupts */
	macb_writel(lp, IER, MACB_BIT(RCOMP)	|
			     MACB_BIT(RXUBR)	|
			     MACB_BIT(ISR_TUND)	|
			     MACB_BIT(ISR_RLE)	|
			     MACB_BIT(TCOMP)	|
			     MACB_BIT(ISR_ROVR)	|
			     MACB_BIT(HRESP));

	/* schedule a link state check */
	phy_start(dev->phydev);

	netif_start_queue(dev);

	return 0;
}
/* Close the interface */
static int at91ether_close(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	struct macb_queue *q = &lp->queues[0];
	u32 ctl;

	/* Disable Receiver and Transmitter */
	ctl = macb_readl(lp, NCR);
	macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));

	/* Disable MAC interrupts */
	macb_writel(lp, IDR, MACB_BIT(RCOMP)	|
			     MACB_BIT(RXUBR)	|
			     MACB_BIT(ISR_TUND)	|
			     MACB_BIT(ISR_RLE)	|
			     MACB_BIT(TCOMP)	|
			     MACB_BIT(ISR_ROVR) |
			     MACB_BIT(HRESP));

	netif_stop_queue(dev);

	dma_free_coherent(&lp->pdev->dev,
			  AT91ETHER_MAX_RX_DESCR *
			  macb_dma_desc_get_size(lp),
			  q->rx_ring, q->rx_ring_dma);
	q->rx_ring = NULL;

	dma_free_coherent(&lp->pdev->dev,
			  AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
			  q->rx_buffers, q->rx_buffers_dma);
	q->rx_buffers = NULL;

	return 0;
}
/* Transmit packet */
static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);

	if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
		netif_stop_queue(dev);

		/* Store packet information (to free when Tx completed) */
		lp->skb = skb;
		lp->skb_length = skb->len;
		lp->skb_physaddr = dma_map_single(&lp->pdev->dev, skb->data,
						  skb->len, DMA_TO_DEVICE);
		if (dma_mapping_error(&lp->pdev->dev, lp->skb_physaddr)) {
			dev_kfree_skb_any(skb);
			dev->stats.tx_dropped++;
			netdev_err(dev, "%s: DMA mapping error\n", __func__);
			return NETDEV_TX_OK;
		}

		/* Set address of the data in the Transmit Address register */
		macb_writel(lp, TAR, lp->skb_physaddr);
		/* Set length of the packet in the Transmit Control register */
		macb_writel(lp, TCR, skb->len);

	} else {
		netdev_err(dev, "%s called, but device is busy!\n", __func__);
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}
/* Extract received frame from buffer descriptors and send to upper layers.
 * (Called from interrupt context)
 */
static void at91ether_rx(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	struct macb_queue *q = &lp->queues[0];
	struct macb_dma_desc *desc;
	unsigned char *p_recv;
	struct sk_buff *skb;
	unsigned int pktlen;

	desc = macb_rx_desc(q, q->rx_tail);
	while (desc->addr & MACB_BIT(RX_USED)) {
		p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
		pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
		skb = netdev_alloc_skb(dev, pktlen + 2);
		if (skb) {
			skb_reserve(skb, 2);
			skb_put_data(skb, p_recv, pktlen);

			skb->protocol = eth_type_trans(skb, dev);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pktlen;
			netif_rx(skb);
		} else {
			dev->stats.rx_dropped++;
		}

		if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
			dev->stats.multicast++;

		/* reset ownership bit */
		desc->addr &= ~MACB_BIT(RX_USED);

		/* wrap after last buffer */
		if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
			q->rx_tail = 0;
		else
			q->rx_tail++;

		desc = macb_rx_desc(q, q->rx_tail);
	}
}
/* MAC interrupt handler */
static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct macb *lp = netdev_priv(dev);
	u32 intstatus, ctl;

	/* MAC Interrupt Status register indicates what interrupts are pending.
	 * It is automatically cleared once read.
	 */
	intstatus = macb_readl(lp, ISR);

	/* Receive complete */
	if (intstatus & MACB_BIT(RCOMP))
		at91ether_rx(dev);

	/* Transmit complete */
	if (intstatus & MACB_BIT(TCOMP)) {
		/* The TCOMP bit is set even if the transmission failed */
		if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
			dev->stats.tx_errors++;

		if (lp->skb) {
			dev_consume_skb_irq(lp->skb);
			lp->skb = NULL;
			dma_unmap_single(&lp->pdev->dev, lp->skb_physaddr,
					 lp->skb_length, DMA_TO_DEVICE);
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += lp->skb_length;
		}
		netif_wake_queue(dev);
	}

	/* Work-around for EMAC Errata section 41.3.1 */
	if (intstatus & MACB_BIT(RXUBR)) {
		ctl = macb_readl(lp, NCR);
		macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
		wmb();
		macb_writel(lp, NCR, ctl | MACB_BIT(RE));
	}

	if (intstatus & MACB_BIT(ISR_ROVR))
		netdev_err(dev, "ROVR error\n");

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void at91ether_poll_controller(struct net_device *dev)
{
	unsigned long flags;

	local_irq_save(flags);
	at91ether_interrupt(dev->irq, dev);
	local_irq_restore(flags);
}
#endif
static const struct net_device_ops at91ether_netdev_ops = {
	.ndo_open		= at91ether_open,
	.ndo_stop		= at91ether_close,
	.ndo_start_xmit		= at91ether_start_xmit,
	.ndo_get_stats		= macb_get_stats,
	.ndo_set_rx_mode	= macb_set_rx_mode,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_do_ioctl		= macb_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= at91ether_poll_controller,
#endif
};
static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
			      struct clk **hclk, struct clk **tx_clk,
			      struct clk **rx_clk, struct clk **tsu_clk)
{
	int err;

	*hclk = NULL;
	*tx_clk = NULL;
	*rx_clk = NULL;
	*tsu_clk = NULL;

	*pclk = devm_clk_get(&pdev->dev, "ether_clk");
	if (IS_ERR(*pclk))
		return PTR_ERR(*pclk);

	err = clk_prepare_enable(*pclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
		return err;
	}

	return 0;
}
static int at91ether_init(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(dev);
	int err;
	u32 reg;

	bp->queues[0].bp = bp;

	dev->netdev_ops = &at91ether_netdev_ops;
	dev->ethtool_ops = &macb_ethtool_ops;

	err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
			       0, dev->name, dev);
	if (err)
		return err;

	macb_writel(bp, NCR, 0);

	reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
	if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
		reg |= MACB_BIT(RM9200_RMII);

	macb_writel(bp, NCFGR, reg);

	return 0;
}
static const struct macb_config at91sam9260_config = {
	.caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d3macb_config = {
	.caps = MACB_CAPS_SG_DISABLED
	      | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config pc302gem_config = {
	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d2_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d3_config = {
	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
	      | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
};

static const struct macb_config sama5d4_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.dma_burst_length = 4,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config emac_config = {
	.caps = MACB_CAPS_NEEDS_RSTONUBR,
	.clk_init = at91ether_clk_init,
	.init = at91ether_init,
};

static const struct macb_config np4_config = {
	.caps = MACB_CAPS_USRIO_DISABLED,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config zynqmp_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
			MACB_CAPS_JUMBO |
			MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
};

static const struct macb_config zynq_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF |
		MACB_CAPS_NEEDS_RSTONUBR,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};
static const struct of_device_id macb_dt_ids[] = {
	{ .compatible = "cdns,at32ap7000-macb" },
	{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
	{ .compatible = "cdns,macb" },
	{ .compatible = "cdns,np4-macb", .data = &np4_config },
	{ .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
	{ .compatible = "cdns,gem", .data = &pc302gem_config },
	{ .compatible = "cdns,sam9x60-macb", .data = &at91sam9260_config },
	{ .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
	{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
	{ .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
	{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
	{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
	{ .compatible = "cdns,emac", .data = &emac_config },
	{ .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
	{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, macb_dt_ids);
#endif /* CONFIG_OF */
static const struct macb_config default_gem_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
			MACB_CAPS_JUMBO |
			MACB_CAPS_GEM_HAS_PTP,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
};
static int macb_probe(struct platform_device *pdev)
{
	const struct macb_config *macb_config = &default_gem_config;
	int (*clk_init)(struct platform_device *, struct clk **,
			struct clk **, struct clk **,  struct clk **,
			struct clk **) = macb_config->clk_init;
	int (*init)(struct platform_device *) = macb_config->init;
	struct device_node *np = pdev->dev.of_node;
	struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
	struct clk *tsu_clk = NULL;
	unsigned int queue_mask, num_queues;
	struct macb_platform_data *pdata;
	bool native_io;
	struct phy_device *phydev;
	struct net_device *dev;
	struct resource *regs;
	void __iomem *mem;
	const char *mac;
	struct macb *bp;
	int err, val;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mem = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	if (np) {
		const struct of_device_id *match;

		match = of_match_node(macb_dt_ids, np);
		if (match && match->data) {
			macb_config = match->data;
			clk_init = macb_config->clk_init;
			init = macb_config->init;
		}
	}

	err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk);
	if (err)
		return err;

	pm_runtime_set_autosuspend_delay(&pdev->dev, MACB_PM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	native_io = hw_is_native_io(mem);

	macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
	dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
	if (!dev) {
		err = -ENOMEM;
		goto err_disable_clocks;
	}

	dev->base_addr = regs->start;

	SET_NETDEV_DEV(dev, &pdev->dev);

	bp = netdev_priv(dev);
	bp->pdev = pdev;
	bp->dev = dev;
	bp->regs = mem;
	bp->native_io = native_io;
	if (native_io) {
		bp->macb_reg_readl = hw_readl_native;
		bp->macb_reg_writel = hw_writel_native;
	} else {
		bp->macb_reg_readl = hw_readl;
		bp->macb_reg_writel = hw_writel;
	}
	bp->num_queues = num_queues;
	bp->queue_mask = queue_mask;
	if (macb_config)
		bp->dma_burst_length = macb_config->dma_burst_length;
	bp->pclk = pclk;
	bp->hclk = hclk;
	bp->tx_clk = tx_clk;
	bp->rx_clk = rx_clk;
	bp->tsu_clk = tsu_clk;
	if (macb_config)
		bp->jumbo_max_len = macb_config->jumbo_max_len;

	bp->wol = 0;
	if (of_get_property(np, "magic-packet", NULL))
		bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
	device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);

	spin_lock_init(&bp->lock);

	/* setup capabilities */
	macb_configure_caps(bp, macb_config);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
		dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
		bp->hw_dma_cap |= HW_DMA_CAP_64B;
	}
#endif
	platform_set_drvdata(pdev, dev);

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq < 0) {
		err = dev->irq;
		goto err_out_free_netdev;
	}

	/* MTU range: 68 - 1500 or 10240 */
	dev->min_mtu = GEM_MTU_MIN_SIZE;
	if (bp->caps & MACB_CAPS_JUMBO)
		dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
	else
		dev->max_mtu = ETH_DATA_LEN;

	if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) {
		val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10));
		if (val)
			bp->rx_bd_rd_prefetch = (2 << (val - 1)) *
						macb_dma_desc_get_size(bp);

		val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10));
		if (val)
			bp->tx_bd_rd_prefetch = (2 << (val - 1)) *
						macb_dma_desc_get_size(bp);
	}

	bp->rx_intr_mask = MACB_RX_INT_FLAGS;
	if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
		bp->rx_intr_mask |= MACB_BIT(RXUBR);

	mac = of_get_mac_address(np);
	if (mac) {
		ether_addr_copy(bp->dev->dev_addr, mac);
	} else {
		err = nvmem_get_mac_address(&pdev->dev, bp->dev->dev_addr);
		if (err) {
			if (err == -EPROBE_DEFER)
				goto err_out_free_netdev;
			macb_get_hwaddr(bp);
		}
	}

	err = of_get_phy_mode(np);
	if (err < 0) {
		pdata = dev_get_platdata(&pdev->dev);
		if (pdata && pdata->is_rmii)
			bp->phy_interface = PHY_INTERFACE_MODE_RMII;
		else
			bp->phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		bp->phy_interface = err;
	}

	/* IP specific init */
	err = init(pdev);
	if (err)
		goto err_out_free_netdev;

	err = macb_mii_init(bp);
	if (err)
		goto err_out_free_netdev;

	phydev = dev->phydev;

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_unregister_mdio;
	}

	tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
		     (unsigned long)bp);

	phy_attached_info(phydev);

	netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
		    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
		    dev->base_addr, dev->irq, dev->dev_addr);

	pm_runtime_mark_last_busy(&bp->pdev->dev);
	pm_runtime_put_autosuspend(&bp->pdev->dev);

	return 0;

err_out_unregister_mdio:
	phy_disconnect(dev->phydev);
	mdiobus_unregister(bp->mii_bus);
	of_node_put(bp->phy_node);
	if (np && of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	mdiobus_free(bp->mii_bus);

err_out_free_netdev:
	free_netdev(dev);

err_disable_clocks:
	clk_disable_unprepare(tx_clk);
	clk_disable_unprepare(hclk);
	clk_disable_unprepare(pclk);
	clk_disable_unprepare(rx_clk);
	clk_disable_unprepare(tsu_clk);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);

	return err;
}
static int macb_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct macb *bp;
	struct device_node *np = pdev->dev.of_node;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		if (dev->phydev)
			phy_disconnect(dev->phydev);
		mdiobus_unregister(bp->mii_bus);
		if (np && of_phy_is_fixed_link(np))
			of_phy_deregister_fixed_link(np);
		dev->phydev = NULL;
		mdiobus_free(bp->mii_bus);

		unregister_netdev(dev);
		pm_runtime_disable(&pdev->dev);
		pm_runtime_dont_use_autosuspend(&pdev->dev);
		if (!pm_runtime_suspended(&pdev->dev)) {
			clk_disable_unprepare(bp->tx_clk);
			clk_disable_unprepare(bp->hclk);
			clk_disable_unprepare(bp->pclk);
			clk_disable_unprepare(bp->rx_clk);
			clk_disable_unprepare(bp->tsu_clk);
			pm_runtime_set_suspended(&pdev->dev);
		}
		of_node_put(bp->phy_node);
		free_netdev(dev);
	}

	return 0;
}
static int __maybe_unused macb_suspend(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);
	struct macb_queue *queue = bp->queues;
	unsigned long flags;
	unsigned int q;

	if (!netif_running(netdev))
		return 0;

	if (bp->wol & MACB_WOL_ENABLED) {
		macb_writel(bp, IER, MACB_BIT(WOL));
		macb_writel(bp, WOL, MACB_BIT(MAG));
		enable_irq_wake(bp->queues[0].irq);
		netif_device_detach(netdev);
	} else {
		netif_device_detach(netdev);
		for (q = 0, queue = bp->queues; q < bp->num_queues;
		     ++q, ++queue)
			napi_disable(&queue->napi);
		phy_stop(netdev->phydev);
		phy_suspend(netdev->phydev);
		spin_lock_irqsave(&bp->lock, flags);
		macb_reset_hw(bp);
		spin_unlock_irqrestore(&bp->lock, flags);
	}

	netif_carrier_off(netdev);
	if (bp->ptp_info)
		bp->ptp_info->ptp_remove(netdev);
	pm_runtime_force_suspend(dev);

	return 0;
}
static int __maybe_unused macb_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);
	struct macb_queue *queue = bp->queues;
	unsigned int q;

	if (!netif_running(netdev))
		return 0;

	pm_runtime_force_resume(dev);

	if (bp->wol & MACB_WOL_ENABLED) {
		macb_writel(bp, IDR, MACB_BIT(WOL));
		macb_writel(bp, WOL, 0);
		disable_irq_wake(bp->queues[0].irq);
	} else {
		macb_writel(bp, NCR, MACB_BIT(MPE));
		for (q = 0, queue = bp->queues; q < bp->num_queues;
		     ++q, ++queue)
			napi_enable(&queue->napi);
		phy_resume(netdev->phydev);
		phy_init_hw(netdev->phydev);
		phy_start(netdev->phydev);
	}

	bp->macbgem_ops.mog_init_rings(bp);
	macb_init_hw(bp);
	macb_set_rx_mode(netdev);
	netif_device_attach(netdev);
	if (bp->ptp_info)
		bp->ptp_info->ptp_init(netdev);

	return 0;
}
static int __maybe_unused macb_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

	if (!(device_may_wakeup(&bp->dev->dev))) {
		clk_disable_unprepare(bp->tx_clk);
		clk_disable_unprepare(bp->hclk);
		clk_disable_unprepare(bp->pclk);
		clk_disable_unprepare(bp->rx_clk);
	}
	clk_disable_unprepare(bp->tsu_clk);

	return 0;
}
static int __maybe_unused macb_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

	if (!(device_may_wakeup(&bp->dev->dev))) {
		clk_prepare_enable(bp->pclk);
		clk_prepare_enable(bp->hclk);
		clk_prepare_enable(bp->tx_clk);
		clk_prepare_enable(bp->rx_clk);
	}
	clk_prepare_enable(bp->tsu_clk);

	return 0;
}
static const struct dev_pm_ops macb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(macb_suspend, macb_resume)
	SET_RUNTIME_PM_OPS(macb_runtime_suspend, macb_runtime_resume, NULL)
};
static struct platform_driver macb_driver = {
	.probe		= macb_probe,
	.remove		= macb_remove,
	.driver		= {
		.name		= "macb",
		.of_match_table	= of_match_ptr(macb_dt_ids),
		.pm	= &macb_pm_ops,
	},
};

module_platform_driver(macb_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");