// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xilinx Axi Ethernet device driver
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2010 - 2011 PetaLogix
 * Copyright (c) 2019 - 2022 Calian Advanced Technologies
 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
 *
 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
 * and Spartan6.
 *
 * TODO:
 *  - Add Axi Fifo support.
 *  - Factor out Axi DMA code into separate driver.
 *  - Test and fix basic multicast filtering.
 *  - Add support for extended multicast filtering.
 *  - Test basic VLAN support.
 *  - Add support for extended VLAN support.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/circ_buf.h>
#include <net/netdev_queues.h>

#include "xilinx_axienet.h"

/* Descriptors defines for Tx and Rx DMA */
#define TX_BD_NUM_DEFAULT		128
#define RX_BD_NUM_DEFAULT		1024
#define TX_BD_NUM_MIN			(MAX_SKB_FRAGS + 1)
#define TX_BD_NUM_MAX			4096
#define RX_BD_NUM_MAX			4096
#define DMA_NUM_APP_WORDS		5
#define LEN_APP				4
#define RX_BUF_NUM_DEFAULT		128

/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME		"xaxienet"
#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
#define DRIVER_VERSION		"1.00a"

#define AXIENET_REGS_N		40

static void axienet_rx_submit_desc(struct net_device *ndev);

/* Match table for of_platform binding */
static const struct of_device_id axienet_of_match[] = {
	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
	{},
};

MODULE_DEVICE_TABLE(of, axienet_of_match);

/* Option table for setting up Axi Ethernet hardware options */
static struct axienet_option axienet_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_JUM_MASK,
	}, {
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_JUM_MASK,
	}, { /* Turn on VLAN packet support for both Rx and Tx */
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_VLAN_MASK,
	}, {
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_VLAN_MASK,
	}, { /* Turn on FCS stripping on receive packets */
		.opt = XAE_OPTION_FCS_STRIP,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_FCS_MASK,
	}, { /* Turn on FCS insertion on transmit packets */
		.opt = XAE_OPTION_FCS_INSERT,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_FCS_MASK,
	}, { /* Turn off length/type field checking on receive packets */
		.opt = XAE_OPTION_LENTYPE_ERR,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_LT_DIS_MASK,
	}, { /* Turn on Rx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCRX_MASK,
	}, { /* Turn on Tx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCTX_MASK,
	}, { /* Turn on promiscuous frame filtering */
		.opt = XAE_OPTION_PROMISC,
		.reg = XAE_FMI_OFFSET,
		.m_or = XAE_FMI_PM_MASK,
	}, { /* Enable transmitter */
		.opt = XAE_OPTION_TXEN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_TX_MASK,
	}, { /* Enable receiver */
		.opt = XAE_OPTION_RXEN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_RX_MASK,
	},
	{}
};
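
/* The skb ring accessors below rely on RX_BUF_NUM_DEFAULT (128) and
 * TX_BD_NUM_MAX (4096) being powers of two, so the ever-increasing ring
 * head/tail counters can be wrapped with a cheap mask instead of a modulo.
 */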

static struct skbuf_dma_descriptor *axienet_get_rx_desc(struct axienet_local *lp, int i)
{
	return lp->rx_skb_ring[i & (RX_BUF_NUM_DEFAULT - 1)];
}

static struct skbuf_dma_descriptor *axienet_get_tx_desc(struct axienet_local *lp, int i)
{
	return lp->tx_skb_ring[i & (TX_BD_NUM_MAX - 1)];
}

/**
 * axienet_dma_in32 - Memory mapped Axi DMA register read
 * @lp: Pointer to axienet local structure
 * @reg: Address offset from the base address of the Axi DMA core
 *
 * Return: The contents of the Axi DMA register
 *
 * This function returns the contents of the corresponding Axi DMA register.
 */
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
	return ioread32(lp->dma_regs + reg);
}

static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
			       struct axidma_bd *desc)
{
	desc->phys = lower_32_bits(addr);
	if (lp->features & XAE_FEATURE_DMA_64BIT)
		desc->phys_msb = upper_32_bits(addr);
}
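
/* Descriptor addresses are split across the 32-bit 'phys' and 'phys_msb'
 * words. desc_get_phys_addr() below rebuilds the full address; the upper
 * half is shifted back in two 16-bit steps so the expression stays valid
 * even when dma_addr_t is only 32 bits wide.
 */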

static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
				     struct axidma_bd *desc)
{
	dma_addr_t ret = desc->phys;

	if (lp->features & XAE_FEATURE_DMA_64BIT)
		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;

	return ret;
}

/**
 * axienet_dma_bd_release - Release buffer descriptor rings
 * @ndev: Pointer to the net_device structure
 *
 * This function is used to release the descriptors allocated in
 * axienet_dma_bd_init. axienet_dma_bd_release is called when the Axi Ethernet
 * driver stop routine is called.
 */
static void axienet_dma_bd_release(struct net_device *ndev)
{
	int i;
	struct axienet_local *lp = netdev_priv(ndev);

	/* If we end up here, tx_bd_v must have been DMA allocated. */
	dma_free_coherent(lp->dev,
			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
			  lp->tx_bd_v,
			  lp->tx_bd_p);

	if (!lp->rx_bd_v)
		return;

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t phys;

		/* A NULL skb means this descriptor has not been initialised
		 * at all.
		 */
		if (!lp->rx_bd_v[i].skb)
			break;

		dev_kfree_skb(lp->rx_bd_v[i].skb);

		/* For each descriptor, we programmed cntrl with the (non-zero)
		 * descriptor size, after it had been successfully allocated.
		 * So a non-zero value in there means we need to unmap it.
		 */
		if (lp->rx_bd_v[i].cntrl) {
			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
			dma_unmap_single(lp->dev, phys,
					 lp->max_frm_size, DMA_FROM_DEVICE);
		}
	}

	dma_free_coherent(lp->dev,
			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
			  lp->rx_bd_v,
			  lp->rx_bd_p);
}

static u64 axienet_dma_rate(struct axienet_local *lp)
{
	if (lp->axi_clk)
		return clk_get_rate(lp->axi_clk);
	return 125000000; /* arbitrary guess if no clock rate set */
}

/**
 * axienet_calc_cr() - Calculate control register value
 * @lp: Device private data
 * @count: Number of completions before an interrupt
 * @usec: Microseconds after the last completion before an interrupt
 *
 * Calculate a control register value based on the coalescing settings. The
 * run/stop bit is not set.
 *
 * Return: Control register value with coalescing settings configured.
 */
static u32 axienet_calc_cr(struct axienet_local *lp, u32 count, u32 usec)
{
	u32 cr;

	cr = FIELD_PREP(XAXIDMA_COALESCE_MASK, count) | XAXIDMA_IRQ_IOC_MASK |
	     XAXIDMA_IRQ_ERROR_MASK;
	/* Only set interrupt delay timer if not generating an interrupt on
	 * the first packet. Otherwise leave at 0 to disable delay interrupt.
	 */
	if (count > 1) {
		u64 clk_rate = axienet_dma_rate(lp);
		u64 timer;

		/* 1 Timeout Interval = 125 * (clock period of SG clock) */
		timer = DIV64_U64_ROUND_CLOSEST((u64)usec * clk_rate,
						XAXIDMA_DELAY_SCALE);

		timer = min(timer, FIELD_MAX(XAXIDMA_DELAY_MASK));
		cr |= FIELD_PREP(XAXIDMA_DELAY_MASK, timer) |
		      XAXIDMA_IRQ_DELAY_MASK;
	}

	return cr;
}
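
/* Worked example (assuming XAXIDMA_DELAY_SCALE expresses the "125 clock
 * periods per timeout interval" rule from the comment above): with a 125 MHz
 * SG clock one interval is 125 * 8 ns = 1 us, so a requested delay of 50 us
 * programs a timer value of roughly 50 before being clamped to
 * FIELD_MAX(XAXIDMA_DELAY_MASK).
 */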

/**
 * axienet_coalesce_params() - Extract coalesce parameters from the CR
 * @lp: Device private data
 * @cr: The control register to parse
 * @count: Number of packets before an interrupt
 * @usec: Idle time (in usec) before an interrupt
 */
static void axienet_coalesce_params(struct axienet_local *lp, u32 cr,
				    u32 *count, u32 *usec)
{
	u64 clk_rate = axienet_dma_rate(lp);
	u64 timer = FIELD_GET(XAXIDMA_DELAY_MASK, cr);

	*count = FIELD_GET(XAXIDMA_COALESCE_MASK, cr);
	*usec = DIV64_U64_ROUND_CLOSEST(timer * XAXIDMA_DELAY_SCALE, clk_rate);
}
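
/* axienet_coalesce_params() is the inverse of axienet_calc_cr(): it pulls the
 * packet count and delay-timer fields back out of a control register value
 * and converts the timer ticks back to microseconds using the same clock
 * rate.
 */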

/**
 * axienet_dma_start - Set up DMA registers and start DMA operation
 * @lp: Pointer to the axienet_local structure
 */
static void axienet_dma_start(struct axienet_local *lp)
{
	spin_lock_irq(&lp->rx_cr_lock);

	/* Start updating the Rx channel control register */
	lp->rx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
	lp->rx_dma_started = true;

	spin_unlock_irq(&lp->rx_cr_lock);
	spin_lock_irq(&lp->tx_cr_lock);

	/* Start updating the Tx channel control register */
	lp->tx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
	lp->tx_dma_started = true;

	spin_unlock_irq(&lp->tx_cr_lock);
}

/**
 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 * @ndev: Pointer to the net_device structure
 *
 * Return: 0, on success -ENOMEM, on failure
 *
 * This function is called to initialize the Rx and Tx DMA descriptor
 * rings. This initializes the descriptors with required default values
 * and is called when Axi Ethernet driver reset is called.
 */
static int axienet_dma_bd_init(struct net_device *ndev)
{
	int i;
	struct sk_buff *skb;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Reset the indexes which are used for accessing the BDs */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	lp->tx_bd_v = dma_alloc_coherent(lp->dev,
					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		return -ENOMEM;

	lp->rx_bd_v = dma_alloc_coherent(lp->dev,
					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	for (i = 0; i < lp->tx_bd_num; i++) {
		dma_addr_t addr = lp->tx_bd_p +
				  sizeof(*lp->tx_bd_v) *
				  ((i + 1) % lp->tx_bd_num);

		lp->tx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t addr;

		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
			((i + 1) % lp->rx_bd_num);
		lp->rx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);

		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!skb)
			goto out;

		lp->rx_bd_v[i].skb = skb;
		addr = dma_map_single(lp->dev, skb->data,
				      lp->max_frm_size, DMA_FROM_DEVICE);
		if (dma_mapping_error(lp->dev, addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			goto out;
		}
		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);

		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
	}

	axienet_dma_start(lp);

	return 0;
out:
	axienet_dma_bd_release(ndev);
	return -ENOMEM;
}

/**
 * axienet_set_mac_address - Write the MAC address
 * @ndev: Pointer to the net_device structure
 * @address: 6 byte Address to be written as MAC address
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It writes to the UAW0 and UAW1 registers of the core.
 */
static void axienet_set_mac_address(struct net_device *ndev,
				    const void *address)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (address)
		eth_hw_addr_set(ndev, address);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);

	/* Set up the unicast MAC address filter with the MAC address */
	axienet_iow(lp, XAE_UAW0_OFFSET,
		    (ndev->dev_addr[0]) |
		    (ndev->dev_addr[1] << 8) |
		    (ndev->dev_addr[2] << 16) |
		    (ndev->dev_addr[3] << 24));
	axienet_iow(lp, XAE_UAW1_OFFSET,
		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
		      ~XAE_UAW1_UNICASTADDR_MASK) |
		     (ndev->dev_addr[4] |
		     (ndev->dev_addr[5] << 8))));
}
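
/* Example of the register packing used above: for the MAC address
 * 00:0a:35:01:02:03, UAW0 is written with 0x01350a00 (bytes 0-3, LSB first)
 * and bytes 4-5 are packed the same way (0x0302) into the field selected by
 * XAE_UAW1_UNICASTADDR_MASK; UAW1 bits outside that mask are preserved.
 */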

/**
 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 * @ndev: Pointer to the net_device structure
 * @p: 6 byte Address to be written as MAC address
 *
 * Return: 0 for all conditions. Presently, there is no failure case.
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It calls the core specific axienet_set_mac_address. This is the
 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	axienet_set_mac_address(ndev, addr->sa_data);
	return 0;
}

/**
 * axienet_set_multicast_list - Prepare the multicast table
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to initialize the multicast table during
 * initialization. The Axi Ethernet basic multicast support has a four-entry
 * multicast table which is initialized here. Additionally this function
 * goes into the net_device_ops structure entry ndo_set_multicast_list. This
 * means whenever the multicast table entries need to be updated this
 * function gets called.
 */
static void axienet_set_multicast_list(struct net_device *ndev)
{
	int i = 0;
	u32 reg, af0reg, af1reg;
	struct axienet_local *lp = netdev_priv(ndev);

	reg = axienet_ior(lp, XAE_FMI_OFFSET);
	reg &= ~XAE_FMI_PM_MASK;
	if (ndev->flags & IFF_PROMISC)
		reg |= XAE_FMI_PM_MASK;
	else
		reg &= ~XAE_FMI_PM_MASK;
	axienet_iow(lp, XAE_FMI_OFFSET, reg);

	if (ndev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
		reg &= 0xFFFFFF00;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		axienet_iow(lp, XAE_AF0_OFFSET, 1); /* Multicast bit */
		axienet_iow(lp, XAE_AF1_OFFSET, 0);
		axienet_iow(lp, XAE_AM0_OFFSET, 1); /* ditto */
		axienet_iow(lp, XAE_AM1_OFFSET, 0);
		axienet_iow(lp, XAE_FFE_OFFSET, 1);
		i = 1;
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		netdev_for_each_mc_addr(ha, ndev) {
			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
				break;

			af0reg = (ha->addr[0]);
			af0reg |= (ha->addr[1] << 8);
			af0reg |= (ha->addr[2] << 16);
			af0reg |= (ha->addr[3] << 24);

			af1reg = (ha->addr[4]);
			af1reg |= (ha->addr[5] << 8);

			reg &= 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
			axienet_iow(lp, XAE_AM0_OFFSET, 0xffffffff);
			axienet_iow(lp, XAE_AM1_OFFSET, 0x0000ffff);
			axienet_iow(lp, XAE_FFE_OFFSET, 1);
			i++;
		}
	}

	for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
		reg &= 0xFFFFFF00;
		reg |= i;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		axienet_iow(lp, XAE_FFE_OFFSET, 0);
	}
}
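
/* Note on the CAM programming above: each of the four filter entries is
 * selected through the FMI register and consists of an address (AF0/AF1)
 * plus an address mask (AM0/AM1) choosing which bits are compared. In the
 * all-multicast fallback only the Ethernet group/multicast bit is compared,
 * so every multicast frame matches entry 0; unused entries are disabled by
 * clearing FFE.
 */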

/**
 * axienet_setoptions - Set an Axi Ethernet option
 * @ndev: Pointer to the net_device structure
 * @options: Option to be enabled/disabled
 *
 * The Axi Ethernet core has multiple features which can be selectively turned
 * on or off. The typical options could be jumbo frame option, basic VLAN
 * option, promiscuous mode option etc. This function is used to set or clear
 * these options in the Axi Ethernet hardware. This is done through the
 * axienet_option structure.
 */
static void axienet_setoptions(struct net_device *ndev, u32 options)
{
	int reg;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_option *tp = &axienet_options[0];

	while (tp->opt) {
		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
		if (options & tp->opt)
			reg |= tp->m_or;
		axienet_iow(lp, tp->reg, reg);
		tp++;
	}

	lp->options |= options;
}

static u64 axienet_stat(struct axienet_local *lp, enum temac_stat stat)
{
	u32 counter;

	if (lp->reset_in_progress)
		return lp->hw_stat_base[stat];

	counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
	return lp->hw_stat_base[stat] + (counter - lp->hw_last_counter[stat]);
}

static void axienet_stats_update(struct axienet_local *lp, bool reset)
{
	enum temac_stat stat;

	write_seqcount_begin(&lp->hw_stats_seqcount);
	lp->reset_in_progress = reset;
	for (stat = 0; stat < STAT_COUNT; stat++) {
		u32 counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);

		lp->hw_stat_base[stat] += counter - lp->hw_last_counter[stat];
		lp->hw_last_counter[stat] = counter;
	}
	write_seqcount_end(&lp->hw_stats_seqcount);
}
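
/* The two helpers above implement 64-bit statistics on top of the MAC's
 * 32-bit hardware counters: axienet_stats_update() periodically folds the
 * delta since the last read into hw_stat_base[], and axienet_stat() adds the
 * current hardware value on top, guarded by hw_stats_seqcount for readers.
 */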

static void axienet_refresh_stats(struct work_struct *work)
{
	struct axienet_local *lp = container_of(work, struct axienet_local,
						stats_work.work);

	mutex_lock(&lp->stats_lock);
	axienet_stats_update(lp, false);
	mutex_unlock(&lp->stats_lock);

	/* Just less than 2^32 bytes at 2.5 GBit/s */
	schedule_delayed_work(&lp->stats_work, 13 * HZ);
}

static int __axienet_device_reset(struct axienet_local *lp)
{
	u32 value;
	int ret;

	/* Save statistics counters in case they will be reset */
	mutex_lock(&lp->stats_lock);
	if (lp->features & XAE_FEATURE_STATS)
		axienet_stats_update(lp, true);

	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
	 * process of Axi DMA takes a while to complete as all pending
	 * commands/transfers will be flushed or completed during this
	 * reset process.
	 * Note that even though both TX and RX have their own reset register,
	 * they both reset the entire DMA core, so only one needs to be used.
	 */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
	ret = read_poll_timeout(axienet_dma_in32, value,
				!(value & XAXIDMA_CR_RESET_MASK),
				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
				XAXIDMA_TX_CR_OFFSET);
	if (ret) {
		dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
		goto out;
	}

	/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
	ret = read_poll_timeout(axienet_ior, value,
				value & XAE_INT_PHYRSTCMPLT_MASK,
				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
				XAE_IS_OFFSET);
	if (ret) {
		dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
		goto out;
	}

	/* Update statistics counters with new values */
	if (lp->features & XAE_FEATURE_STATS) {
		enum temac_stat stat;

		write_seqcount_begin(&lp->hw_stats_seqcount);
		lp->reset_in_progress = false;
		for (stat = 0; stat < STAT_COUNT; stat++) {
			u32 counter =
				axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);

			lp->hw_stat_base[stat] +=
				lp->hw_last_counter[stat] - counter;
			lp->hw_last_counter[stat] = counter;
		}
		write_seqcount_end(&lp->hw_stats_seqcount);
	}

out:
	mutex_unlock(&lp->stats_lock);
	return ret;
}

/**
 * axienet_dma_stop - Stop DMA operation
 * @lp: Pointer to the axienet_local structure
 */
static void axienet_dma_stop(struct axienet_local *lp)
{
	int count;
	u32 cr, sr;

	spin_lock_irq(&lp->rx_cr_lock);

	cr = lp->rx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
	lp->rx_dma_started = false;

	spin_unlock_irq(&lp->rx_cr_lock);
	synchronize_irq(lp->rx_irq);

	spin_lock_irq(&lp->tx_cr_lock);

	cr = lp->tx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
	lp->tx_dma_started = false;

	spin_unlock_irq(&lp->tx_cr_lock);
	synchronize_irq(lp->tx_irq);

	/* Give DMAs a chance to halt gracefully */
	sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	}

	sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	}

	/* Do a reset to ensure DMA is really stopped */
	axienet_lock_mii(lp);
	__axienet_device_reset(lp);
	axienet_unlock_mii(lp);
}

/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to reset and initialize the Axi Ethernet core. This
 * is typically called during initialization. It does a reset of the Axi DMA
 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 * core.
 *
 * Return: 0 on success or a negative error number otherwise.
 */
static int axienet_device_reset(struct net_device *ndev)
{
	u32 axienet_status;
	struct axienet_local *lp = netdev_priv(ndev);
	int ret;

	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
	lp->options |= XAE_OPTION_VLAN;
	lp->options &= (~XAE_OPTION_JUMBO);

	if (ndev->mtu > XAE_MTU && ndev->mtu <= XAE_JUMBO_MTU) {
		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
				   XAE_TRL_SIZE;

		if (lp->max_frm_size <= lp->rxmem)
			lp->options |= XAE_OPTION_JUMBO;
	}

	if (!lp->use_dmaengine) {
		ret = __axienet_device_reset(lp);
		if (ret)
			return ret;

		ret = axienet_dma_bd_init(ndev);
		if (ret) {
			netdev_err(ndev, "%s: descriptor allocation failed\n",
				   __func__);
			return ret;
		}
	}

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);

	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);

	netif_trans_update(ndev);

	return 0;
}

/**
 * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
 * @lp: Pointer to the axienet_local structure
 * @first_bd: Index of first descriptor to clean up
 * @nr_bds: Max number of descriptors to clean up
 * @force: Whether to clean descriptors even if not complete
 * @sizep: Pointer to a u32 filled with the total sum of all bytes
 *	   in all cleaned-up descriptors. Ignored if NULL.
 * @budget: NAPI budget (use 0 when not called from NAPI poll)
 *
 * Would either be called after a successful transmit operation, or after
 * there was an error when setting up the chain.
 *
 * Return: The number of packets handled.
 */
static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
				 int nr_bds, bool force, u32 *sizep, int budget)
{
	struct axidma_bd *cur_p;
	unsigned int status;
	int i, packets = 0;
	dma_addr_t phys;

	for (i = 0; i < nr_bds; i++) {
		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
		status = cur_p->status;

		/* If force is not specified, clean up only descriptors
		 * that have been completed by the MAC.
		 */
		if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
			break;

		/* Ensure we see complete descriptor update */
		dma_rmb();
		phys = desc_get_phys_addr(lp, cur_p);
		dma_unmap_single(lp->dev, phys,
				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
				 DMA_TO_DEVICE);

		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
			napi_consume_skb(cur_p->skb, budget);
			packets++;
		}

		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app4 = 0;
		cur_p->skb = NULL;
		/* ensure our transmit path and device don't prematurely see status cleared */
		wmb();
		cur_p->cntrl = 0;
		cur_p->status = 0;

		if (sizep)
			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
	}

	if (!force) {
		lp->tx_bd_ci += i;
		if (lp->tx_bd_ci >= lp->tx_bd_num)
			lp->tx_bd_ci %= lp->tx_bd_num;
	}

	return packets;
}

/**
 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 * @lp: Pointer to the axienet_local structure
 * @num_frag: The number of BDs to check for
 *
 * Return: 0, on success
 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked before BDs are allocated and transmission starts.
 * This function returns 0 if a BD or group of BDs can be allocated for
 * transmission. If the BD or any of the BDs are not free the function
 * returns a busy status.
 */
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
					    int num_frag)
{
	struct axidma_bd *cur_p;

	/* Ensure we see all descriptor updates from device or TX polling */
	rmb();
	cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
			     lp->tx_bd_num];
	if (cur_p->cntrl)
		return NETDEV_TX_BUSY;
	return 0;
}
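
/* TX ring-full convention: a descriptor's cntrl word is non-zero while the
 * descriptor is in flight and is cleared by axienet_free_tx_chain() once the
 * MAC has completed it, so a non-zero cntrl num_frag slots ahead of the tail
 * means a worst-case frame would not fit yet.
 */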

/**
 * axienet_dma_tx_cb - DMA engine callback for TX channel.
 * @data: Pointer to the axienet_local structure.
 * @result: error reporting through dmaengine_result.
 * This function is called by dmaengine driver for TX channel to notify
 * that the transmit is done.
 */
static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result)
{
	struct skbuf_dma_descriptor *skbuf_dma;
	struct axienet_local *lp = data;
	struct netdev_queue *txq;
	int len;

	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_tail++);
	len = skbuf_dma->skb->len;
	txq = skb_get_tx_queue(lp->ndev, skbuf_dma->skb);
	u64_stats_update_begin(&lp->tx_stat_sync);
	u64_stats_add(&lp->tx_bytes, len);
	u64_stats_add(&lp->tx_packets, 1);
	u64_stats_update_end(&lp->tx_stat_sync);
	dma_unmap_sg(lp->dev, skbuf_dma->sgl, skbuf_dma->sg_len, DMA_TO_DEVICE);
	dev_consume_skb_any(skbuf_dma->skb);
	netif_txq_completed_wake(txq, 1, len,
				 CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
				 2 * MAX_SKB_FRAGS);
}
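
/* tx_ring_head (producer) and tx_ring_tail (consumer) follow the circ_buf
 * convention, so CIRC_SPACE() above reports how many TX slots remain;
 * netif_txq_completed_wake() only restarts the queue once enough slots have
 * been freed for another worst-case (maximally fragmented) frame.
 */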

/**
 * axienet_start_xmit_dmaengine - Starts the transmission.
 * @skb: sk_buff pointer that contains data to be Txed.
 * @ndev: Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK on success or for non-space errors.
 *	   NETDEV_TX_BUSY when no free element is available in the TX skb
 *	   ring buffer.
 *
 * This function is invoked to initiate transmission. The function sets up
 * the skb, registers the DMA callback and submits the DMA transaction.
 * Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static netdev_tx_t
axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
{
	struct dma_async_tx_descriptor *dma_tx_desc = NULL;
	struct axienet_local *lp = netdev_priv(ndev);
	u32 app_metadata[DMA_NUM_APP_WORDS] = {0};
	struct skbuf_dma_descriptor *skbuf_dma;
	struct dma_device *dma_dev;
	struct netdev_queue *txq;
	u32 csum_start_off;
	u32 csum_index_off;
	int sg_len;
	int ret;

	dma_dev = lp->tx_chan->device;
	sg_len = skb_shinfo(skb)->nr_frags + 1;
	if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= 1) {
		netif_stop_queue(ndev);
		if (net_ratelimit())
			netdev_warn(ndev, "TX ring unexpectedly full\n");
		return NETDEV_TX_BUSY;
	}

	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_head);
	if (!skbuf_dma)
		goto xmit_error_drop_skb;

	lp->tx_ring_head++;
	sg_init_table(skbuf_dma->sgl, sg_len);
	ret = skb_to_sgvec(skb, skbuf_dma->sgl, 0, skb->len);
	if (ret < 0)
		goto xmit_error_drop_skb;

	ret = dma_map_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
	if (!ret)
		goto xmit_error_drop_skb;

	/* Fill up app fields for checksum */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			app_metadata[0] |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			app_metadata[0] |= 1;
			app_metadata[1] = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		app_metadata[0] |= 2; /* Tx Full Checksum Offload Enabled */
	}

	dma_tx_desc = dma_dev->device_prep_slave_sg(lp->tx_chan, skbuf_dma->sgl,
			sg_len, DMA_MEM_TO_DEV,
			DMA_PREP_INTERRUPT, (void *)app_metadata);
	if (!dma_tx_desc)
		goto xmit_error_unmap_sg;

	skbuf_dma->skb = skb;
	skbuf_dma->sg_len = sg_len;
	dma_tx_desc->callback_param = lp;
	dma_tx_desc->callback_result = axienet_dma_tx_cb;
	txq = skb_get_tx_queue(lp->ndev, skb);
	netdev_tx_sent_queue(txq, skb->len);
	netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
			     MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS);

	dmaengine_submit(dma_tx_desc);
	dma_async_issue_pending(lp->tx_chan);
	return NETDEV_TX_OK;

xmit_error_unmap_sg:
	dma_unmap_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
xmit_error_drop_skb:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/**
 * axienet_tx_poll - Invoked once a transmit is completed by the
 * Axi DMA Tx channel.
 * @napi: Pointer to NAPI structure.
 * @budget: Max number of TX packets to process.
 *
 * Return: Number of TX packets processed.
 *
 * This function is invoked from the NAPI processing to notify the completion
 * of transmit operation. It clears fields in the corresponding Tx BDs and
 * unmaps the corresponding buffer so that CPU can regain ownership of the
 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 * required.
 */
static int axienet_tx_poll(struct napi_struct *napi, int budget)
{
	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
	struct net_device *ndev = lp->ndev;
	u32 size = 0;
	int packets;

	packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, lp->tx_bd_num, false,
					&size, budget);

	if (packets) {
		netdev_completed_queue(ndev, packets, size);
		u64_stats_update_begin(&lp->tx_stat_sync);
		u64_stats_add(&lp->tx_packets, packets);
		u64_stats_add(&lp->tx_bytes, size);
		u64_stats_update_end(&lp->tx_stat_sync);

		/* Matches barrier in axienet_start_xmit */
		smp_mb();

		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
			netif_wake_queue(ndev);
	}

	if (packets < budget && napi_complete_done(napi, packets)) {
		/* Re-enable TX completion interrupts. This should
		 * cause an immediate interrupt if any TX packets are
		 * already pending.
		 */
		spin_lock_irq(&lp->tx_cr_lock);
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
		spin_unlock_irq(&lp->tx_cr_lock);
	}
	return packets;
}

/**
 * axienet_start_xmit - Starts the transmission.
 * @skb: sk_buff pointer that contains data to be Txed.
 * @ndev: Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK, on success
 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked from upper layers to initiate transmission. The
 * function uses the next available free BDs and populates their fields to
 * start the transmission. Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static netdev_tx_t
axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	u32 ii;
	u32 num_frag;
	u32 csum_start_off;
	u32 csum_index_off;
	skb_frag_t *frag;
	dma_addr_t tail_p, phys;
	u32 orig_tail_ptr, new_tail_ptr;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;

	orig_tail_ptr = lp->tx_bd_tail;
	new_tail_ptr = orig_tail_ptr;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &lp->tx_bd_v[orig_tail_ptr];

	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
		/* Should not happen as last start_xmit call should have
		 * checked for sufficient space and queue should only be
		 * woken when sufficient space is available.
		 */
		netif_stop_queue(ndev);
		if (net_ratelimit())
			netdev_warn(ndev, "TX ring unexpectedly full\n");
		return NETDEV_TX_BUSY;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			cur_p->app0 |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			cur_p->app0 |= 1;
			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
	}

	phys = dma_map_single(lp->dev, skb->data,
			      skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(lp->dev, phys))) {
		if (net_ratelimit())
			netdev_err(ndev, "TX DMA mapping error\n");
		ndev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	desc_set_phys_addr(lp, phys, cur_p);
	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;

	for (ii = 0; ii < num_frag; ii++) {
		if (++new_tail_ptr >= lp->tx_bd_num)
			new_tail_ptr = 0;
		cur_p = &lp->tx_bd_v[new_tail_ptr];
		frag = &skb_shinfo(skb)->frags[ii];
		phys = dma_map_single(lp->dev,
				      skb_frag_address(frag),
				      skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(lp->dev, phys))) {
			if (net_ratelimit())
				netdev_err(ndev, "TX DMA mapping error\n");
			ndev->stats.tx_dropped++;
			axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
					      true, NULL, 0);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		desc_set_phys_addr(lp, phys, cur_p);
		cur_p->cntrl = skb_frag_size(frag);
	}

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
	cur_p->skb = skb;

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
	if (++new_tail_ptr >= lp->tx_bd_num)
		new_tail_ptr = 0;
	WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
	netdev_sent_queue(ndev, skb->len);

	/* Start the transfer */
	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);

	/* Stop queue if next transmit may not have space */
	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
		netif_stop_queue(ndev);

		/* Matches barrier in axienet_tx_poll */
		smp_mb();

		/* Space might have just been freed - check again */
		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
			netif_wake_queue(ndev);
	}

	return NETDEV_TX_OK;
}

/**
 * axienet_dma_rx_cb - DMA engine callback for RX channel.
 * @data: Pointer to the axienet_local structure.
 * @result: error reporting through dmaengine_result.
 * This function is called by dmaengine driver for RX channel to notify
 * that the packet is received.
 */
static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
{
	struct skbuf_dma_descriptor *skbuf_dma;
	size_t meta_len, meta_max_len, rx_len;
	struct axienet_local *lp = data;
	struct sk_buff *skb;
	u32 *app_metadata;
	int i;

	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
	skb = skbuf_dma->skb;
	app_metadata = dmaengine_desc_get_metadata_ptr(skbuf_dma->desc, &meta_len,
						       &meta_max_len);
	dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size,
			 DMA_FROM_DEVICE);

	if (IS_ERR(app_metadata)) {
		if (net_ratelimit())
			netdev_err(lp->ndev, "Failed to get RX metadata pointer\n");
		dev_kfree_skb_any(skb);
		lp->ndev->stats.rx_dropped++;
		goto rx_submit;
	}

	/* TODO: Derive app word index programmatically */
	rx_len = (app_metadata[LEN_APP] & 0xFFFF);
	skb_put(skb, rx_len);
	skb->protocol = eth_type_trans(skb, lp->ndev);
	skb->ip_summed = CHECKSUM_NONE;

	__netif_rx(skb);
	u64_stats_update_begin(&lp->rx_stat_sync);
	u64_stats_add(&lp->rx_packets, 1);
	u64_stats_add(&lp->rx_bytes, rx_len);
	u64_stats_update_end(&lp->rx_stat_sync);

rx_submit:
	for (i = 0; i < CIRC_SPACE(lp->rx_ring_head, lp->rx_ring_tail,
				   RX_BUF_NUM_DEFAULT); i++)
		axienet_rx_submit_desc(lp->ndev);
	dma_async_issue_pending(lp->rx_chan);
}
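
/* In dmaengine mode the RX buffers form a circular buffer indexed by
 * rx_ring_head/rx_ring_tail; after each completion the callback above tops
 * the hardware back up by submitting one new descriptor per free slot
 * reported by CIRC_SPACE() and then kicks the channel.
 */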

/**
 * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
 * @napi: Pointer to NAPI structure.
 * @budget: Max number of RX packets to process.
 *
 * Return: Number of RX packets processed.
 */
static int axienet_rx_poll(struct napi_struct *napi, int budget)
{
	u32 length;
	u32 csumstatus;
	u32 size = 0;
	int packets = 0;
	dma_addr_t tail_p = 0;
	struct axidma_bd *cur_p;
	struct sk_buff *skb, *new_skb;
	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);

	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
		dma_addr_t phys;

		/* Ensure we see complete descriptor update */
		dma_rmb();

		skb = cur_p->skb;
		cur_p->skb = NULL;

		/* skb could be NULL if a previous pass already received the
		 * packet for this slot in the ring, but failed to refill it
		 * with a newly allocated buffer. In this case, don't try to
		 * receive it again.
		 */
		if (likely(skb)) {
			length = cur_p->app4 & 0x0000FFFF;

			phys = desc_get_phys_addr(lp, cur_p);
			dma_unmap_single(lp->dev, phys, lp->max_frm_size,
					 DMA_FROM_DEVICE);

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, lp->ndev);
			/*skb_checksum_none_assert(skb);*/
			skb->ip_summed = CHECKSUM_NONE;

			/* if we're doing Rx csum offload, set it up */
			if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
				csumstatus = (cur_p->app2 &
					      XAE_FULL_CSUM_STATUS_MASK) >> 3;
				if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
				    csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				}
			} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
				skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}

			napi_gro_receive(napi, skb);

			size += length;
			packets++;
		}

		new_skb = napi_alloc_skb(napi, lp->max_frm_size);
		if (!new_skb)
			break;

		phys = dma_map_single(lp->dev, new_skb->data,
				      lp->max_frm_size,
				      DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(lp->dev, phys))) {
			if (net_ratelimit())
				netdev_err(lp->ndev, "RX DMA mapping error\n");
			dev_kfree_skb(new_skb);
			break;
		}
		desc_set_phys_addr(lp, phys, cur_p);

		cur_p->cntrl = lp->max_frm_size;
		cur_p->status = 0;
		cur_p->skb = new_skb;

		/* Only update tail_p to mark this slot as usable after it has
		 * been successfully refilled.
		 */
		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;

		if (++lp->rx_bd_ci >= lp->rx_bd_num)
			lp->rx_bd_ci = 0;
		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
	}

	u64_stats_update_begin(&lp->rx_stat_sync);
	u64_stats_add(&lp->rx_packets, packets);
	u64_stats_add(&lp->rx_bytes, size);
	u64_stats_update_end(&lp->rx_stat_sync);

	if (tail_p)
		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);

	if (packets < budget && napi_complete_done(napi, packets)) {
		if (READ_ONCE(lp->rx_dim_enabled)) {
			struct dim_sample sample = {
				.time = ktime_get(),
				/* Safe because we are the only writer */
				.pkt_ctr = u64_stats_read(&lp->rx_packets),
				.byte_ctr = u64_stats_read(&lp->rx_bytes),
				.event_ctr = READ_ONCE(lp->rx_irqs),
			};

			net_dim(&lp->rx_dim, &sample);
		}

		/* Re-enable RX completion interrupts. This should
		 * cause an immediate interrupt if any RX packets are
		 * already pending.
		 */
		spin_lock_irq(&lp->rx_cr_lock);
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
		spin_unlock_irq(&lp->rx_cr_lock);
	}
	return packets;
}

/**
 * axienet_tx_irq - Tx Done Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
 * TX BD processing.
 */
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);

	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;

	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);

	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
			   (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
			   (lp->tx_bd_v[lp->tx_bd_ci]).phys);
		schedule_work(&lp->dma_err_task);
	} else {
		/* Disable further TX completion interrupts and schedule
		 * NAPI to handle the completions.
		 */
		if (napi_schedule_prep(&lp->napi_tx)) {
			u32 cr;

			spin_lock(&lp->tx_cr_lock);
			cr = lp->tx_dma_cr;
			cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
			axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
			spin_unlock(&lp->tx_cr_lock);
			__napi_schedule(&lp->napi_tx);
		}
	}

	return IRQ_HANDLED;
}

/**
 * axienet_rx_irq - Rx Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
 * processing.
 */
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);

	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;

	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);

	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
			   (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
			   (lp->rx_bd_v[lp->rx_bd_ci]).phys);
		schedule_work(&lp->dma_err_task);
	} else {
		/* Disable further RX completion interrupts and schedule
		 * NAPI to handle the completions.
		 */
		WRITE_ONCE(lp->rx_irqs, READ_ONCE(lp->rx_irqs) + 1);
		if (napi_schedule_prep(&lp->napi_rx)) {
			u32 cr;

			spin_lock(&lp->rx_cr_lock);
			cr = lp->rx_dma_cr;
			cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
			axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
			spin_unlock(&lp->rx_cr_lock);

			__napi_schedule(&lp->napi_rx);
		}
	}

	return IRQ_HANDLED;
}

/**
 * axienet_eth_irq - Ethernet core Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
 *
 * Handle miscellaneous conditions indicated by Ethernet core IRQ.
 */
static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);
	unsigned int pending;

	pending = axienet_ior(lp, XAE_IP_OFFSET);
	if (!pending)
		return IRQ_NONE;

	if (pending & XAE_INT_RXFIFOOVR_MASK)
		ndev->stats.rx_missed_errors++;

	if (pending & XAE_INT_RXRJECT_MASK)
		ndev->stats.rx_dropped++;

	axienet_iow(lp, XAE_IS_OFFSET, pending);
	return IRQ_HANDLED;
}

static void axienet_dma_err_handler(struct work_struct *work);

/**
 * axienet_rx_submit_desc - Submit an rx descriptor to the dmaengine:
 * allocate an skbuff, map it into a scatterlist, obtain a descriptor,
 * then add the callback information and submit the descriptor.
 *
 * @ndev: net_device pointer
 *
 */
static void axienet_rx_submit_desc(struct net_device *ndev)
{
	struct dma_async_tx_descriptor *dma_rx_desc = NULL;
	struct axienet_local *lp = netdev_priv(ndev);
	struct skbuf_dma_descriptor *skbuf_dma;
	struct sk_buff *skb;
	dma_addr_t addr;

	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_head);
	if (!skbuf_dma)
		return;

	skb = netdev_alloc_skb(ndev, lp->max_frm_size);
	if (!skb)
		return;

	sg_init_table(skbuf_dma->sgl, 1);
	addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(lp->dev, addr))) {
		if (net_ratelimit())
			netdev_err(ndev, "DMA mapping error\n");
		goto rx_submit_err_free_skb;
	}
	sg_dma_address(skbuf_dma->sgl) = addr;
	sg_dma_len(skbuf_dma->sgl) = lp->max_frm_size;
	dma_rx_desc = dmaengine_prep_slave_sg(lp->rx_chan, skbuf_dma->sgl,
					      1, DMA_DEV_TO_MEM,
					      DMA_PREP_INTERRUPT);
	if (!dma_rx_desc)
		goto rx_submit_err_unmap_skb;

	skbuf_dma->skb = skb;
	skbuf_dma->dma_address = sg_dma_address(skbuf_dma->sgl);
	skbuf_dma->desc = dma_rx_desc;
	dma_rx_desc->callback_param = lp;
	dma_rx_desc->callback_result = axienet_dma_rx_cb;
	lp->rx_ring_head++;
	dmaengine_submit(dma_rx_desc);

	return;

rx_submit_err_unmap_skb:
	dma_unmap_single(lp->dev, addr, lp->max_frm_size, DMA_FROM_DEVICE);
rx_submit_err_free_skb:
	dev_kfree_skb(skb);
}

/**
 * axienet_init_dmaengine - init the dmaengine code.
 * @ndev: Pointer to net_device structure
 *
 * Return: 0, on success.
 *	    non-zero error value on failure
 *
 * This is the dmaengine initialization code.
 */
static int axienet_init_dmaengine(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);
	struct skbuf_dma_descriptor *skbuf_dma;
	int i, ret;

	lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0");
	if (IS_ERR(lp->tx_chan)) {
		dev_err(lp->dev, "No Ethernet DMA (TX) channel found\n");
		return PTR_ERR(lp->tx_chan);
	}

	lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0");
	if (IS_ERR(lp->rx_chan)) {
		ret = PTR_ERR(lp->rx_chan);
		dev_err(lp->dev, "No Ethernet DMA (RX) channel found\n");
		goto err_dma_release_tx;
	}

	lp->tx_ring_tail = 0;
	lp->tx_ring_head = 0;
	lp->rx_ring_tail = 0;
	lp->rx_ring_head = 0;
	lp->tx_skb_ring = kcalloc(TX_BD_NUM_MAX, sizeof(*lp->tx_skb_ring),
				  GFP_KERNEL);
	if (!lp->tx_skb_ring) {
		ret = -ENOMEM;
		goto err_dma_release_rx;
	}
	for (i = 0; i < TX_BD_NUM_MAX; i++) {
		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
		if (!skbuf_dma) {
			ret = -ENOMEM;
			goto err_free_tx_skb_ring;
		}
		lp->tx_skb_ring[i] = skbuf_dma;
	}

	lp->rx_skb_ring = kcalloc(RX_BUF_NUM_DEFAULT, sizeof(*lp->rx_skb_ring),
				  GFP_KERNEL);
	if (!lp->rx_skb_ring) {
		ret = -ENOMEM;
		goto err_free_tx_skb_ring;
	}
	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) {
		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
		if (!skbuf_dma) {
			ret = -ENOMEM;
			goto err_free_rx_skb_ring;
		}
		lp->rx_skb_ring[i] = skbuf_dma;
	}
	/* TODO: Instead of BD_NUM_DEFAULT use runtime support */
	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
		axienet_rx_submit_desc(ndev);
	dma_async_issue_pending(lp->rx_chan);

	return 0;

err_free_rx_skb_ring:
	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
		kfree(lp->rx_skb_ring[i]);
	kfree(lp->rx_skb_ring);
err_free_tx_skb_ring:
	for (i = 0; i < TX_BD_NUM_MAX; i++)
		kfree(lp->tx_skb_ring[i]);
	kfree(lp->tx_skb_ring);
err_dma_release_rx:
	dma_release_channel(lp->rx_chan);
err_dma_release_tx:
	dma_release_channel(lp->tx_chan);
	return ret;
}

/**
 * axienet_init_legacy_dma - init the dma legacy code.
 * @ndev: Pointer to net_device structure
 *
 * Return: 0, on success.
 *	    non-zero error value on failure
 *
 * This is the dma initialization code. It also allocates interrupt
 * service routines, enables the interrupt lines and ISR handling.
 *
 */
static int axienet_init_legacy_dma(struct net_device *ndev)
{
	int ret;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Enable worker thread for Axi DMA error handling */
	lp->stopping = false;
	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);

	napi_enable(&lp->napi_rx);
	napi_enable(&lp->napi_tx);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_rx_irq;
	/* Enable interrupts for Axi Ethernet core (if defined) */
	if (lp->eth_irq > 0) {
		ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
				  ndev->name, ndev);
		if (ret)
			goto err_eth_irq;
	}

	return 0;

err_eth_irq:
	free_irq(lp->rx_irq, ndev);
err_rx_irq:
	free_irq(lp->tx_irq, ndev);
err_tx_irq:
	napi_disable(&lp->napi_tx);
	napi_disable(&lp->napi_rx);
	cancel_work_sync(&lp->dma_err_task);
	dev_err(lp->dev, "request_irq() failed\n");
	return ret;
}

/**
 * axienet_open - Driver open routine.
 * @ndev: Pointer to net_device structure
 *
 * Return: 0, on success.
 *	    non-zero error value on failure
 *
 * This is the driver open routine. It calls phylink_start to start the
 * PHY device.
 * It also allocates interrupt service routines, enables the interrupt lines
 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
 * descriptors are initialized.
 */
static int axienet_open(struct net_device *ndev)
{
	int ret;
	struct axienet_local *lp = netdev_priv(ndev);

	/* When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. MDIO must be disabled before resetting.
	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
	 */
	axienet_lock_mii(lp);
	ret = axienet_device_reset(ndev);
	axienet_unlock_mii(lp);

	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
	if (ret) {
		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
		return ret;
	}

	phylink_start(lp->phylink);

	/* Start the statistics refresh work */
	schedule_delayed_work(&lp->stats_work, 0);

	if (lp->use_dmaengine) {
		/* Enable interrupts for Axi Ethernet core (if defined) */
		if (lp->eth_irq > 0) {
			ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
					  ndev->name, ndev);
			if (ret)
				goto err_phy;
		}

		ret = axienet_init_dmaengine(ndev);
		if (ret < 0)
			goto err_free_eth_irq;
	} else {
		ret = axienet_init_legacy_dma(ndev);
		if (ret)
			goto err_phy;
	}

	return 0;

err_free_eth_irq:
	if (lp->eth_irq > 0)
		free_irq(lp->eth_irq, ndev);
err_phy:
	cancel_work_sync(&lp->rx_dim.work);
	cancel_delayed_work_sync(&lp->stats_work);
	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);
	return ret;
}

/**
 * axienet_stop - Driver stop routine.
 * @ndev: Pointer to net_device structure
 *
 * Return: 0, on success.
 *
 * This is the driver stop routine. It calls phylink_disconnect to stop the PHY
 * device. It also removes the interrupt handlers and disables the interrupts.
 * The Axi DMA Tx/Rx BDs are released.
 */
static int axienet_stop(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);
	int i;

	if (!lp->use_dmaengine) {
		WRITE_ONCE(lp->stopping, true);
		flush_work(&lp->dma_err_task);

		napi_disable(&lp->napi_tx);
		napi_disable(&lp->napi_rx);
	}

	cancel_work_sync(&lp->rx_dim.work);
	cancel_delayed_work_sync(&lp->stats_work);

	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	if (!lp->use_dmaengine) {
		axienet_dma_stop(lp);
		cancel_work_sync(&lp->dma_err_task);
		free_irq(lp->tx_irq, ndev);
		free_irq(lp->rx_irq, ndev);
		axienet_dma_bd_release(ndev);
	} else {
		dmaengine_terminate_sync(lp->tx_chan);
		dmaengine_synchronize(lp->tx_chan);
		dmaengine_terminate_sync(lp->rx_chan);
		dmaengine_synchronize(lp->rx_chan);

		for (i = 0; i < TX_BD_NUM_MAX; i++)
			kfree(lp->tx_skb_ring[i]);
		kfree(lp->tx_skb_ring);
		for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
			kfree(lp->rx_skb_ring[i]);
		kfree(lp->rx_skb_ring);

		dma_release_channel(lp->rx_chan);
		dma_release_channel(lp->tx_chan);
	}

	netdev_reset_queue(ndev);
	axienet_iow(lp, XAE_IE_OFFSET, 0);

	if (lp->eth_irq > 0)
		free_irq(lp->eth_irq, ndev);
	return 0;
}

/**
 * axienet_change_mtu - Driver change mtu routine.
 * @ndev: Pointer to net_device structure
 * @new_mtu: New mtu value to be applied
 *
 * Return: 0 on success, or a negative error if the device is running or the
 * new MTU does not fit in the receive buffer memory.
 *
 * This is the change mtu driver routine. It checks if the Axi Ethernet
 * hardware supports jumbo frames before changing the mtu. This can be
 * called only when the device is not up.
 */
static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev))
		return -EBUSY;

	if ((new_mtu + VLAN_ETH_HLEN +
	     XAE_TRL_SIZE) > lp->rxmem)
		return -EINVAL;

	WRITE_ONCE(ndev->mtu, new_mtu);

	return 0;
}
1811 * axienet_poll_controller - Axi Ethernet poll mechanism.
1812 * @ndev: Pointer to net_device structure
1814 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
1815 * to polling the ISRs and are enabled back after the polling is done.
1817 static void axienet_poll_controller(struct net_device
*ndev
)
1819 struct axienet_local
*lp
= netdev_priv(ndev
);
1821 disable_irq(lp
->tx_irq
);
1822 disable_irq(lp
->rx_irq
);
1823 axienet_rx_irq(lp
->tx_irq
, ndev
);
1824 axienet_tx_irq(lp
->rx_irq
, ndev
);
1825 enable_irq(lp
->tx_irq
);
1826 enable_irq(lp
->rx_irq
);

static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct axienet_local *lp = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phylink_mii_ioctl(lp->phylink, rq, cmd);
}

static void
axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct axienet_local *lp = netdev_priv(dev);
	unsigned int start;

	netdev_stats_to_stats64(stats, &dev->stats);

	do {
		start = u64_stats_fetch_begin(&lp->rx_stat_sync);
		stats->rx_packets = u64_stats_read(&lp->rx_packets);
		stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
	} while (u64_stats_fetch_retry(&lp->rx_stat_sync, start));

	do {
		start = u64_stats_fetch_begin(&lp->tx_stat_sync);
		stats->tx_packets = u64_stats_read(&lp->tx_packets);
		stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
	} while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));

	if (!(lp->features & XAE_FEATURE_STATS))
		return;

	do {
		start = read_seqcount_begin(&lp->hw_stats_seqcount);
		stats->rx_length_errors =
			axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
		stats->rx_crc_errors = axienet_stat(lp, STAT_RX_FCS_ERRORS);
		stats->rx_frame_errors =
			axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
		stats->rx_errors = axienet_stat(lp, STAT_UNDERSIZE_FRAMES) +
				   axienet_stat(lp, STAT_FRAGMENT_FRAMES) +
				   stats->rx_length_errors +
				   stats->rx_crc_errors +
				   stats->rx_frame_errors;
		stats->multicast = axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);

		stats->tx_aborted_errors =
			axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
		stats->tx_fifo_errors =
			axienet_stat(lp, STAT_TX_UNDERRUN_ERRORS);
		stats->tx_window_errors =
			axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
		stats->tx_errors = axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL) +
				   stats->tx_aborted_errors +
				   stats->tx_fifo_errors +
				   stats->tx_window_errors;
	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
}

static const struct net_device_ops axienet_netdev_ops = {
	.ndo_open = axienet_open,
	.ndo_stop = axienet_stop,
	.ndo_start_xmit = axienet_start_xmit,
	.ndo_get_stats64 = axienet_get_stats64,
	.ndo_change_mtu	= axienet_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_eth_ioctl = axienet_ioctl,
	.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = axienet_poll_controller,
#endif
};

static const struct net_device_ops axienet_netdev_dmaengine_ops = {
	.ndo_open = axienet_open,
	.ndo_stop = axienet_stop,
	.ndo_start_xmit = axienet_start_xmit_dmaengine,
	.ndo_get_stats64 = axienet_get_stats64,
	.ndo_change_mtu	= axienet_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_eth_ioctl = axienet_ioctl,
	.ndo_set_rx_mode = axienet_set_multicast_list,
};

/**
 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
 * @ndev: Pointer to net_device structure
 * @ed: Pointer to ethtool_drvinfo structure
 *
 * This implements ethtool command for getting the driver information.
 * Issue "ethtool -i ethX" under linux prompt to execute this function.
 */
static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
					 struct ethtool_drvinfo *ed)
{
	strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
	strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
}

/**
 * axienet_ethtools_get_regs_len - Get the total regs length present in the
 *				   AxiEthernet core.
 * @ndev: Pointer to net_device structure
 *
 * This implements ethtool command for getting the total register length
 * information.
 *
 * Return: the total regs length
 */
static int axienet_ethtools_get_regs_len(struct net_device *ndev)
{
	return sizeof(u32) * AXIENET_REGS_N;
}
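
/* In the register dump below, indices 23-26 are left at zero by the memset():
 * the dump jumps from the MDIO read data register (data[22]) straight to the
 * unicast address words at data[27].
 */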

/**
 * axienet_ethtools_get_regs - Dump the contents of all registers present
 *			       in AxiEthernet core.
 * @ndev: Pointer to net_device structure
 * @regs: Pointer to ethtool_regs structure
 * @ret: Void pointer used to return the contents of the registers.
 *
 * This implements ethtool command for getting the Axi Ethernet register dump.
 * Issue "ethtool -d ethX" to execute this function.
 */
static void axienet_ethtools_get_regs(struct net_device *ndev,
				      struct ethtool_regs *regs, void *ret)
{
	u32 *data = (u32 *)ret;
	size_t len = sizeof(u32) * AXIENET_REGS_N;
	struct axienet_local *lp = netdev_priv(ndev);

	regs->version = 0;
	regs->len = len;

	memset(data, 0, len);
	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
	if (!lp->use_dmaengine) {
		data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
		data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
		data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
		data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
		data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
		data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
	}
}
static void
axienet_ethtools_get_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ering,
			       struct kernel_ethtool_ringparam *kernel_ering,
			       struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	ering->rx_max_pending = RX_BD_NUM_MAX;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->tx_max_pending = TX_BD_NUM_MAX;
	ering->rx_pending = lp->rx_bd_num;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;
	ering->tx_pending = lp->tx_bd_num;
}

static int
axienet_ethtools_set_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ering,
			       struct kernel_ethtool_ringparam *kernel_ering,
			       struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (ering->rx_pending > RX_BD_NUM_MAX ||
	    ering->rx_mini_pending ||
	    ering->rx_jumbo_pending ||
	    ering->tx_pending < TX_BD_NUM_MIN ||
	    ering->tx_pending > TX_BD_NUM_MAX)
		return -EINVAL;

	if (netif_running(ndev))
		return -EBUSY;

	lp->rx_bd_num = ering->rx_pending;
	lp->tx_bd_num = ering->tx_pending;
	return 0;
}
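/* Illustrative usage only (not from this file): the ring sizes can only be
 * changed while the interface is down, and the new sizes are applied the next
 * time the interface is opened, e.g.
 *
 *	ip link set ethX down
 *	ethtool -G ethX rx 512 tx 256
 *	ip link set ethX up
 *
 * Requests above RX_BD_NUM_MAX/TX_BD_NUM_MAX or below TX_BD_NUM_MIN are
 * rejected with -EINVAL above.
 */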
/**
 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
 *				     Tx and Rx paths.
 * @ndev:	Pointer to net_device structure
 * @epauseparm:	Pointer to ethtool_pauseparam structure.
 *
 * This implements ethtool command for getting axi ethernet pause frame
 * setting. Issue "ethtool -a ethX" to execute this function.
 */
static void
axienet_ethtools_get_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	struct axienet_local *lp = netdev_priv(ndev);

	phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
}

/**
 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
 *				     settings.
 * @ndev:	Pointer to net_device structure
 * @epauseparm:	Pointer to ethtool_pauseparam structure
 *
 * This implements ethtool command for enabling flow control on Rx and Tx
 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
 * function.
 *
 * Return: 0 on success, -EFAULT if device is running
 */
static int
axienet_ethtools_set_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
}
/**
 * axienet_update_coalesce_rx() - Set RX CR
 * @lp: Device private data
 * @cr: Value to write to the RX CR
 * @mask: Bits to set from @cr
 */
static void axienet_update_coalesce_rx(struct axienet_local *lp, u32 cr,
				       u32 mask)
{
	spin_lock_irq(&lp->rx_cr_lock);
	lp->rx_dma_cr &= ~mask;
	lp->rx_dma_cr |= cr;
	/* If DMA isn't started, then the settings will be applied the next
	 * time dma_start() is called.
	 */
	if (lp->rx_dma_started) {
		u32 reg = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);

		/* Don't enable IRQs if they are disabled by NAPI */
		if (reg & XAXIDMA_IRQ_ALL_MASK)
			cr = lp->rx_dma_cr;
		else
			cr = lp->rx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK;
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
	}
	spin_unlock_irq(&lp->rx_cr_lock);
}

/**
 * axienet_dim_coalesce_count_rx() - RX coalesce count for DIM
 * @lp: Device private data
 *
 * Return: RX coalescing frame count value for DIM.
 */
static u32 axienet_dim_coalesce_count_rx(struct axienet_local *lp)
{
	return min(1 << (lp->rx_dim.profile_ix << 1), 255);
}
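/* Worked example of the mapping above, min(1 << (profile_ix << 1), 255):
 * profile_ix 0 -> 1 frame, 1 -> 4, 2 -> 16, 3 -> 64, 4 or more -> capped at
 * 255, so DIM raises the RX frame-coalescing threshold geometrically as its
 * profile index increases.
 */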
/**
 * axienet_rx_dim_work() - Adjust RX DIM settings
 * @work: The work struct
 */
static void axienet_rx_dim_work(struct work_struct *work)
{
	struct axienet_local *lp =
		container_of(work, struct axienet_local, rx_dim.work);
	u32 cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp), 0);
	u32 mask = XAXIDMA_COALESCE_MASK | XAXIDMA_IRQ_IOC_MASK |
		   XAXIDMA_IRQ_ERROR_MASK;

	axienet_update_coalesce_rx(lp, cr, mask);
	lp->rx_dim.state = DIM_START_MEASURE;
}

/**
 * axienet_update_coalesce_tx() - Set TX CR
 * @lp: Device private data
 * @cr: Value to write to the TX CR
 * @mask: Bits to set from @cr
 */
static void axienet_update_coalesce_tx(struct axienet_local *lp, u32 cr,
				       u32 mask)
{
	spin_lock_irq(&lp->tx_cr_lock);
	lp->tx_dma_cr &= ~mask;
	lp->tx_dma_cr |= cr;
	/* If DMA isn't started, then the settings will be applied the next
	 * time dma_start() is called.
	 */
	if (lp->tx_dma_started) {
		u32 reg = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);

		/* Don't enable IRQs if they are disabled by NAPI */
		if (reg & XAXIDMA_IRQ_ALL_MASK)
			cr = lp->tx_dma_cr;
		else
			cr = lp->tx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK;
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
	}
	spin_unlock_irq(&lp->tx_cr_lock);
}
/**
 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
 * @ndev:	Pointer to net_device structure
 * @ecoalesce:	Pointer to ethtool_coalesce structure
 * @kernel_coal: ethtool CQE mode setting structure
 * @extack:	extack for reporting error messages
 *
 * This implements ethtool command for getting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
 * execute this function.
 *
 * Return: 0 always
 */
static int
axienet_ethtools_get_coalesce(struct net_device *ndev,
			      struct ethtool_coalesce *ecoalesce,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);
	u32 cr;

	ecoalesce->use_adaptive_rx_coalesce = lp->rx_dim_enabled;

	spin_lock_irq(&lp->rx_cr_lock);
	cr = lp->rx_dma_cr;
	spin_unlock_irq(&lp->rx_cr_lock);
	axienet_coalesce_params(lp, cr,
				&ecoalesce->rx_max_coalesced_frames,
				&ecoalesce->rx_coalesce_usecs);

	spin_lock_irq(&lp->tx_cr_lock);
	cr = lp->tx_dma_cr;
	spin_unlock_irq(&lp->tx_cr_lock);
	axienet_coalesce_params(lp, cr,
				&ecoalesce->tx_max_coalesced_frames,
				&ecoalesce->tx_coalesce_usecs);
	return 0;
}
/**
 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
 * @ndev:	Pointer to net_device structure
 * @ecoalesce:	Pointer to ethtool_coalesce structure
 * @kernel_coal: ethtool CQE mode setting structure
 * @extack:	extack for reporting error messages
 *
 * This implements ethtool command for setting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
 * prompt to execute this function.
 *
 * Return: 0, on success, Non-zero error value on failure.
 */
static int
axienet_ethtools_set_coalesce(struct net_device *ndev,
			      struct ethtool_coalesce *ecoalesce,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);
	bool new_dim = ecoalesce->use_adaptive_rx_coalesce;
	bool old_dim = lp->rx_dim_enabled;
	u32 cr, mask = ~XAXIDMA_CR_RUNSTOP_MASK;

	if (ecoalesce->rx_max_coalesced_frames > 255 ||
	    ecoalesce->tx_max_coalesced_frames > 255) {
		NL_SET_ERR_MSG(extack, "frames must be less than 256");
		return -EINVAL;
	}

	if (!ecoalesce->rx_max_coalesced_frames ||
	    !ecoalesce->tx_max_coalesced_frames) {
		NL_SET_ERR_MSG(extack, "frames must be non-zero");
		return -EINVAL;
	}

	if (((ecoalesce->rx_max_coalesced_frames > 1 || new_dim) &&
	     !ecoalesce->rx_coalesce_usecs) ||
	    (ecoalesce->tx_max_coalesced_frames > 1 &&
	     !ecoalesce->tx_coalesce_usecs)) {
		NL_SET_ERR_MSG(extack,
			       "usecs must be non-zero when frames is greater than one");
		return -EINVAL;
	}

	if (new_dim && !old_dim) {
		cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp),
				     ecoalesce->rx_coalesce_usecs);
	} else if (!new_dim) {
		if (old_dim) {
			WRITE_ONCE(lp->rx_dim_enabled, false);
			napi_synchronize(&lp->napi_rx);
			flush_work(&lp->rx_dim.work);
		}

		cr = axienet_calc_cr(lp, ecoalesce->rx_max_coalesced_frames,
				     ecoalesce->rx_coalesce_usecs);
	} else {
		/* Dummy value for count just to calculate timer */
		cr = axienet_calc_cr(lp, 2, ecoalesce->rx_coalesce_usecs);
		mask = XAXIDMA_DELAY_MASK | XAXIDMA_IRQ_DELAY_MASK;
	}

	axienet_update_coalesce_rx(lp, cr, mask);
	if (new_dim && !old_dim)
		WRITE_ONCE(lp->rx_dim_enabled, true);

	cr = axienet_calc_cr(lp, ecoalesce->tx_max_coalesced_frames,
			     ecoalesce->tx_coalesce_usecs);
	axienet_update_coalesce_tx(lp, cr, ~XAXIDMA_CR_RUNSTOP_MASK);
	return 0;
}
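/* Illustrative usage only (assumed typical ethtool invocations, matching the
 * checks above): fixed coalescing can be requested with
 *	ethtool -C ethX adaptive-rx off rx-frames 25 rx-usecs 16 tx-frames 32 tx-usecs 50
 * and adaptive (DIM) RX coalescing re-enabled with
 *	ethtool -C ethX adaptive-rx on rx-usecs 16
 * A frame count greater than one (or adaptive RX) with a zero usecs value is
 * rejected with -EINVAL.
 */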
static int
axienet_ethtools_get_link_ksettings(struct net_device *ndev,
				    struct ethtool_link_ksettings *cmd)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_get(lp->phylink, cmd);
}

static int
axienet_ethtools_set_link_ksettings(struct net_device *ndev,
				    const struct ethtool_link_ksettings *cmd)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_set(lp->phylink, cmd);
}

static int axienet_ethtools_nway_reset(struct net_device *dev)
{
	struct axienet_local *lp = netdev_priv(dev);

	return phylink_ethtool_nway_reset(lp->phylink);
}
static void axienet_ethtools_get_ethtool_stats(struct net_device *dev,
					       struct ethtool_stats *stats,
					       u64 *data)
{
	struct axienet_local *lp = netdev_priv(dev);
	unsigned int start;

	do {
		start = read_seqcount_begin(&lp->hw_stats_seqcount);
		data[0] = axienet_stat(lp, STAT_RX_BYTES);
		data[1] = axienet_stat(lp, STAT_TX_BYTES);
		data[2] = axienet_stat(lp, STAT_RX_VLAN_FRAMES);
		data[3] = axienet_stat(lp, STAT_TX_VLAN_FRAMES);
		data[6] = axienet_stat(lp, STAT_TX_PFC_FRAMES);
		data[7] = axienet_stat(lp, STAT_RX_PFC_FRAMES);
		data[8] = axienet_stat(lp, STAT_USER_DEFINED0);
		data[9] = axienet_stat(lp, STAT_USER_DEFINED1);
		data[10] = axienet_stat(lp, STAT_USER_DEFINED2);
	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
}

static const char axienet_ethtool_stats_strings[][ETH_GSTRING_LEN] = {
	"Received bytes",
	"Transmitted bytes",
	"RX Good VLAN Tagged Frames",
	"TX Good VLAN Tagged Frames",
	"TX Good PFC Frames",
	"RX Good PFC Frames",
	"User Defined Counter 0",
	"User Defined Counter 1",
	"User Defined Counter 2",
};

static void axienet_ethtools_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, axienet_ethtool_stats_strings,
		       sizeof(axienet_ethtool_stats_strings));
		break;
	}
}

static int axienet_ethtools_get_sset_count(struct net_device *dev, int sset)
{
	struct axienet_local *lp = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		if (lp->features & XAE_FEATURE_STATS)
			return ARRAY_SIZE(axienet_ethtool_stats_strings);
		fallthrough;
	default:
		return -EOPNOTSUPP;
	}
}
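/* The counters above are reported by "ethtool -S ethX" and are only exposed
 * when the MAC was built with the statistics option (XAE_FEATURE_STATS);
 * without it the string set is not advertised.
 */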
static void
axienet_ethtools_get_pause_stats(struct net_device *dev,
				 struct ethtool_pause_stats *pause_stats)
{
	struct axienet_local *lp = netdev_priv(dev);
	unsigned int start;

	if (!(lp->features & XAE_FEATURE_STATS))
		return;

	do {
		start = read_seqcount_begin(&lp->hw_stats_seqcount);
		pause_stats->tx_pause_frames =
			axienet_stat(lp, STAT_TX_PAUSE_FRAMES);
		pause_stats->rx_pause_frames =
			axienet_stat(lp, STAT_RX_PAUSE_FRAMES);
	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
}
static void
axienet_ethtool_get_eth_mac_stats(struct net_device *dev,
				  struct ethtool_eth_mac_stats *mac_stats)
{
	struct axienet_local *lp = netdev_priv(dev);
	unsigned int start;

	if (!(lp->features & XAE_FEATURE_STATS))
		return;

	do {
		start = read_seqcount_begin(&lp->hw_stats_seqcount);
		mac_stats->FramesTransmittedOK =
			axienet_stat(lp, STAT_TX_GOOD_FRAMES);
		mac_stats->SingleCollisionFrames =
			axienet_stat(lp, STAT_TX_SINGLE_COLLISION_FRAMES);
		mac_stats->MultipleCollisionFrames =
			axienet_stat(lp, STAT_TX_MULTIPLE_COLLISION_FRAMES);
		mac_stats->FramesReceivedOK =
			axienet_stat(lp, STAT_RX_GOOD_FRAMES);
		mac_stats->FrameCheckSequenceErrors =
			axienet_stat(lp, STAT_RX_FCS_ERRORS);
		mac_stats->AlignmentErrors =
			axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
		mac_stats->FramesWithDeferredXmissions =
			axienet_stat(lp, STAT_TX_DEFERRED_FRAMES);
		mac_stats->LateCollisions =
			axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
		mac_stats->FramesAbortedDueToXSColls =
			axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
		mac_stats->MulticastFramesXmittedOK =
			axienet_stat(lp, STAT_TX_MULTICAST_FRAMES);
		mac_stats->BroadcastFramesXmittedOK =
			axienet_stat(lp, STAT_TX_BROADCAST_FRAMES);
		mac_stats->FramesWithExcessiveDeferral =
			axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL);
		mac_stats->MulticastFramesReceivedOK =
			axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
		mac_stats->BroadcastFramesReceivedOK =
			axienet_stat(lp, STAT_RX_BROADCAST_FRAMES);
		mac_stats->InRangeLengthErrors =
			axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
}

static void
axienet_ethtool_get_eth_ctrl_stats(struct net_device *dev,
				   struct ethtool_eth_ctrl_stats *ctrl_stats)
{
	struct axienet_local *lp = netdev_priv(dev);
	unsigned int start;

	if (!(lp->features & XAE_FEATURE_STATS))
		return;

	do {
		start = read_seqcount_begin(&lp->hw_stats_seqcount);
		ctrl_stats->MACControlFramesTransmitted =
			axienet_stat(lp, STAT_TX_CONTROL_FRAMES);
		ctrl_stats->MACControlFramesReceived =
			axienet_stat(lp, STAT_RX_CONTROL_FRAMES);
		ctrl_stats->UnsupportedOpcodesReceived =
			axienet_stat(lp, STAT_RX_CONTROL_OPCODE_ERRORS);
	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
}
static const struct ethtool_rmon_hist_range axienet_rmon_ranges[] = {
	{   64,    64 },
	{   65,   127 },
	{  128,   255 },
	{  256,   511 },
	{  512,  1023 },
	{ 1024,  1518 },
	{ 1519, 16384 },
	{ },
};
static void
axienet_ethtool_get_rmon_stats(struct net_device *dev,
			       struct ethtool_rmon_stats *rmon_stats,
			       const struct ethtool_rmon_hist_range **ranges)
{
	struct axienet_local *lp = netdev_priv(dev);
	unsigned int start;

	if (!(lp->features & XAE_FEATURE_STATS))
		return;

	do {
		start = read_seqcount_begin(&lp->hw_stats_seqcount);
		rmon_stats->undersize_pkts =
			axienet_stat(lp, STAT_UNDERSIZE_FRAMES);
		rmon_stats->oversize_pkts =
			axienet_stat(lp, STAT_RX_OVERSIZE_FRAMES);
		rmon_stats->fragments =
			axienet_stat(lp, STAT_FRAGMENT_FRAMES);

		rmon_stats->hist[0] =
			axienet_stat(lp, STAT_RX_64_BYTE_FRAMES);
		rmon_stats->hist[1] =
			axienet_stat(lp, STAT_RX_65_127_BYTE_FRAMES);
		rmon_stats->hist[2] =
			axienet_stat(lp, STAT_RX_128_255_BYTE_FRAMES);
		rmon_stats->hist[3] =
			axienet_stat(lp, STAT_RX_256_511_BYTE_FRAMES);
		rmon_stats->hist[4] =
			axienet_stat(lp, STAT_RX_512_1023_BYTE_FRAMES);
		rmon_stats->hist[5] =
			axienet_stat(lp, STAT_RX_1024_MAX_BYTE_FRAMES);
		rmon_stats->hist[6] =
			rmon_stats->oversize_pkts;

		rmon_stats->hist_tx[0] =
			axienet_stat(lp, STAT_TX_64_BYTE_FRAMES);
		rmon_stats->hist_tx[1] =
			axienet_stat(lp, STAT_TX_65_127_BYTE_FRAMES);
		rmon_stats->hist_tx[2] =
			axienet_stat(lp, STAT_TX_128_255_BYTE_FRAMES);
		rmon_stats->hist_tx[3] =
			axienet_stat(lp, STAT_TX_256_511_BYTE_FRAMES);
		rmon_stats->hist_tx[4] =
			axienet_stat(lp, STAT_TX_512_1023_BYTE_FRAMES);
		rmon_stats->hist_tx[5] =
			axienet_stat(lp, STAT_TX_1024_MAX_BYTE_FRAMES);
		rmon_stats->hist_tx[6] =
			axienet_stat(lp, STAT_TX_OVERSIZE_FRAMES);
	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));

	*ranges = axienet_rmon_ranges;
}
static const struct ethtool_ops axienet_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
	.get_drvinfo    = axienet_ethtools_get_drvinfo,
	.get_regs_len   = axienet_ethtools_get_regs_len,
	.get_regs       = axienet_ethtools_get_regs,
	.get_link       = ethtool_op_get_link,
	.get_ringparam	= axienet_ethtools_get_ringparam,
	.set_ringparam	= axienet_ethtools_set_ringparam,
	.get_pauseparam = axienet_ethtools_get_pauseparam,
	.set_pauseparam = axienet_ethtools_set_pauseparam,
	.get_coalesce   = axienet_ethtools_get_coalesce,
	.set_coalesce   = axienet_ethtools_set_coalesce,
	.get_link_ksettings = axienet_ethtools_get_link_ksettings,
	.set_link_ksettings = axienet_ethtools_set_link_ksettings,
	.nway_reset	= axienet_ethtools_nway_reset,
	.get_ethtool_stats = axienet_ethtools_get_ethtool_stats,
	.get_strings    = axienet_ethtools_get_strings,
	.get_sset_count = axienet_ethtools_get_sset_count,
	.get_pause_stats = axienet_ethtools_get_pause_stats,
	.get_eth_mac_stats = axienet_ethtool_get_eth_mac_stats,
	.get_eth_ctrl_stats = axienet_ethtool_get_eth_ctrl_stats,
	.get_rmon_stats = axienet_ethtool_get_rmon_stats,
};
static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
{
	return container_of(pcs, struct axienet_local, pcs);
}

static void axienet_pcs_get_state(struct phylink_pcs *pcs,
				  unsigned int neg_mode,
				  struct phylink_link_state *state)
{
	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;

	phylink_mii_c22_pcs_get_state(pcs_phy, neg_mode, state);
}

static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
{
	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;

	phylink_mii_c22_pcs_an_restart(pcs_phy);
}

static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
			      phy_interface_t interface,
			      const unsigned long *advertising,
			      bool permit_pause_to_mac)
{
	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
	struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
	struct axienet_local *lp = netdev_priv(ndev);
	int ret;

	if (lp->switch_x_sgmii) {
		ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG,
				    interface == PHY_INTERFACE_MODE_SGMII ?
					XLNX_MII_STD_SELECT_SGMII : 0);
		if (ret < 0) {
			netdev_warn(ndev,
				    "Failed to switch PHY interface: %d\n",
				    ret);
			return ret;
		}
	}

	ret = phylink_mii_c22_pcs_config(pcs_phy, interface, advertising,
					 neg_mode);
	if (ret < 0)
		netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);

	return ret;
}

static const struct phylink_pcs_ops axienet_pcs_ops = {
	.pcs_get_state = axienet_pcs_get_state,
	.pcs_config = axienet_pcs_config,
	.pcs_an_restart = axienet_pcs_an_restart,
};
static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
						  phy_interface_t interface)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);

	if (interface == PHY_INTERFACE_MODE_1000BASEX ||
	    interface == PHY_INTERFACE_MODE_SGMII)
		return &lp->pcs;

	return NULL;
}

static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
			       const struct phylink_link_state *state)
{
	/* nothing meaningful to do */
}

static void axienet_mac_link_down(struct phylink_config *config,
				  unsigned int mode,
				  phy_interface_t interface)
{
	/* nothing meaningful to do */
}
static void axienet_mac_link_up(struct phylink_config *config,
				struct phy_device *phy,
				unsigned int mode, phy_interface_t interface,
				int speed, int duplex,
				bool tx_pause, bool rx_pause)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);
	u32 emmc_reg, fcc_reg;

	emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
	emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;

	switch (speed) {
	case SPEED_1000:
		emmc_reg |= XAE_EMMC_LINKSPD_1000;
		break;
	case SPEED_100:
		emmc_reg |= XAE_EMMC_LINKSPD_100;
		break;
	case SPEED_10:
		emmc_reg |= XAE_EMMC_LINKSPD_10;
		break;
	default:
		dev_err(&ndev->dev,
			"Speed other than 10, 100 or 1Gbps is not supported\n");
		break;
	}

	axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);

	fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
	if (tx_pause)
		fcc_reg |= XAE_FCC_FCTX_MASK;
	else
		fcc_reg &= ~XAE_FCC_FCTX_MASK;
	if (rx_pause)
		fcc_reg |= XAE_FCC_FCRX_MASK;
	else
		fcc_reg &= ~XAE_FCC_FCRX_MASK;
	axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
}

static const struct phylink_mac_ops axienet_phylink_ops = {
	.mac_select_pcs = axienet_mac_select_pcs,
	.mac_config = axienet_mac_config,
	.mac_link_down = axienet_mac_link_down,
	.mac_link_up = axienet_mac_link_up,
};
/**
 * axienet_dma_err_handler - Work queue task for Axi DMA Error
 * @work:	pointer to work_struct
 *
 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
 * Tx/Rx BDs.
 */
static void axienet_dma_err_handler(struct work_struct *work)
{
	u32 i;
	u32 axienet_status;
	struct axidma_bd *cur_p;
	struct axienet_local *lp = container_of(work, struct axienet_local,
						dma_err_task);
	struct net_device *ndev = lp->ndev;

	/* Don't bother if we are going to stop anyway */
	if (READ_ONCE(lp->stopping))
		return;

	napi_disable(&lp->napi_tx);
	napi_disable(&lp->napi_rx);

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	axienet_dma_stop(lp);
	netdev_reset_queue(ndev);

	for (i = 0; i < lp->tx_bd_num; i++) {
		cur_p = &lp->tx_bd_v[i];
		if (cur_p->cntrl) {
			dma_addr_t addr = desc_get_phys_addr(lp, cur_p);

			dma_unmap_single(lp->dev, addr,
					 (cur_p->cntrl &
					  XAXIDMA_BD_CTRL_LENGTH_MASK),
					 DMA_TO_DEVICE);
		}
		if (cur_p->skb)
			dev_kfree_skb_irq(cur_p->skb);
		cur_p->phys = 0;
		cur_p->phys_msb = 0;
		cur_p->cntrl = 0;
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
		cur_p->skb = NULL;
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		cur_p = &lp->rx_bd_v[i];
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
	}

	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	axienet_dma_start(lp);

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);
	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	napi_enable(&lp->napi_rx);
	napi_enable(&lp->napi_tx);
	axienet_setoptions(ndev, lp->options);
}
/**
 * axienet_probe - Axi Ethernet probe function.
 * @pdev:	Pointer to platform device structure.
 *
 * Return: 0, on success
 *	    Non-zero error value on failure.
 *
 * This is the probe routine for Axi Ethernet driver. This is called before
 * any other driver routines are invoked. It allocates and sets up the Ethernet
 * device. Parses through device tree and populates fields of
 * axienet_local. It registers the Ethernet device.
 */
static int axienet_probe(struct platform_device *pdev)
{
	int ret;
	struct device_node *np;
	struct axienet_local *lp;
	struct net_device *ndev;
	struct resource *ethres;
	u8 mac_addr[ETH_ALEN];
	int addr_width = 32;
	u32 value;

	ndev = alloc_etherdev(sizeof(*lp));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);

	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->features = NETIF_F_SG;
	ndev->ethtool_ops = &axienet_ethtool_ops;

	/* MTU range: 64 - 9000 */
	ndev->min_mtu = 64;
	ndev->max_mtu = XAE_JUMBO_MTU;
	lp = netdev_priv(ndev);
	lp->ndev = ndev;
	lp->dev = &pdev->dev;
	lp->options = XAE_OPTION_DEFAULTS;
	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
	lp->tx_bd_num = TX_BD_NUM_DEFAULT;

	u64_stats_init(&lp->rx_stat_sync);
	u64_stats_init(&lp->tx_stat_sync);

	mutex_init(&lp->stats_lock);
	seqcount_mutex_init(&lp->hw_stats_seqcount, &lp->stats_lock);
	INIT_DEFERRABLE_WORK(&lp->stats_work, axienet_refresh_stats);
	lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
	if (!lp->axi_clk) {
		/* For backward compatibility, if named AXI clock is not present,
		 * treat the first clock specified as the AXI clock.
		 */
		lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
	}
	if (IS_ERR(lp->axi_clk)) {
		ret = PTR_ERR(lp->axi_clk);
		goto free_netdev;
	}
	ret = clk_prepare_enable(lp->axi_clk);
	if (ret) {
		dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
		goto free_netdev;
	}

	lp->misc_clks[0].id = "axis_clk";
	lp->misc_clks[1].id = "ref_clk";
	lp->misc_clks[2].id = "mgt_clk";

	ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	if (ret)
		goto cleanup_clk;

	ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	if (ret)
		goto cleanup_clk;
	/* Map device registers */
	lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
	if (IS_ERR(lp->regs)) {
		ret = PTR_ERR(lp->regs);
		goto cleanup_clk;
	}
	lp->regs_start = ethres->start;
	/* Setup checksum offload, but default to off if not specified */
	lp->features = 0;

	if (axienet_ior(lp, XAE_ABILITY_OFFSET) & XAE_ABILITY_STATS)
		lp->features |= XAE_FEATURE_STATS;

	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
	if (!ret) {
		switch (value) {
		case 1:
			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
			/* Can checksum any contiguous range */
			ndev->features |= NETIF_F_HW_CSUM;
			break;
		case 2:
			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		}
	}
	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
	if (!ret) {
		switch (value) {
		case 1:
			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
			ndev->features |= NETIF_F_RXCSUM;
			break;
		case 2:
			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
			ndev->features |= NETIF_F_RXCSUM;
			break;
		}
	}
	/* For supporting jumbo frames, the Axi Ethernet hardware must have
	 * a larger Rx/Tx Memory. Typically, the size must be large so that
	 * we can enable jumbo option and start supporting jumbo frames.
	 * Here we check for memory allocated for Rx/Tx in the hardware from
	 * the device-tree and accordingly set flags.
	 */
	of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);

	lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
						   "xlnx,switch-x-sgmii");
	/* Start with the proprietary, and broken phy_type */
	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
	if (!ret) {
		netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
		switch (value) {
		case XAE_PHY_TYPE_MII:
			lp->phy_mode = PHY_INTERFACE_MODE_MII;
			break;
		case XAE_PHY_TYPE_GMII:
			lp->phy_mode = PHY_INTERFACE_MODE_GMII;
			break;
		case XAE_PHY_TYPE_RGMII_2_0:
			lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
			break;
		case XAE_PHY_TYPE_SGMII:
			lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
			break;
		case XAE_PHY_TYPE_1000BASE_X:
			lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
			break;
		default:
			ret = -EINVAL;
			goto cleanup_clk;
		}
	} else {
		ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
		if (ret)
			goto cleanup_clk;
	}
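	/* Illustrative device-tree fragment only (hypothetical labels and
	 * values; the property names match what is parsed in this probe
	 * routine):
	 *
	 *	ethernet@40c00000 {
	 *		compatible = "xlnx,axi-ethernet-1.00.a";
	 *		phy-mode = "sgmii";	(preferred over xlnx,phy-type)
	 *		xlnx,rxmem = <0x8000>;
	 *		xlnx,txcsum = <2>;
	 *		xlnx,rxcsum = <2>;
	 *		axistream-connected = <&axi_dma>;
	 *		pcs-handle = <&sgmii_pcs>;
	 *	};
	 */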
	if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
	    lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
		dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
		ret = -EINVAL;
		goto cleanup_clk;
	}
	if (!of_property_present(pdev->dev.of_node, "dmas")) {
		/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
		np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);

		if (np) {
			struct resource dmares;

			ret = of_address_to_resource(np, 0, &dmares);
			if (ret) {
				dev_err(&pdev->dev,
					"unable to get DMA resource\n");
				of_node_put(np);
				goto cleanup_clk;
			}
			lp->dma_regs = devm_ioremap_resource(&pdev->dev,
							     &dmares);
			lp->rx_irq = irq_of_parse_and_map(np, 1);
			lp->tx_irq = irq_of_parse_and_map(np, 0);
			of_node_put(np);
			lp->eth_irq = platform_get_irq_optional(pdev, 0);
		} else {
			/* Check for these resources directly on the Ethernet node. */
			lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
			lp->rx_irq = platform_get_irq(pdev, 1);
			lp->tx_irq = platform_get_irq(pdev, 0);
			lp->eth_irq = platform_get_irq_optional(pdev, 2);
		}
		if (IS_ERR(lp->dma_regs)) {
			dev_err(&pdev->dev, "could not map DMA regs\n");
			ret = PTR_ERR(lp->dma_regs);
			goto cleanup_clk;
		}
		if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
			dev_err(&pdev->dev, "could not determine irqs\n");
			ret = -ENOMEM;
			goto cleanup_clk;
		}
		/* Reset core now that clocks are enabled, prior to accessing MDIO */
		ret = __axienet_device_reset(lp);
		if (ret)
			goto cleanup_clk;

		/* Autodetect the need for 64-bit DMA pointers.
		 * When the IP is configured for a bus width bigger than 32 bits,
		 * writing the MSB registers is mandatory, even if they are all 0.
		 * We can detect this case by writing all 1's to one such register
		 * and see if that sticks: when the IP is configured for 32 bits
		 * only, those registers are RES0.
		 * Those MSB registers were introduced in IP v7.1, which we check first.
		 */
		if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
			void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;

			iowrite32(0x0, desc);
			if (ioread32(desc) == 0) {	/* sanity check */
				iowrite32(0xffffffff, desc);
				if (ioread32(desc) > 0) {
					lp->features |= XAE_FEATURE_DMA_64BIT;
					addr_width = 64;
					dev_info(&pdev->dev,
						 "autodetected 64-bit DMA range\n");
				}
				iowrite32(0x0, desc);
			}
		}
		if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
			dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
			ret = -EINVAL;
			goto cleanup_clk;
		}

		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
		if (ret) {
			dev_err(&pdev->dev, "No suitable DMA available\n");
			goto cleanup_clk;
		}
		netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
		netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
	} else {
		struct xilinx_vdma_config cfg;
		struct dma_chan *tx_chan;

		lp->eth_irq = platform_get_irq_optional(pdev, 0);
		if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) {
			ret = lp->eth_irq;
			goto cleanup_clk;
		}
		tx_chan = dma_request_chan(lp->dev, "tx_chan0");
		if (IS_ERR(tx_chan)) {
			ret = PTR_ERR(tx_chan);
			dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
			goto cleanup_clk;
		}

		cfg.reset = 1;
		/* As name says VDMA but it has support for DMA channel reset */
		ret = xilinx_vdma_channel_set_config(tx_chan, &cfg);
		if (ret < 0) {
			dev_err(&pdev->dev, "Reset channel failed\n");
			dma_release_channel(tx_chan);
			goto cleanup_clk;
		}

		dma_release_channel(tx_chan);
		lp->use_dmaengine = 1;
	}

	if (lp->use_dmaengine)
		ndev->netdev_ops = &axienet_netdev_dmaengine_ops;
	else
		ndev->netdev_ops = &axienet_netdev_ops;
	/* Check for Ethernet core IRQ (optional) */
	if (lp->eth_irq <= 0)
		dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
	/* Retrieve the MAC address */
	ret = of_get_mac_address(pdev->dev.of_node, mac_addr);
	if (!ret) {
		axienet_set_mac_address(ndev, mac_addr);
	} else {
		dev_warn(&pdev->dev, "could not find MAC address property: %d\n",
			 ret);
		axienet_set_mac_address(ndev, NULL);
	}
	spin_lock_init(&lp->rx_cr_lock);
	spin_lock_init(&lp->tx_cr_lock);
	INIT_WORK(&lp->rx_dim.work, axienet_rx_dim_work);
	lp->rx_dim_enabled = true;
	lp->rx_dim.profile_ix = 1;
	lp->rx_dma_cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp),
					XAXIDMA_DFT_RX_USEC);
	lp->tx_dma_cr = axienet_calc_cr(lp, XAXIDMA_DFT_TX_THRESHOLD,
					XAXIDMA_DFT_TX_USEC);
	ret = axienet_mdio_setup(lp);
	if (ret)
		dev_warn(&pdev->dev,
			 "error registering MDIO bus: %d\n", ret);

	if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
	    lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
		np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
		if (!np) {
			/* Deprecated: Always use "pcs-handle" for pcs_phy.
			 * Falling back to "phy-handle" here is only for
			 * backward compatibility with old device trees.
			 */
			np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
		}
		if (!np) {
			dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
			ret = -EINVAL;
			goto cleanup_mdio;
		}
		lp->pcs_phy = of_mdio_find_device(np);
		if (!lp->pcs_phy) {
			ret = -EPROBE_DEFER;
			of_node_put(np);
			goto cleanup_mdio;
		}
		of_node_put(np);
		lp->pcs.ops = &axienet_pcs_ops;
		lp->pcs.poll = true;
	}
	lp->phylink_config.dev = &ndev->dev;
	lp->phylink_config.type = PHYLINK_NETDEV;
	lp->phylink_config.mac_managed_pm = true;
	lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
					      MAC_10FD | MAC_100FD | MAC_1000FD;

	__set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
	if (lp->switch_x_sgmii) {
		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
			  lp->phylink_config.supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_SGMII,
			  lp->phylink_config.supported_interfaces);
	}

	lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
				     lp->phy_mode,
				     &axienet_phylink_ops);
	if (IS_ERR(lp->phylink)) {
		ret = PTR_ERR(lp->phylink);
		dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
		goto cleanup_mdio;
	}
	ret = register_netdev(lp->ndev);
	if (ret) {
		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
		goto cleanup_phylink;
	}

	return 0;

cleanup_phylink:
	phylink_destroy(lp->phylink);

cleanup_mdio:
	if (lp->pcs_phy)
		put_device(&lp->pcs_phy->dev);
	if (lp->mii_bus)
		axienet_mdio_teardown(lp);
cleanup_clk:
	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	clk_disable_unprepare(lp->axi_clk);

free_netdev:
	free_netdev(ndev);

	return ret;
}
static void axienet_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct axienet_local *lp = netdev_priv(ndev);

	unregister_netdev(ndev);

	if (lp->phylink)
		phylink_destroy(lp->phylink);

	if (lp->pcs_phy)
		put_device(&lp->pcs_phy->dev);

	axienet_mdio_teardown(lp);

	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	clk_disable_unprepare(lp->axi_clk);

	free_netdev(ndev);
}
static void axienet_shutdown(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	rtnl_lock();
	netif_device_detach(ndev);

	if (netif_running(ndev))
		dev_close(ndev);

	rtnl_unlock();
}

static int axienet_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	if (!netif_running(ndev))
		return 0;

	netif_device_detach(ndev);

	rtnl_lock();
	axienet_stop(ndev);
	rtnl_unlock();

	return 0;
}

static int axienet_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	if (!netif_running(ndev))
		return 0;

	rtnl_lock();
	axienet_open(ndev);
	rtnl_unlock();

	netif_device_attach(ndev);

	return 0;
}
,
3219 axienet_suspend
, axienet_resume
);
3221 static struct platform_driver axienet_driver
= {
3222 .probe
= axienet_probe
,
3223 .remove
= axienet_remove
,
3224 .shutdown
= axienet_shutdown
,
3226 .name
= "xilinx_axienet",
3227 .pm
= &axienet_pm_ops
,
3228 .of_match_table
= axienet_of_match
,
3232 module_platform_driver(axienet_driver
);
MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
MODULE_AUTHOR("Xilinx");
MODULE_LICENSE("GPL");