1 // SPDX-License-Identifier: GPL-2.0-only
3 * Xilinx Axi Ethernet device driver
5 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
6 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
7 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
8 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
9 * Copyright (c) 2010 - 2011 PetaLogix
10 * Copyright (c) 2019 - 2022 Calian Advanced Technologies
11 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
13 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
17 * - Add Axi Fifo support.
18 * - Factor out Axi DMA code into separate driver.
19 * - Test and fix basic multicast filtering.
20 * - Add support for extended multicast filtering.
21 * - Test basic VLAN support.
22 * - Add support for extended VLAN support.
25 #include <linux/clk.h>
26 #include <linux/delay.h>
27 #include <linux/etherdevice.h>
28 #include <linux/module.h>
29 #include <linux/netdevice.h>
31 #include <linux/of_mdio.h>
32 #include <linux/of_net.h>
33 #include <linux/of_irq.h>
34 #include <linux/of_address.h>
35 #include <linux/platform_device.h>
36 #include <linux/skbuff.h>
37 #include <linux/math64.h>
38 #include <linux/phy.h>
39 #include <linux/mii.h>
40 #include <linux/ethtool.h>
41 #include <linux/dmaengine.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/dma/xilinx_dma.h>
44 #include <linux/circ_buf.h>
45 #include <net/netdev_queues.h>
47 #include "xilinx_axienet.h"
49 /* Descriptors defines for Tx and Rx DMA */
50 #define TX_BD_NUM_DEFAULT 128
51 #define RX_BD_NUM_DEFAULT 1024
52 #define TX_BD_NUM_MIN (MAX_SKB_FRAGS + 1)
53 #define TX_BD_NUM_MAX 4096
54 #define RX_BD_NUM_MAX 4096
55 #define DMA_NUM_APP_WORDS 5
57 #define RX_BUF_NUM_DEFAULT 128
59 /* Must be shorter than length of ethtool_drvinfo.driver field to fit */
60 #define DRIVER_NAME "xaxienet"
61 #define DRIVER_DESCRIPTION "Xilinx Axi Ethernet driver"
62 #define DRIVER_VERSION "1.00a"
64 #define AXIENET_REGS_N 40
66 static void axienet_rx_submit_desc(struct net_device
*ndev
);
68 /* Match table for of_platform binding */
69 static const struct of_device_id axienet_of_match
[] = {
70 { .compatible
= "xlnx,axi-ethernet-1.00.a", },
71 { .compatible
= "xlnx,axi-ethernet-1.01.a", },
72 { .compatible
= "xlnx,axi-ethernet-2.01.a", },
76 MODULE_DEVICE_TABLE(of
, axienet_of_match
);
78 /* Option table for setting up Axi Ethernet hardware options */
79 static struct axienet_option axienet_options
[] = {
80 /* Turn on jumbo packet support for both Rx and Tx */
82 .opt
= XAE_OPTION_JUMBO
,
84 .m_or
= XAE_TC_JUM_MASK
,
86 .opt
= XAE_OPTION_JUMBO
,
87 .reg
= XAE_RCW1_OFFSET
,
88 .m_or
= XAE_RCW1_JUM_MASK
,
89 }, { /* Turn on VLAN packet support for both Rx and Tx */
90 .opt
= XAE_OPTION_VLAN
,
92 .m_or
= XAE_TC_VLAN_MASK
,
94 .opt
= XAE_OPTION_VLAN
,
95 .reg
= XAE_RCW1_OFFSET
,
96 .m_or
= XAE_RCW1_VLAN_MASK
,
97 }, { /* Turn on FCS stripping on receive packets */
98 .opt
= XAE_OPTION_FCS_STRIP
,
99 .reg
= XAE_RCW1_OFFSET
,
100 .m_or
= XAE_RCW1_FCS_MASK
,
101 }, { /* Turn on FCS insertion on transmit packets */
102 .opt
= XAE_OPTION_FCS_INSERT
,
103 .reg
= XAE_TC_OFFSET
,
104 .m_or
= XAE_TC_FCS_MASK
,
105 }, { /* Turn off length/type field checking on receive packets */
106 .opt
= XAE_OPTION_LENTYPE_ERR
,
107 .reg
= XAE_RCW1_OFFSET
,
108 .m_or
= XAE_RCW1_LT_DIS_MASK
,
109 }, { /* Turn on Rx flow control */
110 .opt
= XAE_OPTION_FLOW_CONTROL
,
111 .reg
= XAE_FCC_OFFSET
,
112 .m_or
= XAE_FCC_FCRX_MASK
,
113 }, { /* Turn on Tx flow control */
114 .opt
= XAE_OPTION_FLOW_CONTROL
,
115 .reg
= XAE_FCC_OFFSET
,
116 .m_or
= XAE_FCC_FCTX_MASK
,
117 }, { /* Turn on promiscuous frame filtering */
118 .opt
= XAE_OPTION_PROMISC
,
119 .reg
= XAE_FMI_OFFSET
,
120 .m_or
= XAE_FMI_PM_MASK
,
121 }, { /* Enable transmitter */
122 .opt
= XAE_OPTION_TXEN
,
123 .reg
= XAE_TC_OFFSET
,
124 .m_or
= XAE_TC_TX_MASK
,
125 }, { /* Enable receiver */
126 .opt
= XAE_OPTION_RXEN
,
127 .reg
= XAE_RCW1_OFFSET
,
128 .m_or
= XAE_RCW1_RX_MASK
,
133 static struct skbuf_dma_descriptor
*axienet_get_rx_desc(struct axienet_local
*lp
, int i
)
135 return lp
->rx_skb_ring
[i
& (RX_BUF_NUM_DEFAULT
- 1)];
138 static struct skbuf_dma_descriptor
*axienet_get_tx_desc(struct axienet_local
*lp
, int i
)
140 return lp
->tx_skb_ring
[i
& (TX_BD_NUM_MAX
- 1)];
144 * axienet_dma_in32 - Memory mapped Axi DMA register read
145 * @lp: Pointer to axienet local structure
146 * @reg: Address offset from the base address of the Axi DMA core
148 * Return: The contents of the Axi DMA register
150 * This function returns the contents of the corresponding Axi DMA register.
152 static inline u32
axienet_dma_in32(struct axienet_local
*lp
, off_t reg
)
154 return ioread32(lp
->dma_regs
+ reg
);
157 static void desc_set_phys_addr(struct axienet_local
*lp
, dma_addr_t addr
,
158 struct axidma_bd
*desc
)
160 desc
->phys
= lower_32_bits(addr
);
161 if (lp
->features
& XAE_FEATURE_DMA_64BIT
)
162 desc
->phys_msb
= upper_32_bits(addr
);
165 static dma_addr_t
desc_get_phys_addr(struct axienet_local
*lp
,
166 struct axidma_bd
*desc
)
168 dma_addr_t ret
= desc
->phys
;
170 if (lp
->features
& XAE_FEATURE_DMA_64BIT
)
171 ret
|= ((dma_addr_t
)desc
->phys_msb
<< 16) << 16;
177 * axienet_dma_bd_release - Release buffer descriptor rings
178 * @ndev: Pointer to the net_device structure
180 * This function is used to release the descriptors allocated in
181 * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
182 * driver stop api is called.
184 static void axienet_dma_bd_release(struct net_device
*ndev
)
187 struct axienet_local
*lp
= netdev_priv(ndev
);
189 /* If we end up here, tx_bd_v must have been DMA allocated. */
190 dma_free_coherent(lp
->dev
,
191 sizeof(*lp
->tx_bd_v
) * lp
->tx_bd_num
,
198 for (i
= 0; i
< lp
->rx_bd_num
; i
++) {
201 /* A NULL skb means this descriptor has not been initialised
204 if (!lp
->rx_bd_v
[i
].skb
)
207 dev_kfree_skb(lp
->rx_bd_v
[i
].skb
);
209 /* For each descriptor, we programmed cntrl with the (non-zero)
210 * descriptor size, after it had been successfully allocated.
211 * So a non-zero value in there means we need to unmap it.
213 if (lp
->rx_bd_v
[i
].cntrl
) {
214 phys
= desc_get_phys_addr(lp
, &lp
->rx_bd_v
[i
]);
215 dma_unmap_single(lp
->dev
, phys
,
216 lp
->max_frm_size
, DMA_FROM_DEVICE
);
220 dma_free_coherent(lp
->dev
,
221 sizeof(*lp
->rx_bd_v
) * lp
->rx_bd_num
,
226 static u64
axienet_dma_rate(struct axienet_local
*lp
)
229 return clk_get_rate(lp
->axi_clk
);
230 return 125000000; /* arbitrary guess if no clock rate set */
234 * axienet_calc_cr() - Calculate control register value
235 * @lp: Device private data
236 * @count: Number of completions before an interrupt
237 * @usec: Microseconds after the last completion before an interrupt
239 * Calculate a control register value based on the coalescing settings. The
240 * run/stop bit is not set.
242 static u32
axienet_calc_cr(struct axienet_local
*lp
, u32 count
, u32 usec
)
246 cr
= FIELD_PREP(XAXIDMA_COALESCE_MASK
, count
) | XAXIDMA_IRQ_IOC_MASK
|
247 XAXIDMA_IRQ_ERROR_MASK
;
248 /* Only set interrupt delay timer if not generating an interrupt on
249 * the first packet. Otherwise leave at 0 to disable delay interrupt.
252 u64 clk_rate
= axienet_dma_rate(lp
);
255 /* 1 Timeout Interval = 125 * (clock period of SG clock) */
256 timer
= DIV64_U64_ROUND_CLOSEST((u64
)usec
* clk_rate
,
257 XAXIDMA_DELAY_SCALE
);
259 timer
= min(timer
, FIELD_MAX(XAXIDMA_DELAY_MASK
));
260 cr
|= FIELD_PREP(XAXIDMA_DELAY_MASK
, timer
) |
261 XAXIDMA_IRQ_DELAY_MASK
;
268 * axienet_coalesce_params() - Extract coalesce parameters from the CR
269 * @lp: Device private data
270 * @cr: The control register to parse
271 * @count: Number of packets before an interrupt
272 * @usec: Idle time (in usec) before an interrupt
274 static void axienet_coalesce_params(struct axienet_local
*lp
, u32 cr
,
275 u32
*count
, u32
*usec
)
277 u64 clk_rate
= axienet_dma_rate(lp
);
278 u64 timer
= FIELD_GET(XAXIDMA_DELAY_MASK
, cr
);
280 *count
= FIELD_GET(XAXIDMA_COALESCE_MASK
, cr
);
281 *usec
= DIV64_U64_ROUND_CLOSEST(timer
* XAXIDMA_DELAY_SCALE
, clk_rate
);
285 * axienet_dma_start - Set up DMA registers and start DMA operation
286 * @lp: Pointer to the axienet_local structure
288 static void axienet_dma_start(struct axienet_local
*lp
)
290 spin_lock_irq(&lp
->rx_cr_lock
);
292 /* Start updating the Rx channel control register */
293 lp
->rx_dma_cr
&= ~XAXIDMA_CR_RUNSTOP_MASK
;
294 axienet_dma_out32(lp
, XAXIDMA_RX_CR_OFFSET
, lp
->rx_dma_cr
);
296 /* Populate the tail pointer and bring the Rx Axi DMA engine out of
297 * halted state. This will make the Rx side ready for reception.
299 axienet_dma_out_addr(lp
, XAXIDMA_RX_CDESC_OFFSET
, lp
->rx_bd_p
);
300 lp
->rx_dma_cr
|= XAXIDMA_CR_RUNSTOP_MASK
;
301 axienet_dma_out32(lp
, XAXIDMA_RX_CR_OFFSET
, lp
->rx_dma_cr
);
302 axienet_dma_out_addr(lp
, XAXIDMA_RX_TDESC_OFFSET
, lp
->rx_bd_p
+
303 (sizeof(*lp
->rx_bd_v
) * (lp
->rx_bd_num
- 1)));
304 lp
->rx_dma_started
= true;
306 spin_unlock_irq(&lp
->rx_cr_lock
);
307 spin_lock_irq(&lp
->tx_cr_lock
);
309 /* Start updating the Tx channel control register */
310 lp
->tx_dma_cr
&= ~XAXIDMA_CR_RUNSTOP_MASK
;
311 axienet_dma_out32(lp
, XAXIDMA_TX_CR_OFFSET
, lp
->tx_dma_cr
);
313 /* Write to the RS (Run-stop) bit in the Tx channel control register.
314 * Tx channel is now ready to run. But only after we write to the
315 * tail pointer register that the Tx channel will start transmitting.
317 axienet_dma_out_addr(lp
, XAXIDMA_TX_CDESC_OFFSET
, lp
->tx_bd_p
);
318 lp
->tx_dma_cr
|= XAXIDMA_CR_RUNSTOP_MASK
;
319 axienet_dma_out32(lp
, XAXIDMA_TX_CR_OFFSET
, lp
->tx_dma_cr
);
320 lp
->tx_dma_started
= true;
322 spin_unlock_irq(&lp
->tx_cr_lock
);
326 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
327 * @ndev: Pointer to the net_device structure
329 * Return: 0, on success -ENOMEM, on failure
331 * This function is called to initialize the Rx and Tx DMA descriptor
332 * rings. This initializes the descriptors with required default values
333 * and is called when Axi Ethernet driver reset is called.
335 static int axienet_dma_bd_init(struct net_device
*ndev
)
339 struct axienet_local
*lp
= netdev_priv(ndev
);
341 /* Reset the indexes which are used for accessing the BDs */
346 /* Allocate the Tx and Rx buffer descriptors. */
347 lp
->tx_bd_v
= dma_alloc_coherent(lp
->dev
,
348 sizeof(*lp
->tx_bd_v
) * lp
->tx_bd_num
,
349 &lp
->tx_bd_p
, GFP_KERNEL
);
353 lp
->rx_bd_v
= dma_alloc_coherent(lp
->dev
,
354 sizeof(*lp
->rx_bd_v
) * lp
->rx_bd_num
,
355 &lp
->rx_bd_p
, GFP_KERNEL
);
359 for (i
= 0; i
< lp
->tx_bd_num
; i
++) {
360 dma_addr_t addr
= lp
->tx_bd_p
+
361 sizeof(*lp
->tx_bd_v
) *
362 ((i
+ 1) % lp
->tx_bd_num
);
364 lp
->tx_bd_v
[i
].next
= lower_32_bits(addr
);
365 if (lp
->features
& XAE_FEATURE_DMA_64BIT
)
366 lp
->tx_bd_v
[i
].next_msb
= upper_32_bits(addr
);
369 for (i
= 0; i
< lp
->rx_bd_num
; i
++) {
372 addr
= lp
->rx_bd_p
+ sizeof(*lp
->rx_bd_v
) *
373 ((i
+ 1) % lp
->rx_bd_num
);
374 lp
->rx_bd_v
[i
].next
= lower_32_bits(addr
);
375 if (lp
->features
& XAE_FEATURE_DMA_64BIT
)
376 lp
->rx_bd_v
[i
].next_msb
= upper_32_bits(addr
);
378 skb
= netdev_alloc_skb_ip_align(ndev
, lp
->max_frm_size
);
382 lp
->rx_bd_v
[i
].skb
= skb
;
383 addr
= dma_map_single(lp
->dev
, skb
->data
,
384 lp
->max_frm_size
, DMA_FROM_DEVICE
);
385 if (dma_mapping_error(lp
->dev
, addr
)) {
386 netdev_err(ndev
, "DMA mapping error\n");
389 desc_set_phys_addr(lp
, addr
, &lp
->rx_bd_v
[i
]);
391 lp
->rx_bd_v
[i
].cntrl
= lp
->max_frm_size
;
394 axienet_dma_start(lp
);
398 axienet_dma_bd_release(ndev
);
403 * axienet_set_mac_address - Write the MAC address
404 * @ndev: Pointer to the net_device structure
405 * @address: 6 byte Address to be written as MAC address
407 * This function is called to initialize the MAC address of the Axi Ethernet
408 * core. It writes to the UAW0 and UAW1 registers of the core.
410 static void axienet_set_mac_address(struct net_device
*ndev
,
413 struct axienet_local
*lp
= netdev_priv(ndev
);
416 eth_hw_addr_set(ndev
, address
);
417 if (!is_valid_ether_addr(ndev
->dev_addr
))
418 eth_hw_addr_random(ndev
);
420 /* Set up unicast MAC address filter set its mac address */
421 axienet_iow(lp
, XAE_UAW0_OFFSET
,
422 (ndev
->dev_addr
[0]) |
423 (ndev
->dev_addr
[1] << 8) |
424 (ndev
->dev_addr
[2] << 16) |
425 (ndev
->dev_addr
[3] << 24));
426 axienet_iow(lp
, XAE_UAW1_OFFSET
,
427 (((axienet_ior(lp
, XAE_UAW1_OFFSET
)) &
428 ~XAE_UAW1_UNICASTADDR_MASK
) |
430 (ndev
->dev_addr
[5] << 8))));
434 * netdev_set_mac_address - Write the MAC address (from outside the driver)
435 * @ndev: Pointer to the net_device structure
436 * @p: 6 byte Address to be written as MAC address
438 * Return: 0 for all conditions. Presently, there is no failure case.
440 * This function is called to initialize the MAC address of the Axi Ethernet
441 * core. It calls the core specific axienet_set_mac_address. This is the
442 * function that goes into net_device_ops structure entry ndo_set_mac_address.
444 static int netdev_set_mac_address(struct net_device
*ndev
, void *p
)
446 struct sockaddr
*addr
= p
;
448 axienet_set_mac_address(ndev
, addr
->sa_data
);
453 * axienet_set_multicast_list - Prepare the multicast table
454 * @ndev: Pointer to the net_device structure
456 * This function is called to initialize the multicast table during
457 * initialization. The Axi Ethernet basic multicast support has a four-entry
458 * multicast table which is initialized here. Additionally this function
459 * goes into the net_device_ops structure entry ndo_set_multicast_list. This
460 * means whenever the multicast table entries need to be updated this
461 * function gets called.
463 static void axienet_set_multicast_list(struct net_device
*ndev
)
466 u32 reg
, af0reg
, af1reg
;
467 struct axienet_local
*lp
= netdev_priv(ndev
);
469 reg
= axienet_ior(lp
, XAE_FMI_OFFSET
);
470 reg
&= ~XAE_FMI_PM_MASK
;
471 if (ndev
->flags
& IFF_PROMISC
)
472 reg
|= XAE_FMI_PM_MASK
;
474 reg
&= ~XAE_FMI_PM_MASK
;
475 axienet_iow(lp
, XAE_FMI_OFFSET
, reg
);
477 if (ndev
->flags
& IFF_ALLMULTI
||
478 netdev_mc_count(ndev
) > XAE_MULTICAST_CAM_TABLE_NUM
) {
480 axienet_iow(lp
, XAE_FMI_OFFSET
, reg
);
481 axienet_iow(lp
, XAE_AF0_OFFSET
, 1); /* Multicast bit */
482 axienet_iow(lp
, XAE_AF1_OFFSET
, 0);
483 axienet_iow(lp
, XAE_AM0_OFFSET
, 1); /* ditto */
484 axienet_iow(lp
, XAE_AM1_OFFSET
, 0);
485 axienet_iow(lp
, XAE_FFE_OFFSET
, 1);
487 } else if (!netdev_mc_empty(ndev
)) {
488 struct netdev_hw_addr
*ha
;
490 netdev_for_each_mc_addr(ha
, ndev
) {
491 if (i
>= XAE_MULTICAST_CAM_TABLE_NUM
)
494 af0reg
= (ha
->addr
[0]);
495 af0reg
|= (ha
->addr
[1] << 8);
496 af0reg
|= (ha
->addr
[2] << 16);
497 af0reg
|= (ha
->addr
[3] << 24);
499 af1reg
= (ha
->addr
[4]);
500 af1reg
|= (ha
->addr
[5] << 8);
505 axienet_iow(lp
, XAE_FMI_OFFSET
, reg
);
506 axienet_iow(lp
, XAE_AF0_OFFSET
, af0reg
);
507 axienet_iow(lp
, XAE_AF1_OFFSET
, af1reg
);
508 axienet_iow(lp
, XAE_AM0_OFFSET
, 0xffffffff);
509 axienet_iow(lp
, XAE_AM1_OFFSET
, 0x0000ffff);
510 axienet_iow(lp
, XAE_FFE_OFFSET
, 1);
515 for (; i
< XAE_MULTICAST_CAM_TABLE_NUM
; i
++) {
518 axienet_iow(lp
, XAE_FMI_OFFSET
, reg
);
519 axienet_iow(lp
, XAE_FFE_OFFSET
, 0);
524 * axienet_setoptions - Set an Axi Ethernet option
525 * @ndev: Pointer to the net_device structure
526 * @options: Option to be enabled/disabled
528 * The Axi Ethernet core has multiple features which can be selectively turned
529 * on or off. The typical options could be jumbo frame option, basic VLAN
530 * option, promiscuous mode option etc. This function is used to set or clear
531 * these options in the Axi Ethernet hardware. This is done through
532 * axienet_option structure .
534 static void axienet_setoptions(struct net_device
*ndev
, u32 options
)
537 struct axienet_local
*lp
= netdev_priv(ndev
);
538 struct axienet_option
*tp
= &axienet_options
[0];
541 reg
= ((axienet_ior(lp
, tp
->reg
)) & ~(tp
->m_or
));
542 if (options
& tp
->opt
)
544 axienet_iow(lp
, tp
->reg
, reg
);
548 lp
->options
|= options
;
551 static u64
axienet_stat(struct axienet_local
*lp
, enum temac_stat stat
)
555 if (lp
->reset_in_progress
)
556 return lp
->hw_stat_base
[stat
];
558 counter
= axienet_ior(lp
, XAE_STATS_OFFSET
+ stat
* 8);
559 return lp
->hw_stat_base
[stat
] + (counter
- lp
->hw_last_counter
[stat
]);
562 static void axienet_stats_update(struct axienet_local
*lp
, bool reset
)
564 enum temac_stat stat
;
566 write_seqcount_begin(&lp
->hw_stats_seqcount
);
567 lp
->reset_in_progress
= reset
;
568 for (stat
= 0; stat
< STAT_COUNT
; stat
++) {
569 u32 counter
= axienet_ior(lp
, XAE_STATS_OFFSET
+ stat
* 8);
571 lp
->hw_stat_base
[stat
] += counter
- lp
->hw_last_counter
[stat
];
572 lp
->hw_last_counter
[stat
] = counter
;
574 write_seqcount_end(&lp
->hw_stats_seqcount
);
577 static void axienet_refresh_stats(struct work_struct
*work
)
579 struct axienet_local
*lp
= container_of(work
, struct axienet_local
,
582 mutex_lock(&lp
->stats_lock
);
583 axienet_stats_update(lp
, false);
584 mutex_unlock(&lp
->stats_lock
);
586 /* Just less than 2^32 bytes at 2.5 GBit/s */
587 schedule_delayed_work(&lp
->stats_work
, 13 * HZ
);
590 static int __axienet_device_reset(struct axienet_local
*lp
)
595 /* Save statistics counters in case they will be reset */
596 mutex_lock(&lp
->stats_lock
);
597 if (lp
->features
& XAE_FEATURE_STATS
)
598 axienet_stats_update(lp
, true);
600 /* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
601 * process of Axi DMA takes a while to complete as all pending
602 * commands/transfers will be flushed or completed during this
604 * Note that even though both TX and RX have their own reset register,
605 * they both reset the entire DMA core, so only one needs to be used.
607 axienet_dma_out32(lp
, XAXIDMA_TX_CR_OFFSET
, XAXIDMA_CR_RESET_MASK
);
608 ret
= read_poll_timeout(axienet_dma_in32
, value
,
609 !(value
& XAXIDMA_CR_RESET_MASK
),
610 DELAY_OF_ONE_MILLISEC
, 50000, false, lp
,
611 XAXIDMA_TX_CR_OFFSET
);
613 dev_err(lp
->dev
, "%s: DMA reset timeout!\n", __func__
);
617 /* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
618 ret
= read_poll_timeout(axienet_ior
, value
,
619 value
& XAE_INT_PHYRSTCMPLT_MASK
,
620 DELAY_OF_ONE_MILLISEC
, 50000, false, lp
,
623 dev_err(lp
->dev
, "%s: timeout waiting for PhyRstCmplt\n", __func__
);
627 /* Update statistics counters with new values */
628 if (lp
->features
& XAE_FEATURE_STATS
) {
629 enum temac_stat stat
;
631 write_seqcount_begin(&lp
->hw_stats_seqcount
);
632 lp
->reset_in_progress
= false;
633 for (stat
= 0; stat
< STAT_COUNT
; stat
++) {
635 axienet_ior(lp
, XAE_STATS_OFFSET
+ stat
* 8);
637 lp
->hw_stat_base
[stat
] +=
638 lp
->hw_last_counter
[stat
] - counter
;
639 lp
->hw_last_counter
[stat
] = counter
;
641 write_seqcount_end(&lp
->hw_stats_seqcount
);
645 mutex_unlock(&lp
->stats_lock
);
650 * axienet_dma_stop - Stop DMA operation
651 * @lp: Pointer to the axienet_local structure
653 static void axienet_dma_stop(struct axienet_local
*lp
)
658 spin_lock_irq(&lp
->rx_cr_lock
);
660 cr
= lp
->rx_dma_cr
& ~(XAXIDMA_CR_RUNSTOP_MASK
| XAXIDMA_IRQ_ALL_MASK
);
661 axienet_dma_out32(lp
, XAXIDMA_RX_CR_OFFSET
, cr
);
662 lp
->rx_dma_started
= false;
664 spin_unlock_irq(&lp
->rx_cr_lock
);
665 synchronize_irq(lp
->rx_irq
);
667 spin_lock_irq(&lp
->tx_cr_lock
);
669 cr
= lp
->tx_dma_cr
& ~(XAXIDMA_CR_RUNSTOP_MASK
| XAXIDMA_IRQ_ALL_MASK
);
670 axienet_dma_out32(lp
, XAXIDMA_TX_CR_OFFSET
, cr
);
671 lp
->tx_dma_started
= false;
673 spin_unlock_irq(&lp
->tx_cr_lock
);
674 synchronize_irq(lp
->tx_irq
);
676 /* Give DMAs a chance to halt gracefully */
677 sr
= axienet_dma_in32(lp
, XAXIDMA_RX_SR_OFFSET
);
678 for (count
= 0; !(sr
& XAXIDMA_SR_HALT_MASK
) && count
< 5; ++count
) {
680 sr
= axienet_dma_in32(lp
, XAXIDMA_RX_SR_OFFSET
);
683 sr
= axienet_dma_in32(lp
, XAXIDMA_TX_SR_OFFSET
);
684 for (count
= 0; !(sr
& XAXIDMA_SR_HALT_MASK
) && count
< 5; ++count
) {
686 sr
= axienet_dma_in32(lp
, XAXIDMA_TX_SR_OFFSET
);
689 /* Do a reset to ensure DMA is really stopped */
690 axienet_lock_mii(lp
);
691 __axienet_device_reset(lp
);
692 axienet_unlock_mii(lp
);
696 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
697 * @ndev: Pointer to the net_device structure
699 * This function is called to reset and initialize the Axi Ethernet core. This
700 * is typically called during initialization. It does a reset of the Axi DMA
701 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
702 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
703 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
705 * Returns 0 on success or a negative error number otherwise.
707 static int axienet_device_reset(struct net_device
*ndev
)
710 struct axienet_local
*lp
= netdev_priv(ndev
);
713 lp
->max_frm_size
= XAE_MAX_VLAN_FRAME_SIZE
;
714 lp
->options
|= XAE_OPTION_VLAN
;
715 lp
->options
&= (~XAE_OPTION_JUMBO
);
717 if (ndev
->mtu
> XAE_MTU
&& ndev
->mtu
<= XAE_JUMBO_MTU
) {
718 lp
->max_frm_size
= ndev
->mtu
+ VLAN_ETH_HLEN
+
721 if (lp
->max_frm_size
<= lp
->rxmem
)
722 lp
->options
|= XAE_OPTION_JUMBO
;
725 if (!lp
->use_dmaengine
) {
726 ret
= __axienet_device_reset(lp
);
730 ret
= axienet_dma_bd_init(ndev
);
732 netdev_err(ndev
, "%s: descriptor allocation failed\n",
738 axienet_status
= axienet_ior(lp
, XAE_RCW1_OFFSET
);
739 axienet_status
&= ~XAE_RCW1_RX_MASK
;
740 axienet_iow(lp
, XAE_RCW1_OFFSET
, axienet_status
);
742 axienet_status
= axienet_ior(lp
, XAE_IP_OFFSET
);
743 if (axienet_status
& XAE_INT_RXRJECT_MASK
)
744 axienet_iow(lp
, XAE_IS_OFFSET
, XAE_INT_RXRJECT_MASK
);
745 axienet_iow(lp
, XAE_IE_OFFSET
, lp
->eth_irq
> 0 ?
746 XAE_INT_RECV_ERROR_MASK
: 0);
748 axienet_iow(lp
, XAE_FCC_OFFSET
, XAE_FCC_FCRX_MASK
);
750 /* Sync default options with HW but leave receiver and
751 * transmitter disabled.
753 axienet_setoptions(ndev
, lp
->options
&
754 ~(XAE_OPTION_TXEN
| XAE_OPTION_RXEN
));
755 axienet_set_mac_address(ndev
, NULL
);
756 axienet_set_multicast_list(ndev
);
757 axienet_setoptions(ndev
, lp
->options
);
759 netif_trans_update(ndev
);
765 * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
766 * @lp: Pointer to the axienet_local structure
767 * @first_bd: Index of first descriptor to clean up
768 * @nr_bds: Max number of descriptors to clean up
769 * @force: Whether to clean descriptors even if not complete
770 * @sizep: Pointer to a u32 filled with the total sum of all bytes
771 * in all cleaned-up descriptors. Ignored if NULL.
772 * @budget: NAPI budget (use 0 when not called from NAPI poll)
774 * Would either be called after a successful transmit operation, or after
775 * there was an error when setting up the chain.
776 * Returns the number of packets handled.
778 static int axienet_free_tx_chain(struct axienet_local
*lp
, u32 first_bd
,
779 int nr_bds
, bool force
, u32
*sizep
, int budget
)
781 struct axidma_bd
*cur_p
;
786 for (i
= 0; i
< nr_bds
; i
++) {
787 cur_p
= &lp
->tx_bd_v
[(first_bd
+ i
) % lp
->tx_bd_num
];
788 status
= cur_p
->status
;
790 /* If force is not specified, clean up only descriptors
791 * that have been completed by the MAC.
793 if (!force
&& !(status
& XAXIDMA_BD_STS_COMPLETE_MASK
))
796 /* Ensure we see complete descriptor update */
798 phys
= desc_get_phys_addr(lp
, cur_p
);
799 dma_unmap_single(lp
->dev
, phys
,
800 (cur_p
->cntrl
& XAXIDMA_BD_CTRL_LENGTH_MASK
),
803 if (cur_p
->skb
&& (status
& XAXIDMA_BD_STS_COMPLETE_MASK
)) {
804 napi_consume_skb(cur_p
->skb
, budget
);
813 /* ensure our transmit path and device don't prematurely see status cleared */
819 *sizep
+= status
& XAXIDMA_BD_STS_ACTUAL_LEN_MASK
;
824 if (lp
->tx_bd_ci
>= lp
->tx_bd_num
)
825 lp
->tx_bd_ci
%= lp
->tx_bd_num
;
832 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
833 * @lp: Pointer to the axienet_local structure
834 * @num_frag: The number of BDs to check for
836 * Return: 0, on success
837 * NETDEV_TX_BUSY, if any of the descriptors are not free
839 * This function is invoked before BDs are allocated and transmission starts.
840 * This function returns 0 if a BD or group of BDs can be allocated for
841 * transmission. If the BD or any of the BDs are not free the function
842 * returns a busy status.
844 static inline int axienet_check_tx_bd_space(struct axienet_local
*lp
,
847 struct axidma_bd
*cur_p
;
849 /* Ensure we see all descriptor updates from device or TX polling */
851 cur_p
= &lp
->tx_bd_v
[(READ_ONCE(lp
->tx_bd_tail
) + num_frag
) %
854 return NETDEV_TX_BUSY
;
859 * axienet_dma_tx_cb - DMA engine callback for TX channel.
860 * @data: Pointer to the axienet_local structure.
861 * @result: error reporting through dmaengine_result.
862 * This function is called by dmaengine driver for TX channel to notify
863 * that the transmit is done.
865 static void axienet_dma_tx_cb(void *data
, const struct dmaengine_result
*result
)
867 struct skbuf_dma_descriptor
*skbuf_dma
;
868 struct axienet_local
*lp
= data
;
869 struct netdev_queue
*txq
;
872 skbuf_dma
= axienet_get_tx_desc(lp
, lp
->tx_ring_tail
++);
873 len
= skbuf_dma
->skb
->len
;
874 txq
= skb_get_tx_queue(lp
->ndev
, skbuf_dma
->skb
);
875 u64_stats_update_begin(&lp
->tx_stat_sync
);
876 u64_stats_add(&lp
->tx_bytes
, len
);
877 u64_stats_add(&lp
->tx_packets
, 1);
878 u64_stats_update_end(&lp
->tx_stat_sync
);
879 dma_unmap_sg(lp
->dev
, skbuf_dma
->sgl
, skbuf_dma
->sg_len
, DMA_TO_DEVICE
);
880 dev_consume_skb_any(skbuf_dma
->skb
);
881 netif_txq_completed_wake(txq
, 1, len
,
882 CIRC_SPACE(lp
->tx_ring_head
, lp
->tx_ring_tail
, TX_BD_NUM_MAX
),
887 * axienet_start_xmit_dmaengine - Starts the transmission.
888 * @skb: sk_buff pointer that contains data to be Txed.
889 * @ndev: Pointer to net_device structure.
891 * Return: NETDEV_TX_OK on success or any non space errors.
892 * NETDEV_TX_BUSY when free element in TX skb ring buffer
895 * This function is invoked to initiate transmission. The
896 * function sets the skbs, register dma callback API and submit
897 * the dma transaction.
898 * Additionally if checksum offloading is supported,
899 * it populates AXI Stream Control fields with appropriate values.
902 axienet_start_xmit_dmaengine(struct sk_buff
*skb
, struct net_device
*ndev
)
904 struct dma_async_tx_descriptor
*dma_tx_desc
= NULL
;
905 struct axienet_local
*lp
= netdev_priv(ndev
);
906 u32 app_metadata
[DMA_NUM_APP_WORDS
] = {0};
907 struct skbuf_dma_descriptor
*skbuf_dma
;
908 struct dma_device
*dma_dev
;
909 struct netdev_queue
*txq
;
915 dma_dev
= lp
->tx_chan
->device
;
916 sg_len
= skb_shinfo(skb
)->nr_frags
+ 1;
917 if (CIRC_SPACE(lp
->tx_ring_head
, lp
->tx_ring_tail
, TX_BD_NUM_MAX
) <= 1) {
918 netif_stop_queue(ndev
);
920 netdev_warn(ndev
, "TX ring unexpectedly full\n");
921 return NETDEV_TX_BUSY
;
924 skbuf_dma
= axienet_get_tx_desc(lp
, lp
->tx_ring_head
);
926 goto xmit_error_drop_skb
;
929 sg_init_table(skbuf_dma
->sgl
, sg_len
);
930 ret
= skb_to_sgvec(skb
, skbuf_dma
->sgl
, 0, skb
->len
);
932 goto xmit_error_drop_skb
;
934 ret
= dma_map_sg(lp
->dev
, skbuf_dma
->sgl
, sg_len
, DMA_TO_DEVICE
);
936 goto xmit_error_drop_skb
;
938 /* Fill up app fields for checksum */
939 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
940 if (lp
->features
& XAE_FEATURE_FULL_TX_CSUM
) {
941 /* Tx Full Checksum Offload Enabled */
942 app_metadata
[0] |= 2;
943 } else if (lp
->features
& XAE_FEATURE_PARTIAL_TX_CSUM
) {
944 csum_start_off
= skb_transport_offset(skb
);
945 csum_index_off
= csum_start_off
+ skb
->csum_offset
;
946 /* Tx Partial Checksum Offload Enabled */
947 app_metadata
[0] |= 1;
948 app_metadata
[1] = (csum_start_off
<< 16) | csum_index_off
;
950 } else if (skb
->ip_summed
== CHECKSUM_UNNECESSARY
) {
951 app_metadata
[0] |= 2; /* Tx Full Checksum Offload Enabled */
954 dma_tx_desc
= dma_dev
->device_prep_slave_sg(lp
->tx_chan
, skbuf_dma
->sgl
,
955 sg_len
, DMA_MEM_TO_DEV
,
956 DMA_PREP_INTERRUPT
, (void *)app_metadata
);
958 goto xmit_error_unmap_sg
;
960 skbuf_dma
->skb
= skb
;
961 skbuf_dma
->sg_len
= sg_len
;
962 dma_tx_desc
->callback_param
= lp
;
963 dma_tx_desc
->callback_result
= axienet_dma_tx_cb
;
964 txq
= skb_get_tx_queue(lp
->ndev
, skb
);
965 netdev_tx_sent_queue(txq
, skb
->len
);
966 netif_txq_maybe_stop(txq
, CIRC_SPACE(lp
->tx_ring_head
, lp
->tx_ring_tail
, TX_BD_NUM_MAX
),
969 dmaengine_submit(dma_tx_desc
);
970 dma_async_issue_pending(lp
->tx_chan
);
974 dma_unmap_sg(lp
->dev
, skbuf_dma
->sgl
, sg_len
, DMA_TO_DEVICE
);
976 dev_kfree_skb_any(skb
);
981 * axienet_tx_poll - Invoked once a transmit is completed by the
982 * Axi DMA Tx channel.
983 * @napi: Pointer to NAPI structure.
984 * @budget: Max number of TX packets to process.
986 * Return: Number of TX packets processed.
988 * This function is invoked from the NAPI processing to notify the completion
989 * of transmit operation. It clears fields in the corresponding Tx BDs and
990 * unmaps the corresponding buffer so that CPU can regain ownership of the
991 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
994 static int axienet_tx_poll(struct napi_struct
*napi
, int budget
)
996 struct axienet_local
*lp
= container_of(napi
, struct axienet_local
, napi_tx
);
997 struct net_device
*ndev
= lp
->ndev
;
1001 packets
= axienet_free_tx_chain(lp
, lp
->tx_bd_ci
, lp
->tx_bd_num
, false,
1005 netdev_completed_queue(ndev
, packets
, size
);
1006 u64_stats_update_begin(&lp
->tx_stat_sync
);
1007 u64_stats_add(&lp
->tx_packets
, packets
);
1008 u64_stats_add(&lp
->tx_bytes
, size
);
1009 u64_stats_update_end(&lp
->tx_stat_sync
);
1011 /* Matches barrier in axienet_start_xmit */
1014 if (!axienet_check_tx_bd_space(lp
, MAX_SKB_FRAGS
+ 1))
1015 netif_wake_queue(ndev
);
1018 if (packets
< budget
&& napi_complete_done(napi
, packets
)) {
1019 /* Re-enable TX completion interrupts. This should
1020 * cause an immediate interrupt if any TX packets are
1023 spin_lock_irq(&lp
->tx_cr_lock
);
1024 axienet_dma_out32(lp
, XAXIDMA_TX_CR_OFFSET
, lp
->tx_dma_cr
);
1025 spin_unlock_irq(&lp
->tx_cr_lock
);
1031 * axienet_start_xmit - Starts the transmission.
1032 * @skb: sk_buff pointer that contains data to be Txed.
1033 * @ndev: Pointer to net_device structure.
1035 * Return: NETDEV_TX_OK, on success
1036 * NETDEV_TX_BUSY, if any of the descriptors are not free
1038 * This function is invoked from upper layers to initiate transmission. The
1039 * function uses the next available free BDs and populates their fields to
1040 * start the transmission. Additionally if checksum offloading is supported,
1041 * it populates AXI Stream Control fields with appropriate values.
1044 axienet_start_xmit(struct sk_buff
*skb
, struct net_device
*ndev
)
1051 dma_addr_t tail_p
, phys
;
1052 u32 orig_tail_ptr
, new_tail_ptr
;
1053 struct axienet_local
*lp
= netdev_priv(ndev
);
1054 struct axidma_bd
*cur_p
;
1056 orig_tail_ptr
= lp
->tx_bd_tail
;
1057 new_tail_ptr
= orig_tail_ptr
;
1059 num_frag
= skb_shinfo(skb
)->nr_frags
;
1060 cur_p
= &lp
->tx_bd_v
[orig_tail_ptr
];
1062 if (axienet_check_tx_bd_space(lp
, num_frag
+ 1)) {
1063 /* Should not happen as last start_xmit call should have
1064 * checked for sufficient space and queue should only be
1065 * woken when sufficient space is available.
1067 netif_stop_queue(ndev
);
1068 if (net_ratelimit())
1069 netdev_warn(ndev
, "TX ring unexpectedly full\n");
1070 return NETDEV_TX_BUSY
;
1073 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
1074 if (lp
->features
& XAE_FEATURE_FULL_TX_CSUM
) {
1075 /* Tx Full Checksum Offload Enabled */
1077 } else if (lp
->features
& XAE_FEATURE_PARTIAL_TX_CSUM
) {
1078 csum_start_off
= skb_transport_offset(skb
);
1079 csum_index_off
= csum_start_off
+ skb
->csum_offset
;
1080 /* Tx Partial Checksum Offload Enabled */
1082 cur_p
->app1
= (csum_start_off
<< 16) | csum_index_off
;
1084 } else if (skb
->ip_summed
== CHECKSUM_UNNECESSARY
) {
1085 cur_p
->app0
|= 2; /* Tx Full Checksum Offload Enabled */
1088 phys
= dma_map_single(lp
->dev
, skb
->data
,
1089 skb_headlen(skb
), DMA_TO_DEVICE
);
1090 if (unlikely(dma_mapping_error(lp
->dev
, phys
))) {
1091 if (net_ratelimit())
1092 netdev_err(ndev
, "TX DMA mapping error\n");
1093 ndev
->stats
.tx_dropped
++;
1094 dev_kfree_skb_any(skb
);
1095 return NETDEV_TX_OK
;
1097 desc_set_phys_addr(lp
, phys
, cur_p
);
1098 cur_p
->cntrl
= skb_headlen(skb
) | XAXIDMA_BD_CTRL_TXSOF_MASK
;
1100 for (ii
= 0; ii
< num_frag
; ii
++) {
1101 if (++new_tail_ptr
>= lp
->tx_bd_num
)
1103 cur_p
= &lp
->tx_bd_v
[new_tail_ptr
];
1104 frag
= &skb_shinfo(skb
)->frags
[ii
];
1105 phys
= dma_map_single(lp
->dev
,
1106 skb_frag_address(frag
),
1107 skb_frag_size(frag
),
1109 if (unlikely(dma_mapping_error(lp
->dev
, phys
))) {
1110 if (net_ratelimit())
1111 netdev_err(ndev
, "TX DMA mapping error\n");
1112 ndev
->stats
.tx_dropped
++;
1113 axienet_free_tx_chain(lp
, orig_tail_ptr
, ii
+ 1,
1115 dev_kfree_skb_any(skb
);
1116 return NETDEV_TX_OK
;
1118 desc_set_phys_addr(lp
, phys
, cur_p
);
1119 cur_p
->cntrl
= skb_frag_size(frag
);
1122 cur_p
->cntrl
|= XAXIDMA_BD_CTRL_TXEOF_MASK
;
1125 tail_p
= lp
->tx_bd_p
+ sizeof(*lp
->tx_bd_v
) * new_tail_ptr
;
1126 if (++new_tail_ptr
>= lp
->tx_bd_num
)
1128 WRITE_ONCE(lp
->tx_bd_tail
, new_tail_ptr
);
1129 netdev_sent_queue(ndev
, skb
->len
);
1131 /* Start the transfer */
1132 axienet_dma_out_addr(lp
, XAXIDMA_TX_TDESC_OFFSET
, tail_p
);
1134 /* Stop queue if next transmit may not have space */
1135 if (axienet_check_tx_bd_space(lp
, MAX_SKB_FRAGS
+ 1)) {
1136 netif_stop_queue(ndev
);
1138 /* Matches barrier in axienet_tx_poll */
1141 /* Space might have just been freed - check again */
1142 if (!axienet_check_tx_bd_space(lp
, MAX_SKB_FRAGS
+ 1))
1143 netif_wake_queue(ndev
);
1146 return NETDEV_TX_OK
;
1150 * axienet_dma_rx_cb - DMA engine callback for RX channel.
1151 * @data: Pointer to the skbuf_dma_descriptor structure.
1152 * @result: error reporting through dmaengine_result.
1153 * This function is called by dmaengine driver for RX channel to notify
1154 * that the packet is received.
1156 static void axienet_dma_rx_cb(void *data
, const struct dmaengine_result
*result
)
1158 struct skbuf_dma_descriptor
*skbuf_dma
;
1159 size_t meta_len
, meta_max_len
, rx_len
;
1160 struct axienet_local
*lp
= data
;
1161 struct sk_buff
*skb
;
1164 skbuf_dma
= axienet_get_rx_desc(lp
, lp
->rx_ring_tail
++);
1165 skb
= skbuf_dma
->skb
;
1166 app_metadata
= dmaengine_desc_get_metadata_ptr(skbuf_dma
->desc
, &meta_len
,
1168 dma_unmap_single(lp
->dev
, skbuf_dma
->dma_address
, lp
->max_frm_size
,
1170 /* TODO: Derive app word index programmatically */
1171 rx_len
= (app_metadata
[LEN_APP
] & 0xFFFF);
1172 skb_put(skb
, rx_len
);
1173 skb
->protocol
= eth_type_trans(skb
, lp
->ndev
);
1174 skb
->ip_summed
= CHECKSUM_NONE
;
1177 u64_stats_update_begin(&lp
->rx_stat_sync
);
1178 u64_stats_add(&lp
->rx_packets
, 1);
1179 u64_stats_add(&lp
->rx_bytes
, rx_len
);
1180 u64_stats_update_end(&lp
->rx_stat_sync
);
1181 axienet_rx_submit_desc(lp
->ndev
);
1182 dma_async_issue_pending(lp
->rx_chan
);
1186 * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
1187 * @napi: Pointer to NAPI structure.
1188 * @budget: Max number of RX packets to process.
1190 * Return: Number of RX packets processed.
1192 static int axienet_rx_poll(struct napi_struct
*napi
, int budget
)
1198 dma_addr_t tail_p
= 0;
1199 struct axidma_bd
*cur_p
;
1200 struct sk_buff
*skb
, *new_skb
;
1201 struct axienet_local
*lp
= container_of(napi
, struct axienet_local
, napi_rx
);
1203 cur_p
= &lp
->rx_bd_v
[lp
->rx_bd_ci
];
1205 while (packets
< budget
&& (cur_p
->status
& XAXIDMA_BD_STS_COMPLETE_MASK
)) {
1208 /* Ensure we see complete descriptor update */
1214 /* skb could be NULL if a previous pass already received the
1215 * packet for this slot in the ring, but failed to refill it
1216 * with a newly allocated buffer. In this case, don't try to
1220 length
= cur_p
->app4
& 0x0000FFFF;
1222 phys
= desc_get_phys_addr(lp
, cur_p
);
1223 dma_unmap_single(lp
->dev
, phys
, lp
->max_frm_size
,
1226 skb_put(skb
, length
);
1227 skb
->protocol
= eth_type_trans(skb
, lp
->ndev
);
1228 /*skb_checksum_none_assert(skb);*/
1229 skb
->ip_summed
= CHECKSUM_NONE
;
1231 /* if we're doing Rx csum offload, set it up */
1232 if (lp
->features
& XAE_FEATURE_FULL_RX_CSUM
) {
1233 csumstatus
= (cur_p
->app2
&
1234 XAE_FULL_CSUM_STATUS_MASK
) >> 3;
1235 if (csumstatus
== XAE_IP_TCP_CSUM_VALIDATED
||
1236 csumstatus
== XAE_IP_UDP_CSUM_VALIDATED
) {
1237 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1239 } else if (lp
->features
& XAE_FEATURE_PARTIAL_RX_CSUM
) {
1240 skb
->csum
= be32_to_cpu(cur_p
->app3
& 0xFFFF);
1241 skb
->ip_summed
= CHECKSUM_COMPLETE
;
1244 napi_gro_receive(napi
, skb
);
1250 new_skb
= napi_alloc_skb(napi
, lp
->max_frm_size
);
1254 phys
= dma_map_single(lp
->dev
, new_skb
->data
,
1257 if (unlikely(dma_mapping_error(lp
->dev
, phys
))) {
1258 if (net_ratelimit())
1259 netdev_err(lp
->ndev
, "RX DMA mapping error\n");
1260 dev_kfree_skb(new_skb
);
1263 desc_set_phys_addr(lp
, phys
, cur_p
);
1265 cur_p
->cntrl
= lp
->max_frm_size
;
1267 cur_p
->skb
= new_skb
;
1269 /* Only update tail_p to mark this slot as usable after it has
1270 * been successfully refilled.
1272 tail_p
= lp
->rx_bd_p
+ sizeof(*lp
->rx_bd_v
) * lp
->rx_bd_ci
;
1274 if (++lp
->rx_bd_ci
>= lp
->rx_bd_num
)
1276 cur_p
= &lp
->rx_bd_v
[lp
->rx_bd_ci
];
1279 u64_stats_update_begin(&lp
->rx_stat_sync
);
1280 u64_stats_add(&lp
->rx_packets
, packets
);
1281 u64_stats_add(&lp
->rx_bytes
, size
);
1282 u64_stats_update_end(&lp
->rx_stat_sync
);
1285 axienet_dma_out_addr(lp
, XAXIDMA_RX_TDESC_OFFSET
, tail_p
);
1287 if (packets
< budget
&& napi_complete_done(napi
, packets
)) {
1288 if (READ_ONCE(lp
->rx_dim_enabled
)) {
1289 struct dim_sample sample
= {
1290 .time
= ktime_get(),
1291 /* Safe because we are the only writer */
1292 .pkt_ctr
= u64_stats_read(&lp
->rx_packets
),
1293 .byte_ctr
= u64_stats_read(&lp
->rx_bytes
),
1294 .event_ctr
= READ_ONCE(lp
->rx_irqs
),
1297 net_dim(&lp
->rx_dim
, &sample
);
1300 /* Re-enable RX completion interrupts. This should
1301 * cause an immediate interrupt if any RX packets are
1304 spin_lock_irq(&lp
->rx_cr_lock
);
1305 axienet_dma_out32(lp
, XAXIDMA_RX_CR_OFFSET
, lp
->rx_dma_cr
);
1306 spin_unlock_irq(&lp
->rx_cr_lock
);
1312 * axienet_tx_irq - Tx Done Isr.
1314 * @_ndev: net_device pointer
1316 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
1318 * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
1321 static irqreturn_t
axienet_tx_irq(int irq
, void *_ndev
)
1323 unsigned int status
;
1324 struct net_device
*ndev
= _ndev
;
1325 struct axienet_local
*lp
= netdev_priv(ndev
);
1327 status
= axienet_dma_in32(lp
, XAXIDMA_TX_SR_OFFSET
);
1329 if (!(status
& XAXIDMA_IRQ_ALL_MASK
))
1332 axienet_dma_out32(lp
, XAXIDMA_TX_SR_OFFSET
, status
);
1334 if (unlikely(status
& XAXIDMA_IRQ_ERROR_MASK
)) {
1335 netdev_err(ndev
, "DMA Tx error 0x%x\n", status
);
1336 netdev_err(ndev
, "Current BD is at: 0x%x%08x\n",
1337 (lp
->tx_bd_v
[lp
->tx_bd_ci
]).phys_msb
,
1338 (lp
->tx_bd_v
[lp
->tx_bd_ci
]).phys
);
1339 schedule_work(&lp
->dma_err_task
);
1341 /* Disable further TX completion interrupts and schedule
1342 * NAPI to handle the completions.
1344 if (napi_schedule_prep(&lp
->napi_tx
)) {
1347 spin_lock(&lp
->tx_cr_lock
);
1349 cr
&= ~(XAXIDMA_IRQ_IOC_MASK
| XAXIDMA_IRQ_DELAY_MASK
);
1350 axienet_dma_out32(lp
, XAXIDMA_TX_CR_OFFSET
, cr
);
1351 spin_unlock(&lp
->tx_cr_lock
);
1352 __napi_schedule(&lp
->napi_tx
);
1360 * axienet_rx_irq - Rx Isr.
1362 * @_ndev: net_device pointer
1364 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
1366 * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
1369 static irqreturn_t
axienet_rx_irq(int irq
, void *_ndev
)
1371 unsigned int status
;
1372 struct net_device
*ndev
= _ndev
;
1373 struct axienet_local
*lp
= netdev_priv(ndev
);
1375 status
= axienet_dma_in32(lp
, XAXIDMA_RX_SR_OFFSET
);
1377 if (!(status
& XAXIDMA_IRQ_ALL_MASK
))
1380 axienet_dma_out32(lp
, XAXIDMA_RX_SR_OFFSET
, status
);
1382 if (unlikely(status
& XAXIDMA_IRQ_ERROR_MASK
)) {
1383 netdev_err(ndev
, "DMA Rx error 0x%x\n", status
);
1384 netdev_err(ndev
, "Current BD is at: 0x%x%08x\n",
1385 (lp
->rx_bd_v
[lp
->rx_bd_ci
]).phys_msb
,
1386 (lp
->rx_bd_v
[lp
->rx_bd_ci
]).phys
);
1387 schedule_work(&lp
->dma_err_task
);
1389 /* Disable further RX completion interrupts and schedule
1392 WRITE_ONCE(lp
->rx_irqs
, READ_ONCE(lp
->rx_irqs
) + 1);
1393 if (napi_schedule_prep(&lp
->napi_rx
)) {
1396 spin_lock(&lp
->rx_cr_lock
);
1398 cr
&= ~(XAXIDMA_IRQ_IOC_MASK
| XAXIDMA_IRQ_DELAY_MASK
);
1399 axienet_dma_out32(lp
, XAXIDMA_RX_CR_OFFSET
, cr
);
1400 spin_unlock(&lp
->rx_cr_lock
);
1402 __napi_schedule(&lp
->napi_rx
);
1410 * axienet_eth_irq - Ethernet core Isr.
1412 * @_ndev: net_device pointer
1414 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
1416 * Handle miscellaneous conditions indicated by Ethernet core IRQ.
1418 static irqreturn_t
axienet_eth_irq(int irq
, void *_ndev
)
1420 struct net_device
*ndev
= _ndev
;
1421 struct axienet_local
*lp
= netdev_priv(ndev
);
1422 unsigned int pending
;
1424 pending
= axienet_ior(lp
, XAE_IP_OFFSET
);
1428 if (pending
& XAE_INT_RXFIFOOVR_MASK
)
1429 ndev
->stats
.rx_missed_errors
++;
1431 if (pending
& XAE_INT_RXRJECT_MASK
)
1432 ndev
->stats
.rx_dropped
++;
1434 axienet_iow(lp
, XAE_IS_OFFSET
, pending
);
1438 static void axienet_dma_err_handler(struct work_struct
*work
);
1441 * axienet_rx_submit_desc - Submit the rx descriptors to dmaengine.
1442 * allocate skbuff, map the scatterlist and obtain a descriptor
1443 * and then add the callback information and submit descriptor.
1445 * @ndev: net_device pointer
1448 static void axienet_rx_submit_desc(struct net_device
*ndev
)
1450 struct dma_async_tx_descriptor
*dma_rx_desc
= NULL
;
1451 struct axienet_local
*lp
= netdev_priv(ndev
);
1452 struct skbuf_dma_descriptor
*skbuf_dma
;
1453 struct sk_buff
*skb
;
1456 skbuf_dma
= axienet_get_rx_desc(lp
, lp
->rx_ring_head
);
1461 skb
= netdev_alloc_skb(ndev
, lp
->max_frm_size
);
1465 sg_init_table(skbuf_dma
->sgl
, 1);
1466 addr
= dma_map_single(lp
->dev
, skb
->data
, lp
->max_frm_size
, DMA_FROM_DEVICE
);
1467 if (unlikely(dma_mapping_error(lp
->dev
, addr
))) {
1468 if (net_ratelimit())
1469 netdev_err(ndev
, "DMA mapping error\n");
1470 goto rx_submit_err_free_skb
;
1472 sg_dma_address(skbuf_dma
->sgl
) = addr
;
1473 sg_dma_len(skbuf_dma
->sgl
) = lp
->max_frm_size
;
1474 dma_rx_desc
= dmaengine_prep_slave_sg(lp
->rx_chan
, skbuf_dma
->sgl
,
1476 DMA_PREP_INTERRUPT
);
1478 goto rx_submit_err_unmap_skb
;
1480 skbuf_dma
->skb
= skb
;
1481 skbuf_dma
->dma_address
= sg_dma_address(skbuf_dma
->sgl
);
1482 skbuf_dma
->desc
= dma_rx_desc
;
1483 dma_rx_desc
->callback_param
= lp
;
1484 dma_rx_desc
->callback_result
= axienet_dma_rx_cb
;
1485 dmaengine_submit(dma_rx_desc
);
1489 rx_submit_err_unmap_skb
:
1490 dma_unmap_single(lp
->dev
, addr
, lp
->max_frm_size
, DMA_FROM_DEVICE
);
1491 rx_submit_err_free_skb
:
1496 * axienet_init_dmaengine - init the dmaengine code.
1497 * @ndev: Pointer to net_device structure
1499 * Return: 0, on success.
1500 * non-zero error value on failure
1502 * This is the dmaengine initialization code.
1504 static int axienet_init_dmaengine(struct net_device
*ndev
)
1506 struct axienet_local
*lp
= netdev_priv(ndev
);
1507 struct skbuf_dma_descriptor
*skbuf_dma
;
1510 lp
->tx_chan
= dma_request_chan(lp
->dev
, "tx_chan0");
1511 if (IS_ERR(lp
->tx_chan
)) {
1512 dev_err(lp
->dev
, "No Ethernet DMA (TX) channel found\n");
1513 return PTR_ERR(lp
->tx_chan
);
1516 lp
->rx_chan
= dma_request_chan(lp
->dev
, "rx_chan0");
1517 if (IS_ERR(lp
->rx_chan
)) {
1518 ret
= PTR_ERR(lp
->rx_chan
);
1519 dev_err(lp
->dev
, "No Ethernet DMA (RX) channel found\n");
1520 goto err_dma_release_tx
;
1523 lp
->tx_ring_tail
= 0;
1524 lp
->tx_ring_head
= 0;
1525 lp
->rx_ring_tail
= 0;
1526 lp
->rx_ring_head
= 0;
1527 lp
->tx_skb_ring
= kcalloc(TX_BD_NUM_MAX
, sizeof(*lp
->tx_skb_ring
),
1529 if (!lp
->tx_skb_ring
) {
1531 goto err_dma_release_rx
;
1533 for (i
= 0; i
< TX_BD_NUM_MAX
; i
++) {
1534 skbuf_dma
= kzalloc(sizeof(*skbuf_dma
), GFP_KERNEL
);
1537 goto err_free_tx_skb_ring
;
1539 lp
->tx_skb_ring
[i
] = skbuf_dma
;
1542 lp
->rx_skb_ring
= kcalloc(RX_BUF_NUM_DEFAULT
, sizeof(*lp
->rx_skb_ring
),
1544 if (!lp
->rx_skb_ring
) {
1546 goto err_free_tx_skb_ring
;
1548 for (i
= 0; i
< RX_BUF_NUM_DEFAULT
; i
++) {
1549 skbuf_dma
= kzalloc(sizeof(*skbuf_dma
), GFP_KERNEL
);
1552 goto err_free_rx_skb_ring
;
1554 lp
->rx_skb_ring
[i
] = skbuf_dma
;
1556 /* TODO: Instead of BD_NUM_DEFAULT use runtime support */
1557 for (i
= 0; i
< RX_BUF_NUM_DEFAULT
; i
++)
1558 axienet_rx_submit_desc(ndev
);
1559 dma_async_issue_pending(lp
->rx_chan
);
1563 err_free_rx_skb_ring
:
1564 for (i
= 0; i
< RX_BUF_NUM_DEFAULT
; i
++)
1565 kfree(lp
->rx_skb_ring
[i
]);
1566 kfree(lp
->rx_skb_ring
);
1567 err_free_tx_skb_ring
:
1568 for (i
= 0; i
< TX_BD_NUM_MAX
; i
++)
1569 kfree(lp
->tx_skb_ring
[i
]);
1570 kfree(lp
->tx_skb_ring
);
1572 dma_release_channel(lp
->rx_chan
);
1574 dma_release_channel(lp
->tx_chan
);
1579 * axienet_init_legacy_dma - init the dma legacy code.
1580 * @ndev: Pointer to net_device structure
1582 * Return: 0, on success.
1583 * non-zero error value on failure
1585 * This is the dma initialization code. It also allocates interrupt
1586 * service routines, enables the interrupt lines and ISR handling.
1589 static int axienet_init_legacy_dma(struct net_device
*ndev
)
1592 struct axienet_local
*lp
= netdev_priv(ndev
);
1594 /* Enable worker thread for Axi DMA error handling */
1595 lp
->stopping
= false;
1596 INIT_WORK(&lp
->dma_err_task
, axienet_dma_err_handler
);
1598 napi_enable(&lp
->napi_rx
);
1599 napi_enable(&lp
->napi_tx
);
1601 /* Enable interrupts for Axi DMA Tx */
1602 ret
= request_irq(lp
->tx_irq
, axienet_tx_irq
, IRQF_SHARED
,
1606 /* Enable interrupts for Axi DMA Rx */
1607 ret
= request_irq(lp
->rx_irq
, axienet_rx_irq
, IRQF_SHARED
,
1611 /* Enable interrupts for Axi Ethernet core (if defined) */
1612 if (lp
->eth_irq
> 0) {
1613 ret
= request_irq(lp
->eth_irq
, axienet_eth_irq
, IRQF_SHARED
,
1622 free_irq(lp
->rx_irq
, ndev
);
1624 free_irq(lp
->tx_irq
, ndev
);
1626 napi_disable(&lp
->napi_tx
);
1627 napi_disable(&lp
->napi_rx
);
1628 cancel_work_sync(&lp
->dma_err_task
);
1629 dev_err(lp
->dev
, "request_irq() failed\n");
1634 * axienet_open - Driver open routine.
1635 * @ndev: Pointer to net_device structure
1637 * Return: 0, on success.
1638 * non-zero error value on failure
1640 * This is the driver open routine. It calls phylink_start to start the
1642 * It also allocates interrupt service routines, enables the interrupt lines
1643 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
1644 * descriptors are initialized.
1646 static int axienet_open(struct net_device
*ndev
)
1649 struct axienet_local
*lp
= netdev_priv(ndev
);
1651 /* When we do an Axi Ethernet reset, it resets the complete core
1652 * including the MDIO. MDIO must be disabled before resetting.
1653 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
1655 axienet_lock_mii(lp
);
1656 ret
= axienet_device_reset(ndev
);
1657 axienet_unlock_mii(lp
);
1659 ret
= phylink_of_phy_connect(lp
->phylink
, lp
->dev
->of_node
, 0);
1661 dev_err(lp
->dev
, "phylink_of_phy_connect() failed: %d\n", ret
);
1665 phylink_start(lp
->phylink
);
1667 /* Start the statistics refresh work */
1668 schedule_delayed_work(&lp
->stats_work
, 0);
1670 if (lp
->use_dmaengine
) {
1671 /* Enable interrupts for Axi Ethernet core (if defined) */
1672 if (lp
->eth_irq
> 0) {
1673 ret
= request_irq(lp
->eth_irq
, axienet_eth_irq
, IRQF_SHARED
,
1679 ret
= axienet_init_dmaengine(ndev
);
1681 goto err_free_eth_irq
;
1683 ret
= axienet_init_legacy_dma(ndev
);
1691 if (lp
->eth_irq
> 0)
1692 free_irq(lp
->eth_irq
, ndev
);
1694 cancel_work_sync(&lp
->rx_dim
.work
);
1695 cancel_delayed_work_sync(&lp
->stats_work
);
1696 phylink_stop(lp
->phylink
);
1697 phylink_disconnect_phy(lp
->phylink
);
1702 * axienet_stop - Driver stop routine.
1703 * @ndev: Pointer to net_device structure
1705 * Return: 0, on success.
1707 * This is the driver stop routine. It calls phylink_disconnect to stop the PHY
1708 * device. It also removes the interrupt handlers and disables the interrupts.
1709 * The Axi DMA Tx/Rx BDs are released.
1711 static int axienet_stop(struct net_device
*ndev
)
1713 struct axienet_local
*lp
= netdev_priv(ndev
);
1716 if (!lp
->use_dmaengine
) {
1717 WRITE_ONCE(lp
->stopping
, true);
1718 flush_work(&lp
->dma_err_task
);
1720 napi_disable(&lp
->napi_tx
);
1721 napi_disable(&lp
->napi_rx
);
1724 cancel_work_sync(&lp
->rx_dim
.work
);
1725 cancel_delayed_work_sync(&lp
->stats_work
);
1727 phylink_stop(lp
->phylink
);
1728 phylink_disconnect_phy(lp
->phylink
);
1730 axienet_setoptions(ndev
, lp
->options
&
1731 ~(XAE_OPTION_TXEN
| XAE_OPTION_RXEN
));
1733 if (!lp
->use_dmaengine
) {
1734 axienet_dma_stop(lp
);
1735 cancel_work_sync(&lp
->dma_err_task
);
1736 free_irq(lp
->tx_irq
, ndev
);
1737 free_irq(lp
->rx_irq
, ndev
);
1738 axienet_dma_bd_release(ndev
);
1740 dmaengine_terminate_sync(lp
->tx_chan
);
1741 dmaengine_synchronize(lp
->tx_chan
);
1742 dmaengine_terminate_sync(lp
->rx_chan
);
1743 dmaengine_synchronize(lp
->rx_chan
);
1745 for (i
= 0; i
< TX_BD_NUM_MAX
; i
++)
1746 kfree(lp
->tx_skb_ring
[i
]);
1747 kfree(lp
->tx_skb_ring
);
1748 for (i
= 0; i
< RX_BUF_NUM_DEFAULT
; i
++)
1749 kfree(lp
->rx_skb_ring
[i
]);
1750 kfree(lp
->rx_skb_ring
);
1752 dma_release_channel(lp
->rx_chan
);
1753 dma_release_channel(lp
->tx_chan
);
1756 netdev_reset_queue(ndev
);
1757 axienet_iow(lp
, XAE_IE_OFFSET
, 0);
1759 if (lp
->eth_irq
> 0)
1760 free_irq(lp
->eth_irq
, ndev
);
1765 * axienet_change_mtu - Driver change mtu routine.
1766 * @ndev: Pointer to net_device structure
1767 * @new_mtu: New mtu value to be applied
1769 * Return: Always returns 0 (success).
1771 * This is the change mtu driver routine. It checks if the Axi Ethernet
1772 * hardware supports jumbo frames before changing the mtu. This can be
1773 * called only when the device is not up.
1775 static int axienet_change_mtu(struct net_device
*ndev
, int new_mtu
)
1777 struct axienet_local
*lp
= netdev_priv(ndev
);
1779 if (netif_running(ndev
))
1782 if ((new_mtu
+ VLAN_ETH_HLEN
+
1783 XAE_TRL_SIZE
) > lp
->rxmem
)
1786 WRITE_ONCE(ndev
->mtu
, new_mtu
);
1791 #ifdef CONFIG_NET_POLL_CONTROLLER
1793 * axienet_poll_controller - Axi Ethernet poll mechanism.
1794 * @ndev: Pointer to net_device structure
1796 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
1797 * to polling the ISRs and are enabled back after the polling is done.
1799 static void axienet_poll_controller(struct net_device
*ndev
)
1801 struct axienet_local
*lp
= netdev_priv(ndev
);
1803 disable_irq(lp
->tx_irq
);
1804 disable_irq(lp
->rx_irq
);
1805 axienet_rx_irq(lp
->tx_irq
, ndev
);
1806 axienet_tx_irq(lp
->rx_irq
, ndev
);
1807 enable_irq(lp
->tx_irq
);
1808 enable_irq(lp
->rx_irq
);
1812 static int axienet_ioctl(struct net_device
*dev
, struct ifreq
*rq
, int cmd
)
1814 struct axienet_local
*lp
= netdev_priv(dev
);
1816 if (!netif_running(dev
))
1819 return phylink_mii_ioctl(lp
->phylink
, rq
, cmd
);
1823 axienet_get_stats64(struct net_device
*dev
, struct rtnl_link_stats64
*stats
)
1825 struct axienet_local
*lp
= netdev_priv(dev
);
1828 netdev_stats_to_stats64(stats
, &dev
->stats
);
1831 start
= u64_stats_fetch_begin(&lp
->rx_stat_sync
);
1832 stats
->rx_packets
= u64_stats_read(&lp
->rx_packets
);
1833 stats
->rx_bytes
= u64_stats_read(&lp
->rx_bytes
);
1834 } while (u64_stats_fetch_retry(&lp
->rx_stat_sync
, start
));
1837 start
= u64_stats_fetch_begin(&lp
->tx_stat_sync
);
1838 stats
->tx_packets
= u64_stats_read(&lp
->tx_packets
);
1839 stats
->tx_bytes
= u64_stats_read(&lp
->tx_bytes
);
1840 } while (u64_stats_fetch_retry(&lp
->tx_stat_sync
, start
));
1842 if (!(lp
->features
& XAE_FEATURE_STATS
))
1846 start
= read_seqcount_begin(&lp
->hw_stats_seqcount
);
1847 stats
->rx_length_errors
=
1848 axienet_stat(lp
, STAT_RX_LENGTH_ERRORS
);
1849 stats
->rx_crc_errors
= axienet_stat(lp
, STAT_RX_FCS_ERRORS
);
1850 stats
->rx_frame_errors
=
1851 axienet_stat(lp
, STAT_RX_ALIGNMENT_ERRORS
);
1852 stats
->rx_errors
= axienet_stat(lp
, STAT_UNDERSIZE_FRAMES
) +
1853 axienet_stat(lp
, STAT_FRAGMENT_FRAMES
) +
1854 stats
->rx_length_errors
+
1855 stats
->rx_crc_errors
+
1856 stats
->rx_frame_errors
;
1857 stats
->multicast
= axienet_stat(lp
, STAT_RX_MULTICAST_FRAMES
);
1859 stats
->tx_aborted_errors
=
1860 axienet_stat(lp
, STAT_TX_EXCESS_COLLISIONS
);
1861 stats
->tx_fifo_errors
=
1862 axienet_stat(lp
, STAT_TX_UNDERRUN_ERRORS
);
1863 stats
->tx_window_errors
=
1864 axienet_stat(lp
, STAT_TX_LATE_COLLISIONS
);
1865 stats
->tx_errors
= axienet_stat(lp
, STAT_TX_EXCESS_DEFERRAL
) +
1866 stats
->tx_aborted_errors
+
1867 stats
->tx_fifo_errors
+
1868 stats
->tx_window_errors
;
1869 } while (read_seqcount_retry(&lp
->hw_stats_seqcount
, start
));
1872 static const struct net_device_ops axienet_netdev_ops
= {
1873 .ndo_open
= axienet_open
,
1874 .ndo_stop
= axienet_stop
,
1875 .ndo_start_xmit
= axienet_start_xmit
,
1876 .ndo_get_stats64
= axienet_get_stats64
,
1877 .ndo_change_mtu
= axienet_change_mtu
,
1878 .ndo_set_mac_address
= netdev_set_mac_address
,
1879 .ndo_validate_addr
= eth_validate_addr
,
1880 .ndo_eth_ioctl
= axienet_ioctl
,
1881 .ndo_set_rx_mode
= axienet_set_multicast_list
,
1882 #ifdef CONFIG_NET_POLL_CONTROLLER
1883 .ndo_poll_controller
= axienet_poll_controller
,
1887 static const struct net_device_ops axienet_netdev_dmaengine_ops
= {
1888 .ndo_open
= axienet_open
,
1889 .ndo_stop
= axienet_stop
,
1890 .ndo_start_xmit
= axienet_start_xmit_dmaengine
,
1891 .ndo_get_stats64
= axienet_get_stats64
,
1892 .ndo_change_mtu
= axienet_change_mtu
,
1893 .ndo_set_mac_address
= netdev_set_mac_address
,
1894 .ndo_validate_addr
= eth_validate_addr
,
1895 .ndo_eth_ioctl
= axienet_ioctl
,
1896 .ndo_set_rx_mode
= axienet_set_multicast_list
,
1900 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
1901 * @ndev: Pointer to net_device structure
1902 * @ed: Pointer to ethtool_drvinfo structure
1904 * This implements ethtool command for getting the driver information.
1905 * Issue "ethtool -i ethX" under linux prompt to execute this function.
1907 static void axienet_ethtools_get_drvinfo(struct net_device
*ndev
,
1908 struct ethtool_drvinfo
*ed
)
1910 strscpy(ed
->driver
, DRIVER_NAME
, sizeof(ed
->driver
));
1911 strscpy(ed
->version
, DRIVER_VERSION
, sizeof(ed
->version
));
1915 * axienet_ethtools_get_regs_len - Get the total regs length present in the
1917 * @ndev: Pointer to net_device structure
1919 * This implements ethtool command for getting the total register length
1922 * Return: the total regs length
1924 static int axienet_ethtools_get_regs_len(struct net_device
*ndev
)
1926 return sizeof(u32
) * AXIENET_REGS_N
;
/**
 * axienet_ethtools_get_regs - Dump the contents of all registers present
 *                             in AxiEthernet core.
 * @ndev:	Pointer to net_device structure
 * @regs:	Pointer to ethtool_regs structure
 * @ret:	Void pointer used to return the contents of the registers.
 *
 * This implements ethtool command for getting the Axi Ethernet register dump.
 * Issue "ethtool -d ethX" to execute this function.
 */
static void axienet_ethtools_get_regs(struct net_device *ndev,
                                      struct ethtool_regs *regs, void *ret)
{
        u32 *data = (u32 *)ret;
        size_t len = sizeof(u32) * AXIENET_REGS_N;
        struct axienet_local *lp = netdev_priv(ndev);

        regs->version = 0;
        regs->len = len;

        memset(data, 0, len);
        data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
        data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
        data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
        data[3] = axienet_ior(lp, XAE_IS_OFFSET);
        data[4] = axienet_ior(lp, XAE_IP_OFFSET);
        data[5] = axienet_ior(lp, XAE_IE_OFFSET);
        data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
        data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
        data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
        data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
        data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
        data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
        data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
        data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
        data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
        data[15] = axienet_ior(lp, XAE_TC_OFFSET);
        data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
        data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
        data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
        data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
        data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
        data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
        data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
        data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
        data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
        data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
        data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
        data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
        if (!lp->use_dmaengine) {
                data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
                data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
                data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
                data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
                data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
                data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
                data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
                data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
        }
}
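/*
 * Register dump layout: words 0-31 hold MAC/MDIO registers read through
 * axienet_ior() (words 23-26 stay zeroed), and words 32-39 hold the AXI DMA
 * control/status registers, which are only populated when the in-core DMA is
 * used rather than the dmaengine path. View with "ethtool -d ethX" (ethX is
 * a placeholder interface name).
 */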
static void
axienet_ethtools_get_ringparam(struct net_device *ndev,
                               struct ethtool_ringparam *ering,
                               struct kernel_ethtool_ringparam *kernel_ering,
                               struct netlink_ext_ack *extack)
{
        struct axienet_local *lp = netdev_priv(ndev);

        ering->rx_max_pending = RX_BD_NUM_MAX;
        ering->rx_mini_max_pending = 0;
        ering->rx_jumbo_max_pending = 0;
        ering->tx_max_pending = TX_BD_NUM_MAX;
        ering->rx_pending = lp->rx_bd_num;
        ering->rx_mini_pending = 0;
        ering->rx_jumbo_pending = 0;
        ering->tx_pending = lp->tx_bd_num;
}
static int
axienet_ethtools_set_ringparam(struct net_device *ndev,
                               struct ethtool_ringparam *ering,
                               struct kernel_ethtool_ringparam *kernel_ering,
                               struct netlink_ext_ack *extack)
{
        struct axienet_local *lp = netdev_priv(ndev);

        if (ering->rx_pending > RX_BD_NUM_MAX ||
            ering->rx_mini_pending ||
            ering->rx_jumbo_pending ||
            ering->tx_pending < TX_BD_NUM_MIN ||
            ering->tx_pending > TX_BD_NUM_MAX)
                return -EINVAL;

        if (netif_running(ndev))
                return -EBUSY;

        lp->rx_bd_num = ering->rx_pending;
        lp->tx_bd_num = ering->tx_pending;
        return 0;
}
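/*
 * Example usage (illustrative; ethX is a placeholder): bring the interface
 * down, run "ethtool -G ethX rx 512 tx 256", then bring it back up. The new
 * ring sizes are bounded by TX_BD_NUM_MIN/TX_BD_NUM_MAX and RX_BD_NUM_MAX and
 * only take effect when the descriptor rings are reallocated at open time.
 */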
/**
 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
 *                                   Tx and Rx paths.
 * @ndev:	Pointer to net_device structure
 * @epauseparm:	Pointer to ethtool_pauseparam structure.
 *
 * This implements ethtool command for getting axi ethernet pause frame
 * setting. Issue "ethtool -a ethX" to execute this function.
 */
static void
axienet_ethtools_get_pauseparam(struct net_device *ndev,
                                struct ethtool_pauseparam *epauseparm)
{
        struct axienet_local *lp = netdev_priv(ndev);

        phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
}
/**
 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
 *                                   settings.
 * @ndev:	Pointer to net_device structure
 * @epauseparm:	Pointer to ethtool_pauseparam structure
 *
 * This implements ethtool command for enabling flow control on Rx and Tx
 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
 * function.
 *
 * Return: 0 on success, -EFAULT if device is running
 */
static int
axienet_ethtools_set_pauseparam(struct net_device *ndev,
                                struct ethtool_pauseparam *epauseparm)
{
        struct axienet_local *lp = netdev_priv(ndev);

        return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
}
/**
 * axienet_update_coalesce_rx() - Set RX CR
 * @lp: Device private data
 * @cr: Value to write to the RX CR
 * @mask: Bits to set from @cr
 */
static void axienet_update_coalesce_rx(struct axienet_local *lp, u32 cr,
                                       u32 mask)
{
        spin_lock_irq(&lp->rx_cr_lock);
        lp->rx_dma_cr &= ~mask;
        lp->rx_dma_cr |= cr;
        /* If DMA isn't started, then the settings will be applied the next
         * time dma_start() is called.
         */
        if (lp->rx_dma_started) {
                u32 reg = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);

                /* Don't enable IRQs if they are disabled by NAPI */
                if (reg & XAXIDMA_IRQ_ALL_MASK)
                        cr = lp->rx_dma_cr;
                else
                        cr = lp->rx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK;
                axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
        }
        spin_unlock_irq(&lp->rx_cr_lock);
}
/**
 * axienet_dim_coalesce_count_rx() - RX coalesce count for DIM
 * @lp: Device private data
 */
static u32 axienet_dim_coalesce_count_rx(struct axienet_local *lp)
{
        return min(1 << (lp->rx_dim.profile_ix << 1), 255);
}
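/*
 * Worked example of the mapping above, min(1 << (profile_ix << 1), 255):
 * profile_ix 0 -> 1 frame, 1 -> 4, 2 -> 16, 3 -> 64, 4 and above -> 255
 * (capped).
 */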
/**
 * axienet_rx_dim_work() - Adjust RX DIM settings
 * @work: The work struct
 */
static void axienet_rx_dim_work(struct work_struct *work)
{
        struct axienet_local *lp =
                container_of(work, struct axienet_local, rx_dim.work);
        u32 cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp), 0);
        u32 mask = XAXIDMA_COALESCE_MASK | XAXIDMA_IRQ_IOC_MASK |
                   XAXIDMA_IRQ_ERROR_MASK;

        axienet_update_coalesce_rx(lp, cr, mask);
        lp->rx_dim.state = DIM_START_MEASURE;
}
/**
 * axienet_update_coalesce_tx() - Set TX CR
 * @lp: Device private data
 * @cr: Value to write to the TX CR
 * @mask: Bits to set from @cr
 */
static void axienet_update_coalesce_tx(struct axienet_local *lp, u32 cr,
                                       u32 mask)
{
        spin_lock_irq(&lp->tx_cr_lock);
        lp->tx_dma_cr &= ~mask;
        lp->tx_dma_cr |= cr;
        /* If DMA isn't started, then the settings will be applied the next
         * time dma_start() is called.
         */
        if (lp->tx_dma_started) {
                u32 reg = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);

                /* Don't enable IRQs if they are disabled by NAPI */
                if (reg & XAXIDMA_IRQ_ALL_MASK)
                        cr = lp->tx_dma_cr;
                else
                        cr = lp->tx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK;
                axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
        }
        spin_unlock_irq(&lp->tx_cr_lock);
}
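/*
 * Both coalesce update helpers above modify the cached CR value under the
 * corresponding *_cr_lock and only touch the live DMA control register while
 * the engine is running, preserving the current IRQ-enable bits so they never
 * re-enable interrupts that NAPI has masked.
 */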
/**
 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
 * @ndev:	Pointer to net_device structure
 * @ecoalesce:	Pointer to ethtool_coalesce structure
 * @kernel_coal: ethtool CQE mode setting structure
 * @extack:	extack for reporting error messages
 *
 * This implements ethtool command for getting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
 * execute this function.
 *
 * Return: 0 always
 */
static int
axienet_ethtools_get_coalesce(struct net_device *ndev,
                              struct ethtool_coalesce *ecoalesce,
                              struct kernel_ethtool_coalesce *kernel_coal,
                              struct netlink_ext_ack *extack)
{
        struct axienet_local *lp = netdev_priv(ndev);
        u32 cr;

        ecoalesce->use_adaptive_rx_coalesce = lp->rx_dim_enabled;

        spin_lock_irq(&lp->rx_cr_lock);
        cr = lp->rx_dma_cr;
        spin_unlock_irq(&lp->rx_cr_lock);
        axienet_coalesce_params(lp, cr,
                                &ecoalesce->rx_max_coalesced_frames,
                                &ecoalesce->rx_coalesce_usecs);

        spin_lock_irq(&lp->tx_cr_lock);
        cr = lp->tx_dma_cr;
        spin_unlock_irq(&lp->tx_cr_lock);
        axienet_coalesce_params(lp, cr,
                                &ecoalesce->tx_max_coalesced_frames,
                                &ecoalesce->tx_coalesce_usecs);
        return 0;
}
/**
 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
 * @ndev:	Pointer to net_device structure
 * @ecoalesce:	Pointer to ethtool_coalesce structure
 * @kernel_coal: ethtool CQE mode setting structure
 * @extack:	extack for reporting error messages
 *
 * This implements ethtool command for setting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
 * prompt to execute this function.
 *
 * Return: 0, on success, Non-zero error value on failure.
 */
static int
axienet_ethtools_set_coalesce(struct net_device *ndev,
                              struct ethtool_coalesce *ecoalesce,
                              struct kernel_ethtool_coalesce *kernel_coal,
                              struct netlink_ext_ack *extack)
{
        struct axienet_local *lp = netdev_priv(ndev);
        bool new_dim = ecoalesce->use_adaptive_rx_coalesce;
        bool old_dim = lp->rx_dim_enabled;
        u32 cr, mask = ~XAXIDMA_CR_RUNSTOP_MASK;

        if (ecoalesce->rx_max_coalesced_frames > 255 ||
            ecoalesce->tx_max_coalesced_frames > 255) {
                NL_SET_ERR_MSG(extack, "frames must be less than 256");
                return -EINVAL;
        }

        if (!ecoalesce->rx_max_coalesced_frames ||
            !ecoalesce->tx_max_coalesced_frames) {
                NL_SET_ERR_MSG(extack, "frames must be non-zero");
                return -EINVAL;
        }

        if (((ecoalesce->rx_max_coalesced_frames > 1 || new_dim) &&
             !ecoalesce->rx_coalesce_usecs) ||
            (ecoalesce->tx_max_coalesced_frames > 1 &&
             !ecoalesce->tx_coalesce_usecs)) {
                NL_SET_ERR_MSG(extack,
                               "usecs must be non-zero when frames is greater than one");
                return -EINVAL;
        }

        if (new_dim && !old_dim) {
                cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp),
                                     ecoalesce->rx_coalesce_usecs);
        } else if (!new_dim) {
                if (old_dim) {
                        WRITE_ONCE(lp->rx_dim_enabled, false);
                        napi_synchronize(&lp->napi_rx);
                        flush_work(&lp->rx_dim.work);
                }

                cr = axienet_calc_cr(lp, ecoalesce->rx_max_coalesced_frames,
                                     ecoalesce->rx_coalesce_usecs);
        } else {
                /* Dummy value for count just to calculate timer */
                cr = axienet_calc_cr(lp, 2, ecoalesce->rx_coalesce_usecs);
                mask = XAXIDMA_DELAY_MASK | XAXIDMA_IRQ_DELAY_MASK;
        }

        axienet_update_coalesce_rx(lp, cr, mask);
        if (new_dim && !old_dim)
                WRITE_ONCE(lp->rx_dim_enabled, true);

        cr = axienet_calc_cr(lp, ecoalesce->tx_max_coalesced_frames,
                             ecoalesce->tx_coalesce_usecs);
        axienet_update_coalesce_tx(lp, cr, ~XAXIDMA_CR_RUNSTOP_MASK);
        return 0;
}
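/*
 * Example usage (illustrative; ethX is a placeholder):
 *   ethtool -C ethX adaptive-rx on
 *   ethtool -C ethX adaptive-rx off rx-frames 8 rx-usecs 50
 *   ethtool -C ethX tx-frames 32 tx-usecs 100
 * Only MAX_FRAMES, USECS and USE_ADAPTIVE_RX are accepted, as advertised by
 * supported_coalesce_params in axienet_ethtool_ops below.
 */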
static int
axienet_ethtools_get_link_ksettings(struct net_device *ndev,
                                    struct ethtool_link_ksettings *cmd)
{
        struct axienet_local *lp = netdev_priv(ndev);

        return phylink_ethtool_ksettings_get(lp->phylink, cmd);
}

static int
axienet_ethtools_set_link_ksettings(struct net_device *ndev,
                                    const struct ethtool_link_ksettings *cmd)
{
        struct axienet_local *lp = netdev_priv(ndev);

        return phylink_ethtool_ksettings_set(lp->phylink, cmd);
}

static int axienet_ethtools_nway_reset(struct net_device *dev)
{
        struct axienet_local *lp = netdev_priv(dev);

        return phylink_ethtool_nway_reset(lp->phylink);
}
static void axienet_ethtools_get_ethtool_stats(struct net_device *dev,
                                               struct ethtool_stats *stats,
                                               u64 *data)
{
        struct axienet_local *lp = netdev_priv(dev);
        unsigned int start;

        do {
                start = read_seqcount_begin(&lp->hw_stats_seqcount);
                data[0] = axienet_stat(lp, STAT_RX_BYTES);
                data[1] = axienet_stat(lp, STAT_TX_BYTES);
                data[2] = axienet_stat(lp, STAT_RX_VLAN_FRAMES);
                data[3] = axienet_stat(lp, STAT_TX_VLAN_FRAMES);
                data[6] = axienet_stat(lp, STAT_TX_PFC_FRAMES);
                data[7] = axienet_stat(lp, STAT_RX_PFC_FRAMES);
                data[8] = axienet_stat(lp, STAT_USER_DEFINED0);
                data[9] = axienet_stat(lp, STAT_USER_DEFINED1);
                data[10] = axienet_stat(lp, STAT_USER_DEFINED2);
        } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
}
static const char axienet_ethtool_stats_strings[][ETH_GSTRING_LEN] = {
        "Received bytes",
        "Transmitted bytes",
        "RX Good VLAN Tagged Frames",
        "TX Good VLAN Tagged Frames",
        "TX Good PFC Frames",
        "RX Good PFC Frames",
        "User Defined Counter 0",
        "User Defined Counter 1",
        "User Defined Counter 2",
};
static void axienet_ethtools_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        switch (stringset) {
        case ETH_SS_STATS:
                memcpy(data, axienet_ethtool_stats_strings,
                       sizeof(axienet_ethtool_stats_strings));
                break;
        }
}
static int axienet_ethtools_get_sset_count(struct net_device *dev, int sset)
{
        struct axienet_local *lp = netdev_priv(dev);

        switch (sset) {
        case ETH_SS_STATS:
                if (lp->features & XAE_FEATURE_STATS)
                        return ARRAY_SIZE(axienet_ethtool_stats_strings);
                fallthrough;
        default:
                return -EOPNOTSUPP;
        }
}
static void
axienet_ethtools_get_pause_stats(struct net_device *dev,
                                 struct ethtool_pause_stats *pause_stats)
{
        struct axienet_local *lp = netdev_priv(dev);
        unsigned int start;

        if (!(lp->features & XAE_FEATURE_STATS))
                return;

        do {
                start = read_seqcount_begin(&lp->hw_stats_seqcount);
                pause_stats->tx_pause_frames =
                        axienet_stat(lp, STAT_TX_PAUSE_FRAMES);
                pause_stats->rx_pause_frames =
                        axienet_stat(lp, STAT_RX_PAUSE_FRAMES);
        } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
}
static void
axienet_ethtool_get_eth_mac_stats(struct net_device *dev,
                                  struct ethtool_eth_mac_stats *mac_stats)
{
        struct axienet_local *lp = netdev_priv(dev);
        unsigned int start;

        if (!(lp->features & XAE_FEATURE_STATS))
                return;

        do {
                start = read_seqcount_begin(&lp->hw_stats_seqcount);
                mac_stats->FramesTransmittedOK =
                        axienet_stat(lp, STAT_TX_GOOD_FRAMES);
                mac_stats->SingleCollisionFrames =
                        axienet_stat(lp, STAT_TX_SINGLE_COLLISION_FRAMES);
                mac_stats->MultipleCollisionFrames =
                        axienet_stat(lp, STAT_TX_MULTIPLE_COLLISION_FRAMES);
                mac_stats->FramesReceivedOK =
                        axienet_stat(lp, STAT_RX_GOOD_FRAMES);
                mac_stats->FrameCheckSequenceErrors =
                        axienet_stat(lp, STAT_RX_FCS_ERRORS);
                mac_stats->AlignmentErrors =
                        axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
                mac_stats->FramesWithDeferredXmissions =
                        axienet_stat(lp, STAT_TX_DEFERRED_FRAMES);
                mac_stats->LateCollisions =
                        axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
                mac_stats->FramesAbortedDueToXSColls =
                        axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
                mac_stats->MulticastFramesXmittedOK =
                        axienet_stat(lp, STAT_TX_MULTICAST_FRAMES);
                mac_stats->BroadcastFramesXmittedOK =
                        axienet_stat(lp, STAT_TX_BROADCAST_FRAMES);
                mac_stats->FramesWithExcessiveDeferral =
                        axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL);
                mac_stats->MulticastFramesReceivedOK =
                        axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
                mac_stats->BroadcastFramesReceivedOK =
                        axienet_stat(lp, STAT_RX_BROADCAST_FRAMES);
                mac_stats->InRangeLengthErrors =
                        axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
        } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
}
static void
axienet_ethtool_get_eth_ctrl_stats(struct net_device *dev,
                                   struct ethtool_eth_ctrl_stats *ctrl_stats)
{
        struct axienet_local *lp = netdev_priv(dev);
        unsigned int start;

        if (!(lp->features & XAE_FEATURE_STATS))
                return;

        do {
                start = read_seqcount_begin(&lp->hw_stats_seqcount);
                ctrl_stats->MACControlFramesTransmitted =
                        axienet_stat(lp, STAT_TX_CONTROL_FRAMES);
                ctrl_stats->MACControlFramesReceived =
                        axienet_stat(lp, STAT_RX_CONTROL_FRAMES);
                ctrl_stats->UnsupportedOpcodesReceived =
                        axienet_stat(lp, STAT_RX_CONTROL_OPCODE_ERRORS);
        } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
}
static const struct ethtool_rmon_hist_range axienet_rmon_ranges[] = {
        {   64,    64 },
        {   65,   127 },
        {  128,   255 },
        {  256,   511 },
        {  512,  1023 },
        { 1024,  1518 },
        { 1519, 16384 },
        { },
};
static void
axienet_ethtool_get_rmon_stats(struct net_device *dev,
                               struct ethtool_rmon_stats *rmon_stats,
                               const struct ethtool_rmon_hist_range **ranges)
{
        struct axienet_local *lp = netdev_priv(dev);
        unsigned int start;

        if (!(lp->features & XAE_FEATURE_STATS))
                return;

        do {
                start = read_seqcount_begin(&lp->hw_stats_seqcount);
                rmon_stats->undersize_pkts =
                        axienet_stat(lp, STAT_UNDERSIZE_FRAMES);
                rmon_stats->oversize_pkts =
                        axienet_stat(lp, STAT_RX_OVERSIZE_FRAMES);
                rmon_stats->fragments =
                        axienet_stat(lp, STAT_FRAGMENT_FRAMES);

                rmon_stats->hist[0] =
                        axienet_stat(lp, STAT_RX_64_BYTE_FRAMES);
                rmon_stats->hist[1] =
                        axienet_stat(lp, STAT_RX_65_127_BYTE_FRAMES);
                rmon_stats->hist[2] =
                        axienet_stat(lp, STAT_RX_128_255_BYTE_FRAMES);
                rmon_stats->hist[3] =
                        axienet_stat(lp, STAT_RX_256_511_BYTE_FRAMES);
                rmon_stats->hist[4] =
                        axienet_stat(lp, STAT_RX_512_1023_BYTE_FRAMES);
                rmon_stats->hist[5] =
                        axienet_stat(lp, STAT_RX_1024_MAX_BYTE_FRAMES);
                rmon_stats->hist[6] =
                        rmon_stats->oversize_pkts;

                rmon_stats->hist_tx[0] =
                        axienet_stat(lp, STAT_TX_64_BYTE_FRAMES);
                rmon_stats->hist_tx[1] =
                        axienet_stat(lp, STAT_TX_65_127_BYTE_FRAMES);
                rmon_stats->hist_tx[2] =
                        axienet_stat(lp, STAT_TX_128_255_BYTE_FRAMES);
                rmon_stats->hist_tx[3] =
                        axienet_stat(lp, STAT_TX_256_511_BYTE_FRAMES);
                rmon_stats->hist_tx[4] =
                        axienet_stat(lp, STAT_TX_512_1023_BYTE_FRAMES);
                rmon_stats->hist_tx[5] =
                        axienet_stat(lp, STAT_TX_1024_MAX_BYTE_FRAMES);
                rmon_stats->hist_tx[6] =
                        axienet_stat(lp, STAT_TX_OVERSIZE_FRAMES);
        } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));

        *ranges = axienet_rmon_ranges;
}
static const struct ethtool_ops axienet_ethtool_ops = {
        .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
                                     ETHTOOL_COALESCE_USECS |
                                     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
        .get_drvinfo    = axienet_ethtools_get_drvinfo,
        .get_regs_len   = axienet_ethtools_get_regs_len,
        .get_regs       = axienet_ethtools_get_regs,
        .get_link       = ethtool_op_get_link,
        .get_ringparam  = axienet_ethtools_get_ringparam,
        .set_ringparam  = axienet_ethtools_set_ringparam,
        .get_pauseparam = axienet_ethtools_get_pauseparam,
        .set_pauseparam = axienet_ethtools_set_pauseparam,
        .get_coalesce   = axienet_ethtools_get_coalesce,
        .set_coalesce   = axienet_ethtools_set_coalesce,
        .get_link_ksettings = axienet_ethtools_get_link_ksettings,
        .set_link_ksettings = axienet_ethtools_set_link_ksettings,
        .nway_reset     = axienet_ethtools_nway_reset,
        .get_ethtool_stats = axienet_ethtools_get_ethtool_stats,
        .get_strings    = axienet_ethtools_get_strings,
        .get_sset_count = axienet_ethtools_get_sset_count,
        .get_pause_stats = axienet_ethtools_get_pause_stats,
        .get_eth_mac_stats = axienet_ethtool_get_eth_mac_stats,
        .get_eth_ctrl_stats = axienet_ethtool_get_eth_ctrl_stats,
        .get_rmon_stats = axienet_ethtool_get_rmon_stats,
};
static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
{
        return container_of(pcs, struct axienet_local, pcs);
}

static void axienet_pcs_get_state(struct phylink_pcs *pcs,
                                  unsigned int neg_mode,
                                  struct phylink_link_state *state)
{
        struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;

        phylink_mii_c22_pcs_get_state(pcs_phy, neg_mode, state);
}

static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
{
        struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;

        phylink_mii_c22_pcs_an_restart(pcs_phy);
}
static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
                              phy_interface_t interface,
                              const unsigned long *advertising,
                              bool permit_pause_to_mac)
{
        struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
        struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
        struct axienet_local *lp = netdev_priv(ndev);
        int ret;

        if (lp->switch_x_sgmii) {
                ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG,
                                    interface == PHY_INTERFACE_MODE_SGMII ?
                                        XLNX_MII_STD_SELECT_SGMII : 0);
                if (ret < 0) {
                        netdev_warn(ndev,
                                    "Failed to switch PHY interface: %d\n",
                                    ret);
                        return ret;
                }
        }

        ret = phylink_mii_c22_pcs_config(pcs_phy, interface, advertising,
                                         neg_mode);
        if (ret < 0)
                netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);

        return ret;
}
static const struct phylink_pcs_ops axienet_pcs_ops = {
        .pcs_get_state = axienet_pcs_get_state,
        .pcs_config = axienet_pcs_config,
        .pcs_an_restart = axienet_pcs_an_restart,
};
static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
                                                  phy_interface_t interface)
{
        struct net_device *ndev = to_net_dev(config->dev);
        struct axienet_local *lp = netdev_priv(ndev);

        if (interface == PHY_INTERFACE_MODE_1000BASEX ||
            interface == PHY_INTERFACE_MODE_SGMII)
                return &lp->pcs;

        return NULL;
}
static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
                               const struct phylink_link_state *state)
{
        /* nothing meaningful to do */
}

static void axienet_mac_link_down(struct phylink_config *config,
                                  unsigned int mode,
                                  phy_interface_t interface)
{
        /* nothing meaningful to do */
}
static void axienet_mac_link_up(struct phylink_config *config,
                                struct phy_device *phy,
                                unsigned int mode, phy_interface_t interface,
                                int speed, int duplex,
                                bool tx_pause, bool rx_pause)
{
        struct net_device *ndev = to_net_dev(config->dev);
        struct axienet_local *lp = netdev_priv(ndev);
        u32 emmc_reg, fcc_reg;

        emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
        emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;

        switch (speed) {
        case SPEED_1000:
                emmc_reg |= XAE_EMMC_LINKSPD_1000;
                break;
        case SPEED_100:
                emmc_reg |= XAE_EMMC_LINKSPD_100;
                break;
        case SPEED_10:
                emmc_reg |= XAE_EMMC_LINKSPD_10;
                break;
        default:
                dev_err(&ndev->dev,
                        "Speed other than 10, 100 or 1Gbps is not supported\n");
                break;
        }

        axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);

        fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
        if (tx_pause)
                fcc_reg |= XAE_FCC_FCTX_MASK;
        else
                fcc_reg &= ~XAE_FCC_FCTX_MASK;
        if (rx_pause)
                fcc_reg |= XAE_FCC_FCRX_MASK;
        else
                fcc_reg &= ~XAE_FCC_FCRX_MASK;
        axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
}
static const struct phylink_mac_ops axienet_phylink_ops = {
        .mac_select_pcs = axienet_mac_select_pcs,
        .mac_config = axienet_mac_config,
        .mac_link_down = axienet_mac_link_down,
        .mac_link_up = axienet_mac_link_up,
};
/**
 * axienet_dma_err_handler - Work queue task for Axi DMA Error
 * @work:	pointer to work_struct
 *
 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
 * Tx/Rx BDs.
 */
static void axienet_dma_err_handler(struct work_struct *work)
{
        u32 i;
        u32 axienet_status;
        struct axidma_bd *cur_p;
        struct axienet_local *lp = container_of(work, struct axienet_local,
                                                dma_err_task);
        struct net_device *ndev = lp->ndev;

        /* Don't bother if we are going to stop anyway */
        if (READ_ONCE(lp->stopping))
                return;

        napi_disable(&lp->napi_tx);
        napi_disable(&lp->napi_rx);

        axienet_setoptions(ndev, lp->options &
                           ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

        axienet_dma_stop(lp);
        netdev_reset_queue(ndev);

        for (i = 0; i < lp->tx_bd_num; i++) {
                cur_p = &lp->tx_bd_v[i];
                if (cur_p->cntrl) {
                        dma_addr_t addr = desc_get_phys_addr(lp, cur_p);

                        dma_unmap_single(lp->dev, addr,
                                         (cur_p->cntrl &
                                          XAXIDMA_BD_CTRL_LENGTH_MASK),
                                         DMA_TO_DEVICE);
                }
                if (cur_p->skb)
                        dev_kfree_skb_irq(cur_p->skb);
                cur_p->phys = 0;
                cur_p->phys_msb = 0;
                cur_p->cntrl = 0;
                cur_p->status = 0;
                cur_p->app0 = 0;
                cur_p->app1 = 0;
                cur_p->app2 = 0;
                cur_p->app3 = 0;
                cur_p->app4 = 0;
                cur_p->skb = NULL;
        }

        for (i = 0; i < lp->rx_bd_num; i++) {
                cur_p = &lp->rx_bd_v[i];
                cur_p->status = 0;
                cur_p->app0 = 0;
                cur_p->app1 = 0;
                cur_p->app2 = 0;
                cur_p->app3 = 0;
                cur_p->app4 = 0;
        }

        lp->tx_bd_ci = 0;
        lp->tx_bd_tail = 0;
        lp->rx_bd_ci = 0;

        axienet_dma_start(lp);

        axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
        axienet_status &= ~XAE_RCW1_RX_MASK;
        axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

        axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
        if (axienet_status & XAE_INT_RXRJECT_MASK)
                axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
        axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
                    XAE_INT_RECV_ERROR_MASK : 0);
        axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

        /* Sync default options with HW but leave receiver and
         * transmitter disabled.
         */
        axienet_setoptions(ndev, lp->options &
                           ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
        axienet_set_mac_address(ndev, NULL);
        axienet_set_multicast_list(ndev);
        napi_enable(&lp->napi_rx);
        napi_enable(&lp->napi_tx);
        axienet_setoptions(ndev, lp->options);
}
/**
 * axienet_probe - Axi Ethernet probe function.
 * @pdev:	Pointer to platform device structure.
 *
 * Return: 0, on success
 *	    Non-zero error value on failure.
 *
 * This is the probe routine for Axi Ethernet driver. This is called before
 * any other driver routines are invoked. It allocates and sets up the Ethernet
 * device. Parses through device tree and populates fields of
 * axienet_local. It registers the Ethernet device.
 */
static int axienet_probe(struct platform_device *pdev)
{
        int ret;
        struct device_node *np;
        struct axienet_local *lp;
        struct net_device *ndev;
        struct resource *ethres;
        u8 mac_addr[ETH_ALEN];
        int addr_width = 32;
        u32 value;

        ndev = alloc_etherdev(sizeof(*lp));
        if (!ndev)
                return -ENOMEM;

        platform_set_drvdata(pdev, ndev);

        SET_NETDEV_DEV(ndev, &pdev->dev);
        ndev->features = NETIF_F_SG;
        ndev->ethtool_ops = &axienet_ethtool_ops;

        /* MTU range: 64 - 9000 */
        ndev->min_mtu = 64;
        ndev->max_mtu = XAE_JUMBO_MTU;

        lp = netdev_priv(ndev);
        lp->ndev = ndev;
        lp->dev = &pdev->dev;
        lp->options = XAE_OPTION_DEFAULTS;
        lp->rx_bd_num = RX_BD_NUM_DEFAULT;
        lp->tx_bd_num = TX_BD_NUM_DEFAULT;

        u64_stats_init(&lp->rx_stat_sync);
        u64_stats_init(&lp->tx_stat_sync);

        mutex_init(&lp->stats_lock);
        seqcount_mutex_init(&lp->hw_stats_seqcount, &lp->stats_lock);
        INIT_DEFERRABLE_WORK(&lp->stats_work, axienet_refresh_stats);

        lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
        if (!lp->axi_clk) {
                /* For backward compatibility, if named AXI clock is not present,
                 * treat the first clock specified as the AXI clock.
                 */
                lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
        }
        if (IS_ERR(lp->axi_clk)) {
                ret = PTR_ERR(lp->axi_clk);
                goto free_netdev;
        }
        ret = clk_prepare_enable(lp->axi_clk);
        if (ret) {
                dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
                goto free_netdev;
        }

        lp->misc_clks[0].id = "axis_clk";
        lp->misc_clks[1].id = "ref_clk";
        lp->misc_clks[2].id = "mgt_clk";

        ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
        if (ret)
                goto cleanup_clk;

        ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
        if (ret)
                goto cleanup_clk;

        /* Map device registers */
        lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
        if (IS_ERR(lp->regs)) {
                ret = PTR_ERR(lp->regs);
                goto cleanup_clk;
        }
        lp->regs_start = ethres->start;

        /* Setup checksum offload, but default to off if not specified */
        lp->features = 0;

        if (axienet_ior(lp, XAE_ABILITY_OFFSET) & XAE_ABILITY_STATS)
                lp->features |= XAE_FEATURE_STATS;

        ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
        if (!ret) {
                switch (value) {
                case 1:
                        lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
                        /* Can checksum any contiguous range */
                        ndev->features |= NETIF_F_HW_CSUM;
                        break;
                case 2:
                        lp->features |= XAE_FEATURE_FULL_TX_CSUM;
                        /* Can checksum TCP/UDP over IPv4. */
                        ndev->features |= NETIF_F_IP_CSUM;
                        break;
                }
        }
        ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
        if (!ret) {
                switch (value) {
                case 1:
                        lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
                        ndev->features |= NETIF_F_RXCSUM;
                        break;
                case 2:
                        lp->features |= XAE_FEATURE_FULL_RX_CSUM;
                        ndev->features |= NETIF_F_RXCSUM;
                        break;
                }
        }
        /* For supporting jumbo frames, the Axi Ethernet hardware must have
         * a larger Rx/Tx Memory. Typically, the size must be large so that
         * we can enable jumbo option and start supporting jumbo frames.
         * Here we check for memory allocated for Rx/Tx in the hardware from
         * the device-tree and accordingly set flags.
         */
        of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);

        lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
                                                   "xlnx,switch-x-sgmii");

        /* Start with the proprietary, and broken phy_type */
        ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
        if (!ret) {
                netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
                switch (value) {
                case XAE_PHY_TYPE_MII:
                        lp->phy_mode = PHY_INTERFACE_MODE_MII;
                        break;
                case XAE_PHY_TYPE_GMII:
                        lp->phy_mode = PHY_INTERFACE_MODE_GMII;
                        break;
                case XAE_PHY_TYPE_RGMII_2_0:
                        lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
                        break;
                case XAE_PHY_TYPE_SGMII:
                        lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
                        break;
                case XAE_PHY_TYPE_1000BASE_X:
                        lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
                        break;
                default:
                        ret = -EINVAL;
                        goto cleanup_clk;
                }
        } else {
                ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
                if (ret)
                        goto cleanup_clk;
        }
        if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
            lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
                dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
                ret = -EINVAL;
                goto cleanup_clk;
        }

        if (!of_property_present(pdev->dev.of_node, "dmas")) {
                /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
                np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);

                if (np) {
                        struct resource dmares;

                        ret = of_address_to_resource(np, 0, &dmares);
                        if (ret) {
                                dev_err(&pdev->dev,
                                        "unable to get DMA resource\n");
                                of_node_put(np);
                                goto cleanup_clk;
                        }
                        lp->dma_regs = devm_ioremap_resource(&pdev->dev,
                                                             &dmares);
                        lp->rx_irq = irq_of_parse_and_map(np, 1);
                        lp->tx_irq = irq_of_parse_and_map(np, 0);
                        of_node_put(np);
                        lp->eth_irq = platform_get_irq_optional(pdev, 0);
                } else {
                        /* Check for these resources directly on the Ethernet node. */
                        lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
                        lp->rx_irq = platform_get_irq(pdev, 1);
                        lp->tx_irq = platform_get_irq(pdev, 0);
                        lp->eth_irq = platform_get_irq_optional(pdev, 2);
                }
                if (IS_ERR(lp->dma_regs)) {
                        dev_err(&pdev->dev, "could not map DMA regs\n");
                        ret = PTR_ERR(lp->dma_regs);
                        goto cleanup_clk;
                }
                if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
                        dev_err(&pdev->dev, "could not determine irqs\n");
                        ret = -ENOMEM;
                        goto cleanup_clk;
                }

                /* Reset core now that clocks are enabled, prior to accessing MDIO */
                ret = __axienet_device_reset(lp);
                if (ret)
                        goto cleanup_clk;

                /* Autodetect the need for 64-bit DMA pointers.
                 * When the IP is configured for a bus width bigger than 32 bits,
                 * writing the MSB registers is mandatory, even if they are all 0.
                 * We can detect this case by writing all 1's to one such register
                 * and see if that sticks: when the IP is configured for 32 bits
                 * only, those registers are RES0.
                 * Those MSB registers were introduced in IP v7.1, which we check first.
                 */
                if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
                        void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;

                        iowrite32(0x0, desc);
                        if (ioread32(desc) == 0) {      /* sanity check */
                                iowrite32(0xffffffff, desc);
                                if (ioread32(desc) > 0) {
                                        lp->features |= XAE_FEATURE_DMA_64BIT;
                                        addr_width = 64;
                                        dev_info(&pdev->dev,
                                                 "autodetected 64-bit DMA range\n");
                                }
                                iowrite32(0x0, desc);
                        }
                }
                if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
                        dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
                        ret = -EINVAL;
                        goto cleanup_clk;
                }

                ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
                if (ret) {
                        dev_err(&pdev->dev, "No suitable DMA available\n");
                        goto cleanup_clk;
                }
                netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
                netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
        } else {
                struct xilinx_vdma_config cfg;
                struct dma_chan *tx_chan;

                lp->eth_irq = platform_get_irq_optional(pdev, 0);
                if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) {
                        ret = lp->eth_irq;
                        goto cleanup_clk;
                }
                tx_chan = dma_request_chan(lp->dev, "tx_chan0");
                if (IS_ERR(tx_chan)) {
                        ret = PTR_ERR(tx_chan);
                        dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
                        goto cleanup_clk;
                }

                cfg.reset = 1;
                /* As name says VDMA but it has support for DMA channel reset */
                ret = xilinx_vdma_channel_set_config(tx_chan, &cfg);
                if (ret < 0) {
                        dev_err(&pdev->dev, "Reset channel failed\n");
                        dma_release_channel(tx_chan);
                        goto cleanup_clk;
                }

                dma_release_channel(tx_chan);
                lp->use_dmaengine = 1;
        }

        if (lp->use_dmaengine)
                ndev->netdev_ops = &axienet_netdev_dmaengine_ops;
        else
                ndev->netdev_ops = &axienet_netdev_ops;
        /* Check for Ethernet core IRQ (optional) */
        if (lp->eth_irq <= 0)
                dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");

        /* Retrieve the MAC address */
        ret = of_get_mac_address(pdev->dev.of_node, mac_addr);
        if (!ret) {
                axienet_set_mac_address(ndev, mac_addr);
        } else {
                dev_warn(&pdev->dev, "could not find MAC address property: %d\n",
                         ret);
                axienet_set_mac_address(ndev, NULL);
        }

        spin_lock_init(&lp->rx_cr_lock);
        spin_lock_init(&lp->tx_cr_lock);
        INIT_WORK(&lp->rx_dim.work, axienet_rx_dim_work);
        lp->rx_dim_enabled = true;
        lp->rx_dim.profile_ix = 1;
        lp->rx_dma_cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp),
                                        XAXIDMA_DFT_RX_USEC);
        lp->tx_dma_cr = axienet_calc_cr(lp, XAXIDMA_DFT_TX_THRESHOLD,
                                        XAXIDMA_DFT_TX_USEC);

        ret = axienet_mdio_setup(lp);
        if (ret)
                dev_warn(&pdev->dev,
                         "error registering MDIO bus: %d\n", ret);

        if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
            lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
                np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
                if (!np) {
                        /* Deprecated: Always use "pcs-handle" for pcs_phy.
                         * Falling back to "phy-handle" here is only for
                         * backward compatibility with old device trees.
                         */
                        np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
                }
                if (!np) {
                        dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
                        ret = -EINVAL;
                        goto cleanup_mdio;
                }
                lp->pcs_phy = of_mdio_find_device(np);
                if (!lp->pcs_phy) {
                        ret = -EPROBE_DEFER;
                        of_node_put(np);
                        goto cleanup_mdio;
                }
                of_node_put(np);
                lp->pcs.ops = &axienet_pcs_ops;
                lp->pcs.poll = true;
        }

        lp->phylink_config.dev = &ndev->dev;
        lp->phylink_config.type = PHYLINK_NETDEV;
        lp->phylink_config.mac_managed_pm = true;
        lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
                MAC_10FD | MAC_100FD | MAC_1000FD;

        __set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
        if (lp->switch_x_sgmii) {
                __set_bit(PHY_INTERFACE_MODE_1000BASEX,
                          lp->phylink_config.supported_interfaces);
                __set_bit(PHY_INTERFACE_MODE_SGMII,
                          lp->phylink_config.supported_interfaces);
        }

        lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
                                     lp->phy_mode,
                                     &axienet_phylink_ops);
        if (IS_ERR(lp->phylink)) {
                ret = PTR_ERR(lp->phylink);
                dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
                goto cleanup_mdio;
        }

        ret = register_netdev(lp->ndev);
        if (ret) {
                dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
                goto cleanup_phylink;
        }

        return 0;

cleanup_phylink:
        phylink_destroy(lp->phylink);

cleanup_mdio:
        if (lp->pcs_phy)
                put_device(&lp->pcs_phy->dev);
        if (lp->mii_bus)
                axienet_mdio_teardown(lp);
cleanup_clk:
        clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
        clk_disable_unprepare(lp->axi_clk);

free_netdev:
        free_netdev(ndev);

        return ret;
}
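/*
 * axienet_remove() below and the probe error labels above release resources
 * in the reverse order of their acquisition in axienet_probe(): netdev,
 * phylink, PCS mdio device, MDIO bus, then clocks.
 */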
static void axienet_remove(struct platform_device *pdev)
{
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct axienet_local *lp = netdev_priv(ndev);

        unregister_netdev(ndev);

        if (lp->phylink)
                phylink_destroy(lp->phylink);

        if (lp->pcs_phy)
                put_device(&lp->pcs_phy->dev);

        axienet_mdio_teardown(lp);

        clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
        clk_disable_unprepare(lp->axi_clk);

        free_netdev(ndev);
}
static void axienet_shutdown(struct platform_device *pdev)
{
        struct net_device *ndev = platform_get_drvdata(pdev);

        rtnl_lock();
        netif_device_detach(ndev);

        if (netif_running(ndev))
                dev_close(ndev);

        rtnl_unlock();
}
static int axienet_suspend(struct device *dev)
{
        struct net_device *ndev = dev_get_drvdata(dev);

        if (!netif_running(ndev))
                return 0;

        netif_device_detach(ndev);

        rtnl_lock();
        axienet_stop(ndev);
        rtnl_unlock();

        return 0;
}
static int axienet_resume(struct device *dev)
{
        struct net_device *ndev = dev_get_drvdata(dev);

        if (!netif_running(ndev))
                return 0;

        rtnl_lock();
        axienet_open(ndev);
        rtnl_unlock();

        netif_device_attach(ndev);

        return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops,
                                axienet_suspend, axienet_resume);

static struct platform_driver axienet_driver = {
        .probe = axienet_probe,
        .remove = axienet_remove,
        .shutdown = axienet_shutdown,
        .driver = {
                 .name = "xilinx_axienet",
                 .pm = &axienet_pm_ops,
                 .of_match_table = axienet_of_match,
        },
};

module_platform_driver(axienet_driver);

MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
MODULE_AUTHOR("Xilinx");
MODULE_LICENSE("GPL");