// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xilinx Axi Ethernet device driver
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2010 - 2011 PetaLogix
 * Copyright (c) 2019 - 2022 Calian Advanced Technologies
 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
 *
 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
 * and Spartan6.
 *
 * TODO:
 *  - Add Axi Fifo support.
 *  - Factor out Axi DMA code into separate driver.
 *  - Test and fix basic multicast filtering.
 *  - Add support for extended multicast filtering.
 *  - Test basic VLAN support.
 *  - Add support for extended VLAN support.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/circ_buf.h>
#include <net/netdev_queues.h>

#include "xilinx_axienet.h"

/* Descriptors defines for Tx and Rx DMA */
#define TX_BD_NUM_DEFAULT		128
#define RX_BD_NUM_DEFAULT		1024
#define TX_BD_NUM_MIN			(MAX_SKB_FRAGS + 1)
#define TX_BD_NUM_MAX			4096
#define RX_BD_NUM_MAX			4096
#define DMA_NUM_APP_WORDS		5
#define LEN_APP				4
#define RX_BUF_NUM_DEFAULT		128

/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME		"xaxienet"
#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
#define DRIVER_VERSION		"1.00a"

#define AXIENET_REGS_N		40

static void axienet_rx_submit_desc(struct net_device *ndev);

/* Match table for of_platform binding */
static const struct of_device_id axienet_of_match[] = {
	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
	{},
};

MODULE_DEVICE_TABLE(of, axienet_of_match);

/* Option table for setting up Axi Ethernet hardware options */
static struct axienet_option axienet_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_JUM_MASK,
	}, {
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_JUM_MASK,
	}, { /* Turn on VLAN packet support for both Rx and Tx */
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_VLAN_MASK,
	}, {
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_VLAN_MASK,
	}, { /* Turn on FCS stripping on receive packets */
		.opt = XAE_OPTION_FCS_STRIP,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_FCS_MASK,
	}, { /* Turn on FCS insertion on transmit packets */
		.opt = XAE_OPTION_FCS_INSERT,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_FCS_MASK,
	}, { /* Turn off length/type field checking on receive packets */
		.opt = XAE_OPTION_LENTYPE_ERR,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_LT_DIS_MASK,
	}, { /* Turn on Rx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCRX_MASK,
	}, { /* Turn on Tx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCTX_MASK,
	}, { /* Turn on promiscuous frame filtering */
		.opt = XAE_OPTION_PROMISC,
		.reg = XAE_FMI_OFFSET,
		.m_or = XAE_FMI_PM_MASK,
	}, { /* Enable transmitter */
		.opt = XAE_OPTION_TXEN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_TX_MASK,
	}, { /* Enable receiver */
		.opt = XAE_OPTION_RXEN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_RX_MASK,
	},
	{}
};

static struct skbuf_dma_descriptor *axienet_get_rx_desc(struct axienet_local *lp, int i)
{
	return lp->rx_skb_ring[i & (RX_BUF_NUM_DEFAULT - 1)];
}

static struct skbuf_dma_descriptor *axienet_get_tx_desc(struct axienet_local *lp, int i)
{
	return lp->tx_skb_ring[i & (TX_BD_NUM_MAX - 1)];
}
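
/*
 * Note: both lookups mask a free-running ring index with (size - 1), which
 * only works because RX_BUF_NUM_DEFAULT (128) and TX_BD_NUM_MAX (4096) are
 * powers of two; axienet_init_dmaengine() below sizes the skb rings with
 * exactly these constants.
 */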

/**
 * axienet_dma_in32 - Memory mapped Axi DMA register read
 * @lp: Pointer to axienet local structure
 * @reg: Address offset from the base address of the Axi DMA core
 *
 * Return: The contents of the Axi DMA register
 *
 * This function returns the contents of the corresponding Axi DMA register.
 */
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
	return ioread32(lp->dma_regs + reg);
}

static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
			       struct axidma_bd *desc)
{
	desc->phys = lower_32_bits(addr);
	if (lp->features & XAE_FEATURE_DMA_64BIT)
		desc->phys_msb = upper_32_bits(addr);
}

static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
				     struct axidma_bd *desc)
{
	dma_addr_t ret = desc->phys;

	if (lp->features & XAE_FEATURE_DMA_64BIT)
		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;

	return ret;
}
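
/*
 * The two shifts by 16 above (rather than a single shift by 32) keep the
 * expression well-defined when dma_addr_t is only 32 bits wide: a shift by
 * the full width of the type is undefined in C, while two half-width shifts
 * simply produce zero in that case.
 */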

/**
 * axienet_dma_bd_release - Release buffer descriptor rings
 * @ndev: Pointer to the net_device structure
 *
 * This function is used to release the descriptors allocated in
 * axienet_dma_bd_init. axienet_dma_bd_release is called when the Axi Ethernet
 * driver stop API is called.
 */
static void axienet_dma_bd_release(struct net_device *ndev)
{
	int i;
	struct axienet_local *lp = netdev_priv(ndev);

	/* If we end up here, tx_bd_v must have been DMA allocated. */
	dma_free_coherent(lp->dev,
			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
			  lp->tx_bd_v,
			  lp->tx_bd_p);

	if (!lp->rx_bd_v)
		return;

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t phys;

		/* A NULL skb means this descriptor has not been initialised
		 * at all.
		 */
		if (!lp->rx_bd_v[i].skb)
			break;

		dev_kfree_skb(lp->rx_bd_v[i].skb);

		/* For each descriptor, we programmed cntrl with the (non-zero)
		 * descriptor size, after it had been successfully allocated.
		 * So a non-zero value in there means we need to unmap it.
		 */
		if (lp->rx_bd_v[i].cntrl) {
			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
			dma_unmap_single(lp->dev, phys,
					 lp->max_frm_size, DMA_FROM_DEVICE);
		}
	}

	dma_free_coherent(lp->dev,
			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
			  lp->rx_bd_v,
			  lp->rx_bd_p);
}

static u64 axienet_dma_rate(struct axienet_local *lp)
{
	if (lp->axi_clk)
		return clk_get_rate(lp->axi_clk);
	return 125000000; /* arbitrary guess if no clock rate set */
}

/**
 * axienet_calc_cr() - Calculate control register value
 * @lp: Device private data
 * @count: Number of completions before an interrupt
 * @usec: Microseconds after the last completion before an interrupt
 *
 * Calculate a control register value based on the coalescing settings. The
 * run/stop bit is not set.
 */
static u32 axienet_calc_cr(struct axienet_local *lp, u32 count, u32 usec)
{
	u32 cr;

	cr = FIELD_PREP(XAXIDMA_COALESCE_MASK, count) | XAXIDMA_IRQ_IOC_MASK |
	     XAXIDMA_IRQ_ERROR_MASK;
	/* Only set interrupt delay timer if not generating an interrupt on
	 * the first packet. Otherwise leave at 0 to disable delay interrupt.
	 */
	if (count > 1) {
		u64 clk_rate = axienet_dma_rate(lp);
		u32 timer;

		/* 1 Timeout Interval = 125 * (clock period of SG clock) */
		timer = DIV64_U64_ROUND_CLOSEST((u64)usec * clk_rate,
						XAXIDMA_DELAY_SCALE);

		timer = min(timer, FIELD_MAX(XAXIDMA_DELAY_MASK));
		cr |= FIELD_PREP(XAXIDMA_DELAY_MASK, timer) |
		      XAXIDMA_IRQ_DELAY_MASK;
	}

	return cr;
}
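
/*
 * Worked example, assuming the scale factor implied by the
 * "1 Timeout Interval = 125 * (clock period)" comment above (i.e.
 * XAXIDMA_DELAY_SCALE = 125 * USEC_PER_SEC): with a 100 MHz SG clock and
 * usec = 50, timer = round(50 * 100000000 / 125000000) = 40 timeout
 * intervals, which is 40 * 125 clock periods = 50 us of idle time before
 * the delay interrupt fires.
 */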

/**
 * axienet_coalesce_params() - Extract coalesce parameters from the CR
 * @lp: Device private data
 * @cr: The control register to parse
 * @count: Number of packets before an interrupt
 * @usec: Idle time (in usec) before an interrupt
 */
static void axienet_coalesce_params(struct axienet_local *lp, u32 cr,
				    u32 *count, u32 *usec)
{
	u64 clk_rate = axienet_dma_rate(lp);
	u64 timer = FIELD_GET(XAXIDMA_DELAY_MASK, cr);

	*count = FIELD_GET(XAXIDMA_COALESCE_MASK, cr);
	*usec = DIV64_U64_ROUND_CLOSEST(timer * XAXIDMA_DELAY_SCALE, clk_rate);
}

/**
 * axienet_dma_start - Set up DMA registers and start DMA operation
 * @lp: Pointer to the axienet_local structure
 */
static void axienet_dma_start(struct axienet_local *lp)
{
	spin_lock_irq(&lp->rx_cr_lock);

	/* Start updating the Rx channel control register */
	lp->rx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
	lp->rx_dma_started = true;

	spin_unlock_irq(&lp->rx_cr_lock);
	spin_lock_irq(&lp->tx_cr_lock);

	/* Start updating the Tx channel control register */
	lp->tx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
	lp->tx_dma_started = true;

	spin_unlock_irq(&lp->tx_cr_lock);
}

/**
 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 * @ndev: Pointer to the net_device structure
 *
 * Return: 0 on success, -ENOMEM on failure
 *
 * This function is called to initialize the Rx and Tx DMA descriptor
 * rings. This initializes the descriptors with required default values
 * and is called when Axi Ethernet driver reset is called.
 */
static int axienet_dma_bd_init(struct net_device *ndev)
{
	int i;
	struct sk_buff *skb;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Reset the indexes which are used for accessing the BDs */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	lp->tx_bd_v = dma_alloc_coherent(lp->dev,
					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		return -ENOMEM;

	lp->rx_bd_v = dma_alloc_coherent(lp->dev,
					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	for (i = 0; i < lp->tx_bd_num; i++) {
		dma_addr_t addr = lp->tx_bd_p +
				  sizeof(*lp->tx_bd_v) *
				  ((i + 1) % lp->tx_bd_num);

		lp->tx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t addr;

		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
			((i + 1) % lp->rx_bd_num);
		lp->rx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);

		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!skb)
			goto out;

		lp->rx_bd_v[i].skb = skb;
		addr = dma_map_single(lp->dev, skb->data,
				      lp->max_frm_size, DMA_FROM_DEVICE);
		if (dma_mapping_error(lp->dev, addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			goto out;
		}
		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);

		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
	}

	axienet_dma_start(lp);

	return 0;
out:
	axienet_dma_bd_release(ndev);
	return -ENOMEM;
}

/**
 * axienet_set_mac_address - Write the MAC address
 * @ndev: Pointer to the net_device structure
 * @address: 6 byte Address to be written as MAC address
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It writes to the UAW0 and UAW1 registers of the core.
 */
static void axienet_set_mac_address(struct net_device *ndev,
				    const void *address)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (address)
		eth_hw_addr_set(ndev, address);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);

	/* Set up the unicast MAC address filter with the given MAC address */
	axienet_iow(lp, XAE_UAW0_OFFSET,
		    (ndev->dev_addr[0]) |
		    (ndev->dev_addr[1] << 8) |
		    (ndev->dev_addr[2] << 16) |
		    (ndev->dev_addr[3] << 24));
	axienet_iow(lp, XAE_UAW1_OFFSET,
		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
		      ~XAE_UAW1_UNICASTADDR_MASK) |
		     (ndev->dev_addr[4] |
		      (ndev->dev_addr[5] << 8))));
}
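
/*
 * For example, a MAC address of 00:0a:35:01:02:03 is programmed as
 * UAW0 = 0x01350a00 (bytes 0-3 packed as above) and the low 16 bits of
 * UAW1 = 0x0302 (bytes 4-5); the upper UAW1 bits are preserved by the
 * read-modify-write.
 */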

/**
 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 * @ndev: Pointer to the net_device structure
 * @p: 6 byte Address to be written as MAC address
 *
 * Return: 0 for all conditions. Presently, there is no failure case.
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It calls the core specific axienet_set_mac_address. This is the
 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	axienet_set_mac_address(ndev, addr->sa_data);
	return 0;
}

/**
 * axienet_set_multicast_list - Prepare the multicast table
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to initialize the multicast table during
 * initialization. The Axi Ethernet basic multicast support has a four-entry
 * multicast table which is initialized here. Additionally this function
 * goes into the net_device_ops structure entry ndo_set_multicast_list. This
 * means whenever the multicast table entries need to be updated this
 * function gets called.
 */
static void axienet_set_multicast_list(struct net_device *ndev)
{
	int i = 0;
	u32 reg, af0reg, af1reg;
	struct axienet_local *lp = netdev_priv(ndev);

	reg = axienet_ior(lp, XAE_FMI_OFFSET);
	reg &= ~XAE_FMI_PM_MASK;
	if (ndev->flags & IFF_PROMISC)
		reg |= XAE_FMI_PM_MASK;
	else
		reg &= ~XAE_FMI_PM_MASK;
	axienet_iow(lp, XAE_FMI_OFFSET, reg);

	if (ndev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
		reg &= 0xFFFFFF00;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		axienet_iow(lp, XAE_AF0_OFFSET, 1); /* Multicast bit */
		axienet_iow(lp, XAE_AF1_OFFSET, 0);
		axienet_iow(lp, XAE_AM0_OFFSET, 1); /* ditto */
		axienet_iow(lp, XAE_AM1_OFFSET, 0);
		axienet_iow(lp, XAE_FFE_OFFSET, 1);
		i = 1;
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		netdev_for_each_mc_addr(ha, ndev) {
			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
				break;

			af0reg = (ha->addr[0]);
			af0reg |= (ha->addr[1] << 8);
			af0reg |= (ha->addr[2] << 16);
			af0reg |= (ha->addr[3] << 24);

			af1reg = (ha->addr[4]);
			af1reg |= (ha->addr[5] << 8);

			reg &= 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
			axienet_iow(lp, XAE_AM0_OFFSET, 0xffffffff);
			axienet_iow(lp, XAE_AM1_OFFSET, 0x0000ffff);
			axienet_iow(lp, XAE_FFE_OFFSET, 1);
			i++;
		}
	}

	for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
		reg &= 0xFFFFFF00;
		reg |= i;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		axienet_iow(lp, XAE_FFE_OFFSET, 0);
	}
}

/**
 * axienet_setoptions - Set an Axi Ethernet option
 * @ndev: Pointer to the net_device structure
 * @options: Option to be enabled/disabled
 *
 * The Axi Ethernet core has multiple features which can be selectively turned
 * on or off. The typical options could be jumbo frame option, basic VLAN
 * option, promiscuous mode option etc. This function is used to set or clear
 * these options in the Axi Ethernet hardware. This is done through the
 * axienet_option structure.
 */
static void axienet_setoptions(struct net_device *ndev, u32 options)
{
	int reg;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_option *tp = &axienet_options[0];

	while (tp->opt) {
		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
		if (options & tp->opt)
			reg |= tp->m_or;
		axienet_iow(lp, tp->reg, reg);
		tp++;
	}

	lp->options |= options;
}

static u64 axienet_stat(struct axienet_local *lp, enum temac_stat stat)
{
	u32 counter;

	if (lp->reset_in_progress)
		return lp->hw_stat_base[stat];

	counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
	return lp->hw_stat_base[stat] + (counter - lp->hw_last_counter[stat]);
}
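
/*
 * The hardware statistics counters are only 32 bits wide; axienet_stat()
 * extends them to 64 bits by adding the delta since the last snapshot to
 * hw_stat_base. The unsigned subtraction tolerates a single counter wrap
 * between snapshots, which the periodic refresh in axienet_refresh_stats()
 * below is scheduled often enough to guarantee.
 */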

static void axienet_stats_update(struct axienet_local *lp, bool reset)
{
	enum temac_stat stat;

	write_seqcount_begin(&lp->hw_stats_seqcount);
	lp->reset_in_progress = reset;
	for (stat = 0; stat < STAT_COUNT; stat++) {
		u32 counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);

		lp->hw_stat_base[stat] += counter - lp->hw_last_counter[stat];
		lp->hw_last_counter[stat] = counter;
	}
	write_seqcount_end(&lp->hw_stats_seqcount);
}

static void axienet_refresh_stats(struct work_struct *work)
{
	struct axienet_local *lp = container_of(work, struct axienet_local,
						stats_work.work);

	mutex_lock(&lp->stats_lock);
	axienet_stats_update(lp, false);
	mutex_unlock(&lp->stats_lock);

	/* Just less than 2^32 bytes at 2.5 GBit/s */
	schedule_delayed_work(&lp->stats_work, 13 * HZ);
}

static int __axienet_device_reset(struct axienet_local *lp)
{
	u32 value;
	int ret;

	/* Save statistics counters in case they will be reset */
	mutex_lock(&lp->stats_lock);
	if (lp->features & XAE_FEATURE_STATS)
		axienet_stats_update(lp, true);

	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
	 * process of Axi DMA takes a while to complete as all pending
	 * commands/transfers will be flushed or completed during this
	 * reset process.
	 * Note that even though both TX and RX have their own reset register,
	 * they both reset the entire DMA core, so only one needs to be used.
	 */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
	ret = read_poll_timeout(axienet_dma_in32, value,
				!(value & XAXIDMA_CR_RESET_MASK),
				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
				XAXIDMA_TX_CR_OFFSET);
	if (ret) {
		dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
		goto out;
	}

	/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
	ret = read_poll_timeout(axienet_ior, value,
				value & XAE_INT_PHYRSTCMPLT_MASK,
				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
				XAE_IS_OFFSET);
	if (ret) {
		dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
		goto out;
	}

	/* Update statistics counters with new values */
	if (lp->features & XAE_FEATURE_STATS) {
		enum temac_stat stat;

		write_seqcount_begin(&lp->hw_stats_seqcount);
		lp->reset_in_progress = false;
		for (stat = 0; stat < STAT_COUNT; stat++) {
			u32 counter =
				axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);

			lp->hw_stat_base[stat] +=
				lp->hw_last_counter[stat] - counter;
			lp->hw_last_counter[stat] = counter;
		}
		write_seqcount_end(&lp->hw_stats_seqcount);
	}

out:
	mutex_unlock(&lp->stats_lock);
	return ret;
}

/**
 * axienet_dma_stop - Stop DMA operation
 * @lp: Pointer to the axienet_local structure
 */
static void axienet_dma_stop(struct axienet_local *lp)
{
	int count;
	u32 cr, sr;

	spin_lock_irq(&lp->rx_cr_lock);

	cr = lp->rx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
	lp->rx_dma_started = false;

	spin_unlock_irq(&lp->rx_cr_lock);
	synchronize_irq(lp->rx_irq);

	spin_lock_irq(&lp->tx_cr_lock);

	cr = lp->tx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
	lp->tx_dma_started = false;

	spin_unlock_irq(&lp->tx_cr_lock);
	synchronize_irq(lp->tx_irq);

	/* Give DMAs a chance to halt gracefully */
	sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	}

	sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	}

	/* Do a reset to ensure DMA is really stopped */
	axienet_lock_mii(lp);
	__axienet_device_reset(lp);
	axienet_unlock_mii(lp);
}

/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to reset and initialize the Axi Ethernet core. This
 * is typically called during initialization. It does a reset of the Axi DMA
 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 * core.
 * Returns 0 on success or a negative error number otherwise.
 */
static int axienet_device_reset(struct net_device *ndev)
{
	u32 axienet_status;
	struct axienet_local *lp = netdev_priv(ndev);
	int ret;

	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
	lp->options |= XAE_OPTION_VLAN;
	lp->options &= (~XAE_OPTION_JUMBO);

	if (ndev->mtu > XAE_MTU && ndev->mtu <= XAE_JUMBO_MTU) {
		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
					XAE_TRL_SIZE;

		if (lp->max_frm_size <= lp->rxmem)
			lp->options |= XAE_OPTION_JUMBO;
	}

	if (!lp->use_dmaengine) {
		ret = __axienet_device_reset(lp);
		if (ret)
			return ret;

		ret = axienet_dma_bd_init(ndev);
		if (ret) {
			netdev_err(ndev, "%s: descriptor allocation failed\n",
				   __func__);
			return ret;
		}
	}

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);

	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);

	netif_trans_update(ndev);

	return 0;
}

/**
 * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
 * @lp: Pointer to the axienet_local structure
 * @first_bd: Index of first descriptor to clean up
 * @nr_bds: Max number of descriptors to clean up
 * @force: Whether to clean descriptors even if not complete
 * @sizep: Pointer to a u32 filled with the total sum of all bytes
 *	   in all cleaned-up descriptors. Ignored if NULL.
 * @budget: NAPI budget (use 0 when not called from NAPI poll)
 *
 * Would either be called after a successful transmit operation, or after
 * there was an error when setting up the chain.
 * Returns the number of packets handled.
 */
static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
				 int nr_bds, bool force, u32 *sizep, int budget)
{
	struct axidma_bd *cur_p;
	unsigned int status;
	int i, packets = 0;
	dma_addr_t phys;

	for (i = 0; i < nr_bds; i++) {
		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
		status = cur_p->status;

		/* If force is not specified, clean up only descriptors
		 * that have been completed by the MAC.
		 */
		if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
			break;

		/* Ensure we see complete descriptor update */
		dma_rmb();
		phys = desc_get_phys_addr(lp, cur_p);
		dma_unmap_single(lp->dev, phys,
				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
				 DMA_TO_DEVICE);

		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
			napi_consume_skb(cur_p->skb, budget);
			packets++;
		}

		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app4 = 0;
		cur_p->skb = NULL;
		/* ensure our transmit path and device don't prematurely see status cleared */
		wmb();
		cur_p->cntrl = 0;
		cur_p->status = 0;

		if (sizep)
			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
	}

	if (!force) {
		lp->tx_bd_ci += i;
		if (lp->tx_bd_ci >= lp->tx_bd_num)
			lp->tx_bd_ci %= lp->tx_bd_num;
	}

	return packets;
}

/**
 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 * @lp: Pointer to the axienet_local structure
 * @num_frag: The number of BDs to check for
 *
 * Return: 0, on success
 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked before BDs are allocated and transmission starts.
 * This function returns 0 if a BD or group of BDs can be allocated for
 * transmission. If the BD or any of the BDs are not free the function
 * returns a busy status.
 */
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
					    int num_frag)
{
	struct axidma_bd *cur_p;

	/* Ensure we see all descriptor updates from device or TX polling */
	rmb();
	cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
			     lp->tx_bd_num];
	if (cur_p->cntrl)
		return NETDEV_TX_BUSY;
	return 0;
}

/**
 * axienet_dma_tx_cb - DMA engine callback for TX channel.
 * @data: Pointer to the axienet_local structure.
 * @result: error reporting through dmaengine_result.
 * This function is called by dmaengine driver for TX channel to notify
 * that the transmit is done.
 */
static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result)
{
	struct skbuf_dma_descriptor *skbuf_dma;
	struct axienet_local *lp = data;
	struct netdev_queue *txq;
	int len;

	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_tail++);
	len = skbuf_dma->skb->len;
	txq = skb_get_tx_queue(lp->ndev, skbuf_dma->skb);
	u64_stats_update_begin(&lp->tx_stat_sync);
	u64_stats_add(&lp->tx_bytes, len);
	u64_stats_add(&lp->tx_packets, 1);
	u64_stats_update_end(&lp->tx_stat_sync);
	dma_unmap_sg(lp->dev, skbuf_dma->sgl, skbuf_dma->sg_len, DMA_TO_DEVICE);
	dev_consume_skb_any(skbuf_dma->skb);
	netif_txq_completed_wake(txq, 1, len,
				 CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
				 2);
}

/**
 * axienet_start_xmit_dmaengine - Starts the transmission.
 * @skb: sk_buff pointer that contains data to be Txed.
 * @ndev: Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK on success or any non space errors.
 *	   NETDEV_TX_BUSY when free element in TX skb ring buffer
 *	   is not available.
 *
 * This function is invoked to initiate transmission. The function sets up
 * the skb, registers the DMA callback and submits the DMA transaction.
 * Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static netdev_tx_t
axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
{
	struct dma_async_tx_descriptor *dma_tx_desc = NULL;
	struct axienet_local *lp = netdev_priv(ndev);
	u32 app_metadata[DMA_NUM_APP_WORDS] = {0};
	struct skbuf_dma_descriptor *skbuf_dma;
	struct dma_device *dma_dev;
	struct netdev_queue *txq;
	u32 csum_start_off;
	u32 csum_index_off;
	int sg_len;
	int ret;

	dma_dev = lp->tx_chan->device;
	sg_len = skb_shinfo(skb)->nr_frags + 1;
	if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= 1) {
		netif_stop_queue(ndev);
		if (net_ratelimit())
			netdev_warn(ndev, "TX ring unexpectedly full\n");
		return NETDEV_TX_BUSY;
	}

	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_head);
	if (!skbuf_dma)
		goto xmit_error_drop_skb;

	lp->tx_ring_head++;
	sg_init_table(skbuf_dma->sgl, sg_len);
	ret = skb_to_sgvec(skb, skbuf_dma->sgl, 0, skb->len);
	if (ret < 0)
		goto xmit_error_drop_skb;

	ret = dma_map_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
	if (!ret)
		goto xmit_error_drop_skb;

	/* Fill up app fields for checksum */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			app_metadata[0] |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			app_metadata[0] |= 1;
			app_metadata[1] = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		app_metadata[0] |= 2; /* Tx Full Checksum Offload Enabled */
	}

	dma_tx_desc = dma_dev->device_prep_slave_sg(lp->tx_chan, skbuf_dma->sgl,
			sg_len, DMA_MEM_TO_DEV,
			DMA_PREP_INTERRUPT, (void *)app_metadata);
	if (!dma_tx_desc)
		goto xmit_error_unmap_sg;

	skbuf_dma->skb = skb;
	skbuf_dma->sg_len = sg_len;
	dma_tx_desc->callback_param = lp;
	dma_tx_desc->callback_result = axienet_dma_tx_cb;
	txq = skb_get_tx_queue(lp->ndev, skb);
	netdev_tx_sent_queue(txq, skb->len);
	netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
			     1, 2);

	dmaengine_submit(dma_tx_desc);
	dma_async_issue_pending(lp->tx_chan);
	return NETDEV_TX_OK;

xmit_error_unmap_sg:
	dma_unmap_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
xmit_error_drop_skb:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/**
 * axienet_tx_poll - Invoked once a transmit is completed by the
 * Axi DMA Tx channel.
 * @napi: Pointer to NAPI structure.
 * @budget: Max number of TX packets to process.
 *
 * Return: Number of TX packets processed.
 *
 * This function is invoked from the NAPI processing to notify the completion
 * of transmit operation. It clears fields in the corresponding Tx BDs and
 * unmaps the corresponding buffer so that CPU can regain ownership of the
 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 * required.
 */
static int axienet_tx_poll(struct napi_struct *napi, int budget)
{
	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
	struct net_device *ndev = lp->ndev;
	u32 size = 0;
	int packets;

	packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, lp->tx_bd_num, false,
					&size, budget);

	if (packets) {
		netdev_completed_queue(ndev, packets, size);
		u64_stats_update_begin(&lp->tx_stat_sync);
		u64_stats_add(&lp->tx_packets, packets);
		u64_stats_add(&lp->tx_bytes, size);
		u64_stats_update_end(&lp->tx_stat_sync);

		/* Matches barrier in axienet_start_xmit */
		smp_mb();

		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
			netif_wake_queue(ndev);
	}

	if (packets < budget && napi_complete_done(napi, packets)) {
		/* Re-enable TX completion interrupts. This should
		 * cause an immediate interrupt if any TX packets are
		 * already pending.
		 */
		spin_lock_irq(&lp->tx_cr_lock);
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
		spin_unlock_irq(&lp->tx_cr_lock);
	}
	return packets;
}

/**
 * axienet_start_xmit - Starts the transmission.
 * @skb: sk_buff pointer that contains data to be Txed.
 * @ndev: Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK, on success
 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked from upper layers to initiate transmission. The
 * function uses the next available free BDs and populates their fields to
 * start the transmission. Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static netdev_tx_t
axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	u32 ii;
	u32 num_frag;
	u32 csum_start_off;
	u32 csum_index_off;
	skb_frag_t *frag;
	dma_addr_t tail_p, phys;
	u32 orig_tail_ptr, new_tail_ptr;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;

	orig_tail_ptr = lp->tx_bd_tail;
	new_tail_ptr = orig_tail_ptr;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &lp->tx_bd_v[orig_tail_ptr];

	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
		/* Should not happen as last start_xmit call should have
		 * checked for sufficient space and queue should only be
		 * woken when sufficient space is available.
		 */
		netif_stop_queue(ndev);
		if (net_ratelimit())
			netdev_warn(ndev, "TX ring unexpectedly full\n");
		return NETDEV_TX_BUSY;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			cur_p->app0 |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			cur_p->app0 |= 1;
			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
	}

	phys = dma_map_single(lp->dev, skb->data,
			      skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(lp->dev, phys))) {
		if (net_ratelimit())
			netdev_err(ndev, "TX DMA mapping error\n");
		ndev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	desc_set_phys_addr(lp, phys, cur_p);
	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;

	for (ii = 0; ii < num_frag; ii++) {
		if (++new_tail_ptr >= lp->tx_bd_num)
			new_tail_ptr = 0;
		cur_p = &lp->tx_bd_v[new_tail_ptr];
		frag = &skb_shinfo(skb)->frags[ii];
		phys = dma_map_single(lp->dev,
				      skb_frag_address(frag),
				      skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(lp->dev, phys))) {
			if (net_ratelimit())
				netdev_err(ndev, "TX DMA mapping error\n");
			ndev->stats.tx_dropped++;
			axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
					      true, NULL, 0);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		desc_set_phys_addr(lp, phys, cur_p);
		cur_p->cntrl = skb_frag_size(frag);
	}

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
	cur_p->skb = skb;

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
	if (++new_tail_ptr >= lp->tx_bd_num)
		new_tail_ptr = 0;
	WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
	netdev_sent_queue(ndev, skb->len);

	/* Start the transfer */
	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);

	/* Stop queue if next transmit may not have space */
	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
		netif_stop_queue(ndev);

		/* Matches barrier in axienet_tx_poll */
		smp_mb();

		/* Space might have just been freed - check again */
		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
			netif_wake_queue(ndev);
	}

	return NETDEV_TX_OK;
}

/**
 * axienet_dma_rx_cb - DMA engine callback for RX channel.
 * @data: Pointer to the skbuf_dma_descriptor structure.
 * @result: error reporting through dmaengine_result.
 * This function is called by dmaengine driver for RX channel to notify
 * that the packet is received.
 */
static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
{
	struct skbuf_dma_descriptor *skbuf_dma;
	size_t meta_len, meta_max_len, rx_len;
	struct axienet_local *lp = data;
	struct sk_buff *skb;
	u32 *app_metadata;

	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
	skb = skbuf_dma->skb;
	app_metadata = dmaengine_desc_get_metadata_ptr(skbuf_dma->desc, &meta_len,
						       &meta_max_len);
	dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size,
			 DMA_FROM_DEVICE);
	/* TODO: Derive app word index programmatically */
	rx_len = (app_metadata[LEN_APP] & 0xFFFF);
	skb_put(skb, rx_len);
	skb->protocol = eth_type_trans(skb, lp->ndev);
	skb->ip_summed = CHECKSUM_NONE;

	__netif_rx(skb);
	u64_stats_update_begin(&lp->rx_stat_sync);
	u64_stats_add(&lp->rx_packets, 1);
	u64_stats_add(&lp->rx_bytes, rx_len);
	u64_stats_update_end(&lp->rx_stat_sync);
	axienet_rx_submit_desc(lp->ndev);
	dma_async_issue_pending(lp->rx_chan);
}

/**
 * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
 * @napi: Pointer to NAPI structure.
 * @budget: Max number of RX packets to process.
 *
 * Return: Number of RX packets processed.
 */
static int axienet_rx_poll(struct napi_struct *napi, int budget)
{
	u32 length;
	u32 csumstatus;
	u32 size = 0;
	int packets = 0;
	dma_addr_t tail_p = 0;
	struct axidma_bd *cur_p;
	struct sk_buff *skb, *new_skb;
	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);

	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
		dma_addr_t phys;

		/* Ensure we see complete descriptor update */
		dma_rmb();

		skb = cur_p->skb;
		cur_p->skb = NULL;

		/* skb could be NULL if a previous pass already received the
		 * packet for this slot in the ring, but failed to refill it
		 * with a newly allocated buffer. In this case, don't try to
		 * receive it again.
		 */
		if (likely(skb)) {
			length = cur_p->app4 & 0x0000FFFF;

			phys = desc_get_phys_addr(lp, cur_p);
			dma_unmap_single(lp->dev, phys, lp->max_frm_size,
					 DMA_FROM_DEVICE);

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, lp->ndev);
			/*skb_checksum_none_assert(skb);*/
			skb->ip_summed = CHECKSUM_NONE;

			/* if we're doing Rx csum offload, set it up */
			if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
				csumstatus = (cur_p->app2 &
					      XAE_FULL_CSUM_STATUS_MASK) >> 3;
				if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
				    csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				}
			} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
				skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}

			napi_gro_receive(napi, skb);

			size += length;
			packets++;
		}

		new_skb = napi_alloc_skb(napi, lp->max_frm_size);
		if (!new_skb)
			break;

		phys = dma_map_single(lp->dev, new_skb->data,
				      lp->max_frm_size,
				      DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(lp->dev, phys))) {
			if (net_ratelimit())
				netdev_err(lp->ndev, "RX DMA mapping error\n");
			dev_kfree_skb(new_skb);
			break;
		}
		desc_set_phys_addr(lp, phys, cur_p);

		cur_p->cntrl = lp->max_frm_size;
		cur_p->status = 0;
		cur_p->skb = new_skb;

		/* Only update tail_p to mark this slot as usable after it has
		 * been successfully refilled.
		 */
		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;

		if (++lp->rx_bd_ci >= lp->rx_bd_num)
			lp->rx_bd_ci = 0;
		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
	}

	u64_stats_update_begin(&lp->rx_stat_sync);
	u64_stats_add(&lp->rx_packets, packets);
	u64_stats_add(&lp->rx_bytes, size);
	u64_stats_update_end(&lp->rx_stat_sync);

	if (tail_p)
		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);

	if (packets < budget && napi_complete_done(napi, packets)) {
		if (READ_ONCE(lp->rx_dim_enabled)) {
			struct dim_sample sample = {
				.time = ktime_get(),
				/* Safe because we are the only writer */
				.pkt_ctr = u64_stats_read(&lp->rx_packets),
				.byte_ctr = u64_stats_read(&lp->rx_bytes),
				.event_ctr = READ_ONCE(lp->rx_irqs),
			};

			net_dim(&lp->rx_dim, &sample);
		}

		/* Re-enable RX completion interrupts. This should
		 * cause an immediate interrupt if any RX packets are
		 * already pending.
		 */
		spin_lock_irq(&lp->rx_cr_lock);
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
		spin_unlock_irq(&lp->rx_cr_lock);
	}
	return packets;
}

/**
 * axienet_tx_irq - Tx Done Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
 * TX BD processing.
 */
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);

	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;

	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);

	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
			   (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
			   (lp->tx_bd_v[lp->tx_bd_ci]).phys);
		schedule_work(&lp->dma_err_task);
	} else {
		/* Disable further TX completion interrupts and schedule
		 * NAPI to handle the completions.
		 */
		if (napi_schedule_prep(&lp->napi_tx)) {
			u32 cr;

			spin_lock(&lp->tx_cr_lock);
			cr = lp->tx_dma_cr;
			cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
			axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
			spin_unlock(&lp->tx_cr_lock);
			__napi_schedule(&lp->napi_tx);
		}
	}

	return IRQ_HANDLED;
}

/**
 * axienet_rx_irq - Rx Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
 * processing.
 */
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);

	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;

	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);

	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
			   (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
			   (lp->rx_bd_v[lp->rx_bd_ci]).phys);
		schedule_work(&lp->dma_err_task);
	} else {
		/* Disable further RX completion interrupts and schedule
		 * NAPI receive.
		 */
		WRITE_ONCE(lp->rx_irqs, READ_ONCE(lp->rx_irqs) + 1);
		if (napi_schedule_prep(&lp->napi_rx)) {
			u32 cr;

			spin_lock(&lp->rx_cr_lock);
			cr = lp->rx_dma_cr;
			cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
			axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
			spin_unlock(&lp->rx_cr_lock);

			__napi_schedule(&lp->napi_rx);
		}
	}

	return IRQ_HANDLED;
}

/**
 * axienet_eth_irq - Ethernet core Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
 *
 * Handle miscellaneous conditions indicated by Ethernet core IRQ.
 */
static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);
	unsigned int pending;

	pending = axienet_ior(lp, XAE_IP_OFFSET);
	if (!pending)
		return IRQ_NONE;

	if (pending & XAE_INT_RXFIFOOVR_MASK)
		ndev->stats.rx_missed_errors++;

	if (pending & XAE_INT_RXRJECT_MASK)
		ndev->stats.rx_dropped++;

	axienet_iow(lp, XAE_IS_OFFSET, pending);
	return IRQ_HANDLED;
}

static void axienet_dma_err_handler(struct work_struct *work);

/**
 * axienet_rx_submit_desc - Submit an Rx descriptor to the dmaengine.
 * Allocate an skbuff, map the scatterlist, obtain a descriptor,
 * add the callback information and submit the descriptor.
 *
 * @ndev: net_device pointer
 *
 */
static void axienet_rx_submit_desc(struct net_device *ndev)
{
	struct dma_async_tx_descriptor *dma_rx_desc = NULL;
	struct axienet_local *lp = netdev_priv(ndev);
	struct skbuf_dma_descriptor *skbuf_dma;
	struct sk_buff *skb;
	dma_addr_t addr;

	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_head);
	if (!skbuf_dma)
		return;

	lp->rx_ring_head++;
	skb = netdev_alloc_skb(ndev, lp->max_frm_size);
	if (!skb)
		return;

	sg_init_table(skbuf_dma->sgl, 1);
	addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(lp->dev, addr))) {
		if (net_ratelimit())
			netdev_err(ndev, "DMA mapping error\n");
		goto rx_submit_err_free_skb;
	}
	sg_dma_address(skbuf_dma->sgl) = addr;
	sg_dma_len(skbuf_dma->sgl) = lp->max_frm_size;
	dma_rx_desc = dmaengine_prep_slave_sg(lp->rx_chan, skbuf_dma->sgl,
					      1, DMA_DEV_TO_MEM,
					      DMA_PREP_INTERRUPT);
	if (!dma_rx_desc)
		goto rx_submit_err_unmap_skb;

	skbuf_dma->skb = skb;
	skbuf_dma->dma_address = sg_dma_address(skbuf_dma->sgl);
	skbuf_dma->desc = dma_rx_desc;
	dma_rx_desc->callback_param = lp;
	dma_rx_desc->callback_result = axienet_dma_rx_cb;
	dmaengine_submit(dma_rx_desc);

	return;

rx_submit_err_unmap_skb:
	dma_unmap_single(lp->dev, addr, lp->max_frm_size, DMA_FROM_DEVICE);
rx_submit_err_free_skb:
	dev_kfree_skb(skb);
}

/**
 * axienet_init_dmaengine - init the dmaengine code.
 * @ndev: Pointer to net_device structure
 *
 * Return: 0, on success.
 *	    non-zero error value on failure
 *
 * This is the dmaengine initialization code.
 */
static int axienet_init_dmaengine(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);
	struct skbuf_dma_descriptor *skbuf_dma;
	int i, ret;

	lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0");
	if (IS_ERR(lp->tx_chan)) {
		dev_err(lp->dev, "No Ethernet DMA (TX) channel found\n");
		return PTR_ERR(lp->tx_chan);
	}

	lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0");
	if (IS_ERR(lp->rx_chan)) {
		ret = PTR_ERR(lp->rx_chan);
		dev_err(lp->dev, "No Ethernet DMA (RX) channel found\n");
		goto err_dma_release_tx;
	}

	lp->tx_ring_tail = 0;
	lp->tx_ring_head = 0;
	lp->rx_ring_tail = 0;
	lp->rx_ring_head = 0;
	lp->tx_skb_ring = kcalloc(TX_BD_NUM_MAX, sizeof(*lp->tx_skb_ring),
				  GFP_KERNEL);
	if (!lp->tx_skb_ring) {
		ret = -ENOMEM;
		goto err_dma_release_rx;
	}
	for (i = 0; i < TX_BD_NUM_MAX; i++) {
		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
		if (!skbuf_dma) {
			ret = -ENOMEM;
			goto err_free_tx_skb_ring;
		}
		lp->tx_skb_ring[i] = skbuf_dma;
	}

	lp->rx_skb_ring = kcalloc(RX_BUF_NUM_DEFAULT, sizeof(*lp->rx_skb_ring),
				  GFP_KERNEL);
	if (!lp->rx_skb_ring) {
		ret = -ENOMEM;
		goto err_free_tx_skb_ring;
	}
	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) {
		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
		if (!skbuf_dma) {
			ret = -ENOMEM;
			goto err_free_rx_skb_ring;
		}
		lp->rx_skb_ring[i] = skbuf_dma;
	}
	/* TODO: Instead of BD_NUM_DEFAULT use runtime support */
	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
		axienet_rx_submit_desc(ndev);
	dma_async_issue_pending(lp->rx_chan);

	return 0;

err_free_rx_skb_ring:
	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
		kfree(lp->rx_skb_ring[i]);
	kfree(lp->rx_skb_ring);
err_free_tx_skb_ring:
	for (i = 0; i < TX_BD_NUM_MAX; i++)
		kfree(lp->tx_skb_ring[i]);
	kfree(lp->tx_skb_ring);
err_dma_release_rx:
	dma_release_channel(lp->rx_chan);
err_dma_release_tx:
	dma_release_channel(lp->tx_chan);
	return ret;
}

/**
 * axienet_init_legacy_dma - init the dma legacy code.
 * @ndev: Pointer to net_device structure
 *
 * Return: 0, on success.
 *	    non-zero error value on failure
 *
 * This is the dma initialization code. It also allocates interrupt
 * service routines, enables the interrupt lines and ISR handling.
 *
 */
static int axienet_init_legacy_dma(struct net_device *ndev)
{
	int ret;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Enable worker thread for Axi DMA error handling */
	lp->stopping = false;
	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);

	napi_enable(&lp->napi_rx);
	napi_enable(&lp->napi_tx);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_rx_irq;
	/* Enable interrupts for Axi Ethernet core (if defined) */
	if (lp->eth_irq > 0) {
		ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
				  ndev->name, ndev);
		if (ret)
			goto err_eth_irq;
	}

	return 0;

err_eth_irq:
	free_irq(lp->rx_irq, ndev);
err_rx_irq:
	free_irq(lp->tx_irq, ndev);
err_tx_irq:
	napi_disable(&lp->napi_tx);
	napi_disable(&lp->napi_rx);
	cancel_work_sync(&lp->dma_err_task);
	dev_err(lp->dev, "request_irq() failed\n");
	return ret;
}
1632
6b1b40f7
SBNG
1633/**
1634 * axienet_open - Driver open routine.
1635 * @ndev: Pointer to net_device structure
1636 *
1637 * Return: 0, on success.
1638 * non-zero error value on failure
1639 *
1640 * This is the driver open routine. It calls phylink_start to start the
1641 * PHY device.
1642 * It also registers the interrupt service routines and enables interrupt
1643 * handling. The Axi Ethernet core is reset through the Axi DMA core and the
1644 * buffer descriptors are initialized.
1645 */
1646static int axienet_open(struct net_device *ndev)
1647{
1648 int ret;
1649 struct axienet_local *lp = netdev_priv(ndev);
1650
6b1b40f7
SBNG
1651 /* When we do an Axi Ethernet reset, it resets the complete core
1652 * including the MDIO. MDIO must be disabled before resetting.
1653 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
1654 */
1655 axienet_lock_mii(lp);
1656 ret = axienet_device_reset(ndev);
1657 axienet_unlock_mii(lp);
	if (ret)
		return ret;
1658
1659 ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
1660 if (ret) {
1661 dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
1662 return ret;
1663 }
1664
1665 phylink_start(lp->phylink);
1666
76abb5d6
SA
1667 /* Start the statistics refresh work */
1668 schedule_delayed_work(&lp->stats_work, 0);
1669
6a91b846
RSP
1670 if (lp->use_dmaengine) {
1671 /* Enable interrupts for Axi Ethernet core (if defined) */
1672 if (lp->eth_irq > 0) {
1673 ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1674 ndev->name, ndev);
1675 if (ret)
1676 goto err_phy;
1677 }
1678
1679 ret = axienet_init_dmaengine(ndev);
1680 if (ret < 0)
1681 goto err_free_eth_irq;
1682 } else {
6b1b40f7
SBNG
1683 ret = axienet_init_legacy_dma(ndev);
1684 if (ret)
1685 goto err_phy;
1686 }
1687
1688 return 0;
1689
6a91b846
RSP
1690err_free_eth_irq:
1691 if (lp->eth_irq > 0)
1692 free_irq(lp->eth_irq, ndev);
6b1b40f7 1693err_phy:
e1d27d29 1694 cancel_work_sync(&lp->rx_dim.work);
76abb5d6 1695 cancel_delayed_work_sync(&lp->stats_work);
6b1b40f7
SBNG
1696 phylink_stop(lp->phylink);
1697 phylink_disconnect_phy(lp->phylink);
1698 return ret;
1699}
1700
8a3b7a25
DB
1701/**
1702 * axienet_stop - Driver stop routine.
1703 * @ndev: Pointer to net_device structure
1704 *
b0d081c5 1705 * Return: 0, on success.
8a3b7a25 1706 *
f5203a3d 1707 * This is the driver stop routine. It calls phylink_stop and disconnects the PHY
8a3b7a25
DB
1708 * device. It also removes the interrupt handlers and disables the interrupts.
1709 * The Axi DMA Tx/Rx BDs are released.
1710 */
1711static int axienet_stop(struct net_device *ndev)
1712{
8a3b7a25 1713 struct axienet_local *lp = netdev_priv(ndev);
6a91b846 1714 int i;
8a3b7a25 1715
6b1b40f7 1716 if (!lp->use_dmaengine) {
858430db
SA
1717 WRITE_ONCE(lp->stopping, true);
1718 flush_work(&lp->dma_err_task);
1719
6b1b40f7
SBNG
1720 napi_disable(&lp->napi_tx);
1721 napi_disable(&lp->napi_rx);
1722 }
cc37610c 1723
e1d27d29 1724 cancel_work_sync(&lp->rx_dim.work);
76abb5d6
SA
1725 cancel_delayed_work_sync(&lp->stats_work);
1726
f5203a3d
RH
1727 phylink_stop(lp->phylink);
1728 phylink_disconnect_phy(lp->phylink);
1729
8a3b7a25
DB
1730 axienet_setoptions(ndev, lp->options &
1731 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1732
6b1b40f7
SBNG
1733 if (!lp->use_dmaengine) {
1734 axienet_dma_stop(lp);
1735 cancel_work_sync(&lp->dma_err_task);
1736 free_irq(lp->tx_irq, ndev);
1737 free_irq(lp->rx_irq, ndev);
1738 axienet_dma_bd_release(ndev);
6a91b846
RSP
1739 } else {
1740 dmaengine_terminate_sync(lp->tx_chan);
1741 dmaengine_synchronize(lp->tx_chan);
1742 dmaengine_terminate_sync(lp->rx_chan);
1743 dmaengine_synchronize(lp->rx_chan);
1744
1745 for (i = 0; i < TX_BD_NUM_MAX; i++)
1746 kfree(lp->tx_skb_ring[i]);
1747 kfree(lp->tx_skb_ring);
1748 for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1749 kfree(lp->rx_skb_ring[i]);
1750 kfree(lp->rx_skb_ring);
1751
1752 dma_release_channel(lp->rx_chan);
1753 dma_release_channel(lp->tx_chan);
6b1b40f7 1754 }
489d4d77 1755
c900e49d 1756 netdev_reset_queue(ndev);
489d4d77
RH
1757 axienet_iow(lp, XAE_IE_OFFSET, 0);
1758
522856ce
RH
1759 if (lp->eth_irq > 0)
1760 free_irq(lp->eth_irq, ndev);
8a3b7a25
DB
1761 return 0;
1762}
1763
1764/**
1765 * axienet_change_mtu - Driver change mtu routine.
1766 * @ndev: Pointer to net_device structure
1767 * @new_mtu: New mtu value to be applied
1768 *
b0d081c5 1769 * Return: 0 on success, -EBUSY if the device is running, or -EINVAL if the
 * new MTU does not fit in the configured Rx memory.
8a3b7a25
DB
1770 *
1771 * This is the change MTU driver routine. It verifies that the new MTU (plus
1772 * the VLAN Ethernet header and frame trailer) fits in the Rx memory configured
1773 * in hardware before applying it. It can be called only when the device is not up.
1774 */
1775static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
1776{
1777 struct axienet_local *lp = netdev_priv(ndev);
1778
1779 if (netif_running(ndev))
1780 return -EBUSY;
f080a8c3
ST
1781
1782 if ((new_mtu + VLAN_ETH_HLEN +
1783 XAE_TRL_SIZE) > lp->rxmem)
1784 return -EINVAL;
1785
1eb2cded 1786 WRITE_ONCE(ndev->mtu, new_mtu);
8a3b7a25
DB
1787
1788 return 0;
1789}
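/* Worked example for the check above (values illustrative): VLAN_ETH_HLEN is
 * 18 bytes (14-byte Ethernet header plus 4-byte VLAN tag) and XAE_TRL_SIZE is
 * the 4-byte frame trailer, so with an assumed "xlnx,rxmem" of 9600 bytes any
 * MTU up to 9600 - 18 - 4 = 9578 passes this check, while larger requests
 * fail with -EINVAL. The XAE_JUMBO_MTU cap of 9000 set in ndev->max_mtu at
 * probe time still applies on top of this.
 */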
1790
1791#ifdef CONFIG_NET_POLL_CONTROLLER
1792/**
1793 * axienet_poll_controller - Axi Ethernet poll mechanism.
1794 * @ndev: Pointer to net_device structure
1795 *
1796 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
1797 * to polling the ISRs and are enabled back after the polling is done.
1798 */
1799static void axienet_poll_controller(struct net_device *ndev)
1800{
1801 struct axienet_local *lp = netdev_priv(ndev);
f7061a3e 1802
8a3b7a25
DB
1803 disable_irq(lp->tx_irq);
1804 disable_irq(lp->rx_irq);
1805 axienet_rx_irq(lp->rx_irq, ndev);
1806 axienet_tx_irq(lp->tx_irq, ndev);
1807 enable_irq(lp->tx_irq);
1808 enable_irq(lp->rx_irq);
1809}
1810#endif
1811
2a9b65ea
AP
1812static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1813{
1814 struct axienet_local *lp = netdev_priv(dev);
1815
1816 if (!netif_running(dev))
1817 return -EINVAL;
1818
1819 return phylink_mii_ioctl(lp->phylink, rq, cmd);
1820}
1821
cb45a8bf
RH
1822static void
1823axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1824{
1825 struct axienet_local *lp = netdev_priv(dev);
1826 unsigned int start;
1827
1828 netdev_stats_to_stats64(stats, &dev->stats);
1829
1830 do {
068c38ad 1831 start = u64_stats_fetch_begin(&lp->rx_stat_sync);
cb45a8bf
RH
1832 stats->rx_packets = u64_stats_read(&lp->rx_packets);
1833 stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
068c38ad 1834 } while (u64_stats_fetch_retry(&lp->rx_stat_sync, start));
cb45a8bf
RH
1835
1836 do {
068c38ad 1837 start = u64_stats_fetch_begin(&lp->tx_stat_sync);
cb45a8bf
RH
1838 stats->tx_packets = u64_stats_read(&lp->tx_packets);
1839 stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
068c38ad 1840 } while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));
76abb5d6
SA
1841
1842 if (!(lp->features & XAE_FEATURE_STATS))
1843 return;
1844
1845 do {
1846 start = read_seqcount_begin(&lp->hw_stats_seqcount);
1847 stats->rx_length_errors =
1848 axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
1849 stats->rx_crc_errors = axienet_stat(lp, STAT_RX_FCS_ERRORS);
1850 stats->rx_frame_errors =
1851 axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
1852 stats->rx_errors = axienet_stat(lp, STAT_UNDERSIZE_FRAMES) +
1853 axienet_stat(lp, STAT_FRAGMENT_FRAMES) +
1854 stats->rx_length_errors +
1855 stats->rx_crc_errors +
1856 stats->rx_frame_errors;
1857 stats->multicast = axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
1858
1859 stats->tx_aborted_errors =
1860 axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
1861 stats->tx_fifo_errors =
1862 axienet_stat(lp, STAT_TX_UNDERRUN_ERRORS);
1863 stats->tx_window_errors =
1864 axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
1865 stats->tx_errors = axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL) +
1866 stats->tx_aborted_errors +
1867 stats->tx_fifo_errors +
1868 stats->tx_window_errors;
1869 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
cb45a8bf
RH
1870}
1871
8a3b7a25
DB
1872static const struct net_device_ops axienet_netdev_ops = {
1873 .ndo_open = axienet_open,
1874 .ndo_stop = axienet_stop,
1875 .ndo_start_xmit = axienet_start_xmit,
cb45a8bf 1876 .ndo_get_stats64 = axienet_get_stats64,
8a3b7a25
DB
1877 .ndo_change_mtu = axienet_change_mtu,
1878 .ndo_set_mac_address = netdev_set_mac_address,
1879 .ndo_validate_addr = eth_validate_addr,
a7605370 1880 .ndo_eth_ioctl = axienet_ioctl,
8a3b7a25
DB
1881 .ndo_set_rx_mode = axienet_set_multicast_list,
1882#ifdef CONFIG_NET_POLL_CONTROLLER
1883 .ndo_poll_controller = axienet_poll_controller,
1884#endif
1885};
1886
6a91b846
RSP
1887static const struct net_device_ops axienet_netdev_dmaengine_ops = {
1888 .ndo_open = axienet_open,
1889 .ndo_stop = axienet_stop,
1890 .ndo_start_xmit = axienet_start_xmit_dmaengine,
1891 .ndo_get_stats64 = axienet_get_stats64,
1892 .ndo_change_mtu = axienet_change_mtu,
1893 .ndo_set_mac_address = netdev_set_mac_address,
1894 .ndo_validate_addr = eth_validate_addr,
1895 .ndo_eth_ioctl = axienet_ioctl,
1896 .ndo_set_rx_mode = axienet_set_multicast_list,
1897};
1898
8a3b7a25
DB
1899/**
1900 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
1901 * @ndev: Pointer to net_device structure
1902 * @ed: Pointer to ethtool_drvinfo structure
1903 *
1904 * This implements ethtool command for getting the driver information.
1905 * Issue "ethtool -i ethX" under linux prompt to execute this function.
1906 */
1907static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
1908 struct ethtool_drvinfo *ed)
1909{
f029c781
WS
1910 strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
1911 strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
8a3b7a25
DB
1912}
1913
1914/**
1915 * axienet_ethtools_get_regs_len - Get the total regs length present in the
1916 * AxiEthernet core.
1917 * @ndev: Pointer to net_device structure
1918 *
1919 * This implements ethtool command for getting the total register length
1920 * information.
b0d081c5
MS
1921 *
1922 * Return: the total regs length
8a3b7a25
DB
1923 */
1924static int axienet_ethtools_get_regs_len(struct net_device *ndev)
1925{
1926 return sizeof(u32) * AXIENET_REGS_N;
1927}
1928
1929/**
1930 * axienet_ethtools_get_regs - Dump the contents of all registers present
1931 * in AxiEthernet core.
1932 * @ndev: Pointer to net_device structure
1933 * @regs: Pointer to ethtool_regs structure
1934 * @ret: Void pointer used to return the contents of the registers.
1935 *
1936 * This implements ethtool command for getting the Axi Ethernet register dump.
1937 * Issue "ethtool -d ethX" to execute this function.
1938 */
1939static void axienet_ethtools_get_regs(struct net_device *ndev,
1940 struct ethtool_regs *regs, void *ret)
1941{
7fe85bb3 1942 u32 *data = (u32 *)ret;
8a3b7a25
DB
1943 size_t len = sizeof(u32) * AXIENET_REGS_N;
1944 struct axienet_local *lp = netdev_priv(ndev);
1945
1946 regs->version = 0;
1947 regs->len = len;
1948
1949 memset(data, 0, len);
1950 data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
1951 data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
1952 data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
1953 data[3] = axienet_ior(lp, XAE_IS_OFFSET);
1954 data[4] = axienet_ior(lp, XAE_IP_OFFSET);
1955 data[5] = axienet_ior(lp, XAE_IE_OFFSET);
1956 data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
1957 data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
1958 data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
1959 data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
1960 data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
1961 data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
1962 data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
1963 data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
1964 data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
1965 data[15] = axienet_ior(lp, XAE_TC_OFFSET);
1966 data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
1967 data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
1968 data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
1969 data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
1970 data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
1971 data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
1972 data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
8a3b7a25
DB
1973 data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
1974 data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
1975 data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
1976 data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
1977 data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
6b1b40f7
SBNG
1978 if (!lp->use_dmaengine) {
1979 data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1980 data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1981 data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
1982 data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
1983 data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1984 data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1985 data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
1986 data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
1987 }
8a3b7a25
DB
1988}
1989
74624944
HC
1990static void
1991axienet_ethtools_get_ringparam(struct net_device *ndev,
1992 struct ethtool_ringparam *ering,
1993 struct kernel_ethtool_ringparam *kernel_ering,
1994 struct netlink_ext_ack *extack)
8b09ca82
RH
1995{
1996 struct axienet_local *lp = netdev_priv(ndev);
1997
1998 ering->rx_max_pending = RX_BD_NUM_MAX;
1999 ering->rx_mini_max_pending = 0;
2000 ering->rx_jumbo_max_pending = 0;
2001 ering->tx_max_pending = TX_BD_NUM_MAX;
2002 ering->rx_pending = lp->rx_bd_num;
2003 ering->rx_mini_pending = 0;
2004 ering->rx_jumbo_pending = 0;
2005 ering->tx_pending = lp->tx_bd_num;
2006}
2007
74624944
HC
2008static int
2009axienet_ethtools_set_ringparam(struct net_device *ndev,
2010 struct ethtool_ringparam *ering,
2011 struct kernel_ethtool_ringparam *kernel_ering,
2012 struct netlink_ext_ack *extack)
8b09ca82
RH
2013{
2014 struct axienet_local *lp = netdev_priv(ndev);
2015
2016 if (ering->rx_pending > RX_BD_NUM_MAX ||
2017 ering->rx_mini_pending ||
2018 ering->rx_jumbo_pending ||
70f5817d
RH
2019 ering->tx_pending < TX_BD_NUM_MIN ||
2020 ering->tx_pending > TX_BD_NUM_MAX)
8b09ca82
RH
2021 return -EINVAL;
2022
2023 if (netif_running(ndev))
2024 return -EBUSY;
2025
2026 lp->rx_bd_num = ering->rx_pending;
2027 lp->tx_bd_num = ering->tx_pending;
2028 return 0;
2029}
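/* Example usage (interface name illustrative); the interface must be down,
 * otherwise the -EBUSY check above triggers:
 *
 *   ethtool -G eth0 rx 512 tx 256
 *
 * In the legacy (non-dmaengine) DMA path the new counts take effect the next
 * time the interface is brought up, when the buffer descriptor rings are
 * reallocated.
 */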
2030
8a3b7a25
DB
2031/**
2032 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
2033 * Tx and Rx paths.
2034 * @ndev: Pointer to net_device structure
2035 * @epauseparm: Pointer to ethtool_pauseparam structure.
2036 *
2037 * This implements ethtool command for getting axi ethernet pause frame
2038 * setting. Issue "ethtool -a ethX" to execute this function.
2039 */
2040static void
2041axienet_ethtools_get_pauseparam(struct net_device *ndev,
2042 struct ethtool_pauseparam *epauseparm)
2043{
8a3b7a25 2044 struct axienet_local *lp = netdev_priv(ndev);
f5203a3d
RH
2045
2046 phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
8a3b7a25
DB
2047}
2048
2049/**
2050 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
2051 * settings.
2052 * @ndev: Pointer to net_device structure
b0d081c5 2053 * @epauseparm: Pointer to ethtool_pauseparam structure
8a3b7a25
DB
2054 *
2055 * This implements ethtool command for enabling flow control on Rx and Tx
2056 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
2057 * function.
b0d081c5
MS
2058 *
2059 * Return: 0 on success, or a negative error code returned by phylink on failure
8a3b7a25
DB
2060 */
2061static int
2062axienet_ethtools_set_pauseparam(struct net_device *ndev,
2063 struct ethtool_pauseparam *epauseparm)
2064{
8a3b7a25
DB
2065 struct axienet_local *lp = netdev_priv(ndev);
2066
f5203a3d 2067 return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
8a3b7a25
DB
2068}
2069
d048c717
SA
2070/**
2071 * axienet_update_coalesce_rx() - Set RX CR
2072 * @lp: Device private data
2073 * @cr: Value to write to the RX CR
2074 * @mask: Bits to set from @cr
2075 */
2076static void axienet_update_coalesce_rx(struct axienet_local *lp, u32 cr,
2077 u32 mask)
2078{
2079 spin_lock_irq(&lp->rx_cr_lock);
2080 lp->rx_dma_cr &= ~mask;
2081 lp->rx_dma_cr |= cr;
2082 /* If DMA isn't started, then the settings will be applied the next
2083 * time dma_start() is called.
2084 */
2085 if (lp->rx_dma_started) {
2086 u32 reg = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
2087
2088 /* Don't enable IRQs if they are disabled by NAPI */
2089 if (reg & XAXIDMA_IRQ_ALL_MASK)
2090 cr = lp->rx_dma_cr;
2091 else
2092 cr = lp->rx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK;
2093 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
2094 }
2095 spin_unlock_irq(&lp->rx_cr_lock);
2096}
2097
e1d27d29
SA
2098/**
2099 * axienet_dim_coalesce_count_rx() - RX coalesce count for DIM
2100 * @lp: Device private data
2101 */
2102static u32 axienet_dim_coalesce_count_rx(struct axienet_local *lp)
2103{
2104 return min(1 << (lp->rx_dim.profile_ix << 1), 255);
2105}
2106
2107/**
2108 * axienet_rx_dim_work() - Adjust RX DIM settings
2109 * @work: The work struct
2110 */
2111static void axienet_rx_dim_work(struct work_struct *work)
2112{
2113 struct axienet_local *lp =
2114 container_of(work, struct axienet_local, rx_dim.work);
2115 u32 cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp), 0);
2116 u32 mask = XAXIDMA_COALESCE_MASK | XAXIDMA_IRQ_IOC_MASK |
2117 XAXIDMA_IRQ_ERROR_MASK;
2118
2119 axienet_update_coalesce_rx(lp, cr, mask);
2120 lp->rx_dim.state = DIM_START_MEASURE;
2121}
2122
d048c717
SA
2123/**
2124 * axienet_update_coalesce_tx() - Set TX CR
2125 * @lp: Device private data
2126 * @cr: Value to write to the TX CR
2127 * @mask: Bits to set from @cr
2128 */
2129static void axienet_update_coalesce_tx(struct axienet_local *lp, u32 cr,
2130 u32 mask)
2131{
2132 spin_lock_irq(&lp->tx_cr_lock);
2133 lp->tx_dma_cr &= ~mask;
2134 lp->tx_dma_cr |= cr;
2135 /* If DMA isn't started, then the settings will be applied the next
2136 * time dma_start() is called.
2137 */
2138 if (lp->tx_dma_started) {
2139 u32 reg = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
2140
2141 /* Don't enable IRQs if they are disabled by NAPI */
2142 if (reg & XAXIDMA_IRQ_ALL_MASK)
2143 cr = lp->tx_dma_cr;
2144 else
2145 cr = lp->tx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK;
2146 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
2147 }
2148 spin_unlock_irq(&lp->tx_cr_lock);
2149}
2150
8a3b7a25
DB
2151/**
2152 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
2153 * @ndev: Pointer to net_device structure
2154 * @ecoalesce: Pointer to ethtool_coalesce structure
f3ccfda1
YM
2155 * @kernel_coal: ethtool CQE mode setting structure
2156 * @extack: extack for reporting error messages
8a3b7a25
DB
2157 *
2158 * This implements ethtool command for getting the DMA interrupt coalescing
2159 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
2160 * execute this function.
b0d081c5
MS
2161 *
2162 * Return: 0 always
8a3b7a25 2163 */
f3ccfda1
YM
2164static int
2165axienet_ethtools_get_coalesce(struct net_device *ndev,
2166 struct ethtool_coalesce *ecoalesce,
2167 struct kernel_ethtool_coalesce *kernel_coal,
2168 struct netlink_ext_ack *extack)
8a3b7a25 2169{
8a3b7a25 2170 struct axienet_local *lp = netdev_priv(ndev);
eb80520e 2171 u32 cr;
0b79b8dc 2172
e1d27d29
SA
2173 ecoalesce->use_adaptive_rx_coalesce = lp->rx_dim_enabled;
2174
eb80520e
SA
2175 spin_lock_irq(&lp->rx_cr_lock);
2176 cr = lp->rx_dma_cr;
2177 spin_unlock_irq(&lp->rx_cr_lock);
2178 axienet_coalesce_params(lp, cr,
2179 &ecoalesce->rx_max_coalesced_frames,
2180 &ecoalesce->rx_coalesce_usecs);
2181
2182 spin_lock_irq(&lp->tx_cr_lock);
2183 cr = lp->tx_dma_cr;
2184 spin_unlock_irq(&lp->tx_cr_lock);
2185 axienet_coalesce_params(lp, cr,
2186 &ecoalesce->tx_max_coalesced_frames,
2187 &ecoalesce->tx_coalesce_usecs);
8a3b7a25
DB
2188 return 0;
2189}
2190
2191/**
2192 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
2193 * @ndev: Pointer to net_device structure
2194 * @ecoalesce: Pointer to ethtool_coalesce structure
f3ccfda1
YM
2195 * @kernel_coal: ethtool CQE mode setting structure
2196 * @extack: extack for reporting error messages
8a3b7a25
DB
2197 *
2198 * This implements ethtool command for setting the DMA interrupt coalescing
2199 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
2200 * prompt to execute this function.
b0d081c5
MS
2201 *
2202 * Return: 0, on success, Non-zero error value on failure.
8a3b7a25 2203 */
f3ccfda1
YM
2204static int
2205axienet_ethtools_set_coalesce(struct net_device *ndev,
2206 struct ethtool_coalesce *ecoalesce,
2207 struct kernel_ethtool_coalesce *kernel_coal,
2208 struct netlink_ext_ack *extack)
8a3b7a25
DB
2209{
2210 struct axienet_local *lp = netdev_priv(ndev);
e1d27d29
SA
2211 bool new_dim = ecoalesce->use_adaptive_rx_coalesce;
2212 bool old_dim = lp->rx_dim_enabled;
2213 u32 cr, mask = ~XAXIDMA_CR_RUNSTOP_MASK;
8a3b7a25 2214
c17ff476
SA
2215 if (ecoalesce->rx_max_coalesced_frames > 255 ||
2216 ecoalesce->tx_max_coalesced_frames > 255) {
2217 NL_SET_ERR_MSG(extack, "frames must be less than 256");
2218 return -EINVAL;
2219 }
2220
9d301a53
SA
2221 if (!ecoalesce->rx_max_coalesced_frames ||
2222 !ecoalesce->tx_max_coalesced_frames) {
2223 NL_SET_ERR_MSG(extack, "frames must be non-zero");
2224 return -EINVAL;
2225 }
2226
e1d27d29 2227 if (((ecoalesce->rx_max_coalesced_frames > 1 || new_dim) &&
9d301a53
SA
2228 !ecoalesce->rx_coalesce_usecs) ||
2229 (ecoalesce->tx_max_coalesced_frames > 1 &&
2230 !ecoalesce->tx_coalesce_usecs)) {
2231 NL_SET_ERR_MSG(extack,
2232 "usecs must be non-zero when frames is greater than one");
2233 return -EINVAL;
2234 }
2235
e1d27d29
SA
2236 if (new_dim && !old_dim) {
2237 cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp),
2238 ecoalesce->rx_coalesce_usecs);
2239 } else if (!new_dim) {
2240 if (old_dim) {
2241 WRITE_ONCE(lp->rx_dim_enabled, false);
2242 napi_synchronize(&lp->napi_rx);
2243 flush_work(&lp->rx_dim.work);
2244 }
2245
2246 cr = axienet_calc_cr(lp, ecoalesce->rx_max_coalesced_frames,
2247 ecoalesce->rx_coalesce_usecs);
2248 } else {
2249 /* Dummy value for count just to calculate timer */
2250 cr = axienet_calc_cr(lp, 2, ecoalesce->rx_coalesce_usecs);
2251 mask = XAXIDMA_DELAY_MASK | XAXIDMA_IRQ_DELAY_MASK;
2252 }
2253
2254 axienet_update_coalesce_rx(lp, cr, mask);
2255 if (new_dim && !old_dim)
2256 WRITE_ONCE(lp->rx_dim_enabled, true);
d048c717 2257
eb80520e
SA
2258 cr = axienet_calc_cr(lp, ecoalesce->tx_max_coalesced_frames,
2259 ecoalesce->tx_coalesce_usecs);
d048c717 2260 axienet_update_coalesce_tx(lp, cr, ~XAXIDMA_CR_RUNSTOP_MASK);
8a3b7a25
DB
2261 return 0;
2262}
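/* Example usage (interface name illustrative):
 *
 *   ethtool -C eth0 adaptive-rx on rx-usecs 50
 *   ethtool -C eth0 adaptive-rx off rx-frames 16 rx-usecs 50
 *   ethtool -C eth0 tx-frames 32 tx-usecs 100
 *
 * Frame counts must be in the range 1-255, and the RX/TX usecs value must be
 * non-zero whenever the matching frame count is above one (or, for RX, when
 * adaptive coalescing is enabled), matching the checks above.
 */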
2263
f5203a3d
RH
2264static int
2265axienet_ethtools_get_link_ksettings(struct net_device *ndev,
2266 struct ethtool_link_ksettings *cmd)
2267{
2268 struct axienet_local *lp = netdev_priv(ndev);
2269
2270 return phylink_ethtool_ksettings_get(lp->phylink, cmd);
2271}
2272
2273static int
2274axienet_ethtools_set_link_ksettings(struct net_device *ndev,
2275 const struct ethtool_link_ksettings *cmd)
2276{
2277 struct axienet_local *lp = netdev_priv(ndev);
2278
2279 return phylink_ethtool_ksettings_set(lp->phylink, cmd);
2280}
2281
66b51663
RH
2282static int axienet_ethtools_nway_reset(struct net_device *dev)
2283{
2284 struct axienet_local *lp = netdev_priv(dev);
2285
2286 return phylink_ethtool_nway_reset(lp->phylink);
2287}
2288
76abb5d6
SA
2289static void axienet_ethtools_get_ethtool_stats(struct net_device *dev,
2290 struct ethtool_stats *stats,
2291 u64 *data)
2292{
2293 struct axienet_local *lp = netdev_priv(dev);
2294 unsigned int start;
2295
2296 do {
2297 start = read_seqcount_begin(&lp->hw_stats_seqcount);
2298 data[0] = axienet_stat(lp, STAT_RX_BYTES);
2299 data[1] = axienet_stat(lp, STAT_TX_BYTES);
2300 data[2] = axienet_stat(lp, STAT_RX_VLAN_FRAMES);
2301 data[3] = axienet_stat(lp, STAT_TX_VLAN_FRAMES);
2302 data[6] = axienet_stat(lp, STAT_TX_PFC_FRAMES);
2303 data[7] = axienet_stat(lp, STAT_RX_PFC_FRAMES);
2304 data[8] = axienet_stat(lp, STAT_USER_DEFINED0);
2305 data[9] = axienet_stat(lp, STAT_USER_DEFINED1);
2306 data[10] = axienet_stat(lp, STAT_USER_DEFINED2);
2307 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2308}
2309
2310static const char axienet_ethtool_stats_strings[][ETH_GSTRING_LEN] = {
2311 "Received bytes",
2312 "Transmitted bytes",
2313 "RX Good VLAN Tagged Frames",
2314 "TX Good VLAN Tagged Frames",
2315 "TX Good PFC Frames",
2316 "RX Good PFC Frames",
2317 "User Defined Counter 0",
2318 "User Defined Counter 1",
2319 "User Defined Counter 2",
2320};
2321
2322static void axienet_ethtools_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2323{
2324 switch (stringset) {
2325 case ETH_SS_STATS:
2326 memcpy(data, axienet_ethtool_stats_strings,
2327 sizeof(axienet_ethtool_stats_strings));
2328 break;
2329 }
2330}
2331
2332static int axienet_ethtools_get_sset_count(struct net_device *dev, int sset)
2333{
2334 struct axienet_local *lp = netdev_priv(dev);
2335
2336 switch (sset) {
2337 case ETH_SS_STATS:
2338 if (lp->features & XAE_FEATURE_STATS)
2339 return ARRAY_SIZE(axienet_ethtool_stats_strings);
2340 fallthrough;
2341 default:
2342 return -EOPNOTSUPP;
2343 }
2344}
2345
2346static void
2347axienet_ethtools_get_pause_stats(struct net_device *dev,
2348 struct ethtool_pause_stats *pause_stats)
2349{
2350 struct axienet_local *lp = netdev_priv(dev);
2351 unsigned int start;
2352
2353 if (!(lp->features & XAE_FEATURE_STATS))
2354 return;
2355
2356 do {
2357 start = read_seqcount_begin(&lp->hw_stats_seqcount);
2358 pause_stats->tx_pause_frames =
2359 axienet_stat(lp, STAT_TX_PAUSE_FRAMES);
2360 pause_stats->rx_pause_frames =
2361 axienet_stat(lp, STAT_RX_PAUSE_FRAMES);
2362 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2363}
2364
2365static void
2366axienet_ethtool_get_eth_mac_stats(struct net_device *dev,
2367 struct ethtool_eth_mac_stats *mac_stats)
2368{
2369 struct axienet_local *lp = netdev_priv(dev);
2370 unsigned int start;
2371
2372 if (!(lp->features & XAE_FEATURE_STATS))
2373 return;
2374
2375 do {
2376 start = read_seqcount_begin(&lp->hw_stats_seqcount);
2377 mac_stats->FramesTransmittedOK =
2378 axienet_stat(lp, STAT_TX_GOOD_FRAMES);
2379 mac_stats->SingleCollisionFrames =
2380 axienet_stat(lp, STAT_TX_SINGLE_COLLISION_FRAMES);
2381 mac_stats->MultipleCollisionFrames =
2382 axienet_stat(lp, STAT_TX_MULTIPLE_COLLISION_FRAMES);
2383 mac_stats->FramesReceivedOK =
2384 axienet_stat(lp, STAT_RX_GOOD_FRAMES);
2385 mac_stats->FrameCheckSequenceErrors =
2386 axienet_stat(lp, STAT_RX_FCS_ERRORS);
2387 mac_stats->AlignmentErrors =
2388 axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
2389 mac_stats->FramesWithDeferredXmissions =
2390 axienet_stat(lp, STAT_TX_DEFERRED_FRAMES);
2391 mac_stats->LateCollisions =
2392 axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
2393 mac_stats->FramesAbortedDueToXSColls =
2394 axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
2395 mac_stats->MulticastFramesXmittedOK =
2396 axienet_stat(lp, STAT_TX_MULTICAST_FRAMES);
2397 mac_stats->BroadcastFramesXmittedOK =
2398 axienet_stat(lp, STAT_TX_BROADCAST_FRAMES);
2399 mac_stats->FramesWithExcessiveDeferral =
2400 axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL);
2401 mac_stats->MulticastFramesReceivedOK =
2402 axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
2403 mac_stats->BroadcastFramesReceivedOK =
2404 axienet_stat(lp, STAT_RX_BROADCAST_FRAMES);
2405 mac_stats->InRangeLengthErrors =
2406 axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
2407 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2408}
2409
2410static void
2411axienet_ethtool_get_eth_ctrl_stats(struct net_device *dev,
2412 struct ethtool_eth_ctrl_stats *ctrl_stats)
2413{
2414 struct axienet_local *lp = netdev_priv(dev);
2415 unsigned int start;
2416
2417 if (!(lp->features & XAE_FEATURE_STATS))
2418 return;
2419
2420 do {
2421 start = read_seqcount_begin(&lp->hw_stats_seqcount);
2422 ctrl_stats->MACControlFramesTransmitted =
2423 axienet_stat(lp, STAT_TX_CONTROL_FRAMES);
2424 ctrl_stats->MACControlFramesReceived =
2425 axienet_stat(lp, STAT_RX_CONTROL_FRAMES);
2426 ctrl_stats->UnsupportedOpcodesReceived =
2427 axienet_stat(lp, STAT_RX_CONTROL_OPCODE_ERRORS);
2428 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2429}
2430
2431static const struct ethtool_rmon_hist_range axienet_rmon_ranges[] = {
2432 { 64, 64 },
2433 { 65, 127 },
2434 { 128, 255 },
2435 { 256, 511 },
2436 { 512, 1023 },
2437 { 1024, 1518 },
2438 { 1519, 16384 },
2439 { },
2440};
2441
2442static void
2443axienet_ethtool_get_rmon_stats(struct net_device *dev,
2444 struct ethtool_rmon_stats *rmon_stats,
2445 const struct ethtool_rmon_hist_range **ranges)
2446{
2447 struct axienet_local *lp = netdev_priv(dev);
2448 unsigned int start;
2449
2450 if (!(lp->features & XAE_FEATURE_STATS))
2451 return;
2452
2453 do {
2454 start = read_seqcount_begin(&lp->hw_stats_seqcount);
2455 rmon_stats->undersize_pkts =
2456 axienet_stat(lp, STAT_UNDERSIZE_FRAMES);
2457 rmon_stats->oversize_pkts =
2458 axienet_stat(lp, STAT_RX_OVERSIZE_FRAMES);
2459 rmon_stats->fragments =
2460 axienet_stat(lp, STAT_FRAGMENT_FRAMES);
2461
2462 rmon_stats->hist[0] =
2463 axienet_stat(lp, STAT_RX_64_BYTE_FRAMES);
2464 rmon_stats->hist[1] =
2465 axienet_stat(lp, STAT_RX_65_127_BYTE_FRAMES);
2466 rmon_stats->hist[2] =
2467 axienet_stat(lp, STAT_RX_128_255_BYTE_FRAMES);
2468 rmon_stats->hist[3] =
2469 axienet_stat(lp, STAT_RX_256_511_BYTE_FRAMES);
2470 rmon_stats->hist[4] =
2471 axienet_stat(lp, STAT_RX_512_1023_BYTE_FRAMES);
2472 rmon_stats->hist[5] =
2473 axienet_stat(lp, STAT_RX_1024_MAX_BYTE_FRAMES);
2474 rmon_stats->hist[6] =
2475 rmon_stats->oversize_pkts;
2476
2477 rmon_stats->hist_tx[0] =
2478 axienet_stat(lp, STAT_TX_64_BYTE_FRAMES);
2479 rmon_stats->hist_tx[1] =
2480 axienet_stat(lp, STAT_TX_65_127_BYTE_FRAMES);
2481 rmon_stats->hist_tx[2] =
2482 axienet_stat(lp, STAT_TX_128_255_BYTE_FRAMES);
2483 rmon_stats->hist_tx[3] =
2484 axienet_stat(lp, STAT_TX_256_511_BYTE_FRAMES);
2485 rmon_stats->hist_tx[4] =
2486 axienet_stat(lp, STAT_TX_512_1023_BYTE_FRAMES);
2487 rmon_stats->hist_tx[5] =
2488 axienet_stat(lp, STAT_TX_1024_MAX_BYTE_FRAMES);
2489 rmon_stats->hist_tx[6] =
2490 axienet_stat(lp, STAT_TX_OVERSIZE_FRAMES);
2491 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2492
2493 *ranges = axienet_rmon_ranges;
2494}
2495
c7735f1b 2496static const struct ethtool_ops axienet_ethtool_ops = {
0b79b8dc 2497 .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
e1d27d29
SA
2498 ETHTOOL_COALESCE_USECS |
2499 ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
8a3b7a25
DB
2500 .get_drvinfo = axienet_ethtools_get_drvinfo,
2501 .get_regs_len = axienet_ethtools_get_regs_len,
2502 .get_regs = axienet_ethtools_get_regs,
2503 .get_link = ethtool_op_get_link,
8b09ca82
RH
2504 .get_ringparam = axienet_ethtools_get_ringparam,
2505 .set_ringparam = axienet_ethtools_set_ringparam,
8a3b7a25
DB
2506 .get_pauseparam = axienet_ethtools_get_pauseparam,
2507 .set_pauseparam = axienet_ethtools_set_pauseparam,
2508 .get_coalesce = axienet_ethtools_get_coalesce,
2509 .set_coalesce = axienet_ethtools_set_coalesce,
f5203a3d
RH
2510 .get_link_ksettings = axienet_ethtools_get_link_ksettings,
2511 .set_link_ksettings = axienet_ethtools_set_link_ksettings,
66b51663 2512 .nway_reset = axienet_ethtools_nway_reset,
76abb5d6
SA
2513 .get_ethtool_stats = axienet_ethtools_get_ethtool_stats,
2514 .get_strings = axienet_ethtools_get_strings,
2515 .get_sset_count = axienet_ethtools_get_sset_count,
2516 .get_pause_stats = axienet_ethtools_get_pause_stats,
2517 .get_eth_mac_stats = axienet_ethtool_get_eth_mac_stats,
2518 .get_eth_ctrl_stats = axienet_ethtool_get_eth_ctrl_stats,
2519 .get_rmon_stats = axienet_ethtool_get_rmon_stats,
f5203a3d
RH
2520};
2521
7a86be6a 2522static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
f5203a3d 2523{
7a86be6a
RKO
2524 return container_of(pcs, struct axienet_local, pcs);
2525}
f5203a3d 2526
7a86be6a 2527static void axienet_pcs_get_state(struct phylink_pcs *pcs,
c6739623 2528 unsigned int neg_mode,
7a86be6a
RKO
2529 struct phylink_link_state *state)
2530{
2531 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2532
7e3cb4e8 2533 phylink_mii_c22_pcs_get_state(pcs_phy, neg_mode, state);
f5203a3d
RH
2534}
2535
7a86be6a 2536static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
f5203a3d 2537{
7a86be6a 2538 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
1a025560 2539
7a86be6a 2540 phylink_mii_c22_pcs_an_restart(pcs_phy);
f5203a3d
RH
2541}
2542
febf2aaf 2543static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
7a86be6a
RKO
2544 phy_interface_t interface,
2545 const unsigned long *advertising,
2546 bool permit_pause_to_mac)
6c8f06bb 2547{
7a86be6a
RKO
2548 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2549 struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
6c8f06bb
RH
2550 struct axienet_local *lp = netdev_priv(ndev);
2551 int ret;
2552
7a86be6a 2553 if (lp->switch_x_sgmii) {
03854d8a 2554 ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG,
7a86be6a 2555 interface == PHY_INTERFACE_MODE_SGMII ?
6c8f06bb 2556 XLNX_MII_STD_SELECT_SGMII : 0);
7a86be6a
RKO
2557 if (ret < 0) {
2558 netdev_warn(ndev,
2559 "Failed to switch PHY interface: %d\n",
6c8f06bb 2560 ret);
7a86be6a
RKO
2561 return ret;
2562 }
6c8f06bb 2563 }
7a86be6a 2564
febf2aaf
RKO
2565 ret = phylink_mii_c22_pcs_config(pcs_phy, interface, advertising,
2566 neg_mode);
7a86be6a
RKO
2567 if (ret < 0)
2568 netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);
2569
2570 return ret;
6c8f06bb
RH
2571}
2572
7a86be6a
RKO
2573static const struct phylink_pcs_ops axienet_pcs_ops = {
2574 .pcs_get_state = axienet_pcs_get_state,
2575 .pcs_config = axienet_pcs_config,
2576 .pcs_an_restart = axienet_pcs_an_restart,
2577};
2578
2579static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
2580 phy_interface_t interface)
95347842 2581{
1a025560
RH
2582 struct net_device *ndev = to_net_dev(config->dev);
2583 struct axienet_local *lp = netdev_priv(ndev);
1a025560 2584
7a86be6a
RKO
2585 if (interface == PHY_INTERFACE_MODE_1000BASEX ||
2586 interface == PHY_INTERFACE_MODE_SGMII)
2587 return &lp->pcs;
1a025560 2588
7a86be6a
RKO
2589 return NULL;
2590}
2591
2592static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
2593 const struct phylink_link_state *state)
2594{
2595 /* nothing meaningful to do */
95347842
RK
2596}
2597
2598static void axienet_mac_link_down(struct phylink_config *config,
2599 unsigned int mode,
2600 phy_interface_t interface)
2601{
2602 /* nothing meaningful to do */
2603}
2604
2605static void axienet_mac_link_up(struct phylink_config *config,
2606 struct phy_device *phy,
2607 unsigned int mode, phy_interface_t interface,
2608 int speed, int duplex,
2609 bool tx_pause, bool rx_pause)
f5203a3d
RH
2610{
2611 struct net_device *ndev = to_net_dev(config->dev);
2612 struct axienet_local *lp = netdev_priv(ndev);
2613 u32 emmc_reg, fcc_reg;
2614
2615 emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
2616 emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
2617
95347842 2618 switch (speed) {
f5203a3d
RH
2619 case SPEED_1000:
2620 emmc_reg |= XAE_EMMC_LINKSPD_1000;
2621 break;
2622 case SPEED_100:
2623 emmc_reg |= XAE_EMMC_LINKSPD_100;
2624 break;
2625 case SPEED_10:
2626 emmc_reg |= XAE_EMMC_LINKSPD_10;
2627 break;
2628 default:
2629 dev_err(&ndev->dev,
2630 "Speed other than 10, 100 or 1Gbps is not supported\n");
2631 break;
2632 }
2633
2634 axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
2635
2636 fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
95347842 2637 if (tx_pause)
f5203a3d
RH
2638 fcc_reg |= XAE_FCC_FCTX_MASK;
2639 else
2640 fcc_reg &= ~XAE_FCC_FCTX_MASK;
95347842 2641 if (rx_pause)
f5203a3d
RH
2642 fcc_reg |= XAE_FCC_FCRX_MASK;
2643 else
2644 fcc_reg &= ~XAE_FCC_FCRX_MASK;
2645 axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
2646}
2647
f5203a3d 2648static const struct phylink_mac_ops axienet_phylink_ops = {
7a86be6a 2649 .mac_select_pcs = axienet_mac_select_pcs,
f5203a3d
RH
2650 .mac_config = axienet_mac_config,
2651 .mac_link_down = axienet_mac_link_down,
2652 .mac_link_up = axienet_mac_link_up,
8a3b7a25
DB
2653};
2654
2655/**
24201a64
AP
2656 * axienet_dma_err_handler - Work queue task for Axi DMA Error
2657 * @work: pointer to work_struct
8a3b7a25
DB
2658 *
2659 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
2660 * Tx/Rx BDs.
2661 */
24201a64 2662static void axienet_dma_err_handler(struct work_struct *work)
8a3b7a25 2663{
84b9ccc0 2664 u32 i;
8a3b7a25 2665 u32 axienet_status;
84b9ccc0 2666 struct axidma_bd *cur_p;
24201a64
AP
2667 struct axienet_local *lp = container_of(work, struct axienet_local,
2668 dma_err_task);
8a3b7a25 2669 struct net_device *ndev = lp->ndev;
8a3b7a25 2670
858430db
SA
2671 /* Don't bother if we are going to stop anyway */
2672 if (READ_ONCE(lp->stopping))
2673 return;
2674
9e2bc267
RH
2675 napi_disable(&lp->napi_tx);
2676 napi_disable(&lp->napi_rx);
cc37610c 2677
8a3b7a25
DB
2678 axienet_setoptions(ndev, lp->options &
2679 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
84b9ccc0
RH
2680
2681 axienet_dma_stop(lp);
c900e49d 2682 netdev_reset_queue(ndev);
8a3b7a25 2683
8b09ca82 2684 for (i = 0; i < lp->tx_bd_num; i++) {
8a3b7a25 2685 cur_p = &lp->tx_bd_v[i];
4e958f33
AP
2686 if (cur_p->cntrl) {
2687 dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
2688
17882fd4 2689 dma_unmap_single(lp->dev, addr,
8a3b7a25
DB
2690 (cur_p->cntrl &
2691 XAXIDMA_BD_CTRL_LENGTH_MASK),
2692 DMA_TO_DEVICE);
4e958f33 2693 }
23e6b2dc
RH
2694 if (cur_p->skb)
2695 dev_kfree_skb_irq(cur_p->skb);
8a3b7a25 2696 cur_p->phys = 0;
4e958f33 2697 cur_p->phys_msb = 0;
8a3b7a25
DB
2698 cur_p->cntrl = 0;
2699 cur_p->status = 0;
2700 cur_p->app0 = 0;
2701 cur_p->app1 = 0;
2702 cur_p->app2 = 0;
2703 cur_p->app3 = 0;
2704 cur_p->app4 = 0;
23e6b2dc 2705 cur_p->skb = NULL;
8a3b7a25
DB
2706 }
2707
8b09ca82 2708 for (i = 0; i < lp->rx_bd_num; i++) {
8a3b7a25
DB
2709 cur_p = &lp->rx_bd_v[i];
2710 cur_p->status = 0;
2711 cur_p->app0 = 0;
2712 cur_p->app1 = 0;
2713 cur_p->app2 = 0;
2714 cur_p->app3 = 0;
2715 cur_p->app4 = 0;
2716 }
2717
2718 lp->tx_bd_ci = 0;
2719 lp->tx_bd_tail = 0;
2720 lp->rx_bd_ci = 0;
2721
84b9ccc0 2722 axienet_dma_start(lp);
8a3b7a25
DB
2723
2724 axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
2725 axienet_status &= ~XAE_RCW1_RX_MASK;
2726 axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
2727
2728 axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
2729 if (axienet_status & XAE_INT_RXRJECT_MASK)
2730 axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
522856ce
RH
2731 axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
2732 XAE_INT_RECV_ERROR_MASK : 0);
8a3b7a25
DB
2733 axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
2734
2735 /* Sync default options with HW but leave receiver and
850a7503
MS
2736 * transmitter disabled.
2737 */
8a3b7a25
DB
2738 axienet_setoptions(ndev, lp->options &
2739 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
2740 axienet_set_mac_address(ndev, NULL);
2741 axienet_set_multicast_list(ndev);
9e2bc267
RH
2742 napi_enable(&lp->napi_rx);
2743 napi_enable(&lp->napi_tx);
799a8295 2744 axienet_setoptions(ndev, lp->options);
8a3b7a25
DB
2745}
2746
2747/**
2be58620 2748 * axienet_probe - Axi Ethernet probe function.
95219aa5 2749 * @pdev: Pointer to platform device structure.
8a3b7a25 2750 *
b0d081c5 2751 * Return: 0, on success
8a3b7a25
DB
2752 * Non-zero error value on failure.
2753 *
2754 * This is the probe routine for Axi Ethernet driver. This is called before
2755 * any other driver routines are invoked. It allocates and sets up the Ethernet
2756 * device, parses the device tree to populate the fields of struct
2757 * axienet_local, and registers the Ethernet device.
2758 */
2be58620 2759static int axienet_probe(struct platform_device *pdev)
8a3b7a25 2760{
8495659b 2761 int ret;
8a3b7a25
DB
2762 struct device_node *np;
2763 struct axienet_local *lp;
2764 struct net_device *ndev;
28ef9ebd 2765 struct resource *ethres;
83216e39 2766 u8 mac_addr[ETH_ALEN];
5fff0151 2767 int addr_width = 32;
8495659b 2768 u32 value;
8a3b7a25
DB
2769
2770 ndev = alloc_etherdev(sizeof(*lp));
41de8d4c 2771 if (!ndev)
8a3b7a25 2772 return -ENOMEM;
8a3b7a25 2773
95219aa5 2774 platform_set_drvdata(pdev, ndev);
8a3b7a25 2775
95219aa5 2776 SET_NETDEV_DEV(ndev, &pdev->dev);
28e24c62 2777 ndev->features = NETIF_F_SG;
8a3b7a25
DB
2778 ndev->ethtool_ops = &axienet_ethtool_ops;
2779
d894be57
JW
2780 /* MTU range: 64 - 9000 */
2781 ndev->min_mtu = 64;
2782 ndev->max_mtu = XAE_JUMBO_MTU;
2783
8a3b7a25
DB
2784 lp = netdev_priv(ndev);
2785 lp->ndev = ndev;
95219aa5 2786 lp->dev = &pdev->dev;
8a3b7a25 2787 lp->options = XAE_OPTION_DEFAULTS;
8b09ca82
RH
2788 lp->rx_bd_num = RX_BD_NUM_DEFAULT;
2789 lp->tx_bd_num = TX_BD_NUM_DEFAULT;
57baf8cc 2790
cb45a8bf
RH
2791 u64_stats_init(&lp->rx_stat_sync);
2792 u64_stats_init(&lp->tx_stat_sync);
2793
76abb5d6
SA
2794 mutex_init(&lp->stats_lock);
2795 seqcount_mutex_init(&lp->hw_stats_seqcount, &lp->stats_lock);
2796 INIT_DEFERRABLE_WORK(&lp->stats_work, axienet_refresh_stats);
2797
b11bfb9a
RH
2798 lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
2799 if (!lp->axi_clk) {
2800 /* For backward compatibility, if named AXI clock is not present,
2801 * treat the first clock specified as the AXI clock.
2802 */
2803 lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
2804 }
2805 if (IS_ERR(lp->axi_clk)) {
2806 ret = PTR_ERR(lp->axi_clk);
57baf8cc
RH
2807 goto free_netdev;
2808 }
b11bfb9a 2809 ret = clk_prepare_enable(lp->axi_clk);
57baf8cc 2810 if (ret) {
b11bfb9a 2811 dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
57baf8cc
RH
2812 goto free_netdev;
2813 }
2814
b11bfb9a
RH
2815 lp->misc_clks[0].id = "axis_clk";
2816 lp->misc_clks[1].id = "ref_clk";
2817 lp->misc_clks[2].id = "mgt_clk";
2818
2819 ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2820 if (ret)
2821 goto cleanup_clk;
2822
2823 ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2824 if (ret)
2825 goto cleanup_clk;
2826
8a3b7a25 2827 /* Map device registers */
47651c51 2828 lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
fcc028c1 2829 if (IS_ERR(lp->regs)) {
fcc028c1 2830 ret = PTR_ERR(lp->regs);
59cd4f19 2831 goto cleanup_clk;
8a3b7a25 2832 }
7fa0043d 2833 lp->regs_start = ethres->start;
46aa27df 2834
8a3b7a25
DB
2835 /* Setup checksum offload, but default to off if not specified */
2836 lp->features = 0;
2837
76abb5d6
SA
2838 if (axienet_ior(lp, XAE_ABILITY_OFFSET) & XAE_ABILITY_STATS)
2839 lp->features |= XAE_FEATURE_STATS;
2840
8495659b
ST
2841 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
2842 if (!ret) {
2843 switch (value) {
8a3b7a25 2844 case 1:
8a3b7a25 2845 lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
dd28f4c0
SA
2846 /* Can checksum any contiguous range */
2847 ndev->features |= NETIF_F_HW_CSUM;
8a3b7a25
DB
2848 break;
2849 case 2:
8a3b7a25
DB
2850 lp->features |= XAE_FEATURE_FULL_TX_CSUM;
2851 /* Can checksum TCP/UDP over IPv4. */
2852 ndev->features |= NETIF_F_IP_CSUM;
2853 break;
8a3b7a25
DB
2854 }
2855 }
8495659b
ST
2856 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
2857 if (!ret) {
2858 switch (value) {
8a3b7a25 2859 case 1:
8a3b7a25 2860 lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
06c069ff 2861 ndev->features |= NETIF_F_RXCSUM;
8a3b7a25
DB
2862 break;
2863 case 2:
8a3b7a25 2864 lp->features |= XAE_FEATURE_FULL_RX_CSUM;
06c069ff 2865 ndev->features |= NETIF_F_RXCSUM;
8a3b7a25 2866 break;
8a3b7a25
DB
2867 }
2868 }
2869 /* For supporting jumbo frames, the Axi Ethernet hardware must have
f080a8c3
ST
2870 * a larger Rx/Tx Memory. Typically, the size must be large so that
2871 * we can enable jumbo option and start supporting jumbo frames.
2872 * Here we check for memory allocated for Rx/Tx in the hardware from
2873 * the device-tree and accordingly set flags.
2874 */
8495659b 2875 of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
ee06b172 2876
6c8f06bb
RH
2877 lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
2878 "xlnx,switch-x-sgmii");
2879
ee06b172
A
2880 /* Start with the proprietary, and broken phy_type */
2881 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
2882 if (!ret) {
2883 netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
2884 switch (value) {
2885 case XAE_PHY_TYPE_MII:
2886 lp->phy_mode = PHY_INTERFACE_MODE_MII;
2887 break;
2888 case XAE_PHY_TYPE_GMII:
2889 lp->phy_mode = PHY_INTERFACE_MODE_GMII;
2890 break;
2891 case XAE_PHY_TYPE_RGMII_2_0:
2892 lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
2893 break;
2894 case XAE_PHY_TYPE_SGMII:
2895 lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
2896 break;
2897 case XAE_PHY_TYPE_1000BASE_X:
2898 lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
2899 break;
2900 default:
2901 ret = -EINVAL;
59cd4f19 2902 goto cleanup_clk;
ee06b172
A
2903 }
2904 } else {
0c65b2b9
AL
2905 ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
2906 if (ret)
59cd4f19 2907 goto cleanup_clk;
ee06b172 2908 }
6c8f06bb
RH
2909 if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
2910 lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
2911 dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
2912 ret = -EINVAL;
59cd4f19 2913 goto cleanup_clk;
6c8f06bb 2914 }
8a3b7a25 2915
5fe164fb 2916 if (!of_property_present(pdev->dev.of_node, "dmas")) {
6b1b40f7
SBNG
2917 /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
2918 np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
28ef9ebd 2919
6b1b40f7
SBNG
2920 if (np) {
2921 struct resource dmares;
2922
2923 ret = of_address_to_resource(np, 0, &dmares);
2924 if (ret) {
2925 dev_err(&pdev->dev,
2926 "unable to get DMA resource\n");
2927 of_node_put(np);
2928 goto cleanup_clk;
2929 }
2930 lp->dma_regs = devm_ioremap_resource(&pdev->dev,
2931 &dmares);
2932 lp->rx_irq = irq_of_parse_and_map(np, 1);
2933 lp->tx_irq = irq_of_parse_and_map(np, 0);
28ef9ebd 2934 of_node_put(np);
6b1b40f7
SBNG
2935 lp->eth_irq = platform_get_irq_optional(pdev, 0);
2936 } else {
2937 /* Check for these resources directly on the Ethernet node. */
2938 lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
2939 lp->rx_irq = platform_get_irq(pdev, 1);
2940 lp->tx_irq = platform_get_irq(pdev, 0);
2941 lp->eth_irq = platform_get_irq_optional(pdev, 2);
2942 }
2943 if (IS_ERR(lp->dma_regs)) {
2944 dev_err(&pdev->dev, "could not map DMA regs\n");
2945 ret = PTR_ERR(lp->dma_regs);
2946 goto cleanup_clk;
2947 }
2948 if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
2949 dev_err(&pdev->dev, "could not determine irqs\n");
2950 ret = -ENOMEM;
59cd4f19 2951 goto cleanup_clk;
28ef9ebd 2952 }
8a3b7a25 2953
6b1b40f7
SBNG
2954 /* Reset core now that clocks are enabled, prior to accessing MDIO */
2955 ret = __axienet_device_reset(lp);
2956 if (ret)
2957 goto cleanup_clk;
2958
2959 /* Autodetect the need for 64-bit DMA pointers.
2960 * When the IP is configured for a bus width bigger than 32 bits,
2961 * writing the MSB registers is mandatory, even if they are all 0.
2962 * We can detect this case by writing all 1's to one such register
2963 * and seeing if that sticks: when the IP is configured for 32 bits
2964 * only, those registers are RES0.
2965 * Those MSB registers were introduced in IP v7.1, which we check first.
2966 */
2967 if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
2968 void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
f1bc9fc4 2969
f735c40e 2970 iowrite32(0x0, desc);
6b1b40f7
SBNG
2971 if (ioread32(desc) == 0) { /* sanity check */
2972 iowrite32(0xffffffff, desc);
2973 if (ioread32(desc) > 0) {
2974 lp->features |= XAE_FEATURE_DMA_64BIT;
2975 addr_width = 64;
2976 dev_info(&pdev->dev,
2977 "autodetected 64-bit DMA range\n");
2978 }
2979 iowrite32(0x0, desc);
2980 }
2981 }
2982 if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
61fde511 2983 dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
6b1b40f7
SBNG
2984 ret = -EINVAL;
2985 goto cleanup_clk;
f735c40e 2986 }
f735c40e 2987
6b1b40f7
SBNG
2988 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
2989 if (ret) {
2990 dev_err(&pdev->dev, "No suitable DMA available\n");
2991 goto cleanup_clk;
2992 }
2993 netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
2994 netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
6a91b846
RSP
2995 } else {
2996 struct xilinx_vdma_config cfg;
2997 struct dma_chan *tx_chan;
2998
2999 lp->eth_irq = platform_get_irq_optional(pdev, 0);
3000 if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) {
3001 ret = lp->eth_irq;
3002 goto cleanup_clk;
3003 }
3004 tx_chan = dma_request_chan(lp->dev, "tx_chan0");
3005 if (IS_ERR(tx_chan)) {
3006 ret = PTR_ERR(tx_chan);
3007 dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
3008 goto cleanup_clk;
3009 }
3010
3011 cfg.reset = 1;
3012 /* Despite the VDMA name, this config API also supports DMA channel reset */
3013 ret = xilinx_vdma_channel_set_config(tx_chan, &cfg);
3014 if (ret < 0) {
3015 dev_err(&pdev->dev, "Reset channel failed\n");
3016 dma_release_channel(tx_chan);
3017 goto cleanup_clk;
3018 }
3019
3020 dma_release_channel(tx_chan);
3021 lp->use_dmaengine = 1;
5fff0151
AP
3022 }
3023
6a91b846
RSP
3024 if (lp->use_dmaengine)
3025 ndev->netdev_ops = &axienet_netdev_dmaengine_ops;
3026 else
3027 ndev->netdev_ops = &axienet_netdev_ops;
522856ce
RH
3028 /* Check for Ethernet core IRQ (optional) */
3029 if (lp->eth_irq <= 0)
3030 dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
3031
8a3b7a25 3032 /* Retrieve the MAC address */
83216e39
MW
3033 ret = of_get_mac_address(pdev->dev.of_node, mac_addr);
3034 if (!ret) {
3035 axienet_set_mac_address(ndev, mac_addr);
3036 } else {
3037 dev_warn(&pdev->dev, "could not find MAC address property: %d\n",
3038 ret);
3039 axienet_set_mac_address(ndev, NULL);
8a3b7a25 3040 }
8a3b7a25 3041
d048c717
SA
3042 spin_lock_init(&lp->rx_cr_lock);
3043 spin_lock_init(&lp->tx_cr_lock);
e1d27d29
SA
3044 INIT_WORK(&lp->rx_dim.work, axienet_rx_dim_work);
3045 lp->rx_dim_enabled = true;
3046 lp->rx_dim.profile_ix = 1;
3047 lp->rx_dma_cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp),
eb80520e
SA
3048 XAXIDMA_DFT_RX_USEC);
3049 lp->tx_dma_cr = axienet_calc_cr(lp, XAXIDMA_DFT_TX_THRESHOLD,
3050 XAXIDMA_DFT_TX_USEC);
8a3b7a25 3051
d1c4f93e
AC
3052 ret = axienet_mdio_setup(lp);
3053 if (ret)
3054 dev_warn(&pdev->dev,
3055 "error registering MDIO bus: %d\n", ret);
3056
1a025560
RH
3057 if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
3058 lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
19c7a439 3059 np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
ab3a5d4c 3060 if (!np) {
19c7a439
AC
3061 /* Deprecated: Always use "pcs-handle" for pcs_phy.
3062 * Falling back to "phy-handle" here is only for
3063 * backward compatibility with old device trees.
3064 */
3065 np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
3066 }
3067 if (!np) {
3068 dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
1a025560 3069 ret = -EINVAL;
59cd4f19 3070 goto cleanup_mdio;
1a025560 3071 }
ab3a5d4c 3072 lp->pcs_phy = of_mdio_find_device(np);
1a025560
RH
3073 if (!lp->pcs_phy) {
3074 ret = -EPROBE_DEFER;
ab3a5d4c 3075 of_node_put(np);
59cd4f19 3076 goto cleanup_mdio;
1a025560 3077 }
ab3a5d4c 3078 of_node_put(np);
7a86be6a
RKO
3079 lp->pcs.ops = &axienet_pcs_ops;
3080 lp->pcs.poll = true;
1a025560 3081 }
8a3b7a25 3082
f5203a3d
RH
3083 lp->phylink_config.dev = &ndev->dev;
3084 lp->phylink_config.type = PHYLINK_NETDEV;
a3702953 3085 lp->phylink_config.mac_managed_pm = true;
72a47e1a
RKO
3086 lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
3087 MAC_10FD | MAC_100FD | MAC_1000FD;
f5203a3d 3088
136a3fa2
RKO
3089 __set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
3090 if (lp->switch_x_sgmii) {
3091 __set_bit(PHY_INTERFACE_MODE_1000BASEX,
3092 lp->phylink_config.supported_interfaces);
3093 __set_bit(PHY_INTERFACE_MODE_SGMII,
3094 lp->phylink_config.supported_interfaces);
3095 }
3096
f5203a3d
RH
3097 lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
3098 lp->phy_mode,
3099 &axienet_phylink_ops);
3100 if (IS_ERR(lp->phylink)) {
3101 ret = PTR_ERR(lp->phylink);
3102 dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
59cd4f19 3103 goto cleanup_mdio;
f5203a3d
RH
3104 }
3105
8a3b7a25
DB
3106 ret = register_netdev(lp->ndev);
3107 if (ret) {
3108 dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
59cd4f19 3109 goto cleanup_phylink;
8a3b7a25
DB
3110 }
3111
8a3b7a25
DB
3112 return 0;
3113
59cd4f19
RH
3114cleanup_phylink:
3115 phylink_destroy(lp->phylink);
3116
3117cleanup_mdio:
3118 if (lp->pcs_phy)
3119 put_device(&lp->pcs_phy->dev);
3120 if (lp->mii_bus)
3121 axienet_mdio_teardown(lp);
59cd4f19 3122cleanup_clk:
b11bfb9a
RH
3123 clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
3124 clk_disable_unprepare(lp->axi_clk);
59cd4f19 3125
46aa27df 3126free_netdev:
8a3b7a25 3127 free_netdev(ndev);
46aa27df 3128
8a3b7a25
DB
3129 return ret;
3130}
3131
2e0ec0af 3132static void axienet_remove(struct platform_device *pdev)
8a3b7a25 3133{
95219aa5 3134 struct net_device *ndev = platform_get_drvdata(pdev);
8a3b7a25
DB
3135 struct axienet_local *lp = netdev_priv(ndev);
3136
8a3b7a25 3137 unregister_netdev(ndev);
f5203a3d
RH
3138
3139 if (lp->phylink)
3140 phylink_destroy(lp->phylink);
3141
1a025560
RH
3142 if (lp->pcs_phy)
3143 put_device(&lp->pcs_phy->dev);
3144
e7a3d116 3145 axienet_mdio_teardown(lp);
8a3b7a25 3146
b11bfb9a
RH
3147 clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
3148 clk_disable_unprepare(lp->axi_clk);
09a0354c 3149
8a3b7a25 3150 free_netdev(ndev);
8a3b7a25
DB
3151}
3152
70c50265
RH
3153static void axienet_shutdown(struct platform_device *pdev)
3154{
3155 struct net_device *ndev = platform_get_drvdata(pdev);
3156
3157 rtnl_lock();
3158 netif_device_detach(ndev);
3159
3160 if (netif_running(ndev))
3161 dev_close(ndev);
3162
3163 rtnl_unlock();
3164}
3165
a3de357b
AC
3166static int axienet_suspend(struct device *dev)
3167{
3168 struct net_device *ndev = dev_get_drvdata(dev);
3169
3170 if (!netif_running(ndev))
3171 return 0;
3172
3173 netif_device_detach(ndev);
3174
3175 rtnl_lock();
3176 axienet_stop(ndev);
3177 rtnl_unlock();
3178
3179 return 0;
3180}
3181
3182static int axienet_resume(struct device *dev)
3183{
3184 struct net_device *ndev = dev_get_drvdata(dev);
3185
3186 if (!netif_running(ndev))
3187 return 0;
3188
3189 rtnl_lock();
3190 axienet_open(ndev);
3191 rtnl_unlock();
3192
3193 netif_device_attach(ndev);
3194
3195 return 0;
3196}
3197
3198static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops,
3199 axienet_suspend, axienet_resume);
3200
2be58620
ST
3201static struct platform_driver axienet_driver = {
3202 .probe = axienet_probe,
e96321fa 3203 .remove = axienet_remove,
70c50265 3204 .shutdown = axienet_shutdown,
8a3b7a25 3205 .driver = {
8a3b7a25 3206 .name = "xilinx_axienet",
a3de357b 3207 .pm = &axienet_pm_ops,
8a3b7a25
DB
3208 .of_match_table = axienet_of_match,
3209 },
3210};
3211
2be58620 3212module_platform_driver(axienet_driver);
8a3b7a25
DB
3213
3214MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
3215MODULE_AUTHOR("Xilinx");
3216MODULE_LICENSE("GPL");