drivers/net/ethernet/xilinx/xilinx_axienet_main.c
09c434b8 1// SPDX-License-Identifier: GPL-2.0-only
8a3b7a25
DB
2/*
3 * Xilinx Axi Ethernet device driver
4 *
5 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
6 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
7 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
59a54f30
MS
8 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
9 * Copyright (c) 2010 - 2011 PetaLogix
cc37610c 10 * Copyright (c) 2019 - 2022 Calian Advanced Technologies
59a54f30 11 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
8a3b7a25
DB
12 *
13 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
14 * and Spartan6.
15 *
16 * TODO:
17 * - Add Axi Fifo support.
18 * - Factor out Axi DMA code into separate driver.
19 * - Test and fix basic multicast filtering.
20 * - Add support for extended multicast filtering.
21 * - Test basic VLAN support.
22 * - Add support for extended VLAN support.
23 */
24
09a0354c 25#include <linux/clk.h>
8a3b7a25
DB
26#include <linux/delay.h>
27#include <linux/etherdevice.h>
8a3b7a25
DB
28#include <linux/module.h>
29#include <linux/netdevice.h>
3d40aed8 30#include <linux/of.h>
8a3b7a25 31#include <linux/of_mdio.h>
da90e380 32#include <linux/of_net.h>
9d5e8ec6 33#include <linux/of_irq.h>
8a3b7a25 34#include <linux/of_address.h>
3d40aed8 35#include <linux/platform_device.h>
8a3b7a25 36#include <linux/skbuff.h>
0b79b8dc 37#include <linux/math64.h>
8a3b7a25
DB
38#include <linux/phy.h>
39#include <linux/mii.h>
40#include <linux/ethtool.h>
6a91b846
RSP
41#include <linux/dmaengine.h>
42#include <linux/dma-mapping.h>
43#include <linux/dma/xilinx_dma.h>
44#include <linux/circ_buf.h>
45#include <net/netdev_queues.h>
8a3b7a25
DB
46
47#include "xilinx_axienet.h"
48
8b09ca82 49/* Descriptor defines for Tx and Rx DMA */
2d19c3fd 50#define TX_BD_NUM_DEFAULT 128
8b09ca82 51#define RX_BD_NUM_DEFAULT 1024
70f5817d 52#define TX_BD_NUM_MIN (MAX_SKB_FRAGS + 1)
8b09ca82
RH
53#define TX_BD_NUM_MAX 4096
54#define RX_BD_NUM_MAX 4096
6a91b846
RSP
55#define DMA_NUM_APP_WORDS 5
56#define LEN_APP 4
57#define RX_BUF_NUM_DEFAULT 128
8a3b7a25
DB
58
59/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
60#define DRIVER_NAME "xaxienet"
61#define DRIVER_DESCRIPTION "Xilinx Axi Ethernet driver"
62#define DRIVER_VERSION "1.00a"
63
867d03bc 64#define AXIENET_REGS_N 40
8a3b7a25 65
6a91b846
RSP
66static void axienet_rx_submit_desc(struct net_device *ndev);
67
8a3b7a25 68/* Match table for of_platform binding */
74847f23 69static const struct of_device_id axienet_of_match[] = {
8a3b7a25
DB
70 { .compatible = "xlnx,axi-ethernet-1.00.a", },
71 { .compatible = "xlnx,axi-ethernet-1.01.a", },
72 { .compatible = "xlnx,axi-ethernet-2.01.a", },
73 {},
74};
75
76MODULE_DEVICE_TABLE(of, axienet_of_match);
77
78/* Option table for setting up Axi Ethernet hardware options */
79static struct axienet_option axienet_options[] = {
80 /* Turn on jumbo packet support for both Rx and Tx */
81 {
82 .opt = XAE_OPTION_JUMBO,
83 .reg = XAE_TC_OFFSET,
84 .m_or = XAE_TC_JUM_MASK,
85 }, {
86 .opt = XAE_OPTION_JUMBO,
87 .reg = XAE_RCW1_OFFSET,
88 .m_or = XAE_RCW1_JUM_MASK,
89 }, { /* Turn on VLAN packet support for both Rx and Tx */
90 .opt = XAE_OPTION_VLAN,
91 .reg = XAE_TC_OFFSET,
92 .m_or = XAE_TC_VLAN_MASK,
93 }, {
94 .opt = XAE_OPTION_VLAN,
95 .reg = XAE_RCW1_OFFSET,
96 .m_or = XAE_RCW1_VLAN_MASK,
97 }, { /* Turn on FCS stripping on receive packets */
98 .opt = XAE_OPTION_FCS_STRIP,
99 .reg = XAE_RCW1_OFFSET,
100 .m_or = XAE_RCW1_FCS_MASK,
101 }, { /* Turn on FCS insertion on transmit packets */
102 .opt = XAE_OPTION_FCS_INSERT,
103 .reg = XAE_TC_OFFSET,
104 .m_or = XAE_TC_FCS_MASK,
105 }, { /* Turn off length/type field checking on receive packets */
106 .opt = XAE_OPTION_LENTYPE_ERR,
107 .reg = XAE_RCW1_OFFSET,
108 .m_or = XAE_RCW1_LT_DIS_MASK,
109 }, { /* Turn on Rx flow control */
110 .opt = XAE_OPTION_FLOW_CONTROL,
111 .reg = XAE_FCC_OFFSET,
112 .m_or = XAE_FCC_FCRX_MASK,
113 }, { /* Turn on Tx flow control */
114 .opt = XAE_OPTION_FLOW_CONTROL,
115 .reg = XAE_FCC_OFFSET,
116 .m_or = XAE_FCC_FCTX_MASK,
117 }, { /* Turn on promiscuous frame filtering */
118 .opt = XAE_OPTION_PROMISC,
119 .reg = XAE_FMI_OFFSET,
120 .m_or = XAE_FMI_PM_MASK,
121 }, { /* Enable transmitter */
122 .opt = XAE_OPTION_TXEN,
123 .reg = XAE_TC_OFFSET,
124 .m_or = XAE_TC_TX_MASK,
125 }, { /* Enable receiver */
126 .opt = XAE_OPTION_RXEN,
127 .reg = XAE_RCW1_OFFSET,
128 .m_or = XAE_RCW1_RX_MASK,
129 },
130 {}
131};
132
6a91b846
RSP
133static struct skbuf_dma_descriptor *axienet_get_rx_desc(struct axienet_local *lp, int i)
134{
135 return lp->rx_skb_ring[i & (RX_BUF_NUM_DEFAULT - 1)];
136}
137
138static struct skbuf_dma_descriptor *axienet_get_tx_desc(struct axienet_local *lp, int i)
139{
140 return lp->tx_skb_ring[i & (TX_BD_NUM_MAX - 1)];
141}
142
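/* Note on the two ring helpers above: the skb rings are sized to powers of
 * two (RX_BUF_NUM_DEFAULT and TX_BD_NUM_MAX), so masking the free-running
 * head/tail index with (size - 1) implements the wrap-around. For example,
 * with TX_BD_NUM_MAX = 4096, index 4097 maps to slot 4097 & 4095 = 1.
 */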
8a3b7a25
DB
143/**
144 * axienet_dma_in32 - Memory mapped Axi DMA register read
145 * @lp: Pointer to axienet local structure
146 * @reg: Address offset from the base address of the Axi DMA core
147 *
b0d081c5 148 * Return: The contents of the Axi DMA register
8a3b7a25
DB
149 *
150 * This function returns the contents of the corresponding Axi DMA register.
151 */
152static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
153{
d85f5f3e 154 return ioread32(lp->dma_regs + reg);
8a3b7a25
DB
155}
156
4e958f33
AP
157static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
158 struct axidma_bd *desc)
159{
160 desc->phys = lower_32_bits(addr);
161 if (lp->features & XAE_FEATURE_DMA_64BIT)
162 desc->phys_msb = upper_32_bits(addr);
163}
164
165static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
166 struct axidma_bd *desc)
167{
168 dma_addr_t ret = desc->phys;
169
170 if (lp->features & XAE_FEATURE_DMA_64BIT)
171 ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;
172
173 return ret;
174}
175
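/* Illustration of the 64-bit split above: with XAE_FEATURE_DMA_64BIT set and
 * a buffer at DMA address 0x8_0000_1000, desc_set_phys_addr() stores
 * phys = 0x00001000 and phys_msb = 0x8, and desc_get_phys_addr() reassembles
 * the same 64-bit value. Without the feature flag only the low 32 bits are
 * used.
 */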
8a3b7a25
DB
176/**
177 * axienet_dma_bd_release - Release buffer descriptor rings
178 * @ndev: Pointer to the net_device structure
179 *
180 * This function is used to release the descriptors allocated in
181 * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
182 * driver stop routine is called.
183 */
184static void axienet_dma_bd_release(struct net_device *ndev)
185{
186 int i;
187 struct axienet_local *lp = netdev_priv(ndev);
188
f26667a3 189 /* If we end up here, tx_bd_v must have been DMA allocated. */
17882fd4 190 dma_free_coherent(lp->dev,
f26667a3
AP
191 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
192 lp->tx_bd_v,
193 lp->tx_bd_p);
194
195 if (!lp->rx_bd_v)
196 return;
197
8b09ca82 198 for (i = 0; i < lp->rx_bd_num; i++) {
4e958f33
AP
199 dma_addr_t phys;
200
f26667a3
AP
201 /* A NULL skb means this descriptor has not been initialised
202 * at all.
203 */
204 if (!lp->rx_bd_v[i].skb)
205 break;
206
23e6b2dc 207 dev_kfree_skb(lp->rx_bd_v[i].skb);
8a3b7a25 208
f26667a3
AP
209 /* For each descriptor, we programmed cntrl with the (non-zero)
210 * descriptor size, after it had been successfully allocated.
211 * So a non-zero value in there means we need to unmap it.
212 */
4e958f33
AP
213 if (lp->rx_bd_v[i].cntrl) {
214 phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
17882fd4 215 dma_unmap_single(lp->dev, phys,
f26667a3 216 lp->max_frm_size, DMA_FROM_DEVICE);
4e958f33 217 }
8a3b7a25 218 }
f26667a3 219
17882fd4 220 dma_free_coherent(lp->dev,
f26667a3
AP
221 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
222 lp->rx_bd_v,
223 lp->rx_bd_p);
8a3b7a25
DB
224}
225
eb80520e
SA
226static u64 axienet_dma_rate(struct axienet_local *lp)
227{
228 if (lp->axi_clk)
229 return clk_get_rate(lp->axi_clk);
230 return 125000000; /* arbitrary guess if no clock rate set */
231}
232
0b79b8dc 233/**
e76d1ea8
SA
234 * axienet_calc_cr() - Calculate control register value
235 * @lp: Device private data
236 * @count: Number of completions before an interrupt
237 * @usec: Microseconds after the last completion before an interrupt
238 *
239 * Calculate a control register value based on the coalescing settings. The
240 * run/stop bit is not set.
0b79b8dc 241 */
e76d1ea8 242static u32 axienet_calc_cr(struct axienet_local *lp, u32 count, u32 usec)
0b79b8dc 243{
e76d1ea8
SA
244 u32 cr;
245
246 cr = FIELD_PREP(XAXIDMA_COALESCE_MASK, count) | XAXIDMA_IRQ_IOC_MASK |
247 XAXIDMA_IRQ_ERROR_MASK;
248 /* Only set interrupt delay timer if not generating an interrupt on
249 * the first packet. Otherwise leave at 0 to disable delay interrupt.
250 */
251 if (count > 1) {
eb80520e 252 u64 clk_rate = axienet_dma_rate(lp);
e76d1ea8
SA
253 u32 timer;
254
e76d1ea8
SA
255 /* 1 Timeout Interval = 125 * (clock period of SG clock) */
256 timer = DIV64_U64_ROUND_CLOSEST((u64)usec * clk_rate,
257 XAXIDMA_DELAY_SCALE);
0b79b8dc 258
e76d1ea8
SA
259 timer = min(timer, FIELD_MAX(XAXIDMA_DELAY_MASK));
260 cr |= FIELD_PREP(XAXIDMA_DELAY_MASK, timer) |
261 XAXIDMA_IRQ_DELAY_MASK;
262 }
263
264 return cr;
0b79b8dc
RH
265}
266
eb80520e
SA
267/**
268 * axienet_coalesce_params() - Extract coalesce parameters from the CR
269 * @lp: Device private data
270 * @cr: The control register to parse
271 * @count: Number of packets before an interrupt
272 * @usec: Idle time (in usec) before an interrupt
273 */
274static void axienet_coalesce_params(struct axienet_local *lp, u32 cr,
275 u32 *count, u32 *usec)
276{
277 u64 clk_rate = axienet_dma_rate(lp);
278 u64 timer = FIELD_GET(XAXIDMA_DELAY_MASK, cr);
279
280 *count = FIELD_GET(XAXIDMA_COALESCE_MASK, cr);
281 *usec = DIV64_U64_ROUND_CLOSEST(timer * XAXIDMA_DELAY_SCALE, clk_rate);
282}
283
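/* Worked example for the two conversions above (assuming XAXIDMA_DELAY_SCALE
 * is 125 * USEC_PER_SEC, i.e. one delay-timer tick per 125 SG clock cycles):
 * with a 125 MHz AXI clock and usec = 100, axienet_calc_cr() programs
 * timer = round(100 * 125000000 / 125000000) = 100 ticks, and
 * axienet_coalesce_params() converts the same field back to 100 usec.
 */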
84b9ccc0
RH
284/**
285 * axienet_dma_start - Set up DMA registers and start DMA operation
286 * @lp: Pointer to the axienet_local structure
287 */
288static void axienet_dma_start(struct axienet_local *lp)
289{
d048c717
SA
290 spin_lock_irq(&lp->rx_cr_lock);
291
84b9ccc0 292 /* Start updating the Rx channel control register */
d048c717 293 lp->rx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK;
cc37610c 294 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
84b9ccc0 295
84b9ccc0
RH
296 /* Populate the tail pointer and bring the Rx Axi DMA engine out of
297 * halted state. This will make the Rx side ready for reception.
298 */
299 axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
cc37610c
RH
300 lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
301 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
84b9ccc0
RH
302 axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
303 (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
d048c717
SA
304 lp->rx_dma_started = true;
305
306 spin_unlock_irq(&lp->rx_cr_lock);
307 spin_lock_irq(&lp->tx_cr_lock);
308
309 /* Start updating the Tx channel control register */
310 lp->tx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK;
311 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
84b9ccc0
RH
312
313 /* Write to the RS (Run-stop) bit in the Tx channel control register.
314 * Tx channel is now ready to run. But only after we write to the
315 * tail pointer register that the Tx channel will start transmitting.
316 */
317 axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
9e2bc267
RH
318 lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
319 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
d048c717
SA
320 lp->tx_dma_started = true;
321
322 spin_unlock_irq(&lp->tx_cr_lock);
84b9ccc0
RH
323}
324
8a3b7a25
DB
325/**
326 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
327 * @ndev: Pointer to the net_device structure
328 *
b0d081c5 329 * Return: 0, on success -ENOMEM, on failure
8a3b7a25
DB
330 *
331 * This function is called to initialize the Rx and Tx DMA descriptor
332 * rings. This initializes the descriptors with required default values
333 * and is called when Axi Ethernet driver reset is called.
334 */
335static int axienet_dma_bd_init(struct net_device *ndev)
336{
8a3b7a25
DB
337 int i;
338 struct sk_buff *skb;
339 struct axienet_local *lp = netdev_priv(ndev);
340
341 /* Reset the indexes which are used for accessing the BDs */
342 lp->tx_bd_ci = 0;
343 lp->tx_bd_tail = 0;
344 lp->rx_bd_ci = 0;
345
850a7503 346 /* Allocate the Tx and Rx buffer descriptors. */
17882fd4 347 lp->tx_bd_v = dma_alloc_coherent(lp->dev,
8b09ca82 348 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
750afb08 349 &lp->tx_bd_p, GFP_KERNEL);
d0320f75 350 if (!lp->tx_bd_v)
f26667a3 351 return -ENOMEM;
8a3b7a25 352
17882fd4 353 lp->rx_bd_v = dma_alloc_coherent(lp->dev,
8b09ca82 354 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
750afb08 355 &lp->rx_bd_p, GFP_KERNEL);
d0320f75 356 if (!lp->rx_bd_v)
8a3b7a25 357 goto out;
8a3b7a25 358
8b09ca82 359 for (i = 0; i < lp->tx_bd_num; i++) {
4e958f33
AP
360 dma_addr_t addr = lp->tx_bd_p +
361 sizeof(*lp->tx_bd_v) *
362 ((i + 1) % lp->tx_bd_num);
363
364 lp->tx_bd_v[i].next = lower_32_bits(addr);
365 if (lp->features & XAE_FEATURE_DMA_64BIT)
366 lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
8a3b7a25
DB
367 }
368
8b09ca82 369 for (i = 0; i < lp->rx_bd_num; i++) {
4e958f33
AP
370 dma_addr_t addr;
371
372 addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
373 ((i + 1) % lp->rx_bd_num);
374 lp->rx_bd_v[i].next = lower_32_bits(addr);
375 if (lp->features & XAE_FEATURE_DMA_64BIT)
376 lp->rx_bd_v[i].next_msb = upper_32_bits(addr);
8a3b7a25
DB
377
378 skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
720a43ef 379 if (!skb)
8a3b7a25 380 goto out;
8a3b7a25 381
23e6b2dc 382 lp->rx_bd_v[i].skb = skb;
17882fd4 383 addr = dma_map_single(lp->dev, skb->data,
4e958f33 384 lp->max_frm_size, DMA_FROM_DEVICE);
17882fd4 385 if (dma_mapping_error(lp->dev, addr)) {
71791dc8
AP
386 netdev_err(ndev, "DMA mapping error\n");
387 goto out;
388 }
4e958f33 389 desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);
71791dc8 390
8a3b7a25
DB
391 lp->rx_bd_v[i].cntrl = lp->max_frm_size;
392 }
393
84b9ccc0 394 axienet_dma_start(lp);
8a3b7a25
DB
395
396 return 0;
397out:
398 axienet_dma_bd_release(ndev);
399 return -ENOMEM;
400}
401
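/* In the init loops above, each BD's next (and next_msb for 64-bit DMA)
 * points at the following BD and the last BD links back to BD 0, so the Tx
 * and Rx descriptor chains form circular rings that the DMA engine walks
 * from the CDESC/TDESC pointers programmed in axienet_dma_start().
 */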
402/**
403 * axienet_set_mac_address - Write the MAC address
404 * @ndev: Pointer to the net_device structure
405 * @address: 6 byte Address to be written as MAC address
406 *
407 * This function is called to initialize the MAC address of the Axi Ethernet
408 * core. It writes to the UAW0 and UAW1 registers of the core.
409 */
da90e380
TK
410static void axienet_set_mac_address(struct net_device *ndev,
411 const void *address)
8a3b7a25
DB
412{
413 struct axienet_local *lp = netdev_priv(ndev);
414
415 if (address)
a96d317f 416 eth_hw_addr_set(ndev, address);
8a3b7a25 417 if (!is_valid_ether_addr(ndev->dev_addr))
452349c3 418 eth_hw_addr_random(ndev);
8a3b7a25
DB
419
420 /* Set up unicast MAC address filter set its mac address */
421 axienet_iow(lp, XAE_UAW0_OFFSET,
422 (ndev->dev_addr[0]) |
423 (ndev->dev_addr[1] << 8) |
424 (ndev->dev_addr[2] << 16) |
425 (ndev->dev_addr[3] << 24));
426 axienet_iow(lp, XAE_UAW1_OFFSET,
427 (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
428 ~XAE_UAW1_UNICASTADDR_MASK) |
429 (ndev->dev_addr[4] |
430 (ndev->dev_addr[5] << 8))));
431}
432
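/* Example of the UAW packing above: for MAC address 00:0a:35:01:02:03 the
 * driver writes UAW0 = 0x01350a00 (bytes 0-3, low byte first) and sets the
 * low 16 bits of UAW1 to 0x0302 (bytes 4-5), preserving the remaining UAW1
 * bits.
 */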
433/**
434 * netdev_set_mac_address - Write the MAC address (from outside the driver)
435 * @ndev: Pointer to the net_device structure
436 * @p: 6 byte Address to be written as MAC address
437 *
b0d081c5 438 * Return: 0 for all conditions. Presently, there is no failure case.
8a3b7a25
DB
439 *
440 * This function is called to initialize the MAC address of the Axi Ethernet
441 * core. It calls the core specific axienet_set_mac_address. This is the
442 * function that goes into net_device_ops structure entry ndo_set_mac_address.
443 */
444static int netdev_set_mac_address(struct net_device *ndev, void *p)
445{
446 struct sockaddr *addr = p;
f7061a3e 447
8a3b7a25
DB
448 axienet_set_mac_address(ndev, addr->sa_data);
449 return 0;
450}
451
452/**
453 * axienet_set_multicast_list - Prepare the multicast table
454 * @ndev: Pointer to the net_device structure
455 *
456 * This function is called to set up the multicast table during device
457 * initialization. The Axi Ethernet basic multicast support has a four-entry
458 * multicast table which is initialized here. Additionally this function
459 * is installed as the net_device_ops ndo_set_rx_mode callback. This
460 * means whenever the multicast table entries need to be updated this
461 * function gets called.
462 */
463static void axienet_set_multicast_list(struct net_device *ndev)
464{
797a68c9 465 int i = 0;
8a3b7a25
DB
466 u32 reg, af0reg, af1reg;
467 struct axienet_local *lp = netdev_priv(ndev);
468
749e67d5
SA
469 reg = axienet_ior(lp, XAE_FMI_OFFSET);
470 reg &= ~XAE_FMI_PM_MASK;
471 if (ndev->flags & IFF_PROMISC)
8a3b7a25 472 reg |= XAE_FMI_PM_MASK;
749e67d5
SA
473 else
474 reg &= ~XAE_FMI_PM_MASK;
475 axienet_iow(lp, XAE_FMI_OFFSET, reg);
476
477 if (ndev->flags & IFF_ALLMULTI ||
478 netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
479 reg &= 0xFFFFFF00;
8a3b7a25 480 axienet_iow(lp, XAE_FMI_OFFSET, reg);
749e67d5
SA
481 axienet_iow(lp, XAE_AF0_OFFSET, 1); /* Multicast bit */
482 axienet_iow(lp, XAE_AF1_OFFSET, 0);
483 axienet_iow(lp, XAE_AM0_OFFSET, 1); /* ditto */
484 axienet_iow(lp, XAE_AM1_OFFSET, 0);
485 axienet_iow(lp, XAE_FFE_OFFSET, 1);
486 i = 1;
8a3b7a25
DB
487 } else if (!netdev_mc_empty(ndev)) {
488 struct netdev_hw_addr *ha;
489
8a3b7a25
DB
490 netdev_for_each_mc_addr(ha, ndev) {
491 if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
492 break;
493
494 af0reg = (ha->addr[0]);
495 af0reg |= (ha->addr[1] << 8);
496 af0reg |= (ha->addr[2] << 16);
497 af0reg |= (ha->addr[3] << 24);
498
499 af1reg = (ha->addr[4]);
500 af1reg |= (ha->addr[5] << 8);
501
749e67d5 502 reg &= 0xFFFFFF00;
8a3b7a25
DB
503 reg |= i;
504
505 axienet_iow(lp, XAE_FMI_OFFSET, reg);
506 axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
507 axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
749e67d5
SA
508 axienet_iow(lp, XAE_AM0_OFFSET, 0xffffffff);
509 axienet_iow(lp, XAE_AM1_OFFSET, 0x0000ffff);
797a68c9 510 axienet_iow(lp, XAE_FFE_OFFSET, 1);
8a3b7a25
DB
511 i++;
512 }
8a3b7a25 513 }
797a68c9
SA
514
515 for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
749e67d5 516 reg &= 0xFFFFFF00;
797a68c9
SA
517 reg |= i;
518 axienet_iow(lp, XAE_FMI_OFFSET, reg);
519 axienet_iow(lp, XAE_FFE_OFFSET, 0);
520 }
8a3b7a25
DB
521}
522
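/* In the function above, the low byte of the FMI register selects which CAM
 * entry the subsequent AF0/AF1 (address) and AM0/AM1 (mask) writes apply to,
 * and FFE enables (1) or disables (0) that entry; any entries left over after
 * programming the requested addresses are explicitly disabled in the final
 * loop.
 */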
523/**
524 * axienet_setoptions - Set an Axi Ethernet option
525 * @ndev: Pointer to the net_device structure
526 * @options: Option to be enabled/disabled
527 *
528 * The Axi Ethernet core has multiple features which can be selectively turned
529 * on or off. The typical options could be jumbo frame option, basic VLAN
530 * option, promiscuous mode option etc. This function is used to set or clear
531 * these options in the Axi Ethernet hardware. This is done through
532 * the axienet_option structure.
533 */
534static void axienet_setoptions(struct net_device *ndev, u32 options)
535{
536 int reg;
537 struct axienet_local *lp = netdev_priv(ndev);
538 struct axienet_option *tp = &axienet_options[0];
539
540 while (tp->opt) {
541 reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
542 if (options & tp->opt)
543 reg |= tp->m_or;
544 axienet_iow(lp, tp->reg, reg);
545 tp++;
546 }
547
548 lp->options |= options;
549}
550
76abb5d6
SA
551static u64 axienet_stat(struct axienet_local *lp, enum temac_stat stat)
552{
553 u32 counter;
554
555 if (lp->reset_in_progress)
556 return lp->hw_stat_base[stat];
557
558 counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
559 return lp->hw_stat_base[stat] + (counter - lp->hw_last_counter[stat]);
560}
561
562static void axienet_stats_update(struct axienet_local *lp, bool reset)
563{
564 enum temac_stat stat;
565
566 write_seqcount_begin(&lp->hw_stats_seqcount);
567 lp->reset_in_progress = reset;
568 for (stat = 0; stat < STAT_COUNT; stat++) {
569 u32 counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
570
571 lp->hw_stat_base[stat] += counter - lp->hw_last_counter[stat];
572 lp->hw_last_counter[stat] = counter;
573 }
574 write_seqcount_end(&lp->hw_stats_seqcount);
575}
576
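/* The two helpers above extend the 32-bit hardware counters to 64 bits:
 * hw_stat_base accumulates deltas while hw_last_counter holds the last raw
 * read, so unsigned subtraction copes with rollover. For example, a last
 * value of 0xfffffff0 followed by a raw read of 0x00000010 yields a delta
 * of 0x20.
 */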
577static void axienet_refresh_stats(struct work_struct *work)
578{
579 struct axienet_local *lp = container_of(work, struct axienet_local,
580 stats_work.work);
581
582 mutex_lock(&lp->stats_lock);
583 axienet_stats_update(lp, false);
584 mutex_unlock(&lp->stats_lock);
585
586 /* Just less than 2^32 bytes at 2.5 GBit/s */
587 schedule_delayed_work(&lp->stats_work, 13 * HZ);
588}
589
ee44d0b7 590static int __axienet_device_reset(struct axienet_local *lp)
8a3b7a25 591{
2e5644b1
RH
592 u32 value;
593 int ret;
ee44d0b7 594
76abb5d6
SA
595 /* Save statistics counters in case they will be reset */
596 mutex_lock(&lp->stats_lock);
597 if (lp->features & XAE_FEATURE_STATS)
598 axienet_stats_update(lp, true);
599
8a3b7a25
DB
600 /* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
601 * process of Axi DMA takes a while to complete as all pending
602 * commands/transfers will be flushed or completed during this
850a7503 603 * reset process.
489d4d77
RH
604 * Note that even though both TX and RX have their own reset register,
605 * they both reset the entire DMA core, so only one needs to be used.
850a7503 606 */
489d4d77 607 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
2e5644b1
RH
608 ret = read_poll_timeout(axienet_dma_in32, value,
609 !(value & XAXIDMA_CR_RESET_MASK),
610 DELAY_OF_ONE_MILLISEC, 50000, false, lp,
611 XAXIDMA_TX_CR_OFFSET);
612 if (ret) {
613 dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
76abb5d6 614 goto out;
8a3b7a25 615 }
ee44d0b7 616
b400c2f4
RH
617 /* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
618 ret = read_poll_timeout(axienet_ior, value,
619 value & XAE_INT_PHYRSTCMPLT_MASK,
620 DELAY_OF_ONE_MILLISEC, 50000, false, lp,
621 XAE_IS_OFFSET);
622 if (ret) {
623 dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
76abb5d6 624 goto out;
b400c2f4
RH
625 }
626
76abb5d6
SA
627 /* Update statistics counters with new values */
628 if (lp->features & XAE_FEATURE_STATS) {
629 enum temac_stat stat;
630
631 write_seqcount_begin(&lp->hw_stats_seqcount);
632 lp->reset_in_progress = false;
633 for (stat = 0; stat < STAT_COUNT; stat++) {
634 u32 counter =
635 axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
636
637 lp->hw_stat_base[stat] +=
638 lp->hw_last_counter[stat] - counter;
639 lp->hw_last_counter[stat] = counter;
640 }
641 write_seqcount_end(&lp->hw_stats_seqcount);
642 }
643
644out:
645 mutex_unlock(&lp->stats_lock);
646 return ret;
8a3b7a25
DB
647}
648
84b9ccc0
RH
649/**
650 * axienet_dma_stop - Stop DMA operation
651 * @lp: Pointer to the axienet_local structure
652 */
653static void axienet_dma_stop(struct axienet_local *lp)
654{
655 int count;
656 u32 cr, sr;
657
d048c717
SA
658 spin_lock_irq(&lp->rx_cr_lock);
659
660 cr = lp->rx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
84b9ccc0 661 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
d048c717
SA
662 lp->rx_dma_started = false;
663
664 spin_unlock_irq(&lp->rx_cr_lock);
84b9ccc0
RH
665 synchronize_irq(lp->rx_irq);
666
d048c717
SA
667 spin_lock_irq(&lp->tx_cr_lock);
668
669 cr = lp->tx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
84b9ccc0 670 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
d048c717
SA
671 lp->tx_dma_started = false;
672
673 spin_unlock_irq(&lp->tx_cr_lock);
84b9ccc0
RH
674 synchronize_irq(lp->tx_irq);
675
676 /* Give DMAs a chance to halt gracefully */
677 sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
678 for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
679 msleep(20);
680 sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
681 }
682
683 sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
684 for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
685 msleep(20);
686 sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
687 }
688
689 /* Do a reset to ensure DMA is really stopped */
690 axienet_lock_mii(lp);
691 __axienet_device_reset(lp);
692 axienet_unlock_mii(lp);
693}
694
8a3b7a25
DB
695/**
696 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
697 * @ndev: Pointer to the net_device structure
698 *
699 * This function is called to reset and initialize the Axi Ethernet core. This
700 * is typically called during initialization. It does a reset of the Axi DMA
701 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
8aba73ef 702 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
8a3b7a25
DB
703 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
704 * core.
ee44d0b7 705 * Returns 0 on success or a negative error number otherwise.
8a3b7a25 706 */
ee44d0b7 707static int axienet_device_reset(struct net_device *ndev)
8a3b7a25
DB
708{
709 u32 axienet_status;
710 struct axienet_local *lp = netdev_priv(ndev);
ee44d0b7 711 int ret;
8a3b7a25 712
8a3b7a25 713 lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
f080a8c3 714 lp->options |= XAE_OPTION_VLAN;
8a3b7a25
DB
715 lp->options &= (~XAE_OPTION_JUMBO);
716
48ba8a1d 717 if (ndev->mtu > XAE_MTU && ndev->mtu <= XAE_JUMBO_MTU) {
f080a8c3
ST
718 lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
719 XAE_TRL_SIZE;
720
721 if (lp->max_frm_size <= lp->rxmem)
722 lp->options |= XAE_OPTION_JUMBO;
8a3b7a25
DB
723 }
724
6b1b40f7
SBNG
725 if (!lp->use_dmaengine) {
726 ret = __axienet_device_reset(lp);
727 if (ret)
728 return ret;
729
730 ret = axienet_dma_bd_init(ndev);
731 if (ret) {
732 netdev_err(ndev, "%s: descriptor allocation failed\n",
733 __func__);
734 return ret;
735 }
8a3b7a25
DB
736 }
737
738 axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
739 axienet_status &= ~XAE_RCW1_RX_MASK;
740 axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
741
742 axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
743 if (axienet_status & XAE_INT_RXRJECT_MASK)
744 axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
522856ce
RH
745 axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
746 XAE_INT_RECV_ERROR_MASK : 0);
8a3b7a25
DB
747
748 axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
749
750 /* Sync default options with HW but leave receiver and
850a7503
MS
751 * transmitter disabled.
752 */
8a3b7a25
DB
753 axienet_setoptions(ndev, lp->options &
754 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
755 axienet_set_mac_address(ndev, NULL);
756 axienet_set_multicast_list(ndev);
757 axienet_setoptions(ndev, lp->options);
758
860e9538 759 netif_trans_update(ndev);
ee44d0b7
AP
760
761 return 0;
8a3b7a25
DB
762}
763
8a3b7a25 764/**
ab365c33 765 * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
9e2bc267 766 * @lp: Pointer to the axienet_local structure
ab365c33 767 * @first_bd: Index of first descriptor to clean up
9e2bc267
RH
768 * @nr_bds: Max number of descriptors to clean up
769 * @force: Whether to clean descriptors even if not complete
ab365c33 770 * @sizep: Pointer to a u32 filled with the total sum of all bytes
7fe85bb3 771 * in all cleaned-up descriptors. Ignored if NULL.
9e2bc267 772 * @budget: NAPI budget (use 0 when not called from NAPI poll)
8a3b7a25 773 *
ab365c33
AP
774 * Would either be called after a successful transmit operation, or after
775 * there was an error when setting up the chain.
5a6caa2c 776 * Returns the number of packets handled.
8a3b7a25 777 */
9e2bc267
RH
778static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
779 int nr_bds, bool force, u32 *sizep, int budget)
8a3b7a25 780{
8a3b7a25 781 struct axidma_bd *cur_p;
ab365c33 782 unsigned int status;
5a6caa2c 783 int i, packets = 0;
4e958f33 784 dma_addr_t phys;
ab365c33 785
9e2bc267 786 for (i = 0; i < nr_bds; i++) {
ab365c33
AP
787 cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
788 status = cur_p->status;
789
9e2bc267
RH
790 /* If force is not specified, clean up only descriptors
791 * that have been completed by the MAC.
ab365c33 792 */
9e2bc267 793 if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
ab365c33 794 break;
8a3b7a25 795
95978df6
RH
796 /* Ensure we see complete descriptor update */
797 dma_rmb();
4e958f33 798 phys = desc_get_phys_addr(lp, cur_p);
17882fd4 799 dma_unmap_single(lp->dev, phys,
4e958f33
AP
800 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
801 DMA_TO_DEVICE);
ab365c33 802
5a6caa2c 803 if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
9e2bc267 804 napi_consume_skb(cur_p->skb, budget);
5a6caa2c
SA
805 packets++;
806 }
ab365c33 807
8a3b7a25
DB
808 cur_p->app0 = 0;
809 cur_p->app1 = 0;
810 cur_p->app2 = 0;
811 cur_p->app4 = 0;
23e6b2dc 812 cur_p->skb = NULL;
95978df6
RH
813 /* ensure our transmit path and device don't prematurely see status cleared */
814 wmb();
996defd7 815 cur_p->cntrl = 0;
95978df6 816 cur_p->status = 0;
8a3b7a25 817
ab365c33
AP
818 if (sizep)
819 *sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
8a3b7a25
DB
820 }
821
5a6caa2c
SA
822 if (!force) {
823 lp->tx_bd_ci += i;
824 if (lp->tx_bd_ci >= lp->tx_bd_num)
825 lp->tx_bd_ci %= lp->tx_bd_num;
826 }
827
828 return packets;
ab365c33
AP
829}
830
bb193e3d
RH
831/**
832 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
833 * @lp: Pointer to the axienet_local structure
834 * @num_frag: The number of BDs to check for
835 *
836 * Return: 0, on success
837 * NETDEV_TX_BUSY, if any of the descriptors are not free
838 *
839 * This function is invoked before BDs are allocated and transmission starts.
840 * This function returns 0 if a BD or group of BDs can be allocated for
841 * transmission. If the BD or any of the BDs are not free the function
9e2bc267 842 * returns a busy status.
bb193e3d
RH
843 */
844static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
845 int num_frag)
846{
847 struct axidma_bd *cur_p;
848
9e2bc267 849 /* Ensure we see all descriptor updates from device or TX polling */
bb193e3d 850 rmb();
f0cf4000
RH
851 cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
852 lp->tx_bd_num];
bb193e3d
RH
853 if (cur_p->cntrl)
854 return NETDEV_TX_BUSY;
855 return 0;
856}
857
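/* Example for the check above: with tx_bd_num = 128, tx_bd_tail = 120 and a
 * request for num_frag + 1 = 10 descriptors, the BD probed is
 * (120 + 10) % 128 = 2; a non-zero cntrl there means that slot has not been
 * cleaned yet, so NETDEV_TX_BUSY is returned instead of overwriting it.
 */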
6a91b846
RSP
858/**
859 * axienet_dma_tx_cb - DMA engine callback for TX channel.
860 * @data: Pointer to the axienet_local structure.
861 * @result: error reporting through dmaengine_result.
862 * This function is called by the dmaengine driver for the TX channel to
863 * notify that the transmit is done.
864 */
865static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result)
866{
867 struct skbuf_dma_descriptor *skbuf_dma;
868 struct axienet_local *lp = data;
869 struct netdev_queue *txq;
870 int len;
871
872 skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_tail++);
873 len = skbuf_dma->skb->len;
874 txq = skb_get_tx_queue(lp->ndev, skbuf_dma->skb);
875 u64_stats_update_begin(&lp->tx_stat_sync);
876 u64_stats_add(&lp->tx_bytes, len);
877 u64_stats_add(&lp->tx_packets, 1);
878 u64_stats_update_end(&lp->tx_stat_sync);
879 dma_unmap_sg(lp->dev, skbuf_dma->sgl, skbuf_dma->sg_len, DMA_TO_DEVICE);
880 dev_consume_skb_any(skbuf_dma->skb);
881 netif_txq_completed_wake(txq, 1, len,
882 CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
32374234 883 2);
6a91b846
RSP
884}
885
886/**
887 * axienet_start_xmit_dmaengine - Starts the transmission.
888 * @skb: sk_buff pointer that contains data to be Txed.
889 * @ndev: Pointer to net_device structure.
890 *
891 * Return: NETDEV_TX_OK on success, and also when the skb is dropped on a
892 * non-space error.
893 * NETDEV_TX_BUSY when no free element is available in the TX skb ring.
894 *
895 * This function is invoked to initiate transmission. It maps the skb,
896 * registers the DMA completion callback and submits the DMA
897 * transaction.
898 * Additionally if checksum offloading is supported,
899 * it populates AXI Stream Control fields with appropriate values.
900 */
901static netdev_tx_t
902axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
903{
904 struct dma_async_tx_descriptor *dma_tx_desc = NULL;
905 struct axienet_local *lp = netdev_priv(ndev);
906 u32 app_metadata[DMA_NUM_APP_WORDS] = {0};
907 struct skbuf_dma_descriptor *skbuf_dma;
908 struct dma_device *dma_dev;
909 struct netdev_queue *txq;
910 u32 csum_start_off;
911 u32 csum_index_off;
912 int sg_len;
913 int ret;
914
915 dma_dev = lp->tx_chan->device;
916 sg_len = skb_shinfo(skb)->nr_frags + 1;
32374234 917 if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= 1) {
6a91b846
RSP
918 netif_stop_queue(ndev);
919 if (net_ratelimit())
920 netdev_warn(ndev, "TX ring unexpectedly full\n");
921 return NETDEV_TX_BUSY;
922 }
923
924 skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_head);
925 if (!skbuf_dma)
926 goto xmit_error_drop_skb;
927
928 lp->tx_ring_head++;
929 sg_init_table(skbuf_dma->sgl, sg_len);
930 ret = skb_to_sgvec(skb, skbuf_dma->sgl, 0, skb->len);
931 if (ret < 0)
932 goto xmit_error_drop_skb;
933
934 ret = dma_map_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
935 if (!ret)
936 goto xmit_error_drop_skb;
937
938 /* Fill up app fields for checksum */
939 if (skb->ip_summed == CHECKSUM_PARTIAL) {
940 if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
941 /* Tx Full Checksum Offload Enabled */
942 app_metadata[0] |= 2;
943 } else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
944 csum_start_off = skb_transport_offset(skb);
945 csum_index_off = csum_start_off + skb->csum_offset;
946 /* Tx Partial Checksum Offload Enabled */
947 app_metadata[0] |= 1;
948 app_metadata[1] = (csum_start_off << 16) | csum_index_off;
949 }
950 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
951 app_metadata[0] |= 2; /* Tx Full Checksum Offload Enabled */
952 }
953
954 dma_tx_desc = dma_dev->device_prep_slave_sg(lp->tx_chan, skbuf_dma->sgl,
955 sg_len, DMA_MEM_TO_DEV,
956 DMA_PREP_INTERRUPT, (void *)app_metadata);
957 if (!dma_tx_desc)
958 goto xmit_error_unmap_sg;
959
960 skbuf_dma->skb = skb;
961 skbuf_dma->sg_len = sg_len;
962 dma_tx_desc->callback_param = lp;
963 dma_tx_desc->callback_result = axienet_dma_tx_cb;
6a91b846
RSP
964 txq = skb_get_tx_queue(lp->ndev, skb);
965 netdev_tx_sent_queue(txq, skb->len);
966 netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
32374234 967 1, 2);
6a91b846 968
5ccdcdf1
SG
969 dmaengine_submit(dma_tx_desc);
970 dma_async_issue_pending(lp->tx_chan);
6a91b846
RSP
971 return NETDEV_TX_OK;
972
973xmit_error_unmap_sg:
974 dma_unmap_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
975xmit_error_drop_skb:
976 dev_kfree_skb_any(skb);
977 return NETDEV_TX_OK;
978}
979
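/* Example of the partial-checksum app words set up above: for a TCP packet
 * on plain Ethernet/IPv4, skb_transport_offset() is 34 and skb->csum_offset
 * is 16, so app_metadata[1] = (34 << 16) | 50 = 0x00220032, telling the MAC
 * where to start summing and where to insert the result.
 */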
ab365c33 980/**
9e2bc267 981 * axienet_tx_poll - Invoked once a transmit is completed by the
ab365c33 982 * Axi DMA Tx channel.
9e2bc267
RH
983 * @napi: Pointer to NAPI structure.
984 * @budget: Max number of TX packets to process.
985 *
986 * Return: Number of TX packets processed.
ab365c33 987 *
9e2bc267 988 * This function is invoked from the NAPI processing to notify the completion
ab365c33
AP
989 * of transmit operation. It clears fields in the corresponding Tx BDs and
990 * unmaps the corresponding buffer so that CPU can regain ownership of the
991 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
992 * required.
993 */
9e2bc267 994static int axienet_tx_poll(struct napi_struct *napi, int budget)
ab365c33 995{
9e2bc267
RH
996 struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
997 struct net_device *ndev = lp->ndev;
ab365c33 998 u32 size = 0;
9e2bc267 999 int packets;
ab365c33 1000
5a6caa2c
SA
1001 packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, lp->tx_bd_num, false,
1002 &size, budget);
ab365c33 1003
9e2bc267 1004 if (packets) {
c900e49d 1005 netdev_completed_queue(ndev, packets, size);
cb45a8bf
RH
1006 u64_stats_update_begin(&lp->tx_stat_sync);
1007 u64_stats_add(&lp->tx_packets, packets);
1008 u64_stats_add(&lp->tx_bytes, size);
1009 u64_stats_update_end(&lp->tx_stat_sync);
7de44285 1010
9e2bc267
RH
1011 /* Matches barrier in axienet_start_xmit */
1012 smp_mb();
7de44285 1013
9e2bc267
RH
1014 if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
1015 netif_wake_queue(ndev);
1016 }
1017
1018 if (packets < budget && napi_complete_done(napi, packets)) {
1019 /* Re-enable TX completion interrupts. This should
1020 * cause an immediate interrupt if any TX packets are
1021 * already pending.
1022 */
d048c717 1023 spin_lock_irq(&lp->tx_cr_lock);
9e2bc267 1024 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
d048c717 1025 spin_unlock_irq(&lp->tx_cr_lock);
9e2bc267
RH
1026 }
1027 return packets;
8a3b7a25
DB
1028}
1029
1030/**
1031 * axienet_start_xmit - Starts the transmission.
1032 * @skb: sk_buff pointer that contains data to be Txed.
1033 * @ndev: Pointer to net_device structure.
1034 *
b0d081c5 1035 * Return: NETDEV_TX_OK, on success
8a3b7a25
DB
1036 * NETDEV_TX_BUSY, if any of the descriptors are not free
1037 *
1038 * This function is invoked from upper layers to initiate transmission. The
1039 * function uses the next available free BDs and populates their fields to
1040 * start the transmission. Additionally if checksum offloading is supported,
1041 * it populates AXI Stream Control fields with appropriate values.
1042 */
81255af8
Y
1043static netdev_tx_t
1044axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
8a3b7a25
DB
1045{
1046 u32 ii;
1047 u32 num_frag;
1048 u32 csum_start_off;
1049 u32 csum_index_off;
1050 skb_frag_t *frag;
4e958f33 1051 dma_addr_t tail_p, phys;
f0cf4000 1052 u32 orig_tail_ptr, new_tail_ptr;
8a3b7a25
DB
1053 struct axienet_local *lp = netdev_priv(ndev);
1054 struct axidma_bd *cur_p;
f0cf4000
RH
1055
1056 orig_tail_ptr = lp->tx_bd_tail;
1057 new_tail_ptr = orig_tail_ptr;
8a3b7a25
DB
1058
1059 num_frag = skb_shinfo(skb)->nr_frags;
f0cf4000 1060 cur_p = &lp->tx_bd_v[orig_tail_ptr];
8a3b7a25 1061
aba57a82 1062 if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
bb193e3d
RH
1063 /* Should not happen as last start_xmit call should have
1064 * checked for sufficient space and queue should only be
1065 * woken when sufficient space is available.
1066 */
7de44285 1067 netif_stop_queue(ndev);
bb193e3d
RH
1068 if (net_ratelimit())
1069 netdev_warn(ndev, "TX ring unexpectedly full\n");
1070 return NETDEV_TX_BUSY;
8a3b7a25
DB
1071 }
1072
1073 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1074 if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
1075 /* Tx Full Checksum Offload Enabled */
1076 cur_p->app0 |= 2;
fd0413bb 1077 } else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
8a3b7a25
DB
1078 csum_start_off = skb_transport_offset(skb);
1079 csum_index_off = csum_start_off + skb->csum_offset;
1080 /* Tx Partial Checksum Offload Enabled */
1081 cur_p->app0 |= 1;
1082 cur_p->app1 = (csum_start_off << 16) | csum_index_off;
1083 }
1084 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1085 cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
1086 }
1087
17882fd4 1088 phys = dma_map_single(lp->dev, skb->data,
4e958f33 1089 skb_headlen(skb), DMA_TO_DEVICE);
17882fd4 1090 if (unlikely(dma_mapping_error(lp->dev, phys))) {
71791dc8
AP
1091 if (net_ratelimit())
1092 netdev_err(ndev, "TX DMA mapping error\n");
1093 ndev->stats.tx_dropped++;
99714e37 1094 dev_kfree_skb_any(skb);
71791dc8
AP
1095 return NETDEV_TX_OK;
1096 }
4e958f33 1097 desc_set_phys_addr(lp, phys, cur_p);
71791dc8 1098 cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
8a3b7a25
DB
1099
1100 for (ii = 0; ii < num_frag; ii++) {
f0cf4000
RH
1101 if (++new_tail_ptr >= lp->tx_bd_num)
1102 new_tail_ptr = 0;
1103 cur_p = &lp->tx_bd_v[new_tail_ptr];
8a3b7a25 1104 frag = &skb_shinfo(skb)->frags[ii];
17882fd4 1105 phys = dma_map_single(lp->dev,
4e958f33
AP
1106 skb_frag_address(frag),
1107 skb_frag_size(frag),
1108 DMA_TO_DEVICE);
17882fd4 1109 if (unlikely(dma_mapping_error(lp->dev, phys))) {
71791dc8
AP
1110 if (net_ratelimit())
1111 netdev_err(ndev, "TX DMA mapping error\n");
1112 ndev->stats.tx_dropped++;
9e2bc267
RH
1113 axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
1114 true, NULL, 0);
99714e37 1115 dev_kfree_skb_any(skb);
71791dc8
AP
1116 return NETDEV_TX_OK;
1117 }
4e958f33 1118 desc_set_phys_addr(lp, phys, cur_p);
8a3b7a25
DB
1119 cur_p->cntrl = skb_frag_size(frag);
1120 }
1121
1122 cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
23e6b2dc 1123 cur_p->skb = skb;
8a3b7a25 1124
f0cf4000
RH
1125 tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
1126 if (++new_tail_ptr >= lp->tx_bd_num)
1127 new_tail_ptr = 0;
1128 WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
c900e49d 1129 netdev_sent_queue(ndev, skb->len);
f0cf4000 1130
8a3b7a25 1131 /* Start the transfer */
6a00d0dd 1132 axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
8a3b7a25 1133
bb193e3d
RH
1134 /* Stop queue if next transmit may not have space */
1135 if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
1136 netif_stop_queue(ndev);
1137
9e2bc267 1138 /* Matches barrier in axienet_tx_poll */
bb193e3d
RH
1139 smp_mb();
1140
1141 /* Space might have just been freed - check again */
1142 if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
1143 netif_wake_queue(ndev);
1144 }
1145
8a3b7a25
DB
1146 return NETDEV_TX_OK;
1147}
1148
6a91b846
RSP
1149/**
1150 * axienet_dma_rx_cb - DMA engine callback for RX channel.
1151 * @data: Pointer to the axienet_local structure.
1152 * @result: error reporting through dmaengine_result.
1153 * This function is called by the dmaengine driver for the RX channel to
1154 * notify that a packet has been received.
1155 */
1156static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
1157{
1158 struct skbuf_dma_descriptor *skbuf_dma;
1159 size_t meta_len, meta_max_len, rx_len;
1160 struct axienet_local *lp = data;
1161 struct sk_buff *skb;
1162 u32 *app_metadata;
fd980bf6 1163 int i;
6a91b846
RSP
1164
1165 skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
1166 skb = skbuf_dma->skb;
1167 app_metadata = dmaengine_desc_get_metadata_ptr(skbuf_dma->desc, &meta_len,
1168 &meta_max_len);
1169 dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size,
1170 DMA_FROM_DEVICE);
8bbceba7
AJ
1171
1172 if (IS_ERR(app_metadata)) {
1173 if (net_ratelimit())
1174 netdev_err(lp->ndev, "Failed to get RX metadata pointer\n");
1175 dev_kfree_skb_any(skb);
1176 lp->ndev->stats.rx_dropped++;
1177 goto rx_submit;
1178 }
1179
6a91b846
RSP
1180 /* TODO: Derive app word index programmatically */
1181 rx_len = (app_metadata[LEN_APP] & 0xFFFF);
1182 skb_put(skb, rx_len);
1183 skb->protocol = eth_type_trans(skb, lp->ndev);
1184 skb->ip_summed = CHECKSUM_NONE;
1185
1186 __netif_rx(skb);
1187 u64_stats_update_begin(&lp->rx_stat_sync);
1188 u64_stats_add(&lp->rx_packets, 1);
1189 u64_stats_add(&lp->rx_bytes, rx_len);
1190 u64_stats_update_end(&lp->rx_stat_sync);
fd980bf6 1191
8bbceba7 1192rx_submit:
fd980bf6
SG
1193 for (i = 0; i < CIRC_SPACE(lp->rx_ring_head, lp->rx_ring_tail,
1194 RX_BUF_NUM_DEFAULT); i++)
1195 axienet_rx_submit_desc(lp->ndev);
6a91b846
RSP
1196 dma_async_issue_pending(lp->rx_chan);
1197}
1198
8a3b7a25 1199/**
9e2bc267 1200 * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
cc37610c 1201 * @napi: Pointer to NAPI structure.
9e2bc267 1202 * @budget: Max number of RX packets to process.
8a3b7a25 1203 *
cc37610c 1204 * Return: Number of RX packets processed.
8a3b7a25 1205 */
9e2bc267 1206static int axienet_rx_poll(struct napi_struct *napi, int budget)
8a3b7a25
DB
1207{
1208 u32 length;
1209 u32 csumstatus;
1210 u32 size = 0;
cc37610c 1211 int packets = 0;
38e96b35 1212 dma_addr_t tail_p = 0;
8a3b7a25 1213 struct axidma_bd *cur_p;
cc37610c 1214 struct sk_buff *skb, *new_skb;
9e2bc267 1215 struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);
8a3b7a25 1216
8a3b7a25
DB
1217 cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
1218
cc37610c 1219 while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
4e958f33
AP
1220 dma_addr_t phys;
1221
95978df6
RH
1222 /* Ensure we see complete descriptor update */
1223 dma_rmb();
8a3b7a25 1224
23e6b2dc
RH
1225 skb = cur_p->skb;
1226 cur_p->skb = NULL;
7a7d340b
RH
1227
1228 /* skb could be NULL if a previous pass already received the
1229 * packet for this slot in the ring, but failed to refill it
1230 * with a newly allocated buffer. In this case, don't try to
1231 * receive it again.
1232 */
1233 if (likely(skb)) {
1234 length = cur_p->app4 & 0x0000FFFF;
1235
1236 phys = desc_get_phys_addr(lp, cur_p);
17882fd4 1237 dma_unmap_single(lp->dev, phys, lp->max_frm_size,
7a7d340b
RH
1238 DMA_FROM_DEVICE);
1239
1240 skb_put(skb, length);
cc37610c 1241 skb->protocol = eth_type_trans(skb, lp->ndev);
7a7d340b
RH
1242 /*skb_checksum_none_assert(skb);*/
1243 skb->ip_summed = CHECKSUM_NONE;
1244
1245 /* if we're doing Rx csum offload, set it up */
1246 if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
1247 csumstatus = (cur_p->app2 &
1248 XAE_FULL_CSUM_STATUS_MASK) >> 3;
1249 if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
1250 csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
1251 skb->ip_summed = CHECKSUM_UNNECESSARY;
1252 }
736f0c7a 1253 } else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
7a7d340b
RH
1254 skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
1255 skb->ip_summed = CHECKSUM_COMPLETE;
8a3b7a25 1256 }
8a3b7a25 1257
cc37610c 1258 napi_gro_receive(napi, skb);
8a3b7a25 1259
7a7d340b
RH
1260 size += length;
1261 packets++;
1262 }
8a3b7a25 1263
6c7e7da2 1264 new_skb = napi_alloc_skb(napi, lp->max_frm_size);
720a43ef 1265 if (!new_skb)
7a7d340b 1266 break;
720a43ef 1267
17882fd4 1268 phys = dma_map_single(lp->dev, new_skb->data,
4e958f33
AP
1269 lp->max_frm_size,
1270 DMA_FROM_DEVICE);
17882fd4 1271 if (unlikely(dma_mapping_error(lp->dev, phys))) {
71791dc8 1272 if (net_ratelimit())
cc37610c 1273 netdev_err(lp->ndev, "RX DMA mapping error\n");
71791dc8 1274 dev_kfree_skb(new_skb);
7a7d340b 1275 break;
71791dc8 1276 }
4e958f33 1277 desc_set_phys_addr(lp, phys, cur_p);
71791dc8 1278
8a3b7a25
DB
1279 cur_p->cntrl = lp->max_frm_size;
1280 cur_p->status = 0;
23e6b2dc 1281 cur_p->skb = new_skb;
8a3b7a25 1282
7a7d340b
RH
1283 /* Only update tail_p to mark this slot as usable after it has
1284 * been successfully refilled.
1285 */
1286 tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
1287
8b09ca82
RH
1288 if (++lp->rx_bd_ci >= lp->rx_bd_num)
1289 lp->rx_bd_ci = 0;
8a3b7a25
DB
1290 cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
1291 }
1292
cb45a8bf
RH
1293 u64_stats_update_begin(&lp->rx_stat_sync);
1294 u64_stats_add(&lp->rx_packets, packets);
1295 u64_stats_add(&lp->rx_bytes, size);
1296 u64_stats_update_end(&lp->rx_stat_sync);
8a3b7a25 1297
38e96b35 1298 if (tail_p)
6a00d0dd 1299 axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
cc37610c
RH
1300
1301 if (packets < budget && napi_complete_done(napi, packets)) {
e1d27d29
SA
1302 if (READ_ONCE(lp->rx_dim_enabled)) {
1303 struct dim_sample sample = {
1304 .time = ktime_get(),
1305 /* Safe because we are the only writer */
1306 .pkt_ctr = u64_stats_read(&lp->rx_packets),
1307 .byte_ctr = u64_stats_read(&lp->rx_bytes),
1308 .event_ctr = READ_ONCE(lp->rx_irqs),
1309 };
1310
1311 net_dim(&lp->rx_dim, &sample);
1312 }
1313
cc37610c
RH
1314 /* Re-enable RX completion interrupts. This should
1315 * cause an immediate interrupt if any RX packets are
1316 * already pending.
1317 */
d048c717 1318 spin_lock_irq(&lp->rx_cr_lock);
cc37610c 1319 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
d048c717 1320 spin_unlock_irq(&lp->rx_cr_lock);
cc37610c
RH
1321 }
1322 return packets;
8a3b7a25
DB
1323}
1324
1325/**
1326 * axienet_tx_irq - Tx Done Isr.
1327 * @irq: irq number
1328 * @_ndev: net_device pointer
1329 *
9cbc1b68 1330 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
8a3b7a25 1331 *
9e2bc267
RH
1332 * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
1333 * TX BD processing.
8a3b7a25
DB
1334 */
1335static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
1336{
8a3b7a25
DB
1337 unsigned int status;
1338 struct net_device *ndev = _ndev;
1339 struct axienet_local *lp = netdev_priv(ndev);
1340
1341 status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
84b9ccc0 1342
8a3b7a25 1343 if (!(status & XAXIDMA_IRQ_ALL_MASK))
9cbc1b68 1344 return IRQ_NONE;
8a3b7a25 1345
84b9ccc0
RH
1346 axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
1347
1348 if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
1349 netdev_err(ndev, "DMA Tx error 0x%x\n", status);
1350 netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
1351 (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
1352 (lp->tx_bd_v[lp->tx_bd_ci]).phys);
24201a64 1353 schedule_work(&lp->dma_err_task);
84b9ccc0 1354 } else {
9e2bc267
RH
1355 /* Disable further TX completion interrupts and schedule
1356 * NAPI to handle the completions.
1357 */
ba0da2dc 1358 if (napi_schedule_prep(&lp->napi_tx)) {
d048c717
SA
1359 u32 cr;
1360
1361 spin_lock(&lp->tx_cr_lock);
1362 cr = lp->tx_dma_cr;
1363 cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
ba0da2dc 1364 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
d048c717 1365 spin_unlock(&lp->tx_cr_lock);
ba0da2dc
SA
1366 __napi_schedule(&lp->napi_tx);
1367 }
8a3b7a25 1368 }
84b9ccc0 1369
8a3b7a25
DB
1370 return IRQ_HANDLED;
1371}
1372
1373/**
1374 * axienet_rx_irq - Rx Isr.
1375 * @irq: irq number
1376 * @_ndev: net_device pointer
1377 *
9cbc1b68 1378 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
8a3b7a25 1379 *
cc37610c 1380 * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
8a3b7a25
DB
1381 * processing.
1382 */
1383static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
1384{
8a3b7a25
DB
1385 unsigned int status;
1386 struct net_device *ndev = _ndev;
1387 struct axienet_local *lp = netdev_priv(ndev);
1388
1389 status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
84b9ccc0 1390
8a3b7a25 1391 if (!(status & XAXIDMA_IRQ_ALL_MASK))
9cbc1b68 1392 return IRQ_NONE;
8a3b7a25 1393
84b9ccc0
RH
1394 axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
1395
1396 if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
1397 netdev_err(ndev, "DMA Rx error 0x%x\n", status);
1398 netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
1399 (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
1400 (lp->rx_bd_v[lp->rx_bd_ci]).phys);
24201a64 1401 schedule_work(&lp->dma_err_task);
84b9ccc0 1402 } else {
cc37610c
RH
1403 /* Disable further RX completion interrupts and schedule
1404 * NAPI receive.
1405 */
e1d27d29 1406 WRITE_ONCE(lp->rx_irqs, READ_ONCE(lp->rx_irqs) + 1);
ba0da2dc 1407 if (napi_schedule_prep(&lp->napi_rx)) {
d048c717
SA
1408 u32 cr;
1409
1410 spin_lock(&lp->rx_cr_lock);
1411 cr = lp->rx_dma_cr;
1412 cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
ba0da2dc 1413 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
d048c717
SA
1414 spin_unlock(&lp->rx_cr_lock);
1415
ba0da2dc
SA
1416 __napi_schedule(&lp->napi_rx);
1417 }
8a3b7a25 1418 }
84b9ccc0 1419
8a3b7a25
DB
1420 return IRQ_HANDLED;
1421}
1422
522856ce
RH
1423/**
1424 * axienet_eth_irq - Ethernet core Isr.
1425 * @irq: irq number
1426 * @_ndev: net_device pointer
1427 *
1428 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
1429 *
1430 * Handle miscellaneous conditions indicated by Ethernet core IRQ.
1431 */
1432static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
1433{
1434 struct net_device *ndev = _ndev;
1435 struct axienet_local *lp = netdev_priv(ndev);
1436 unsigned int pending;
1437
1438 pending = axienet_ior(lp, XAE_IP_OFFSET);
1439 if (!pending)
1440 return IRQ_NONE;
1441
1442 if (pending & XAE_INT_RXFIFOOVR_MASK)
1443 ndev->stats.rx_missed_errors++;
1444
1445 if (pending & XAE_INT_RXRJECT_MASK)
d70e3788 1446 ndev->stats.rx_dropped++;
522856ce
RH
1447
1448 axienet_iow(lp, XAE_IS_OFFSET, pending);
1449 return IRQ_HANDLED;
1450}
1451
24201a64 1452static void axienet_dma_err_handler(struct work_struct *work);
aecb55be 1453
6a91b846
RSP
1454/**
1455 * axienet_rx_submit_desc - Submit an rx descriptor to the dmaengine.
1456 * Allocate an skbuff, map it for DMA, build the scatterlist, obtain a
1457 * descriptor, attach the callback information and submit the descriptor.
1458 *
1459 * @ndev: net_device pointer
1460 *
1461 */
1462static void axienet_rx_submit_desc(struct net_device *ndev)
1463{
1464 struct dma_async_tx_descriptor *dma_rx_desc = NULL;
1465 struct axienet_local *lp = netdev_priv(ndev);
1466 struct skbuf_dma_descriptor *skbuf_dma;
1467 struct sk_buff *skb;
1468 dma_addr_t addr;
1469
1470 skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_head);
1471 if (!skbuf_dma)
1472 return;
1473
6a91b846
RSP
1474 skb = netdev_alloc_skb(ndev, lp->max_frm_size);
1475 if (!skb)
1476 return;
1477
1478 sg_init_table(skbuf_dma->sgl, 1);
1479 addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE);
1480 if (unlikely(dma_mapping_error(lp->dev, addr))) {
1481 if (net_ratelimit())
1482 netdev_err(ndev, "DMA mapping error\n");
1483 goto rx_submit_err_free_skb;
1484 }
1485 sg_dma_address(skbuf_dma->sgl) = addr;
1486 sg_dma_len(skbuf_dma->sgl) = lp->max_frm_size;
1487 dma_rx_desc = dmaengine_prep_slave_sg(lp->rx_chan, skbuf_dma->sgl,
1488 1, DMA_DEV_TO_MEM,
1489 DMA_PREP_INTERRUPT);
1490 if (!dma_rx_desc)
1491 goto rx_submit_err_unmap_skb;
1492
1493 skbuf_dma->skb = skb;
1494 skbuf_dma->dma_address = sg_dma_address(skbuf_dma->sgl);
1495 skbuf_dma->desc = dma_rx_desc;
1496 dma_rx_desc->callback_param = lp;
1497 dma_rx_desc->callback_result = axienet_dma_rx_cb;
fd980bf6 1498 lp->rx_ring_head++;
6a91b846
RSP
1499 dmaengine_submit(dma_rx_desc);
1500
1501 return;
1502
1503rx_submit_err_unmap_skb:
1504 dma_unmap_single(lp->dev, addr, lp->max_frm_size, DMA_FROM_DEVICE);
1505rx_submit_err_free_skb:
1506 dev_kfree_skb(skb);
1507}
1508
1509/**
1510 * axienet_init_dmaengine - init the dmaengine code.
1511 * @ndev: Pointer to net_device structure
1512 *
1513 * Return: 0, on success.
1514 * non-zero error value on failure
1515 *
1516 * This is the dmaengine initialization code.
1517 */
1518static int axienet_init_dmaengine(struct net_device *ndev)
1519{
1520 struct axienet_local *lp = netdev_priv(ndev);
1521 struct skbuf_dma_descriptor *skbuf_dma;
1522 int i, ret;
1523
1524 lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0");
1525 if (IS_ERR(lp->tx_chan)) {
1526 dev_err(lp->dev, "No Ethernet DMA (TX) channel found\n");
1527 return PTR_ERR(lp->tx_chan);
1528 }
1529
1530 lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0");
1531 if (IS_ERR(lp->rx_chan)) {
1532 ret = PTR_ERR(lp->rx_chan);
1533 dev_err(lp->dev, "No Ethernet DMA (RX) channel found\n");
1534 goto err_dma_release_tx;
1535 }
1536
1537 lp->tx_ring_tail = 0;
1538 lp->tx_ring_head = 0;
1539 lp->rx_ring_tail = 0;
1540 lp->rx_ring_head = 0;
1541 lp->tx_skb_ring = kcalloc(TX_BD_NUM_MAX, sizeof(*lp->tx_skb_ring),
1542 GFP_KERNEL);
1543 if (!lp->tx_skb_ring) {
1544 ret = -ENOMEM;
1545 goto err_dma_release_rx;
1546 }
1547 for (i = 0; i < TX_BD_NUM_MAX; i++) {
1548 skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
1549 if (!skbuf_dma) {
1550 ret = -ENOMEM;
1551 goto err_free_tx_skb_ring;
1552 }
1553 lp->tx_skb_ring[i] = skbuf_dma;
1554 }
1555
1556 lp->rx_skb_ring = kcalloc(RX_BUF_NUM_DEFAULT, sizeof(*lp->rx_skb_ring),
1557 GFP_KERNEL);
1558 if (!lp->rx_skb_ring) {
1559 ret = -ENOMEM;
1560 goto err_free_tx_skb_ring;
1561 }
1562 for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) {
1563 skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
1564 if (!skbuf_dma) {
1565 ret = -ENOMEM;
1566 goto err_free_rx_skb_ring;
1567 }
1568 lp->rx_skb_ring[i] = skbuf_dma;
1569 }
1570 /* TODO: Instead of BD_NUM_DEFAULT use runtime support */
1571 for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1572 axienet_rx_submit_desc(ndev);
1573 dma_async_issue_pending(lp->rx_chan);
1574
1575 return 0;
1576
1577err_free_rx_skb_ring:
1578 for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1579 kfree(lp->rx_skb_ring[i]);
1580 kfree(lp->rx_skb_ring);
1581err_free_tx_skb_ring:
1582 for (i = 0; i < TX_BD_NUM_MAX; i++)
1583 kfree(lp->tx_skb_ring[i]);
1584 kfree(lp->tx_skb_ring);
1585err_dma_release_rx:
1586 dma_release_channel(lp->rx_chan);
1587err_dma_release_tx:
1588 dma_release_channel(lp->tx_chan);
1589 return ret;
1590}
1591
8a3b7a25 1592/**
6b1b40f7
SBNG
1593 * axienet_init_legacy_dma - init the dma legacy code.
1594 * @ndev: Pointer to net_device structure
8a3b7a25 1595 *
b0d081c5 1596 * Return: 0, on success.
6b1b40f7
SBNG
1597 * non-zero error value on failure
1598 *
1599 * This is the legacy DMA initialization code. It also registers the
1600 * interrupt service routines and enables the interrupt lines.
8a3b7a25 1601 *
8a3b7a25 1602 */
6b1b40f7 1603static int axienet_init_legacy_dma(struct net_device *ndev)
8a3b7a25 1604{
7789e9ed 1605 int ret;
8a3b7a25
DB
1606 struct axienet_local *lp = netdev_priv(ndev);
1607
24201a64 1608 /* Enable worker thread for Axi DMA error handling */
858430db 1609 lp->stopping = false;
24201a64 1610 INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
71c6c837 1611
9e2bc267
RH
1612 napi_enable(&lp->napi_rx);
1613 napi_enable(&lp->napi_tx);
cc37610c 1614
8a3b7a25 1615 /* Enable interrupts for Axi DMA Tx */
9cbc1b68
RH
1616 ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
1617 ndev->name, ndev);
8a3b7a25
DB
1618 if (ret)
1619 goto err_tx_irq;
1620 /* Enable interrupts for Axi DMA Rx */
9cbc1b68
RH
1621 ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
1622 ndev->name, ndev);
8a3b7a25
DB
1623 if (ret)
1624 goto err_rx_irq;
522856ce
RH
1625 /* Enable interrupts for Axi Ethernet core (if defined) */
1626 if (lp->eth_irq > 0) {
1627 ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1628 ndev->name, ndev);
1629 if (ret)
1630 goto err_eth_irq;
1631 }
71c6c837 1632
8a3b7a25
DB
1633 return 0;
1634
522856ce
RH
1635err_eth_irq:
1636 free_irq(lp->rx_irq, ndev);
8a3b7a25
DB
1637err_rx_irq:
1638 free_irq(lp->tx_irq, ndev);
1639err_tx_irq:
9e2bc267
RH
1640 napi_disable(&lp->napi_tx);
1641 napi_disable(&lp->napi_rx);
24201a64 1642 cancel_work_sync(&lp->dma_err_task);
8a3b7a25
DB
1643 dev_err(lp->dev, "request_irq() failed\n");
1644 return ret;
1645}
1646
6b1b40f7
SBNG
1647/**
1648 * axienet_open - Driver open routine.
1649 * @ndev: Pointer to net_device structure
1650 *
1651 * Return: 0, on success.
1652 * non-zero error value on failure
1653 *
 1654 * This is the driver open routine. It resets the Axi Ethernet core and
 1655 * connects and starts the PHY device through phylink.
 1656 * It then brings up the datapath (either the dmaengine channels or the
 1657 * legacy Axi DMA), requesting the interrupt lines, installing the interrupt
 1658 * handlers and initializing the buffer descriptors.
1659 */
1660static int axienet_open(struct net_device *ndev)
1661{
1662 int ret;
1663 struct axienet_local *lp = netdev_priv(ndev);
1664
6b1b40f7
SBNG
1665 /* When we do an Axi Ethernet reset, it resets the complete core
1666 * including the MDIO. MDIO must be disabled before resetting.
1667 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
1668 */
1669 axienet_lock_mii(lp);
1670 ret = axienet_device_reset(ndev);
 1671 axienet_unlock_mii(lp);
 if (ret)
 return ret;
1672
1673 ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
1674 if (ret) {
1675 dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
1676 return ret;
1677 }
1678
1679 phylink_start(lp->phylink);
1680
76abb5d6
SA
1681 /* Start the statistics refresh work */
1682 schedule_delayed_work(&lp->stats_work, 0);
1683
6a91b846
RSP
1684 if (lp->use_dmaengine) {
1685 /* Enable interrupts for Axi Ethernet core (if defined) */
1686 if (lp->eth_irq > 0) {
1687 ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1688 ndev->name, ndev);
1689 if (ret)
1690 goto err_phy;
1691 }
1692
1693 ret = axienet_init_dmaengine(ndev);
1694 if (ret < 0)
1695 goto err_free_eth_irq;
1696 } else {
6b1b40f7
SBNG
1697 ret = axienet_init_legacy_dma(ndev);
1698 if (ret)
1699 goto err_phy;
1700 }
1701
1702 return 0;
1703
6a91b846
RSP
1704err_free_eth_irq:
1705 if (lp->eth_irq > 0)
1706 free_irq(lp->eth_irq, ndev);
6b1b40f7 1707err_phy:
e1d27d29 1708 cancel_work_sync(&lp->rx_dim.work);
76abb5d6 1709 cancel_delayed_work_sync(&lp->stats_work);
6b1b40f7
SBNG
1710 phylink_stop(lp->phylink);
1711 phylink_disconnect_phy(lp->phylink);
1712 return ret;
1713}
1714
8a3b7a25
DB
1715/**
1716 * axienet_stop - Driver stop routine.
1717 * @ndev: Pointer to net_device structure
1718 *
b0d081c5 1719 * Return: 0, on success.
8a3b7a25 1720 *
f5203a3d 1721 * This is the driver stop routine. It stops phylink and disconnects the PHY
8a3b7a25
DB
1722 * device. It also removes the interrupt handlers and disables the interrupts.
1723 * The Axi DMA Tx/Rx BDs are released.
1724 */
1725static int axienet_stop(struct net_device *ndev)
1726{
8a3b7a25 1727 struct axienet_local *lp = netdev_priv(ndev);
6a91b846 1728 int i;
8a3b7a25 1729
6b1b40f7 1730 if (!lp->use_dmaengine) {
858430db
SA
1731 WRITE_ONCE(lp->stopping, true);
1732 flush_work(&lp->dma_err_task);
1733
6b1b40f7
SBNG
1734 napi_disable(&lp->napi_tx);
1735 napi_disable(&lp->napi_rx);
1736 }
cc37610c 1737
e1d27d29 1738 cancel_work_sync(&lp->rx_dim.work);
76abb5d6
SA
1739 cancel_delayed_work_sync(&lp->stats_work);
1740
f5203a3d
RH
1741 phylink_stop(lp->phylink);
1742 phylink_disconnect_phy(lp->phylink);
1743
8a3b7a25
DB
1744 axienet_setoptions(ndev, lp->options &
1745 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1746
6b1b40f7
SBNG
1747 if (!lp->use_dmaengine) {
1748 axienet_dma_stop(lp);
1749 cancel_work_sync(&lp->dma_err_task);
1750 free_irq(lp->tx_irq, ndev);
1751 free_irq(lp->rx_irq, ndev);
1752 axienet_dma_bd_release(ndev);
6a91b846
RSP
1753 } else {
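 /* dmaengine path: terminate both channels and wait for any in-flight
 * completion callbacks to finish before freeing the skb rings.
 */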
1754 dmaengine_terminate_sync(lp->tx_chan);
1755 dmaengine_synchronize(lp->tx_chan);
1756 dmaengine_terminate_sync(lp->rx_chan);
1757 dmaengine_synchronize(lp->rx_chan);
1758
1759 for (i = 0; i < TX_BD_NUM_MAX; i++)
1760 kfree(lp->tx_skb_ring[i]);
1761 kfree(lp->tx_skb_ring);
1762 for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1763 kfree(lp->rx_skb_ring[i]);
1764 kfree(lp->rx_skb_ring);
1765
1766 dma_release_channel(lp->rx_chan);
1767 dma_release_channel(lp->tx_chan);
6b1b40f7 1768 }
489d4d77 1769
c900e49d 1770 netdev_reset_queue(ndev);
489d4d77
RH
1771 axienet_iow(lp, XAE_IE_OFFSET, 0);
1772
522856ce
RH
1773 if (lp->eth_irq > 0)
1774 free_irq(lp->eth_irq, ndev);
8a3b7a25
DB
1775 return 0;
1776}
1777
1778/**
1779 * axienet_change_mtu - Driver change mtu routine.
1780 * @ndev: Pointer to net_device structure
1781 * @new_mtu: New mtu value to be applied
1782 *
b0d081c5 1783 * Return: 0 on success, -EBUSY if the device is running, or -EINVAL if the
 * requested MTU does not fit in the configured Rx memory.
8a3b7a25
DB
1784 *
1785 * This is the change mtu driver routine. It checks if the Axi Ethernet
1786 * hardware supports jumbo frames before changing the mtu. This can be
1787 * called only when the device is not up.
1788 */
1789static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
1790{
1791 struct axienet_local *lp = netdev_priv(ndev);
1792
1793 if (netif_running(ndev))
1794 return -EBUSY;
f080a8c3
ST
1795
1796 if ((new_mtu + VLAN_ETH_HLEN +
1797 XAE_TRL_SIZE) > lp->rxmem)
1798 return -EINVAL;
1799
1eb2cded 1800 WRITE_ONCE(ndev->mtu, new_mtu);
8a3b7a25
DB
1801
1802 return 0;
1803}
1804
1805#ifdef CONFIG_NET_POLL_CONTROLLER
1806/**
1807 * axienet_poll_controller - Axi Ethernet poll mechanism.
1808 * @ndev: Pointer to net_device structure
1809 *
1810 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
1811 * to polling the ISRs and are enabled back after the polling is done.
1812 */
1813static void axienet_poll_controller(struct net_device *ndev)
1814{
1815 struct axienet_local *lp = netdev_priv(ndev);
f7061a3e 1816
8a3b7a25
DB
1817 disable_irq(lp->tx_irq);
1818 disable_irq(lp->rx_irq);
 1819 axienet_rx_irq(lp->rx_irq, ndev);
 1820 axienet_tx_irq(lp->tx_irq, ndev);
1821 enable_irq(lp->tx_irq);
1822 enable_irq(lp->rx_irq);
1823}
1824#endif
1825
2a9b65ea
AP
1826static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1827{
1828 struct axienet_local *lp = netdev_priv(dev);
1829
1830 if (!netif_running(dev))
1831 return -EINVAL;
1832
1833 return phylink_mii_ioctl(lp->phylink, rq, cmd);
1834}
1835
cb45a8bf
RH
1836static void
1837axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1838{
1839 struct axienet_local *lp = netdev_priv(dev);
1840 unsigned int start;
1841
1842 netdev_stats_to_stats64(stats, &dev->stats);
1843
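 /* Snapshot the software packet/byte counters under the u64_stats retry
 * loops so the 64-bit values read consistently on 32-bit systems.
 */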
1844 do {
068c38ad 1845 start = u64_stats_fetch_begin(&lp->rx_stat_sync);
cb45a8bf
RH
1846 stats->rx_packets = u64_stats_read(&lp->rx_packets);
1847 stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
068c38ad 1848 } while (u64_stats_fetch_retry(&lp->rx_stat_sync, start));
cb45a8bf
RH
1849
1850 do {
068c38ad 1851 start = u64_stats_fetch_begin(&lp->tx_stat_sync);
cb45a8bf
RH
1852 stats->tx_packets = u64_stats_read(&lp->tx_packets);
1853 stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
068c38ad 1854 } while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));
76abb5d6
SA
1855
1856 if (!(lp->features & XAE_FEATURE_STATS))
1857 return;
1858
1859 do {
1860 start = read_seqcount_begin(&lp->hw_stats_seqcount);
1861 stats->rx_length_errors =
1862 axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
1863 stats->rx_crc_errors = axienet_stat(lp, STAT_RX_FCS_ERRORS);
1864 stats->rx_frame_errors =
1865 axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
1866 stats->rx_errors = axienet_stat(lp, STAT_UNDERSIZE_FRAMES) +
1867 axienet_stat(lp, STAT_FRAGMENT_FRAMES) +
1868 stats->rx_length_errors +
1869 stats->rx_crc_errors +
1870 stats->rx_frame_errors;
1871 stats->multicast = axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
1872
1873 stats->tx_aborted_errors =
1874 axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
1875 stats->tx_fifo_errors =
1876 axienet_stat(lp, STAT_TX_UNDERRUN_ERRORS);
1877 stats->tx_window_errors =
1878 axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
1879 stats->tx_errors = axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL) +
1880 stats->tx_aborted_errors +
1881 stats->tx_fifo_errors +
1882 stats->tx_window_errors;
1883 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
cb45a8bf
RH
1884}
1885
8a3b7a25
DB
1886static const struct net_device_ops axienet_netdev_ops = {
1887 .ndo_open = axienet_open,
1888 .ndo_stop = axienet_stop,
1889 .ndo_start_xmit = axienet_start_xmit,
cb45a8bf 1890 .ndo_get_stats64 = axienet_get_stats64,
8a3b7a25
DB
1891 .ndo_change_mtu = axienet_change_mtu,
1892 .ndo_set_mac_address = netdev_set_mac_address,
1893 .ndo_validate_addr = eth_validate_addr,
a7605370 1894 .ndo_eth_ioctl = axienet_ioctl,
8a3b7a25
DB
1895 .ndo_set_rx_mode = axienet_set_multicast_list,
1896#ifdef CONFIG_NET_POLL_CONTROLLER
1897 .ndo_poll_controller = axienet_poll_controller,
1898#endif
1899};
1900
6a91b846
RSP
1901static const struct net_device_ops axienet_netdev_dmaengine_ops = {
1902 .ndo_open = axienet_open,
1903 .ndo_stop = axienet_stop,
1904 .ndo_start_xmit = axienet_start_xmit_dmaengine,
1905 .ndo_get_stats64 = axienet_get_stats64,
1906 .ndo_change_mtu = axienet_change_mtu,
1907 .ndo_set_mac_address = netdev_set_mac_address,
1908 .ndo_validate_addr = eth_validate_addr,
1909 .ndo_eth_ioctl = axienet_ioctl,
1910 .ndo_set_rx_mode = axienet_set_multicast_list,
1911};
1912
8a3b7a25
DB
1913/**
1914 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
1915 * @ndev: Pointer to net_device structure
1916 * @ed: Pointer to ethtool_drvinfo structure
1917 *
1918 * This implements ethtool command for getting the driver information.
1919 * Issue "ethtool -i ethX" under linux prompt to execute this function.
1920 */
1921static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
1922 struct ethtool_drvinfo *ed)
1923{
f029c781
WS
1924 strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
1925 strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
8a3b7a25
DB
1926}
1927
1928/**
1929 * axienet_ethtools_get_regs_len - Get the total regs length present in the
1930 * AxiEthernet core.
1931 * @ndev: Pointer to net_device structure
1932 *
1933 * This implements ethtool command for getting the total register length
1934 * information.
b0d081c5
MS
1935 *
1936 * Return: the total regs length
8a3b7a25
DB
1937 */
1938static int axienet_ethtools_get_regs_len(struct net_device *ndev)
1939{
1940 return sizeof(u32) * AXIENET_REGS_N;
1941}
1942
1943/**
1944 * axienet_ethtools_get_regs - Dump the contents of all registers present
1945 * in AxiEthernet core.
1946 * @ndev: Pointer to net_device structure
1947 * @regs: Pointer to ethtool_regs structure
1948 * @ret: Void pointer used to return the contents of the registers.
1949 *
1950 * This implements ethtool command for getting the Axi Ethernet register dump.
1951 * Issue "ethtool -d ethX" to execute this function.
1952 */
1953static void axienet_ethtools_get_regs(struct net_device *ndev,
1954 struct ethtool_regs *regs, void *ret)
1955{
7fe85bb3 1956 u32 *data = (u32 *)ret;
8a3b7a25
DB
1957 size_t len = sizeof(u32) * AXIENET_REGS_N;
1958 struct axienet_local *lp = netdev_priv(ndev);
1959
1960 regs->version = 0;
1961 regs->len = len;
1962
1963 memset(data, 0, len);
1964 data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
1965 data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
1966 data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
1967 data[3] = axienet_ior(lp, XAE_IS_OFFSET);
1968 data[4] = axienet_ior(lp, XAE_IP_OFFSET);
1969 data[5] = axienet_ior(lp, XAE_IE_OFFSET);
1970 data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
1971 data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
1972 data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
1973 data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
1974 data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
1975 data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
1976 data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
1977 data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
1978 data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
1979 data[15] = axienet_ior(lp, XAE_TC_OFFSET);
1980 data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
1981 data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
1982 data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
1983 data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
1984 data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
1985 data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
1986 data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
8a3b7a25
DB
1987 data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
1988 data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
1989 data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
1990 data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
1991 data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
6b1b40f7
SBNG
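 /* The DMA registers below only exist for the legacy in-driver Axi DMA;
 * with dmaengine the DMA controller is handled by its own driver.
 */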
1992 if (!lp->use_dmaengine) {
1993 data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1994 data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1995 data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
1996 data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
1997 data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1998 data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1999 data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
2000 data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
2001 }
8a3b7a25
DB
2002}
2003
74624944
HC
2004static void
2005axienet_ethtools_get_ringparam(struct net_device *ndev,
2006 struct ethtool_ringparam *ering,
2007 struct kernel_ethtool_ringparam *kernel_ering,
2008 struct netlink_ext_ack *extack)
8b09ca82
RH
2009{
2010 struct axienet_local *lp = netdev_priv(ndev);
2011
2012 ering->rx_max_pending = RX_BD_NUM_MAX;
2013 ering->rx_mini_max_pending = 0;
2014 ering->rx_jumbo_max_pending = 0;
2015 ering->tx_max_pending = TX_BD_NUM_MAX;
2016 ering->rx_pending = lp->rx_bd_num;
2017 ering->rx_mini_pending = 0;
2018 ering->rx_jumbo_pending = 0;
2019 ering->tx_pending = lp->tx_bd_num;
2020}
2021
74624944
HC
2022static int
2023axienet_ethtools_set_ringparam(struct net_device *ndev,
2024 struct ethtool_ringparam *ering,
2025 struct kernel_ethtool_ringparam *kernel_ering,
2026 struct netlink_ext_ack *extack)
8b09ca82
RH
2027{
2028 struct axienet_local *lp = netdev_priv(ndev);
2029
2030 if (ering->rx_pending > RX_BD_NUM_MAX ||
2031 ering->rx_mini_pending ||
2032 ering->rx_jumbo_pending ||
70f5817d
RH
2033 ering->tx_pending < TX_BD_NUM_MIN ||
2034 ering->tx_pending > TX_BD_NUM_MAX)
8b09ca82
RH
2035 return -EINVAL;
2036
2037 if (netif_running(ndev))
2038 return -EBUSY;
2039
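 /* The interface is down here, so the new ring sizes simply take effect
 * the next time the device is opened.
 */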
2040 lp->rx_bd_num = ering->rx_pending;
2041 lp->tx_bd_num = ering->tx_pending;
2042 return 0;
2043}
2044
8a3b7a25
DB
2045/**
2046 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
2047 * Tx and Rx paths.
2048 * @ndev: Pointer to net_device structure
2049 * @epauseparm: Pointer to ethtool_pauseparam structure.
2050 *
2051 * This implements ethtool command for getting axi ethernet pause frame
2052 * setting. Issue "ethtool -a ethX" to execute this function.
2053 */
2054static void
2055axienet_ethtools_get_pauseparam(struct net_device *ndev,
2056 struct ethtool_pauseparam *epauseparm)
2057{
8a3b7a25 2058 struct axienet_local *lp = netdev_priv(ndev);
f5203a3d
RH
2059
2060 phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
8a3b7a25
DB
2061}
2062
2063/**
2064 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
2065 * settings.
2066 * @ndev: Pointer to net_device structure
b0d081c5 2067 * @epauseparm: Pointer to ethtool_pauseparam structure
8a3b7a25
DB
2068 *
2069 * This implements ethtool command for enabling flow control on Rx and Tx
2070 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
2071 * function.
b0d081c5
MS
2072 *
 2073 * Return: 0 on success, or a negative error code returned by phylink
8a3b7a25
DB
2074 */
2075static int
2076axienet_ethtools_set_pauseparam(struct net_device *ndev,
2077 struct ethtool_pauseparam *epauseparm)
2078{
8a3b7a25
DB
2079 struct axienet_local *lp = netdev_priv(ndev);
2080
f5203a3d 2081 return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
8a3b7a25
DB
2082}
2083
d048c717
SA
2084/**
2085 * axienet_update_coalesce_rx() - Set RX CR
2086 * @lp: Device private data
2087 * @cr: Value to write to the RX CR
2088 * @mask: Bits to set from @cr
2089 */
2090static void axienet_update_coalesce_rx(struct axienet_local *lp, u32 cr,
2091 u32 mask)
2092{
2093 spin_lock_irq(&lp->rx_cr_lock);
2094 lp->rx_dma_cr &= ~mask;
2095 lp->rx_dma_cr |= cr;
2096 /* If DMA isn't started, then the settings will be applied the next
2097 * time dma_start() is called.
2098 */
2099 if (lp->rx_dma_started) {
2100 u32 reg = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
2101
2102 /* Don't enable IRQs if they are disabled by NAPI */
2103 if (reg & XAXIDMA_IRQ_ALL_MASK)
2104 cr = lp->rx_dma_cr;
2105 else
2106 cr = lp->rx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK;
2107 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
2108 }
2109 spin_unlock_irq(&lp->rx_cr_lock);
2110}
2111
e1d27d29
SA
2112/**
2113 * axienet_dim_coalesce_count_rx() - RX coalesce count for DIM
2114 * @lp: Device private data
2115 */
2116static u32 axienet_dim_coalesce_count_rx(struct axienet_local *lp)
2117{
2118 return min(1 << (lp->rx_dim.profile_ix << 1), 255);
2119}
2120
2121/**
2122 * axienet_rx_dim_work() - Adjust RX DIM settings
2123 * @work: The work struct
2124 */
2125static void axienet_rx_dim_work(struct work_struct *work)
2126{
2127 struct axienet_local *lp =
2128 container_of(work, struct axienet_local, rx_dim.work);
2129 u32 cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp), 0);
2130 u32 mask = XAXIDMA_COALESCE_MASK | XAXIDMA_IRQ_IOC_MASK |
2131 XAXIDMA_IRQ_ERROR_MASK;
2132
2133 axienet_update_coalesce_rx(lp, cr, mask);
2134 lp->rx_dim.state = DIM_START_MEASURE;
2135}
2136
d048c717
SA
2137/**
2138 * axienet_update_coalesce_tx() - Set TX CR
2139 * @lp: Device private data
2140 * @cr: Value to write to the TX CR
2141 * @mask: Bits to set from @cr
2142 */
2143static void axienet_update_coalesce_tx(struct axienet_local *lp, u32 cr,
2144 u32 mask)
2145{
2146 spin_lock_irq(&lp->tx_cr_lock);
2147 lp->tx_dma_cr &= ~mask;
2148 lp->tx_dma_cr |= cr;
2149 /* If DMA isn't started, then the settings will be applied the next
2150 * time dma_start() is called.
2151 */
2152 if (lp->tx_dma_started) {
2153 u32 reg = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
2154
2155 /* Don't enable IRQs if they are disabled by NAPI */
2156 if (reg & XAXIDMA_IRQ_ALL_MASK)
2157 cr = lp->tx_dma_cr;
2158 else
2159 cr = lp->tx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK;
2160 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
2161 }
2162 spin_unlock_irq(&lp->tx_cr_lock);
2163}
2164
8a3b7a25
DB
2165/**
2166 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
2167 * @ndev: Pointer to net_device structure
2168 * @ecoalesce: Pointer to ethtool_coalesce structure
f3ccfda1
YM
2169 * @kernel_coal: ethtool CQE mode setting structure
2170 * @extack: extack for reporting error messages
8a3b7a25
DB
2171 *
2172 * This implements ethtool command for getting the DMA interrupt coalescing
2173 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
2174 * execute this function.
b0d081c5
MS
2175 *
2176 * Return: 0 always
8a3b7a25 2177 */
f3ccfda1
YM
2178static int
2179axienet_ethtools_get_coalesce(struct net_device *ndev,
2180 struct ethtool_coalesce *ecoalesce,
2181 struct kernel_ethtool_coalesce *kernel_coal,
2182 struct netlink_ext_ack *extack)
8a3b7a25 2183{
8a3b7a25 2184 struct axienet_local *lp = netdev_priv(ndev);
eb80520e 2185 u32 cr;
0b79b8dc 2186
e1d27d29
SA
2187 ecoalesce->use_adaptive_rx_coalesce = lp->rx_dim_enabled;
2188
eb80520e
SA
2189 spin_lock_irq(&lp->rx_cr_lock);
2190 cr = lp->rx_dma_cr;
2191 spin_unlock_irq(&lp->rx_cr_lock);
2192 axienet_coalesce_params(lp, cr,
2193 &ecoalesce->rx_max_coalesced_frames,
2194 &ecoalesce->rx_coalesce_usecs);
2195
2196 spin_lock_irq(&lp->tx_cr_lock);
2197 cr = lp->tx_dma_cr;
2198 spin_unlock_irq(&lp->tx_cr_lock);
2199 axienet_coalesce_params(lp, cr,
2200 &ecoalesce->tx_max_coalesced_frames,
2201 &ecoalesce->tx_coalesce_usecs);
8a3b7a25
DB
2202 return 0;
2203}
2204
2205/**
2206 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
2207 * @ndev: Pointer to net_device structure
2208 * @ecoalesce: Pointer to ethtool_coalesce structure
f3ccfda1
YM
2209 * @kernel_coal: ethtool CQE mode setting structure
2210 * @extack: extack for reporting error messages
8a3b7a25
DB
2211 *
2212 * This implements ethtool command for setting the DMA interrupt coalescing
2213 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
2214 * prompt to execute this function.
b0d081c5
MS
2215 *
 2216 * Return: 0 on success, non-zero error value on failure.
8a3b7a25 2217 */
f3ccfda1
YM
2218static int
2219axienet_ethtools_set_coalesce(struct net_device *ndev,
2220 struct ethtool_coalesce *ecoalesce,
2221 struct kernel_ethtool_coalesce *kernel_coal,
2222 struct netlink_ext_ack *extack)
8a3b7a25
DB
2223{
2224 struct axienet_local *lp = netdev_priv(ndev);
e1d27d29
SA
2225 bool new_dim = ecoalesce->use_adaptive_rx_coalesce;
2226 bool old_dim = lp->rx_dim_enabled;
2227 u32 cr, mask = ~XAXIDMA_CR_RUNSTOP_MASK;
8a3b7a25 2228
c17ff476
SA
2229 if (ecoalesce->rx_max_coalesced_frames > 255 ||
2230 ecoalesce->tx_max_coalesced_frames > 255) {
2231 NL_SET_ERR_MSG(extack, "frames must be less than 256");
2232 return -EINVAL;
2233 }
2234
9d301a53
SA
2235 if (!ecoalesce->rx_max_coalesced_frames ||
2236 !ecoalesce->tx_max_coalesced_frames) {
2237 NL_SET_ERR_MSG(extack, "frames must be non-zero");
2238 return -EINVAL;
2239 }
2240
e1d27d29 2241 if (((ecoalesce->rx_max_coalesced_frames > 1 || new_dim) &&
9d301a53
SA
2242 !ecoalesce->rx_coalesce_usecs) ||
2243 (ecoalesce->tx_max_coalesced_frames > 1 &&
2244 !ecoalesce->tx_coalesce_usecs)) {
2245 NL_SET_ERR_MSG(extack,
2246 "usecs must be non-zero when frames is greater than one");
2247 return -EINVAL;
2248 }
2249
e1d27d29
SA
2250 if (new_dim && !old_dim) {
2251 cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp),
2252 ecoalesce->rx_coalesce_usecs);
2253 } else if (!new_dim) {
2254 if (old_dim) {
2255 WRITE_ONCE(lp->rx_dim_enabled, false);
2256 napi_synchronize(&lp->napi_rx);
2257 flush_work(&lp->rx_dim.work);
2258 }
2259
2260 cr = axienet_calc_cr(lp, ecoalesce->rx_max_coalesced_frames,
2261 ecoalesce->rx_coalesce_usecs);
2262 } else {
2263 /* Dummy value for count just to calculate timer */
2264 cr = axienet_calc_cr(lp, 2, ecoalesce->rx_coalesce_usecs);
2265 mask = XAXIDMA_DELAY_MASK | XAXIDMA_IRQ_DELAY_MASK;
2266 }
2267
2268 axienet_update_coalesce_rx(lp, cr, mask);
2269 if (new_dim && !old_dim)
2270 WRITE_ONCE(lp->rx_dim_enabled, true);
d048c717 2271
eb80520e
SA
2272 cr = axienet_calc_cr(lp, ecoalesce->tx_max_coalesced_frames,
2273 ecoalesce->tx_coalesce_usecs);
d048c717 2274 axienet_update_coalesce_tx(lp, cr, ~XAXIDMA_CR_RUNSTOP_MASK);
8a3b7a25
DB
2275 return 0;
2276}
2277
f5203a3d
RH
2278static int
2279axienet_ethtools_get_link_ksettings(struct net_device *ndev,
2280 struct ethtool_link_ksettings *cmd)
2281{
2282 struct axienet_local *lp = netdev_priv(ndev);
2283
2284 return phylink_ethtool_ksettings_get(lp->phylink, cmd);
2285}
2286
2287static int
2288axienet_ethtools_set_link_ksettings(struct net_device *ndev,
2289 const struct ethtool_link_ksettings *cmd)
2290{
2291 struct axienet_local *lp = netdev_priv(ndev);
2292
2293 return phylink_ethtool_ksettings_set(lp->phylink, cmd);
2294}
2295
66b51663
RH
2296static int axienet_ethtools_nway_reset(struct net_device *dev)
2297{
2298 struct axienet_local *lp = netdev_priv(dev);
2299
2300 return phylink_ethtool_nway_reset(lp->phylink);
2301}
2302
76abb5d6
SA
2303static void axienet_ethtools_get_ethtool_stats(struct net_device *dev,
2304 struct ethtool_stats *stats,
2305 u64 *data)
2306{
2307 struct axienet_local *lp = netdev_priv(dev);
2308 unsigned int start;
2309
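 /* Read the MAC hardware counters under the seqcount so the snapshot
 * stays consistent with the periodic statistics refresh worker.
 */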
2310 do {
2311 start = read_seqcount_begin(&lp->hw_stats_seqcount);
2312 data[0] = axienet_stat(lp, STAT_RX_BYTES);
2313 data[1] = axienet_stat(lp, STAT_TX_BYTES);
2314 data[2] = axienet_stat(lp, STAT_RX_VLAN_FRAMES);
2315 data[3] = axienet_stat(lp, STAT_TX_VLAN_FRAMES);
2316 data[6] = axienet_stat(lp, STAT_TX_PFC_FRAMES);
2317 data[7] = axienet_stat(lp, STAT_RX_PFC_FRAMES);
2318 data[8] = axienet_stat(lp, STAT_USER_DEFINED0);
2319 data[9] = axienet_stat(lp, STAT_USER_DEFINED1);
2320 data[10] = axienet_stat(lp, STAT_USER_DEFINED2);
2321 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2322}
2323
2324static const char axienet_ethtool_stats_strings[][ETH_GSTRING_LEN] = {
2325 "Received bytes",
2326 "Transmitted bytes",
2327 "RX Good VLAN Tagged Frames",
2328 "TX Good VLAN Tagged Frames",
2329 "TX Good PFC Frames",
2330 "RX Good PFC Frames",
2331 "User Defined Counter 0",
2332 "User Defined Counter 1",
2333 "User Defined Counter 2",
2334};
2335
2336static void axienet_ethtools_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2337{
2338 switch (stringset) {
2339 case ETH_SS_STATS:
2340 memcpy(data, axienet_ethtool_stats_strings,
2341 sizeof(axienet_ethtool_stats_strings));
2342 break;
2343 }
2344}
2345
2346static int axienet_ethtools_get_sset_count(struct net_device *dev, int sset)
2347{
2348 struct axienet_local *lp = netdev_priv(dev);
2349
2350 switch (sset) {
2351 case ETH_SS_STATS:
2352 if (lp->features & XAE_FEATURE_STATS)
2353 return ARRAY_SIZE(axienet_ethtool_stats_strings);
2354 fallthrough;
2355 default:
2356 return -EOPNOTSUPP;
2357 }
2358}
2359
2360static void
2361axienet_ethtools_get_pause_stats(struct net_device *dev,
2362 struct ethtool_pause_stats *pause_stats)
2363{
2364 struct axienet_local *lp = netdev_priv(dev);
2365 unsigned int start;
2366
2367 if (!(lp->features & XAE_FEATURE_STATS))
2368 return;
2369
2370 do {
2371 start = read_seqcount_begin(&lp->hw_stats_seqcount);
2372 pause_stats->tx_pause_frames =
2373 axienet_stat(lp, STAT_TX_PAUSE_FRAMES);
2374 pause_stats->rx_pause_frames =
2375 axienet_stat(lp, STAT_RX_PAUSE_FRAMES);
2376 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2377}
2378
2379static void
2380axienet_ethtool_get_eth_mac_stats(struct net_device *dev,
2381 struct ethtool_eth_mac_stats *mac_stats)
2382{
2383 struct axienet_local *lp = netdev_priv(dev);
2384 unsigned int start;
2385
2386 if (!(lp->features & XAE_FEATURE_STATS))
2387 return;
2388
2389 do {
2390 start = read_seqcount_begin(&lp->hw_stats_seqcount);
2391 mac_stats->FramesTransmittedOK =
2392 axienet_stat(lp, STAT_TX_GOOD_FRAMES);
2393 mac_stats->SingleCollisionFrames =
2394 axienet_stat(lp, STAT_TX_SINGLE_COLLISION_FRAMES);
2395 mac_stats->MultipleCollisionFrames =
2396 axienet_stat(lp, STAT_TX_MULTIPLE_COLLISION_FRAMES);
2397 mac_stats->FramesReceivedOK =
2398 axienet_stat(lp, STAT_RX_GOOD_FRAMES);
2399 mac_stats->FrameCheckSequenceErrors =
2400 axienet_stat(lp, STAT_RX_FCS_ERRORS);
2401 mac_stats->AlignmentErrors =
2402 axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
2403 mac_stats->FramesWithDeferredXmissions =
2404 axienet_stat(lp, STAT_TX_DEFERRED_FRAMES);
2405 mac_stats->LateCollisions =
2406 axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
2407 mac_stats->FramesAbortedDueToXSColls =
2408 axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
2409 mac_stats->MulticastFramesXmittedOK =
2410 axienet_stat(lp, STAT_TX_MULTICAST_FRAMES);
2411 mac_stats->BroadcastFramesXmittedOK =
2412 axienet_stat(lp, STAT_TX_BROADCAST_FRAMES);
2413 mac_stats->FramesWithExcessiveDeferral =
2414 axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL);
2415 mac_stats->MulticastFramesReceivedOK =
2416 axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
2417 mac_stats->BroadcastFramesReceivedOK =
2418 axienet_stat(lp, STAT_RX_BROADCAST_FRAMES);
2419 mac_stats->InRangeLengthErrors =
2420 axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
2421 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2422}
2423
2424static void
2425axienet_ethtool_get_eth_ctrl_stats(struct net_device *dev,
2426 struct ethtool_eth_ctrl_stats *ctrl_stats)
2427{
2428 struct axienet_local *lp = netdev_priv(dev);
2429 unsigned int start;
2430
2431 if (!(lp->features & XAE_FEATURE_STATS))
2432 return;
2433
2434 do {
2435 start = read_seqcount_begin(&lp->hw_stats_seqcount);
2436 ctrl_stats->MACControlFramesTransmitted =
2437 axienet_stat(lp, STAT_TX_CONTROL_FRAMES);
2438 ctrl_stats->MACControlFramesReceived =
2439 axienet_stat(lp, STAT_RX_CONTROL_FRAMES);
2440 ctrl_stats->UnsupportedOpcodesReceived =
2441 axienet_stat(lp, STAT_RX_CONTROL_OPCODE_ERRORS);
2442 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2443}
2444
2445static const struct ethtool_rmon_hist_range axienet_rmon_ranges[] = {
2446 { 64, 64 },
2447 { 65, 127 },
2448 { 128, 255 },
2449 { 256, 511 },
2450 { 512, 1023 },
2451 { 1024, 1518 },
2452 { 1519, 16384 },
2453 { },
2454};
2455
2456static void
2457axienet_ethtool_get_rmon_stats(struct net_device *dev,
2458 struct ethtool_rmon_stats *rmon_stats,
2459 const struct ethtool_rmon_hist_range **ranges)
2460{
2461 struct axienet_local *lp = netdev_priv(dev);
2462 unsigned int start;
2463
2464 if (!(lp->features & XAE_FEATURE_STATS))
2465 return;
2466
2467 do {
2468 start = read_seqcount_begin(&lp->hw_stats_seqcount);
2469 rmon_stats->undersize_pkts =
2470 axienet_stat(lp, STAT_UNDERSIZE_FRAMES);
2471 rmon_stats->oversize_pkts =
2472 axienet_stat(lp, STAT_RX_OVERSIZE_FRAMES);
2473 rmon_stats->fragments =
2474 axienet_stat(lp, STAT_FRAGMENT_FRAMES);
2475
2476 rmon_stats->hist[0] =
2477 axienet_stat(lp, STAT_RX_64_BYTE_FRAMES);
2478 rmon_stats->hist[1] =
2479 axienet_stat(lp, STAT_RX_65_127_BYTE_FRAMES);
2480 rmon_stats->hist[2] =
2481 axienet_stat(lp, STAT_RX_128_255_BYTE_FRAMES);
2482 rmon_stats->hist[3] =
2483 axienet_stat(lp, STAT_RX_256_511_BYTE_FRAMES);
2484 rmon_stats->hist[4] =
2485 axienet_stat(lp, STAT_RX_512_1023_BYTE_FRAMES);
2486 rmon_stats->hist[5] =
2487 axienet_stat(lp, STAT_RX_1024_MAX_BYTE_FRAMES);
2488 rmon_stats->hist[6] =
2489 rmon_stats->oversize_pkts;
2490
2491 rmon_stats->hist_tx[0] =
2492 axienet_stat(lp, STAT_TX_64_BYTE_FRAMES);
2493 rmon_stats->hist_tx[1] =
2494 axienet_stat(lp, STAT_TX_65_127_BYTE_FRAMES);
2495 rmon_stats->hist_tx[2] =
2496 axienet_stat(lp, STAT_TX_128_255_BYTE_FRAMES);
2497 rmon_stats->hist_tx[3] =
2498 axienet_stat(lp, STAT_TX_256_511_BYTE_FRAMES);
2499 rmon_stats->hist_tx[4] =
2500 axienet_stat(lp, STAT_TX_512_1023_BYTE_FRAMES);
2501 rmon_stats->hist_tx[5] =
2502 axienet_stat(lp, STAT_TX_1024_MAX_BYTE_FRAMES);
2503 rmon_stats->hist_tx[6] =
2504 axienet_stat(lp, STAT_TX_OVERSIZE_FRAMES);
2505 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2506
2507 *ranges = axienet_rmon_ranges;
2508}
2509
c7735f1b 2510static const struct ethtool_ops axienet_ethtool_ops = {
0b79b8dc 2511 .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
e1d27d29
SA
2512 ETHTOOL_COALESCE_USECS |
2513 ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
8a3b7a25
DB
2514 .get_drvinfo = axienet_ethtools_get_drvinfo,
2515 .get_regs_len = axienet_ethtools_get_regs_len,
2516 .get_regs = axienet_ethtools_get_regs,
2517 .get_link = ethtool_op_get_link,
8b09ca82
RH
2518 .get_ringparam = axienet_ethtools_get_ringparam,
2519 .set_ringparam = axienet_ethtools_set_ringparam,
8a3b7a25
DB
2520 .get_pauseparam = axienet_ethtools_get_pauseparam,
2521 .set_pauseparam = axienet_ethtools_set_pauseparam,
2522 .get_coalesce = axienet_ethtools_get_coalesce,
2523 .set_coalesce = axienet_ethtools_set_coalesce,
f5203a3d
RH
2524 .get_link_ksettings = axienet_ethtools_get_link_ksettings,
2525 .set_link_ksettings = axienet_ethtools_set_link_ksettings,
66b51663 2526 .nway_reset = axienet_ethtools_nway_reset,
76abb5d6
SA
2527 .get_ethtool_stats = axienet_ethtools_get_ethtool_stats,
2528 .get_strings = axienet_ethtools_get_strings,
2529 .get_sset_count = axienet_ethtools_get_sset_count,
2530 .get_pause_stats = axienet_ethtools_get_pause_stats,
2531 .get_eth_mac_stats = axienet_ethtool_get_eth_mac_stats,
2532 .get_eth_ctrl_stats = axienet_ethtool_get_eth_ctrl_stats,
2533 .get_rmon_stats = axienet_ethtool_get_rmon_stats,
f5203a3d
RH
2534};
2535
7a86be6a 2536static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
f5203a3d 2537{
7a86be6a
RKO
2538 return container_of(pcs, struct axienet_local, pcs);
2539}
f5203a3d 2540
7a86be6a 2541static void axienet_pcs_get_state(struct phylink_pcs *pcs,
c6739623 2542 unsigned int neg_mode,
7a86be6a
RKO
2543 struct phylink_link_state *state)
2544{
2545 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2546
7e3cb4e8 2547 phylink_mii_c22_pcs_get_state(pcs_phy, neg_mode, state);
f5203a3d
RH
2548}
2549
7a86be6a 2550static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
f5203a3d 2551{
7a86be6a 2552 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
1a025560 2553
7a86be6a 2554 phylink_mii_c22_pcs_an_restart(pcs_phy);
f5203a3d
RH
2555}
2556
febf2aaf 2557static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
7a86be6a
RKO
2558 phy_interface_t interface,
2559 const unsigned long *advertising,
2560 bool permit_pause_to_mac)
6c8f06bb 2561{
7a86be6a
RKO
2562 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2563 struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
6c8f06bb
RH
2564 struct axienet_local *lp = netdev_priv(ndev);
2565 int ret;
2566
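 /* When enabled in hardware, the PCS/PMA core can be switched between
 * SGMII and 1000BASE-X at runtime through its standard-select register.
 */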
7a86be6a 2567 if (lp->switch_x_sgmii) {
03854d8a 2568 ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG,
7a86be6a 2569 interface == PHY_INTERFACE_MODE_SGMII ?
6c8f06bb 2570 XLNX_MII_STD_SELECT_SGMII : 0);
7a86be6a
RKO
2571 if (ret < 0) {
2572 netdev_warn(ndev,
2573 "Failed to switch PHY interface: %d\n",
6c8f06bb 2574 ret);
7a86be6a
RKO
2575 return ret;
2576 }
6c8f06bb 2577 }
7a86be6a 2578
febf2aaf
RKO
2579 ret = phylink_mii_c22_pcs_config(pcs_phy, interface, advertising,
2580 neg_mode);
7a86be6a
RKO
2581 if (ret < 0)
2582 netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);
2583
2584 return ret;
6c8f06bb
RH
2585}
2586
7a86be6a
RKO
2587static const struct phylink_pcs_ops axienet_pcs_ops = {
2588 .pcs_get_state = axienet_pcs_get_state,
2589 .pcs_config = axienet_pcs_config,
2590 .pcs_an_restart = axienet_pcs_an_restart,
2591};
2592
2593static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
2594 phy_interface_t interface)
95347842 2595{
1a025560
RH
2596 struct net_device *ndev = to_net_dev(config->dev);
2597 struct axienet_local *lp = netdev_priv(ndev);
1a025560 2598
7a86be6a
RKO
2599 if (interface == PHY_INTERFACE_MODE_1000BASEX ||
2600 interface == PHY_INTERFACE_MODE_SGMII)
2601 return &lp->pcs;
1a025560 2602
7a86be6a
RKO
2603 return NULL;
2604}
2605
2606static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
2607 const struct phylink_link_state *state)
2608{
2609 /* nothing meaningful to do */
95347842
RK
2610}
2611
2612static void axienet_mac_link_down(struct phylink_config *config,
2613 unsigned int mode,
2614 phy_interface_t interface)
2615{
2616 /* nothing meaningful to do */
2617}
2618
2619static void axienet_mac_link_up(struct phylink_config *config,
2620 struct phy_device *phy,
2621 unsigned int mode, phy_interface_t interface,
2622 int speed, int duplex,
2623 bool tx_pause, bool rx_pause)
f5203a3d
RH
2624{
2625 struct net_device *ndev = to_net_dev(config->dev);
2626 struct axienet_local *lp = netdev_priv(ndev);
2627 u32 emmc_reg, fcc_reg;
2628
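 /* Program the negotiated speed into the MAC's EMMC register and mirror
 * the resolved pause settings into the flow control register.
 */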
2629 emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
2630 emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
2631
95347842 2632 switch (speed) {
f5203a3d
RH
2633 case SPEED_1000:
2634 emmc_reg |= XAE_EMMC_LINKSPD_1000;
2635 break;
2636 case SPEED_100:
2637 emmc_reg |= XAE_EMMC_LINKSPD_100;
2638 break;
2639 case SPEED_10:
2640 emmc_reg |= XAE_EMMC_LINKSPD_10;
2641 break;
2642 default:
2643 dev_err(&ndev->dev,
2644 "Speed other than 10, 100 or 1Gbps is not supported\n");
2645 break;
2646 }
2647
2648 axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
2649
2650 fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
95347842 2651 if (tx_pause)
f5203a3d
RH
2652 fcc_reg |= XAE_FCC_FCTX_MASK;
2653 else
2654 fcc_reg &= ~XAE_FCC_FCTX_MASK;
95347842 2655 if (rx_pause)
f5203a3d
RH
2656 fcc_reg |= XAE_FCC_FCRX_MASK;
2657 else
2658 fcc_reg &= ~XAE_FCC_FCRX_MASK;
2659 axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
2660}
2661
f5203a3d 2662static const struct phylink_mac_ops axienet_phylink_ops = {
7a86be6a 2663 .mac_select_pcs = axienet_mac_select_pcs,
f5203a3d
RH
2664 .mac_config = axienet_mac_config,
2665 .mac_link_down = axienet_mac_link_down,
2666 .mac_link_up = axienet_mac_link_up,
8a3b7a25
DB
2667};
2668
2669/**
24201a64
AP
2670 * axienet_dma_err_handler - Work queue task for Axi DMA Error
2671 * @work: pointer to work_struct
8a3b7a25
DB
2672 *
2673 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
2674 * Tx/Rx BDs.
2675 */
24201a64 2676static void axienet_dma_err_handler(struct work_struct *work)
8a3b7a25 2677{
84b9ccc0 2678 u32 i;
8a3b7a25 2679 u32 axienet_status;
84b9ccc0 2680 struct axidma_bd *cur_p;
24201a64
AP
2681 struct axienet_local *lp = container_of(work, struct axienet_local,
2682 dma_err_task);
8a3b7a25 2683 struct net_device *ndev = lp->ndev;
8a3b7a25 2684
858430db
SA
2685 /* Don't bother if we are going to stop anyway */
2686 if (READ_ONCE(lp->stopping))
2687 return;
2688
9e2bc267
RH
2689 napi_disable(&lp->napi_tx);
2690 napi_disable(&lp->napi_rx);
cc37610c 2691
8a3b7a25
DB
2692 axienet_setoptions(ndev, lp->options &
2693 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
84b9ccc0
RH
2694
2695 axienet_dma_stop(lp);
c900e49d 2696 netdev_reset_queue(ndev);
8a3b7a25 2697
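 /* Reclaim any Tx buffers that were still in flight and return both BD
 * rings to a clean state before restarting the DMA.
 */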
8b09ca82 2698 for (i = 0; i < lp->tx_bd_num; i++) {
8a3b7a25 2699 cur_p = &lp->tx_bd_v[i];
4e958f33
AP
2700 if (cur_p->cntrl) {
2701 dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
2702
17882fd4 2703 dma_unmap_single(lp->dev, addr,
8a3b7a25
DB
2704 (cur_p->cntrl &
2705 XAXIDMA_BD_CTRL_LENGTH_MASK),
2706 DMA_TO_DEVICE);
4e958f33 2707 }
23e6b2dc
RH
2708 if (cur_p->skb)
2709 dev_kfree_skb_irq(cur_p->skb);
8a3b7a25 2710 cur_p->phys = 0;
4e958f33 2711 cur_p->phys_msb = 0;
8a3b7a25
DB
2712 cur_p->cntrl = 0;
2713 cur_p->status = 0;
2714 cur_p->app0 = 0;
2715 cur_p->app1 = 0;
2716 cur_p->app2 = 0;
2717 cur_p->app3 = 0;
2718 cur_p->app4 = 0;
23e6b2dc 2719 cur_p->skb = NULL;
8a3b7a25
DB
2720 }
2721
8b09ca82 2722 for (i = 0; i < lp->rx_bd_num; i++) {
8a3b7a25
DB
2723 cur_p = &lp->rx_bd_v[i];
2724 cur_p->status = 0;
2725 cur_p->app0 = 0;
2726 cur_p->app1 = 0;
2727 cur_p->app2 = 0;
2728 cur_p->app3 = 0;
2729 cur_p->app4 = 0;
2730 }
2731
2732 lp->tx_bd_ci = 0;
2733 lp->tx_bd_tail = 0;
2734 lp->rx_bd_ci = 0;
2735
84b9ccc0 2736 axienet_dma_start(lp);
8a3b7a25
DB
2737
2738 axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
2739 axienet_status &= ~XAE_RCW1_RX_MASK;
2740 axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
2741
2742 axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
2743 if (axienet_status & XAE_INT_RXRJECT_MASK)
2744 axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
522856ce
RH
2745 axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
2746 XAE_INT_RECV_ERROR_MASK : 0);
8a3b7a25
DB
2747 axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
2748
2749 /* Sync default options with HW but leave receiver and
850a7503
MS
2750 * transmitter disabled.
2751 */
8a3b7a25
DB
2752 axienet_setoptions(ndev, lp->options &
2753 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
2754 axienet_set_mac_address(ndev, NULL);
2755 axienet_set_multicast_list(ndev);
9e2bc267
RH
2756 napi_enable(&lp->napi_rx);
2757 napi_enable(&lp->napi_tx);
799a8295 2758 axienet_setoptions(ndev, lp->options);
8a3b7a25
DB
2759}
2760
2761/**
2be58620 2762 * axienet_probe - Axi Ethernet probe function.
95219aa5 2763 * @pdev: Pointer to platform device structure.
8a3b7a25 2764 *
b0d081c5 2765 * Return: 0, on success
8a3b7a25
DB
2766 * Non-zero error value on failure.
2767 *
2768 * This is the probe routine for Axi Ethernet driver. This is called before
2769 * any other driver routines are invoked. It allocates and sets up the Ethernet
2770 * device. Parses through device tree and populates fields of
2771 * axienet_local. It registers the Ethernet device.
2772 */
2be58620 2773static int axienet_probe(struct platform_device *pdev)
8a3b7a25 2774{
8495659b 2775 int ret;
8a3b7a25
DB
2776 struct device_node *np;
2777 struct axienet_local *lp;
2778 struct net_device *ndev;
28ef9ebd 2779 struct resource *ethres;
83216e39 2780 u8 mac_addr[ETH_ALEN];
5fff0151 2781 int addr_width = 32;
8495659b 2782 u32 value;
8a3b7a25
DB
2783
2784 ndev = alloc_etherdev(sizeof(*lp));
41de8d4c 2785 if (!ndev)
8a3b7a25 2786 return -ENOMEM;
8a3b7a25 2787
95219aa5 2788 platform_set_drvdata(pdev, ndev);
8a3b7a25 2789
95219aa5 2790 SET_NETDEV_DEV(ndev, &pdev->dev);
28e24c62 2791 ndev->features = NETIF_F_SG;
8a3b7a25
DB
2792 ndev->ethtool_ops = &axienet_ethtool_ops;
2793
d894be57
JW
2794 /* MTU range: 64 - 9000 */
2795 ndev->min_mtu = 64;
2796 ndev->max_mtu = XAE_JUMBO_MTU;
2797
8a3b7a25
DB
2798 lp = netdev_priv(ndev);
2799 lp->ndev = ndev;
95219aa5 2800 lp->dev = &pdev->dev;
8a3b7a25 2801 lp->options = XAE_OPTION_DEFAULTS;
8b09ca82
RH
2802 lp->rx_bd_num = RX_BD_NUM_DEFAULT;
2803 lp->tx_bd_num = TX_BD_NUM_DEFAULT;
57baf8cc 2804
cb45a8bf
RH
2805 u64_stats_init(&lp->rx_stat_sync);
2806 u64_stats_init(&lp->tx_stat_sync);
2807
76abb5d6
SA
2808 mutex_init(&lp->stats_lock);
2809 seqcount_mutex_init(&lp->hw_stats_seqcount, &lp->stats_lock);
2810 INIT_DEFERRABLE_WORK(&lp->stats_work, axienet_refresh_stats);
2811
b11bfb9a
RH
2812 lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
2813 if (!lp->axi_clk) {
2814 /* For backward compatibility, if named AXI clock is not present,
2815 * treat the first clock specified as the AXI clock.
2816 */
2817 lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
2818 }
2819 if (IS_ERR(lp->axi_clk)) {
2820 ret = PTR_ERR(lp->axi_clk);
57baf8cc
RH
2821 goto free_netdev;
2822 }
b11bfb9a 2823 ret = clk_prepare_enable(lp->axi_clk);
57baf8cc 2824 if (ret) {
b11bfb9a 2825 dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
57baf8cc
RH
2826 goto free_netdev;
2827 }
2828
b11bfb9a
RH
2829 lp->misc_clks[0].id = "axis_clk";
2830 lp->misc_clks[1].id = "ref_clk";
2831 lp->misc_clks[2].id = "mgt_clk";
2832
2833 ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2834 if (ret)
2835 goto cleanup_clk;
2836
2837 ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2838 if (ret)
2839 goto cleanup_clk;
2840
8a3b7a25 2841 /* Map device registers */
47651c51 2842 lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
fcc028c1 2843 if (IS_ERR(lp->regs)) {
fcc028c1 2844 ret = PTR_ERR(lp->regs);
59cd4f19 2845 goto cleanup_clk;
8a3b7a25 2846 }
7fa0043d 2847 lp->regs_start = ethres->start;
46aa27df 2848
8a3b7a25
DB
2849 /* Setup checksum offload, but default to off if not specified */
2850 lp->features = 0;
2851
76abb5d6
SA
2852 if (axienet_ior(lp, XAE_ABILITY_OFFSET) & XAE_ABILITY_STATS)
2853 lp->features |= XAE_FEATURE_STATS;
2854
8495659b
ST
2855 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
2856 if (!ret) {
2857 switch (value) {
8a3b7a25 2858 case 1:
8a3b7a25 2859 lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
dd28f4c0
SA
2860 /* Can checksum any contiguous range */
2861 ndev->features |= NETIF_F_HW_CSUM;
8a3b7a25
DB
2862 break;
2863 case 2:
8a3b7a25
DB
2864 lp->features |= XAE_FEATURE_FULL_TX_CSUM;
2865 /* Can checksum TCP/UDP over IPv4. */
2866 ndev->features |= NETIF_F_IP_CSUM;
2867 break;
8a3b7a25
DB
2868 }
2869 }
8495659b
ST
2870 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
2871 if (!ret) {
2872 switch (value) {
8a3b7a25 2873 case 1:
8a3b7a25 2874 lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
06c069ff 2875 ndev->features |= NETIF_F_RXCSUM;
8a3b7a25
DB
2876 break;
2877 case 2:
8a3b7a25 2878 lp->features |= XAE_FEATURE_FULL_RX_CSUM;
06c069ff 2879 ndev->features |= NETIF_F_RXCSUM;
8a3b7a25 2880 break;
8a3b7a25
DB
2881 }
2882 }
2883 /* For supporting jumbo frames, the Axi Ethernet hardware must have
f080a8c3
ST
 2884 * a large enough Rx/Tx memory, so that the jumbo option can be enabled.
 2885 * Here we read the Rx memory size configured in the hardware from the
 2886 * device tree; axienet_change_mtu() later uses it to validate jumbo-sized
 2887 * MTU requests.
2888 */
8495659b 2889 of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
ee06b172 2890
6c8f06bb
RH
2891 lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
2892 "xlnx,switch-x-sgmii");
2893
ee06b172
A
2894 /* Start with the proprietary, and broken phy_type */
2895 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
2896 if (!ret) {
2897 netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
2898 switch (value) {
2899 case XAE_PHY_TYPE_MII:
2900 lp->phy_mode = PHY_INTERFACE_MODE_MII;
2901 break;
2902 case XAE_PHY_TYPE_GMII:
2903 lp->phy_mode = PHY_INTERFACE_MODE_GMII;
2904 break;
2905 case XAE_PHY_TYPE_RGMII_2_0:
2906 lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
2907 break;
2908 case XAE_PHY_TYPE_SGMII:
2909 lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
2910 break;
2911 case XAE_PHY_TYPE_1000BASE_X:
2912 lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
2913 break;
2914 default:
2915 ret = -EINVAL;
59cd4f19 2916 goto cleanup_clk;
ee06b172
A
2917 }
2918 } else {
0c65b2b9
AL
2919 ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
2920 if (ret)
59cd4f19 2921 goto cleanup_clk;
ee06b172 2922 }
6c8f06bb
RH
2923 if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
2924 lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
2925 dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
2926 ret = -EINVAL;
59cd4f19 2927 goto cleanup_clk;
6c8f06bb 2928 }
8a3b7a25 2929
5fe164fb 2930 if (!of_property_present(pdev->dev.of_node, "dmas")) {
6b1b40f7
SBNG
2931 /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
2932 np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
28ef9ebd 2933
6b1b40f7
SBNG
2934 if (np) {
2935 struct resource dmares;
2936
2937 ret = of_address_to_resource(np, 0, &dmares);
2938 if (ret) {
2939 dev_err(&pdev->dev,
2940 "unable to get DMA resource\n");
2941 of_node_put(np);
2942 goto cleanup_clk;
2943 }
2944 lp->dma_regs = devm_ioremap_resource(&pdev->dev,
2945 &dmares);
2946 lp->rx_irq = irq_of_parse_and_map(np, 1);
2947 lp->tx_irq = irq_of_parse_and_map(np, 0);
28ef9ebd 2948 of_node_put(np);
6b1b40f7
SBNG
2949 lp->eth_irq = platform_get_irq_optional(pdev, 0);
2950 } else {
2951 /* Check for these resources directly on the Ethernet node. */
2952 lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
2953 lp->rx_irq = platform_get_irq(pdev, 1);
2954 lp->tx_irq = platform_get_irq(pdev, 0);
2955 lp->eth_irq = platform_get_irq_optional(pdev, 2);
2956 }
2957 if (IS_ERR(lp->dma_regs)) {
2958 dev_err(&pdev->dev, "could not map DMA regs\n");
2959 ret = PTR_ERR(lp->dma_regs);
2960 goto cleanup_clk;
2961 }
2962 if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
2963 dev_err(&pdev->dev, "could not determine irqs\n");
2964 ret = -ENOMEM;
59cd4f19 2965 goto cleanup_clk;
28ef9ebd 2966 }
8a3b7a25 2967
6b1b40f7
SBNG
2968 /* Reset core now that clocks are enabled, prior to accessing MDIO */
2969 ret = __axienet_device_reset(lp);
2970 if (ret)
2971 goto cleanup_clk;
2972
2973 /* Autodetect the need for 64-bit DMA pointers.
2974 * When the IP is configured for a bus width bigger than 32 bits,
2975 * writing the MSB registers is mandatory, even if they are all 0.
2976 * We can detect this case by writing all 1's to one such register
2977 * and see if that sticks: when the IP is configured for 32 bits
2978 * only, those registers are RES0.
2979 * Those MSB registers were introduced in IP v7.1, which we check first.
2980 */
2981 if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
2982 void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
f1bc9fc4 2983
f735c40e 2984 iowrite32(0x0, desc);
6b1b40f7
SBNG
2985 if (ioread32(desc) == 0) { /* sanity check */
2986 iowrite32(0xffffffff, desc);
2987 if (ioread32(desc) > 0) {
2988 lp->features |= XAE_FEATURE_DMA_64BIT;
2989 addr_width = 64;
2990 dev_info(&pdev->dev,
2991 "autodetected 64-bit DMA range\n");
2992 }
2993 iowrite32(0x0, desc);
2994 }
2995 }
2996 if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
61fde511 2997 dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
6b1b40f7
SBNG
2998 ret = -EINVAL;
2999 goto cleanup_clk;
f735c40e 3000 }
f735c40e 3001
6b1b40f7
SBNG
3002 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
3003 if (ret) {
3004 dev_err(&pdev->dev, "No suitable DMA available\n");
3005 goto cleanup_clk;
3006 }
3007 netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
3008 netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
6a91b846
RSP
3009 } else {
3010 struct xilinx_vdma_config cfg;
3011 struct dma_chan *tx_chan;
3012
3013 lp->eth_irq = platform_get_irq_optional(pdev, 0);
3014 if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) {
3015 ret = lp->eth_irq;
3016 goto cleanup_clk;
3017 }
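 /* The Tx channel is requested here only to reset the DMA engine; the
 * channels used for the datapath are requested again in
 * axienet_init_dmaengine() when the interface is opened.
 */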
3018 tx_chan = dma_request_chan(lp->dev, "tx_chan0");
3019 if (IS_ERR(tx_chan)) {
3020 ret = PTR_ERR(tx_chan);
3021 dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
3022 goto cleanup_clk;
3023 }
3024
3025 cfg.reset = 1;
 3026 /* Despite the VDMA name, this config call is also used to reset plain AXI DMA channels */
3027 ret = xilinx_vdma_channel_set_config(tx_chan, &cfg);
3028 if (ret < 0) {
3029 dev_err(&pdev->dev, "Reset channel failed\n");
3030 dma_release_channel(tx_chan);
3031 goto cleanup_clk;
3032 }
3033
3034 dma_release_channel(tx_chan);
3035 lp->use_dmaengine = 1;
5fff0151
AP
3036 }
3037
6a91b846
RSP
3038 if (lp->use_dmaengine)
3039 ndev->netdev_ops = &axienet_netdev_dmaengine_ops;
3040 else
3041 ndev->netdev_ops = &axienet_netdev_ops;
522856ce
RH
3042 /* Check for Ethernet core IRQ (optional) */
3043 if (lp->eth_irq <= 0)
3044 dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
3045
8a3b7a25 3046 /* Retrieve the MAC address */
83216e39
MW
3047 ret = of_get_mac_address(pdev->dev.of_node, mac_addr);
3048 if (!ret) {
3049 axienet_set_mac_address(ndev, mac_addr);
3050 } else {
3051 dev_warn(&pdev->dev, "could not find MAC address property: %d\n",
3052 ret);
3053 axienet_set_mac_address(ndev, NULL);
8a3b7a25 3054 }
8a3b7a25 3055
d048c717
SA
3056 spin_lock_init(&lp->rx_cr_lock);
3057 spin_lock_init(&lp->tx_cr_lock);
e1d27d29
SA
3058 INIT_WORK(&lp->rx_dim.work, axienet_rx_dim_work);
3059 lp->rx_dim_enabled = true;
3060 lp->rx_dim.profile_ix = 1;
3061 lp->rx_dma_cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp),
eb80520e
SA
3062 XAXIDMA_DFT_RX_USEC);
3063 lp->tx_dma_cr = axienet_calc_cr(lp, XAXIDMA_DFT_TX_THRESHOLD,
3064 XAXIDMA_DFT_TX_USEC);
8a3b7a25 3065
d1c4f93e
AC
3066 ret = axienet_mdio_setup(lp);
3067 if (ret)
3068 dev_warn(&pdev->dev,
3069 "error registering MDIO bus: %d\n", ret);
3070
1a025560
RH
3071 if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
3072 lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
19c7a439 3073 np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
ab3a5d4c 3074 if (!np) {
19c7a439
AC
3075 /* Deprecated: Always use "pcs-handle" for pcs_phy.
3076 * Falling back to "phy-handle" here is only for
3077 * backward compatibility with old device trees.
3078 */
3079 np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
3080 }
3081 if (!np) {
3082 dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
1a025560 3083 ret = -EINVAL;
59cd4f19 3084 goto cleanup_mdio;
1a025560 3085 }
ab3a5d4c 3086 lp->pcs_phy = of_mdio_find_device(np);
1a025560
RH
3087 if (!lp->pcs_phy) {
3088 ret = -EPROBE_DEFER;
ab3a5d4c 3089 of_node_put(np);
59cd4f19 3090 goto cleanup_mdio;
1a025560 3091 }
ab3a5d4c 3092 of_node_put(np);
7a86be6a
RKO
3093 lp->pcs.ops = &axienet_pcs_ops;
3094 lp->pcs.poll = true;
1a025560 3095 }
8a3b7a25 3096
f5203a3d
RH
3097 lp->phylink_config.dev = &ndev->dev;
3098 lp->phylink_config.type = PHYLINK_NETDEV;
a3702953 3099 lp->phylink_config.mac_managed_pm = true;
72a47e1a
RKO
3100 lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
3101 MAC_10FD | MAC_100FD | MAC_1000FD;
f5203a3d 3102
136a3fa2
RKO
3103 __set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
3104 if (lp->switch_x_sgmii) {
3105 __set_bit(PHY_INTERFACE_MODE_1000BASEX,
3106 lp->phylink_config.supported_interfaces);
3107 __set_bit(PHY_INTERFACE_MODE_SGMII,
3108 lp->phylink_config.supported_interfaces);
3109 }
3110
f5203a3d
RH
3111 lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
3112 lp->phy_mode,
3113 &axienet_phylink_ops);
3114 if (IS_ERR(lp->phylink)) {
3115 ret = PTR_ERR(lp->phylink);
3116 dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
59cd4f19 3117 goto cleanup_mdio;
f5203a3d
RH
3118 }
3119
8a3b7a25
DB
3120 ret = register_netdev(lp->ndev);
3121 if (ret) {
3122 dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
59cd4f19 3123 goto cleanup_phylink;
8a3b7a25
DB
3124 }
3125
8a3b7a25
DB
3126 return 0;
3127
59cd4f19
RH
3128cleanup_phylink:
3129 phylink_destroy(lp->phylink);
3130
3131cleanup_mdio:
3132 if (lp->pcs_phy)
3133 put_device(&lp->pcs_phy->dev);
3134 if (lp->mii_bus)
3135 axienet_mdio_teardown(lp);
59cd4f19 3136cleanup_clk:
b11bfb9a
RH
3137 clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
3138 clk_disable_unprepare(lp->axi_clk);
59cd4f19 3139
46aa27df 3140free_netdev:
8a3b7a25 3141 free_netdev(ndev);
46aa27df 3142
8a3b7a25
DB
3143 return ret;
3144}
3145
2e0ec0af 3146static void axienet_remove(struct platform_device *pdev)
8a3b7a25 3147{
95219aa5 3148 struct net_device *ndev = platform_get_drvdata(pdev);
8a3b7a25
DB
3149 struct axienet_local *lp = netdev_priv(ndev);
3150
8a3b7a25 3151 unregister_netdev(ndev);
f5203a3d
RH
3152
3153 if (lp->phylink)
3154 phylink_destroy(lp->phylink);
3155
1a025560
RH
3156 if (lp->pcs_phy)
3157 put_device(&lp->pcs_phy->dev);
3158
e7a3d116 3159 axienet_mdio_teardown(lp);
8a3b7a25 3160
b11bfb9a
RH
3161 clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
3162 clk_disable_unprepare(lp->axi_clk);
09a0354c 3163
8a3b7a25 3164 free_netdev(ndev);
8a3b7a25
DB
3165}
3166
70c50265
RH
3167static void axienet_shutdown(struct platform_device *pdev)
3168{
3169 struct net_device *ndev = platform_get_drvdata(pdev);
3170
3171 rtnl_lock();
3172 netif_device_detach(ndev);
3173
3174 if (netif_running(ndev))
3175 dev_close(ndev);
3176
3177 rtnl_unlock();
3178}
3179
a3de357b
AC
3180static int axienet_suspend(struct device *dev)
3181{
3182 struct net_device *ndev = dev_get_drvdata(dev);
3183
3184 if (!netif_running(ndev))
3185 return 0;
3186
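 /* Detach and fully close the interface; axienet_resume() reopens it,
 * which re-requests the DMA resources and reprograms the MAC.
 */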
3187 netif_device_detach(ndev);
3188
3189 rtnl_lock();
3190 axienet_stop(ndev);
3191 rtnl_unlock();
3192
3193 return 0;
3194}
3195
3196static int axienet_resume(struct device *dev)
3197{
3198 struct net_device *ndev = dev_get_drvdata(dev);
3199
3200 if (!netif_running(ndev))
3201 return 0;
3202
3203 rtnl_lock();
3204 axienet_open(ndev);
3205 rtnl_unlock();
3206
3207 netif_device_attach(ndev);
3208
3209 return 0;
3210}
3211
3212static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops,
3213 axienet_suspend, axienet_resume);
3214
2be58620
ST
3215static struct platform_driver axienet_driver = {
3216 .probe = axienet_probe,
e96321fa 3217 .remove = axienet_remove,
70c50265 3218 .shutdown = axienet_shutdown,
8a3b7a25 3219 .driver = {
8a3b7a25 3220 .name = "xilinx_axienet",
a3de357b 3221 .pm = &axienet_pm_ops,
8a3b7a25
DB
3222 .of_match_table = axienet_of_match,
3223 },
3224};
3225
2be58620 3226module_platform_driver(axienet_driver);
8a3b7a25
DB
3227
3228MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
3229MODULE_AUTHOR("Xilinx");
3230MODULE_LICENSE("GPL");