// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xilinx Axi Ethernet device driver
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2010 - 2011 PetaLogix
 * Copyright (c) 2019 - 2022 Calian Advanced Technologies
 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
 *
 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
 * and Spartan6.
 *
 * TODO:
 *  - Add Axi Fifo support.
 *  - Factor out Axi DMA code into separate driver.
 *  - Test and fix basic multicast filtering.
 *  - Add support for extended multicast filtering.
 *  - Test basic VLAN support.
 *  - Add support for extended VLAN support.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/circ_buf.h>
#include <net/netdev_queues.h>

#include "xilinx_axienet.h"

/* Descriptors defines for Tx and Rx DMA */
#define TX_BD_NUM_DEFAULT		128
#define RX_BD_NUM_DEFAULT		1024
#define TX_BD_NUM_MIN			(MAX_SKB_FRAGS + 1)
#define TX_BD_NUM_MAX			4096
#define RX_BD_NUM_MAX			4096
#define DMA_NUM_APP_WORDS		5
#define LEN_APP				4
#define RX_BUF_NUM_DEFAULT		128

/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME		"xaxienet"
#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
#define DRIVER_VERSION		"1.00a"

#define AXIENET_REGS_N		40

static void axienet_rx_submit_desc(struct net_device *ndev);

/* Match table for of_platform binding */
static const struct of_device_id axienet_of_match[] = {
	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
	{},
};

MODULE_DEVICE_TABLE(of, axienet_of_match);

/* Option table for setting up Axi Ethernet hardware options */
static struct axienet_option axienet_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_JUM_MASK,
	}, {
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_JUM_MASK,
	}, { /* Turn on VLAN packet support for both Rx and Tx */
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_VLAN_MASK,
	}, {
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_VLAN_MASK,
	}, { /* Turn on FCS stripping on receive packets */
		.opt = XAE_OPTION_FCS_STRIP,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_FCS_MASK,
	}, { /* Turn on FCS insertion on transmit packets */
		.opt = XAE_OPTION_FCS_INSERT,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_FCS_MASK,
	}, { /* Turn off length/type field checking on receive packets */
		.opt = XAE_OPTION_LENTYPE_ERR,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_LT_DIS_MASK,
	}, { /* Turn on Rx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCRX_MASK,
	}, { /* Turn on Tx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCTX_MASK,
	}, { /* Turn on promiscuous frame filtering */
		.opt = XAE_OPTION_PROMISC,
		.reg = XAE_FMI_OFFSET,
		.m_or = XAE_FMI_PM_MASK,
	}, { /* Enable transmitter */
		.opt = XAE_OPTION_TXEN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_TX_MASK,
	}, { /* Enable receiver */
		.opt = XAE_OPTION_RXEN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_RX_MASK,
	},
	{}
};

static struct skbuf_dma_descriptor *axienet_get_rx_desc(struct axienet_local *lp, int i)
{
	return lp->rx_skb_ring[i & (RX_BUF_NUM_DEFAULT - 1)];
}

static struct skbuf_dma_descriptor *axienet_get_tx_desc(struct axienet_local *lp, int i)
{
	return lp->tx_skb_ring[i & (TX_BD_NUM_MAX - 1)];
}
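
/* Note: the "& (size - 1)" masking in the two helpers above only works
 * because RX_BUF_NUM_DEFAULT and TX_BD_NUM_MAX are powers of two; the
 * free-running ring indices are reduced modulo the ring size without a
 * division.
 */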

/**
 * axienet_dma_in32 - Memory mapped Axi DMA register read
 * @lp: Pointer to axienet local structure
 * @reg: Address offset from the base address of the Axi DMA core
 *
 * Return: The contents of the Axi DMA register
 *
 * This function returns the contents of the corresponding Axi DMA register.
 */
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
	return ioread32(lp->dma_regs + reg);
}

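/* Buffer descriptors carry DMA addresses as two 32-bit halves (phys and
 * phys_msb). The helpers below hide that split; the double 16-bit shift in
 * desc_get_phys_addr() keeps the build clean when dma_addr_t is only 32 bits
 * wide, since a single 32-bit shift would then exceed the type's width.
 */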
static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
			       struct axidma_bd *desc)
{
	desc->phys = lower_32_bits(addr);
	if (lp->features & XAE_FEATURE_DMA_64BIT)
		desc->phys_msb = upper_32_bits(addr);
}

static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
				     struct axidma_bd *desc)
{
	dma_addr_t ret = desc->phys;

	if (lp->features & XAE_FEATURE_DMA_64BIT)
		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;

	return ret;
}

/**
 * axienet_dma_bd_release - Release buffer descriptor rings
 * @ndev: Pointer to the net_device structure
 *
 * This function is used to release the descriptors allocated in
 * axienet_dma_bd_init. axienet_dma_bd_release is called when the Axi Ethernet
 * driver stop routine is called.
 */
static void axienet_dma_bd_release(struct net_device *ndev)
{
	int i;
	struct axienet_local *lp = netdev_priv(ndev);

	/* If we end up here, tx_bd_v must have been DMA allocated. */
	dma_free_coherent(lp->dev,
			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
			  lp->tx_bd_v,
			  lp->tx_bd_p);

	if (!lp->rx_bd_v)
		return;

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t phys;

		/* A NULL skb means this descriptor has not been initialised
		 * at all.
		 */
		if (!lp->rx_bd_v[i].skb)
			break;

		dev_kfree_skb(lp->rx_bd_v[i].skb);

		/* For each descriptor, we programmed cntrl with the (non-zero)
		 * descriptor size, after it had been successfully allocated.
		 * So a non-zero value in there means we need to unmap it.
		 */
		if (lp->rx_bd_v[i].cntrl) {
			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
			dma_unmap_single(lp->dev, phys,
					 lp->max_frm_size, DMA_FROM_DEVICE);
		}
	}

	dma_free_coherent(lp->dev,
			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
			  lp->rx_bd_v,
			  lp->rx_bd_p);
}

static u64 axienet_dma_rate(struct axienet_local *lp)
{
	if (lp->axi_clk)
		return clk_get_rate(lp->axi_clk);
	return 125000000; /* arbitrary guess if no clock rate set */
}

/**
 * axienet_calc_cr() - Calculate control register value
 * @lp: Device private data
 * @count: Number of completions before an interrupt
 * @usec: Microseconds after the last completion before an interrupt
 *
 * Calculate a control register value based on the coalescing settings. The
 * run/stop bit is not set.
 *
 * Return: Control register value with coalescing settings configured.
 */
static u32 axienet_calc_cr(struct axienet_local *lp, u32 count, u32 usec)
{
	u32 cr;

	cr = FIELD_PREP(XAXIDMA_COALESCE_MASK, count) | XAXIDMA_IRQ_IOC_MASK |
	     XAXIDMA_IRQ_ERROR_MASK;
	/* Only set interrupt delay timer if not generating an interrupt on
	 * the first packet. Otherwise leave at 0 to disable delay interrupt.
	 */
	if (count > 1) {
		u64 clk_rate = axienet_dma_rate(lp);
		u32 timer;

		/* 1 Timeout Interval = 125 * (clock period of SG clock) */
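		/* For example, with the 125 MHz fallback rate above, one
		 * timeout interval is 1 us, so the requested idle time in
		 * microseconds maps directly to delay-timer ticks.
		 */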
		timer = DIV64_U64_ROUND_CLOSEST((u64)usec * clk_rate,
						XAXIDMA_DELAY_SCALE);

		timer = min(timer, FIELD_MAX(XAXIDMA_DELAY_MASK));
		cr |= FIELD_PREP(XAXIDMA_DELAY_MASK, timer) |
		      XAXIDMA_IRQ_DELAY_MASK;
	}

	return cr;
}

/**
 * axienet_coalesce_params() - Extract coalesce parameters from the CR
 * @lp: Device private data
 * @cr: The control register to parse
 * @count: Number of packets before an interrupt
 * @usec: Idle time (in usec) before an interrupt
 */
static void axienet_coalesce_params(struct axienet_local *lp, u32 cr,
				    u32 *count, u32 *usec)
{
	u64 clk_rate = axienet_dma_rate(lp);
	u64 timer = FIELD_GET(XAXIDMA_DELAY_MASK, cr);

	*count = FIELD_GET(XAXIDMA_COALESCE_MASK, cr);
	*usec = DIV64_U64_ROUND_CLOSEST(timer * XAXIDMA_DELAY_SCALE, clk_rate);
}
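
/* axienet_coalesce_params() is the inverse of axienet_calc_cr(): it converts
 * the packet-count and delay-timer fields of a control register value back
 * into the (count, usec) pair that produced them, using the same DMA clock
 * rate for the time conversion.
 */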

/**
 * axienet_dma_start - Set up DMA registers and start DMA operation
 * @lp: Pointer to the axienet_local structure
 */
static void axienet_dma_start(struct axienet_local *lp)
{
	spin_lock_irq(&lp->rx_cr_lock);

	/* Start updating the Rx channel control register */
	lp->rx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
	lp->rx_dma_started = true;

	spin_unlock_irq(&lp->rx_cr_lock);
	spin_lock_irq(&lp->tx_cr_lock);

	/* Start updating the Tx channel control register */
	lp->tx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
	lp->tx_dma_started = true;

	spin_unlock_irq(&lp->tx_cr_lock);
}

/**
 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 * @ndev: Pointer to the net_device structure
 *
 * Return: 0 on success, -ENOMEM on failure.
 *
 * This function is called to initialize the Rx and Tx DMA descriptor
 * rings. This initializes the descriptors with required default values
 * and is called when Axi Ethernet driver reset is called.
 */
static int axienet_dma_bd_init(struct net_device *ndev)
{
	int i;
	struct sk_buff *skb;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Reset the indexes which are used for accessing the BDs */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	lp->tx_bd_v = dma_alloc_coherent(lp->dev,
					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		return -ENOMEM;

	lp->rx_bd_v = dma_alloc_coherent(lp->dev,
					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

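	/* Link each descriptor's "next" pointer to the following descriptor,
	 * with the last entry wrapping back to the first, so both BD rings
	 * form circular chains for the DMA engine to follow.
	 */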
	for (i = 0; i < lp->tx_bd_num; i++) {
		dma_addr_t addr = lp->tx_bd_p +
				  sizeof(*lp->tx_bd_v) *
				  ((i + 1) % lp->tx_bd_num);

		lp->tx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t addr;

		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
			((i + 1) % lp->rx_bd_num);
		lp->rx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);

		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!skb)
			goto out;

		lp->rx_bd_v[i].skb = skb;
		addr = dma_map_single(lp->dev, skb->data,
				      lp->max_frm_size, DMA_FROM_DEVICE);
		if (dma_mapping_error(lp->dev, addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			goto out;
		}
		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);

		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
	}

	axienet_dma_start(lp);

	return 0;
out:
	axienet_dma_bd_release(ndev);
	return -ENOMEM;
}

/**
 * axienet_set_mac_address - Write the MAC address
 * @ndev: Pointer to the net_device structure
 * @address: 6 byte Address to be written as MAC address
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It writes to the UAW0 and UAW1 registers of the core.
 */
static void axienet_set_mac_address(struct net_device *ndev,
				    const void *address)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (address)
		eth_hw_addr_set(ndev, address);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);

	/* Set up the unicast MAC address filter with the new MAC address */
	axienet_iow(lp, XAE_UAW0_OFFSET,
		    (ndev->dev_addr[0]) |
		    (ndev->dev_addr[1] << 8) |
		    (ndev->dev_addr[2] << 16) |
		    (ndev->dev_addr[3] << 24));
	axienet_iow(lp, XAE_UAW1_OFFSET,
		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
		      ~XAE_UAW1_UNICASTADDR_MASK) |
		     (ndev->dev_addr[4] |
		      (ndev->dev_addr[5] << 8))));
}

/**
 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 * @ndev: Pointer to the net_device structure
 * @p: 6 byte Address to be written as MAC address
 *
 * Return: 0 for all conditions. Presently, there is no failure case.
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It calls the core specific axienet_set_mac_address. This is the
 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	axienet_set_mac_address(ndev, addr->sa_data);
	return 0;
}

/**
 * axienet_set_multicast_list - Prepare the multicast table
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to initialize the multicast table during
 * initialization. The Axi Ethernet basic multicast support has a four-entry
 * multicast table which is initialized here. Additionally this function
 * goes into the net_device_ops structure entry ndo_set_multicast_list. This
 * means whenever the multicast table entries need to be updated this
 * function gets called.
 */
static void axienet_set_multicast_list(struct net_device *ndev)
{
	int i = 0;
	u32 reg, af0reg, af1reg;
	struct axienet_local *lp = netdev_priv(ndev);

	reg = axienet_ior(lp, XAE_FMI_OFFSET);
	reg &= ~XAE_FMI_PM_MASK;
	if (ndev->flags & IFF_PROMISC)
		reg |= XAE_FMI_PM_MASK;
	else
		reg &= ~XAE_FMI_PM_MASK;
	axienet_iow(lp, XAE_FMI_OFFSET, reg);

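	/* The low byte of the FMI register selects which of the four CAM
	 * filter entries the AF0/AF1, AM0/AM1 and FFE writes below apply to;
	 * "reg &= 0xFFFFFF00; reg |= i" picks entry i before programming it.
	 */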
	if (ndev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
		reg &= 0xFFFFFF00;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		axienet_iow(lp, XAE_AF0_OFFSET, 1); /* Multicast bit */
		axienet_iow(lp, XAE_AF1_OFFSET, 0);
		axienet_iow(lp, XAE_AM0_OFFSET, 1); /* ditto */
		axienet_iow(lp, XAE_AM1_OFFSET, 0);
		axienet_iow(lp, XAE_FFE_OFFSET, 1);
		i = 1;
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		netdev_for_each_mc_addr(ha, ndev) {
			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
				break;

			af0reg = (ha->addr[0]);
			af0reg |= (ha->addr[1] << 8);
			af0reg |= (ha->addr[2] << 16);
			af0reg |= (ha->addr[3] << 24);

			af1reg = (ha->addr[4]);
			af1reg |= (ha->addr[5] << 8);

			reg &= 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
			axienet_iow(lp, XAE_AM0_OFFSET, 0xffffffff);
			axienet_iow(lp, XAE_AM1_OFFSET, 0x0000ffff);
			axienet_iow(lp, XAE_FFE_OFFSET, 1);
			i++;
		}
	}

	for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
		reg &= 0xFFFFFF00;
		reg |= i;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		axienet_iow(lp, XAE_FFE_OFFSET, 0);
	}
}

/**
 * axienet_setoptions - Set an Axi Ethernet option
 * @ndev: Pointer to the net_device structure
 * @options: Option to be enabled/disabled
 *
 * The Axi Ethernet core has multiple features which can be selectively turned
 * on or off. The typical options could be jumbo frame option, basic VLAN
 * option, promiscuous mode option etc. This function is used to set or clear
 * these options in the Axi Ethernet hardware. This is done through the
 * axienet_option structure.
 */
static void axienet_setoptions(struct net_device *ndev, u32 options)
{
	int reg;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_option *tp = &axienet_options[0];

	while (tp->opt) {
		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
		if (options & tp->opt)
			reg |= tp->m_or;
		axienet_iow(lp, tp->reg, reg);
		tp++;
	}

	lp->options |= options;
}

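/* The MAC's statistics counters are only 32 bits wide. axienet_stat() and
 * axienet_stats_update() fold them into 64-bit software accumulators; the
 * unsigned (counter - last_counter) difference stays correct across a single
 * wrap between refreshes, which is why the counters are re-read periodically.
 */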
static u64 axienet_stat(struct axienet_local *lp, enum temac_stat stat)
{
	u32 counter;

	if (lp->reset_in_progress)
		return lp->hw_stat_base[stat];

	counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
	return lp->hw_stat_base[stat] + (counter - lp->hw_last_counter[stat]);
}

static void axienet_stats_update(struct axienet_local *lp, bool reset)
{
	enum temac_stat stat;

	write_seqcount_begin(&lp->hw_stats_seqcount);
	lp->reset_in_progress = reset;
	for (stat = 0; stat < STAT_COUNT; stat++) {
		u32 counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);

		lp->hw_stat_base[stat] += counter - lp->hw_last_counter[stat];
		lp->hw_last_counter[stat] = counter;
	}
	write_seqcount_end(&lp->hw_stats_seqcount);
}

static void axienet_refresh_stats(struct work_struct *work)
{
	struct axienet_local *lp = container_of(work, struct axienet_local,
						stats_work.work);

	mutex_lock(&lp->stats_lock);
	axienet_stats_update(lp, false);
	mutex_unlock(&lp->stats_lock);

	/* Just less than 2^32 bytes at 2.5 GBit/s */
	schedule_delayed_work(&lp->stats_work, 13 * HZ);
}

static int __axienet_device_reset(struct axienet_local *lp)
{
	u32 value;
	int ret;

	/* Save statistics counters in case they will be reset */
	mutex_lock(&lp->stats_lock);
	if (lp->features & XAE_FEATURE_STATS)
		axienet_stats_update(lp, true);

	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
	 * process of Axi DMA takes a while to complete as all pending
	 * commands/transfers will be flushed or completed during this
	 * reset process.
	 * Note that even though both TX and RX have their own reset register,
	 * they both reset the entire DMA core, so only one needs to be used.
	 */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
	ret = read_poll_timeout(axienet_dma_in32, value,
				!(value & XAXIDMA_CR_RESET_MASK),
				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
				XAXIDMA_TX_CR_OFFSET);
	if (ret) {
		dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
		goto out;
	}

	/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
	ret = read_poll_timeout(axienet_ior, value,
				value & XAE_INT_PHYRSTCMPLT_MASK,
				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
				XAE_IS_OFFSET);
	if (ret) {
		dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
		goto out;
	}

	/* Update statistics counters with new values */
	if (lp->features & XAE_FEATURE_STATS) {
		enum temac_stat stat;

		write_seqcount_begin(&lp->hw_stats_seqcount);
		lp->reset_in_progress = false;
		for (stat = 0; stat < STAT_COUNT; stat++) {
			u32 counter =
				axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);

			lp->hw_stat_base[stat] +=
				lp->hw_last_counter[stat] - counter;
			lp->hw_last_counter[stat] = counter;
		}
		write_seqcount_end(&lp->hw_stats_seqcount);
	}

out:
	mutex_unlock(&lp->stats_lock);
	return ret;
}

/**
 * axienet_dma_stop - Stop DMA operation
 * @lp: Pointer to the axienet_local structure
 */
static void axienet_dma_stop(struct axienet_local *lp)
{
	int count;
	u32 cr, sr;

	spin_lock_irq(&lp->rx_cr_lock);

	cr = lp->rx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
	lp->rx_dma_started = false;

	spin_unlock_irq(&lp->rx_cr_lock);
	synchronize_irq(lp->rx_irq);

	spin_lock_irq(&lp->tx_cr_lock);

	cr = lp->tx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
	lp->tx_dma_started = false;

	spin_unlock_irq(&lp->tx_cr_lock);
	synchronize_irq(lp->tx_irq);

	/* Give DMAs a chance to halt gracefully */
	sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	}

	sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	}

	/* Do a reset to ensure DMA is really stopped */
	axienet_lock_mii(lp);
	__axienet_device_reset(lp);
	axienet_unlock_mii(lp);
}

/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to reset and initialize the Axi Ethernet core. This
 * is typically called during initialization. It does a reset of the Axi DMA
 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 * core.
 *
 * Return: 0 on success or a negative error number otherwise.
 */
static int axienet_device_reset(struct net_device *ndev)
{
	u32 axienet_status;
	struct axienet_local *lp = netdev_priv(ndev);
	int ret;

	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
	lp->options |= XAE_OPTION_VLAN;
	lp->options &= (~XAE_OPTION_JUMBO);

	if (ndev->mtu > XAE_MTU && ndev->mtu <= XAE_JUMBO_MTU) {
		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
					XAE_TRL_SIZE;

		if (lp->max_frm_size <= lp->rxmem)
			lp->options |= XAE_OPTION_JUMBO;
	}

	if (!lp->use_dmaengine) {
		ret = __axienet_device_reset(lp);
		if (ret)
			return ret;

		ret = axienet_dma_bd_init(ndev);
		if (ret) {
			netdev_err(ndev, "%s: descriptor allocation failed\n",
				   __func__);
			return ret;
		}
	}

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);

	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);

	netif_trans_update(ndev);

	return 0;
}

/**
 * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
 * @lp: Pointer to the axienet_local structure
 * @first_bd: Index of first descriptor to clean up
 * @nr_bds: Max number of descriptors to clean up
 * @force: Whether to clean descriptors even if not complete
 * @sizep: Pointer to a u32 filled with the total sum of all bytes
 *	   in all cleaned-up descriptors. Ignored if NULL.
 * @budget: NAPI budget (use 0 when not called from NAPI poll)
 *
 * Would either be called after a successful transmit operation, or after
 * there was an error when setting up the chain.
 *
 * Return: The number of packets handled.
 */
static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
				 int nr_bds, bool force, u32 *sizep, int budget)
{
	struct axidma_bd *cur_p;
	unsigned int status;
	int i, packets = 0;
	dma_addr_t phys;

	for (i = 0; i < nr_bds; i++) {
		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
		status = cur_p->status;

		/* If force is not specified, clean up only descriptors
		 * that have been completed by the MAC.
		 */
		if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
			break;

		/* Ensure we see complete descriptor update */
		dma_rmb();
		phys = desc_get_phys_addr(lp, cur_p);
		dma_unmap_single(lp->dev, phys,
				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
				 DMA_TO_DEVICE);

		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
			napi_consume_skb(cur_p->skb, budget);
			packets++;
		}

		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app4 = 0;
		cur_p->skb = NULL;
		/* ensure our transmit path and device don't prematurely see status cleared */
		wmb();
		cur_p->cntrl = 0;
		cur_p->status = 0;

		if (sizep)
			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
	}

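	/* Only advance the consumer index on the normal completion path.
	 * When force is set we are unwinding descriptors that were just set
	 * up by the transmit path and never completed, so the consumer index
	 * must stay where it is.
	 */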
	if (!force) {
		lp->tx_bd_ci += i;
		if (lp->tx_bd_ci >= lp->tx_bd_num)
			lp->tx_bd_ci %= lp->tx_bd_num;
	}

	return packets;
}

/**
 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 * @lp: Pointer to the axienet_local structure
 * @num_frag: The number of BDs to check for
 *
 * Return: 0, on success
 *	   NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked before BDs are allocated and transmission starts.
 * This function returns 0 if a BD or group of BDs can be allocated for
 * transmission. If the BD or any of the BDs are not free the function
 * returns a busy status.
 */
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
					    int num_frag)
{
	struct axidma_bd *cur_p;

	/* Ensure we see all descriptor updates from device or TX polling */
	rmb();
	cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
			     lp->tx_bd_num];
	if (cur_p->cntrl)
		return NETDEV_TX_BUSY;
	return 0;
}

/**
 * axienet_dma_tx_cb - DMA engine callback for TX channel.
 * @data: Pointer to the axienet_local structure.
 * @result: error reporting through dmaengine_result.
 * This function is called by the dmaengine driver for the TX channel to
 * notify that the transmit is done.
 */
static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result)
{
	struct skbuf_dma_descriptor *skbuf_dma;
	struct axienet_local *lp = data;
	struct netdev_queue *txq;
	int len;

	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_tail++);
	len = skbuf_dma->skb->len;
	txq = skb_get_tx_queue(lp->ndev, skbuf_dma->skb);
	u64_stats_update_begin(&lp->tx_stat_sync);
	u64_stats_add(&lp->tx_bytes, len);
	u64_stats_add(&lp->tx_packets, 1);
	u64_stats_update_end(&lp->tx_stat_sync);
	dma_unmap_sg(lp->dev, skbuf_dma->sgl, skbuf_dma->sg_len, DMA_TO_DEVICE);
	dev_consume_skb_any(skbuf_dma->skb);
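	/* Wake the queue once at least two ring slots are free again; this
	 * mirrors the "space <= 1" stop condition in
	 * axienet_start_xmit_dmaengine() and the start threshold passed to
	 * netif_txq_maybe_stop() there.
	 */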
	netif_txq_completed_wake(txq, 1, len,
				 CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
				 2);
}

/**
 * axienet_start_xmit_dmaengine - Starts the transmission.
 * @skb: sk_buff pointer that contains data to be Txed.
 * @ndev: Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK on success, or when a non-space error forces the
 *	   packet to be dropped.
 *	   NETDEV_TX_BUSY when no free element in the TX skb ring buffer
 *	   is available.
 *
 * This function is invoked to initiate transmission. It sets up the skb,
 * registers the DMA callback and submits the DMA transaction.
 * Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static netdev_tx_t
axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
{
	struct dma_async_tx_descriptor *dma_tx_desc = NULL;
	struct axienet_local *lp = netdev_priv(ndev);
	u32 app_metadata[DMA_NUM_APP_WORDS] = {0};
	struct skbuf_dma_descriptor *skbuf_dma;
	struct dma_device *dma_dev;
	struct netdev_queue *txq;
	u32 csum_start_off;
	u32 csum_index_off;
	int sg_len;
	int ret;

	dma_dev = lp->tx_chan->device;
	sg_len = skb_shinfo(skb)->nr_frags + 1;
	if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= 1) {
		netif_stop_queue(ndev);
		if (net_ratelimit())
			netdev_warn(ndev, "TX ring unexpectedly full\n");
		return NETDEV_TX_BUSY;
	}

	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_head);
	if (!skbuf_dma)
		goto xmit_error_drop_skb;

	lp->tx_ring_head++;
	sg_init_table(skbuf_dma->sgl, sg_len);
	ret = skb_to_sgvec(skb, skbuf_dma->sgl, 0, skb->len);
	if (ret < 0)
		goto xmit_error_drop_skb;

	ret = dma_map_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
	if (!ret)
		goto xmit_error_drop_skb;

	/* Fill up app fields for checksum */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			app_metadata[0] |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			app_metadata[0] |= 1;
			app_metadata[1] = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		app_metadata[0] |= 2; /* Tx Full Checksum Offload Enabled */
	}

	dma_tx_desc = dma_dev->device_prep_slave_sg(lp->tx_chan, skbuf_dma->sgl,
			sg_len, DMA_MEM_TO_DEV,
			DMA_PREP_INTERRUPT, (void *)app_metadata);
	if (!dma_tx_desc)
		goto xmit_error_unmap_sg;

	skbuf_dma->skb = skb;
	skbuf_dma->sg_len = sg_len;
	dma_tx_desc->callback_param = lp;
	dma_tx_desc->callback_result = axienet_dma_tx_cb;
	txq = skb_get_tx_queue(lp->ndev, skb);
	netdev_tx_sent_queue(txq, skb->len);
	netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
			     1, 2);

	dmaengine_submit(dma_tx_desc);
	dma_async_issue_pending(lp->tx_chan);
	return NETDEV_TX_OK;

xmit_error_unmap_sg:
	dma_unmap_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
xmit_error_drop_skb:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/**
 * axienet_tx_poll - Invoked once a transmit is completed by the
 * Axi DMA Tx channel.
 * @napi: Pointer to NAPI structure.
 * @budget: Max number of TX packets to process.
 *
 * Return: Number of TX packets processed.
 *
 * This function is invoked from the NAPI processing to notify the completion
 * of transmit operation. It clears fields in the corresponding Tx BDs and
 * unmaps the corresponding buffer so that CPU can regain ownership of the
 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 * required.
 */
static int axienet_tx_poll(struct napi_struct *napi, int budget)
{
	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
	struct net_device *ndev = lp->ndev;
	u32 size = 0;
	int packets;

	packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, lp->tx_bd_num, false,
					&size, budget);

	if (packets) {
		netdev_completed_queue(ndev, packets, size);
		u64_stats_update_begin(&lp->tx_stat_sync);
		u64_stats_add(&lp->tx_packets, packets);
		u64_stats_add(&lp->tx_bytes, size);
		u64_stats_update_end(&lp->tx_stat_sync);

		/* Matches barrier in axienet_start_xmit */
		smp_mb();

		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
			netif_wake_queue(ndev);
	}

	if (packets < budget && napi_complete_done(napi, packets)) {
		/* Re-enable TX completion interrupts. This should
		 * cause an immediate interrupt if any TX packets are
		 * already pending.
		 */
		spin_lock_irq(&lp->tx_cr_lock);
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
		spin_unlock_irq(&lp->tx_cr_lock);
	}
	return packets;
}

/**
 * axienet_start_xmit - Starts the transmission.
 * @skb: sk_buff pointer that contains data to be Txed.
 * @ndev: Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK, on success
 *	   NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked from upper layers to initiate transmission. The
 * function uses the next available free BDs and populates their fields to
 * start the transmission. Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static netdev_tx_t
axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	u32 ii;
	u32 num_frag;
	u32 csum_start_off;
	u32 csum_index_off;
	skb_frag_t *frag;
	dma_addr_t tail_p, phys;
	u32 orig_tail_ptr, new_tail_ptr;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;

	orig_tail_ptr = lp->tx_bd_tail;
	new_tail_ptr = orig_tail_ptr;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &lp->tx_bd_v[orig_tail_ptr];

	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
		/* Should not happen as last start_xmit call should have
		 * checked for sufficient space and queue should only be
		 * woken when sufficient space is available.
		 */
		netif_stop_queue(ndev);
		if (net_ratelimit())
			netdev_warn(ndev, "TX ring unexpectedly full\n");
		return NETDEV_TX_BUSY;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			cur_p->app0 |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			cur_p->app0 |= 1;
			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
	}

	phys = dma_map_single(lp->dev, skb->data,
			      skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(lp->dev, phys))) {
		if (net_ratelimit())
			netdev_err(ndev, "TX DMA mapping error\n");
		ndev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	desc_set_phys_addr(lp, phys, cur_p);
	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;

	for (ii = 0; ii < num_frag; ii++) {
		if (++new_tail_ptr >= lp->tx_bd_num)
			new_tail_ptr = 0;
		cur_p = &lp->tx_bd_v[new_tail_ptr];
		frag = &skb_shinfo(skb)->frags[ii];
		phys = dma_map_single(lp->dev,
				      skb_frag_address(frag),
				      skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(lp->dev, phys))) {
			if (net_ratelimit())
				netdev_err(ndev, "TX DMA mapping error\n");
			ndev->stats.tx_dropped++;
			axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
					      true, NULL, 0);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		desc_set_phys_addr(lp, phys, cur_p);
		cur_p->cntrl = skb_frag_size(frag);
	}

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
	cur_p->skb = skb;

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
	if (++new_tail_ptr >= lp->tx_bd_num)
		new_tail_ptr = 0;
	WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
	netdev_sent_queue(ndev, skb->len);

	/* Start the transfer */
	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);

	/* Stop queue if next transmit may not have space */
	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
		netif_stop_queue(ndev);

		/* Matches barrier in axienet_tx_poll */
		smp_mb();

		/* Space might have just been freed - check again */
		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
			netif_wake_queue(ndev);
	}

	return NETDEV_TX_OK;
}

/**
 * axienet_dma_rx_cb - DMA engine callback for RX channel.
 * @data: Pointer to the axienet_local structure.
 * @result: error reporting through dmaengine_result.
 * This function is called by the dmaengine driver for the RX channel to
 * notify that the packet is received.
 */
static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
{
	struct skbuf_dma_descriptor *skbuf_dma;
	size_t meta_len, meta_max_len, rx_len;
	struct axienet_local *lp = data;
	struct sk_buff *skb;
	u32 *app_metadata;
	int i;

	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
	skb = skbuf_dma->skb;
	app_metadata = dmaengine_desc_get_metadata_ptr(skbuf_dma->desc, &meta_len,
						       &meta_max_len);
	dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size,
			 DMA_FROM_DEVICE);

	if (IS_ERR(app_metadata)) {
		if (net_ratelimit())
			netdev_err(lp->ndev, "Failed to get RX metadata pointer\n");
		dev_kfree_skb_any(skb);
		lp->ndev->stats.rx_dropped++;
		goto rx_submit;
	}

	/* TODO: Derive app word index programmatically */
	rx_len = (app_metadata[LEN_APP] & 0xFFFF);
	skb_put(skb, rx_len);
	skb->protocol = eth_type_trans(skb, lp->ndev);
	skb->ip_summed = CHECKSUM_NONE;

	__netif_rx(skb);
	u64_stats_update_begin(&lp->rx_stat_sync);
	u64_stats_add(&lp->rx_packets, 1);
	u64_stats_add(&lp->rx_bytes, rx_len);
	u64_stats_update_end(&lp->rx_stat_sync);

rx_submit:
	for (i = 0; i < CIRC_SPACE(lp->rx_ring_head, lp->rx_ring_tail,
				   RX_BUF_NUM_DEFAULT); i++)
		axienet_rx_submit_desc(lp->ndev);
	dma_async_issue_pending(lp->rx_chan);
}

/**
 * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
 * @napi: Pointer to NAPI structure.
 * @budget: Max number of RX packets to process.
 *
 * Return: Number of RX packets processed.
 */
static int axienet_rx_poll(struct napi_struct *napi, int budget)
{
	u32 length;
	u32 csumstatus;
	u32 size = 0;
	int packets = 0;
	dma_addr_t tail_p = 0;
	struct axidma_bd *cur_p;
	struct sk_buff *skb, *new_skb;
	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);

	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
		dma_addr_t phys;

		/* Ensure we see complete descriptor update */
		dma_rmb();

		skb = cur_p->skb;
		cur_p->skb = NULL;

		/* skb could be NULL if a previous pass already received the
		 * packet for this slot in the ring, but failed to refill it
		 * with a newly allocated buffer. In this case, don't try to
		 * receive it again.
		 */
		if (likely(skb)) {
			length = cur_p->app4 & 0x0000FFFF;

			phys = desc_get_phys_addr(lp, cur_p);
			dma_unmap_single(lp->dev, phys, lp->max_frm_size,
					 DMA_FROM_DEVICE);

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, lp->ndev);
			/*skb_checksum_none_assert(skb);*/
			skb->ip_summed = CHECKSUM_NONE;

			/* if we're doing Rx csum offload, set it up */
			if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
				csumstatus = (cur_p->app2 &
					      XAE_FULL_CSUM_STATUS_MASK) >> 3;
				if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
				    csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				}
			} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
				skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}

			napi_gro_receive(napi, skb);

			size += length;
			packets++;
		}

		new_skb = napi_alloc_skb(napi, lp->max_frm_size);
		if (!new_skb)
			break;

		phys = dma_map_single(lp->dev, new_skb->data,
				      lp->max_frm_size,
				      DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(lp->dev, phys))) {
			if (net_ratelimit())
				netdev_err(lp->ndev, "RX DMA mapping error\n");
			dev_kfree_skb(new_skb);
			break;
		}
		desc_set_phys_addr(lp, phys, cur_p);

		cur_p->cntrl = lp->max_frm_size;
		cur_p->status = 0;
		cur_p->skb = new_skb;

		/* Only update tail_p to mark this slot as usable after it has
		 * been successfully refilled.
		 */
		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;

		if (++lp->rx_bd_ci >= lp->rx_bd_num)
			lp->rx_bd_ci = 0;
		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
	}

	u64_stats_update_begin(&lp->rx_stat_sync);
	u64_stats_add(&lp->rx_packets, packets);
	u64_stats_add(&lp->rx_bytes, size);
	u64_stats_update_end(&lp->rx_stat_sync);

	if (tail_p)
		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);

	if (packets < budget && napi_complete_done(napi, packets)) {
		if (READ_ONCE(lp->rx_dim_enabled)) {
			struct dim_sample sample = {
				.time = ktime_get(),
				/* Safe because we are the only writer */
				.pkt_ctr = u64_stats_read(&lp->rx_packets),
				.byte_ctr = u64_stats_read(&lp->rx_bytes),
				.event_ctr = READ_ONCE(lp->rx_irqs),
			};

			net_dim(&lp->rx_dim, &sample);
		}

		/* Re-enable RX completion interrupts. This should
		 * cause an immediate interrupt if any RX packets are
		 * already pending.
		 */
		spin_lock_irq(&lp->rx_cr_lock);
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
		spin_unlock_irq(&lp->rx_cr_lock);
	}
	return packets;
}

/**
 * axienet_tx_irq - Tx Done Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
 * TX BD processing.
 */
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);

	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;

	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);

	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
			   (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
			   (lp->tx_bd_v[lp->tx_bd_ci]).phys);
		schedule_work(&lp->dma_err_task);
	} else {
		/* Disable further TX completion interrupts and schedule
		 * NAPI to handle the completions.
		 */
		if (napi_schedule_prep(&lp->napi_tx)) {
			u32 cr;

			spin_lock(&lp->tx_cr_lock);
			cr = lp->tx_dma_cr;
			cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
			axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
			spin_unlock(&lp->tx_cr_lock);
			__napi_schedule(&lp->napi_tx);
		}
	}

	return IRQ_HANDLED;
}

/**
 * axienet_rx_irq - Rx Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
 * processing.
 */
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);

	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;

	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);

	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
			   (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
			   (lp->rx_bd_v[lp->rx_bd_ci]).phys);
		schedule_work(&lp->dma_err_task);
	} else {
		/* Disable further RX completion interrupts and schedule
		 * NAPI receive.
		 */
		WRITE_ONCE(lp->rx_irqs, READ_ONCE(lp->rx_irqs) + 1);
		if (napi_schedule_prep(&lp->napi_rx)) {
			u32 cr;

			spin_lock(&lp->rx_cr_lock);
			cr = lp->rx_dma_cr;
			cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
			axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
			spin_unlock(&lp->rx_cr_lock);

			__napi_schedule(&lp->napi_rx);
		}
	}

	return IRQ_HANDLED;
}

/**
 * axienet_eth_irq - Ethernet core Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
 *
 * Handle miscellaneous conditions indicated by Ethernet core IRQ.
 */
static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);
	unsigned int pending;

	pending = axienet_ior(lp, XAE_IP_OFFSET);
	if (!pending)
		return IRQ_NONE;

	if (pending & XAE_INT_RXFIFOOVR_MASK)
		ndev->stats.rx_missed_errors++;

	if (pending & XAE_INT_RXRJECT_MASK)
		ndev->stats.rx_dropped++;

	axienet_iow(lp, XAE_IS_OFFSET, pending);
	return IRQ_HANDLED;
}

static void axienet_dma_err_handler(struct work_struct *work);

/**
 * axienet_rx_submit_desc - Submit an rx descriptor to the dmaengine.
 * Allocate an skbuff, map it, obtain a DMA descriptor for it,
 * attach the callback information and submit the descriptor.
 *
 * @ndev: net_device pointer
 *
 */
static void axienet_rx_submit_desc(struct net_device *ndev)
{
	struct dma_async_tx_descriptor *dma_rx_desc = NULL;
	struct axienet_local *lp = netdev_priv(ndev);
	struct skbuf_dma_descriptor *skbuf_dma;
	struct sk_buff *skb;
	dma_addr_t addr;

	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_head);
	if (!skbuf_dma)
		return;

	skb = netdev_alloc_skb(ndev, lp->max_frm_size);
	if (!skb)
		return;

	sg_init_table(skbuf_dma->sgl, 1);
	addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(lp->dev, addr))) {
		if (net_ratelimit())
			netdev_err(ndev, "DMA mapping error\n");
		goto rx_submit_err_free_skb;
	}
	sg_dma_address(skbuf_dma->sgl) = addr;
	sg_dma_len(skbuf_dma->sgl) = lp->max_frm_size;
	dma_rx_desc = dmaengine_prep_slave_sg(lp->rx_chan, skbuf_dma->sgl,
					      1, DMA_DEV_TO_MEM,
					      DMA_PREP_INTERRUPT);
	if (!dma_rx_desc)
		goto rx_submit_err_unmap_skb;

	skbuf_dma->skb = skb;
	skbuf_dma->dma_address = sg_dma_address(skbuf_dma->sgl);
	skbuf_dma->desc = dma_rx_desc;
	dma_rx_desc->callback_param = lp;
	dma_rx_desc->callback_result = axienet_dma_rx_cb;
	lp->rx_ring_head++;
	dmaengine_submit(dma_rx_desc);

	return;

rx_submit_err_unmap_skb:
	dma_unmap_single(lp->dev, addr, lp->max_frm_size, DMA_FROM_DEVICE);
rx_submit_err_free_skb:
	dev_kfree_skb(skb);
}

/**
 * axienet_init_dmaengine - init the dmaengine code.
 * @ndev: Pointer to net_device structure
 *
 * Return: 0, on success.
 *	   non-zero error value on failure
 *
 * This is the dmaengine initialization code.
 */
static int axienet_init_dmaengine(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);
	struct skbuf_dma_descriptor *skbuf_dma;
	int i, ret;

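	/* The channel names used here presumably match the "dma-names"
	 * entries of the DMA controller node in the device tree; that is an
	 * assumption about the binding, not something visible in this file.
	 */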
	lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0");
	if (IS_ERR(lp->tx_chan)) {
		dev_err(lp->dev, "No Ethernet DMA (TX) channel found\n");
		return PTR_ERR(lp->tx_chan);
	}

	lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0");
	if (IS_ERR(lp->rx_chan)) {
		ret = PTR_ERR(lp->rx_chan);
		dev_err(lp->dev, "No Ethernet DMA (RX) channel found\n");
		goto err_dma_release_tx;
	}

	lp->tx_ring_tail = 0;
	lp->tx_ring_head = 0;
	lp->rx_ring_tail = 0;
	lp->rx_ring_head = 0;
	lp->tx_skb_ring = kcalloc(TX_BD_NUM_MAX, sizeof(*lp->tx_skb_ring),
				  GFP_KERNEL);
	if (!lp->tx_skb_ring) {
		ret = -ENOMEM;
		goto err_dma_release_rx;
	}
	for (i = 0; i < TX_BD_NUM_MAX; i++) {
		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
		if (!skbuf_dma) {
			ret = -ENOMEM;
			goto err_free_tx_skb_ring;
		}
		lp->tx_skb_ring[i] = skbuf_dma;
	}

	lp->rx_skb_ring = kcalloc(RX_BUF_NUM_DEFAULT, sizeof(*lp->rx_skb_ring),
				  GFP_KERNEL);
	if (!lp->rx_skb_ring) {
		ret = -ENOMEM;
		goto err_free_tx_skb_ring;
	}
	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) {
		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
		if (!skbuf_dma) {
			ret = -ENOMEM;
			goto err_free_rx_skb_ring;
		}
		lp->rx_skb_ring[i] = skbuf_dma;
	}
	/* TODO: Instead of BD_NUM_DEFAULT use runtime support */
	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
		axienet_rx_submit_desc(ndev);
	dma_async_issue_pending(lp->rx_chan);

	return 0;

err_free_rx_skb_ring:
	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
		kfree(lp->rx_skb_ring[i]);
	kfree(lp->rx_skb_ring);
err_free_tx_skb_ring:
	for (i = 0; i < TX_BD_NUM_MAX; i++)
		kfree(lp->tx_skb_ring[i]);
	kfree(lp->tx_skb_ring);
err_dma_release_rx:
	dma_release_channel(lp->rx_chan);
err_dma_release_tx:
	dma_release_channel(lp->tx_chan);
	return ret;
}

/**
 * axienet_init_legacy_dma - init the dma legacy code.
 * @ndev: Pointer to net_device structure
 *
 * Return: 0, on success.
 *	   non-zero error value on failure
 *
 * This is the dma initialization code. It also registers the interrupt
 * service routines, enables the interrupt lines and ISR handling.
 *
 */
static int axienet_init_legacy_dma(struct net_device *ndev)
{
	int ret;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Enable worker thread for Axi DMA error handling */
	lp->stopping = false;
	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);

	napi_enable(&lp->napi_rx);
	napi_enable(&lp->napi_tx);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_rx_irq;
	/* Enable interrupts for Axi Ethernet core (if defined) */
	if (lp->eth_irq > 0) {
		ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
				  ndev->name, ndev);
		if (ret)
			goto err_eth_irq;
	}

	return 0;

err_eth_irq:
	free_irq(lp->rx_irq, ndev);
err_rx_irq:
	free_irq(lp->tx_irq, ndev);
err_tx_irq:
	napi_disable(&lp->napi_tx);
	napi_disable(&lp->napi_rx);
	cancel_work_sync(&lp->dma_err_task);
	dev_err(lp->dev, "request_irq() failed\n");
	return ret;
}

6b1b40f7
SBNG
1651/**
1652 * axienet_open - Driver open routine.
1653 * @ndev: Pointer to net_device structure
1654 *
1655 * Return: 0, on success.
1656 * non-zero error value on failure
1657 *
 1658 * This is the driver open routine. It resets the Axi Ethernet core
 1659 * (through the Axi DMA core), connects and starts the PHY via phylink,
 1660 * and then brings up either the dmaengine or the legacy DMA path,
 1661 * registering the interrupt handlers and initializing the buffer
 1662 * descriptors.
1663 */
1664static int axienet_open(struct net_device *ndev)
1665{
1666 int ret;
1667 struct axienet_local *lp = netdev_priv(ndev);
1668
6b1b40f7
SBNG
1669 /* When we do an Axi Ethernet reset, it resets the complete core
1670 * including the MDIO. MDIO must be disabled before resetting.
1671 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
1672 */
1673 axienet_lock_mii(lp);
1674 ret = axienet_device_reset(ndev);
 1675	axienet_unlock_mii(lp);
	if (ret)
		return ret;
1676
1677 ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
1678 if (ret) {
1679 dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
1680 return ret;
1681 }
1682
1683 phylink_start(lp->phylink);
1684
76abb5d6
SA
1685 /* Start the statistics refresh work */
1686 schedule_delayed_work(&lp->stats_work, 0);
1687
6a91b846
RSP
1688 if (lp->use_dmaengine) {
1689 /* Enable interrupts for Axi Ethernet core (if defined) */
1690 if (lp->eth_irq > 0) {
1691 ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1692 ndev->name, ndev);
1693 if (ret)
1694 goto err_phy;
1695 }
1696
1697 ret = axienet_init_dmaengine(ndev);
1698 if (ret < 0)
1699 goto err_free_eth_irq;
1700 } else {
6b1b40f7
SBNG
1701 ret = axienet_init_legacy_dma(ndev);
1702 if (ret)
1703 goto err_phy;
1704 }
1705
1706 return 0;
1707
6a91b846
RSP
1708err_free_eth_irq:
1709 if (lp->eth_irq > 0)
1710 free_irq(lp->eth_irq, ndev);
6b1b40f7 1711err_phy:
e1d27d29 1712 cancel_work_sync(&lp->rx_dim.work);
76abb5d6 1713 cancel_delayed_work_sync(&lp->stats_work);
6b1b40f7
SBNG
1714 phylink_stop(lp->phylink);
1715 phylink_disconnect_phy(lp->phylink);
1716 return ret;
1717}
1718
8a3b7a25
DB
1719/**
1720 * axienet_stop - Driver stop routine.
1721 * @ndev: Pointer to net_device structure
1722 *
b0d081c5 1723 * Return: 0, on success.
8a3b7a25 1724 *
f5203a3d 1725 * This is the driver stop routine. It calls phylink_stop() and disconnects the PHY
8a3b7a25
DB
1726 * device. It also removes the interrupt handlers and disables the interrupts.
1727 * The Axi DMA Tx/Rx BDs are released.
1728 */
1729static int axienet_stop(struct net_device *ndev)
1730{
8a3b7a25 1731 struct axienet_local *lp = netdev_priv(ndev);
6a91b846 1732 int i;
8a3b7a25 1733
6b1b40f7 1734 if (!lp->use_dmaengine) {
858430db
SA
1735 WRITE_ONCE(lp->stopping, true);
1736 flush_work(&lp->dma_err_task);
1737
6b1b40f7
SBNG
1738 napi_disable(&lp->napi_tx);
1739 napi_disable(&lp->napi_rx);
1740 }
cc37610c 1741
e1d27d29 1742 cancel_work_sync(&lp->rx_dim.work);
76abb5d6
SA
1743 cancel_delayed_work_sync(&lp->stats_work);
1744
f5203a3d
RH
1745 phylink_stop(lp->phylink);
1746 phylink_disconnect_phy(lp->phylink);
1747
8a3b7a25
DB
1748 axienet_setoptions(ndev, lp->options &
1749 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1750
6b1b40f7
SBNG
1751 if (!lp->use_dmaengine) {
1752 axienet_dma_stop(lp);
1753 cancel_work_sync(&lp->dma_err_task);
1754 free_irq(lp->tx_irq, ndev);
1755 free_irq(lp->rx_irq, ndev);
1756 axienet_dma_bd_release(ndev);
6a91b846
RSP
1757 } else {
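		/* Stop both channels and wait for any in-flight completion
		 * callbacks to finish before the skb rings are freed below.
		 */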
1758 dmaengine_terminate_sync(lp->tx_chan);
1759 dmaengine_synchronize(lp->tx_chan);
1760 dmaengine_terminate_sync(lp->rx_chan);
1761 dmaengine_synchronize(lp->rx_chan);
1762
1763 for (i = 0; i < TX_BD_NUM_MAX; i++)
1764 kfree(lp->tx_skb_ring[i]);
1765 kfree(lp->tx_skb_ring);
1766 for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1767 kfree(lp->rx_skb_ring[i]);
1768 kfree(lp->rx_skb_ring);
1769
1770 dma_release_channel(lp->rx_chan);
1771 dma_release_channel(lp->tx_chan);
6b1b40f7 1772 }
489d4d77 1773
c900e49d 1774 netdev_reset_queue(ndev);
489d4d77
RH
1775 axienet_iow(lp, XAE_IE_OFFSET, 0);
1776
522856ce
RH
1777 if (lp->eth_irq > 0)
1778 free_irq(lp->eth_irq, ndev);
8a3b7a25
DB
1779 return 0;
1780}
1781
1782/**
1783 * axienet_change_mtu - Driver change mtu routine.
1784 * @ndev: Pointer to net_device structure
1785 * @new_mtu: New mtu value to be applied
1786 *
b0d081c5 1787 * Return: 0 on success, -EBUSY if the interface is running, or -EINVAL if
8a3b7a25
DB
 1788 *	   the new MTU does not fit in the Rx memory configured in hardware.
1789 * This is the change mtu driver routine. It checks if the Axi Ethernet
1790 * hardware supports jumbo frames before changing the mtu. This can be
1791 * called only when the device is not up.
1792 */
1793static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
1794{
1795 struct axienet_local *lp = netdev_priv(ndev);
1796
1797 if (netif_running(ndev))
1798 return -EBUSY;
f080a8c3
ST
1799
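	/* The full frame (MTU + VLAN Ethernet header + trailer) must fit in
	 * the Rx memory configured in hardware (xlnx,rxmem).
	 */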
1800 if ((new_mtu + VLAN_ETH_HLEN +
1801 XAE_TRL_SIZE) > lp->rxmem)
1802 return -EINVAL;
1803
1eb2cded 1804 WRITE_ONCE(ndev->mtu, new_mtu);
8a3b7a25
DB
1805
1806 return 0;
1807}
1808
1809#ifdef CONFIG_NET_POLL_CONTROLLER
1810/**
1811 * axienet_poll_controller - Axi Ethernet poll mechanism.
1812 * @ndev: Pointer to net_device structure
1813 *
1814 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
1815 * to polling the ISRs and are enabled back after the polling is done.
1816 */
1817static void axienet_poll_controller(struct net_device *ndev)
1818{
1819 struct axienet_local *lp = netdev_priv(ndev);
f7061a3e 1820
8a3b7a25
DB
1821 disable_irq(lp->tx_irq);
1822 disable_irq(lp->rx_irq);
 1823	axienet_rx_irq(lp->rx_irq, ndev);
 1824	axienet_tx_irq(lp->tx_irq, ndev);
1825 enable_irq(lp->tx_irq);
1826 enable_irq(lp->rx_irq);
1827}
1828#endif
1829
2a9b65ea
AP
1830static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1831{
1832 struct axienet_local *lp = netdev_priv(dev);
1833
1834 if (!netif_running(dev))
1835 return -EINVAL;
1836
1837 return phylink_mii_ioctl(lp->phylink, rq, cmd);
1838}
1839
cb45a8bf
RH
1840static void
1841axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1842{
1843 struct axienet_local *lp = netdev_priv(dev);
1844 unsigned int start;
1845
1846 netdev_stats_to_stats64(stats, &dev->stats);
1847
1848 do {
068c38ad 1849 start = u64_stats_fetch_begin(&lp->rx_stat_sync);
cb45a8bf
RH
1850 stats->rx_packets = u64_stats_read(&lp->rx_packets);
1851 stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
068c38ad 1852 } while (u64_stats_fetch_retry(&lp->rx_stat_sync, start));
cb45a8bf
RH
1853
1854 do {
068c38ad 1855 start = u64_stats_fetch_begin(&lp->tx_stat_sync);
cb45a8bf
RH
1856 stats->tx_packets = u64_stats_read(&lp->tx_packets);
1857 stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
068c38ad 1858 } while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));
76abb5d6
SA
1859
1860 if (!(lp->features & XAE_FEATURE_STATS))
1861 return;
1862
1863 do {
1864 start = read_seqcount_begin(&lp->hw_stats_seqcount);
1865 stats->rx_length_errors =
1866 axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
1867 stats->rx_crc_errors = axienet_stat(lp, STAT_RX_FCS_ERRORS);
1868 stats->rx_frame_errors =
1869 axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
1870 stats->rx_errors = axienet_stat(lp, STAT_UNDERSIZE_FRAMES) +
1871 axienet_stat(lp, STAT_FRAGMENT_FRAMES) +
1872 stats->rx_length_errors +
1873 stats->rx_crc_errors +
1874 stats->rx_frame_errors;
1875 stats->multicast = axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
1876
1877 stats->tx_aborted_errors =
1878 axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
1879 stats->tx_fifo_errors =
1880 axienet_stat(lp, STAT_TX_UNDERRUN_ERRORS);
1881 stats->tx_window_errors =
1882 axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
1883 stats->tx_errors = axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL) +
1884 stats->tx_aborted_errors +
1885 stats->tx_fifo_errors +
1886 stats->tx_window_errors;
1887 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
cb45a8bf
RH
1888}
1889
8a3b7a25
DB
1890static const struct net_device_ops axienet_netdev_ops = {
1891 .ndo_open = axienet_open,
1892 .ndo_stop = axienet_stop,
1893 .ndo_start_xmit = axienet_start_xmit,
cb45a8bf 1894 .ndo_get_stats64 = axienet_get_stats64,
8a3b7a25
DB
1895 .ndo_change_mtu = axienet_change_mtu,
1896 .ndo_set_mac_address = netdev_set_mac_address,
1897 .ndo_validate_addr = eth_validate_addr,
a7605370 1898 .ndo_eth_ioctl = axienet_ioctl,
8a3b7a25
DB
1899 .ndo_set_rx_mode = axienet_set_multicast_list,
1900#ifdef CONFIG_NET_POLL_CONTROLLER
1901 .ndo_poll_controller = axienet_poll_controller,
1902#endif
1903};
1904
6a91b846
RSP
1905static const struct net_device_ops axienet_netdev_dmaengine_ops = {
1906 .ndo_open = axienet_open,
1907 .ndo_stop = axienet_stop,
1908 .ndo_start_xmit = axienet_start_xmit_dmaengine,
1909 .ndo_get_stats64 = axienet_get_stats64,
1910 .ndo_change_mtu = axienet_change_mtu,
1911 .ndo_set_mac_address = netdev_set_mac_address,
1912 .ndo_validate_addr = eth_validate_addr,
1913 .ndo_eth_ioctl = axienet_ioctl,
1914 .ndo_set_rx_mode = axienet_set_multicast_list,
1915};
1916
8a3b7a25
DB
1917/**
1918 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
1919 * @ndev: Pointer to net_device structure
1920 * @ed: Pointer to ethtool_drvinfo structure
1921 *
1922 * This implements ethtool command for getting the driver information.
1923 * Issue "ethtool -i ethX" under linux prompt to execute this function.
1924 */
1925static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
1926 struct ethtool_drvinfo *ed)
1927{
f029c781
WS
1928 strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
1929 strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
8a3b7a25
DB
1930}
1931
1932/**
1933 * axienet_ethtools_get_regs_len - Get the total regs length present in the
1934 * AxiEthernet core.
1935 * @ndev: Pointer to net_device structure
1936 *
1937 * This implements ethtool command for getting the total register length
1938 * information.
b0d081c5
MS
1939 *
1940 * Return: the total regs length
8a3b7a25
DB
1941 */
1942static int axienet_ethtools_get_regs_len(struct net_device *ndev)
1943{
1944 return sizeof(u32) * AXIENET_REGS_N;
1945}
1946
1947/**
1948 * axienet_ethtools_get_regs - Dump the contents of all registers present
1949 * in AxiEthernet core.
1950 * @ndev: Pointer to net_device structure
1951 * @regs: Pointer to ethtool_regs structure
1952 * @ret: Void pointer used to return the contents of the registers.
1953 *
1954 * This implements ethtool command for getting the Axi Ethernet register dump.
1955 * Issue "ethtool -d ethX" to execute this function.
1956 */
1957static void axienet_ethtools_get_regs(struct net_device *ndev,
1958 struct ethtool_regs *regs, void *ret)
1959{
7fe85bb3 1960 u32 *data = (u32 *)ret;
8a3b7a25
DB
1961 size_t len = sizeof(u32) * AXIENET_REGS_N;
1962 struct axienet_local *lp = netdev_priv(ndev);
1963
1964 regs->version = 0;
1965 regs->len = len;
1966
1967 memset(data, 0, len);
1968 data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
1969 data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
1970 data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
1971 data[3] = axienet_ior(lp, XAE_IS_OFFSET);
1972 data[4] = axienet_ior(lp, XAE_IP_OFFSET);
1973 data[5] = axienet_ior(lp, XAE_IE_OFFSET);
1974 data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
1975 data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
1976 data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
1977 data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
1978 data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
1979 data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
1980 data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
1981 data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
1982 data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
1983 data[15] = axienet_ior(lp, XAE_TC_OFFSET);
1984 data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
1985 data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
1986 data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
1987 data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
1988 data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
1989 data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
1990 data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
8a3b7a25
DB
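	/* data[23] to data[26] are not mapped to any register and are left
	 * at zero by the memset() above.
	 */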
1991 data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
1992 data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
1993 data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
1994 data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
1995 data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
6b1b40f7
SBNG
1996 if (!lp->use_dmaengine) {
1997 data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1998 data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1999 data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
2000 data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
2001 data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
2002 data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
2003 data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
2004 data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
2005 }
8a3b7a25
DB
2006}
2007
74624944
HC
2008static void
2009axienet_ethtools_get_ringparam(struct net_device *ndev,
2010 struct ethtool_ringparam *ering,
2011 struct kernel_ethtool_ringparam *kernel_ering,
2012 struct netlink_ext_ack *extack)
8b09ca82
RH
2013{
2014 struct axienet_local *lp = netdev_priv(ndev);
2015
2016 ering->rx_max_pending = RX_BD_NUM_MAX;
2017 ering->rx_mini_max_pending = 0;
2018 ering->rx_jumbo_max_pending = 0;
2019 ering->tx_max_pending = TX_BD_NUM_MAX;
2020 ering->rx_pending = lp->rx_bd_num;
2021 ering->rx_mini_pending = 0;
2022 ering->rx_jumbo_pending = 0;
2023 ering->tx_pending = lp->tx_bd_num;
2024}
2025
74624944
HC
2026static int
2027axienet_ethtools_set_ringparam(struct net_device *ndev,
2028 struct ethtool_ringparam *ering,
2029 struct kernel_ethtool_ringparam *kernel_ering,
2030 struct netlink_ext_ack *extack)
8b09ca82
RH
2031{
2032 struct axienet_local *lp = netdev_priv(ndev);
2033
2034 if (ering->rx_pending > RX_BD_NUM_MAX ||
2035 ering->rx_mini_pending ||
2036 ering->rx_jumbo_pending ||
70f5817d
RH
2037 ering->tx_pending < TX_BD_NUM_MIN ||
2038 ering->tx_pending > TX_BD_NUM_MAX)
8b09ca82
RH
2039 return -EINVAL;
2040
2041 if (netif_running(ndev))
2042 return -EBUSY;
2043
2044 lp->rx_bd_num = ering->rx_pending;
2045 lp->tx_bd_num = ering->tx_pending;
2046 return 0;
2047}
2048
8a3b7a25
DB
2049/**
2050 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
2051 * Tx and Rx paths.
2052 * @ndev: Pointer to net_device structure
2053 * @epauseparm: Pointer to ethtool_pauseparam structure.
2054 *
2055 * This implements ethtool command for getting axi ethernet pause frame
2056 * setting. Issue "ethtool -a ethX" to execute this function.
2057 */
2058static void
2059axienet_ethtools_get_pauseparam(struct net_device *ndev,
2060 struct ethtool_pauseparam *epauseparm)
2061{
8a3b7a25 2062 struct axienet_local *lp = netdev_priv(ndev);
f5203a3d
RH
2063
2064 phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
8a3b7a25
DB
2065}
2066
2067/**
2068 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
2069 * settings.
2070 * @ndev: Pointer to net_device structure
b0d081c5 2071 * @epauseparm: Pointer to ethtool_pauseparam structure
8a3b7a25
DB
2072 *
2073 * This implements ethtool command for enabling flow control on Rx and Tx
2074 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
2075 * function.
b0d081c5
MS
2076 *
 2077 * Return: 0 on success, or a negative error code from phylink on failure
8a3b7a25
DB
2078 */
2079static int
2080axienet_ethtools_set_pauseparam(struct net_device *ndev,
2081 struct ethtool_pauseparam *epauseparm)
2082{
8a3b7a25
DB
2083 struct axienet_local *lp = netdev_priv(ndev);
2084
f5203a3d 2085 return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
8a3b7a25
DB
2086}
2087
d048c717
SA
2088/**
2089 * axienet_update_coalesce_rx() - Set RX CR
2090 * @lp: Device private data
2091 * @cr: Value to write to the RX CR
2092 * @mask: Bits to set from @cr
2093 */
2094static void axienet_update_coalesce_rx(struct axienet_local *lp, u32 cr,
2095 u32 mask)
2096{
2097 spin_lock_irq(&lp->rx_cr_lock);
2098 lp->rx_dma_cr &= ~mask;
2099 lp->rx_dma_cr |= cr;
2100 /* If DMA isn't started, then the settings will be applied the next
2101 * time dma_start() is called.
2102 */
2103 if (lp->rx_dma_started) {
2104 u32 reg = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
2105
2106 /* Don't enable IRQs if they are disabled by NAPI */
2107 if (reg & XAXIDMA_IRQ_ALL_MASK)
2108 cr = lp->rx_dma_cr;
2109 else
2110 cr = lp->rx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK;
2111 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
2112 }
2113 spin_unlock_irq(&lp->rx_cr_lock);
2114}
2115
e1d27d29
SA
2116/**
2117 * axienet_dim_coalesce_count_rx() - RX coalesce count for DIM
2118 * @lp: Device private data
312e6a58
SG
2119 *
2120 * Return: RX coalescing frame count value for DIM.
e1d27d29
SA
2121 */
2122static u32 axienet_dim_coalesce_count_rx(struct axienet_local *lp)
2123{
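	/* The frame count grows geometrically with the DIM profile index:
	 * 1, 4, 16, 64, ... frames, capped at 255.
	 */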
2124 return min(1 << (lp->rx_dim.profile_ix << 1), 255);
2125}
2126
2127/**
2128 * axienet_rx_dim_work() - Adjust RX DIM settings
2129 * @work: The work struct
2130 */
2131static void axienet_rx_dim_work(struct work_struct *work)
2132{
2133 struct axienet_local *lp =
2134 container_of(work, struct axienet_local, rx_dim.work);
2135 u32 cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp), 0);
2136 u32 mask = XAXIDMA_COALESCE_MASK | XAXIDMA_IRQ_IOC_MASK |
2137 XAXIDMA_IRQ_ERROR_MASK;
2138
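	/* The mask excludes the delay-timer fields, so DIM only adjusts the
	 * frame count while the user-configured timeout is preserved.
	 */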
2139 axienet_update_coalesce_rx(lp, cr, mask);
2140 lp->rx_dim.state = DIM_START_MEASURE;
2141}
2142
d048c717
SA
2143/**
2144 * axienet_update_coalesce_tx() - Set TX CR
2145 * @lp: Device private data
2146 * @cr: Value to write to the TX CR
2147 * @mask: Bits to set from @cr
2148 */
2149static void axienet_update_coalesce_tx(struct axienet_local *lp, u32 cr,
2150 u32 mask)
2151{
2152 spin_lock_irq(&lp->tx_cr_lock);
2153 lp->tx_dma_cr &= ~mask;
2154 lp->tx_dma_cr |= cr;
2155 /* If DMA isn't started, then the settings will be applied the next
2156 * time dma_start() is called.
2157 */
2158 if (lp->tx_dma_started) {
2159 u32 reg = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
2160
2161 /* Don't enable IRQs if they are disabled by NAPI */
2162 if (reg & XAXIDMA_IRQ_ALL_MASK)
2163 cr = lp->tx_dma_cr;
2164 else
2165 cr = lp->tx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK;
2166 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
2167 }
2168 spin_unlock_irq(&lp->tx_cr_lock);
2169}
2170
8a3b7a25
DB
2171/**
2172 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
2173 * @ndev: Pointer to net_device structure
2174 * @ecoalesce: Pointer to ethtool_coalesce structure
f3ccfda1
YM
2175 * @kernel_coal: ethtool CQE mode setting structure
2176 * @extack: extack for reporting error messages
8a3b7a25
DB
2177 *
2178 * This implements ethtool command for getting the DMA interrupt coalescing
2179 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
2180 * execute this function.
b0d081c5
MS
2181 *
2182 * Return: 0 always
8a3b7a25 2183 */
f3ccfda1
YM
2184static int
2185axienet_ethtools_get_coalesce(struct net_device *ndev,
2186 struct ethtool_coalesce *ecoalesce,
2187 struct kernel_ethtool_coalesce *kernel_coal,
2188 struct netlink_ext_ack *extack)
8a3b7a25 2189{
8a3b7a25 2190 struct axienet_local *lp = netdev_priv(ndev);
eb80520e 2191 u32 cr;
0b79b8dc 2192
e1d27d29
SA
2193 ecoalesce->use_adaptive_rx_coalesce = lp->rx_dim_enabled;
2194
eb80520e
SA
2195 spin_lock_irq(&lp->rx_cr_lock);
2196 cr = lp->rx_dma_cr;
2197 spin_unlock_irq(&lp->rx_cr_lock);
2198 axienet_coalesce_params(lp, cr,
2199 &ecoalesce->rx_max_coalesced_frames,
2200 &ecoalesce->rx_coalesce_usecs);
2201
2202 spin_lock_irq(&lp->tx_cr_lock);
2203 cr = lp->tx_dma_cr;
2204 spin_unlock_irq(&lp->tx_cr_lock);
2205 axienet_coalesce_params(lp, cr,
2206 &ecoalesce->tx_max_coalesced_frames,
2207 &ecoalesce->tx_coalesce_usecs);
8a3b7a25
DB
2208 return 0;
2209}
2210
2211/**
2212 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
2213 * @ndev: Pointer to net_device structure
2214 * @ecoalesce: Pointer to ethtool_coalesce structure
f3ccfda1
YM
2215 * @kernel_coal: ethtool CQE mode setting structure
2216 * @extack: extack for reporting error messages
8a3b7a25
DB
2217 *
2218 * This implements ethtool command for setting the DMA interrupt coalescing
2219 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
2220 * prompt to execute this function.
b0d081c5
MS
2221 *
2222 * Return: 0, on success, Non-zero error value on failure.
8a3b7a25 2223 */
f3ccfda1
YM
2224static int
2225axienet_ethtools_set_coalesce(struct net_device *ndev,
2226 struct ethtool_coalesce *ecoalesce,
2227 struct kernel_ethtool_coalesce *kernel_coal,
2228 struct netlink_ext_ack *extack)
8a3b7a25
DB
2229{
2230 struct axienet_local *lp = netdev_priv(ndev);
e1d27d29
SA
2231 bool new_dim = ecoalesce->use_adaptive_rx_coalesce;
2232 bool old_dim = lp->rx_dim_enabled;
2233 u32 cr, mask = ~XAXIDMA_CR_RUNSTOP_MASK;
8a3b7a25 2234
c17ff476
SA
2235 if (ecoalesce->rx_max_coalesced_frames > 255 ||
2236 ecoalesce->tx_max_coalesced_frames > 255) {
2237 NL_SET_ERR_MSG(extack, "frames must be less than 256");
2238 return -EINVAL;
2239 }
2240
9d301a53
SA
2241 if (!ecoalesce->rx_max_coalesced_frames ||
2242 !ecoalesce->tx_max_coalesced_frames) {
2243 NL_SET_ERR_MSG(extack, "frames must be non-zero");
2244 return -EINVAL;
2245 }
2246
e1d27d29 2247 if (((ecoalesce->rx_max_coalesced_frames > 1 || new_dim) &&
9d301a53
SA
2248 !ecoalesce->rx_coalesce_usecs) ||
2249 (ecoalesce->tx_max_coalesced_frames > 1 &&
2250 !ecoalesce->tx_coalesce_usecs)) {
2251 NL_SET_ERR_MSG(extack,
2252 "usecs must be non-zero when frames is greater than one");
2253 return -EINVAL;
2254 }
2255
e1d27d29
SA
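	/* Three cases for the Rx path: DIM is being turned on (derive the
	 * count from the current profile), DIM is off or being turned off
	 * (use the requested frame count), or DIM stays enabled (only the
	 * delay timer is updated below).
	 */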
2256 if (new_dim && !old_dim) {
2257 cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp),
2258 ecoalesce->rx_coalesce_usecs);
2259 } else if (!new_dim) {
2260 if (old_dim) {
2261 WRITE_ONCE(lp->rx_dim_enabled, false);
2262 napi_synchronize(&lp->napi_rx);
2263 flush_work(&lp->rx_dim.work);
2264 }
2265
2266 cr = axienet_calc_cr(lp, ecoalesce->rx_max_coalesced_frames,
2267 ecoalesce->rx_coalesce_usecs);
2268 } else {
2269 /* Dummy value for count just to calculate timer */
2270 cr = axienet_calc_cr(lp, 2, ecoalesce->rx_coalesce_usecs);
2271 mask = XAXIDMA_DELAY_MASK | XAXIDMA_IRQ_DELAY_MASK;
2272 }
2273
2274 axienet_update_coalesce_rx(lp, cr, mask);
2275 if (new_dim && !old_dim)
2276 WRITE_ONCE(lp->rx_dim_enabled, true);
d048c717 2277
eb80520e
SA
2278 cr = axienet_calc_cr(lp, ecoalesce->tx_max_coalesced_frames,
2279 ecoalesce->tx_coalesce_usecs);
d048c717 2280 axienet_update_coalesce_tx(lp, cr, ~XAXIDMA_CR_RUNSTOP_MASK);
8a3b7a25
DB
2281 return 0;
2282}
2283
f5203a3d
RH
2284static int
2285axienet_ethtools_get_link_ksettings(struct net_device *ndev,
2286 struct ethtool_link_ksettings *cmd)
2287{
2288 struct axienet_local *lp = netdev_priv(ndev);
2289
2290 return phylink_ethtool_ksettings_get(lp->phylink, cmd);
2291}
2292
2293static int
2294axienet_ethtools_set_link_ksettings(struct net_device *ndev,
2295 const struct ethtool_link_ksettings *cmd)
2296{
2297 struct axienet_local *lp = netdev_priv(ndev);
2298
2299 return phylink_ethtool_ksettings_set(lp->phylink, cmd);
2300}
2301
66b51663
RH
2302static int axienet_ethtools_nway_reset(struct net_device *dev)
2303{
2304 struct axienet_local *lp = netdev_priv(dev);
2305
2306 return phylink_ethtool_nway_reset(lp->phylink);
2307}
2308
76abb5d6
SA
2309static void axienet_ethtools_get_ethtool_stats(struct net_device *dev,
2310 struct ethtool_stats *stats,
2311 u64 *data)
2312{
2313 struct axienet_local *lp = netdev_priv(dev);
2314 unsigned int start;
2315
2316 do {
2317 start = read_seqcount_begin(&lp->hw_stats_seqcount);
2318 data[0] = axienet_stat(lp, STAT_RX_BYTES);
2319 data[1] = axienet_stat(lp, STAT_TX_BYTES);
2320 data[2] = axienet_stat(lp, STAT_RX_VLAN_FRAMES);
2321 data[3] = axienet_stat(lp, STAT_TX_VLAN_FRAMES);
2322 data[6] = axienet_stat(lp, STAT_TX_PFC_FRAMES);
2323 data[7] = axienet_stat(lp, STAT_RX_PFC_FRAMES);
2324 data[8] = axienet_stat(lp, STAT_USER_DEFINED0);
2325 data[9] = axienet_stat(lp, STAT_USER_DEFINED1);
2326 data[10] = axienet_stat(lp, STAT_USER_DEFINED2);
2327 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2328}
2329
2330static const char axienet_ethtool_stats_strings[][ETH_GSTRING_LEN] = {
2331 "Received bytes",
2332 "Transmitted bytes",
2333 "RX Good VLAN Tagged Frames",
2334 "TX Good VLAN Tagged Frames",
2335 "TX Good PFC Frames",
2336 "RX Good PFC Frames",
2337 "User Defined Counter 0",
2338 "User Defined Counter 1",
2339 "User Defined Counter 2",
2340};
2341
2342static void axienet_ethtools_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2343{
2344 switch (stringset) {
2345 case ETH_SS_STATS:
2346 memcpy(data, axienet_ethtool_stats_strings,
2347 sizeof(axienet_ethtool_stats_strings));
2348 break;
2349 }
2350}
2351
2352static int axienet_ethtools_get_sset_count(struct net_device *dev, int sset)
2353{
2354 struct axienet_local *lp = netdev_priv(dev);
2355
2356 switch (sset) {
2357 case ETH_SS_STATS:
2358 if (lp->features & XAE_FEATURE_STATS)
2359 return ARRAY_SIZE(axienet_ethtool_stats_strings);
2360 fallthrough;
2361 default:
2362 return -EOPNOTSUPP;
2363 }
2364}
2365
2366static void
2367axienet_ethtools_get_pause_stats(struct net_device *dev,
2368 struct ethtool_pause_stats *pause_stats)
2369{
2370 struct axienet_local *lp = netdev_priv(dev);
2371 unsigned int start;
2372
2373 if (!(lp->features & XAE_FEATURE_STATS))
2374 return;
2375
2376 do {
2377 start = read_seqcount_begin(&lp->hw_stats_seqcount);
2378 pause_stats->tx_pause_frames =
2379 axienet_stat(lp, STAT_TX_PAUSE_FRAMES);
2380 pause_stats->rx_pause_frames =
2381 axienet_stat(lp, STAT_RX_PAUSE_FRAMES);
2382 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2383}
2384
2385static void
2386axienet_ethtool_get_eth_mac_stats(struct net_device *dev,
2387 struct ethtool_eth_mac_stats *mac_stats)
2388{
2389 struct axienet_local *lp = netdev_priv(dev);
2390 unsigned int start;
2391
2392 if (!(lp->features & XAE_FEATURE_STATS))
2393 return;
2394
2395 do {
2396 start = read_seqcount_begin(&lp->hw_stats_seqcount);
2397 mac_stats->FramesTransmittedOK =
2398 axienet_stat(lp, STAT_TX_GOOD_FRAMES);
2399 mac_stats->SingleCollisionFrames =
2400 axienet_stat(lp, STAT_TX_SINGLE_COLLISION_FRAMES);
2401 mac_stats->MultipleCollisionFrames =
2402 axienet_stat(lp, STAT_TX_MULTIPLE_COLLISION_FRAMES);
2403 mac_stats->FramesReceivedOK =
2404 axienet_stat(lp, STAT_RX_GOOD_FRAMES);
2405 mac_stats->FrameCheckSequenceErrors =
2406 axienet_stat(lp, STAT_RX_FCS_ERRORS);
2407 mac_stats->AlignmentErrors =
2408 axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
2409 mac_stats->FramesWithDeferredXmissions =
2410 axienet_stat(lp, STAT_TX_DEFERRED_FRAMES);
2411 mac_stats->LateCollisions =
2412 axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
2413 mac_stats->FramesAbortedDueToXSColls =
2414 axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
2415 mac_stats->MulticastFramesXmittedOK =
2416 axienet_stat(lp, STAT_TX_MULTICAST_FRAMES);
2417 mac_stats->BroadcastFramesXmittedOK =
2418 axienet_stat(lp, STAT_TX_BROADCAST_FRAMES);
2419 mac_stats->FramesWithExcessiveDeferral =
2420 axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL);
2421 mac_stats->MulticastFramesReceivedOK =
2422 axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
2423 mac_stats->BroadcastFramesReceivedOK =
2424 axienet_stat(lp, STAT_RX_BROADCAST_FRAMES);
2425 mac_stats->InRangeLengthErrors =
2426 axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
2427 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2428}
2429
2430static void
2431axienet_ethtool_get_eth_ctrl_stats(struct net_device *dev,
2432 struct ethtool_eth_ctrl_stats *ctrl_stats)
2433{
2434 struct axienet_local *lp = netdev_priv(dev);
2435 unsigned int start;
2436
2437 if (!(lp->features & XAE_FEATURE_STATS))
2438 return;
2439
2440 do {
2441 start = read_seqcount_begin(&lp->hw_stats_seqcount);
2442 ctrl_stats->MACControlFramesTransmitted =
2443 axienet_stat(lp, STAT_TX_CONTROL_FRAMES);
2444 ctrl_stats->MACControlFramesReceived =
2445 axienet_stat(lp, STAT_RX_CONTROL_FRAMES);
2446 ctrl_stats->UnsupportedOpcodesReceived =
2447 axienet_stat(lp, STAT_RX_CONTROL_OPCODE_ERRORS);
2448 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2449}
2450
2451static const struct ethtool_rmon_hist_range axienet_rmon_ranges[] = {
2452 { 64, 64 },
2453 { 65, 127 },
2454 { 128, 255 },
2455 { 256, 511 },
2456 { 512, 1023 },
2457 { 1024, 1518 },
2458 { 1519, 16384 },
2459 { },
2460};
2461
2462static void
2463axienet_ethtool_get_rmon_stats(struct net_device *dev,
2464 struct ethtool_rmon_stats *rmon_stats,
2465 const struct ethtool_rmon_hist_range **ranges)
2466{
2467 struct axienet_local *lp = netdev_priv(dev);
2468 unsigned int start;
2469
2470 if (!(lp->features & XAE_FEATURE_STATS))
2471 return;
2472
2473 do {
2474 start = read_seqcount_begin(&lp->hw_stats_seqcount);
2475 rmon_stats->undersize_pkts =
2476 axienet_stat(lp, STAT_UNDERSIZE_FRAMES);
2477 rmon_stats->oversize_pkts =
2478 axienet_stat(lp, STAT_RX_OVERSIZE_FRAMES);
2479 rmon_stats->fragments =
2480 axienet_stat(lp, STAT_FRAGMENT_FRAMES);
2481
2482 rmon_stats->hist[0] =
2483 axienet_stat(lp, STAT_RX_64_BYTE_FRAMES);
2484 rmon_stats->hist[1] =
2485 axienet_stat(lp, STAT_RX_65_127_BYTE_FRAMES);
2486 rmon_stats->hist[2] =
2487 axienet_stat(lp, STAT_RX_128_255_BYTE_FRAMES);
2488 rmon_stats->hist[3] =
2489 axienet_stat(lp, STAT_RX_256_511_BYTE_FRAMES);
2490 rmon_stats->hist[4] =
2491 axienet_stat(lp, STAT_RX_512_1023_BYTE_FRAMES);
2492 rmon_stats->hist[5] =
2493 axienet_stat(lp, STAT_RX_1024_MAX_BYTE_FRAMES);
2494 rmon_stats->hist[6] =
2495 rmon_stats->oversize_pkts;
2496
2497 rmon_stats->hist_tx[0] =
2498 axienet_stat(lp, STAT_TX_64_BYTE_FRAMES);
2499 rmon_stats->hist_tx[1] =
2500 axienet_stat(lp, STAT_TX_65_127_BYTE_FRAMES);
2501 rmon_stats->hist_tx[2] =
2502 axienet_stat(lp, STAT_TX_128_255_BYTE_FRAMES);
2503 rmon_stats->hist_tx[3] =
2504 axienet_stat(lp, STAT_TX_256_511_BYTE_FRAMES);
2505 rmon_stats->hist_tx[4] =
2506 axienet_stat(lp, STAT_TX_512_1023_BYTE_FRAMES);
2507 rmon_stats->hist_tx[5] =
2508 axienet_stat(lp, STAT_TX_1024_MAX_BYTE_FRAMES);
2509 rmon_stats->hist_tx[6] =
2510 axienet_stat(lp, STAT_TX_OVERSIZE_FRAMES);
2511 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2512
2513 *ranges = axienet_rmon_ranges;
2514}
2515
c7735f1b 2516static const struct ethtool_ops axienet_ethtool_ops = {
0b79b8dc 2517 .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
e1d27d29
SA
2518 ETHTOOL_COALESCE_USECS |
2519 ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
8a3b7a25
DB
2520 .get_drvinfo = axienet_ethtools_get_drvinfo,
2521 .get_regs_len = axienet_ethtools_get_regs_len,
2522 .get_regs = axienet_ethtools_get_regs,
2523 .get_link = ethtool_op_get_link,
8b09ca82
RH
2524 .get_ringparam = axienet_ethtools_get_ringparam,
2525 .set_ringparam = axienet_ethtools_set_ringparam,
8a3b7a25
DB
2526 .get_pauseparam = axienet_ethtools_get_pauseparam,
2527 .set_pauseparam = axienet_ethtools_set_pauseparam,
2528 .get_coalesce = axienet_ethtools_get_coalesce,
2529 .set_coalesce = axienet_ethtools_set_coalesce,
f5203a3d
RH
2530 .get_link_ksettings = axienet_ethtools_get_link_ksettings,
2531 .set_link_ksettings = axienet_ethtools_set_link_ksettings,
66b51663 2532 .nway_reset = axienet_ethtools_nway_reset,
76abb5d6
SA
2533 .get_ethtool_stats = axienet_ethtools_get_ethtool_stats,
2534 .get_strings = axienet_ethtools_get_strings,
2535 .get_sset_count = axienet_ethtools_get_sset_count,
2536 .get_pause_stats = axienet_ethtools_get_pause_stats,
2537 .get_eth_mac_stats = axienet_ethtool_get_eth_mac_stats,
2538 .get_eth_ctrl_stats = axienet_ethtool_get_eth_ctrl_stats,
2539 .get_rmon_stats = axienet_ethtool_get_rmon_stats,
f5203a3d
RH
2540};
2541
7a86be6a 2542static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
f5203a3d 2543{
7a86be6a
RKO
2544 return container_of(pcs, struct axienet_local, pcs);
2545}
f5203a3d 2546
7a86be6a 2547static void axienet_pcs_get_state(struct phylink_pcs *pcs,
c6739623 2548 unsigned int neg_mode,
7a86be6a
RKO
2549 struct phylink_link_state *state)
2550{
2551 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2552
7e3cb4e8 2553 phylink_mii_c22_pcs_get_state(pcs_phy, neg_mode, state);
f5203a3d
RH
2554}
2555
7a86be6a 2556static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
f5203a3d 2557{
7a86be6a 2558 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
1a025560 2559
7a86be6a 2560 phylink_mii_c22_pcs_an_restart(pcs_phy);
f5203a3d
RH
2561}
2562
febf2aaf 2563static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
7a86be6a
RKO
2564 phy_interface_t interface,
2565 const unsigned long *advertising,
2566 bool permit_pause_to_mac)
6c8f06bb 2567{
7a86be6a
RKO
2568 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2569 struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
6c8f06bb
RH
2570 struct axienet_local *lp = netdev_priv(ndev);
2571 int ret;
2572
7a86be6a 2573 if (lp->switch_x_sgmii) {
03854d8a 2574 ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG,
7a86be6a 2575 interface == PHY_INTERFACE_MODE_SGMII ?
6c8f06bb 2576 XLNX_MII_STD_SELECT_SGMII : 0);
7a86be6a
RKO
2577 if (ret < 0) {
2578 netdev_warn(ndev,
2579 "Failed to switch PHY interface: %d\n",
6c8f06bb 2580 ret);
7a86be6a
RKO
2581 return ret;
2582 }
6c8f06bb 2583 }
7a86be6a 2584
febf2aaf
RKO
2585 ret = phylink_mii_c22_pcs_config(pcs_phy, interface, advertising,
2586 neg_mode);
7a86be6a
RKO
2587 if (ret < 0)
2588 netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);
2589
2590 return ret;
6c8f06bb
RH
2591}
2592
7a86be6a
RKO
2593static const struct phylink_pcs_ops axienet_pcs_ops = {
2594 .pcs_get_state = axienet_pcs_get_state,
2595 .pcs_config = axienet_pcs_config,
2596 .pcs_an_restart = axienet_pcs_an_restart,
2597};
2598
2599static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
2600 phy_interface_t interface)
95347842 2601{
1a025560
RH
2602 struct net_device *ndev = to_net_dev(config->dev);
2603 struct axienet_local *lp = netdev_priv(ndev);
1a025560 2604
7a86be6a
RKO
2605 if (interface == PHY_INTERFACE_MODE_1000BASEX ||
2606 interface == PHY_INTERFACE_MODE_SGMII)
2607 return &lp->pcs;
1a025560 2608
7a86be6a
RKO
2609 return NULL;
2610}
2611
2612static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
2613 const struct phylink_link_state *state)
2614{
2615 /* nothing meaningful to do */
95347842
RK
2616}
2617
2618static void axienet_mac_link_down(struct phylink_config *config,
2619 unsigned int mode,
2620 phy_interface_t interface)
2621{
2622 /* nothing meaningful to do */
2623}
2624
2625static void axienet_mac_link_up(struct phylink_config *config,
2626 struct phy_device *phy,
2627 unsigned int mode, phy_interface_t interface,
2628 int speed, int duplex,
2629 bool tx_pause, bool rx_pause)
f5203a3d
RH
2630{
2631 struct net_device *ndev = to_net_dev(config->dev);
2632 struct axienet_local *lp = netdev_priv(ndev);
2633 u32 emmc_reg, fcc_reg;
2634
2635 emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
2636 emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
2637
95347842 2638 switch (speed) {
f5203a3d
RH
2639 case SPEED_1000:
2640 emmc_reg |= XAE_EMMC_LINKSPD_1000;
2641 break;
2642 case SPEED_100:
2643 emmc_reg |= XAE_EMMC_LINKSPD_100;
2644 break;
2645 case SPEED_10:
2646 emmc_reg |= XAE_EMMC_LINKSPD_10;
2647 break;
2648 default:
2649 dev_err(&ndev->dev,
2650 "Speed other than 10, 100 or 1Gbps is not supported\n");
2651 break;
2652 }
2653
2654 axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
2655
2656 fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
95347842 2657 if (tx_pause)
f5203a3d
RH
2658 fcc_reg |= XAE_FCC_FCTX_MASK;
2659 else
2660 fcc_reg &= ~XAE_FCC_FCTX_MASK;
95347842 2661 if (rx_pause)
f5203a3d
RH
2662 fcc_reg |= XAE_FCC_FCRX_MASK;
2663 else
2664 fcc_reg &= ~XAE_FCC_FCRX_MASK;
2665 axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
2666}
2667
f5203a3d 2668static const struct phylink_mac_ops axienet_phylink_ops = {
7a86be6a 2669 .mac_select_pcs = axienet_mac_select_pcs,
f5203a3d
RH
2670 .mac_config = axienet_mac_config,
2671 .mac_link_down = axienet_mac_link_down,
2672 .mac_link_up = axienet_mac_link_up,
8a3b7a25
DB
2673};
2674
2675/**
24201a64
AP
2676 * axienet_dma_err_handler - Work queue task for Axi DMA Error
2677 * @work: pointer to work_struct
8a3b7a25
DB
2678 *
2679 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
2680 * Tx/Rx BDs.
2681 */
24201a64 2682static void axienet_dma_err_handler(struct work_struct *work)
8a3b7a25 2683{
84b9ccc0 2684 u32 i;
8a3b7a25 2685 u32 axienet_status;
84b9ccc0 2686 struct axidma_bd *cur_p;
24201a64
AP
2687 struct axienet_local *lp = container_of(work, struct axienet_local,
2688 dma_err_task);
8a3b7a25 2689 struct net_device *ndev = lp->ndev;
8a3b7a25 2690
858430db
SA
2691 /* Don't bother if we are going to stop anyway */
2692 if (READ_ONCE(lp->stopping))
2693 return;
2694
9e2bc267
RH
2695 napi_disable(&lp->napi_tx);
2696 napi_disable(&lp->napi_rx);
cc37610c 2697
8a3b7a25
DB
2698 axienet_setoptions(ndev, lp->options &
2699 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
84b9ccc0
RH
2700
2701 axienet_dma_stop(lp);
c900e49d 2702 netdev_reset_queue(ndev);
8a3b7a25 2703
8b09ca82 2704 for (i = 0; i < lp->tx_bd_num; i++) {
8a3b7a25 2705 cur_p = &lp->tx_bd_v[i];
4e958f33
AP
2706 if (cur_p->cntrl) {
2707 dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
2708
17882fd4 2709 dma_unmap_single(lp->dev, addr,
8a3b7a25
DB
2710 (cur_p->cntrl &
2711 XAXIDMA_BD_CTRL_LENGTH_MASK),
2712 DMA_TO_DEVICE);
4e958f33 2713 }
23e6b2dc
RH
2714 if (cur_p->skb)
2715 dev_kfree_skb_irq(cur_p->skb);
8a3b7a25 2716 cur_p->phys = 0;
4e958f33 2717 cur_p->phys_msb = 0;
8a3b7a25
DB
2718 cur_p->cntrl = 0;
2719 cur_p->status = 0;
2720 cur_p->app0 = 0;
2721 cur_p->app1 = 0;
2722 cur_p->app2 = 0;
2723 cur_p->app3 = 0;
2724 cur_p->app4 = 0;
23e6b2dc 2725 cur_p->skb = NULL;
8a3b7a25
DB
2726 }
2727
8b09ca82 2728 for (i = 0; i < lp->rx_bd_num; i++) {
8a3b7a25
DB
2729 cur_p = &lp->rx_bd_v[i];
2730 cur_p->status = 0;
2731 cur_p->app0 = 0;
2732 cur_p->app1 = 0;
2733 cur_p->app2 = 0;
2734 cur_p->app3 = 0;
2735 cur_p->app4 = 0;
2736 }
2737
2738 lp->tx_bd_ci = 0;
2739 lp->tx_bd_tail = 0;
2740 lp->rx_bd_ci = 0;
2741
84b9ccc0 2742 axienet_dma_start(lp);
8a3b7a25
DB
2743
2744 axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
2745 axienet_status &= ~XAE_RCW1_RX_MASK;
2746 axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
2747
2748 axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
2749 if (axienet_status & XAE_INT_RXRJECT_MASK)
2750 axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
522856ce
RH
2751 axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
2752 XAE_INT_RECV_ERROR_MASK : 0);
8a3b7a25
DB
2753 axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
2754
2755 /* Sync default options with HW but leave receiver and
850a7503
MS
2756 * transmitter disabled.
2757 */
8a3b7a25
DB
2758 axienet_setoptions(ndev, lp->options &
2759 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
2760 axienet_set_mac_address(ndev, NULL);
2761 axienet_set_multicast_list(ndev);
9e2bc267
RH
2762 napi_enable(&lp->napi_rx);
2763 napi_enable(&lp->napi_tx);
799a8295 2764 axienet_setoptions(ndev, lp->options);
8a3b7a25
DB
2765}
2766
2767/**
2be58620 2768 * axienet_probe - Axi Ethernet probe function.
95219aa5 2769 * @pdev: Pointer to platform device structure.
8a3b7a25 2770 *
b0d081c5 2771 * Return: 0, on success
8a3b7a25
DB
2772 * Non-zero error value on failure.
2773 *
2774 * This is the probe routine for Axi Ethernet driver. This is called before
2775 * any other driver routines are invoked. It allocates and sets up the Ethernet
 2776 * device, parses the device tree to populate the fields of
 2777 * axienet_local, and registers the Ethernet device.
2778 */
2be58620 2779static int axienet_probe(struct platform_device *pdev)
8a3b7a25 2780{
8495659b 2781 int ret;
8a3b7a25
DB
2782 struct device_node *np;
2783 struct axienet_local *lp;
2784 struct net_device *ndev;
28ef9ebd 2785 struct resource *ethres;
83216e39 2786 u8 mac_addr[ETH_ALEN];
5fff0151 2787 int addr_width = 32;
8495659b 2788 u32 value;
8a3b7a25
DB
2789
2790 ndev = alloc_etherdev(sizeof(*lp));
41de8d4c 2791 if (!ndev)
8a3b7a25 2792 return -ENOMEM;
8a3b7a25 2793
95219aa5 2794 platform_set_drvdata(pdev, ndev);
8a3b7a25 2795
95219aa5 2796 SET_NETDEV_DEV(ndev, &pdev->dev);
28e24c62 2797 ndev->features = NETIF_F_SG;
8a3b7a25
DB
2798 ndev->ethtool_ops = &axienet_ethtool_ops;
2799
d894be57
JW
2800 /* MTU range: 64 - 9000 */
2801 ndev->min_mtu = 64;
2802 ndev->max_mtu = XAE_JUMBO_MTU;
2803
8a3b7a25
DB
2804 lp = netdev_priv(ndev);
2805 lp->ndev = ndev;
95219aa5 2806 lp->dev = &pdev->dev;
8a3b7a25 2807 lp->options = XAE_OPTION_DEFAULTS;
8b09ca82
RH
2808 lp->rx_bd_num = RX_BD_NUM_DEFAULT;
2809 lp->tx_bd_num = TX_BD_NUM_DEFAULT;
57baf8cc 2810
cb45a8bf
RH
2811 u64_stats_init(&lp->rx_stat_sync);
2812 u64_stats_init(&lp->tx_stat_sync);
2813
76abb5d6
SA
2814 mutex_init(&lp->stats_lock);
2815 seqcount_mutex_init(&lp->hw_stats_seqcount, &lp->stats_lock);
2816 INIT_DEFERRABLE_WORK(&lp->stats_work, axienet_refresh_stats);
2817
b11bfb9a
RH
2818 lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
2819 if (!lp->axi_clk) {
2820 /* For backward compatibility, if named AXI clock is not present,
2821 * treat the first clock specified as the AXI clock.
2822 */
2823 lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
2824 }
2825 if (IS_ERR(lp->axi_clk)) {
2826 ret = PTR_ERR(lp->axi_clk);
57baf8cc
RH
2827 goto free_netdev;
2828 }
b11bfb9a 2829 ret = clk_prepare_enable(lp->axi_clk);
57baf8cc 2830 if (ret) {
b11bfb9a 2831 dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
57baf8cc
RH
2832 goto free_netdev;
2833 }
2834
b11bfb9a
RH
2835 lp->misc_clks[0].id = "axis_clk";
2836 lp->misc_clks[1].id = "ref_clk";
2837 lp->misc_clks[2].id = "mgt_clk";
2838
2839 ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2840 if (ret)
2841 goto cleanup_clk;
2842
2843 ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2844 if (ret)
2845 goto cleanup_clk;
2846
8a3b7a25 2847 /* Map device registers */
47651c51 2848 lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
fcc028c1 2849 if (IS_ERR(lp->regs)) {
fcc028c1 2850 ret = PTR_ERR(lp->regs);
59cd4f19 2851 goto cleanup_clk;
8a3b7a25 2852 }
7fa0043d 2853 lp->regs_start = ethres->start;
46aa27df 2854
8a3b7a25
DB
2855 /* Setup checksum offload, but default to off if not specified */
2856 lp->features = 0;
2857
76abb5d6
SA
2858 if (axienet_ior(lp, XAE_ABILITY_OFFSET) & XAE_ABILITY_STATS)
2859 lp->features |= XAE_FEATURE_STATS;
2860
8495659b
ST
2861 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
2862 if (!ret) {
2863 switch (value) {
8a3b7a25 2864 case 1:
8a3b7a25 2865 lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
dd28f4c0
SA
2866 /* Can checksum any contiguous range */
2867 ndev->features |= NETIF_F_HW_CSUM;
8a3b7a25
DB
2868 break;
2869 case 2:
8a3b7a25
DB
2870 lp->features |= XAE_FEATURE_FULL_TX_CSUM;
2871 /* Can checksum TCP/UDP over IPv4. */
2872 ndev->features |= NETIF_F_IP_CSUM;
2873 break;
8a3b7a25
DB
2874 }
2875 }
8495659b
ST
2876 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
2877 if (!ret) {
2878 switch (value) {
8a3b7a25 2879 case 1:
8a3b7a25 2880 lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
06c069ff 2881 ndev->features |= NETIF_F_RXCSUM;
8a3b7a25
DB
2882 break;
2883 case 2:
8a3b7a25 2884 lp->features |= XAE_FEATURE_FULL_RX_CSUM;
06c069ff 2885 ndev->features |= NETIF_F_RXCSUM;
8a3b7a25 2886 break;
8a3b7a25
DB
2887 }
2888 }
2889 /* For supporting jumbo frames, the Axi Ethernet hardware must have
f080a8c3
ST
 2890 * a sufficiently large Rx/Tx memory; only then can the jumbo option be
 2891 * enabled and jumbo frames supported.
 2892 * Read the Rx memory size provisioned in the hardware (xlnx,rxmem) from
 2893 * the device tree; it is used to validate MTU changes.
2894 */
8495659b 2895 of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
ee06b172 2896
6c8f06bb
RH
2897 lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
2898 "xlnx,switch-x-sgmii");
2899
ee06b172
A
2900 /* Start with the proprietary, and broken phy_type */
2901 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
2902 if (!ret) {
2903 netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
2904 switch (value) {
2905 case XAE_PHY_TYPE_MII:
2906 lp->phy_mode = PHY_INTERFACE_MODE_MII;
2907 break;
2908 case XAE_PHY_TYPE_GMII:
2909 lp->phy_mode = PHY_INTERFACE_MODE_GMII;
2910 break;
2911 case XAE_PHY_TYPE_RGMII_2_0:
2912 lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
2913 break;
2914 case XAE_PHY_TYPE_SGMII:
2915 lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
2916 break;
2917 case XAE_PHY_TYPE_1000BASE_X:
2918 lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
2919 break;
2920 default:
2921 ret = -EINVAL;
59cd4f19 2922 goto cleanup_clk;
ee06b172
A
2923 }
2924 } else {
0c65b2b9
AL
2925 ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
2926 if (ret)
59cd4f19 2927 goto cleanup_clk;
ee06b172 2928 }
6c8f06bb
RH
2929 if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
2930 lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
2931 dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
2932 ret = -EINVAL;
59cd4f19 2933 goto cleanup_clk;
6c8f06bb 2934 }
8a3b7a25 2935
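	/* A "dmas" phandle selects the dmaengine path; without it the driver
	 * programs the AXI DMA registers directly (legacy path).
	 */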
5fe164fb 2936 if (!of_property_present(pdev->dev.of_node, "dmas")) {
6b1b40f7
SBNG
2937 /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
2938 np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
28ef9ebd 2939
6b1b40f7
SBNG
2940 if (np) {
2941 struct resource dmares;
2942
2943 ret = of_address_to_resource(np, 0, &dmares);
2944 if (ret) {
2945 dev_err(&pdev->dev,
2946 "unable to get DMA resource\n");
2947 of_node_put(np);
2948 goto cleanup_clk;
2949 }
2950 lp->dma_regs = devm_ioremap_resource(&pdev->dev,
2951 &dmares);
2952 lp->rx_irq = irq_of_parse_and_map(np, 1);
2953 lp->tx_irq = irq_of_parse_and_map(np, 0);
28ef9ebd 2954 of_node_put(np);
6b1b40f7
SBNG
2955 lp->eth_irq = platform_get_irq_optional(pdev, 0);
2956 } else {
2957 /* Check for these resources directly on the Ethernet node. */
2958 lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
2959 lp->rx_irq = platform_get_irq(pdev, 1);
2960 lp->tx_irq = platform_get_irq(pdev, 0);
2961 lp->eth_irq = platform_get_irq_optional(pdev, 2);
2962 }
2963 if (IS_ERR(lp->dma_regs)) {
2964 dev_err(&pdev->dev, "could not map DMA regs\n");
2965 ret = PTR_ERR(lp->dma_regs);
2966 goto cleanup_clk;
2967 }
2968 if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
2969 dev_err(&pdev->dev, "could not determine irqs\n");
2970 ret = -ENOMEM;
59cd4f19 2971 goto cleanup_clk;
28ef9ebd 2972 }
8a3b7a25 2973
6b1b40f7
SBNG
2974 /* Reset core now that clocks are enabled, prior to accessing MDIO */
2975 ret = __axienet_device_reset(lp);
2976 if (ret)
2977 goto cleanup_clk;
2978
2979 /* Autodetect the need for 64-bit DMA pointers.
2980 * When the IP is configured for a bus width bigger than 32 bits,
2981 * writing the MSB registers is mandatory, even if they are all 0.
2982 * We can detect this case by writing all 1's to one such register
2983 * and see if that sticks: when the IP is configured for 32 bits
2984 * only, those registers are RES0.
2985 * Those MSB registers were introduced in IP v7.1, which we check first.
2986 */
2987 if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
2988 void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
f1bc9fc4 2989
f735c40e 2990 iowrite32(0x0, desc);
6b1b40f7
SBNG
2991 if (ioread32(desc) == 0) { /* sanity check */
2992 iowrite32(0xffffffff, desc);
2993 if (ioread32(desc) > 0) {
2994 lp->features |= XAE_FEATURE_DMA_64BIT;
2995 addr_width = 64;
2996 dev_info(&pdev->dev,
2997 "autodetected 64-bit DMA range\n");
2998 }
2999 iowrite32(0x0, desc);
3000 }
3001 }
3002 if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
61fde511 3003 dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
6b1b40f7
SBNG
3004 ret = -EINVAL;
3005 goto cleanup_clk;
f735c40e 3006 }
f735c40e 3007
6b1b40f7
SBNG
3008 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
3009 if (ret) {
3010 dev_err(&pdev->dev, "No suitable DMA available\n");
3011 goto cleanup_clk;
3012 }
3013 netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
3014 netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
6a91b846
RSP
3015 } else {
3016 struct xilinx_vdma_config cfg;
3017 struct dma_chan *tx_chan;
3018
3019 lp->eth_irq = platform_get_irq_optional(pdev, 0);
3020 if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) {
3021 ret = lp->eth_irq;
3022 goto cleanup_clk;
3023 }
3024 tx_chan = dma_request_chan(lp->dev, "tx_chan0");
3025 if (IS_ERR(tx_chan)) {
3026 ret = PTR_ERR(tx_chan);
3027 dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
3028 goto cleanup_clk;
3029 }
3030
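		/* The Tx channel is requested here only to reset the DMA
		 * engine; it is released again below and re-acquired at open
		 * time by axienet_init_dmaengine().
		 */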
3031 cfg.reset = 1;
 3032 /* Despite the VDMA name, this config also supports resetting the DMA channel */
3033 ret = xilinx_vdma_channel_set_config(tx_chan, &cfg);
3034 if (ret < 0) {
3035 dev_err(&pdev->dev, "Reset channel failed\n");
3036 dma_release_channel(tx_chan);
3037 goto cleanup_clk;
3038 }
3039
3040 dma_release_channel(tx_chan);
3041 lp->use_dmaengine = 1;
5fff0151
AP
3042 }
3043
6a91b846
RSP
3044 if (lp->use_dmaengine)
3045 ndev->netdev_ops = &axienet_netdev_dmaengine_ops;
3046 else
3047 ndev->netdev_ops = &axienet_netdev_ops;
522856ce
RH
3048 /* Check for Ethernet core IRQ (optional) */
3049 if (lp->eth_irq <= 0)
3050 dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
3051
8a3b7a25 3052 /* Retrieve the MAC address */
83216e39
MW
3053 ret = of_get_mac_address(pdev->dev.of_node, mac_addr);
3054 if (!ret) {
3055 axienet_set_mac_address(ndev, mac_addr);
3056 } else {
3057 dev_warn(&pdev->dev, "could not find MAC address property: %d\n",
3058 ret);
3059 axienet_set_mac_address(ndev, NULL);
8a3b7a25 3060 }
8a3b7a25 3061
d048c717
SA
3062 spin_lock_init(&lp->rx_cr_lock);
3063 spin_lock_init(&lp->tx_cr_lock);
e1d27d29
SA
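	/* Adaptive Rx coalescing (DIM) is enabled by default; profile index 1
	 * corresponds to an initial count of 4 frames (see
	 * axienet_dim_coalesce_count_rx()).
	 */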
3064 INIT_WORK(&lp->rx_dim.work, axienet_rx_dim_work);
3065 lp->rx_dim_enabled = true;
3066 lp->rx_dim.profile_ix = 1;
3067 lp->rx_dma_cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp),
eb80520e
SA
3068 XAXIDMA_DFT_RX_USEC);
3069 lp->tx_dma_cr = axienet_calc_cr(lp, XAXIDMA_DFT_TX_THRESHOLD,
3070 XAXIDMA_DFT_TX_USEC);
8a3b7a25 3071
d1c4f93e
AC
3072 ret = axienet_mdio_setup(lp);
3073 if (ret)
3074 dev_warn(&pdev->dev,
3075 "error registering MDIO bus: %d\n", ret);
3076
1a025560
RH
3077 if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
3078 lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
19c7a439 3079 np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
ab3a5d4c 3080 if (!np) {
19c7a439
AC
3081 /* Deprecated: Always use "pcs-handle" for pcs_phy.
3082 * Falling back to "phy-handle" here is only for
3083 * backward compatibility with old device trees.
3084 */
3085 np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
3086 }
3087 if (!np) {
3088 dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
1a025560 3089 ret = -EINVAL;
59cd4f19 3090 goto cleanup_mdio;
1a025560 3091 }
ab3a5d4c 3092 lp->pcs_phy = of_mdio_find_device(np);
1a025560
RH
3093 if (!lp->pcs_phy) {
3094 ret = -EPROBE_DEFER;
ab3a5d4c 3095 of_node_put(np);
59cd4f19 3096 goto cleanup_mdio;
1a025560 3097 }
ab3a5d4c 3098 of_node_put(np);
7a86be6a
RKO
3099 lp->pcs.ops = &axienet_pcs_ops;
3100 lp->pcs.poll = true;
1a025560 3101 }
8a3b7a25 3102
f5203a3d
RH
3103 lp->phylink_config.dev = &ndev->dev;
3104 lp->phylink_config.type = PHYLINK_NETDEV;
a3702953 3105 lp->phylink_config.mac_managed_pm = true;
72a47e1a
RKO
3106 lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
3107 MAC_10FD | MAC_100FD | MAC_1000FD;
f5203a3d 3108
136a3fa2
RKO
3109 __set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
3110 if (lp->switch_x_sgmii) {
3111 __set_bit(PHY_INTERFACE_MODE_1000BASEX,
3112 lp->phylink_config.supported_interfaces);
3113 __set_bit(PHY_INTERFACE_MODE_SGMII,
3114 lp->phylink_config.supported_interfaces);
3115 }
3116
f5203a3d
RH
3117 lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
3118 lp->phy_mode,
3119 &axienet_phylink_ops);
3120 if (IS_ERR(lp->phylink)) {
3121 ret = PTR_ERR(lp->phylink);
3122 dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
59cd4f19 3123 goto cleanup_mdio;
f5203a3d
RH
3124 }
3125
8a3b7a25
DB
3126 ret = register_netdev(lp->ndev);
3127 if (ret) {
3128 dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
59cd4f19 3129 goto cleanup_phylink;
8a3b7a25
DB
3130 }
3131
8a3b7a25
DB
3132 return 0;
3133
59cd4f19
RH
3134cleanup_phylink:
3135 phylink_destroy(lp->phylink);
3136
3137cleanup_mdio:
3138 if (lp->pcs_phy)
3139 put_device(&lp->pcs_phy->dev);
3140 if (lp->mii_bus)
3141 axienet_mdio_teardown(lp);
59cd4f19 3142cleanup_clk:
b11bfb9a
RH
3143 clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
3144 clk_disable_unprepare(lp->axi_clk);
59cd4f19 3145
46aa27df 3146free_netdev:
8a3b7a25 3147 free_netdev(ndev);
46aa27df 3148
8a3b7a25
DB
3149 return ret;
3150}
3151
2e0ec0af 3152static void axienet_remove(struct platform_device *pdev)
8a3b7a25 3153{
95219aa5 3154 struct net_device *ndev = platform_get_drvdata(pdev);
8a3b7a25
DB
3155 struct axienet_local *lp = netdev_priv(ndev);
3156
8a3b7a25 3157 unregister_netdev(ndev);
f5203a3d
RH
3158
3159 if (lp->phylink)
3160 phylink_destroy(lp->phylink);
3161
1a025560
RH
3162 if (lp->pcs_phy)
3163 put_device(&lp->pcs_phy->dev);
3164
e7a3d116 3165 axienet_mdio_teardown(lp);
8a3b7a25 3166
b11bfb9a
RH
3167 clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
3168 clk_disable_unprepare(lp->axi_clk);
09a0354c 3169
8a3b7a25 3170 free_netdev(ndev);
8a3b7a25
DB
3171}
3172
70c50265
RH
3173static void axienet_shutdown(struct platform_device *pdev)
3174{
3175 struct net_device *ndev = platform_get_drvdata(pdev);
3176
3177 rtnl_lock();
3178 netif_device_detach(ndev);
3179
3180 if (netif_running(ndev))
3181 dev_close(ndev);
3182
3183 rtnl_unlock();
3184}
3185
a3de357b
AC
3186static int axienet_suspend(struct device *dev)
3187{
3188 struct net_device *ndev = dev_get_drvdata(dev);
3189
3190 if (!netif_running(ndev))
3191 return 0;
3192
3193 netif_device_detach(ndev);
3194
3195 rtnl_lock();
3196 axienet_stop(ndev);
3197 rtnl_unlock();
3198
3199 return 0;
3200}
3201
3202static int axienet_resume(struct device *dev)
3203{
3204 struct net_device *ndev = dev_get_drvdata(dev);
3205
3206 if (!netif_running(ndev))
3207 return 0;
3208
3209 rtnl_lock();
3210 axienet_open(ndev);
3211 rtnl_unlock();
3212
3213 netif_device_attach(ndev);
3214
3215 return 0;
3216}
3217
3218static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops,
3219 axienet_suspend, axienet_resume);
3220
2be58620
ST
3221static struct platform_driver axienet_driver = {
3222 .probe = axienet_probe,
e96321fa 3223 .remove = axienet_remove,
70c50265 3224 .shutdown = axienet_shutdown,
8a3b7a25 3225 .driver = {
8a3b7a25 3226 .name = "xilinx_axienet",
a3de357b 3227 .pm = &axienet_pm_ops,
8a3b7a25
DB
3228 .of_match_table = axienet_of_match,
3229 },
3230};
3231
2be58620 3232module_platform_driver(axienet_driver);
8a3b7a25
DB
3233
3234MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
3235MODULE_AUTHOR("Xilinx");
3236MODULE_LICENSE("GPL");