/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto. Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto. Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/if_ether.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <net/vxlan.h>

#include "xgbe.h"
#include "xgbe-common.h"

static unsigned int ecc_sec_info_threshold = 10;
static unsigned int ecc_sec_warn_threshold = 10000;
static unsigned int ecc_sec_period = 600;
static unsigned int ecc_ded_threshold = 2;
static unsigned int ecc_ded_period = 600;

#ifdef CONFIG_AMD_XGBE_HAVE_ECC
/* Only expose the ECC parameters if supported */
module_param(ecc_sec_info_threshold, uint, 0644);
MODULE_PARM_DESC(ecc_sec_info_threshold,
                 " ECC corrected error informational threshold setting");

module_param(ecc_sec_warn_threshold, uint, 0644);
MODULE_PARM_DESC(ecc_sec_warn_threshold,
                 " ECC corrected error warning threshold setting");

module_param(ecc_sec_period, uint, 0644);
MODULE_PARM_DESC(ecc_sec_period, " ECC corrected error period (in seconds)");

module_param(ecc_ded_threshold, uint, 0644);
MODULE_PARM_DESC(ecc_ded_threshold, " ECC detected error threshold setting");

module_param(ecc_ded_period, uint, 0644);
MODULE_PARM_DESC(ecc_ded_period, " ECC detected error period (in seconds)");
#endif

static int xgbe_one_poll(struct napi_struct *, int);
static int xgbe_all_poll(struct napi_struct *, int);
static void xgbe_stop(struct xgbe_prv_data *);

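/* Allocate memory on the requested NUMA node when possible, falling back
 * to a node-agnostic allocation so channel setup can still succeed on
 * systems without usable local memory.
 */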
static void *xgbe_alloc_node(size_t size, int node)
{
        void *mem;

        mem = kzalloc_node(size, GFP_KERNEL, node);
        if (!mem)
                mem = kzalloc(size, GFP_KERNEL);

        return mem;
}

static void xgbe_free_channels(struct xgbe_prv_data *pdata)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(pdata->channel); i++) {
                if (!pdata->channel[i])
                        continue;

                kfree(pdata->channel[i]->rx_ring);
                kfree(pdata->channel[i]->tx_ring);
                kfree(pdata->channel[i]);

                pdata->channel[i] = NULL;
        }

        pdata->channel_count = 0;
}

static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        struct xgbe_ring *ring;
        unsigned int count, i;
        unsigned int cpu;
        int node;

        count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
        for (i = 0; i < count; i++) {
                /* Attempt to use a CPU on the node the device is on */
                cpu = cpumask_local_spread(i, dev_to_node(pdata->dev));

                /* Set the allocation node based on the returned CPU */
                node = cpu_to_node(cpu);

                channel = xgbe_alloc_node(sizeof(*channel), node);
                if (!channel)
                        goto err_mem;
                pdata->channel[i] = channel;

                snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
                channel->pdata = pdata;
                channel->queue_index = i;
                channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
                                    (DMA_CH_INC * i);
                channel->node = node;
                cpumask_set_cpu(cpu, &channel->affinity_mask);

                if (pdata->per_channel_irq)
                        channel->dma_irq = pdata->channel_irq[i];

                if (i < pdata->tx_ring_count) {
                        ring = xgbe_alloc_node(sizeof(*ring), node);
                        if (!ring)
                                goto err_mem;

                        spin_lock_init(&ring->lock);
                        ring->node = node;

                        channel->tx_ring = ring;
                }

                if (i < pdata->rx_ring_count) {
                        ring = xgbe_alloc_node(sizeof(*ring), node);
                        if (!ring)
                                goto err_mem;

                        spin_lock_init(&ring->lock);
                        ring->node = node;

                        channel->rx_ring = ring;
                }

                netif_dbg(pdata, drv, pdata->netdev,
                          "%s: cpu=%u, node=%d\n", channel->name, cpu, node);

                netif_dbg(pdata, drv, pdata->netdev,
                          "%s: dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
                          channel->name, channel->dma_regs, channel->dma_irq,
                          channel->tx_ring, channel->rx_ring);
        }

        pdata->channel_count = count;

        return 0;

err_mem:
        xgbe_free_channels(pdata);

        return -ENOMEM;
}

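/* Ring occupancy accounting: cur and dirty are free-running indices
 * (cur advances as descriptors are used, dirty as they are reclaimed),
 * so (cur - dirty) is the number of in-flight descriptors even across
 * wrap-around. For example, with 512 descriptors, cur = 515 and
 * dirty = 510 leaves 512 - 5 = 507 descriptors available for Tx.
 */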
static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
{
        return (ring->rdesc_count - (ring->cur - ring->dirty));
}

static inline unsigned int xgbe_rx_dirty_desc(struct xgbe_ring *ring)
{
        return (ring->cur - ring->dirty);
}

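/* Stop the Tx subqueue when a request cannot be satisfied. Because
 * xmit_more batching may have deferred the doorbell write, any pending
 * descriptors are pushed to the hardware here so the ring can drain
 * and the queue can eventually be restarted.
 */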
static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
                                    struct xgbe_ring *ring, unsigned int count)
{
        struct xgbe_prv_data *pdata = channel->pdata;

        if (count > xgbe_tx_avail_desc(ring)) {
                netif_info(pdata, drv, pdata->netdev,
                           "Tx queue stopped, not enough descriptors available\n");
                netif_stop_subqueue(pdata->netdev, channel->queue_index);
                ring->tx.queue_stopped = 1;

                /* If we haven't notified the hardware because of xmit_more
                 * support, tell it now
                 */
                if (ring->tx.xmit_more)
                        pdata->hw_if.tx_start_xmit(channel, ring);

                return NETDEV_TX_BUSY;
        }

        return 0;
}

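/* Size the Rx buffer for the MTU plus Ethernet overhead, clamped to
 * [XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE] and rounded up to the alignment the
 * DMA engine requires. As a sketch, assuming a 64-byte XGBE_RX_BUF_ALIGN:
 * a 1500-byte MTU needs 1500 + 14 + 4 + 4 = 1522 bytes, which rounds up
 * to a 1536-byte buffer.
 */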
static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
{
        unsigned int rx_buf_size;

        rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
        rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);

        rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
                      ~(XGBE_RX_BUF_ALIGN - 1);

        return rx_buf_size;
}

static void xgbe_enable_rx_tx_int(struct xgbe_prv_data *pdata,
                                  struct xgbe_channel *channel)
{
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        enum xgbe_int int_id;

        if (channel->tx_ring && channel->rx_ring)
                int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
        else if (channel->tx_ring)
                int_id = XGMAC_INT_DMA_CH_SR_TI;
        else if (channel->rx_ring)
                int_id = XGMAC_INT_DMA_CH_SR_RI;
        else
                return;

        hw_if->enable_int(channel, int_id);
}

static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
        unsigned int i;

        for (i = 0; i < pdata->channel_count; i++)
                xgbe_enable_rx_tx_int(pdata, pdata->channel[i]);
}

static void xgbe_disable_rx_tx_int(struct xgbe_prv_data *pdata,
                                   struct xgbe_channel *channel)
{
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        enum xgbe_int int_id;

        if (channel->tx_ring && channel->rx_ring)
                int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
        else if (channel->tx_ring)
                int_id = XGMAC_INT_DMA_CH_SR_TI;
        else if (channel->rx_ring)
                int_id = XGMAC_INT_DMA_CH_SR_RI;
        else
                return;

        hw_if->disable_int(channel, int_id);
}

static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
        unsigned int i;

        for (i = 0; i < pdata->channel_count; i++)
                xgbe_disable_rx_tx_int(pdata, pdata->channel[i]);
}

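/* Rate-limit ECC error handling over a sliding window: corrected
 * single-bit errors (SEC) are counted against the informational and
 * warning thresholds within ecc_sec_period, while detected double-bit
 * errors (DED) use their own threshold and period. Crossing the warning
 * or DED threshold tells the caller to disable the offending ECC
 * function or stop the device.
 */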
static bool xgbe_ecc_sec(struct xgbe_prv_data *pdata, unsigned long *period,
                         unsigned int *count, const char *area)
{
        if (time_before(jiffies, *period)) {
                (*count)++;
        } else {
                *period = jiffies + (ecc_sec_period * HZ);
                *count = 1;
        }

        if (*count > ecc_sec_info_threshold)
                dev_warn_once(pdata->dev,
                              "%s ECC corrected errors exceed informational threshold\n",
                              area);

        if (*count > ecc_sec_warn_threshold) {
                dev_warn_once(pdata->dev,
                              "%s ECC corrected errors exceed warning threshold\n",
                              area);
                return true;
        }

        return false;
}

static bool xgbe_ecc_ded(struct xgbe_prv_data *pdata, unsigned long *period,
                         unsigned int *count, const char *area)
{
        if (time_before(jiffies, *period)) {
                (*count)++;
        } else {
                *period = jiffies + (ecc_ded_period * HZ);
                *count = 1;
        }

        if (*count > ecc_ded_threshold) {
                netdev_alert(pdata->netdev,
                             "%s ECC detected errors exceed threshold\n",
                             area);
                return true;
        }

        return false;
}

static void xgbe_ecc_isr_task(unsigned long data)
{
        struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
        unsigned int ecc_isr;
        bool stop = false;

        /* Mask status with only the interrupts we care about */
        ecc_isr = XP_IOREAD(pdata, XP_ECC_ISR);
        ecc_isr &= XP_IOREAD(pdata, XP_ECC_IER);
        netif_dbg(pdata, intr, pdata->netdev, "ECC_ISR=%#010x\n", ecc_isr);

        if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, TX_DED)) {
                stop |= xgbe_ecc_ded(pdata, &pdata->tx_ded_period,
                                     &pdata->tx_ded_count, "TX fifo");
        }

        if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, RX_DED)) {
                stop |= xgbe_ecc_ded(pdata, &pdata->rx_ded_period,
                                     &pdata->rx_ded_count, "RX fifo");
        }

        if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, DESC_DED)) {
                stop |= xgbe_ecc_ded(pdata, &pdata->desc_ded_period,
                                     &pdata->desc_ded_count,
                                     "descriptor cache");
        }

        if (stop) {
                pdata->hw_if.disable_ecc_ded(pdata);
                schedule_work(&pdata->stopdev_work);
                goto out;
        }

        if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, TX_SEC)) {
                if (xgbe_ecc_sec(pdata, &pdata->tx_sec_period,
                                 &pdata->tx_sec_count, "TX fifo"))
                        pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_TX);
        }

        if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, RX_SEC))
                if (xgbe_ecc_sec(pdata, &pdata->rx_sec_period,
                                 &pdata->rx_sec_count, "RX fifo"))
                        pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_RX);

        if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, DESC_SEC))
                if (xgbe_ecc_sec(pdata, &pdata->desc_sec_period,
                                 &pdata->desc_sec_count, "descriptor cache"))
                        pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_DESC);

out:
        /* Clear all ECC interrupts */
        XP_IOWRITE(pdata, XP_ECC_ISR, ecc_isr);

        /* Reissue interrupt if status is not clear */
        if (pdata->vdata->irq_reissue_support)
                XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 1);
}

static irqreturn_t xgbe_ecc_isr(int irq, void *data)
{
        struct xgbe_prv_data *pdata = data;

        if (pdata->isr_as_tasklet)
                tasklet_schedule(&pdata->tasklet_ecc);
        else
                xgbe_ecc_isr_task((unsigned long)pdata);

        return IRQ_HANDLED;
}

static void xgbe_isr_task(unsigned long data)
{
        struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_channel *channel;
        unsigned int dma_isr, dma_ch_isr;
        unsigned int mac_isr, mac_tssr, mac_mdioisr;
        unsigned int i;

        /* The DMA interrupt status register also reports MAC and MTL
         * interrupts. So for polling mode, we just need to check for
         * this register to be non-zero
         */
        dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
        if (!dma_isr)
                goto isr_done;

        netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);

        for (i = 0; i < pdata->channel_count; i++) {
                if (!(dma_isr & (1 << i)))
                        continue;

                channel = pdata->channel[i];

                dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
                netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
                          i, dma_ch_isr);

                /* The TI or RI interrupt bits may still be set even if using
                 * per channel DMA interrupts. Check to be sure those are not
                 * enabled before using the private data napi structure.
                 */
                if (!pdata->per_channel_irq &&
                    (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
                     XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) {
                        if (napi_schedule_prep(&pdata->napi)) {
                                /* Disable Tx and Rx interrupts */
                                xgbe_disable_rx_tx_ints(pdata);

                                /* Turn on polling */
                                __napi_schedule_irqoff(&pdata->napi);
                        }
                } else {
                        /* Don't clear Rx/Tx status if doing per channel DMA
                         * interrupts, these will be cleared by the ISR for
                         * per channel DMA interrupts.
                         */
                        XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, TI, 0);
                        XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, RI, 0);
                }

                if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU))
                        pdata->ext_stats.rx_buffer_unavailable++;

                /* Restart the device on a Fatal Bus Error */
                if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
                        schedule_work(&pdata->restart_work);

                /* Clear interrupt signals */
                XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
        }

        if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
                mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);

                netif_dbg(pdata, intr, pdata->netdev, "MAC_ISR=%#010x\n",
                          mac_isr);

                if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
                        hw_if->tx_mmc_int(pdata);

                if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
                        hw_if->rx_mmc_int(pdata);

                if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) {
                        mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR);

                        netif_dbg(pdata, intr, pdata->netdev,
                                  "MAC_TSSR=%#010x\n", mac_tssr);

                        if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
                                /* Read Tx Timestamp to clear interrupt */
                                pdata->tx_tstamp =
                                        hw_if->get_tx_tstamp(pdata);
                                queue_work(pdata->dev_workqueue,
                                           &pdata->tx_tstamp_work);
                        }
                }

                if (XGMAC_GET_BITS(mac_isr, MAC_ISR, SMI)) {
                        mac_mdioisr = XGMAC_IOREAD(pdata, MAC_MDIOISR);

                        netif_dbg(pdata, intr, pdata->netdev,
                                  "MAC_MDIOISR=%#010x\n", mac_mdioisr);

                        if (XGMAC_GET_BITS(mac_mdioisr, MAC_MDIOISR,
                                           SNGLCOMPINT))
                                complete(&pdata->mdio_complete);
                }
        }

isr_done:
        /* If there is not a separate AN irq, handle it here */
        if (pdata->dev_irq == pdata->an_irq)
                pdata->phy_if.an_isr(pdata);

        /* If there is not a separate ECC irq, handle it here */
        if (pdata->vdata->ecc_support && (pdata->dev_irq == pdata->ecc_irq))
                xgbe_ecc_isr_task((unsigned long)pdata);

        /* If there is not a separate I2C irq, handle it here */
        if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq))
                pdata->i2c_if.i2c_isr(pdata);

        /* Reissue interrupt if status is not clear */
        if (pdata->vdata->irq_reissue_support) {
                unsigned int reissue_mask;

                reissue_mask = 1 << 0;
                if (!pdata->per_channel_irq)
                        reissue_mask |= 0xffff << 4;

                XP_IOWRITE(pdata, XP_INT_REISSUE_EN, reissue_mask);
        }
}

static irqreturn_t xgbe_isr(int irq, void *data)
{
        struct xgbe_prv_data *pdata = data;

        if (pdata->isr_as_tasklet)
                tasklet_schedule(&pdata->tasklet_dev);
        else
                xgbe_isr_task((unsigned long)pdata);

        return IRQ_HANDLED;
}

static irqreturn_t xgbe_dma_isr(int irq, void *data)
{
        struct xgbe_channel *channel = data;
        struct xgbe_prv_data *pdata = channel->pdata;
        unsigned int dma_status;

        /* Per channel DMA interrupts are enabled, so we use the per
         * channel napi structure and not the private data napi structure
         */
        if (napi_schedule_prep(&channel->napi)) {
                /* Disable Tx and Rx interrupts */
                if (pdata->channel_irq_mode)
                        xgbe_disable_rx_tx_int(pdata, channel);
                else
                        disable_irq_nosync(channel->dma_irq);

                /* Turn on polling */
                __napi_schedule_irqoff(&channel->napi);
        }

        /* Clear Tx/Rx signals */
        dma_status = 0;
        XGMAC_SET_BITS(dma_status, DMA_CH_SR, TI, 1);
        XGMAC_SET_BITS(dma_status, DMA_CH_SR, RI, 1);
        XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_status);

        return IRQ_HANDLED;
}

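/* Per-channel Tx timer, armed by the transmit path (outside this file)
 * when interrupt coalescing defers the Tx completion interrupt: fall
 * back to NAPI polling so completed descriptors are still reclaimed
 * promptly.
 */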
static void xgbe_tx_timer(struct timer_list *t)
{
        struct xgbe_channel *channel = from_timer(channel, t, tx_timer);
        struct xgbe_prv_data *pdata = channel->pdata;
        struct napi_struct *napi;

        DBGPR("-->xgbe_tx_timer\n");

        napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

        if (napi_schedule_prep(napi)) {
                /* Disable Tx and Rx interrupts */
                if (pdata->per_channel_irq) {
                        if (pdata->channel_irq_mode)
                                xgbe_disable_rx_tx_int(pdata, channel);
                        else
                                disable_irq_nosync(channel->dma_irq);
                } else {
                        xgbe_disable_rx_tx_ints(pdata);
                }

                /* Turn on polling */
                __napi_schedule(napi);
        }

        channel->tx_timer_active = 0;

        DBGPR("<--xgbe_tx_timer\n");
}

static void xgbe_service(struct work_struct *work)
{
        struct xgbe_prv_data *pdata = container_of(work,
                                                   struct xgbe_prv_data,
                                                   service_work);

        pdata->phy_if.phy_status(pdata);
}

static void xgbe_service_timer(struct timer_list *t)
{
        struct xgbe_prv_data *pdata = from_timer(pdata, t, service_timer);

        queue_work(pdata->dev_workqueue, &pdata->service_work);

        mod_timer(&pdata->service_timer, jiffies + HZ);
}

static void xgbe_init_timers(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        timer_setup(&pdata->service_timer, xgbe_service_timer, 0);

        for (i = 0; i < pdata->channel_count; i++) {
                channel = pdata->channel[i];
                if (!channel->tx_ring)
                        break;

                timer_setup(&channel->tx_timer, xgbe_tx_timer, 0);
        }
}

static void xgbe_start_timers(struct xgbe_prv_data *pdata)
{
        mod_timer(&pdata->service_timer, jiffies + HZ);
}

static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        del_timer_sync(&pdata->service_timer);

        for (i = 0; i < pdata->channel_count; i++) {
                channel = pdata->channel[i];
                if (!channel->tx_ring)
                        break;

                del_timer_sync(&channel->tx_timer);
        }
}

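/* Snapshot the MAC hardware feature registers (MAC_HWF0R/1R/2R) and
 * decode them into hw_feat so the rest of the driver can test plain
 * booleans and counts instead of raw register fields.
 */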
void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
{
        unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
        struct xgbe_hw_features *hw_feat = &pdata->hw_feat;

        mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
        mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
        mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);

        memset(hw_feat, 0, sizeof(*hw_feat));

        hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);

        /* Hardware feature register 0 */
        hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
        hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
        hw_feat->sma = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
        hw_feat->rwk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
        hw_feat->mgk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
        hw_feat->mmc = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
        hw_feat->aoe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
        hw_feat->ts = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
        hw_feat->eee = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
        hw_feat->tx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
        hw_feat->rx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
        hw_feat->addn_mac = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
                                           ADDMACADRSEL);
        hw_feat->ts_src = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
        hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);
        hw_feat->vxn = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VXN);

        /* Hardware feature register 1 */
        hw_feat->rx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
                                               RXFIFOSIZE);
        hw_feat->tx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
                                               TXFIFOSIZE);
        hw_feat->adv_ts_hi = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADVTHWORD);
        hw_feat->dma_width = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
        hw_feat->dcb = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
        hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
        hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
        hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
        hw_feat->rss = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
        hw_feat->tc_cnt = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
        hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
                                                  HASHTBLSZ);
        hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
                                                  L3L4FNUM);

        /* Hardware feature register 2 */
        hw_feat->rx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
        hw_feat->tx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
        hw_feat->rx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
        hw_feat->tx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
        hw_feat->pps_out_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
        hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);

        /* Translate the Hash Table size into actual number */
        switch (hw_feat->hash_table_size) {
        case 0:
                break;
        case 1:
                hw_feat->hash_table_size = 64;
                break;
        case 2:
                hw_feat->hash_table_size = 128;
                break;
        case 3:
                hw_feat->hash_table_size = 256;
                break;
        }

        /* Translate the address width setting into actual number */
        switch (hw_feat->dma_width) {
        case 0:
                hw_feat->dma_width = 32;
                break;
        case 1:
                hw_feat->dma_width = 40;
                break;
        case 2:
                hw_feat->dma_width = 48;
                break;
        default:
                hw_feat->dma_width = 32;
        }

        /* The Queue, Channel and TC counts are zero based so increment them
         * to get the actual number
         */
        hw_feat->rx_q_cnt++;
        hw_feat->tx_q_cnt++;
        hw_feat->rx_ch_cnt++;
        hw_feat->tx_ch_cnt++;
        hw_feat->tc_cnt++;

        /* Translate the fifo sizes into actual numbers */
        hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
        hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);

        if (netif_msg_probe(pdata)) {
                dev_dbg(pdata->dev, "Hardware features:\n");

                /* Hardware feature register 0 */
                dev_dbg(pdata->dev, " 1GbE support : %s\n",
                        hw_feat->gmii ? "yes" : "no");
                dev_dbg(pdata->dev, " VLAN hash filter : %s\n",
                        hw_feat->vlhash ? "yes" : "no");
                dev_dbg(pdata->dev, " MDIO interface : %s\n",
                        hw_feat->sma ? "yes" : "no");
                dev_dbg(pdata->dev, " Wake-up packet support : %s\n",
                        hw_feat->rwk ? "yes" : "no");
                dev_dbg(pdata->dev, " Magic packet support : %s\n",
                        hw_feat->mgk ? "yes" : "no");
                dev_dbg(pdata->dev, " Management counters : %s\n",
                        hw_feat->mmc ? "yes" : "no");
                dev_dbg(pdata->dev, " ARP offload : %s\n",
                        hw_feat->aoe ? "yes" : "no");
                dev_dbg(pdata->dev, " IEEE 1588-2008 Timestamp : %s\n",
                        hw_feat->ts ? "yes" : "no");
                dev_dbg(pdata->dev, " Energy Efficient Ethernet : %s\n",
                        hw_feat->eee ? "yes" : "no");
                dev_dbg(pdata->dev, " TX checksum offload : %s\n",
                        hw_feat->tx_coe ? "yes" : "no");
                dev_dbg(pdata->dev, " RX checksum offload : %s\n",
                        hw_feat->rx_coe ? "yes" : "no");
                dev_dbg(pdata->dev, " Additional MAC addresses : %u\n",
                        hw_feat->addn_mac);
                dev_dbg(pdata->dev, " Timestamp source : %s\n",
                        (hw_feat->ts_src == 1) ? "internal" :
                        (hw_feat->ts_src == 2) ? "external" :
                        (hw_feat->ts_src == 3) ? "internal/external" : "n/a");
                dev_dbg(pdata->dev, " SA/VLAN insertion : %s\n",
                        hw_feat->sa_vlan_ins ? "yes" : "no");
                dev_dbg(pdata->dev, " VXLAN/NVGRE support : %s\n",
                        hw_feat->vxn ? "yes" : "no");

                /* Hardware feature register 1 */
                dev_dbg(pdata->dev, " RX fifo size : %u\n",
                        hw_feat->rx_fifo_size);
                dev_dbg(pdata->dev, " TX fifo size : %u\n",
                        hw_feat->tx_fifo_size);
                dev_dbg(pdata->dev, " IEEE 1588 high word : %s\n",
                        hw_feat->adv_ts_hi ? "yes" : "no");
                dev_dbg(pdata->dev, " DMA width : %u\n",
                        hw_feat->dma_width);
                dev_dbg(pdata->dev, " Data Center Bridging : %s\n",
                        hw_feat->dcb ? "yes" : "no");
                dev_dbg(pdata->dev, " Split header : %s\n",
                        hw_feat->sph ? "yes" : "no");
                dev_dbg(pdata->dev, " TCP Segmentation Offload : %s\n",
                        hw_feat->tso ? "yes" : "no");
                dev_dbg(pdata->dev, " Debug memory interface : %s\n",
                        hw_feat->dma_debug ? "yes" : "no");
                dev_dbg(pdata->dev, " Receive Side Scaling : %s\n",
                        hw_feat->rss ? "yes" : "no");
                dev_dbg(pdata->dev, " Traffic Class count : %u\n",
                        hw_feat->tc_cnt);
                dev_dbg(pdata->dev, " Hash table size : %u\n",
                        hw_feat->hash_table_size);
                dev_dbg(pdata->dev, " L3/L4 Filters : %u\n",
                        hw_feat->l3l4_filter_num);

                /* Hardware feature register 2 */
                dev_dbg(pdata->dev, " RX queue count : %u\n",
                        hw_feat->rx_q_cnt);
                dev_dbg(pdata->dev, " TX queue count : %u\n",
                        hw_feat->tx_q_cnt);
                dev_dbg(pdata->dev, " RX DMA channel count : %u\n",
                        hw_feat->rx_ch_cnt);
                dev_dbg(pdata->dev, " TX DMA channel count : %u\n",
                        hw_feat->tx_ch_cnt);
                dev_dbg(pdata->dev, " PPS outputs : %u\n",
                        hw_feat->pps_out_num);
                dev_dbg(pdata->dev, " Auxiliary snapshot inputs : %u\n",
                        hw_feat->aux_snap_num);
        }
}

static void xgbe_disable_vxlan_offloads(struct xgbe_prv_data *pdata)
{
        struct net_device *netdev = pdata->netdev;

        if (!pdata->vxlan_offloads_set)
                return;

        netdev_info(netdev, "disabling VXLAN offloads\n");

        netdev->hw_enc_features &= ~(NETIF_F_SG |
                                     NETIF_F_IP_CSUM |
                                     NETIF_F_IPV6_CSUM |
                                     NETIF_F_RXCSUM |
                                     NETIF_F_TSO |
                                     NETIF_F_TSO6 |
                                     NETIF_F_GRO |
                                     NETIF_F_GSO_UDP_TUNNEL |
                                     NETIF_F_GSO_UDP_TUNNEL_CSUM);

        netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL |
                              NETIF_F_GSO_UDP_TUNNEL_CSUM);

        pdata->vxlan_offloads_set = 0;
}

static void xgbe_disable_vxlan_hw(struct xgbe_prv_data *pdata)
{
        if (!pdata->vxlan_port_set)
                return;

        pdata->hw_if.disable_vxlan(pdata);

        pdata->vxlan_port_set = 0;
        pdata->vxlan_port = 0;
}

static void xgbe_disable_vxlan_accel(struct xgbe_prv_data *pdata)
{
        xgbe_disable_vxlan_offloads(pdata);

        xgbe_disable_vxlan_hw(pdata);
}

static void xgbe_enable_vxlan_offloads(struct xgbe_prv_data *pdata)
{
        struct net_device *netdev = pdata->netdev;

        if (pdata->vxlan_offloads_set)
                return;

        netdev_info(netdev, "enabling VXLAN offloads\n");

        netdev->hw_enc_features |= NETIF_F_SG |
                                   NETIF_F_IP_CSUM |
                                   NETIF_F_IPV6_CSUM |
                                   NETIF_F_RXCSUM |
                                   NETIF_F_TSO |
                                   NETIF_F_TSO6 |
                                   NETIF_F_GRO |
                                   pdata->vxlan_features;

        netdev->features |= pdata->vxlan_features;

        pdata->vxlan_offloads_set = 1;
}

static void xgbe_enable_vxlan_hw(struct xgbe_prv_data *pdata)
{
        struct xgbe_vxlan_data *vdata;

        if (pdata->vxlan_port_set)
                return;

        if (list_empty(&pdata->vxlan_ports))
                return;

        vdata = list_first_entry(&pdata->vxlan_ports,
                                 struct xgbe_vxlan_data, list);

        pdata->vxlan_port_set = 1;
        pdata->vxlan_port = be16_to_cpu(vdata->port);

        pdata->hw_if.enable_vxlan(pdata);
}

static void xgbe_enable_vxlan_accel(struct xgbe_prv_data *pdata)
{
        /* VXLAN acceleration desired? */
        if (!pdata->vxlan_features)
                return;

        /* VXLAN acceleration possible? */
        if (pdata->vxlan_force_disable)
                return;

        xgbe_enable_vxlan_hw(pdata);

        xgbe_enable_vxlan_offloads(pdata);
}

static void xgbe_reset_vxlan_accel(struct xgbe_prv_data *pdata)
{
        xgbe_disable_vxlan_hw(pdata);

        if (pdata->vxlan_features)
                xgbe_enable_vxlan_offloads(pdata);

        pdata->vxlan_force_disable = 0;
}

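/* The add/del flags distinguish full setup/teardown (register or
 * unregister the NAPI instances, as done from xgbe_start/xgbe_stop)
 * from the lighter enable/disable used by the power management paths,
 * which must not re-register NAPI contexts that are still registered.
 */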
static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
{
        struct xgbe_channel *channel;
        unsigned int i;

        if (pdata->per_channel_irq) {
                for (i = 0; i < pdata->channel_count; i++) {
                        channel = pdata->channel[i];
                        if (add)
                                netif_napi_add(pdata->netdev, &channel->napi,
                                               xgbe_one_poll, NAPI_POLL_WEIGHT);

                        napi_enable(&channel->napi);
                }
        } else {
                if (add)
                        netif_napi_add(pdata->netdev, &pdata->napi,
                                       xgbe_all_poll, NAPI_POLL_WEIGHT);

                napi_enable(&pdata->napi);
        }
}

static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
{
        struct xgbe_channel *channel;
        unsigned int i;

        if (pdata->per_channel_irq) {
                for (i = 0; i < pdata->channel_count; i++) {
                        channel = pdata->channel[i];
                        napi_disable(&channel->napi);

                        if (del)
                                netif_napi_del(&channel->napi);
                }
        } else {
                napi_disable(&pdata->napi);

                if (del)
                        netif_napi_del(&pdata->napi);
        }
}

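/* Request the device, ECC and per-channel DMA interrupts. Depending on
 * the platform, the bottom halves either run as tasklets
 * (isr_as_tasklet) or are invoked directly from the hard interrupt
 * handlers; the error paths unwind any interrupts already requested.
 */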
static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        struct net_device *netdev = pdata->netdev;
        unsigned int i;
        int ret;

        tasklet_init(&pdata->tasklet_dev, xgbe_isr_task, (unsigned long)pdata);
        tasklet_init(&pdata->tasklet_ecc, xgbe_ecc_isr_task,
                     (unsigned long)pdata);

        ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
                               netdev_name(netdev), pdata);
        if (ret) {
                netdev_alert(netdev, "error requesting irq %d\n",
                             pdata->dev_irq);
                return ret;
        }

        if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq)) {
                ret = devm_request_irq(pdata->dev, pdata->ecc_irq, xgbe_ecc_isr,
                                       0, pdata->ecc_name, pdata);
                if (ret) {
                        netdev_alert(netdev, "error requesting ecc irq %d\n",
                                     pdata->ecc_irq);
                        goto err_dev_irq;
                }
        }

        if (!pdata->per_channel_irq)
                return 0;

        for (i = 0; i < pdata->channel_count; i++) {
                channel = pdata->channel[i];
                snprintf(channel->dma_irq_name,
                         sizeof(channel->dma_irq_name) - 1,
                         "%s-TxRx-%u", netdev_name(netdev),
                         channel->queue_index);

                ret = devm_request_irq(pdata->dev, channel->dma_irq,
                                       xgbe_dma_isr, 0,
                                       channel->dma_irq_name, channel);
                if (ret) {
                        netdev_alert(netdev, "error requesting irq %d\n",
                                     channel->dma_irq);
                        goto err_dma_irq;
                }

                irq_set_affinity_hint(channel->dma_irq,
                                      &channel->affinity_mask);
        }

        return 0;

err_dma_irq:
        /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
        for (i--; i < pdata->channel_count; i--) {
                channel = pdata->channel[i];

                irq_set_affinity_hint(channel->dma_irq, NULL);
                devm_free_irq(pdata->dev, channel->dma_irq, channel);
        }

        if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
                devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);

err_dev_irq:
        devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

        return ret;
}

static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

        if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
                devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);

        if (!pdata->per_channel_irq)
                return;

        for (i = 0; i < pdata->channel_count; i++) {
                channel = pdata->channel[i];

                irq_set_affinity_hint(channel->dma_irq, NULL);
                devm_free_irq(pdata->dev, channel->dma_irq, channel);
        }
}

void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
{
        struct xgbe_hw_if *hw_if = &pdata->hw_if;

        DBGPR("-->xgbe_init_tx_coalesce\n");

        pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
        pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;

        hw_if->config_tx_coalesce(pdata);

        DBGPR("<--xgbe_init_tx_coalesce\n");
}

void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
{
        struct xgbe_hw_if *hw_if = &pdata->hw_if;

        DBGPR("-->xgbe_init_rx_coalesce\n");

        pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
        pdata->rx_usecs = XGMAC_INIT_DMA_RX_USECS;
        pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;

        hw_if->config_rx_coalesce(pdata);

        DBGPR("<--xgbe_init_rx_coalesce\n");
}

static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
{
        struct xgbe_desc_if *desc_if = &pdata->desc_if;
        struct xgbe_ring *ring;
        struct xgbe_ring_data *rdata;
        unsigned int i, j;

        DBGPR("-->xgbe_free_tx_data\n");

        for (i = 0; i < pdata->channel_count; i++) {
                ring = pdata->channel[i]->tx_ring;
                if (!ring)
                        break;

                for (j = 0; j < ring->rdesc_count; j++) {
                        rdata = XGBE_GET_DESC_DATA(ring, j);
                        desc_if->unmap_rdata(pdata, rdata);
                }
        }

        DBGPR("<--xgbe_free_tx_data\n");
}

static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
{
        struct xgbe_desc_if *desc_if = &pdata->desc_if;
        struct xgbe_ring *ring;
        struct xgbe_ring_data *rdata;
        unsigned int i, j;

        DBGPR("-->xgbe_free_rx_data\n");

        for (i = 0; i < pdata->channel_count; i++) {
                ring = pdata->channel[i]->rx_ring;
                if (!ring)
                        break;

                for (j = 0; j < ring->rdesc_count; j++) {
                        rdata = XGBE_GET_DESC_DATA(ring, j);
                        desc_if->unmap_rdata(pdata, rdata);
                }
        }

        DBGPR("<--xgbe_free_rx_data\n");
}

static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
{
        pdata->phy_link = -1;
        pdata->phy_speed = SPEED_UNKNOWN;

        return pdata->phy_if.phy_reset(pdata);
}

int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
{
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        unsigned long flags;

        DBGPR("-->xgbe_powerdown\n");

        if (!netif_running(netdev) ||
            (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
                netdev_alert(netdev, "Device is already powered down\n");
                DBGPR("<--xgbe_powerdown\n");
                return -EINVAL;
        }

        spin_lock_irqsave(&pdata->lock, flags);

        if (caller == XGMAC_DRIVER_CONTEXT)
                netif_device_detach(netdev);

        netif_tx_stop_all_queues(netdev);

        xgbe_stop_timers(pdata);
        flush_workqueue(pdata->dev_workqueue);

        hw_if->powerdown_tx(pdata);
        hw_if->powerdown_rx(pdata);

        xgbe_napi_disable(pdata, 0);

        pdata->power_down = 1;

        spin_unlock_irqrestore(&pdata->lock, flags);

        DBGPR("<--xgbe_powerdown\n");

        return 0;
}

int xgbe_powerup(struct net_device *netdev, unsigned int caller)
{
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        unsigned long flags;

        DBGPR("-->xgbe_powerup\n");

        if (!netif_running(netdev) ||
            (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
                netdev_alert(netdev, "Device is already powered up\n");
                DBGPR("<--xgbe_powerup\n");
                return -EINVAL;
        }

        spin_lock_irqsave(&pdata->lock, flags);

        pdata->power_down = 0;

        xgbe_napi_enable(pdata, 0);

        hw_if->powerup_tx(pdata);
        hw_if->powerup_rx(pdata);

        if (caller == XGMAC_DRIVER_CONTEXT)
                netif_device_attach(netdev);

        netif_tx_start_all_queues(netdev);

        xgbe_start_timers(pdata);

        spin_unlock_irqrestore(&pdata->lock, flags);

        DBGPR("<--xgbe_powerup\n");

        return 0;
}

static void xgbe_free_memory(struct xgbe_prv_data *pdata)
{
        struct xgbe_desc_if *desc_if = &pdata->desc_if;

        /* Free the ring descriptors and buffers */
        desc_if->free_ring_resources(pdata);

        /* Free the channel and ring structures */
        xgbe_free_channels(pdata);
}

static int xgbe_alloc_memory(struct xgbe_prv_data *pdata)
{
        struct xgbe_desc_if *desc_if = &pdata->desc_if;
        struct net_device *netdev = pdata->netdev;
        int ret;

        if (pdata->new_tx_ring_count) {
                pdata->tx_ring_count = pdata->new_tx_ring_count;
                pdata->tx_q_count = pdata->tx_ring_count;
                pdata->new_tx_ring_count = 0;
        }

        if (pdata->new_rx_ring_count) {
                pdata->rx_ring_count = pdata->new_rx_ring_count;
                pdata->new_rx_ring_count = 0;
        }

        /* Calculate the Rx buffer size before allocating rings */
        pdata->rx_buf_size = xgbe_calc_rx_buf_size(netdev, netdev->mtu);

        /* Allocate the channel and ring structures */
        ret = xgbe_alloc_channels(pdata);
        if (ret)
                return ret;

        /* Allocate the ring descriptors and buffers */
        ret = desc_if->alloc_ring_resources(pdata);
        if (ret)
                goto err_channels;

        /* Initialize the service and Tx timers */
        xgbe_init_timers(pdata);

        return 0;

err_channels:
        xgbe_free_memory(pdata);

        return ret;
}

static int xgbe_start(struct xgbe_prv_data *pdata)
{
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_phy_if *phy_if = &pdata->phy_if;
        struct net_device *netdev = pdata->netdev;
        unsigned int i;
        int ret;

        /* Set the number of queues */
        ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
        if (ret) {
                netdev_err(netdev, "error setting real tx queue count\n");
                return ret;
        }

        ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
        if (ret) {
                netdev_err(netdev, "error setting real rx queue count\n");
                return ret;
        }

        /* Set RSS lookup table data for programming */
        for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
                XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
                               i % pdata->rx_ring_count);

        ret = hw_if->init(pdata);
        if (ret)
                return ret;

        xgbe_napi_enable(pdata, 1);

        ret = xgbe_request_irqs(pdata);
        if (ret)
                goto err_napi;

        ret = phy_if->phy_start(pdata);
        if (ret)
                goto err_irqs;

        hw_if->enable_tx(pdata);
        hw_if->enable_rx(pdata);

        udp_tunnel_get_rx_info(netdev);

        netif_tx_start_all_queues(netdev);

        xgbe_start_timers(pdata);
        queue_work(pdata->dev_workqueue, &pdata->service_work);

        clear_bit(XGBE_STOPPED, &pdata->dev_state);

        return 0;

err_irqs:
        xgbe_free_irqs(pdata);

err_napi:
        xgbe_napi_disable(pdata, 1);

        hw_if->exit(pdata);

        return ret;
}

static void xgbe_stop(struct xgbe_prv_data *pdata)
{
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_phy_if *phy_if = &pdata->phy_if;
        struct xgbe_channel *channel;
        struct net_device *netdev = pdata->netdev;
        struct netdev_queue *txq;
        unsigned int i;

        DBGPR("-->xgbe_stop\n");

        if (test_bit(XGBE_STOPPED, &pdata->dev_state))
                return;

        netif_tx_stop_all_queues(netdev);

        xgbe_stop_timers(pdata);
        flush_workqueue(pdata->dev_workqueue);

        xgbe_reset_vxlan_accel(pdata);

        hw_if->disable_tx(pdata);
        hw_if->disable_rx(pdata);

        phy_if->phy_stop(pdata);

        xgbe_free_irqs(pdata);

        xgbe_napi_disable(pdata, 1);

        hw_if->exit(pdata);

        for (i = 0; i < pdata->channel_count; i++) {
                channel = pdata->channel[i];
                if (!channel->tx_ring)
                        continue;

                txq = netdev_get_tx_queue(netdev, channel->queue_index);
                netdev_tx_reset_queue(txq);
        }

        set_bit(XGBE_STOPPED, &pdata->dev_state);

        DBGPR("<--xgbe_stop\n");
}

static void xgbe_stopdev(struct work_struct *work)
{
        struct xgbe_prv_data *pdata = container_of(work,
                                                   struct xgbe_prv_data,
                                                   stopdev_work);

        rtnl_lock();

        xgbe_stop(pdata);

        xgbe_free_tx_data(pdata);
        xgbe_free_rx_data(pdata);

        rtnl_unlock();

        netdev_alert(pdata->netdev, "device stopped\n");
}

void xgbe_full_restart_dev(struct xgbe_prv_data *pdata)
{
        /* If not running, "restart" will happen on open */
        if (!netif_running(pdata->netdev))
                return;

        xgbe_stop(pdata);

        xgbe_free_memory(pdata);
        xgbe_alloc_memory(pdata);

        xgbe_start(pdata);
}

void xgbe_restart_dev(struct xgbe_prv_data *pdata)
{
        /* If not running, "restart" will happen on open */
        if (!netif_running(pdata->netdev))
                return;

        xgbe_stop(pdata);

        xgbe_free_tx_data(pdata);
        xgbe_free_rx_data(pdata);

        xgbe_start(pdata);
}

static void xgbe_restart(struct work_struct *work)
{
        struct xgbe_prv_data *pdata = container_of(work,
                                                   struct xgbe_prv_data,
                                                   restart_work);

        rtnl_lock();

        xgbe_restart_dev(pdata);

        rtnl_unlock();
}

static void xgbe_tx_tstamp(struct work_struct *work)
{
        struct xgbe_prv_data *pdata = container_of(work,
                                                   struct xgbe_prv_data,
                                                   tx_tstamp_work);
        struct skb_shared_hwtstamps hwtstamps;
        u64 nsec;
        unsigned long flags;

        spin_lock_irqsave(&pdata->tstamp_lock, flags);
        if (!pdata->tx_tstamp_skb)
                goto unlock;

        if (pdata->tx_tstamp) {
                nsec = timecounter_cyc2time(&pdata->tstamp_tc,
                                            pdata->tx_tstamp);

                memset(&hwtstamps, 0, sizeof(hwtstamps));
                hwtstamps.hwtstamp = ns_to_ktime(nsec);
                skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
        }

        dev_kfree_skb_any(pdata->tx_tstamp_skb);

        pdata->tx_tstamp_skb = NULL;

unlock:
        spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
}

static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
                                      struct ifreq *ifreq)
{
        if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
                         sizeof(pdata->tstamp_config)))
                return -EFAULT;

        return 0;
}

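/* Translate the user's hwtstamp_config into MAC_TSCR bits: tx_type
 * chooses whether Tx timestamping is enabled at all, while each
 * rx_filter case builds up the combination of version, transport and
 * message-type enables the hardware needs to capture those packets.
 * Less specific filters subsume more specific ones via fall-through.
 */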
static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
                                      struct ifreq *ifreq)
{
        struct hwtstamp_config config;
        unsigned int mac_tscr;

        if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
                return -EFAULT;

        if (config.flags)
                return -EINVAL;

        mac_tscr = 0;

        switch (config.tx_type) {
        case HWTSTAMP_TX_OFF:
                break;

        case HWTSTAMP_TX_ON:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        default:
                return -ERANGE;
        }

        switch (config.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                break;

        case HWTSTAMP_FILTER_NTP_ALL:
        case HWTSTAMP_FILTER_ALL:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        /* PTP v2, UDP, any kind of event packet */
        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
                /* Fall through - to PTP v1, UDP, any kind of event packet */
        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        /* PTP v2, UDP, Sync packet */
        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
                /* Fall through - to PTP v1, UDP, Sync packet */
        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        /* PTP v2, UDP, Delay_req packet */
        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
                /* Fall through - to PTP v1, UDP, Delay_req packet */
        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        /* 802.AS1, Ethernet, any kind of event packet */
        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        /* 802.AS1, Ethernet, Sync packet */
        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        /* 802.AS1, Ethernet, Delay_req packet */
        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        /* PTP v2/802.AS1, any layer, any kind of event packet */
        case HWTSTAMP_FILTER_PTP_V2_EVENT:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        /* PTP v2/802.AS1, any layer, Sync packet */
        case HWTSTAMP_FILTER_PTP_V2_SYNC:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        /* PTP v2/802.AS1, any layer, Delay_req packet */
        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        default:
                return -ERANGE;
        }

        pdata->hw_if.config_tstamp(pdata, mac_tscr);

        memcpy(&pdata->tstamp_config, &config, sizeof(config));

        return 0;
}

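/* Only one Tx hardware timestamp can be in flight at a time: if a
 * previous timestamp request has not completed yet, the PTP attribute
 * is dropped for this skb and only the software timestamp is taken.
 */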
static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
                                struct sk_buff *skb,
                                struct xgbe_packet_data *packet)
{
        unsigned long flags;

        if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
                spin_lock_irqsave(&pdata->tstamp_lock, flags);
                if (pdata->tx_tstamp_skb) {
                        /* Another timestamp in progress, ignore this one */
                        XGMAC_SET_BITS(packet->attributes,
                                       TX_PACKET_ATTRIBUTES, PTP, 0);
                } else {
                        pdata->tx_tstamp_skb = skb_get(skb);
                        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                }
                spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
        }

        skb_tx_timestamp(skb);
}

static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
        if (skb_vlan_tag_present(skb))
                packet->vlan_ctag = skb_vlan_tag_get(skb);
}

static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
        int ret;

        if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                            TSO_ENABLE))
                return 0;

        ret = skb_cow_head(skb, 0);
        if (ret)
                return ret;

        if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, VXLAN)) {
                packet->header_len = skb_inner_transport_offset(skb) +
                                     inner_tcp_hdrlen(skb);
                packet->tcp_header_len = inner_tcp_hdrlen(skb);
        } else {
                packet->header_len = skb_transport_offset(skb) +
                                     tcp_hdrlen(skb);
                packet->tcp_header_len = tcp_hdrlen(skb);
        }
        packet->tcp_payload_len = skb->len - packet->header_len;
        packet->mss = skb_shinfo(skb)->gso_size;

        DBGPR(" packet->header_len=%u\n", packet->header_len);
        DBGPR(" packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
              packet->tcp_header_len, packet->tcp_payload_len);
        DBGPR(" packet->mss=%u\n", packet->mss);

        /* Update the number of packets that will ultimately be transmitted
         * along with the extra bytes for each extra packet
         */
        packet->tx_packets = skb_shinfo(skb)->gso_segs;
        packet->tx_bytes += (packet->tx_packets - 1) * packet->header_len;

        return 0;
}

static bool xgbe_is_vxlan(struct xgbe_prv_data *pdata, struct sk_buff *skb)
{
        struct xgbe_vxlan_data *vdata;

        if (pdata->vxlan_force_disable)
                return false;

        if (!skb->encapsulation)
                return false;

        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return false;

        switch (skb->protocol) {
        case htons(ETH_P_IP):
                if (ip_hdr(skb)->protocol != IPPROTO_UDP)
                        return false;
                break;

        case htons(ETH_P_IPV6):
                if (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP)
                        return false;
                break;

        default:
                return false;
        }

        /* See if we have the UDP port in our list */
        list_for_each_entry(vdata, &pdata->vxlan_ports, list) {
                if ((skb->protocol == htons(ETH_P_IP)) &&
                    (vdata->sa_family == AF_INET) &&
                    (vdata->port == udp_hdr(skb)->dest))
                        return true;
                else if ((skb->protocol == htons(ETH_P_IPV6)) &&
                         (vdata->sa_family == AF_INET6) &&
                         (vdata->port == udp_hdr(skb)->dest))
                        return true;
        }

        return false;
}

static int xgbe_is_tso(struct sk_buff *skb)
{
        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return 0;

        if (!skb_is_gso(skb))
                return 0;

        DBGPR(" TSO packet to be processed\n");

        return 1;
}

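/* Estimate how many descriptors an skb will need before mapping it:
 * one context descriptor if the MSS or VLAN tag changed, one for the
 * TSO header, one per XGBE_TX_MAX_BUF_SIZE chunk of the linear data,
 * and one per chunk of each fragment. As a rough example, a TSO skb
 * with a new MSS would need the context descriptor, a header
 * descriptor and however many payload descriptors its data spans.
 */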
static void xgbe_packet_info(struct xgbe_prv_data *pdata,
			     struct xgbe_ring *ring, struct sk_buff *skb,
			     struct xgbe_packet_data *packet)
{
	skb_frag_t *frag;
	unsigned int context_desc;
	unsigned int len;
	unsigned int i;

	packet->skb = skb;

	context_desc = 0;
	packet->rdesc_count = 0;

	packet->tx_packets = 1;
	packet->tx_bytes = skb->len;

	if (xgbe_is_tso(skb)) {
		/* TSO requires an extra descriptor if mss is different */
		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
			context_desc = 1;
			packet->rdesc_count++;
		}

		/* TSO requires an extra descriptor for TSO header */
		packet->rdesc_count++;

		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       TSO_ENABLE, 1);
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       CSUM_ENABLE, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL)
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       CSUM_ENABLE, 1);

	if (xgbe_is_vxlan(pdata, skb))
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       VXLAN, 1);

	if (skb_vlan_tag_present(skb)) {
		/* VLAN requires an extra descriptor if tag is different */
		if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag) {
			/* We can share with the TSO context descriptor */
			if (!context_desc) {
				context_desc = 1;
				packet->rdesc_count++;
			}
		}

		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       VLAN_CTAG, 1);
	}

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON))
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       PTP, 1);

	for (len = skb_headlen(skb); len;) {
		packet->rdesc_count++;
		len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		for (len = skb_frag_size(frag); len; ) {
			packet->rdesc_count++;
			len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
		}
	}
}

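/*
 * ndo_open: bring-up order matters here.  Resources are acquired in the
 * order workqueues -> PHY reset -> clocks -> ring/channel memory ->
 * device start, and the error path below unwinds them in exactly the
 * reverse order so a failure at any step leaves nothing behind.
 */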
static int xgbe_open(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	/* Create the various names based on netdev name */
	snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs",
		 netdev_name(netdev));

	snprintf(pdata->ecc_name, sizeof(pdata->ecc_name) - 1, "%s-ecc",
		 netdev_name(netdev));

	snprintf(pdata->i2c_name, sizeof(pdata->i2c_name) - 1, "%s-i2c",
		 netdev_name(netdev));

	/* Create workqueues */
	pdata->dev_workqueue =
		create_singlethread_workqueue(netdev_name(netdev));
	if (!pdata->dev_workqueue) {
		netdev_err(netdev, "device workqueue creation failed\n");
		return -ENOMEM;
	}

	pdata->an_workqueue =
		create_singlethread_workqueue(pdata->an_name);
	if (!pdata->an_workqueue) {
		netdev_err(netdev, "phy workqueue creation failed\n");
		ret = -ENOMEM;
		goto err_dev_wq;
	}

	/* Reset the phy settings */
	ret = xgbe_phy_reset(pdata);
	if (ret)
		goto err_an_wq;

	/* Enable the clocks */
	ret = clk_prepare_enable(pdata->sysclk);
	if (ret) {
		netdev_alert(netdev, "dma clk_prepare_enable failed\n");
		goto err_an_wq;
	}

	ret = clk_prepare_enable(pdata->ptpclk);
	if (ret) {
		netdev_alert(netdev, "ptp clk_prepare_enable failed\n");
		goto err_sysclk;
	}

	INIT_WORK(&pdata->service_work, xgbe_service);
	INIT_WORK(&pdata->restart_work, xgbe_restart);
	INIT_WORK(&pdata->stopdev_work, xgbe_stopdev);
	INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);

	ret = xgbe_alloc_memory(pdata);
	if (ret)
		goto err_ptpclk;

	ret = xgbe_start(pdata);
	if (ret)
		goto err_mem;

	clear_bit(XGBE_DOWN, &pdata->dev_state);

	return 0;

err_mem:
	xgbe_free_memory(pdata);

err_ptpclk:
	clk_disable_unprepare(pdata->ptpclk);

err_sysclk:
	clk_disable_unprepare(pdata->sysclk);

err_an_wq:
	destroy_workqueue(pdata->an_workqueue);

err_dev_wq:
	destroy_workqueue(pdata->dev_workqueue);

	return ret;
}

static int xgbe_close(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	/* Stop the device */
	xgbe_stop(pdata);

	xgbe_free_memory(pdata);

	/* Disable the clocks */
	clk_disable_unprepare(pdata->ptpclk);
	clk_disable_unprepare(pdata->sysclk);

	flush_workqueue(pdata->an_workqueue);
	destroy_workqueue(pdata->an_workqueue);

	flush_workqueue(pdata->dev_workqueue);
	destroy_workqueue(pdata->dev_workqueue);

	set_bit(XGBE_DOWN, &pdata->dev_state);

	return 0;
}

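/*
 * ndo_start_xmit hot path: size the packet (xgbe_packet_info), make
 * sure enough descriptors are free (the queue is stopped and
 * NETDEV_TX_BUSY returned otherwise), build the TSO/VLAN metadata,
 * DMA-map the skb and only then hand it to the hardware.  Once
 * map_tx_skb() succeeds, the skb belongs to the driver and every
 * subsequent step must complete without returning busy.
 */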
static netdev_tx_t xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_packet_data *packet;
	struct netdev_queue *txq;
	netdev_tx_t ret;

	DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);

	channel = pdata->channel[skb->queue_mapping];
	txq = netdev_get_tx_queue(netdev, channel->queue_index);
	ring = channel->tx_ring;
	packet = &ring->packet_data;

	ret = NETDEV_TX_OK;

	if (skb->len == 0) {
		netif_err(pdata, tx_err, netdev,
			  "empty skb received from stack\n");
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}

	/* Calculate preliminary packet info */
	memset(packet, 0, sizeof(*packet));
	xgbe_packet_info(pdata, ring, skb, packet);

	/* Check that there are enough descriptors available */
	ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count);
	if (ret)
		goto tx_netdev_return;

	ret = xgbe_prep_tso(skb, packet);
	if (ret) {
		netif_err(pdata, tx_err, netdev,
			  "error processing TSO packet\n");
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}
	xgbe_prep_vlan(skb, packet);

	if (!desc_if->map_tx_skb(channel, skb)) {
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}

	xgbe_prep_tx_tstamp(pdata, skb, packet);

	/* Report on the actual number of bytes (to be) sent */
	netdev_tx_sent_queue(txq, packet->tx_bytes);

	/* Configure required descriptor fields for transmission */
	hw_if->dev_xmit(channel);

	if (netif_msg_pktdata(pdata))
		xgbe_print_pkt(netdev, skb, true);

	/* Stop the queue in advance if there may not be enough descriptors */
	xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);

	ret = NETDEV_TX_OK;

tx_netdev_return:
	return ret;
}

static void xgbe_set_rx_mode(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_set_rx_mode\n");

	hw_if->config_rx_mode(pdata);

	DBGPR("<--xgbe_set_rx_mode\n");
}

static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct sockaddr *saddr = addr;

	DBGPR("-->xgbe_set_mac_address\n");

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);

	hw_if->set_mac_address(pdata, netdev->dev_addr);

	DBGPR("<--xgbe_set_mac_address\n");

	return 0;
}

static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		ret = xgbe_get_hwtstamp_settings(pdata, ifreq);
		break;

	case SIOCSHWTSTAMP:
		ret = xgbe_set_hwtstamp_settings(pdata, ifreq);
		break;

	default:
		ret = -EOPNOTSUPP;
	}

	return ret;
}

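/*
 * Changing the MTU means the Rx buffers may no longer be large enough
 * for a full frame, so the new buffer size is validated and computed
 * first, and the device is then restarted to repost the Rx ring with
 * buffers of the new size.
 */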
static int xgbe_change_mtu(struct net_device *netdev, int mtu)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	DBGPR("-->xgbe_change_mtu\n");

	ret = xgbe_calc_rx_buf_size(netdev, mtu);
	if (ret < 0)
		return ret;

	pdata->rx_buf_size = ret;
	netdev->mtu = mtu;

	xgbe_restart_dev(pdata);

	DBGPR("<--xgbe_change_mtu\n");

	return 0;
}

static void xgbe_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	netdev_warn(netdev, "tx timeout, device restarting\n");
	schedule_work(&pdata->restart_work);
}

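/*
 * Statistics come from the hardware MMC counters.  There is no direct
 * rx_errors counter, so it is derived as all frames received (good and
 * bad) minus the good unicast, multicast and broadcast frames.
 */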
static void xgbe_get_stats64(struct net_device *netdev,
			     struct rtnl_link_stats64 *s)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;

	DBGPR("-->%s\n", __func__);

	pdata->hw_if.read_mmc_stats(pdata);

	s->rx_packets = pstats->rxframecount_gb;
	s->rx_bytes = pstats->rxoctetcount_gb;
	s->rx_errors = pstats->rxframecount_gb -
		       pstats->rxbroadcastframes_g -
		       pstats->rxmulticastframes_g -
		       pstats->rxunicastframes_g;
	s->multicast = pstats->rxmulticastframes_g;
	s->rx_length_errors = pstats->rxlengtherror;
	s->rx_crc_errors = pstats->rxcrcerror;
	s->rx_fifo_errors = pstats->rxfifooverflow;

	s->tx_packets = pstats->txframecount_gb;
	s->tx_bytes = pstats->txoctetcount_gb;
	s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
	s->tx_dropped = netdev->stats.tx_dropped;

	DBGPR("<--%s\n", __func__);
}

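/*
 * VLAN filtering is hash based: both ndo's below simply update the
 * active_vlans bitmap and regenerate the hardware VLAN hash table from
 * it, so adding and killing a VID are symmetric operations.
 */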
static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
				u16 vid)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->%s\n", __func__);

	set_bit(vid, pdata->active_vlans);
	hw_if->update_vlan_hash_table(pdata);

	DBGPR("<--%s\n", __func__);

	return 0;
}

static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
				 u16 vid)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->%s\n", __func__);

	clear_bit(vid, pdata->active_vlans);
	hw_if->update_vlan_hash_table(pdata);

	DBGPR("<--%s\n", __func__);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void xgbe_poll_controller(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_poll_controller\n");

	if (pdata->per_channel_irq) {
		for (i = 0; i < pdata->channel_count; i++) {
			channel = pdata->channel[i];
			xgbe_dma_isr(channel->dma_irq, channel);
		}
	} else {
		disable_irq(pdata->dev_irq);
		xgbe_isr(pdata->dev_irq, pdata);
		enable_irq(pdata->dev_irq);
	}

	DBGPR("<--xgbe_poll_controller\n");
}
#endif /* End CONFIG_NET_POLL_CONTROLLER */

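/*
 * Only mqprio in TC-count offload mode is supported.  For example
 * (assuming an interface named eth0), something like:
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 2 \
 *      map 0 0 0 0 1 1 1 1 hw 1
 *
 * lands here.  The requested number of traffic classes is bounded by
 * what the hardware advertises (hw_feat.tc_cnt).
 */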
static int xgbe_setup_tc(struct net_device *netdev, enum tc_setup_type type,
			 void *type_data)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct tc_mqprio_qopt *mqprio = type_data;
	u8 tc;

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
	tc = mqprio->num_tc;

	if (tc > pdata->hw_feat.tc_cnt)
		return -EINVAL;

	pdata->num_tcs = tc;
	pdata->hw_if.config_tc(pdata);

	return 0;
}

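/*
 * ndo_fix_features: enforce the dependencies between the VXLAN offload
 * bits before the stack commits a feature change.  For example, if a
 * user asks for tunnel checksum offload while tunnel segmentation is
 * off (e.g. "ethtool -K <iface> tx-udp_tnl-csum-segmentation on"), the
 * base NETIF_F_GSO_UDP_TUNNEL bit is forced back on rather than
 * rejecting the request.
 */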
static netdev_features_t xgbe_fix_features(struct net_device *netdev,
					   netdev_features_t features)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	netdev_features_t vxlan_base, vxlan_mask;

	vxlan_base = NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RX_UDP_TUNNEL_PORT;
	vxlan_mask = vxlan_base | NETIF_F_GSO_UDP_TUNNEL_CSUM;

	pdata->vxlan_features = features & vxlan_mask;

	/* Only fix VXLAN-related features */
	if (!pdata->vxlan_features)
		return features;

	/* If VXLAN isn't supported then clear any features.  This is
	 * needed because NETIF_F_RX_UDP_TUNNEL_PORT is set automatically
	 * whenever ndo_udp_tunnel_add is provided.
	 */
	if (!pdata->hw_feat.vxn)
		return features & ~vxlan_mask;

	/* VXLAN CSUM requires VXLAN base */
	if ((features & NETIF_F_GSO_UDP_TUNNEL_CSUM) &&
	    !(features & NETIF_F_GSO_UDP_TUNNEL)) {
		netdev_notice(netdev,
			      "forcing tx udp tunnel support\n");
		features |= NETIF_F_GSO_UDP_TUNNEL;
	}

	/* Can't do one without doing the other */
	if ((features & vxlan_base) != vxlan_base) {
		netdev_notice(netdev,
			      "forcing both tx and rx udp tunnel support\n");
		features |= vxlan_base;
	}

	if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
		if (!(features & NETIF_F_GSO_UDP_TUNNEL_CSUM)) {
			netdev_notice(netdev,
				      "forcing tx udp tunnel checksumming on\n");
			features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
		}
	} else {
		if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM) {
			netdev_notice(netdev,
				      "forcing tx udp tunnel checksumming off\n");
			features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
		}
	}

	pdata->vxlan_features = features & vxlan_mask;

	/* Adjust UDP Tunnel based on current state */
	if (pdata->vxlan_force_disable) {
		netdev_notice(netdev,
			      "VXLAN acceleration disabled, turning off udp tunnel features\n");
		features &= ~vxlan_mask;
	}

	return features;
}

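/*
 * ndo_set_features: compare each requested bit against the cached
 * pdata->netdev_features and touch the hardware only for bits that
 * actually changed.  For example, "ethtool -K <iface> rxhash off" ends
 * up here and results in a single disable_rss() call.
 */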
static int xgbe_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
	netdev_features_t udp_tunnel;
	int ret = 0;

	rxhash = pdata->netdev_features & NETIF_F_RXHASH;
	rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
	rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
	rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
	udp_tunnel = pdata->netdev_features & NETIF_F_GSO_UDP_TUNNEL;

	if ((features & NETIF_F_RXHASH) && !rxhash)
		ret = hw_if->enable_rss(pdata);
	else if (!(features & NETIF_F_RXHASH) && rxhash)
		ret = hw_if->disable_rss(pdata);
	if (ret)
		return ret;

	if ((features & NETIF_F_RXCSUM) && !rxcsum)
		hw_if->enable_rx_csum(pdata);
	else if (!(features & NETIF_F_RXCSUM) && rxcsum)
		hw_if->disable_rx_csum(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
		hw_if->enable_rx_vlan_stripping(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
		hw_if->disable_rx_vlan_stripping(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
		hw_if->enable_rx_vlan_filtering(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
		hw_if->disable_rx_vlan_filtering(pdata);

	if ((features & NETIF_F_GSO_UDP_TUNNEL) && !udp_tunnel)
		xgbe_enable_vxlan_accel(pdata);
	else if (!(features & NETIF_F_GSO_UDP_TUNNEL) && udp_tunnel)
		xgbe_disable_vxlan_accel(pdata);

	pdata->netdev_features = features;

	DBGPR("<--xgbe_set_features\n");

	return 0;
}

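/*
 * The hardware can match a single VXLAN UDP port, so every port the
 * stack announces is tracked in a list; acceleration is programmed for
 * the first entry and re-evaluated as ports come and go.  GFP_ATOMIC is
 * used because this callback may run in atomic context.
 */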
static void xgbe_udp_tunnel_add(struct net_device *netdev,
				struct udp_tunnel_info *ti)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_vxlan_data *vdata;

	if (!pdata->hw_feat.vxn)
		return;

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	pdata->vxlan_port_count++;

	netif_dbg(pdata, drv, netdev,
		  "adding VXLAN tunnel, family=%hx/port=%hx\n",
		  ti->sa_family, be16_to_cpu(ti->port));

	if (pdata->vxlan_force_disable)
		return;

	vdata = kzalloc(sizeof(*vdata), GFP_ATOMIC);
	if (!vdata) {
		/* Can no longer properly track VXLAN ports */
		pdata->vxlan_force_disable = 1;
		netif_dbg(pdata, drv, netdev,
			  "internal error, disabling VXLAN accelerations\n");

		xgbe_disable_vxlan_accel(pdata);

		return;
	}
	vdata->sa_family = ti->sa_family;
	vdata->port = ti->port;

	list_add_tail(&vdata->list, &pdata->vxlan_ports);

	/* First port added? */
	if (pdata->vxlan_port_count == 1) {
		xgbe_enable_vxlan_accel(pdata);

		return;
	}
}

static void xgbe_udp_tunnel_del(struct net_device *netdev,
				struct udp_tunnel_info *ti)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_vxlan_data *vdata;

	if (!pdata->hw_feat.vxn)
		return;

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	netif_dbg(pdata, drv, netdev,
		  "deleting VXLAN tunnel, family=%hx/port=%hx\n",
		  ti->sa_family, be16_to_cpu(ti->port));

	/* Don't need safe version since loop terminates with deletion */
	list_for_each_entry(vdata, &pdata->vxlan_ports, list) {
		if (vdata->sa_family != ti->sa_family)
			continue;

		if (vdata->port != ti->port)
			continue;

		list_del(&vdata->list);
		kfree(vdata);

		break;
	}

	pdata->vxlan_port_count--;
	if (!pdata->vxlan_port_count) {
		xgbe_reset_vxlan_accel(pdata);

		return;
	}

	if (pdata->vxlan_force_disable)
		return;

	/* See if the VXLAN port programmed into the hardware needs
	 * to be changed
	 */
	vdata = list_first_entry(&pdata->vxlan_ports,
				 struct xgbe_vxlan_data, list);
	if (pdata->vxlan_port == be16_to_cpu(vdata->port))
		return;

	pdata->vxlan_port = be16_to_cpu(vdata->port);
	pdata->hw_if.set_vxlan_id(pdata);
}

static netdev_features_t xgbe_features_check(struct sk_buff *skb,
					     struct net_device *netdev,
					     netdev_features_t features)
{
	features = vlan_features_check(skb, features);
	features = vxlan_features_check(skb, features);

	return features;
}

static const struct net_device_ops xgbe_netdev_ops = {
	.ndo_open		= xgbe_open,
	.ndo_stop		= xgbe_close,
	.ndo_start_xmit		= xgbe_xmit,
	.ndo_set_rx_mode	= xgbe_set_rx_mode,
	.ndo_set_mac_address	= xgbe_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= xgbe_ioctl,
	.ndo_change_mtu		= xgbe_change_mtu,
	.ndo_tx_timeout		= xgbe_tx_timeout,
	.ndo_get_stats64	= xgbe_get_stats64,
	.ndo_vlan_rx_add_vid	= xgbe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= xgbe_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= xgbe_poll_controller,
#endif
	.ndo_setup_tc		= xgbe_setup_tc,
	.ndo_fix_features	= xgbe_fix_features,
	.ndo_set_features	= xgbe_set_features,
	.ndo_udp_tunnel_add	= xgbe_udp_tunnel_add,
	.ndo_udp_tunnel_del	= xgbe_udp_tunnel_del,
	.ndo_features_check	= xgbe_features_check,
};

const struct net_device_ops *xgbe_get_netdev_ops(void)
{
	return &xgbe_netdev_ops;
}

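/*
 * Refill the Rx ring between ring->dirty and ring->cur: unmap the old
 * buffer, map a fresh one, rewrite the descriptor, then publish the new
 * tail via the DMA_CH_RDTR_LO register.  The wmb() guarantees the
 * descriptor writes are visible to the device before the tail update.
 */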
static void xgbe_rx_refresh(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;

	while (ring->dirty != ring->cur) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);

		/* Reset rdata values */
		desc_if->unmap_rdata(pdata, rdata);

		if (desc_if->map_rx_buffer(pdata, ring, rdata))
			break;

		hw_if->rx_desc_reset(pdata, rdata, ring->dirty);

		ring->dirty++;
	}

	/* Make sure everything is written before the register write */
	wmb();

	/* Update the Rx Tail Pointer Register with address of
	 * the last cleaned entry
	 */
	rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
			  lower_32_bits(rdata->rdesc_dma));
}

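/*
 * Build an skb for a received packet: the (small) header buffer is
 * always copied into the skb's linear area, while any remaining payload
 * is attached later as a page fragment in xgbe_rx_poll(), keeping the
 * copy cost bounded by the header DMA buffer size.
 */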
static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
				       struct napi_struct *napi,
				       struct xgbe_ring_data *rdata,
				       unsigned int len)
{
	struct sk_buff *skb;
	u8 *packet;

	skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
	if (!skb)
		return NULL;

	/* Pull in the header buffer which may contain just the header
	 * or the header plus data
	 */
	dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
				      rdata->rx.hdr.dma_off,
				      rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);

	packet = page_address(rdata->rx.hdr.pa.pages) +
		 rdata->rx.hdr.pa.pages_offset;
	skb_copy_to_linear_data(skb, packet, len);
	skb_put(skb, len);

	return skb;
}

static unsigned int xgbe_rx_buf1_len(struct xgbe_ring_data *rdata,
				     struct xgbe_packet_data *packet)
{
	/* Always zero if not the first descriptor */
	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST))
		return 0;

	/* First descriptor with split header, return header length */
	if (rdata->rx.hdr_len)
		return rdata->rx.hdr_len;

	/* First descriptor but not the last descriptor and no split header,
	 * so the full buffer was used
	 */
	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
		return rdata->rx.hdr.dma_len;

	/* First descriptor and last descriptor and no split header, so
	 * calculate how much of the buffer was used
	 */
	return min_t(unsigned int, rdata->rx.hdr.dma_len, rdata->rx.len);
}

static unsigned int xgbe_rx_buf2_len(struct xgbe_ring_data *rdata,
				     struct xgbe_packet_data *packet,
				     unsigned int len)
{
	/* Always the full buffer if not the last descriptor */
	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
		return rdata->rx.buf.dma_len;

	/* Last descriptor so calculate how much of the buffer was used
	 * for the last bit of data
	 */
	return rdata->rx.len - len;
}

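/*
 * Reclaim completed Tx descriptors.  The dma_rmb() pairs with the
 * device's write of the OWN bit so descriptor fields are not read
 * before ownership has actually been returned.  Completed work is
 * reported to BQL via netdev_tx_completed_queue(), and a stopped queue
 * is only woken once more than XGBE_TX_DESC_MIN_FREE descriptors are
 * free, providing hysteresis against wake/stop thrashing.
 */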
static int xgbe_tx_poll(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct net_device *netdev = pdata->netdev;
	struct netdev_queue *txq;
	int processed = 0;
	unsigned int tx_packets = 0, tx_bytes = 0;
	unsigned int cur;

	DBGPR("-->xgbe_tx_poll\n");

	/* Nothing to do if there isn't a Tx ring for this channel */
	if (!ring)
		return 0;

	cur = ring->cur;

	/* Be sure we get ring->cur before accessing descriptor data */
	smp_rmb();

	txq = netdev_get_tx_queue(netdev, channel->queue_index);

	while ((processed < XGBE_TX_DESC_MAX_PROC) &&
	       (ring->dirty != cur)) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
		rdesc = rdata->rdesc;

		if (!hw_if->tx_complete(rdesc))
			break;

		/* Make sure descriptor fields are read after reading
		 * the OWN bit
		 */
		dma_rmb();

		if (netif_msg_tx_done(pdata))
			xgbe_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);

		if (hw_if->is_last_desc(rdesc)) {
			tx_packets += rdata->tx.packets;
			tx_bytes += rdata->tx.bytes;
		}

		/* Free the SKB and reset the descriptor for re-use */
		desc_if->unmap_rdata(pdata, rdata);
		hw_if->tx_desc_reset(rdata);

		processed++;
		ring->dirty++;
	}

	if (!processed)
		return 0;

	netdev_tx_completed_queue(txq, tx_packets, tx_bytes);

	if ((ring->tx.queue_stopped == 1) &&
	    (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
		ring->tx.queue_stopped = 0;
		netif_tx_wake_queue(txq);
	}

	DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);

	return processed;
}

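/*
 * A single packet can span several descriptors (split header, large
 * buffers, context descriptors), and the NAPI budget can run out midway
 * through one.  The skb/len/error triple is therefore saved in the ring
 * data on exit and restored on the next poll, which is what the
 * state_saved handling below implements.
 */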
static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	struct net_device *netdev = pdata->netdev;
	struct napi_struct *napi;
	struct sk_buff *skb;
	struct skb_shared_hwtstamps *hwtstamps;
	unsigned int last, error, context_next, context;
	unsigned int len, buf1_len, buf2_len, max_len;
	unsigned int received = 0;
	int packet_count = 0;

	DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);

	/* Nothing to do if there isn't a Rx ring for this channel */
	if (!ring)
		return 0;

	last = 0;
	context_next = 0;

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	packet = &ring->packet_data;
	while (packet_count < budget) {
		DBGPR("  cur = %d\n", ring->cur);

		/* First time in loop see if we need to restore state */
		if (!received && rdata->state_saved) {
			skb = rdata->state.skb;
			error = rdata->state.error;
			len = rdata->state.len;
		} else {
			memset(packet, 0, sizeof(*packet));
			skb = NULL;
			error = 0;
			len = 0;
		}

read_again:
		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);

		if (xgbe_rx_dirty_desc(ring) > (XGBE_RX_DESC_CNT >> 3))
			xgbe_rx_refresh(channel);

		if (hw_if->dev_read(channel))
			break;

		received++;
		ring->cur++;

		last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
				      LAST);
		context_next = XGMAC_GET_BITS(packet->attributes,
					      RX_PACKET_ATTRIBUTES,
					      CONTEXT_NEXT);
		context = XGMAC_GET_BITS(packet->attributes,
					 RX_PACKET_ATTRIBUTES,
					 CONTEXT);

		/* Earlier error, just drain the remaining data */
		if ((!last || context_next) && error)
			goto read_again;

		if (error || packet->errors) {
			if (packet->errors)
				netif_err(pdata, rx_err, netdev,
					  "error in received packet\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (!context) {
			/* Get the data length in the descriptor buffers */
			buf1_len = xgbe_rx_buf1_len(rdata, packet);
			len += buf1_len;
			buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
			len += buf2_len;

			if (!skb) {
				skb = xgbe_create_skb(pdata, napi, rdata,
						      buf1_len);
				if (!skb) {
					error = 1;
					goto skip_data;
				}
			}

			if (buf2_len) {
				dma_sync_single_range_for_cpu(pdata->dev,
							rdata->rx.buf.dma_base,
							rdata->rx.buf.dma_off,
							rdata->rx.buf.dma_len,
							DMA_FROM_DEVICE);

				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
						rdata->rx.buf.pa.pages,
						rdata->rx.buf.pa.pages_offset,
						buf2_len,
						rdata->rx.buf.dma_len);
				rdata->rx.buf.pa.pages = NULL;
			}
		}

skip_data:
		if (!last || context_next)
			goto read_again;

		if (!skb)
			goto next_packet;

		/* Be sure we don't exceed the configured MTU */
		max_len = netdev->mtu + ETH_HLEN;
		if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    (skb->protocol == htons(ETH_P_8021Q)))
			max_len += VLAN_HLEN;

		if (skb->len > max_len) {
			netif_err(pdata, rx_err, netdev,
				  "packet length exceeds configured MTU\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (netif_msg_pktdata(pdata))
			xgbe_print_pkt(netdev, skb, false);

		skb_checksum_none_assert(skb);
		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, CSUM_DONE))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, TNP)) {
			skb->encapsulation = 1;

			if (XGMAC_GET_BITS(packet->attributes,
					   RX_PACKET_ATTRIBUTES, TNPCSUM_DONE))
				skb->csum_level = 1;
		}

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, VLAN_CTAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       packet->vlan_ctag);

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, RX_TSTAMP)) {
			u64 nsec;

			nsec = timecounter_cyc2time(&pdata->tstamp_tc,
						    packet->rx_tstamp);
			hwtstamps = skb_hwtstamps(skb);
			hwtstamps->hwtstamp = ns_to_ktime(nsec);
		}

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, RSS_HASH))
			skb_set_hash(skb, packet->rss_hash,
				     packet->rss_hash_type);

		skb->dev = netdev;
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, channel->queue_index);

		napi_gro_receive(napi, skb);

next_packet:
		packet_count++;
	}

	/* Check if we need to save state before leaving */
	if (received && (!last || context_next)) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
		rdata->state_saved = 1;
		rdata->state.skb = skb;
		rdata->state.len = len;
		rdata->state.error = error;
	}

	DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count);

	return packet_count;
}

static int xgbe_one_poll(struct napi_struct *napi, int budget)
{
	struct xgbe_channel *channel = container_of(napi, struct xgbe_channel,
						    napi);
	struct xgbe_prv_data *pdata = channel->pdata;
	int processed = 0;

	DBGPR("-->xgbe_one_poll: budget=%d\n", budget);

	/* Cleanup Tx ring first */
	xgbe_tx_poll(channel);

	/* Process Rx ring next */
	processed = xgbe_rx_poll(channel, budget);

	/* If we processed everything, we are done */
	if ((processed < budget) && napi_complete_done(napi, processed)) {
		/* Enable Tx and Rx interrupts */
		if (pdata->channel_irq_mode)
			xgbe_enable_rx_tx_int(pdata, channel);
		else
			enable_irq(channel->dma_irq);
	}

	DBGPR("<--xgbe_one_poll: received = %d\n", processed);

	return processed;
}

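/*
 * Single-IRQ NAPI poll: the budget is split evenly across the Rx rings
 * and the channel loop repeats until either the budget is consumed or a
 * full pass makes no progress, so one busy ring cannot starve the
 * others.
 */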
static int xgbe_all_poll(struct napi_struct *napi, int budget)
{
	struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
						   napi);
	struct xgbe_channel *channel;
	int ring_budget;
	int processed, last_processed;
	unsigned int i;

	DBGPR("-->xgbe_all_poll: budget=%d\n", budget);

	processed = 0;
	ring_budget = budget / pdata->rx_ring_count;
	do {
		last_processed = processed;

		for (i = 0; i < pdata->channel_count; i++) {
			channel = pdata->channel[i];

			/* Cleanup Tx ring first */
			xgbe_tx_poll(channel);

			/* Process Rx ring next */
			if (ring_budget > (budget - processed))
				ring_budget = budget - processed;
			processed += xgbe_rx_poll(channel, ring_budget);
		}
	} while ((processed < budget) && (processed != last_processed));

	/* If we processed everything, we are done */
	if ((processed < budget) && napi_complete_done(napi, processed)) {
		/* Enable Tx and Rx interrupts */
		xgbe_enable_rx_tx_ints(pdata);
	}

	DBGPR("<--xgbe_all_poll: received = %d\n", processed);

	return processed;
}

void xgbe_dump_tx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
		       unsigned int idx, unsigned int count, unsigned int flag)
{
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;

	while (count--) {
		rdata = XGBE_GET_DESC_DATA(ring, idx);
		rdesc = rdata->rdesc;
		netdev_dbg(pdata->netdev,
			   "TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
			   (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
			   le32_to_cpu(rdesc->desc0),
			   le32_to_cpu(rdesc->desc1),
			   le32_to_cpu(rdesc->desc2),
			   le32_to_cpu(rdesc->desc3));
		idx++;
	}
}

void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
		       unsigned int idx)
{
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;

	rdata = XGBE_GET_DESC_DATA(ring, idx);
	rdesc = rdata->rdesc;
	netdev_dbg(pdata->netdev,
		   "RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n",
		   idx, le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
		   le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
}

void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
{
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	unsigned char buffer[128];
	unsigned int i;

	netdev_dbg(netdev, "\n************** SKB dump ****************\n");

	netdev_dbg(netdev, "%s packet of %d bytes\n",
		   (tx_rx ? "TX" : "RX"), skb->len);

	netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
	netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
	netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));

	for (i = 0; i < skb->len; i += 32) {
		unsigned int len = min(skb->len - i, 32U);

		hex_dump_to_buffer(&skb->data[i], len, 32, 1,
				   buffer, sizeof(buffer), false);
		netdev_dbg(netdev, "  %#06x: %s\n", i, buffer);
	}

	netdev_dbg(netdev, "\n************** SKB dump ****************\n");
}