/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/if_ether.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <net/vxlan.h>

#include "xgbe.h"
#include "xgbe-common.h"
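/* ECC single-error-correct (SEC) and double-error-detect (DED) rate
 * limits.  Corrected-error counts are compared against the informational
 * and warning thresholds within a sliding period; detected (uncorrectable)
 * errors above their threshold stop the device.
 */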
static unsigned int ecc_sec_info_threshold = 10;
static unsigned int ecc_sec_warn_threshold = 10000;
static unsigned int ecc_sec_period = 600;
static unsigned int ecc_ded_threshold = 2;
static unsigned int ecc_ded_period = 600;
#ifdef CONFIG_AMD_XGBE_HAVE_ECC
/* Only expose the ECC parameters if supported */
module_param(ecc_sec_info_threshold, uint, 0644);
MODULE_PARM_DESC(ecc_sec_info_threshold,
		 " ECC corrected error informational threshold setting");

module_param(ecc_sec_warn_threshold, uint, 0644);
MODULE_PARM_DESC(ecc_sec_warn_threshold,
		 " ECC corrected error warning threshold setting");

module_param(ecc_sec_period, uint, 0644);
MODULE_PARM_DESC(ecc_sec_period, " ECC corrected error period (in seconds)");

module_param(ecc_ded_threshold, uint, 0644);
MODULE_PARM_DESC(ecc_ded_threshold, " ECC detected error threshold setting");

module_param(ecc_ded_period, uint, 0644);
MODULE_PARM_DESC(ecc_ded_period, " ECC detected error period (in seconds)");
#endif
static int xgbe_one_poll(struct napi_struct *, int);
static int xgbe_all_poll(struct napi_struct *, int);
static void xgbe_stop(struct xgbe_prv_data *);
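/* Allocate zeroed memory, preferring the NUMA node the device sits on
 * and falling back to an allocation on any node if that fails.
 */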
static void *xgbe_alloc_node(size_t size, int node)
{
	void *mem;

	mem = kzalloc_node(size, GFP_KERNEL, node);
	if (!mem)
		mem = kzalloc(size, GFP_KERNEL);

	return mem;
}
static void xgbe_free_channels(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pdata->channel); i++) {
		if (!pdata->channel[i])
			continue;

		kfree(pdata->channel[i]->rx_ring);
		kfree(pdata->channel[i]->tx_ring);
		kfree(pdata->channel[i]);

		pdata->channel[i] = NULL;
	}

	pdata->channel_count = 0;
}
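/* Allocate one channel structure per Tx/Rx ring pair, placing each
 * channel's memory (and its rings) on the NUMA node of the CPU chosen
 * by cpumask_local_spread() to service that channel.
 */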
static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	unsigned int count, i;
	unsigned int cpu;
	int node;

	count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
	for (i = 0; i < count; i++) {
		/* Attempt to use a CPU on the node the device is on */
		cpu = cpumask_local_spread(i, dev_to_node(pdata->dev));

		/* Set the allocation node based on the returned CPU */
		node = cpu_to_node(cpu);

		channel = xgbe_alloc_node(sizeof(*channel), node);
		if (!channel)
			goto err_mem;
		pdata->channel[i] = channel;

		snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
		channel->pdata = pdata;
		channel->queue_index = i;
		channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
				    (DMA_CH_INC * i);
		channel->node = node;
		cpumask_set_cpu(cpu, &channel->affinity_mask);

		if (pdata->per_channel_irq)
			channel->dma_irq = pdata->channel_irq[i];

		if (i < pdata->tx_ring_count) {
			ring = xgbe_alloc_node(sizeof(*ring), node);
			if (!ring)
				goto err_mem;

			spin_lock_init(&ring->lock);
			ring->node = node;

			channel->tx_ring = ring;
		}

		if (i < pdata->rx_ring_count) {
			ring = xgbe_alloc_node(sizeof(*ring), node);
			if (!ring)
				goto err_mem;

			spin_lock_init(&ring->lock);
			ring->node = node;

			channel->rx_ring = ring;
		}

		netif_dbg(pdata, drv, pdata->netdev,
			  "%s: cpu=%u, node=%d\n", channel->name, cpu, node);

		netif_dbg(pdata, drv, pdata->netdev,
			  "%s: dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
			  channel->name, channel->dma_regs, channel->dma_irq,
			  channel->tx_ring, channel->rx_ring);
	}

	pdata->channel_count = count;

	return 0;

err_mem:
	xgbe_free_channels(pdata);

	return -ENOMEM;
}
static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
{
	return (ring->rdesc_count - (ring->cur - ring->dirty));
}
static inline unsigned int xgbe_rx_dirty_desc(struct xgbe_ring *ring)
{
	return (ring->cur - ring->dirty);
}
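/* Stop the Tx subqueue when fewer than 'count' descriptors are free;
 * kick the hardware first if a prior xmit_more batch was never flushed,
 * so a stopped queue cannot stall waiting for completions.
 */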
static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
				    struct xgbe_ring *ring, unsigned int count)
{
	struct xgbe_prv_data *pdata = channel->pdata;

	if (count > xgbe_tx_avail_desc(ring)) {
		netif_info(pdata, drv, pdata->netdev,
			   "Tx queue stopped, not enough descriptors available\n");
		netif_stop_subqueue(pdata->netdev, channel->queue_index);
		ring->tx.queue_stopped = 1;

		/* If we haven't notified the hardware because of xmit_more
		 * support, tell it now
		 */
		if (ring->tx.xmit_more)
			pdata->hw_if.tx_start_xmit(channel, ring);

		return NETDEV_TX_BUSY;
	}

	return 0;
}
static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
{
	unsigned int rx_buf_size;

	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);

	rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
		      ~(XGBE_RX_BUF_ALIGN - 1);

	return rx_buf_size;
}
static void xgbe_enable_rx_tx_int(struct xgbe_prv_data *pdata,
				  struct xgbe_channel *channel)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	enum xgbe_int int_id;

	if (channel->tx_ring && channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
	else if (channel->tx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI;
	else if (channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_RI;
	else
		return;

	hw_if->enable_int(channel, int_id);
}
static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++)
		xgbe_enable_rx_tx_int(pdata, pdata->channel[i]);
}
static void xgbe_disable_rx_tx_int(struct xgbe_prv_data *pdata,
				   struct xgbe_channel *channel)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	enum xgbe_int int_id;

	if (channel->tx_ring && channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
	else if (channel->tx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI;
	else if (channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_RI;
	else
		return;

	hw_if->disable_int(channel, int_id);
}
static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++)
		xgbe_disable_rx_tx_int(pdata, pdata->channel[i]);
}
static bool xgbe_ecc_sec(struct xgbe_prv_data *pdata, unsigned long *period,
			 unsigned int *count, const char *area)
{
	if (time_before(jiffies, *period)) {
		(*count)++;
	} else {
		*period = jiffies + (ecc_sec_period * HZ);
		*count = 1;
	}

	if (*count > ecc_sec_info_threshold)
		dev_warn_once(pdata->dev,
			      "%s ECC corrected errors exceed informational threshold\n",
			      area);

	if (*count > ecc_sec_warn_threshold) {
		dev_warn_once(pdata->dev,
			      "%s ECC corrected errors exceed warning threshold\n",
			      area);
		return true;
	}

	return false;
}
static bool xgbe_ecc_ded(struct xgbe_prv_data *pdata, unsigned long *period,
			 unsigned int *count, const char *area)
{
	if (time_before(jiffies, *period)) {
		(*count)++;
	} else {
		*period = jiffies + (ecc_ded_period * HZ);
		*count = 1;
	}

	if (*count > ecc_ded_threshold) {
		netdev_alert(pdata->netdev,
			     "%s ECC detected errors exceed threshold\n",
			     area);
		return true;
	}

	return false;
}
static void xgbe_ecc_isr_task(unsigned long data)
{
	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
	unsigned int ecc_isr;
	bool stop = false;

	/* Mask status with only the interrupts we care about */
	ecc_isr = XP_IOREAD(pdata, XP_ECC_ISR);
	ecc_isr &= XP_IOREAD(pdata, XP_ECC_IER);
	netif_dbg(pdata, intr, pdata->netdev, "ECC_ISR=%#010x\n", ecc_isr);

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, TX_DED)) {
		stop |= xgbe_ecc_ded(pdata, &pdata->tx_ded_period,
				     &pdata->tx_ded_count, "TX fifo");
	}

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, RX_DED)) {
		stop |= xgbe_ecc_ded(pdata, &pdata->rx_ded_period,
				     &pdata->rx_ded_count, "RX fifo");
	}

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, DESC_DED)) {
		stop |= xgbe_ecc_ded(pdata, &pdata->desc_ded_period,
				     &pdata->desc_ded_count,
				     "descriptor cache");
	}

	if (stop) {
		pdata->hw_if.disable_ecc_ded(pdata);
		schedule_work(&pdata->stopdev_work);
		goto out;
	}

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, TX_SEC)) {
		if (xgbe_ecc_sec(pdata, &pdata->tx_sec_period,
				 &pdata->tx_sec_count, "TX fifo"))
			pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_TX);
	}

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, RX_SEC))
		if (xgbe_ecc_sec(pdata, &pdata->rx_sec_period,
				 &pdata->rx_sec_count, "RX fifo"))
			pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_RX);

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, DESC_SEC))
		if (xgbe_ecc_sec(pdata, &pdata->desc_sec_period,
				 &pdata->desc_sec_count, "descriptor cache"))
			pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_DESC);

out:
	/* Clear all ECC interrupts */
	XP_IOWRITE(pdata, XP_ECC_ISR, ecc_isr);

	/* Reissue interrupt if status is not clear */
	if (pdata->vdata->irq_reissue_support)
		XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 1);
}
static irqreturn_t xgbe_ecc_isr(int irq, void *data)
{
	struct xgbe_prv_data *pdata = data;

	if (pdata->isr_as_tasklet)
		tasklet_schedule(&pdata->tasklet_ecc);
	else
		xgbe_ecc_isr_task((unsigned long)pdata);

	return IRQ_HANDLED;
}
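/* Main device interrupt worker.  The DMA interrupt status register also
 * latches MAC/MTL causes, so this single handler dispatches NAPI polling,
 * MMC statistics, timestamp and MDIO completion events, and (when no
 * dedicated irq exists) the AN, ECC and I2C handlers.
 */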
static void xgbe_isr_task(unsigned long data)
{
	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	unsigned int dma_isr, dma_ch_isr;
	unsigned int mac_isr, mac_tssr, mac_mdioisr;
	unsigned int i;

	/* The DMA interrupt status register also reports MAC and MTL
	 * interrupts. So for polling mode, we just need to check for
	 * this register to be non-zero
	 */
	dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
	if (!dma_isr)
		goto isr_done;

	netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);

	for (i = 0; i < pdata->channel_count; i++) {
		if (!(dma_isr & (1 << i)))
			continue;

		channel = pdata->channel[i];

		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
		netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
			  i, dma_ch_isr);

		/* The TI or RI interrupt bits may still be set even if using
		 * per channel DMA interrupts. Check to be sure those are not
		 * enabled before using the private data napi structure.
		 */
		if (!pdata->per_channel_irq &&
		    (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
		     XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) {
			if (napi_schedule_prep(&pdata->napi)) {
				/* Disable Tx and Rx interrupts */
				xgbe_disable_rx_tx_ints(pdata);

				/* Turn on polling */
				__napi_schedule_irqoff(&pdata->napi);
			}
		} else {
			/* Don't clear Rx/Tx status if doing per channel DMA
			 * interrupts, these will be cleared by the ISR for
			 * per channel DMA interrupts.
			 */
			XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, TI, 0);
			XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, RI, 0);
		}

		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU))
			pdata->ext_stats.rx_buffer_unavailable++;

		/* Restart the device on a Fatal Bus Error */
		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
			schedule_work(&pdata->restart_work);

		/* Clear interrupt signals */
		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
	}

	if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
		mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);

		netif_dbg(pdata, intr, pdata->netdev, "MAC_ISR=%#010x\n",
			  mac_isr);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
			hw_if->tx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
			hw_if->rx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) {
			mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR);

			netif_dbg(pdata, intr, pdata->netdev,
				  "MAC_TSSR=%#010x\n", mac_tssr);

			if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
				/* Read Tx Timestamp to clear interrupt */
				pdata->tx_tstamp =
					hw_if->get_tx_tstamp(pdata);
				queue_work(pdata->dev_workqueue,
					   &pdata->tx_tstamp_work);
			}
		}

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, SMI)) {
			mac_mdioisr = XGMAC_IOREAD(pdata, MAC_MDIOISR);

			netif_dbg(pdata, intr, pdata->netdev,
				  "MAC_MDIOISR=%#010x\n", mac_mdioisr);

			if (XGMAC_GET_BITS(mac_mdioisr, MAC_MDIOISR,
					   SNGLCOMPINT))
				complete(&pdata->mdio_complete);
		}
	}

isr_done:
	/* If there is not a separate AN irq, handle it here */
	if (pdata->dev_irq == pdata->an_irq)
		pdata->phy_if.an_isr(pdata);

	/* If there is not a separate ECC irq, handle it here */
	if (pdata->vdata->ecc_support && (pdata->dev_irq == pdata->ecc_irq))
		xgbe_ecc_isr_task((unsigned long)pdata);

	/* If there is not a separate I2C irq, handle it here */
	if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq))
		pdata->i2c_if.i2c_isr(pdata);

	/* Reissue interrupt if status is not clear */
	if (pdata->vdata->irq_reissue_support) {
		unsigned int reissue_mask;

		reissue_mask = 1 << 0;
		if (!pdata->per_channel_irq)
			reissue_mask |= 0xffff << 4;

		XP_IOWRITE(pdata, XP_INT_REISSUE_EN, reissue_mask);
	}
}
static irqreturn_t xgbe_isr(int irq, void *data)
{
	struct xgbe_prv_data *pdata = data;

	if (pdata->isr_as_tasklet)
		tasklet_schedule(&pdata->tasklet_dev);
	else
		xgbe_isr_task((unsigned long)pdata);

	return IRQ_HANDLED;
}
static irqreturn_t xgbe_dma_isr(int irq, void *data)
{
	struct xgbe_channel *channel = data;
	struct xgbe_prv_data *pdata = channel->pdata;
	unsigned int dma_status;

	/* Per channel DMA interrupts are enabled, so we use the per
	 * channel napi structure and not the private data napi structure
	 */
	if (napi_schedule_prep(&channel->napi)) {
		/* Disable Tx and Rx interrupts */
		if (pdata->channel_irq_mode)
			xgbe_disable_rx_tx_int(pdata, channel);
		else
			disable_irq_nosync(channel->dma_irq);

		/* Turn on polling */
		__napi_schedule_irqoff(&channel->napi);
	}

	/* Clear Tx/Rx signals */
	dma_status = 0;
	XGMAC_SET_BITS(dma_status, DMA_CH_SR, TI, 1);
	XGMAC_SET_BITS(dma_status, DMA_CH_SR, RI, 1);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_status);

	return IRQ_HANDLED;
}
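/* Tx coalescing timer: if it fires before the coalesced Tx interrupt,
 * schedule the appropriate NAPI instance manually so completed
 * descriptors are reclaimed without waiting on the hardware.
 */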
static void xgbe_tx_timer(struct timer_list *t)
{
	struct xgbe_channel *channel = from_timer(channel, t, tx_timer);
	struct xgbe_prv_data *pdata = channel->pdata;
	struct napi_struct *napi;

	DBGPR("-->xgbe_tx_timer\n");

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	if (napi_schedule_prep(napi)) {
		/* Disable Tx and Rx interrupts */
		if (pdata->per_channel_irq)
			if (pdata->channel_irq_mode)
				xgbe_disable_rx_tx_int(pdata, channel);
			else
				disable_irq_nosync(channel->dma_irq);
		else
			xgbe_disable_rx_tx_ints(pdata);

		/* Turn on polling */
		__napi_schedule(napi);
	}

	channel->tx_timer_active = 0;

	DBGPR("<--xgbe_tx_timer\n");
}
static void xgbe_service(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   service_work);

	pdata->phy_if.phy_status(pdata);
}
static void xgbe_service_timer(struct timer_list *t)
{
	struct xgbe_prv_data *pdata = from_timer(pdata, t, service_timer);

	queue_work(pdata->dev_workqueue, &pdata->service_work);

	mod_timer(&pdata->service_timer, jiffies + HZ);
}
static void xgbe_init_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	timer_setup(&pdata->service_timer, xgbe_service_timer, 0);

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		if (!channel->tx_ring)
			break;

		timer_setup(&channel->tx_timer, xgbe_tx_timer, 0);
	}
}
static void xgbe_start_timers(struct xgbe_prv_data *pdata)
{
	mod_timer(&pdata->service_timer, jiffies + HZ);
}
static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	del_timer_sync(&pdata->service_timer);

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		if (!channel->tx_ring)
			break;

		del_timer_sync(&channel->tx_timer);
	}
}
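/* Snapshot the hardware capability registers (MAC_HWF0R..MAC_HWF2R) and
 * translate encoded fields (fifo sizes, DMA width, zero-based queue and
 * channel counts) into directly usable numbers.
 */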
void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct xgbe_hw_features *hw_feat = &pdata->hw_feat;

	mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii        = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts          = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac    = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					      ADDMACADRSEL);
	hw_feat->ts_src      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);
	hw_feat->vxn         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VXN);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						RXFIFOSIZE);
	hw_feat->tx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						TXFIFOSIZE);
	hw_feat->adv_ts_hi     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt        = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  HASHTBLSZ);
	hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num  = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* Translate the address width setting into actual number */
	switch (hw_feat->dma_width) {
	case 0:
		hw_feat->dma_width = 32;
		break;
	case 1:
		hw_feat->dma_width = 40;
		break;
	case 2:
		hw_feat->dma_width = 48;
		break;
	default:
		hw_feat->dma_width = 32;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	/* Translate the fifo sizes into actual numbers */
	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);

	if (netif_msg_probe(pdata)) {
		dev_dbg(pdata->dev, "Hardware features:\n");

		/* Hardware feature register 0 */
		dev_dbg(pdata->dev, "  1GbE support              : %s\n",
			hw_feat->gmii ? "yes" : "no");
		dev_dbg(pdata->dev, "  VLAN hash filter          : %s\n",
			hw_feat->vlhash ? "yes" : "no");
		dev_dbg(pdata->dev, "  MDIO interface            : %s\n",
			hw_feat->sma ? "yes" : "no");
		dev_dbg(pdata->dev, "  Wake-up packet support    : %s\n",
			hw_feat->rwk ? "yes" : "no");
		dev_dbg(pdata->dev, "  Magic packet support      : %s\n",
			hw_feat->mgk ? "yes" : "no");
		dev_dbg(pdata->dev, "  Management counters       : %s\n",
			hw_feat->mmc ? "yes" : "no");
		dev_dbg(pdata->dev, "  ARP offload               : %s\n",
			hw_feat->aoe ? "yes" : "no");
		dev_dbg(pdata->dev, "  IEEE 1588-2008 Timestamp  : %s\n",
			hw_feat->ts ? "yes" : "no");
		dev_dbg(pdata->dev, "  Energy Efficient Ethernet : %s\n",
			hw_feat->eee ? "yes" : "no");
		dev_dbg(pdata->dev, "  TX checksum offload       : %s\n",
			hw_feat->tx_coe ? "yes" : "no");
		dev_dbg(pdata->dev, "  RX checksum offload       : %s\n",
			hw_feat->rx_coe ? "yes" : "no");
		dev_dbg(pdata->dev, "  Additional MAC addresses  : %u\n",
			hw_feat->addn_mac);
		dev_dbg(pdata->dev, "  Timestamp source          : %s\n",
			(hw_feat->ts_src == 1) ? "internal" :
			(hw_feat->ts_src == 2) ? "external" :
			(hw_feat->ts_src == 3) ? "internal/external" : "n/a");
		dev_dbg(pdata->dev, "  SA/VLAN insertion         : %s\n",
			hw_feat->sa_vlan_ins ? "yes" : "no");
		dev_dbg(pdata->dev, "  VXLAN/NVGRE support       : %s\n",
			hw_feat->vxn ? "yes" : "no");

		/* Hardware feature register 1 */
		dev_dbg(pdata->dev, "  RX fifo size              : %u\n",
			hw_feat->rx_fifo_size);
		dev_dbg(pdata->dev, "  TX fifo size              : %u\n",
			hw_feat->tx_fifo_size);
		dev_dbg(pdata->dev, "  IEEE 1588 high word       : %s\n",
			hw_feat->adv_ts_hi ? "yes" : "no");
		dev_dbg(pdata->dev, "  DMA width                 : %u\n",
			hw_feat->dma_width);
		dev_dbg(pdata->dev, "  Data Center Bridging      : %s\n",
			hw_feat->dcb ? "yes" : "no");
		dev_dbg(pdata->dev, "  Split header              : %s\n",
			hw_feat->sph ? "yes" : "no");
		dev_dbg(pdata->dev, "  TCP Segmentation Offload  : %s\n",
			hw_feat->tso ? "yes" : "no");
		dev_dbg(pdata->dev, "  Debug memory interface    : %s\n",
			hw_feat->dma_debug ? "yes" : "no");
		dev_dbg(pdata->dev, "  Receive Side Scaling      : %s\n",
			hw_feat->rss ? "yes" : "no");
		dev_dbg(pdata->dev, "  Traffic Class count       : %u\n",
			hw_feat->tc_cnt);
		dev_dbg(pdata->dev, "  Hash table size           : %u\n",
			hw_feat->hash_table_size);
		dev_dbg(pdata->dev, "  L3/L4 Filters             : %u\n",
			hw_feat->l3l4_filter_num);

		/* Hardware feature register 2 */
		dev_dbg(pdata->dev, "  RX queue count            : %u\n",
			hw_feat->rx_q_cnt);
		dev_dbg(pdata->dev, "  TX queue count            : %u\n",
			hw_feat->tx_q_cnt);
		dev_dbg(pdata->dev, "  RX DMA channel count      : %u\n",
			hw_feat->rx_ch_cnt);
		dev_dbg(pdata->dev, "  TX DMA channel count      : %u\n",
			hw_feat->tx_ch_cnt);
		dev_dbg(pdata->dev, "  PPS outputs               : %u\n",
			hw_feat->pps_out_num);
		dev_dbg(pdata->dev, "  Auxiliary snapshot inputs : %u\n",
			hw_feat->aux_snap_num);
	}
}
static void xgbe_disable_vxlan_offloads(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;

	if (!pdata->vxlan_offloads_set)
		return;

	netdev_info(netdev, "disabling VXLAN offloads\n");

	netdev->hw_enc_features &= ~(NETIF_F_SG |
				     NETIF_F_IP_CSUM |
				     NETIF_F_IPV6_CSUM |
				     NETIF_F_RXCSUM |
				     NETIF_F_TSO |
				     NETIF_F_TSO6 |
				     NETIF_F_GRO |
				     NETIF_F_GSO_UDP_TUNNEL |
				     NETIF_F_GSO_UDP_TUNNEL_CSUM);

	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL |
			      NETIF_F_GSO_UDP_TUNNEL_CSUM);

	pdata->vxlan_offloads_set = 0;
}
static void xgbe_disable_vxlan_hw(struct xgbe_prv_data *pdata)
{
	if (!pdata->vxlan_port_set)
		return;

	pdata->hw_if.disable_vxlan(pdata);

	pdata->vxlan_port_set = 0;
	pdata->vxlan_port = 0;
}
static void xgbe_disable_vxlan_accel(struct xgbe_prv_data *pdata)
{
	xgbe_disable_vxlan_offloads(pdata);

	xgbe_disable_vxlan_hw(pdata);
}
static void xgbe_enable_vxlan_offloads(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;

	if (pdata->vxlan_offloads_set)
		return;

	netdev_info(netdev, "enabling VXLAN offloads\n");

	netdev->hw_enc_features |= NETIF_F_SG |
				   NETIF_F_IP_CSUM |
				   NETIF_F_IPV6_CSUM |
				   NETIF_F_RXCSUM |
				   NETIF_F_TSO |
				   NETIF_F_TSO6 |
				   NETIF_F_GRO |
				   pdata->vxlan_features;

	netdev->features |= pdata->vxlan_features;

	pdata->vxlan_offloads_set = 1;
}
static void xgbe_enable_vxlan_hw(struct xgbe_prv_data *pdata)
{
	struct xgbe_vxlan_data *vdata;

	if (pdata->vxlan_port_set)
		return;

	if (list_empty(&pdata->vxlan_ports))
		return;

	vdata = list_first_entry(&pdata->vxlan_ports,
				 struct xgbe_vxlan_data, list);

	pdata->vxlan_port_set = 1;
	pdata->vxlan_port = be16_to_cpu(vdata->port);

	pdata->hw_if.enable_vxlan(pdata);
}
static void xgbe_enable_vxlan_accel(struct xgbe_prv_data *pdata)
{
	/* VXLAN acceleration desired? */
	if (!pdata->vxlan_features)
		return;

	/* VXLAN acceleration possible? */
	if (pdata->vxlan_force_disable)
		return;

	xgbe_enable_vxlan_hw(pdata);

	xgbe_enable_vxlan_offloads(pdata);
}
static void xgbe_reset_vxlan_accel(struct xgbe_prv_data *pdata)
{
	xgbe_disable_vxlan_hw(pdata);

	if (pdata->vxlan_features)
		xgbe_enable_vxlan_offloads(pdata);

	pdata->vxlan_force_disable = 0;
}
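/* NAPI add/enable and disable/delete helpers.  With per-channel
 * interrupts each channel owns a NAPI instance; otherwise a single
 * instance in the private data polls all channels.
 */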
static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
{
	struct xgbe_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		for (i = 0; i < pdata->channel_count; i++) {
			channel = pdata->channel[i];
			if (add)
				netif_napi_add(pdata->netdev, &channel->napi,
					       xgbe_one_poll, NAPI_POLL_WEIGHT);

			napi_enable(&channel->napi);
		}
	} else {
		if (add)
			netif_napi_add(pdata->netdev, &pdata->napi,
				       xgbe_all_poll, NAPI_POLL_WEIGHT);

		napi_enable(&pdata->napi);
	}
}
static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
{
	struct xgbe_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		for (i = 0; i < pdata->channel_count; i++) {
			channel = pdata->channel[i];
			napi_disable(&channel->napi);

			if (del)
				netif_napi_del(&channel->napi);
		}
	} else {
		napi_disable(&pdata->napi);

		if (del)
			netif_napi_del(&pdata->napi);
	}
}
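/* Request the device, ECC and (optionally) per-channel DMA interrupts,
 * unwinding in reverse order on failure.  Each per-channel handler gets
 * an affinity hint matching the channel's allocation node.
 */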
static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	struct net_device *netdev = pdata->netdev;
	unsigned int i;
	int ret;

	tasklet_init(&pdata->tasklet_dev, xgbe_isr_task, (unsigned long)pdata);
	tasklet_init(&pdata->tasklet_ecc, xgbe_ecc_isr_task,
		     (unsigned long)pdata);

	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
			       netdev_name(netdev), pdata);
	if (ret) {
		netdev_alert(netdev, "error requesting irq %d\n",
			     pdata->dev_irq);
		return ret;
	}

	if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq)) {
		ret = devm_request_irq(pdata->dev, pdata->ecc_irq, xgbe_ecc_isr,
				       0, pdata->ecc_name, pdata);
		if (ret) {
			netdev_alert(netdev, "error requesting ecc irq %d\n",
				     pdata->ecc_irq);
			goto err_dev_irq;
		}
	}

	if (!pdata->per_channel_irq)
		return 0;

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		snprintf(channel->dma_irq_name,
			 sizeof(channel->dma_irq_name) - 1,
			 "%s-TxRx-%u", netdev_name(netdev),
			 channel->queue_index);

		ret = devm_request_irq(pdata->dev, channel->dma_irq,
				       xgbe_dma_isr, 0,
				       channel->dma_irq_name, channel);
		if (ret) {
			netdev_alert(netdev, "error requesting irq %d\n",
				     channel->dma_irq);
			goto err_dma_irq;
		}

		irq_set_affinity_hint(channel->dma_irq,
				      &channel->affinity_mask);
	}

	return 0;

err_dma_irq:
	/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
	for (i--; i < pdata->channel_count; i--) {
		channel = pdata->channel[i];

		irq_set_affinity_hint(channel->dma_irq, NULL);
		devm_free_irq(pdata->dev, channel->dma_irq, channel);
	}

	if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
		devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);

err_dev_irq:
	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	return ret;
}
static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
		devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);

	if (!pdata->per_channel_irq)
		return;

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];

		irq_set_affinity_hint(channel->dma_irq, NULL);
		devm_free_irq(pdata->dev, channel->dma_irq, channel);
	}
}
void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_tx_coalesce\n");

	pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
	pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;

	hw_if->config_tx_coalesce(pdata);

	DBGPR("<--xgbe_init_tx_coalesce\n");
}
void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_rx_coalesce\n");

	pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
	pdata->rx_usecs = XGMAC_INIT_DMA_RX_USECS;
	pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;

	hw_if->config_rx_coalesce(pdata);

	DBGPR("<--xgbe_init_rx_coalesce\n");
}
static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_tx_data\n");

	for (i = 0; i < pdata->channel_count; i++) {
		ring = pdata->channel[i]->tx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			desc_if->unmap_rdata(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_tx_data\n");
}
static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_rx_data\n");

	for (i = 0; i < pdata->channel_count; i++) {
		ring = pdata->channel[i]->rx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			desc_if->unmap_rdata(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_rx_data\n");
}
static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
{
	pdata->phy_link = -1;
	pdata->phy_speed = SPEED_UNKNOWN;

	return pdata->phy_if.phy_reset(pdata);
}
int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerdown\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered down\n");
		DBGPR("<--xgbe_powerdown\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&pdata->lock, flags);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_detach(netdev);

	netif_tx_stop_all_queues(netdev);

	xgbe_stop_timers(pdata);
	flush_workqueue(pdata->dev_workqueue);

	hw_if->powerdown_tx(pdata);
	hw_if->powerdown_rx(pdata);

	xgbe_napi_disable(pdata, 0);

	pdata->power_down = 1;

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerdown\n");

	return 0;
}
int xgbe_powerup(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerup\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered up\n");
		DBGPR("<--xgbe_powerup\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&pdata->lock, flags);

	pdata->power_down = 0;

	xgbe_napi_enable(pdata, 0);

	hw_if->powerup_tx(pdata);
	hw_if->powerup_rx(pdata);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_attach(netdev);

	netif_tx_start_all_queues(netdev);

	xgbe_start_timers(pdata);

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerup\n");

	return 0;
}
static void xgbe_free_memory(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;

	/* Free the ring descriptors and buffers */
	desc_if->free_ring_resources(pdata);

	/* Free the channel and ring structures */
	xgbe_free_channels(pdata);
}
static int xgbe_alloc_memory(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct net_device *netdev = pdata->netdev;
	int ret;

	if (pdata->new_tx_ring_count) {
		pdata->tx_ring_count = pdata->new_tx_ring_count;
		pdata->tx_q_count = pdata->tx_ring_count;
		pdata->new_tx_ring_count = 0;
	}

	if (pdata->new_rx_ring_count) {
		pdata->rx_ring_count = pdata->new_rx_ring_count;
		pdata->new_rx_ring_count = 0;
	}

	/* Calculate the Rx buffer size before allocating rings */
	pdata->rx_buf_size = xgbe_calc_rx_buf_size(netdev, netdev->mtu);

	/* Allocate the channel and ring structures */
	ret = xgbe_alloc_channels(pdata);
	if (ret)
		return ret;

	/* Allocate the ring descriptors and buffers */
	ret = desc_if->alloc_ring_resources(pdata);
	if (ret)
		goto err_alloc;

	/* Initialize the service and Tx timers */
	xgbe_init_timers(pdata);

	return 0;

err_alloc:
	xgbe_free_memory(pdata);

	return ret;
}
static int xgbe_start(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_phy_if *phy_if = &pdata->phy_if;
	struct net_device *netdev = pdata->netdev;
	unsigned int i;
	int ret;

	/* Set the number of queues */
	ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
	if (ret) {
		netdev_err(netdev, "error setting real tx queue count\n");
		return ret;
	}

	ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
	if (ret) {
		netdev_err(netdev, "error setting real rx queue count\n");
		return ret;
	}

	/* Set RSS lookup table data for programming */
	for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
			       i % pdata->rx_ring_count);

	ret = hw_if->init(pdata);
	if (ret)
		return ret;

	xgbe_napi_enable(pdata, 1);

	ret = xgbe_request_irqs(pdata);
	if (ret)
		goto err_napi;

	ret = phy_if->phy_start(pdata);
	if (ret)
		goto err_irqs;

	hw_if->enable_tx(pdata);
	hw_if->enable_rx(pdata);

	udp_tunnel_get_rx_info(netdev);

	netif_tx_start_all_queues(netdev);

	xgbe_start_timers(pdata);
	queue_work(pdata->dev_workqueue, &pdata->service_work);

	clear_bit(XGBE_STOPPED, &pdata->dev_state);

	return 0;

err_irqs:
	xgbe_free_irqs(pdata);

err_napi:
	xgbe_napi_disable(pdata, 1);

	hw_if->exit(pdata);

	return ret;
}
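/* Stop order mirrors xgbe_start(): queues and timers first, then MAC
 * Tx/Rx, PHY, irqs and NAPI, and finally a per-queue byte-count reset
 * so a later start begins from a clean state.
 */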
static void xgbe_stop(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_phy_if *phy_if = &pdata->phy_if;
	struct xgbe_channel *channel;
	struct net_device *netdev = pdata->netdev;
	struct netdev_queue *txq;
	unsigned int i;

	DBGPR("-->xgbe_stop\n");

	if (test_bit(XGBE_STOPPED, &pdata->dev_state))
		return;

	netif_tx_stop_all_queues(netdev);

	xgbe_stop_timers(pdata);
	flush_workqueue(pdata->dev_workqueue);

	xgbe_reset_vxlan_accel(pdata);

	hw_if->disable_tx(pdata);
	hw_if->disable_rx(pdata);

	phy_if->phy_stop(pdata);

	xgbe_free_irqs(pdata);

	xgbe_napi_disable(pdata, 1);

	hw_if->exit(pdata);

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		if (!channel->tx_ring)
			continue;

		txq = netdev_get_tx_queue(netdev, channel->queue_index);
		netdev_tx_reset_queue(txq);
	}

	set_bit(XGBE_STOPPED, &pdata->dev_state);

	DBGPR("<--xgbe_stop\n");
}
static void xgbe_stopdev(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   stopdev_work);

	rtnl_lock();

	xgbe_stop(pdata);

	xgbe_free_tx_data(pdata);
	xgbe_free_rx_data(pdata);

	rtnl_unlock();

	netdev_alert(pdata->netdev, "device stopped\n");
}
void xgbe_full_restart_dev(struct xgbe_prv_data *pdata)
{
	/* If not running, "restart" will happen on open */
	if (!netif_running(pdata->netdev))
		return;

	xgbe_stop(pdata);

	xgbe_free_memory(pdata);
	xgbe_alloc_memory(pdata);

	xgbe_start(pdata);
}
void xgbe_restart_dev(struct xgbe_prv_data *pdata)
{
	/* If not running, "restart" will happen on open */
	if (!netif_running(pdata->netdev))
		return;

	xgbe_stop(pdata);

	xgbe_free_tx_data(pdata);
	xgbe_free_rx_data(pdata);

	xgbe_start(pdata);
}
static void xgbe_restart(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   restart_work);

	rtnl_lock();

	xgbe_restart_dev(pdata);

	rtnl_unlock();
}
static void xgbe_tx_tstamp(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   tx_tstamp_work);
	struct skb_shared_hwtstamps hwtstamps;
	u64 nsec;
	unsigned long flags;

	spin_lock_irqsave(&pdata->tstamp_lock, flags);
	if (!pdata->tx_tstamp_skb)
		goto unlock;

	if (pdata->tx_tstamp) {
		nsec = timecounter_cyc2time(&pdata->tstamp_tc,
					    pdata->tx_tstamp);

		memset(&hwtstamps, 0, sizeof(hwtstamps));
		hwtstamps.hwtstamp = ns_to_ktime(nsec);
		skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
	}

	dev_kfree_skb_any(pdata->tx_tstamp_skb);

	pdata->tx_tstamp_skb = NULL;

unlock:
	spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
}
static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
				      struct ifreq *ifreq)
{
	if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
			 sizeof(pdata->tstamp_config)))
		return -EFAULT;

	return 0;
}
static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
				      struct ifreq *ifreq)
{
	struct hwtstamp_config config;
	unsigned int mac_tscr;

	if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
		return -EFAULT;

	if (config.flags)
		return -EINVAL;

	mac_tscr = 0;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		break;

	case HWTSTAMP_TX_ON:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;

	case HWTSTAMP_FILTER_NTP_ALL:
	case HWTSTAMP_FILTER_ALL:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		/* Fall through - to PTP v1, UDP, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		/* Fall through - to PTP v1, UDP, Sync packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		/* Fall through - to PTP v1, UDP, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.AS1, Ethernet, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.AS1, Ethernet, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.AS1, Ethernet, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.AS1, any layer, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.AS1, any layer, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.AS1, any layer, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	default:
		return -ERANGE;
	}

	pdata->hw_if.config_tstamp(pdata, mac_tscr);

	memcpy(&pdata->tstamp_config, &config, sizeof(config));

	return 0;
}
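/* Capture a Tx timestamp request for this skb unless one is already in
 * flight; the hardware only tracks a single outstanding PTP packet.
 */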
static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
				struct sk_buff *skb,
				struct xgbe_packet_data *packet)
{
	unsigned long flags;

	if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
		spin_lock_irqsave(&pdata->tstamp_lock, flags);
		if (pdata->tx_tstamp_skb) {
			/* Another timestamp in progress, ignore this one */
			XGMAC_SET_BITS(packet->attributes,
				       TX_PACKET_ATTRIBUTES, PTP, 0);
		} else {
			pdata->tx_tstamp_skb = skb_get(skb);
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		}
		spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
	}

	skb_tx_timestamp(skb);
}
static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
	if (skb_vlan_tag_present(skb))
		packet->vlan_ctag = skb_vlan_tag_get(skb);
}
static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
	int ret;

	if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			    TSO_ENABLE))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, VXLAN)) {
		packet->header_len = skb_inner_transport_offset(skb) +
				     inner_tcp_hdrlen(skb);
		packet->tcp_header_len = inner_tcp_hdrlen(skb);
	} else {
		packet->header_len = skb_transport_offset(skb) +
				     tcp_hdrlen(skb);
		packet->tcp_header_len = tcp_hdrlen(skb);
	}
	packet->tcp_payload_len = skb->len - packet->header_len;
	packet->mss = skb_shinfo(skb)->gso_size;

	DBGPR("  packet->header_len=%u\n", packet->header_len);
	DBGPR("  packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
	      packet->tcp_header_len, packet->tcp_payload_len);
	DBGPR("  packet->mss=%u\n", packet->mss);

	/* Update the number of packets that will ultimately be transmitted
	 * along with the extra bytes for each extra packet
	 */
	packet->tx_packets = skb_shinfo(skb)->gso_segs;
	packet->tx_bytes += (packet->tx_packets - 1) * packet->header_len;

	return 0;
}
static bool xgbe_is_vxlan(struct xgbe_prv_data *pdata, struct sk_buff *skb)
{
	struct xgbe_vxlan_data *vdata;

	if (pdata->vxlan_force_disable)
		return false;

	if (!skb->encapsulation)
		return false;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return false;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		if (ip_hdr(skb)->protocol != IPPROTO_UDP)
			return false;
		break;

	case htons(ETH_P_IPV6):
		if (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP)
			return false;
		break;

	default:
		return false;
	}

	/* See if we have the UDP port in our list */
	list_for_each_entry(vdata, &pdata->vxlan_ports, list) {
		if ((skb->protocol == htons(ETH_P_IP)) &&
		    (vdata->sa_family == AF_INET) &&
		    (vdata->port == udp_hdr(skb)->dest))
			return true;
		else if ((skb->protocol == htons(ETH_P_IPV6)) &&
			 (vdata->sa_family == AF_INET6) &&
			 (vdata->port == udp_hdr(skb)->dest))
			return true;
	}

	return false;
}
*skb
)
1821 if (skb
->ip_summed
!= CHECKSUM_PARTIAL
)
1824 if (!skb_is_gso(skb
))
1827 DBGPR(" TSO packet to be processed\n");
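/* Walk the skb to compute how many Tx descriptors the packet needs:
 * context descriptors for new MSS/VLAN state, plus one descriptor per
 * XGBE_TX_MAX_BUF_SIZE chunk of linear and fragment data.
 */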
static void xgbe_packet_info(struct xgbe_prv_data *pdata,
			     struct xgbe_ring *ring, struct sk_buff *skb,
			     struct xgbe_packet_data *packet)
{
	skb_frag_t *frag;
	unsigned int context_desc;
	unsigned int len;
	unsigned int i;

	packet->skb = skb;

	context_desc = 0;
	packet->rdesc_count = 0;

	packet->tx_packets = 1;
	packet->tx_bytes = skb->len;

	if (xgbe_is_tso(skb)) {
		/* TSO requires an extra descriptor if mss is different */
		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
			context_desc = 1;
			packet->rdesc_count++;
		}

		/* TSO requires an extra descriptor for TSO header */
		packet->rdesc_count++;

		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       TSO_ENABLE, 1);
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       CSUM_ENABLE, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL)
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       CSUM_ENABLE, 1);

	if (xgbe_is_vxlan(pdata, skb))
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       VXLAN, 1);

	if (skb_vlan_tag_present(skb)) {
		/* VLAN requires an extra descriptor if tag is different */
		if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
			/* We can share with the TSO context descriptor */
			if (!context_desc) {
				context_desc = 1;
				packet->rdesc_count++;
			}

		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       VLAN_CTAG, 1);
	}

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON))
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       PTP, 1);

	for (len = skb_headlen(skb); len;) {
		packet->rdesc_count++;
		len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		for (len = skb_frag_size(frag); len; ) {
			packet->rdesc_count++;
			len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
		}
	}
}
static int xgbe_open(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	/* Create the various names based on netdev name */
	snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs",
		 netdev_name(netdev));

	snprintf(pdata->ecc_name, sizeof(pdata->ecc_name) - 1, "%s-ecc",
		 netdev_name(netdev));

	snprintf(pdata->i2c_name, sizeof(pdata->i2c_name) - 1, "%s-i2c",
		 netdev_name(netdev));

	/* Create workqueues */
	pdata->dev_workqueue =
		create_singlethread_workqueue(netdev_name(netdev));
	if (!pdata->dev_workqueue) {
		netdev_err(netdev, "device workqueue creation failed\n");
		return -ENOMEM;
	}

	pdata->an_workqueue =
		create_singlethread_workqueue(pdata->an_name);
	if (!pdata->an_workqueue) {
		netdev_err(netdev, "phy workqueue creation failed\n");
		ret = -ENOMEM;
		goto err_dev_wq;
	}

	/* Reset the phy settings */
	ret = xgbe_phy_reset(pdata);
	if (ret)
		goto err_an_wq;

	/* Enable the clocks */
	ret = clk_prepare_enable(pdata->sysclk);
	if (ret) {
		netdev_alert(netdev, "dma clk_prepare_enable failed\n");
		goto err_an_wq;
	}

	ret = clk_prepare_enable(pdata->ptpclk);
	if (ret) {
		netdev_alert(netdev, "ptp clk_prepare_enable failed\n");
		goto err_sysclk;
	}

	INIT_WORK(&pdata->service_work, xgbe_service);
	INIT_WORK(&pdata->restart_work, xgbe_restart);
	INIT_WORK(&pdata->stopdev_work, xgbe_stopdev);
	INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);

	ret = xgbe_alloc_memory(pdata);
	if (ret)
		goto err_ptpclk;

	ret = xgbe_start(pdata);
	if (ret)
		goto err_mem;

	clear_bit(XGBE_DOWN, &pdata->dev_state);

	return 0;

err_mem:
	xgbe_free_memory(pdata);

err_ptpclk:
	clk_disable_unprepare(pdata->ptpclk);

err_sysclk:
	clk_disable_unprepare(pdata->sysclk);

err_an_wq:
	destroy_workqueue(pdata->an_workqueue);

err_dev_wq:
	destroy_workqueue(pdata->dev_workqueue);

	return ret;
}
static int xgbe_close(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	/* Stop the device */
	xgbe_stop(pdata);

	xgbe_free_memory(pdata);

	/* Disable the clocks */
	clk_disable_unprepare(pdata->ptpclk);
	clk_disable_unprepare(pdata->sysclk);

	flush_workqueue(pdata->an_workqueue);
	destroy_workqueue(pdata->an_workqueue);

	flush_workqueue(pdata->dev_workqueue);
	destroy_workqueue(pdata->dev_workqueue);

	set_bit(XGBE_DOWN, &pdata->dev_state);

	return 0;
}
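/* ndo_start_xmit entry point: validates the skb, reserves descriptors,
 * prepares TSO/VLAN/timestamp state, maps buffers and hands the packet
 * to the hardware interface for transmission.
 */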
static netdev_tx_t xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_packet_data *packet;
	struct netdev_queue *txq;
	netdev_tx_t ret;

	DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);

	channel = pdata->channel[skb->queue_mapping];
	txq = netdev_get_tx_queue(netdev, channel->queue_index);
	ring = channel->tx_ring;
	packet = &ring->packet_data;

	ret = NETDEV_TX_OK;

	if (skb->len == 0) {
		netif_err(pdata, tx_err, netdev,
			  "empty skb received from stack\n");
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}

	/* Calculate preliminary packet info */
	memset(packet, 0, sizeof(*packet));
	xgbe_packet_info(pdata, ring, skb, packet);

	/* Check that there are enough descriptors available */
	ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count);
	if (ret)
		goto tx_netdev_return;

	ret = xgbe_prep_tso(skb, packet);
	if (ret) {
		netif_err(pdata, tx_err, netdev,
			  "error processing TSO packet\n");
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}
	xgbe_prep_vlan(skb, packet);

	if (!desc_if->map_tx_skb(channel, skb)) {
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}

	xgbe_prep_tx_tstamp(pdata, skb, packet);

	/* Report on the actual number of bytes (to be) sent */
	netdev_tx_sent_queue(txq, packet->tx_bytes);

	/* Configure required descriptor fields for transmission */
	hw_if->dev_xmit(channel);

	if (netif_msg_pktdata(pdata))
		xgbe_print_pkt(netdev, skb, true);

	/* Stop the queue in advance if there may not be enough descriptors */
	xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);

	ret = NETDEV_TX_OK;

tx_netdev_return:
	return ret;
}
static void xgbe_set_rx_mode(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_set_rx_mode\n");

	hw_if->config_rx_mode(pdata);

	DBGPR("<--xgbe_set_rx_mode\n");
}
static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct sockaddr *saddr = addr;

	DBGPR("-->xgbe_set_mac_address\n");

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);

	hw_if->set_mac_address(pdata, netdev->dev_addr);

	DBGPR("<--xgbe_set_mac_address\n");

	return 0;
}
static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		ret = xgbe_get_hwtstamp_settings(pdata, ifreq);
		break;

	case SIOCSHWTSTAMP:
		ret = xgbe_set_hwtstamp_settings(pdata, ifreq);
		break;

	default:
		ret = -EOPNOTSUPP;
	}

	return ret;
}
static int xgbe_change_mtu(struct net_device *netdev, int mtu)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	DBGPR("-->xgbe_change_mtu\n");

	ret = xgbe_calc_rx_buf_size(netdev, mtu);
	if (ret < 0)
		return ret;

	pdata->rx_buf_size = ret;
	netdev->mtu = mtu;

	xgbe_restart_dev(pdata);

	DBGPR("<--xgbe_change_mtu\n");

	return 0;
}
static void xgbe_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	netdev_warn(netdev, "tx timeout, device restarting\n");
	schedule_work(&pdata->restart_work);
}
static void xgbe_get_stats64(struct net_device *netdev,
			     struct rtnl_link_stats64 *s)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;

	DBGPR("-->%s\n", __func__);

	pdata->hw_if.read_mmc_stats(pdata);

	s->rx_packets = pstats->rxframecount_gb;
	s->rx_bytes = pstats->rxoctetcount_gb;
	s->rx_errors = pstats->rxframecount_gb -
		       pstats->rxbroadcastframes_g -
		       pstats->rxmulticastframes_g -
		       pstats->rxunicastframes_g;
	s->multicast = pstats->rxmulticastframes_g;
	s->rx_length_errors = pstats->rxlengtherror;
	s->rx_crc_errors = pstats->rxcrcerror;
	s->rx_fifo_errors = pstats->rxfifooverflow;

	s->tx_packets = pstats->txframecount_gb;
	s->tx_bytes = pstats->txoctetcount_gb;
	s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
	s->tx_dropped = netdev->stats.tx_dropped;

	DBGPR("<--%s\n", __func__);
}
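
/* rx_errors has no dedicated MMC counter; it is derived above as all
 * received frames (good + bad) minus the good broadcast, multicast and
 * unicast frames, e.g. 1000 total frames with 990 good ones yields
 * rx_errors = 10. tx_errors is derived the same way from the
 * "good or bad" and "good only" transmit frame counters.
 */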
static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
				u16 vid)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->%s\n", __func__);

	set_bit(vid, pdata->active_vlans);
	hw_if->update_vlan_hash_table(pdata);

	DBGPR("<--%s\n", __func__);

	return 0;
}
static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
				 u16 vid)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->%s\n", __func__);

	clear_bit(vid, pdata->active_vlans);
	hw_if->update_vlan_hash_table(pdata);

	DBGPR("<--%s\n", __func__);

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void xgbe_poll_controller(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_poll_controller\n");

	if (pdata->per_channel_irq) {
		for (i = 0; i < pdata->channel_count; i++) {
			channel = pdata->channel[i];
			xgbe_dma_isr(channel->dma_irq, channel);
		}
	} else {
		disable_irq(pdata->dev_irq);
		xgbe_isr(pdata->dev_irq, pdata);
		enable_irq(pdata->dev_irq);
	}

	DBGPR("<--xgbe_poll_controller\n");
}
#endif /* End CONFIG_NET_POLL_CONTROLLER */
static int xgbe_setup_tc(struct net_device *netdev, enum tc_setup_type type,
			 void *type_data)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct tc_mqprio_qopt *mqprio = type_data;
	u8 tc;

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
	tc = mqprio->num_tc;

	if (tc > pdata->hw_feat.tc_cnt)
		return -EINVAL;

	pdata->num_tcs = tc;
	pdata->hw_if.config_tc(pdata);

	return 0;
}
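
/* mqprio offload above: only TC_SETUP_QDISC_MQPRIO is accepted, TC-level
 * hardware offload is claimed via TC_MQPRIO_HW_OFFLOAD_TCS, and the
 * requested traffic-class count is bounded by what the hardware
 * advertises in hw_feat.tc_cnt before the MAC is reprogrammed.
 */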
static netdev_features_t xgbe_fix_features(struct net_device *netdev,
					   netdev_features_t features)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	netdev_features_t vxlan_base, vxlan_mask;

	vxlan_base = NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RX_UDP_TUNNEL_PORT;
	vxlan_mask = vxlan_base | NETIF_F_GSO_UDP_TUNNEL_CSUM;

	pdata->vxlan_features = features & vxlan_mask;

	/* Only fix VXLAN-related features */
	if (!pdata->vxlan_features)
		return features;

	/* If VXLAN isn't supported then clear any features:
	 *   This is needed because NETIF_F_RX_UDP_TUNNEL_PORT gets
	 *   automatically set if ndo_udp_tunnel_add is set.
	 */
	if (!pdata->hw_feat.vxn)
		return features & ~vxlan_mask;

	/* VXLAN CSUM requires VXLAN base */
	if ((features & NETIF_F_GSO_UDP_TUNNEL_CSUM) &&
	    !(features & NETIF_F_GSO_UDP_TUNNEL)) {
		netdev_notice(netdev,
			      "forcing tx udp tunnel support\n");
		features |= NETIF_F_GSO_UDP_TUNNEL;
	}

	/* Can't do one without doing the other */
	if ((features & vxlan_base) != vxlan_base) {
		netdev_notice(netdev,
			      "forcing both tx and rx udp tunnel support\n");
		features |= vxlan_base;
	}

	if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
		if (!(features & NETIF_F_GSO_UDP_TUNNEL_CSUM)) {
			netdev_notice(netdev,
				      "forcing tx udp tunnel checksumming on\n");
			features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
		}
	} else {
		if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM) {
			netdev_notice(netdev,
				      "forcing tx udp tunnel checksumming off\n");
			features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
		}
	}

	pdata->vxlan_features = features & vxlan_mask;

	/* Adjust UDP Tunnel based on current state */
	if (pdata->vxlan_force_disable) {
		netdev_notice(netdev,
			      "VXLAN acceleration disabled, turning off udp tunnel features\n");
		features &= ~vxlan_mask;
	}

	return features;
}
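
/* The fixups above resolve feature dependencies in a fixed order: tunnel
 * checksum offload pulls in the tunnel GSO base, the GSO/receive-port
 * pair is kept together, and the tunnel checksum bit is forced to track
 * plain IP checksum offload, so a user request always collapses to a
 * state the hardware can honor before ndo_set_features runs.
 */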
static int xgbe_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
	netdev_features_t udp_tunnel;
	int ret = 0;

	rxhash = pdata->netdev_features & NETIF_F_RXHASH;
	rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
	rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
	rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
	udp_tunnel = pdata->netdev_features & NETIF_F_GSO_UDP_TUNNEL;

	if ((features & NETIF_F_RXHASH) && !rxhash)
		ret = hw_if->enable_rss(pdata);
	else if (!(features & NETIF_F_RXHASH) && rxhash)
		ret = hw_if->disable_rss(pdata);
	if (ret)
		return ret;

	if ((features & NETIF_F_RXCSUM) && !rxcsum)
		hw_if->enable_rx_csum(pdata);
	else if (!(features & NETIF_F_RXCSUM) && rxcsum)
		hw_if->disable_rx_csum(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
		hw_if->enable_rx_vlan_stripping(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
		hw_if->disable_rx_vlan_stripping(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
		hw_if->enable_rx_vlan_filtering(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
		hw_if->disable_rx_vlan_filtering(pdata);

	if ((features & NETIF_F_GSO_UDP_TUNNEL) && !udp_tunnel)
		xgbe_enable_vxlan_accel(pdata);
	else if (!(features & NETIF_F_GSO_UDP_TUNNEL) && udp_tunnel)
		xgbe_disable_vxlan_accel(pdata);

	pdata->netdev_features = features;

	DBGPR("<--xgbe_set_features\n");

	return 0;
}
static void xgbe_udp_tunnel_add(struct net_device *netdev,
				struct udp_tunnel_info *ti)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_vxlan_data *vdata;

	if (!pdata->hw_feat.vxn)
		return;

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	pdata->vxlan_port_count++;

	netif_dbg(pdata, drv, netdev,
		  "adding VXLAN tunnel, family=%hx/port=%hx\n",
		  ti->sa_family, be16_to_cpu(ti->port));

	if (pdata->vxlan_force_disable)
		return;

	vdata = kzalloc(sizeof(*vdata), GFP_ATOMIC);
	if (!vdata) {
		/* Can no longer properly track VXLAN ports */
		pdata->vxlan_force_disable = 1;
		netif_dbg(pdata, drv, netdev,
			  "internal error, disabling VXLAN accelerations\n");

		xgbe_disable_vxlan_accel(pdata);

		return;
	}

	vdata->sa_family = ti->sa_family;
	vdata->port = ti->port;

	list_add_tail(&vdata->list, &pdata->vxlan_ports);

	/* First port added? */
	if (pdata->vxlan_port_count == 1)
		xgbe_enable_vxlan_accel(pdata);
}
static void xgbe_udp_tunnel_del(struct net_device *netdev,
				struct udp_tunnel_info *ti)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_vxlan_data *vdata;

	if (!pdata->hw_feat.vxn)
		return;

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	netif_dbg(pdata, drv, netdev,
		  "deleting VXLAN tunnel, family=%hx/port=%hx\n",
		  ti->sa_family, be16_to_cpu(ti->port));

	/* Don't need safe version since loop terminates with deletion */
	list_for_each_entry(vdata, &pdata->vxlan_ports, list) {
		if (vdata->sa_family != ti->sa_family)
			continue;

		if (vdata->port != ti->port)
			continue;

		list_del(&vdata->list);
		kfree(vdata);

		break;
	}

	pdata->vxlan_port_count--;
	if (!pdata->vxlan_port_count) {
		xgbe_reset_vxlan_accel(pdata);

		return;
	}

	if (pdata->vxlan_force_disable)
		return;

	/* See if VXLAN tunnel id needs to be changed */
	vdata = list_first_entry(&pdata->vxlan_ports,
				 struct xgbe_vxlan_data, list);
	if (pdata->vxlan_port == be16_to_cpu(vdata->port))
		return;

	pdata->vxlan_port = be16_to_cpu(vdata->port);
	pdata->hw_if.set_vxlan_id(pdata);
}
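
/* Port bookkeeping above: the hardware tracks a single VXLAN port, so the
 * driver keeps every offered port in vxlan_ports and programs the head of
 * the list. Deleting the active port promotes the next list entry via
 * set_vxlan_id(); deleting the last port tears the offload down entirely.
 */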
static netdev_features_t xgbe_features_check(struct sk_buff *skb,
					     struct net_device *netdev,
					     netdev_features_t features)
{
	features = vlan_features_check(skb, features);
	features = vxlan_features_check(skb, features);

	return features;
}
static const struct net_device_ops xgbe_netdev_ops = {
	.ndo_open		= xgbe_open,
	.ndo_stop		= xgbe_close,
	.ndo_start_xmit		= xgbe_xmit,
	.ndo_set_rx_mode	= xgbe_set_rx_mode,
	.ndo_set_mac_address	= xgbe_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= xgbe_ioctl,
	.ndo_change_mtu		= xgbe_change_mtu,
	.ndo_tx_timeout		= xgbe_tx_timeout,
	.ndo_get_stats64	= xgbe_get_stats64,
	.ndo_vlan_rx_add_vid	= xgbe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= xgbe_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= xgbe_poll_controller,
#endif
	.ndo_setup_tc		= xgbe_setup_tc,
	.ndo_fix_features	= xgbe_fix_features,
	.ndo_set_features	= xgbe_set_features,
	.ndo_udp_tunnel_add	= xgbe_udp_tunnel_add,
	.ndo_udp_tunnel_del	= xgbe_udp_tunnel_del,
	.ndo_features_check	= xgbe_features_check,
};

const struct net_device_ops *xgbe_get_netdev_ops(void)
{
	return &xgbe_netdev_ops;
}
static void xgbe_rx_refresh(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;

	while (ring->dirty != ring->cur) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);

		/* Reset rdata values */
		desc_if->unmap_rdata(pdata, rdata);

		if (desc_if->map_rx_buffer(pdata, ring, rdata))
			break;

		hw_if->rx_desc_reset(pdata, rdata, ring->dirty);

		ring->dirty++;
	}

	/* Make sure everything is written before the register write */
	wmb();

	/* Update the Rx Tail Pointer Register with address of
	 * the last cleaned entry */
	rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
			  lower_32_bits(rdata->rdesc_dma));
}
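
/* Refresh mechanics: ring->dirty trails ring->cur across the descriptors
 * the driver has consumed. Each pass re-arms those descriptors, and the
 * closing register write hands them back to the hardware by pointing the
 * Rx tail at the last re-armed entry (ring->dirty - 1); the barrier above
 * it orders the descriptor writes before the doorbell.
 */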
static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
				       struct napi_struct *napi,
				       struct xgbe_ring_data *rdata,
				       unsigned int len)
{
	struct sk_buff *skb;
	u8 *packet;

	skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
	if (!skb)
		return NULL;

	/* Pull in the header buffer which may contain just the header
	 * or the header plus data
	 */
	dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
				      rdata->rx.hdr.dma_off,
				      rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);

	packet = page_address(rdata->rx.hdr.pa.pages) +
		 rdata->rx.hdr.pa.pages_offset;
	skb_copy_to_linear_data(skb, packet, len);
	skb_put(skb, len);

	return skb;
}
static unsigned int xgbe_rx_buf1_len(struct xgbe_ring_data *rdata,
				     struct xgbe_packet_data *packet)
{
	/* Always zero if not the first descriptor */
	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST))
		return 0;

	/* First descriptor with split header, return header length */
	if (rdata->rx.hdr_len)
		return rdata->rx.hdr_len;

	/* First descriptor but not the last descriptor and no split header,
	 * so the full buffer was used
	 */
	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
		return rdata->rx.hdr.dma_len;

	/* First descriptor and last descriptor and no split header, so
	 * calculate how much of the buffer was used
	 */
	return min_t(unsigned int, rdata->rx.hdr.dma_len, rdata->rx.len);
}
static unsigned int xgbe_rx_buf2_len(struct xgbe_ring_data *rdata,
				     struct xgbe_packet_data *packet,
				     unsigned int len)
{
	/* Always the full buffer if not the last descriptor */
	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
		return rdata->rx.buf.dma_len;

	/* Last descriptor so calculate how much of the buffer was used
	 * for the last bit of data
	 */
	return rdata->rx.len - len;
}
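
/* Worked example for the two helpers above, assuming a 256-byte header
 * buffer (rx.hdr.dma_len) and a 1500-byte frame in a single, unsplit
 * descriptor: buf1_len = min(256, 1500) = 256 bytes come from the header
 * buffer, and buf2_len = 1500 - 256 = 1244 bytes come from the data
 * buffer.
 */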
static int xgbe_tx_poll(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct net_device *netdev = pdata->netdev;
	struct netdev_queue *txq;
	int processed = 0;
	unsigned int tx_packets = 0, tx_bytes = 0;
	unsigned int cur;

	DBGPR("-->xgbe_tx_poll\n");

	/* Nothing to do if there isn't a Tx ring for this channel */
	if (!ring)
		return 0;

	cur = ring->cur;

	/* Be sure we get ring->cur before accessing descriptor data */
	smp_rmb();

	txq = netdev_get_tx_queue(netdev, channel->queue_index);

	while ((processed < XGBE_TX_DESC_MAX_PROC) &&
	       (ring->dirty != cur)) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
		rdesc = rdata->rdesc;

		if (!hw_if->tx_complete(rdesc))
			break;

		/* Make sure descriptor fields are read after reading the OWN
		 * bit */
		dma_rmb();

		if (netif_msg_tx_done(pdata))
			xgbe_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);

		if (hw_if->is_last_desc(rdesc)) {
			tx_packets += rdata->tx.packets;
			tx_bytes += rdata->tx.bytes;
		}

		/* Free the SKB and reset the descriptor for re-use */
		desc_if->unmap_rdata(pdata, rdata);
		hw_if->tx_desc_reset(rdata);

		processed++;
		ring->dirty++;
	}

	if (!processed)
		return 0;

	netdev_tx_completed_queue(txq, tx_packets, tx_bytes);

	if ((ring->tx.queue_stopped == 1) &&
	    (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
		ring->tx.queue_stopped = 0;
		netif_tx_wake_queue(txq);
	}

	DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);

	return processed;
}
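
/* Barrier pairing in the poll above: smp_rmb() orders the ring->cur read
 * against the descriptor data it guards, and dma_rmb() keeps the
 * descriptor field reads from being speculated ahead of the OWN-bit check
 * in tx_complete(), mirroring the write-side ordering in the transmit
 * path.
 */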
static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	struct net_device *netdev = pdata->netdev;
	struct napi_struct *napi;
	struct sk_buff *skb;
	struct skb_shared_hwtstamps *hwtstamps;
	unsigned int last, error, context_next, context;
	unsigned int len, buf1_len, buf2_len, max_len;
	unsigned int received = 0;
	int packet_count = 0;

	DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);

	/* Nothing to do if there isn't a Rx ring for this channel */
	if (!ring)
		return 0;

	last = 0;
	context_next = 0;

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	packet = &ring->packet_data;
	while (packet_count < budget) {
		DBGPR("  cur = %d\n", ring->cur);

		/* First time in loop see if we need to restore state */
		if (!received && rdata->state_saved) {
			skb = rdata->state.skb;
			error = rdata->state.error;
			len = rdata->state.len;
		} else {
			memset(packet, 0, sizeof(*packet));
			skb = NULL;
			error = 0;
			len = 0;
		}

read_again:
		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);

		if (xgbe_rx_dirty_desc(ring) > (XGBE_RX_DESC_CNT >> 3))
			xgbe_rx_refresh(channel);

		if (hw_if->dev_read(channel))
			break;

		received++;
		ring->cur++;

		last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
				      LAST);
		context_next = XGMAC_GET_BITS(packet->attributes,
					      RX_PACKET_ATTRIBUTES,
					      CONTEXT_NEXT);
		context = XGMAC_GET_BITS(packet->attributes,
					 RX_PACKET_ATTRIBUTES,
					 CONTEXT);

		/* Earlier error, just drain the remaining data */
		if ((!last || context_next) && error)
			goto read_again;

		if (error || packet->errors) {
			if (packet->errors)
				netif_err(pdata, rx_err, netdev,
					  "error in received packet\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (!context) {
			/* Get the data length in the descriptor buffers */
			buf1_len = xgbe_rx_buf1_len(rdata, packet);
			len += buf1_len;
			buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
			len += buf2_len;

			if (!skb) {
				skb = xgbe_create_skb(pdata, napi, rdata,
						      buf1_len);
				if (!skb) {
					error = 1;
					goto skip_data;
				}
			}

			if (buf2_len) {
				dma_sync_single_range_for_cpu(pdata->dev,
							rdata->rx.buf.dma_base,
							rdata->rx.buf.dma_off,
							rdata->rx.buf.dma_len,
							DMA_FROM_DEVICE);

				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
						rdata->rx.buf.pa.pages,
						rdata->rx.buf.pa.pages_offset,
						buf2_len,
						rdata->rx.buf.dma_len);
				rdata->rx.buf.pa.pages = NULL;
			}
		}

skip_data:
		if (!last || context_next)
			goto read_again;

		if (!skb || error) {
			dev_kfree_skb(skb);
			goto next_packet;
		}

		/* Be sure we don't exceed the configured MTU */
		max_len = netdev->mtu + ETH_HLEN;
		if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    (skb->protocol == htons(ETH_P_8021Q)))
			max_len += VLAN_HLEN;

		if (skb->len > max_len) {
			netif_err(pdata, rx_err, netdev,
				  "packet length exceeds configured MTU\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (netif_msg_pktdata(pdata))
			xgbe_print_pkt(netdev, skb, false);

		skb_checksum_none_assert(skb);
		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, CSUM_DONE))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, TNP)) {
			skb->encapsulation = 1;

			if (XGMAC_GET_BITS(packet->attributes,
					   RX_PACKET_ATTRIBUTES, TNPCSUM_DONE))
				skb->csum_level = 1;
		}

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, VLAN_CTAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       packet->vlan_ctag);

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, RX_TSTAMP)) {
			u64 nsec;

			nsec = timecounter_cyc2time(&pdata->tstamp_tc,
						    packet->rx_tstamp);
			hwtstamps = skb_hwtstamps(skb);
			hwtstamps->hwtstamp = ns_to_ktime(nsec);
		}

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, RSS_HASH))
			skb_set_hash(skb, packet->rss_hash,
				     packet->rss_hash_type);

		skb->dev = netdev;
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, channel->queue_index);

		napi_gro_receive(napi, skb);

next_packet:
		packet_count++;
	}

	/* Check if we need to save state before leaving */
	if (received && (!last || context_next)) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
		rdata->state_saved = 1;
		rdata->state.skb = skb;
		rdata->state.len = len;
		rdata->state.error = error;
	}

	DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count);

	return packet_count;
}
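
/* State save/restore above handles frames that span NAPI polls: if the
 * budget runs out between the first and last descriptor of a frame, the
 * partially built skb, running length and error flag are parked in the
 * next descriptor's ring data and picked back up on the first iteration
 * of the following poll.
 */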
static int xgbe_one_poll(struct napi_struct *napi, int budget)
{
	struct xgbe_channel *channel = container_of(napi, struct xgbe_channel,
						    napi);
	struct xgbe_prv_data *pdata = channel->pdata;
	int processed = 0;

	DBGPR("-->xgbe_one_poll: budget=%d\n", budget);

	/* Cleanup Tx ring first */
	xgbe_tx_poll(channel);

	/* Process Rx ring next */
	processed = xgbe_rx_poll(channel, budget);

	/* If we processed everything, we are done */
	if ((processed < budget) && napi_complete_done(napi, processed)) {
		/* Enable Tx and Rx interrupts */
		if (pdata->channel_irq_mode)
			xgbe_enable_rx_tx_int(pdata, channel);
		else
			enable_irq(channel->dma_irq);
	}

	DBGPR("<--xgbe_one_poll: received = %d\n", processed);

	return processed;
}
static int xgbe_all_poll(struct napi_struct *napi, int budget)
{
	struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
						   napi);
	struct xgbe_channel *channel;
	int ring_budget;
	int processed, last_processed;
	unsigned int i;

	DBGPR("-->xgbe_all_poll: budget=%d\n", budget);

	processed = 0;
	ring_budget = budget / pdata->rx_ring_count;
	do {
		last_processed = processed;

		for (i = 0; i < pdata->channel_count; i++) {
			channel = pdata->channel[i];

			/* Cleanup Tx ring first */
			xgbe_tx_poll(channel);

			/* Process Rx ring next */
			if (ring_budget > (budget - processed))
				ring_budget = budget - processed;
			processed += xgbe_rx_poll(channel, ring_budget);
		}
	} while ((processed < budget) && (processed != last_processed));

	/* If we processed everything, we are done */
	if ((processed < budget) && napi_complete_done(napi, processed)) {
		/* Enable Tx and Rx interrupts */
		xgbe_enable_rx_tx_ints(pdata);
	}

	DBGPR("<--xgbe_all_poll: received = %d\n", processed);

	return processed;
}
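
/* Budget split above: each Rx ring gets budget / rx_ring_count per pass
 * (e.g. a budget of 64 across 4 rings allows 16 packets per ring), and
 * the do/while re-scans all channels until either the budget is spent or
 * a full pass makes no progress.
 */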
void xgbe_dump_tx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
		       unsigned int idx, unsigned int count, unsigned int flag)
{
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;

	while (count--) {
		rdata = XGBE_GET_DESC_DATA(ring, idx);
		rdesc = rdata->rdesc;
		netdev_dbg(pdata->netdev,
			   "TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
			   (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
			   le32_to_cpu(rdesc->desc0),
			   le32_to_cpu(rdesc->desc1),
			   le32_to_cpu(rdesc->desc2),
			   le32_to_cpu(rdesc->desc3));
		idx++;
	}
}
void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
		       unsigned int idx)
{
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;

	rdata = XGBE_GET_DESC_DATA(ring, idx);
	rdesc = rdata->rdesc;
	netdev_dbg(pdata->netdev,
		   "RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n",
		   idx, le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
		   le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
}
void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
{
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	unsigned char buffer[128];
	unsigned int i;

	netdev_dbg(netdev, "\n************** SKB dump ****************\n");

	netdev_dbg(netdev, "%s packet of %d bytes\n",
		   (tx_rx ? "TX" : "RX"), skb->len);

	netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
	netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
	netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));

	for (i = 0; i < skb->len; i += 32) {
		unsigned int len = min(skb->len - i, 32U);

		hex_dump_to_buffer(&skb->data[i], len, 32, 1,
				   buffer, sizeof(buffer), false);
		netdev_dbg(netdev, "  %#06x: %s\n", i, buffer);
	}

	netdev_dbg(netdev, "\n************** SKB dump ****************\n");
}