// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include <linux/phylink.h>
#include <linux/udp.h>
#include <linux/bpf_trace.h>
#include <net/page_pool/helpers.h>
#include <net/pkt_cls.h>
#include <net/xdp_sock_drv.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include "stmmac_xdp.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"
#include "hwif.h"

/* As long as the interface is active, we keep the timestamping counter enabled
 * with fine resolution and binary rollover. This avoids non-monotonic behavior
 * (clock jumps) when changing timestamping settings at runtime.
 */
#define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
				 PTP_TCR_TSCTRLSSR)

#define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
#define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
#define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)

/* Limit to make sure XDP TX and slow path can coexist */
#define STMMAC_XSK_TX_BUDGET_MAX	256
#define STMMAC_TX_XSK_AVAIL		16
#define STMMAC_RX_FILL_BATCH		16

#define STMMAC_XDP_PASS		0
#define STMMAC_XDP_CONSUMED	BIT(0)
#define STMMAC_XDP_TX		BIT(1)
#define STMMAC_XDP_REDIRECT	BIT(2)
static int flow_ctrl = FLOW_AUTO;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))

/* By default the driver will use the ring mode to manage tx and rx descriptors,
 * but allow user to force to use the chain instead of the ring
 */
static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
/* For MSI interrupts handling */
static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
static void stmmac_reset_queues_param(struct stmmac_priv *priv);
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
					  u32 rxmode, u32 chan);

#ifdef CONFIG_DEBUG_FS
static const struct net_device_ops stmmac_netdev_ops;
static void stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))

int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
{
	int ret = 0;

	if (enabled) {
		ret = clk_prepare_enable(priv->plat->stmmac_clk);
		if (ret)
			return ret;
		ret = clk_prepare_enable(priv->plat->pclk);
		if (ret) {
			clk_disable_unprepare(priv->plat->stmmac_clk);
			return ret;
		}
		if (priv->plat->clks_config) {
			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
			if (ret) {
				clk_disable_unprepare(priv->plat->stmmac_clk);
				clk_disable_unprepare(priv->plat->pclk);
				return ret;
			}
		}
	} else {
		clk_disable_unprepare(priv->plat->stmmac_clk);
		clk_disable_unprepare(priv->plat->pclk);
		if (priv->plat->clks_config)
			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
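
/* Usage sketch (editorial, not part of the original file): the suspend/resume
 * and runtime-PM paths are the typical callers of the helper above, e.g.
 *
 *	ret = stmmac_bus_clks_config(priv, true);	// enable bus clocks
 *	if (ret)
 *		return ret;
 *	...
 *	stmmac_bus_clks_config(priv, false);		// disable on teardown
 *
 * The "enabled" argument selects between the prepare/enable and the
 * disable/unprepare branches.
 */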
/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and set a default in case of
 * errors.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (stmmac_xdp_is_enabled(priv) &&
		    test_bit(queue, priv->af_xdp_zc_qps)) {
			napi_disable(&ch->rxtx_napi);
			continue;
		}

		if (queue < rx_queues_cnt)
			napi_disable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_disable(&ch->tx_napi);
	}
}

/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	struct stmmac_rx_queue *rx_q;
	u32 queue;

	/* synchronize_rcu() needed for pending XDP buffers to drain */
	for (queue = 0; queue < rx_queues_cnt; queue++) {
		rx_q = &priv->dma_conf.rx_queue[queue];
		if (rx_q->xsk_pool) {
			synchronize_rcu();
			break;
		}
	}

	__stmmac_disable_all_queues(priv);
}

/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (stmmac_xdp_is_enabled(priv) &&
		    test_bit(queue, priv->af_xdp_zc_qps)) {
			napi_enable(&ch->rxtx_napi);
			continue;
		}

		if (queue < rx_queues_cnt)
			napi_enable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_enable(&ch->tx_napi);
	}
}

static void stmmac_service_event_schedule(struct stmmac_priv *priv)
{
	if (!test_bit(STMMAC_DOWN, &priv->state) &&
	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
		queue_work(priv->wq, &priv->service_task);
}

static void stmmac_global_err(struct stmmac_priv *priv)
{
	netif_carrier_off(priv->dev);
	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
	stmmac_service_event_schedule(priv);
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Viceversa the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* Platform provided default clk_csr would be assumed valid
	 * for all other cases except for the below mentioned ones.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we can not estimate the proper divider as it is not known
	 * the frequency of clk_csr_i. So we do not change the default
	 * divider.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}

	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
		if (clk_rate > 160000000)
			priv->clk_csr = 0x03;
		else if (clk_rate > 80000000)
			priv->clk_csr = 0x02;
		else if (clk_rate > 40000000)
			priv->clk_csr = 0x01;
		else
			priv->clk_csr = 0;
	}

	if (priv->plat->has_xgmac) {
		if (clk_rate > 400000000)
			priv->clk_csr = 0x5;
		else if (clk_rate > 350000000)
			priv->clk_csr = 0x4;
		else if (clk_rate > 300000000)
			priv->clk_csr = 0x3;
		else if (clk_rate > 250000000)
			priv->clk_csr = 0x2;
		else if (clk_rate > 150000000)
			priv->clk_csr = 0x1;
		else
			priv->clk_csr = 0x0;
	}
}
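
/* Worked example (editorial, illustrative values): with a 75 MHz CSR clock and
 * no fixed clk_csr from the platform, the selection above picks
 * STMMAC_CSR_60_100M, i.e. MDC = csr_clk / 42 (roughly 1.8 MHz), which stays
 * below the 2.5 MHz MDIO limit of IEEE 802.3.
 */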
static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	u32 avail;

	if (tx_q->dirty_tx > tx_q->cur_tx)
		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
	else
		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;

	return avail;
}
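
/* Example (editorial) of the two cases above: with dma_tx_size = 512,
 * cur_tx = 10 and dirty_tx = 5 (producer ahead of the cleaner) the second
 * formula gives avail = 512 - 10 + 5 - 1 = 506; when cur_tx has wrapped and
 * dirty_tx > cur_tx, the first formula applies. The "- 1" keeps one slot
 * unused so that a full ring can be told apart from an empty one.
 */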
/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
	u32 dirty;

	if (rx_q->dirty_rx <= rx_q->cur_rx)
		dirty = rx_q->cur_rx - rx_q->dirty_rx;
	else
		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;

	return dirty;
}

static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
{
	int tx_lpi_timer;

	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
	priv->eee_sw_timer_en = en ? 0 : 1;
	tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
}

/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function is to verify and enter in LPI mode in case of
 * EEE.
 */
static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* check if all TX queues have the work finished */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];

		if (tx_q->dirty_tx != tx_q->cur_tx)
			return -EBUSY; /* still unfinished work */
	}

	/* Check and enter in LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		stmmac_set_eee_mode(priv, priv->hw,
			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
	return 0;
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function is to exit and disable EEE in case of
 * LPI state is true. This is called by the xmit.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	if (!priv->eee_sw_timer_en) {
		stmmac_lpi_entry_timer_config(priv, 0);
		return;
	}

	stmmac_reset_eee_mode(priv, priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t:  timer_list struct containing private info
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

	if (stmmac_enable_eee_mode(priv))
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enable the LPI state and start related
 *  timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	int eee_tw_timer = priv->eee_tw_timer;

	/* Using PCS we cannot dial with the phy registers at this stage
	 * so we do not support extra feature like EEE.
	 */
	if (priv->hw->pcs == STMMAC_PCS_TBI ||
	    priv->hw->pcs == STMMAC_PCS_RTBI)
		return false;

	/* Check if MAC core supports the EEE feature. */
	if (!priv->dma_cap.eee)
		return false;

	mutex_lock(&priv->lock);

	/* Check if it needs to be deactivated */
	if (!priv->eee_active) {
		if (priv->eee_enabled) {
			netdev_dbg(priv->dev, "disable EEE\n");
			stmmac_lpi_entry_timer_config(priv, 0);
			del_timer_sync(&priv->eee_ctrl_timer);
			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
			if (priv->hw->xpcs)
				xpcs_config_eee(priv->hw->xpcs,
						priv->plat->mult_fact_100ns,
						false);
		}
		mutex_unlock(&priv->lock);
		return false;
	}

	if (priv->eee_active && !priv->eee_enabled) {
		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
				     eee_tw_timer);
		if (priv->hw->xpcs)
			xpcs_config_eee(priv->hw->xpcs,
					priv->plat->mult_fact_100ns,
					true);
	}

	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
		del_timer_sync(&priv->eee_ctrl_timer);
		priv->tx_path_in_lpi_mode = false;
		stmmac_lpi_entry_timer_config(priv, 1);
	} else {
		stmmac_lpi_entry_timer_config(priv, 0);
		mod_timer(&priv->eee_ctrl_timer,
			  STMMAC_LPI_T(priv->tx_lpi_timer));
	}

	mutex_unlock(&priv->lock);
	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	return true;
}

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read timestamp from the descriptor & pass it to stack.
 * and also perform some sanity checks.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	bool found = false;
	u64 ns = 0;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status */
	if (stmmac_get_tx_timestamp_status(priv, p)) {
		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
		found = true;
	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
		found = true;
	}

	if (found) {
		ns -= priv->plat->cdc_error_adj;

		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read received packet's timestamp from the descriptor
 * and pass it to stack. It also perform some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	struct dma_desc *desc = p;
	u64 ns = 0;

	if (!priv->hwts_rx_en)
		return;
	/* For GMAC4, the valid timestamp is from CTX next desc. */
	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
		desc = np;

	/* Check if timestamp is available */
	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);

		ns -= priv->plat->cdc_error_adj;

		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
	}
}

/**
 *  stmmac_hwtstamp_set - control hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing(TX)
 *  and incoming(RX) packets time stamping based on user input.
 *  Return Value:
 *  0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit14 and bits17/16 to 01
			 * This leaves Delay_Req timestamps out.
			 * Enable all events *and* general purpose message
			 * timestamping
			 */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			if (priv->synopsys_id < DWMAC_CORE_4_10)
				ts_event_en = PTP_TCR_TSEVNTENA;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	priv->systime_flags = STMMAC_HWTS_ACTIVE;

	if (priv->hwts_tx_en || priv->hwts_rx_en) {
		priv->systime_flags |= tstamp_all | ptp_v2 |
				       ptp_over_ethernet | ptp_over_ipv6_udp |
				       ptp_over_ipv4_udp | ts_event_en |
				       ts_master_en | snap_type_sel;
	}

	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);

	memcpy(&priv->tstamp_config, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}
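
/* Userspace reaches the handler above through the SIOCSHWTSTAMP ioctl
 * (normally via tools such as hwstamp_ctl or ptp4l). A minimal sketch
 * (editorial, assuming ifr.ifr_name already holds the interface name):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * The cfg.rx_filter copied back reports the filter actually programmed,
 * which may be broader than the one requested.
 */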
/**
 *  stmmac_hwtstamp_get - read hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function obtain the current hardware timestamping settings
 *  as requested.
 */
static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config *config = &priv->tstamp_config;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, config,
			    sizeof(*config)) ? -EFAULT : 0;
}

/**
 * stmmac_init_tstamp_counter - init hardware timestamping counter
 * @priv: driver private structure
 * @systime_flags: timestamping flags
 * Description:
 * Initialize hardware counter for packet timestamping.
 * This is valid as long as the interface is open and not suspended.
 * Will be rerun after resuming from suspend, case in which the timestamping
 * flags updated by stmmac_hwtstamp_set() also need to be restored.
 */
int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
{
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	struct timespec64 now;
	u32 sec_inc = 0;
	u64 temp = 0;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
	priv->systime_flags = systime_flags;

	/* program Sub Second Increment reg */
	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
					   priv->plat->clk_ptp_rate,
					   xmac, &sec_inc);
	temp = div_u64(1000000000ULL, sec_inc);

	/* Store sub second increment for later use */
	priv->sub_second_inc = sec_inc;

	/* calculate default added value:
	 * formula is :
	 * addend = (2^32)/freq_div_ratio;
	 * where, freq_div_ratio = 1e9ns/sec_inc
	 */
	temp = (u64)(temp << 32);
	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);

	/* initialize system time */
	ktime_get_real_ts64(&now);

	/* lower 32 bits of tv_sec are safe until y2106 */
	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
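
/* Worked example of the addend formula above (editorial, illustrative numbers
 * only): with clk_ptp_rate = 62.5 MHz and sec_inc = 20 ns, freq_div_ratio is
 * 1e9 / 20 = 5e7, so default_addend = (5e7 << 32) / 62.5e6 = 0.8 * 2^32
 * (about 0xCCCCCCCC). The 32-bit accumulator then overflows once every
 * 2^32 / addend = 1.25 PTP clock cycles, i.e. every 1.25 * 16 ns = 20 ns of
 * wall time, matching the programmed sub-second increment.
 */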
/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	int ret;

	if (priv->plat->ptp_clk_freq_config)
		priv->plat->ptp_clk_freq_config(priv);

	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
	if (ret)
		return ret;

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
	if (xmac && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
		stmmac_hwtstamp_correct_latency(priv, priv);

	return 0;
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}

/**
 *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 *  @priv: driver private structure
 *  @duplex: duplex passed to the next function
 *  Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
			 priv->pause, tx_cnt);
}

static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
						 phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	if (priv->hw->xpcs)
		return &priv->hw->xpcs->pcs;

	if (priv->hw->lynx_pcs)
		return priv->hw->lynx_pcs;

	return NULL;
}

static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	/* Nothing to do, xpcs_config() handles everything */
}

static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
{
	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
	bool *hs_enable = &fpe_cfg->hs_enable;

	if (is_up && *hs_enable) {
		stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
					MPACKET_VERIFY);
	} else {
		*lo_state = FPE_STATE_OFF;
		*lp_state = FPE_STATE_OFF;
	}
}

static void stmmac_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_mac_set(priv, priv->ioaddr, false);
	priv->eee_active = false;
	priv->tx_lpi_enabled = false;
	priv->eee_enabled = stmmac_eee_init(priv);
	stmmac_set_eee_pls(priv, priv->hw, false);

	if (priv->dma_cap.fpesel)
		stmmac_fpe_link_state_handle(priv, false);
}

static void stmmac_mac_link_up(struct phylink_config *config,
			       struct phy_device *phy,
			       unsigned int mode, phy_interface_t interface,
			       int speed, int duplex,
			       bool tx_pause, bool rx_pause)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	u32 old_ctrl, ctrl;

	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
	    priv->plat->serdes_powerup)
		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);

	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
	ctrl = old_ctrl & ~priv->hw->link.speed_mask;

	if (interface == PHY_INTERFACE_MODE_USXGMII) {
		switch (speed) {
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_5000:
			ctrl |= priv->hw->link.xgmii.speed5000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.xgmii.speed2500;
			break;
		default:
			return;
		}
	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
		switch (speed) {
		case SPEED_100000:
			ctrl |= priv->hw->link.xlgmii.speed100000;
			break;
		case SPEED_50000:
			ctrl |= priv->hw->link.xlgmii.speed50000;
			break;
		case SPEED_40000:
			ctrl |= priv->hw->link.xlgmii.speed40000;
			break;
		case SPEED_25000:
			ctrl |= priv->hw->link.xlgmii.speed25000;
			break;
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		default:
			return;
		}
	} else {
		switch (speed) {
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		case SPEED_100:
			ctrl |= priv->hw->link.speed100;
			break;
		case SPEED_10:
			ctrl |= priv->hw->link.speed10;
			break;
		default:
			return;
		}
	}

	priv->speed = speed;

	if (priv->plat->fix_mac_speed)
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);

	if (!duplex)
		ctrl &= ~priv->hw->link.duplex;
	else
		ctrl |= priv->hw->link.duplex;

	/* Flow Control operation */
	if (rx_pause && tx_pause)
		priv->flow_ctrl = FLOW_AUTO;
	else if (rx_pause && !tx_pause)
		priv->flow_ctrl = FLOW_RX;
	else if (!rx_pause && tx_pause)
		priv->flow_ctrl = FLOW_TX;
	else
		priv->flow_ctrl = FLOW_OFF;

	stmmac_mac_flow_ctrl(priv, duplex);

	if (ctrl != old_ctrl)
		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

	stmmac_mac_set(priv, priv->ioaddr, true);
	if (phy && priv->dma_cap.eee) {
		priv->eee_active =
			phy_init_eee(phy, !(priv->plat->flags &
				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
		priv->eee_enabled = stmmac_eee_init(priv);
		priv->tx_lpi_enabled = priv->eee_enabled;
		stmmac_set_eee_pls(priv, priv->hw, true);
	}

	if (priv->dma_cap.fpesel)
		stmmac_fpe_link_state_handle(priv, true);

	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
		stmmac_hwtstamp_correct_latency(priv, priv);
}

static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
	.mac_select_pcs = stmmac_mac_select_pcs,
	.mac_config = stmmac_mac_config,
	.mac_link_down = stmmac_mac_link_down,
	.mac_link_up = stmmac_mac_link_up,
};

/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
	int interface = priv->plat->mac_interface;

	if (priv->dma_cap.pcs) {
		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_RGMII;
		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_SGMII;
		}
	}
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 *  Return value:
 *  0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct fwnode_handle *phy_fwnode;
	struct fwnode_handle *fwnode;
	int ret;

	if (!phylink_expects_phy(priv->phylink))
		return 0;

	fwnode = priv->plat->port_node;
	if (!fwnode)
		fwnode = dev_fwnode(priv->device);

	if (fwnode)
		phy_fwnode = fwnode_get_phy_node(fwnode);
	else
		phy_fwnode = NULL;

	/* Some DT bindings do not set-up the PHY handle. Let's try to
	 * manually parse it
	 */
	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
		int addr = priv->plat->phy_addr;
		struct phy_device *phydev;

		if (addr < 0) {
			netdev_err(priv->dev, "no phy found\n");
			return -ENODEV;
		}

		phydev = mdiobus_get_phy(priv->mii, addr);
		if (!phydev) {
			netdev_err(priv->dev, "no phy at addr %d\n", addr);
			return -ENODEV;
		}

		ret = phylink_connect_phy(priv->phylink, phydev);
	} else {
		fwnode_handle_put(phy_fwnode);
		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
	}

	if (!priv->plat->pmt) {
		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };

		phylink_ethtool_get_wol(priv->phylink, &wol);
		device_set_wakeup_capable(priv->device, !!wol.supported);
		device_set_wakeup_enable(priv->device, !!wol.wolopts);
	}

	return ret;
}

static void stmmac_set_half_duplex(struct stmmac_priv *priv)
{
	/* Half-Duplex can only work with single tx queue */
	if (priv->plat->tx_queues_to_use > 1)
		priv->phylink_config.mac_capabilities &=
			~(MAC_10HD | MAC_100HD | MAC_1000HD);
	else
		priv->phylink_config.mac_capabilities |=
			(MAC_10HD | MAC_100HD | MAC_1000HD);
}

static int stmmac_phy_setup(struct stmmac_priv *priv)
{
	struct stmmac_mdio_bus_data *mdio_bus_data;
	int mode = priv->plat->phy_interface;
	struct fwnode_handle *fwnode;
	struct phylink *phylink;
	int max_speed;

	priv->phylink_config.dev = &priv->dev->dev;
	priv->phylink_config.type = PHYLINK_NETDEV;
	priv->phylink_config.mac_managed_pm = true;

	mdio_bus_data = priv->plat->mdio_bus_data;
	if (mdio_bus_data)
		priv->phylink_config.ovr_an_inband =
			mdio_bus_data->xpcs_an_inband;

	/* Set the platform/firmware specified interface mode. Note, phylink
	 * deals with the PHY interface mode, not the MAC interface mode.
	 */
	__set_bit(mode, priv->phylink_config.supported_interfaces);

	/* If we have an xpcs, it defines which PHY interfaces are supported. */
	if (priv->hw->xpcs)
		xpcs_get_interfaces(priv->hw->xpcs,
				    priv->phylink_config.supported_interfaces);

	priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
						MAC_10FD | MAC_100FD |
						MAC_1000FD;

	stmmac_set_half_duplex(priv);

	/* Get the MAC specific capabilities */
	stmmac_mac_phylink_get_caps(priv);

	max_speed = priv->plat->max_speed;
	if (max_speed)
		phylink_limit_mac_speed(&priv->phylink_config, max_speed);

	fwnode = priv->plat->port_node;
	if (!fwnode)
		fwnode = dev_fwnode(priv->device);

	phylink = phylink_create(&priv->phylink_config, fwnode,
				 mode, &stmmac_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	priv->phylink = phylink;
	return 0;
}

static void stmmac_display_rx_rings(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	unsigned int desc_size;
	void *head_rx;
	u32 queue;

	/* Display RX rings */
	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];

		pr_info("\tRX Queue %u rings\n", queue);

		if (priv->extend_desc) {
			head_rx = (void *)rx_q->dma_erx;
			desc_size = sizeof(struct dma_extended_desc);
		} else {
			head_rx = (void *)rx_q->dma_rx;
			desc_size = sizeof(struct dma_desc);
		}

		/* Display RX ring */
		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
				    rx_q->dma_rx_phy, desc_size);
	}
}

static void stmmac_display_tx_rings(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	unsigned int desc_size;
	void *head_tx;
	u32 queue;

	/* Display TX rings */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];

		pr_info("\tTX Queue %d rings\n", queue);

		if (priv->extend_desc) {
			head_tx = (void *)tx_q->dma_etx;
			desc_size = sizeof(struct dma_extended_desc);
		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
			head_tx = (void *)tx_q->dma_entx;
			desc_size = sizeof(struct dma_edesc);
		} else {
			head_tx = (void *)tx_q->dma_tx;
			desc_size = sizeof(struct dma_desc);
		}

		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
				    tx_q->dma_tx_phy, desc_size);
	}
}

static void stmmac_display_rings(struct stmmac_priv *priv,
				 struct stmmac_dma_conf *dma_conf)
{
	/* Display RX ring */
	stmmac_display_rx_rings(priv, dma_conf);

	/* Display TX ring */
	stmmac_display_tx_rings(priv, dma_conf);
}

static int stmmac_set_bfsize(int mtu, int bufsize)
{
	int ret = bufsize;

	if (mtu >= BUF_SIZE_8KiB)
		ret = BUF_SIZE_16KiB;
	else if (mtu >= BUF_SIZE_4KiB)
		ret = BUF_SIZE_8KiB;
	else if (mtu >= BUF_SIZE_2KiB)
		ret = BUF_SIZE_4KiB;
	else if (mtu > DEFAULT_BUFSIZE)
		ret = BUF_SIZE_2KiB;
	else
		ret = DEFAULT_BUFSIZE;

	return ret;
}
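
/* Resulting MTU to DMA buffer size mapping, for reference (editorial): a
 * standard 1500-byte MTU keeps DEFAULT_BUFSIZE (1536 bytes), an MTU of 3000
 * selects BUF_SIZE_4KiB, and jumbo MTUs of 8 KiB or more use BUF_SIZE_16KiB.
 */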
1352 * stmmac_clear_rx_descriptors - clear RX descriptors
1353 * @priv: driver private structure
1354 * @dma_conf: structure to take the dma data
1355 * @queue: RX queue index
1356 * Description: this function is called to clear the RX descriptors
1357 * in case of both basic and extended descriptors are used.
1359 static void stmmac_clear_rx_descriptors(struct stmmac_priv
*priv
,
1360 struct stmmac_dma_conf
*dma_conf
,
1363 struct stmmac_rx_queue
*rx_q
= &dma_conf
->rx_queue
[queue
];
1366 /* Clear the RX descriptors */
1367 for (i
= 0; i
< dma_conf
->dma_rx_size
; i
++)
1368 if (priv
->extend_desc
)
1369 stmmac_init_rx_desc(priv
, &rx_q
->dma_erx
[i
].basic
,
1370 priv
->use_riwt
, priv
->mode
,
1371 (i
== dma_conf
->dma_rx_size
- 1),
1372 dma_conf
->dma_buf_sz
);
1374 stmmac_init_rx_desc(priv
, &rx_q
->dma_rx
[i
],
1375 priv
->use_riwt
, priv
->mode
,
1376 (i
== dma_conf
->dma_rx_size
- 1),
1377 dma_conf
->dma_buf_sz
);
1381 * stmmac_clear_tx_descriptors - clear tx descriptors
1382 * @priv: driver private structure
1383 * @dma_conf: structure to take the dma data
1384 * @queue: TX queue index.
1385 * Description: this function is called to clear the TX descriptors
1386 * in case of both basic and extended descriptors are used.
1388 static void stmmac_clear_tx_descriptors(struct stmmac_priv
*priv
,
1389 struct stmmac_dma_conf
*dma_conf
,
1392 struct stmmac_tx_queue
*tx_q
= &dma_conf
->tx_queue
[queue
];
1395 /* Clear the TX descriptors */
1396 for (i
= 0; i
< dma_conf
->dma_tx_size
; i
++) {
1397 int last
= (i
== (dma_conf
->dma_tx_size
- 1));
1400 if (priv
->extend_desc
)
1401 p
= &tx_q
->dma_etx
[i
].basic
;
1402 else if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
1403 p
= &tx_q
->dma_entx
[i
].basic
;
1405 p
= &tx_q
->dma_tx
[i
];
1407 stmmac_init_tx_desc(priv
, p
, priv
->mode
, last
);
1412 * stmmac_clear_descriptors - clear descriptors
1413 * @priv: driver private structure
1414 * @dma_conf: structure to take the dma data
1415 * Description: this function is called to clear the TX and RX descriptors
1416 * in case of both basic and extended descriptors are used.
1418 static void stmmac_clear_descriptors(struct stmmac_priv
*priv
,
1419 struct stmmac_dma_conf
*dma_conf
)
1421 u32 rx_queue_cnt
= priv
->plat
->rx_queues_to_use
;
1422 u32 tx_queue_cnt
= priv
->plat
->tx_queues_to_use
;
1425 /* Clear the RX descriptors */
1426 for (queue
= 0; queue
< rx_queue_cnt
; queue
++)
1427 stmmac_clear_rx_descriptors(priv
, dma_conf
, queue
);
1429 /* Clear the TX descriptors */
1430 for (queue
= 0; queue
< tx_queue_cnt
; queue
++)
1431 stmmac_clear_tx_descriptors(priv
, dma_conf
, queue
);
1435 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1436 * @priv: driver private structure
1437 * @dma_conf: structure to take the dma data
1438 * @p: descriptor pointer
1439 * @i: descriptor index
1441 * @queue: RX queue index
1442 * Description: this function is called to allocate a receive buffer, perform
1443 * the DMA mapping and init the descriptor.
1445 static int stmmac_init_rx_buffers(struct stmmac_priv
*priv
,
1446 struct stmmac_dma_conf
*dma_conf
,
1448 int i
, gfp_t flags
, u32 queue
)
1450 struct stmmac_rx_queue
*rx_q
= &dma_conf
->rx_queue
[queue
];
1451 struct stmmac_rx_buffer
*buf
= &rx_q
->buf_pool
[i
];
1452 gfp_t gfp
= (GFP_ATOMIC
| __GFP_NOWARN
);
1454 if (priv
->dma_cap
.host_dma_width
<= 32)
1458 buf
->page
= page_pool_alloc_pages(rx_q
->page_pool
, gfp
);
1461 buf
->page_offset
= stmmac_rx_offset(priv
);
1464 if (priv
->sph
&& !buf
->sec_page
) {
1465 buf
->sec_page
= page_pool_alloc_pages(rx_q
->page_pool
, gfp
);
1469 buf
->sec_addr
= page_pool_get_dma_addr(buf
->sec_page
);
1470 stmmac_set_desc_sec_addr(priv
, p
, buf
->sec_addr
, true);
1472 buf
->sec_page
= NULL
;
1473 stmmac_set_desc_sec_addr(priv
, p
, buf
->sec_addr
, false);
1476 buf
->addr
= page_pool_get_dma_addr(buf
->page
) + buf
->page_offset
;
1478 stmmac_set_desc_addr(priv
, p
, buf
->addr
);
1479 if (dma_conf
->dma_buf_sz
== BUF_SIZE_16KiB
)
1480 stmmac_init_desc3(priv
, p
);
1486 * stmmac_free_rx_buffer - free RX dma buffers
1487 * @priv: private structure
1491 static void stmmac_free_rx_buffer(struct stmmac_priv
*priv
,
1492 struct stmmac_rx_queue
*rx_q
,
1495 struct stmmac_rx_buffer
*buf
= &rx_q
->buf_pool
[i
];
1498 page_pool_put_full_page(rx_q
->page_pool
, buf
->page
, false);
1502 page_pool_put_full_page(rx_q
->page_pool
, buf
->sec_page
, false);
1503 buf
->sec_page
= NULL
;
1507 * stmmac_free_tx_buffer - free RX dma buffers
1508 * @priv: private structure
1509 * @dma_conf: structure to take the dma data
1510 * @queue: RX queue index
1513 static void stmmac_free_tx_buffer(struct stmmac_priv
*priv
,
1514 struct stmmac_dma_conf
*dma_conf
,
1517 struct stmmac_tx_queue
*tx_q
= &dma_conf
->tx_queue
[queue
];
1519 if (tx_q
->tx_skbuff_dma
[i
].buf
&&
1520 tx_q
->tx_skbuff_dma
[i
].buf_type
!= STMMAC_TXBUF_T_XDP_TX
) {
1521 if (tx_q
->tx_skbuff_dma
[i
].map_as_page
)
1522 dma_unmap_page(priv
->device
,
1523 tx_q
->tx_skbuff_dma
[i
].buf
,
1524 tx_q
->tx_skbuff_dma
[i
].len
,
1527 dma_unmap_single(priv
->device
,
1528 tx_q
->tx_skbuff_dma
[i
].buf
,
1529 tx_q
->tx_skbuff_dma
[i
].len
,
1533 if (tx_q
->xdpf
[i
] &&
1534 (tx_q
->tx_skbuff_dma
[i
].buf_type
== STMMAC_TXBUF_T_XDP_TX
||
1535 tx_q
->tx_skbuff_dma
[i
].buf_type
== STMMAC_TXBUF_T_XDP_NDO
)) {
1536 xdp_return_frame(tx_q
->xdpf
[i
]);
1537 tx_q
->xdpf
[i
] = NULL
;
1540 if (tx_q
->tx_skbuff_dma
[i
].buf_type
== STMMAC_TXBUF_T_XSK_TX
)
1541 tx_q
->xsk_frames_done
++;
1543 if (tx_q
->tx_skbuff
[i
] &&
1544 tx_q
->tx_skbuff_dma
[i
].buf_type
== STMMAC_TXBUF_T_SKB
) {
1545 dev_kfree_skb_any(tx_q
->tx_skbuff
[i
]);
1546 tx_q
->tx_skbuff
[i
] = NULL
;
1549 tx_q
->tx_skbuff_dma
[i
].buf
= 0;
1550 tx_q
->tx_skbuff_dma
[i
].map_as_page
= false;
1554 * dma_free_rx_skbufs - free RX dma buffers
1555 * @priv: private structure
1556 * @dma_conf: structure to take the dma data
1557 * @queue: RX queue index
1559 static void dma_free_rx_skbufs(struct stmmac_priv
*priv
,
1560 struct stmmac_dma_conf
*dma_conf
,
1563 struct stmmac_rx_queue
*rx_q
= &dma_conf
->rx_queue
[queue
];
1566 for (i
= 0; i
< dma_conf
->dma_rx_size
; i
++)
1567 stmmac_free_rx_buffer(priv
, rx_q
, i
);
1570 static int stmmac_alloc_rx_buffers(struct stmmac_priv
*priv
,
1571 struct stmmac_dma_conf
*dma_conf
,
1572 u32 queue
, gfp_t flags
)
1574 struct stmmac_rx_queue
*rx_q
= &dma_conf
->rx_queue
[queue
];
1577 for (i
= 0; i
< dma_conf
->dma_rx_size
; i
++) {
1581 if (priv
->extend_desc
)
1582 p
= &((rx_q
->dma_erx
+ i
)->basic
);
1584 p
= rx_q
->dma_rx
+ i
;
1586 ret
= stmmac_init_rx_buffers(priv
, dma_conf
, p
, i
, flags
,
1591 rx_q
->buf_alloc_num
++;
1598 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1599 * @priv: private structure
1600 * @dma_conf: structure to take the dma data
1601 * @queue: RX queue index
1603 static void dma_free_rx_xskbufs(struct stmmac_priv
*priv
,
1604 struct stmmac_dma_conf
*dma_conf
,
1607 struct stmmac_rx_queue
*rx_q
= &dma_conf
->rx_queue
[queue
];
1610 for (i
= 0; i
< dma_conf
->dma_rx_size
; i
++) {
1611 struct stmmac_rx_buffer
*buf
= &rx_q
->buf_pool
[i
];
1616 xsk_buff_free(buf
->xdp
);
1621 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv
*priv
,
1622 struct stmmac_dma_conf
*dma_conf
,
1625 struct stmmac_rx_queue
*rx_q
= &dma_conf
->rx_queue
[queue
];
1628 /* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
1629 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1630 * use this macro to make sure no size violations.
1632 XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff
);
1634 for (i
= 0; i
< dma_conf
->dma_rx_size
; i
++) {
1635 struct stmmac_rx_buffer
*buf
;
1636 dma_addr_t dma_addr
;
1639 if (priv
->extend_desc
)
1640 p
= (struct dma_desc
*)(rx_q
->dma_erx
+ i
);
1642 p
= rx_q
->dma_rx
+ i
;
1644 buf
= &rx_q
->buf_pool
[i
];
1646 buf
->xdp
= xsk_buff_alloc(rx_q
->xsk_pool
);
1650 dma_addr
= xsk_buff_xdp_get_dma(buf
->xdp
);
1651 stmmac_set_desc_addr(priv
, p
, dma_addr
);
1652 rx_q
->buf_alloc_num
++;
1658 static struct xsk_buff_pool
*stmmac_get_xsk_pool(struct stmmac_priv
*priv
, u32 queue
)
1660 if (!stmmac_xdp_is_enabled(priv
) || !test_bit(queue
, priv
->af_xdp_zc_qps
))
1663 return xsk_get_pool_from_qid(priv
->dev
, queue
);
1667 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1668 * @priv: driver private structure
1669 * @dma_conf: structure to take the dma data
1670 * @queue: RX queue index
1672 * Description: this function initializes the DMA RX descriptors
1673 * and allocates the socket buffers. It supports the chained and ring
1676 static int __init_dma_rx_desc_rings(struct stmmac_priv
*priv
,
1677 struct stmmac_dma_conf
*dma_conf
,
1678 u32 queue
, gfp_t flags
)
1680 struct stmmac_rx_queue
*rx_q
= &dma_conf
->rx_queue
[queue
];
1683 netif_dbg(priv
, probe
, priv
->dev
,
1684 "(%s) dma_rx_phy=0x%08x\n", __func__
,
1685 (u32
)rx_q
->dma_rx_phy
);
1687 stmmac_clear_rx_descriptors(priv
, dma_conf
, queue
);
1689 xdp_rxq_info_unreg_mem_model(&rx_q
->xdp_rxq
);
1691 rx_q
->xsk_pool
= stmmac_get_xsk_pool(priv
, queue
);
1693 if (rx_q
->xsk_pool
) {
1694 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q
->xdp_rxq
,
1695 MEM_TYPE_XSK_BUFF_POOL
,
1697 netdev_info(priv
->dev
,
1698 "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1700 xsk_pool_set_rxq_info(rx_q
->xsk_pool
, &rx_q
->xdp_rxq
);
1702 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q
->xdp_rxq
,
1705 netdev_info(priv
->dev
,
1706 "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1710 if (rx_q
->xsk_pool
) {
1711 /* RX XDP ZC buffer pool may not be populated, e.g.
1714 stmmac_alloc_rx_buffers_zc(priv
, dma_conf
, queue
);
1716 ret
= stmmac_alloc_rx_buffers(priv
, dma_conf
, queue
, flags
);
1721 /* Setup the chained descriptor addresses */
1722 if (priv
->mode
== STMMAC_CHAIN_MODE
) {
1723 if (priv
->extend_desc
)
1724 stmmac_mode_init(priv
, rx_q
->dma_erx
,
1726 dma_conf
->dma_rx_size
, 1);
1728 stmmac_mode_init(priv
, rx_q
->dma_rx
,
1730 dma_conf
->dma_rx_size
, 0);
1736 static int init_dma_rx_desc_rings(struct net_device
*dev
,
1737 struct stmmac_dma_conf
*dma_conf
,
1740 struct stmmac_priv
*priv
= netdev_priv(dev
);
1741 u32 rx_count
= priv
->plat
->rx_queues_to_use
;
1745 /* RX INITIALIZATION */
1746 netif_dbg(priv
, probe
, priv
->dev
,
1747 "SKB addresses:\nskb\t\tskb data\tdma data\n");
1749 for (queue
= 0; queue
< rx_count
; queue
++) {
1750 ret
= __init_dma_rx_desc_rings(priv
, dma_conf
, queue
, flags
);
1752 goto err_init_rx_buffers
;
1757 err_init_rx_buffers
:
1758 while (queue
>= 0) {
1759 struct stmmac_rx_queue
*rx_q
= &dma_conf
->rx_queue
[queue
];
1762 dma_free_rx_xskbufs(priv
, dma_conf
, queue
);
1764 dma_free_rx_skbufs(priv
, dma_conf
, queue
);
1766 rx_q
->buf_alloc_num
= 0;
1767 rx_q
->xsk_pool
= NULL
;
1776 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1777 * @priv: driver private structure
1778 * @dma_conf: structure to take the dma data
1779 * @queue: TX queue index
1780 * Description: this function initializes the DMA TX descriptors
1781 * and allocates the socket buffers. It supports the chained and ring
1784 static int __init_dma_tx_desc_rings(struct stmmac_priv
*priv
,
1785 struct stmmac_dma_conf
*dma_conf
,
1788 struct stmmac_tx_queue
*tx_q
= &dma_conf
->tx_queue
[queue
];
1791 netif_dbg(priv
, probe
, priv
->dev
,
1792 "(%s) dma_tx_phy=0x%08x\n", __func__
,
1793 (u32
)tx_q
->dma_tx_phy
);
1795 /* Setup the chained descriptor addresses */
1796 if (priv
->mode
== STMMAC_CHAIN_MODE
) {
1797 if (priv
->extend_desc
)
1798 stmmac_mode_init(priv
, tx_q
->dma_etx
,
1800 dma_conf
->dma_tx_size
, 1);
1801 else if (!(tx_q
->tbs
& STMMAC_TBS_AVAIL
))
1802 stmmac_mode_init(priv
, tx_q
->dma_tx
,
1804 dma_conf
->dma_tx_size
, 0);
1807 tx_q
->xsk_pool
= stmmac_get_xsk_pool(priv
, queue
);
1809 for (i
= 0; i
< dma_conf
->dma_tx_size
; i
++) {
1812 if (priv
->extend_desc
)
1813 p
= &((tx_q
->dma_etx
+ i
)->basic
);
1814 else if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
1815 p
= &((tx_q
->dma_entx
+ i
)->basic
);
1817 p
= tx_q
->dma_tx
+ i
;
1819 stmmac_clear_desc(priv
, p
);
1821 tx_q
->tx_skbuff_dma
[i
].buf
= 0;
1822 tx_q
->tx_skbuff_dma
[i
].map_as_page
= false;
1823 tx_q
->tx_skbuff_dma
[i
].len
= 0;
1824 tx_q
->tx_skbuff_dma
[i
].last_segment
= false;
1825 tx_q
->tx_skbuff
[i
] = NULL
;
1831 static int init_dma_tx_desc_rings(struct net_device
*dev
,
1832 struct stmmac_dma_conf
*dma_conf
)
1834 struct stmmac_priv
*priv
= netdev_priv(dev
);
1838 tx_queue_cnt
= priv
->plat
->tx_queues_to_use
;
1840 for (queue
= 0; queue
< tx_queue_cnt
; queue
++)
1841 __init_dma_tx_desc_rings(priv
, dma_conf
, queue
);
1847 * init_dma_desc_rings - init the RX/TX descriptor rings
1848 * @dev: net device structure
1849 * @dma_conf: structure to take the dma data
1851 * Description: this function initializes the DMA RX/TX descriptors
1852 * and allocates the socket buffers. It supports the chained and ring
1855 static int init_dma_desc_rings(struct net_device
*dev
,
1856 struct stmmac_dma_conf
*dma_conf
,
1859 struct stmmac_priv
*priv
= netdev_priv(dev
);
1862 ret
= init_dma_rx_desc_rings(dev
, dma_conf
, flags
);
1866 ret
= init_dma_tx_desc_rings(dev
, dma_conf
);
1868 stmmac_clear_descriptors(priv
, dma_conf
);
1870 if (netif_msg_hw(priv
))
1871 stmmac_display_rings(priv
, dma_conf
);
1877 * dma_free_tx_skbufs - free TX dma buffers
1878 * @priv: private structure
1879 * @dma_conf: structure to take the dma data
1880 * @queue: TX queue index
1882 static void dma_free_tx_skbufs(struct stmmac_priv
*priv
,
1883 struct stmmac_dma_conf
*dma_conf
,
1886 struct stmmac_tx_queue
*tx_q
= &dma_conf
->tx_queue
[queue
];
1889 tx_q
->xsk_frames_done
= 0;
1891 for (i
= 0; i
< dma_conf
->dma_tx_size
; i
++)
1892 stmmac_free_tx_buffer(priv
, dma_conf
, queue
, i
);
1894 if (tx_q
->xsk_pool
&& tx_q
->xsk_frames_done
) {
1895 xsk_tx_completed(tx_q
->xsk_pool
, tx_q
->xsk_frames_done
);
1896 tx_q
->xsk_frames_done
= 0;
1897 tx_q
->xsk_pool
= NULL
;
1902 * stmmac_free_tx_skbufs - free TX skb buffers
1903 * @priv: private structure
1905 static void stmmac_free_tx_skbufs(struct stmmac_priv
*priv
)
1907 u32 tx_queue_cnt
= priv
->plat
->tx_queues_to_use
;
1910 for (queue
= 0; queue
< tx_queue_cnt
; queue
++)
1911 dma_free_tx_skbufs(priv
, &priv
->dma_conf
, queue
);
1915 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1916 * @priv: private structure
1917 * @dma_conf: structure to take the dma data
1918 * @queue: RX queue index
1920 static void __free_dma_rx_desc_resources(struct stmmac_priv
*priv
,
1921 struct stmmac_dma_conf
*dma_conf
,
1924 struct stmmac_rx_queue
*rx_q
= &dma_conf
->rx_queue
[queue
];
1926 /* Release the DMA RX socket buffers */
1928 dma_free_rx_xskbufs(priv
, dma_conf
, queue
);
1930 dma_free_rx_skbufs(priv
, dma_conf
, queue
);
1932 rx_q
->buf_alloc_num
= 0;
1933 rx_q
->xsk_pool
= NULL
;
1935 /* Free DMA regions of consistent memory previously allocated */
1936 if (!priv
->extend_desc
)
1937 dma_free_coherent(priv
->device
, dma_conf
->dma_rx_size
*
1938 sizeof(struct dma_desc
),
1939 rx_q
->dma_rx
, rx_q
->dma_rx_phy
);
1941 dma_free_coherent(priv
->device
, dma_conf
->dma_rx_size
*
1942 sizeof(struct dma_extended_desc
),
1943 rx_q
->dma_erx
, rx_q
->dma_rx_phy
);
1945 if (xdp_rxq_info_is_reg(&rx_q
->xdp_rxq
))
1946 xdp_rxq_info_unreg(&rx_q
->xdp_rxq
);
1948 kfree(rx_q
->buf_pool
);
1949 if (rx_q
->page_pool
)
1950 page_pool_destroy(rx_q
->page_pool
);
1953 static void free_dma_rx_desc_resources(struct stmmac_priv
*priv
,
1954 struct stmmac_dma_conf
*dma_conf
)
1956 u32 rx_count
= priv
->plat
->rx_queues_to_use
;
1959 /* Free RX queue resources */
1960 for (queue
= 0; queue
< rx_count
; queue
++)
1961 __free_dma_rx_desc_resources(priv
, dma_conf
, queue
);
1965 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1966 * @priv: private structure
1967 * @dma_conf: structure to take the dma data
1968 * @queue: TX queue index
1970 static void __free_dma_tx_desc_resources(struct stmmac_priv
*priv
,
1971 struct stmmac_dma_conf
*dma_conf
,
1974 struct stmmac_tx_queue
*tx_q
= &dma_conf
->tx_queue
[queue
];
1978 /* Release the DMA TX socket buffers */
1979 dma_free_tx_skbufs(priv
, dma_conf
, queue
);
1981 if (priv
->extend_desc
) {
1982 size
= sizeof(struct dma_extended_desc
);
1983 addr
= tx_q
->dma_etx
;
1984 } else if (tx_q
->tbs
& STMMAC_TBS_AVAIL
) {
1985 size
= sizeof(struct dma_edesc
);
1986 addr
= tx_q
->dma_entx
;
1988 size
= sizeof(struct dma_desc
);
1989 addr
= tx_q
->dma_tx
;
1992 size
*= dma_conf
->dma_tx_size
;
1994 dma_free_coherent(priv
->device
, size
, addr
, tx_q
->dma_tx_phy
);
1996 kfree(tx_q
->tx_skbuff_dma
);
1997 kfree(tx_q
->tx_skbuff
);
2000 static void free_dma_tx_desc_resources(struct stmmac_priv
*priv
,
2001 struct stmmac_dma_conf
*dma_conf
)
2003 u32 tx_count
= priv
->plat
->tx_queues_to_use
;
2006 /* Free TX queue resources */
2007 for (queue
= 0; queue
< tx_count
; queue
++)
2008 __free_dma_tx_desc_resources(priv
, dma_conf
, queue
);
/**
 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocated the RX socket buffer in order to
 * allow zero-copy mechanism.
 */
static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
					 struct stmmac_dma_conf *dma_conf,
					 u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	bool xdp_prog = stmmac_xdp_is_enabled(priv);
	struct page_pool_params pp_params = { 0 };
	unsigned int num_pages;
	unsigned int napi_id;
	int ret;

	rx_q->queue_index = queue;
	rx_q->priv_data = priv;

	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
	pp_params.pool_size = dma_conf->dma_rx_size;
	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
	pp_params.order = ilog2(num_pages);
	pp_params.nid = dev_to_node(priv->device);
	pp_params.dev = priv->device;
	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
	pp_params.offset = stmmac_rx_offset(priv);
	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);

	rx_q->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rx_q->page_pool)) {
		ret = PTR_ERR(rx_q->page_pool);
		rx_q->page_pool = NULL;
		return ret;
	}

	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
				 sizeof(*rx_q->buf_pool),
				 GFP_KERNEL);
	if (!rx_q->buf_pool)
		return -ENOMEM;

	if (priv->extend_desc) {
		rx_q->dma_erx = dma_alloc_coherent(priv->device,
						   dma_conf->dma_rx_size *
						   sizeof(struct dma_extended_desc),
						   &rx_q->dma_rx_phy,
						   GFP_KERNEL);
		if (!rx_q->dma_erx)
			return -ENOMEM;

	} else {
		rx_q->dma_rx = dma_alloc_coherent(priv->device,
						  dma_conf->dma_rx_size *
						  sizeof(struct dma_desc),
						  &rx_q->dma_rx_phy,
						  GFP_KERNEL);
		if (!rx_q->dma_rx)
			return -ENOMEM;
	}

	if (stmmac_xdp_is_enabled(priv) &&
	    test_bit(queue, priv->af_xdp_zc_qps))
		napi_id = ch->rxtx_napi.napi_id;
	else
		napi_id = ch->rx_napi.napi_id;

	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
			       rx_q->queue_index,
			       napi_id);
	if (ret) {
		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
		return -EINVAL;
	}

	return 0;
}
static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
				       struct stmmac_dma_conf *dma_conf)
{
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 queue;
	int ret;

	/* RX queues buffers and DMA */
	for (queue = 0; queue < rx_count; queue++) {
		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
		if (ret)
			goto err_dma;
	}

	return 0;

err_dma:
	free_dma_rx_desc_resources(priv, dma_conf);

	return ret;
}
/**
 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocated the RX socket buffer in order to
 * allow zero-copy mechanism.
 */
static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
					 struct stmmac_dma_conf *dma_conf,
					 u32 queue)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
	size_t size;
	void *addr;

	tx_q->queue_index = queue;
	tx_q->priv_data = priv;

	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
				      sizeof(*tx_q->tx_skbuff_dma),
				      GFP_KERNEL);
	if (!tx_q->tx_skbuff_dma)
		return -ENOMEM;

	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
				  sizeof(struct sk_buff *),
				  GFP_KERNEL);
	if (!tx_q->tx_skbuff)
		return -ENOMEM;

	if (priv->extend_desc)
		size = sizeof(struct dma_extended_desc);
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		size = sizeof(struct dma_edesc);
	else
		size = sizeof(struct dma_desc);

	size *= dma_conf->dma_tx_size;

	addr = dma_alloc_coherent(priv->device, size,
				  &tx_q->dma_tx_phy, GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	if (priv->extend_desc)
		tx_q->dma_etx = addr;
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		tx_q->dma_entx = addr;
	else
		tx_q->dma_tx = addr;

	return 0;
}
static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
				       struct stmmac_dma_conf *dma_conf)
{
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 queue;
	int ret;

	/* TX queues buffers and DMA */
	for (queue = 0; queue < tx_count; queue++) {
		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
		if (ret)
			goto err_dma;
	}

	return 0;

err_dma:
	free_dma_tx_desc_resources(priv, dma_conf);
	return ret;
}
/**
 * alloc_dma_desc_resources - alloc TX/RX resources.
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocated the RX socket buffer in order to
 * allow zero-copy mechanism.
 */
static int alloc_dma_desc_resources(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf)
{
	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);

	if (ret)
		return ret;

	ret = alloc_dma_tx_desc_resources(priv, dma_conf);

	return ret;
}

/**
 * free_dma_desc_resources - free dma desc resources
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 */
static void free_dma_desc_resources(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf)
{
	/* Release the DMA TX socket buffers */
	free_dma_tx_desc_resources(priv, dma_conf);

	/* Release the DMA RX socket buffers later
	 * to ensure all pending XDP_TX buffers are returned.
	 */
	free_dma_rx_desc_resources(priv, dma_conf);
}
/**
 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
 * @priv: driver private structure
 * Description: It is used for enabling the rx queues in the MAC
 */
static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	int queue;
	u8 mode;

	for (queue = 0; queue < rx_queues_count; queue++) {
		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
	}
}
/**
 * stmmac_start_rx_dma - start RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This starts a RX DMA channel
 */
static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
	stmmac_start_rx(priv, priv->ioaddr, chan);
}

/**
 * stmmac_start_tx_dma - start TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This starts a TX DMA channel
 */
static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
	stmmac_start_tx(priv, priv->ioaddr, chan);
}

/**
 * stmmac_stop_rx_dma - stop RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This stops a RX DMA channel
 */
static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
	stmmac_stop_rx(priv, priv->ioaddr, chan);
}

/**
 * stmmac_stop_tx_dma - stop TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This stops a TX DMA channel
 */
static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
	stmmac_stop_tx(priv, priv->ioaddr, chan);
}
static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
	u32 chan;

	for (chan = 0; chan < dma_csr_ch; chan++) {
		struct stmmac_channel *ch = &priv->channel[chan];
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
		spin_unlock_irqrestore(&ch->lock, flags);
	}
}
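/* The loop above walks max(rx, tx) channels: the CSR-level interrupt enable
 * lives per DMA channel, so interrupts are (re)enabled on every channel that
 * backs either an RX or a TX queue.
 */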
/**
 * stmmac_start_all_dma - start all RX and TX DMA channels
 * @priv: driver private structure
 * Description:
 * This starts all the RX and TX DMA channels
 */
static void stmmac_start_all_dma(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan = 0;

	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_start_rx_dma(priv, chan);

	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_start_tx_dma(priv, chan);
}

/**
 * stmmac_stop_all_dma - stop all RX and TX DMA channels
 * @priv: driver private structure
 * Description:
 * This stops the RX and TX DMA channels
 */
static void stmmac_stop_all_dma(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan = 0;

	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_stop_rx_dma(priv, chan);

	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_stop_tx_dma(priv, chan);
}
2360 * stmmac_dma_operation_mode - HW DMA operation mode
2361 * @priv: driver private structure
2362 * Description: it is used for configuring the DMA operation mode register in
2363 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2365 static void stmmac_dma_operation_mode(struct stmmac_priv
*priv
)
2367 u32 rx_channels_count
= priv
->plat
->rx_queues_to_use
;
2368 u32 tx_channels_count
= priv
->plat
->tx_queues_to_use
;
2369 int rxfifosz
= priv
->plat
->rx_fifo_size
;
2370 int txfifosz
= priv
->plat
->tx_fifo_size
;
2377 rxfifosz
= priv
->dma_cap
.rx_fifo_size
;
2379 txfifosz
= priv
->dma_cap
.tx_fifo_size
;
2381 /* Adjust for real per queue fifo size */
2382 rxfifosz
/= rx_channels_count
;
2383 txfifosz
/= tx_channels_count
;
2385 if (priv
->plat
->force_thresh_dma_mode
) {
2388 } else if (priv
->plat
->force_sf_dma_mode
|| priv
->plat
->tx_coe
) {
2390 * In case of GMAC, SF mode can be enabled
2391 * to perform the TX COE in HW. This depends on:
2392 * 1) TX COE if actually supported
2393 * 2) There is no bugged Jumbo frame support
2394 * that needs to not insert csum in the TDES.
2396 txmode
= SF_DMA_MODE
;
2397 rxmode
= SF_DMA_MODE
;
2398 priv
->xstats
.threshold
= SF_DMA_MODE
;
2401 rxmode
= SF_DMA_MODE
;
2404 /* configure all channels */
2405 for (chan
= 0; chan
< rx_channels_count
; chan
++) {
2406 struct stmmac_rx_queue
*rx_q
= &priv
->dma_conf
.rx_queue
[chan
];
2409 qmode
= priv
->plat
->rx_queues_cfg
[chan
].mode_to_use
;
2411 stmmac_dma_rx_mode(priv
, priv
->ioaddr
, rxmode
, chan
,
2414 if (rx_q
->xsk_pool
) {
2415 buf_size
= xsk_pool_get_rx_frame_size(rx_q
->xsk_pool
);
2416 stmmac_set_dma_bfsize(priv
, priv
->ioaddr
,
2420 stmmac_set_dma_bfsize(priv
, priv
->ioaddr
,
2421 priv
->dma_conf
.dma_buf_sz
,
2426 for (chan
= 0; chan
< tx_channels_count
; chan
++) {
2427 qmode
= priv
->plat
->tx_queues_cfg
[chan
].mode_to_use
;
2429 stmmac_dma_tx_mode(priv
, priv
->ioaddr
, txmode
, chan
,
static void stmmac_xsk_request_timestamp(void *_priv)
{
	struct stmmac_metadata_request *meta_req = _priv;

	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
	*meta_req->set_ic = true;
}

static u64 stmmac_xsk_fill_timestamp(void *_priv)
{
	struct stmmac_xsk_tx_complete *tx_compl = _priv;
	struct stmmac_priv *priv = tx_compl->priv;
	struct dma_desc *desc = tx_compl->desc;
	bool found = false;
	u64 ns = 0;

	if (!priv->hwts_tx_en)
		return 0;

	/* check tx tstamp status */
	if (stmmac_get_tx_timestamp_status(priv, desc)) {
		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
		found = true;
	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
		found = true;
	}

	if (found) {
		ns -= priv->plat->cdc_error_adj;
		return ns_to_ktime(ns);
	}

	return 0;
}

static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
	.tmo_request_timestamp		= stmmac_xsk_request_timestamp,
	.tmo_fill_timestamp		= stmmac_xsk_fill_timestamp,
};
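/* These callbacks are handed to the XSK core via xsk_tx_metadata_request()
 * in the zero-copy transmit path and xsk_tx_metadata_complete() in the TX
 * clean path, so AF_XDP applications using TX metadata can request and then
 * read back hardware transmit timestamps.
 */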
2474 static bool stmmac_xdp_xmit_zc(struct stmmac_priv
*priv
, u32 queue
, u32 budget
)
2476 struct netdev_queue
*nq
= netdev_get_tx_queue(priv
->dev
, queue
);
2477 struct stmmac_tx_queue
*tx_q
= &priv
->dma_conf
.tx_queue
[queue
];
2478 struct stmmac_txq_stats
*txq_stats
= &priv
->xstats
.txq_stats
[queue
];
2479 struct xsk_buff_pool
*pool
= tx_q
->xsk_pool
;
2480 unsigned int entry
= tx_q
->cur_tx
;
2481 struct dma_desc
*tx_desc
= NULL
;
2482 struct xdp_desc xdp_desc
;
2483 bool work_done
= true;
2484 u32 tx_set_ic_bit
= 0;
2486 /* Avoids TX time-out as we are sharing with slow path */
2487 txq_trans_cond_update(nq
);
2489 budget
= min(budget
, stmmac_tx_avail(priv
, queue
));
2491 while (budget
-- > 0) {
2492 struct stmmac_metadata_request meta_req
;
2493 struct xsk_tx_metadata
*meta
= NULL
;
2494 dma_addr_t dma_addr
;
2497 /* We are sharing with slow path and stop XSK TX desc submission when
2498 * available TX ring is less than threshold.
2500 if (unlikely(stmmac_tx_avail(priv
, queue
) < STMMAC_TX_XSK_AVAIL
) ||
2501 !netif_carrier_ok(priv
->dev
)) {
2506 if (!xsk_tx_peek_desc(pool
, &xdp_desc
))
2509 if (priv
->plat
->est
&& priv
->plat
->est
->enable
&&
2510 priv
->plat
->est
->max_sdu
[queue
] &&
2511 xdp_desc
.len
> priv
->plat
->est
->max_sdu
[queue
]) {
2512 priv
->xstats
.max_sdu_txq_drop
[queue
]++;
2516 if (likely(priv
->extend_desc
))
2517 tx_desc
= (struct dma_desc
*)(tx_q
->dma_etx
+ entry
);
2518 else if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
2519 tx_desc
= &tx_q
->dma_entx
[entry
].basic
;
2521 tx_desc
= tx_q
->dma_tx
+ entry
;
2523 dma_addr
= xsk_buff_raw_get_dma(pool
, xdp_desc
.addr
);
2524 meta
= xsk_buff_get_metadata(pool
, xdp_desc
.addr
);
2525 xsk_buff_raw_dma_sync_for_device(pool
, dma_addr
, xdp_desc
.len
);
2527 tx_q
->tx_skbuff_dma
[entry
].buf_type
= STMMAC_TXBUF_T_XSK_TX
;
2529 /* To return XDP buffer to XSK pool, we simple call
2530 * xsk_tx_completed(), so we don't need to fill up
2533 tx_q
->tx_skbuff_dma
[entry
].buf
= 0;
2534 tx_q
->xdpf
[entry
] = NULL
;
2536 tx_q
->tx_skbuff_dma
[entry
].map_as_page
= false;
2537 tx_q
->tx_skbuff_dma
[entry
].len
= xdp_desc
.len
;
2538 tx_q
->tx_skbuff_dma
[entry
].last_segment
= true;
2539 tx_q
->tx_skbuff_dma
[entry
].is_jumbo
= false;
2541 stmmac_set_desc_addr(priv
, tx_desc
, dma_addr
);
2543 tx_q
->tx_count_frames
++;
2545 if (!priv
->tx_coal_frames
[queue
])
2547 else if (tx_q
->tx_count_frames
% priv
->tx_coal_frames
[queue
] == 0)
2552 meta_req
.priv
= priv
;
2553 meta_req
.tx_desc
= tx_desc
;
2554 meta_req
.set_ic
= &set_ic
;
2555 xsk_tx_metadata_request(meta
, &stmmac_xsk_tx_metadata_ops
,
2558 tx_q
->tx_count_frames
= 0;
2559 stmmac_set_tx_ic(priv
, tx_desc
);
2563 stmmac_prepare_tx_desc(priv
, tx_desc
, 1, xdp_desc
.len
,
2564 true, priv
->mode
, true, true,
2567 stmmac_enable_dma_transmission(priv
, priv
->ioaddr
);
2569 xsk_tx_metadata_to_compl(meta
,
2570 &tx_q
->tx_skbuff_dma
[entry
].xsk_meta
);
2572 tx_q
->cur_tx
= STMMAC_GET_ENTRY(tx_q
->cur_tx
, priv
->dma_conf
.dma_tx_size
);
2573 entry
= tx_q
->cur_tx
;
2575 u64_stats_update_begin(&txq_stats
->napi_syncp
);
2576 u64_stats_add(&txq_stats
->napi
.tx_set_ic_bit
, tx_set_ic_bit
);
2577 u64_stats_update_end(&txq_stats
->napi_syncp
);
2580 stmmac_flush_tx_descriptors(priv
, queue
);
2581 xsk_tx_release(pool
);
2584 /* Return true if all of the 3 conditions are met
2585 * a) TX Budget is still available
2586 * b) work_done = true when XSK TX desc peek is empty (no more
2587 * pending XSK TX for transmission)
2589 return !!budget
&& work_done
;
static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
{
	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
		tc += 64;
		if (priv->plat->force_thresh_dma_mode)
			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
		else
			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
						      chan);

		priv->xstats.threshold = tc;
	}
}
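/* On a tx_err_bump_tc event the module-wide 'tc' threshold is grown and the
 * channel reprogrammed; escalation stops once store-and-forward mode is
 * already in use or 'tc' has gone past 256.
 */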
2608 * stmmac_tx_clean - to manage the transmission completion
2609 * @priv: driver private structure
2610 * @budget: napi budget limiting this functions packet handling
2611 * @queue: TX queue index
2612 * @pending_packets: signal to arm the TX coal timer
2613 * Description: it reclaims the transmit resources after transmission completes.
2614 * If some packets still needs to be handled, due to TX coalesce, set
2615 * pending_packets to true to make NAPI arm the TX coal timer.
2617 static int stmmac_tx_clean(struct stmmac_priv
*priv
, int budget
, u32 queue
,
2618 bool *pending_packets
)
2620 struct stmmac_tx_queue
*tx_q
= &priv
->dma_conf
.tx_queue
[queue
];
2621 struct stmmac_txq_stats
*txq_stats
= &priv
->xstats
.txq_stats
[queue
];
2622 unsigned int bytes_compl
= 0, pkts_compl
= 0;
2623 unsigned int entry
, xmits
= 0, count
= 0;
2624 u32 tx_packets
= 0, tx_errors
= 0;
2626 __netif_tx_lock_bh(netdev_get_tx_queue(priv
->dev
, queue
));
2628 tx_q
->xsk_frames_done
= 0;
2630 entry
= tx_q
->dirty_tx
;
2632 /* Try to clean all TX complete frame in 1 shot */
2633 while ((entry
!= tx_q
->cur_tx
) && count
< priv
->dma_conf
.dma_tx_size
) {
2634 struct xdp_frame
*xdpf
;
2635 struct sk_buff
*skb
;
2639 if (tx_q
->tx_skbuff_dma
[entry
].buf_type
== STMMAC_TXBUF_T_XDP_TX
||
2640 tx_q
->tx_skbuff_dma
[entry
].buf_type
== STMMAC_TXBUF_T_XDP_NDO
) {
2641 xdpf
= tx_q
->xdpf
[entry
];
2643 } else if (tx_q
->tx_skbuff_dma
[entry
].buf_type
== STMMAC_TXBUF_T_SKB
) {
2645 skb
= tx_q
->tx_skbuff
[entry
];
2651 if (priv
->extend_desc
)
2652 p
= (struct dma_desc
*)(tx_q
->dma_etx
+ entry
);
2653 else if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
2654 p
= &tx_q
->dma_entx
[entry
].basic
;
2656 p
= tx_q
->dma_tx
+ entry
;
2658 status
= stmmac_tx_status(priv
, &priv
->xstats
, p
, priv
->ioaddr
);
2659 /* Check if the descriptor is owned by the DMA */
2660 if (unlikely(status
& tx_dma_own
))
2665 /* Make sure descriptor fields are read after reading
2670 /* Just consider the last segment and ...*/
2671 if (likely(!(status
& tx_not_ls
))) {
2672 /* ... verify the status error condition */
2673 if (unlikely(status
& tx_err
)) {
2675 if (unlikely(status
& tx_err_bump_tc
))
2676 stmmac_bump_dma_threshold(priv
, queue
);
2681 stmmac_get_tx_hwtstamp(priv
, p
, skb
);
2682 } else if (tx_q
->xsk_pool
&&
2683 xp_tx_metadata_enabled(tx_q
->xsk_pool
)) {
2684 struct stmmac_xsk_tx_complete tx_compl
= {
2689 xsk_tx_metadata_complete(&tx_q
->tx_skbuff_dma
[entry
].xsk_meta
,
2690 &stmmac_xsk_tx_metadata_ops
,
2695 if (likely(tx_q
->tx_skbuff_dma
[entry
].buf
&&
2696 tx_q
->tx_skbuff_dma
[entry
].buf_type
!= STMMAC_TXBUF_T_XDP_TX
)) {
2697 if (tx_q
->tx_skbuff_dma
[entry
].map_as_page
)
2698 dma_unmap_page(priv
->device
,
2699 tx_q
->tx_skbuff_dma
[entry
].buf
,
2700 tx_q
->tx_skbuff_dma
[entry
].len
,
2703 dma_unmap_single(priv
->device
,
2704 tx_q
->tx_skbuff_dma
[entry
].buf
,
2705 tx_q
->tx_skbuff_dma
[entry
].len
,
2707 tx_q
->tx_skbuff_dma
[entry
].buf
= 0;
2708 tx_q
->tx_skbuff_dma
[entry
].len
= 0;
2709 tx_q
->tx_skbuff_dma
[entry
].map_as_page
= false;
2712 stmmac_clean_desc3(priv
, tx_q
, p
);
2714 tx_q
->tx_skbuff_dma
[entry
].last_segment
= false;
2715 tx_q
->tx_skbuff_dma
[entry
].is_jumbo
= false;
2718 tx_q
->tx_skbuff_dma
[entry
].buf_type
== STMMAC_TXBUF_T_XDP_TX
) {
2719 xdp_return_frame_rx_napi(xdpf
);
2720 tx_q
->xdpf
[entry
] = NULL
;
2724 tx_q
->tx_skbuff_dma
[entry
].buf_type
== STMMAC_TXBUF_T_XDP_NDO
) {
2725 xdp_return_frame(xdpf
);
2726 tx_q
->xdpf
[entry
] = NULL
;
2729 if (tx_q
->tx_skbuff_dma
[entry
].buf_type
== STMMAC_TXBUF_T_XSK_TX
)
2730 tx_q
->xsk_frames_done
++;
2732 if (tx_q
->tx_skbuff_dma
[entry
].buf_type
== STMMAC_TXBUF_T_SKB
) {
2735 bytes_compl
+= skb
->len
;
2736 dev_consume_skb_any(skb
);
2737 tx_q
->tx_skbuff
[entry
] = NULL
;
2741 stmmac_release_tx_desc(priv
, p
, priv
->mode
);
2743 entry
= STMMAC_GET_ENTRY(entry
, priv
->dma_conf
.dma_tx_size
);
2745 tx_q
->dirty_tx
= entry
;
2747 netdev_tx_completed_queue(netdev_get_tx_queue(priv
->dev
, queue
),
2748 pkts_compl
, bytes_compl
);
2750 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv
->dev
,
2752 stmmac_tx_avail(priv
, queue
) > STMMAC_TX_THRESH(priv
)) {
2754 netif_dbg(priv
, tx_done
, priv
->dev
,
2755 "%s: restart transmit\n", __func__
);
2756 netif_tx_wake_queue(netdev_get_tx_queue(priv
->dev
, queue
));
2759 if (tx_q
->xsk_pool
) {
2762 if (tx_q
->xsk_frames_done
)
2763 xsk_tx_completed(tx_q
->xsk_pool
, tx_q
->xsk_frames_done
);
2765 if (xsk_uses_need_wakeup(tx_q
->xsk_pool
))
2766 xsk_set_tx_need_wakeup(tx_q
->xsk_pool
);
2768 /* For XSK TX, we try to send as many as possible.
2769 * If XSK work done (XSK TX desc empty and budget still
2770 * available), return "budget - 1" to reenable TX IRQ.
2771 * Else, return "budget" to make NAPI continue polling.
2773 work_done
= stmmac_xdp_xmit_zc(priv
, queue
,
2774 STMMAC_XSK_TX_BUDGET_MAX
);
2781 if (priv
->eee_enabled
&& !priv
->tx_path_in_lpi_mode
&&
2782 priv
->eee_sw_timer_en
) {
2783 if (stmmac_enable_eee_mode(priv
))
2784 mod_timer(&priv
->eee_ctrl_timer
, STMMAC_LPI_T(priv
->tx_lpi_timer
));
2787 /* We still have pending packets, let's call for a new scheduling */
2788 if (tx_q
->dirty_tx
!= tx_q
->cur_tx
)
2789 *pending_packets
= true;
2791 u64_stats_update_begin(&txq_stats
->napi_syncp
);
2792 u64_stats_add(&txq_stats
->napi
.tx_packets
, tx_packets
);
2793 u64_stats_add(&txq_stats
->napi
.tx_pkt_n
, tx_packets
);
2794 u64_stats_inc(&txq_stats
->napi
.tx_clean
);
2795 u64_stats_update_end(&txq_stats
->napi_syncp
);
2797 priv
->xstats
.tx_errors
+= tx_errors
;
2799 __netif_tx_unlock_bh(netdev_get_tx_queue(priv
->dev
, queue
));
2801 /* Combine decisions from TX clean and XSK TX */
2802 return max(count
, xmits
);
/**
 * stmmac_tx_err - to manage the tx error
 * @priv: driver private structure
 * @chan: channel index
 * Description: it cleans the descriptors and restarts the transmission
 * in case of transmission errors.
 */
static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];

	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));

	stmmac_stop_tx_dma(priv, chan);
	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
	stmmac_reset_tx_queue(priv, chan);
	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
			    tx_q->dma_tx_phy, chan);
	stmmac_start_tx_dma(priv, chan);

	priv->xstats.tx_errors++;
	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
}
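/* Recovery order matters here: the netdev queue is stopped and the DMA
 * channel halted before in-flight socket buffers are freed and the ring is
 * cleared, then the channel is re-initialised at its ring base address and
 * restarted before the queue is woken again.
 */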
2831 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2832 * @priv: driver private structure
2833 * @txmode: TX operating mode
2834 * @rxmode: RX operating mode
2835 * @chan: channel index
2836 * Description: it is used for configuring of the DMA operation mode in
2837 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2840 static void stmmac_set_dma_operation_mode(struct stmmac_priv
*priv
, u32 txmode
,
2841 u32 rxmode
, u32 chan
)
2843 u8 rxqmode
= priv
->plat
->rx_queues_cfg
[chan
].mode_to_use
;
2844 u8 txqmode
= priv
->plat
->tx_queues_cfg
[chan
].mode_to_use
;
2845 u32 rx_channels_count
= priv
->plat
->rx_queues_to_use
;
2846 u32 tx_channels_count
= priv
->plat
->tx_queues_to_use
;
2847 int rxfifosz
= priv
->plat
->rx_fifo_size
;
2848 int txfifosz
= priv
->plat
->tx_fifo_size
;
2851 rxfifosz
= priv
->dma_cap
.rx_fifo_size
;
2853 txfifosz
= priv
->dma_cap
.tx_fifo_size
;
2855 /* Adjust for real per queue fifo size */
2856 rxfifosz
/= rx_channels_count
;
2857 txfifosz
/= tx_channels_count
;
2859 stmmac_dma_rx_mode(priv
, priv
->ioaddr
, rxmode
, chan
, rxfifosz
, rxqmode
);
2860 stmmac_dma_tx_mode(priv
, priv
->ioaddr
, txmode
, chan
, txfifosz
, txqmode
);
static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
{
	int ret;

	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
					    priv->ioaddr, priv->dma_cap.asp,
					    &priv->sstats);
	if (ret && (ret != -EINVAL)) {
		stmmac_global_err(priv);
		return true;
	}

	return false;
}
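/* Only real safety-IRQ errors escalate to stmmac_global_err(); an -EINVAL
 * return (typically meaning the safety feature set is not implemented on
 * this core) is deliberately ignored.
 */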
2877 static int stmmac_napi_check(struct stmmac_priv
*priv
, u32 chan
, u32 dir
)
2879 int status
= stmmac_dma_interrupt_status(priv
, priv
->ioaddr
,
2880 &priv
->xstats
, chan
, dir
);
2881 struct stmmac_rx_queue
*rx_q
= &priv
->dma_conf
.rx_queue
[chan
];
2882 struct stmmac_tx_queue
*tx_q
= &priv
->dma_conf
.tx_queue
[chan
];
2883 struct stmmac_channel
*ch
= &priv
->channel
[chan
];
2884 struct napi_struct
*rx_napi
;
2885 struct napi_struct
*tx_napi
;
2886 unsigned long flags
;
2888 rx_napi
= rx_q
->xsk_pool
? &ch
->rxtx_napi
: &ch
->rx_napi
;
2889 tx_napi
= tx_q
->xsk_pool
? &ch
->rxtx_napi
: &ch
->tx_napi
;
2891 if ((status
& handle_rx
) && (chan
< priv
->plat
->rx_queues_to_use
)) {
2892 if (napi_schedule_prep(rx_napi
)) {
2893 spin_lock_irqsave(&ch
->lock
, flags
);
2894 stmmac_disable_dma_irq(priv
, priv
->ioaddr
, chan
, 1, 0);
2895 spin_unlock_irqrestore(&ch
->lock
, flags
);
2896 __napi_schedule(rx_napi
);
2900 if ((status
& handle_tx
) && (chan
< priv
->plat
->tx_queues_to_use
)) {
2901 if (napi_schedule_prep(tx_napi
)) {
2902 spin_lock_irqsave(&ch
->lock
, flags
);
2903 stmmac_disable_dma_irq(priv
, priv
->ioaddr
, chan
, 0, 1);
2904 spin_unlock_irqrestore(&ch
->lock
, flags
);
2905 __napi_schedule(tx_napi
);
2913 * stmmac_dma_interrupt - DMA ISR
2914 * @priv: driver private structure
2915 * Description: this is the DMA ISR. It is called by the main ISR.
2916 * It calls the dwmac dma routine and schedule poll method in case of some
2919 static void stmmac_dma_interrupt(struct stmmac_priv
*priv
)
2921 u32 tx_channel_count
= priv
->plat
->tx_queues_to_use
;
2922 u32 rx_channel_count
= priv
->plat
->rx_queues_to_use
;
2923 u32 channels_to_check
= tx_channel_count
> rx_channel_count
?
2924 tx_channel_count
: rx_channel_count
;
2926 int status
[max_t(u32
, MTL_MAX_TX_QUEUES
, MTL_MAX_RX_QUEUES
)];
2928 /* Make sure we never check beyond our status buffer. */
2929 if (WARN_ON_ONCE(channels_to_check
> ARRAY_SIZE(status
)))
2930 channels_to_check
= ARRAY_SIZE(status
);
2932 for (chan
= 0; chan
< channels_to_check
; chan
++)
2933 status
[chan
] = stmmac_napi_check(priv
, chan
,
2936 for (chan
= 0; chan
< tx_channel_count
; chan
++) {
2937 if (unlikely(status
[chan
] & tx_hard_error_bump_tc
)) {
2938 /* Try to bump up the dma threshold on this failure */
2939 stmmac_bump_dma_threshold(priv
, chan
);
2940 } else if (unlikely(status
[chan
] == tx_hard_error
)) {
2941 stmmac_tx_err(priv
, chan
);
/**
 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
 * @priv: driver private structure
 * Description: this masks the MMC irq, in fact, the counters are managed in SW.
 */
static void stmmac_mmc_setup(struct stmmac_priv *priv)
{
	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;

	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);

	if (priv->dma_cap.rmon) {
		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
	} else
		netdev_info(priv->dev, "No MAC Management Counters available\n");
}
/**
 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
 * @priv: driver private structure
 * Description:
 * new GMAC chip generations have a new register to indicate the
 * presence of the optional feature/functions.
 * This can be also used to override the value passed through the
 * platform and necessary for old MAC10/100 and GMAC chips.
 */
static int stmmac_get_hw_features(struct stmmac_priv *priv)
{
	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
}

/**
 * stmmac_check_ether_addr - check if the MAC addr is valid
 * @priv: driver private structure
 * Description:
 * it is to verify if the MAC address is valid, in case of failures it
 * generates a random MAC address
 */
static void stmmac_check_ether_addr(struct stmmac_priv *priv)
{
	u8 addr[ETH_ALEN];

	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
		if (is_valid_ether_addr(addr))
			eth_hw_addr_set(priv->dev, addr);
		else
			eth_hw_addr_random(priv->dev);
		dev_info(priv->device, "device MAC address %pM\n",
			 priv->dev->dev_addr);
	}
}
3002 * stmmac_init_dma_engine - DMA init.
3003 * @priv: driver private structure
3005 * It inits the DMA invoking the specific MAC/GMAC callback.
3006 * Some DMA parameters can be passed from the platform;
3007 * in case of these are not passed a default is kept for the MAC or GMAC.
3009 static int stmmac_init_dma_engine(struct stmmac_priv
*priv
)
3011 u32 rx_channels_count
= priv
->plat
->rx_queues_to_use
;
3012 u32 tx_channels_count
= priv
->plat
->tx_queues_to_use
;
3013 u32 dma_csr_ch
= max(rx_channels_count
, tx_channels_count
);
3014 struct stmmac_rx_queue
*rx_q
;
3015 struct stmmac_tx_queue
*tx_q
;
3020 if (!priv
->plat
->dma_cfg
|| !priv
->plat
->dma_cfg
->pbl
) {
3021 dev_err(priv
->device
, "Invalid DMA configuration\n");
3025 if (priv
->extend_desc
&& (priv
->mode
== STMMAC_RING_MODE
))
3028 ret
= stmmac_reset(priv
, priv
->ioaddr
);
3030 dev_err(priv
->device
, "Failed to reset the dma\n");
3034 /* DMA Configuration */
3035 stmmac_dma_init(priv
, priv
->ioaddr
, priv
->plat
->dma_cfg
, atds
);
3037 if (priv
->plat
->axi
)
3038 stmmac_axi(priv
, priv
->ioaddr
, priv
->plat
->axi
);
3040 /* DMA CSR Channel configuration */
3041 for (chan
= 0; chan
< dma_csr_ch
; chan
++) {
3042 stmmac_init_chan(priv
, priv
->ioaddr
, priv
->plat
->dma_cfg
, chan
);
3043 stmmac_disable_dma_irq(priv
, priv
->ioaddr
, chan
, 1, 1);
3046 /* DMA RX Channel Configuration */
3047 for (chan
= 0; chan
< rx_channels_count
; chan
++) {
3048 rx_q
= &priv
->dma_conf
.rx_queue
[chan
];
3050 stmmac_init_rx_chan(priv
, priv
->ioaddr
, priv
->plat
->dma_cfg
,
3051 rx_q
->dma_rx_phy
, chan
);
3053 rx_q
->rx_tail_addr
= rx_q
->dma_rx_phy
+
3054 (rx_q
->buf_alloc_num
*
3055 sizeof(struct dma_desc
));
3056 stmmac_set_rx_tail_ptr(priv
, priv
->ioaddr
,
3057 rx_q
->rx_tail_addr
, chan
);
3060 /* DMA TX Channel Configuration */
3061 for (chan
= 0; chan
< tx_channels_count
; chan
++) {
3062 tx_q
= &priv
->dma_conf
.tx_queue
[chan
];
3064 stmmac_init_tx_chan(priv
, priv
->ioaddr
, priv
->plat
->dma_cfg
,
3065 tx_q
->dma_tx_phy
, chan
);
3067 tx_q
->tx_tail_addr
= tx_q
->dma_tx_phy
;
3068 stmmac_set_tx_tail_ptr(priv
, priv
->ioaddr
,
3069 tx_q
->tx_tail_addr
, chan
);
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	u32 tx_coal_timer = priv->tx_coal_timer[queue];
	struct stmmac_channel *ch;
	struct napi_struct *napi;

	if (!tx_coal_timer)
		return;

	ch = &priv->channel[tx_q->queue_index];
	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;

	/* Arm timer only if napi is not already scheduled.
	 * Try to cancel any timer if napi is scheduled, timer will be armed
	 * again in the next scheduled napi.
	 */
	if (unlikely(!napi_is_scheduled(napi)))
		hrtimer_start(&tx_q->txtimer,
			      STMMAC_COAL_TIMER(tx_coal_timer),
			      HRTIMER_MODE_REL);
	else
		hrtimer_try_to_cancel(&tx_q->txtimer);
}
/**
 * stmmac_tx_timer - mitigation sw timer for tx.
 * @t: data pointer
 * Description:
 * This is the timer handler to directly invoke the stmmac_tx_clean.
 */
static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
{
	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
	struct stmmac_priv *priv = tx_q->priv_data;
	struct stmmac_channel *ch;
	struct napi_struct *napi;

	ch = &priv->channel[tx_q->queue_index];
	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;

	if (likely(napi_schedule_prep(napi))) {
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
		spin_unlock_irqrestore(&ch->lock, flags);
		__napi_schedule(napi);
	}

	return HRTIMER_NORESTART;
}
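/* The handler mirrors the hard-IRQ path in stmmac_napi_check(): the
 * per-channel TX interrupt is masked under the channel lock before NAPI is
 * scheduled, so TX completion runs exactly once whether it was triggered by
 * the coalesce timer or by the DMA interrupt.
 */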
/**
 * stmmac_init_coalesce - init mitigation options.
 * @priv: driver private structure
 * Description:
 * This inits the coalesce parameters: i.e. timer rate,
 * timer handler and default threshold used for enabling the
 * interrupt on completion bit.
 */
static void stmmac_init_coalesce(struct stmmac_priv *priv)
{
	u32 tx_channel_count = priv->plat->tx_queues_to_use;
	u32 rx_channel_count = priv->plat->rx_queues_to_use;
	u32 chan;

	for (chan = 0; chan < tx_channel_count; chan++) {
		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];

		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;

		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		tx_q->txtimer.function = stmmac_tx_timer;
	}

	for (chan = 0; chan < rx_channel_count; chan++)
		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
}
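/* These are only boot-time defaults (STMMAC_TX_FRAMES, STMMAC_COAL_TX_TIMER,
 * STMMAC_RX_FRAMES); the per-queue frame counts and the TX timer can later
 * be tuned at runtime through the ethtool coalesce interface.
 */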
static void stmmac_set_rings_length(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan;

	/* set TX ring length */
	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_set_tx_ring_len(priv, priv->ioaddr,
				       (priv->dma_conf.dma_tx_size - 1), chan);

	/* set RX ring length */
	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_set_rx_ring_len(priv, priv->ioaddr,
				       (priv->dma_conf.dma_rx_size - 1), chan);
}
/**
 * stmmac_set_tx_queue_weight - Set TX queue weight
 * @priv: driver private structure
 * Description: It is used for setting TX queues weight
 */
static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 weight;
	u32 queue;

	for (queue = 0; queue < tx_queues_count; queue++) {
		weight = priv->plat->tx_queues_cfg[queue].weight;
		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
	}
}
/**
 * stmmac_configure_cbs - Configure CBS in TX queue
 * @priv: driver private structure
 * Description: It is used for configuring CBS in AVB TX queues
 */
static void stmmac_configure_cbs(struct stmmac_priv *priv)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 mode_to_use;
	u32 queue;

	/* queue 0 is reserved for legacy traffic */
	for (queue = 1; queue < tx_queues_count; queue++) {
		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
		if (mode_to_use == MTL_QUEUE_DCB)
			continue;

		stmmac_config_cbs(priv, priv->hw,
				  priv->plat->tx_queues_cfg[queue].send_slope,
				  priv->plat->tx_queues_cfg[queue].idle_slope,
				  priv->plat->tx_queues_cfg[queue].high_credit,
				  priv->plat->tx_queues_cfg[queue].low_credit,
				  queue);
	}
}
/**
 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
 * @priv: driver private structure
 * Description: It is used for mapping RX queues to RX dma channels
 */
static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 queue;
	u32 chan;

	for (queue = 0; queue < rx_queues_count; queue++) {
		chan = priv->plat->rx_queues_cfg[queue].chan;
		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
	}
}
3234 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3235 * @priv: driver private structure
3236 * Description: It is used for configuring the RX Queue Priority
3238 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv
*priv
)
3240 u32 rx_queues_count
= priv
->plat
->rx_queues_to_use
;
3244 for (queue
= 0; queue
< rx_queues_count
; queue
++) {
3245 if (!priv
->plat
->rx_queues_cfg
[queue
].use_prio
)
3248 prio
= priv
->plat
->rx_queues_cfg
[queue
].prio
;
3249 stmmac_rx_queue_prio(priv
, priv
->hw
, prio
, queue
);
3254 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3255 * @priv: driver private structure
3256 * Description: It is used for configuring the TX Queue Priority
3258 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv
*priv
)
3260 u32 tx_queues_count
= priv
->plat
->tx_queues_to_use
;
3264 for (queue
= 0; queue
< tx_queues_count
; queue
++) {
3265 if (!priv
->plat
->tx_queues_cfg
[queue
].use_prio
)
3268 prio
= priv
->plat
->tx_queues_cfg
[queue
].prio
;
3269 stmmac_tx_queue_prio(priv
, priv
->hw
, prio
, queue
);
/**
 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
 * @priv: driver private structure
 * Description: It is used for configuring the RX queue routing
 */
static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 queue;
	u8 packet;

	for (queue = 0; queue < rx_queues_count; queue++) {
		/* no specific packet type routing specified for the queue */
		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
			continue;

		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
	}
}
static void stmmac_mac_config_rss(struct stmmac_priv *priv)
{
	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
		priv->rss.enable = false;
		return;
	}

	if (priv->dev->features & NETIF_F_RXHASH)
		priv->rss.enable = true;
	else
		priv->rss.enable = false;

	stmmac_rss_configure(priv, priv->hw, &priv->rss,
			     priv->plat->rx_queues_to_use);
}
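/* RSS is only armed when the hardware advertises it (dma_cap.rssen), the
 * platform opted in (plat->rss_en) and the netdev currently has
 * NETIF_F_RXHASH set; otherwise flows are steered by the queue routing and
 * priority configuration above.
 */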
3311 * stmmac_mtl_configuration - Configure MTL
3312 * @priv: driver private structure
3313 * Description: It is used for configurring MTL
3315 static void stmmac_mtl_configuration(struct stmmac_priv
*priv
)
3317 u32 rx_queues_count
= priv
->plat
->rx_queues_to_use
;
3318 u32 tx_queues_count
= priv
->plat
->tx_queues_to_use
;
3320 if (tx_queues_count
> 1)
3321 stmmac_set_tx_queue_weight(priv
);
3323 /* Configure MTL RX algorithms */
3324 if (rx_queues_count
> 1)
3325 stmmac_prog_mtl_rx_algorithms(priv
, priv
->hw
,
3326 priv
->plat
->rx_sched_algorithm
);
3328 /* Configure MTL TX algorithms */
3329 if (tx_queues_count
> 1)
3330 stmmac_prog_mtl_tx_algorithms(priv
, priv
->hw
,
3331 priv
->plat
->tx_sched_algorithm
);
3333 /* Configure CBS in AVB TX queues */
3334 if (tx_queues_count
> 1)
3335 stmmac_configure_cbs(priv
);
3337 /* Map RX MTL to DMA channels */
3338 stmmac_rx_queue_dma_chan_map(priv
);
3340 /* Enable MAC RX Queues */
3341 stmmac_mac_enable_rx_queues(priv
);
3343 /* Set RX priorities */
3344 if (rx_queues_count
> 1)
3345 stmmac_mac_config_rx_queues_prio(priv
);
3347 /* Set TX priorities */
3348 if (tx_queues_count
> 1)
3349 stmmac_mac_config_tx_queues_prio(priv
);
3351 /* Set RX routing */
3352 if (rx_queues_count
> 1)
3353 stmmac_mac_config_rx_queues_routing(priv
);
3355 /* Receive Side Scaling */
3356 if (rx_queues_count
> 1)
3357 stmmac_mac_config_rss(priv
);
static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
{
	if (priv->dma_cap.asp) {
		netdev_info(priv->dev, "Enabling Safety Features\n");
		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
					  priv->plat->safety_feat_cfg);
	} else {
		netdev_info(priv->dev, "No Safety Features support found\n");
	}
}
static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
{
	char *name;

	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
	clear_bit(__FPE_REMOVING, &priv->fpe_task_state);

	name = priv->wq_name;
	sprintf(name, "%s-fpe", priv->dev->name);

	priv->fpe_wq = create_singlethread_workqueue(name);
	if (!priv->fpe_wq) {
		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
		return -ENOMEM;
	}

	netdev_info(priv->dev, "FPE workqueue start");

	return 0;
}
3393 * stmmac_hw_setup - setup mac in a usable state.
3394 * @dev : pointer to the device structure.
3395 * @ptp_register: register PTP if set
3397 * this is the main function to setup the HW in a usable state because the
3398 * dma engine is reset, the core registers are configured (e.g. AXI,
3399 * Checksum features, timers). The DMA is ready to start receiving and
3402 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3405 static int stmmac_hw_setup(struct net_device
*dev
, bool ptp_register
)
3407 struct stmmac_priv
*priv
= netdev_priv(dev
);
3408 u32 rx_cnt
= priv
->plat
->rx_queues_to_use
;
3409 u32 tx_cnt
= priv
->plat
->tx_queues_to_use
;
3414 /* DMA initialization and SW reset */
3415 ret
= stmmac_init_dma_engine(priv
);
3417 netdev_err(priv
->dev
, "%s: DMA engine initialization failed\n",
3422 /* Copy the MAC addr into the HW */
3423 stmmac_set_umac_addr(priv
, priv
->hw
, dev
->dev_addr
, 0);
3425 /* PS and related bits will be programmed according to the speed */
3426 if (priv
->hw
->pcs
) {
3427 int speed
= priv
->plat
->mac_port_sel_speed
;
3429 if ((speed
== SPEED_10
) || (speed
== SPEED_100
) ||
3430 (speed
== SPEED_1000
)) {
3431 priv
->hw
->ps
= speed
;
3433 dev_warn(priv
->device
, "invalid port speed\n");
3438 /* Initialize the MAC Core */
3439 stmmac_core_init(priv
, priv
->hw
, dev
);
3442 stmmac_mtl_configuration(priv
);
3444 /* Initialize Safety Features */
3445 stmmac_safety_feat_configuration(priv
);
3447 ret
= stmmac_rx_ipc(priv
, priv
->hw
);
3449 netdev_warn(priv
->dev
, "RX IPC Checksum Offload disabled\n");
3450 priv
->plat
->rx_coe
= STMMAC_RX_COE_NONE
;
3451 priv
->hw
->rx_csum
= 0;
3454 /* Enable the MAC Rx/Tx */
3455 stmmac_mac_set(priv
, priv
->ioaddr
, true);
3457 /* Set the HW DMA mode and the COE */
3458 stmmac_dma_operation_mode(priv
);
3460 stmmac_mmc_setup(priv
);
3463 ret
= clk_prepare_enable(priv
->plat
->clk_ptp_ref
);
3465 netdev_warn(priv
->dev
,
3466 "failed to enable PTP reference clock: %pe\n",
3470 ret
= stmmac_init_ptp(priv
);
3471 if (ret
== -EOPNOTSUPP
)
3472 netdev_info(priv
->dev
, "PTP not supported by HW\n");
3474 netdev_warn(priv
->dev
, "PTP init failed\n");
3475 else if (ptp_register
)
3476 stmmac_ptp_register(priv
);
3478 priv
->eee_tw_timer
= STMMAC_DEFAULT_TWT_LS
;
3480 /* Convert the timer from msec to usec */
3481 if (!priv
->tx_lpi_timer
)
3482 priv
->tx_lpi_timer
= eee_timer
* 1000;
3484 if (priv
->use_riwt
) {
3487 for (queue
= 0; queue
< rx_cnt
; queue
++) {
3488 if (!priv
->rx_riwt
[queue
])
3489 priv
->rx_riwt
[queue
] = DEF_DMA_RIWT
;
3491 stmmac_rx_watchdog(priv
, priv
->ioaddr
,
3492 priv
->rx_riwt
[queue
], queue
);
3497 stmmac_pcs_ctrl_ane(priv
, priv
->ioaddr
, 1, priv
->hw
->ps
, 0);
3499 /* set TX and RX rings length */
3500 stmmac_set_rings_length(priv
);
3504 for (chan
= 0; chan
< tx_cnt
; chan
++) {
3505 struct stmmac_tx_queue
*tx_q
= &priv
->dma_conf
.tx_queue
[chan
];
3507 /* TSO and TBS cannot co-exist */
3508 if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
3511 stmmac_enable_tso(priv
, priv
->ioaddr
, 1, chan
);
3515 /* Enable Split Header */
3516 sph_en
= (priv
->hw
->rx_csum
> 0) && priv
->sph
;
3517 for (chan
= 0; chan
< rx_cnt
; chan
++)
3518 stmmac_enable_sph(priv
, priv
->ioaddr
, sph_en
, chan
);
3521 /* VLAN Tag Insertion */
3522 if (priv
->dma_cap
.vlins
)
3523 stmmac_enable_vlan(priv
, priv
->hw
, STMMAC_VLAN_INSERT
);
3526 for (chan
= 0; chan
< tx_cnt
; chan
++) {
3527 struct stmmac_tx_queue
*tx_q
= &priv
->dma_conf
.tx_queue
[chan
];
3528 int enable
= tx_q
->tbs
& STMMAC_TBS_AVAIL
;
3530 stmmac_enable_tbs(priv
, priv
->ioaddr
, enable
, chan
);
3533 /* Configure real RX and TX queues */
3534 netif_set_real_num_rx_queues(dev
, priv
->plat
->rx_queues_to_use
);
3535 netif_set_real_num_tx_queues(dev
, priv
->plat
->tx_queues_to_use
);
3537 /* Start the ball rolling... */
3538 stmmac_start_all_dma(priv
);
3540 stmmac_set_hw_vlan_mode(priv
, priv
->hw
);
3542 if (priv
->dma_cap
.fpesel
) {
3543 stmmac_fpe_start_wq(priv
);
3545 if (priv
->plat
->fpe_cfg
->enable
)
3546 stmmac_fpe_handshake(priv
, true);
static void stmmac_hw_teardown(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	clk_disable_unprepare(priv->plat->clk_ptp_ref);
}
3559 static void stmmac_free_irq(struct net_device
*dev
,
3560 enum request_irq_err irq_err
, int irq_idx
)
3562 struct stmmac_priv
*priv
= netdev_priv(dev
);
3566 case REQ_IRQ_ERR_ALL
:
3567 irq_idx
= priv
->plat
->tx_queues_to_use
;
3569 case REQ_IRQ_ERR_TX
:
3570 for (j
= irq_idx
- 1; j
>= 0; j
--) {
3571 if (priv
->tx_irq
[j
] > 0) {
3572 irq_set_affinity_hint(priv
->tx_irq
[j
], NULL
);
3573 free_irq(priv
->tx_irq
[j
], &priv
->dma_conf
.tx_queue
[j
]);
3576 irq_idx
= priv
->plat
->rx_queues_to_use
;
3578 case REQ_IRQ_ERR_RX
:
3579 for (j
= irq_idx
- 1; j
>= 0; j
--) {
3580 if (priv
->rx_irq
[j
] > 0) {
3581 irq_set_affinity_hint(priv
->rx_irq
[j
], NULL
);
3582 free_irq(priv
->rx_irq
[j
], &priv
->dma_conf
.rx_queue
[j
]);
3586 if (priv
->sfty_ue_irq
> 0 && priv
->sfty_ue_irq
!= dev
->irq
)
3587 free_irq(priv
->sfty_ue_irq
, dev
);
3589 case REQ_IRQ_ERR_SFTY_UE
:
3590 if (priv
->sfty_ce_irq
> 0 && priv
->sfty_ce_irq
!= dev
->irq
)
3591 free_irq(priv
->sfty_ce_irq
, dev
);
3593 case REQ_IRQ_ERR_SFTY_CE
:
3594 if (priv
->lpi_irq
> 0 && priv
->lpi_irq
!= dev
->irq
)
3595 free_irq(priv
->lpi_irq
, dev
);
3597 case REQ_IRQ_ERR_LPI
:
3598 if (priv
->wol_irq
> 0 && priv
->wol_irq
!= dev
->irq
)
3599 free_irq(priv
->wol_irq
, dev
);
3601 case REQ_IRQ_ERR_SFTY
:
3602 if (priv
->sfty_irq
> 0 && priv
->sfty_irq
!= dev
->irq
)
3603 free_irq(priv
->sfty_irq
, dev
);
3605 case REQ_IRQ_ERR_WOL
:
3606 free_irq(dev
->irq
, dev
);
3608 case REQ_IRQ_ERR_MAC
:
3609 case REQ_IRQ_ERR_NO
:
3610 /* If MAC IRQ request error, no more IRQ to free */
3615 static int stmmac_request_irq_multi_msi(struct net_device
*dev
)
3617 struct stmmac_priv
*priv
= netdev_priv(dev
);
3618 enum request_irq_err irq_err
;
3625 /* For common interrupt */
3626 int_name
= priv
->int_name_mac
;
3627 sprintf(int_name
, "%s:%s", dev
->name
, "mac");
3628 ret
= request_irq(dev
->irq
, stmmac_mac_interrupt
,
3630 if (unlikely(ret
< 0)) {
3631 netdev_err(priv
->dev
,
3632 "%s: alloc mac MSI %d (error: %d)\n",
3633 __func__
, dev
->irq
, ret
);
3634 irq_err
= REQ_IRQ_ERR_MAC
;
3638 /* Request the Wake IRQ in case of another line
3641 priv
->wol_irq_disabled
= true;
3642 if (priv
->wol_irq
> 0 && priv
->wol_irq
!= dev
->irq
) {
3643 int_name
= priv
->int_name_wol
;
3644 sprintf(int_name
, "%s:%s", dev
->name
, "wol");
3645 ret
= request_irq(priv
->wol_irq
,
3646 stmmac_mac_interrupt
,
3648 if (unlikely(ret
< 0)) {
3649 netdev_err(priv
->dev
,
3650 "%s: alloc wol MSI %d (error: %d)\n",
3651 __func__
, priv
->wol_irq
, ret
);
3652 irq_err
= REQ_IRQ_ERR_WOL
;
3657 /* Request the LPI IRQ in case of another line
3660 if (priv
->lpi_irq
> 0 && priv
->lpi_irq
!= dev
->irq
) {
3661 int_name
= priv
->int_name_lpi
;
3662 sprintf(int_name
, "%s:%s", dev
->name
, "lpi");
3663 ret
= request_irq(priv
->lpi_irq
,
3664 stmmac_mac_interrupt
,
3666 if (unlikely(ret
< 0)) {
3667 netdev_err(priv
->dev
,
3668 "%s: alloc lpi MSI %d (error: %d)\n",
3669 __func__
, priv
->lpi_irq
, ret
);
3670 irq_err
= REQ_IRQ_ERR_LPI
;
3675 /* Request the common Safety Feature Correctible/Uncorrectible
3676 * Error line in case of another line is used
3678 if (priv
->sfty_irq
> 0 && priv
->sfty_irq
!= dev
->irq
) {
3679 int_name
= priv
->int_name_sfty
;
3680 sprintf(int_name
, "%s:%s", dev
->name
, "safety");
3681 ret
= request_irq(priv
->sfty_irq
, stmmac_safety_interrupt
,
3683 if (unlikely(ret
< 0)) {
3684 netdev_err(priv
->dev
,
3685 "%s: alloc sfty MSI %d (error: %d)\n",
3686 __func__
, priv
->sfty_irq
, ret
);
3687 irq_err
= REQ_IRQ_ERR_SFTY
;
3692 /* Request the Safety Feature Correctible Error line in
3693 * case of another line is used
3695 if (priv
->sfty_ce_irq
> 0 && priv
->sfty_ce_irq
!= dev
->irq
) {
3696 int_name
= priv
->int_name_sfty_ce
;
3697 sprintf(int_name
, "%s:%s", dev
->name
, "safety-ce");
3698 ret
= request_irq(priv
->sfty_ce_irq
,
3699 stmmac_safety_interrupt
,
3701 if (unlikely(ret
< 0)) {
3702 netdev_err(priv
->dev
,
3703 "%s: alloc sfty ce MSI %d (error: %d)\n",
3704 __func__
, priv
->sfty_ce_irq
, ret
);
3705 irq_err
= REQ_IRQ_ERR_SFTY_CE
;
3710 /* Request the Safety Feature Uncorrectible Error line in
3711 * case of another line is used
3713 if (priv
->sfty_ue_irq
> 0 && priv
->sfty_ue_irq
!= dev
->irq
) {
3714 int_name
= priv
->int_name_sfty_ue
;
3715 sprintf(int_name
, "%s:%s", dev
->name
, "safety-ue");
3716 ret
= request_irq(priv
->sfty_ue_irq
,
3717 stmmac_safety_interrupt
,
3719 if (unlikely(ret
< 0)) {
3720 netdev_err(priv
->dev
,
3721 "%s: alloc sfty ue MSI %d (error: %d)\n",
3722 __func__
, priv
->sfty_ue_irq
, ret
);
3723 irq_err
= REQ_IRQ_ERR_SFTY_UE
;
3728 /* Request Rx MSI irq */
3729 for (i
= 0; i
< priv
->plat
->rx_queues_to_use
; i
++) {
3730 if (i
>= MTL_MAX_RX_QUEUES
)
3732 if (priv
->rx_irq
[i
] == 0)
3735 int_name
= priv
->int_name_rx_irq
[i
];
3736 sprintf(int_name
, "%s:%s-%d", dev
->name
, "rx", i
);
3737 ret
= request_irq(priv
->rx_irq
[i
],
3739 0, int_name
, &priv
->dma_conf
.rx_queue
[i
]);
3740 if (unlikely(ret
< 0)) {
3741 netdev_err(priv
->dev
,
3742 "%s: alloc rx-%d MSI %d (error: %d)\n",
3743 __func__
, i
, priv
->rx_irq
[i
], ret
);
3744 irq_err
= REQ_IRQ_ERR_RX
;
3748 cpumask_clear(&cpu_mask
);
3749 cpumask_set_cpu(i
% num_online_cpus(), &cpu_mask
);
3750 irq_set_affinity_hint(priv
->rx_irq
[i
], &cpu_mask
);
3753 /* Request Tx MSI irq */
3754 for (i
= 0; i
< priv
->plat
->tx_queues_to_use
; i
++) {
3755 if (i
>= MTL_MAX_TX_QUEUES
)
3757 if (priv
->tx_irq
[i
] == 0)
3760 int_name
= priv
->int_name_tx_irq
[i
];
3761 sprintf(int_name
, "%s:%s-%d", dev
->name
, "tx", i
);
3762 ret
= request_irq(priv
->tx_irq
[i
],
3764 0, int_name
, &priv
->dma_conf
.tx_queue
[i
]);
3765 if (unlikely(ret
< 0)) {
3766 netdev_err(priv
->dev
,
3767 "%s: alloc tx-%d MSI %d (error: %d)\n",
3768 __func__
, i
, priv
->tx_irq
[i
], ret
);
3769 irq_err
= REQ_IRQ_ERR_TX
;
3773 cpumask_clear(&cpu_mask
);
3774 cpumask_set_cpu(i
% num_online_cpus(), &cpu_mask
);
3775 irq_set_affinity_hint(priv
->tx_irq
[i
], &cpu_mask
);
3781 stmmac_free_irq(dev
, irq_err
, irq_idx
);
3785 static int stmmac_request_irq_single(struct net_device
*dev
)
3787 struct stmmac_priv
*priv
= netdev_priv(dev
);
3788 enum request_irq_err irq_err
;
3791 ret
= request_irq(dev
->irq
, stmmac_interrupt
,
3792 IRQF_SHARED
, dev
->name
, dev
);
3793 if (unlikely(ret
< 0)) {
3794 netdev_err(priv
->dev
,
3795 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3796 __func__
, dev
->irq
, ret
);
3797 irq_err
= REQ_IRQ_ERR_MAC
;
3801 /* Request the Wake IRQ in case of another line
3804 if (priv
->wol_irq
> 0 && priv
->wol_irq
!= dev
->irq
) {
3805 ret
= request_irq(priv
->wol_irq
, stmmac_interrupt
,
3806 IRQF_SHARED
, dev
->name
, dev
);
3807 if (unlikely(ret
< 0)) {
3808 netdev_err(priv
->dev
,
3809 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3810 __func__
, priv
->wol_irq
, ret
);
3811 irq_err
= REQ_IRQ_ERR_WOL
;
3816 /* Request the IRQ lines */
3817 if (priv
->lpi_irq
> 0 && priv
->lpi_irq
!= dev
->irq
) {
3818 ret
= request_irq(priv
->lpi_irq
, stmmac_interrupt
,
3819 IRQF_SHARED
, dev
->name
, dev
);
3820 if (unlikely(ret
< 0)) {
3821 netdev_err(priv
->dev
,
3822 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3823 __func__
, priv
->lpi_irq
, ret
);
3824 irq_err
= REQ_IRQ_ERR_LPI
;
3829 /* Request the common Safety Feature Correctible/Uncorrectible
3830 * Error line in case of another line is used
3832 if (priv
->sfty_irq
> 0 && priv
->sfty_irq
!= dev
->irq
) {
3833 ret
= request_irq(priv
->sfty_irq
, stmmac_safety_interrupt
,
3834 IRQF_SHARED
, dev
->name
, dev
);
3835 if (unlikely(ret
< 0)) {
3836 netdev_err(priv
->dev
,
3837 "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3838 __func__
, priv
->sfty_irq
, ret
);
3839 irq_err
= REQ_IRQ_ERR_SFTY
;
3847 stmmac_free_irq(dev
, irq_err
, 0);
static int stmmac_request_irq(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	/* Request the IRQ lines */
	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
		ret = stmmac_request_irq_multi_msi(dev);
	else
		ret = stmmac_request_irq_single(dev);

	return ret;
}
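/* STMMAC_FLAG_MULTI_MSI_EN selects the per-vector path, where the MAC, WoL,
 * LPI, safety and each RX/TX queue get their own interrupt line; without it
 * a single shared line (dev->irq) services everything.
 */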
3866 * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3867 * @priv: driver private structure
3868 * @mtu: MTU to setup the dma queue and buf with
3869 * Description: Allocate and generate a dma_conf based on the provided MTU.
3870 * Allocate the Tx/Rx DMA queue and init them.
3872 * the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3874 static struct stmmac_dma_conf
*
3875 stmmac_setup_dma_desc(struct stmmac_priv
*priv
, unsigned int mtu
)
3877 struct stmmac_dma_conf
*dma_conf
;
3878 int chan
, bfsize
, ret
;
3880 dma_conf
= kzalloc(sizeof(*dma_conf
), GFP_KERNEL
);
3882 netdev_err(priv
->dev
, "%s: DMA conf allocation failed\n",
3884 return ERR_PTR(-ENOMEM
);
3887 bfsize
= stmmac_set_16kib_bfsize(priv
, mtu
);
3891 if (bfsize
< BUF_SIZE_16KiB
)
3892 bfsize
= stmmac_set_bfsize(mtu
, 0);
3894 dma_conf
->dma_buf_sz
= bfsize
;
3895 /* Chose the tx/rx size from the already defined one in the
3896 * priv struct. (if defined)
3898 dma_conf
->dma_tx_size
= priv
->dma_conf
.dma_tx_size
;
3899 dma_conf
->dma_rx_size
= priv
->dma_conf
.dma_rx_size
;
3901 if (!dma_conf
->dma_tx_size
)
3902 dma_conf
->dma_tx_size
= DMA_DEFAULT_TX_SIZE
;
3903 if (!dma_conf
->dma_rx_size
)
3904 dma_conf
->dma_rx_size
= DMA_DEFAULT_RX_SIZE
;
3906 /* Earlier check for TBS */
3907 for (chan
= 0; chan
< priv
->plat
->tx_queues_to_use
; chan
++) {
3908 struct stmmac_tx_queue
*tx_q
= &dma_conf
->tx_queue
[chan
];
3909 int tbs_en
= priv
->plat
->tx_queues_cfg
[chan
].tbs_en
;
3911 /* Setup per-TXQ tbs flag before TX descriptor alloc */
3912 tx_q
->tbs
|= tbs_en
? STMMAC_TBS_AVAIL
: 0;
3915 ret
= alloc_dma_desc_resources(priv
, dma_conf
);
3917 netdev_err(priv
->dev
, "%s: DMA descriptors allocation failed\n",
3922 ret
= init_dma_desc_rings(priv
->dev
, dma_conf
, GFP_KERNEL
);
3924 netdev_err(priv
->dev
, "%s: DMA descriptors initialization failed\n",
3932 free_dma_desc_resources(priv
, dma_conf
);
3935 return ERR_PTR(ret
);
3939 * __stmmac_open - open entry point of the driver
3940 * @dev : pointer to the device structure.
3941 * @dma_conf : structure to take the dma data
3943 * This function is the open entry point of the driver.
3945 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3948 static int __stmmac_open(struct net_device
*dev
,
3949 struct stmmac_dma_conf
*dma_conf
)
3951 struct stmmac_priv
*priv
= netdev_priv(dev
);
3952 int mode
= priv
->plat
->phy_interface
;
3956 ret
= pm_runtime_resume_and_get(priv
->device
);
3960 if (priv
->hw
->pcs
!= STMMAC_PCS_TBI
&&
3961 priv
->hw
->pcs
!= STMMAC_PCS_RTBI
&&
3963 xpcs_get_an_mode(priv
->hw
->xpcs
, mode
) != DW_AN_C73
) &&
3964 !priv
->hw
->lynx_pcs
) {
3965 ret
= stmmac_init_phy(dev
);
3967 netdev_err(priv
->dev
,
3968 "%s: Cannot attach to PHY (error: %d)\n",
3970 goto init_phy_error
;
3974 priv
->rx_copybreak
= STMMAC_RX_COPYBREAK
;
3976 buf_sz
= dma_conf
->dma_buf_sz
;
3977 for (int i
= 0; i
< MTL_MAX_TX_QUEUES
; i
++)
3978 if (priv
->dma_conf
.tx_queue
[i
].tbs
& STMMAC_TBS_EN
)
3979 dma_conf
->tx_queue
[i
].tbs
= priv
->dma_conf
.tx_queue
[i
].tbs
;
3980 memcpy(&priv
->dma_conf
, dma_conf
, sizeof(*dma_conf
));
3982 stmmac_reset_queues_param(priv
);
3984 if (!(priv
->plat
->flags
& STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP
) &&
3985 priv
->plat
->serdes_powerup
) {
3986 ret
= priv
->plat
->serdes_powerup(dev
, priv
->plat
->bsp_priv
);
3988 netdev_err(priv
->dev
, "%s: Serdes powerup failed\n",
3994 ret
= stmmac_hw_setup(dev
, true);
3996 netdev_err(priv
->dev
, "%s: Hw setup failed\n", __func__
);
4000 stmmac_init_coalesce(priv
);
4002 phylink_start(priv
->phylink
);
4003 /* We may have called phylink_speed_down before */
4004 phylink_speed_up(priv
->phylink
);
4006 ret
= stmmac_request_irq(dev
);
4010 stmmac_enable_all_queues(priv
);
4011 netif_tx_start_all_queues(priv
->dev
);
4012 stmmac_enable_all_dma_irq(priv
);
4017 phylink_stop(priv
->phylink
);
4019 for (chan
= 0; chan
< priv
->plat
->tx_queues_to_use
; chan
++)
4020 hrtimer_cancel(&priv
->dma_conf
.tx_queue
[chan
].txtimer
);
4022 stmmac_hw_teardown(dev
);
4024 phylink_disconnect_phy(priv
->phylink
);
4026 pm_runtime_put(priv
->device
);
static int stmmac_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct stmmac_dma_conf *dma_conf;
	int ret;

	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
	if (IS_ERR(dma_conf))
		return PTR_ERR(dma_conf);

	ret = __stmmac_open(dev, dma_conf);
	if (ret)
		free_dma_desc_resources(priv, dma_conf);

	kfree(dma_conf);
	return ret;
}
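/* Note that dma_conf is only a staging copy: __stmmac_open() memcpy()s it
 * into priv->dma_conf, so the temporary allocation from
 * stmmac_setup_dma_desc() is kfree()d on both the success and the error
 * path, while the DMA resources themselves are only released on error.
 */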
static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
{
	set_bit(__FPE_REMOVING, &priv->fpe_task_state);

	if (priv->fpe_wq) {
		destroy_workqueue(priv->fpe_wq);
		priv->fpe_wq = NULL;
	}

	netdev_info(priv->dev, "FPE workqueue stop");
}
4061 * stmmac_release - close entry point of the driver
4062 * @dev : device pointer.
4064 * This is the stop entry point of the driver.
4066 static int stmmac_release(struct net_device
*dev
)
4068 struct stmmac_priv
*priv
= netdev_priv(dev
);
4071 if (device_may_wakeup(priv
->device
))
4072 phylink_speed_down(priv
->phylink
, false);
4073 /* Stop and disconnect the PHY */
4074 phylink_stop(priv
->phylink
);
4075 phylink_disconnect_phy(priv
->phylink
);
4077 stmmac_disable_all_queues(priv
);
4079 for (chan
= 0; chan
< priv
->plat
->tx_queues_to_use
; chan
++)
4080 hrtimer_cancel(&priv
->dma_conf
.tx_queue
[chan
].txtimer
);
4082 netif_tx_disable(dev
);
4084 /* Free the IRQ lines */
4085 stmmac_free_irq(dev
, REQ_IRQ_ERR_ALL
, 0);
4087 if (priv
->eee_enabled
) {
4088 priv
->tx_path_in_lpi_mode
= false;
4089 del_timer_sync(&priv
->eee_ctrl_timer
);
4092 /* Stop TX/RX DMA and clear the descriptors */
4093 stmmac_stop_all_dma(priv
);
4095 /* Release and free the Rx/Tx resources */
4096 free_dma_desc_resources(priv
, &priv
->dma_conf
);
4098 /* Disable the MAC Rx/Tx */
4099 stmmac_mac_set(priv
, priv
->ioaddr
, false);
4101 /* Powerdown Serdes if there is */
4102 if (priv
->plat
->serdes_powerdown
)
4103 priv
->plat
->serdes_powerdown(dev
, priv
->plat
->bsp_priv
);
4105 netif_carrier_off(dev
);
4107 stmmac_release_ptp(priv
);
4109 pm_runtime_put(priv
->device
);
4111 if (priv
->dma_cap
.fpesel
)
4112 stmmac_fpe_stop_wq(priv
);
static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
			       struct stmmac_tx_queue *tx_q)
{
	u16 tag = 0x0, inner_tag = 0x0;
	u32 inner_type = 0x0;
	struct dma_desc *p;

	if (!priv->dma_cap.vlins)
		return false;
	if (!skb_vlan_tag_present(skb))
		return false;
	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
		inner_tag = skb_vlan_tag_get(skb);
		inner_type = STMMAC_VLAN_INSERT;
	}

	tag = skb_vlan_tag_get(skb);

	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
	else
		p = &tx_q->dma_tx[tx_q->cur_tx];

	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
		return false;

	stmmac_set_tx_owner(priv, p);
	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
	return true;
}
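
/* stmmac_vlan_insert() programs the VLAN tag into a descriptor of its own:
 * it sets the descriptor own bit and advances tx_q->cur_tx, so a successful
 * call consumes one TX ring entry in addition to the data descriptors the
 * caller prepares afterwards.
 */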
/**
 *  stmmac_tso_allocator - fill TSO descriptors for a payload chunk
 *  @priv: driver private structure
 *  @des: buffer start address
 *  @total_len: total length to fill in descriptors
 *  @last_segment: condition for the last descriptor
 *  @queue: TX queue index
 *  Description:
 *  This function fills the descriptors, requesting new ones as needed
 *  according to the length of the buffer to map.
 */
static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
				 int total_len, bool last_segment, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	struct dma_desc *desc;
	u32 buff_size;
	int tmp_len;

	tmp_len = total_len;

	while (tmp_len > 0) {
		dma_addr_t curr_addr;

		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
						priv->dma_conf.dma_tx_size);
		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);

		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			desc = &tx_q->dma_tx[tx_q->cur_tx];

		curr_addr = des + (total_len - tmp_len);
		if (priv->dma_cap.addr64 <= 32)
			desc->des0 = cpu_to_le32(curr_addr);
		else
			stmmac_set_desc_addr(priv, desc, curr_addr);

		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
			    TSO_MAX_BUFF_SIZE : tmp_len;

		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
					   0, 1,
					   (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
					   0, 0);

		tmp_len -= TSO_MAX_BUFF_SIZE;
	}
}
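
/* The loop above emits DIV_ROUND_UP(total_len, TSO_MAX_BUFF_SIZE)
 * descriptors for the payload chunk starting at 'des'; only the final chunk
 * can be flagged as the last segment, i.e. when last_segment is set and
 * tmp_len <= TSO_MAX_BUFF_SIZE.
 */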
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	int desc_size;

	if (likely(priv->extend_desc))
		desc_size = sizeof(struct dma_extended_desc);
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		desc_size = sizeof(struct dma_edesc);
	else
		desc_size = sizeof(struct dma_desc);

	/* The own bit must be the latest setting done when preparing the
	 * descriptor, and then a barrier is needed to make sure that
	 * all is coherent before granting the DMA engine.
	 */
	wmb();

	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
}
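
/* Writing the tail pointer after the barrier acts as the doorbell for the
 * queue: the DMA engine re-scans the ring up to tx_tail_addr and fetches
 * every descriptor whose own bit has been set.
 */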
/**
 *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
 *  @skb : the socket buffer
 *  @dev : device pointer
 *  Description: this is the transmit function that is called on TSO frames
 *  (support available on GMAC4 and newer chips).
 *  The diagram below shows the ring programming in case of TSO frames:
 *
 *  First Descriptor
 *   --------
 *   | DES0 |---> buffer1 = L2/L3/L4 header
 *   | DES1 |---> TCP Payload (can continue on next descr...)
 *   | DES2 |---> buffer 1 and 2 len
 *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
 *   --------
 *	|
 *     ...
 *	|
 *   --------
 *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
 *   | DES1 | --|
 *   | DES2 | --> buffer 1 and 2 len
 *   | DES3 |
 *   --------
 *
 * The MSS is fixed while TSO is enabled, so the TDES3 context field is only
 * reprogrammed when the MSS actually changes.
 */
static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dma_desc *desc, *first, *mss_desc = NULL;
	struct stmmac_priv *priv = netdev_priv(dev);
	int nfrags = skb_shinfo(skb)->nr_frags;
	u32 queue = skb_get_queue_mapping(skb);
	unsigned int first_entry, tx_packets;
	struct stmmac_txq_stats *txq_stats;
	int tmp_pay_len = 0, first_tx;
	struct stmmac_tx_queue *tx_q;
	bool has_vlan, set_ic;
	u8 proto_hdr_len, hdr;
	u32 pay_len, mss;
	dma_addr_t des;
	int i;

	tx_q = &priv->dma_conf.tx_queue[queue];
	txq_stats = &priv->xstats.txq_stats[queue];
	first_tx = tx_q->cur_tx;
4268 /* Compute header lengths */
4269 if (skb_shinfo(skb
)->gso_type
& SKB_GSO_UDP_L4
) {
4270 proto_hdr_len
= skb_transport_offset(skb
) + sizeof(struct udphdr
);
4271 hdr
= sizeof(struct udphdr
);
4273 proto_hdr_len
= skb_tcp_all_headers(skb
);
4274 hdr
= tcp_hdrlen(skb
);
4277 /* Desc availability based on threshold should be enough safe */
4278 if (unlikely(stmmac_tx_avail(priv
, queue
) <
4279 (((skb
->len
- proto_hdr_len
) / TSO_MAX_BUFF_SIZE
+ 1)))) {
4280 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev
, queue
))) {
4281 netif_tx_stop_queue(netdev_get_tx_queue(priv
->dev
,
4283 /* This is a hard error, log it. */
4284 netdev_err(priv
->dev
,
4285 "%s: Tx Ring full when queue awake\n",
4288 return NETDEV_TX_BUSY
;
4291 pay_len
= skb_headlen(skb
) - proto_hdr_len
; /* no frags */
4293 mss
= skb_shinfo(skb
)->gso_size
;
4295 /* set new MSS value if needed */
4296 if (mss
!= tx_q
->mss
) {
4297 if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
4298 mss_desc
= &tx_q
->dma_entx
[tx_q
->cur_tx
].basic
;
4300 mss_desc
= &tx_q
->dma_tx
[tx_q
->cur_tx
];
4302 stmmac_set_mss(priv
, mss_desc
, mss
);
4304 tx_q
->cur_tx
= STMMAC_GET_ENTRY(tx_q
->cur_tx
,
4305 priv
->dma_conf
.dma_tx_size
);
4306 WARN_ON(tx_q
->tx_skbuff
[tx_q
->cur_tx
]);
4309 if (netif_msg_tx_queued(priv
)) {
4310 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4311 __func__
, hdr
, proto_hdr_len
, pay_len
, mss
);
4312 pr_info("\tskb->len %d, skb->data_len %d\n", skb
->len
,
4316 /* Check if VLAN can be inserted by HW */
4317 has_vlan
= stmmac_vlan_insert(priv
, skb
, tx_q
);
4319 first_entry
= tx_q
->cur_tx
;
4320 WARN_ON(tx_q
->tx_skbuff
[first_entry
]);
4322 if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
4323 desc
= &tx_q
->dma_entx
[first_entry
].basic
;
4325 desc
= &tx_q
->dma_tx
[first_entry
];
4329 stmmac_set_desc_vlan(priv
, first
, STMMAC_VLAN_INSERT
);
4331 /* first descriptor: fill Headers on Buf1 */
4332 des
= dma_map_single(priv
->device
, skb
->data
, skb_headlen(skb
),
4334 if (dma_mapping_error(priv
->device
, des
))
4337 tx_q
->tx_skbuff_dma
[first_entry
].buf
= des
;
4338 tx_q
->tx_skbuff_dma
[first_entry
].len
= skb_headlen(skb
);
4339 tx_q
->tx_skbuff_dma
[first_entry
].map_as_page
= false;
4340 tx_q
->tx_skbuff_dma
[first_entry
].buf_type
= STMMAC_TXBUF_T_SKB
;
4342 if (priv
->dma_cap
.addr64
<= 32) {
4343 first
->des0
= cpu_to_le32(des
);
4345 /* Fill start of payload in buff2 of first descriptor */
4347 first
->des1
= cpu_to_le32(des
+ proto_hdr_len
);
4349 /* If needed take extra descriptors to fill the remaining payload */
4350 tmp_pay_len
= pay_len
- TSO_MAX_BUFF_SIZE
;
4352 stmmac_set_desc_addr(priv
, first
, des
);
4353 tmp_pay_len
= pay_len
;
4354 des
+= proto_hdr_len
;
4358 stmmac_tso_allocator(priv
, des
, tmp_pay_len
, (nfrags
== 0), queue
);
4360 /* Prepare fragments */
4361 for (i
= 0; i
< nfrags
; i
++) {
4362 const skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
4364 des
= skb_frag_dma_map(priv
->device
, frag
, 0,
4365 skb_frag_size(frag
),
4367 if (dma_mapping_error(priv
->device
, des
))
4370 stmmac_tso_allocator(priv
, des
, skb_frag_size(frag
),
4371 (i
== nfrags
- 1), queue
);
4373 tx_q
->tx_skbuff_dma
[tx_q
->cur_tx
].buf
= des
;
4374 tx_q
->tx_skbuff_dma
[tx_q
->cur_tx
].len
= skb_frag_size(frag
);
4375 tx_q
->tx_skbuff_dma
[tx_q
->cur_tx
].map_as_page
= true;
4376 tx_q
->tx_skbuff_dma
[tx_q
->cur_tx
].buf_type
= STMMAC_TXBUF_T_SKB
;
4379 tx_q
->tx_skbuff_dma
[tx_q
->cur_tx
].last_segment
= true;
4381 /* Only the last descriptor gets to point to the skb. */
4382 tx_q
->tx_skbuff
[tx_q
->cur_tx
] = skb
;
4383 tx_q
->tx_skbuff_dma
[tx_q
->cur_tx
].buf_type
= STMMAC_TXBUF_T_SKB
;
4385 /* Manage tx mitigation */
4386 tx_packets
= (tx_q
->cur_tx
+ 1) - first_tx
;
4387 tx_q
->tx_count_frames
+= tx_packets
;
4389 if ((skb_shinfo(skb
)->tx_flags
& SKBTX_HW_TSTAMP
) && priv
->hwts_tx_en
)
4391 else if (!priv
->tx_coal_frames
[queue
])
4393 else if (tx_packets
> priv
->tx_coal_frames
[queue
])
4395 else if ((tx_q
->tx_count_frames
%
4396 priv
->tx_coal_frames
[queue
]) < tx_packets
)
4402 if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
4403 desc
= &tx_q
->dma_entx
[tx_q
->cur_tx
].basic
;
4405 desc
= &tx_q
->dma_tx
[tx_q
->cur_tx
];
4407 tx_q
->tx_count_frames
= 0;
4408 stmmac_set_tx_ic(priv
, desc
);
4411 /* We've used all descriptors we need for this skb, however,
4412 * advance cur_tx so that it references a fresh descriptor.
4413 * ndo_start_xmit will fill this descriptor the next time it's
4414 * called and stmmac_tx_clean may clean up to this descriptor.
4416 tx_q
->cur_tx
= STMMAC_GET_ENTRY(tx_q
->cur_tx
, priv
->dma_conf
.dma_tx_size
);
4418 if (unlikely(stmmac_tx_avail(priv
, queue
) <= (MAX_SKB_FRAGS
+ 1))) {
4419 netif_dbg(priv
, hw
, priv
->dev
, "%s: stop transmitted packets\n",
4421 netif_tx_stop_queue(netdev_get_tx_queue(priv
->dev
, queue
));
4424 u64_stats_update_begin(&txq_stats
->q_syncp
);
4425 u64_stats_add(&txq_stats
->q
.tx_bytes
, skb
->len
);
4426 u64_stats_inc(&txq_stats
->q
.tx_tso_frames
);
4427 u64_stats_add(&txq_stats
->q
.tx_tso_nfrags
, nfrags
);
4429 u64_stats_inc(&txq_stats
->q
.tx_set_ic_bit
);
4430 u64_stats_update_end(&txq_stats
->q_syncp
);
4432 if (priv
->sarc_type
)
4433 stmmac_set_desc_sarc(priv
, first
, priv
->sarc_type
);
4435 skb_tx_timestamp(skb
);
4437 if (unlikely((skb_shinfo(skb
)->tx_flags
& SKBTX_HW_TSTAMP
) &&
4438 priv
->hwts_tx_en
)) {
4439 /* declare that device is doing timestamping */
4440 skb_shinfo(skb
)->tx_flags
|= SKBTX_IN_PROGRESS
;
4441 stmmac_enable_tx_timestamp(priv
, first
);
4444 /* Complete the first descriptor before granting the DMA */
4445 stmmac_prepare_tso_tx_desc(priv
, first
, 1,
4448 1, tx_q
->tx_skbuff_dma
[first_entry
].last_segment
,
4449 hdr
/ 4, (skb
->len
- proto_hdr_len
));
4451 /* If context desc is used to change MSS */
4453 /* Make sure that first descriptor has been completely
4454 * written, including its own bit. This is because MSS is
4455 * actually before first descriptor, so we need to make
4456 * sure that MSS's own bit is the last thing written.
4459 stmmac_set_tx_owner(priv
, mss_desc
);
4462 if (netif_msg_pktdata(priv
)) {
4463 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4464 __func__
, tx_q
->cur_tx
, tx_q
->dirty_tx
, first_entry
,
4465 tx_q
->cur_tx
, first
, nfrags
);
4466 pr_info(">>> frame to be transmitted: ");
4467 print_pkt(skb
->data
, skb_headlen(skb
));
4470 netdev_tx_sent_queue(netdev_get_tx_queue(dev
, queue
), skb
->len
);
4472 stmmac_flush_tx_descriptors(priv
, queue
);
4473 stmmac_tx_timer_arm(priv
, queue
);
4475 return NETDEV_TX_OK
;
4478 dev_err(priv
->device
, "Tx dma map failed\n");
4480 priv
->xstats
.tx_dropped
++;
4481 return NETDEV_TX_OK
;
/**
 * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
 * @skb: socket buffer to check
 *
 * Check if a packet has an ethertype that will trigger the IP header checks
 * and IP/TCP checksum engine of the stmmac core.
 *
 * Return: true if the ethertype can trigger the checksum engine, false
 * otherwise.
 */
static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
{
	int depth = 0;
	__be16 proto;

	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
				    &depth);

	return (depth <= ETH_HLEN) &&
		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
}
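
/* Frames that fail this check (for example most DSA-tagged frames, whose
 * outer ethertype is neither IPv4 nor IPv6) must not be handed to the
 * checksum engine; the transmit path falls back to skb_checksum_help() for
 * them.
 */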
/**
 *  stmmac_xmit - Tx entry point of the driver
 *  @skb : the socket buffer
 *  @dev : device pointer
 *  Description : this is the tx entry point of the driver.
 *  It programs the chain or the ring and supports oversized frames
 *  and the SG feature.
 */
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
4516 unsigned int first_entry
, tx_packets
, enh_desc
;
4517 struct stmmac_priv
*priv
= netdev_priv(dev
);
4518 unsigned int nopaged_len
= skb_headlen(skb
);
4519 int i
, csum_insertion
= 0, is_jumbo
= 0;
4520 u32 queue
= skb_get_queue_mapping(skb
);
4521 int nfrags
= skb_shinfo(skb
)->nr_frags
;
4522 int gso
= skb_shinfo(skb
)->gso_type
;
4523 struct stmmac_txq_stats
*txq_stats
;
4524 struct dma_edesc
*tbs_desc
= NULL
;
4525 struct dma_desc
*desc
, *first
;
4526 struct stmmac_tx_queue
*tx_q
;
4527 bool has_vlan
, set_ic
;
4528 int entry
, first_tx
;
4531 tx_q
= &priv
->dma_conf
.tx_queue
[queue
];
4532 txq_stats
= &priv
->xstats
.txq_stats
[queue
];
4533 first_tx
= tx_q
->cur_tx
;
4535 if (priv
->tx_path_in_lpi_mode
&& priv
->eee_sw_timer_en
)
4536 stmmac_disable_eee_mode(priv
);
4538 /* Manage oversized TCP frames for GMAC4 device */
4539 if (skb_is_gso(skb
) && priv
->tso
) {
4540 if (gso
& (SKB_GSO_TCPV4
| SKB_GSO_TCPV6
))
4541 return stmmac_tso_xmit(skb
, dev
);
4542 if (priv
->plat
->has_gmac4
&& (gso
& SKB_GSO_UDP_L4
))
4543 return stmmac_tso_xmit(skb
, dev
);
4546 if (priv
->plat
->est
&& priv
->plat
->est
->enable
&&
4547 priv
->plat
->est
->max_sdu
[queue
] &&
4548 skb
->len
> priv
->plat
->est
->max_sdu
[queue
]){
4549 priv
->xstats
.max_sdu_txq_drop
[queue
]++;
4553 if (unlikely(stmmac_tx_avail(priv
, queue
) < nfrags
+ 1)) {
4554 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev
, queue
))) {
4555 netif_tx_stop_queue(netdev_get_tx_queue(priv
->dev
,
4557 /* This is a hard error, log it. */
4558 netdev_err(priv
->dev
,
4559 "%s: Tx Ring full when queue awake\n",
4562 return NETDEV_TX_BUSY
;
4565 /* Check if VLAN can be inserted by HW */
4566 has_vlan
= stmmac_vlan_insert(priv
, skb
, tx_q
);
4568 entry
= tx_q
->cur_tx
;
4569 first_entry
= entry
;
4570 WARN_ON(tx_q
->tx_skbuff
[first_entry
]);
4572 csum_insertion
= (skb
->ip_summed
== CHECKSUM_PARTIAL
);
4573 /* DWMAC IPs can be synthesized to support tx coe only for a few tx
4574 * queues. In that case, checksum offloading for those queues that don't
4575 * support tx coe needs to fallback to software checksum calculation.
4577 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4578 * also have to be checksummed in software.
4580 if (csum_insertion
&&
4581 (priv
->plat
->tx_queues_cfg
[queue
].coe_unsupported
||
4582 !stmmac_has_ip_ethertype(skb
))) {
4583 if (unlikely(skb_checksum_help(skb
)))
4585 csum_insertion
= !csum_insertion
;
4588 if (likely(priv
->extend_desc
))
4589 desc
= (struct dma_desc
*)(tx_q
->dma_etx
+ entry
);
4590 else if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
4591 desc
= &tx_q
->dma_entx
[entry
].basic
;
4593 desc
= tx_q
->dma_tx
+ entry
;
4598 stmmac_set_desc_vlan(priv
, first
, STMMAC_VLAN_INSERT
);
4600 enh_desc
= priv
->plat
->enh_desc
;
4601 /* To program the descriptors according to the size of the frame */
4603 is_jumbo
= stmmac_is_jumbo_frm(priv
, skb
->len
, enh_desc
);
4605 if (unlikely(is_jumbo
)) {
4606 entry
= stmmac_jumbo_frm(priv
, tx_q
, skb
, csum_insertion
);
4607 if (unlikely(entry
< 0) && (entry
!= -EINVAL
))
4611 for (i
= 0; i
< nfrags
; i
++) {
4612 const skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
4613 int len
= skb_frag_size(frag
);
4614 bool last_segment
= (i
== (nfrags
- 1));
4616 entry
= STMMAC_GET_ENTRY(entry
, priv
->dma_conf
.dma_tx_size
);
4617 WARN_ON(tx_q
->tx_skbuff
[entry
]);
4619 if (likely(priv
->extend_desc
))
4620 desc
= (struct dma_desc
*)(tx_q
->dma_etx
+ entry
);
4621 else if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
4622 desc
= &tx_q
->dma_entx
[entry
].basic
;
4624 desc
= tx_q
->dma_tx
+ entry
;
4626 des
= skb_frag_dma_map(priv
->device
, frag
, 0, len
,
4628 if (dma_mapping_error(priv
->device
, des
))
4629 goto dma_map_err
; /* should reuse desc w/o issues */
4631 tx_q
->tx_skbuff_dma
[entry
].buf
= des
;
4633 stmmac_set_desc_addr(priv
, desc
, des
);
4635 tx_q
->tx_skbuff_dma
[entry
].map_as_page
= true;
4636 tx_q
->tx_skbuff_dma
[entry
].len
= len
;
4637 tx_q
->tx_skbuff_dma
[entry
].last_segment
= last_segment
;
4638 tx_q
->tx_skbuff_dma
[entry
].buf_type
= STMMAC_TXBUF_T_SKB
;
4640 /* Prepare the descriptor and set the own bit too */
4641 stmmac_prepare_tx_desc(priv
, desc
, 0, len
, csum_insertion
,
4642 priv
->mode
, 1, last_segment
, skb
->len
);
4645 /* Only the last descriptor gets to point to the skb. */
4646 tx_q
->tx_skbuff
[entry
] = skb
;
4647 tx_q
->tx_skbuff_dma
[entry
].buf_type
= STMMAC_TXBUF_T_SKB
;
4649 /* According to the coalesce parameter the IC bit for the latest
4650 * segment is reset and the timer re-started to clean the tx status.
4651 * This approach takes care about the fragments: desc is the first
4652 * element in case of no SG.
4654 tx_packets
= (entry
+ 1) - first_tx
;
4655 tx_q
->tx_count_frames
+= tx_packets
;
4657 if ((skb_shinfo(skb
)->tx_flags
& SKBTX_HW_TSTAMP
) && priv
->hwts_tx_en
)
4659 else if (!priv
->tx_coal_frames
[queue
])
4661 else if (tx_packets
> priv
->tx_coal_frames
[queue
])
4663 else if ((tx_q
->tx_count_frames
%
4664 priv
->tx_coal_frames
[queue
]) < tx_packets
)
4670 if (likely(priv
->extend_desc
))
4671 desc
= &tx_q
->dma_etx
[entry
].basic
;
4672 else if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
4673 desc
= &tx_q
->dma_entx
[entry
].basic
;
4675 desc
= &tx_q
->dma_tx
[entry
];
4677 tx_q
->tx_count_frames
= 0;
4678 stmmac_set_tx_ic(priv
, desc
);
4681 /* We've used all descriptors we need for this skb, however,
4682 * advance cur_tx so that it references a fresh descriptor.
4683 * ndo_start_xmit will fill this descriptor the next time it's
4684 * called and stmmac_tx_clean may clean up to this descriptor.
4686 entry
= STMMAC_GET_ENTRY(entry
, priv
->dma_conf
.dma_tx_size
);
4687 tx_q
->cur_tx
= entry
;
4689 if (netif_msg_pktdata(priv
)) {
4690 netdev_dbg(priv
->dev
,
4691 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4692 __func__
, tx_q
->cur_tx
, tx_q
->dirty_tx
, first_entry
,
4693 entry
, first
, nfrags
);
4695 netdev_dbg(priv
->dev
, ">>> frame to be transmitted: ");
4696 print_pkt(skb
->data
, skb
->len
);
4699 if (unlikely(stmmac_tx_avail(priv
, queue
) <= (MAX_SKB_FRAGS
+ 1))) {
4700 netif_dbg(priv
, hw
, priv
->dev
, "%s: stop transmitted packets\n",
4702 netif_tx_stop_queue(netdev_get_tx_queue(priv
->dev
, queue
));
4705 u64_stats_update_begin(&txq_stats
->q_syncp
);
4706 u64_stats_add(&txq_stats
->q
.tx_bytes
, skb
->len
);
4708 u64_stats_inc(&txq_stats
->q
.tx_set_ic_bit
);
4709 u64_stats_update_end(&txq_stats
->q_syncp
);
4711 if (priv
->sarc_type
)
4712 stmmac_set_desc_sarc(priv
, first
, priv
->sarc_type
);
4714 skb_tx_timestamp(skb
);
4716 /* Ready to fill the first descriptor and set the OWN bit w/o any
4717 * problems because all the descriptors are actually ready to be
4718 * passed to the DMA engine.
4720 if (likely(!is_jumbo
)) {
4721 bool last_segment
= (nfrags
== 0);
4723 des
= dma_map_single(priv
->device
, skb
->data
,
4724 nopaged_len
, DMA_TO_DEVICE
);
4725 if (dma_mapping_error(priv
->device
, des
))
4728 tx_q
->tx_skbuff_dma
[first_entry
].buf
= des
;
4729 tx_q
->tx_skbuff_dma
[first_entry
].buf_type
= STMMAC_TXBUF_T_SKB
;
4730 tx_q
->tx_skbuff_dma
[first_entry
].map_as_page
= false;
4732 stmmac_set_desc_addr(priv
, first
, des
);
4734 tx_q
->tx_skbuff_dma
[first_entry
].len
= nopaged_len
;
4735 tx_q
->tx_skbuff_dma
[first_entry
].last_segment
= last_segment
;
4737 if (unlikely((skb_shinfo(skb
)->tx_flags
& SKBTX_HW_TSTAMP
) &&
4738 priv
->hwts_tx_en
)) {
4739 /* declare that device is doing timestamping */
4740 skb_shinfo(skb
)->tx_flags
|= SKBTX_IN_PROGRESS
;
4741 stmmac_enable_tx_timestamp(priv
, first
);
4744 /* Prepare the first descriptor setting the OWN bit too */
4745 stmmac_prepare_tx_desc(priv
, first
, 1, nopaged_len
,
4746 csum_insertion
, priv
->mode
, 0, last_segment
,
4750 if (tx_q
->tbs
& STMMAC_TBS_EN
) {
4751 struct timespec64 ts
= ns_to_timespec64(skb
->tstamp
);
4753 tbs_desc
= &tx_q
->dma_entx
[first_entry
];
4754 stmmac_set_desc_tbs(priv
, tbs_desc
, ts
.tv_sec
, ts
.tv_nsec
);
4757 stmmac_set_tx_owner(priv
, first
);
4759 netdev_tx_sent_queue(netdev_get_tx_queue(dev
, queue
), skb
->len
);
4761 stmmac_enable_dma_transmission(priv
, priv
->ioaddr
);
4763 stmmac_flush_tx_descriptors(priv
, queue
);
4764 stmmac_tx_timer_arm(priv
, queue
);
4766 return NETDEV_TX_OK
;
4769 netdev_err(priv
->dev
, "Tx DMA map failed\n");
4772 priv
->xstats
.tx_dropped
++;
4773 return NETDEV_TX_OK
;
static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
{
	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
	__be16 vlan_proto = veth->h_vlan_proto;
	u16 vlanid;

	if ((vlan_proto == htons(ETH_P_8021Q) &&
	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
	    (vlan_proto == htons(ETH_P_8021AD) &&
	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
		/* pop the vlan tag */
		vlanid = ntohs(veth->h_vlan_TCI);
		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
	}
}
/**
 * stmmac_rx_refill - refill used skb preallocated buffers
 * @priv: driver private structure
 * @queue: RX queue index
 * Description : this is to reallocate the skb for the reception process
 * that is based on zero-copy.
 */
static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
	int dirty = stmmac_rx_dirty(priv, queue);
	unsigned int entry = rx_q->dirty_rx;
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	if (priv->dma_cap.host_dma_width <= 32)
		gfp |= GFP_DMA32;

	while (dirty-- > 0) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
		struct dma_desc *p;
		bool use_rx_wd;

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			p = rx_q->dma_rx + entry;

		if (!buf->page) {
			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
			if (!buf->page)
				break;
		}

		if (priv->sph && !buf->sec_page) {
			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
			if (!buf->sec_page)
				break;

			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
		}

		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;

		stmmac_set_desc_addr(priv, p, buf->addr);
		if (priv->sph)
			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
		else
			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
		stmmac_refill_desc3(priv, rx_q, p);

		rx_q->rx_count_frames++;
		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
			rx_q->rx_count_frames = 0;

		use_rx_wd = !priv->rx_coal_frames[queue];
		use_rx_wd |= rx_q->rx_count_frames > 0;
		if (!priv->use_riwt)
			use_rx_wd = false;

		dma_wmb();
		stmmac_set_rx_owner(priv, p, use_rx_wd);

		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
	}
	rx_q->dirty_rx = entry;
	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
			     (rx_q->dirty_rx * sizeof(struct dma_desc));
	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
}
static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
				       struct dma_desc *p,
				       int status, unsigned int len)
{
	unsigned int plen = 0, hlen = 0;
	int coe = priv->hw->rx_csum;

	/* Not first descriptor, buffer is always zero */
	if (priv->sph && len)
		return 0;

	/* First descriptor, get split header length */
	stmmac_get_rx_header_len(priv, p, &hlen);
	if (priv->sph && hlen) {
		priv->xstats.rx_split_hdr_pkt_n++;
		return hlen;
	}

	/* First descriptor, not last descriptor and not split header */
	if (status & rx_not_ls)
		return priv->dma_conf.dma_buf_sz;

	plen = stmmac_get_rx_frame_len(priv, p, coe);

	/* First descriptor and last descriptor and not split header */
	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
}

static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
				       struct dma_desc *p,
				       int status, unsigned int len)
{
	int coe = priv->hw->rx_csum;
	unsigned int plen = 0;

	/* Not split header, buffer is not available */
	if (!priv->sph)
		return 0;

	/* Not last descriptor */
	if (status & rx_not_ls)
		return priv->dma_conf.dma_buf_sz;

	plen = stmmac_get_rx_frame_len(priv, p, coe);

	/* Last descriptor */
	return plen - len;
}
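
/* With Split Header (SPH) enabled, buf1 of the first descriptor carries only
 * the parsed header bytes (hlen) and the payload continues in buf2; on the
 * last descriptor of a frame, buf2 holds the remainder, i.e. the frame length
 * reported by the descriptor minus the bytes already accounted for in 'len'.
 */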
4914 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv
*priv
, int queue
,
4915 struct xdp_frame
*xdpf
, bool dma_map
)
4917 struct stmmac_txq_stats
*txq_stats
= &priv
->xstats
.txq_stats
[queue
];
4918 struct stmmac_tx_queue
*tx_q
= &priv
->dma_conf
.tx_queue
[queue
];
4919 unsigned int entry
= tx_q
->cur_tx
;
4920 struct dma_desc
*tx_desc
;
4921 dma_addr_t dma_addr
;
4924 if (stmmac_tx_avail(priv
, queue
) < STMMAC_TX_THRESH(priv
))
4925 return STMMAC_XDP_CONSUMED
;
4927 if (priv
->plat
->est
&& priv
->plat
->est
->enable
&&
4928 priv
->plat
->est
->max_sdu
[queue
] &&
4929 xdpf
->len
> priv
->plat
->est
->max_sdu
[queue
]) {
4930 priv
->xstats
.max_sdu_txq_drop
[queue
]++;
4931 return STMMAC_XDP_CONSUMED
;
4934 if (likely(priv
->extend_desc
))
4935 tx_desc
= (struct dma_desc
*)(tx_q
->dma_etx
+ entry
);
4936 else if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
4937 tx_desc
= &tx_q
->dma_entx
[entry
].basic
;
4939 tx_desc
= tx_q
->dma_tx
+ entry
;
4942 dma_addr
= dma_map_single(priv
->device
, xdpf
->data
,
4943 xdpf
->len
, DMA_TO_DEVICE
);
4944 if (dma_mapping_error(priv
->device
, dma_addr
))
4945 return STMMAC_XDP_CONSUMED
;
4947 tx_q
->tx_skbuff_dma
[entry
].buf_type
= STMMAC_TXBUF_T_XDP_NDO
;
4949 struct page
*page
= virt_to_page(xdpf
->data
);
4951 dma_addr
= page_pool_get_dma_addr(page
) + sizeof(*xdpf
) +
4953 dma_sync_single_for_device(priv
->device
, dma_addr
,
4954 xdpf
->len
, DMA_BIDIRECTIONAL
);
4956 tx_q
->tx_skbuff_dma
[entry
].buf_type
= STMMAC_TXBUF_T_XDP_TX
;
4959 tx_q
->tx_skbuff_dma
[entry
].buf
= dma_addr
;
4960 tx_q
->tx_skbuff_dma
[entry
].map_as_page
= false;
4961 tx_q
->tx_skbuff_dma
[entry
].len
= xdpf
->len
;
4962 tx_q
->tx_skbuff_dma
[entry
].last_segment
= true;
4963 tx_q
->tx_skbuff_dma
[entry
].is_jumbo
= false;
4965 tx_q
->xdpf
[entry
] = xdpf
;
4967 stmmac_set_desc_addr(priv
, tx_desc
, dma_addr
);
4969 stmmac_prepare_tx_desc(priv
, tx_desc
, 1, xdpf
->len
,
4970 true, priv
->mode
, true, true,
4973 tx_q
->tx_count_frames
++;
4975 if (tx_q
->tx_count_frames
% priv
->tx_coal_frames
[queue
] == 0)
4981 tx_q
->tx_count_frames
= 0;
4982 stmmac_set_tx_ic(priv
, tx_desc
);
4983 u64_stats_update_begin(&txq_stats
->q_syncp
);
4984 u64_stats_inc(&txq_stats
->q
.tx_set_ic_bit
);
4985 u64_stats_update_end(&txq_stats
->q_syncp
);
4988 stmmac_enable_dma_transmission(priv
, priv
->ioaddr
);
4990 entry
= STMMAC_GET_ENTRY(entry
, priv
->dma_conf
.dma_tx_size
);
4991 tx_q
->cur_tx
= entry
;
4993 return STMMAC_XDP_TX
;
static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
				   int cpu)
{
	int index = cpu;

	if (unlikely(index < 0))
		index = 0;

	while (index >= priv->plat->tx_queues_to_use)
		index -= priv->plat->tx_queues_to_use;

	return index;
}

static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
				struct xdp_buff *xdp)
{
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
	int cpu = smp_processor_id();
	struct netdev_queue *nq;
	int queue;
	int res;

	if (unlikely(!xdpf))
		return STMMAC_XDP_CONSUMED;

	queue = stmmac_xdp_get_tx_queue(priv, cpu);
	nq = netdev_get_tx_queue(priv->dev, queue);

	__netif_tx_lock(nq, cpu);
	/* Avoids TX time-out as we are sharing with slow path */
	txq_trans_cond_update(nq);

	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
	if (res == STMMAC_XDP_TX)
		stmmac_flush_tx_descriptors(priv, queue);

	__netif_tx_unlock(nq);

	return res;
}
static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
				 struct bpf_prog *prog,
				 struct xdp_buff *xdp)
{
	u32 act;
	int res;

	act = bpf_prog_run_xdp(prog, xdp);
	switch (act) {
	case XDP_PASS:
		res = STMMAC_XDP_PASS;
		break;
	case XDP_TX:
		res = stmmac_xdp_xmit_back(priv, xdp);
		break;
	case XDP_REDIRECT:
		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
			res = STMMAC_XDP_CONSUMED;
		else
			res = STMMAC_XDP_REDIRECT;
		break;
	default:
		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(priv->dev, prog, act);
		fallthrough;
	case XDP_DROP:
		res = STMMAC_XDP_CONSUMED;
		break;
	}

	return res;
}

static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
					   struct xdp_buff *xdp)
{
	struct bpf_prog *prog;
	int res;

	prog = READ_ONCE(priv->xdp_prog);
	if (!prog) {
		res = STMMAC_XDP_PASS;
		goto out;
	}

	res = __stmmac_xdp_run_prog(priv, prog, xdp);
out:
	return ERR_PTR(-res);
}
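
/* stmmac_xdp_run_prog() encodes the STMMAC_XDP_* verdict bitmask as a
 * negative ERR_PTR value instead of returning an skb. The RX path recovers
 * the verdict with xdp_res = -PTR_ERR(skb); for STMMAC_XDP_PASS the returned
 * pointer is NULL (IS_ERR() is false) and the buffer is processed as a
 * normal packet.
 */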
static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
				   int xdp_status)
{
	int cpu = smp_processor_id();
	int queue;

	queue = stmmac_xdp_get_tx_queue(priv, cpu);

	if (xdp_status & STMMAC_XDP_TX)
		stmmac_tx_timer_arm(priv, queue);

	if (xdp_status & STMMAC_XDP_REDIRECT)
		xdp_do_flush();
}
static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
					       struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int datasize = xdp->data_end - xdp->data;
	struct sk_buff *skb;

	skb = __napi_alloc_skb(&ch->rxtx_napi,
			       xdp->data_end - xdp->data_hard_start,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
	if (metasize)
		skb_metadata_set(skb, metasize);

	return skb;
}
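
/* In the XSK zero-copy receive path the payload cannot stay in the
 * pool-owned buffer, so stmmac_construct_skb_zc() copies the data (and any
 * XDP metadata) into a freshly allocated skb; the caller then returns the
 * xdp buffer to the pool with xsk_buff_free().
 */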
5126 static void stmmac_dispatch_skb_zc(struct stmmac_priv
*priv
, u32 queue
,
5127 struct dma_desc
*p
, struct dma_desc
*np
,
5128 struct xdp_buff
*xdp
)
5130 struct stmmac_rxq_stats
*rxq_stats
= &priv
->xstats
.rxq_stats
[queue
];
5131 struct stmmac_channel
*ch
= &priv
->channel
[queue
];
5132 unsigned int len
= xdp
->data_end
- xdp
->data
;
5133 enum pkt_hash_types hash_type
;
5134 int coe
= priv
->hw
->rx_csum
;
5135 struct sk_buff
*skb
;
5138 skb
= stmmac_construct_skb_zc(ch
, xdp
);
5140 priv
->xstats
.rx_dropped
++;
5144 stmmac_get_rx_hwtstamp(priv
, p
, np
, skb
);
5145 if (priv
->hw
->hw_vlan_en
)
5146 /* MAC level stripping. */
5147 stmmac_rx_hw_vlan(priv
, priv
->hw
, p
, skb
);
5149 /* Driver level stripping. */
5150 stmmac_rx_vlan(priv
->dev
, skb
);
5151 skb
->protocol
= eth_type_trans(skb
, priv
->dev
);
5153 if (unlikely(!coe
) || !stmmac_has_ip_ethertype(skb
))
5154 skb_checksum_none_assert(skb
);
5156 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
5158 if (!stmmac_get_rx_hash(priv
, p
, &hash
, &hash_type
))
5159 skb_set_hash(skb
, hash
, hash_type
);
5161 skb_record_rx_queue(skb
, queue
);
5162 napi_gro_receive(&ch
->rxtx_napi
, skb
);
5164 u64_stats_update_begin(&rxq_stats
->napi_syncp
);
5165 u64_stats_inc(&rxq_stats
->napi
.rx_pkt_n
);
5166 u64_stats_add(&rxq_stats
->napi
.rx_bytes
, len
);
5167 u64_stats_update_end(&rxq_stats
->napi_syncp
);
5170 static bool stmmac_rx_refill_zc(struct stmmac_priv
*priv
, u32 queue
, u32 budget
)
5172 struct stmmac_rx_queue
*rx_q
= &priv
->dma_conf
.rx_queue
[queue
];
5173 unsigned int entry
= rx_q
->dirty_rx
;
5174 struct dma_desc
*rx_desc
= NULL
;
5177 budget
= min(budget
, stmmac_rx_dirty(priv
, queue
));
5179 while (budget
-- > 0 && entry
!= rx_q
->cur_rx
) {
5180 struct stmmac_rx_buffer
*buf
= &rx_q
->buf_pool
[entry
];
5181 dma_addr_t dma_addr
;
5185 buf
->xdp
= xsk_buff_alloc(rx_q
->xsk_pool
);
5192 if (priv
->extend_desc
)
5193 rx_desc
= (struct dma_desc
*)(rx_q
->dma_erx
+ entry
);
5195 rx_desc
= rx_q
->dma_rx
+ entry
;
5197 dma_addr
= xsk_buff_xdp_get_dma(buf
->xdp
);
5198 stmmac_set_desc_addr(priv
, rx_desc
, dma_addr
);
5199 stmmac_set_desc_sec_addr(priv
, rx_desc
, 0, false);
5200 stmmac_refill_desc3(priv
, rx_q
, rx_desc
);
5202 rx_q
->rx_count_frames
++;
5203 rx_q
->rx_count_frames
+= priv
->rx_coal_frames
[queue
];
5204 if (rx_q
->rx_count_frames
> priv
->rx_coal_frames
[queue
])
5205 rx_q
->rx_count_frames
= 0;
5207 use_rx_wd
= !priv
->rx_coal_frames
[queue
];
5208 use_rx_wd
|= rx_q
->rx_count_frames
> 0;
5209 if (!priv
->use_riwt
)
5213 stmmac_set_rx_owner(priv
, rx_desc
, use_rx_wd
);
5215 entry
= STMMAC_GET_ENTRY(entry
, priv
->dma_conf
.dma_rx_size
);
5219 rx_q
->dirty_rx
= entry
;
5220 rx_q
->rx_tail_addr
= rx_q
->dma_rx_phy
+
5221 (rx_q
->dirty_rx
* sizeof(struct dma_desc
));
5222 stmmac_set_rx_tail_ptr(priv
, priv
->ioaddr
, rx_q
->rx_tail_addr
, queue
);
static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
{
	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
	 * to represent incoming packet, whereas cb field in the same structure
	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
	 */
	return (struct stmmac_xdp_buff *)xdp;
}
5238 static int stmmac_rx_zc(struct stmmac_priv
*priv
, int limit
, u32 queue
)
5240 struct stmmac_rxq_stats
*rxq_stats
= &priv
->xstats
.rxq_stats
[queue
];
5241 struct stmmac_rx_queue
*rx_q
= &priv
->dma_conf
.rx_queue
[queue
];
5242 unsigned int count
= 0, error
= 0, len
= 0;
5243 int dirty
= stmmac_rx_dirty(priv
, queue
);
5244 unsigned int next_entry
= rx_q
->cur_rx
;
5245 u32 rx_errors
= 0, rx_dropped
= 0;
5246 unsigned int desc_size
;
5247 struct bpf_prog
*prog
;
5248 bool failure
= false;
5252 if (netif_msg_rx_status(priv
)) {
5255 netdev_dbg(priv
->dev
, "%s: descriptor ring:\n", __func__
);
5256 if (priv
->extend_desc
) {
5257 rx_head
= (void *)rx_q
->dma_erx
;
5258 desc_size
= sizeof(struct dma_extended_desc
);
5260 rx_head
= (void *)rx_q
->dma_rx
;
5261 desc_size
= sizeof(struct dma_desc
);
5264 stmmac_display_ring(priv
, rx_head
, priv
->dma_conf
.dma_rx_size
, true,
5265 rx_q
->dma_rx_phy
, desc_size
);
5267 while (count
< limit
) {
5268 struct stmmac_rx_buffer
*buf
;
5269 struct stmmac_xdp_buff
*ctx
;
5270 unsigned int buf1_len
= 0;
5271 struct dma_desc
*np
, *p
;
5275 if (!count
&& rx_q
->state_saved
) {
5276 error
= rx_q
->state
.error
;
5277 len
= rx_q
->state
.len
;
5279 rx_q
->state_saved
= false;
5290 buf
= &rx_q
->buf_pool
[entry
];
5292 if (dirty
>= STMMAC_RX_FILL_BATCH
) {
5293 failure
= failure
||
5294 !stmmac_rx_refill_zc(priv
, queue
, dirty
);
5298 if (priv
->extend_desc
)
5299 p
= (struct dma_desc
*)(rx_q
->dma_erx
+ entry
);
5301 p
= rx_q
->dma_rx
+ entry
;
5303 /* read the status of the incoming frame */
5304 status
= stmmac_rx_status(priv
, &priv
->xstats
, p
);
5305 /* check if managed by the DMA otherwise go ahead */
5306 if (unlikely(status
& dma_own
))
5309 /* Prefetch the next RX descriptor */
5310 rx_q
->cur_rx
= STMMAC_GET_ENTRY(rx_q
->cur_rx
,
5311 priv
->dma_conf
.dma_rx_size
);
5312 next_entry
= rx_q
->cur_rx
;
5314 if (priv
->extend_desc
)
5315 np
= (struct dma_desc
*)(rx_q
->dma_erx
+ next_entry
);
5317 np
= rx_q
->dma_rx
+ next_entry
;
5321 /* Ensure a valid XSK buffer before proceed */
5325 if (priv
->extend_desc
)
5326 stmmac_rx_extended_status(priv
, &priv
->xstats
,
5327 rx_q
->dma_erx
+ entry
);
5328 if (unlikely(status
== discard_frame
)) {
5329 xsk_buff_free(buf
->xdp
);
5333 if (!priv
->hwts_rx_en
)
5337 if (unlikely(error
&& (status
& rx_not_ls
)))
5339 if (unlikely(error
)) {
5344 /* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5345 if (likely(status
& rx_not_ls
)) {
5346 xsk_buff_free(buf
->xdp
);
5353 ctx
= xsk_buff_to_stmmac_ctx(buf
->xdp
);
5358 /* XDP ZC Frame only support primary buffers for now */
5359 buf1_len
= stmmac_rx_buf1_len(priv
, p
, status
, len
);
5362 /* ACS is disabled; strip manually. */
5363 if (likely(!(status
& rx_not_ls
))) {
5364 buf1_len
-= ETH_FCS_LEN
;
5368 /* RX buffer is good and fit into a XSK pool buffer */
5369 buf
->xdp
->data_end
= buf
->xdp
->data
+ buf1_len
;
5370 xsk_buff_dma_sync_for_cpu(buf
->xdp
, rx_q
->xsk_pool
);
5372 prog
= READ_ONCE(priv
->xdp_prog
);
5373 res
= __stmmac_xdp_run_prog(priv
, prog
, buf
->xdp
);
5376 case STMMAC_XDP_PASS
:
5377 stmmac_dispatch_skb_zc(priv
, queue
, p
, np
, buf
->xdp
);
5378 xsk_buff_free(buf
->xdp
);
5380 case STMMAC_XDP_CONSUMED
:
5381 xsk_buff_free(buf
->xdp
);
5385 case STMMAC_XDP_REDIRECT
:
5395 if (status
& rx_not_ls
) {
5396 rx_q
->state_saved
= true;
5397 rx_q
->state
.error
= error
;
5398 rx_q
->state
.len
= len
;
5401 stmmac_finalize_xdp_rx(priv
, xdp_status
);
5403 u64_stats_update_begin(&rxq_stats
->napi_syncp
);
5404 u64_stats_add(&rxq_stats
->napi
.rx_pkt_n
, count
);
5405 u64_stats_update_end(&rxq_stats
->napi_syncp
);
5407 priv
->xstats
.rx_dropped
+= rx_dropped
;
5408 priv
->xstats
.rx_errors
+= rx_errors
;
5410 if (xsk_uses_need_wakeup(rx_q
->xsk_pool
)) {
5411 if (failure
|| stmmac_rx_dirty(priv
, queue
) > 0)
5412 xsk_set_rx_need_wakeup(rx_q
->xsk_pool
);
5414 xsk_clear_rx_need_wakeup(rx_q
->xsk_pool
);
5419 return failure
? limit
: (int)count
;
/**
 * stmmac_rx - manage the receive process
 * @priv: driver private structure
 * @limit: NAPI budget
 * @queue: RX queue index.
 * Description : this is the function called by the napi poll method.
 * It gets all the frames inside the ring.
 */
static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
5432 u32 rx_errors
= 0, rx_dropped
= 0, rx_bytes
= 0, rx_packets
= 0;
5433 struct stmmac_rxq_stats
*rxq_stats
= &priv
->xstats
.rxq_stats
[queue
];
5434 struct stmmac_rx_queue
*rx_q
= &priv
->dma_conf
.rx_queue
[queue
];
5435 struct stmmac_channel
*ch
= &priv
->channel
[queue
];
5436 unsigned int count
= 0, error
= 0, len
= 0;
5437 int status
= 0, coe
= priv
->hw
->rx_csum
;
5438 unsigned int next_entry
= rx_q
->cur_rx
;
5439 enum dma_data_direction dma_dir
;
5440 unsigned int desc_size
;
5441 struct sk_buff
*skb
= NULL
;
5442 struct stmmac_xdp_buff ctx
;
5446 dma_dir
= page_pool_get_dma_dir(rx_q
->page_pool
);
5447 buf_sz
= DIV_ROUND_UP(priv
->dma_conf
.dma_buf_sz
, PAGE_SIZE
) * PAGE_SIZE
;
5448 limit
= min(priv
->dma_conf
.dma_rx_size
- 1, (unsigned int)limit
);
5450 if (netif_msg_rx_status(priv
)) {
5453 netdev_dbg(priv
->dev
, "%s: descriptor ring:\n", __func__
);
5454 if (priv
->extend_desc
) {
5455 rx_head
= (void *)rx_q
->dma_erx
;
5456 desc_size
= sizeof(struct dma_extended_desc
);
5458 rx_head
= (void *)rx_q
->dma_rx
;
5459 desc_size
= sizeof(struct dma_desc
);
5462 stmmac_display_ring(priv
, rx_head
, priv
->dma_conf
.dma_rx_size
, true,
5463 rx_q
->dma_rx_phy
, desc_size
);
5465 while (count
< limit
) {
5466 unsigned int buf1_len
= 0, buf2_len
= 0;
5467 enum pkt_hash_types hash_type
;
5468 struct stmmac_rx_buffer
*buf
;
5469 struct dma_desc
*np
, *p
;
5473 if (!count
&& rx_q
->state_saved
) {
5474 skb
= rx_q
->state
.skb
;
5475 error
= rx_q
->state
.error
;
5476 len
= rx_q
->state
.len
;
5478 rx_q
->state_saved
= false;
5491 buf
= &rx_q
->buf_pool
[entry
];
5493 if (priv
->extend_desc
)
5494 p
= (struct dma_desc
*)(rx_q
->dma_erx
+ entry
);
5496 p
= rx_q
->dma_rx
+ entry
;
5498 /* read the status of the incoming frame */
5499 status
= stmmac_rx_status(priv
, &priv
->xstats
, p
);
5500 /* check if managed by the DMA otherwise go ahead */
5501 if (unlikely(status
& dma_own
))
5504 rx_q
->cur_rx
= STMMAC_GET_ENTRY(rx_q
->cur_rx
,
5505 priv
->dma_conf
.dma_rx_size
);
5506 next_entry
= rx_q
->cur_rx
;
5508 if (priv
->extend_desc
)
5509 np
= (struct dma_desc
*)(rx_q
->dma_erx
+ next_entry
);
5511 np
= rx_q
->dma_rx
+ next_entry
;
5515 if (priv
->extend_desc
)
5516 stmmac_rx_extended_status(priv
, &priv
->xstats
, rx_q
->dma_erx
+ entry
);
5517 if (unlikely(status
== discard_frame
)) {
5518 page_pool_recycle_direct(rx_q
->page_pool
, buf
->page
);
5521 if (!priv
->hwts_rx_en
)
5525 if (unlikely(error
&& (status
& rx_not_ls
)))
5527 if (unlikely(error
)) {
5534 /* Buffer is good. Go on. */
5536 prefetch(page_address(buf
->page
) + buf
->page_offset
);
5538 prefetch(page_address(buf
->sec_page
));
5540 buf1_len
= stmmac_rx_buf1_len(priv
, p
, status
, len
);
5542 buf2_len
= stmmac_rx_buf2_len(priv
, p
, status
, len
);
5545 /* ACS is disabled; strip manually. */
5546 if (likely(!(status
& rx_not_ls
))) {
5548 buf2_len
-= ETH_FCS_LEN
;
5550 } else if (buf1_len
) {
5551 buf1_len
-= ETH_FCS_LEN
;
5557 unsigned int pre_len
, sync_len
;
5559 dma_sync_single_for_cpu(priv
->device
, buf
->addr
,
5562 xdp_init_buff(&ctx
.xdp
, buf_sz
, &rx_q
->xdp_rxq
);
5563 xdp_prepare_buff(&ctx
.xdp
, page_address(buf
->page
),
5564 buf
->page_offset
, buf1_len
, true);
5566 pre_len
= ctx
.xdp
.data_end
- ctx
.xdp
.data_hard_start
-
5573 skb
= stmmac_xdp_run_prog(priv
, &ctx
.xdp
);
5574 /* Due xdp_adjust_tail: DMA sync for_device
5575 * cover max len CPU touch
5577 sync_len
= ctx
.xdp
.data_end
- ctx
.xdp
.data_hard_start
-
5579 sync_len
= max(sync_len
, pre_len
);
5581 /* For Not XDP_PASS verdict */
5583 unsigned int xdp_res
= -PTR_ERR(skb
);
5585 if (xdp_res
& STMMAC_XDP_CONSUMED
) {
5586 page_pool_put_page(rx_q
->page_pool
,
5587 virt_to_head_page(ctx
.xdp
.data
),
5592 /* Clear skb as it was set as
5593 * status by XDP program.
5597 if (unlikely((status
& rx_not_ls
)))
5602 } else if (xdp_res
& (STMMAC_XDP_TX
|
5603 STMMAC_XDP_REDIRECT
)) {
5604 xdp_status
|= xdp_res
;
5614 /* XDP program may expand or reduce tail */
5615 buf1_len
= ctx
.xdp
.data_end
- ctx
.xdp
.data
;
5617 skb
= napi_alloc_skb(&ch
->rx_napi
, buf1_len
);
5624 /* XDP program may adjust header */
5625 skb_copy_to_linear_data(skb
, ctx
.xdp
.data
, buf1_len
);
5626 skb_put(skb
, buf1_len
);
5628 /* Data payload copied into SKB, page ready for recycle */
5629 page_pool_recycle_direct(rx_q
->page_pool
, buf
->page
);
5631 } else if (buf1_len
) {
5632 dma_sync_single_for_cpu(priv
->device
, buf
->addr
,
5634 skb_add_rx_frag(skb
, skb_shinfo(skb
)->nr_frags
,
5635 buf
->page
, buf
->page_offset
, buf1_len
,
5636 priv
->dma_conf
.dma_buf_sz
);
5638 /* Data payload appended into SKB */
5639 skb_mark_for_recycle(skb
);
5644 dma_sync_single_for_cpu(priv
->device
, buf
->sec_addr
,
5646 skb_add_rx_frag(skb
, skb_shinfo(skb
)->nr_frags
,
5647 buf
->sec_page
, 0, buf2_len
,
5648 priv
->dma_conf
.dma_buf_sz
);
5650 /* Data payload appended into SKB */
5651 skb_mark_for_recycle(skb
);
5652 buf
->sec_page
= NULL
;
5656 if (likely(status
& rx_not_ls
))
5661 /* Got entire packet into SKB. Finish it. */
5663 stmmac_get_rx_hwtstamp(priv
, p
, np
, skb
);
5665 if (priv
->hw
->hw_vlan_en
)
5666 /* MAC level stripping. */
5667 stmmac_rx_hw_vlan(priv
, priv
->hw
, p
, skb
);
5669 /* Driver level stripping. */
5670 stmmac_rx_vlan(priv
->dev
, skb
);
5672 skb
->protocol
= eth_type_trans(skb
, priv
->dev
);
5674 if (unlikely(!coe
) || !stmmac_has_ip_ethertype(skb
))
5675 skb_checksum_none_assert(skb
);
5677 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
5679 if (!stmmac_get_rx_hash(priv
, p
, &hash
, &hash_type
))
5680 skb_set_hash(skb
, hash
, hash_type
);
5682 skb_record_rx_queue(skb
, queue
);
5683 napi_gro_receive(&ch
->rx_napi
, skb
);
5691 if (status
& rx_not_ls
|| skb
) {
5692 rx_q
->state_saved
= true;
5693 rx_q
->state
.skb
= skb
;
5694 rx_q
->state
.error
= error
;
5695 rx_q
->state
.len
= len
;
5698 stmmac_finalize_xdp_rx(priv
, xdp_status
);
5700 stmmac_rx_refill(priv
, queue
);
5702 u64_stats_update_begin(&rxq_stats
->napi_syncp
);
5703 u64_stats_add(&rxq_stats
->napi
.rx_packets
, rx_packets
);
5704 u64_stats_add(&rxq_stats
->napi
.rx_bytes
, rx_bytes
);
5705 u64_stats_add(&rxq_stats
->napi
.rx_pkt_n
, count
);
5706 u64_stats_update_end(&rxq_stats
->napi_syncp
);
5708 priv
->xstats
.rx_dropped
+= rx_dropped
;
5709 priv
->xstats
.rx_errors
+= rx_errors
;
static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
{
	struct stmmac_channel *ch =
		container_of(napi, struct stmmac_channel, rx_napi);
	struct stmmac_priv *priv = ch->priv_data;
	struct stmmac_rxq_stats *rxq_stats;
	u32 chan = ch->index;
	int work_done;

	rxq_stats = &priv->xstats.rxq_stats[chan];
	u64_stats_update_begin(&rxq_stats->napi_syncp);
	u64_stats_inc(&rxq_stats->napi.poll);
	u64_stats_update_end(&rxq_stats->napi_syncp);

	work_done = stmmac_rx(priv, budget, chan);
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
		spin_unlock_irqrestore(&ch->lock, flags);
	}

	return work_done;
}
static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
{
	struct stmmac_channel *ch =
		container_of(napi, struct stmmac_channel, tx_napi);
	struct stmmac_priv *priv = ch->priv_data;
	struct stmmac_txq_stats *txq_stats;
	bool pending_packets = false;
	u32 chan = ch->index;
	int work_done;

	txq_stats = &priv->xstats.txq_stats[chan];
	u64_stats_update_begin(&txq_stats->napi_syncp);
	u64_stats_inc(&txq_stats->napi.poll);
	u64_stats_update_end(&txq_stats->napi_syncp);

	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
	work_done = min(work_done, budget);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
		spin_unlock_irqrestore(&ch->lock, flags);
	}

	/* TX still has packets to handle, check if we need to arm tx timer */
	if (pending_packets)
		stmmac_tx_timer_arm(priv, chan);

	return work_done;
}
static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
{
	struct stmmac_channel *ch =
		container_of(napi, struct stmmac_channel, rxtx_napi);
	struct stmmac_priv *priv = ch->priv_data;
	bool tx_pending_packets = false;
	int rx_done, tx_done, rxtx_done;
	struct stmmac_rxq_stats *rxq_stats;
	struct stmmac_txq_stats *txq_stats;
	u32 chan = ch->index;

	rxq_stats = &priv->xstats.rxq_stats[chan];
	u64_stats_update_begin(&rxq_stats->napi_syncp);
	u64_stats_inc(&rxq_stats->napi.poll);
	u64_stats_update_end(&rxq_stats->napi_syncp);

	txq_stats = &priv->xstats.txq_stats[chan];
	u64_stats_update_begin(&txq_stats->napi_syncp);
	u64_stats_inc(&txq_stats->napi.poll);
	u64_stats_update_end(&txq_stats->napi_syncp);

	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
	tx_done = min(tx_done, budget);

	rx_done = stmmac_rx_zc(priv, budget, chan);

	rxtx_done = max(tx_done, rx_done);

	/* If either TX or RX work is not complete, return budget
	 * and keep polling.
	 */
	if (rxtx_done >= budget)
		return budget;

	/* all work done, exit the polling mode */
	if (napi_complete_done(napi, rxtx_done)) {
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		/* Both RX and TX work are complete,
		 * so enable both RX & TX IRQs.
		 */
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
		spin_unlock_irqrestore(&ch->lock, flags);
	}

	/* TX still has packets to handle, check if we need to arm tx timer */
	if (tx_pending_packets)
		stmmac_tx_timer_arm(priv, chan);

	return min(rxtx_done, budget - 1);
}
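
/* Returning min(rxtx_done, budget - 1) keeps the value strictly below the
 * budget whenever polling completes here, which is how the NAPI core is told
 * the poll is done; returning the full budget is reserved for the early-exit
 * path above, where IRQs stay disabled and polling continues.
 */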
/**
 *  stmmac_tx_timeout
 *  @dev : Pointer to net device structure
 *  @txqueue: the index of the hanging transmit queue
 *  Description: this function is called when a packet transmission fails to
 *  complete within a reasonable time. The driver will mark the error in the
 *  netdev structure and arrange for the device to be reset to a sane state
 *  in order to transmit a new packet.
 */
static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	stmmac_global_err(priv);
}

/**
 *  stmmac_set_rx_mode - entry point for multicast addressing
 *  @dev : pointer to the device structure
 *  Description:
 *  This function is a driver entry point which gets called by the kernel
 *  whenever multicast addresses must be enabled/disabled.
 *  Return value:
 *  void.
 */
static void stmmac_set_rx_mode(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	stmmac_set_filter(priv, priv->hw, dev);
}
5859 * stmmac_change_mtu - entry point to change MTU size for the device.
5860 * @dev : device pointer.
5861 * @new_mtu : the new MTU size for the device.
5862 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
5863 * to drive packet transmission. Ethernet has an MTU of 1500 octets
5864 * (ETH_DATA_LEN). This value can be changed with ifconfig.
5866 * 0 on success and an appropriate (-)ve integer as defined in errno.h
5869 static int stmmac_change_mtu(struct net_device
*dev
, int new_mtu
)
5871 struct stmmac_priv
*priv
= netdev_priv(dev
);
5872 int txfifosz
= priv
->plat
->tx_fifo_size
;
5873 struct stmmac_dma_conf
*dma_conf
;
5874 const int mtu
= new_mtu
;
5878 txfifosz
= priv
->dma_cap
.tx_fifo_size
;
5880 txfifosz
/= priv
->plat
->tx_queues_to_use
;
5882 if (stmmac_xdp_is_enabled(priv
) && new_mtu
> ETH_DATA_LEN
) {
5883 netdev_dbg(priv
->dev
, "Jumbo frames not supported for XDP\n");
5887 new_mtu
= STMMAC_ALIGN(new_mtu
);
5889 /* If condition true, FIFO is too small or MTU too large */
5890 if ((txfifosz
< new_mtu
) || (new_mtu
> BUF_SIZE_16KiB
))
5893 if (netif_running(dev
)) {
5894 netdev_dbg(priv
->dev
, "restarting interface to change its MTU\n");
5895 /* Try to allocate the new DMA conf with the new mtu */
5896 dma_conf
= stmmac_setup_dma_desc(priv
, mtu
);
5897 if (IS_ERR(dma_conf
)) {
5898 netdev_err(priv
->dev
, "failed allocating new dma conf for new MTU %d\n",
5900 return PTR_ERR(dma_conf
);
5903 stmmac_release(dev
);
5905 ret
= __stmmac_open(dev
, dma_conf
);
5907 free_dma_desc_resources(priv
, dma_conf
);
5909 netdev_err(priv
->dev
, "failed reopening the interface after MTU change\n");
5915 stmmac_set_rx_mode(dev
);
5919 netdev_update_features(dev
);
5924 static netdev_features_t
stmmac_fix_features(struct net_device
*dev
,
5925 netdev_features_t features
)
5927 struct stmmac_priv
*priv
= netdev_priv(dev
);
5929 if (priv
->plat
->rx_coe
== STMMAC_RX_COE_NONE
)
5930 features
&= ~NETIF_F_RXCSUM
;
5932 if (!priv
->plat
->tx_coe
)
5933 features
&= ~NETIF_F_CSUM_MASK
;
5935 /* Some GMAC devices have a bugged Jumbo frame support that
5936 * needs to have the Tx COE disabled for oversized frames
5937 * (due to limited buffer sizes). In this case we disable
5938 * the TX csum insertion in the TDES and not use SF.
5940 if (priv
->plat
->bugged_jumbo
&& (dev
->mtu
> ETH_DATA_LEN
))
5941 features
&= ~NETIF_F_CSUM_MASK
;
5943 /* Disable tso if asked by ethtool */
5944 if ((priv
->plat
->flags
& STMMAC_FLAG_TSO_EN
) && (priv
->dma_cap
.tsoen
)) {
5945 if (features
& NETIF_F_TSO
)
5954 static int stmmac_set_features(struct net_device
*netdev
,
5955 netdev_features_t features
)
5957 struct stmmac_priv
*priv
= netdev_priv(netdev
);
5959 /* Keep the COE Type in case of csum is supporting */
5960 if (features
& NETIF_F_RXCSUM
)
5961 priv
->hw
->rx_csum
= priv
->plat
->rx_coe
;
5963 priv
->hw
->rx_csum
= 0;
5964 /* No check needed because rx_coe has been set before and it will be
5965 * fixed in case of issue.
5967 stmmac_rx_ipc(priv
, priv
->hw
);
5969 if (priv
->sph_cap
) {
5970 bool sph_en
= (priv
->hw
->rx_csum
> 0) && priv
->sph
;
5973 for (chan
= 0; chan
< priv
->plat
->rx_queues_to_use
; chan
++)
5974 stmmac_enable_sph(priv
, priv
->ioaddr
, sph_en
, chan
);
5977 if (features
& NETIF_F_HW_VLAN_CTAG_RX
)
5978 priv
->hw
->hw_vlan_en
= true;
5980 priv
->hw
->hw_vlan_en
= false;
5982 stmmac_set_hw_vlan_mode(priv
, priv
->hw
);
5987 static void stmmac_fpe_event_status(struct stmmac_priv
*priv
, int status
)
5989 struct stmmac_fpe_cfg
*fpe_cfg
= priv
->plat
->fpe_cfg
;
5990 enum stmmac_fpe_state
*lo_state
= &fpe_cfg
->lo_fpe_state
;
5991 enum stmmac_fpe_state
*lp_state
= &fpe_cfg
->lp_fpe_state
;
5992 bool *hs_enable
= &fpe_cfg
->hs_enable
;
5994 if (status
== FPE_EVENT_UNKNOWN
|| !*hs_enable
)
5997 /* If LP has sent verify mPacket, LP is FPE capable */
5998 if ((status
& FPE_EVENT_RVER
) == FPE_EVENT_RVER
) {
5999 if (*lp_state
< FPE_STATE_CAPABLE
)
6000 *lp_state
= FPE_STATE_CAPABLE
;
6002 /* If user has requested FPE enable, quickly response */
6004 stmmac_fpe_send_mpacket(priv
, priv
->ioaddr
,
6009 /* If Local has sent verify mPacket, Local is FPE capable */
6010 if ((status
& FPE_EVENT_TVER
) == FPE_EVENT_TVER
) {
6011 if (*lo_state
< FPE_STATE_CAPABLE
)
6012 *lo_state
= FPE_STATE_CAPABLE
;
6015 /* If LP has sent response mPacket, LP is entering FPE ON */
6016 if ((status
& FPE_EVENT_RRSP
) == FPE_EVENT_RRSP
)
6017 *lp_state
= FPE_STATE_ENTERING_ON
;
6019 /* If Local has sent response mPacket, Local is entering FPE ON */
6020 if ((status
& FPE_EVENT_TRSP
) == FPE_EVENT_TRSP
)
6021 *lo_state
= FPE_STATE_ENTERING_ON
;
6023 if (!test_bit(__FPE_REMOVING
, &priv
->fpe_task_state
) &&
6024 !test_and_set_bit(__FPE_TASK_SCHED
, &priv
->fpe_task_state
) &&
6026 queue_work(priv
->fpe_wq
, &priv
->fpe_task
);
6030 static void stmmac_common_interrupt(struct stmmac_priv
*priv
)
6032 u32 rx_cnt
= priv
->plat
->rx_queues_to_use
;
6033 u32 tx_cnt
= priv
->plat
->tx_queues_to_use
;
6038 xmac
= priv
->plat
->has_gmac4
|| priv
->plat
->has_xgmac
;
6039 queues_count
= (rx_cnt
> tx_cnt
) ? rx_cnt
: tx_cnt
;
6042 pm_wakeup_event(priv
->device
, 0);
6044 if (priv
->dma_cap
.estsel
)
6045 stmmac_est_irq_status(priv
, priv
, priv
->dev
,
6046 &priv
->xstats
, tx_cnt
);
6048 if (priv
->dma_cap
.fpesel
) {
6049 int status
= stmmac_fpe_irq_status(priv
, priv
->ioaddr
,
6052 stmmac_fpe_event_status(priv
, status
);
6055 /* To handle GMAC own interrupts */
6056 if ((priv
->plat
->has_gmac
) || xmac
) {
6057 int status
= stmmac_host_irq_status(priv
, priv
->hw
, &priv
->xstats
);
6059 if (unlikely(status
)) {
6060 /* For LPI we need to save the tx status */
6061 if (status
& CORE_IRQ_TX_PATH_IN_LPI_MODE
)
6062 priv
->tx_path_in_lpi_mode
= true;
6063 if (status
& CORE_IRQ_TX_PATH_EXIT_LPI_MODE
)
6064 priv
->tx_path_in_lpi_mode
= false;
6067 for (queue
= 0; queue
< queues_count
; queue
++)
6068 stmmac_host_mtl_irq_status(priv
, priv
->hw
, queue
);
6070 /* PCS link status */
6071 if (priv
->hw
->pcs
&&
6072 !(priv
->plat
->flags
& STMMAC_FLAG_HAS_INTEGRATED_PCS
)) {
6073 if (priv
->xstats
.pcs_link
)
6074 netif_carrier_on(priv
->dev
);
6076 netif_carrier_off(priv
->dev
);
6079 stmmac_timestamp_interrupt(priv
, priv
);
/**
 *  stmmac_interrupt - main ISR
 *  @irq: interrupt number.
 *  @dev_id: to pass the net device pointer.
 *  Description: this is the main driver interrupt service routine.
 *  It can call:
 *  o DMA service routine (to manage incoming frame reception and transmission
 *    status)
 *  o Core interrupts to manage: remote wake-up, management counter, LPI
 *    interrupts.
 */
static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Check if adapter is up */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return IRQ_HANDLED;

	/* Check ASP error if it isn't delivered via an individual IRQ */
	if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
		return IRQ_HANDLED;

	/* To handle Common interrupts */
	stmmac_common_interrupt(priv);

	/* To handle DMA interrupts */
	stmmac_dma_interrupt(priv);

	return IRQ_HANDLED;
}
static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Check if adapter is up */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return IRQ_HANDLED;

	/* To handle Common interrupts */
	stmmac_common_interrupt(priv);

	return IRQ_HANDLED;
}

static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Check if adapter is up */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return IRQ_HANDLED;

	/* Check if a fatal error happened */
	stmmac_safety_feat_interrupt(priv);

	return IRQ_HANDLED;
}
static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
{
	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
	struct stmmac_dma_conf *dma_conf;
	int chan = tx_q->queue_index;
	struct stmmac_priv *priv;
	int status;

	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);

	/* Check if adapter is up */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return IRQ_HANDLED;

	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);

	if (unlikely(status & tx_hard_error_bump_tc)) {
		/* Try to bump up the dma threshold on this failure */
		stmmac_bump_dma_threshold(priv, chan);
	} else if (unlikely(status == tx_hard_error)) {
		stmmac_tx_err(priv, chan);
	}

	return IRQ_HANDLED;
}

static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
{
	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
	struct stmmac_dma_conf *dma_conf;
	int chan = rx_q->queue_index;
	struct stmmac_priv *priv;

	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);

	/* Check if adapter is up */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return IRQ_HANDLED;

	stmmac_napi_check(priv, chan, DMA_DIR_RX);

	return IRQ_HANDLED;
}
/**
 *  stmmac_ioctl - Entry point for the Ioctl
 *  @dev: Device pointer.
 *  @rq: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  @cmd: IOCTL command
 *  Description:
 *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
 */
static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
		break;
	case SIOCSHWTSTAMP:
		ret = stmmac_hwtstamp_set(dev, rq);
		break;
	case SIOCGHWTSTAMP:
		ret = stmmac_hwtstamp_get(dev, rq);
		break;
	default:
		break;
	}

	return ret;
}
static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				    void *cb_priv)
{
	struct stmmac_priv *priv = cb_priv;
	int ret = -EOPNOTSUPP;

	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
		return ret;

	__stmmac_disable_all_queues(priv);

	switch (type) {
	case TC_SETUP_CLSU32:
		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
		break;
	case TC_SETUP_CLSFLOWER:
		ret = stmmac_tc_setup_cls(priv, priv, type_data);
		break;
	default:
		break;
	}

	stmmac_enable_all_queues(priv);
	return ret;
}

static LIST_HEAD(stmmac_block_cb_list);
static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
			   void *type_data)
{
	struct stmmac_priv *priv = netdev_priv(ndev);

	switch (type) {
	case TC_QUERY_CAPS:
		return stmmac_tc_query_caps(priv, priv, type_data);
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &stmmac_block_cb_list,
						  stmmac_setup_tc_block_cb,
						  priv, priv, true);
	case TC_SETUP_QDISC_CBS:
		return stmmac_tc_setup_cbs(priv, priv, type_data);
	case TC_SETUP_QDISC_TAPRIO:
		return stmmac_tc_setup_taprio(priv, priv, type_data);
	case TC_SETUP_QDISC_ETF:
		return stmmac_tc_setup_etf(priv, priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
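/* Illustrative only: the TC offloads dispatched above are normally exercised
 * from user space with tc(8); the exact parameters below are hypothetical and
 * depend on the iproute2 version and link speed. For example, a CBS offload
 * on the second TX queue might be requested roughly like:
 *
 *	tc qdisc replace dev eth0 parent root handle 100 mqprio \
 *		num_tc 2 map 0 1 queues 1@0 1@1 hw 0
 *	tc qdisc replace dev eth0 parent 100:2 cbs idleslope 20000 \
 *		sendslope -980000 hicredit 30 locredit -1470 offload 1
 *
 * Requesting the hardware offload ("offload 1") is what makes the stack call
 * ndo_setup_tc() with TC_SETUP_QDISC_CBS and reach stmmac_tc_setup_cbs().
 */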
static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
			       struct net_device *sb_dev)
{
	int gso = skb_shinfo(skb)->gso_type;

	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
		/*
		 * There is no way to determine the number of TSO/USO
		 * capable Queues. Let's use always the Queue 0
		 * because if TSO/USO is supported then at least this
		 * one will be capable.
		 */
		return 0;
	}

	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
}
static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret = 0;

	ret = pm_runtime_resume_and_get(priv->device);
	if (ret < 0)
		return ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		goto set_mac_error;

	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);

set_mac_error:
	pm_runtime_put(priv->device);

	return ret;
}
#ifdef CONFIG_DEBUG_FS
static struct dentry *stmmac_fs_dir;

static void sysfs_display_ring(void *head, int size, int extend_desc,
			       struct seq_file *seq, dma_addr_t dma_phy_addr)
{
	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
	struct dma_desc *p = (struct dma_desc *)head;
	unsigned int desc_size;
	dma_addr_t dma_addr;
	int i;

	desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
	for (i = 0; i < size; i++) {
		dma_addr = dma_phy_addr + i * desc_size;
		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
			   i, &dma_addr,
			   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
			   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
		if (extend_desc)
			p = &(++ep)->basic;
		else
			p++;
	}
}
static int stmmac_rings_status_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 queue;

	if ((dev->flags & IFF_UP) == 0)
		return 0;

	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];

		seq_printf(seq, "RX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_erx,
					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
		} else {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_rx,
					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
		}
	}

	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];

		seq_printf(seq, "TX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_etx,
					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_tx,
					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
		}
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
{
	static const char * const dwxgmac_timestamp_source[] = {
		"None",
		"Internal",
		"External",
		"Both",
	};
	static const char * const dwxgmac_safety_feature_desc[] = {
		"No",
		"All Safety Features with ECC and Parity",
		"All Safety Features without ECC or Parity",
		"All Safety Features with Parity Only",
		"ECC Only",
		"UNDEFINED",
		"UNDEFINED",
		"UNDEFINED",
	};
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (!priv->hw_cap_support) {
		seq_printf(seq, "DMA HW features not supported\n");
		return 0;
	}

	seq_printf(seq, "==============================\n");
	seq_printf(seq, "\tDMA HW features\n");
	seq_printf(seq, "==============================\n");

	seq_printf(seq, "\t10/100 Mbps: %s\n",
		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
	seq_printf(seq, "\t1000 Mbps: %s\n",
		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
	seq_printf(seq, "\tHalf duplex: %s\n",
		   (priv->dma_cap.half_duplex) ? "Y" : "N");
	if (priv->plat->has_xgmac) {
		seq_printf(seq,
			   "\tNumber of Additional MAC address registers: %d\n",
			   priv->dma_cap.multi_addr);
	} else {
		seq_printf(seq, "\tHash Filter: %s\n",
			   (priv->dma_cap.hash_filter) ? "Y" : "N");
		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
			   (priv->dma_cap.multi_addr) ? "Y" : "N");
	}
	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
		   (priv->dma_cap.pcs) ? "Y" : "N");
	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
	seq_printf(seq, "\tPMT Remote wake up: %s\n",
		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
	seq_printf(seq, "\tPMT Magic Frame: %s\n",
		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
	seq_printf(seq, "\tRMON module: %s\n",
		   (priv->dma_cap.rmon) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
		   (priv->dma_cap.time_stamp) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
	if (priv->plat->has_xgmac)
		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
		   (priv->dma_cap.eee) ? "Y" : "N");
	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
		   (priv->dma_cap.tx_coe) ? "Y" : "N");
	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
	    priv->plat->has_xgmac) {
		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
			   (priv->dma_cap.rx_coe) ? "Y" : "N");
	} else {
		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
	}
	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
		   priv->dma_cap.number_rx_channel);
	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
		   priv->dma_cap.number_tx_channel);
	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
		   priv->dma_cap.number_rx_queues);
	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
		   priv->dma_cap.number_tx_queues);
	seq_printf(seq, "\tEnhanced descriptors: %s\n",
		   (priv->dma_cap.enh_desc) ? "Y" : "N");
	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
		   priv->dma_cap.pps_out_num);
	seq_printf(seq, "\tSafety Features: %s\n",
		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
	seq_printf(seq, "\tFlexible RX Parser: %s\n",
		   priv->dma_cap.frpsel ? "Y" : "N");
	seq_printf(seq, "\tEnhanced Addressing: %d\n",
		   priv->dma_cap.host_dma_width);
	seq_printf(seq, "\tReceive Side Scaling: %s\n",
		   priv->dma_cap.rssen ? "Y" : "N");
	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
		   priv->dma_cap.vlhash ? "Y" : "N");
	seq_printf(seq, "\tSplit Header: %s\n",
		   priv->dma_cap.sphen ? "Y" : "N");
	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
		   priv->dma_cap.vlins ? "Y" : "N");
	seq_printf(seq, "\tDouble VLAN: %s\n",
		   priv->dma_cap.dvlan ? "Y" : "N");
	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
		   priv->dma_cap.l3l4fnum);
	seq_printf(seq, "\tARP Offloading: %s\n",
		   priv->dma_cap.arpoffsel ? "Y" : "N");
	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
		   priv->dma_cap.estsel ? "Y" : "N");
	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
		   priv->dma_cap.fpesel ? "Y" : "N");
	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
		   priv->dma_cap.tbssel ? "Y" : "N");
	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
		   priv->dma_cap.tbs_ch_num);
	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
		   priv->dma_cap.sgfsel ? "Y" : "N");
	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
		   BIT(priv->dma_cap.ttsfd) >> 1);
	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
		   priv->dma_cap.numtc);
	seq_printf(seq, "\tDCB Feature: %s\n",
		   priv->dma_cap.dcben ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
		   priv->dma_cap.advthword ? "Y" : "N");
	seq_printf(seq, "\tPTP Offload: %s\n",
		   priv->dma_cap.ptoen ? "Y" : "N");
	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
		   priv->dma_cap.osten ? "Y" : "N");
	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
		   priv->dma_cap.pfcen ? "Y" : "N");
	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
		   BIT(priv->dma_cap.frpes) << 6);
	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
		   BIT(priv->dma_cap.frpbs) << 6);
	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
		   priv->dma_cap.frppipe_num);
	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
		   priv->dma_cap.nrvf_num ?
		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
	seq_printf(seq, "\tDepth of GCL: %lu\n",
		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
		   priv->dma_cap.cbtisel ? "Y" : "N");
	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
		   priv->dma_cap.aux_snapshot_n);
	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
		   priv->dma_cap.pou_ost_en ? "Y" : "N");
	seq_printf(seq, "\tEnhanced DMA: %s\n",
		   priv->dma_cap.edma ? "Y" : "N");
	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
		   priv->dma_cap.ediffc ? "Y" : "N");
	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
		   priv->dma_cap.vxn ? "Y" : "N");
	seq_printf(seq, "\tDebug Memory Interface: %s\n",
		   priv->dma_cap.dbgmem ? "Y" : "N");
	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
/* Use network device events to rename debugfs file entries.
 */
static int stmmac_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct stmmac_priv *priv = netdev_priv(dev);

	if (dev->netdev_ops != &stmmac_netdev_ops)
		goto done;

	switch (event) {
	case NETDEV_CHANGENAME:
		if (priv->dbgfs_dir)
			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
							 priv->dbgfs_dir,
							 stmmac_fs_dir,
							 dev->name);
		break;
	}
done:
	return NOTIFY_DONE;
}

static struct notifier_block stmmac_notifier = {
	.notifier_call = stmmac_device_event,
};
static void stmmac_init_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	rtnl_lock();

	/* Create per netdev entries */
	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);

	/* Entry to report DMA RX/TX rings */
	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
			    &stmmac_rings_status_fops);

	/* Entry to report the DMA HW features */
	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
			    &stmmac_dma_cap_fops);

	rtnl_unlock();
}
static void stmmac_exit_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	debugfs_remove_recursive(priv->dbgfs_dir);
}
#endif /* CONFIG_DEBUG_FS */
static u32 stmmac_vid_crc32_le(__le16 vid_le)
{
	unsigned char *data = (unsigned char *)&vid_le;
	unsigned char data_byte = 0;
	u32 crc = ~0x0;
	u32 temp = 0;
	int i, bits;

	bits = get_bitmask_order(VLAN_VID_MASK);
	for (i = 0; i < bits; i++) {
		if ((i % 8) == 0)
			data_byte = data[i / 8];

		temp = ((crc & 1) ^ data_byte) & 1;
		crc >>= 1;
		data_byte >>= 1;

		if (temp)
			crc ^= 0xedb88320;
	}

	return crc;
}
static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
{
	u32 crc, hash = 0;
	__le16 pmatch = 0;
	int count = 0;
	u16 vid = 0;

	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
		__le16 vid_le = cpu_to_le16(vid);
		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
		hash |= (1 << crc);
		count++;
	}

	if (!priv->dma_cap.vlhash) {
		if (count > 2) /* VID = 0 always passes filter */
			return -EOPNOTSUPP;

		pmatch = cpu_to_le16(vid);
		hash = 0;
	}

	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
}
static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	bool is_double = false;
	int ret;

	ret = pm_runtime_resume_and_get(priv->device);
	if (ret < 0)
		return ret;

	if (be16_to_cpu(proto) == ETH_P_8021AD)
		is_double = true;

	set_bit(vid, priv->active_vlans);
	ret = stmmac_vlan_update(priv, is_double);
	if (ret) {
		clear_bit(vid, priv->active_vlans);
		goto err_pm_put;
	}

	if (priv->hw->num_vlan) {
		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
		if (ret)
			goto err_pm_put;
	}
err_pm_put:
	pm_runtime_put(priv->device);

	return ret;
}
static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	bool is_double = false;
	int ret;

	ret = pm_runtime_resume_and_get(priv->device);
	if (ret < 0)
		return ret;

	if (be16_to_cpu(proto) == ETH_P_8021AD)
		is_double = true;

	clear_bit(vid, priv->active_vlans);

	if (priv->hw->num_vlan) {
		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
		if (ret)
			goto del_vlan_error;
	}

	ret = stmmac_vlan_update(priv, is_double);

del_vlan_error:
	pm_runtime_put(priv->device);

	return ret;
}
static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
	case XDP_SETUP_XSK_POOL:
		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
					     bpf->xsk.queue_id);
	default:
		return -EOPNOTSUPP;
	}
}
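/* Illustrative only: XDP_SETUP_PROG is normally reached by attaching a
 * program with iproute2 or libbpf rather than by calling ndo_bpf directly,
 * e.g. (object file name and section are hypothetical):
 *
 *	ip link set dev eth0 xdpdrv obj xdp_prog.o sec xdp
 *
 * XDP_SETUP_XSK_POOL is triggered from the AF_XDP socket path when user
 * space binds an XSK UMEM/pool to one of this device's queues; the pool is
 * then used by the zero-copy RX/TX paths guarded by stmmac_xdp_is_enabled().
 */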
static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
			   struct xdp_frame **frames, u32 flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int cpu = smp_processor_id();
	struct netdev_queue *nq;
	int i, nxmit = 0;
	int queue;

	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
		return -ENETDOWN;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	queue = stmmac_xdp_get_tx_queue(priv, cpu);
	nq = netdev_get_tx_queue(priv->dev, queue);

	__netif_tx_lock(nq, cpu);
	/* Avoids TX time-out as we are sharing with slow path */
	txq_trans_cond_update(nq);

	for (i = 0; i < num_frames; i++) {
		int res;

		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
		if (res == STMMAC_XDP_CONSUMED)
			break;

		nxmit++;
	}

	if (flags & XDP_XMIT_FLUSH) {
		stmmac_flush_tx_descriptors(priv, queue);
		stmmac_tx_timer_arm(priv, queue);
	}

	__netif_tx_unlock(nq);

	return nxmit;
}
void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
	spin_unlock_irqrestore(&ch->lock, flags);

	stmmac_stop_rx_dma(priv, queue);
	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
}
void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;
	u32 buf_size;
	int ret;

	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
	if (ret) {
		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
		return;
	}

	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
	if (ret) {
		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
		netdev_err(priv->dev, "Failed to init RX desc.\n");
		return;
	}

	stmmac_reset_rx_queue(priv, queue);
	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);

	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
			    rx_q->dma_rx_phy, rx_q->queue_index);

	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
			     sizeof(struct dma_desc));
	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
			       rx_q->rx_tail_addr, rx_q->queue_index);

	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
		stmmac_set_dma_bfsize(priv, priv->ioaddr,
				      buf_size,
				      rx_q->queue_index);
	} else {
		stmmac_set_dma_bfsize(priv, priv->ioaddr,
				      priv->dma_conf.dma_buf_sz,
				      rx_q->queue_index);
	}

	stmmac_start_rx_dma(priv, queue);

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
	spin_unlock_irqrestore(&ch->lock, flags);
}
void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
	spin_unlock_irqrestore(&ch->lock, flags);

	stmmac_stop_tx_dma(priv, queue);
	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
}
void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;
	int ret;

	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
	if (ret) {
		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
		return;
	}

	ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
	if (ret) {
		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
		netdev_err(priv->dev, "Failed to init TX desc.\n");
		return;
	}

	stmmac_reset_tx_queue(priv, queue);
	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);

	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
			    tx_q->dma_tx_phy, tx_q->queue_index);

	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);

	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
			       tx_q->tx_tail_addr, tx_q->queue_index);

	stmmac_start_tx_dma(priv, queue);

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
	spin_unlock_irqrestore(&ch->lock, flags);
}
void stmmac_xdp_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 chan;

	/* Ensure tx function is not running */
	netif_tx_disable(dev);

	/* Disable NAPI process */
	stmmac_disable_all_queues(priv);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

	/* Free the IRQ lines */
	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);

	/* Stop TX/RX DMA channels */
	stmmac_stop_all_dma(priv);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv, &priv->dma_conf);

	/* Disable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, false);

	/* set trans_start so we don't get spurious
	 * watchdogs during reset
	 */
	netif_trans_update(dev);
	netif_carrier_off(dev);
}
int stmmac_xdp_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
	struct stmmac_rx_queue *rx_q;
	struct stmmac_tx_queue *tx_q;
	u32 buf_size;
	bool sph_en;
	u32 chan;
	int ret;

	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
	if (ret < 0) {
		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
			   __func__);
		goto dma_desc_error;
	}

	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
	if (ret < 0) {
		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
			   __func__);
		goto init_error;
	}

	stmmac_reset_queues_param(priv);

	/* DMA CSR Channel configuration */
	for (chan = 0; chan < dma_csr_ch; chan++) {
		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
	}

	/* Adjust Split header */
	sph_en = (priv->hw->rx_csum > 0) && priv->sph;

	/* DMA RX Channel Configuration */
	for (chan = 0; chan < rx_cnt; chan++) {
		rx_q = &priv->dma_conf.rx_queue[chan];

		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    rx_q->dma_rx_phy, chan);

		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
				     (rx_q->buf_alloc_num *
				      sizeof(struct dma_desc));
		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
				       rx_q->rx_tail_addr, chan);

		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
			stmmac_set_dma_bfsize(priv, priv->ioaddr,
					      buf_size,
					      rx_q->queue_index);
		} else {
			stmmac_set_dma_bfsize(priv, priv->ioaddr,
					      priv->dma_conf.dma_buf_sz,
					      rx_q->queue_index);
		}

		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
	}

	/* DMA TX Channel Configuration */
	for (chan = 0; chan < tx_cnt; chan++) {
		tx_q = &priv->dma_conf.tx_queue[chan];

		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    tx_q->dma_tx_phy, chan);

		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
				       tx_q->tx_tail_addr, chan);

		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		tx_q->txtimer.function = stmmac_tx_timer;
	}

	/* Enable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, true);

	/* Start Rx & Tx DMA Channels */
	stmmac_start_all_dma(priv);

	ret = stmmac_request_irq(dev);
	if (ret)
		goto irq_error;

	/* Enable NAPI process*/
	stmmac_enable_all_queues(priv);
	netif_carrier_on(dev);
	netif_tx_start_all_queues(dev);
	stmmac_enable_all_dma_irq(priv);

	return 0;

irq_error:
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

	stmmac_hw_teardown(dev);
init_error:
	free_dma_desc_resources(priv, &priv->dma_conf);
dma_desc_error:
	return ret;
}
int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct stmmac_rx_queue *rx_q;
	struct stmmac_tx_queue *tx_q;
	struct stmmac_channel *ch;

	if (test_bit(STMMAC_DOWN, &priv->state) ||
	    !netif_carrier_ok(priv->dev))
		return -ENETDOWN;

	if (!stmmac_xdp_is_enabled(priv))
		return -EINVAL;

	if (queue >= priv->plat->rx_queues_to_use ||
	    queue >= priv->plat->tx_queues_to_use)
		return -EINVAL;

	rx_q = &priv->dma_conf.rx_queue[queue];
	tx_q = &priv->dma_conf.tx_queue[queue];
	ch = &priv->channel[queue];

	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
		return -EINVAL;

	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
		/* EQoS does not have per-DMA channel SW interrupt,
		 * so we schedule RX Napi straight-away.
		 */
		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
			__napi_schedule(&ch->rxtx_napi);
	}

	return 0;
}
static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	unsigned int start;
	int q;

	for (q = 0; q < tx_cnt; q++) {
		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
		u64 tx_packets;
		u64 tx_bytes;

		do {
			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
		do {
			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));

		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
	}

	for (q = 0; q < rx_cnt; q++) {
		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
		u64 rx_packets;
		u64 rx_bytes;

		do {
			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
	}

	stats->rx_dropped = priv->xstats.rx_dropped;
	stats->rx_errors = priv->xstats.rx_errors;
	stats->tx_dropped = priv->xstats.tx_dropped;
	stats->tx_errors = priv->xstats.tx_errors;
	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
	stats->rx_length_errors = priv->xstats.rx_length;
	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
}
static const struct net_device_ops stmmac_netdev_ops = {
	.ndo_open = stmmac_open,
	.ndo_start_xmit = stmmac_xmit,
	.ndo_stop = stmmac_release,
	.ndo_change_mtu = stmmac_change_mtu,
	.ndo_fix_features = stmmac_fix_features,
	.ndo_set_features = stmmac_set_features,
	.ndo_set_rx_mode = stmmac_set_rx_mode,
	.ndo_tx_timeout = stmmac_tx_timeout,
	.ndo_eth_ioctl = stmmac_ioctl,
	.ndo_get_stats64 = stmmac_get_stats64,
	.ndo_setup_tc = stmmac_setup_tc,
	.ndo_select_queue = stmmac_select_queue,
	.ndo_set_mac_address = stmmac_set_mac_address,
	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
	.ndo_bpf = stmmac_bpf,
	.ndo_xdp_xmit = stmmac_xdp_xmit,
	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
};
static void stmmac_reset_subtask(struct stmmac_priv *priv)
{
	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
		return;
	if (test_bit(STMMAC_DOWN, &priv->state))
		return;

	netdev_err(priv->dev, "Reset adapter.\n");

	rtnl_lock();
	netif_trans_update(priv->dev);
	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
		usleep_range(1000, 2000);

	set_bit(STMMAC_DOWN, &priv->state);
	dev_close(priv->dev);
	dev_open(priv->dev, NULL);
	clear_bit(STMMAC_DOWN, &priv->state);
	clear_bit(STMMAC_RESETING, &priv->state);
	rtnl_unlock();
}
static void stmmac_service_task(struct work_struct *work)
{
	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
						service_task);

	stmmac_reset_subtask(priv);
	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
}
/**
 *  stmmac_hw_init - Init the MAC device
 *  @priv: driver private structure
 *  Description: this function is to configure the MAC device according to
 *  some platform parameters or the HW capability register. It prepares the
 *  driver to use either ring or chain modes and to setup either enhanced or
 *  normal descriptors.
 */
static int stmmac_hw_init(struct stmmac_priv *priv)
{
	int ret;

	/* dwmac-sun8i only works in chain mode */
	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
		chain_mode = 1;
	priv->chain_mode = chain_mode;

	/* Initialize HW Interface */
	ret = stmmac_hwif_init(priv);
	if (ret)
		return ret;

	/* Get the HW capability (new GMAC newer than 3.50a) */
	priv->hw_cap_support = stmmac_get_hw_features(priv);
	if (priv->hw_cap_support) {
		dev_info(priv->device, "DMA HW capability register supported\n");

		/* We can override some gmac/dma configuration fields: e.g.
		 * enh_desc, tx_coe (e.g. that are passed through the
		 * platform) with the values from the HW capability
		 * register (if supported).
		 */
		priv->plat->enh_desc = priv->dma_cap.enh_desc;
		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
		priv->hw->pmt = priv->plat->pmt;
		if (priv->dma_cap.hash_tb_sz) {
			priv->hw->multicast_filter_bins =
					(BIT(priv->dma_cap.hash_tb_sz) << 5);
			priv->hw->mcast_bits_log2 =
					ilog2(priv->hw->multicast_filter_bins);
		}

		/* TXCOE doesn't work in thresh DMA mode */
		if (priv->plat->force_thresh_dma_mode)
			priv->plat->tx_coe = 0;
		else
			priv->plat->tx_coe = priv->dma_cap.tx_coe;

		/* In case of GMAC4 rx_coe is from HW cap register. */
		priv->plat->rx_coe = priv->dma_cap.rx_coe;

		if (priv->dma_cap.rx_coe_type2)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
		else if (priv->dma_cap.rx_coe_type1)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;

	} else {
		dev_info(priv->device, "No HW DMA feature register supported\n");
	}

	if (priv->plat->rx_coe) {
		priv->hw->rx_csum = priv->plat->rx_coe;
		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
		if (priv->synopsys_id < DWMAC_CORE_4_00)
			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
	}
	if (priv->plat->tx_coe)
		dev_info(priv->device, "TX Checksum insertion supported\n");

	if (priv->plat->pmt) {
		dev_info(priv->device, "Wake-Up On Lan supported\n");
		device_set_wakeup_capable(priv->device, 1);
	}

	if (priv->dma_cap.tsoen)
		dev_info(priv->device, "TSO supported\n");

	priv->hw->vlan_fail_q_en =
		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;

	/* Run HW quirks, if any */
	if (priv->hwif_quirks) {
		ret = priv->hwif_quirks(priv);
		if (ret)
			return ret;
	}

	/* Rx Watchdog is available in the COREs newer than the 3.40.
	 * In some cases, for example on buggy HW, this feature
	 * has to be disabled and this can be done by passing the
	 * riwt_off field from the platform.
	 */
	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
		priv->use_riwt = 1;
		dev_info(priv->device,
			 "Enable RX Mitigation via HW Watchdog Timer\n");
	}

	return 0;
}
static void stmmac_napi_add(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 queue, maxq;

	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		ch->priv_data = priv;
		ch->index = queue;
		spin_lock_init(&ch->lock);

		if (queue < priv->plat->rx_queues_to_use) {
			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
		}
		if (queue < priv->plat->tx_queues_to_use) {
			netif_napi_add_tx(dev, &ch->tx_napi,
					  stmmac_napi_poll_tx);
		}
		if (queue < priv->plat->rx_queues_to_use &&
		    queue < priv->plat->tx_queues_to_use) {
			netif_napi_add(dev, &ch->rxtx_napi,
				       stmmac_napi_poll_rxtx);
		}
	}
}
static void stmmac_napi_del(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 queue, maxq;

	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < priv->plat->rx_queues_to_use)
			netif_napi_del(&ch->rx_napi);
		if (queue < priv->plat->tx_queues_to_use)
			netif_napi_del(&ch->tx_napi);
		if (queue < priv->plat->rx_queues_to_use &&
		    queue < priv->plat->tx_queues_to_use) {
			netif_napi_del(&ch->rxtx_napi);
		}
	}
}
int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = 0, i;

	if (netif_running(dev))
		stmmac_release(dev);

	stmmac_napi_del(dev);

	priv->plat->rx_queues_to_use = rx_cnt;
	priv->plat->tx_queues_to_use = tx_cnt;
	if (!netif_is_rxfh_configured(dev))
		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
									rx_cnt);

	stmmac_set_half_duplex(priv);
	stmmac_napi_add(dev);

	if (netif_running(dev))
		ret = stmmac_open(dev);

	return ret;
}
int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = 0;

	if (netif_running(dev))
		stmmac_release(dev);

	priv->dma_conf.dma_rx_size = rx_size;
	priv->dma_conf.dma_tx_size = tx_size;

	if (netif_running(dev))
		ret = stmmac_open(dev);

	return ret;
}
#define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
static void stmmac_fpe_lp_task(struct work_struct *work)
{
	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
						fpe_task);
	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
	bool *hs_enable = &fpe_cfg->hs_enable;
	bool *enable = &fpe_cfg->enable;
	int retries = 20;

	while (retries-- > 0) {
		/* Bail out immediately if FPE handshake is OFF */
		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
			break;

		if (*lo_state == FPE_STATE_ENTERING_ON &&
		    *lp_state == FPE_STATE_ENTERING_ON) {
			stmmac_fpe_configure(priv, priv->ioaddr,
					     fpe_cfg,
					     priv->plat->tx_queues_to_use,
					     priv->plat->rx_queues_to_use,
					     *enable);

			netdev_info(priv->dev, "configured FPE\n");

			*lo_state = FPE_STATE_ON;
			*lp_state = FPE_STATE_ON;
			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
			break;
		}

		if ((*lo_state == FPE_STATE_CAPABLE ||
		     *lo_state == FPE_STATE_ENTERING_ON) &&
		    *lp_state != FPE_STATE_ON) {
			netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT,
				    *lo_state, *lp_state);
			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
						fpe_cfg,
						MPACKET_VERIFY);
		}
		/* Sleep then retry */
		msleep(500);
	}

	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
}
void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
{
	if (priv->plat->fpe_cfg->hs_enable != enable) {
		if (enable) {
			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
						priv->plat->fpe_cfg,
						MPACKET_VERIFY);
		} else {
			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
		}

		priv->plat->fpe_cfg->hs_enable = enable;
	}
}
static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
{
	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
	struct dma_desc *desc_contains_ts = ctx->desc;
	struct stmmac_priv *priv = ctx->priv;
	struct dma_desc *ndesc = ctx->ndesc;
	struct dma_desc *desc = ctx->desc;
	u64 ns = 0;

	if (!priv->hwts_rx_en)
		return -ENODATA;

	/* For GMAC4, the valid timestamp is from CTX next desc. */
	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
		desc_contains_ts = ndesc;

	/* Check if timestamp is available */
	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
		ns -= priv->plat->cdc_error_adj;
		*timestamp = ns_to_ktime(ns);
		return 0;
	}

	return -ENODATA;
}

static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
	.xmo_rx_timestamp = stmmac_xdp_rx_timestamp,
};
/**
 * stmmac_dvr_probe
 * @device: device pointer
 * @plat_dat: platform data pointer
 * @res: stmmac resource pointer
 * Description: this is the main probe function: it allocates the network
 * device via alloc_etherdev, sets up the private structure and registers
 * the interface.
 * Return:
 * returns 0 on success, otherwise errno.
 */
int stmmac_dvr_probe(struct device *device,
		     struct plat_stmmacenet_data *plat_dat,
		     struct stmmac_resources *res)
{
	struct net_device *ndev = NULL;
	struct stmmac_priv *priv;
	u32 rxq;
	int i, ret = 0;

	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, device);

	priv = netdev_priv(ndev);
	priv->device = device;
	priv->dev = ndev;

	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
	}

	priv->xstats.pcpu_stats =
		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
	if (!priv->xstats.pcpu_stats)
		return -ENOMEM;

	stmmac_set_ethtool_ops(ndev);
	priv->pause = pause;
	priv->plat = plat_dat;
	priv->ioaddr = res->addr;
	priv->dev->base_addr = (unsigned long)res->addr;
	priv->plat->dma_cfg->multi_msi_en =
		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);

	priv->dev->irq = res->irq;
	priv->wol_irq = res->wol_irq;
	priv->lpi_irq = res->lpi_irq;
	priv->sfty_irq = res->sfty_irq;
	priv->sfty_ce_irq = res->sfty_ce_irq;
	priv->sfty_ue_irq = res->sfty_ue_irq;
	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
		priv->rx_irq[i] = res->rx_irq[i];
	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
		priv->tx_irq[i] = res->tx_irq[i];

	if (!is_zero_ether_addr(res->mac))
		eth_hw_addr_set(priv->dev, res->mac);

	dev_set_drvdata(device, priv->dev);

	/* Verify driver arguments */
	stmmac_verify_args();

	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
	if (!priv->af_xdp_zc_qps)
		return -ENOMEM;

	/* Allocate workqueue */
	priv->wq = create_singlethread_workqueue("stmmac_wq");
	if (!priv->wq) {
		dev_err(priv->device, "failed to create workqueue\n");
		ret = -ENOMEM;
		goto error_wq_init;
	}

	INIT_WORK(&priv->service_task, stmmac_service_task);

	/* Initialize Link Partner FPE workqueue */
	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);

	/* Override with kernel parameters if supplied XXX CRS XXX
	 * this needs to have multiple instances
	 */
	if ((phyaddr >= 0) && (phyaddr <= 31))
		priv->plat->phy_addr = phyaddr;

	if (priv->plat->stmmac_rst) {
		ret = reset_control_assert(priv->plat->stmmac_rst);
		reset_control_deassert(priv->plat->stmmac_rst);
		/* Some reset controllers have only reset callback instead of
		 * assert + deassert callbacks pair.
		 */
		if (ret == -ENOTSUPP)
			reset_control_reset(priv->plat->stmmac_rst);
	}

	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
	if (ret == -ENOTSUPP)
		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
			ERR_PTR(ret));

	/* Wait a bit for the reset to take effect */
	udelay(10);

	/* Init MAC and get the capabilities */
	ret = stmmac_hw_init(priv);
	if (ret)
		goto error_hw_init;

	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
	 */
	if (priv->synopsys_id < DWMAC_CORE_5_20)
		priv->plat->dma_cfg->dche = false;

	stmmac_check_ether_addr(priv);

	ndev->netdev_ops = &stmmac_netdev_ops;

	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_RXCSUM;
	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
			     NETDEV_XDP_ACT_XSK_ZEROCOPY;

	ret = stmmac_tc_init(priv, priv);
	if (!ret) {
		ndev->hw_features |= NETIF_F_HW_TC;
	}

	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
		if (priv->plat->has_gmac4)
			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
		priv->tso = true;
		dev_info(priv->device, "TSO feature enabled\n");
	}

	if (priv->dma_cap.sphen &&
	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
		ndev->hw_features |= NETIF_F_GRO;
		priv->sph_cap = true;
		priv->sph = priv->sph_cap;
		dev_info(priv->device, "SPH feature enabled\n");
	}

	/* Ideally our host DMA address width is the same as for the
	 * device. However, it may differ and then we have to use our
	 * host DMA width for allocation and the device DMA width for
	 * register handling.
	 */
	if (priv->plat->host_dma_width)
		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
	else
		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;

	if (priv->dma_cap.host_dma_width) {
		ret = dma_set_mask_and_coherent(device,
				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
		if (!ret) {
			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);

			/*
			 * If more than 32 bits can be addressed, make sure to
			 * enable enhanced addressing mode.
			 */
			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
				priv->plat->dma_cfg->eame = true;
		} else {
			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
			if (ret) {
				dev_err(priv->device, "Failed to set DMA Mask\n");
				goto error_hw_init;
			}

			priv->dma_cap.host_dma_width = 32;
		}
	}

	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
	/* Both mac100 and gmac support receive VLAN tag detection */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
	ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	priv->hw->hw_vlan_en = true;

	if (priv->dma_cap.vlhash) {
		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
	}
	if (priv->dma_cap.vlins) {
		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
		if (priv->dma_cap.dvlan)
			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
	}
#endif
	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	priv->xstats.threshold = tc;

	/* Initialize RSS */
	rxq = priv->plat->rx_queues_to_use;
	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);

	if (priv->dma_cap.rssen && priv->plat->rss_en)
		ndev->features |= NETIF_F_RXHASH;

	ndev->vlan_features |= ndev->features;
	/* TSO doesn't work on VLANs yet */
	ndev->vlan_features &= ~NETIF_F_TSO;

	/* MTU range: 46 - hw-specific max */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	if (priv->plat->has_xgmac)
		ndev->max_mtu = XGMAC_JUMBO_LEN;
	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
		ndev->max_mtu = JUMBO_LEN;
	else
		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
	 * as well as plat->maxmtu < ndev->min_mtu which is an invalid range.
	 */
	if ((priv->plat->maxmtu < ndev->max_mtu) &&
	    (priv->plat->maxmtu >= ndev->min_mtu))
		ndev->max_mtu = priv->plat->maxmtu;
	else if (priv->plat->maxmtu < ndev->min_mtu)
		dev_warn(priv->device,
			 "%s: warning: maxmtu having invalid value (%d)\n",
			 __func__, priv->plat->maxmtu);

	if (flow_ctrl)
		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */

	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	/* Setup channels NAPI */
	stmmac_napi_add(ndev);

	mutex_init(&priv->lock);

	/* If a specific clk_csr value is passed from the platform
	 * this means that the CSR Clock Range selection cannot be
	 * changed at run-time and it is fixed. Viceversa the driver'll try to
	 * set the MDC clock dynamically according to the actual clock
	 * input.
	 */
	if (priv->plat->clk_csr >= 0)
		priv->clk_csr = priv->plat->clk_csr;
	else
		stmmac_clk_csr_set(priv);

	stmmac_check_pcs_mode(priv);

	pm_runtime_get_noresume(device);
	pm_runtime_set_active(device);
	if (!pm_runtime_enabled(device))
		pm_runtime_enable(device);

	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI) {
		/* MDIO bus Registration */
		ret = stmmac_mdio_register(ndev);
		if (ret < 0) {
			dev_err_probe(priv->device, ret,
				      "%s: MDIO bus (id: %d) registration failed\n",
				      __func__, priv->plat->bus_id);
			goto error_mdio_register;
		}
	}

	if (priv->plat->speed_mode_2500)
		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);

	if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
		ret = stmmac_xpcs_setup(priv->mii);
		if (ret)
			goto error_xpcs_setup;
	}

	ret = stmmac_phy_setup(priv);
	if (ret) {
		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
		goto error_phy_setup;
	}

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->device, "%s: ERROR %i registering the device\n",
			__func__, ret);
		goto error_netdev_register;
	}

#ifdef CONFIG_DEBUG_FS
	stmmac_init_fs(ndev);
#endif

	if (priv->plat->dump_debug_regs)
		priv->plat->dump_debug_regs(priv->plat->bsp_priv);

	/* Let pm_runtime_put() disable the clocks.
	 * If CONFIG_PM is not enabled, the clocks will stay powered.
	 */
	pm_runtime_put(device);

	return ret;

error_netdev_register:
	phylink_destroy(priv->phylink);
error_xpcs_setup:
error_phy_setup:
	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
error_mdio_register:
	stmmac_napi_del(ndev);
error_hw_init:
	destroy_workqueue(priv->wq);
error_wq_init:
	bitmap_free(priv->af_xdp_zc_qps);

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
/**
 * stmmac_dvr_remove
 * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, changes the link status and releases the DMA descriptor rings.
 */
void stmmac_dvr_remove(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	netdev_info(priv->dev, "%s: removing driver", __func__);

	pm_runtime_get_sync(dev);

	stmmac_stop_all_dma(priv);
	stmmac_mac_set(priv, priv->ioaddr, false);
	netif_carrier_off(ndev);
	unregister_netdev(ndev);

#ifdef CONFIG_DEBUG_FS
	stmmac_exit_fs(ndev);
#endif
	phylink_destroy(priv->phylink);
	if (priv->plat->stmmac_rst)
		reset_control_assert(priv->plat->stmmac_rst);
	reset_control_assert(priv->plat->stmmac_ahb_rst);
	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
	destroy_workqueue(priv->wq);
	mutex_destroy(&priv->lock);
	bitmap_free(priv->af_xdp_zc_qps);

	pm_runtime_disable(dev);
	pm_runtime_put_noidle(dev);
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
/**
 * stmmac_suspend - suspend callback
 * @dev: device pointer
 * Description: this is the function to suspend the device and it is called
 * by the platform driver to stop the network queue, release the resources,
 * program the PMT register (for WoL), clean and release driver resources.
 */
int stmmac_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	u32 chan;

	if (!ndev || !netif_running(ndev))
		return 0;

	mutex_lock(&priv->lock);

	netif_device_detach(ndev);

	stmmac_disable_all_queues(priv);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

	if (priv->eee_enabled) {
		priv->tx_path_in_lpi_mode = false;
		del_timer_sync(&priv->eee_ctrl_timer);
	}

	/* Stop TX/RX DMA */
	stmmac_stop_all_dma(priv);

	if (priv->plat->serdes_powerdown)
		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);

	/* Enable Power down mode by programming the PMT regs */
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		stmmac_pmt(priv, priv->hw, priv->wolopts);
		priv->irq_wake = 1;
	} else {
		stmmac_mac_set(priv, priv->ioaddr, false);
		pinctrl_pm_select_sleep_state(priv->device);
	}

	mutex_unlock(&priv->lock);

	rtnl_lock();
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		phylink_suspend(priv->phylink, true);
	} else {
		if (device_may_wakeup(priv->device))
			phylink_speed_down(priv->phylink, false);
		phylink_suspend(priv->phylink, false);
	}
	rtnl_unlock();

	if (priv->dma_cap.fpesel) {
		/* Disable FPE */
		stmmac_fpe_configure(priv, priv->ioaddr,
				     priv->plat->fpe_cfg,
				     priv->plat->tx_queues_to_use,
				     priv->plat->rx_queues_to_use, false);

		stmmac_fpe_handshake(priv, false);
		stmmac_fpe_stop_wq(priv);
	}

	priv->speed = SPEED_UNKNOWN;
	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);
static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];

	rx_q->cur_rx = 0;
	rx_q->dirty_rx = 0;
}

static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];

	tx_q->cur_tx = 0;
	tx_q->dirty_tx = 0;
	tx_q->mss = 0;

	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
}
/**
 * stmmac_reset_queues_param - reset queue parameters
 * @priv: device pointer
 */
static void stmmac_reset_queues_param(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < rx_cnt; queue++)
		stmmac_reset_rx_queue(priv, queue);

	for (queue = 0; queue < tx_cnt; queue++)
		stmmac_reset_tx_queue(priv, queue);
}
/**
 * stmmac_resume - resume callback
 * @dev: device pointer
 * Description: when resume this function is invoked to setup the DMA and CORE
 * in a usable state.
 */
int stmmac_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret;

	if (!netif_running(ndev))
		return 0;

	/* Power Down bit, into the PM register, is cleared
	 * automatically as soon as a magic packet or a Wake-up frame
	 * is received. Anyway, it's better to manually clear
	 * this bit because it can generate problems while resuming
	 * from another device (e.g. serial console).
	 */
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		mutex_lock(&priv->lock);
		stmmac_pmt(priv, priv->hw, 0);
		mutex_unlock(&priv->lock);
		priv->irq_wake = 0;
	} else {
		pinctrl_pm_select_default_state(priv->device);
		/* reset the phy so that it's ready */
		if (priv->mii)
			stmmac_mdio_reset(priv->mii);
	}

	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
	    priv->plat->serdes_powerup) {
		ret = priv->plat->serdes_powerup(ndev,
						 priv->plat->bsp_priv);
		if (ret < 0)
			return ret;
	}

	rtnl_lock();
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		phylink_resume(priv->phylink);
	} else {
		phylink_resume(priv->phylink);
		if (device_may_wakeup(priv->device))
			phylink_speed_up(priv->phylink);
	}
	rtnl_unlock();

	rtnl_lock();
	mutex_lock(&priv->lock);

	stmmac_reset_queues_param(priv);

	stmmac_free_tx_skbufs(priv);
	stmmac_clear_descriptors(priv, &priv->dma_conf);

	stmmac_hw_setup(ndev, false);
	stmmac_init_coalesce(priv);
	stmmac_set_rx_mode(ndev);

	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);

	stmmac_enable_all_queues(priv);
	stmmac_enable_all_dma_irq(priv);

	mutex_unlock(&priv->lock);
	rtnl_unlock();

	netif_device_attach(ndev);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_resume);
#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
{
	char *opt;

	if (!str || !*str)
		return 1;
	while ((opt = strsep(&str, ",")) != NULL) {
		if (!strncmp(opt, "debug:", 6)) {
			if (kstrtoint(opt + 6, 0, &debug))
				goto err;
		} else if (!strncmp(opt, "phyaddr:", 8)) {
			if (kstrtoint(opt + 8, 0, &phyaddr))
				goto err;
		} else if (!strncmp(opt, "buf_sz:", 7)) {
			if (kstrtoint(opt + 7, 0, &buf_sz))
				goto err;
		} else if (!strncmp(opt, "tc:", 3)) {
			if (kstrtoint(opt + 3, 0, &tc))
				goto err;
		} else if (!strncmp(opt, "watchdog:", 9)) {
			if (kstrtoint(opt + 9, 0, &watchdog))
				goto err;
		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
			if (kstrtoint(opt + 10, 0, &flow_ctrl))
				goto err;
		} else if (!strncmp(opt, "pause:", 6)) {
			if (kstrtoint(opt + 6, 0, &pause))
				goto err;
		} else if (!strncmp(opt, "eee_timer:", 10)) {
			if (kstrtoint(opt + 10, 0, &eee_timer))
				goto err;
		} else if (!strncmp(opt, "chain_mode:", 11)) {
			if (kstrtoint(opt + 11, 0, &chain_mode))
				goto err;
		}
	}
	return 1;

err:
	pr_err("%s: ERROR broken module parameter conversion", __func__);
	return 1;
}

__setup("stmmaceth=", stmmac_cmdline_opt);
#endif /* MODULE */
static int __init stmmac_init(void)
{
#ifdef CONFIG_DEBUG_FS
	/* Create debugfs main directory if it doesn't exist yet */
	if (!stmmac_fs_dir)
		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
	register_netdevice_notifier(&stmmac_notifier);
#endif

	return 0;
}

static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
	unregister_netdevice_notifier(&stmmac_notifier);
	debugfs_remove_recursive(stmmac_fs_dir);
#endif
}

module_init(stmmac_init)
module_exit(stmmac_exit)

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");