1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4 ST Ethernet IPs are built around a Synopsys IP Core.
6 Copyright(C) 2007-2011 STMicroelectronics Ltd
9 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
11 Documentation available at:
12 http://www.stlinux.com
14 https://bugzilla.stlinux.com/
15 *******************************************************************************/
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
47 #include "stmmac_xdp.h"
48 #include <linux/reset.h>
49 #include <linux/of_mdio.h>
50 #include "dwmac1000.h"
54 /* As long as the interface is active, we keep the timestamping counter enabled
55 * with fine resolution and binary rollover. This avoid non-monotonic behavior
56 * (clock jumps) when changing timestamping settings at runtime.
58 #define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
61 #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
62 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
64 /* Module parameters */
66 static int watchdog
= TX_TIMEO
;
67 module_param(watchdog
, int, 0644);
68 MODULE_PARM_DESC(watchdog
, "Transmit timeout in milliseconds (default 5s)");
70 static int debug
= -1;
71 module_param(debug
, int, 0644);
72 MODULE_PARM_DESC(debug
, "Message Level (-1: default, 0: no output, 16: all)");
74 static int phyaddr
= -1;
75 module_param(phyaddr
, int, 0444);
76 MODULE_PARM_DESC(phyaddr
, "Physical device address");
78 #define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
79 #define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4)
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX 256
83 #define STMMAC_TX_XSK_AVAIL 16
84 #define STMMAC_RX_FILL_BATCH 16
86 #define STMMAC_XDP_PASS 0
87 #define STMMAC_XDP_CONSUMED BIT(0)
88 #define STMMAC_XDP_TX BIT(1)
89 #define STMMAC_XDP_REDIRECT BIT(2)
91 static int flow_ctrl
= FLOW_AUTO
;
92 module_param(flow_ctrl
, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl
, "Flow control ability [on/off]");
95 static int pause
= PAUSE_TIME
;
96 module_param(pause
, int, 0644);
97 MODULE_PARM_DESC(pause
, "Flow Control Pause Time");
100 static int tc
= TC_DEFAULT
;
101 module_param(tc
, int, 0644);
102 MODULE_PARM_DESC(tc
, "DMA threshold control value");
104 #define DEFAULT_BUFSIZE 1536
105 static int buf_sz
= DEFAULT_BUFSIZE
;
106 module_param(buf_sz
, int, 0644);
107 MODULE_PARM_DESC(buf_sz
, "DMA buffer size");
109 #define STMMAC_RX_COPYBREAK 256
111 static const u32 default_msg_level
= (NETIF_MSG_DRV
| NETIF_MSG_PROBE
|
112 NETIF_MSG_LINK
| NETIF_MSG_IFUP
|
113 NETIF_MSG_IFDOWN
| NETIF_MSG_TIMER
);
115 #define STMMAC_DEFAULT_LPI_TIMER 1000
116 static int eee_timer
= STMMAC_DEFAULT_LPI_TIMER
;
117 module_param(eee_timer
, int, 0644);
118 MODULE_PARM_DESC(eee_timer
, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
121 /* By default the driver will use the ring mode to manage tx and rx descriptors,
122 * but allow user to force to use the chain instead of the ring
124 static unsigned int chain_mode
;
125 module_param(chain_mode
, int, 0444);
126 MODULE_PARM_DESC(chain_mode
, "To use chain instead of ring mode");
128 static irqreturn_t
stmmac_interrupt(int irq
, void *dev_id
);
129 /* For MSI interrupts handling */
130 static irqreturn_t
stmmac_mac_interrupt(int irq
, void *dev_id
);
131 static irqreturn_t
stmmac_safety_interrupt(int irq
, void *dev_id
);
132 static irqreturn_t
stmmac_msi_intr_tx(int irq
, void *data
);
133 static irqreturn_t
stmmac_msi_intr_rx(int irq
, void *data
);
134 static void stmmac_reset_rx_queue(struct stmmac_priv
*priv
, u32 queue
);
135 static void stmmac_reset_tx_queue(struct stmmac_priv
*priv
, u32 queue
);
136 static void stmmac_reset_queues_param(struct stmmac_priv
*priv
);
137 static void stmmac_tx_timer_arm(struct stmmac_priv
*priv
, u32 queue
);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv
*priv
, int queue
);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv
*priv
, u32 txmode
,
140 u32 rxmode
, u32 chan
);
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops
;
144 static void stmmac_init_fs(struct net_device
*dev
);
145 static void stmmac_exit_fs(struct net_device
*dev
);
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
150 int stmmac_bus_clks_config(struct stmmac_priv
*priv
, bool enabled
)
155 ret
= clk_prepare_enable(priv
->plat
->stmmac_clk
);
158 ret
= clk_prepare_enable(priv
->plat
->pclk
);
160 clk_disable_unprepare(priv
->plat
->stmmac_clk
);
163 if (priv
->plat
->clks_config
) {
164 ret
= priv
->plat
->clks_config(priv
->plat
->bsp_priv
, enabled
);
166 clk_disable_unprepare(priv
->plat
->stmmac_clk
);
167 clk_disable_unprepare(priv
->plat
->pclk
);
172 clk_disable_unprepare(priv
->plat
->stmmac_clk
);
173 clk_disable_unprepare(priv
->plat
->pclk
);
174 if (priv
->plat
->clks_config
)
175 priv
->plat
->clks_config(priv
->plat
->bsp_priv
, enabled
);
180 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config
);
183 * stmmac_verify_args - verify the driver parameters.
184 * Description: it checks the driver parameters and set a default in case of
187 static void stmmac_verify_args(void)
189 if (unlikely(watchdog
< 0))
191 if (unlikely((buf_sz
< DEFAULT_BUFSIZE
) || (buf_sz
> BUF_SIZE_16KiB
)))
192 buf_sz
= DEFAULT_BUFSIZE
;
193 if (unlikely(flow_ctrl
> 1))
194 flow_ctrl
= FLOW_AUTO
;
195 else if (likely(flow_ctrl
< 0))
196 flow_ctrl
= FLOW_OFF
;
197 if (unlikely((pause
< 0) || (pause
> 0xffff)))
200 eee_timer
= STMMAC_DEFAULT_LPI_TIMER
;
203 static void __stmmac_disable_all_queues(struct stmmac_priv
*priv
)
205 u32 rx_queues_cnt
= priv
->plat
->rx_queues_to_use
;
206 u32 tx_queues_cnt
= priv
->plat
->tx_queues_to_use
;
207 u32 maxq
= max(rx_queues_cnt
, tx_queues_cnt
);
210 for (queue
= 0; queue
< maxq
; queue
++) {
211 struct stmmac_channel
*ch
= &priv
->channel
[queue
];
213 if (stmmac_xdp_is_enabled(priv
) &&
214 test_bit(queue
, priv
->af_xdp_zc_qps
)) {
215 napi_disable(&ch
->rxtx_napi
);
219 if (queue
< rx_queues_cnt
)
220 napi_disable(&ch
->rx_napi
);
221 if (queue
< tx_queues_cnt
)
222 napi_disable(&ch
->tx_napi
);
227 * stmmac_disable_all_queues - Disable all queues
228 * @priv: driver private structure
230 static void stmmac_disable_all_queues(struct stmmac_priv
*priv
)
232 u32 rx_queues_cnt
= priv
->plat
->rx_queues_to_use
;
233 struct stmmac_rx_queue
*rx_q
;
236 /* synchronize_rcu() needed for pending XDP buffers to drain */
237 for (queue
= 0; queue
< rx_queues_cnt
; queue
++) {
238 rx_q
= &priv
->dma_conf
.rx_queue
[queue
];
239 if (rx_q
->xsk_pool
) {
245 __stmmac_disable_all_queues(priv
);
249 * stmmac_enable_all_queues - Enable all queues
250 * @priv: driver private structure
252 static void stmmac_enable_all_queues(struct stmmac_priv
*priv
)
254 u32 rx_queues_cnt
= priv
->plat
->rx_queues_to_use
;
255 u32 tx_queues_cnt
= priv
->plat
->tx_queues_to_use
;
256 u32 maxq
= max(rx_queues_cnt
, tx_queues_cnt
);
259 for (queue
= 0; queue
< maxq
; queue
++) {
260 struct stmmac_channel
*ch
= &priv
->channel
[queue
];
262 if (stmmac_xdp_is_enabled(priv
) &&
263 test_bit(queue
, priv
->af_xdp_zc_qps
)) {
264 napi_enable(&ch
->rxtx_napi
);
268 if (queue
< rx_queues_cnt
)
269 napi_enable(&ch
->rx_napi
);
270 if (queue
< tx_queues_cnt
)
271 napi_enable(&ch
->tx_napi
);
275 static void stmmac_service_event_schedule(struct stmmac_priv
*priv
)
277 if (!test_bit(STMMAC_DOWN
, &priv
->state
) &&
278 !test_and_set_bit(STMMAC_SERVICE_SCHED
, &priv
->state
))
279 queue_work(priv
->wq
, &priv
->service_task
);
282 static void stmmac_global_err(struct stmmac_priv
*priv
)
284 netif_carrier_off(priv
->dev
);
285 set_bit(STMMAC_RESET_REQUESTED
, &priv
->state
);
286 stmmac_service_event_schedule(priv
);
290 * stmmac_clk_csr_set - dynamically set the MDC clock
291 * @priv: driver private structure
292 * Description: this is to dynamically set the MDC clock according to the csr
295 * If a specific clk_csr value is passed from the platform
296 * this means that the CSR Clock Range selection cannot be
297 * changed at run-time and it is fixed (as reported in the driver
298 * documentation). Viceversa the driver will try to set the MDC
299 * clock dynamically according to the actual clock input.
301 static void stmmac_clk_csr_set(struct stmmac_priv
*priv
)
305 clk_rate
= clk_get_rate(priv
->plat
->stmmac_clk
);
307 /* Platform provided default clk_csr would be assumed valid
308 * for all other cases except for the below mentioned ones.
309 * For values higher than the IEEE 802.3 specified frequency
310 * we can not estimate the proper divider as it is not known
311 * the frequency of clk_csr_i. So we do not change the default
314 if (!(priv
->clk_csr
& MAC_CSR_H_FRQ_MASK
)) {
315 if (clk_rate
< CSR_F_35M
)
316 priv
->clk_csr
= STMMAC_CSR_20_35M
;
317 else if ((clk_rate
>= CSR_F_35M
) && (clk_rate
< CSR_F_60M
))
318 priv
->clk_csr
= STMMAC_CSR_35_60M
;
319 else if ((clk_rate
>= CSR_F_60M
) && (clk_rate
< CSR_F_100M
))
320 priv
->clk_csr
= STMMAC_CSR_60_100M
;
321 else if ((clk_rate
>= CSR_F_100M
) && (clk_rate
< CSR_F_150M
))
322 priv
->clk_csr
= STMMAC_CSR_100_150M
;
323 else if ((clk_rate
>= CSR_F_150M
) && (clk_rate
< CSR_F_250M
))
324 priv
->clk_csr
= STMMAC_CSR_150_250M
;
325 else if ((clk_rate
>= CSR_F_250M
) && (clk_rate
<= CSR_F_300M
))
326 priv
->clk_csr
= STMMAC_CSR_250_300M
;
329 if (priv
->plat
->flags
& STMMAC_FLAG_HAS_SUN8I
) {
330 if (clk_rate
> 160000000)
331 priv
->clk_csr
= 0x03;
332 else if (clk_rate
> 80000000)
333 priv
->clk_csr
= 0x02;
334 else if (clk_rate
> 40000000)
335 priv
->clk_csr
= 0x01;
340 if (priv
->plat
->has_xgmac
) {
341 if (clk_rate
> 400000000)
343 else if (clk_rate
> 350000000)
345 else if (clk_rate
> 300000000)
347 else if (clk_rate
> 250000000)
349 else if (clk_rate
> 150000000)
356 static void print_pkt(unsigned char *buf
, int len
)
358 pr_debug("len = %d byte, buf addr: 0x%p\n", len
, buf
);
359 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET
, buf
, len
);
362 static inline u32
stmmac_tx_avail(struct stmmac_priv
*priv
, u32 queue
)
364 struct stmmac_tx_queue
*tx_q
= &priv
->dma_conf
.tx_queue
[queue
];
367 if (tx_q
->dirty_tx
> tx_q
->cur_tx
)
368 avail
= tx_q
->dirty_tx
- tx_q
->cur_tx
- 1;
370 avail
= priv
->dma_conf
.dma_tx_size
- tx_q
->cur_tx
+ tx_q
->dirty_tx
- 1;
376 * stmmac_rx_dirty - Get RX queue dirty
377 * @priv: driver private structure
378 * @queue: RX queue index
380 static inline u32
stmmac_rx_dirty(struct stmmac_priv
*priv
, u32 queue
)
382 struct stmmac_rx_queue
*rx_q
= &priv
->dma_conf
.rx_queue
[queue
];
385 if (rx_q
->dirty_rx
<= rx_q
->cur_rx
)
386 dirty
= rx_q
->cur_rx
- rx_q
->dirty_rx
;
388 dirty
= priv
->dma_conf
.dma_rx_size
- rx_q
->dirty_rx
+ rx_q
->cur_rx
;
393 static void stmmac_lpi_entry_timer_config(struct stmmac_priv
*priv
, bool en
)
397 /* Clear/set the SW EEE timer flag based on LPI ET enablement */
398 priv
->eee_sw_timer_en
= en
? 0 : 1;
399 tx_lpi_timer
= en
? priv
->tx_lpi_timer
: 0;
400 stmmac_set_eee_lpi_timer(priv
, priv
->hw
, tx_lpi_timer
);
404 * stmmac_enable_eee_mode - check and enter in LPI mode
405 * @priv: driver private structure
406 * Description: this function is to verify and enter in LPI mode in case of
409 static int stmmac_enable_eee_mode(struct stmmac_priv
*priv
)
411 u32 tx_cnt
= priv
->plat
->tx_queues_to_use
;
414 /* check if all TX queues have the work finished */
415 for (queue
= 0; queue
< tx_cnt
; queue
++) {
416 struct stmmac_tx_queue
*tx_q
= &priv
->dma_conf
.tx_queue
[queue
];
418 if (tx_q
->dirty_tx
!= tx_q
->cur_tx
)
419 return -EBUSY
; /* still unfinished work */
422 /* Check and enter in LPI mode */
423 if (!priv
->tx_path_in_lpi_mode
)
424 stmmac_set_eee_mode(priv
, priv
->hw
,
425 priv
->plat
->flags
& STMMAC_FLAG_EN_TX_LPI_CLOCKGATING
);
430 * stmmac_disable_eee_mode - disable and exit from LPI mode
431 * @priv: driver private structure
432 * Description: this function is to exit and disable EEE in case of
433 * LPI state is true. This is called by the xmit.
435 void stmmac_disable_eee_mode(struct stmmac_priv
*priv
)
437 if (!priv
->eee_sw_timer_en
) {
438 stmmac_lpi_entry_timer_config(priv
, 0);
442 stmmac_reset_eee_mode(priv
, priv
->hw
);
443 del_timer_sync(&priv
->eee_ctrl_timer
);
444 priv
->tx_path_in_lpi_mode
= false;
448 * stmmac_eee_ctrl_timer - EEE TX SW timer.
449 * @t: timer_list struct containing private info
451 * if there is no data transfer and if we are not in LPI state,
452 * then MAC Transmitter can be moved to LPI state.
454 static void stmmac_eee_ctrl_timer(struct timer_list
*t
)
456 struct stmmac_priv
*priv
= from_timer(priv
, t
, eee_ctrl_timer
);
458 if (stmmac_enable_eee_mode(priv
))
459 mod_timer(&priv
->eee_ctrl_timer
, STMMAC_LPI_T(priv
->tx_lpi_timer
));
463 * stmmac_eee_init - init EEE
464 * @priv: driver private structure
466 * if the GMAC supports the EEE (from the HW cap reg) and the phy device
467 * can also manage EEE, this function enable the LPI state and start related
470 bool stmmac_eee_init(struct stmmac_priv
*priv
)
472 int eee_tw_timer
= priv
->eee_tw_timer
;
474 /* Using PCS we cannot dial with the phy registers at this stage
475 * so we do not support extra feature like EEE.
477 if (priv
->hw
->pcs
== STMMAC_PCS_TBI
||
478 priv
->hw
->pcs
== STMMAC_PCS_RTBI
)
481 /* Check if MAC core supports the EEE feature. */
482 if (!priv
->dma_cap
.eee
)
485 mutex_lock(&priv
->lock
);
487 /* Check if it needs to be deactivated */
488 if (!priv
->eee_active
) {
489 if (priv
->eee_enabled
) {
490 netdev_dbg(priv
->dev
, "disable EEE\n");
491 stmmac_lpi_entry_timer_config(priv
, 0);
492 del_timer_sync(&priv
->eee_ctrl_timer
);
493 stmmac_set_eee_timer(priv
, priv
->hw
, 0, eee_tw_timer
);
495 xpcs_config_eee(priv
->hw
->xpcs
,
496 priv
->plat
->mult_fact_100ns
,
499 mutex_unlock(&priv
->lock
);
503 if (priv
->eee_active
&& !priv
->eee_enabled
) {
504 timer_setup(&priv
->eee_ctrl_timer
, stmmac_eee_ctrl_timer
, 0);
505 stmmac_set_eee_timer(priv
, priv
->hw
, STMMAC_DEFAULT_LIT_LS
,
508 xpcs_config_eee(priv
->hw
->xpcs
,
509 priv
->plat
->mult_fact_100ns
,
513 if (priv
->plat
->has_gmac4
&& priv
->tx_lpi_timer
<= STMMAC_ET_MAX
) {
514 del_timer_sync(&priv
->eee_ctrl_timer
);
515 priv
->tx_path_in_lpi_mode
= false;
516 stmmac_lpi_entry_timer_config(priv
, 1);
518 stmmac_lpi_entry_timer_config(priv
, 0);
519 mod_timer(&priv
->eee_ctrl_timer
,
520 STMMAC_LPI_T(priv
->tx_lpi_timer
));
523 mutex_unlock(&priv
->lock
);
524 netdev_dbg(priv
->dev
, "Energy-Efficient Ethernet initialized\n");
528 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
529 * @priv: driver private structure
530 * @p : descriptor pointer
531 * @skb : the socket buffer
533 * This function will read timestamp from the descriptor & pass it to stack.
534 * and also perform some sanity checks.
536 static void stmmac_get_tx_hwtstamp(struct stmmac_priv
*priv
,
537 struct dma_desc
*p
, struct sk_buff
*skb
)
539 struct skb_shared_hwtstamps shhwtstamp
;
543 if (!priv
->hwts_tx_en
)
546 /* exit if skb doesn't support hw tstamp */
547 if (likely(!skb
|| !(skb_shinfo(skb
)->tx_flags
& SKBTX_IN_PROGRESS
)))
550 /* check tx tstamp status */
551 if (stmmac_get_tx_timestamp_status(priv
, p
)) {
552 stmmac_get_timestamp(priv
, p
, priv
->adv_ts
, &ns
);
554 } else if (!stmmac_get_mac_tx_timestamp(priv
, priv
->hw
, &ns
)) {
559 ns
-= priv
->plat
->cdc_error_adj
;
561 memset(&shhwtstamp
, 0, sizeof(struct skb_shared_hwtstamps
));
562 shhwtstamp
.hwtstamp
= ns_to_ktime(ns
);
564 netdev_dbg(priv
->dev
, "get valid TX hw timestamp %llu\n", ns
);
565 /* pass tstamp to stack */
566 skb_tstamp_tx(skb
, &shhwtstamp
);
570 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
571 * @priv: driver private structure
572 * @p : descriptor pointer
573 * @np : next descriptor pointer
574 * @skb : the socket buffer
576 * This function will read received packet's timestamp from the descriptor
577 * and pass it to stack. It also perform some sanity checks.
579 static void stmmac_get_rx_hwtstamp(struct stmmac_priv
*priv
, struct dma_desc
*p
,
580 struct dma_desc
*np
, struct sk_buff
*skb
)
582 struct skb_shared_hwtstamps
*shhwtstamp
= NULL
;
583 struct dma_desc
*desc
= p
;
586 if (!priv
->hwts_rx_en
)
588 /* For GMAC4, the valid timestamp is from CTX next desc. */
589 if (priv
->plat
->has_gmac4
|| priv
->plat
->has_xgmac
)
592 /* Check if timestamp is available */
593 if (stmmac_get_rx_timestamp_status(priv
, p
, np
, priv
->adv_ts
)) {
594 stmmac_get_timestamp(priv
, desc
, priv
->adv_ts
, &ns
);
596 ns
-= priv
->plat
->cdc_error_adj
;
598 netdev_dbg(priv
->dev
, "get valid RX hw timestamp %llu\n", ns
);
599 shhwtstamp
= skb_hwtstamps(skb
);
600 memset(shhwtstamp
, 0, sizeof(struct skb_shared_hwtstamps
));
601 shhwtstamp
->hwtstamp
= ns_to_ktime(ns
);
603 netdev_dbg(priv
->dev
, "cannot get RX hw timestamp\n");
608 * stmmac_hwtstamp_set - control hardware timestamping.
609 * @dev: device pointer.
610 * @ifr: An IOCTL specific structure, that can contain a pointer to
611 * a proprietary structure used to pass information to the driver.
613 * This function configures the MAC to enable/disable both outgoing(TX)
614 * and incoming(RX) packets time stamping based on user input.
616 * 0 on success and an appropriate -ve integer on failure.
618 static int stmmac_hwtstamp_set(struct net_device
*dev
, struct ifreq
*ifr
)
620 struct stmmac_priv
*priv
= netdev_priv(dev
);
621 struct hwtstamp_config config
;
624 u32 ptp_over_ipv4_udp
= 0;
625 u32 ptp_over_ipv6_udp
= 0;
626 u32 ptp_over_ethernet
= 0;
627 u32 snap_type_sel
= 0;
628 u32 ts_master_en
= 0;
631 if (!(priv
->dma_cap
.time_stamp
|| priv
->adv_ts
)) {
632 netdev_alert(priv
->dev
, "No support for HW time stamping\n");
633 priv
->hwts_tx_en
= 0;
634 priv
->hwts_rx_en
= 0;
639 if (copy_from_user(&config
, ifr
->ifr_data
,
643 netdev_dbg(priv
->dev
, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
644 __func__
, config
.flags
, config
.tx_type
, config
.rx_filter
);
646 if (config
.tx_type
!= HWTSTAMP_TX_OFF
&&
647 config
.tx_type
!= HWTSTAMP_TX_ON
)
651 switch (config
.rx_filter
) {
652 case HWTSTAMP_FILTER_NONE
:
653 /* time stamp no incoming packet at all */
654 config
.rx_filter
= HWTSTAMP_FILTER_NONE
;
657 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT
:
658 /* PTP v1, UDP, any kind of event packet */
659 config
.rx_filter
= HWTSTAMP_FILTER_PTP_V1_L4_EVENT
;
660 /* 'xmac' hardware can support Sync, Pdelay_Req and
661 * Pdelay_resp by setting bit14 and bits17/16 to 01
662 * This leaves Delay_Req timestamps out.
663 * Enable all events *and* general purpose message
666 snap_type_sel
= PTP_TCR_SNAPTYPSEL_1
;
667 ptp_over_ipv4_udp
= PTP_TCR_TSIPV4ENA
;
668 ptp_over_ipv6_udp
= PTP_TCR_TSIPV6ENA
;
671 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC
:
672 /* PTP v1, UDP, Sync packet */
673 config
.rx_filter
= HWTSTAMP_FILTER_PTP_V1_L4_SYNC
;
674 /* take time stamp for SYNC messages only */
675 ts_event_en
= PTP_TCR_TSEVNTENA
;
677 ptp_over_ipv4_udp
= PTP_TCR_TSIPV4ENA
;
678 ptp_over_ipv6_udp
= PTP_TCR_TSIPV6ENA
;
681 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ
:
682 /* PTP v1, UDP, Delay_req packet */
683 config
.rx_filter
= HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ
;
684 /* take time stamp for Delay_Req messages only */
685 ts_master_en
= PTP_TCR_TSMSTRENA
;
686 ts_event_en
= PTP_TCR_TSEVNTENA
;
688 ptp_over_ipv4_udp
= PTP_TCR_TSIPV4ENA
;
689 ptp_over_ipv6_udp
= PTP_TCR_TSIPV6ENA
;
692 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT
:
693 /* PTP v2, UDP, any kind of event packet */
694 config
.rx_filter
= HWTSTAMP_FILTER_PTP_V2_L4_EVENT
;
695 ptp_v2
= PTP_TCR_TSVER2ENA
;
696 /* take time stamp for all event messages */
697 snap_type_sel
= PTP_TCR_SNAPTYPSEL_1
;
699 ptp_over_ipv4_udp
= PTP_TCR_TSIPV4ENA
;
700 ptp_over_ipv6_udp
= PTP_TCR_TSIPV6ENA
;
703 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC
:
704 /* PTP v2, UDP, Sync packet */
705 config
.rx_filter
= HWTSTAMP_FILTER_PTP_V2_L4_SYNC
;
706 ptp_v2
= PTP_TCR_TSVER2ENA
;
707 /* take time stamp for SYNC messages only */
708 ts_event_en
= PTP_TCR_TSEVNTENA
;
710 ptp_over_ipv4_udp
= PTP_TCR_TSIPV4ENA
;
711 ptp_over_ipv6_udp
= PTP_TCR_TSIPV6ENA
;
714 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ
:
715 /* PTP v2, UDP, Delay_req packet */
716 config
.rx_filter
= HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ
;
717 ptp_v2
= PTP_TCR_TSVER2ENA
;
718 /* take time stamp for Delay_Req messages only */
719 ts_master_en
= PTP_TCR_TSMSTRENA
;
720 ts_event_en
= PTP_TCR_TSEVNTENA
;
722 ptp_over_ipv4_udp
= PTP_TCR_TSIPV4ENA
;
723 ptp_over_ipv6_udp
= PTP_TCR_TSIPV6ENA
;
726 case HWTSTAMP_FILTER_PTP_V2_EVENT
:
727 /* PTP v2/802.AS1 any layer, any kind of event packet */
728 config
.rx_filter
= HWTSTAMP_FILTER_PTP_V2_EVENT
;
729 ptp_v2
= PTP_TCR_TSVER2ENA
;
730 snap_type_sel
= PTP_TCR_SNAPTYPSEL_1
;
731 if (priv
->synopsys_id
< DWMAC_CORE_4_10
)
732 ts_event_en
= PTP_TCR_TSEVNTENA
;
733 ptp_over_ipv4_udp
= PTP_TCR_TSIPV4ENA
;
734 ptp_over_ipv6_udp
= PTP_TCR_TSIPV6ENA
;
735 ptp_over_ethernet
= PTP_TCR_TSIPENA
;
738 case HWTSTAMP_FILTER_PTP_V2_SYNC
:
739 /* PTP v2/802.AS1, any layer, Sync packet */
740 config
.rx_filter
= HWTSTAMP_FILTER_PTP_V2_SYNC
;
741 ptp_v2
= PTP_TCR_TSVER2ENA
;
742 /* take time stamp for SYNC messages only */
743 ts_event_en
= PTP_TCR_TSEVNTENA
;
745 ptp_over_ipv4_udp
= PTP_TCR_TSIPV4ENA
;
746 ptp_over_ipv6_udp
= PTP_TCR_TSIPV6ENA
;
747 ptp_over_ethernet
= PTP_TCR_TSIPENA
;
750 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ
:
751 /* PTP v2/802.AS1, any layer, Delay_req packet */
752 config
.rx_filter
= HWTSTAMP_FILTER_PTP_V2_DELAY_REQ
;
753 ptp_v2
= PTP_TCR_TSVER2ENA
;
754 /* take time stamp for Delay_Req messages only */
755 ts_master_en
= PTP_TCR_TSMSTRENA
;
756 ts_event_en
= PTP_TCR_TSEVNTENA
;
758 ptp_over_ipv4_udp
= PTP_TCR_TSIPV4ENA
;
759 ptp_over_ipv6_udp
= PTP_TCR_TSIPV6ENA
;
760 ptp_over_ethernet
= PTP_TCR_TSIPENA
;
763 case HWTSTAMP_FILTER_NTP_ALL
:
764 case HWTSTAMP_FILTER_ALL
:
765 /* time stamp any incoming packet */
766 config
.rx_filter
= HWTSTAMP_FILTER_ALL
;
767 tstamp_all
= PTP_TCR_TSENALL
;
774 switch (config
.rx_filter
) {
775 case HWTSTAMP_FILTER_NONE
:
776 config
.rx_filter
= HWTSTAMP_FILTER_NONE
;
779 /* PTP v1, UDP, any kind of event packet */
780 config
.rx_filter
= HWTSTAMP_FILTER_PTP_V1_L4_EVENT
;
784 priv
->hwts_rx_en
= ((config
.rx_filter
== HWTSTAMP_FILTER_NONE
) ? 0 : 1);
785 priv
->hwts_tx_en
= config
.tx_type
== HWTSTAMP_TX_ON
;
787 priv
->systime_flags
= STMMAC_HWTS_ACTIVE
;
789 if (priv
->hwts_tx_en
|| priv
->hwts_rx_en
) {
790 priv
->systime_flags
|= tstamp_all
| ptp_v2
|
791 ptp_over_ethernet
| ptp_over_ipv6_udp
|
792 ptp_over_ipv4_udp
| ts_event_en
|
793 ts_master_en
| snap_type_sel
;
796 stmmac_config_hw_tstamping(priv
, priv
->ptpaddr
, priv
->systime_flags
);
798 memcpy(&priv
->tstamp_config
, &config
, sizeof(config
));
800 return copy_to_user(ifr
->ifr_data
, &config
,
801 sizeof(config
)) ? -EFAULT
: 0;
805 * stmmac_hwtstamp_get - read hardware timestamping.
806 * @dev: device pointer.
807 * @ifr: An IOCTL specific structure, that can contain a pointer to
808 * a proprietary structure used to pass information to the driver.
810 * This function obtain the current hardware timestamping settings
813 static int stmmac_hwtstamp_get(struct net_device
*dev
, struct ifreq
*ifr
)
815 struct stmmac_priv
*priv
= netdev_priv(dev
);
816 struct hwtstamp_config
*config
= &priv
->tstamp_config
;
818 if (!(priv
->dma_cap
.time_stamp
|| priv
->dma_cap
.atime_stamp
))
821 return copy_to_user(ifr
->ifr_data
, config
,
822 sizeof(*config
)) ? -EFAULT
: 0;
826 * stmmac_init_tstamp_counter - init hardware timestamping counter
827 * @priv: driver private structure
828 * @systime_flags: timestamping flags
830 * Initialize hardware counter for packet timestamping.
831 * This is valid as long as the interface is open and not suspended.
832 * Will be rerun after resuming from suspend, case in which the timestamping
833 * flags updated by stmmac_hwtstamp_set() also need to be restored.
835 int stmmac_init_tstamp_counter(struct stmmac_priv
*priv
, u32 systime_flags
)
837 bool xmac
= priv
->plat
->has_gmac4
|| priv
->plat
->has_xgmac
;
838 struct timespec64 now
;
842 if (!(priv
->dma_cap
.time_stamp
|| priv
->dma_cap
.atime_stamp
))
845 stmmac_config_hw_tstamping(priv
, priv
->ptpaddr
, systime_flags
);
846 priv
->systime_flags
= systime_flags
;
848 /* program Sub Second Increment reg */
849 stmmac_config_sub_second_increment(priv
, priv
->ptpaddr
,
850 priv
->plat
->clk_ptp_rate
,
852 temp
= div_u64(1000000000ULL, sec_inc
);
854 /* Store sub second increment for later use */
855 priv
->sub_second_inc
= sec_inc
;
857 /* calculate default added value:
859 * addend = (2^32)/freq_div_ratio;
860 * where, freq_div_ratio = 1e9ns/sec_inc
862 temp
= (u64
)(temp
<< 32);
863 priv
->default_addend
= div_u64(temp
, priv
->plat
->clk_ptp_rate
);
864 stmmac_config_addend(priv
, priv
->ptpaddr
, priv
->default_addend
);
866 /* initialize system time */
867 ktime_get_real_ts64(&now
);
869 /* lower 32 bits of tv_sec are safe until y2106 */
870 stmmac_init_systime(priv
, priv
->ptpaddr
, (u32
)now
.tv_sec
, now
.tv_nsec
);
874 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter
);
877 * stmmac_init_ptp - init PTP
878 * @priv: driver private structure
879 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
880 * This is done by looking at the HW cap. register.
881 * This function also registers the ptp driver.
883 static int stmmac_init_ptp(struct stmmac_priv
*priv
)
885 bool xmac
= priv
->plat
->has_gmac4
|| priv
->plat
->has_xgmac
;
888 if (priv
->plat
->ptp_clk_freq_config
)
889 priv
->plat
->ptp_clk_freq_config(priv
);
891 ret
= stmmac_init_tstamp_counter(priv
, STMMAC_HWTS_ACTIVE
);
896 /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
897 if (xmac
&& priv
->dma_cap
.atime_stamp
)
899 /* Dwmac 3.x core with extend_desc can support adv_ts */
900 else if (priv
->extend_desc
&& priv
->dma_cap
.atime_stamp
)
903 if (priv
->dma_cap
.time_stamp
)
904 netdev_info(priv
->dev
, "IEEE 1588-2002 Timestamp supported\n");
907 netdev_info(priv
->dev
,
908 "IEEE 1588-2008 Advanced Timestamp supported\n");
910 priv
->hwts_tx_en
= 0;
911 priv
->hwts_rx_en
= 0;
913 if (priv
->plat
->flags
& STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY
)
914 stmmac_hwtstamp_correct_latency(priv
, priv
);
919 static void stmmac_release_ptp(struct stmmac_priv
*priv
)
921 clk_disable_unprepare(priv
->plat
->clk_ptp_ref
);
922 stmmac_ptp_unregister(priv
);
926 * stmmac_mac_flow_ctrl - Configure flow control in all queues
927 * @priv: driver private structure
928 * @duplex: duplex passed to the next function
929 * Description: It is used for configuring the flow control in all queues
931 static void stmmac_mac_flow_ctrl(struct stmmac_priv
*priv
, u32 duplex
)
933 u32 tx_cnt
= priv
->plat
->tx_queues_to_use
;
935 stmmac_flow_ctrl(priv
, priv
->hw
, duplex
, priv
->flow_ctrl
,
936 priv
->pause
, tx_cnt
);
939 static struct phylink_pcs
*stmmac_mac_select_pcs(struct phylink_config
*config
,
940 phy_interface_t interface
)
942 struct stmmac_priv
*priv
= netdev_priv(to_net_dev(config
->dev
));
945 return &priv
->hw
->xpcs
->pcs
;
947 if (priv
->hw
->lynx_pcs
)
948 return priv
->hw
->lynx_pcs
;
953 static void stmmac_mac_config(struct phylink_config
*config
, unsigned int mode
,
954 const struct phylink_link_state
*state
)
956 /* Nothing to do, xpcs_config() handles everything */
959 static void stmmac_fpe_link_state_handle(struct stmmac_priv
*priv
, bool is_up
)
961 struct stmmac_fpe_cfg
*fpe_cfg
= priv
->plat
->fpe_cfg
;
962 enum stmmac_fpe_state
*lo_state
= &fpe_cfg
->lo_fpe_state
;
963 enum stmmac_fpe_state
*lp_state
= &fpe_cfg
->lp_fpe_state
;
964 bool *hs_enable
= &fpe_cfg
->hs_enable
;
966 if (is_up
&& *hs_enable
) {
967 stmmac_fpe_send_mpacket(priv
, priv
->ioaddr
, MPACKET_VERIFY
);
969 *lo_state
= FPE_STATE_OFF
;
970 *lp_state
= FPE_STATE_OFF
;
974 static void stmmac_mac_link_down(struct phylink_config
*config
,
975 unsigned int mode
, phy_interface_t interface
)
977 struct stmmac_priv
*priv
= netdev_priv(to_net_dev(config
->dev
));
979 stmmac_mac_set(priv
, priv
->ioaddr
, false);
980 priv
->eee_active
= false;
981 priv
->tx_lpi_enabled
= false;
982 priv
->eee_enabled
= stmmac_eee_init(priv
);
983 stmmac_set_eee_pls(priv
, priv
->hw
, false);
985 if (priv
->dma_cap
.fpesel
)
986 stmmac_fpe_link_state_handle(priv
, false);
989 static void stmmac_mac_link_up(struct phylink_config
*config
,
990 struct phy_device
*phy
,
991 unsigned int mode
, phy_interface_t interface
,
992 int speed
, int duplex
,
993 bool tx_pause
, bool rx_pause
)
995 struct stmmac_priv
*priv
= netdev_priv(to_net_dev(config
->dev
));
998 if ((priv
->plat
->flags
& STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP
) &&
999 priv
->plat
->serdes_powerup
)
1000 priv
->plat
->serdes_powerup(priv
->dev
, priv
->plat
->bsp_priv
);
1002 old_ctrl
= readl(priv
->ioaddr
+ MAC_CTRL_REG
);
1003 ctrl
= old_ctrl
& ~priv
->hw
->link
.speed_mask
;
1005 if (interface
== PHY_INTERFACE_MODE_USXGMII
) {
1008 ctrl
|= priv
->hw
->link
.xgmii
.speed10000
;
1011 ctrl
|= priv
->hw
->link
.xgmii
.speed5000
;
1014 ctrl
|= priv
->hw
->link
.xgmii
.speed2500
;
1019 } else if (interface
== PHY_INTERFACE_MODE_XLGMII
) {
1022 ctrl
|= priv
->hw
->link
.xlgmii
.speed100000
;
1025 ctrl
|= priv
->hw
->link
.xlgmii
.speed50000
;
1028 ctrl
|= priv
->hw
->link
.xlgmii
.speed40000
;
1031 ctrl
|= priv
->hw
->link
.xlgmii
.speed25000
;
1034 ctrl
|= priv
->hw
->link
.xgmii
.speed10000
;
1037 ctrl
|= priv
->hw
->link
.speed2500
;
1040 ctrl
|= priv
->hw
->link
.speed1000
;
1048 ctrl
|= priv
->hw
->link
.speed2500
;
1051 ctrl
|= priv
->hw
->link
.speed1000
;
1054 ctrl
|= priv
->hw
->link
.speed100
;
1057 ctrl
|= priv
->hw
->link
.speed10
;
1064 priv
->speed
= speed
;
1066 if (priv
->plat
->fix_mac_speed
)
1067 priv
->plat
->fix_mac_speed(priv
->plat
->bsp_priv
, speed
, mode
);
1070 ctrl
&= ~priv
->hw
->link
.duplex
;
1072 ctrl
|= priv
->hw
->link
.duplex
;
1074 /* Flow Control operation */
1075 if (rx_pause
&& tx_pause
)
1076 priv
->flow_ctrl
= FLOW_AUTO
;
1077 else if (rx_pause
&& !tx_pause
)
1078 priv
->flow_ctrl
= FLOW_RX
;
1079 else if (!rx_pause
&& tx_pause
)
1080 priv
->flow_ctrl
= FLOW_TX
;
1082 priv
->flow_ctrl
= FLOW_OFF
;
1084 stmmac_mac_flow_ctrl(priv
, duplex
);
1086 if (ctrl
!= old_ctrl
)
1087 writel(ctrl
, priv
->ioaddr
+ MAC_CTRL_REG
);
1089 stmmac_mac_set(priv
, priv
->ioaddr
, true);
1090 if (phy
&& priv
->dma_cap
.eee
) {
1092 phy_init_eee(phy
, !(priv
->plat
->flags
&
1093 STMMAC_FLAG_RX_CLK_RUNS_IN_LPI
)) >= 0;
1094 priv
->eee_enabled
= stmmac_eee_init(priv
);
1095 priv
->tx_lpi_enabled
= priv
->eee_enabled
;
1096 stmmac_set_eee_pls(priv
, priv
->hw
, true);
1099 if (priv
->dma_cap
.fpesel
)
1100 stmmac_fpe_link_state_handle(priv
, true);
1102 if (priv
->plat
->flags
& STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY
)
1103 stmmac_hwtstamp_correct_latency(priv
, priv
);
1106 static const struct phylink_mac_ops stmmac_phylink_mac_ops
= {
1107 .mac_select_pcs
= stmmac_mac_select_pcs
,
1108 .mac_config
= stmmac_mac_config
,
1109 .mac_link_down
= stmmac_mac_link_down
,
1110 .mac_link_up
= stmmac_mac_link_up
,
1114 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1115 * @priv: driver private structure
1116 * Description: this is to verify if the HW supports the PCS.
1117 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
1118 * configured for the TBI, RTBI, or SGMII PHY interface.
1120 static void stmmac_check_pcs_mode(struct stmmac_priv
*priv
)
1122 int interface
= priv
->plat
->mac_interface
;
1124 if (priv
->dma_cap
.pcs
) {
1125 if ((interface
== PHY_INTERFACE_MODE_RGMII
) ||
1126 (interface
== PHY_INTERFACE_MODE_RGMII_ID
) ||
1127 (interface
== PHY_INTERFACE_MODE_RGMII_RXID
) ||
1128 (interface
== PHY_INTERFACE_MODE_RGMII_TXID
)) {
1129 netdev_dbg(priv
->dev
, "PCS RGMII support enabled\n");
1130 priv
->hw
->pcs
= STMMAC_PCS_RGMII
;
1131 } else if (interface
== PHY_INTERFACE_MODE_SGMII
) {
1132 netdev_dbg(priv
->dev
, "PCS SGMII support enabled\n");
1133 priv
->hw
->pcs
= STMMAC_PCS_SGMII
;
1139 * stmmac_init_phy - PHY initialization
1140 * @dev: net device structure
1141 * Description: it initializes the driver's PHY state, and attaches the PHY
1142 * to the mac driver.
1146 static int stmmac_init_phy(struct net_device
*dev
)
1148 struct stmmac_priv
*priv
= netdev_priv(dev
);
1149 struct fwnode_handle
*phy_fwnode
;
1150 struct fwnode_handle
*fwnode
;
1153 if (!phylink_expects_phy(priv
->phylink
))
1156 fwnode
= priv
->plat
->port_node
;
1158 fwnode
= dev_fwnode(priv
->device
);
1161 phy_fwnode
= fwnode_get_phy_node(fwnode
);
1165 /* Some DT bindings do not set-up the PHY handle. Let's try to
1168 if (!phy_fwnode
|| IS_ERR(phy_fwnode
)) {
1169 int addr
= priv
->plat
->phy_addr
;
1170 struct phy_device
*phydev
;
1173 netdev_err(priv
->dev
, "no phy found\n");
1177 phydev
= mdiobus_get_phy(priv
->mii
, addr
);
1179 netdev_err(priv
->dev
, "no phy at addr %d\n", addr
);
1183 ret
= phylink_connect_phy(priv
->phylink
, phydev
);
1185 fwnode_handle_put(phy_fwnode
);
1186 ret
= phylink_fwnode_phy_connect(priv
->phylink
, fwnode
, 0);
1189 if (!priv
->plat
->pmt
) {
1190 struct ethtool_wolinfo wol
= { .cmd
= ETHTOOL_GWOL
};
1192 phylink_ethtool_get_wol(priv
->phylink
, &wol
);
1193 device_set_wakeup_capable(priv
->device
, !!wol
.supported
);
1194 device_set_wakeup_enable(priv
->device
, !!wol
.wolopts
);
1200 static void stmmac_set_half_duplex(struct stmmac_priv
*priv
)
1202 /* Half-Duplex can only work with single tx queue */
1203 if (priv
->plat
->tx_queues_to_use
> 1)
1204 priv
->phylink_config
.mac_capabilities
&=
1205 ~(MAC_10HD
| MAC_100HD
| MAC_1000HD
);
1207 priv
->phylink_config
.mac_capabilities
|=
1208 (MAC_10HD
| MAC_100HD
| MAC_1000HD
);
1211 static int stmmac_phy_setup(struct stmmac_priv
*priv
)
1213 struct stmmac_mdio_bus_data
*mdio_bus_data
;
1214 int mode
= priv
->plat
->phy_interface
;
1215 struct fwnode_handle
*fwnode
;
1216 struct phylink
*phylink
;
1219 priv
->phylink_config
.dev
= &priv
->dev
->dev
;
1220 priv
->phylink_config
.type
= PHYLINK_NETDEV
;
1221 priv
->phylink_config
.mac_managed_pm
= true;
1223 mdio_bus_data
= priv
->plat
->mdio_bus_data
;
1225 priv
->phylink_config
.ovr_an_inband
=
1226 mdio_bus_data
->xpcs_an_inband
;
1228 /* Set the platform/firmware specified interface mode. Note, phylink
1229 * deals with the PHY interface mode, not the MAC interface mode.
1231 __set_bit(mode
, priv
->phylink_config
.supported_interfaces
);
1233 /* If we have an xpcs, it defines which PHY interfaces are supported. */
1235 xpcs_get_interfaces(priv
->hw
->xpcs
,
1236 priv
->phylink_config
.supported_interfaces
);
1238 priv
->phylink_config
.mac_capabilities
= MAC_ASYM_PAUSE
| MAC_SYM_PAUSE
|
1239 MAC_10FD
| MAC_100FD
|
1242 stmmac_set_half_duplex(priv
);
1244 /* Get the MAC specific capabilities */
1245 stmmac_mac_phylink_get_caps(priv
);
1247 max_speed
= priv
->plat
->max_speed
;
1249 phylink_limit_mac_speed(&priv
->phylink_config
, max_speed
);
1251 fwnode
= priv
->plat
->port_node
;
1253 fwnode
= dev_fwnode(priv
->device
);
1255 phylink
= phylink_create(&priv
->phylink_config
, fwnode
,
1256 mode
, &stmmac_phylink_mac_ops
);
1257 if (IS_ERR(phylink
))
1258 return PTR_ERR(phylink
);
1260 priv
->phylink
= phylink
;
1264 static void stmmac_display_rx_rings(struct stmmac_priv
*priv
,
1265 struct stmmac_dma_conf
*dma_conf
)
1267 u32 rx_cnt
= priv
->plat
->rx_queues_to_use
;
1268 unsigned int desc_size
;
1272 /* Display RX rings */
1273 for (queue
= 0; queue
< rx_cnt
; queue
++) {
1274 struct stmmac_rx_queue
*rx_q
= &dma_conf
->rx_queue
[queue
];
1276 pr_info("\tRX Queue %u rings\n", queue
);
1278 if (priv
->extend_desc
) {
1279 head_rx
= (void *)rx_q
->dma_erx
;
1280 desc_size
= sizeof(struct dma_extended_desc
);
1282 head_rx
= (void *)rx_q
->dma_rx
;
1283 desc_size
= sizeof(struct dma_desc
);
1286 /* Display RX ring */
1287 stmmac_display_ring(priv
, head_rx
, dma_conf
->dma_rx_size
, true,
1288 rx_q
->dma_rx_phy
, desc_size
);
1292 static void stmmac_display_tx_rings(struct stmmac_priv
*priv
,
1293 struct stmmac_dma_conf
*dma_conf
)
1295 u32 tx_cnt
= priv
->plat
->tx_queues_to_use
;
1296 unsigned int desc_size
;
1300 /* Display TX rings */
1301 for (queue
= 0; queue
< tx_cnt
; queue
++) {
1302 struct stmmac_tx_queue
*tx_q
= &dma_conf
->tx_queue
[queue
];
1304 pr_info("\tTX Queue %d rings\n", queue
);
1306 if (priv
->extend_desc
) {
1307 head_tx
= (void *)tx_q
->dma_etx
;
1308 desc_size
= sizeof(struct dma_extended_desc
);
1309 } else if (tx_q
->tbs
& STMMAC_TBS_AVAIL
) {
1310 head_tx
= (void *)tx_q
->dma_entx
;
1311 desc_size
= sizeof(struct dma_edesc
);
1313 head_tx
= (void *)tx_q
->dma_tx
;
1314 desc_size
= sizeof(struct dma_desc
);
1317 stmmac_display_ring(priv
, head_tx
, dma_conf
->dma_tx_size
, false,
1318 tx_q
->dma_tx_phy
, desc_size
);
1322 static void stmmac_display_rings(struct stmmac_priv
*priv
,
1323 struct stmmac_dma_conf
*dma_conf
)
1325 /* Display RX ring */
1326 stmmac_display_rx_rings(priv
, dma_conf
);
1328 /* Display TX ring */
1329 stmmac_display_tx_rings(priv
, dma_conf
);
1332 static int stmmac_set_bfsize(int mtu
, int bufsize
)
1336 if (mtu
>= BUF_SIZE_8KiB
)
1337 ret
= BUF_SIZE_16KiB
;
1338 else if (mtu
>= BUF_SIZE_4KiB
)
1339 ret
= BUF_SIZE_8KiB
;
1340 else if (mtu
>= BUF_SIZE_2KiB
)
1341 ret
= BUF_SIZE_4KiB
;
1342 else if (mtu
> DEFAULT_BUFSIZE
)
1343 ret
= BUF_SIZE_2KiB
;
1345 ret
= DEFAULT_BUFSIZE
;
1351 * stmmac_clear_rx_descriptors - clear RX descriptors
1352 * @priv: driver private structure
1353 * @dma_conf: structure to take the dma data
1354 * @queue: RX queue index
1355 * Description: this function is called to clear the RX descriptors
1356 * in case of both basic and extended descriptors are used.
1358 static void stmmac_clear_rx_descriptors(struct stmmac_priv
*priv
,
1359 struct stmmac_dma_conf
*dma_conf
,
1362 struct stmmac_rx_queue
*rx_q
= &dma_conf
->rx_queue
[queue
];
1365 /* Clear the RX descriptors */
1366 for (i
= 0; i
< dma_conf
->dma_rx_size
; i
++)
1367 if (priv
->extend_desc
)
1368 stmmac_init_rx_desc(priv
, &rx_q
->dma_erx
[i
].basic
,
1369 priv
->use_riwt
, priv
->mode
,
1370 (i
== dma_conf
->dma_rx_size
- 1),
1371 dma_conf
->dma_buf_sz
);
1373 stmmac_init_rx_desc(priv
, &rx_q
->dma_rx
[i
],
1374 priv
->use_riwt
, priv
->mode
,
1375 (i
== dma_conf
->dma_rx_size
- 1),
1376 dma_conf
->dma_buf_sz
);
1380 * stmmac_clear_tx_descriptors - clear tx descriptors
1381 * @priv: driver private structure
1382 * @dma_conf: structure to take the dma data
1383 * @queue: TX queue index.
1384 * Description: this function is called to clear the TX descriptors
1385 * in case of both basic and extended descriptors are used.
1387 static void stmmac_clear_tx_descriptors(struct stmmac_priv
*priv
,
1388 struct stmmac_dma_conf
*dma_conf
,
1391 struct stmmac_tx_queue
*tx_q
= &dma_conf
->tx_queue
[queue
];
1394 /* Clear the TX descriptors */
1395 for (i
= 0; i
< dma_conf
->dma_tx_size
; i
++) {
1396 int last
= (i
== (dma_conf
->dma_tx_size
- 1));
1399 if (priv
->extend_desc
)
1400 p
= &tx_q
->dma_etx
[i
].basic
;
1401 else if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
1402 p
= &tx_q
->dma_entx
[i
].basic
;
1404 p
= &tx_q
->dma_tx
[i
];
1406 stmmac_init_tx_desc(priv
, p
, priv
->mode
, last
);
1411 * stmmac_clear_descriptors - clear descriptors
1412 * @priv: driver private structure
1413 * @dma_conf: structure to take the dma data
1414 * Description: this function is called to clear the TX and RX descriptors
1415 * in case of both basic and extended descriptors are used.
1417 static void stmmac_clear_descriptors(struct stmmac_priv
*priv
,
1418 struct stmmac_dma_conf
*dma_conf
)
1420 u32 rx_queue_cnt
= priv
->plat
->rx_queues_to_use
;
1421 u32 tx_queue_cnt
= priv
->plat
->tx_queues_to_use
;
1424 /* Clear the RX descriptors */
1425 for (queue
= 0; queue
< rx_queue_cnt
; queue
++)
1426 stmmac_clear_rx_descriptors(priv
, dma_conf
, queue
);
1428 /* Clear the TX descriptors */
1429 for (queue
= 0; queue
< tx_queue_cnt
; queue
++)
1430 stmmac_clear_tx_descriptors(priv
, dma_conf
, queue
);
1434 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1435 * @priv: driver private structure
1436 * @dma_conf: structure to take the dma data
1437 * @p: descriptor pointer
1438 * @i: descriptor index
1440 * @queue: RX queue index
1441 * Description: this function is called to allocate a receive buffer, perform
1442 * the DMA mapping and init the descriptor.
1444 static int stmmac_init_rx_buffers(struct stmmac_priv
*priv
,
1445 struct stmmac_dma_conf
*dma_conf
,
1447 int i
, gfp_t flags
, u32 queue
)
1449 struct stmmac_rx_queue
*rx_q
= &dma_conf
->rx_queue
[queue
];
1450 struct stmmac_rx_buffer
*buf
= &rx_q
->buf_pool
[i
];
1451 gfp_t gfp
= (GFP_ATOMIC
| __GFP_NOWARN
);
1453 if (priv
->dma_cap
.host_dma_width
<= 32)
1457 buf
->page
= page_pool_alloc_pages(rx_q
->page_pool
, gfp
);
1460 buf
->page_offset
= stmmac_rx_offset(priv
);
1463 if (priv
->sph
&& !buf
->sec_page
) {
1464 buf
->sec_page
= page_pool_alloc_pages(rx_q
->page_pool
, gfp
);
1468 buf
->sec_addr
= page_pool_get_dma_addr(buf
->sec_page
);
1469 stmmac_set_desc_sec_addr(priv
, p
, buf
->sec_addr
, true);
1471 buf
->sec_page
= NULL
;
1472 stmmac_set_desc_sec_addr(priv
, p
, buf
->sec_addr
, false);
1475 buf
->addr
= page_pool_get_dma_addr(buf
->page
) + buf
->page_offset
;
1477 stmmac_set_desc_addr(priv
, p
, buf
->addr
);
1478 if (dma_conf
->dma_buf_sz
== BUF_SIZE_16KiB
)
1479 stmmac_init_desc3(priv
, p
);
1485 * stmmac_free_rx_buffer - free RX dma buffers
1486 * @priv: private structure
1490 static void stmmac_free_rx_buffer(struct stmmac_priv
*priv
,
1491 struct stmmac_rx_queue
*rx_q
,
1494 struct stmmac_rx_buffer
*buf
= &rx_q
->buf_pool
[i
];
1497 page_pool_put_full_page(rx_q
->page_pool
, buf
->page
, false);
1501 page_pool_put_full_page(rx_q
->page_pool
, buf
->sec_page
, false);
1502 buf
->sec_page
= NULL
;
1506 * stmmac_free_tx_buffer - free RX dma buffers
1507 * @priv: private structure
1508 * @dma_conf: structure to take the dma data
1509 * @queue: RX queue index
1512 static void stmmac_free_tx_buffer(struct stmmac_priv
*priv
,
1513 struct stmmac_dma_conf
*dma_conf
,
1516 struct stmmac_tx_queue
*tx_q
= &dma_conf
->tx_queue
[queue
];
1518 if (tx_q
->tx_skbuff_dma
[i
].buf
&&
1519 tx_q
->tx_skbuff_dma
[i
].buf_type
!= STMMAC_TXBUF_T_XDP_TX
) {
1520 if (tx_q
->tx_skbuff_dma
[i
].map_as_page
)
1521 dma_unmap_page(priv
->device
,
1522 tx_q
->tx_skbuff_dma
[i
].buf
,
1523 tx_q
->tx_skbuff_dma
[i
].len
,
1526 dma_unmap_single(priv
->device
,
1527 tx_q
->tx_skbuff_dma
[i
].buf
,
1528 tx_q
->tx_skbuff_dma
[i
].len
,
1532 if (tx_q
->xdpf
[i
] &&
1533 (tx_q
->tx_skbuff_dma
[i
].buf_type
== STMMAC_TXBUF_T_XDP_TX
||
1534 tx_q
->tx_skbuff_dma
[i
].buf_type
== STMMAC_TXBUF_T_XDP_NDO
)) {
1535 xdp_return_frame(tx_q
->xdpf
[i
]);
1536 tx_q
->xdpf
[i
] = NULL
;
1539 if (tx_q
->tx_skbuff_dma
[i
].buf_type
== STMMAC_TXBUF_T_XSK_TX
)
1540 tx_q
->xsk_frames_done
++;
1542 if (tx_q
->tx_skbuff
[i
] &&
1543 tx_q
->tx_skbuff_dma
[i
].buf_type
== STMMAC_TXBUF_T_SKB
) {
1544 dev_kfree_skb_any(tx_q
->tx_skbuff
[i
]);
1545 tx_q
->tx_skbuff
[i
] = NULL
;
1548 tx_q
->tx_skbuff_dma
[i
].buf
= 0;
1549 tx_q
->tx_skbuff_dma
[i
].map_as_page
= false;
1553 * dma_free_rx_skbufs - free RX dma buffers
1554 * @priv: private structure
1555 * @dma_conf: structure to take the dma data
1556 * @queue: RX queue index
1558 static void dma_free_rx_skbufs(struct stmmac_priv
*priv
,
1559 struct stmmac_dma_conf
*dma_conf
,
1562 struct stmmac_rx_queue
*rx_q
= &dma_conf
->rx_queue
[queue
];
1565 for (i
= 0; i
< dma_conf
->dma_rx_size
; i
++)
1566 stmmac_free_rx_buffer(priv
, rx_q
, i
);
1569 static int stmmac_alloc_rx_buffers(struct stmmac_priv
*priv
,
1570 struct stmmac_dma_conf
*dma_conf
,
1571 u32 queue
, gfp_t flags
)
1573 struct stmmac_rx_queue
*rx_q
= &dma_conf
->rx_queue
[queue
];
1576 for (i
= 0; i
< dma_conf
->dma_rx_size
; i
++) {
1580 if (priv
->extend_desc
)
1581 p
= &((rx_q
->dma_erx
+ i
)->basic
);
1583 p
= rx_q
->dma_rx
+ i
;
1585 ret
= stmmac_init_rx_buffers(priv
, dma_conf
, p
, i
, flags
,
1590 rx_q
->buf_alloc_num
++;
1597 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1598 * @priv: private structure
1599 * @dma_conf: structure to take the dma data
1600 * @queue: RX queue index
1602 static void dma_free_rx_xskbufs(struct stmmac_priv
*priv
,
1603 struct stmmac_dma_conf
*dma_conf
,
1606 struct stmmac_rx_queue
*rx_q
= &dma_conf
->rx_queue
[queue
];
1609 for (i
= 0; i
< dma_conf
->dma_rx_size
; i
++) {
1610 struct stmmac_rx_buffer
*buf
= &rx_q
->buf_pool
[i
];
1615 xsk_buff_free(buf
->xdp
);
1620 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv
*priv
,
1621 struct stmmac_dma_conf
*dma_conf
,
1624 struct stmmac_rx_queue
*rx_q
= &dma_conf
->rx_queue
[queue
];
1627 /* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
1628 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1629 * use this macro to make sure no size violations.
1631 XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff
);
1633 for (i
= 0; i
< dma_conf
->dma_rx_size
; i
++) {
1634 struct stmmac_rx_buffer
*buf
;
1635 dma_addr_t dma_addr
;
1638 if (priv
->extend_desc
)
1639 p
= (struct dma_desc
*)(rx_q
->dma_erx
+ i
);
1641 p
= rx_q
->dma_rx
+ i
;
1643 buf
= &rx_q
->buf_pool
[i
];
1645 buf
->xdp
= xsk_buff_alloc(rx_q
->xsk_pool
);
1649 dma_addr
= xsk_buff_xdp_get_dma(buf
->xdp
);
1650 stmmac_set_desc_addr(priv
, p
, dma_addr
);
1651 rx_q
->buf_alloc_num
++;
1657 static struct xsk_buff_pool
*stmmac_get_xsk_pool(struct stmmac_priv
*priv
, u32 queue
)
1659 if (!stmmac_xdp_is_enabled(priv
) || !test_bit(queue
, priv
->af_xdp_zc_qps
))
1662 return xsk_get_pool_from_qid(priv
->dev
, queue
);
1666 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1667 * @priv: driver private structure
1668 * @dma_conf: structure to take the dma data
1669 * @queue: RX queue index
1671 * Description: this function initializes the DMA RX descriptors
1672 * and allocates the socket buffers. It supports the chained and ring
1675 static int __init_dma_rx_desc_rings(struct stmmac_priv
*priv
,
1676 struct stmmac_dma_conf
*dma_conf
,
1677 u32 queue
, gfp_t flags
)
1679 struct stmmac_rx_queue
*rx_q
= &dma_conf
->rx_queue
[queue
];
1682 netif_dbg(priv
, probe
, priv
->dev
,
1683 "(%s) dma_rx_phy=0x%08x\n", __func__
,
1684 (u32
)rx_q
->dma_rx_phy
);
1686 stmmac_clear_rx_descriptors(priv
, dma_conf
, queue
);
1688 xdp_rxq_info_unreg_mem_model(&rx_q
->xdp_rxq
);
1690 rx_q
->xsk_pool
= stmmac_get_xsk_pool(priv
, queue
);
1692 if (rx_q
->xsk_pool
) {
1693 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q
->xdp_rxq
,
1694 MEM_TYPE_XSK_BUFF_POOL
,
1696 netdev_info(priv
->dev
,
1697 "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1699 xsk_pool_set_rxq_info(rx_q
->xsk_pool
, &rx_q
->xdp_rxq
);
1701 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q
->xdp_rxq
,
1704 netdev_info(priv
->dev
,
1705 "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1709 if (rx_q
->xsk_pool
) {
1710 /* RX XDP ZC buffer pool may not be populated, e.g.
1713 stmmac_alloc_rx_buffers_zc(priv
, dma_conf
, queue
);
1715 ret
= stmmac_alloc_rx_buffers(priv
, dma_conf
, queue
, flags
);
1720 /* Setup the chained descriptor addresses */
1721 if (priv
->mode
== STMMAC_CHAIN_MODE
) {
1722 if (priv
->extend_desc
)
1723 stmmac_mode_init(priv
, rx_q
->dma_erx
,
1725 dma_conf
->dma_rx_size
, 1);
1727 stmmac_mode_init(priv
, rx_q
->dma_rx
,
1729 dma_conf
->dma_rx_size
, 0);
1735 static int init_dma_rx_desc_rings(struct net_device
*dev
,
1736 struct stmmac_dma_conf
*dma_conf
,
1739 struct stmmac_priv
*priv
= netdev_priv(dev
);
1740 u32 rx_count
= priv
->plat
->rx_queues_to_use
;
1744 /* RX INITIALIZATION */
1745 netif_dbg(priv
, probe
, priv
->dev
,
1746 "SKB addresses:\nskb\t\tskb data\tdma data\n");
1748 for (queue
= 0; queue
< rx_count
; queue
++) {
1749 ret
= __init_dma_rx_desc_rings(priv
, dma_conf
, queue
, flags
);
1751 goto err_init_rx_buffers
;
1756 err_init_rx_buffers
:
1757 while (queue
>= 0) {
1758 struct stmmac_rx_queue
*rx_q
= &dma_conf
->rx_queue
[queue
];
1761 dma_free_rx_xskbufs(priv
, dma_conf
, queue
);
1763 dma_free_rx_skbufs(priv
, dma_conf
, queue
);
1765 rx_q
->buf_alloc_num
= 0;
1766 rx_q
->xsk_pool
= NULL
;
1775 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1776 * @priv: driver private structure
1777 * @dma_conf: structure to take the dma data
1778 * @queue: TX queue index
1779 * Description: this function initializes the DMA TX descriptors
1780 * and allocates the socket buffers. It supports the chained and ring
1783 static int __init_dma_tx_desc_rings(struct stmmac_priv
*priv
,
1784 struct stmmac_dma_conf
*dma_conf
,
1787 struct stmmac_tx_queue
*tx_q
= &dma_conf
->tx_queue
[queue
];
1790 netif_dbg(priv
, probe
, priv
->dev
,
1791 "(%s) dma_tx_phy=0x%08x\n", __func__
,
1792 (u32
)tx_q
->dma_tx_phy
);
1794 /* Setup the chained descriptor addresses */
1795 if (priv
->mode
== STMMAC_CHAIN_MODE
) {
1796 if (priv
->extend_desc
)
1797 stmmac_mode_init(priv
, tx_q
->dma_etx
,
1799 dma_conf
->dma_tx_size
, 1);
1800 else if (!(tx_q
->tbs
& STMMAC_TBS_AVAIL
))
1801 stmmac_mode_init(priv
, tx_q
->dma_tx
,
1803 dma_conf
->dma_tx_size
, 0);
1806 tx_q
->xsk_pool
= stmmac_get_xsk_pool(priv
, queue
);
1808 for (i
= 0; i
< dma_conf
->dma_tx_size
; i
++) {
1811 if (priv
->extend_desc
)
1812 p
= &((tx_q
->dma_etx
+ i
)->basic
);
1813 else if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
1814 p
= &((tx_q
->dma_entx
+ i
)->basic
);
1816 p
= tx_q
->dma_tx
+ i
;
1818 stmmac_clear_desc(priv
, p
);
1820 tx_q
->tx_skbuff_dma
[i
].buf
= 0;
1821 tx_q
->tx_skbuff_dma
[i
].map_as_page
= false;
1822 tx_q
->tx_skbuff_dma
[i
].len
= 0;
1823 tx_q
->tx_skbuff_dma
[i
].last_segment
= false;
1824 tx_q
->tx_skbuff
[i
] = NULL
;
1830 static int init_dma_tx_desc_rings(struct net_device
*dev
,
1831 struct stmmac_dma_conf
*dma_conf
)
1833 struct stmmac_priv
*priv
= netdev_priv(dev
);
1837 tx_queue_cnt
= priv
->plat
->tx_queues_to_use
;
1839 for (queue
= 0; queue
< tx_queue_cnt
; queue
++)
1840 __init_dma_tx_desc_rings(priv
, dma_conf
, queue
);
1846 * init_dma_desc_rings - init the RX/TX descriptor rings
1847 * @dev: net device structure
1848 * @dma_conf: structure to take the dma data
1850 * Description: this function initializes the DMA RX/TX descriptors
1851 * and allocates the socket buffers. It supports the chained and ring
1854 static int init_dma_desc_rings(struct net_device
*dev
,
1855 struct stmmac_dma_conf
*dma_conf
,
1858 struct stmmac_priv
*priv
= netdev_priv(dev
);
1861 ret
= init_dma_rx_desc_rings(dev
, dma_conf
, flags
);
1865 ret
= init_dma_tx_desc_rings(dev
, dma_conf
);
1867 stmmac_clear_descriptors(priv
, dma_conf
);
1869 if (netif_msg_hw(priv
))
1870 stmmac_display_rings(priv
, dma_conf
);
1876 * dma_free_tx_skbufs - free TX dma buffers
1877 * @priv: private structure
1878 * @dma_conf: structure to take the dma data
1879 * @queue: TX queue index
1881 static void dma_free_tx_skbufs(struct stmmac_priv
*priv
,
1882 struct stmmac_dma_conf
*dma_conf
,
1885 struct stmmac_tx_queue
*tx_q
= &dma_conf
->tx_queue
[queue
];
1888 tx_q
->xsk_frames_done
= 0;
1890 for (i
= 0; i
< dma_conf
->dma_tx_size
; i
++)
1891 stmmac_free_tx_buffer(priv
, dma_conf
, queue
, i
);
1893 if (tx_q
->xsk_pool
&& tx_q
->xsk_frames_done
) {
1894 xsk_tx_completed(tx_q
->xsk_pool
, tx_q
->xsk_frames_done
);
1895 tx_q
->xsk_frames_done
= 0;
1896 tx_q
->xsk_pool
= NULL
;
1901 * stmmac_free_tx_skbufs - free TX skb buffers
1902 * @priv: private structure
1904 static void stmmac_free_tx_skbufs(struct stmmac_priv
*priv
)
1906 u32 tx_queue_cnt
= priv
->plat
->tx_queues_to_use
;
1909 for (queue
= 0; queue
< tx_queue_cnt
; queue
++)
1910 dma_free_tx_skbufs(priv
, &priv
->dma_conf
, queue
);
1914 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1915 * @priv: private structure
1916 * @dma_conf: structure to take the dma data
1917 * @queue: RX queue index
1919 static void __free_dma_rx_desc_resources(struct stmmac_priv
*priv
,
1920 struct stmmac_dma_conf
*dma_conf
,
1923 struct stmmac_rx_queue
*rx_q
= &dma_conf
->rx_queue
[queue
];
1925 /* Release the DMA RX socket buffers */
1927 dma_free_rx_xskbufs(priv
, dma_conf
, queue
);
1929 dma_free_rx_skbufs(priv
, dma_conf
, queue
);
1931 rx_q
->buf_alloc_num
= 0;
1932 rx_q
->xsk_pool
= NULL
;
1934 /* Free DMA regions of consistent memory previously allocated */
1935 if (!priv
->extend_desc
)
1936 dma_free_coherent(priv
->device
, dma_conf
->dma_rx_size
*
1937 sizeof(struct dma_desc
),
1938 rx_q
->dma_rx
, rx_q
->dma_rx_phy
);
1940 dma_free_coherent(priv
->device
, dma_conf
->dma_rx_size
*
1941 sizeof(struct dma_extended_desc
),
1942 rx_q
->dma_erx
, rx_q
->dma_rx_phy
);
1944 if (xdp_rxq_info_is_reg(&rx_q
->xdp_rxq
))
1945 xdp_rxq_info_unreg(&rx_q
->xdp_rxq
);
1947 kfree(rx_q
->buf_pool
);
1948 if (rx_q
->page_pool
)
1949 page_pool_destroy(rx_q
->page_pool
);
1952 static void free_dma_rx_desc_resources(struct stmmac_priv
*priv
,
1953 struct stmmac_dma_conf
*dma_conf
)
1955 u32 rx_count
= priv
->plat
->rx_queues_to_use
;
1958 /* Free RX queue resources */
1959 for (queue
= 0; queue
< rx_count
; queue
++)
1960 __free_dma_rx_desc_resources(priv
, dma_conf
, queue
);
1964 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1965 * @priv: private structure
1966 * @dma_conf: structure to take the dma data
1967 * @queue: TX queue index
1969 static void __free_dma_tx_desc_resources(struct stmmac_priv
*priv
,
1970 struct stmmac_dma_conf
*dma_conf
,
1973 struct stmmac_tx_queue
*tx_q
= &dma_conf
->tx_queue
[queue
];
1977 /* Release the DMA TX socket buffers */
1978 dma_free_tx_skbufs(priv
, dma_conf
, queue
);
1980 if (priv
->extend_desc
) {
1981 size
= sizeof(struct dma_extended_desc
);
1982 addr
= tx_q
->dma_etx
;
1983 } else if (tx_q
->tbs
& STMMAC_TBS_AVAIL
) {
1984 size
= sizeof(struct dma_edesc
);
1985 addr
= tx_q
->dma_entx
;
1987 size
= sizeof(struct dma_desc
);
1988 addr
= tx_q
->dma_tx
;
1991 size
*= dma_conf
->dma_tx_size
;
1993 dma_free_coherent(priv
->device
, size
, addr
, tx_q
->dma_tx_phy
);
1995 kfree(tx_q
->tx_skbuff_dma
);
1996 kfree(tx_q
->tx_skbuff
);
1999 static void free_dma_tx_desc_resources(struct stmmac_priv
*priv
,
2000 struct stmmac_dma_conf
*dma_conf
)
2002 u32 tx_count
= priv
->plat
->tx_queues_to_use
;
2005 /* Free TX queue resources */
2006 for (queue
= 0; queue
< tx_count
; queue
++)
2007 __free_dma_tx_desc_resources(priv
, dma_conf
, queue
);
2011 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2012 * @priv: private structure
2013 * @dma_conf: structure to take the dma data
2014 * @queue: RX queue index
2015 * Description: according to which descriptor can be used (extend or basic)
2016 * this function allocates the resources for TX and RX paths. In case of
2017 * reception, for example, it pre-allocated the RX socket buffer in order to
2018 * allow zero-copy mechanism.
2020 static int __alloc_dma_rx_desc_resources(struct stmmac_priv
*priv
,
2021 struct stmmac_dma_conf
*dma_conf
,
2024 struct stmmac_rx_queue
*rx_q
= &dma_conf
->rx_queue
[queue
];
2025 struct stmmac_channel
*ch
= &priv
->channel
[queue
];
2026 bool xdp_prog
= stmmac_xdp_is_enabled(priv
);
2027 struct page_pool_params pp_params
= { 0 };
2028 unsigned int num_pages
;
2029 unsigned int napi_id
;
2032 rx_q
->queue_index
= queue
;
2033 rx_q
->priv_data
= priv
;
2035 pp_params
.flags
= PP_FLAG_DMA_MAP
| PP_FLAG_DMA_SYNC_DEV
;
2036 pp_params
.pool_size
= dma_conf
->dma_rx_size
;
2037 num_pages
= DIV_ROUND_UP(dma_conf
->dma_buf_sz
, PAGE_SIZE
);
2038 pp_params
.order
= ilog2(num_pages
);
2039 pp_params
.nid
= dev_to_node(priv
->device
);
2040 pp_params
.dev
= priv
->device
;
2041 pp_params
.dma_dir
= xdp_prog
? DMA_BIDIRECTIONAL
: DMA_FROM_DEVICE
;
2042 pp_params
.offset
= stmmac_rx_offset(priv
);
2043 pp_params
.max_len
= STMMAC_MAX_RX_BUF_SIZE(num_pages
);
2045 rx_q
->page_pool
= page_pool_create(&pp_params
);
2046 if (IS_ERR(rx_q
->page_pool
)) {
2047 ret
= PTR_ERR(rx_q
->page_pool
);
2048 rx_q
->page_pool
= NULL
;
2052 rx_q
->buf_pool
= kcalloc(dma_conf
->dma_rx_size
,
2053 sizeof(*rx_q
->buf_pool
),
2055 if (!rx_q
->buf_pool
)
2058 if (priv
->extend_desc
) {
2059 rx_q
->dma_erx
= dma_alloc_coherent(priv
->device
,
2060 dma_conf
->dma_rx_size
*
2061 sizeof(struct dma_extended_desc
),
2068 rx_q
->dma_rx
= dma_alloc_coherent(priv
->device
,
2069 dma_conf
->dma_rx_size
*
2070 sizeof(struct dma_desc
),
2077 if (stmmac_xdp_is_enabled(priv
) &&
2078 test_bit(queue
, priv
->af_xdp_zc_qps
))
2079 napi_id
= ch
->rxtx_napi
.napi_id
;
2081 napi_id
= ch
->rx_napi
.napi_id
;
2083 ret
= xdp_rxq_info_reg(&rx_q
->xdp_rxq
, priv
->dev
,
2087 netdev_err(priv
->dev
, "Failed to register xdp rxq info\n");
2094 static int alloc_dma_rx_desc_resources(struct stmmac_priv
*priv
,
2095 struct stmmac_dma_conf
*dma_conf
)
2097 u32 rx_count
= priv
->plat
->rx_queues_to_use
;
2101 /* RX queues buffers and DMA */
2102 for (queue
= 0; queue
< rx_count
; queue
++) {
2103 ret
= __alloc_dma_rx_desc_resources(priv
, dma_conf
, queue
);
2111 free_dma_rx_desc_resources(priv
, dma_conf
);
2117 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2118 * @priv: private structure
2119 * @dma_conf: structure to take the dma data
2120 * @queue: TX queue index
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for the TX path: the descriptor ring
 * and the tx_skbuff/tx_skbuff_dma bookkeeping arrays.
2126 static int __alloc_dma_tx_desc_resources(struct stmmac_priv
*priv
,
2127 struct stmmac_dma_conf
*dma_conf
,
2130 struct stmmac_tx_queue
*tx_q
= &dma_conf
->tx_queue
[queue
];
2134 tx_q
->queue_index
= queue
;
2135 tx_q
->priv_data
= priv
;
2137 tx_q
->tx_skbuff_dma
= kcalloc(dma_conf
->dma_tx_size
,
2138 sizeof(*tx_q
->tx_skbuff_dma
),
2140 if (!tx_q
->tx_skbuff_dma
)
2143 tx_q
->tx_skbuff
= kcalloc(dma_conf
->dma_tx_size
,
2144 sizeof(struct sk_buff
*),
2146 if (!tx_q
->tx_skbuff
)
2149 if (priv
->extend_desc
)
2150 size
= sizeof(struct dma_extended_desc
);
2151 else if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
2152 size
= sizeof(struct dma_edesc
);
2154 size
= sizeof(struct dma_desc
);
2156 size
*= dma_conf
->dma_tx_size
;
2158 addr
= dma_alloc_coherent(priv
->device
, size
,
2159 &tx_q
->dma_tx_phy
, GFP_KERNEL
);
2163 if (priv
->extend_desc
)
2164 tx_q
->dma_etx
= addr
;
2165 else if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
2166 tx_q
->dma_entx
= addr
;
2168 tx_q
->dma_tx
= addr
;
2173 static int alloc_dma_tx_desc_resources(struct stmmac_priv
*priv
,
2174 struct stmmac_dma_conf
*dma_conf
)
2176 u32 tx_count
= priv
->plat
->tx_queues_to_use
;
2180 /* TX queues buffers and DMA */
2181 for (queue
= 0; queue
< tx_count
; queue
++) {
2182 ret
= __alloc_dma_tx_desc_resources(priv
, dma_conf
, queue
);
2190 free_dma_tx_desc_resources(priv
, dma_conf
);
/**
 * alloc_dma_desc_resources - alloc TX/RX resources.
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffer in order to
 * allow zero-copy mechanism.
 */
static int alloc_dma_desc_resources(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf)
{
	/* RX Allocation */
	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);

	if (ret)
		return ret;

	ret = alloc_dma_tx_desc_resources(priv, dma_conf);

	return ret;
}

/**
 * free_dma_desc_resources - free dma desc resources
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 */
static void free_dma_desc_resources(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf)
{
	/* Release the DMA TX socket buffers */
	free_dma_tx_desc_resources(priv, dma_conf);

	/* Release the DMA RX socket buffers later
	 * to ensure all pending XDP_TX buffers are returned.
	 */
	free_dma_rx_desc_resources(priv, dma_conf);
}
/**
 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
 * @priv: driver private structure
 * Description: It is used for enabling the rx queues in the MAC
 */
static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	int queue;
	u8 mode;

	for (queue = 0; queue < rx_queues_count; queue++) {
		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
	}
}
/**
 * stmmac_start_rx_dma - start RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This starts a RX DMA channel
 */
static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
	stmmac_start_rx(priv, priv->ioaddr, chan);
}

/**
 * stmmac_start_tx_dma - start TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This starts a TX DMA channel
 */
static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
	stmmac_start_tx(priv, priv->ioaddr, chan);
}

/**
 * stmmac_stop_rx_dma - stop RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This stops a RX DMA channel
 */
static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
	stmmac_stop_rx(priv, priv->ioaddr, chan);
}

/**
 * stmmac_stop_tx_dma - stop TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This stops a TX DMA channel
 */
static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
	stmmac_stop_tx(priv, priv->ioaddr, chan);
}
static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
	u32 chan;

	for (chan = 0; chan < dma_csr_ch; chan++) {
		struct stmmac_channel *ch = &priv->channel[chan];
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
		spin_unlock_irqrestore(&ch->lock, flags);
	}
}
/**
 * stmmac_start_all_dma - start all RX and TX DMA channels
 * @priv: driver private structure
 * Description:
 * This starts all the RX and TX DMA channels
 */
static void stmmac_start_all_dma(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan = 0;

	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_start_rx_dma(priv, chan);

	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_start_tx_dma(priv, chan);
}

/**
 * stmmac_stop_all_dma - stop all RX and TX DMA channels
 * @priv: driver private structure
 * Description:
 * This stops the RX and TX DMA channels
 */
static void stmmac_stop_all_dma(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan = 0;

	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_stop_rx_dma(priv, chan);

	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_stop_tx_dma(priv, chan);
}
2359 * stmmac_dma_operation_mode - HW DMA operation mode
2360 * @priv: driver private structure
2361 * Description: it is used for configuring the DMA operation mode register in
2362 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2364 static void stmmac_dma_operation_mode(struct stmmac_priv
*priv
)
2366 u32 rx_channels_count
= priv
->plat
->rx_queues_to_use
;
2367 u32 tx_channels_count
= priv
->plat
->tx_queues_to_use
;
2368 int rxfifosz
= priv
->plat
->rx_fifo_size
;
2369 int txfifosz
= priv
->plat
->tx_fifo_size
;
2376 rxfifosz
= priv
->dma_cap
.rx_fifo_size
;
2378 txfifosz
= priv
->dma_cap
.tx_fifo_size
;
2380 /* Adjust for real per queue fifo size */
2381 rxfifosz
/= rx_channels_count
;
2382 txfifosz
/= tx_channels_count
;
2384 if (priv
->plat
->force_thresh_dma_mode
) {
2387 } else if (priv
->plat
->force_sf_dma_mode
|| priv
->plat
->tx_coe
) {
		 * In case of GMAC, SF mode can be enabled
		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE being actually supported
		 * 2) there being no buggy Jumbo frame support
		 *    that requires not inserting the csum in the TDES.
2395 txmode
= SF_DMA_MODE
;
2396 rxmode
= SF_DMA_MODE
;
2397 priv
->xstats
.threshold
= SF_DMA_MODE
;
2400 rxmode
= SF_DMA_MODE
;
2403 /* configure all channels */
2404 for (chan
= 0; chan
< rx_channels_count
; chan
++) {
2405 struct stmmac_rx_queue
*rx_q
= &priv
->dma_conf
.rx_queue
[chan
];
2408 qmode
= priv
->plat
->rx_queues_cfg
[chan
].mode_to_use
;
2410 stmmac_dma_rx_mode(priv
, priv
->ioaddr
, rxmode
, chan
,
2413 if (rx_q
->xsk_pool
) {
2414 buf_size
= xsk_pool_get_rx_frame_size(rx_q
->xsk_pool
);
2415 stmmac_set_dma_bfsize(priv
, priv
->ioaddr
,
2419 stmmac_set_dma_bfsize(priv
, priv
->ioaddr
,
2420 priv
->dma_conf
.dma_buf_sz
,
2425 for (chan
= 0; chan
< tx_channels_count
; chan
++) {
2426 qmode
= priv
->plat
->tx_queues_cfg
[chan
].mode_to_use
;
2428 stmmac_dma_tx_mode(priv
, priv
->ioaddr
, txmode
, chan
,
2433 static bool stmmac_xdp_xmit_zc(struct stmmac_priv
*priv
, u32 queue
, u32 budget
)
2435 struct netdev_queue
*nq
= netdev_get_tx_queue(priv
->dev
, queue
);
2436 struct stmmac_tx_queue
*tx_q
= &priv
->dma_conf
.tx_queue
[queue
];
2437 struct stmmac_txq_stats
*txq_stats
= &priv
->xstats
.txq_stats
[queue
];
2438 struct xsk_buff_pool
*pool
= tx_q
->xsk_pool
;
2439 unsigned int entry
= tx_q
->cur_tx
;
2440 struct dma_desc
*tx_desc
= NULL
;
2441 struct xdp_desc xdp_desc
;
2442 bool work_done
= true;
2443 u32 tx_set_ic_bit
= 0;
2444 unsigned long flags
;
2446 /* Avoids TX time-out as we are sharing with slow path */
2447 txq_trans_cond_update(nq
);
2449 budget
= min(budget
, stmmac_tx_avail(priv
, queue
));
2451 while (budget
-- > 0) {
2452 dma_addr_t dma_addr
;
2455 /* We are sharing with slow path and stop XSK TX desc submission when
2456 * available TX ring is less than threshold.
2458 if (unlikely(stmmac_tx_avail(priv
, queue
) < STMMAC_TX_XSK_AVAIL
) ||
2459 !netif_carrier_ok(priv
->dev
)) {
2464 if (!xsk_tx_peek_desc(pool
, &xdp_desc
))
2467 if (likely(priv
->extend_desc
))
2468 tx_desc
= (struct dma_desc
*)(tx_q
->dma_etx
+ entry
);
2469 else if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
2470 tx_desc
= &tx_q
->dma_entx
[entry
].basic
;
2472 tx_desc
= tx_q
->dma_tx
+ entry
;
2474 dma_addr
= xsk_buff_raw_get_dma(pool
, xdp_desc
.addr
);
2475 xsk_buff_raw_dma_sync_for_device(pool
, dma_addr
, xdp_desc
.len
);
2477 tx_q
->tx_skbuff_dma
[entry
].buf_type
= STMMAC_TXBUF_T_XSK_TX
;
		/* To return XDP buffer to XSK pool, we simply call
		 * xsk_tx_completed(), so we don't need to fill up
2483 tx_q
->tx_skbuff_dma
[entry
].buf
= 0;
2484 tx_q
->xdpf
[entry
] = NULL
;
2486 tx_q
->tx_skbuff_dma
[entry
].map_as_page
= false;
2487 tx_q
->tx_skbuff_dma
[entry
].len
= xdp_desc
.len
;
2488 tx_q
->tx_skbuff_dma
[entry
].last_segment
= true;
2489 tx_q
->tx_skbuff_dma
[entry
].is_jumbo
= false;
2491 stmmac_set_desc_addr(priv
, tx_desc
, dma_addr
);
2493 tx_q
->tx_count_frames
++;
2495 if (!priv
->tx_coal_frames
[queue
])
2497 else if (tx_q
->tx_count_frames
% priv
->tx_coal_frames
[queue
] == 0)
2503 tx_q
->tx_count_frames
= 0;
2504 stmmac_set_tx_ic(priv
, tx_desc
);
2508 stmmac_prepare_tx_desc(priv
, tx_desc
, 1, xdp_desc
.len
,
2509 true, priv
->mode
, true, true,
2512 stmmac_enable_dma_transmission(priv
, priv
->ioaddr
);
2514 tx_q
->cur_tx
= STMMAC_GET_ENTRY(tx_q
->cur_tx
, priv
->dma_conf
.dma_tx_size
);
2515 entry
= tx_q
->cur_tx
;
2517 flags
= u64_stats_update_begin_irqsave(&txq_stats
->syncp
);
2518 txq_stats
->tx_set_ic_bit
+= tx_set_ic_bit
;
2519 u64_stats_update_end_irqrestore(&txq_stats
->syncp
, flags
);
2522 stmmac_flush_tx_descriptors(priv
, queue
);
2523 xsk_tx_release(pool
);
	/* Return true if both of the following conditions are met:
2527 * a) TX Budget is still available
2528 * b) work_done = true when XSK TX desc peek is empty (no more
2529 * pending XSK TX for transmission)
2531 return !!budget
&& work_done
;
static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
{
	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
		tc += 64;

		if (priv->plat->force_thresh_dma_mode)
			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
		else
			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
						      chan);

		priv->xstats.threshold = tc;
	}
}
2550 * stmmac_tx_clean - to manage the transmission completion
2551 * @priv: driver private structure
2552 * @budget: napi budget limiting this functions packet handling
2553 * @queue: TX queue index
2554 * @pending_packets: signal to arm the TX coal timer
2555 * Description: it reclaims the transmit resources after transmission completes.
 * If some packets still need to be handled due to TX coalescing, set
2557 * pending_packets to true to make NAPI arm the TX coal timer.
2559 static int stmmac_tx_clean(struct stmmac_priv
*priv
, int budget
, u32 queue
,
2560 bool *pending_packets
)
2562 struct stmmac_tx_queue
*tx_q
= &priv
->dma_conf
.tx_queue
[queue
];
2563 struct stmmac_txq_stats
*txq_stats
= &priv
->xstats
.txq_stats
[queue
];
2564 unsigned int bytes_compl
= 0, pkts_compl
= 0;
2565 unsigned int entry
, xmits
= 0, count
= 0;
2566 u32 tx_packets
= 0, tx_errors
= 0;
2567 unsigned long flags
;
2569 __netif_tx_lock_bh(netdev_get_tx_queue(priv
->dev
, queue
));
2571 tx_q
->xsk_frames_done
= 0;
2573 entry
= tx_q
->dirty_tx
;
2575 /* Try to clean all TX complete frame in 1 shot */
2576 while ((entry
!= tx_q
->cur_tx
) && count
< priv
->dma_conf
.dma_tx_size
) {
2577 struct xdp_frame
*xdpf
;
2578 struct sk_buff
*skb
;
2582 if (tx_q
->tx_skbuff_dma
[entry
].buf_type
== STMMAC_TXBUF_T_XDP_TX
||
2583 tx_q
->tx_skbuff_dma
[entry
].buf_type
== STMMAC_TXBUF_T_XDP_NDO
) {
2584 xdpf
= tx_q
->xdpf
[entry
];
2586 } else if (tx_q
->tx_skbuff_dma
[entry
].buf_type
== STMMAC_TXBUF_T_SKB
) {
2588 skb
= tx_q
->tx_skbuff
[entry
];
2594 if (priv
->extend_desc
)
2595 p
= (struct dma_desc
*)(tx_q
->dma_etx
+ entry
);
2596 else if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
2597 p
= &tx_q
->dma_entx
[entry
].basic
;
2599 p
= tx_q
->dma_tx
+ entry
;
2601 status
= stmmac_tx_status(priv
, &priv
->xstats
, p
, priv
->ioaddr
);
2602 /* Check if the descriptor is owned by the DMA */
2603 if (unlikely(status
& tx_dma_own
))
2608 /* Make sure descriptor fields are read after reading
2613 /* Just consider the last segment and ...*/
2614 if (likely(!(status
& tx_not_ls
))) {
2615 /* ... verify the status error condition */
2616 if (unlikely(status
& tx_err
)) {
2618 if (unlikely(status
& tx_err_bump_tc
))
2619 stmmac_bump_dma_threshold(priv
, queue
);
2624 stmmac_get_tx_hwtstamp(priv
, p
, skb
);
2627 if (likely(tx_q
->tx_skbuff_dma
[entry
].buf
&&
2628 tx_q
->tx_skbuff_dma
[entry
].buf_type
!= STMMAC_TXBUF_T_XDP_TX
)) {
2629 if (tx_q
->tx_skbuff_dma
[entry
].map_as_page
)
2630 dma_unmap_page(priv
->device
,
2631 tx_q
->tx_skbuff_dma
[entry
].buf
,
2632 tx_q
->tx_skbuff_dma
[entry
].len
,
2635 dma_unmap_single(priv
->device
,
2636 tx_q
->tx_skbuff_dma
[entry
].buf
,
2637 tx_q
->tx_skbuff_dma
[entry
].len
,
2639 tx_q
->tx_skbuff_dma
[entry
].buf
= 0;
2640 tx_q
->tx_skbuff_dma
[entry
].len
= 0;
2641 tx_q
->tx_skbuff_dma
[entry
].map_as_page
= false;
2644 stmmac_clean_desc3(priv
, tx_q
, p
);
2646 tx_q
->tx_skbuff_dma
[entry
].last_segment
= false;
2647 tx_q
->tx_skbuff_dma
[entry
].is_jumbo
= false;
2650 tx_q
->tx_skbuff_dma
[entry
].buf_type
== STMMAC_TXBUF_T_XDP_TX
) {
2651 xdp_return_frame_rx_napi(xdpf
);
2652 tx_q
->xdpf
[entry
] = NULL
;
2656 tx_q
->tx_skbuff_dma
[entry
].buf_type
== STMMAC_TXBUF_T_XDP_NDO
) {
2657 xdp_return_frame(xdpf
);
2658 tx_q
->xdpf
[entry
] = NULL
;
2661 if (tx_q
->tx_skbuff_dma
[entry
].buf_type
== STMMAC_TXBUF_T_XSK_TX
)
2662 tx_q
->xsk_frames_done
++;
2664 if (tx_q
->tx_skbuff_dma
[entry
].buf_type
== STMMAC_TXBUF_T_SKB
) {
2667 bytes_compl
+= skb
->len
;
2668 dev_consume_skb_any(skb
);
2669 tx_q
->tx_skbuff
[entry
] = NULL
;
2673 stmmac_release_tx_desc(priv
, p
, priv
->mode
);
2675 entry
= STMMAC_GET_ENTRY(entry
, priv
->dma_conf
.dma_tx_size
);
2677 tx_q
->dirty_tx
= entry
;
2679 netdev_tx_completed_queue(netdev_get_tx_queue(priv
->dev
, queue
),
2680 pkts_compl
, bytes_compl
);
2682 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv
->dev
,
2684 stmmac_tx_avail(priv
, queue
) > STMMAC_TX_THRESH(priv
)) {
2686 netif_dbg(priv
, tx_done
, priv
->dev
,
2687 "%s: restart transmit\n", __func__
);
2688 netif_tx_wake_queue(netdev_get_tx_queue(priv
->dev
, queue
));
2691 if (tx_q
->xsk_pool
) {
2694 if (tx_q
->xsk_frames_done
)
2695 xsk_tx_completed(tx_q
->xsk_pool
, tx_q
->xsk_frames_done
);
2697 if (xsk_uses_need_wakeup(tx_q
->xsk_pool
))
2698 xsk_set_tx_need_wakeup(tx_q
->xsk_pool
);
2700 /* For XSK TX, we try to send as many as possible.
2701 * If XSK work done (XSK TX desc empty and budget still
2702 * available), return "budget - 1" to reenable TX IRQ.
2703 * Else, return "budget" to make NAPI continue polling.
2705 work_done
= stmmac_xdp_xmit_zc(priv
, queue
,
2706 STMMAC_XSK_TX_BUDGET_MAX
);
2713 if (priv
->eee_enabled
&& !priv
->tx_path_in_lpi_mode
&&
2714 priv
->eee_sw_timer_en
) {
2715 if (stmmac_enable_eee_mode(priv
))
2716 mod_timer(&priv
->eee_ctrl_timer
, STMMAC_LPI_T(priv
->tx_lpi_timer
));
2719 /* We still have pending packets, let's call for a new scheduling */
2720 if (tx_q
->dirty_tx
!= tx_q
->cur_tx
)
2721 *pending_packets
= true;
2723 flags
= u64_stats_update_begin_irqsave(&txq_stats
->syncp
);
2724 txq_stats
->tx_packets
+= tx_packets
;
2725 txq_stats
->tx_pkt_n
+= tx_packets
;
2726 txq_stats
->tx_clean
++;
2727 u64_stats_update_end_irqrestore(&txq_stats
->syncp
, flags
);
2729 priv
->xstats
.tx_errors
+= tx_errors
;
2731 __netif_tx_unlock_bh(netdev_get_tx_queue(priv
->dev
, queue
));
2733 /* Combine decisions from TX clean and XSK TX */
2734 return max(count
, xmits
);
2738 * stmmac_tx_err - to manage the tx error
2739 * @priv: driver private structure
2740 * @chan: channel index
2741 * Description: it cleans the descriptors and restarts the transmission
2742 * in case of transmission errors.
2744 static void stmmac_tx_err(struct stmmac_priv
*priv
, u32 chan
)
2746 struct stmmac_tx_queue
*tx_q
= &priv
->dma_conf
.tx_queue
[chan
];
2748 netif_tx_stop_queue(netdev_get_tx_queue(priv
->dev
, chan
));
2750 stmmac_stop_tx_dma(priv
, chan
);
2751 dma_free_tx_skbufs(priv
, &priv
->dma_conf
, chan
);
2752 stmmac_clear_tx_descriptors(priv
, &priv
->dma_conf
, chan
);
2753 stmmac_reset_tx_queue(priv
, chan
);
2754 stmmac_init_tx_chan(priv
, priv
->ioaddr
, priv
->plat
->dma_cfg
,
2755 tx_q
->dma_tx_phy
, chan
);
2756 stmmac_start_tx_dma(priv
, chan
);
2758 priv
->xstats
.tx_errors
++;
2759 netif_tx_wake_queue(netdev_get_tx_queue(priv
->dev
, chan
));
2763 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2764 * @priv: driver private structure
2765 * @txmode: TX operating mode
2766 * @rxmode: RX operating mode
2767 * @chan: channel index
2768 * Description: it is used for configuring of the DMA operation mode in
2769 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2772 static void stmmac_set_dma_operation_mode(struct stmmac_priv
*priv
, u32 txmode
,
2773 u32 rxmode
, u32 chan
)
2775 u8 rxqmode
= priv
->plat
->rx_queues_cfg
[chan
].mode_to_use
;
2776 u8 txqmode
= priv
->plat
->tx_queues_cfg
[chan
].mode_to_use
;
2777 u32 rx_channels_count
= priv
->plat
->rx_queues_to_use
;
2778 u32 tx_channels_count
= priv
->plat
->tx_queues_to_use
;
2779 int rxfifosz
= priv
->plat
->rx_fifo_size
;
2780 int txfifosz
= priv
->plat
->tx_fifo_size
;
2783 rxfifosz
= priv
->dma_cap
.rx_fifo_size
;
2785 txfifosz
= priv
->dma_cap
.tx_fifo_size
;
2787 /* Adjust for real per queue fifo size */
2788 rxfifosz
/= rx_channels_count
;
2789 txfifosz
/= tx_channels_count
;
2791 stmmac_dma_rx_mode(priv
, priv
->ioaddr
, rxmode
, chan
, rxfifosz
, rxqmode
);
2792 stmmac_dma_tx_mode(priv
, priv
->ioaddr
, txmode
, chan
, txfifosz
, txqmode
);
2795 static bool stmmac_safety_feat_interrupt(struct stmmac_priv
*priv
)
2799 ret
= stmmac_safety_feat_irq_status(priv
, priv
->dev
,
2800 priv
->ioaddr
, priv
->dma_cap
.asp
, &priv
->sstats
);
2801 if (ret
&& (ret
!= -EINVAL
)) {
2802 stmmac_global_err(priv
);
2809 static int stmmac_napi_check(struct stmmac_priv
*priv
, u32 chan
, u32 dir
)
2811 int status
= stmmac_dma_interrupt_status(priv
, priv
->ioaddr
,
2812 &priv
->xstats
, chan
, dir
);
2813 struct stmmac_rx_queue
*rx_q
= &priv
->dma_conf
.rx_queue
[chan
];
2814 struct stmmac_tx_queue
*tx_q
= &priv
->dma_conf
.tx_queue
[chan
];
2815 struct stmmac_channel
*ch
= &priv
->channel
[chan
];
2816 struct napi_struct
*rx_napi
;
2817 struct napi_struct
*tx_napi
;
2818 unsigned long flags
;
2820 rx_napi
= rx_q
->xsk_pool
? &ch
->rxtx_napi
: &ch
->rx_napi
;
2821 tx_napi
= tx_q
->xsk_pool
? &ch
->rxtx_napi
: &ch
->tx_napi
;
2823 if ((status
& handle_rx
) && (chan
< priv
->plat
->rx_queues_to_use
)) {
2824 if (napi_schedule_prep(rx_napi
)) {
2825 spin_lock_irqsave(&ch
->lock
, flags
);
2826 stmmac_disable_dma_irq(priv
, priv
->ioaddr
, chan
, 1, 0);
2827 spin_unlock_irqrestore(&ch
->lock
, flags
);
2828 __napi_schedule(rx_napi
);
2832 if ((status
& handle_tx
) && (chan
< priv
->plat
->tx_queues_to_use
)) {
2833 if (napi_schedule_prep(tx_napi
)) {
2834 spin_lock_irqsave(&ch
->lock
, flags
);
2835 stmmac_disable_dma_irq(priv
, priv
->ioaddr
, chan
, 0, 1);
2836 spin_unlock_irqrestore(&ch
->lock
, flags
);
2837 __napi_schedule(tx_napi
);
2845 * stmmac_dma_interrupt - DMA ISR
2846 * @priv: driver private structure
2847 * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine and schedules the poll method in case some
2851 static void stmmac_dma_interrupt(struct stmmac_priv
*priv
)
2853 u32 tx_channel_count
= priv
->plat
->tx_queues_to_use
;
2854 u32 rx_channel_count
= priv
->plat
->rx_queues_to_use
;
2855 u32 channels_to_check
= tx_channel_count
> rx_channel_count
?
2856 tx_channel_count
: rx_channel_count
;
2858 int status
[max_t(u32
, MTL_MAX_TX_QUEUES
, MTL_MAX_RX_QUEUES
)];
2860 /* Make sure we never check beyond our status buffer. */
2861 if (WARN_ON_ONCE(channels_to_check
> ARRAY_SIZE(status
)))
2862 channels_to_check
= ARRAY_SIZE(status
);
2864 for (chan
= 0; chan
< channels_to_check
; chan
++)
2865 status
[chan
] = stmmac_napi_check(priv
, chan
,
2868 for (chan
= 0; chan
< tx_channel_count
; chan
++) {
2869 if (unlikely(status
[chan
] & tx_hard_error_bump_tc
)) {
2870 /* Try to bump up the dma threshold on this failure */
2871 stmmac_bump_dma_threshold(priv
, chan
);
2872 } else if (unlikely(status
[chan
] == tx_hard_error
)) {
2873 stmmac_tx_err(priv
, chan
);
/**
 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
 * @priv: driver private structure
 * Description: this masks the MMC irq, in fact, the counters are managed in SW.
 */
static void stmmac_mmc_setup(struct stmmac_priv *priv)
{
	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;

	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);

	if (priv->dma_cap.rmon) {
		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
	} else
		netdev_info(priv->dev, "No MAC Management Counters available\n");
}
/**
 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
 * @priv: driver private structure
 * Description:
 *  new GMAC chip generations have a new register to indicate the
 *  presence of the optional feature/functions.
 *  This can be also used to override the value passed through the
 *  platform and necessary for old MAC10/100 and GMAC chips.
 */
static int stmmac_get_hw_features(struct stmmac_priv *priv)
{
	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
}
/**
 * stmmac_check_ether_addr - check if the MAC addr is valid
 * @priv: driver private structure
 * Description:
 * it verifies whether the MAC address is valid; in case of failure it
 * generates a random MAC address
 */
static void stmmac_check_ether_addr(struct stmmac_priv *priv)
{
	u8 addr[ETH_ALEN];

	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
		if (is_valid_ether_addr(addr))
			eth_hw_addr_set(priv->dev, addr);
		else
			eth_hw_addr_random(priv->dev);
		dev_info(priv->device, "device MAC address %pM\n",
			 priv->dev->dev_addr);
	}
}
2934 * stmmac_init_dma_engine - DMA init.
2935 * @priv: driver private structure
2937 * It inits the DMA invoking the specific MAC/GMAC callback.
2938 * Some DMA parameters can be passed from the platform;
 * in case these are not passed, a default is used for the MAC or GMAC.
2941 static int stmmac_init_dma_engine(struct stmmac_priv
*priv
)
2943 u32 rx_channels_count
= priv
->plat
->rx_queues_to_use
;
2944 u32 tx_channels_count
= priv
->plat
->tx_queues_to_use
;
2945 u32 dma_csr_ch
= max(rx_channels_count
, tx_channels_count
);
2946 struct stmmac_rx_queue
*rx_q
;
2947 struct stmmac_tx_queue
*tx_q
;
2952 if (!priv
->plat
->dma_cfg
|| !priv
->plat
->dma_cfg
->pbl
) {
2953 dev_err(priv
->device
, "Invalid DMA configuration\n");
2957 if (priv
->extend_desc
&& (priv
->mode
== STMMAC_RING_MODE
))
2960 ret
= stmmac_reset(priv
, priv
->ioaddr
);
2962 dev_err(priv
->device
, "Failed to reset the dma\n");
2966 /* DMA Configuration */
2967 stmmac_dma_init(priv
, priv
->ioaddr
, priv
->plat
->dma_cfg
, atds
);
2969 if (priv
->plat
->axi
)
2970 stmmac_axi(priv
, priv
->ioaddr
, priv
->plat
->axi
);
2972 /* DMA CSR Channel configuration */
2973 for (chan
= 0; chan
< dma_csr_ch
; chan
++) {
2974 stmmac_init_chan(priv
, priv
->ioaddr
, priv
->plat
->dma_cfg
, chan
);
2975 stmmac_disable_dma_irq(priv
, priv
->ioaddr
, chan
, 1, 1);
2978 /* DMA RX Channel Configuration */
2979 for (chan
= 0; chan
< rx_channels_count
; chan
++) {
2980 rx_q
= &priv
->dma_conf
.rx_queue
[chan
];
2982 stmmac_init_rx_chan(priv
, priv
->ioaddr
, priv
->plat
->dma_cfg
,
2983 rx_q
->dma_rx_phy
, chan
);
2985 rx_q
->rx_tail_addr
= rx_q
->dma_rx_phy
+
2986 (rx_q
->buf_alloc_num
*
2987 sizeof(struct dma_desc
));
2988 stmmac_set_rx_tail_ptr(priv
, priv
->ioaddr
,
2989 rx_q
->rx_tail_addr
, chan
);
2992 /* DMA TX Channel Configuration */
2993 for (chan
= 0; chan
< tx_channels_count
; chan
++) {
2994 tx_q
= &priv
->dma_conf
.tx_queue
[chan
];
2996 stmmac_init_tx_chan(priv
, priv
->ioaddr
, priv
->plat
->dma_cfg
,
2997 tx_q
->dma_tx_phy
, chan
);
2999 tx_q
->tx_tail_addr
= tx_q
->dma_tx_phy
;
3000 stmmac_set_tx_tail_ptr(priv
, priv
->ioaddr
,
3001 tx_q
->tx_tail_addr
, chan
);
3007 static void stmmac_tx_timer_arm(struct stmmac_priv
*priv
, u32 queue
)
3009 struct stmmac_tx_queue
*tx_q
= &priv
->dma_conf
.tx_queue
[queue
];
3010 u32 tx_coal_timer
= priv
->tx_coal_timer
[queue
];
3011 struct stmmac_channel
*ch
;
3012 struct napi_struct
*napi
;
3017 ch
= &priv
->channel
[tx_q
->queue_index
];
3018 napi
= tx_q
->xsk_pool
? &ch
->rxtx_napi
: &ch
->tx_napi
;
3020 /* Arm timer only if napi is not already scheduled.
3021 * Try to cancel any timer if napi is scheduled, timer will be armed
3022 * again in the next scheduled napi.
3024 if (unlikely(!napi_is_scheduled(napi
)))
3025 hrtimer_start(&tx_q
->txtimer
,
3026 STMMAC_COAL_TIMER(tx_coal_timer
),
3029 hrtimer_try_to_cancel(&tx_q
->txtimer
);
/**
 * stmmac_tx_timer - mitigation sw timer for tx.
 * @t: data pointer
 * Description:
 * This is the timer handler to directly invoke the stmmac_tx_clean.
 */
static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
{
	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
	struct stmmac_priv *priv = tx_q->priv_data;
	struct stmmac_channel *ch;
	struct napi_struct *napi;

	ch = &priv->channel[tx_q->queue_index];
	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;

	if (likely(napi_schedule_prep(napi))) {
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
		spin_unlock_irqrestore(&ch->lock, flags);
		__napi_schedule(napi);
	}

	return HRTIMER_NORESTART;
}
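/* The hrtimer above is the software side of TX coalescing: when fewer than
 * tx_coal_frames descriptors carry the Interrupt-on-Completion bit, it
 * schedules TX NAPI so stmmac_tx_clean() still reclaims the ring within
 * roughly tx_coal_timer microseconds.
 */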
/**
 * stmmac_init_coalesce - init mitigation options.
 * @priv: driver private structure
 * Description:
 * This inits the coalesce parameters: i.e. timer rate,
 * timer handler and default threshold used for enabling the
 * interrupt on completion bit.
 */
static void stmmac_init_coalesce(struct stmmac_priv *priv)
{
	u32 tx_channel_count = priv->plat->tx_queues_to_use;
	u32 rx_channel_count = priv->plat->rx_queues_to_use;
	u32 chan;

	for (chan = 0; chan < tx_channel_count; chan++) {
		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];

		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;

		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		tx_q->txtimer.function = stmmac_tx_timer;
	}

	for (chan = 0; chan < rx_channel_count; chan++)
		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
}
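/* These are only boot-time defaults; assuming the driver's ethtool coalesce
 * hooks are in place, tx-frames/tx-usecs and rx-frames can be retuned at
 * runtime with ethtool -C.
 */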
3088 static void stmmac_set_rings_length(struct stmmac_priv
*priv
)
3090 u32 rx_channels_count
= priv
->plat
->rx_queues_to_use
;
3091 u32 tx_channels_count
= priv
->plat
->tx_queues_to_use
;
3094 /* set TX ring length */
3095 for (chan
= 0; chan
< tx_channels_count
; chan
++)
3096 stmmac_set_tx_ring_len(priv
, priv
->ioaddr
,
3097 (priv
->dma_conf
.dma_tx_size
- 1), chan
);
3099 /* set RX ring length */
3100 for (chan
= 0; chan
< rx_channels_count
; chan
++)
3101 stmmac_set_rx_ring_len(priv
, priv
->ioaddr
,
3102 (priv
->dma_conf
.dma_rx_size
- 1), chan
);
3106 * stmmac_set_tx_queue_weight - Set TX queue weight
3107 * @priv: driver private structure
3108 * Description: It is used for setting TX queues weight
3110 static void stmmac_set_tx_queue_weight(struct stmmac_priv
*priv
)
3112 u32 tx_queues_count
= priv
->plat
->tx_queues_to_use
;
3116 for (queue
= 0; queue
< tx_queues_count
; queue
++) {
3117 weight
= priv
->plat
->tx_queues_cfg
[queue
].weight
;
3118 stmmac_set_mtl_tx_queue_weight(priv
, priv
->hw
, weight
, queue
);
3123 * stmmac_configure_cbs - Configure CBS in TX queue
3124 * @priv: driver private structure
3125 * Description: It is used for configuring CBS in AVB TX queues
3127 static void stmmac_configure_cbs(struct stmmac_priv
*priv
)
3129 u32 tx_queues_count
= priv
->plat
->tx_queues_to_use
;
3133 /* queue 0 is reserved for legacy traffic */
3134 for (queue
= 1; queue
< tx_queues_count
; queue
++) {
3135 mode_to_use
= priv
->plat
->tx_queues_cfg
[queue
].mode_to_use
;
3136 if (mode_to_use
== MTL_QUEUE_DCB
)
3139 stmmac_config_cbs(priv
, priv
->hw
,
3140 priv
->plat
->tx_queues_cfg
[queue
].send_slope
,
3141 priv
->plat
->tx_queues_cfg
[queue
].idle_slope
,
3142 priv
->plat
->tx_queues_cfg
[queue
].high_credit
,
3143 priv
->plat
->tx_queues_cfg
[queue
].low_credit
,
3149 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3150 * @priv: driver private structure
3151 * Description: It is used for mapping RX queues to RX dma channels
3153 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv
*priv
)
3155 u32 rx_queues_count
= priv
->plat
->rx_queues_to_use
;
3159 for (queue
= 0; queue
< rx_queues_count
; queue
++) {
3160 chan
= priv
->plat
->rx_queues_cfg
[queue
].chan
;
3161 stmmac_map_mtl_to_dma(priv
, priv
->hw
, queue
, chan
);
3166 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3167 * @priv: driver private structure
3168 * Description: It is used for configuring the RX Queue Priority
3170 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv
*priv
)
3172 u32 rx_queues_count
= priv
->plat
->rx_queues_to_use
;
3176 for (queue
= 0; queue
< rx_queues_count
; queue
++) {
3177 if (!priv
->plat
->rx_queues_cfg
[queue
].use_prio
)
3180 prio
= priv
->plat
->rx_queues_cfg
[queue
].prio
;
3181 stmmac_rx_queue_prio(priv
, priv
->hw
, prio
, queue
);
3186 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3187 * @priv: driver private structure
3188 * Description: It is used for configuring the TX Queue Priority
3190 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv
*priv
)
3192 u32 tx_queues_count
= priv
->plat
->tx_queues_to_use
;
3196 for (queue
= 0; queue
< tx_queues_count
; queue
++) {
3197 if (!priv
->plat
->tx_queues_cfg
[queue
].use_prio
)
3200 prio
= priv
->plat
->tx_queues_cfg
[queue
].prio
;
3201 stmmac_tx_queue_prio(priv
, priv
->hw
, prio
, queue
);
3206 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3207 * @priv: driver private structure
3208 * Description: It is used for configuring the RX queue routing
3210 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv
*priv
)
3212 u32 rx_queues_count
= priv
->plat
->rx_queues_to_use
;
3216 for (queue
= 0; queue
< rx_queues_count
; queue
++) {
3217 /* no specific packet type routing specified for the queue */
3218 if (priv
->plat
->rx_queues_cfg
[queue
].pkt_route
== 0x0)
3221 packet
= priv
->plat
->rx_queues_cfg
[queue
].pkt_route
;
3222 stmmac_rx_queue_routing(priv
, priv
->hw
, packet
, queue
);
3226 static void stmmac_mac_config_rss(struct stmmac_priv
*priv
)
3228 if (!priv
->dma_cap
.rssen
|| !priv
->plat
->rss_en
) {
3229 priv
->rss
.enable
= false;
3233 if (priv
->dev
->features
& NETIF_F_RXHASH
)
3234 priv
->rss
.enable
= true;
3236 priv
->rss
.enable
= false;
3238 stmmac_rss_configure(priv
, priv
->hw
, &priv
->rss
,
3239 priv
->plat
->rx_queues_to_use
);
3243 * stmmac_mtl_configuration - Configure MTL
3244 * @priv: driver private structure
 * Description: It is used for configuring MTL
3247 static void stmmac_mtl_configuration(struct stmmac_priv
*priv
)
3249 u32 rx_queues_count
= priv
->plat
->rx_queues_to_use
;
3250 u32 tx_queues_count
= priv
->plat
->tx_queues_to_use
;
3252 if (tx_queues_count
> 1)
3253 stmmac_set_tx_queue_weight(priv
);
3255 /* Configure MTL RX algorithms */
3256 if (rx_queues_count
> 1)
3257 stmmac_prog_mtl_rx_algorithms(priv
, priv
->hw
,
3258 priv
->plat
->rx_sched_algorithm
);
3260 /* Configure MTL TX algorithms */
3261 if (tx_queues_count
> 1)
3262 stmmac_prog_mtl_tx_algorithms(priv
, priv
->hw
,
3263 priv
->plat
->tx_sched_algorithm
);
3265 /* Configure CBS in AVB TX queues */
3266 if (tx_queues_count
> 1)
3267 stmmac_configure_cbs(priv
);
3269 /* Map RX MTL to DMA channels */
3270 stmmac_rx_queue_dma_chan_map(priv
);
3272 /* Enable MAC RX Queues */
3273 stmmac_mac_enable_rx_queues(priv
);
3275 /* Set RX priorities */
3276 if (rx_queues_count
> 1)
3277 stmmac_mac_config_rx_queues_prio(priv
);
3279 /* Set TX priorities */
3280 if (tx_queues_count
> 1)
3281 stmmac_mac_config_tx_queues_prio(priv
);
3283 /* Set RX routing */
3284 if (rx_queues_count
> 1)
3285 stmmac_mac_config_rx_queues_routing(priv
);
3287 /* Receive Side Scaling */
3288 if (rx_queues_count
> 1)
3289 stmmac_mac_config_rss(priv
);
3292 static void stmmac_safety_feat_configuration(struct stmmac_priv
*priv
)
3294 if (priv
->dma_cap
.asp
) {
3295 netdev_info(priv
->dev
, "Enabling Safety Features\n");
3296 stmmac_safety_feat_config(priv
, priv
->ioaddr
, priv
->dma_cap
.asp
,
3297 priv
->plat
->safety_feat_cfg
);
3299 netdev_info(priv
->dev
, "No Safety Features support found\n");
3303 static int stmmac_fpe_start_wq(struct stmmac_priv
*priv
)
3307 clear_bit(__FPE_TASK_SCHED
, &priv
->fpe_task_state
);
3308 clear_bit(__FPE_REMOVING
, &priv
->fpe_task_state
);
3310 name
= priv
->wq_name
;
3311 sprintf(name
, "%s-fpe", priv
->dev
->name
);
3313 priv
->fpe_wq
= create_singlethread_workqueue(name
);
3314 if (!priv
->fpe_wq
) {
3315 netdev_err(priv
->dev
, "%s: Failed to create workqueue\n", name
);
3319 netdev_info(priv
->dev
, "FPE workqueue start");
3325 * stmmac_hw_setup - setup mac in a usable state.
3326 * @dev : pointer to the device structure.
3327 * @ptp_register: register PTP if set
 * this is the main function to setup the HW in a usable state: the
 * dma engine is reset, the core registers are configured (e.g. AXI,
 * Checksum features, timers). The DMA is ready to start receiving and
3334 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3337 static int stmmac_hw_setup(struct net_device
*dev
, bool ptp_register
)
3339 struct stmmac_priv
*priv
= netdev_priv(dev
);
3340 u32 rx_cnt
= priv
->plat
->rx_queues_to_use
;
3341 u32 tx_cnt
= priv
->plat
->tx_queues_to_use
;
3346 /* DMA initialization and SW reset */
3347 ret
= stmmac_init_dma_engine(priv
);
3349 netdev_err(priv
->dev
, "%s: DMA engine initialization failed\n",
3354 /* Copy the MAC addr into the HW */
3355 stmmac_set_umac_addr(priv
, priv
->hw
, dev
->dev_addr
, 0);
3357 /* PS and related bits will be programmed according to the speed */
3358 if (priv
->hw
->pcs
) {
3359 int speed
= priv
->plat
->mac_port_sel_speed
;
3361 if ((speed
== SPEED_10
) || (speed
== SPEED_100
) ||
3362 (speed
== SPEED_1000
)) {
3363 priv
->hw
->ps
= speed
;
3365 dev_warn(priv
->device
, "invalid port speed\n");
3370 /* Initialize the MAC Core */
3371 stmmac_core_init(priv
, priv
->hw
, dev
);
3374 stmmac_mtl_configuration(priv
);
3376 /* Initialize Safety Features */
3377 stmmac_safety_feat_configuration(priv
);
3379 ret
= stmmac_rx_ipc(priv
, priv
->hw
);
3381 netdev_warn(priv
->dev
, "RX IPC Checksum Offload disabled\n");
3382 priv
->plat
->rx_coe
= STMMAC_RX_COE_NONE
;
3383 priv
->hw
->rx_csum
= 0;
3386 /* Enable the MAC Rx/Tx */
3387 stmmac_mac_set(priv
, priv
->ioaddr
, true);
3389 /* Set the HW DMA mode and the COE */
3390 stmmac_dma_operation_mode(priv
);
3392 stmmac_mmc_setup(priv
);
3395 ret
= clk_prepare_enable(priv
->plat
->clk_ptp_ref
);
3397 netdev_warn(priv
->dev
,
3398 "failed to enable PTP reference clock: %pe\n",
3402 ret
= stmmac_init_ptp(priv
);
3403 if (ret
== -EOPNOTSUPP
)
3404 netdev_info(priv
->dev
, "PTP not supported by HW\n");
3406 netdev_warn(priv
->dev
, "PTP init failed\n");
3407 else if (ptp_register
)
3408 stmmac_ptp_register(priv
);
3410 priv
->eee_tw_timer
= STMMAC_DEFAULT_TWT_LS
;
3412 /* Convert the timer from msec to usec */
3413 if (!priv
->tx_lpi_timer
)
3414 priv
->tx_lpi_timer
= eee_timer
* 1000;
3416 if (priv
->use_riwt
) {
3419 for (queue
= 0; queue
< rx_cnt
; queue
++) {
3420 if (!priv
->rx_riwt
[queue
])
3421 priv
->rx_riwt
[queue
] = DEF_DMA_RIWT
;
3423 stmmac_rx_watchdog(priv
, priv
->ioaddr
,
3424 priv
->rx_riwt
[queue
], queue
);
3429 stmmac_pcs_ctrl_ane(priv
, priv
->ioaddr
, 1, priv
->hw
->ps
, 0);
3431 /* set TX and RX rings length */
3432 stmmac_set_rings_length(priv
);
3436 for (chan
= 0; chan
< tx_cnt
; chan
++) {
3437 struct stmmac_tx_queue
*tx_q
= &priv
->dma_conf
.tx_queue
[chan
];
3439 /* TSO and TBS cannot co-exist */
3440 if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
3443 stmmac_enable_tso(priv
, priv
->ioaddr
, 1, chan
);
3447 /* Enable Split Header */
3448 sph_en
= (priv
->hw
->rx_csum
> 0) && priv
->sph
;
3449 for (chan
= 0; chan
< rx_cnt
; chan
++)
3450 stmmac_enable_sph(priv
, priv
->ioaddr
, sph_en
, chan
);
3453 /* VLAN Tag Insertion */
3454 if (priv
->dma_cap
.vlins
)
3455 stmmac_enable_vlan(priv
, priv
->hw
, STMMAC_VLAN_INSERT
);
3458 for (chan
= 0; chan
< tx_cnt
; chan
++) {
3459 struct stmmac_tx_queue
*tx_q
= &priv
->dma_conf
.tx_queue
[chan
];
3460 int enable
= tx_q
->tbs
& STMMAC_TBS_AVAIL
;
3462 stmmac_enable_tbs(priv
, priv
->ioaddr
, enable
, chan
);
3465 /* Configure real RX and TX queues */
3466 netif_set_real_num_rx_queues(dev
, priv
->plat
->rx_queues_to_use
);
3467 netif_set_real_num_tx_queues(dev
, priv
->plat
->tx_queues_to_use
);
3469 /* Start the ball rolling... */
3470 stmmac_start_all_dma(priv
);
3472 if (priv
->dma_cap
.fpesel
) {
3473 stmmac_fpe_start_wq(priv
);
3475 if (priv
->plat
->fpe_cfg
->enable
)
3476 stmmac_fpe_handshake(priv
, true);
3482 static void stmmac_hw_teardown(struct net_device
*dev
)
3484 struct stmmac_priv
*priv
= netdev_priv(dev
);
3486 clk_disable_unprepare(priv
->plat
->clk_ptp_ref
);
3489 static void stmmac_free_irq(struct net_device
*dev
,
3490 enum request_irq_err irq_err
, int irq_idx
)
3492 struct stmmac_priv
*priv
= netdev_priv(dev
);
3496 case REQ_IRQ_ERR_ALL
:
3497 irq_idx
= priv
->plat
->tx_queues_to_use
;
3499 case REQ_IRQ_ERR_TX
:
3500 for (j
= irq_idx
- 1; j
>= 0; j
--) {
3501 if (priv
->tx_irq
[j
] > 0) {
3502 irq_set_affinity_hint(priv
->tx_irq
[j
], NULL
);
3503 free_irq(priv
->tx_irq
[j
], &priv
->dma_conf
.tx_queue
[j
]);
3506 irq_idx
= priv
->plat
->rx_queues_to_use
;
3508 case REQ_IRQ_ERR_RX
:
3509 for (j
= irq_idx
- 1; j
>= 0; j
--) {
3510 if (priv
->rx_irq
[j
] > 0) {
3511 irq_set_affinity_hint(priv
->rx_irq
[j
], NULL
);
3512 free_irq(priv
->rx_irq
[j
], &priv
->dma_conf
.rx_queue
[j
]);
3516 if (priv
->sfty_ue_irq
> 0 && priv
->sfty_ue_irq
!= dev
->irq
)
3517 free_irq(priv
->sfty_ue_irq
, dev
);
3519 case REQ_IRQ_ERR_SFTY_UE
:
3520 if (priv
->sfty_ce_irq
> 0 && priv
->sfty_ce_irq
!= dev
->irq
)
3521 free_irq(priv
->sfty_ce_irq
, dev
);
3523 case REQ_IRQ_ERR_SFTY_CE
:
3524 if (priv
->lpi_irq
> 0 && priv
->lpi_irq
!= dev
->irq
)
3525 free_irq(priv
->lpi_irq
, dev
);
3527 case REQ_IRQ_ERR_LPI
:
3528 if (priv
->wol_irq
> 0 && priv
->wol_irq
!= dev
->irq
)
3529 free_irq(priv
->wol_irq
, dev
);
3531 case REQ_IRQ_ERR_WOL
:
3532 free_irq(dev
->irq
, dev
);
3534 case REQ_IRQ_ERR_MAC
:
3535 case REQ_IRQ_ERR_NO
:
3536 /* If MAC IRQ request error, no more IRQ to free */
3541 static int stmmac_request_irq_multi_msi(struct net_device
*dev
)
3543 struct stmmac_priv
*priv
= netdev_priv(dev
);
3544 enum request_irq_err irq_err
;
3551 /* For common interrupt */
3552 int_name
= priv
->int_name_mac
;
3553 sprintf(int_name
, "%s:%s", dev
->name
, "mac");
3554 ret
= request_irq(dev
->irq
, stmmac_mac_interrupt
,
3556 if (unlikely(ret
< 0)) {
3557 netdev_err(priv
->dev
,
3558 "%s: alloc mac MSI %d (error: %d)\n",
3559 __func__
, dev
->irq
, ret
);
3560 irq_err
= REQ_IRQ_ERR_MAC
;
	/* Request the Wake IRQ in case another line
3567 if (priv
->wol_irq
> 0 && priv
->wol_irq
!= dev
->irq
) {
3568 int_name
= priv
->int_name_wol
;
3569 sprintf(int_name
, "%s:%s", dev
->name
, "wol");
3570 ret
= request_irq(priv
->wol_irq
,
3571 stmmac_mac_interrupt
,
3573 if (unlikely(ret
< 0)) {
3574 netdev_err(priv
->dev
,
3575 "%s: alloc wol MSI %d (error: %d)\n",
3576 __func__
, priv
->wol_irq
, ret
);
3577 irq_err
= REQ_IRQ_ERR_WOL
;
	/* Request the LPI IRQ in case another line
3585 if (priv
->lpi_irq
> 0 && priv
->lpi_irq
!= dev
->irq
) {
3586 int_name
= priv
->int_name_lpi
;
3587 sprintf(int_name
, "%s:%s", dev
->name
, "lpi");
3588 ret
= request_irq(priv
->lpi_irq
,
3589 stmmac_mac_interrupt
,
3591 if (unlikely(ret
< 0)) {
3592 netdev_err(priv
->dev
,
3593 "%s: alloc lpi MSI %d (error: %d)\n",
3594 __func__
, priv
->lpi_irq
, ret
);
3595 irq_err
= REQ_IRQ_ERR_LPI
;
	/* Request the Safety Feature Correctable Error line in
	 * case another line is used
3603 if (priv
->sfty_ce_irq
> 0 && priv
->sfty_ce_irq
!= dev
->irq
) {
3604 int_name
= priv
->int_name_sfty_ce
;
3605 sprintf(int_name
, "%s:%s", dev
->name
, "safety-ce");
3606 ret
= request_irq(priv
->sfty_ce_irq
,
3607 stmmac_safety_interrupt
,
3609 if (unlikely(ret
< 0)) {
3610 netdev_err(priv
->dev
,
3611 "%s: alloc sfty ce MSI %d (error: %d)\n",
3612 __func__
, priv
->sfty_ce_irq
, ret
);
3613 irq_err
= REQ_IRQ_ERR_SFTY_CE
;
	/* Request the Safety Feature Uncorrectable Error line in
	 * case another line is used
3621 if (priv
->sfty_ue_irq
> 0 && priv
->sfty_ue_irq
!= dev
->irq
) {
3622 int_name
= priv
->int_name_sfty_ue
;
3623 sprintf(int_name
, "%s:%s", dev
->name
, "safety-ue");
3624 ret
= request_irq(priv
->sfty_ue_irq
,
3625 stmmac_safety_interrupt
,
3627 if (unlikely(ret
< 0)) {
3628 netdev_err(priv
->dev
,
3629 "%s: alloc sfty ue MSI %d (error: %d)\n",
3630 __func__
, priv
->sfty_ue_irq
, ret
);
3631 irq_err
= REQ_IRQ_ERR_SFTY_UE
;
3636 /* Request Rx MSI irq */
3637 for (i
= 0; i
< priv
->plat
->rx_queues_to_use
; i
++) {
3638 if (i
>= MTL_MAX_RX_QUEUES
)
3640 if (priv
->rx_irq
[i
] == 0)
3643 int_name
= priv
->int_name_rx_irq
[i
];
3644 sprintf(int_name
, "%s:%s-%d", dev
->name
, "rx", i
);
3645 ret
= request_irq(priv
->rx_irq
[i
],
3647 0, int_name
, &priv
->dma_conf
.rx_queue
[i
]);
3648 if (unlikely(ret
< 0)) {
3649 netdev_err(priv
->dev
,
3650 "%s: alloc rx-%d MSI %d (error: %d)\n",
3651 __func__
, i
, priv
->rx_irq
[i
], ret
);
3652 irq_err
= REQ_IRQ_ERR_RX
;
3656 cpumask_clear(&cpu_mask
);
3657 cpumask_set_cpu(i
% num_online_cpus(), &cpu_mask
);
3658 irq_set_affinity_hint(priv
->rx_irq
[i
], &cpu_mask
);
3661 /* Request Tx MSI irq */
3662 for (i
= 0; i
< priv
->plat
->tx_queues_to_use
; i
++) {
3663 if (i
>= MTL_MAX_TX_QUEUES
)
3665 if (priv
->tx_irq
[i
] == 0)
3668 int_name
= priv
->int_name_tx_irq
[i
];
3669 sprintf(int_name
, "%s:%s-%d", dev
->name
, "tx", i
);
3670 ret
= request_irq(priv
->tx_irq
[i
],
3672 0, int_name
, &priv
->dma_conf
.tx_queue
[i
]);
3673 if (unlikely(ret
< 0)) {
3674 netdev_err(priv
->dev
,
3675 "%s: alloc tx-%d MSI %d (error: %d)\n",
3676 __func__
, i
, priv
->tx_irq
[i
], ret
);
3677 irq_err
= REQ_IRQ_ERR_TX
;
3681 cpumask_clear(&cpu_mask
);
3682 cpumask_set_cpu(i
% num_online_cpus(), &cpu_mask
);
3683 irq_set_affinity_hint(priv
->tx_irq
[i
], &cpu_mask
);
3689 stmmac_free_irq(dev
, irq_err
, irq_idx
);
3693 static int stmmac_request_irq_single(struct net_device
*dev
)
3695 struct stmmac_priv
*priv
= netdev_priv(dev
);
3696 enum request_irq_err irq_err
;
3699 ret
= request_irq(dev
->irq
, stmmac_interrupt
,
3700 IRQF_SHARED
, dev
->name
, dev
);
3701 if (unlikely(ret
< 0)) {
3702 netdev_err(priv
->dev
,
3703 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3704 __func__
, dev
->irq
, ret
);
3705 irq_err
= REQ_IRQ_ERR_MAC
;
	/* Request the Wake IRQ in case another line
3712 if (priv
->wol_irq
> 0 && priv
->wol_irq
!= dev
->irq
) {
3713 ret
= request_irq(priv
->wol_irq
, stmmac_interrupt
,
3714 IRQF_SHARED
, dev
->name
, dev
);
3715 if (unlikely(ret
< 0)) {
3716 netdev_err(priv
->dev
,
3717 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3718 __func__
, priv
->wol_irq
, ret
);
3719 irq_err
= REQ_IRQ_ERR_WOL
;
3724 /* Request the IRQ lines */
3725 if (priv
->lpi_irq
> 0 && priv
->lpi_irq
!= dev
->irq
) {
3726 ret
= request_irq(priv
->lpi_irq
, stmmac_interrupt
,
3727 IRQF_SHARED
, dev
->name
, dev
);
3728 if (unlikely(ret
< 0)) {
3729 netdev_err(priv
->dev
,
3730 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3731 __func__
, priv
->lpi_irq
, ret
);
3732 irq_err
= REQ_IRQ_ERR_LPI
;
3740 stmmac_free_irq(dev
, irq_err
, 0);
3744 static int stmmac_request_irq(struct net_device
*dev
)
3746 struct stmmac_priv
*priv
= netdev_priv(dev
);
3749 /* Request the IRQ lines */
3750 if (priv
->plat
->flags
& STMMAC_FLAG_MULTI_MSI_EN
)
3751 ret
= stmmac_request_irq_multi_msi(dev
);
3753 ret
= stmmac_request_irq_single(dev
);
3759 * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3760 * @priv: driver private structure
3761 * @mtu: MTU to setup the dma queue and buf with
3762 * Description: Allocate and generate a dma_conf based on the provided MTU.
3763 * Allocate the Tx/Rx DMA queue and init them.
3765 * the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3767 static struct stmmac_dma_conf
*
3768 stmmac_setup_dma_desc(struct stmmac_priv
*priv
, unsigned int mtu
)
3770 struct stmmac_dma_conf
*dma_conf
;
3771 int chan
, bfsize
, ret
;
3773 dma_conf
= kzalloc(sizeof(*dma_conf
), GFP_KERNEL
);
3775 netdev_err(priv
->dev
, "%s: DMA conf allocation failed\n",
3777 return ERR_PTR(-ENOMEM
);
3780 bfsize
= stmmac_set_16kib_bfsize(priv
, mtu
);
3784 if (bfsize
< BUF_SIZE_16KiB
)
3785 bfsize
= stmmac_set_bfsize(mtu
, 0);
3787 dma_conf
->dma_buf_sz
= bfsize
;
3788 /* Chose the tx/rx size from the already defined one in the
3789 * priv struct. (if defined)
3791 dma_conf
->dma_tx_size
= priv
->dma_conf
.dma_tx_size
;
3792 dma_conf
->dma_rx_size
= priv
->dma_conf
.dma_rx_size
;
3794 if (!dma_conf
->dma_tx_size
)
3795 dma_conf
->dma_tx_size
= DMA_DEFAULT_TX_SIZE
;
3796 if (!dma_conf
->dma_rx_size
)
3797 dma_conf
->dma_rx_size
= DMA_DEFAULT_RX_SIZE
;
3799 /* Earlier check for TBS */
3800 for (chan
= 0; chan
< priv
->plat
->tx_queues_to_use
; chan
++) {
3801 struct stmmac_tx_queue
*tx_q
= &dma_conf
->tx_queue
[chan
];
3802 int tbs_en
= priv
->plat
->tx_queues_cfg
[chan
].tbs_en
;
3804 /* Setup per-TXQ tbs flag before TX descriptor alloc */
3805 tx_q
->tbs
|= tbs_en
? STMMAC_TBS_AVAIL
: 0;
3808 ret
= alloc_dma_desc_resources(priv
, dma_conf
);
3810 netdev_err(priv
->dev
, "%s: DMA descriptors allocation failed\n",
3815 ret
= init_dma_desc_rings(priv
->dev
, dma_conf
, GFP_KERNEL
);
3817 netdev_err(priv
->dev
, "%s: DMA descriptors initialization failed\n",
3825 free_dma_desc_resources(priv
, dma_conf
);
3828 return ERR_PTR(ret
);
3832 * __stmmac_open - open entry point of the driver
3833 * @dev : pointer to the device structure.
3834 * @dma_conf : structure to take the dma data
3836 * This function is the open entry point of the driver.
3838 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3841 static int __stmmac_open(struct net_device
*dev
,
3842 struct stmmac_dma_conf
*dma_conf
)
3844 struct stmmac_priv
*priv
= netdev_priv(dev
);
3845 int mode
= priv
->plat
->phy_interface
;
3849 ret
= pm_runtime_resume_and_get(priv
->device
);
3853 if (priv
->hw
->pcs
!= STMMAC_PCS_TBI
&&
3854 priv
->hw
->pcs
!= STMMAC_PCS_RTBI
&&
3856 xpcs_get_an_mode(priv
->hw
->xpcs
, mode
) != DW_AN_C73
) &&
3857 !priv
->hw
->lynx_pcs
) {
3858 ret
= stmmac_init_phy(dev
);
3860 netdev_err(priv
->dev
,
3861 "%s: Cannot attach to PHY (error: %d)\n",
3863 goto init_phy_error
;
3867 priv
->rx_copybreak
= STMMAC_RX_COPYBREAK
;
3869 buf_sz
= dma_conf
->dma_buf_sz
;
3870 memcpy(&priv
->dma_conf
, dma_conf
, sizeof(*dma_conf
));
3872 stmmac_reset_queues_param(priv
);
3874 if (!(priv
->plat
->flags
& STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP
) &&
3875 priv
->plat
->serdes_powerup
) {
3876 ret
= priv
->plat
->serdes_powerup(dev
, priv
->plat
->bsp_priv
);
3878 netdev_err(priv
->dev
, "%s: Serdes powerup failed\n",
3884 ret
= stmmac_hw_setup(dev
, true);
3886 netdev_err(priv
->dev
, "%s: Hw setup failed\n", __func__
);
3890 stmmac_init_coalesce(priv
);
3892 phylink_start(priv
->phylink
);
3893 /* We may have called phylink_speed_down before */
3894 phylink_speed_up(priv
->phylink
);
3896 ret
= stmmac_request_irq(dev
);
3900 stmmac_enable_all_queues(priv
);
3901 netif_tx_start_all_queues(priv
->dev
);
3902 stmmac_enable_all_dma_irq(priv
);
3907 phylink_stop(priv
->phylink
);
3909 for (chan
= 0; chan
< priv
->plat
->tx_queues_to_use
; chan
++)
3910 hrtimer_cancel(&priv
->dma_conf
.tx_queue
[chan
].txtimer
);
3912 stmmac_hw_teardown(dev
);
3914 phylink_disconnect_phy(priv
->phylink
);
3916 pm_runtime_put(priv
->device
);
static int stmmac_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct stmmac_dma_conf *dma_conf;
	int ret;

	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
	if (IS_ERR(dma_conf))
		return PTR_ERR(dma_conf);

	ret = __stmmac_open(dev, dma_conf);
	if (ret)
		free_dma_desc_resources(priv, dma_conf);

	kfree(dma_conf);

	return ret;
}
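/* stmmac_open() sizes the DMA rings and buffers for the current MTU; an MTU
 * change while the interface is up is expected to build a fresh dma_conf the
 * same way and swap it in rather than resize the live rings.
 */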
static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
{
	set_bit(__FPE_REMOVING, &priv->fpe_task_state);

	if (priv->fpe_wq)
		destroy_workqueue(priv->fpe_wq);

	netdev_info(priv->dev, "FPE workqueue stop");
}
3949 * stmmac_release - close entry point of the driver
3950 * @dev : device pointer.
3952 * This is the stop entry point of the driver.
3954 static int stmmac_release(struct net_device
*dev
)
3956 struct stmmac_priv
*priv
= netdev_priv(dev
);
3959 if (device_may_wakeup(priv
->device
))
3960 phylink_speed_down(priv
->phylink
, false);
3961 /* Stop and disconnect the PHY */
3962 phylink_stop(priv
->phylink
);
3963 phylink_disconnect_phy(priv
->phylink
);
3965 stmmac_disable_all_queues(priv
);
3967 for (chan
= 0; chan
< priv
->plat
->tx_queues_to_use
; chan
++)
3968 hrtimer_cancel(&priv
->dma_conf
.tx_queue
[chan
].txtimer
);
3970 netif_tx_disable(dev
);
3972 /* Free the IRQ lines */
3973 stmmac_free_irq(dev
, REQ_IRQ_ERR_ALL
, 0);
3975 if (priv
->eee_enabled
) {
3976 priv
->tx_path_in_lpi_mode
= false;
3977 del_timer_sync(&priv
->eee_ctrl_timer
);
3980 /* Stop TX/RX DMA and clear the descriptors */
3981 stmmac_stop_all_dma(priv
);
3983 /* Release and free the Rx/Tx resources */
3984 free_dma_desc_resources(priv
, &priv
->dma_conf
);
3986 /* Disable the MAC Rx/Tx */
3987 stmmac_mac_set(priv
, priv
->ioaddr
, false);
3989 /* Powerdown Serdes if there is */
3990 if (priv
->plat
->serdes_powerdown
)
3991 priv
->plat
->serdes_powerdown(dev
, priv
->plat
->bsp_priv
);
3993 netif_carrier_off(dev
);
3995 stmmac_release_ptp(priv
);
3997 pm_runtime_put(priv
->device
);
3999 if (priv
->dma_cap
.fpesel
)
4000 stmmac_fpe_stop_wq(priv
);
4005 static bool stmmac_vlan_insert(struct stmmac_priv
*priv
, struct sk_buff
*skb
,
4006 struct stmmac_tx_queue
*tx_q
)
4008 u16 tag
= 0x0, inner_tag
= 0x0;
4009 u32 inner_type
= 0x0;
4012 if (!priv
->dma_cap
.vlins
)
4014 if (!skb_vlan_tag_present(skb
))
4016 if (skb
->vlan_proto
== htons(ETH_P_8021AD
)) {
4017 inner_tag
= skb_vlan_tag_get(skb
);
4018 inner_type
= STMMAC_VLAN_INSERT
;
4021 tag
= skb_vlan_tag_get(skb
);
4023 if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
4024 p
= &tx_q
->dma_entx
[tx_q
->cur_tx
].basic
;
4026 p
= &tx_q
->dma_tx
[tx_q
->cur_tx
];
4028 if (stmmac_set_desc_vlan_tag(priv
, p
, tag
, inner_tag
, inner_type
))
4031 stmmac_set_tx_owner(priv
, p
);
4032 tx_q
->cur_tx
= STMMAC_GET_ENTRY(tx_q
->cur_tx
, priv
->dma_conf
.dma_tx_size
);
 * stmmac_tso_allocator - allocate and fill TSO TX descriptors
4038 * @priv: driver private structure
4039 * @des: buffer start address
4040 * @total_len: total length to fill in descriptors
4041 * @last_segment: condition for the last descriptor
4042 * @queue: TX queue index
 * This function fills the descriptors and requests new descriptors according
 * to the buffer length to fill.
4047 static void stmmac_tso_allocator(struct stmmac_priv
*priv
, dma_addr_t des
,
4048 int total_len
, bool last_segment
, u32 queue
)
4050 struct stmmac_tx_queue
*tx_q
= &priv
->dma_conf
.tx_queue
[queue
];
4051 struct dma_desc
*desc
;
4055 tmp_len
= total_len
;
4057 while (tmp_len
> 0) {
4058 dma_addr_t curr_addr
;
4060 tx_q
->cur_tx
= STMMAC_GET_ENTRY(tx_q
->cur_tx
,
4061 priv
->dma_conf
.dma_tx_size
);
4062 WARN_ON(tx_q
->tx_skbuff
[tx_q
->cur_tx
]);
4064 if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
4065 desc
= &tx_q
->dma_entx
[tx_q
->cur_tx
].basic
;
4067 desc
= &tx_q
->dma_tx
[tx_q
->cur_tx
];
4069 curr_addr
= des
+ (total_len
- tmp_len
);
4070 if (priv
->dma_cap
.addr64
<= 32)
4071 desc
->des0
= cpu_to_le32(curr_addr
);
4073 stmmac_set_desc_addr(priv
, desc
, curr_addr
);
4075 buff_size
= tmp_len
>= TSO_MAX_BUFF_SIZE
?
4076 TSO_MAX_BUFF_SIZE
: tmp_len
;
4078 stmmac_prepare_tso_tx_desc(priv
, desc
, 0, buff_size
,
4080 (last_segment
) && (tmp_len
<= TSO_MAX_BUFF_SIZE
),
4083 tmp_len
-= TSO_MAX_BUFF_SIZE
;
4087 static void stmmac_flush_tx_descriptors(struct stmmac_priv
*priv
, int queue
)
4089 struct stmmac_tx_queue
*tx_q
= &priv
->dma_conf
.tx_queue
[queue
];
4092 if (likely(priv
->extend_desc
))
4093 desc_size
= sizeof(struct dma_extended_desc
);
4094 else if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
4095 desc_size
= sizeof(struct dma_edesc
);
4097 desc_size
= sizeof(struct dma_desc
);
4099 /* The own bit must be the latest setting done when prepare the
4100 * descriptor and then barrier is needed to make sure that
4101 * all is coherent before granting the DMA engine.
4105 tx_q
->tx_tail_addr
= tx_q
->dma_tx_phy
+ (tx_q
->cur_tx
* desc_size
);
4106 stmmac_set_tx_tail_ptr(priv
, priv
->ioaddr
, tx_q
->tx_tail_addr
, queue
);
4110 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4111 * @skb : the socket buffer
4112 * @dev : device pointer
4113 * Description: this is the transmit function that is called on TSO frames
4114 * (support available on GMAC4 and newer chips).
4115 * Diagram below show the ring programming in case of TSO frames:
4119 * | DES0 |---> buffer1 = L2/L3/L4 header
4120 * | DES1 |---> TCP Payload (can continue on next descr...)
4121 * | DES2 |---> buffer 1 and 2 len
4122 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4128 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
4130 * | DES2 | --> buffer 1 and 2 len
 * mss is fixed when TSO is enabled, so the TDES3 ctx field need not be
 * reprogrammed for every frame.
4136 static netdev_tx_t
stmmac_tso_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
4138 struct dma_desc
*desc
, *first
, *mss_desc
= NULL
;
4139 struct stmmac_priv
*priv
= netdev_priv(dev
);
4140 int nfrags
= skb_shinfo(skb
)->nr_frags
;
4141 u32 queue
= skb_get_queue_mapping(skb
);
4142 unsigned int first_entry
, tx_packets
;
4143 struct stmmac_txq_stats
*txq_stats
;
4144 int tmp_pay_len
= 0, first_tx
;
4145 struct stmmac_tx_queue
*tx_q
;
4146 bool has_vlan
, set_ic
;
4147 u8 proto_hdr_len
, hdr
;
4148 unsigned long flags
;
4153 tx_q
= &priv
->dma_conf
.tx_queue
[queue
];
4154 txq_stats
= &priv
->xstats
.txq_stats
[queue
];
4155 first_tx
= tx_q
->cur_tx
;
4157 /* Compute header lengths */
4158 if (skb_shinfo(skb
)->gso_type
& SKB_GSO_UDP_L4
) {
4159 proto_hdr_len
= skb_transport_offset(skb
) + sizeof(struct udphdr
);
4160 hdr
= sizeof(struct udphdr
);
4162 proto_hdr_len
= skb_tcp_all_headers(skb
);
4163 hdr
= tcp_hdrlen(skb
);
4166 /* Desc availability based on threshold should be enough safe */
4167 if (unlikely(stmmac_tx_avail(priv
, queue
) <
4168 (((skb
->len
- proto_hdr_len
) / TSO_MAX_BUFF_SIZE
+ 1)))) {
4169 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev
, queue
))) {
4170 netif_tx_stop_queue(netdev_get_tx_queue(priv
->dev
,
4172 /* This is a hard error, log it. */
4173 netdev_err(priv
->dev
,
4174 "%s: Tx Ring full when queue awake\n",
4177 return NETDEV_TX_BUSY
;
4180 pay_len
= skb_headlen(skb
) - proto_hdr_len
; /* no frags */
4182 mss
= skb_shinfo(skb
)->gso_size
;
4184 /* set new MSS value if needed */
4185 if (mss
!= tx_q
->mss
) {
4186 if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
4187 mss_desc
= &tx_q
->dma_entx
[tx_q
->cur_tx
].basic
;
4189 mss_desc
= &tx_q
->dma_tx
[tx_q
->cur_tx
];
4191 stmmac_set_mss(priv
, mss_desc
, mss
);
4193 tx_q
->cur_tx
= STMMAC_GET_ENTRY(tx_q
->cur_tx
,
4194 priv
->dma_conf
.dma_tx_size
);
4195 WARN_ON(tx_q
->tx_skbuff
[tx_q
->cur_tx
]);
4198 if (netif_msg_tx_queued(priv
)) {
4199 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4200 __func__
, hdr
, proto_hdr_len
, pay_len
, mss
);
4201 pr_info("\tskb->len %d, skb->data_len %d\n", skb
->len
,
4205 /* Check if VLAN can be inserted by HW */
4206 has_vlan
= stmmac_vlan_insert(priv
, skb
, tx_q
);
4208 first_entry
= tx_q
->cur_tx
;
4209 WARN_ON(tx_q
->tx_skbuff
[first_entry
]);
4211 if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
4212 desc
= &tx_q
->dma_entx
[first_entry
].basic
;
4214 desc
= &tx_q
->dma_tx
[first_entry
];
4218 stmmac_set_desc_vlan(priv
, first
, STMMAC_VLAN_INSERT
);
4220 /* first descriptor: fill Headers on Buf1 */
4221 des
= dma_map_single(priv
->device
, skb
->data
, skb_headlen(skb
),
4223 if (dma_mapping_error(priv
->device
, des
))
4226 tx_q
->tx_skbuff_dma
[first_entry
].buf
= des
;
4227 tx_q
->tx_skbuff_dma
[first_entry
].len
= skb_headlen(skb
);
4228 tx_q
->tx_skbuff_dma
[first_entry
].map_as_page
= false;
4229 tx_q
->tx_skbuff_dma
[first_entry
].buf_type
= STMMAC_TXBUF_T_SKB
;
4231 if (priv
->dma_cap
.addr64
<= 32) {
4232 first
->des0
= cpu_to_le32(des
);
4234 /* Fill start of payload in buff2 of first descriptor */
4236 first
->des1
= cpu_to_le32(des
+ proto_hdr_len
);
4238 /* If needed take extra descriptors to fill the remaining payload */
4239 tmp_pay_len
= pay_len
- TSO_MAX_BUFF_SIZE
;
4241 stmmac_set_desc_addr(priv
, first
, des
);
4242 tmp_pay_len
= pay_len
;
4243 des
+= proto_hdr_len
;
4247 stmmac_tso_allocator(priv
, des
, tmp_pay_len
, (nfrags
== 0), queue
);
4249 /* Prepare fragments */
4250 for (i
= 0; i
< nfrags
; i
++) {
4251 const skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
4253 des
= skb_frag_dma_map(priv
->device
, frag
, 0,
4254 skb_frag_size(frag
),
4256 if (dma_mapping_error(priv
->device
, des
))
4259 stmmac_tso_allocator(priv
, des
, skb_frag_size(frag
),
4260 (i
== nfrags
- 1), queue
);
4262 tx_q
->tx_skbuff_dma
[tx_q
->cur_tx
].buf
= des
;
4263 tx_q
->tx_skbuff_dma
[tx_q
->cur_tx
].len
= skb_frag_size(frag
);
4264 tx_q
->tx_skbuff_dma
[tx_q
->cur_tx
].map_as_page
= true;
4265 tx_q
->tx_skbuff_dma
[tx_q
->cur_tx
].buf_type
= STMMAC_TXBUF_T_SKB
;
4268 tx_q
->tx_skbuff_dma
[tx_q
->cur_tx
].last_segment
= true;
4270 /* Only the last descriptor gets to point to the skb. */
4271 tx_q
->tx_skbuff
[tx_q
->cur_tx
] = skb
;
4272 tx_q
->tx_skbuff_dma
[tx_q
->cur_tx
].buf_type
= STMMAC_TXBUF_T_SKB
;
4274 /* Manage tx mitigation */
4275 tx_packets
= (tx_q
->cur_tx
+ 1) - first_tx
;
4276 tx_q
->tx_count_frames
+= tx_packets
;
4278 if ((skb_shinfo(skb
)->tx_flags
& SKBTX_HW_TSTAMP
) && priv
->hwts_tx_en
)
4280 else if (!priv
->tx_coal_frames
[queue
])
4282 else if (tx_packets
> priv
->tx_coal_frames
[queue
])
4284 else if ((tx_q
->tx_count_frames
%
4285 priv
->tx_coal_frames
[queue
]) < tx_packets
)
4291 if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
4292 desc
= &tx_q
->dma_entx
[tx_q
->cur_tx
].basic
;
4294 desc
= &tx_q
->dma_tx
[tx_q
->cur_tx
];
4296 tx_q
->tx_count_frames
= 0;
4297 stmmac_set_tx_ic(priv
, desc
);
4300 /* We've used all descriptors we need for this skb, however,
4301 * advance cur_tx so that it references a fresh descriptor.
4302 * ndo_start_xmit will fill this descriptor the next time it's
4303 * called and stmmac_tx_clean may clean up to this descriptor.
4305 tx_q
->cur_tx
= STMMAC_GET_ENTRY(tx_q
->cur_tx
, priv
->dma_conf
.dma_tx_size
);
4307 if (unlikely(stmmac_tx_avail(priv
, queue
) <= (MAX_SKB_FRAGS
+ 1))) {
4308 netif_dbg(priv
, hw
, priv
->dev
, "%s: stop transmitted packets\n",
4310 netif_tx_stop_queue(netdev_get_tx_queue(priv
->dev
, queue
));
4313 flags
= u64_stats_update_begin_irqsave(&txq_stats
->syncp
);
4314 txq_stats
->tx_bytes
+= skb
->len
;
4315 txq_stats
->tx_tso_frames
++;
4316 txq_stats
->tx_tso_nfrags
+= nfrags
;
4318 txq_stats
->tx_set_ic_bit
++;
4319 u64_stats_update_end_irqrestore(&txq_stats
->syncp
, flags
);
4321 if (priv
->sarc_type
)
4322 stmmac_set_desc_sarc(priv
, first
, priv
->sarc_type
);
4324 skb_tx_timestamp(skb
);
4326 if (unlikely((skb_shinfo(skb
)->tx_flags
& SKBTX_HW_TSTAMP
) &&
4327 priv
->hwts_tx_en
)) {
4328 /* declare that device is doing timestamping */
4329 skb_shinfo(skb
)->tx_flags
|= SKBTX_IN_PROGRESS
;
4330 stmmac_enable_tx_timestamp(priv
, first
);
4333 /* Complete the first descriptor before granting the DMA */
4334 stmmac_prepare_tso_tx_desc(priv
, first
, 1,
4337 1, tx_q
->tx_skbuff_dma
[first_entry
].last_segment
,
4338 hdr
/ 4, (skb
->len
- proto_hdr_len
));
4340 /* If context desc is used to change MSS */
4342 /* Make sure that first descriptor has been completely
4343 * written, including its own bit. This is because MSS is
4344 * actually before first descriptor, so we need to make
4345 * sure that MSS's own bit is the last thing written.
4348 stmmac_set_tx_owner(priv
, mss_desc
);
4351 if (netif_msg_pktdata(priv
)) {
4352 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4353 __func__
, tx_q
->cur_tx
, tx_q
->dirty_tx
, first_entry
,
4354 tx_q
->cur_tx
, first
, nfrags
);
4355 pr_info(">>> frame to be transmitted: ");
4356 print_pkt(skb
->data
, skb_headlen(skb
));
4359 netdev_tx_sent_queue(netdev_get_tx_queue(dev
, queue
), skb
->len
);
4361 stmmac_flush_tx_descriptors(priv
, queue
);
4362 stmmac_tx_timer_arm(priv
, queue
);
4364 return NETDEV_TX_OK
;
4367 dev_err(priv
->device
, "Tx dma map failed\n");
4369 priv
->xstats
.tx_dropped
++;
4370 return NETDEV_TX_OK
;
/**
 *  stmmac_xmit - Tx entry point of the driver
 *  @skb : the socket buffer
 *  @dev : device pointer
 *  Description : this is the tx entry point of the driver.
 *  It programs the chain or the ring and supports oversized frames
 *  and SG frames.
 */
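/* Illustrative note (not from the original source): a non-TSO skb consumes
 * one descriptor for the linear part plus one per page fragment, which is
 * why stmmac_xmit() checks stmmac_tx_avail(priv, queue) against nfrags + 1
 * before programming the ring.
 */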
4381 static netdev_tx_t
stmmac_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
4383 unsigned int first_entry
, tx_packets
, enh_desc
;
4384 struct stmmac_priv
*priv
= netdev_priv(dev
);
4385 unsigned int nopaged_len
= skb_headlen(skb
);
4386 int i
, csum_insertion
= 0, is_jumbo
= 0;
4387 u32 queue
= skb_get_queue_mapping(skb
);
4388 int nfrags
= skb_shinfo(skb
)->nr_frags
;
4389 int gso
= skb_shinfo(skb
)->gso_type
;
4390 struct stmmac_txq_stats
*txq_stats
;
4391 struct dma_edesc
*tbs_desc
= NULL
;
4392 struct dma_desc
*desc
, *first
;
4393 struct stmmac_tx_queue
*tx_q
;
4394 bool has_vlan
, set_ic
;
4395 int entry
, first_tx
;
4396 unsigned long flags
;
4399 tx_q
= &priv
->dma_conf
.tx_queue
[queue
];
4400 txq_stats
= &priv
->xstats
.txq_stats
[queue
];
4401 first_tx
= tx_q
->cur_tx
;
4403 if (priv
->tx_path_in_lpi_mode
&& priv
->eee_sw_timer_en
)
4404 stmmac_disable_eee_mode(priv
);
4406 /* Manage oversized TCP frames for GMAC4 device */
4407 if (skb_is_gso(skb
) && priv
->tso
) {
4408 if (gso
& (SKB_GSO_TCPV4
| SKB_GSO_TCPV6
))
4409 return stmmac_tso_xmit(skb
, dev
);
4410 if (priv
->plat
->has_gmac4
&& (gso
& SKB_GSO_UDP_L4
))
4411 return stmmac_tso_xmit(skb
, dev
);
4414 if (unlikely(stmmac_tx_avail(priv
, queue
) < nfrags
+ 1)) {
4415 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev
, queue
))) {
4416 netif_tx_stop_queue(netdev_get_tx_queue(priv
->dev
,
4418 /* This is a hard error, log it. */
4419 netdev_err(priv
->dev
,
4420 "%s: Tx Ring full when queue awake\n",
4423 return NETDEV_TX_BUSY
;
4426 /* Check if VLAN can be inserted by HW */
4427 has_vlan
= stmmac_vlan_insert(priv
, skb
, tx_q
);
4429 entry
= tx_q
->cur_tx
;
4430 first_entry
= entry
;
4431 WARN_ON(tx_q
->tx_skbuff
[first_entry
]);
4433 csum_insertion
= (skb
->ip_summed
== CHECKSUM_PARTIAL
);
4434 /* DWMAC IPs can be synthesized to support tx coe only for a few tx
4435 * queues. In that case, checksum offloading for those queues that don't
4436 * support tx coe needs to fallback to software checksum calculation.
4438 if (csum_insertion
&&
4439 priv
->plat
->tx_queues_cfg
[queue
].coe_unsupported
) {
4440 if (unlikely(skb_checksum_help(skb
)))
4442 csum_insertion
= !csum_insertion
;
4445 if (likely(priv
->extend_desc
))
4446 desc
= (struct dma_desc
*)(tx_q
->dma_etx
+ entry
);
4447 else if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
4448 desc
= &tx_q
->dma_entx
[entry
].basic
;
4450 desc
= tx_q
->dma_tx
+ entry
;
4455 stmmac_set_desc_vlan(priv
, first
, STMMAC_VLAN_INSERT
);
4457 enh_desc
= priv
->plat
->enh_desc
;
4458 /* To program the descriptors according to the size of the frame */
4460 is_jumbo
= stmmac_is_jumbo_frm(priv
, skb
->len
, enh_desc
);
4462 if (unlikely(is_jumbo
)) {
4463 entry
= stmmac_jumbo_frm(priv
, tx_q
, skb
, csum_insertion
);
4464 if (unlikely(entry
< 0) && (entry
!= -EINVAL
))
4468 for (i
= 0; i
< nfrags
; i
++) {
4469 const skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
4470 int len
= skb_frag_size(frag
);
4471 bool last_segment
= (i
== (nfrags
- 1));
4473 entry
= STMMAC_GET_ENTRY(entry
, priv
->dma_conf
.dma_tx_size
);
4474 WARN_ON(tx_q
->tx_skbuff
[entry
]);
4476 if (likely(priv
->extend_desc
))
4477 desc
= (struct dma_desc
*)(tx_q
->dma_etx
+ entry
);
4478 else if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
4479 desc
= &tx_q
->dma_entx
[entry
].basic
;
4481 desc
= tx_q
->dma_tx
+ entry
;
4483 des
= skb_frag_dma_map(priv
->device
, frag
, 0, len
,
4485 if (dma_mapping_error(priv
->device
, des
))
4486 goto dma_map_err
; /* should reuse desc w/o issues */
4488 tx_q
->tx_skbuff_dma
[entry
].buf
= des
;
4490 stmmac_set_desc_addr(priv
, desc
, des
);
4492 tx_q
->tx_skbuff_dma
[entry
].map_as_page
= true;
4493 tx_q
->tx_skbuff_dma
[entry
].len
= len
;
4494 tx_q
->tx_skbuff_dma
[entry
].last_segment
= last_segment
;
4495 tx_q
->tx_skbuff_dma
[entry
].buf_type
= STMMAC_TXBUF_T_SKB
;
4497 /* Prepare the descriptor and set the own bit too */
4498 stmmac_prepare_tx_desc(priv
, desc
, 0, len
, csum_insertion
,
4499 priv
->mode
, 1, last_segment
, skb
->len
);
4502 /* Only the last descriptor gets to point to the skb. */
4503 tx_q
->tx_skbuff
[entry
] = skb
;
4504 tx_q
->tx_skbuff_dma
[entry
].buf_type
= STMMAC_TXBUF_T_SKB
;
4506 /* According to the coalesce parameter the IC bit for the latest
4507 * segment is reset and the timer re-started to clean the tx status.
4508 * This approach takes care about the fragments: desc is the first
4509 * element in case of no SG.
4511 tx_packets
= (entry
+ 1) - first_tx
;
4512 tx_q
->tx_count_frames
+= tx_packets
;
4514 if ((skb_shinfo(skb
)->tx_flags
& SKBTX_HW_TSTAMP
) && priv
->hwts_tx_en
)
4516 else if (!priv
->tx_coal_frames
[queue
])
4518 else if (tx_packets
> priv
->tx_coal_frames
[queue
])
4520 else if ((tx_q
->tx_count_frames
%
4521 priv
->tx_coal_frames
[queue
]) < tx_packets
)
4527 if (likely(priv
->extend_desc
))
4528 desc
= &tx_q
->dma_etx
[entry
].basic
;
4529 else if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
4530 desc
= &tx_q
->dma_entx
[entry
].basic
;
4532 desc
= &tx_q
->dma_tx
[entry
];
4534 tx_q
->tx_count_frames
= 0;
4535 stmmac_set_tx_ic(priv
, desc
);
4538 /* We've used all descriptors we need for this skb, however,
4539 * advance cur_tx so that it references a fresh descriptor.
4540 * ndo_start_xmit will fill this descriptor the next time it's
4541 * called and stmmac_tx_clean may clean up to this descriptor.
4543 entry
= STMMAC_GET_ENTRY(entry
, priv
->dma_conf
.dma_tx_size
);
4544 tx_q
->cur_tx
= entry
;
4546 if (netif_msg_pktdata(priv
)) {
4547 netdev_dbg(priv
->dev
,
4548 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4549 __func__
, tx_q
->cur_tx
, tx_q
->dirty_tx
, first_entry
,
4550 entry
, first
, nfrags
);
4552 netdev_dbg(priv
->dev
, ">>> frame to be transmitted: ");
4553 print_pkt(skb
->data
, skb
->len
);
4556 if (unlikely(stmmac_tx_avail(priv
, queue
) <= (MAX_SKB_FRAGS
+ 1))) {
4557 netif_dbg(priv
, hw
, priv
->dev
, "%s: stop transmitted packets\n",
4559 netif_tx_stop_queue(netdev_get_tx_queue(priv
->dev
, queue
));
4562 flags
= u64_stats_update_begin_irqsave(&txq_stats
->syncp
);
4563 txq_stats
->tx_bytes
+= skb
->len
;
4565 txq_stats
->tx_set_ic_bit
++;
4566 u64_stats_update_end_irqrestore(&txq_stats
->syncp
, flags
);
4568 if (priv
->sarc_type
)
4569 stmmac_set_desc_sarc(priv
, first
, priv
->sarc_type
);
4571 skb_tx_timestamp(skb
);
4573 /* Ready to fill the first descriptor and set the OWN bit w/o any
4574 * problems because all the descriptors are actually ready to be
4575 * passed to the DMA engine.
4577 if (likely(!is_jumbo
)) {
4578 bool last_segment
= (nfrags
== 0);
4580 des
= dma_map_single(priv
->device
, skb
->data
,
4581 nopaged_len
, DMA_TO_DEVICE
);
4582 if (dma_mapping_error(priv
->device
, des
))
4585 tx_q
->tx_skbuff_dma
[first_entry
].buf
= des
;
4586 tx_q
->tx_skbuff_dma
[first_entry
].buf_type
= STMMAC_TXBUF_T_SKB
;
4587 tx_q
->tx_skbuff_dma
[first_entry
].map_as_page
= false;
4589 stmmac_set_desc_addr(priv
, first
, des
);
4591 tx_q
->tx_skbuff_dma
[first_entry
].len
= nopaged_len
;
4592 tx_q
->tx_skbuff_dma
[first_entry
].last_segment
= last_segment
;
4594 if (unlikely((skb_shinfo(skb
)->tx_flags
& SKBTX_HW_TSTAMP
) &&
4595 priv
->hwts_tx_en
)) {
4596 /* declare that device is doing timestamping */
4597 skb_shinfo(skb
)->tx_flags
|= SKBTX_IN_PROGRESS
;
4598 stmmac_enable_tx_timestamp(priv
, first
);
4601 /* Prepare the first descriptor setting the OWN bit too */
4602 stmmac_prepare_tx_desc(priv
, first
, 1, nopaged_len
,
4603 csum_insertion
, priv
->mode
, 0, last_segment
,
4607 if (tx_q
->tbs
& STMMAC_TBS_EN
) {
4608 struct timespec64 ts
= ns_to_timespec64(skb
->tstamp
);
4610 tbs_desc
= &tx_q
->dma_entx
[first_entry
];
4611 stmmac_set_desc_tbs(priv
, tbs_desc
, ts
.tv_sec
, ts
.tv_nsec
);
4614 stmmac_set_tx_owner(priv
, first
);
4616 netdev_tx_sent_queue(netdev_get_tx_queue(dev
, queue
), skb
->len
);
4618 stmmac_enable_dma_transmission(priv
, priv
->ioaddr
);
4620 stmmac_flush_tx_descriptors(priv
, queue
);
4621 stmmac_tx_timer_arm(priv
, queue
);
4623 return NETDEV_TX_OK
;
4626 netdev_err(priv
->dev
, "Tx DMA map failed\n");
4628 priv
->xstats
.tx_dropped
++;
4629 return NETDEV_TX_OK
;
static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
{
	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
	__be16 vlan_proto = veth->h_vlan_proto;
	u16 vlanid;

	if ((vlan_proto == htons(ETH_P_8021Q) &&
	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
	    (vlan_proto == htons(ETH_P_8021AD) &&
	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
		/* pop the vlan tag */
		vlanid = ntohs(veth->h_vlan_TCI);
		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
	}
}
/**
 * stmmac_rx_refill - refill used skb preallocated buffers
 * @priv: driver private structure
 * @queue: RX queue index
 * Description : this is to reallocate the skb for the reception process
 * that is based on zero-copy.
 */
static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
	int dirty = stmmac_rx_dirty(priv, queue);
	unsigned int entry = rx_q->dirty_rx;
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	if (priv->dma_cap.host_dma_width <= 32)
		gfp |= GFP_DMA32;

	while (dirty-- > 0) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
		struct dma_desc *p;
		bool use_rx_wd;

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			p = rx_q->dma_rx + entry;

		if (!buf->page) {
			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
			if (!buf->page)
				break;
		}

		if (priv->sph && !buf->sec_page) {
			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
			if (!buf->sec_page)
				break;

			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
		}

		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;

		stmmac_set_desc_addr(priv, p, buf->addr);
		if (priv->sph)
			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
		else
			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
		stmmac_refill_desc3(priv, rx_q, p);

		rx_q->rx_count_frames++;
		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
			rx_q->rx_count_frames = 0;

		use_rx_wd = !priv->rx_coal_frames[queue];
		use_rx_wd |= rx_q->rx_count_frames > 0;
		if (!priv->use_riwt)
			use_rx_wd = false;

		dma_wmb();
		stmmac_set_rx_owner(priv, p, use_rx_wd);

		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
	}
	rx_q->dirty_rx = entry;
	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
			     (rx_q->dirty_rx * sizeof(struct dma_desc));
	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
}
static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
				       struct dma_desc *p,
				       int status, unsigned int len)
{
	unsigned int plen = 0, hlen = 0;
	int coe = priv->hw->rx_csum;

	/* Not first descriptor, buffer is always zero */
	if (priv->sph && len)
		return 0;

	/* First descriptor, get split header length */
	stmmac_get_rx_header_len(priv, p, &hlen);
	if (priv->sph && hlen) {
		priv->xstats.rx_split_hdr_pkt_n++;
		return hlen;
	}

	/* First descriptor, not last descriptor and not split header */
	if (status & rx_not_ls)
		return priv->dma_conf.dma_buf_sz;

	plen = stmmac_get_rx_frame_len(priv, p, coe);

	/* First descriptor and last descriptor and not split header */
	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
}
static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
				       struct dma_desc *p,
				       int status, unsigned int len)
{
	int coe = priv->hw->rx_csum;
	unsigned int plen = 0;

	/* Not split header, buffer is not available */
	if (!priv->sph)
		return 0;

	/* Not last descriptor */
	if (status & rx_not_ls)
		return priv->dma_conf.dma_buf_sz;

	plen = stmmac_get_rx_frame_len(priv, p, coe);

	/* Last descriptor */
	return plen - len;
}
static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
				struct xdp_frame *xdpf, bool dma_map)
{
	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	unsigned int entry = tx_q->cur_tx;
	struct dma_desc *tx_desc;
	dma_addr_t dma_addr;
	bool set_ic;

	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
		return STMMAC_XDP_CONSUMED;

	if (likely(priv->extend_desc))
		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		tx_desc = &tx_q->dma_entx[entry].basic;
	else
		tx_desc = tx_q->dma_tx + entry;

	if (dma_map) {
		dma_addr = dma_map_single(priv->device, xdpf->data,
					  xdpf->len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, dma_addr))
			return STMMAC_XDP_CONSUMED;

		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
	} else {
		struct page *page = virt_to_page(xdpf->data);

		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
			   xdpf->headroom;
		dma_sync_single_for_device(priv->device, dma_addr,
					   xdpf->len, DMA_BIDIRECTIONAL);

		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
	}

	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
	tx_q->tx_skbuff_dma[entry].map_as_page = false;
	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
	tx_q->tx_skbuff_dma[entry].last_segment = true;
	tx_q->tx_skbuff_dma[entry].is_jumbo = false;

	tx_q->xdpf[entry] = xdpf;

	stmmac_set_desc_addr(priv, tx_desc, dma_addr);

	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
			       true, priv->mode, true, true,
			       xdpf->len);

	tx_q->tx_count_frames++;

	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
		set_ic = true;
	else
		set_ic = false;

	if (set_ic) {
		unsigned long flags;

		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, tx_desc);
		flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
		txq_stats->tx_set_ic_bit++;
		u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
	}

	stmmac_enable_dma_transmission(priv, priv->ioaddr);

	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
	tx_q->cur_tx = entry;

	return STMMAC_XDP_TX;
}
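/* Note (not from the original source): the dma_map flag distinguishes the
 * ndo_xdp_xmit() path, where the xdp_frame memory must be freshly DMA-mapped
 * (STMMAC_TXBUF_T_XDP_NDO), from the XDP_TX path, where the frame still lives
 * in a page_pool page and only needs a DMA sync (STMMAC_TXBUF_T_XDP_TX).
 */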
static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
				   int cpu)
{
	int index = cpu;

	if (unlikely(index < 0))
		index = 0;

	while (index >= priv->plat->tx_queues_to_use)
		index -= priv->plat->tx_queues_to_use;

	return index;
}
static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
				struct xdp_buff *xdp)
{
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
	int cpu = smp_processor_id();
	struct netdev_queue *nq;
	int queue;
	int res;

	if (unlikely(!xdpf))
		return STMMAC_XDP_CONSUMED;

	queue = stmmac_xdp_get_tx_queue(priv, cpu);
	nq = netdev_get_tx_queue(priv->dev, queue);

	__netif_tx_lock(nq, cpu);
	/* Avoids TX time-out as we are sharing with slow path */
	txq_trans_cond_update(nq);

	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
	if (res == STMMAC_XDP_TX)
		stmmac_flush_tx_descriptors(priv, queue);

	__netif_tx_unlock(nq);

	return res;
}
static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
				 struct bpf_prog *prog,
				 struct xdp_buff *xdp)
{
	u32 act;
	int res;

	act = bpf_prog_run_xdp(prog, xdp);
	switch (act) {
	case XDP_PASS:
		res = STMMAC_XDP_PASS;
		break;
	case XDP_TX:
		res = stmmac_xdp_xmit_back(priv, xdp);
		break;
	case XDP_REDIRECT:
		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
			res = STMMAC_XDP_CONSUMED;
		else
			res = STMMAC_XDP_REDIRECT;
		break;
	default:
		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(priv->dev, prog, act);
		fallthrough;
	case XDP_DROP:
		res = STMMAC_XDP_CONSUMED;
		break;
	}

	return res;
}
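/* Note (not from the original source): the XDP verdict is folded into the
 * driver's own result mask: XDP_PASS keeps the frame for the normal stack,
 * XDP_TX bounces it through stmmac_xdp_xmit_back(), XDP_REDIRECT goes via
 * xdp_do_redirect(), and anything else is counted as consumed/dropped.
 */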
static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
					   struct xdp_buff *xdp)
{
	struct bpf_prog *prog;
	int res;

	prog = READ_ONCE(priv->xdp_prog);
	if (!prog) {
		res = STMMAC_XDP_PASS;
		goto out;
	}

	res = __stmmac_xdp_run_prog(priv, prog, xdp);
out:
	return ERR_PTR(-res);
}
static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
				   int xdp_status)
{
	int cpu = smp_processor_id();
	int queue;

	queue = stmmac_xdp_get_tx_queue(priv, cpu);

	if (xdp_status & STMMAC_XDP_TX)
		stmmac_tx_timer_arm(priv, queue);

	if (xdp_status & STMMAC_XDP_REDIRECT)
		xdp_do_flush();
}
static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
					       struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int datasize = xdp->data_end - xdp->data;
	struct sk_buff *skb;

	skb = __napi_alloc_skb(&ch->rxtx_napi,
			       xdp->data_end - xdp->data_hard_start,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
	if (metasize)
		skb_metadata_set(skb, metasize);

	return skb;
}
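/* Note (not from the original source): in the XSK zero-copy RX path the
 * hardware writes into the XSK buffer, so frames that still have to go to
 * the regular stack are copied into a freshly allocated skb here and the
 * XSK buffer is returned to the pool by the caller.
 */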
static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
				   struct dma_desc *p, struct dma_desc *np,
				   struct xdp_buff *xdp)
{
	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned int len = xdp->data_end - xdp->data;
	enum pkt_hash_types hash_type;
	int coe = priv->hw->rx_csum;
	unsigned long flags;
	struct sk_buff *skb;
	u32 hash;

	skb = stmmac_construct_skb_zc(ch, xdp);
	if (!skb) {
		priv->xstats.rx_dropped++;
		return;
	}

	stmmac_get_rx_hwtstamp(priv, p, np, skb);
	stmmac_rx_vlan(priv->dev, skb);
	skb->protocol = eth_type_trans(skb, priv->dev);

	if (unlikely(!coe))
		skb_checksum_none_assert(skb);
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
		skb_set_hash(skb, hash, hash_type);

	skb_record_rx_queue(skb, queue);
	napi_gro_receive(&ch->rxtx_napi, skb);

	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
	rxq_stats->rx_pkt_n++;
	rxq_stats->rx_bytes += len;
	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
}
static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
	unsigned int entry = rx_q->dirty_rx;
	struct dma_desc *rx_desc = NULL;
	bool ret = true;

	budget = min(budget, stmmac_rx_dirty(priv, queue));

	while (budget-- > 0 && entry != rx_q->cur_rx) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
		dma_addr_t dma_addr;
		bool use_rx_wd;

		if (!buf->xdp) {
			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
			if (!buf->xdp) {
				ret = false;
				break;
			}
		}

		if (priv->extend_desc)
			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			rx_desc = rx_q->dma_rx + entry;

		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
		stmmac_refill_desc3(priv, rx_q, rx_desc);

		rx_q->rx_count_frames++;
		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
			rx_q->rx_count_frames = 0;

		use_rx_wd = !priv->rx_coal_frames[queue];
		use_rx_wd |= rx_q->rx_count_frames > 0;
		if (!priv->use_riwt)
			use_rx_wd = false;

		dma_wmb();
		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);

		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
	}

	if (rx_desc) {
		rx_q->dirty_rx = entry;
		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
				     (rx_q->dirty_rx * sizeof(struct dma_desc));
		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
	}

	return ret;
}
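/* Note (not from the original source): the use_rx_wd flag passed to
 * stmmac_set_rx_owner() is derived from the RX frame-coalescing counters and
 * is forced off when the RX interrupt watchdog (priv->use_riwt) is not in
 * use, mirroring the logic in stmmac_rx_refill() above.
 */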
static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
{
	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
	 * to represent incoming packet, whereas cb field in the same structure
	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
	 */
	return (struct stmmac_xdp_buff *)xdp;
}
5084 static int stmmac_rx_zc(struct stmmac_priv
*priv
, int limit
, u32 queue
)
5086 struct stmmac_rxq_stats
*rxq_stats
= &priv
->xstats
.rxq_stats
[queue
];
5087 struct stmmac_rx_queue
*rx_q
= &priv
->dma_conf
.rx_queue
[queue
];
5088 unsigned int count
= 0, error
= 0, len
= 0;
5089 int dirty
= stmmac_rx_dirty(priv
, queue
);
5090 unsigned int next_entry
= rx_q
->cur_rx
;
5091 u32 rx_errors
= 0, rx_dropped
= 0;
5092 unsigned int desc_size
;
5093 struct bpf_prog
*prog
;
5094 bool failure
= false;
5095 unsigned long flags
;
5099 if (netif_msg_rx_status(priv
)) {
5102 netdev_dbg(priv
->dev
, "%s: descriptor ring:\n", __func__
);
5103 if (priv
->extend_desc
) {
5104 rx_head
= (void *)rx_q
->dma_erx
;
5105 desc_size
= sizeof(struct dma_extended_desc
);
5107 rx_head
= (void *)rx_q
->dma_rx
;
5108 desc_size
= sizeof(struct dma_desc
);
5111 stmmac_display_ring(priv
, rx_head
, priv
->dma_conf
.dma_rx_size
, true,
5112 rx_q
->dma_rx_phy
, desc_size
);
5114 while (count
< limit
) {
5115 struct stmmac_rx_buffer
*buf
;
5116 struct stmmac_xdp_buff
*ctx
;
5117 unsigned int buf1_len
= 0;
5118 struct dma_desc
*np
, *p
;
5122 if (!count
&& rx_q
->state_saved
) {
5123 error
= rx_q
->state
.error
;
5124 len
= rx_q
->state
.len
;
5126 rx_q
->state_saved
= false;
5137 buf
= &rx_q
->buf_pool
[entry
];
5139 if (dirty
>= STMMAC_RX_FILL_BATCH
) {
5140 failure
= failure
||
5141 !stmmac_rx_refill_zc(priv
, queue
, dirty
);
5145 if (priv
->extend_desc
)
5146 p
= (struct dma_desc
*)(rx_q
->dma_erx
+ entry
);
5148 p
= rx_q
->dma_rx
+ entry
;
5150 /* read the status of the incoming frame */
5151 status
= stmmac_rx_status(priv
, &priv
->xstats
, p
);
5152 /* check if managed by the DMA otherwise go ahead */
5153 if (unlikely(status
& dma_own
))
5156 /* Prefetch the next RX descriptor */
5157 rx_q
->cur_rx
= STMMAC_GET_ENTRY(rx_q
->cur_rx
,
5158 priv
->dma_conf
.dma_rx_size
);
5159 next_entry
= rx_q
->cur_rx
;
5161 if (priv
->extend_desc
)
5162 np
= (struct dma_desc
*)(rx_q
->dma_erx
+ next_entry
);
5164 np
= rx_q
->dma_rx
+ next_entry
;
5168 /* Ensure a valid XSK buffer before proceed */
5172 if (priv
->extend_desc
)
5173 stmmac_rx_extended_status(priv
, &priv
->xstats
,
5174 rx_q
->dma_erx
+ entry
);
5175 if (unlikely(status
== discard_frame
)) {
5176 xsk_buff_free(buf
->xdp
);
5180 if (!priv
->hwts_rx_en
)
5184 if (unlikely(error
&& (status
& rx_not_ls
)))
5186 if (unlikely(error
)) {
5191 /* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5192 if (likely(status
& rx_not_ls
)) {
5193 xsk_buff_free(buf
->xdp
);
5200 ctx
= xsk_buff_to_stmmac_ctx(buf
->xdp
);
5205 /* XDP ZC Frame only support primary buffers for now */
5206 buf1_len
= stmmac_rx_buf1_len(priv
, p
, status
, len
);
5209 /* ACS is disabled; strip manually. */
5210 if (likely(!(status
& rx_not_ls
))) {
5211 buf1_len
-= ETH_FCS_LEN
;
5215 /* RX buffer is good and fit into a XSK pool buffer */
5216 buf
->xdp
->data_end
= buf
->xdp
->data
+ buf1_len
;
5217 xsk_buff_dma_sync_for_cpu(buf
->xdp
, rx_q
->xsk_pool
);
5219 prog
= READ_ONCE(priv
->xdp_prog
);
5220 res
= __stmmac_xdp_run_prog(priv
, prog
, buf
->xdp
);
5223 case STMMAC_XDP_PASS
:
5224 stmmac_dispatch_skb_zc(priv
, queue
, p
, np
, buf
->xdp
);
5225 xsk_buff_free(buf
->xdp
);
5227 case STMMAC_XDP_CONSUMED
:
5228 xsk_buff_free(buf
->xdp
);
5232 case STMMAC_XDP_REDIRECT
:
5242 if (status
& rx_not_ls
) {
5243 rx_q
->state_saved
= true;
5244 rx_q
->state
.error
= error
;
5245 rx_q
->state
.len
= len
;
5248 stmmac_finalize_xdp_rx(priv
, xdp_status
);
5250 flags
= u64_stats_update_begin_irqsave(&rxq_stats
->syncp
);
5251 rxq_stats
->rx_pkt_n
+= count
;
5252 u64_stats_update_end_irqrestore(&rxq_stats
->syncp
, flags
);
5254 priv
->xstats
.rx_dropped
+= rx_dropped
;
5255 priv
->xstats
.rx_errors
+= rx_errors
;
5257 if (xsk_uses_need_wakeup(rx_q
->xsk_pool
)) {
5258 if (failure
|| stmmac_rx_dirty(priv
, queue
) > 0)
5259 xsk_set_rx_need_wakeup(rx_q
->xsk_pool
);
5261 xsk_clear_rx_need_wakeup(rx_q
->xsk_pool
);
5266 return failure
? limit
: (int)count
;
/**
 * stmmac_rx - manage the receive process
 * @priv: driver private structure
 * @limit: napi budget
 * @queue: RX queue index.
 * Description : this is the function called by the napi poll method.
 * It gets all the frames inside the ring.
 */
5277 static int stmmac_rx(struct stmmac_priv
*priv
, int limit
, u32 queue
)
5279 u32 rx_errors
= 0, rx_dropped
= 0, rx_bytes
= 0, rx_packets
= 0;
5280 struct stmmac_rxq_stats
*rxq_stats
= &priv
->xstats
.rxq_stats
[queue
];
5281 struct stmmac_rx_queue
*rx_q
= &priv
->dma_conf
.rx_queue
[queue
];
5282 struct stmmac_channel
*ch
= &priv
->channel
[queue
];
5283 unsigned int count
= 0, error
= 0, len
= 0;
5284 int status
= 0, coe
= priv
->hw
->rx_csum
;
5285 unsigned int next_entry
= rx_q
->cur_rx
;
5286 enum dma_data_direction dma_dir
;
5287 unsigned int desc_size
;
5288 struct sk_buff
*skb
= NULL
;
5289 struct stmmac_xdp_buff ctx
;
5290 unsigned long flags
;
5294 dma_dir
= page_pool_get_dma_dir(rx_q
->page_pool
);
5295 buf_sz
= DIV_ROUND_UP(priv
->dma_conf
.dma_buf_sz
, PAGE_SIZE
) * PAGE_SIZE
;
5297 if (netif_msg_rx_status(priv
)) {
5300 netdev_dbg(priv
->dev
, "%s: descriptor ring:\n", __func__
);
5301 if (priv
->extend_desc
) {
5302 rx_head
= (void *)rx_q
->dma_erx
;
5303 desc_size
= sizeof(struct dma_extended_desc
);
5305 rx_head
= (void *)rx_q
->dma_rx
;
5306 desc_size
= sizeof(struct dma_desc
);
5309 stmmac_display_ring(priv
, rx_head
, priv
->dma_conf
.dma_rx_size
, true,
5310 rx_q
->dma_rx_phy
, desc_size
);
5312 while (count
< limit
) {
5313 unsigned int buf1_len
= 0, buf2_len
= 0;
5314 enum pkt_hash_types hash_type
;
5315 struct stmmac_rx_buffer
*buf
;
5316 struct dma_desc
*np
, *p
;
5320 if (!count
&& rx_q
->state_saved
) {
5321 skb
= rx_q
->state
.skb
;
5322 error
= rx_q
->state
.error
;
5323 len
= rx_q
->state
.len
;
5325 rx_q
->state_saved
= false;
5338 buf
= &rx_q
->buf_pool
[entry
];
5340 if (priv
->extend_desc
)
5341 p
= (struct dma_desc
*)(rx_q
->dma_erx
+ entry
);
5343 p
= rx_q
->dma_rx
+ entry
;
5345 /* read the status of the incoming frame */
5346 status
= stmmac_rx_status(priv
, &priv
->xstats
, p
);
5347 /* check if managed by the DMA otherwise go ahead */
5348 if (unlikely(status
& dma_own
))
5351 rx_q
->cur_rx
= STMMAC_GET_ENTRY(rx_q
->cur_rx
,
5352 priv
->dma_conf
.dma_rx_size
);
5353 next_entry
= rx_q
->cur_rx
;
5355 if (priv
->extend_desc
)
5356 np
= (struct dma_desc
*)(rx_q
->dma_erx
+ next_entry
);
5358 np
= rx_q
->dma_rx
+ next_entry
;
5362 if (priv
->extend_desc
)
5363 stmmac_rx_extended_status(priv
, &priv
->xstats
, rx_q
->dma_erx
+ entry
);
5364 if (unlikely(status
== discard_frame
)) {
5365 page_pool_recycle_direct(rx_q
->page_pool
, buf
->page
);
5368 if (!priv
->hwts_rx_en
)
5372 if (unlikely(error
&& (status
& rx_not_ls
)))
5374 if (unlikely(error
)) {
5381 /* Buffer is good. Go on. */
5383 prefetch(page_address(buf
->page
) + buf
->page_offset
);
5385 prefetch(page_address(buf
->sec_page
));
5387 buf1_len
= stmmac_rx_buf1_len(priv
, p
, status
, len
);
5389 buf2_len
= stmmac_rx_buf2_len(priv
, p
, status
, len
);
5392 /* ACS is disabled; strip manually. */
5393 if (likely(!(status
& rx_not_ls
))) {
5395 buf2_len
-= ETH_FCS_LEN
;
5397 } else if (buf1_len
) {
5398 buf1_len
-= ETH_FCS_LEN
;
5404 unsigned int pre_len
, sync_len
;
5406 dma_sync_single_for_cpu(priv
->device
, buf
->addr
,
5409 xdp_init_buff(&ctx
.xdp
, buf_sz
, &rx_q
->xdp_rxq
);
5410 xdp_prepare_buff(&ctx
.xdp
, page_address(buf
->page
),
5411 buf
->page_offset
, buf1_len
, true);
5413 pre_len
= ctx
.xdp
.data_end
- ctx
.xdp
.data_hard_start
-
5420 skb
= stmmac_xdp_run_prog(priv
, &ctx
.xdp
);
5421 /* Due xdp_adjust_tail: DMA sync for_device
5422 * cover max len CPU touch
5424 sync_len
= ctx
.xdp
.data_end
- ctx
.xdp
.data_hard_start
-
5426 sync_len
= max(sync_len
, pre_len
);
5428 /* For Not XDP_PASS verdict */
5430 unsigned int xdp_res
= -PTR_ERR(skb
);
5432 if (xdp_res
& STMMAC_XDP_CONSUMED
) {
5433 page_pool_put_page(rx_q
->page_pool
,
5434 virt_to_head_page(ctx
.xdp
.data
),
5439 /* Clear skb as it was set as
5440 * status by XDP program.
5444 if (unlikely((status
& rx_not_ls
)))
5449 } else if (xdp_res
& (STMMAC_XDP_TX
|
5450 STMMAC_XDP_REDIRECT
)) {
5451 xdp_status
|= xdp_res
;
5461 /* XDP program may expand or reduce tail */
5462 buf1_len
= ctx
.xdp
.data_end
- ctx
.xdp
.data
;
5464 skb
= napi_alloc_skb(&ch
->rx_napi
, buf1_len
);
5471 /* XDP program may adjust header */
5472 skb_copy_to_linear_data(skb
, ctx
.xdp
.data
, buf1_len
);
5473 skb_put(skb
, buf1_len
);
5475 /* Data payload copied into SKB, page ready for recycle */
5476 page_pool_recycle_direct(rx_q
->page_pool
, buf
->page
);
5478 } else if (buf1_len
) {
5479 dma_sync_single_for_cpu(priv
->device
, buf
->addr
,
5481 skb_add_rx_frag(skb
, skb_shinfo(skb
)->nr_frags
,
5482 buf
->page
, buf
->page_offset
, buf1_len
,
5483 priv
->dma_conf
.dma_buf_sz
);
5485 /* Data payload appended into SKB */
5486 skb_mark_for_recycle(skb
);
5491 dma_sync_single_for_cpu(priv
->device
, buf
->sec_addr
,
5493 skb_add_rx_frag(skb
, skb_shinfo(skb
)->nr_frags
,
5494 buf
->sec_page
, 0, buf2_len
,
5495 priv
->dma_conf
.dma_buf_sz
);
5497 /* Data payload appended into SKB */
5498 skb_mark_for_recycle(skb
);
5499 buf
->sec_page
= NULL
;
5503 if (likely(status
& rx_not_ls
))
5508 /* Got entire packet into SKB. Finish it. */
5510 stmmac_get_rx_hwtstamp(priv
, p
, np
, skb
);
5511 stmmac_rx_vlan(priv
->dev
, skb
);
5512 skb
->protocol
= eth_type_trans(skb
, priv
->dev
);
5515 skb_checksum_none_assert(skb
);
5517 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
5519 if (!stmmac_get_rx_hash(priv
, p
, &hash
, &hash_type
))
5520 skb_set_hash(skb
, hash
, hash_type
);
5522 skb_record_rx_queue(skb
, queue
);
5523 napi_gro_receive(&ch
->rx_napi
, skb
);
5531 if (status
& rx_not_ls
|| skb
) {
5532 rx_q
->state_saved
= true;
5533 rx_q
->state
.skb
= skb
;
5534 rx_q
->state
.error
= error
;
5535 rx_q
->state
.len
= len
;
5538 stmmac_finalize_xdp_rx(priv
, xdp_status
);
5540 stmmac_rx_refill(priv
, queue
);
5542 flags
= u64_stats_update_begin_irqsave(&rxq_stats
->syncp
);
5543 rxq_stats
->rx_packets
+= rx_packets
;
5544 rxq_stats
->rx_bytes
+= rx_bytes
;
5545 rxq_stats
->rx_pkt_n
+= count
;
5546 u64_stats_update_end_irqrestore(&rxq_stats
->syncp
, flags
);
5548 priv
->xstats
.rx_dropped
+= rx_dropped
;
5549 priv
->xstats
.rx_errors
+= rx_errors
;
static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
{
	struct stmmac_channel *ch =
		container_of(napi, struct stmmac_channel, rx_napi);
	struct stmmac_priv *priv = ch->priv_data;
	struct stmmac_rxq_stats *rxq_stats;
	u32 chan = ch->index;
	unsigned long flags;
	int work_done;

	rxq_stats = &priv->xstats.rxq_stats[chan];
	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
	rxq_stats->napi_poll++;
	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);

	work_done = stmmac_rx(priv, budget, chan);
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
		spin_unlock_irqrestore(&ch->lock, flags);
	}

	return work_done;
}
static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
{
	struct stmmac_channel *ch =
		container_of(napi, struct stmmac_channel, tx_napi);
	struct stmmac_priv *priv = ch->priv_data;
	struct stmmac_txq_stats *txq_stats;
	bool pending_packets = false;
	u32 chan = ch->index;
	unsigned long flags;
	int work_done;

	txq_stats = &priv->xstats.txq_stats[chan];
	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
	txq_stats->napi_poll++;
	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);

	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
	work_done = min(work_done, budget);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
		spin_unlock_irqrestore(&ch->lock, flags);
	}

	/* TX still has packets to handle, check if we need to arm the tx timer */
	if (pending_packets)
		stmmac_tx_timer_arm(priv, chan);

	return work_done;
}
static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
{
	struct stmmac_channel *ch =
		container_of(napi, struct stmmac_channel, rxtx_napi);
	struct stmmac_priv *priv = ch->priv_data;
	bool tx_pending_packets = false;
	int rx_done, tx_done, rxtx_done;
	struct stmmac_rxq_stats *rxq_stats;
	struct stmmac_txq_stats *txq_stats;
	u32 chan = ch->index;
	unsigned long flags;

	rxq_stats = &priv->xstats.rxq_stats[chan];
	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
	rxq_stats->napi_poll++;
	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);

	txq_stats = &priv->xstats.txq_stats[chan];
	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
	txq_stats->napi_poll++;
	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);

	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
	tx_done = min(tx_done, budget);

	rx_done = stmmac_rx_zc(priv, budget, chan);

	rxtx_done = max(tx_done, rx_done);

	/* If either TX or RX work is not complete, return budget
	 * and keep polling.
	 */
	if (rxtx_done >= budget)
		return budget;

	/* all work done, exit the polling mode */
	if (napi_complete_done(napi, rxtx_done)) {
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		/* Both RX and TX work are complete,
		 * so enable both RX & TX IRQs.
		 */
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
		spin_unlock_irqrestore(&ch->lock, flags);
	}

	/* TX still has packets to handle, check if we need to arm the tx timer */
	if (tx_pending_packets)
		stmmac_tx_timer_arm(priv, chan);

	return min(rxtx_done, budget - 1);
}
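/* Note (not from the original source): once napi_complete_done() has been
 * called the handler must report strictly less than the NAPI budget, hence
 * the clamp to budget - 1 above.
 */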
/**
 *  stmmac_tx_timeout
 *  @dev : Pointer to net device structure
 *  @txqueue: the index of the hanging transmit queue
 *  Description: this function is called when a packet transmission fails to
 *   complete within a reasonable time. The driver will mark the error in the
 *   netdev structure and arrange for the device to be reset to a sane state
 *   in order to transmit a new packet.
 */
static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	stmmac_global_err(priv);
}
/**
 *  stmmac_set_rx_mode - entry point for multicast addressing
 *  @dev : pointer to the device structure
 *  Description:
 *  This function is a driver entry point which gets called by the kernel
 *  whenever multicast addresses must be enabled/disabled.
 *  Return value:
 *  void.
 */
static void stmmac_set_rx_mode(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	stmmac_set_filter(priv, priv->hw, dev);
}
/**
 *  stmmac_change_mtu - entry point to change MTU size for the device.
 *  @dev : device pointer.
 *  @new_mtu : the new MTU size for the device.
 *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
 *  to drive packet transmission. Ethernet has an MTU of 1500 octets
 *  (ETH_DATA_LEN). This value can be changed with ifconfig.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int txfifosz = priv->plat->tx_fifo_size;
	struct stmmac_dma_conf *dma_conf;
	const int mtu = new_mtu;
	int ret;

	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	txfifosz /= priv->plat->tx_queues_to_use;

	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
		return -EINVAL;
	}

	new_mtu = STMMAC_ALIGN(new_mtu);

	/* If condition true, FIFO is too small or MTU too large */
	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
		return -EINVAL;

	if (netif_running(dev)) {
		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
		/* Try to allocate the new DMA conf with the new mtu */
		dma_conf = stmmac_setup_dma_desc(priv, mtu);
		if (IS_ERR(dma_conf)) {
			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
				   mtu);
			return PTR_ERR(dma_conf);
		}

		stmmac_release(dev);

		ret = __stmmac_open(dev, dma_conf);
		if (ret) {
			free_dma_desc_resources(priv, dma_conf);
			kfree(dma_conf);
			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
			return ret;
		}

		kfree(dma_conf);

		stmmac_set_rx_mode(dev);
	}

	dev->mtu = mtu;
	netdev_update_features(dev);

	return 0;
}
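/* Note (not from the original source): changing the MTU on a running
 * interface requires re-sizing the RX/TX DMA buffers, so the driver first
 * allocates a whole new DMA configuration and only then tears down and
 * reopens the interface, keeping the old configuration if the allocation
 * fails.
 */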
static netdev_features_t stmmac_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
		features &= ~NETIF_F_RXCSUM;

	if (!priv->plat->tx_coe)
		features &= ~NETIF_F_CSUM_MASK;

	/* Some GMAC devices have a bugged Jumbo frame support that
	 * needs to have the Tx COE disabled for oversized frames
	 * (due to limited buffer sizes). In this case we disable
	 * the TX csum insertion in the TDES and not use SF.
	 */
	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
		features &= ~NETIF_F_CSUM_MASK;

	/* Disable tso if asked by ethtool */
	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
		if (features & NETIF_F_TSO)
			priv->tso = true;
		else
			priv->tso = false;
	}

	return features;
}
static int stmmac_set_features(struct net_device *netdev,
			       netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(netdev);

	/* Keep the COE Type if RX checksumming is supported */
	if (features & NETIF_F_RXCSUM)
		priv->hw->rx_csum = priv->plat->rx_coe;
	else
		priv->hw->rx_csum = 0;
	/* No check needed because rx_coe has been set before and it will be
	 * fixed in case of issue.
	 */
	stmmac_rx_ipc(priv, priv->hw);

	if (priv->sph_cap) {
		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
		u32 chan;

		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
	}

	return 0;
}
static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
{
	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
	bool *hs_enable = &fpe_cfg->hs_enable;

	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
		return;

	/* If LP has sent verify mPacket, LP is FPE capable */
	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
		if (*lp_state < FPE_STATE_CAPABLE)
			*lp_state = FPE_STATE_CAPABLE;

		/* If user has requested FPE enable, respond quickly */
		if (*hs_enable)
			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
						fpe_cfg,
						MPACKET_RESPONSE);
	}

	/* If Local has sent verify mPacket, Local is FPE capable */
	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
		if (*lo_state < FPE_STATE_CAPABLE)
			*lo_state = FPE_STATE_CAPABLE;
	}

	/* If LP has sent response mPacket, LP is entering FPE ON */
	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
		*lp_state = FPE_STATE_ENTERING_ON;

	/* If Local has sent response mPacket, Local is entering FPE ON */
	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
		*lo_state = FPE_STATE_ENTERING_ON;

	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
	    priv->fpe_wq)
		queue_work(priv->fpe_wq, &priv->fpe_task);
}
static void stmmac_common_interrupt(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queues_count;
	u32 queue;
	bool xmac;

	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;

	if (priv->irq_wake)
		pm_wakeup_event(priv->device, 0);

	if (priv->dma_cap.estsel)
		stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
				      &priv->xstats, tx_cnt);

	if (priv->dma_cap.fpesel) {
		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
						   priv->dev);

		stmmac_fpe_event_status(priv, status);
	}

	/* To handle GMAC own interrupts */
	if ((priv->plat->has_gmac) || xmac) {
		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);

		if (unlikely(status)) {
			/* For LPI we need to save the tx status */
			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
				priv->tx_path_in_lpi_mode = true;
			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
				priv->tx_path_in_lpi_mode = false;
		}

		for (queue = 0; queue < queues_count; queue++) {
			status = stmmac_host_mtl_irq_status(priv, priv->hw,
							    queue);
		}

		/* PCS link status */
		if (priv->hw->pcs &&
		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
			if (priv->xstats.pcs_link)
				netif_carrier_on(priv->dev);
			else
				netif_carrier_off(priv->dev);
		}

		stmmac_timestamp_interrupt(priv, priv);
	}
}
/**
 *  stmmac_interrupt - main ISR
 *  @irq: interrupt number.
 *  @dev_id: to pass the net device pointer.
 *  Description: this is the main driver interrupt service routine.
 *  It can call:
 *  o DMA service routine (to manage incoming frame reception and transmission
 *    status)
 *  o Core interrupts to manage: remote wake-up, management counter, LPI
 *    interrupts.
 */
static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Check if adapter is up */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return IRQ_HANDLED;

	/* Check if a fatal error happened */
	if (stmmac_safety_feat_interrupt(priv))
		return IRQ_HANDLED;

	/* To handle Common interrupts */
	stmmac_common_interrupt(priv);

	/* To handle DMA interrupts */
	stmmac_dma_interrupt(priv);

	return IRQ_HANDLED;
}
static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (unlikely(!dev)) {
		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
		return IRQ_NONE;
	}

	/* Check if adapter is up */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return IRQ_HANDLED;

	/* To handle Common interrupts */
	stmmac_common_interrupt(priv);

	return IRQ_HANDLED;
}
static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (unlikely(!dev)) {
		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
		return IRQ_NONE;
	}

	/* Check if adapter is up */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return IRQ_HANDLED;

	/* Check if a fatal error happened */
	stmmac_safety_feat_interrupt(priv);

	return IRQ_HANDLED;
}
static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
{
	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
	struct stmmac_dma_conf *dma_conf;
	int chan = tx_q->queue_index;
	struct stmmac_priv *priv;
	int status;

	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);

	if (unlikely(!data)) {
		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
		return IRQ_NONE;
	}

	/* Check if adapter is up */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return IRQ_HANDLED;

	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);

	if (unlikely(status & tx_hard_error_bump_tc)) {
		/* Try to bump up the dma threshold on this failure */
		stmmac_bump_dma_threshold(priv, chan);
	} else if (unlikely(status == tx_hard_error)) {
		stmmac_tx_err(priv, chan);
	}

	return IRQ_HANDLED;
}
static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
{
	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
	struct stmmac_dma_conf *dma_conf;
	int chan = rx_q->queue_index;
	struct stmmac_priv *priv;

	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);

	if (unlikely(!data)) {
		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
		return IRQ_NONE;
	}

	/* Check if adapter is up */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return IRQ_HANDLED;

	stmmac_napi_check(priv, chan, DMA_DIR_RX);

	return IRQ_HANDLED;
}
/**
 *  stmmac_ioctl - Entry point for the Ioctl
 *  @dev: Device pointer.
 *  @rq: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  @cmd: IOCTL command
 *  Description:
 *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
 */
static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
		break;
	case SIOCSHWTSTAMP:
		ret = stmmac_hwtstamp_set(dev, rq);
		break;
	case SIOCGHWTSTAMP:
		ret = stmmac_hwtstamp_get(dev, rq);
		break;
	default:
		break;
	}

	return ret;
}
static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				    void *cb_priv)
{
	struct stmmac_priv *priv = cb_priv;
	int ret = -EOPNOTSUPP;

	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
		return ret;

	__stmmac_disable_all_queues(priv);

	switch (type) {
	case TC_SETUP_CLSU32:
		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
		break;
	case TC_SETUP_CLSFLOWER:
		ret = stmmac_tc_setup_cls(priv, priv, type_data);
		break;
	default:
		break;
	}

	stmmac_enable_all_queues(priv);
	return ret;
}

static LIST_HEAD(stmmac_block_cb_list);
static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
			   void *type_data)
{
	struct stmmac_priv *priv = netdev_priv(ndev);

	switch (type) {
	case TC_QUERY_CAPS:
		return stmmac_tc_query_caps(priv, priv, type_data);
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &stmmac_block_cb_list,
						  stmmac_setup_tc_block_cb,
						  priv, priv, true);
	case TC_SETUP_QDISC_CBS:
		return stmmac_tc_setup_cbs(priv, priv, type_data);
	case TC_SETUP_QDISC_TAPRIO:
		return stmmac_tc_setup_taprio(priv, priv, type_data);
	case TC_SETUP_QDISC_ETF:
		return stmmac_tc_setup_etf(priv, priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
			       struct net_device *sb_dev)
{
	int gso = skb_shinfo(skb)->gso_type;

	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
		/*
		 * There is no way to determine the number of TSO/USO
		 * capable Queues. Let's use always the Queue 0
		 * because if TSO/USO is supported then at least this
		 * one will be capable.
		 */
		return 0;
	}

	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
}
static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret = 0;

	ret = pm_runtime_resume_and_get(priv->device);
	if (ret < 0)
		return ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		goto set_mac_error;

	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);

set_mac_error:
	pm_runtime_put(priv->device);

	return ret;
}
#ifdef CONFIG_DEBUG_FS
static struct dentry *stmmac_fs_dir;

static void sysfs_display_ring(void *head, int size, int extend_desc,
			       struct seq_file *seq, dma_addr_t dma_phy_addr)
{
	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
	struct dma_desc *p = (struct dma_desc *)head;
	dma_addr_t dma_addr;
	int i;

	for (i = 0; i < size; i++) {
		if (extend_desc) {
			dma_addr = dma_phy_addr + i * sizeof(*ep);
			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, &dma_addr,
				   le32_to_cpu(ep->basic.des0),
				   le32_to_cpu(ep->basic.des1),
				   le32_to_cpu(ep->basic.des2),
				   le32_to_cpu(ep->basic.des3));
			ep++;
		} else {
			dma_addr = dma_phy_addr + i * sizeof(*p);
			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, &dma_addr,
				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
			p++;
		}
		seq_printf(seq, "\n");
	}
}
static int stmmac_rings_status_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 queue;

	if ((dev->flags & IFF_UP) == 0)
		return 0;

	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];

		seq_printf(seq, "RX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_erx,
					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
		} else {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_rx,
					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
		}
	}

	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];

		seq_printf(seq, "TX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_etx,
					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_tx,
					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
		}
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
{
	static const char * const dwxgmac_timestamp_source[] = {
		"None",
		"Internal",
		"External",
		"Both",
	};
	static const char * const dwxgmac_safety_feature_desc[] = {
		"No",
		"All Safety Features with ECC and Parity",
		"All Safety Features without ECC or Parity",
		"All Safety Features with Parity Only",
		"ECC Only",
		"UNDEFINED",
		"UNDEFINED",
		"UNDEFINED",
	};
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (!priv->hw_cap_support) {
		seq_printf(seq, "DMA HW features not supported\n");
		return 0;
	}

	seq_printf(seq, "==============================\n");
	seq_printf(seq, "\tDMA HW features\n");
	seq_printf(seq, "==============================\n");

	seq_printf(seq, "\t10/100 Mbps: %s\n",
		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
	seq_printf(seq, "\t1000 Mbps: %s\n",
		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
	seq_printf(seq, "\tHalf duplex: %s\n",
		   (priv->dma_cap.half_duplex) ? "Y" : "N");
	if (priv->plat->has_xgmac) {
		seq_printf(seq,
			   "\tNumber of Additional MAC address registers: %d\n",
			   priv->dma_cap.multi_addr);
	} else {
		seq_printf(seq, "\tHash Filter: %s\n",
			   (priv->dma_cap.hash_filter) ? "Y" : "N");
		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
			   (priv->dma_cap.multi_addr) ? "Y" : "N");
	}
	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
		   (priv->dma_cap.pcs) ? "Y" : "N");
	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
	seq_printf(seq, "\tPMT Remote wake up: %s\n",
		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
	seq_printf(seq, "\tPMT Magic Frame: %s\n",
		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
	seq_printf(seq, "\tRMON module: %s\n",
		   (priv->dma_cap.rmon) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
		   (priv->dma_cap.time_stamp) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
	if (priv->plat->has_xgmac)
		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
		   (priv->dma_cap.eee) ? "Y" : "N");
	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
		   (priv->dma_cap.tx_coe) ? "Y" : "N");
	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
	    priv->plat->has_xgmac) {
		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
			   (priv->dma_cap.rx_coe) ? "Y" : "N");
	} else {
		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
	}
	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
		   priv->dma_cap.number_rx_channel);
	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
		   priv->dma_cap.number_tx_channel);
	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
		   priv->dma_cap.number_rx_queues);
	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
		   priv->dma_cap.number_tx_queues);
	seq_printf(seq, "\tEnhanced descriptors: %s\n",
		   (priv->dma_cap.enh_desc) ? "Y" : "N");
	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
		   priv->dma_cap.pps_out_num);
	seq_printf(seq, "\tSafety Features: %s\n",
		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
	seq_printf(seq, "\tFlexible RX Parser: %s\n",
		   priv->dma_cap.frpsel ? "Y" : "N");
	seq_printf(seq, "\tEnhanced Addressing: %d\n",
		   priv->dma_cap.host_dma_width);
	seq_printf(seq, "\tReceive Side Scaling: %s\n",
		   priv->dma_cap.rssen ? "Y" : "N");
	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
		   priv->dma_cap.vlhash ? "Y" : "N");
	seq_printf(seq, "\tSplit Header: %s\n",
		   priv->dma_cap.sphen ? "Y" : "N");
	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
		   priv->dma_cap.vlins ? "Y" : "N");
	seq_printf(seq, "\tDouble VLAN: %s\n",
		   priv->dma_cap.dvlan ? "Y" : "N");
	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
		   priv->dma_cap.l3l4fnum);
	seq_printf(seq, "\tARP Offloading: %s\n",
		   priv->dma_cap.arpoffsel ? "Y" : "N");
	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
		   priv->dma_cap.estsel ? "Y" : "N");
	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
		   priv->dma_cap.fpesel ? "Y" : "N");
	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
		   priv->dma_cap.tbssel ? "Y" : "N");
	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
		   priv->dma_cap.tbs_ch_num);
	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
		   priv->dma_cap.sgfsel ? "Y" : "N");
	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
		   BIT(priv->dma_cap.ttsfd) >> 1);
	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
		   priv->dma_cap.numtc);
	seq_printf(seq, "\tDCB Feature: %s\n",
		   priv->dma_cap.dcben ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
		   priv->dma_cap.advthword ? "Y" : "N");
	seq_printf(seq, "\tPTP Offload: %s\n",
		   priv->dma_cap.ptoen ? "Y" : "N");
	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
		   priv->dma_cap.osten ? "Y" : "N");
	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
		   priv->dma_cap.pfcen ? "Y" : "N");
	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
		   BIT(priv->dma_cap.frpes) << 6);
	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
		   BIT(priv->dma_cap.frpbs) << 6);
	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
		   priv->dma_cap.frppipe_num);
	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
		   priv->dma_cap.nrvf_num ?
		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
	seq_printf(seq, "\tDepth of GCL: %lu\n",
		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
		   priv->dma_cap.cbtisel ? "Y" : "N");
	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
		   priv->dma_cap.aux_snapshot_n);
	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
		   priv->dma_cap.pou_ost_en ? "Y" : "N");
	seq_printf(seq, "\tEnhanced DMA: %s\n",
		   priv->dma_cap.edma ? "Y" : "N");
	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
		   priv->dma_cap.ediffc ? "Y" : "N");
	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
		   priv->dma_cap.vxn ? "Y" : "N");
	seq_printf(seq, "\tDebug Memory Interface: %s\n",
		   priv->dma_cap.dbgmem ? "Y" : "N");
	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
/* Use network device events to rename debugfs file entries.
 */
static int stmmac_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct stmmac_priv *priv = netdev_priv(dev);

	if (dev->netdev_ops != &stmmac_netdev_ops)
		goto done;

	switch (event) {
	case NETDEV_CHANGENAME:
		if (priv->dbgfs_dir)
			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
							 priv->dbgfs_dir,
							 stmmac_fs_dir,
							 dev->name);
		break;
	}
done:
	return NOTIFY_DONE;
}

static struct notifier_block stmmac_notifier = {
	.notifier_call = stmmac_device_event,
};

static void stmmac_init_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Create per netdev entries */
	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);

	/* Entry to report DMA RX/TX rings */
	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
			    &stmmac_rings_status_fops);

	/* Entry to report the DMA HW features */
	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
			    &stmmac_dma_cap_fops);
}
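/* Illustrative note: with debugfs mounted, the two read-only entries created
 * above are typically found under
 *   /sys/kernel/debug/stmmaceth/<ifname>/descriptors_status
 *   /sys/kernel/debug/stmmaceth/<ifname>/dma_cap
 * where the root directory name comes from STMMAC_RESOURCE_NAME and the
 * per-device subdirectory from the netdev name.
 */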
static void stmmac_exit_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	debugfs_remove_recursive(priv->dbgfs_dir);
}
#endif /* CONFIG_DEBUG_FS */
static u32 stmmac_vid_crc32_le(__le16 vid_le)
{
	unsigned char *data = (unsigned char *)&vid_le;
	unsigned char data_byte = 0;
	u32 crc = ~0x0;
	u32 temp = 0;
	int i, bits;

	bits = get_bitmask_order(VLAN_VID_MASK);
	for (i = 0; i < bits; i++) {
		if ((i % 8) == 0)
			data_byte = data[i / 8];

		temp = ((crc & 1) ^ data_byte) & 1;
		crc >>= 1;
		data_byte >>= 1;

		if (temp)
			crc ^= 0xedb88320;
	}

	return crc;
}

static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
{
	u32 crc, hash = 0;
	__le16 pmatch = 0;
	int count = 0;
	u16 vid = 0;

	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
		__le16 vid_le = cpu_to_le16(vid);
		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
		hash |= (1 << crc);
		count++;
	}

	if (!priv->dma_cap.vlhash) {
		if (count > 2) /* VID = 0 always passes filter */
			return -EOPNOTSUPP;

		pmatch = cpu_to_le16(vid);
		hash = 0;
	}

	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
}
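/* Illustrative note on the update above: for each active VID, the 4 most
 * significant bits of the bit-reversed CRC-32 pick one of 16 hash bits, so
 * several VIDs may share a bit and the occasional false positive is dropped
 * later by the VLAN core. Without the vlhash capability only a single
 * perfect-match entry is available, which is why more than two VIDs
 * (VID 0 always passes the filter) makes the update bail out.
 */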
static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	bool is_double = false;
	int ret;

	ret = pm_runtime_resume_and_get(priv->device);
	if (ret < 0)
		return ret;

	if (be16_to_cpu(proto) == ETH_P_8021AD)
		is_double = true;

	set_bit(vid, priv->active_vlans);
	ret = stmmac_vlan_update(priv, is_double);
	if (ret) {
		clear_bit(vid, priv->active_vlans);
		goto err_pm_put;
	}

	if (priv->hw->num_vlan) {
		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
		if (ret)
			goto err_pm_put;
	}
err_pm_put:
	pm_runtime_put(priv->device);

	return ret;
}
static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	bool is_double = false;
	int ret;

	ret = pm_runtime_resume_and_get(priv->device);
	if (ret < 0)
		return ret;

	if (be16_to_cpu(proto) == ETH_P_8021AD)
		is_double = true;

	clear_bit(vid, priv->active_vlans);

	if (priv->hw->num_vlan) {
		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
		if (ret)
			goto del_vlan_error;
	}

	ret = stmmac_vlan_update(priv, is_double);

del_vlan_error:
	pm_runtime_put(priv->device);

	return ret;
}
static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
	case XDP_SETUP_XSK_POOL:
		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
					     bpf->xsk.queue_id);
	default:
		return -EOPNOTSUPP;
	}
}
static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
			   struct xdp_frame **frames, u32 flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int cpu = smp_processor_id();
	struct netdev_queue *nq;
	int i, nxmit = 0;
	int queue;

	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
		return -ENETDOWN;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	queue = stmmac_xdp_get_tx_queue(priv, cpu);
	nq = netdev_get_tx_queue(priv->dev, queue);

	__netif_tx_lock(nq, cpu);
	/* Avoids TX time-out as we are sharing with slow path */
	txq_trans_cond_update(nq);

	for (i = 0; i < num_frames; i++) {
		int res;

		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
		if (res == STMMAC_XDP_CONSUMED)
			break;

		nxmit++;
	}

	if (flags & XDP_XMIT_FLUSH) {
		stmmac_flush_tx_descriptors(priv, queue);
		stmmac_tx_timer_arm(priv, queue);
	}

	__netif_tx_unlock(nq);

	return nxmit;
}
void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
	spin_unlock_irqrestore(&ch->lock, flags);

	stmmac_stop_rx_dma(priv, queue);
	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
}
void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;
	u32 buf_size;
	int ret;

	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
	if (ret) {
		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
		return;
	}

	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
	if (ret) {
		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
		netdev_err(priv->dev, "Failed to init RX desc.\n");
		return;
	}

	stmmac_reset_rx_queue(priv, queue);
	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);

	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
			    rx_q->dma_rx_phy, rx_q->queue_index);

	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
			     sizeof(struct dma_desc));
	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
			       rx_q->rx_tail_addr, rx_q->queue_index);

	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
		stmmac_set_dma_bfsize(priv, priv->ioaddr,
				      buf_size,
				      rx_q->queue_index);
	} else {
		stmmac_set_dma_bfsize(priv, priv->ioaddr,
				      priv->dma_conf.dma_buf_sz,
				      rx_q->queue_index);
	}

	stmmac_start_rx_dma(priv, queue);

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
	spin_unlock_irqrestore(&ch->lock, flags);
}
void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
	spin_unlock_irqrestore(&ch->lock, flags);

	stmmac_stop_tx_dma(priv, queue);
	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
}
void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;
	int ret;

	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
	if (ret) {
		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
		return;
	}

	ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
	if (ret) {
		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
		netdev_err(priv->dev, "Failed to init TX desc.\n");
		return;
	}

	stmmac_reset_tx_queue(priv, queue);
	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);

	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
			    tx_q->dma_tx_phy, tx_q->queue_index);

	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);

	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
			       tx_q->tx_tail_addr, tx_q->queue_index);

	stmmac_start_tx_dma(priv, queue);

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
	spin_unlock_irqrestore(&ch->lock, flags);
}
void stmmac_xdp_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 chan;

	/* Ensure tx function is not running */
	netif_tx_disable(dev);

	/* Disable NAPI process */
	stmmac_disable_all_queues(priv);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

	/* Free the IRQ lines */
	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);

	/* Stop TX/RX DMA channels */
	stmmac_stop_all_dma(priv);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv, &priv->dma_conf);

	/* Disable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, false);

	/* set trans_start so we don't get spurious
	 * watchdogs during reset
	 */
	netif_trans_update(dev);
	netif_carrier_off(dev);
}
int stmmac_xdp_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
	struct stmmac_rx_queue *rx_q;
	struct stmmac_tx_queue *tx_q;
	u32 buf_size;
	bool sph_en;
	u32 chan;
	int ret;

	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
	if (ret < 0) {
		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
			   __func__);
		goto dma_desc_error;
	}

	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
	if (ret < 0) {
		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
			   __func__);
		goto init_error;
	}

	stmmac_reset_queues_param(priv);

	/* DMA CSR Channel configuration */
	for (chan = 0; chan < dma_csr_ch; chan++) {
		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
	}

	/* Adjust Split header */
	sph_en = (priv->hw->rx_csum > 0) && priv->sph;

	/* DMA RX Channel Configuration */
	for (chan = 0; chan < rx_cnt; chan++) {
		rx_q = &priv->dma_conf.rx_queue[chan];

		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    rx_q->dma_rx_phy, chan);

		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
				     (rx_q->buf_alloc_num *
				      sizeof(struct dma_desc));
		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
				       rx_q->rx_tail_addr, chan);

		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
			stmmac_set_dma_bfsize(priv, priv->ioaddr,
					      buf_size,
					      rx_q->queue_index);
		} else {
			stmmac_set_dma_bfsize(priv, priv->ioaddr,
					      priv->dma_conf.dma_buf_sz,
					      rx_q->queue_index);
		}

		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
	}

	/* DMA TX Channel Configuration */
	for (chan = 0; chan < tx_cnt; chan++) {
		tx_q = &priv->dma_conf.tx_queue[chan];

		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    tx_q->dma_tx_phy, chan);

		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
				       tx_q->tx_tail_addr, chan);

		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		tx_q->txtimer.function = stmmac_tx_timer;
	}

	/* Enable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, true);

	/* Start Rx & Tx DMA Channels */
	stmmac_start_all_dma(priv);

	ret = stmmac_request_irq(dev);
	if (ret)
		goto irq_error;

	/* Enable NAPI process*/
	stmmac_enable_all_queues(priv);
	netif_carrier_on(dev);
	netif_tx_start_all_queues(dev);
	stmmac_enable_all_dma_irq(priv);

	return 0;

irq_error:
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

	stmmac_hw_teardown(dev);
init_error:
	free_dma_desc_resources(priv, &priv->dma_conf);
dma_desc_error:
	return ret;
}
int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct stmmac_rx_queue *rx_q;
	struct stmmac_tx_queue *tx_q;
	struct stmmac_channel *ch;

	if (test_bit(STMMAC_DOWN, &priv->state) ||
	    !netif_carrier_ok(priv->dev))
		return -ENETDOWN;

	if (!stmmac_xdp_is_enabled(priv))
		return -EINVAL;

	if (queue >= priv->plat->rx_queues_to_use ||
	    queue >= priv->plat->tx_queues_to_use)
		return -EINVAL;

	rx_q = &priv->dma_conf.rx_queue[queue];
	tx_q = &priv->dma_conf.tx_queue[queue];
	ch = &priv->channel[queue];

	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
		return -EINVAL;

	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
		/* EQoS does not have per-DMA channel SW interrupt,
		 * so we schedule RX Napi straight-away.
		 */
		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
			__napi_schedule(&ch->rxtx_napi);
	}

	return 0;
}
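/* Informational note (not driver-specific API): this handler typically runs
 * when user space kicks an AF_XDP socket bound to this queue, e.g. via
 * sendto() or poll() with the need-wakeup flag set, asking the driver to
 * reschedule its combined rx/tx NAPI context.
 */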
static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	unsigned int start;
	int q;

	for (q = 0; q < tx_cnt; q++) {
		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
		u64 tx_packets;
		u64 tx_bytes;

		do {
			start = u64_stats_fetch_begin(&txq_stats->syncp);
			tx_packets = txq_stats->tx_packets;
			tx_bytes   = txq_stats->tx_bytes;
		} while (u64_stats_fetch_retry(&txq_stats->syncp, start));

		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
	}

	for (q = 0; q < rx_cnt; q++) {
		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
		u64 rx_packets;
		u64 rx_bytes;

		do {
			start = u64_stats_fetch_begin(&rxq_stats->syncp);
			rx_packets = rxq_stats->rx_packets;
			rx_bytes   = rxq_stats->rx_bytes;
		} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
	}

	stats->rx_dropped = priv->xstats.rx_dropped;
	stats->rx_errors = priv->xstats.rx_errors;
	stats->tx_dropped = priv->xstats.tx_dropped;
	stats->tx_errors = priv->xstats.tx_errors;
	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
	stats->rx_length_errors = priv->xstats.rx_length;
	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
}
static const struct net_device_ops stmmac_netdev_ops = {
	.ndo_open = stmmac_open,
	.ndo_start_xmit = stmmac_xmit,
	.ndo_stop = stmmac_release,
	.ndo_change_mtu = stmmac_change_mtu,
	.ndo_fix_features = stmmac_fix_features,
	.ndo_set_features = stmmac_set_features,
	.ndo_set_rx_mode = stmmac_set_rx_mode,
	.ndo_tx_timeout = stmmac_tx_timeout,
	.ndo_eth_ioctl = stmmac_ioctl,
	.ndo_get_stats64 = stmmac_get_stats64,
	.ndo_setup_tc = stmmac_setup_tc,
	.ndo_select_queue = stmmac_select_queue,
	.ndo_set_mac_address = stmmac_set_mac_address,
	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
	.ndo_bpf = stmmac_bpf,
	.ndo_xdp_xmit = stmmac_xdp_xmit,
	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
};
static void stmmac_reset_subtask(struct stmmac_priv *priv)
{
	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
		return;
	if (test_bit(STMMAC_DOWN, &priv->state))
		return;

	netdev_err(priv->dev, "Reset adapter.\n");

	rtnl_lock();
	netif_trans_update(priv->dev);
	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
		usleep_range(1000, 2000);

	set_bit(STMMAC_DOWN, &priv->state);
	dev_close(priv->dev);
	dev_open(priv->dev, NULL);
	clear_bit(STMMAC_DOWN, &priv->state);
	clear_bit(STMMAC_RESETING, &priv->state);
	rtnl_unlock();
}
static void stmmac_service_task(struct work_struct *work)
{
	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
						service_task);

	stmmac_reset_subtask(priv);
	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
}
/**
 * stmmac_hw_init - Init the MAC device
 * @priv: driver private structure
 * Description: this function is to configure the MAC device according to
 * some platform parameters or the HW capability register. It prepares the
 * driver to use either ring or chain modes and to setup either enhanced or
 * normal descriptors.
 */
static int stmmac_hw_init(struct stmmac_priv *priv)
{
	int ret;

	/* dwmac-sun8i only work in chain mode */
	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
		chain_mode = 1;
	priv->chain_mode = chain_mode;

	/* Initialize HW Interface */
	ret = stmmac_hwif_init(priv);
	if (ret)
		return ret;

	/* Get the HW capability (new GMAC newer than 3.50a) */
	priv->hw_cap_support = stmmac_get_hw_features(priv);
	if (priv->hw_cap_support) {
		dev_info(priv->device, "DMA HW capability register supported\n");

		/* We can override some gmac/dma configuration fields: e.g.
		 * enh_desc, tx_coe (e.g. that are passed through the
		 * platform) with the values from the HW capability
		 * register (if supported).
		 */
		priv->plat->enh_desc = priv->dma_cap.enh_desc;
		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
		priv->hw->pmt = priv->plat->pmt;
		if (priv->dma_cap.hash_tb_sz) {
			priv->hw->multicast_filter_bins =
					(BIT(priv->dma_cap.hash_tb_sz) << 5);
			priv->hw->mcast_bits_log2 =
					ilog2(priv->hw->multicast_filter_bins);
		}

		/* TXCOE doesn't work in thresh DMA mode */
		if (priv->plat->force_thresh_dma_mode)
			priv->plat->tx_coe = 0;
		else
			priv->plat->tx_coe = priv->dma_cap.tx_coe;

		/* In case of GMAC4 rx_coe is from HW cap register. */
		priv->plat->rx_coe = priv->dma_cap.rx_coe;

		if (priv->dma_cap.rx_coe_type2)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
		else if (priv->dma_cap.rx_coe_type1)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;

	} else {
		dev_info(priv->device, "No HW DMA feature register supported\n");
	}

	if (priv->plat->rx_coe) {
		priv->hw->rx_csum = priv->plat->rx_coe;
		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
		if (priv->synopsys_id < DWMAC_CORE_4_00)
			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
	}
	if (priv->plat->tx_coe)
		dev_info(priv->device, "TX Checksum insertion supported\n");

	if (priv->plat->pmt) {
		dev_info(priv->device, "Wake-Up On Lan supported\n");
		device_set_wakeup_capable(priv->device, 1);
	}

	if (priv->dma_cap.tsoen)
		dev_info(priv->device, "TSO supported\n");

	priv->hw->vlan_fail_q_en =
		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;

	/* Run HW quirks, if any */
	if (priv->hwif_quirks) {
		ret = priv->hwif_quirks(priv);
		if (ret)
			return ret;
	}

	/* Rx Watchdog is available in the COREs newer than the 3.40.
	 * In some case, for example on bugged HW this feature
	 * has to be disable and this can be done by passing the
	 * riwt_off field from the platform.
	 */
	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
	     (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
		priv->use_riwt = 1;
		dev_info(priv->device,
			 "Enable RX Mitigation via HW Watchdog Timer\n");
	}

	return 0;
}
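/* Illustrative sizing example for the hash-table setup above (values chosen
 * for the example, not taken from a specific databook): with
 * dma_cap.hash_tb_sz == 2 the code programs
 *   multicast_filter_bins = BIT(2) << 5 = 128
 *   mcast_bits_log2       = ilog2(128) = 7
 * i.e. a 128-bin multicast hash filter.
 */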
static void stmmac_napi_add(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 queue, maxq;

	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		ch->priv_data = priv;
		ch->index = queue;
		spin_lock_init(&ch->lock);

		if (queue < priv->plat->rx_queues_to_use) {
			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
		}
		if (queue < priv->plat->tx_queues_to_use) {
			netif_napi_add_tx(dev, &ch->tx_napi,
					  stmmac_napi_poll_tx);
		}
		if (queue < priv->plat->rx_queues_to_use &&
		    queue < priv->plat->tx_queues_to_use) {
			netif_napi_add(dev, &ch->rxtx_napi,
				       stmmac_napi_poll_rxtx);
		}
	}
}
static void stmmac_napi_del(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 queue, maxq;

	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < priv->plat->rx_queues_to_use)
			netif_napi_del(&ch->rx_napi);
		if (queue < priv->plat->tx_queues_to_use)
			netif_napi_del(&ch->tx_napi);
		if (queue < priv->plat->rx_queues_to_use &&
		    queue < priv->plat->tx_queues_to_use) {
			netif_napi_del(&ch->rxtx_napi);
		}
	}
}
int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = 0, i;

	if (netif_running(dev))
		stmmac_release(dev);

	stmmac_napi_del(dev);

	priv->plat->rx_queues_to_use = rx_cnt;
	priv->plat->tx_queues_to_use = tx_cnt;
	if (!netif_is_rxfh_configured(dev))
		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
									rx_cnt);

	stmmac_set_half_duplex(priv);
	stmmac_napi_add(dev);

	if (netif_running(dev))
		ret = stmmac_open(dev);

	return ret;
}
int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = 0;

	if (netif_running(dev))
		stmmac_release(dev);

	priv->dma_conf.dma_rx_size = rx_size;
	priv->dma_conf.dma_tx_size = tx_size;

	if (netif_running(dev))
		ret = stmmac_open(dev);

	return ret;
}
#define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
static void stmmac_fpe_lp_task(struct work_struct *work)
{
	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
						fpe_task);
	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
	bool *hs_enable = &fpe_cfg->hs_enable;
	bool *enable = &fpe_cfg->enable;
	int retries = 20;

	while (retries-- > 0) {
		/* Bail out immediately if FPE handshake is OFF */
		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
			break;

		if (*lo_state == FPE_STATE_ENTERING_ON &&
		    *lp_state == FPE_STATE_ENTERING_ON) {
			stmmac_fpe_configure(priv, priv->ioaddr,
					     priv->plat->tx_queues_to_use,
					     priv->plat->rx_queues_to_use,
					     *enable);

			netdev_info(priv->dev, "configured FPE\n");

			*lo_state = FPE_STATE_ON;
			*lp_state = FPE_STATE_ON;
			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
			break;
		}

		if ((*lo_state == FPE_STATE_CAPABLE ||
		     *lo_state == FPE_STATE_ENTERING_ON) &&
		    *lp_state != FPE_STATE_ON) {
			netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT,
				    *lo_state, *lp_state);
			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
						MPACKET_VERIFY);
		}
		/* Sleep then retry */
		msleep(500);
	}

	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
}
void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
{
	if (priv->plat->fpe_cfg->hs_enable != enable) {
		if (enable) {
			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
						MPACKET_VERIFY);
		} else {
			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
		}

		priv->plat->fpe_cfg->hs_enable = enable;
	}
}
static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
{
	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
	struct dma_desc *desc_contains_ts = ctx->desc;
	struct stmmac_priv *priv = ctx->priv;
	struct dma_desc *ndesc = ctx->ndesc;
	struct dma_desc *desc = ctx->desc;
	u64 ns = 0;

	if (!priv->hwts_rx_en)
		return -ENODATA;

	/* For GMAC4, the valid timestamp is from CTX next desc. */
	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
		desc_contains_ts = ndesc;

	/* Check if timestamp is available */
	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
		ns -= priv->plat->cdc_error_adj;
		*timestamp = ns_to_ktime(ns);
		return 0;
	}

	return -ENODATA;
}

static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
	.xmo_rx_timestamp = stmmac_xdp_rx_timestamp,
};
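/* Informational note: an XDP program attached to this device can consume the
 * hook above through the bpf_xdp_metadata_rx_timestamp() kfunc, assuming a
 * kernel with XDP RX metadata support; this comment describes generic XDP
 * plumbing, not something specific to this driver.
 */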
/**
 * stmmac_dvr_probe
 * @device: device pointer
 * @plat_dat: platform data pointer
 * @res: stmmac resource pointer
 * Description: this is the main probe function used to
 * call the alloc_etherdev, allocate the priv structure.
 * Return:
 * returns 0 on success, otherwise errno.
 */
int stmmac_dvr_probe(struct device *device,
		     struct plat_stmmacenet_data *plat_dat,
		     struct stmmac_resources *res)
{
	struct net_device *ndev = NULL;
	struct stmmac_priv *priv;
	u32 rxq;
	int i, ret = 0;

	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, device);

	priv = netdev_priv(ndev);
	priv->device = device;
	priv->dev = ndev;

	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
		u64_stats_init(&priv->xstats.rxq_stats[i].syncp);
	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
		u64_stats_init(&priv->xstats.txq_stats[i].syncp);

	stmmac_set_ethtool_ops(ndev);
	priv->pause = pause;
	priv->plat = plat_dat;
	priv->ioaddr = res->addr;
	priv->dev->base_addr = (unsigned long)res->addr;
	priv->plat->dma_cfg->multi_msi_en =
		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);

	priv->dev->irq = res->irq;
	priv->wol_irq = res->wol_irq;
	priv->lpi_irq = res->lpi_irq;
	priv->sfty_ce_irq = res->sfty_ce_irq;
	priv->sfty_ue_irq = res->sfty_ue_irq;
	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
		priv->rx_irq[i] = res->rx_irq[i];
	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
		priv->tx_irq[i] = res->tx_irq[i];

	if (!is_zero_ether_addr(res->mac))
		eth_hw_addr_set(priv->dev, res->mac);

	dev_set_drvdata(device, priv->dev);

	/* Verify driver arguments */
	stmmac_verify_args();

	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
	if (!priv->af_xdp_zc_qps)
		return -ENOMEM;

	/* Allocate workqueue */
	priv->wq = create_singlethread_workqueue("stmmac_wq");
	if (!priv->wq) {
		dev_err(priv->device, "failed to create workqueue\n");
		ret = -ENOMEM;
		goto error_wq_init;
	}

	INIT_WORK(&priv->service_task, stmmac_service_task);

	/* Initialize Link Partner FPE workqueue */
	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
	/* Override with kernel parameters if supplied XXX CRS XXX
	 * this needs to have multiple instances
	 */
	if ((phyaddr >= 0) && (phyaddr <= 31))
		priv->plat->phy_addr = phyaddr;

	if (priv->plat->stmmac_rst) {
		ret = reset_control_assert(priv->plat->stmmac_rst);
		reset_control_deassert(priv->plat->stmmac_rst);
		/* Some reset controllers have only reset callback instead of
		 * assert + deassert callbacks pair.
		 */
		if (ret == -ENOTSUPP)
			reset_control_reset(priv->plat->stmmac_rst);
	}

	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
	if (ret == -ENOTSUPP)
		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
			ERR_PTR(ret));

	/* Init MAC and get the capabilities */
	ret = stmmac_hw_init(priv);
	if (ret)
		goto error_hw_init;

	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
	 */
	if (priv->synopsys_id < DWMAC_CORE_5_20)
		priv->plat->dma_cfg->dche = false;

	stmmac_check_ether_addr(priv);

	ndev->netdev_ops = &stmmac_netdev_ops;

	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_RXCSUM;
	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
			     NETDEV_XDP_ACT_XSK_ZEROCOPY;

	ret = stmmac_tc_init(priv, priv);
	if (!ret)
		ndev->hw_features |= NETIF_F_HW_TC;

	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
		if (priv->plat->has_gmac4)
			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
		priv->tso = true;
		dev_info(priv->device, "TSO feature enabled\n");
	}

	if (priv->dma_cap.sphen &&
	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
		ndev->hw_features |= NETIF_F_GRO;
		priv->sph_cap = true;
		priv->sph = priv->sph_cap;
		dev_info(priv->device, "SPH feature enabled\n");
	}
	/* Ideally our host DMA address width is the same as for the
	 * device. However, it may differ and then we have to use our
	 * host DMA width for allocation and the device DMA width for
	 * register handling.
	 */
	if (priv->plat->host_dma_width)
		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
	else
		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;

	if (priv->dma_cap.host_dma_width) {
		ret = dma_set_mask_and_coherent(device,
				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
		if (!ret) {
			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);

			/*
			 * If more than 32 bits can be addressed, make sure to
			 * enable enhanced addressing mode.
			 */
			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
				priv->plat->dma_cfg->eame = true;
		} else {
			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
			if (ret) {
				dev_err(priv->device, "Failed to set DMA Mask\n");
				goto error_hw_init;
			}

			priv->dma_cap.host_dma_width = 32;
		}
	}

	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
	/* Both mac100 and gmac support receive VLAN tag detection */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
	if (priv->dma_cap.vlhash) {
		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
	}
	if (priv->dma_cap.vlins) {
		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
		if (priv->dma_cap.dvlan)
			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
	}
#endif
	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	priv->xstats.threshold = tc;

	/* Initialize RSS */
	rxq = priv->plat->rx_queues_to_use;
	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);

	if (priv->dma_cap.rssen && priv->plat->rss_en)
		ndev->features |= NETIF_F_RXHASH;

	ndev->vlan_features |= ndev->features;
	/* TSO doesn't work on VLANs yet */
	ndev->vlan_features &= ~NETIF_F_TSO;

	/* MTU range: 46 - hw-specific max */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	if (priv->plat->has_xgmac)
		ndev->max_mtu = XGMAC_JUMBO_LEN;
	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
		ndev->max_mtu = JUMBO_LEN;
	else
		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
	 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
	 */
	if ((priv->plat->maxmtu < ndev->max_mtu) &&
	    (priv->plat->maxmtu >= ndev->min_mtu))
		ndev->max_mtu = priv->plat->maxmtu;
	else if (priv->plat->maxmtu < ndev->min_mtu)
		dev_warn(priv->device,
			 "%s: warning: maxmtu having invalid value (%d)\n",
			 __func__, priv->plat->maxmtu);

	if (flow_ctrl)
		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	/* Setup channels NAPI */
	stmmac_napi_add(ndev);

	mutex_init(&priv->lock);

	/* If a specific clk_csr value is passed from the platform
	 * this means that the CSR Clock Range selection cannot be
	 * changed at run-time and it is fixed. Viceversa the driver'll try to
	 * set the MDC clock dynamically according to the csr actual
	 * clock input.
	 */
	if (priv->plat->clk_csr >= 0)
		priv->clk_csr = priv->plat->clk_csr;
	else
		stmmac_clk_csr_set(priv);

	stmmac_check_pcs_mode(priv);

	pm_runtime_get_noresume(device);
	pm_runtime_set_active(device);
	if (!pm_runtime_enabled(device))
		pm_runtime_enable(device);

	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI) {
		/* MDIO bus Registration */
		ret = stmmac_mdio_register(ndev);
		if (ret < 0) {
			dev_err_probe(priv->device, ret,
				      "%s: MDIO bus (id: %d) registration failed\n",
				      __func__, priv->plat->bus_id);
			goto error_mdio_register;
		}
	}

	if (priv->plat->speed_mode_2500)
		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);

	if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
		ret = stmmac_xpcs_setup(priv->mii);
		if (ret)
			goto error_xpcs_setup;
	}

	ret = stmmac_phy_setup(priv);
	if (ret) {
		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
		goto error_phy_setup;
	}

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->device, "%s: ERROR %i registering the device\n",
			__func__, ret);
		goto error_netdev_register;
	}

#ifdef CONFIG_DEBUG_FS
	stmmac_init_fs(ndev);
#endif

	if (priv->plat->dump_debug_regs)
		priv->plat->dump_debug_regs(priv->plat->bsp_priv);

	/* Let pm_runtime_put() disable the clocks.
	 * If CONFIG_PM is not enabled, the clocks will stay powered.
	 */
	pm_runtime_put(device);

	return ret;

error_netdev_register:
	phylink_destroy(priv->phylink);
error_xpcs_setup:
error_phy_setup:
	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
error_mdio_register:
	stmmac_napi_del(ndev);
error_hw_init:
	destroy_workqueue(priv->wq);
error_wq_init:
	bitmap_free(priv->af_xdp_zc_qps);

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
/**
 * stmmac_dvr_remove
 * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC RX/TX
 * changes the link status, releases the DMA descriptor rings.
 */
void stmmac_dvr_remove(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	netdev_info(priv->dev, "%s: removing driver", __func__);

	pm_runtime_get_sync(dev);

	stmmac_stop_all_dma(priv);
	stmmac_mac_set(priv, priv->ioaddr, false);
	netif_carrier_off(ndev);
	unregister_netdev(ndev);

#ifdef CONFIG_DEBUG_FS
	stmmac_exit_fs(ndev);
#endif
	phylink_destroy(priv->phylink);
	if (priv->plat->stmmac_rst)
		reset_control_assert(priv->plat->stmmac_rst);
	reset_control_assert(priv->plat->stmmac_ahb_rst);
	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
	destroy_workqueue(priv->wq);
	mutex_destroy(&priv->lock);
	bitmap_free(priv->af_xdp_zc_qps);

	pm_runtime_disable(dev);
	pm_runtime_put_noidle(dev);
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
/**
 * stmmac_suspend - suspend callback
 * @dev: device pointer
 * Description: this is the function to suspend the device and it is called
 * by the platform driver to stop the network queue, release the resources,
 * program the PMT register (for WoL), clean and release driver resources.
 */
int stmmac_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	u32 chan;

	if (!ndev || !netif_running(ndev))
		return 0;

	mutex_lock(&priv->lock);

	netif_device_detach(ndev);

	stmmac_disable_all_queues(priv);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

	if (priv->eee_enabled) {
		priv->tx_path_in_lpi_mode = false;
		del_timer_sync(&priv->eee_ctrl_timer);
	}

	/* Stop TX/RX DMA */
	stmmac_stop_all_dma(priv);

	if (priv->plat->serdes_powerdown)
		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);

	/* Enable Power down mode by programming the PMT regs */
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		stmmac_pmt(priv, priv->hw, priv->wolopts);
		priv->irq_wake = 1;
	} else {
		stmmac_mac_set(priv, priv->ioaddr, false);
		pinctrl_pm_select_sleep_state(priv->device);
	}

	mutex_unlock(&priv->lock);

	rtnl_lock();
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		phylink_suspend(priv->phylink, true);
	} else {
		if (device_may_wakeup(priv->device))
			phylink_speed_down(priv->phylink, false);
		phylink_suspend(priv->phylink, false);
	}
	rtnl_unlock();

	if (priv->dma_cap.fpesel) {
		/* Disable FPE */
		stmmac_fpe_configure(priv, priv->ioaddr,
				     priv->plat->tx_queues_to_use,
				     priv->plat->rx_queues_to_use, false);

		stmmac_fpe_handshake(priv, false);
		stmmac_fpe_stop_wq(priv);
	}

	priv->speed = SPEED_UNKNOWN;
	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);
static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];

	rx_q->cur_rx = 0;
	rx_q->dirty_rx = 0;
}

static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];

	tx_q->cur_tx = 0;
	tx_q->dirty_tx = 0;
	tx_q->mss = 0;

	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
}
/**
 * stmmac_reset_queues_param - reset queue parameters
 * @priv: device pointer
 */
static void stmmac_reset_queues_param(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < rx_cnt; queue++)
		stmmac_reset_rx_queue(priv, queue);

	for (queue = 0; queue < tx_cnt; queue++)
		stmmac_reset_tx_queue(priv, queue);
}
/**
 * stmmac_resume - resume callback
 * @dev: device pointer
 * Description: when resume this function is invoked to setup the DMA and CORE
 * in a usable state.
 */
int stmmac_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret;

	if (!netif_running(ndev))
		return 0;

	/* Power Down bit, into the PM register, is cleared
	 * automatically as soon as a magic packet or a Wake-up frame
	 * is received. Anyway, it's better to manually clear
	 * this bit because it can generate problems while resuming
	 * from another devices (e.g. serial console).
	 */
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		mutex_lock(&priv->lock);
		stmmac_pmt(priv, priv->hw, 0);
		mutex_unlock(&priv->lock);
		priv->irq_wake = 0;
	} else {
		pinctrl_pm_select_default_state(priv->device);
		/* reset the phy so that it's ready */
		if (priv->mii)
			stmmac_mdio_reset(priv->mii);
	}

	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
	    priv->plat->serdes_powerup) {
		ret = priv->plat->serdes_powerup(ndev,
						 priv->plat->bsp_priv);
		if (ret < 0)
			return ret;
	}

	rtnl_lock();
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		phylink_resume(priv->phylink);
	} else {
		phylink_resume(priv->phylink);
		if (device_may_wakeup(priv->device))
			phylink_speed_up(priv->phylink);
	}
	rtnl_unlock();

	rtnl_lock();
	mutex_lock(&priv->lock);

	stmmac_reset_queues_param(priv);

	stmmac_free_tx_skbufs(priv);
	stmmac_clear_descriptors(priv, &priv->dma_conf);

	stmmac_hw_setup(ndev, false);
	stmmac_init_coalesce(priv);
	stmmac_set_rx_mode(ndev);

	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);

	stmmac_enable_all_queues(priv);
	stmmac_enable_all_dma_irq(priv);

	mutex_unlock(&priv->lock);
	rtnl_unlock();

	netif_device_attach(ndev);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_resume);
static int __init stmmac_cmdline_opt(char *str)
{
	char *opt;

	if (!str || !*str)
		return 1;
	while ((opt = strsep(&str, ",")) != NULL) {
		if (!strncmp(opt, "debug:", 6)) {
			if (kstrtoint(opt + 6, 0, &debug))
				goto err;
		} else if (!strncmp(opt, "phyaddr:", 8)) {
			if (kstrtoint(opt + 8, 0, &phyaddr))
				goto err;
		} else if (!strncmp(opt, "buf_sz:", 7)) {
			if (kstrtoint(opt + 7, 0, &buf_sz))
				goto err;
		} else if (!strncmp(opt, "tc:", 3)) {
			if (kstrtoint(opt + 3, 0, &tc))
				goto err;
		} else if (!strncmp(opt, "watchdog:", 9)) {
			if (kstrtoint(opt + 9, 0, &watchdog))
				goto err;
		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
			if (kstrtoint(opt + 10, 0, &flow_ctrl))
				goto err;
		} else if (!strncmp(opt, "pause:", 6)) {
			if (kstrtoint(opt + 6, 0, &pause))
				goto err;
		} else if (!strncmp(opt, "eee_timer:", 10)) {
			if (kstrtoint(opt + 10, 0, &eee_timer))
				goto err;
		} else if (!strncmp(opt, "chain_mode:", 11)) {
			if (kstrtoint(opt + 11, 0, &chain_mode))
				goto err;
		}
	}
	return 1;

err:
	pr_err("%s: ERROR broken module parameter conversion", __func__);
	return 1;
}

__setup("stmmaceth=", stmmac_cmdline_opt);
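/* Example (illustrative values only): when the driver is built in, booting
 * with
 *   stmmaceth=debug:16,phyaddr:1,watchdog:10000,pause:65535
 * sets the corresponding parameters through the parser above.
 */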
static int __init stmmac_init(void)
{
#ifdef CONFIG_DEBUG_FS
	/* Create debugfs main directory if it doesn't exist yet */
	if (!stmmac_fs_dir)
		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
	register_netdevice_notifier(&stmmac_notifier);
#endif

	return 0;
}
static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
	unregister_netdevice_notifier(&stmmac_notifier);
	debugfs_remove_recursive(stmmac_fs_dir);
#endif
}

module_init(stmmac_init)
module_exit(stmmac_exit)
7953 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7954 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7955 MODULE_LICENSE("GPL");