// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
	https://bugzilla.stlinux.com/
*******************************************************************************/
#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include <linux/phylink.h>
#include <linux/udp.h>
#include <linux/bpf_trace.h>
#include <net/page_pool/helpers.h>
#include <net/pkt_cls.h>
#include <net/xdp_sock_drv.h>
#include "stmmac_ptp.h"
#include "stmmac_xdp.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"
/* As long as the interface is active, we keep the timestamping counter enabled
 * with fine resolution and binary rollover. This avoids non-monotonic behavior
 * (clock jumps) when changing timestamping settings at runtime.
 */
#define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
				 PTP_TCR_TSCTRLSSR)

#define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
#define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
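/* Illustrative note (not in the original source): assuming SMP_CACHE_BYTES is
 * 64, STMMAC_ALIGN(1500) first rounds up to the cache line (1536) and then to
 * a 16-byte boundary, which is already met, so the result is 1536.
 */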
/* Module parameters */
static int watchdog = TX_TIMEO;
module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");
#define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
#define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)

/* Limit to make sure XDP TX and slow path can coexist */
#define STMMAC_XSK_TX_BUDGET_MAX	256
#define STMMAC_TX_XSK_AVAIL		16
#define STMMAC_RX_FILL_BATCH		16

#define STMMAC_XDP_PASS		0
#define STMMAC_XDP_CONSUMED	BIT(0)
#define STMMAC_XDP_TX		BIT(1)
#define STMMAC_XDP_REDIRECT	BIT(2)
static int flow_ctrl = FLOW_AUTO;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");
#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
/* By default the driver will use the ring mode to manage tx and rx descriptors,
 * but allows the user to force use of the chain instead of the ring.
 */
static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
/* For MSI interrupts handling */
static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
static void stmmac_reset_queues_param(struct stmmac_priv *priv);
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
					  u32 rxmode, u32 chan);

#ifdef CONFIG_DEBUG_FS
static const struct net_device_ops stmmac_netdev_ops;
static void stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif
#define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))

int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
{
	int ret = 0;

	if (enabled) {
		ret = clk_prepare_enable(priv->plat->stmmac_clk);
		if (ret)
			return ret;
		ret = clk_prepare_enable(priv->plat->pclk);
		if (ret) {
			clk_disable_unprepare(priv->plat->stmmac_clk);
			return ret;
		}
		if (priv->plat->clks_config) {
			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
			if (ret) {
				clk_disable_unprepare(priv->plat->stmmac_clk);
				clk_disable_unprepare(priv->plat->pclk);
				return ret;
			}
		}
	} else {
		clk_disable_unprepare(priv->plat->stmmac_clk);
		clk_disable_unprepare(priv->plat->pclk);
		if (priv->plat->clks_config)
			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}
static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (stmmac_xdp_is_enabled(priv) &&
		    test_bit(queue, priv->af_xdp_zc_qps)) {
			napi_disable(&ch->rxtx_napi);
			continue;
		}

		if (queue < rx_queues_cnt)
			napi_disable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_disable(&ch->tx_napi);
	}
}
/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	struct stmmac_rx_queue *rx_q;
	u32 queue;

	/* synchronize_rcu() needed for pending XDP buffers to drain */
	for (queue = 0; queue < rx_queues_cnt; queue++) {
		rx_q = &priv->dma_conf.rx_queue[queue];
		if (rx_q->xsk_pool) {
			synchronize_rcu();
			break;
		}
	}

	__stmmac_disable_all_queues(priv);
}
/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (stmmac_xdp_is_enabled(priv) &&
		    test_bit(queue, priv->af_xdp_zc_qps)) {
			napi_enable(&ch->rxtx_napi);
			continue;
		}

		if (queue < rx_queues_cnt)
			napi_enable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_enable(&ch->tx_napi);
	}
}
static void stmmac_service_event_schedule(struct stmmac_priv *priv)
{
	if (!test_bit(STMMAC_DOWN, &priv->state) &&
	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
		queue_work(priv->wq, &priv->service_task);
}

static void stmmac_global_err(struct stmmac_priv *priv)
{
	netif_carrier_off(priv->dev);
	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
	stmmac_service_event_schedule(priv);
}
/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Viceversa the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* Platform provided default clk_csr would be assumed valid
	 * for all other cases except for the below mentioned ones.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we can not estimate the proper divider as it is not known
	 * the frequency of clk_csr_i. So we do not change the default
	 * divider.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}

	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
		if (clk_rate > 160000000)
			priv->clk_csr = 0x03;
		else if (clk_rate > 80000000)
			priv->clk_csr = 0x02;
		else if (clk_rate > 40000000)
			priv->clk_csr = 0x01;
		else
			priv->clk_csr = 0;
	}
	if (priv->plat->has_xgmac) {
		if (clk_rate > 400000000)
			priv->clk_csr = 0x5;
		else if (clk_rate > 350000000)
			priv->clk_csr = 0x4;
		else if (clk_rate > 300000000)
			priv->clk_csr = 0x3;
		else if (clk_rate > 250000000)
			priv->clk_csr = 0x2;
		else if (clk_rate > 150000000)
			priv->clk_csr = 0x1;
		else
			priv->clk_csr = 0x0;
	}
}
static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}
static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	u32 avail;

	if (tx_q->dirty_tx > tx_q->cur_tx)
		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
	else
		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;

	return avail;
}
/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
	u32 dirty;

	if (rx_q->dirty_rx <= rx_q->cur_rx)
		dirty = rx_q->cur_rx - rx_q->dirty_rx;
	else
		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;

	return dirty;
}
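/* Worked example (illustrative values): with dma_rx_size == 512, cur_rx == 10
 * and dirty_rx == 500 the ring has wrapped, so the dirty count is
 * 512 - 500 + 10 = 22; without a wrap (dirty_rx <= cur_rx) it is simply
 * cur_rx - dirty_rx.
 */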
static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
{
	int tx_lpi_timer;

	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
	priv->eee_sw_timer_en = en ? 0 : 1;
	tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
}
/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function is to verify and enter in LPI mode in case of
 * EEE.
 */
static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* check if all TX queues have the work finished */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];

		if (tx_q->dirty_tx != tx_q->cur_tx)
			return -EBUSY; /* still unfinished work */
	}

	/* Check and enter in LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		stmmac_set_eee_mode(priv, priv->hw,
			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
	return 0;
}
/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function is to exit and disable EEE in case of
 * LPI state is true. This is called by the xmit.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	if (!priv->eee_sw_timer_en) {
		stmmac_lpi_entry_timer_config(priv, 0);
		return;
	}

	stmmac_reset_eee_mode(priv, priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}
/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t: timer_list struct containing private info
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

	if (stmmac_enable_eee_mode(priv))
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}
/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts the
 *  related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	int eee_tw_timer = priv->eee_tw_timer;

	/* Using PCS we cannot dial with the phy registers at this stage
	 * so we do not support extra features like EEE.
	 */
	if (priv->hw->pcs == STMMAC_PCS_TBI ||
	    priv->hw->pcs == STMMAC_PCS_RTBI)
		return false;

	/* Check if MAC core supports the EEE feature. */
	if (!priv->dma_cap.eee)
		return false;

	mutex_lock(&priv->lock);

	/* Check if it needs to be deactivated */
	if (!priv->eee_active) {
		if (priv->eee_enabled) {
			netdev_dbg(priv->dev, "disable EEE\n");
			stmmac_lpi_entry_timer_config(priv, 0);
			del_timer_sync(&priv->eee_ctrl_timer);
			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
			if (priv->hw->xpcs)
				xpcs_config_eee(priv->hw->xpcs,
						priv->plat->mult_fact_100ns,
						false);
		}
		mutex_unlock(&priv->lock);
		return false;
	}

	if (priv->eee_active && !priv->eee_enabled) {
		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
				     eee_tw_timer);
		if (priv->hw->xpcs)
			xpcs_config_eee(priv->hw->xpcs,
					priv->plat->mult_fact_100ns,
					true);
	}

	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
		del_timer_sync(&priv->eee_ctrl_timer);
		priv->tx_path_in_lpi_mode = false;
		stmmac_lpi_entry_timer_config(priv, 1);
	} else {
		stmmac_lpi_entry_timer_config(priv, 0);
		mod_timer(&priv->eee_ctrl_timer,
			  STMMAC_LPI_T(priv->tx_lpi_timer));
	}

	mutex_unlock(&priv->lock);
	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	return true;
}
/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read the timestamp from the descriptor and pass it to
 * the stack, and also perform some sanity checks.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	bool found = false;
	u64 ns = 0;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status */
	if (stmmac_get_tx_timestamp_status(priv, p)) {
		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
		found = true;
	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
		found = true;
	}

	if (found) {
		ns -= priv->plat->cdc_error_adj;

		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}
}
/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read the received packet's timestamp from the descriptor
 * and pass it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	struct dma_desc *desc = p;
	u64 ns = 0;

	if (!priv->hwts_rx_en)
		return;
	/* For GMAC4, the valid timestamp is from CTX next desc. */
	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
		desc = np;

	/* Check if timestamp is available */
	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);

		ns -= priv->plat->cdc_error_adj;

		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
	}
}
/**
 *  stmmac_hwtstamp_set - control hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing(TX)
 *  and incoming(RX) packets time stamping based on user input.
 *  Return Value:
 *  0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit14 and bits17/16 to 01
			 * This leaves Delay_Req timestamps out.
			 * Enable all events *and* general purpose message
			 * timestamping
			 */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			if (priv->synopsys_id < DWMAC_CORE_4_10)
				ts_event_en = PTP_TCR_TSEVNTENA;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	priv->systime_flags = STMMAC_HWTS_ACTIVE;

	if (priv->hwts_tx_en || priv->hwts_rx_en) {
		priv->systime_flags |= tstamp_all | ptp_v2 |
				       ptp_over_ethernet | ptp_over_ipv6_udp |
				       ptp_over_ipv4_udp | ts_event_en |
				       ts_master_en | snap_type_sel;
	}

	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);

	memcpy(&priv->tstamp_config, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}
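/* Userspace reaches stmmac_hwtstamp_set() through the standard SIOCSHWTSTAMP
 * ioctl. A minimal sketch (illustrative only, error handling omitted, "eth0"
 * and sock_fd are placeholders):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { .ifr_data = (char *)&cfg };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * On return the driver may have downgraded rx_filter (for example to
 * HWTSTAMP_FILTER_PTP_V1_L4_EVENT when advanced timestamping is not
 * available), so callers should re-read cfg.
 */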
/**
 *  stmmac_hwtstamp_get - read hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function obtains the current hardware timestamping settings.
 */
static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config *config = &priv->tstamp_config;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, config,
			    sizeof(*config)) ? -EFAULT : 0;
}
/**
 * stmmac_init_tstamp_counter - init hardware timestamping counter
 * @priv: driver private structure
 * @systime_flags: timestamping flags
 * Description:
 * Initialize hardware counter for packet timestamping.
 * This is valid as long as the interface is open and not suspended.
 * Will be rerun after resuming from suspend, case in which the timestamping
 * flags updated by stmmac_hwtstamp_set() also need to be restored.
 */
int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
{
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	struct timespec64 now;
	u32 sec_inc = 0;
	u64 temp = 0;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
	priv->systime_flags = systime_flags;

	/* program Sub Second Increment reg */
	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
					   priv->plat->clk_ptp_rate,
					   xmac, &sec_inc);
	temp = div_u64(1000000000ULL, sec_inc);

	/* Store sub second increment for later use */
	priv->sub_second_inc = sec_inc;

	/* calculate default added value:
	 * formula is :
	 * addend = (2^32)/freq_div_ratio;
	 * where, freq_div_ratio = 1e9ns/sec_inc
	 */
	temp = (u64)(temp << 32);
	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);

	/* initialize system time */
	ktime_get_real_ts64(&now);

	/* lower 32 bits of tv_sec are safe until y2106 */
	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
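/* Worked example of the addend formula above (illustrative numbers): if the
 * programmed sub-second increment is 20 ns, freq_div_ratio = 1e9 / 20 = 50 MHz;
 * with clk_ptp_rate = 100 MHz the default addend is (50e6 << 32) / 100e6 =
 * 2^31 (0x80000000), i.e. the 32-bit accumulator overflows (and the counter
 * advances by sec_inc) on every other PTP clock cycle on average.
 */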
/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	int ret;

	if (priv->plat->ptp_clk_freq_config)
		priv->plat->ptp_clk_freq_config(priv);

	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
	if (ret)
		return ret;

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
	if (xmac && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
		stmmac_hwtstamp_correct_latency(priv, priv);

	return 0;
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}
/**
 *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 *  @priv: driver private structure
 *  @duplex: duplex passed to the next function
 *  Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
			 priv->pause, tx_cnt);
}
static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
						 phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	if (priv->hw->xpcs)
		return &priv->hw->xpcs->pcs;

	if (priv->hw->lynx_pcs)
		return priv->hw->lynx_pcs;

	return NULL;
}

static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	/* Nothing to do, xpcs_config() handles everything */
}
static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
{
	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
	bool *hs_enable = &fpe_cfg->hs_enable;

	if (is_up && *hs_enable) {
		stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
					MPACKET_VERIFY);
	} else {
		*lo_state = FPE_STATE_OFF;
		*lp_state = FPE_STATE_OFF;
	}
}
static void stmmac_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_mac_set(priv, priv->ioaddr, false);
	priv->eee_active = false;
	priv->tx_lpi_enabled = false;
	priv->eee_enabled = stmmac_eee_init(priv);
	stmmac_set_eee_pls(priv, priv->hw, false);

	if (priv->dma_cap.fpesel)
		stmmac_fpe_link_state_handle(priv, false);
}
static void stmmac_mac_link_up(struct phylink_config *config,
			       struct phy_device *phy,
			       unsigned int mode, phy_interface_t interface,
			       int speed, int duplex,
			       bool tx_pause, bool rx_pause)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	u32 old_ctrl, ctrl;

	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
	    priv->plat->serdes_powerup)
		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);

	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
	ctrl = old_ctrl & ~priv->hw->link.speed_mask;

	if (interface == PHY_INTERFACE_MODE_USXGMII) {
		switch (speed) {
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_5000:
			ctrl |= priv->hw->link.xgmii.speed5000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.xgmii.speed2500;
			break;
		default:
			return;
		}
	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
		switch (speed) {
		case SPEED_100000:
			ctrl |= priv->hw->link.xlgmii.speed100000;
			break;
		case SPEED_50000:
			ctrl |= priv->hw->link.xlgmii.speed50000;
			break;
		case SPEED_40000:
			ctrl |= priv->hw->link.xlgmii.speed40000;
			break;
		case SPEED_25000:
			ctrl |= priv->hw->link.xlgmii.speed25000;
			break;
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		default:
			return;
		}
	} else {
		switch (speed) {
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		case SPEED_100:
			ctrl |= priv->hw->link.speed100;
			break;
		case SPEED_10:
			ctrl |= priv->hw->link.speed10;
			break;
		default:
			return;
		}
	}

	priv->speed = speed;

	if (priv->plat->fix_mac_speed)
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);

	if (!duplex)
		ctrl &= ~priv->hw->link.duplex;
	else
		ctrl |= priv->hw->link.duplex;

	/* Flow Control operation */
	if (rx_pause && tx_pause)
		priv->flow_ctrl = FLOW_AUTO;
	else if (rx_pause && !tx_pause)
		priv->flow_ctrl = FLOW_RX;
	else if (!rx_pause && tx_pause)
		priv->flow_ctrl = FLOW_TX;
	else
		priv->flow_ctrl = FLOW_OFF;

	stmmac_mac_flow_ctrl(priv, duplex);

	if (ctrl != old_ctrl)
		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

	stmmac_mac_set(priv, priv->ioaddr, true);
	if (phy && priv->dma_cap.eee) {
		priv->eee_active =
			phy_init_eee(phy, !(priv->plat->flags &
				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
		priv->eee_enabled = stmmac_eee_init(priv);
		priv->tx_lpi_enabled = priv->eee_enabled;
		stmmac_set_eee_pls(priv, priv->hw, true);
	}

	if (priv->dma_cap.fpesel)
		stmmac_fpe_link_state_handle(priv, true);

	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
		stmmac_hwtstamp_correct_latency(priv, priv);
}
static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
	.mac_select_pcs = stmmac_mac_select_pcs,
	.mac_config = stmmac_mac_config,
	.mac_link_down = stmmac_mac_link_down,
	.mac_link_up = stmmac_mac_link_up,
};
/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
	int interface = priv->plat->mac_interface;

	if (priv->dma_cap.pcs) {
		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_RGMII;
		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_SGMII;
		}
	}
}
/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 *  Return value:
 *  0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct fwnode_handle *phy_fwnode;
	struct fwnode_handle *fwnode;
	int ret;

	if (!phylink_expects_phy(priv->phylink))
		return 0;

	fwnode = priv->plat->port_node;
	if (!fwnode)
		fwnode = dev_fwnode(priv->device);

	if (fwnode)
		phy_fwnode = fwnode_get_phy_node(fwnode);
	else
		phy_fwnode = NULL;

	/* Some DT bindings do not set-up the PHY handle. Let's try to
	 * manually parse it
	 */
	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
		int addr = priv->plat->phy_addr;
		struct phy_device *phydev;

		if (addr < 0) {
			netdev_err(priv->dev, "no phy found\n");
			return -ENODEV;
		}

		phydev = mdiobus_get_phy(priv->mii, addr);
		if (!phydev) {
			netdev_err(priv->dev, "no phy at addr %d\n", addr);
			return -ENODEV;
		}

		ret = phylink_connect_phy(priv->phylink, phydev);
	} else {
		fwnode_handle_put(phy_fwnode);
		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
	}

	if (!priv->plat->pmt) {
		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };

		phylink_ethtool_get_wol(priv->phylink, &wol);
		device_set_wakeup_capable(priv->device, !!wol.supported);
		device_set_wakeup_enable(priv->device, !!wol.wolopts);
	}

	return ret;
}
static void stmmac_set_half_duplex(struct stmmac_priv *priv)
{
	/* Half-Duplex can only work with single tx queue */
	if (priv->plat->tx_queues_to_use > 1)
		priv->phylink_config.mac_capabilities &=
			~(MAC_10HD | MAC_100HD | MAC_1000HD);
	else
		priv->phylink_config.mac_capabilities |=
			(MAC_10HD | MAC_100HD | MAC_1000HD);
}
static int stmmac_phy_setup(struct stmmac_priv *priv)
{
	struct stmmac_mdio_bus_data *mdio_bus_data;
	int mode = priv->plat->phy_interface;
	struct fwnode_handle *fwnode;
	struct phylink *phylink;
	int max_speed;

	priv->phylink_config.dev = &priv->dev->dev;
	priv->phylink_config.type = PHYLINK_NETDEV;
	priv->phylink_config.mac_managed_pm = true;

	mdio_bus_data = priv->plat->mdio_bus_data;
	if (mdio_bus_data)
		priv->phylink_config.ovr_an_inband =
			mdio_bus_data->xpcs_an_inband;

	/* Set the platform/firmware specified interface mode. Note, phylink
	 * deals with the PHY interface mode, not the MAC interface mode.
	 */
	__set_bit(mode, priv->phylink_config.supported_interfaces);

	/* If we have an xpcs, it defines which PHY interfaces are supported. */
	if (priv->hw->xpcs)
		xpcs_get_interfaces(priv->hw->xpcs,
				    priv->phylink_config.supported_interfaces);

	priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
						MAC_10FD | MAC_100FD |
						MAC_1000FD;

	stmmac_set_half_duplex(priv);

	/* Get the MAC specific capabilities */
	stmmac_mac_phylink_get_caps(priv);

	max_speed = priv->plat->max_speed;
	if (max_speed)
		phylink_limit_mac_speed(&priv->phylink_config, max_speed);

	fwnode = priv->plat->port_node;
	if (!fwnode)
		fwnode = dev_fwnode(priv->device);

	phylink = phylink_create(&priv->phylink_config, fwnode,
				 mode, &stmmac_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	priv->phylink = phylink;
	return 0;
}
static void stmmac_display_rx_rings(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	unsigned int desc_size;
	void *head_rx;
	u32 queue;

	/* Display RX rings */
	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];

		pr_info("\tRX Queue %u rings\n", queue);

		if (priv->extend_desc) {
			head_rx = (void *)rx_q->dma_erx;
			desc_size = sizeof(struct dma_extended_desc);
		} else {
			head_rx = (void *)rx_q->dma_rx;
			desc_size = sizeof(struct dma_desc);
		}

		/* Display RX ring */
		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
				    rx_q->dma_rx_phy, desc_size);
	}
}
static void stmmac_display_tx_rings(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	unsigned int desc_size;
	void *head_tx;
	u32 queue;

	/* Display TX rings */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];

		pr_info("\tTX Queue %d rings\n", queue);

		if (priv->extend_desc) {
			head_tx = (void *)tx_q->dma_etx;
			desc_size = sizeof(struct dma_extended_desc);
		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
			head_tx = (void *)tx_q->dma_entx;
			desc_size = sizeof(struct dma_edesc);
		} else {
			head_tx = (void *)tx_q->dma_tx;
			desc_size = sizeof(struct dma_desc);
		}

		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
				    tx_q->dma_tx_phy, desc_size);
	}
}
static void stmmac_display_rings(struct stmmac_priv *priv,
				 struct stmmac_dma_conf *dma_conf)
{
	/* Display RX ring */
	stmmac_display_rx_rings(priv, dma_conf);

	/* Display TX ring */
	stmmac_display_tx_rings(priv, dma_conf);
}
static int stmmac_set_bfsize(int mtu, int bufsize)
{
	int ret = bufsize;

	if (mtu >= BUF_SIZE_8KiB)
		ret = BUF_SIZE_16KiB;
	else if (mtu >= BUF_SIZE_4KiB)
		ret = BUF_SIZE_8KiB;
	else if (mtu >= BUF_SIZE_2KiB)
		ret = BUF_SIZE_4KiB;
	else if (mtu > DEFAULT_BUFSIZE)
		ret = BUF_SIZE_2KiB;
	else
		ret = DEFAULT_BUFSIZE;

	return ret;
}
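/* For example, an MTU of 3000 bytes falls into the "mtu >= BUF_SIZE_2KiB"
 * bucket above and therefore selects BUF_SIZE_4KiB receive buffers, while the
 * default 1500-byte MTU keeps DEFAULT_BUFSIZE (1536).
 */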
/**
 * stmmac_clear_rx_descriptors - clear RX descriptors
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors
 * in case of both basic and extended descriptors are used.
 */
static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
					struct stmmac_dma_conf *dma_conf,
					u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int i;

	/* Clear the RX descriptors */
	for (i = 0; i < dma_conf->dma_rx_size; i++)
		if (priv->extend_desc)
			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
					    priv->use_riwt, priv->mode,
					    (i == dma_conf->dma_rx_size - 1),
					    dma_conf->dma_buf_sz);
		else
			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
					    priv->use_riwt, priv->mode,
					    (i == dma_conf->dma_rx_size - 1),
					    dma_conf->dma_buf_sz);
}
/**
 * stmmac_clear_tx_descriptors - clear tx descriptors
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors
 * in case of both basic and extended descriptors are used.
 */
static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
					struct stmmac_dma_conf *dma_conf,
					u32 queue)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
	int i;

	/* Clear the TX descriptors */
	for (i = 0; i < dma_conf->dma_tx_size; i++) {
		int last = (i == (dma_conf->dma_tx_size - 1));
		struct dma_desc *p;

		if (priv->extend_desc)
			p = &tx_q->dma_etx[i].basic;
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			p = &tx_q->dma_entx[i].basic;
		else
			p = &tx_q->dma_tx[i];

		stmmac_init_tx_desc(priv, p, priv->mode, last);
	}
}
/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * Description: this function is called to clear the TX and RX descriptors
 * in case of both basic and extended descriptors are used.
 */
static void stmmac_clear_descriptors(struct stmmac_priv *priv,
				     struct stmmac_dma_conf *dma_conf)
{
	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Clear the RX descriptors */
	for (queue = 0; queue < rx_queue_cnt; queue++)
		stmmac_clear_rx_descriptors(priv, dma_conf, queue);

	/* Clear the TX descriptors */
	for (queue = 0; queue < tx_queue_cnt; queue++)
		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
}
/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag
 * @queue: RX queue index
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
				  struct stmmac_dma_conf *dma_conf,
				  struct dma_desc *p,
				  int i, gfp_t flags, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	if (priv->dma_cap.host_dma_width <= 32)
		gfp |= GFP_DMA32;

	if (!buf->page) {
		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
		if (!buf->page)
			return -ENOMEM;
		buf->page_offset = stmmac_rx_offset(priv);
	}

	if (priv->sph && !buf->sec_page) {
		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
		if (!buf->sec_page)
			return -ENOMEM;

		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
	} else {
		buf->sec_page = NULL;
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
	}

	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;

	stmmac_set_desc_addr(priv, p, buf->addr);
	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
		stmmac_init_desc3(priv, p);

	return 0;
}
/**
 * stmmac_free_rx_buffer - free RX dma buffers
 * @priv: private structure
 * @rx_q: RX queue
 * @i: buffer index.
 */
static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
				  struct stmmac_rx_queue *rx_q,
				  int i)
{
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

	if (buf->page)
		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
	buf->page = NULL;

	if (buf->sec_page)
		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
	buf->sec_page = NULL;
}
/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 * @i: buffer index.
 */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
				  struct stmmac_dma_conf *dma_conf,
				  u32 queue, int i)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];

	if (tx_q->tx_skbuff_dma[i].buf &&
	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
		if (tx_q->tx_skbuff_dma[i].map_as_page)
			dma_unmap_page(priv->device,
				       tx_q->tx_skbuff_dma[i].buf,
				       tx_q->tx_skbuff_dma[i].len,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device,
					 tx_q->tx_skbuff_dma[i].buf,
					 tx_q->tx_skbuff_dma[i].len,
					 DMA_TO_DEVICE);
	}

	if (tx_q->xdpf[i] &&
	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
		xdp_return_frame(tx_q->xdpf[i]);
		tx_q->xdpf[i] = NULL;
	}

	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
		tx_q->xsk_frames_done++;

	if (tx_q->tx_skbuff[i] &&
	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
		tx_q->tx_skbuff[i] = NULL;
	}

	tx_q->tx_skbuff_dma[i].buf = 0;
	tx_q->tx_skbuff_dma[i].map_as_page = false;
}
/**
 * dma_free_rx_skbufs - free RX dma buffers
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 */
static void dma_free_rx_skbufs(struct stmmac_priv *priv,
			       struct stmmac_dma_conf *dma_conf,
			       u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int i;

	for (i = 0; i < dma_conf->dma_rx_size; i++)
		stmmac_free_rx_buffer(priv, rx_q, i);
}
static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
				   struct stmmac_dma_conf *dma_conf,
				   u32 queue, gfp_t flags)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int i;

	for (i = 0; i < dma_conf->dma_rx_size; i++) {
		struct dma_desc *p;
		int ret;

		if (priv->extend_desc)
			p = &((rx_q->dma_erx + i)->basic);
		else
			p = rx_q->dma_rx + i;

		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
					     queue);
		if (ret)
			return ret;

		rx_q->buf_alloc_num++;
	}

	return 0;
}
/**
 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 */
static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
				struct stmmac_dma_conf *dma_conf,
				u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int i;

	for (i = 0; i < dma_conf->dma_rx_size; i++) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

		if (!buf->xdp)
			continue;

		xsk_buff_free(buf->xdp);
		buf->xdp = NULL;
	}
}
static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
				      struct stmmac_dma_conf *dma_conf,
				      u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int i;

	/* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
	 * use this macro to make sure no size violations.
	 */
	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);

	for (i = 0; i < dma_conf->dma_rx_size; i++) {
		struct stmmac_rx_buffer *buf;
		dma_addr_t dma_addr;
		struct dma_desc *p;

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + i);
		else
			p = rx_q->dma_rx + i;

		buf = &rx_q->buf_pool[i];

		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
		if (!buf->xdp)
			return -ENOMEM;

		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
		stmmac_set_desc_addr(priv, p, dma_addr);
		rx_q->buf_alloc_num++;
	}

	return 0;
}
*stmmac_get_xsk_pool(struct stmmac_priv
*priv
, u32 queue
)
1660 if (!stmmac_xdp_is_enabled(priv
) || !test_bit(queue
, priv
->af_xdp_zc_qps
))
1663 return xsk_get_pool_from_qid(priv
->dev
, queue
);
/**
 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf,
				    u32 queue, gfp_t flags)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int ret;

	netif_dbg(priv, probe, priv->dev,
		  "(%s) dma_rx_phy=0x%08x\n", __func__,
		  (u32)rx_q->dma_rx_phy);

	stmmac_clear_rx_descriptors(priv, dma_conf, queue);

	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);

	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);

	if (rx_q->xsk_pool) {
		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
						   MEM_TYPE_XSK_BUFF_POOL,
						   NULL));
		netdev_info(priv->dev,
			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
			    rx_q->queue_index);
		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
	} else {
		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
						   MEM_TYPE_PAGE_POOL,
						   rx_q->page_pool));
		netdev_info(priv->dev,
			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
			    rx_q->queue_index);
	}

	if (rx_q->xsk_pool) {
		/* RX XDP ZC buffer pool may not be populated, e.g.
		 * xdpsock TX-only.
		 */
		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
	} else {
		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
		if (ret < 0)
			return -ENOMEM;
	}

	/* Setup the chained descriptor addresses */
	if (priv->mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc)
			stmmac_mode_init(priv, rx_q->dma_erx,
					 rx_q->dma_rx_phy,
					 dma_conf->dma_rx_size, 1);
		else
			stmmac_mode_init(priv, rx_q->dma_rx,
					 rx_q->dma_rx_phy,
					 dma_conf->dma_rx_size, 0);
	}

	return 0;
}
static int init_dma_rx_desc_rings(struct net_device *dev,
				  struct stmmac_dma_conf *dma_conf,
				  gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	int queue;
	int ret;

	/* RX INITIALIZATION */
	netif_dbg(priv, probe, priv->dev,
		  "SKB addresses:\nskb\t\tskb data\tdma data\n");

	for (queue = 0; queue < rx_count; queue++) {
		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
		if (ret)
			goto err_init_rx_buffers;
	}

	return 0;

err_init_rx_buffers:
	while (queue >= 0) {
		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];

		if (rx_q->xsk_pool)
			dma_free_rx_xskbufs(priv, dma_conf, queue);
		else
			dma_free_rx_skbufs(priv, dma_conf, queue);

		rx_q->buf_alloc_num = 0;
		rx_q->xsk_pool = NULL;

		queue--;
	}

	return ret;
}
/**
 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 * Description: this function initializes the DMA TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf,
				    u32 queue)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
	int i;

	netif_dbg(priv, probe, priv->dev,
		  "(%s) dma_tx_phy=0x%08x\n", __func__,
		  (u32)tx_q->dma_tx_phy);

	/* Setup the chained descriptor addresses */
	if (priv->mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc)
			stmmac_mode_init(priv, tx_q->dma_etx,
					 tx_q->dma_tx_phy,
					 dma_conf->dma_tx_size, 1);
		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
			stmmac_mode_init(priv, tx_q->dma_tx,
					 tx_q->dma_tx_phy,
					 dma_conf->dma_tx_size, 0);
	}

	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);

	for (i = 0; i < dma_conf->dma_tx_size; i++) {
		struct dma_desc *p;

		if (priv->extend_desc)
			p = &((tx_q->dma_etx + i)->basic);
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			p = &((tx_q->dma_entx + i)->basic);
		else
			p = tx_q->dma_tx + i;

		stmmac_clear_desc(priv, p);

		tx_q->tx_skbuff_dma[i].buf = 0;
		tx_q->tx_skbuff_dma[i].map_as_page = false;
		tx_q->tx_skbuff_dma[i].len = 0;
		tx_q->tx_skbuff_dma[i].last_segment = false;
		tx_q->tx_skbuff[i] = NULL;
	}

	return 0;
}
static int init_dma_tx_desc_rings(struct net_device *dev,
				  struct stmmac_dma_conf *dma_conf)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 tx_queue_cnt;
	u32 queue;

	tx_queue_cnt = priv->plat->tx_queues_to_use;

	for (queue = 0; queue < tx_queue_cnt; queue++)
		__init_dma_tx_desc_rings(priv, dma_conf, queue);

	return 0;
}
/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_desc_rings(struct net_device *dev,
			       struct stmmac_dma_conf *dma_conf,
			       gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
	if (ret)
		return ret;

	ret = init_dma_tx_desc_rings(dev, dma_conf);

	stmmac_clear_descriptors(priv, dma_conf);

	if (netif_msg_hw(priv))
		stmmac_display_rings(priv, dma_conf);

	return ret;
}
/**
 * dma_free_tx_skbufs - free TX dma buffers
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 */
static void dma_free_tx_skbufs(struct stmmac_priv *priv,
			       struct stmmac_dma_conf *dma_conf,
			       u32 queue)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
	int i;

	tx_q->xsk_frames_done = 0;

	for (i = 0; i < dma_conf->dma_tx_size; i++)
		stmmac_free_tx_buffer(priv, dma_conf, queue, i);

	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
		tx_q->xsk_frames_done = 0;
		tx_q->xsk_pool = NULL;
	}
}
/**
 * stmmac_free_tx_skbufs - free TX skb buffers
 * @priv: private structure
 */
static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
{
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < tx_queue_cnt; queue++)
		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
}
/**
 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 */
static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
					 struct stmmac_dma_conf *dma_conf,
					 u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];

	/* Release the DMA RX socket buffers */
	if (rx_q->xsk_pool)
		dma_free_rx_xskbufs(priv, dma_conf, queue);
	else
		dma_free_rx_skbufs(priv, dma_conf, queue);

	rx_q->buf_alloc_num = 0;
	rx_q->xsk_pool = NULL;

	/* Free DMA regions of consistent memory previously allocated */
	if (!priv->extend_desc)
		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
				  sizeof(struct dma_desc),
				  rx_q->dma_rx, rx_q->dma_rx_phy);
	else
		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
				  sizeof(struct dma_extended_desc),
				  rx_q->dma_erx, rx_q->dma_rx_phy);

	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
		xdp_rxq_info_unreg(&rx_q->xdp_rxq);

	kfree(rx_q->buf_pool);
	if (rx_q->page_pool)
		page_pool_destroy(rx_q->page_pool);
}
static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
				       struct stmmac_dma_conf *dma_conf)
{
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 queue;

	/* Free RX queue resources */
	for (queue = 0; queue < rx_count; queue++)
		__free_dma_rx_desc_resources(priv, dma_conf, queue);
}
/**
 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 */
static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
					 struct stmmac_dma_conf *dma_conf,
					 u32 queue)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
	size_t size;
	void *addr;

	/* Release the DMA TX socket buffers */
	dma_free_tx_skbufs(priv, dma_conf, queue);

	if (priv->extend_desc) {
		size = sizeof(struct dma_extended_desc);
		addr = tx_q->dma_etx;
	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
		size = sizeof(struct dma_edesc);
		addr = tx_q->dma_entx;
	} else {
		size = sizeof(struct dma_desc);
		addr = tx_q->dma_tx;
	}

	size *= dma_conf->dma_tx_size;

	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);

	kfree(tx_q->tx_skbuff_dma);
	kfree(tx_q->tx_skbuff);
}
static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
				       struct stmmac_dma_conf *dma_conf)
{
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Free TX queue resources */
	for (queue = 0; queue < tx_count; queue++)
		__free_dma_tx_desc_resources(priv, dma_conf, queue);
}
/**
 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffer in order to
 * allow zero-copy mechanism.
 */
static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
					 struct stmmac_dma_conf *dma_conf,
					 u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	bool xdp_prog = stmmac_xdp_is_enabled(priv);
	struct page_pool_params pp_params = { 0 };
	unsigned int num_pages;
	unsigned int napi_id;
	int ret;

	rx_q->queue_index = queue;
	rx_q->priv_data = priv;

	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
	pp_params.pool_size = dma_conf->dma_rx_size;
	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
	pp_params.order = ilog2(num_pages);
	pp_params.nid = dev_to_node(priv->device);
	pp_params.dev = priv->device;
	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
	pp_params.offset = stmmac_rx_offset(priv);
	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);

	rx_q->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rx_q->page_pool)) {
		ret = PTR_ERR(rx_q->page_pool);
		rx_q->page_pool = NULL;
		return ret;
	}

	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
				 sizeof(*rx_q->buf_pool),
				 GFP_KERNEL);
	if (!rx_q->buf_pool)
		return -ENOMEM;

	if (priv->extend_desc) {
		rx_q->dma_erx = dma_alloc_coherent(priv->device,
						   dma_conf->dma_rx_size *
						   sizeof(struct dma_extended_desc),
						   &rx_q->dma_rx_phy,
						   GFP_KERNEL);
		if (!rx_q->dma_erx)
			return -ENOMEM;

	} else {
		rx_q->dma_rx = dma_alloc_coherent(priv->device,
						  dma_conf->dma_rx_size *
						  sizeof(struct dma_desc),
						  &rx_q->dma_rx_phy,
						  GFP_KERNEL);
		if (!rx_q->dma_rx)
			return -ENOMEM;
	}

	if (stmmac_xdp_is_enabled(priv) &&
	    test_bit(queue, priv->af_xdp_zc_qps))
		napi_id = ch->rxtx_napi.napi_id;
	else
		napi_id = ch->rx_napi.napi_id;

	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
			       rx_q->queue_index,
			       napi_id);
	if (ret) {
		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
		return -EINVAL;
	}

	return 0;
}
static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
				       struct stmmac_dma_conf *dma_conf)
{
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 queue;
	int ret;

	/* RX queues buffers and DMA */
	for (queue = 0; queue < rx_count; queue++) {
		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
		if (ret)
			goto err_dma;
	}

	return 0;

err_dma:
	free_dma_rx_desc_resources(priv, dma_conf);

	return ret;
}
/**
 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffer in order to
 * allow zero-copy mechanism.
 */
static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
					 struct stmmac_dma_conf *dma_conf,
					 u32 queue)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
	size_t size;
	void *addr;

	tx_q->queue_index = queue;
	tx_q->priv_data = priv;

	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
				      sizeof(*tx_q->tx_skbuff_dma),
				      GFP_KERNEL);
	if (!tx_q->tx_skbuff_dma)
		return -ENOMEM;

	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
				  sizeof(struct sk_buff *),
				  GFP_KERNEL);
	if (!tx_q->tx_skbuff)
		return -ENOMEM;

	if (priv->extend_desc)
		size = sizeof(struct dma_extended_desc);
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		size = sizeof(struct dma_edesc);
	else
		size = sizeof(struct dma_desc);

	size *= dma_conf->dma_tx_size;

	addr = dma_alloc_coherent(priv->device, size,
				  &tx_q->dma_tx_phy, GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	if (priv->extend_desc)
		tx_q->dma_etx = addr;
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		tx_q->dma_entx = addr;
	else
		tx_q->dma_tx = addr;

	return 0;
}
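/* Note: the TX ring is one coherent allocation whose element size follows the
 * descriptor flavour in use: extended descriptors when priv->extend_desc is
 * set, dma_edesc when the queue has TBS (time based scheduling) available,
 * and the basic dma_desc otherwise. __free_dma_tx_desc_resources() mirrors
 * exactly this selection when releasing the ring.
 */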
static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
				       struct stmmac_dma_conf *dma_conf)
{
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 queue;
	int ret;

	/* TX queues buffers and DMA */
	for (queue = 0; queue < tx_count; queue++) {
		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
		if (ret)
			goto err_dma;
	}

	return 0;

err_dma:
	free_dma_tx_desc_resources(priv, dma_conf);
	return ret;
}
/**
 * alloc_dma_desc_resources - alloc TX/RX resources.
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffer in order to
 * allow zero-copy mechanism.
 */
static int alloc_dma_desc_resources(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf)
{
	/* RX Allocation */
	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);

	if (ret)
		return ret;

	ret = alloc_dma_tx_desc_resources(priv, dma_conf);

	return ret;
}

/**
 * free_dma_desc_resources - free dma desc resources
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 */
static void free_dma_desc_resources(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf)
{
	/* Release the DMA TX socket buffers */
	free_dma_tx_desc_resources(priv, dma_conf);

	/* Release the DMA RX socket buffers later
	 * to ensure all pending XDP_TX buffers are returned.
	 */
	free_dma_rx_desc_resources(priv, dma_conf);
}
/**
 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
 * @priv: driver private structure
 * Description: It is used for enabling the rx queues in the MAC
 */
static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	int queue;
	u8 mode;

	for (queue = 0; queue < rx_queues_count; queue++) {
		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
	}
}
/**
 * stmmac_start_rx_dma - start RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This starts a RX DMA channel
 */
static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
	stmmac_start_rx(priv, priv->ioaddr, chan);
}

/**
 * stmmac_start_tx_dma - start TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This starts a TX DMA channel
 */
static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
	stmmac_start_tx(priv, priv->ioaddr, chan);
}

/**
 * stmmac_stop_rx_dma - stop RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This stops a RX DMA channel
 */
static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
	stmmac_stop_rx(priv, priv->ioaddr, chan);
}

/**
 * stmmac_stop_tx_dma - stop TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This stops a TX DMA channel
 */
static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
	stmmac_stop_tx(priv, priv->ioaddr, chan);
}
static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
	u32 chan;

	for (chan = 0; chan < dma_csr_ch; chan++) {
		struct stmmac_channel *ch = &priv->channel[chan];
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
		spin_unlock_irqrestore(&ch->lock, flags);
	}
}
/**
 * stmmac_start_all_dma - start all RX and TX DMA channels
 * @priv: driver private structure
 * Description:
 * This starts all the RX and TX DMA channels
 */
static void stmmac_start_all_dma(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan = 0;

	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_start_rx_dma(priv, chan);

	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_start_tx_dma(priv, chan);
}

/**
 * stmmac_stop_all_dma - stop all RX and TX DMA channels
 * @priv: driver private structure
 * Description:
 * This stops the RX and TX DMA channels
 */
static void stmmac_stop_all_dma(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan = 0;

	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_stop_rx_dma(priv, chan);

	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_stop_tx_dma(priv, chan);
}
/**
 * stmmac_dma_operation_mode - HW DMA operation mode
 * @priv: driver private structure
 * Description: it is used for configuring the DMA operation mode register in
 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
 */
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	int rxfifosz = priv->plat->rx_fifo_size;
	int txfifosz = priv->plat->tx_fifo_size;
	u32 txmode = 0;
	u32 rxmode = 0;
	u32 chan = 0;
	u8 qmode = 0;

	if (rxfifosz == 0)
		rxfifosz = priv->dma_cap.rx_fifo_size;
	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	/* Adjust for real per queue fifo size */
	rxfifosz /= rx_channels_count;
	txfifosz /= tx_channels_count;

	if (priv->plat->force_thresh_dma_mode) {
		txmode = tc;
		rxmode = tc;
	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
		/*
		 * In case of GMAC, SF mode can be enabled
		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE if actually supported
		 * 2) There is no bugged Jumbo frame support
		 *    that needs to not insert csum in the TDES.
		 */
		txmode = SF_DMA_MODE;
		rxmode = SF_DMA_MODE;
		priv->xstats.threshold = SF_DMA_MODE;
	} else {
		txmode = tc;
		rxmode = SF_DMA_MODE;
	}

	/* configure all channels */
	for (chan = 0; chan < rx_channels_count; chan++) {
		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
		u32 buf_size;

		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;

		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
				   rxfifosz, qmode);

		if (rx_q->xsk_pool) {
			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
			stmmac_set_dma_bfsize(priv, priv->ioaddr,
					      buf_size,
					      chan);
		} else {
			stmmac_set_dma_bfsize(priv, priv->ioaddr,
					      priv->dma_conf.dma_buf_sz,
					      chan);
		}
	}

	for (chan = 0; chan < tx_channels_count; chan++) {
		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;

		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
				   txfifosz, qmode);
	}
}
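/* Note: the mode programmed above is either threshold mode, where the DMA
 * starts transmitting once roughly 'tc' bytes are queued, or Store-And-Forward
 * (SF_DMA_MODE), which is needed when TX checksum offload is used since the
 * whole frame must be buffered before the checksum can be inserted. The
 * per-queue FIFO sizes are obtained by evenly splitting the global RX/TX FIFO
 * across the active channels.
 */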
2434 static bool stmmac_xdp_xmit_zc(struct stmmac_priv
*priv
, u32 queue
, u32 budget
)
2436 struct netdev_queue
*nq
= netdev_get_tx_queue(priv
->dev
, queue
);
2437 struct stmmac_tx_queue
*tx_q
= &priv
->dma_conf
.tx_queue
[queue
];
2438 struct stmmac_txq_stats
*txq_stats
= &priv
->xstats
.txq_stats
[queue
];
2439 struct xsk_buff_pool
*pool
= tx_q
->xsk_pool
;
2440 unsigned int entry
= tx_q
->cur_tx
;
2441 struct dma_desc
*tx_desc
= NULL
;
2442 struct xdp_desc xdp_desc
;
2443 bool work_done
= true;
2444 u32 tx_set_ic_bit
= 0;
2445 unsigned long flags
;
2447 /* Avoids TX time-out as we are sharing with slow path */
2448 txq_trans_cond_update(nq
);
2450 budget
= min(budget
, stmmac_tx_avail(priv
, queue
));
2452 while (budget
-- > 0) {
2453 dma_addr_t dma_addr
;
2456 /* We are sharing with slow path and stop XSK TX desc submission when
2457 * available TX ring is less than threshold.
2459 if (unlikely(stmmac_tx_avail(priv
, queue
) < STMMAC_TX_XSK_AVAIL
) ||
2460 !netif_carrier_ok(priv
->dev
)) {
2465 if (!xsk_tx_peek_desc(pool
, &xdp_desc
))
2468 if (likely(priv
->extend_desc
))
2469 tx_desc
= (struct dma_desc
*)(tx_q
->dma_etx
+ entry
);
2470 else if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
2471 tx_desc
= &tx_q
->dma_entx
[entry
].basic
;
2473 tx_desc
= tx_q
->dma_tx
+ entry
;
2475 dma_addr
= xsk_buff_raw_get_dma(pool
, xdp_desc
.addr
);
2476 xsk_buff_raw_dma_sync_for_device(pool
, dma_addr
, xdp_desc
.len
);
2478 tx_q
->tx_skbuff_dma
[entry
].buf_type
= STMMAC_TXBUF_T_XSK_TX
;
2480 /* To return XDP buffer to XSK pool, we simple call
2481 * xsk_tx_completed(), so we don't need to fill up
2484 tx_q
->tx_skbuff_dma
[entry
].buf
= 0;
2485 tx_q
->xdpf
[entry
] = NULL
;
2487 tx_q
->tx_skbuff_dma
[entry
].map_as_page
= false;
2488 tx_q
->tx_skbuff_dma
[entry
].len
= xdp_desc
.len
;
2489 tx_q
->tx_skbuff_dma
[entry
].last_segment
= true;
2490 tx_q
->tx_skbuff_dma
[entry
].is_jumbo
= false;
2492 stmmac_set_desc_addr(priv
, tx_desc
, dma_addr
);
2494 tx_q
->tx_count_frames
++;
2496 if (!priv
->tx_coal_frames
[queue
])
2498 else if (tx_q
->tx_count_frames
% priv
->tx_coal_frames
[queue
] == 0)
2504 tx_q
->tx_count_frames
= 0;
2505 stmmac_set_tx_ic(priv
, tx_desc
);
2509 stmmac_prepare_tx_desc(priv
, tx_desc
, 1, xdp_desc
.len
,
2510 true, priv
->mode
, true, true,
2513 stmmac_enable_dma_transmission(priv
, priv
->ioaddr
);
2515 tx_q
->cur_tx
= STMMAC_GET_ENTRY(tx_q
->cur_tx
, priv
->dma_conf
.dma_tx_size
);
2516 entry
= tx_q
->cur_tx
;
2518 flags
= u64_stats_update_begin_irqsave(&txq_stats
->syncp
);
2519 txq_stats
->tx_set_ic_bit
+= tx_set_ic_bit
;
2520 u64_stats_update_end_irqrestore(&txq_stats
->syncp
, flags
);
2523 stmmac_flush_tx_descriptors(priv
, queue
);
2524 xsk_tx_release(pool
);
2527 /* Return true if all of the 3 conditions are met
2528 * a) TX Budget is still available
2529 * b) work_done = true when XSK TX desc peek is empty (no more
2530 * pending XSK TX for transmission)
2532 return !!budget
&& work_done
;
static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
{
	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
		tc += 64;
		if (priv->plat->force_thresh_dma_mode)
			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
		else
			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
						      chan);

		priv->xstats.threshold = tc;
	}
}
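/* Note: on a tx_hard_error_bump_tc event the threshold is grown in 64-byte
 * steps (e.g. 64 -> 128 -> 192 -> 256) and reprogrammed for the failing
 * channel only; no further bumping is done once 'tc' has passed 256 or the
 * driver is already running in Store-And-Forward mode.
 */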
2551 * stmmac_tx_clean - to manage the transmission completion
2552 * @priv: driver private structure
2553 * @budget: napi budget limiting this functions packet handling
2554 * @queue: TX queue index
2555 * @pending_packets: signal to arm the TX coal timer
2556 * Description: it reclaims the transmit resources after transmission completes.
2557 * If some packets still needs to be handled, due to TX coalesce, set
2558 * pending_packets to true to make NAPI arm the TX coal timer.
2560 static int stmmac_tx_clean(struct stmmac_priv
*priv
, int budget
, u32 queue
,
2561 bool *pending_packets
)
2563 struct stmmac_tx_queue
*tx_q
= &priv
->dma_conf
.tx_queue
[queue
];
2564 struct stmmac_txq_stats
*txq_stats
= &priv
->xstats
.txq_stats
[queue
];
2565 unsigned int bytes_compl
= 0, pkts_compl
= 0;
2566 unsigned int entry
, xmits
= 0, count
= 0;
2567 u32 tx_packets
= 0, tx_errors
= 0;
2568 unsigned long flags
;
2570 __netif_tx_lock_bh(netdev_get_tx_queue(priv
->dev
, queue
));
2572 tx_q
->xsk_frames_done
= 0;
2574 entry
= tx_q
->dirty_tx
;
2576 /* Try to clean all TX complete frame in 1 shot */
2577 while ((entry
!= tx_q
->cur_tx
) && count
< priv
->dma_conf
.dma_tx_size
) {
2578 struct xdp_frame
*xdpf
;
2579 struct sk_buff
*skb
;
2583 if (tx_q
->tx_skbuff_dma
[entry
].buf_type
== STMMAC_TXBUF_T_XDP_TX
||
2584 tx_q
->tx_skbuff_dma
[entry
].buf_type
== STMMAC_TXBUF_T_XDP_NDO
) {
2585 xdpf
= tx_q
->xdpf
[entry
];
2587 } else if (tx_q
->tx_skbuff_dma
[entry
].buf_type
== STMMAC_TXBUF_T_SKB
) {
2589 skb
= tx_q
->tx_skbuff
[entry
];
2595 if (priv
->extend_desc
)
2596 p
= (struct dma_desc
*)(tx_q
->dma_etx
+ entry
);
2597 else if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
2598 p
= &tx_q
->dma_entx
[entry
].basic
;
2600 p
= tx_q
->dma_tx
+ entry
;
2602 status
= stmmac_tx_status(priv
, &priv
->xstats
, p
, priv
->ioaddr
);
2603 /* Check if the descriptor is owned by the DMA */
2604 if (unlikely(status
& tx_dma_own
))
2609 /* Make sure descriptor fields are read after reading
2614 /* Just consider the last segment and ...*/
2615 if (likely(!(status
& tx_not_ls
))) {
2616 /* ... verify the status error condition */
2617 if (unlikely(status
& tx_err
)) {
2619 if (unlikely(status
& tx_err_bump_tc
))
2620 stmmac_bump_dma_threshold(priv
, queue
);
2625 stmmac_get_tx_hwtstamp(priv
, p
, skb
);
2628 if (likely(tx_q
->tx_skbuff_dma
[entry
].buf
&&
2629 tx_q
->tx_skbuff_dma
[entry
].buf_type
!= STMMAC_TXBUF_T_XDP_TX
)) {
2630 if (tx_q
->tx_skbuff_dma
[entry
].map_as_page
)
2631 dma_unmap_page(priv
->device
,
2632 tx_q
->tx_skbuff_dma
[entry
].buf
,
2633 tx_q
->tx_skbuff_dma
[entry
].len
,
2636 dma_unmap_single(priv
->device
,
2637 tx_q
->tx_skbuff_dma
[entry
].buf
,
2638 tx_q
->tx_skbuff_dma
[entry
].len
,
2640 tx_q
->tx_skbuff_dma
[entry
].buf
= 0;
2641 tx_q
->tx_skbuff_dma
[entry
].len
= 0;
2642 tx_q
->tx_skbuff_dma
[entry
].map_as_page
= false;
2645 stmmac_clean_desc3(priv
, tx_q
, p
);
2647 tx_q
->tx_skbuff_dma
[entry
].last_segment
= false;
2648 tx_q
->tx_skbuff_dma
[entry
].is_jumbo
= false;
2651 tx_q
->tx_skbuff_dma
[entry
].buf_type
== STMMAC_TXBUF_T_XDP_TX
) {
2652 xdp_return_frame_rx_napi(xdpf
);
2653 tx_q
->xdpf
[entry
] = NULL
;
2657 tx_q
->tx_skbuff_dma
[entry
].buf_type
== STMMAC_TXBUF_T_XDP_NDO
) {
2658 xdp_return_frame(xdpf
);
2659 tx_q
->xdpf
[entry
] = NULL
;
2662 if (tx_q
->tx_skbuff_dma
[entry
].buf_type
== STMMAC_TXBUF_T_XSK_TX
)
2663 tx_q
->xsk_frames_done
++;
2665 if (tx_q
->tx_skbuff_dma
[entry
].buf_type
== STMMAC_TXBUF_T_SKB
) {
2668 bytes_compl
+= skb
->len
;
2669 dev_consume_skb_any(skb
);
2670 tx_q
->tx_skbuff
[entry
] = NULL
;
2674 stmmac_release_tx_desc(priv
, p
, priv
->mode
);
2676 entry
= STMMAC_GET_ENTRY(entry
, priv
->dma_conf
.dma_tx_size
);
2678 tx_q
->dirty_tx
= entry
;
2680 netdev_tx_completed_queue(netdev_get_tx_queue(priv
->dev
, queue
),
2681 pkts_compl
, bytes_compl
);
2683 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv
->dev
,
2685 stmmac_tx_avail(priv
, queue
) > STMMAC_TX_THRESH(priv
)) {
2687 netif_dbg(priv
, tx_done
, priv
->dev
,
2688 "%s: restart transmit\n", __func__
);
2689 netif_tx_wake_queue(netdev_get_tx_queue(priv
->dev
, queue
));
2692 if (tx_q
->xsk_pool
) {
2695 if (tx_q
->xsk_frames_done
)
2696 xsk_tx_completed(tx_q
->xsk_pool
, tx_q
->xsk_frames_done
);
2698 if (xsk_uses_need_wakeup(tx_q
->xsk_pool
))
2699 xsk_set_tx_need_wakeup(tx_q
->xsk_pool
);
2701 /* For XSK TX, we try to send as many as possible.
2702 * If XSK work done (XSK TX desc empty and budget still
2703 * available), return "budget - 1" to reenable TX IRQ.
2704 * Else, return "budget" to make NAPI continue polling.
2706 work_done
= stmmac_xdp_xmit_zc(priv
, queue
,
2707 STMMAC_XSK_TX_BUDGET_MAX
);
2714 if (priv
->eee_enabled
&& !priv
->tx_path_in_lpi_mode
&&
2715 priv
->eee_sw_timer_en
) {
2716 if (stmmac_enable_eee_mode(priv
))
2717 mod_timer(&priv
->eee_ctrl_timer
, STMMAC_LPI_T(priv
->tx_lpi_timer
));
2720 /* We still have pending packets, let's call for a new scheduling */
2721 if (tx_q
->dirty_tx
!= tx_q
->cur_tx
)
2722 *pending_packets
= true;
2724 flags
= u64_stats_update_begin_irqsave(&txq_stats
->syncp
);
2725 txq_stats
->tx_packets
+= tx_packets
;
2726 txq_stats
->tx_pkt_n
+= tx_packets
;
2727 txq_stats
->tx_clean
++;
2728 u64_stats_update_end_irqrestore(&txq_stats
->syncp
, flags
);
2730 priv
->xstats
.tx_errors
+= tx_errors
;
2732 __netif_tx_unlock_bh(netdev_get_tx_queue(priv
->dev
, queue
));
2734 /* Combine decisions from TX clean and XSK TX */
2735 return max(count
, xmits
);
/**
 * stmmac_tx_err - to manage the tx error
 * @priv: driver private structure
 * @chan: channel index
 * Description: it cleans the descriptors and restarts the transmission
 * in case of transmission errors.
 */
static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];

	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));

	stmmac_stop_tx_dma(priv, chan);
	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
	stmmac_reset_tx_queue(priv, chan);
	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
			    tx_q->dma_tx_phy, chan);
	stmmac_start_tx_dma(priv, chan);

	priv->xstats.tx_errors++;
	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
}
/**
 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
 * @priv: driver private structure
 * @txmode: TX operating mode
 * @rxmode: RX operating mode
 * @chan: channel index
 * Description: it is used for configuring of the DMA operation mode in
 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
 * mode.
 */
static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
					  u32 rxmode, u32 chan)
{
	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	int rxfifosz = priv->plat->rx_fifo_size;
	int txfifosz = priv->plat->tx_fifo_size;

	if (rxfifosz == 0)
		rxfifosz = priv->dma_cap.rx_fifo_size;
	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	/* Adjust for real per queue fifo size */
	rxfifosz /= rx_channels_count;
	txfifosz /= tx_channels_count;

	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
}
static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
{
	int ret;

	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
					    priv->ioaddr, priv->dma_cap.asp,
					    &priv->sstats);
	if (ret && (ret != -EINVAL)) {
		stmmac_global_err(priv);
		return true;
	}

	return false;
}
static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
{
	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
						 &priv->xstats, chan, dir);
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
	struct stmmac_channel *ch = &priv->channel[chan];
	struct napi_struct *rx_napi;
	struct napi_struct *tx_napi;
	unsigned long flags;

	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;

	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
		if (napi_schedule_prep(rx_napi)) {
			spin_lock_irqsave(&ch->lock, flags);
			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
			spin_unlock_irqrestore(&ch->lock, flags);
			__napi_schedule(rx_napi);
		}
	}

	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
		if (napi_schedule_prep(tx_napi)) {
			spin_lock_irqsave(&ch->lock, flags);
			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
			spin_unlock_irqrestore(&ch->lock, flags);
			__napi_schedule(tx_napi);
		}
	}

	return status;
}
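/* Note: when an XSK buffer pool is attached to a channel, both RX and TX work
 * for that channel are handled by the combined rxtx_napi instance, so the
 * interrupt path must schedule that NAPI instead of the split rx_napi/tx_napi
 * ones. The matching DMA interrupt direction is masked under ch->lock and
 * re-enabled by the poll routine once it is done.
 */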
/**
 * stmmac_dma_interrupt - DMA ISR
 * @priv: driver private structure
 * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine and schedules the poll method in case
 * some work can be done.
 */
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
	u32 tx_channel_count = priv->plat->tx_queues_to_use;
	u32 rx_channel_count = priv->plat->rx_queues_to_use;
	u32 channels_to_check = tx_channel_count > rx_channel_count ?
				tx_channel_count : rx_channel_count;
	u32 chan;
	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];

	/* Make sure we never check beyond our status buffer. */
	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
		channels_to_check = ARRAY_SIZE(status);

	for (chan = 0; chan < channels_to_check; chan++)
		status[chan] = stmmac_napi_check(priv, chan,
						 DMA_DIR_RXTX);

	for (chan = 0; chan < tx_channel_count; chan++) {
		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
			/* Try to bump up the dma threshold on this failure */
			stmmac_bump_dma_threshold(priv, chan);
		} else if (unlikely(status[chan] == tx_hard_error)) {
			stmmac_tx_err(priv, chan);
		}
	}
}
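/* Note: status[] is sized for the worst case of MTL_MAX_TX_QUEUES and
 * MTL_MAX_RX_QUEUES, and channels_to_check is clamped to that array size, so
 * a platform advertising more queues than the driver supports cannot overrun
 * the on-stack buffer.
 */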
/**
 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
 * @priv: driver private structure
 * Description: this masks the MMC irq, in fact, the counters are managed in SW.
 */
static void stmmac_mmc_setup(struct stmmac_priv *priv)
{
	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;

	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);

	if (priv->dma_cap.rmon) {
		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
	} else {
		netdev_info(priv->dev, "No MAC Management Counters available\n");
	}
}

/**
 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
 * @priv: driver private structure
 * Description:
 * new GMAC chip generations have a new register to indicate the
 * presence of the optional feature/functions.
 * This can be also used to override the value passed through the
 * platform and necessary for old MAC10/100 and GMAC chips.
 */
static int stmmac_get_hw_features(struct stmmac_priv *priv)
{
	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
}
/**
 * stmmac_check_ether_addr - check if the MAC addr is valid
 * @priv: driver private structure
 * Description:
 * it is to verify if the MAC address is valid, in case of failures it
 * generates a random MAC address
 */
static void stmmac_check_ether_addr(struct stmmac_priv *priv)
{
	u8 addr[ETH_ALEN];

	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
		if (is_valid_ether_addr(addr))
			eth_hw_addr_set(priv->dev, addr);
		else
			eth_hw_addr_random(priv->dev);
		dev_info(priv->device, "device MAC address %pM\n",
			 priv->dev->dev_addr);
	}
}
2935 * stmmac_init_dma_engine - DMA init.
2936 * @priv: driver private structure
2938 * It inits the DMA invoking the specific MAC/GMAC callback.
2939 * Some DMA parameters can be passed from the platform;
2940 * in case of these are not passed a default is kept for the MAC or GMAC.
2942 static int stmmac_init_dma_engine(struct stmmac_priv
*priv
)
2944 u32 rx_channels_count
= priv
->plat
->rx_queues_to_use
;
2945 u32 tx_channels_count
= priv
->plat
->tx_queues_to_use
;
2946 u32 dma_csr_ch
= max(rx_channels_count
, tx_channels_count
);
2947 struct stmmac_rx_queue
*rx_q
;
2948 struct stmmac_tx_queue
*tx_q
;
2953 if (!priv
->plat
->dma_cfg
|| !priv
->plat
->dma_cfg
->pbl
) {
2954 dev_err(priv
->device
, "Invalid DMA configuration\n");
2958 if (priv
->extend_desc
&& (priv
->mode
== STMMAC_RING_MODE
))
2961 ret
= stmmac_reset(priv
, priv
->ioaddr
);
2963 dev_err(priv
->device
, "Failed to reset the dma\n");
2967 /* DMA Configuration */
2968 stmmac_dma_init(priv
, priv
->ioaddr
, priv
->plat
->dma_cfg
, atds
);
2970 if (priv
->plat
->axi
)
2971 stmmac_axi(priv
, priv
->ioaddr
, priv
->plat
->axi
);
2973 /* DMA CSR Channel configuration */
2974 for (chan
= 0; chan
< dma_csr_ch
; chan
++) {
2975 stmmac_init_chan(priv
, priv
->ioaddr
, priv
->plat
->dma_cfg
, chan
);
2976 stmmac_disable_dma_irq(priv
, priv
->ioaddr
, chan
, 1, 1);
2979 /* DMA RX Channel Configuration */
2980 for (chan
= 0; chan
< rx_channels_count
; chan
++) {
2981 rx_q
= &priv
->dma_conf
.rx_queue
[chan
];
2983 stmmac_init_rx_chan(priv
, priv
->ioaddr
, priv
->plat
->dma_cfg
,
2984 rx_q
->dma_rx_phy
, chan
);
2986 rx_q
->rx_tail_addr
= rx_q
->dma_rx_phy
+
2987 (rx_q
->buf_alloc_num
*
2988 sizeof(struct dma_desc
));
2989 stmmac_set_rx_tail_ptr(priv
, priv
->ioaddr
,
2990 rx_q
->rx_tail_addr
, chan
);
2993 /* DMA TX Channel Configuration */
2994 for (chan
= 0; chan
< tx_channels_count
; chan
++) {
2995 tx_q
= &priv
->dma_conf
.tx_queue
[chan
];
2997 stmmac_init_tx_chan(priv
, priv
->ioaddr
, priv
->plat
->dma_cfg
,
2998 tx_q
->dma_tx_phy
, chan
);
3000 tx_q
->tx_tail_addr
= tx_q
->dma_tx_phy
;
3001 stmmac_set_tx_tail_ptr(priv
, priv
->ioaddr
,
3002 tx_q
->tx_tail_addr
, chan
);
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	u32 tx_coal_timer = priv->tx_coal_timer[queue];
	struct stmmac_channel *ch;
	struct napi_struct *napi;

	if (!tx_coal_timer)
		return;

	ch = &priv->channel[tx_q->queue_index];
	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;

	/* Arm timer only if napi is not already scheduled.
	 * Try to cancel any timer if napi is scheduled, timer will be armed
	 * again in the next scheduled napi.
	 */
	if (unlikely(!napi_is_scheduled(napi)))
		hrtimer_start(&tx_q->txtimer,
			      STMMAC_COAL_TIMER(tx_coal_timer),
			      HRTIMER_MODE_REL);
	else
		hrtimer_try_to_cancel(&tx_q->txtimer);
}

/**
 * stmmac_tx_timer - mitigation sw timer for tx.
 * @t: data pointer
 * Description:
 * This is the timer handler to directly invoke the stmmac_tx_clean.
 */
static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
{
	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
	struct stmmac_priv *priv = tx_q->priv_data;
	struct stmmac_channel *ch;
	struct napi_struct *napi;

	ch = &priv->channel[tx_q->queue_index];
	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;

	if (likely(napi_schedule_prep(napi))) {
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
		spin_unlock_irqrestore(&ch->lock, flags);
		__napi_schedule(napi);
	}

	return HRTIMER_NORESTART;
}
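/* Note: TX completion mitigation combines two mechanisms: the interrupt on
 * completion bit set every tx_coal_frames descriptors, and the per-queue
 * hrtimer above armed for tx_coal_timer microseconds as a backstop. Both
 * defaults (STMMAC_TX_FRAMES and STMMAC_COAL_TX_TIMER) can normally be tuned
 * at runtime through ethtool coalescing, e.g.
 *	ethtool -C eth0 tx-frames 25 tx-usecs 1000
 * (illustrative values only).
 */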
/**
 * stmmac_init_coalesce - init mitigation options.
 * @priv: driver private structure
 * Description:
 * This inits the coalesce parameters: i.e. timer rate,
 * timer handler and default threshold used for enabling the
 * interrupt on completion bit.
 */
static void stmmac_init_coalesce(struct stmmac_priv *priv)
{
	u32 tx_channel_count = priv->plat->tx_queues_to_use;
	u32 rx_channel_count = priv->plat->rx_queues_to_use;
	u32 chan;

	for (chan = 0; chan < tx_channel_count; chan++) {
		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];

		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;

		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		tx_q->txtimer.function = stmmac_tx_timer;
	}

	for (chan = 0; chan < rx_channel_count; chan++)
		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
}
static void stmmac_set_rings_length(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan;

	/* set TX ring length */
	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_set_tx_ring_len(priv, priv->ioaddr,
				       (priv->dma_conf.dma_tx_size - 1), chan);

	/* set RX ring length */
	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_set_rx_ring_len(priv, priv->ioaddr,
				       (priv->dma_conf.dma_rx_size - 1), chan);
}
/**
 * stmmac_set_tx_queue_weight - Set TX queue weight
 * @priv: driver private structure
 * Description: It is used for setting TX queues weight
 */
static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 weight;
	u32 queue;

	for (queue = 0; queue < tx_queues_count; queue++) {
		weight = priv->plat->tx_queues_cfg[queue].weight;
		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
	}
}

/**
 * stmmac_configure_cbs - Configure CBS in TX queue
 * @priv: driver private structure
 * Description: It is used for configuring CBS in AVB TX queues
 */
static void stmmac_configure_cbs(struct stmmac_priv *priv)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 mode_to_use;
	u32 queue;

	/* queue 0 is reserved for legacy traffic */
	for (queue = 1; queue < tx_queues_count; queue++) {
		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
		if (mode_to_use == MTL_QUEUE_DCB)
			continue;

		stmmac_config_cbs(priv, priv->hw,
				  priv->plat->tx_queues_cfg[queue].send_slope,
				  priv->plat->tx_queues_cfg[queue].idle_slope,
				  priv->plat->tx_queues_cfg[queue].high_credit,
				  priv->plat->tx_queues_cfg[queue].low_credit,
				  queue);
	}
}
/**
 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
 * @priv: driver private structure
 * Description: It is used for mapping RX queues to RX dma channels
 */
static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 queue;
	u32 chan;

	for (queue = 0; queue < rx_queues_count; queue++) {
		chan = priv->plat->rx_queues_cfg[queue].chan;
		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
	}
}

/**
 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
 * @priv: driver private structure
 * Description: It is used for configuring the RX Queue Priority
 */
static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 queue;
	u32 prio;

	for (queue = 0; queue < rx_queues_count; queue++) {
		if (!priv->plat->rx_queues_cfg[queue].use_prio)
			continue;

		prio = priv->plat->rx_queues_cfg[queue].prio;
		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
	}
}

/**
 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
 * @priv: driver private structure
 * Description: It is used for configuring the TX Queue Priority
 */
static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 queue;
	u32 prio;

	for (queue = 0; queue < tx_queues_count; queue++) {
		if (!priv->plat->tx_queues_cfg[queue].use_prio)
			continue;

		prio = priv->plat->tx_queues_cfg[queue].prio;
		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
	}
}

/**
 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
 * @priv: driver private structure
 * Description: It is used for configuring the RX queue routing
 */
static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 queue;
	u8 packet;

	for (queue = 0; queue < rx_queues_count; queue++) {
		/* no specific packet type routing specified for the queue */
		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
			continue;

		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
	}
}
static void stmmac_mac_config_rss(struct stmmac_priv *priv)
{
	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
		priv->rss.enable = false;
		return;
	}

	if (priv->dev->features & NETIF_F_RXHASH)
		priv->rss.enable = true;
	else
		priv->rss.enable = false;

	stmmac_rss_configure(priv, priv->hw, &priv->rss,
			     priv->plat->rx_queues_to_use);
}
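/* Note: RSS is only programmed when both the hardware capability
 * (dma_cap.rssen) and the platform opt-in (plat->rss_en) are present; the
 * enable flag then simply tracks whether NETIF_F_RXHASH is currently set on
 * the netdev when the MAC is (re)configured.
 */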
/**
 * stmmac_mtl_configuration - Configure MTL
 * @priv: driver private structure
 * Description: It is used for configuring MTL
 */
static void stmmac_mtl_configuration(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 tx_queues_count = priv->plat->tx_queues_to_use;

	if (tx_queues_count > 1)
		stmmac_set_tx_queue_weight(priv);

	/* Configure MTL RX algorithms */
	if (rx_queues_count > 1)
		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
					      priv->plat->rx_sched_algorithm);

	/* Configure MTL TX algorithms */
	if (tx_queues_count > 1)
		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
					      priv->plat->tx_sched_algorithm);

	/* Configure CBS in AVB TX queues */
	if (tx_queues_count > 1)
		stmmac_configure_cbs(priv);

	/* Map RX MTL to DMA channels */
	stmmac_rx_queue_dma_chan_map(priv);

	/* Enable MAC RX Queues */
	stmmac_mac_enable_rx_queues(priv);

	/* Set RX priorities */
	if (rx_queues_count > 1)
		stmmac_mac_config_rx_queues_prio(priv);

	/* Set TX priorities */
	if (tx_queues_count > 1)
		stmmac_mac_config_tx_queues_prio(priv);

	/* Set RX routing */
	if (rx_queues_count > 1)
		stmmac_mac_config_rx_queues_routing(priv);

	/* Receive Side Scaling */
	if (rx_queues_count > 1)
		stmmac_mac_config_rss(priv);
}
static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
{
	if (priv->dma_cap.asp) {
		netdev_info(priv->dev, "Enabling Safety Features\n");
		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
					  priv->plat->safety_feat_cfg);
	} else {
		netdev_info(priv->dev, "No Safety Features support found\n");
	}
}

static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
{
	char *name;

	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
	clear_bit(__FPE_REMOVING, &priv->fpe_task_state);

	name = priv->wq_name;
	sprintf(name, "%s-fpe", priv->dev->name);

	priv->fpe_wq = create_singlethread_workqueue(name);
	if (!priv->fpe_wq) {
		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
		return -ENOMEM;
	}

	netdev_info(priv->dev, "FPE workqueue start");

	return 0;
}
3326 * stmmac_hw_setup - setup mac in a usable state.
3327 * @dev : pointer to the device structure.
3328 * @ptp_register: register PTP if set
3330 * this is the main function to setup the HW in a usable state because the
3331 * dma engine is reset, the core registers are configured (e.g. AXI,
3332 * Checksum features, timers). The DMA is ready to start receiving and
3335 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3338 static int stmmac_hw_setup(struct net_device
*dev
, bool ptp_register
)
3340 struct stmmac_priv
*priv
= netdev_priv(dev
);
3341 u32 rx_cnt
= priv
->plat
->rx_queues_to_use
;
3342 u32 tx_cnt
= priv
->plat
->tx_queues_to_use
;
3347 /* DMA initialization and SW reset */
3348 ret
= stmmac_init_dma_engine(priv
);
3350 netdev_err(priv
->dev
, "%s: DMA engine initialization failed\n",
3355 /* Copy the MAC addr into the HW */
3356 stmmac_set_umac_addr(priv
, priv
->hw
, dev
->dev_addr
, 0);
3358 /* PS and related bits will be programmed according to the speed */
3359 if (priv
->hw
->pcs
) {
3360 int speed
= priv
->plat
->mac_port_sel_speed
;
3362 if ((speed
== SPEED_10
) || (speed
== SPEED_100
) ||
3363 (speed
== SPEED_1000
)) {
3364 priv
->hw
->ps
= speed
;
3366 dev_warn(priv
->device
, "invalid port speed\n");
3371 /* Initialize the MAC Core */
3372 stmmac_core_init(priv
, priv
->hw
, dev
);
3375 stmmac_mtl_configuration(priv
);
3377 /* Initialize Safety Features */
3378 stmmac_safety_feat_configuration(priv
);
3380 ret
= stmmac_rx_ipc(priv
, priv
->hw
);
3382 netdev_warn(priv
->dev
, "RX IPC Checksum Offload disabled\n");
3383 priv
->plat
->rx_coe
= STMMAC_RX_COE_NONE
;
3384 priv
->hw
->rx_csum
= 0;
3387 /* Enable the MAC Rx/Tx */
3388 stmmac_mac_set(priv
, priv
->ioaddr
, true);
3390 /* Set the HW DMA mode and the COE */
3391 stmmac_dma_operation_mode(priv
);
3393 stmmac_mmc_setup(priv
);
3396 ret
= clk_prepare_enable(priv
->plat
->clk_ptp_ref
);
3398 netdev_warn(priv
->dev
,
3399 "failed to enable PTP reference clock: %pe\n",
3403 ret
= stmmac_init_ptp(priv
);
3404 if (ret
== -EOPNOTSUPP
)
3405 netdev_info(priv
->dev
, "PTP not supported by HW\n");
3407 netdev_warn(priv
->dev
, "PTP init failed\n");
3408 else if (ptp_register
)
3409 stmmac_ptp_register(priv
);
3411 priv
->eee_tw_timer
= STMMAC_DEFAULT_TWT_LS
;
3413 /* Convert the timer from msec to usec */
3414 if (!priv
->tx_lpi_timer
)
3415 priv
->tx_lpi_timer
= eee_timer
* 1000;
3417 if (priv
->use_riwt
) {
3420 for (queue
= 0; queue
< rx_cnt
; queue
++) {
3421 if (!priv
->rx_riwt
[queue
])
3422 priv
->rx_riwt
[queue
] = DEF_DMA_RIWT
;
3424 stmmac_rx_watchdog(priv
, priv
->ioaddr
,
3425 priv
->rx_riwt
[queue
], queue
);
3430 stmmac_pcs_ctrl_ane(priv
, priv
->ioaddr
, 1, priv
->hw
->ps
, 0);
3432 /* set TX and RX rings length */
3433 stmmac_set_rings_length(priv
);
3437 for (chan
= 0; chan
< tx_cnt
; chan
++) {
3438 struct stmmac_tx_queue
*tx_q
= &priv
->dma_conf
.tx_queue
[chan
];
3440 /* TSO and TBS cannot co-exist */
3441 if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
3444 stmmac_enable_tso(priv
, priv
->ioaddr
, 1, chan
);
3448 /* Enable Split Header */
3449 sph_en
= (priv
->hw
->rx_csum
> 0) && priv
->sph
;
3450 for (chan
= 0; chan
< rx_cnt
; chan
++)
3451 stmmac_enable_sph(priv
, priv
->ioaddr
, sph_en
, chan
);
3454 /* VLAN Tag Insertion */
3455 if (priv
->dma_cap
.vlins
)
3456 stmmac_enable_vlan(priv
, priv
->hw
, STMMAC_VLAN_INSERT
);
3459 for (chan
= 0; chan
< tx_cnt
; chan
++) {
3460 struct stmmac_tx_queue
*tx_q
= &priv
->dma_conf
.tx_queue
[chan
];
3461 int enable
= tx_q
->tbs
& STMMAC_TBS_AVAIL
;
3463 stmmac_enable_tbs(priv
, priv
->ioaddr
, enable
, chan
);
3466 /* Configure real RX and TX queues */
3467 netif_set_real_num_rx_queues(dev
, priv
->plat
->rx_queues_to_use
);
3468 netif_set_real_num_tx_queues(dev
, priv
->plat
->tx_queues_to_use
);
3470 /* Start the ball rolling... */
3471 stmmac_start_all_dma(priv
);
3473 if (priv
->dma_cap
.fpesel
) {
3474 stmmac_fpe_start_wq(priv
);
3476 if (priv
->plat
->fpe_cfg
->enable
)
3477 stmmac_fpe_handshake(priv
, true);
static void stmmac_hw_teardown(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	clk_disable_unprepare(priv->plat->clk_ptp_ref);
}
3490 static void stmmac_free_irq(struct net_device
*dev
,
3491 enum request_irq_err irq_err
, int irq_idx
)
3493 struct stmmac_priv
*priv
= netdev_priv(dev
);
3497 case REQ_IRQ_ERR_ALL
:
3498 irq_idx
= priv
->plat
->tx_queues_to_use
;
3500 case REQ_IRQ_ERR_TX
:
3501 for (j
= irq_idx
- 1; j
>= 0; j
--) {
3502 if (priv
->tx_irq
[j
] > 0) {
3503 irq_set_affinity_hint(priv
->tx_irq
[j
], NULL
);
3504 free_irq(priv
->tx_irq
[j
], &priv
->dma_conf
.tx_queue
[j
]);
3507 irq_idx
= priv
->plat
->rx_queues_to_use
;
3509 case REQ_IRQ_ERR_RX
:
3510 for (j
= irq_idx
- 1; j
>= 0; j
--) {
3511 if (priv
->rx_irq
[j
] > 0) {
3512 irq_set_affinity_hint(priv
->rx_irq
[j
], NULL
);
3513 free_irq(priv
->rx_irq
[j
], &priv
->dma_conf
.rx_queue
[j
]);
3517 if (priv
->sfty_ue_irq
> 0 && priv
->sfty_ue_irq
!= dev
->irq
)
3518 free_irq(priv
->sfty_ue_irq
, dev
);
3520 case REQ_IRQ_ERR_SFTY_UE
:
3521 if (priv
->sfty_ce_irq
> 0 && priv
->sfty_ce_irq
!= dev
->irq
)
3522 free_irq(priv
->sfty_ce_irq
, dev
);
3524 case REQ_IRQ_ERR_SFTY_CE
:
3525 if (priv
->lpi_irq
> 0 && priv
->lpi_irq
!= dev
->irq
)
3526 free_irq(priv
->lpi_irq
, dev
);
3528 case REQ_IRQ_ERR_LPI
:
3529 if (priv
->wol_irq
> 0 && priv
->wol_irq
!= dev
->irq
)
3530 free_irq(priv
->wol_irq
, dev
);
3532 case REQ_IRQ_ERR_WOL
:
3533 free_irq(dev
->irq
, dev
);
3535 case REQ_IRQ_ERR_MAC
:
3536 case REQ_IRQ_ERR_NO
:
3537 /* If MAC IRQ request error, no more IRQ to free */
3542 static int stmmac_request_irq_multi_msi(struct net_device
*dev
)
3544 struct stmmac_priv
*priv
= netdev_priv(dev
);
3545 enum request_irq_err irq_err
;
3552 /* For common interrupt */
3553 int_name
= priv
->int_name_mac
;
3554 sprintf(int_name
, "%s:%s", dev
->name
, "mac");
3555 ret
= request_irq(dev
->irq
, stmmac_mac_interrupt
,
3557 if (unlikely(ret
< 0)) {
3558 netdev_err(priv
->dev
,
3559 "%s: alloc mac MSI %d (error: %d)\n",
3560 __func__
, dev
->irq
, ret
);
3561 irq_err
= REQ_IRQ_ERR_MAC
;
3565 /* Request the Wake IRQ in case of another line
3568 if (priv
->wol_irq
> 0 && priv
->wol_irq
!= dev
->irq
) {
3569 int_name
= priv
->int_name_wol
;
3570 sprintf(int_name
, "%s:%s", dev
->name
, "wol");
3571 ret
= request_irq(priv
->wol_irq
,
3572 stmmac_mac_interrupt
,
3574 if (unlikely(ret
< 0)) {
3575 netdev_err(priv
->dev
,
3576 "%s: alloc wol MSI %d (error: %d)\n",
3577 __func__
, priv
->wol_irq
, ret
);
3578 irq_err
= REQ_IRQ_ERR_WOL
;
3583 /* Request the LPI IRQ in case of another line
3586 if (priv
->lpi_irq
> 0 && priv
->lpi_irq
!= dev
->irq
) {
3587 int_name
= priv
->int_name_lpi
;
3588 sprintf(int_name
, "%s:%s", dev
->name
, "lpi");
3589 ret
= request_irq(priv
->lpi_irq
,
3590 stmmac_mac_interrupt
,
3592 if (unlikely(ret
< 0)) {
3593 netdev_err(priv
->dev
,
3594 "%s: alloc lpi MSI %d (error: %d)\n",
3595 __func__
, priv
->lpi_irq
, ret
);
3596 irq_err
= REQ_IRQ_ERR_LPI
;
3601 /* Request the Safety Feature Correctible Error line in
3602 * case of another line is used
3604 if (priv
->sfty_ce_irq
> 0 && priv
->sfty_ce_irq
!= dev
->irq
) {
3605 int_name
= priv
->int_name_sfty_ce
;
3606 sprintf(int_name
, "%s:%s", dev
->name
, "safety-ce");
3607 ret
= request_irq(priv
->sfty_ce_irq
,
3608 stmmac_safety_interrupt
,
3610 if (unlikely(ret
< 0)) {
3611 netdev_err(priv
->dev
,
3612 "%s: alloc sfty ce MSI %d (error: %d)\n",
3613 __func__
, priv
->sfty_ce_irq
, ret
);
3614 irq_err
= REQ_IRQ_ERR_SFTY_CE
;
3619 /* Request the Safety Feature Uncorrectible Error line in
3620 * case of another line is used
3622 if (priv
->sfty_ue_irq
> 0 && priv
->sfty_ue_irq
!= dev
->irq
) {
3623 int_name
= priv
->int_name_sfty_ue
;
3624 sprintf(int_name
, "%s:%s", dev
->name
, "safety-ue");
3625 ret
= request_irq(priv
->sfty_ue_irq
,
3626 stmmac_safety_interrupt
,
3628 if (unlikely(ret
< 0)) {
3629 netdev_err(priv
->dev
,
3630 "%s: alloc sfty ue MSI %d (error: %d)\n",
3631 __func__
, priv
->sfty_ue_irq
, ret
);
3632 irq_err
= REQ_IRQ_ERR_SFTY_UE
;
3637 /* Request Rx MSI irq */
3638 for (i
= 0; i
< priv
->plat
->rx_queues_to_use
; i
++) {
3639 if (i
>= MTL_MAX_RX_QUEUES
)
3641 if (priv
->rx_irq
[i
] == 0)
3644 int_name
= priv
->int_name_rx_irq
[i
];
3645 sprintf(int_name
, "%s:%s-%d", dev
->name
, "rx", i
);
3646 ret
= request_irq(priv
->rx_irq
[i
],
3648 0, int_name
, &priv
->dma_conf
.rx_queue
[i
]);
3649 if (unlikely(ret
< 0)) {
3650 netdev_err(priv
->dev
,
3651 "%s: alloc rx-%d MSI %d (error: %d)\n",
3652 __func__
, i
, priv
->rx_irq
[i
], ret
);
3653 irq_err
= REQ_IRQ_ERR_RX
;
3657 cpumask_clear(&cpu_mask
);
3658 cpumask_set_cpu(i
% num_online_cpus(), &cpu_mask
);
3659 irq_set_affinity_hint(priv
->rx_irq
[i
], &cpu_mask
);
3662 /* Request Tx MSI irq */
3663 for (i
= 0; i
< priv
->plat
->tx_queues_to_use
; i
++) {
3664 if (i
>= MTL_MAX_TX_QUEUES
)
3666 if (priv
->tx_irq
[i
] == 0)
3669 int_name
= priv
->int_name_tx_irq
[i
];
3670 sprintf(int_name
, "%s:%s-%d", dev
->name
, "tx", i
);
3671 ret
= request_irq(priv
->tx_irq
[i
],
3673 0, int_name
, &priv
->dma_conf
.tx_queue
[i
]);
3674 if (unlikely(ret
< 0)) {
3675 netdev_err(priv
->dev
,
3676 "%s: alloc tx-%d MSI %d (error: %d)\n",
3677 __func__
, i
, priv
->tx_irq
[i
], ret
);
3678 irq_err
= REQ_IRQ_ERR_TX
;
3682 cpumask_clear(&cpu_mask
);
3683 cpumask_set_cpu(i
% num_online_cpus(), &cpu_mask
);
3684 irq_set_affinity_hint(priv
->tx_irq
[i
], &cpu_mask
);
3690 stmmac_free_irq(dev
, irq_err
, irq_idx
);
3694 static int stmmac_request_irq_single(struct net_device
*dev
)
3696 struct stmmac_priv
*priv
= netdev_priv(dev
);
3697 enum request_irq_err irq_err
;
3700 ret
= request_irq(dev
->irq
, stmmac_interrupt
,
3701 IRQF_SHARED
, dev
->name
, dev
);
3702 if (unlikely(ret
< 0)) {
3703 netdev_err(priv
->dev
,
3704 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3705 __func__
, dev
->irq
, ret
);
3706 irq_err
= REQ_IRQ_ERR_MAC
;
3710 /* Request the Wake IRQ in case of another line
3713 if (priv
->wol_irq
> 0 && priv
->wol_irq
!= dev
->irq
) {
3714 ret
= request_irq(priv
->wol_irq
, stmmac_interrupt
,
3715 IRQF_SHARED
, dev
->name
, dev
);
3716 if (unlikely(ret
< 0)) {
3717 netdev_err(priv
->dev
,
3718 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3719 __func__
, priv
->wol_irq
, ret
);
3720 irq_err
= REQ_IRQ_ERR_WOL
;
3725 /* Request the IRQ lines */
3726 if (priv
->lpi_irq
> 0 && priv
->lpi_irq
!= dev
->irq
) {
3727 ret
= request_irq(priv
->lpi_irq
, stmmac_interrupt
,
3728 IRQF_SHARED
, dev
->name
, dev
);
3729 if (unlikely(ret
< 0)) {
3730 netdev_err(priv
->dev
,
3731 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3732 __func__
, priv
->lpi_irq
, ret
);
3733 irq_err
= REQ_IRQ_ERR_LPI
;
3741 stmmac_free_irq(dev
, irq_err
, 0);
static int stmmac_request_irq(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	/* Request the IRQ lines */
	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
		ret = stmmac_request_irq_multi_msi(dev);
	else
		ret = stmmac_request_irq_single(dev);

	return ret;
}
3760 * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3761 * @priv: driver private structure
3762 * @mtu: MTU to setup the dma queue and buf with
3763 * Description: Allocate and generate a dma_conf based on the provided MTU.
3764 * Allocate the Tx/Rx DMA queue and init them.
3766 * the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3768 static struct stmmac_dma_conf
*
3769 stmmac_setup_dma_desc(struct stmmac_priv
*priv
, unsigned int mtu
)
3771 struct stmmac_dma_conf
*dma_conf
;
3772 int chan
, bfsize
, ret
;
3774 dma_conf
= kzalloc(sizeof(*dma_conf
), GFP_KERNEL
);
3776 netdev_err(priv
->dev
, "%s: DMA conf allocation failed\n",
3778 return ERR_PTR(-ENOMEM
);
3781 bfsize
= stmmac_set_16kib_bfsize(priv
, mtu
);
3785 if (bfsize
< BUF_SIZE_16KiB
)
3786 bfsize
= stmmac_set_bfsize(mtu
, 0);
3788 dma_conf
->dma_buf_sz
= bfsize
;
3789 /* Chose the tx/rx size from the already defined one in the
3790 * priv struct. (if defined)
3792 dma_conf
->dma_tx_size
= priv
->dma_conf
.dma_tx_size
;
3793 dma_conf
->dma_rx_size
= priv
->dma_conf
.dma_rx_size
;
3795 if (!dma_conf
->dma_tx_size
)
3796 dma_conf
->dma_tx_size
= DMA_DEFAULT_TX_SIZE
;
3797 if (!dma_conf
->dma_rx_size
)
3798 dma_conf
->dma_rx_size
= DMA_DEFAULT_RX_SIZE
;
3800 /* Earlier check for TBS */
3801 for (chan
= 0; chan
< priv
->plat
->tx_queues_to_use
; chan
++) {
3802 struct stmmac_tx_queue
*tx_q
= &dma_conf
->tx_queue
[chan
];
3803 int tbs_en
= priv
->plat
->tx_queues_cfg
[chan
].tbs_en
;
3805 /* Setup per-TXQ tbs flag before TX descriptor alloc */
3806 tx_q
->tbs
|= tbs_en
? STMMAC_TBS_AVAIL
: 0;
3809 ret
= alloc_dma_desc_resources(priv
, dma_conf
);
3811 netdev_err(priv
->dev
, "%s: DMA descriptors allocation failed\n",
3816 ret
= init_dma_desc_rings(priv
->dev
, dma_conf
, GFP_KERNEL
);
3818 netdev_err(priv
->dev
, "%s: DMA descriptors initialization failed\n",
3826 free_dma_desc_resources(priv
, dma_conf
);
3829 return ERR_PTR(ret
);
3833 * __stmmac_open - open entry point of the driver
3834 * @dev : pointer to the device structure.
3835 * @dma_conf : structure to take the dma data
3837 * This function is the open entry point of the driver.
3839 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3842 static int __stmmac_open(struct net_device
*dev
,
3843 struct stmmac_dma_conf
*dma_conf
)
3845 struct stmmac_priv
*priv
= netdev_priv(dev
);
3846 int mode
= priv
->plat
->phy_interface
;
3850 ret
= pm_runtime_resume_and_get(priv
->device
);
3854 if (priv
->hw
->pcs
!= STMMAC_PCS_TBI
&&
3855 priv
->hw
->pcs
!= STMMAC_PCS_RTBI
&&
3857 xpcs_get_an_mode(priv
->hw
->xpcs
, mode
) != DW_AN_C73
) &&
3858 !priv
->hw
->lynx_pcs
) {
3859 ret
= stmmac_init_phy(dev
);
3861 netdev_err(priv
->dev
,
3862 "%s: Cannot attach to PHY (error: %d)\n",
3864 goto init_phy_error
;
3868 priv
->rx_copybreak
= STMMAC_RX_COPYBREAK
;
3870 buf_sz
= dma_conf
->dma_buf_sz
;
3871 memcpy(&priv
->dma_conf
, dma_conf
, sizeof(*dma_conf
));
3873 stmmac_reset_queues_param(priv
);
3875 if (!(priv
->plat
->flags
& STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP
) &&
3876 priv
->plat
->serdes_powerup
) {
3877 ret
= priv
->plat
->serdes_powerup(dev
, priv
->plat
->bsp_priv
);
3879 netdev_err(priv
->dev
, "%s: Serdes powerup failed\n",
3885 ret
= stmmac_hw_setup(dev
, true);
3887 netdev_err(priv
->dev
, "%s: Hw setup failed\n", __func__
);
3891 stmmac_init_coalesce(priv
);
3893 phylink_start(priv
->phylink
);
3894 /* We may have called phylink_speed_down before */
3895 phylink_speed_up(priv
->phylink
);
3897 ret
= stmmac_request_irq(dev
);
3901 stmmac_enable_all_queues(priv
);
3902 netif_tx_start_all_queues(priv
->dev
);
3903 stmmac_enable_all_dma_irq(priv
);
3908 phylink_stop(priv
->phylink
);
3910 for (chan
= 0; chan
< priv
->plat
->tx_queues_to_use
; chan
++)
3911 hrtimer_cancel(&priv
->dma_conf
.tx_queue
[chan
].txtimer
);
3913 stmmac_hw_teardown(dev
);
3915 phylink_disconnect_phy(priv
->phylink
);
3917 pm_runtime_put(priv
->device
);
static int stmmac_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct stmmac_dma_conf *dma_conf;
	int ret;

	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
	if (IS_ERR(dma_conf))
		return PTR_ERR(dma_conf);

	ret = __stmmac_open(dev, dma_conf);
	if (ret)
		free_dma_desc_resources(priv, dma_conf);

	kfree(dma_conf);
	return ret;
}

static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
{
	set_bit(__FPE_REMOVING, &priv->fpe_task_state);

	if (priv->fpe_wq) {
		destroy_workqueue(priv->fpe_wq);
		priv->fpe_wq = NULL;
	}

	netdev_info(priv->dev, "FPE workqueue stop");
}
3950 * stmmac_release - close entry point of the driver
3951 * @dev : device pointer.
3953 * This is the stop entry point of the driver.
3955 static int stmmac_release(struct net_device
*dev
)
3957 struct stmmac_priv
*priv
= netdev_priv(dev
);
3960 if (device_may_wakeup(priv
->device
))
3961 phylink_speed_down(priv
->phylink
, false);
3962 /* Stop and disconnect the PHY */
3963 phylink_stop(priv
->phylink
);
3964 phylink_disconnect_phy(priv
->phylink
);
3966 stmmac_disable_all_queues(priv
);
3968 for (chan
= 0; chan
< priv
->plat
->tx_queues_to_use
; chan
++)
3969 hrtimer_cancel(&priv
->dma_conf
.tx_queue
[chan
].txtimer
);
3971 netif_tx_disable(dev
);
3973 /* Free the IRQ lines */
3974 stmmac_free_irq(dev
, REQ_IRQ_ERR_ALL
, 0);
3976 if (priv
->eee_enabled
) {
3977 priv
->tx_path_in_lpi_mode
= false;
3978 del_timer_sync(&priv
->eee_ctrl_timer
);
3981 /* Stop TX/RX DMA and clear the descriptors */
3982 stmmac_stop_all_dma(priv
);
3984 /* Release and free the Rx/Tx resources */
3985 free_dma_desc_resources(priv
, &priv
->dma_conf
);
3987 /* Disable the MAC Rx/Tx */
3988 stmmac_mac_set(priv
, priv
->ioaddr
, false);
3990 /* Powerdown Serdes if there is */
3991 if (priv
->plat
->serdes_powerdown
)
3992 priv
->plat
->serdes_powerdown(dev
, priv
->plat
->bsp_priv
);
3994 netif_carrier_off(dev
);
3996 stmmac_release_ptp(priv
);
3998 pm_runtime_put(priv
->device
);
4000 if (priv
->dma_cap
.fpesel
)
4001 stmmac_fpe_stop_wq(priv
);
static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
			       struct stmmac_tx_queue *tx_q)
{
	u16 tag = 0x0, inner_tag = 0x0;
	u32 inner_type = 0x0;
	struct dma_desc *p;

	if (!priv->dma_cap.vlins)
		return false;
	if (!skb_vlan_tag_present(skb))
		return false;
	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
		inner_tag = skb_vlan_tag_get(skb);
		inner_type = STMMAC_VLAN_INSERT;
	}

	tag = skb_vlan_tag_get(skb);

	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
	else
		p = &tx_q->dma_tx[tx_q->cur_tx];

	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
		return false;

	stmmac_set_tx_owner(priv, p);
	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
	return true;
}
/**
 * stmmac_tso_allocator - fill TX descriptors for a TSO payload
 * @priv: driver private structure
 * @des: buffer start address
 * @total_len: total length to fill in descriptors
 * @last_segment: condition for the last descriptor
 * @queue: TX queue index
 * Description:
 * This function fills descriptors and requests new descriptors according to
 * the buffer length to fill.
 */
static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
				 int total_len, bool last_segment, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	struct dma_desc *desc;
	u32 buff_size;
	int tmp_len;

	tmp_len = total_len;

	while (tmp_len > 0) {
		dma_addr_t curr_addr;

		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
						priv->dma_conf.dma_tx_size);
		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);

		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			desc = &tx_q->dma_tx[tx_q->cur_tx];

		curr_addr = des + (total_len - tmp_len);
		if (priv->dma_cap.addr64 <= 32)
			desc->des0 = cpu_to_le32(curr_addr);
		else
			stmmac_set_desc_addr(priv, desc, curr_addr);

		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
			    TSO_MAX_BUFF_SIZE : tmp_len;

		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
					   0, 1,
					   (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
					   0, 0);

		tmp_len -= TSO_MAX_BUFF_SIZE;
	}
}
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	int desc_size;

	if (likely(priv->extend_desc))
		desc_size = sizeof(struct dma_extended_desc);
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		desc_size = sizeof(struct dma_edesc);
	else
		desc_size = sizeof(struct dma_desc);

	/* The own bit must be the latest setting done when prepare the
	 * descriptor and then barrier is needed to make sure that
	 * all is coherent before granting the DMA engine.
	 */
	wmb();

	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
}
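/* Note: this helper is the common doorbell for newly prepared TX descriptors:
 * the wmb() above orders the descriptor writes (in particular the OWN bit)
 * before the tail pointer update, and the tail address is computed from
 * cur_tx scaled by the per-mode descriptor size selected earlier.
 */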
4111 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4112 * @skb : the socket buffer
4113 * @dev : device pointer
4114 * Description: this is the transmit function that is called on TSO frames
4115 * (support available on GMAC4 and newer chips).
4116 * Diagram below show the ring programming in case of TSO frames:
4120 * | DES0 |---> buffer1 = L2/L3/L4 header
4121 * | DES1 |---> TCP Payload (can continue on next descr...)
4122 * | DES2 |---> buffer 1 and 2 len
4123 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4129 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
4131 * | DES2 | --> buffer 1 and 2 len
4135 * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
4137 static netdev_tx_t
stmmac_tso_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
4139 struct dma_desc
*desc
, *first
, *mss_desc
= NULL
;
4140 struct stmmac_priv
*priv
= netdev_priv(dev
);
4141 int nfrags
= skb_shinfo(skb
)->nr_frags
;
4142 u32 queue
= skb_get_queue_mapping(skb
);
4143 unsigned int first_entry
, tx_packets
;
4144 struct stmmac_txq_stats
*txq_stats
;
4145 int tmp_pay_len
= 0, first_tx
;
4146 struct stmmac_tx_queue
*tx_q
;
4147 bool has_vlan
, set_ic
;
4148 u8 proto_hdr_len
, hdr
;
4149 unsigned long flags
;
4154 tx_q
= &priv
->dma_conf
.tx_queue
[queue
];
4155 txq_stats
= &priv
->xstats
.txq_stats
[queue
];
4156 first_tx
= tx_q
->cur_tx
;
4158 /* Compute header lengths */
4159 if (skb_shinfo(skb
)->gso_type
& SKB_GSO_UDP_L4
) {
4160 proto_hdr_len
= skb_transport_offset(skb
) + sizeof(struct udphdr
);
4161 hdr
= sizeof(struct udphdr
);
4163 proto_hdr_len
= skb_tcp_all_headers(skb
);
4164 hdr
= tcp_hdrlen(skb
);
4167 /* Desc availability based on threshold should be enough safe */
4168 if (unlikely(stmmac_tx_avail(priv
, queue
) <
4169 (((skb
->len
- proto_hdr_len
) / TSO_MAX_BUFF_SIZE
+ 1)))) {
4170 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev
, queue
))) {
4171 netif_tx_stop_queue(netdev_get_tx_queue(priv
->dev
,
4173 /* This is a hard error, log it. */
4174 netdev_err(priv
->dev
,
4175 "%s: Tx Ring full when queue awake\n",
4178 return NETDEV_TX_BUSY
;
4181 pay_len
= skb_headlen(skb
) - proto_hdr_len
; /* no frags */
4183 mss
= skb_shinfo(skb
)->gso_size
;
4185 /* set new MSS value if needed */
4186 if (mss
!= tx_q
->mss
) {
4187 if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
4188 mss_desc
= &tx_q
->dma_entx
[tx_q
->cur_tx
].basic
;
4190 mss_desc
= &tx_q
->dma_tx
[tx_q
->cur_tx
];
4192 stmmac_set_mss(priv
, mss_desc
, mss
);
4194 tx_q
->cur_tx
= STMMAC_GET_ENTRY(tx_q
->cur_tx
,
4195 priv
->dma_conf
.dma_tx_size
);
4196 WARN_ON(tx_q
->tx_skbuff
[tx_q
->cur_tx
]);
4199 if (netif_msg_tx_queued(priv
)) {
4200 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4201 __func__
, hdr
, proto_hdr_len
, pay_len
, mss
);
4202 pr_info("\tskb->len %d, skb->data_len %d\n", skb
->len
,
4206 /* Check if VLAN can be inserted by HW */
4207 has_vlan
= stmmac_vlan_insert(priv
, skb
, tx_q
);
4209 first_entry
= tx_q
->cur_tx
;
4210 WARN_ON(tx_q
->tx_skbuff
[first_entry
]);
4212 if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
4213 desc
= &tx_q
->dma_entx
[first_entry
].basic
;
4215 desc
= &tx_q
->dma_tx
[first_entry
];
4219 stmmac_set_desc_vlan(priv
, first
, STMMAC_VLAN_INSERT
);
4221 /* first descriptor: fill Headers on Buf1 */
4222 des
= dma_map_single(priv
->device
, skb
->data
, skb_headlen(skb
),
4224 if (dma_mapping_error(priv
->device
, des
))
4227 tx_q
->tx_skbuff_dma
[first_entry
].buf
= des
;
4228 tx_q
->tx_skbuff_dma
[first_entry
].len
= skb_headlen(skb
);
4229 tx_q
->tx_skbuff_dma
[first_entry
].map_as_page
= false;
4230 tx_q
->tx_skbuff_dma
[first_entry
].buf_type
= STMMAC_TXBUF_T_SKB
;
4232 if (priv
->dma_cap
.addr64
<= 32) {
4233 first
->des0
= cpu_to_le32(des
);
4235 /* Fill start of payload in buff2 of first descriptor */
4237 first
->des1
= cpu_to_le32(des
+ proto_hdr_len
);
4239 /* If needed take extra descriptors to fill the remaining payload */
4240 tmp_pay_len
= pay_len
- TSO_MAX_BUFF_SIZE
;
4242 stmmac_set_desc_addr(priv
, first
, des
);
4243 tmp_pay_len
= pay_len
;
4244 des
+= proto_hdr_len
;
4248 stmmac_tso_allocator(priv
, des
, tmp_pay_len
, (nfrags
== 0), queue
);
4250 /* Prepare fragments */
4251 for (i
= 0; i
< nfrags
; i
++) {
4252 const skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
4254 des
= skb_frag_dma_map(priv
->device
, frag
, 0,
4255 skb_frag_size(frag
),
4257 if (dma_mapping_error(priv
->device
, des
))
4260 stmmac_tso_allocator(priv
, des
, skb_frag_size(frag
),
4261 (i
== nfrags
- 1), queue
);
4263 tx_q
->tx_skbuff_dma
[tx_q
->cur_tx
].buf
= des
;
4264 tx_q
->tx_skbuff_dma
[tx_q
->cur_tx
].len
= skb_frag_size(frag
);
4265 tx_q
->tx_skbuff_dma
[tx_q
->cur_tx
].map_as_page
= true;
4266 tx_q
->tx_skbuff_dma
[tx_q
->cur_tx
].buf_type
= STMMAC_TXBUF_T_SKB
;
4269 tx_q
->tx_skbuff_dma
[tx_q
->cur_tx
].last_segment
= true;
4271 /* Only the last descriptor gets to point to the skb. */
4272 tx_q
->tx_skbuff
[tx_q
->cur_tx
] = skb
;
4273 tx_q
->tx_skbuff_dma
[tx_q
->cur_tx
].buf_type
= STMMAC_TXBUF_T_SKB
;
4275 /* Manage tx mitigation */
4276 tx_packets
= (tx_q
->cur_tx
+ 1) - first_tx
;
4277 tx_q
->tx_count_frames
+= tx_packets
;
4279 if ((skb_shinfo(skb
)->tx_flags
& SKBTX_HW_TSTAMP
) && priv
->hwts_tx_en
)
4281 else if (!priv
->tx_coal_frames
[queue
])
4283 else if (tx_packets
> priv
->tx_coal_frames
[queue
])
4285 else if ((tx_q
->tx_count_frames
%
4286 priv
->tx_coal_frames
[queue
]) < tx_packets
)
4292 if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
4293 desc
= &tx_q
->dma_entx
[tx_q
->cur_tx
].basic
;
4295 desc
= &tx_q
->dma_tx
[tx_q
->cur_tx
];
4297 tx_q
->tx_count_frames
= 0;
4298 stmmac_set_tx_ic(priv
, desc
);
4301 /* We've used all descriptors we need for this skb, however,
4302 * advance cur_tx so that it references a fresh descriptor.
4303 * ndo_start_xmit will fill this descriptor the next time it's
4304 * called and stmmac_tx_clean may clean up to this descriptor.
4306 tx_q
->cur_tx
= STMMAC_GET_ENTRY(tx_q
->cur_tx
, priv
->dma_conf
.dma_tx_size
);
4308 if (unlikely(stmmac_tx_avail(priv
, queue
) <= (MAX_SKB_FRAGS
+ 1))) {
4309 netif_dbg(priv
, hw
, priv
->dev
, "%s: stop transmitted packets\n",
4311 netif_tx_stop_queue(netdev_get_tx_queue(priv
->dev
, queue
));
4314 flags
= u64_stats_update_begin_irqsave(&txq_stats
->syncp
);
4315 txq_stats
->tx_bytes
+= skb
->len
;
4316 txq_stats
->tx_tso_frames
++;
4317 txq_stats
->tx_tso_nfrags
+= nfrags
;
4319 txq_stats
->tx_set_ic_bit
++;
4320 u64_stats_update_end_irqrestore(&txq_stats
->syncp
, flags
);
4322 if (priv
->sarc_type
)
4323 stmmac_set_desc_sarc(priv
, first
, priv
->sarc_type
);
4325 skb_tx_timestamp(skb
);
4327 if (unlikely((skb_shinfo(skb
)->tx_flags
& SKBTX_HW_TSTAMP
) &&
4328 priv
->hwts_tx_en
)) {
4329 /* declare that device is doing timestamping */
4330 skb_shinfo(skb
)->tx_flags
|= SKBTX_IN_PROGRESS
;
4331 stmmac_enable_tx_timestamp(priv
, first
);
4334 /* Complete the first descriptor before granting the DMA */
4335 stmmac_prepare_tso_tx_desc(priv
, first
, 1,
4338 1, tx_q
->tx_skbuff_dma
[first_entry
].last_segment
,
4339 hdr
/ 4, (skb
->len
- proto_hdr_len
));
4341 /* If context desc is used to change MSS */
4343 /* Make sure that first descriptor has been completely
4344 * written, including its own bit. This is because MSS is
4345 * actually before first descriptor, so we need to make
4346 * sure that MSS's own bit is the last thing written.
4349 stmmac_set_tx_owner(priv
, mss_desc
);
4352 if (netif_msg_pktdata(priv
)) {
4353 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4354 __func__
, tx_q
->cur_tx
, tx_q
->dirty_tx
, first_entry
,
4355 tx_q
->cur_tx
, first
, nfrags
);
4356 pr_info(">>> frame to be transmitted: ");
4357 print_pkt(skb
->data
, skb_headlen(skb
));
4360 netdev_tx_sent_queue(netdev_get_tx_queue(dev
, queue
), skb
->len
);
4362 stmmac_flush_tx_descriptors(priv
, queue
);
4363 stmmac_tx_timer_arm(priv
, queue
);
4365 return NETDEV_TX_OK
;
4368 dev_err(priv
->device
, "Tx dma map failed\n");
4370 priv
->xstats
.tx_dropped
++;
4371 return NETDEV_TX_OK
;
/**
 *  stmmac_xmit - Tx entry point of the driver
 *  @skb : the socket buffer
 *  @dev : device pointer
 *  Description : this is the tx entry point of the driver.
 *  It programs the chain or the ring and supports oversized frames
 *  and the SG feature.
 */
4382 static netdev_tx_t
stmmac_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
4384 unsigned int first_entry
, tx_packets
, enh_desc
;
4385 struct stmmac_priv
*priv
= netdev_priv(dev
);
4386 unsigned int nopaged_len
= skb_headlen(skb
);
4387 int i
, csum_insertion
= 0, is_jumbo
= 0;
4388 u32 queue
= skb_get_queue_mapping(skb
);
4389 int nfrags
= skb_shinfo(skb
)->nr_frags
;
4390 int gso
= skb_shinfo(skb
)->gso_type
;
4391 struct stmmac_txq_stats
*txq_stats
;
4392 struct dma_edesc
*tbs_desc
= NULL
;
4393 struct dma_desc
*desc
, *first
;
4394 struct stmmac_tx_queue
*tx_q
;
4395 bool has_vlan
, set_ic
;
4396 int entry
, first_tx
;
4397 unsigned long flags
;
4400 tx_q
= &priv
->dma_conf
.tx_queue
[queue
];
4401 txq_stats
= &priv
->xstats
.txq_stats
[queue
];
4402 first_tx
= tx_q
->cur_tx
;
4404 if (priv
->tx_path_in_lpi_mode
&& priv
->eee_sw_timer_en
)
4405 stmmac_disable_eee_mode(priv
);
4407 /* Manage oversized TCP frames for GMAC4 device */
4408 if (skb_is_gso(skb
) && priv
->tso
) {
4409 if (gso
& (SKB_GSO_TCPV4
| SKB_GSO_TCPV6
))
4410 return stmmac_tso_xmit(skb
, dev
);
4411 if (priv
->plat
->has_gmac4
&& (gso
& SKB_GSO_UDP_L4
))
4412 return stmmac_tso_xmit(skb
, dev
);
4415 if (unlikely(stmmac_tx_avail(priv
, queue
) < nfrags
+ 1)) {
4416 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev
, queue
))) {
4417 netif_tx_stop_queue(netdev_get_tx_queue(priv
->dev
,
4419 /* This is a hard error, log it. */
4420 netdev_err(priv
->dev
,
4421 "%s: Tx Ring full when queue awake\n",
4424 return NETDEV_TX_BUSY
;
4427 /* Check if VLAN can be inserted by HW */
4428 has_vlan
= stmmac_vlan_insert(priv
, skb
, tx_q
);
4430 entry
= tx_q
->cur_tx
;
4431 first_entry
= entry
;
4432 WARN_ON(tx_q
->tx_skbuff
[first_entry
]);
4434 csum_insertion
= (skb
->ip_summed
== CHECKSUM_PARTIAL
);
	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
	 * queues. In that case, checksum offloading for those queues that
	 * don't support tx coe needs to fall back to software checksum
	 * calculation.
	 */
4439 if (csum_insertion
&&
4440 priv
->plat
->tx_queues_cfg
[queue
].coe_unsupported
) {
4441 if (unlikely(skb_checksum_help(skb
)))
4443 csum_insertion
= !csum_insertion
;
4446 if (likely(priv
->extend_desc
))
4447 desc
= (struct dma_desc
*)(tx_q
->dma_etx
+ entry
);
4448 else if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
4449 desc
= &tx_q
->dma_entx
[entry
].basic
;
4451 desc
= tx_q
->dma_tx
+ entry
;
4456 stmmac_set_desc_vlan(priv
, first
, STMMAC_VLAN_INSERT
);
4458 enh_desc
= priv
->plat
->enh_desc
;
4459 /* To program the descriptors according to the size of the frame */
4461 is_jumbo
= stmmac_is_jumbo_frm(priv
, skb
->len
, enh_desc
);
4463 if (unlikely(is_jumbo
)) {
4464 entry
= stmmac_jumbo_frm(priv
, tx_q
, skb
, csum_insertion
);
4465 if (unlikely(entry
< 0) && (entry
!= -EINVAL
))
4469 for (i
= 0; i
< nfrags
; i
++) {
4470 const skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
4471 int len
= skb_frag_size(frag
);
4472 bool last_segment
= (i
== (nfrags
- 1));
4474 entry
= STMMAC_GET_ENTRY(entry
, priv
->dma_conf
.dma_tx_size
);
4475 WARN_ON(tx_q
->tx_skbuff
[entry
]);
4477 if (likely(priv
->extend_desc
))
4478 desc
= (struct dma_desc
*)(tx_q
->dma_etx
+ entry
);
4479 else if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
4480 desc
= &tx_q
->dma_entx
[entry
].basic
;
4482 desc
= tx_q
->dma_tx
+ entry
;
4484 des
= skb_frag_dma_map(priv
->device
, frag
, 0, len
,
4486 if (dma_mapping_error(priv
->device
, des
))
4487 goto dma_map_err
; /* should reuse desc w/o issues */
4489 tx_q
->tx_skbuff_dma
[entry
].buf
= des
;
4491 stmmac_set_desc_addr(priv
, desc
, des
);
4493 tx_q
->tx_skbuff_dma
[entry
].map_as_page
= true;
4494 tx_q
->tx_skbuff_dma
[entry
].len
= len
;
4495 tx_q
->tx_skbuff_dma
[entry
].last_segment
= last_segment
;
4496 tx_q
->tx_skbuff_dma
[entry
].buf_type
= STMMAC_TXBUF_T_SKB
;
4498 /* Prepare the descriptor and set the own bit too */
4499 stmmac_prepare_tx_desc(priv
, desc
, 0, len
, csum_insertion
,
4500 priv
->mode
, 1, last_segment
, skb
->len
);
4503 /* Only the last descriptor gets to point to the skb. */
4504 tx_q
->tx_skbuff
[entry
] = skb
;
4505 tx_q
->tx_skbuff_dma
[entry
].buf_type
= STMMAC_TXBUF_T_SKB
;
4507 /* According to the coalesce parameter the IC bit for the latest
4508 * segment is reset and the timer re-started to clean the tx status.
4509 * This approach takes care about the fragments: desc is the first
4510 * element in case of no SG.
4512 tx_packets
= (entry
+ 1) - first_tx
;
4513 tx_q
->tx_count_frames
+= tx_packets
;
4515 if ((skb_shinfo(skb
)->tx_flags
& SKBTX_HW_TSTAMP
) && priv
->hwts_tx_en
)
4517 else if (!priv
->tx_coal_frames
[queue
])
4519 else if (tx_packets
> priv
->tx_coal_frames
[queue
])
4521 else if ((tx_q
->tx_count_frames
%
4522 priv
->tx_coal_frames
[queue
]) < tx_packets
)
4528 if (likely(priv
->extend_desc
))
4529 desc
= &tx_q
->dma_etx
[entry
].basic
;
4530 else if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
4531 desc
= &tx_q
->dma_entx
[entry
].basic
;
4533 desc
= &tx_q
->dma_tx
[entry
];
4535 tx_q
->tx_count_frames
= 0;
4536 stmmac_set_tx_ic(priv
, desc
);
4539 /* We've used all descriptors we need for this skb, however,
4540 * advance cur_tx so that it references a fresh descriptor.
4541 * ndo_start_xmit will fill this descriptor the next time it's
4542 * called and stmmac_tx_clean may clean up to this descriptor.
4544 entry
= STMMAC_GET_ENTRY(entry
, priv
->dma_conf
.dma_tx_size
);
4545 tx_q
->cur_tx
= entry
;
4547 if (netif_msg_pktdata(priv
)) {
4548 netdev_dbg(priv
->dev
,
4549 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4550 __func__
, tx_q
->cur_tx
, tx_q
->dirty_tx
, first_entry
,
4551 entry
, first
, nfrags
);
4553 netdev_dbg(priv
->dev
, ">>> frame to be transmitted: ");
4554 print_pkt(skb
->data
, skb
->len
);
4557 if (unlikely(stmmac_tx_avail(priv
, queue
) <= (MAX_SKB_FRAGS
+ 1))) {
4558 netif_dbg(priv
, hw
, priv
->dev
, "%s: stop transmitted packets\n",
4560 netif_tx_stop_queue(netdev_get_tx_queue(priv
->dev
, queue
));
4563 flags
= u64_stats_update_begin_irqsave(&txq_stats
->syncp
);
4564 txq_stats
->tx_bytes
+= skb
->len
;
4566 txq_stats
->tx_set_ic_bit
++;
4567 u64_stats_update_end_irqrestore(&txq_stats
->syncp
, flags
);
4569 if (priv
->sarc_type
)
4570 stmmac_set_desc_sarc(priv
, first
, priv
->sarc_type
);
4572 skb_tx_timestamp(skb
);
4574 /* Ready to fill the first descriptor and set the OWN bit w/o any
4575 * problems because all the descriptors are actually ready to be
4576 * passed to the DMA engine.
4578 if (likely(!is_jumbo
)) {
4579 bool last_segment
= (nfrags
== 0);
4581 des
= dma_map_single(priv
->device
, skb
->data
,
4582 nopaged_len
, DMA_TO_DEVICE
);
4583 if (dma_mapping_error(priv
->device
, des
))
4586 tx_q
->tx_skbuff_dma
[first_entry
].buf
= des
;
4587 tx_q
->tx_skbuff_dma
[first_entry
].buf_type
= STMMAC_TXBUF_T_SKB
;
4588 tx_q
->tx_skbuff_dma
[first_entry
].map_as_page
= false;
4590 stmmac_set_desc_addr(priv
, first
, des
);
4592 tx_q
->tx_skbuff_dma
[first_entry
].len
= nopaged_len
;
4593 tx_q
->tx_skbuff_dma
[first_entry
].last_segment
= last_segment
;
4595 if (unlikely((skb_shinfo(skb
)->tx_flags
& SKBTX_HW_TSTAMP
) &&
4596 priv
->hwts_tx_en
)) {
4597 /* declare that device is doing timestamping */
4598 skb_shinfo(skb
)->tx_flags
|= SKBTX_IN_PROGRESS
;
4599 stmmac_enable_tx_timestamp(priv
, first
);
4602 /* Prepare the first descriptor setting the OWN bit too */
4603 stmmac_prepare_tx_desc(priv
, first
, 1, nopaged_len
,
4604 csum_insertion
, priv
->mode
, 0, last_segment
,
4608 if (tx_q
->tbs
& STMMAC_TBS_EN
) {
4609 struct timespec64 ts
= ns_to_timespec64(skb
->tstamp
);
4611 tbs_desc
= &tx_q
->dma_entx
[first_entry
];
4612 stmmac_set_desc_tbs(priv
, tbs_desc
, ts
.tv_sec
, ts
.tv_nsec
);
4615 stmmac_set_tx_owner(priv
, first
);
4617 netdev_tx_sent_queue(netdev_get_tx_queue(dev
, queue
), skb
->len
);
4619 stmmac_enable_dma_transmission(priv
, priv
->ioaddr
);
4621 stmmac_flush_tx_descriptors(priv
, queue
);
4622 stmmac_tx_timer_arm(priv
, queue
);
4624 return NETDEV_TX_OK
;
4627 netdev_err(priv
->dev
, "Tx DMA map failed\n");
4629 priv
->xstats
.tx_dropped
++;
4630 return NETDEV_TX_OK
;
static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
{
	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
	__be16 vlan_proto = veth->h_vlan_proto;
	u16 vlanid;

	if ((vlan_proto == htons(ETH_P_8021Q) &&
	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
	    (vlan_proto == htons(ETH_P_8021AD) &&
	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
		/* pop the vlan tag */
		vlanid = ntohs(veth->h_vlan_TCI);
		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
	}
}
/**
 * stmmac_rx_refill - refill used skb preallocated buffers
 * @priv: driver private structure
 * @queue: RX queue index
 * Description : this is to reallocate the skb for the reception process
 * that is based on zero-copy.
 */
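/* Added note for clarity (not part of the original comment): the refill loop
 * below walks the ring from dirty_rx for stmmac_rx_dirty() entries,
 * re-attaching page_pool pages to each descriptor, handing ownership back to
 * the DMA, and finally advancing the RX tail pointer so the hardware can
 * reuse the replenished descriptors.
 */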
static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
	int dirty = stmmac_rx_dirty(priv, queue);
	unsigned int entry = rx_q->dirty_rx;
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	if (priv->dma_cap.host_dma_width <= 32)
		gfp |= GFP_DMA32;

	while (dirty-- > 0) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
		struct dma_desc *p;
		bool use_rx_wd;

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			p = rx_q->dma_rx + entry;

		if (!buf->page) {
			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
			if (!buf->page)
				break;
		}

		if (priv->sph && !buf->sec_page) {
			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
			if (!buf->sec_page)
				break;

			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
		}

		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;

		stmmac_set_desc_addr(priv, p, buf->addr);
		if (priv->sph)
			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
		else
			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
		stmmac_refill_desc3(priv, rx_q, p);

		rx_q->rx_count_frames++;
		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
			rx_q->rx_count_frames = 0;

		use_rx_wd = !priv->rx_coal_frames[queue];
		use_rx_wd |= rx_q->rx_count_frames > 0;
		if (!priv->use_riwt)
			use_rx_wd = false;

		dma_wmb();
		stmmac_set_rx_owner(priv, p, use_rx_wd);

		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
	}
	rx_q->dirty_rx = entry;
	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
			     (rx_q->dirty_rx * sizeof(struct dma_desc));
	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
}
static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
				       struct dma_desc *p,
				       int status, unsigned int len)
{
	unsigned int plen = 0, hlen = 0;
	int coe = priv->hw->rx_csum;

	/* Not first descriptor, buffer is always zero */
	if (priv->sph && len)
		return 0;

	/* First descriptor, get split header length */
	stmmac_get_rx_header_len(priv, p, &hlen);
	if (priv->sph && hlen) {
		priv->xstats.rx_split_hdr_pkt_n++;
		return hlen;
	}

	/* First descriptor, not last descriptor and not split header */
	if (status & rx_not_ls)
		return priv->dma_conf.dma_buf_sz;

	plen = stmmac_get_rx_frame_len(priv, p, coe);

	/* First descriptor and last descriptor and not split header */
	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
}
static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
				       struct dma_desc *p,
				       int status, unsigned int len)
{
	int coe = priv->hw->rx_csum;
	unsigned int plen = 0;

	/* Not split header, buffer is not available */
	if (!priv->sph)
		return 0;

	/* Not last descriptor */
	if (status & rx_not_ls)
		return priv->dma_conf.dma_buf_sz;

	plen = stmmac_get_rx_frame_len(priv, p, coe);

	/* Last descriptor */
	return plen - len;
}
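/* Added clarifying note (assumption for illustration): when split header
 * (SPH) is active, a received frame is spread over two buffers; buf1 carries
 * the header portion reported by stmmac_get_rx_header_len() and buf2 carries
 * the remaining payload, so for the last descriptor buf2_len is simply the
 * total frame length minus what has already been accounted for in @len.
 */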
static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
				struct xdp_frame *xdpf, bool dma_map)
{
	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	unsigned int entry = tx_q->cur_tx;
	struct dma_desc *tx_desc;
	dma_addr_t dma_addr;
	bool set_ic;

	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
		return STMMAC_XDP_CONSUMED;

	if (likely(priv->extend_desc))
		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		tx_desc = &tx_q->dma_entx[entry].basic;
	else
		tx_desc = tx_q->dma_tx + entry;

	if (dma_map) {
		dma_addr = dma_map_single(priv->device, xdpf->data,
					  xdpf->len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, dma_addr))
			return STMMAC_XDP_CONSUMED;

		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
	} else {
		struct page *page = virt_to_page(xdpf->data);

		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
			   xdpf->headroom;
		dma_sync_single_for_device(priv->device, dma_addr,
					   xdpf->len, DMA_BIDIRECTIONAL);

		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
	}

	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
	tx_q->tx_skbuff_dma[entry].map_as_page = false;
	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
	tx_q->tx_skbuff_dma[entry].last_segment = true;
	tx_q->tx_skbuff_dma[entry].is_jumbo = false;

	tx_q->xdpf[entry] = xdpf;

	stmmac_set_desc_addr(priv, tx_desc, dma_addr);

	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
			       true, priv->mode, true, true,
			       xdpf->len);

	tx_q->tx_count_frames++;

	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
		set_ic = true;
	else
		set_ic = false;

	if (set_ic) {
		unsigned long flags;

		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, tx_desc);
		flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
		txq_stats->tx_set_ic_bit++;
		u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
	}

	stmmac_enable_dma_transmission(priv, priv->ioaddr);

	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
	tx_q->cur_tx = entry;

	return STMMAC_XDP_TX;
}
static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv, int cpu)
{
	int index = cpu;

	if (unlikely(index < 0))
		index = 0;

	while (index >= priv->plat->tx_queues_to_use)
		index -= priv->plat->tx_queues_to_use;

	return index;
}
static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
				struct xdp_buff *xdp)
{
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
	int cpu = smp_processor_id();
	struct netdev_queue *nq;
	int queue;
	int res;

	if (unlikely(!xdpf))
		return STMMAC_XDP_CONSUMED;

	queue = stmmac_xdp_get_tx_queue(priv, cpu);
	nq = netdev_get_tx_queue(priv->dev, queue);

	__netif_tx_lock(nq, cpu);
	/* Avoids TX time-out as we are sharing with slow path */
	txq_trans_cond_update(nq);

	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
	if (res == STMMAC_XDP_TX)
		stmmac_flush_tx_descriptors(priv, queue);

	__netif_tx_unlock(nq);

	return res;
}
static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
				 struct bpf_prog *prog,
				 struct xdp_buff *xdp)
{
	u32 act;
	int res;

	act = bpf_prog_run_xdp(prog, xdp);
	switch (act) {
	case XDP_PASS:
		res = STMMAC_XDP_PASS;
		break;
	case XDP_TX:
		res = stmmac_xdp_xmit_back(priv, xdp);
		break;
	case XDP_REDIRECT:
		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
			res = STMMAC_XDP_CONSUMED;
		else
			res = STMMAC_XDP_REDIRECT;
		break;
	default:
		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(priv->dev, prog, act);
		fallthrough;
	case XDP_DROP:
		res = STMMAC_XDP_CONSUMED;
		break;
	}

	return res;
}
static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
					   struct xdp_buff *xdp)
{
	struct bpf_prog *prog;
	int res;

	prog = READ_ONCE(priv->xdp_prog);
	if (!prog) {
		res = STMMAC_XDP_PASS;
		goto out;
	}

	res = __stmmac_xdp_run_prog(priv, prog, xdp);
out:
	return ERR_PTR(-res);
}
static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
				   int xdp_status)
{
	int cpu = smp_processor_id();
	int queue;

	queue = stmmac_xdp_get_tx_queue(priv, cpu);

	if (xdp_status & STMMAC_XDP_TX)
		stmmac_tx_timer_arm(priv, queue);

	if (xdp_status & STMMAC_XDP_REDIRECT)
		xdp_do_flush();
}
4956 static struct sk_buff
*stmmac_construct_skb_zc(struct stmmac_channel
*ch
,
4957 struct xdp_buff
*xdp
)
4959 unsigned int metasize
= xdp
->data
- xdp
->data_meta
;
4960 unsigned int datasize
= xdp
->data_end
- xdp
->data
;
4961 struct sk_buff
*skb
;
4963 skb
= __napi_alloc_skb(&ch
->rxtx_napi
,
4964 xdp
->data_end
- xdp
->data_hard_start
,
4965 GFP_ATOMIC
| __GFP_NOWARN
);
4969 skb_reserve(skb
, xdp
->data
- xdp
->data_hard_start
);
4970 memcpy(__skb_put(skb
, datasize
), xdp
->data
, datasize
);
4972 skb_metadata_set(skb
, metasize
);
4977 static void stmmac_dispatch_skb_zc(struct stmmac_priv
*priv
, u32 queue
,
4978 struct dma_desc
*p
, struct dma_desc
*np
,
4979 struct xdp_buff
*xdp
)
4981 struct stmmac_rxq_stats
*rxq_stats
= &priv
->xstats
.rxq_stats
[queue
];
4982 struct stmmac_channel
*ch
= &priv
->channel
[queue
];
4983 unsigned int len
= xdp
->data_end
- xdp
->data
;
4984 enum pkt_hash_types hash_type
;
4985 int coe
= priv
->hw
->rx_csum
;
4986 unsigned long flags
;
4987 struct sk_buff
*skb
;
4990 skb
= stmmac_construct_skb_zc(ch
, xdp
);
4992 priv
->xstats
.rx_dropped
++;
4996 stmmac_get_rx_hwtstamp(priv
, p
, np
, skb
);
4997 stmmac_rx_vlan(priv
->dev
, skb
);
4998 skb
->protocol
= eth_type_trans(skb
, priv
->dev
);
5001 skb_checksum_none_assert(skb
);
5003 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
5005 if (!stmmac_get_rx_hash(priv
, p
, &hash
, &hash_type
))
5006 skb_set_hash(skb
, hash
, hash_type
);
5008 skb_record_rx_queue(skb
, queue
);
5009 napi_gro_receive(&ch
->rxtx_napi
, skb
);
5011 flags
= u64_stats_update_begin_irqsave(&rxq_stats
->syncp
);
5012 rxq_stats
->rx_pkt_n
++;
5013 rxq_stats
->rx_bytes
+= len
;
5014 u64_stats_update_end_irqrestore(&rxq_stats
->syncp
, flags
);
5017 static bool stmmac_rx_refill_zc(struct stmmac_priv
*priv
, u32 queue
, u32 budget
)
5019 struct stmmac_rx_queue
*rx_q
= &priv
->dma_conf
.rx_queue
[queue
];
5020 unsigned int entry
= rx_q
->dirty_rx
;
5021 struct dma_desc
*rx_desc
= NULL
;
5024 budget
= min(budget
, stmmac_rx_dirty(priv
, queue
));
5026 while (budget
-- > 0 && entry
!= rx_q
->cur_rx
) {
5027 struct stmmac_rx_buffer
*buf
= &rx_q
->buf_pool
[entry
];
5028 dma_addr_t dma_addr
;
5032 buf
->xdp
= xsk_buff_alloc(rx_q
->xsk_pool
);
5039 if (priv
->extend_desc
)
5040 rx_desc
= (struct dma_desc
*)(rx_q
->dma_erx
+ entry
);
5042 rx_desc
= rx_q
->dma_rx
+ entry
;
5044 dma_addr
= xsk_buff_xdp_get_dma(buf
->xdp
);
5045 stmmac_set_desc_addr(priv
, rx_desc
, dma_addr
);
5046 stmmac_set_desc_sec_addr(priv
, rx_desc
, 0, false);
5047 stmmac_refill_desc3(priv
, rx_q
, rx_desc
);
5049 rx_q
->rx_count_frames
++;
5050 rx_q
->rx_count_frames
+= priv
->rx_coal_frames
[queue
];
5051 if (rx_q
->rx_count_frames
> priv
->rx_coal_frames
[queue
])
5052 rx_q
->rx_count_frames
= 0;
5054 use_rx_wd
= !priv
->rx_coal_frames
[queue
];
5055 use_rx_wd
|= rx_q
->rx_count_frames
> 0;
5056 if (!priv
->use_riwt
)
5060 stmmac_set_rx_owner(priv
, rx_desc
, use_rx_wd
);
5062 entry
= STMMAC_GET_ENTRY(entry
, priv
->dma_conf
.dma_rx_size
);
5066 rx_q
->dirty_rx
= entry
;
5067 rx_q
->rx_tail_addr
= rx_q
->dma_rx_phy
+
5068 (rx_q
->dirty_rx
* sizeof(struct dma_desc
));
5069 stmmac_set_rx_tail_ptr(priv
, priv
->ioaddr
, rx_q
->rx_tail_addr
, queue
);
static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
{
	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
	 * to represent incoming packet, whereas cb field in the same structure
	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
	 */
	return (struct stmmac_xdp_buff *)xdp;
}
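/* Added layout note (assumption, not part of the original comment): the cast
 * above is only valid because struct stmmac_xdp_buff begins with a struct
 * xdp_buff, so its extra driver-private members alias the cb[] scratch area
 * of struct xdp_buff_xsk rather than overlapping the packet fields.
 */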
5085 static int stmmac_rx_zc(struct stmmac_priv
*priv
, int limit
, u32 queue
)
5087 struct stmmac_rxq_stats
*rxq_stats
= &priv
->xstats
.rxq_stats
[queue
];
5088 struct stmmac_rx_queue
*rx_q
= &priv
->dma_conf
.rx_queue
[queue
];
5089 unsigned int count
= 0, error
= 0, len
= 0;
5090 int dirty
= stmmac_rx_dirty(priv
, queue
);
5091 unsigned int next_entry
= rx_q
->cur_rx
;
5092 u32 rx_errors
= 0, rx_dropped
= 0;
5093 unsigned int desc_size
;
5094 struct bpf_prog
*prog
;
5095 bool failure
= false;
5096 unsigned long flags
;
5100 if (netif_msg_rx_status(priv
)) {
5103 netdev_dbg(priv
->dev
, "%s: descriptor ring:\n", __func__
);
5104 if (priv
->extend_desc
) {
5105 rx_head
= (void *)rx_q
->dma_erx
;
5106 desc_size
= sizeof(struct dma_extended_desc
);
5108 rx_head
= (void *)rx_q
->dma_rx
;
5109 desc_size
= sizeof(struct dma_desc
);
5112 stmmac_display_ring(priv
, rx_head
, priv
->dma_conf
.dma_rx_size
, true,
5113 rx_q
->dma_rx_phy
, desc_size
);
5115 while (count
< limit
) {
5116 struct stmmac_rx_buffer
*buf
;
5117 struct stmmac_xdp_buff
*ctx
;
5118 unsigned int buf1_len
= 0;
5119 struct dma_desc
*np
, *p
;
5123 if (!count
&& rx_q
->state_saved
) {
5124 error
= rx_q
->state
.error
;
5125 len
= rx_q
->state
.len
;
5127 rx_q
->state_saved
= false;
5138 buf
= &rx_q
->buf_pool
[entry
];
5140 if (dirty
>= STMMAC_RX_FILL_BATCH
) {
5141 failure
= failure
||
5142 !stmmac_rx_refill_zc(priv
, queue
, dirty
);
5146 if (priv
->extend_desc
)
5147 p
= (struct dma_desc
*)(rx_q
->dma_erx
+ entry
);
5149 p
= rx_q
->dma_rx
+ entry
;
5151 /* read the status of the incoming frame */
5152 status
= stmmac_rx_status(priv
, &priv
->xstats
, p
);
5153 /* check if managed by the DMA otherwise go ahead */
5154 if (unlikely(status
& dma_own
))
5157 /* Prefetch the next RX descriptor */
5158 rx_q
->cur_rx
= STMMAC_GET_ENTRY(rx_q
->cur_rx
,
5159 priv
->dma_conf
.dma_rx_size
);
5160 next_entry
= rx_q
->cur_rx
;
5162 if (priv
->extend_desc
)
5163 np
= (struct dma_desc
*)(rx_q
->dma_erx
+ next_entry
);
5165 np
= rx_q
->dma_rx
+ next_entry
;
5169 /* Ensure a valid XSK buffer before proceed */
5173 if (priv
->extend_desc
)
5174 stmmac_rx_extended_status(priv
, &priv
->xstats
,
5175 rx_q
->dma_erx
+ entry
);
5176 if (unlikely(status
== discard_frame
)) {
5177 xsk_buff_free(buf
->xdp
);
5181 if (!priv
->hwts_rx_en
)
5185 if (unlikely(error
&& (status
& rx_not_ls
)))
5187 if (unlikely(error
)) {
5192 /* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5193 if (likely(status
& rx_not_ls
)) {
5194 xsk_buff_free(buf
->xdp
);
5201 ctx
= xsk_buff_to_stmmac_ctx(buf
->xdp
);
5206 /* XDP ZC Frame only support primary buffers for now */
5207 buf1_len
= stmmac_rx_buf1_len(priv
, p
, status
, len
);
5210 /* ACS is disabled; strip manually. */
5211 if (likely(!(status
& rx_not_ls
))) {
5212 buf1_len
-= ETH_FCS_LEN
;
5216 /* RX buffer is good and fit into a XSK pool buffer */
5217 buf
->xdp
->data_end
= buf
->xdp
->data
+ buf1_len
;
5218 xsk_buff_dma_sync_for_cpu(buf
->xdp
, rx_q
->xsk_pool
);
5220 prog
= READ_ONCE(priv
->xdp_prog
);
5221 res
= __stmmac_xdp_run_prog(priv
, prog
, buf
->xdp
);
5224 case STMMAC_XDP_PASS
:
5225 stmmac_dispatch_skb_zc(priv
, queue
, p
, np
, buf
->xdp
);
5226 xsk_buff_free(buf
->xdp
);
5228 case STMMAC_XDP_CONSUMED
:
5229 xsk_buff_free(buf
->xdp
);
5233 case STMMAC_XDP_REDIRECT
:
5243 if (status
& rx_not_ls
) {
5244 rx_q
->state_saved
= true;
5245 rx_q
->state
.error
= error
;
5246 rx_q
->state
.len
= len
;
5249 stmmac_finalize_xdp_rx(priv
, xdp_status
);
5251 flags
= u64_stats_update_begin_irqsave(&rxq_stats
->syncp
);
5252 rxq_stats
->rx_pkt_n
+= count
;
5253 u64_stats_update_end_irqrestore(&rxq_stats
->syncp
, flags
);
5255 priv
->xstats
.rx_dropped
+= rx_dropped
;
5256 priv
->xstats
.rx_errors
+= rx_errors
;
5258 if (xsk_uses_need_wakeup(rx_q
->xsk_pool
)) {
5259 if (failure
|| stmmac_rx_dirty(priv
, queue
) > 0)
5260 xsk_set_rx_need_wakeup(rx_q
->xsk_pool
);
5262 xsk_clear_rx_need_wakeup(rx_q
->xsk_pool
);
5267 return failure
? limit
: (int)count
;
/**
 * stmmac_rx - manage the receive process
 * @priv: driver private structure
 * @limit: napi budget
 * @queue: RX queue index.
 * Description : this is the function called by the napi poll method.
 * It gets all the frames inside the ring.
 */
5278 static int stmmac_rx(struct stmmac_priv
*priv
, int limit
, u32 queue
)
5280 u32 rx_errors
= 0, rx_dropped
= 0, rx_bytes
= 0, rx_packets
= 0;
5281 struct stmmac_rxq_stats
*rxq_stats
= &priv
->xstats
.rxq_stats
[queue
];
5282 struct stmmac_rx_queue
*rx_q
= &priv
->dma_conf
.rx_queue
[queue
];
5283 struct stmmac_channel
*ch
= &priv
->channel
[queue
];
5284 unsigned int count
= 0, error
= 0, len
= 0;
5285 int status
= 0, coe
= priv
->hw
->rx_csum
;
5286 unsigned int next_entry
= rx_q
->cur_rx
;
5287 enum dma_data_direction dma_dir
;
5288 unsigned int desc_size
;
5289 struct sk_buff
*skb
= NULL
;
5290 struct stmmac_xdp_buff ctx
;
5291 unsigned long flags
;
5295 dma_dir
= page_pool_get_dma_dir(rx_q
->page_pool
);
5296 buf_sz
= DIV_ROUND_UP(priv
->dma_conf
.dma_buf_sz
, PAGE_SIZE
) * PAGE_SIZE
;
5297 limit
= min(priv
->dma_conf
.dma_rx_size
- 1, (unsigned int)limit
);
5299 if (netif_msg_rx_status(priv
)) {
5302 netdev_dbg(priv
->dev
, "%s: descriptor ring:\n", __func__
);
5303 if (priv
->extend_desc
) {
5304 rx_head
= (void *)rx_q
->dma_erx
;
5305 desc_size
= sizeof(struct dma_extended_desc
);
5307 rx_head
= (void *)rx_q
->dma_rx
;
5308 desc_size
= sizeof(struct dma_desc
);
5311 stmmac_display_ring(priv
, rx_head
, priv
->dma_conf
.dma_rx_size
, true,
5312 rx_q
->dma_rx_phy
, desc_size
);
5314 while (count
< limit
) {
5315 unsigned int buf1_len
= 0, buf2_len
= 0;
5316 enum pkt_hash_types hash_type
;
5317 struct stmmac_rx_buffer
*buf
;
5318 struct dma_desc
*np
, *p
;
5322 if (!count
&& rx_q
->state_saved
) {
5323 skb
= rx_q
->state
.skb
;
5324 error
= rx_q
->state
.error
;
5325 len
= rx_q
->state
.len
;
5327 rx_q
->state_saved
= false;
5340 buf
= &rx_q
->buf_pool
[entry
];
5342 if (priv
->extend_desc
)
5343 p
= (struct dma_desc
*)(rx_q
->dma_erx
+ entry
);
5345 p
= rx_q
->dma_rx
+ entry
;
5347 /* read the status of the incoming frame */
5348 status
= stmmac_rx_status(priv
, &priv
->xstats
, p
);
5349 /* check if managed by the DMA otherwise go ahead */
5350 if (unlikely(status
& dma_own
))
5353 rx_q
->cur_rx
= STMMAC_GET_ENTRY(rx_q
->cur_rx
,
5354 priv
->dma_conf
.dma_rx_size
);
5355 next_entry
= rx_q
->cur_rx
;
5357 if (priv
->extend_desc
)
5358 np
= (struct dma_desc
*)(rx_q
->dma_erx
+ next_entry
);
5360 np
= rx_q
->dma_rx
+ next_entry
;
5364 if (priv
->extend_desc
)
5365 stmmac_rx_extended_status(priv
, &priv
->xstats
, rx_q
->dma_erx
+ entry
);
5366 if (unlikely(status
== discard_frame
)) {
5367 page_pool_recycle_direct(rx_q
->page_pool
, buf
->page
);
5370 if (!priv
->hwts_rx_en
)
5374 if (unlikely(error
&& (status
& rx_not_ls
)))
5376 if (unlikely(error
)) {
5383 /* Buffer is good. Go on. */
5385 prefetch(page_address(buf
->page
) + buf
->page_offset
);
5387 prefetch(page_address(buf
->sec_page
));
5389 buf1_len
= stmmac_rx_buf1_len(priv
, p
, status
, len
);
5391 buf2_len
= stmmac_rx_buf2_len(priv
, p
, status
, len
);
5394 /* ACS is disabled; strip manually. */
5395 if (likely(!(status
& rx_not_ls
))) {
5397 buf2_len
-= ETH_FCS_LEN
;
5399 } else if (buf1_len
) {
5400 buf1_len
-= ETH_FCS_LEN
;
5406 unsigned int pre_len
, sync_len
;
5408 dma_sync_single_for_cpu(priv
->device
, buf
->addr
,
5411 xdp_init_buff(&ctx
.xdp
, buf_sz
, &rx_q
->xdp_rxq
);
5412 xdp_prepare_buff(&ctx
.xdp
, page_address(buf
->page
),
5413 buf
->page_offset
, buf1_len
, true);
5415 pre_len
= ctx
.xdp
.data_end
- ctx
.xdp
.data_hard_start
-
5422 skb
= stmmac_xdp_run_prog(priv
, &ctx
.xdp
);
			/* Due to xdp_adjust_tail: the DMA sync for_device
			 * must cover the max length the CPU may have touched.
			 */
5426 sync_len
= ctx
.xdp
.data_end
- ctx
.xdp
.data_hard_start
-
5428 sync_len
= max(sync_len
, pre_len
);
5430 /* For Not XDP_PASS verdict */
5432 unsigned int xdp_res
= -PTR_ERR(skb
);
5434 if (xdp_res
& STMMAC_XDP_CONSUMED
) {
5435 page_pool_put_page(rx_q
->page_pool
,
5436 virt_to_head_page(ctx
.xdp
.data
),
5441 /* Clear skb as it was set as
5442 * status by XDP program.
5446 if (unlikely((status
& rx_not_ls
)))
5451 } else if (xdp_res
& (STMMAC_XDP_TX
|
5452 STMMAC_XDP_REDIRECT
)) {
5453 xdp_status
|= xdp_res
;
5463 /* XDP program may expand or reduce tail */
5464 buf1_len
= ctx
.xdp
.data_end
- ctx
.xdp
.data
;
5466 skb
= napi_alloc_skb(&ch
->rx_napi
, buf1_len
);
5473 /* XDP program may adjust header */
5474 skb_copy_to_linear_data(skb
, ctx
.xdp
.data
, buf1_len
);
5475 skb_put(skb
, buf1_len
);
5477 /* Data payload copied into SKB, page ready for recycle */
5478 page_pool_recycle_direct(rx_q
->page_pool
, buf
->page
);
5480 } else if (buf1_len
) {
5481 dma_sync_single_for_cpu(priv
->device
, buf
->addr
,
5483 skb_add_rx_frag(skb
, skb_shinfo(skb
)->nr_frags
,
5484 buf
->page
, buf
->page_offset
, buf1_len
,
5485 priv
->dma_conf
.dma_buf_sz
);
5487 /* Data payload appended into SKB */
5488 skb_mark_for_recycle(skb
);
5493 dma_sync_single_for_cpu(priv
->device
, buf
->sec_addr
,
5495 skb_add_rx_frag(skb
, skb_shinfo(skb
)->nr_frags
,
5496 buf
->sec_page
, 0, buf2_len
,
5497 priv
->dma_conf
.dma_buf_sz
);
5499 /* Data payload appended into SKB */
5500 skb_mark_for_recycle(skb
);
5501 buf
->sec_page
= NULL
;
5505 if (likely(status
& rx_not_ls
))
5510 /* Got entire packet into SKB. Finish it. */
5512 stmmac_get_rx_hwtstamp(priv
, p
, np
, skb
);
5513 stmmac_rx_vlan(priv
->dev
, skb
);
5514 skb
->protocol
= eth_type_trans(skb
, priv
->dev
);
5517 skb_checksum_none_assert(skb
);
5519 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
5521 if (!stmmac_get_rx_hash(priv
, p
, &hash
, &hash_type
))
5522 skb_set_hash(skb
, hash
, hash_type
);
5524 skb_record_rx_queue(skb
, queue
);
5525 napi_gro_receive(&ch
->rx_napi
, skb
);
5533 if (status
& rx_not_ls
|| skb
) {
5534 rx_q
->state_saved
= true;
5535 rx_q
->state
.skb
= skb
;
5536 rx_q
->state
.error
= error
;
5537 rx_q
->state
.len
= len
;
5540 stmmac_finalize_xdp_rx(priv
, xdp_status
);
5542 stmmac_rx_refill(priv
, queue
);
5544 flags
= u64_stats_update_begin_irqsave(&rxq_stats
->syncp
);
5545 rxq_stats
->rx_packets
+= rx_packets
;
5546 rxq_stats
->rx_bytes
+= rx_bytes
;
5547 rxq_stats
->rx_pkt_n
+= count
;
5548 u64_stats_update_end_irqrestore(&rxq_stats
->syncp
, flags
);
5550 priv
->xstats
.rx_dropped
+= rx_dropped
;
5551 priv
->xstats
.rx_errors
+= rx_errors
;
static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
{
	struct stmmac_channel *ch =
		container_of(napi, struct stmmac_channel, rx_napi);
	struct stmmac_priv *priv = ch->priv_data;
	struct stmmac_rxq_stats *rxq_stats;
	u32 chan = ch->index;
	unsigned long flags;
	int work_done;

	rxq_stats = &priv->xstats.rxq_stats[chan];
	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
	rxq_stats->napi_poll++;
	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);

	work_done = stmmac_rx(priv, budget, chan);
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
		spin_unlock_irqrestore(&ch->lock, flags);
	}

	return work_done;
}
5583 static int stmmac_napi_poll_tx(struct napi_struct
*napi
, int budget
)
5585 struct stmmac_channel
*ch
=
5586 container_of(napi
, struct stmmac_channel
, tx_napi
);
5587 struct stmmac_priv
*priv
= ch
->priv_data
;
5588 struct stmmac_txq_stats
*txq_stats
;
5589 bool pending_packets
= false;
5590 u32 chan
= ch
->index
;
5591 unsigned long flags
;
5594 txq_stats
= &priv
->xstats
.txq_stats
[chan
];
5595 flags
= u64_stats_update_begin_irqsave(&txq_stats
->syncp
);
5596 txq_stats
->napi_poll
++;
5597 u64_stats_update_end_irqrestore(&txq_stats
->syncp
, flags
);
5599 work_done
= stmmac_tx_clean(priv
, budget
, chan
, &pending_packets
);
5600 work_done
= min(work_done
, budget
);
5602 if (work_done
< budget
&& napi_complete_done(napi
, work_done
)) {
5603 unsigned long flags
;
5605 spin_lock_irqsave(&ch
->lock
, flags
);
5606 stmmac_enable_dma_irq(priv
, priv
->ioaddr
, chan
, 0, 1);
5607 spin_unlock_irqrestore(&ch
->lock
, flags
);
5610 /* TX still have packet to handle, check if we need to arm tx timer */
5611 if (pending_packets
)
5612 stmmac_tx_timer_arm(priv
, chan
);
5617 static int stmmac_napi_poll_rxtx(struct napi_struct
*napi
, int budget
)
5619 struct stmmac_channel
*ch
=
5620 container_of(napi
, struct stmmac_channel
, rxtx_napi
);
5621 struct stmmac_priv
*priv
= ch
->priv_data
;
5622 bool tx_pending_packets
= false;
5623 int rx_done
, tx_done
, rxtx_done
;
5624 struct stmmac_rxq_stats
*rxq_stats
;
5625 struct stmmac_txq_stats
*txq_stats
;
5626 u32 chan
= ch
->index
;
5627 unsigned long flags
;
5629 rxq_stats
= &priv
->xstats
.rxq_stats
[chan
];
5630 flags
= u64_stats_update_begin_irqsave(&rxq_stats
->syncp
);
5631 rxq_stats
->napi_poll
++;
5632 u64_stats_update_end_irqrestore(&rxq_stats
->syncp
, flags
);
5634 txq_stats
= &priv
->xstats
.txq_stats
[chan
];
5635 flags
= u64_stats_update_begin_irqsave(&txq_stats
->syncp
);
5636 txq_stats
->napi_poll
++;
5637 u64_stats_update_end_irqrestore(&txq_stats
->syncp
, flags
);
5639 tx_done
= stmmac_tx_clean(priv
, budget
, chan
, &tx_pending_packets
);
5640 tx_done
= min(tx_done
, budget
);
5642 rx_done
= stmmac_rx_zc(priv
, budget
, chan
);
5644 rxtx_done
= max(tx_done
, rx_done
);
5646 /* If either TX or RX work is not complete, return budget
5649 if (rxtx_done
>= budget
)
5652 /* all work done, exit the polling mode */
5653 if (napi_complete_done(napi
, rxtx_done
)) {
5654 unsigned long flags
;
5656 spin_lock_irqsave(&ch
->lock
, flags
);
		/* Both RX and TX work are complete,
		 * so enable both RX & TX IRQs.
		 */
5660 stmmac_enable_dma_irq(priv
, priv
->ioaddr
, chan
, 1, 1);
5661 spin_unlock_irqrestore(&ch
->lock
, flags
);
5664 /* TX still have packet to handle, check if we need to arm tx timer */
5665 if (tx_pending_packets
)
5666 stmmac_tx_timer_arm(priv
, chan
);
5668 return min(rxtx_done
, budget
- 1);
/**
 *  stmmac_tx_timeout
 *  @dev : Pointer to net device structure
 *  @txqueue: the index of the hanging transmit queue
 *  Description: this function is called when a packet transmission fails to
 *  complete within a reasonable time. The driver will mark the error in the
 *  netdev structure and arrange for the device to be reset to a sane state
 *  in order to transmit a new packet.
 */
static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	stmmac_global_err(priv);
}
/**
 *  stmmac_set_rx_mode - entry point for multicast addressing
 *  @dev : pointer to the device structure
 *  Description:
 *  This function is a driver entry point which gets called by the kernel
 *  whenever multicast addresses must be enabled/disabled.
 *  Return value:
 *  void.
 */
static void stmmac_set_rx_mode(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	stmmac_set_filter(priv, priv->hw, dev);
}
/**
 *  stmmac_change_mtu - entry point to change MTU size for the device.
 *  @dev : device pointer.
 *  @new_mtu : the new MTU size for the device.
 *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
 *  to drive packet transmission. Ethernet has an MTU of 1500 octets
 *  (ETH_DATA_LEN). This value can be changed with ifconfig.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  on failure.
 */
static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int txfifosz = priv->plat->tx_fifo_size;
	struct stmmac_dma_conf *dma_conf;
	const int mtu = new_mtu;
	int ret;

	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	txfifosz /= priv->plat->tx_queues_to_use;

	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
		return -EINVAL;
	}

	new_mtu = STMMAC_ALIGN(new_mtu);

	/* If condition true, FIFO is too small or MTU too large */
	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
		return -EINVAL;

	if (netif_running(dev)) {
		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
		/* Try to allocate the new DMA conf with the new mtu */
		dma_conf = stmmac_setup_dma_desc(priv, mtu);
		if (IS_ERR(dma_conf)) {
			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
				   mtu);
			return PTR_ERR(dma_conf);
		}

		stmmac_release(dev);

		ret = __stmmac_open(dev, dma_conf);
		if (ret) {
			free_dma_desc_resources(priv, dma_conf);
			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
			return ret;
		}

		stmmac_set_rx_mode(dev);
	}

	dev->mtu = mtu;
	netdev_update_features(dev);

	return 0;
}
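/* Added numeric example (assumption for illustration): with a 16 KB TX FIFO
 * shared across 4 TX queues, txfifosz works out to 4096 bytes per queue, so
 * any MTU whose STMMAC_ALIGN()ed size exceeds 4096 (or BUF_SIZE_16KiB
 * overall) is rejected by the check above with -EINVAL.
 */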
static netdev_features_t stmmac_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
		features &= ~NETIF_F_RXCSUM;

	if (!priv->plat->tx_coe)
		features &= ~NETIF_F_CSUM_MASK;

	/* Some GMAC devices have a bugged Jumbo frame support that
	 * needs to have the Tx COE disabled for oversized frames
	 * (due to limited buffer sizes). In this case we disable
	 * the TX csum insertion in the TDES and not use SF.
	 */
	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
		features &= ~NETIF_F_CSUM_MASK;

	/* Disable tso if asked by ethtool */
	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
		if (features & NETIF_F_TSO)
			priv->tso = true;
		else
			priv->tso = false;
	}

	return features;
}
static int stmmac_set_features(struct net_device *netdev,
			       netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(netdev);

	/* Keep the COE Type in case csum is supported */
	if (features & NETIF_F_RXCSUM)
		priv->hw->rx_csum = priv->plat->rx_coe;
	else
		priv->hw->rx_csum = 0;
	/* No check needed because rx_coe has been set before and it will be
	 * fixed in case of issue.
	 */
	stmmac_rx_ipc(priv, priv->hw);

	if (priv->sph_cap) {
		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
		u32 chan;

		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
	}

	return 0;
}
5825 static void stmmac_fpe_event_status(struct stmmac_priv
*priv
, int status
)
5827 struct stmmac_fpe_cfg
*fpe_cfg
= priv
->plat
->fpe_cfg
;
5828 enum stmmac_fpe_state
*lo_state
= &fpe_cfg
->lo_fpe_state
;
5829 enum stmmac_fpe_state
*lp_state
= &fpe_cfg
->lp_fpe_state
;
5830 bool *hs_enable
= &fpe_cfg
->hs_enable
;
5832 if (status
== FPE_EVENT_UNKNOWN
|| !*hs_enable
)
5835 /* If LP has sent verify mPacket, LP is FPE capable */
5836 if ((status
& FPE_EVENT_RVER
) == FPE_EVENT_RVER
) {
5837 if (*lp_state
< FPE_STATE_CAPABLE
)
5838 *lp_state
= FPE_STATE_CAPABLE
;
5840 /* If user has requested FPE enable, quickly response */
5842 stmmac_fpe_send_mpacket(priv
, priv
->ioaddr
,
5847 /* If Local has sent verify mPacket, Local is FPE capable */
5848 if ((status
& FPE_EVENT_TVER
) == FPE_EVENT_TVER
) {
5849 if (*lo_state
< FPE_STATE_CAPABLE
)
5850 *lo_state
= FPE_STATE_CAPABLE
;
5853 /* If LP has sent response mPacket, LP is entering FPE ON */
5854 if ((status
& FPE_EVENT_RRSP
) == FPE_EVENT_RRSP
)
5855 *lp_state
= FPE_STATE_ENTERING_ON
;
5857 /* If Local has sent response mPacket, Local is entering FPE ON */
5858 if ((status
& FPE_EVENT_TRSP
) == FPE_EVENT_TRSP
)
5859 *lo_state
= FPE_STATE_ENTERING_ON
;
5861 if (!test_bit(__FPE_REMOVING
, &priv
->fpe_task_state
) &&
5862 !test_and_set_bit(__FPE_TASK_SCHED
, &priv
->fpe_task_state
) &&
5864 queue_work(priv
->fpe_wq
, &priv
->fpe_task
);
5868 static void stmmac_common_interrupt(struct stmmac_priv
*priv
)
5870 u32 rx_cnt
= priv
->plat
->rx_queues_to_use
;
5871 u32 tx_cnt
= priv
->plat
->tx_queues_to_use
;
5876 xmac
= priv
->plat
->has_gmac4
|| priv
->plat
->has_xgmac
;
5877 queues_count
= (rx_cnt
> tx_cnt
) ? rx_cnt
: tx_cnt
;
5880 pm_wakeup_event(priv
->device
, 0);
5882 if (priv
->dma_cap
.estsel
)
5883 stmmac_est_irq_status(priv
, priv
->ioaddr
, priv
->dev
,
5884 &priv
->xstats
, tx_cnt
);
5886 if (priv
->dma_cap
.fpesel
) {
5887 int status
= stmmac_fpe_irq_status(priv
, priv
->ioaddr
,
5890 stmmac_fpe_event_status(priv
, status
);
5893 /* To handle GMAC own interrupts */
5894 if ((priv
->plat
->has_gmac
) || xmac
) {
5895 int status
= stmmac_host_irq_status(priv
, priv
->hw
, &priv
->xstats
);
5897 if (unlikely(status
)) {
5898 /* For LPI we need to save the tx status */
5899 if (status
& CORE_IRQ_TX_PATH_IN_LPI_MODE
)
5900 priv
->tx_path_in_lpi_mode
= true;
5901 if (status
& CORE_IRQ_TX_PATH_EXIT_LPI_MODE
)
5902 priv
->tx_path_in_lpi_mode
= false;
5905 for (queue
= 0; queue
< queues_count
; queue
++) {
5906 status
= stmmac_host_mtl_irq_status(priv
, priv
->hw
,
5910 /* PCS link status */
5911 if (priv
->hw
->pcs
&&
5912 !(priv
->plat
->flags
& STMMAC_FLAG_HAS_INTEGRATED_PCS
)) {
5913 if (priv
->xstats
.pcs_link
)
5914 netif_carrier_on(priv
->dev
);
5916 netif_carrier_off(priv
->dev
);
5919 stmmac_timestamp_interrupt(priv
, priv
);
/**
 *  stmmac_interrupt - main ISR
 *  @irq: interrupt number.
 *  @dev_id: to pass the net device pointer.
 *  Description: this is the main driver interrupt service routine.
 *  It can call:
 *  o DMA service routine (to manage incoming frame reception and
 *    transmission status).
 *  o Core interrupts to manage: remote wake-up, management counter, LPI
 *    interrupts.
 */
static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Check if adapter is up */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return IRQ_HANDLED;

	/* Check if a fatal error happened */
	if (stmmac_safety_feat_interrupt(priv))
		return IRQ_HANDLED;

	/* To handle Common interrupts */
	stmmac_common_interrupt(priv);

	/* To handle DMA interrupts */
	stmmac_dma_interrupt(priv);

	return IRQ_HANDLED;
}
5956 static irqreturn_t
stmmac_mac_interrupt(int irq
, void *dev_id
)
5958 struct net_device
*dev
= (struct net_device
*)dev_id
;
5959 struct stmmac_priv
*priv
= netdev_priv(dev
);
5961 if (unlikely(!dev
)) {
5962 netdev_err(priv
->dev
, "%s: invalid dev pointer\n", __func__
);
5966 /* Check if adapter is up */
5967 if (test_bit(STMMAC_DOWN
, &priv
->state
))
5970 /* To handle Common interrupts */
5971 stmmac_common_interrupt(priv
);
5976 static irqreturn_t
stmmac_safety_interrupt(int irq
, void *dev_id
)
5978 struct net_device
*dev
= (struct net_device
*)dev_id
;
5979 struct stmmac_priv
*priv
= netdev_priv(dev
);
5981 if (unlikely(!dev
)) {
5982 netdev_err(priv
->dev
, "%s: invalid dev pointer\n", __func__
);
5986 /* Check if adapter is up */
5987 if (test_bit(STMMAC_DOWN
, &priv
->state
))
5990 /* Check if a fatal error happened */
5991 stmmac_safety_feat_interrupt(priv
);
5996 static irqreturn_t
stmmac_msi_intr_tx(int irq
, void *data
)
5998 struct stmmac_tx_queue
*tx_q
= (struct stmmac_tx_queue
*)data
;
5999 struct stmmac_dma_conf
*dma_conf
;
6000 int chan
= tx_q
->queue_index
;
6001 struct stmmac_priv
*priv
;
6004 dma_conf
= container_of(tx_q
, struct stmmac_dma_conf
, tx_queue
[chan
]);
6005 priv
= container_of(dma_conf
, struct stmmac_priv
, dma_conf
);
6007 if (unlikely(!data
)) {
6008 netdev_err(priv
->dev
, "%s: invalid dev pointer\n", __func__
);
6012 /* Check if adapter is up */
6013 if (test_bit(STMMAC_DOWN
, &priv
->state
))
6016 status
= stmmac_napi_check(priv
, chan
, DMA_DIR_TX
);
6018 if (unlikely(status
& tx_hard_error_bump_tc
)) {
6019 /* Try to bump up the dma threshold on this failure */
6020 stmmac_bump_dma_threshold(priv
, chan
);
6021 } else if (unlikely(status
== tx_hard_error
)) {
6022 stmmac_tx_err(priv
, chan
);
6028 static irqreturn_t
stmmac_msi_intr_rx(int irq
, void *data
)
6030 struct stmmac_rx_queue
*rx_q
= (struct stmmac_rx_queue
*)data
;
6031 struct stmmac_dma_conf
*dma_conf
;
6032 int chan
= rx_q
->queue_index
;
6033 struct stmmac_priv
*priv
;
6035 dma_conf
= container_of(rx_q
, struct stmmac_dma_conf
, rx_queue
[chan
]);
6036 priv
= container_of(dma_conf
, struct stmmac_priv
, dma_conf
);
6038 if (unlikely(!data
)) {
6039 netdev_err(priv
->dev
, "%s: invalid dev pointer\n", __func__
);
6043 /* Check if adapter is up */
6044 if (test_bit(STMMAC_DOWN
, &priv
->state
))
6047 stmmac_napi_check(priv
, chan
, DMA_DIR_RX
);
/**
 *  stmmac_ioctl - Entry point for the Ioctl
 *  @dev: Device pointer.
 *  @rq: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  @cmd: IOCTL command
 *  Description:
 *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
 */
6061 static int stmmac_ioctl(struct net_device
*dev
, struct ifreq
*rq
, int cmd
)
6063 struct stmmac_priv
*priv
= netdev_priv (dev
);
6064 int ret
= -EOPNOTSUPP
;
6066 if (!netif_running(dev
))
6073 ret
= phylink_mii_ioctl(priv
->phylink
, rq
, cmd
);
6076 ret
= stmmac_hwtstamp_set(dev
, rq
);
6079 ret
= stmmac_hwtstamp_get(dev
, rq
);
6088 static int stmmac_setup_tc_block_cb(enum tc_setup_type type
, void *type_data
,
6091 struct stmmac_priv
*priv
= cb_priv
;
6092 int ret
= -EOPNOTSUPP
;
6094 if (!tc_cls_can_offload_and_chain0(priv
->dev
, type_data
))
6097 __stmmac_disable_all_queues(priv
);
6100 case TC_SETUP_CLSU32
:
6101 ret
= stmmac_tc_setup_cls_u32(priv
, priv
, type_data
);
6103 case TC_SETUP_CLSFLOWER
:
6104 ret
= stmmac_tc_setup_cls(priv
, priv
, type_data
);
6110 stmmac_enable_all_queues(priv
);
6114 static LIST_HEAD(stmmac_block_cb_list
);
6116 static int stmmac_setup_tc(struct net_device
*ndev
, enum tc_setup_type type
,
6119 struct stmmac_priv
*priv
= netdev_priv(ndev
);
6123 return stmmac_tc_query_caps(priv
, priv
, type_data
);
6124 case TC_SETUP_BLOCK
:
6125 return flow_block_cb_setup_simple(type_data
,
6126 &stmmac_block_cb_list
,
6127 stmmac_setup_tc_block_cb
,
6129 case TC_SETUP_QDISC_CBS
:
6130 return stmmac_tc_setup_cbs(priv
, priv
, type_data
);
6131 case TC_SETUP_QDISC_TAPRIO
:
6132 return stmmac_tc_setup_taprio(priv
, priv
, type_data
);
6133 case TC_SETUP_QDISC_ETF
:
6134 return stmmac_tc_setup_etf(priv
, priv
, type_data
);
static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
			       struct net_device *sb_dev)
{
	int gso = skb_shinfo(skb)->gso_type;

	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
		/*
		 * There is no way to determine the number of TSO/USO
		 * capable Queues. Let's always use Queue 0, because if
		 * TSO/USO is supported then at least this one will be
		 * capable.
		 */
		return 0;
	}

	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
}
static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret = 0;

	ret = pm_runtime_resume_and_get(priv->device);
	if (ret < 0)
		return ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		goto set_mac_error;

	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);

set_mac_error:
	pm_runtime_put(priv->device);

	return ret;
}
6179 #ifdef CONFIG_DEBUG_FS
6180 static struct dentry
*stmmac_fs_dir
;
6182 static void sysfs_display_ring(void *head
, int size
, int extend_desc
,
6183 struct seq_file
*seq
, dma_addr_t dma_phy_addr
)
6186 struct dma_extended_desc
*ep
= (struct dma_extended_desc
*)head
;
6187 struct dma_desc
*p
= (struct dma_desc
*)head
;
6188 dma_addr_t dma_addr
;
6190 for (i
= 0; i
< size
; i
++) {
6192 dma_addr
= dma_phy_addr
+ i
* sizeof(*ep
);
6193 seq_printf(seq
, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6195 le32_to_cpu(ep
->basic
.des0
),
6196 le32_to_cpu(ep
->basic
.des1
),
6197 le32_to_cpu(ep
->basic
.des2
),
6198 le32_to_cpu(ep
->basic
.des3
));
6201 dma_addr
= dma_phy_addr
+ i
* sizeof(*p
);
6202 seq_printf(seq
, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6204 le32_to_cpu(p
->des0
), le32_to_cpu(p
->des1
),
6205 le32_to_cpu(p
->des2
), le32_to_cpu(p
->des3
));
6208 seq_printf(seq
, "\n");
6212 static int stmmac_rings_status_show(struct seq_file
*seq
, void *v
)
6214 struct net_device
*dev
= seq
->private;
6215 struct stmmac_priv
*priv
= netdev_priv(dev
);
6216 u32 rx_count
= priv
->plat
->rx_queues_to_use
;
6217 u32 tx_count
= priv
->plat
->tx_queues_to_use
;
6220 if ((dev
->flags
& IFF_UP
) == 0)
6223 for (queue
= 0; queue
< rx_count
; queue
++) {
6224 struct stmmac_rx_queue
*rx_q
= &priv
->dma_conf
.rx_queue
[queue
];
6226 seq_printf(seq
, "RX Queue %d:\n", queue
);
6228 if (priv
->extend_desc
) {
6229 seq_printf(seq
, "Extended descriptor ring:\n");
6230 sysfs_display_ring((void *)rx_q
->dma_erx
,
6231 priv
->dma_conf
.dma_rx_size
, 1, seq
, rx_q
->dma_rx_phy
);
6233 seq_printf(seq
, "Descriptor ring:\n");
6234 sysfs_display_ring((void *)rx_q
->dma_rx
,
6235 priv
->dma_conf
.dma_rx_size
, 0, seq
, rx_q
->dma_rx_phy
);
6239 for (queue
= 0; queue
< tx_count
; queue
++) {
6240 struct stmmac_tx_queue
*tx_q
= &priv
->dma_conf
.tx_queue
[queue
];
6242 seq_printf(seq
, "TX Queue %d:\n", queue
);
6244 if (priv
->extend_desc
) {
6245 seq_printf(seq
, "Extended descriptor ring:\n");
6246 sysfs_display_ring((void *)tx_q
->dma_etx
,
6247 priv
->dma_conf
.dma_tx_size
, 1, seq
, tx_q
->dma_tx_phy
);
6248 } else if (!(tx_q
->tbs
& STMMAC_TBS_AVAIL
)) {
6249 seq_printf(seq
, "Descriptor ring:\n");
6250 sysfs_display_ring((void *)tx_q
->dma_tx
,
6251 priv
->dma_conf
.dma_tx_size
, 0, seq
, tx_q
->dma_tx_phy
);
6257 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status
);
static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
{
	static const char * const dwxgmac_timestamp_source[] = {
		"None",
		"Internal",
		"External",
		"Both",
	};
	static const char * const dwxgmac_safety_feature_desc[] = {
		"No",
		"All Safety Features with ECC and Parity",
		"All Safety Features without ECC or Parity",
		"All Safety Features with Parity Only",
		"ECC Only",
		"UNDEFINED",
		"UNDEFINED",
		"UNDEFINED",
	};
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (!priv->hw_cap_support) {
		seq_printf(seq, "DMA HW features not supported\n");
		return 0;
	}

	seq_printf(seq, "==============================\n");
	seq_printf(seq, "\tDMA HW features\n");
	seq_printf(seq, "==============================\n");

	seq_printf(seq, "\t10/100 Mbps: %s\n",
		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
	seq_printf(seq, "\t1000 Mbps: %s\n",
		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
	seq_printf(seq, "\tHalf duplex: %s\n",
		   (priv->dma_cap.half_duplex) ? "Y" : "N");
	if (priv->plat->has_xgmac) {
		seq_printf(seq,
			   "\tNumber of Additional MAC address registers: %d\n",
			   priv->dma_cap.multi_addr);
	} else {
		seq_printf(seq, "\tHash Filter: %s\n",
			   (priv->dma_cap.hash_filter) ? "Y" : "N");
		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
			   (priv->dma_cap.multi_addr) ? "Y" : "N");
	}
	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
		   (priv->dma_cap.pcs) ? "Y" : "N");
	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
	seq_printf(seq, "\tPMT Remote wake up: %s\n",
		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
	seq_printf(seq, "\tPMT Magic Frame: %s\n",
		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
	seq_printf(seq, "\tRMON module: %s\n",
		   (priv->dma_cap.rmon) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
		   (priv->dma_cap.time_stamp) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
	if (priv->plat->has_xgmac)
		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
		   (priv->dma_cap.eee) ? "Y" : "N");
	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
		   (priv->dma_cap.tx_coe) ? "Y" : "N");
	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
	    priv->plat->has_xgmac) {
		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
			   (priv->dma_cap.rx_coe) ? "Y" : "N");
	} else {
		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
	}
	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
		   priv->dma_cap.number_rx_channel);
	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
		   priv->dma_cap.number_tx_channel);
	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
		   priv->dma_cap.number_rx_queues);
	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
		   priv->dma_cap.number_tx_queues);
	seq_printf(seq, "\tEnhanced descriptors: %s\n",
		   (priv->dma_cap.enh_desc) ? "Y" : "N");
	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
		   priv->dma_cap.pps_out_num);
	seq_printf(seq, "\tSafety Features: %s\n",
		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
	seq_printf(seq, "\tFlexible RX Parser: %s\n",
		   priv->dma_cap.frpsel ? "Y" : "N");
	seq_printf(seq, "\tEnhanced Addressing: %d\n",
		   priv->dma_cap.host_dma_width);
	seq_printf(seq, "\tReceive Side Scaling: %s\n",
		   priv->dma_cap.rssen ? "Y" : "N");
	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
		   priv->dma_cap.vlhash ? "Y" : "N");
	seq_printf(seq, "\tSplit Header: %s\n",
		   priv->dma_cap.sphen ? "Y" : "N");
	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
		   priv->dma_cap.vlins ? "Y" : "N");
	seq_printf(seq, "\tDouble VLAN: %s\n",
		   priv->dma_cap.dvlan ? "Y" : "N");
	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
		   priv->dma_cap.l3l4fnum);
	seq_printf(seq, "\tARP Offloading: %s\n",
		   priv->dma_cap.arpoffsel ? "Y" : "N");
	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
		   priv->dma_cap.estsel ? "Y" : "N");
	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
		   priv->dma_cap.fpesel ? "Y" : "N");
	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
		   priv->dma_cap.tbssel ? "Y" : "N");
	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
		   priv->dma_cap.tbs_ch_num);
	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
		   priv->dma_cap.sgfsel ? "Y" : "N");
	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
		   BIT(priv->dma_cap.ttsfd) >> 1);
	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
		   priv->dma_cap.numtc);
	seq_printf(seq, "\tDCB Feature: %s\n",
		   priv->dma_cap.dcben ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
		   priv->dma_cap.advthword ? "Y" : "N");
	seq_printf(seq, "\tPTP Offload: %s\n",
		   priv->dma_cap.ptoen ? "Y" : "N");
	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
		   priv->dma_cap.osten ? "Y" : "N");
	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
		   priv->dma_cap.pfcen ? "Y" : "N");
	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
		   BIT(priv->dma_cap.frpes) << 6);
	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
		   BIT(priv->dma_cap.frpbs) << 6);
	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
		   priv->dma_cap.frppipe_num);
	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
		   priv->dma_cap.nrvf_num ?
		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
	seq_printf(seq, "\tDepth of GCL: %lu\n",
		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
		   priv->dma_cap.cbtisel ? "Y" : "N");
	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
		   priv->dma_cap.aux_snapshot_n);
	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
		   priv->dma_cap.pou_ost_en ? "Y" : "N");
	seq_printf(seq, "\tEnhanced DMA: %s\n",
		   priv->dma_cap.edma ? "Y" : "N");
	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
		   priv->dma_cap.ediffc ? "Y" : "N");
	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
		   priv->dma_cap.vxn ? "Y" : "N");
	seq_printf(seq, "\tDebug Memory Interface: %s\n",
		   priv->dma_cap.dbgmem ? "Y" : "N");
	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
/* Use network device events to rename debugfs file entries.
 */
static int stmmac_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct stmmac_priv *priv = netdev_priv(dev);

	if (dev->netdev_ops != &stmmac_netdev_ops)
		goto done;

	switch (event) {
	case NETDEV_CHANGENAME:
		if (priv->dbgfs_dir)
			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
							 priv->dbgfs_dir,
							 stmmac_fs_dir,
							 dev->name);
		break;
	}
done:
	return NOTIFY_DONE;
}

static struct notifier_block stmmac_notifier = {
	.notifier_call = stmmac_device_event,
};
static void stmmac_init_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Create per netdev entries */
	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);

	/* Entry to report DMA RX/TX rings */
	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
			    &stmmac_rings_status_fops);

	/* Entry to report the DMA HW features */
	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
			    &stmmac_dma_cap_fops);
}
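/* Usage sketch (illustrative): assuming debugfs is mounted at
 * /sys/kernel/debug, the two entries created above can be read with e.g.
 *
 *	cat /sys/kernel/debug/stmmaceth/eth0/descriptors_status
 *	cat /sys/kernel/debug/stmmaceth/eth0/dma_cap
 *
 * where "stmmaceth" is the main directory created from
 * STMMAC_RESOURCE_NAME in stmmac_init() and "eth0" stands in for the
 * netdev name used for dbgfs_dir.
 */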
static void stmmac_exit_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	debugfs_remove_recursive(priv->dbgfs_dir);
}
#endif /* CONFIG_DEBUG_FS */
static u32 stmmac_vid_crc32_le(__le16 vid_le)
{
	unsigned char *data = (unsigned char *)&vid_le;
	unsigned char data_byte = 0;
	u32 crc = ~0x0;
	u32 temp = 0;
	int i, bits;

	bits = get_bitmask_order(VLAN_VID_MASK);
	for (i = 0; i < bits; i++) {
		if ((i % 8) == 0)
			data_byte = data[i / 8];

		temp = ((crc & 1) ^ data_byte) & 1;
		crc >>= 1;
		data_byte >>= 1;

		if (temp)
			crc ^= 0xedb88320;
	}

	return crc;
}
static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
{
	u32 crc, hash = 0;
	__le16 pmatch = 0;
	int count = 0;
	u16 vid = 0;

	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
		__le16 vid_le = cpu_to_le16(vid);
		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
		hash |= (1 << crc);
		count++;
	}

	if (!priv->dma_cap.vlhash) {
		if (count > 2) /* VID = 0 always passes filter */
			return -EOPNOTSUPP;

		pmatch = cpu_to_le16(vid);
		hash = 0;
	}

	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
}
static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	bool is_double = false;
	int ret;

	ret = pm_runtime_resume_and_get(priv->device);
	if (ret < 0)
		return ret;

	if (be16_to_cpu(proto) == ETH_P_8021AD)
		is_double = true;

	set_bit(vid, priv->active_vlans);
	ret = stmmac_vlan_update(priv, is_double);
	if (ret) {
		clear_bit(vid, priv->active_vlans);
		goto err_pm_put;
	}

	if (priv->hw->num_vlan) {
		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
		if (ret)
			goto err_pm_put;
	}
err_pm_put:
	pm_runtime_put(priv->device);

	return ret;
}
static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	bool is_double = false;
	int ret;

	ret = pm_runtime_resume_and_get(priv->device);
	if (ret < 0)
		return ret;

	if (be16_to_cpu(proto) == ETH_P_8021AD)
		is_double = true;

	clear_bit(vid, priv->active_vlans);

	if (priv->hw->num_vlan) {
		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
		if (ret)
			goto del_vlan_error;
	}

	ret = stmmac_vlan_update(priv, is_double);

del_vlan_error:
	pm_runtime_put(priv->device);

	return ret;
}
static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
	case XDP_SETUP_XSK_POOL:
		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
					     bpf->xsk.queue_id);
	default:
		return -EOPNOTSUPP;
	}
}
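/* Usage sketch (illustrative, not part of the driver): XDP_SETUP_PROG is
 * reached when userspace attaches an XDP program, for instance
 *
 *	ip link set dev eth0 xdp obj xdp_prog.o sec xdp
 *
 * while XDP_SETUP_XSK_POOL is reached when an AF_XDP socket binds a
 * UMEM pool to one of this device's queues (e.g. through
 * libxdp's xsk_socket__create()).
 */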
static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
			   struct xdp_frame **frames, u32 flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int cpu = smp_processor_id();
	struct netdev_queue *nq;
	int i, nxmit = 0;
	int queue;

	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
		return -ENETDOWN;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	queue = stmmac_xdp_get_tx_queue(priv, cpu);
	nq = netdev_get_tx_queue(priv->dev, queue);

	__netif_tx_lock(nq, cpu);
	/* Avoids TX time-out as we are sharing with slow path */
	txq_trans_cond_update(nq);

	for (i = 0; i < num_frames; i++) {
		int res;

		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
		if (res == STMMAC_XDP_CONSUMED)
			break;

		nxmit++;
	}

	if (flags & XDP_XMIT_FLUSH) {
		stmmac_flush_tx_descriptors(priv, queue);
		stmmac_tx_timer_arm(priv, queue);
	}

	__netif_tx_unlock(nq);

	return nxmit;
}
void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
	spin_unlock_irqrestore(&ch->lock, flags);

	stmmac_stop_rx_dma(priv, queue);
	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
}
void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;
	u32 buf_size;
	int ret;

	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
	if (ret) {
		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
		return;
	}

	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
	if (ret) {
		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
		netdev_err(priv->dev, "Failed to init RX desc.\n");
		return;
	}

	stmmac_reset_rx_queue(priv, queue);
	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);

	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
			    rx_q->dma_rx_phy, rx_q->queue_index);

	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
						 sizeof(struct dma_desc));
	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
			       rx_q->rx_tail_addr, rx_q->queue_index);

	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
		stmmac_set_dma_bfsize(priv, priv->ioaddr,
				      buf_size,
				      rx_q->queue_index);
	} else {
		stmmac_set_dma_bfsize(priv, priv->ioaddr,
				      priv->dma_conf.dma_buf_sz,
				      rx_q->queue_index);
	}

	stmmac_start_rx_dma(priv, queue);

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
	spin_unlock_irqrestore(&ch->lock, flags);
}
void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
	spin_unlock_irqrestore(&ch->lock, flags);

	stmmac_stop_tx_dma(priv, queue);
	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
}
void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;
	int ret;

	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
	if (ret) {
		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
		return;
	}

	ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
	if (ret) {
		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
		netdev_err(priv->dev, "Failed to init TX desc.\n");
		return;
	}

	stmmac_reset_tx_queue(priv, queue);
	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);

	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
			    tx_q->dma_tx_phy, tx_q->queue_index);

	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);

	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
			       tx_q->tx_tail_addr, tx_q->queue_index);

	stmmac_start_tx_dma(priv, queue);

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
	spin_unlock_irqrestore(&ch->lock, flags);
}
void stmmac_xdp_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 chan;

	/* Ensure tx function is not running */
	netif_tx_disable(dev);

	/* Disable NAPI process */
	stmmac_disable_all_queues(priv);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

	/* Free the IRQ lines */
	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);

	/* Stop TX/RX DMA channels */
	stmmac_stop_all_dma(priv);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv, &priv->dma_conf);

	/* Disable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, false);

	/* set trans_start so we don't get spurious
	 * watchdogs during reset
	 */
	netif_trans_update(dev);
	netif_carrier_off(dev);
}
int stmmac_xdp_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
	struct stmmac_rx_queue *rx_q;
	struct stmmac_tx_queue *tx_q;
	u32 buf_size;
	bool sph_en;
	u32 chan;
	int ret;

	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
	if (ret < 0) {
		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
			   __func__);
		goto dma_desc_error;
	}

	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
	if (ret < 0) {
		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
			   __func__);
		goto init_error;
	}

	stmmac_reset_queues_param(priv);

	/* DMA CSR Channel configuration */
	for (chan = 0; chan < dma_csr_ch; chan++) {
		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
	}

	/* Adjust Split header */
	sph_en = (priv->hw->rx_csum > 0) && priv->sph;

	/* DMA RX Channel Configuration */
	for (chan = 0; chan < rx_cnt; chan++) {
		rx_q = &priv->dma_conf.rx_queue[chan];

		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    rx_q->dma_rx_phy, chan);

		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
				     (rx_q->buf_alloc_num *
				      sizeof(struct dma_desc));
		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
				       rx_q->rx_tail_addr, chan);

		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
			stmmac_set_dma_bfsize(priv, priv->ioaddr,
					      buf_size,
					      rx_q->queue_index);
		} else {
			stmmac_set_dma_bfsize(priv, priv->ioaddr,
					      priv->dma_conf.dma_buf_sz,
					      rx_q->queue_index);
		}

		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
	}

	/* DMA TX Channel Configuration */
	for (chan = 0; chan < tx_cnt; chan++) {
		tx_q = &priv->dma_conf.tx_queue[chan];

		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    tx_q->dma_tx_phy, chan);

		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
				       tx_q->tx_tail_addr, chan);

		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		tx_q->txtimer.function = stmmac_tx_timer;
	}

	/* Enable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, true);

	/* Start Rx & Tx DMA Channels */
	stmmac_start_all_dma(priv);

	ret = stmmac_request_irq(dev);
	if (ret)
		goto irq_error;

	/* Enable NAPI process*/
	stmmac_enable_all_queues(priv);
	netif_carrier_on(dev);
	netif_tx_start_all_queues(dev);
	stmmac_enable_all_dma_irq(priv);

	return 0;

irq_error:
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

	stmmac_hw_teardown(dev);
init_error:
	free_dma_desc_resources(priv, &priv->dma_conf);
dma_desc_error:
	return ret;
}
int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct stmmac_rx_queue *rx_q;
	struct stmmac_tx_queue *tx_q;
	struct stmmac_channel *ch;

	if (test_bit(STMMAC_DOWN, &priv->state) ||
	    !netif_carrier_ok(priv->dev))
		return -ENETDOWN;

	if (!stmmac_xdp_is_enabled(priv))
		return -EINVAL;

	if (queue >= priv->plat->rx_queues_to_use ||
	    queue >= priv->plat->tx_queues_to_use)
		return -EINVAL;

	rx_q = &priv->dma_conf.rx_queue[queue];
	tx_q = &priv->dma_conf.tx_queue[queue];
	ch = &priv->channel[queue];

	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
		return -EINVAL;

	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
		/* EQoS does not have per-DMA channel SW interrupt,
		 * so we schedule RX Napi straight-away.
		 */
		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
			__napi_schedule(&ch->rxtx_napi);
	}

	return 0;
}
static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	unsigned int start;
	int q;

	for (q = 0; q < tx_cnt; q++) {
		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
		u64 tx_packets;
		u64 tx_bytes;

		do {
			start = u64_stats_fetch_begin(&txq_stats->syncp);
			tx_packets = txq_stats->tx_packets;
			tx_bytes   = txq_stats->tx_bytes;
		} while (u64_stats_fetch_retry(&txq_stats->syncp, start));

		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
	}

	for (q = 0; q < rx_cnt; q++) {
		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
		u64 rx_packets;
		u64 rx_bytes;

		do {
			start = u64_stats_fetch_begin(&rxq_stats->syncp);
			rx_packets = rxq_stats->rx_packets;
			rx_bytes   = rxq_stats->rx_bytes;
		} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
	}

	stats->rx_dropped = priv->xstats.rx_dropped;
	stats->rx_errors = priv->xstats.rx_errors;
	stats->tx_dropped = priv->xstats.tx_dropped;
	stats->tx_errors = priv->xstats.tx_errors;
	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
	stats->rx_length_errors = priv->xstats.rx_length;
	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
}
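/* Usage sketch (illustrative): these aggregated counters are what
 * userspace sees through the standard rtnetlink statistics, e.g.
 *
 *	ip -s link show dev eth0
 *
 * The u64_stats_fetch_begin()/u64_stats_fetch_retry() loop re-reads a
 * queue's counters until no writer updated them in between, so the
 * 64-bit values stay consistent on 32-bit hosts without taking a lock
 * in the hot path.
 */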
static const struct net_device_ops stmmac_netdev_ops = {
	.ndo_open = stmmac_open,
	.ndo_start_xmit = stmmac_xmit,
	.ndo_stop = stmmac_release,
	.ndo_change_mtu = stmmac_change_mtu,
	.ndo_fix_features = stmmac_fix_features,
	.ndo_set_features = stmmac_set_features,
	.ndo_set_rx_mode = stmmac_set_rx_mode,
	.ndo_tx_timeout = stmmac_tx_timeout,
	.ndo_eth_ioctl = stmmac_ioctl,
	.ndo_get_stats64 = stmmac_get_stats64,
	.ndo_setup_tc = stmmac_setup_tc,
	.ndo_select_queue = stmmac_select_queue,
	.ndo_set_mac_address = stmmac_set_mac_address,
	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
	.ndo_bpf = stmmac_bpf,
	.ndo_xdp_xmit = stmmac_xdp_xmit,
	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
};
static void stmmac_reset_subtask(struct stmmac_priv *priv)
{
	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
		return;
	if (test_bit(STMMAC_DOWN, &priv->state))
		return;

	netdev_err(priv->dev, "Reset adapter.\n");

	rtnl_lock();
	netif_trans_update(priv->dev);
	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
		usleep_range(1000, 2000);

	set_bit(STMMAC_DOWN, &priv->state);
	dev_close(priv->dev);
	dev_open(priv->dev, NULL);
	clear_bit(STMMAC_DOWN, &priv->state);
	clear_bit(STMMAC_RESETING, &priv->state);
	rtnl_unlock();
}
static void stmmac_service_task(struct work_struct *work)
{
	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
						service_task);

	stmmac_reset_subtask(priv);
	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
}
/**
 * stmmac_hw_init - Init the MAC device
 * @priv: driver private structure
 * Description: this function is to configure the MAC device according to
 * some platform parameters or the HW capability register. It prepares the
 * driver to use either ring or chain modes and to setup either enhanced or
 * normal descriptors.
 */
static int stmmac_hw_init(struct stmmac_priv *priv)
{
	int ret;

	/* dwmac-sun8i only work in chain mode */
	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
		chain_mode = 1;
	priv->chain_mode = chain_mode;

	/* Initialize HW Interface */
	ret = stmmac_hwif_init(priv);
	if (ret)
		return ret;

	/* Get the HW capability (new GMAC newer than 3.50a) */
	priv->hw_cap_support = stmmac_get_hw_features(priv);
	if (priv->hw_cap_support) {
		dev_info(priv->device, "DMA HW capability register supported\n");

		/* We can override some gmac/dma configuration fields: e.g.
		 * enh_desc, tx_coe (e.g. that are passed through the
		 * platform) with the values from the HW capability
		 * register (if supported).
		 */
		priv->plat->enh_desc = priv->dma_cap.enh_desc;
		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
		priv->hw->pmt = priv->plat->pmt;
		if (priv->dma_cap.hash_tb_sz) {
			priv->hw->multicast_filter_bins =
					(BIT(priv->dma_cap.hash_tb_sz) << 5);
			priv->hw->mcast_bits_log2 =
					ilog2(priv->hw->multicast_filter_bins);
		}

		/* TXCOE doesn't work in thresh DMA mode */
		if (priv->plat->force_thresh_dma_mode)
			priv->plat->tx_coe = 0;
		else
			priv->plat->tx_coe = priv->dma_cap.tx_coe;

		/* In case of GMAC4 rx_coe is from HW cap register. */
		priv->plat->rx_coe = priv->dma_cap.rx_coe;

		if (priv->dma_cap.rx_coe_type2)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
		else if (priv->dma_cap.rx_coe_type1)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;

	} else {
		dev_info(priv->device, "No HW DMA feature register supported\n");
	}

	if (priv->plat->rx_coe) {
		priv->hw->rx_csum = priv->plat->rx_coe;
		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
		if (priv->synopsys_id < DWMAC_CORE_4_00)
			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
	}
	if (priv->plat->tx_coe)
		dev_info(priv->device, "TX Checksum insertion supported\n");

	if (priv->plat->pmt) {
		dev_info(priv->device, "Wake-Up On Lan supported\n");
		device_set_wakeup_capable(priv->device, 1);
	}

	if (priv->dma_cap.tsoen)
		dev_info(priv->device, "TSO supported\n");

	priv->hw->vlan_fail_q_en =
		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;

	/* Run HW quirks, if any */
	if (priv->hwif_quirks) {
		ret = priv->hwif_quirks(priv);
		if (ret)
			return ret;
	}

	/* Rx Watchdog is available in the COREs newer than the 3.40.
	 * In some case, for example on bugged HW this feature
	 * has to be disable and this can be done by passing the
	 * riwt_off field from the platform.
	 */
	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
	     (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
		priv->use_riwt = 1;
		dev_info(priv->device,
			 "Enable RX Mitigation via HW Watchdog Timer\n");
	}

	return 0;
}
static void stmmac_napi_add(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 queue, maxq;

	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		ch->priv_data = priv;
		ch->index = queue;
		spin_lock_init(&ch->lock);

		if (queue < priv->plat->rx_queues_to_use) {
			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
		}
		if (queue < priv->plat->tx_queues_to_use) {
			netif_napi_add_tx(dev, &ch->tx_napi,
					  stmmac_napi_poll_tx);
		}
		if (queue < priv->plat->rx_queues_to_use &&
		    queue < priv->plat->tx_queues_to_use) {
			netif_napi_add(dev, &ch->rxtx_napi,
				       stmmac_napi_poll_rxtx);
		}
	}
}
static void stmmac_napi_del(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 queue, maxq;

	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < priv->plat->rx_queues_to_use)
			netif_napi_del(&ch->rx_napi);
		if (queue < priv->plat->tx_queues_to_use)
			netif_napi_del(&ch->tx_napi);
		if (queue < priv->plat->rx_queues_to_use &&
		    queue < priv->plat->tx_queues_to_use) {
			netif_napi_del(&ch->rxtx_napi);
		}
	}
}
int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = 0, i;

	if (netif_running(dev))
		stmmac_release(dev);

	stmmac_napi_del(dev);

	priv->plat->rx_queues_to_use = rx_cnt;
	priv->plat->tx_queues_to_use = tx_cnt;
	if (!netif_is_rxfh_configured(dev))
		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
									rx_cnt);

	stmmac_set_half_duplex(priv);
	stmmac_napi_add(dev);

	if (netif_running(dev))
		ret = stmmac_open(dev);

	return ret;
}
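/* Usage sketch (illustrative): this path is driven from the ethtool
 * channels API (set_channels), e.g.
 *
 *	ethtool -L eth0 rx 4 tx 4
 *
 * The interface is torn down, NAPI contexts are re-created for the new
 * queue counts, the RSS table is re-spread over the new RX queues, and
 * the interface is re-opened if it was running.
 */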
int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = 0;

	if (netif_running(dev))
		stmmac_release(dev);

	priv->dma_conf.dma_rx_size = rx_size;
	priv->dma_conf.dma_tx_size = tx_size;

	if (netif_running(dev))
		ret = stmmac_open(dev);

	return ret;
}
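/* Usage sketch (illustrative): this is the backend of the ethtool ring
 * parameter API (set_ringparam), e.g.
 *
 *	ethtool -G eth0 rx 1024 tx 1024
 *
 * The new sizes only take effect through the close/open cycle above,
 * since the descriptor rings are allocated when the interface is opened.
 */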
#define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
static void stmmac_fpe_lp_task(struct work_struct *work)
{
	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
						fpe_task);
	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
	bool *hs_enable = &fpe_cfg->hs_enable;
	bool *enable = &fpe_cfg->enable;
	int retries = 20;

	while (retries-- > 0) {
		/* Bail out immediately if FPE handshake is OFF */
		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
			break;

		if (*lo_state == FPE_STATE_ENTERING_ON &&
		    *lp_state == FPE_STATE_ENTERING_ON) {
			stmmac_fpe_configure(priv, priv->ioaddr,
					     fpe_cfg,
					     priv->plat->tx_queues_to_use,
					     priv->plat->rx_queues_to_use,
					     *enable);

			netdev_info(priv->dev, "configured FPE\n");

			*lo_state = FPE_STATE_ON;
			*lp_state = FPE_STATE_ON;
			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
			break;
		}

		if ((*lo_state == FPE_STATE_CAPABLE ||
		     *lo_state == FPE_STATE_ENTERING_ON) &&
		    *lp_state != FPE_STATE_ON) {
			netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT,
				    *lo_state, *lp_state);
			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
						fpe_cfg, MPACKET_VERIFY);
		}
		/* Sleep then retry */
		msleep(500);
	}

	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
}
void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
{
	if (priv->plat->fpe_cfg->hs_enable != enable) {
		if (enable) {
			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
						priv->plat->fpe_cfg,
						MPACKET_VERIFY);
		} else {
			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
		}

		priv->plat->fpe_cfg->hs_enable = enable;
	}
}
static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
{
	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
	struct dma_desc *desc_contains_ts = ctx->desc;
	struct stmmac_priv *priv = ctx->priv;
	struct dma_desc *ndesc = ctx->ndesc;
	struct dma_desc *desc = ctx->desc;
	u64 ns = 0;

	if (!priv->hwts_rx_en)
		return -ENODATA;

	/* For GMAC4, the valid timestamp is from CTX next desc. */
	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
		desc_contains_ts = ndesc;

	/* Check if timestamp is available */
	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
		ns -= priv->plat->cdc_error_adj;
		*timestamp = ns_to_ktime(ns);
		return 0;
	}

	return -ENODATA;
}

static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
	.xmo_rx_timestamp = stmmac_xdp_rx_timestamp,
};
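/* Usage sketch (assumption, for illustration only): an XDP program can
 * consume this RX timestamp through the XDP hints kfunc, roughly:
 *
 *	__u64 ts;
 *
 *	if (!bpf_xdp_metadata_rx_timestamp(ctx, &ts))
 *		;	// ts now holds the HW RX timestamp in nanoseconds
 *
 * bpf_xdp_metadata_rx_timestamp() resolves to the .xmo_rx_timestamp
 * callback above when the program runs on this device.
 */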
/**
 * stmmac_dvr_probe
 * @device: device pointer
 * @plat_dat: platform data pointer
 * @res: stmmac resource pointer
 * Description: this is the main probe function used to
 * call the alloc_etherdev, allocate the priv structure.
 * Return:
 * returns 0 on success, otherwise errno.
 */
int stmmac_dvr_probe(struct device *device,
		     struct plat_stmmacenet_data *plat_dat,
		     struct stmmac_resources *res)
{
	struct net_device *ndev = NULL;
	struct stmmac_priv *priv;
	u32 rxq;
	int i, ret = 0;

	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, device);

	priv = netdev_priv(ndev);
	priv->device = device;
	priv->dev = ndev;

	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
		u64_stats_init(&priv->xstats.rxq_stats[i].syncp);
	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
		u64_stats_init(&priv->xstats.txq_stats[i].syncp);

	stmmac_set_ethtool_ops(ndev);
	priv->pause = pause;
	priv->plat = plat_dat;
	priv->ioaddr = res->addr;
	priv->dev->base_addr = (unsigned long)res->addr;
	priv->plat->dma_cfg->multi_msi_en =
		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);

	priv->dev->irq = res->irq;
	priv->wol_irq = res->wol_irq;
	priv->lpi_irq = res->lpi_irq;
	priv->sfty_ce_irq = res->sfty_ce_irq;
	priv->sfty_ue_irq = res->sfty_ue_irq;
	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
		priv->rx_irq[i] = res->rx_irq[i];
	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
		priv->tx_irq[i] = res->tx_irq[i];

	if (!is_zero_ether_addr(res->mac))
		eth_hw_addr_set(priv->dev, res->mac);

	dev_set_drvdata(device, priv->dev);

	/* Verify driver arguments */
	stmmac_verify_args();

	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
	if (!priv->af_xdp_zc_qps)
		return -ENOMEM;

	/* Allocate workqueue */
	priv->wq = create_singlethread_workqueue("stmmac_wq");
	if (!priv->wq) {
		dev_err(priv->device, "failed to create workqueue\n");
		ret = -ENOMEM;
		goto error_wq_init;
	}

	INIT_WORK(&priv->service_task, stmmac_service_task);

	/* Initialize Link Partner FPE workqueue */
	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);

	/* Override with kernel parameters if supplied XXX CRS XXX
	 * this needs to have multiple instances
	 */
	if ((phyaddr >= 0) && (phyaddr <= 31))
		priv->plat->phy_addr = phyaddr;

	if (priv->plat->stmmac_rst) {
		ret = reset_control_assert(priv->plat->stmmac_rst);
		reset_control_deassert(priv->plat->stmmac_rst);
		/* Some reset controllers have only reset callback instead of
		 * assert + deassert callbacks pair.
		 */
		if (ret == -ENOTSUPP)
			reset_control_reset(priv->plat->stmmac_rst);
	}

	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
	if (ret == -ENOTSUPP)
		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
			ERR_PTR(ret));

	/* Init MAC and get the capabilities */
	ret = stmmac_hw_init(priv);
	if (ret)
		goto error_hw_init;

	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
	 */
	if (priv->synopsys_id < DWMAC_CORE_5_20)
		priv->plat->dma_cfg->dche = false;

	stmmac_check_ether_addr(priv);

	ndev->netdev_ops = &stmmac_netdev_ops;

	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_RXCSUM;
	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
			     NETDEV_XDP_ACT_XSK_ZEROCOPY;

	ret = stmmac_tc_init(priv, priv);
	if (!ret) {
		ndev->hw_features |= NETIF_F_HW_TC;
	}

	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
		if (priv->plat->has_gmac4)
			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
		priv->tso = true;
		dev_info(priv->device, "TSO feature enabled\n");
	}

	if (priv->dma_cap.sphen &&
	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
		ndev->hw_features |= NETIF_F_GRO;
		priv->sph_cap = true;
		priv->sph = priv->sph_cap;
		dev_info(priv->device, "SPH feature enabled\n");
	}

	/* Ideally our host DMA address width is the same as for the
	 * device. However, it may differ and then we have to use our
	 * host DMA width for allocation and the device DMA width for
	 * register handling.
	 */
	if (priv->plat->host_dma_width)
		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
	else
		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;

	if (priv->dma_cap.host_dma_width) {
		ret = dma_set_mask_and_coherent(device,
				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
		if (!ret) {
			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);

			/*
			 * If more than 32 bits can be addressed, make sure to
			 * enable enhanced addressing mode.
			 */
			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
				priv->plat->dma_cfg->eame = true;
		} else {
			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
			if (ret) {
				dev_err(priv->device, "Failed to set DMA Mask\n");
				goto error_hw_init;
			}

			priv->dma_cap.host_dma_width = 32;
		}
	}

	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
	/* Both mac100 and gmac support receive VLAN tag detection */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
	if (priv->dma_cap.vlhash) {
		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
	}
	if (priv->dma_cap.vlins) {
		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
		if (priv->dma_cap.dvlan)
			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
	}
#endif
	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	priv->xstats.threshold = tc;

	/* Initialize RSS */
	rxq = priv->plat->rx_queues_to_use;
	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);

	if (priv->dma_cap.rssen && priv->plat->rss_en)
		ndev->features |= NETIF_F_RXHASH;

	ndev->vlan_features |= ndev->features;
	/* TSO doesn't work on VLANs yet */
	ndev->vlan_features &= ~NETIF_F_TSO;

	/* MTU range: 46 - hw-specific max */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	if (priv->plat->has_xgmac)
		ndev->max_mtu = XGMAC_JUMBO_LEN;
	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
		ndev->max_mtu = JUMBO_LEN;
	else
		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
	 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
	 */
	if ((priv->plat->maxmtu < ndev->max_mtu) &&
	    (priv->plat->maxmtu >= ndev->min_mtu))
		ndev->max_mtu = priv->plat->maxmtu;
	else if (priv->plat->maxmtu < ndev->min_mtu)
		dev_warn(priv->device,
			 "%s: warning: maxmtu having invalid value (%d)\n",
			 __func__, priv->plat->maxmtu);

	if (flow_ctrl)
		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */

	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	/* Setup channels NAPI */
	stmmac_napi_add(ndev);

	mutex_init(&priv->lock);

	/* If a specific clk_csr value is passed from the platform
	 * this means that the CSR Clock Range selection cannot be
	 * changed at run-time and it is fixed. Viceversa the driver'll try to
	 * set the MDC clock dynamically according to the csr actual
	 * clock input.
	 */
	if (priv->plat->clk_csr >= 0)
		priv->clk_csr = priv->plat->clk_csr;
	else
		stmmac_clk_csr_set(priv);

	stmmac_check_pcs_mode(priv);

	pm_runtime_get_noresume(device);
	pm_runtime_set_active(device);
	if (!pm_runtime_enabled(device))
		pm_runtime_enable(device);

	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI) {
		/* MDIO bus Registration */
		ret = stmmac_mdio_register(ndev);
		if (ret < 0) {
			dev_err_probe(priv->device, ret,
				      "%s: MDIO bus (id: %d) registration failed\n",
				      __func__, priv->plat->bus_id);
			goto error_mdio_register;
		}
	}

	if (priv->plat->speed_mode_2500)
		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);

	if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
		ret = stmmac_xpcs_setup(priv->mii);
		if (ret)
			goto error_xpcs_setup;
	}

	ret = stmmac_phy_setup(priv);
	if (ret) {
		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
		goto error_phy_setup;
	}

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->device, "%s: ERROR %i registering the device\n",
			__func__, ret);
		goto error_netdev_register;
	}

#ifdef CONFIG_DEBUG_FS
	stmmac_init_fs(ndev);
#endif

	if (priv->plat->dump_debug_regs)
		priv->plat->dump_debug_regs(priv->plat->bsp_priv);

	/* Let pm_runtime_put() disable the clocks.
	 * If CONFIG_PM is not enabled, the clocks will stay powered.
	 */
	pm_runtime_put(device);

	return ret;

error_netdev_register:
	phylink_destroy(priv->phylink);
error_xpcs_setup:
error_phy_setup:
	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
error_mdio_register:
	stmmac_napi_del(ndev);
error_hw_init:
	destroy_workqueue(priv->wq);
error_wq_init:
	bitmap_free(priv->af_xdp_zc_qps);

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
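/* Glue sketch (assumption, simplified): platform drivers such as
 * dwmac-generic typically gather resources and platform data and then
 * hand off to stmmac_dvr_probe(), roughly:
 *
 *	static int my_dwmac_probe(struct platform_device *pdev)
 *	{
 *		struct plat_stmmacenet_data *plat_dat;
 *		struct stmmac_resources stmmac_res;
 *		int ret;
 *
 *		ret = stmmac_get_platform_resources(pdev, &stmmac_res);
 *		if (ret)
 *			return ret;
 *
 *		plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
 *		if (IS_ERR(plat_dat))
 *			return PTR_ERR(plat_dat);
 *
 *		return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 *	}
 *
 * The exact helper names and signatures live in stmmac_platform.c and may
 * differ between kernel versions; this is only meant to show where
 * stmmac_dvr_probe() sits in the probe flow.
 */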
/**
 * stmmac_dvr_remove
 * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC RX/TX
 * changes the link status, releases the DMA descriptor rings.
 */
void stmmac_dvr_remove(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	netdev_info(priv->dev, "%s: removing driver", __func__);

	pm_runtime_get_sync(dev);

	stmmac_stop_all_dma(priv);
	stmmac_mac_set(priv, priv->ioaddr, false);
	netif_carrier_off(ndev);
	unregister_netdev(ndev);

#ifdef CONFIG_DEBUG_FS
	stmmac_exit_fs(ndev);
#endif
	phylink_destroy(priv->phylink);
	if (priv->plat->stmmac_rst)
		reset_control_assert(priv->plat->stmmac_rst);
	reset_control_assert(priv->plat->stmmac_ahb_rst);
	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
	destroy_workqueue(priv->wq);
	mutex_destroy(&priv->lock);
	bitmap_free(priv->af_xdp_zc_qps);

	pm_runtime_disable(dev);
	pm_runtime_put_noidle(dev);
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
/**
 * stmmac_suspend - suspend callback
 * @dev: device pointer
 * Description: this is the function to suspend the device and it is called
 * by the platform driver to stop the network queue, release the resources,
 * program the PMT register (for WoL), clean and release driver resources.
 */
int stmmac_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	u32 chan;

	if (!ndev || !netif_running(ndev))
		return 0;

	mutex_lock(&priv->lock);

	netif_device_detach(ndev);

	stmmac_disable_all_queues(priv);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

	if (priv->eee_enabled) {
		priv->tx_path_in_lpi_mode = false;
		del_timer_sync(&priv->eee_ctrl_timer);
	}

	/* Stop TX/RX DMA */
	stmmac_stop_all_dma(priv);

	if (priv->plat->serdes_powerdown)
		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);

	/* Enable Power down mode by programming the PMT regs */
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		stmmac_pmt(priv, priv->hw, priv->wolopts);
		priv->irq_wake = 1;
	} else {
		stmmac_mac_set(priv, priv->ioaddr, false);
		pinctrl_pm_select_sleep_state(priv->device);
	}

	mutex_unlock(&priv->lock);

	rtnl_lock();
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		phylink_suspend(priv->phylink, true);
	} else {
		if (device_may_wakeup(priv->device))
			phylink_speed_down(priv->phylink, false);
		phylink_suspend(priv->phylink, false);
	}
	rtnl_unlock();

	if (priv->dma_cap.fpesel) {
		/* Disable FPE */
		stmmac_fpe_configure(priv, priv->ioaddr,
				     priv->plat->fpe_cfg,
				     priv->plat->tx_queues_to_use,
				     priv->plat->rx_queues_to_use, false);

		stmmac_fpe_handshake(priv, false);
		stmmac_fpe_stop_wq(priv);
	}

	priv->speed = SPEED_UNKNOWN;
	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);
static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];

	rx_q->cur_rx = 0;
	rx_q->dirty_rx = 0;
}

static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];

	tx_q->cur_tx = 0;
	tx_q->dirty_tx = 0;
	tx_q->mss = 0;

	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
}
/**
 * stmmac_reset_queues_param - reset queue parameters
 * @priv: device pointer
 */
static void stmmac_reset_queues_param(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < rx_cnt; queue++)
		stmmac_reset_rx_queue(priv, queue);

	for (queue = 0; queue < tx_cnt; queue++)
		stmmac_reset_tx_queue(priv, queue);
}
/**
 * stmmac_resume - resume callback
 * @dev: device pointer
 * Description: when resume this function is invoked to setup the DMA and CORE
 * in a usable state.
 */
int stmmac_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret;

	if (!netif_running(ndev))
		return 0;

	/* Power Down bit, into the PM register, is cleared
	 * automatically as soon as a magic packet or a Wake-up frame
	 * is received. Anyway, it's better to manually clear
	 * this bit because it can generate problems while resuming
	 * from another devices (e.g. serial console).
	 */
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		mutex_lock(&priv->lock);
		stmmac_pmt(priv, priv->hw, 0);
		mutex_unlock(&priv->lock);
		priv->irq_wake = 0;
	} else {
		pinctrl_pm_select_default_state(priv->device);
		/* reset the phy so that it's ready */
		if (priv->mii)
			stmmac_mdio_reset(priv->mii);
	}

	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
	    priv->plat->serdes_powerup) {
		ret = priv->plat->serdes_powerup(ndev,
						 priv->plat->bsp_priv);
		if (ret < 0)
			return ret;
	}

	rtnl_lock();
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		phylink_resume(priv->phylink);
	} else {
		phylink_resume(priv->phylink);
		if (device_may_wakeup(priv->device))
			phylink_speed_up(priv->phylink);
	}
	rtnl_unlock();

	rtnl_lock();
	mutex_lock(&priv->lock);

	stmmac_reset_queues_param(priv);

	stmmac_free_tx_skbufs(priv);
	stmmac_clear_descriptors(priv, &priv->dma_conf);

	stmmac_hw_setup(ndev, false);
	stmmac_init_coalesce(priv);
	stmmac_set_rx_mode(ndev);

	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);

	stmmac_enable_all_queues(priv);
	stmmac_enable_all_dma_irq(priv);

	mutex_unlock(&priv->lock);
	rtnl_unlock();

	netif_device_attach(ndev);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_resume);
#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
{
	char *opt;

	if (!str || !*str)
		return 1;
	while ((opt = strsep(&str, ",")) != NULL) {
		if (!strncmp(opt, "debug:", 6)) {
			if (kstrtoint(opt + 6, 0, &debug))
				goto err;
		} else if (!strncmp(opt, "phyaddr:", 8)) {
			if (kstrtoint(opt + 8, 0, &phyaddr))
				goto err;
		} else if (!strncmp(opt, "buf_sz:", 7)) {
			if (kstrtoint(opt + 7, 0, &buf_sz))
				goto err;
		} else if (!strncmp(opt, "tc:", 3)) {
			if (kstrtoint(opt + 3, 0, &tc))
				goto err;
		} else if (!strncmp(opt, "watchdog:", 9)) {
			if (kstrtoint(opt + 9, 0, &watchdog))
				goto err;
		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
			if (kstrtoint(opt + 10, 0, &flow_ctrl))
				goto err;
		} else if (!strncmp(opt, "pause:", 6)) {
			if (kstrtoint(opt + 6, 0, &pause))
				goto err;
		} else if (!strncmp(opt, "eee_timer:", 10)) {
			if (kstrtoint(opt + 10, 0, &eee_timer))
				goto err;
		} else if (!strncmp(opt, "chain_mode:", 11)) {
			if (kstrtoint(opt + 11, 0, &chain_mode))
				goto err;
		}
	}
	return 1;

err:
	pr_err("%s: ERROR broken module parameter conversion", __func__);
	return 1;
}

__setup("stmmaceth=", stmmac_cmdline_opt);
#endif /* MODULE */
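/* Usage sketch (illustrative): when the driver is built in, the module
 * parameters above can also be set from the kernel command line through
 * this early __setup() hook, e.g.
 *
 *	stmmaceth=watchdog:5000,tc:256,chain_mode:1
 *
 * Each "name:value" pair is parsed by the strsep()/kstrtoint() loop in
 * stmmac_cmdline_opt(); the example values are arbitrary.
 */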
static int __init stmmac_init(void)
{
#ifdef CONFIG_DEBUG_FS
	/* Create debugfs main directory if it doesn't exist yet */
	if (!stmmac_fs_dir)
		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
	register_netdevice_notifier(&stmmac_notifier);
#endif

	return 0;
}
static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
	unregister_netdevice_notifier(&stmmac_notifier);
	debugfs_remove_recursive(stmmac_fs_dir);
#endif
}

module_init(stmmac_init)
module_exit(stmmac_exit)
MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");