net: stmmac: Let descriptor code set skbuff address
1 /*******************************************************************************
2 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3 ST Ethernet IPs are built around a Synopsys IP Core.
4
5 Copyright(C) 2007-2011 STMicroelectronics Ltd
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 The full GNU General Public License is included in this distribution in
17 the file called "COPYING".
18
19 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20
21 Documentation available at:
22 http://www.stlinux.com
23 Support available at:
24 https://bugzilla.stlinux.com/
25 *******************************************************************************/
26
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include <net/pkt_cls.h>
49 #include "stmmac_ptp.h"
50 #include "stmmac.h"
51 #include <linux/reset.h>
52 #include <linux/of_mdio.h>
53 #include "dwmac1000.h"
54 #include "hwif.h"
55
56 #define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
57 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
58
59 /* Module parameters */
60 #define TX_TIMEO 5000
61 static int watchdog = TX_TIMEO;
62 module_param(watchdog, int, 0644);
63 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
64
65 static int debug = -1;
66 module_param(debug, int, 0644);
67 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
68
69 static int phyaddr = -1;
70 module_param(phyaddr, int, 0444);
71 MODULE_PARM_DESC(phyaddr, "Physical device address");
72
73 #define STMMAC_TX_THRESH (DMA_TX_SIZE / 4)
74 #define STMMAC_RX_THRESH (DMA_RX_SIZE / 4)
75
76 static int flow_ctrl = FLOW_OFF;
77 module_param(flow_ctrl, int, 0644);
78 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
79
80 static int pause = PAUSE_TIME;
81 module_param(pause, int, 0644);
82 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
83
84 #define TC_DEFAULT 64
85 static int tc = TC_DEFAULT;
86 module_param(tc, int, 0644);
87 MODULE_PARM_DESC(tc, "DMA threshold control value");
88
89 #define DEFAULT_BUFSIZE 1536
90 static int buf_sz = DEFAULT_BUFSIZE;
91 module_param(buf_sz, int, 0644);
92 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
93
94 #define STMMAC_RX_COPYBREAK 256
95
96 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
97 NETIF_MSG_LINK | NETIF_MSG_IFUP |
98 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
99
100 #define STMMAC_DEFAULT_LPI_TIMER 1000
101 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
102 module_param(eee_timer, int, 0644);
103 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
104 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
105
106 /* By default the driver uses ring mode to manage tx and rx descriptors,
107  * but the user can force chain mode instead of ring mode
108 */
109 static unsigned int chain_mode;
110 module_param(chain_mode, int, 0444);
111 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
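/* Note: parameters declared with module_param() above are typically exposed
 * under /sys/module/stmmac/parameters/ (assuming the core is built as the
 * "stmmac" module); the writable ones (permission 0644) can also be set at
 * module load time, for example:
 *
 *   modprobe stmmac eee_timer=2000 buf_sz=4096
 */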
112
113 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
114
115 #ifdef CONFIG_DEBUG_FS
116 static int stmmac_init_fs(struct net_device *dev);
117 static void stmmac_exit_fs(struct net_device *dev);
118 #endif
119
120 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
121
122 /**
123 * stmmac_verify_args - verify the driver parameters.
124  * Description: it checks the driver parameters and sets a default in case of
125 * errors.
126 */
127 static void stmmac_verify_args(void)
128 {
129 if (unlikely(watchdog < 0))
130 watchdog = TX_TIMEO;
131 if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
132 buf_sz = DEFAULT_BUFSIZE;
133 if (unlikely(flow_ctrl > 1))
134 flow_ctrl = FLOW_AUTO;
135 else if (likely(flow_ctrl < 0))
136 flow_ctrl = FLOW_OFF;
137 if (unlikely((pause < 0) || (pause > 0xffff)))
138 pause = PAUSE_TIME;
139 if (eee_timer < 0)
140 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
141 }
142
143 /**
144 * stmmac_disable_all_queues - Disable all queues
145 * @priv: driver private structure
146 */
147 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
148 {
149 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
150 u32 queue;
151
152 for (queue = 0; queue < rx_queues_cnt; queue++) {
153 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
154
155 napi_disable(&rx_q->napi);
156 }
157 }
158
159 /**
160 * stmmac_enable_all_queues - Enable all queues
161 * @priv: driver private structure
162 */
163 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
164 {
165 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
166 u32 queue;
167
168 for (queue = 0; queue < rx_queues_cnt; queue++) {
169 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
170
171 napi_enable(&rx_q->napi);
172 }
173 }
174
175 /**
176 * stmmac_stop_all_queues - Stop all queues
177 * @priv: driver private structure
178 */
179 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
180 {
181 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
182 u32 queue;
183
184 for (queue = 0; queue < tx_queues_cnt; queue++)
185 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
186 }
187
188 /**
189 * stmmac_start_all_queues - Start all queues
190 * @priv: driver private structure
191 */
192 static void stmmac_start_all_queues(struct stmmac_priv *priv)
193 {
194 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
195 u32 queue;
196
197 for (queue = 0; queue < tx_queues_cnt; queue++)
198 netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
199 }
200
201 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
202 {
203 if (!test_bit(STMMAC_DOWN, &priv->state) &&
204 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
205 queue_work(priv->wq, &priv->service_task);
206 }
207
208 static void stmmac_global_err(struct stmmac_priv *priv)
209 {
210 netif_carrier_off(priv->dev);
211 set_bit(STMMAC_RESET_REQUESTED, &priv->state);
212 stmmac_service_event_schedule(priv);
213 }
214
215 /**
216 * stmmac_clk_csr_set - dynamically set the MDC clock
217 * @priv: driver private structure
218 * Description: this is to dynamically set the MDC clock according to the csr
219 * clock input.
220 * Note:
221 * If a specific clk_csr value is passed from the platform
222 * this means that the CSR Clock Range selection cannot be
223 * changed at run-time and it is fixed (as reported in the driver
224  * documentation). Otherwise the driver will try to set the MDC
225 * clock dynamically according to the actual clock input.
226 */
227 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
228 {
229 u32 clk_rate;
230
231 clk_rate = clk_get_rate(priv->plat->stmmac_clk);
232
233         /* The platform-provided default clk_csr is assumed valid for
234          * all cases other than the ones handled below.
235          * For values higher than the IEEE 802.3 specified frequency
236          * we cannot estimate the proper divider, because the frequency
237          * of clk_csr_i is not known; so the default divider is left
238          * unchanged.
239 */
240 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
241 if (clk_rate < CSR_F_35M)
242 priv->clk_csr = STMMAC_CSR_20_35M;
243 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
244 priv->clk_csr = STMMAC_CSR_35_60M;
245 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
246 priv->clk_csr = STMMAC_CSR_60_100M;
247 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
248 priv->clk_csr = STMMAC_CSR_100_150M;
249 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
250 priv->clk_csr = STMMAC_CSR_150_250M;
251 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
252 priv->clk_csr = STMMAC_CSR_250_300M;
253 }
254
255 if (priv->plat->has_sun8i) {
256 if (clk_rate > 160000000)
257 priv->clk_csr = 0x03;
258 else if (clk_rate > 80000000)
259 priv->clk_csr = 0x02;
260 else if (clk_rate > 40000000)
261 priv->clk_csr = 0x01;
262 else
263 priv->clk_csr = 0;
264 }
265 }
266
267 static void print_pkt(unsigned char *buf, int len)
268 {
269 pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
270 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
271 }
272
273 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
274 {
275 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
276 u32 avail;
277
278 if (tx_q->dirty_tx > tx_q->cur_tx)
279 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
280 else
281 avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
282
283 return avail;
284 }
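/* Worked example of the wrap-around arithmetic above (illustrative values,
 * assuming DMA_TX_SIZE == 512): with cur_tx == 10 and dirty_tx == 4 the else
 * branch applies, so avail = 512 - 10 + 4 - 1 = 505; the extra "- 1" keeps one
 * descriptor unused so that a full ring is never confused with an empty one.
 */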
285
286 /**
287 * stmmac_rx_dirty - Get RX queue dirty
288 * @priv: driver private structure
289 * @queue: RX queue index
290 */
291 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
292 {
293 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
294 u32 dirty;
295
296 if (rx_q->dirty_rx <= rx_q->cur_rx)
297 dirty = rx_q->cur_rx - rx_q->dirty_rx;
298 else
299 dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
300
301 return dirty;
302 }
303
304 /**
305 * stmmac_hw_fix_mac_speed - callback for speed selection
306 * @priv: driver private structure
307 * Description: on some platforms (e.g. ST), some HW system configuration
308 * registers have to be set according to the link speed negotiated.
309 */
310 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
311 {
312 struct net_device *ndev = priv->dev;
313 struct phy_device *phydev = ndev->phydev;
314
315 if (likely(priv->plat->fix_mac_speed))
316 priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
317 }
318
319 /**
320 * stmmac_enable_eee_mode - check and enter in LPI mode
321 * @priv: driver private structure
322  * Description: this function verifies that it is safe to enter LPI mode
323  * (all TX queues idle) and, if so, enters it when EEE is enabled.
324 */
325 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
326 {
327 u32 tx_cnt = priv->plat->tx_queues_to_use;
328 u32 queue;
329
330         /* check if all TX queues have finished their work */
331 for (queue = 0; queue < tx_cnt; queue++) {
332 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
333
334 if (tx_q->dirty_tx != tx_q->cur_tx)
335 return; /* still unfinished work */
336 }
337
338 /* Check and enter in LPI mode */
339 if (!priv->tx_path_in_lpi_mode)
340 stmmac_set_eee_mode(priv, priv->hw,
341 priv->plat->en_tx_lpi_clockgating);
342 }
343
344 /**
345 * stmmac_disable_eee_mode - disable and exit from LPI mode
346 * @priv: driver private structure
347  * Description: this function exits and disables EEE when the LPI state
348  * is active. It is called from the xmit path.
349 */
350 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
351 {
352 stmmac_reset_eee_mode(priv, priv->hw);
353 del_timer_sync(&priv->eee_ctrl_timer);
354 priv->tx_path_in_lpi_mode = false;
355 }
356
357 /**
358 * stmmac_eee_ctrl_timer - EEE TX SW timer.
359  * @t : timer_list hook
360  * Description:
361  * if there is no data transfer and if we are not in LPI state,
362  * then the MAC transmitter can be moved to the LPI state.
363 */
364 static void stmmac_eee_ctrl_timer(struct timer_list *t)
365 {
366 struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
367
368 stmmac_enable_eee_mode(priv);
369 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
370 }
371
372 /**
373 * stmmac_eee_init - init EEE
374 * @priv: driver private structure
375 * Description:
376 * if the GMAC supports the EEE (from the HW cap reg) and the phy device
377  * can also manage EEE, this function enables the LPI state and starts the
378  * related timer.
379 */
380 bool stmmac_eee_init(struct stmmac_priv *priv)
381 {
382 struct net_device *ndev = priv->dev;
383 int interface = priv->plat->interface;
384 unsigned long flags;
385 bool ret = false;
386
387 if ((interface != PHY_INTERFACE_MODE_MII) &&
388 (interface != PHY_INTERFACE_MODE_GMII) &&
389 !phy_interface_mode_is_rgmii(interface))
390 goto out;
391
392         /* When using the PCS we cannot access the PHY registers at this stage,
393          * so extra features like EEE are not supported.
394 */
395 if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
396 (priv->hw->pcs == STMMAC_PCS_TBI) ||
397 (priv->hw->pcs == STMMAC_PCS_RTBI))
398 goto out;
399
400 /* MAC core supports the EEE feature. */
401 if (priv->dma_cap.eee) {
402 int tx_lpi_timer = priv->tx_lpi_timer;
403
404 /* Check if the PHY supports EEE */
405 if (phy_init_eee(ndev->phydev, 1)) {
406                 /* Handle the case where EEE can no longer be supported
407                  * at run-time (for example because the link partner
408                  * capabilities have changed).
409                  * In that case the driver disables its own timers.
410 */
411 spin_lock_irqsave(&priv->lock, flags);
412 if (priv->eee_active) {
413 netdev_dbg(priv->dev, "disable EEE\n");
414 del_timer_sync(&priv->eee_ctrl_timer);
415 stmmac_set_eee_timer(priv, priv->hw, 0,
416 tx_lpi_timer);
417 }
418 priv->eee_active = 0;
419 spin_unlock_irqrestore(&priv->lock, flags);
420 goto out;
421 }
422 /* Activate the EEE and start timers */
423 spin_lock_irqsave(&priv->lock, flags);
424 if (!priv->eee_active) {
425 priv->eee_active = 1;
426 timer_setup(&priv->eee_ctrl_timer,
427 stmmac_eee_ctrl_timer, 0);
428 mod_timer(&priv->eee_ctrl_timer,
429 STMMAC_LPI_T(eee_timer));
430
431 stmmac_set_eee_timer(priv, priv->hw,
432 STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
433 }
434 /* Set HW EEE according to the speed */
435 stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);
436
437 ret = true;
438 spin_unlock_irqrestore(&priv->lock, flags);
439
440 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
441 }
442 out:
443 return ret;
444 }
445
446 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
447 * @priv: driver private structure
448 * @p : descriptor pointer
449 * @skb : the socket buffer
450 * Description :
451  * This function reads the timestamp from the descriptor, performs some
452  * sanity checks and passes it to the stack.
453 */
454 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
455 struct dma_desc *p, struct sk_buff *skb)
456 {
457 struct skb_shared_hwtstamps shhwtstamp;
458 u64 ns;
459
460 if (!priv->hwts_tx_en)
461 return;
462
463 /* exit if skb doesn't support hw tstamp */
464 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
465 return;
466
467 /* check tx tstamp status */
468 if (stmmac_get_tx_timestamp_status(priv, p)) {
469 /* get the valid tstamp */
470 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
471
472 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
473 shhwtstamp.hwtstamp = ns_to_ktime(ns);
474
475 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
476 /* pass tstamp to stack */
477 skb_tstamp_tx(skb, &shhwtstamp);
478 }
479
480 return;
481 }
482
483 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
484 * @priv: driver private structure
485 * @p : descriptor pointer
486 * @np : next descriptor pointer
487 * @skb : the socket buffer
488 * Description :
489  * This function reads the received packet's timestamp from the descriptor
490  * and passes it to the stack. It also performs some sanity checks.
491 */
492 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
493 struct dma_desc *np, struct sk_buff *skb)
494 {
495 struct skb_shared_hwtstamps *shhwtstamp = NULL;
496 struct dma_desc *desc = p;
497 u64 ns;
498
499 if (!priv->hwts_rx_en)
500 return;
501 /* For GMAC4, the valid timestamp is from CTX next desc. */
502 if (priv->plat->has_gmac4)
503 desc = np;
504
505 /* Check if timestamp is available */
506 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
507 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
508 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
509 shhwtstamp = skb_hwtstamps(skb);
510 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
511 shhwtstamp->hwtstamp = ns_to_ktime(ns);
512 } else {
513 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
514 }
515 }
516
517 /**
518 * stmmac_hwtstamp_ioctl - control hardware timestamping.
519 * @dev: device pointer.
520 * @ifr: An IOCTL specific structure, that can contain a pointer to
521 * a proprietary structure used to pass information to the driver.
522 * Description:
523  * This function configures the MAC to enable/disable both outgoing (TX)
524  * and incoming (RX) packet time stamping based on user input.
525  * Return Value:
526  * 0 on success and an appropriate negative error code on failure.
527 */
528 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
529 {
530 struct stmmac_priv *priv = netdev_priv(dev);
531 struct hwtstamp_config config;
532 struct timespec64 now;
533 u64 temp = 0;
534 u32 ptp_v2 = 0;
535 u32 tstamp_all = 0;
536 u32 ptp_over_ipv4_udp = 0;
537 u32 ptp_over_ipv6_udp = 0;
538 u32 ptp_over_ethernet = 0;
539 u32 snap_type_sel = 0;
540 u32 ts_master_en = 0;
541 u32 ts_event_en = 0;
542 u32 value = 0;
543 u32 sec_inc;
544
545 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
546 netdev_alert(priv->dev, "No support for HW time stamping\n");
547 priv->hwts_tx_en = 0;
548 priv->hwts_rx_en = 0;
549
550 return -EOPNOTSUPP;
551 }
552
553 if (copy_from_user(&config, ifr->ifr_data,
554 sizeof(struct hwtstamp_config)))
555 return -EFAULT;
556
557 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
558 __func__, config.flags, config.tx_type, config.rx_filter);
559
560 /* reserved for future extensions */
561 if (config.flags)
562 return -EINVAL;
563
564 if (config.tx_type != HWTSTAMP_TX_OFF &&
565 config.tx_type != HWTSTAMP_TX_ON)
566 return -ERANGE;
567
568 if (priv->adv_ts) {
569 switch (config.rx_filter) {
570 case HWTSTAMP_FILTER_NONE:
571                         /* do not time stamp any incoming packets */
572 config.rx_filter = HWTSTAMP_FILTER_NONE;
573 break;
574
575 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
576 /* PTP v1, UDP, any kind of event packet */
577 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
578 /* take time stamp for all event messages */
579 if (priv->plat->has_gmac4)
580 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
581 else
582 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
583
584 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
585 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
586 break;
587
588 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
589 /* PTP v1, UDP, Sync packet */
590 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
591 /* take time stamp for SYNC messages only */
592 ts_event_en = PTP_TCR_TSEVNTENA;
593
594 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
595 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
596 break;
597
598 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
599 /* PTP v1, UDP, Delay_req packet */
600 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
601 /* take time stamp for Delay_Req messages only */
602 ts_master_en = PTP_TCR_TSMSTRENA;
603 ts_event_en = PTP_TCR_TSEVNTENA;
604
605 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
606 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
607 break;
608
609 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
610 /* PTP v2, UDP, any kind of event packet */
611 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
612 ptp_v2 = PTP_TCR_TSVER2ENA;
613 /* take time stamp for all event messages */
614 if (priv->plat->has_gmac4)
615 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
616 else
617 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
618
619 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
620 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
621 break;
622
623 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
624 /* PTP v2, UDP, Sync packet */
625 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
626 ptp_v2 = PTP_TCR_TSVER2ENA;
627 /* take time stamp for SYNC messages only */
628 ts_event_en = PTP_TCR_TSEVNTENA;
629
630 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
631 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
632 break;
633
634 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
635 /* PTP v2, UDP, Delay_req packet */
636 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
637 ptp_v2 = PTP_TCR_TSVER2ENA;
638 /* take time stamp for Delay_Req messages only */
639 ts_master_en = PTP_TCR_TSMSTRENA;
640 ts_event_en = PTP_TCR_TSEVNTENA;
641
642 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
643 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
644 break;
645
646 case HWTSTAMP_FILTER_PTP_V2_EVENT:
647 /* PTP v2/802.AS1 any layer, any kind of event packet */
648 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
649 ptp_v2 = PTP_TCR_TSVER2ENA;
650 /* take time stamp for all event messages */
651 if (priv->plat->has_gmac4)
652 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
653 else
654 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
655
656 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
657 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
658 ptp_over_ethernet = PTP_TCR_TSIPENA;
659 break;
660
661 case HWTSTAMP_FILTER_PTP_V2_SYNC:
662 /* PTP v2/802.AS1, any layer, Sync packet */
663 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
664 ptp_v2 = PTP_TCR_TSVER2ENA;
665 /* take time stamp for SYNC messages only */
666 ts_event_en = PTP_TCR_TSEVNTENA;
667
668 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
669 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
670 ptp_over_ethernet = PTP_TCR_TSIPENA;
671 break;
672
673 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
674 /* PTP v2/802.AS1, any layer, Delay_req packet */
675 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
676 ptp_v2 = PTP_TCR_TSVER2ENA;
677 /* take time stamp for Delay_Req messages only */
678 ts_master_en = PTP_TCR_TSMSTRENA;
679 ts_event_en = PTP_TCR_TSEVNTENA;
680
681 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
682 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
683 ptp_over_ethernet = PTP_TCR_TSIPENA;
684 break;
685
686 case HWTSTAMP_FILTER_NTP_ALL:
687 case HWTSTAMP_FILTER_ALL:
688 /* time stamp any incoming packet */
689 config.rx_filter = HWTSTAMP_FILTER_ALL;
690 tstamp_all = PTP_TCR_TSENALL;
691 break;
692
693 default:
694 return -ERANGE;
695 }
696 } else {
697 switch (config.rx_filter) {
698 case HWTSTAMP_FILTER_NONE:
699 config.rx_filter = HWTSTAMP_FILTER_NONE;
700 break;
701 default:
702 /* PTP v1, UDP, any kind of event packet */
703 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
704 break;
705 }
706 }
707 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
708 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
709
710 if (!priv->hwts_tx_en && !priv->hwts_rx_en)
711 stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
712 else {
713 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
714 tstamp_all | ptp_v2 | ptp_over_ethernet |
715 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
716 ts_master_en | snap_type_sel);
717 stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
718
719 /* program Sub Second Increment reg */
720 stmmac_config_sub_second_increment(priv,
721 priv->ptpaddr, priv->plat->clk_ptp_rate,
722 priv->plat->has_gmac4, &sec_inc);
723 temp = div_u64(1000000000ULL, sec_inc);
724
725                 /* calculate the default addend value:
726                  * formula is:
727                  * addend = (2^32)/freq_div_ratio;
728                  * where freq_div_ratio = clk_ptp_rate / (1e9ns / sec_inc)
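                 * Worked example (illustrative numbers): with sec_inc = 40ns
                 * and clk_ptp_rate = 50MHz, freq_div_ratio = 50e6 / (1e9 / 40)
                 * = 2 and addend = 2^32 / 2 = 0x80000000.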
729 */
730 temp = (u64)(temp << 32);
731 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
732 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
733
734 /* initialize system time */
735 ktime_get_real_ts64(&now);
736
737 /* lower 32 bits of tv_sec are safe until y2106 */
738 stmmac_init_systime(priv, priv->ptpaddr,
739 (u32)now.tv_sec, now.tv_nsec);
740 }
741
742 return copy_to_user(ifr->ifr_data, &config,
743 sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
744 }
745
746 /**
747 * stmmac_init_ptp - init PTP
748 * @priv: driver private structure
749  * Description: this verifies whether the HW supports PTPv1 or PTPv2.
750 * This is done by looking at the HW cap. register.
751 * This function also registers the ptp driver.
752 */
753 static int stmmac_init_ptp(struct stmmac_priv *priv)
754 {
755 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
756 return -EOPNOTSUPP;
757
758 priv->adv_ts = 0;
759 /* Check if adv_ts can be enabled for dwmac 4.x core */
760 if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
761 priv->adv_ts = 1;
762 /* Dwmac 3.x core with extend_desc can support adv_ts */
763 else if (priv->extend_desc && priv->dma_cap.atime_stamp)
764 priv->adv_ts = 1;
765
766 if (priv->dma_cap.time_stamp)
767 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
768
769 if (priv->adv_ts)
770 netdev_info(priv->dev,
771 "IEEE 1588-2008 Advanced Timestamp supported\n");
772
773 priv->hwts_tx_en = 0;
774 priv->hwts_rx_en = 0;
775
776 stmmac_ptp_register(priv);
777
778 return 0;
779 }
780
781 static void stmmac_release_ptp(struct stmmac_priv *priv)
782 {
783 if (priv->plat->clk_ptp_ref)
784 clk_disable_unprepare(priv->plat->clk_ptp_ref);
785 stmmac_ptp_unregister(priv);
786 }
787
788 /**
789 * stmmac_mac_flow_ctrl - Configure flow control in all queues
790 * @priv: driver private structure
791 * Description: It is used for configuring the flow control in all queues
792 */
793 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
794 {
795 u32 tx_cnt = priv->plat->tx_queues_to_use;
796
797 stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
798 priv->pause, tx_cnt);
799 }
800
801 /**
802 * stmmac_adjust_link - adjusts the link parameters
803 * @dev: net device structure
804 * Description: this is the helper called by the physical abstraction layer
805  * drivers to communicate the phy link status. According to the speed and
806  * duplex, this driver can invoke registered glue-logic as well.
807  * It also invokes the EEE initialization because the link may be switched
808  * to a different network (that is EEE capable).
809 */
810 static void stmmac_adjust_link(struct net_device *dev)
811 {
812 struct stmmac_priv *priv = netdev_priv(dev);
813 struct phy_device *phydev = dev->phydev;
814 unsigned long flags;
815 bool new_state = false;
816
817 if (!phydev)
818 return;
819
820 spin_lock_irqsave(&priv->lock, flags);
821
822 if (phydev->link) {
823 u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
824
825 /* Now we make sure that we can be in full duplex mode.
826 * If not, we operate in half-duplex mode. */
827 if (phydev->duplex != priv->oldduplex) {
828 new_state = true;
829 if (!phydev->duplex)
830 ctrl &= ~priv->hw->link.duplex;
831 else
832 ctrl |= priv->hw->link.duplex;
833 priv->oldduplex = phydev->duplex;
834 }
835 /* Flow Control operation */
836 if (phydev->pause)
837 stmmac_mac_flow_ctrl(priv, phydev->duplex);
838
839 if (phydev->speed != priv->speed) {
840 new_state = true;
841 ctrl &= ~priv->hw->link.speed_mask;
842 switch (phydev->speed) {
843 case SPEED_1000:
844 ctrl |= priv->hw->link.speed1000;
845 break;
846 case SPEED_100:
847 ctrl |= priv->hw->link.speed100;
848 break;
849 case SPEED_10:
850 ctrl |= priv->hw->link.speed10;
851 break;
852 default:
853 netif_warn(priv, link, priv->dev,
854 "broken speed: %d\n", phydev->speed);
855 phydev->speed = SPEED_UNKNOWN;
856 break;
857 }
858 if (phydev->speed != SPEED_UNKNOWN)
859 stmmac_hw_fix_mac_speed(priv);
860 priv->speed = phydev->speed;
861 }
862
863 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
864
865 if (!priv->oldlink) {
866 new_state = true;
867 priv->oldlink = true;
868 }
869 } else if (priv->oldlink) {
870 new_state = true;
871 priv->oldlink = false;
872 priv->speed = SPEED_UNKNOWN;
873 priv->oldduplex = DUPLEX_UNKNOWN;
874 }
875
876 if (new_state && netif_msg_link(priv))
877 phy_print_status(phydev);
878
879 spin_unlock_irqrestore(&priv->lock, flags);
880
881 if (phydev->is_pseudo_fixed_link)
882                 /* Stop the PHY layer from calling the adjust_link hook in case
883                  * a switch is attached to the stmmac driver.
884 */
885 phydev->irq = PHY_IGNORE_INTERRUPT;
886 else
887 /* At this stage, init the EEE if supported.
888 * Never called in case of fixed_link.
889 */
890 priv->eee_enabled = stmmac_eee_init(priv);
891 }
892
893 /**
894 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
895 * @priv: driver private structure
896  * Description: this verifies whether the HW supports the Physical Coding
897  * Sublayer (PCS) interface, which can be used when the MAC is configured
898  * for the TBI, RTBI, or SGMII PHY interface.
899 */
900 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
901 {
902 int interface = priv->plat->interface;
903
904 if (priv->dma_cap.pcs) {
905 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
906 (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
907 (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
908 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
909 netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
910 priv->hw->pcs = STMMAC_PCS_RGMII;
911 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
912 netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
913 priv->hw->pcs = STMMAC_PCS_SGMII;
914 }
915 }
916 }
917
918 /**
919 * stmmac_init_phy - PHY initialization
920 * @dev: net device structure
921 * Description: it initializes the driver's PHY state, and attaches the PHY
922 * to the mac driver.
923 * Return value:
924 * 0 on success
925 */
926 static int stmmac_init_phy(struct net_device *dev)
927 {
928 struct stmmac_priv *priv = netdev_priv(dev);
929 struct phy_device *phydev;
930 char phy_id_fmt[MII_BUS_ID_SIZE + 3];
931 char bus_id[MII_BUS_ID_SIZE];
932 int interface = priv->plat->interface;
933 int max_speed = priv->plat->max_speed;
934 priv->oldlink = false;
935 priv->speed = SPEED_UNKNOWN;
936 priv->oldduplex = DUPLEX_UNKNOWN;
937
938 if (priv->plat->phy_node) {
939 phydev = of_phy_connect(dev, priv->plat->phy_node,
940 &stmmac_adjust_link, 0, interface);
941 } else {
942 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
943 priv->plat->bus_id);
944
945 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
946 priv->plat->phy_addr);
947 netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
948 phy_id_fmt);
949
950 phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
951 interface);
952 }
953
954 if (IS_ERR_OR_NULL(phydev)) {
955 netdev_err(priv->dev, "Could not attach to PHY\n");
956 if (!phydev)
957 return -ENODEV;
958
959 return PTR_ERR(phydev);
960 }
961
962 /* Stop Advertising 1000BASE Capability if interface is not GMII */
963 if ((interface == PHY_INTERFACE_MODE_MII) ||
964 (interface == PHY_INTERFACE_MODE_RMII) ||
965 (max_speed < 1000 && max_speed > 0))
966 phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
967 SUPPORTED_1000baseT_Full);
968
969 /*
970 * Broken HW is sometimes missing the pull-up resistor on the
971 * MDIO line, which results in reads to non-existent devices returning
972 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
973 * device as well.
974 * Note: phydev->phy_id is the result of reading the UID PHY registers.
975 */
976 if (!priv->plat->phy_node && phydev->phy_id == 0) {
977 phy_disconnect(phydev);
978 return -ENODEV;
979 }
980
981 /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
982          * subsequent PHY polling; make sure we force a link transition if
983          * we have an UP/DOWN/UP transition
984 */
985 if (phydev->is_pseudo_fixed_link)
986 phydev->irq = PHY_POLL;
987
988 phy_attached_info(phydev);
989 return 0;
990 }
991
992 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
993 {
994 u32 rx_cnt = priv->plat->rx_queues_to_use;
995 void *head_rx;
996 u32 queue;
997
998 /* Display RX rings */
999 for (queue = 0; queue < rx_cnt; queue++) {
1000 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1001
1002 pr_info("\tRX Queue %u rings\n", queue);
1003
1004 if (priv->extend_desc)
1005 head_rx = (void *)rx_q->dma_erx;
1006 else
1007 head_rx = (void *)rx_q->dma_rx;
1008
1009 /* Display RX ring */
1010 stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
1011 }
1012 }
1013
1014 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1015 {
1016 u32 tx_cnt = priv->plat->tx_queues_to_use;
1017 void *head_tx;
1018 u32 queue;
1019
1020 /* Display TX rings */
1021 for (queue = 0; queue < tx_cnt; queue++) {
1022 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1023
1024 pr_info("\tTX Queue %d rings\n", queue);
1025
1026 if (priv->extend_desc)
1027 head_tx = (void *)tx_q->dma_etx;
1028 else
1029 head_tx = (void *)tx_q->dma_tx;
1030
1031 stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1032 }
1033 }
1034
1035 static void stmmac_display_rings(struct stmmac_priv *priv)
1036 {
1037 /* Display RX ring */
1038 stmmac_display_rx_rings(priv);
1039
1040 /* Display TX ring */
1041 stmmac_display_tx_rings(priv);
1042 }
1043
1044 static int stmmac_set_bfsize(int mtu, int bufsize)
1045 {
1046 int ret = bufsize;
1047
1048 if (mtu >= BUF_SIZE_4KiB)
1049 ret = BUF_SIZE_8KiB;
1050 else if (mtu >= BUF_SIZE_2KiB)
1051 ret = BUF_SIZE_4KiB;
1052 else if (mtu > DEFAULT_BUFSIZE)
1053 ret = BUF_SIZE_2KiB;
1054 else
1055 ret = DEFAULT_BUFSIZE;
1056
1057 return ret;
1058 }
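/* Example (illustrative, assuming the usual 2 KiB / 4 KiB buffer constants):
 * the default 1500-byte MTU keeps DEFAULT_BUFSIZE, while an MTU of 3000 falls
 * between BUF_SIZE_2KiB and BUF_SIZE_4KiB and therefore selects BUF_SIZE_4KiB.
 */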
1059
1060 /**
1061 * stmmac_clear_rx_descriptors - clear RX descriptors
1062 * @priv: driver private structure
1063 * @queue: RX queue index
1064 * Description: this function is called to clear the RX descriptors
1065  * whether basic or extended descriptors are in use.
1066 */
1067 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1068 {
1069 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1070 int i;
1071
1072 /* Clear the RX descriptors */
1073 for (i = 0; i < DMA_RX_SIZE; i++)
1074 if (priv->extend_desc)
1075 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1076 priv->use_riwt, priv->mode,
1077 (i == DMA_RX_SIZE - 1));
1078 else
1079 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1080 priv->use_riwt, priv->mode,
1081 (i == DMA_RX_SIZE - 1));
1082 }
1083
1084 /**
1085 * stmmac_clear_tx_descriptors - clear tx descriptors
1086 * @priv: driver private structure
1087 * @queue: TX queue index.
1088 * Description: this function is called to clear the TX descriptors
1089  * whether basic or extended descriptors are in use.
1090 */
1091 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1092 {
1093 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1094 int i;
1095
1096 /* Clear the TX descriptors */
1097 for (i = 0; i < DMA_TX_SIZE; i++)
1098 if (priv->extend_desc)
1099 stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1100 priv->mode, (i == DMA_TX_SIZE - 1));
1101 else
1102 stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1103 priv->mode, (i == DMA_TX_SIZE - 1));
1104 }
1105
1106 /**
1107 * stmmac_clear_descriptors - clear descriptors
1108 * @priv: driver private structure
1109 * Description: this function is called to clear the TX and RX descriptors
1110  * whether basic or extended descriptors are in use.
1111 */
1112 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1113 {
1114 u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1115 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1116 u32 queue;
1117
1118 /* Clear the RX descriptors */
1119 for (queue = 0; queue < rx_queue_cnt; queue++)
1120 stmmac_clear_rx_descriptors(priv, queue);
1121
1122 /* Clear the TX descriptors */
1123 for (queue = 0; queue < tx_queue_cnt; queue++)
1124 stmmac_clear_tx_descriptors(priv, queue);
1125 }
1126
1127 /**
1128 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1129 * @priv: driver private structure
1130 * @p: descriptor pointer
1131 * @i: descriptor index
1132 * @flags: gfp flag
1133 * @queue: RX queue index
1134 * Description: this function is called to allocate a receive buffer, perform
1135 * the DMA mapping and init the descriptor.
1136 */
1137 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1138 int i, gfp_t flags, u32 queue)
1139 {
1140 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1141 struct sk_buff *skb;
1142
1143 skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1144 if (!skb) {
1145 netdev_err(priv->dev,
1146 "%s: Rx init fails; skb is NULL\n", __func__);
1147 return -ENOMEM;
1148 }
1149 rx_q->rx_skbuff[i] = skb;
1150 rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1151 priv->dma_buf_sz,
1152 DMA_FROM_DEVICE);
1153 if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1154 netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1155 dev_kfree_skb_any(skb);
1156 return -EINVAL;
1157 }
1158
1159 stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
1160
1161 if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1162 stmmac_init_desc3(priv, p);
1163
1164 return 0;
1165 }
1166
1167 /**
1168 * stmmac_free_rx_buffer - free RX dma buffers
1169 * @priv: private structure
1170 * @queue: RX queue index
1171 * @i: buffer index.
1172 */
1173 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1174 {
1175 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1176
1177 if (rx_q->rx_skbuff[i]) {
1178 dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1179 priv->dma_buf_sz, DMA_FROM_DEVICE);
1180 dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1181 }
1182 rx_q->rx_skbuff[i] = NULL;
1183 }
1184
1185 /**
1186  * stmmac_free_tx_buffer - free TX dma buffers
1187  * @priv: private structure
1188  * @queue: TX queue index.
1189 * @i: buffer index.
1190 */
1191 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1192 {
1193 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1194
1195 if (tx_q->tx_skbuff_dma[i].buf) {
1196 if (tx_q->tx_skbuff_dma[i].map_as_page)
1197 dma_unmap_page(priv->device,
1198 tx_q->tx_skbuff_dma[i].buf,
1199 tx_q->tx_skbuff_dma[i].len,
1200 DMA_TO_DEVICE);
1201 else
1202 dma_unmap_single(priv->device,
1203 tx_q->tx_skbuff_dma[i].buf,
1204 tx_q->tx_skbuff_dma[i].len,
1205 DMA_TO_DEVICE);
1206 }
1207
1208 if (tx_q->tx_skbuff[i]) {
1209 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1210 tx_q->tx_skbuff[i] = NULL;
1211 tx_q->tx_skbuff_dma[i].buf = 0;
1212 tx_q->tx_skbuff_dma[i].map_as_page = false;
1213 }
1214 }
1215
1216 /**
1217 * init_dma_rx_desc_rings - init the RX descriptor rings
1218 * @dev: net device structure
1219 * @flags: gfp flag.
1220 * Description: this function initializes the DMA RX descriptors
1221 * and allocates the socket buffers. It supports the chained and ring
1222 * modes.
1223 */
1224 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1225 {
1226 struct stmmac_priv *priv = netdev_priv(dev);
1227 u32 rx_count = priv->plat->rx_queues_to_use;
1228 int ret = -ENOMEM;
1229 int bfsize = 0;
1230 int queue;
1231 int i;
1232
1233 bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
1234 if (bfsize < 0)
1235 bfsize = 0;
1236
1237 if (bfsize < BUF_SIZE_16KiB)
1238 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1239
1240 priv->dma_buf_sz = bfsize;
1241
1242 /* RX INITIALIZATION */
1243 netif_dbg(priv, probe, priv->dev,
1244 "SKB addresses:\nskb\t\tskb data\tdma data\n");
1245
1246 for (queue = 0; queue < rx_count; queue++) {
1247 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1248
1249 netif_dbg(priv, probe, priv->dev,
1250 "(%s) dma_rx_phy=0x%08x\n", __func__,
1251 (u32)rx_q->dma_rx_phy);
1252
1253 for (i = 0; i < DMA_RX_SIZE; i++) {
1254 struct dma_desc *p;
1255
1256 if (priv->extend_desc)
1257 p = &((rx_q->dma_erx + i)->basic);
1258 else
1259 p = rx_q->dma_rx + i;
1260
1261 ret = stmmac_init_rx_buffers(priv, p, i, flags,
1262 queue);
1263 if (ret)
1264 goto err_init_rx_buffers;
1265
1266 netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1267 rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1268 (unsigned int)rx_q->rx_skbuff_dma[i]);
1269 }
1270
1271 rx_q->cur_rx = 0;
1272 rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1273
1274 stmmac_clear_rx_descriptors(priv, queue);
1275
1276 /* Setup the chained descriptor addresses */
1277 if (priv->mode == STMMAC_CHAIN_MODE) {
1278 if (priv->extend_desc)
1279 stmmac_mode_init(priv, rx_q->dma_erx,
1280 rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
1281 else
1282 stmmac_mode_init(priv, rx_q->dma_rx,
1283 rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
1284 }
1285 }
1286
1287 buf_sz = bfsize;
1288
1289 return 0;
1290
1291 err_init_rx_buffers:
1292 while (queue >= 0) {
1293 while (--i >= 0)
1294 stmmac_free_rx_buffer(priv, queue, i);
1295
1296 if (queue == 0)
1297 break;
1298
1299 i = DMA_RX_SIZE;
1300 queue--;
1301 }
1302
1303 return ret;
1304 }
1305
1306 /**
1307 * init_dma_tx_desc_rings - init the TX descriptor rings
1308 * @dev: net device structure.
1309 * Description: this function initializes the DMA TX descriptors
1310 * and allocates the socket buffers. It supports the chained and ring
1311 * modes.
1312 */
1313 static int init_dma_tx_desc_rings(struct net_device *dev)
1314 {
1315 struct stmmac_priv *priv = netdev_priv(dev);
1316 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1317 u32 queue;
1318 int i;
1319
1320 for (queue = 0; queue < tx_queue_cnt; queue++) {
1321 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1322
1323 netif_dbg(priv, probe, priv->dev,
1324 "(%s) dma_tx_phy=0x%08x\n", __func__,
1325 (u32)tx_q->dma_tx_phy);
1326
1327 /* Setup the chained descriptor addresses */
1328 if (priv->mode == STMMAC_CHAIN_MODE) {
1329 if (priv->extend_desc)
1330 stmmac_mode_init(priv, tx_q->dma_etx,
1331 tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
1332 else
1333 stmmac_mode_init(priv, tx_q->dma_tx,
1334 tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1335 }
1336
1337 for (i = 0; i < DMA_TX_SIZE; i++) {
1338 struct dma_desc *p;
1339 if (priv->extend_desc)
1340 p = &((tx_q->dma_etx + i)->basic);
1341 else
1342 p = tx_q->dma_tx + i;
1343
1344 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1345 p->des0 = 0;
1346 p->des1 = 0;
1347 p->des2 = 0;
1348 p->des3 = 0;
1349 } else {
1350 p->des2 = 0;
1351 }
1352
1353 tx_q->tx_skbuff_dma[i].buf = 0;
1354 tx_q->tx_skbuff_dma[i].map_as_page = false;
1355 tx_q->tx_skbuff_dma[i].len = 0;
1356 tx_q->tx_skbuff_dma[i].last_segment = false;
1357 tx_q->tx_skbuff[i] = NULL;
1358 }
1359
1360 tx_q->dirty_tx = 0;
1361 tx_q->cur_tx = 0;
1362 tx_q->mss = 0;
1363
1364 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1365 }
1366
1367 return 0;
1368 }
1369
1370 /**
1371 * init_dma_desc_rings - init the RX/TX descriptor rings
1372 * @dev: net device structure
1373 * @flags: gfp flag.
1374 * Description: this function initializes the DMA RX/TX descriptors
1375 * and allocates the socket buffers. It supports the chained and ring
1376 * modes.
1377 */
1378 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1379 {
1380 struct stmmac_priv *priv = netdev_priv(dev);
1381 int ret;
1382
1383 ret = init_dma_rx_desc_rings(dev, flags);
1384 if (ret)
1385 return ret;
1386
1387 ret = init_dma_tx_desc_rings(dev);
1388
1389 stmmac_clear_descriptors(priv);
1390
1391 if (netif_msg_hw(priv))
1392 stmmac_display_rings(priv);
1393
1394 return ret;
1395 }
1396
1397 /**
1398 * dma_free_rx_skbufs - free RX dma buffers
1399 * @priv: private structure
1400 * @queue: RX queue index
1401 */
1402 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1403 {
1404 int i;
1405
1406 for (i = 0; i < DMA_RX_SIZE; i++)
1407 stmmac_free_rx_buffer(priv, queue, i);
1408 }
1409
1410 /**
1411 * dma_free_tx_skbufs - free TX dma buffers
1412 * @priv: private structure
1413 * @queue: TX queue index
1414 */
1415 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1416 {
1417 int i;
1418
1419 for (i = 0; i < DMA_TX_SIZE; i++)
1420 stmmac_free_tx_buffer(priv, queue, i);
1421 }
1422
1423 /**
1424 * free_dma_rx_desc_resources - free RX dma desc resources
1425 * @priv: private structure
1426 */
1427 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1428 {
1429 u32 rx_count = priv->plat->rx_queues_to_use;
1430 u32 queue;
1431
1432 /* Free RX queue resources */
1433 for (queue = 0; queue < rx_count; queue++) {
1434 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1435
1436 /* Release the DMA RX socket buffers */
1437 dma_free_rx_skbufs(priv, queue);
1438
1439 /* Free DMA regions of consistent memory previously allocated */
1440 if (!priv->extend_desc)
1441 dma_free_coherent(priv->device,
1442 DMA_RX_SIZE * sizeof(struct dma_desc),
1443 rx_q->dma_rx, rx_q->dma_rx_phy);
1444 else
1445 dma_free_coherent(priv->device, DMA_RX_SIZE *
1446 sizeof(struct dma_extended_desc),
1447 rx_q->dma_erx, rx_q->dma_rx_phy);
1448
1449 kfree(rx_q->rx_skbuff_dma);
1450 kfree(rx_q->rx_skbuff);
1451 }
1452 }
1453
1454 /**
1455 * free_dma_tx_desc_resources - free TX dma desc resources
1456 * @priv: private structure
1457 */
1458 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1459 {
1460 u32 tx_count = priv->plat->tx_queues_to_use;
1461 u32 queue;
1462
1463 /* Free TX queue resources */
1464 for (queue = 0; queue < tx_count; queue++) {
1465 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1466
1467 /* Release the DMA TX socket buffers */
1468 dma_free_tx_skbufs(priv, queue);
1469
1470 /* Free DMA regions of consistent memory previously allocated */
1471 if (!priv->extend_desc)
1472 dma_free_coherent(priv->device,
1473 DMA_TX_SIZE * sizeof(struct dma_desc),
1474 tx_q->dma_tx, tx_q->dma_tx_phy);
1475 else
1476 dma_free_coherent(priv->device, DMA_TX_SIZE *
1477 sizeof(struct dma_extended_desc),
1478 tx_q->dma_etx, tx_q->dma_tx_phy);
1479
1480 kfree(tx_q->tx_skbuff_dma);
1481 kfree(tx_q->tx_skbuff);
1482 }
1483 }
1484
1485 /**
1486 * alloc_dma_rx_desc_resources - alloc RX resources.
1487 * @priv: private structure
1488  * Description: according to which descriptor can be used (extended or basic)
1489  * this function allocates the resources for the RX path. It pre-allocates
1490  * the RX socket buffers in order to allow a zero-copy mechanism.
1492 */
1493 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1494 {
1495 u32 rx_count = priv->plat->rx_queues_to_use;
1496 int ret = -ENOMEM;
1497 u32 queue;
1498
1499 /* RX queues buffers and DMA */
1500 for (queue = 0; queue < rx_count; queue++) {
1501 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1502
1503 rx_q->queue_index = queue;
1504 rx_q->priv_data = priv;
1505
1506 rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1507 sizeof(dma_addr_t),
1508 GFP_KERNEL);
1509 if (!rx_q->rx_skbuff_dma)
1510 goto err_dma;
1511
1512 rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1513 sizeof(struct sk_buff *),
1514 GFP_KERNEL);
1515 if (!rx_q->rx_skbuff)
1516 goto err_dma;
1517
1518 if (priv->extend_desc) {
1519 rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1520 DMA_RX_SIZE *
1521 sizeof(struct
1522 dma_extended_desc),
1523 &rx_q->dma_rx_phy,
1524 GFP_KERNEL);
1525 if (!rx_q->dma_erx)
1526 goto err_dma;
1527
1528 } else {
1529 rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1530 DMA_RX_SIZE *
1531 sizeof(struct
1532 dma_desc),
1533 &rx_q->dma_rx_phy,
1534 GFP_KERNEL);
1535 if (!rx_q->dma_rx)
1536 goto err_dma;
1537 }
1538 }
1539
1540 return 0;
1541
1542 err_dma:
1543 free_dma_rx_desc_resources(priv);
1544
1545 return ret;
1546 }
1547
1548 /**
1549 * alloc_dma_tx_desc_resources - alloc TX resources.
1550 * @priv: private structure
1551  * Description: according to which descriptor can be used (extended or basic)
1552  * this function allocates the resources for the TX path.
1555 */
1556 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1557 {
1558 u32 tx_count = priv->plat->tx_queues_to_use;
1559 int ret = -ENOMEM;
1560 u32 queue;
1561
1562 /* TX queues buffers and DMA */
1563 for (queue = 0; queue < tx_count; queue++) {
1564 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1565
1566 tx_q->queue_index = queue;
1567 tx_q->priv_data = priv;
1568
1569 tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1570 sizeof(*tx_q->tx_skbuff_dma),
1571 GFP_KERNEL);
1572 if (!tx_q->tx_skbuff_dma)
1573 goto err_dma;
1574
1575 tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1576 sizeof(struct sk_buff *),
1577 GFP_KERNEL);
1578 if (!tx_q->tx_skbuff)
1579 goto err_dma;
1580
1581 if (priv->extend_desc) {
1582 tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1583 DMA_TX_SIZE *
1584 sizeof(struct
1585 dma_extended_desc),
1586 &tx_q->dma_tx_phy,
1587 GFP_KERNEL);
1588 if (!tx_q->dma_etx)
1589 goto err_dma;
1590 } else {
1591 tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1592 DMA_TX_SIZE *
1593 sizeof(struct
1594 dma_desc),
1595 &tx_q->dma_tx_phy,
1596 GFP_KERNEL);
1597 if (!tx_q->dma_tx)
1598 goto err_dma;
1599 }
1600 }
1601
1602 return 0;
1603
1604 err_dma:
1605 free_dma_tx_desc_resources(priv);
1606
1607 return ret;
1608 }
1609
1610 /**
1611 * alloc_dma_desc_resources - alloc TX/RX resources.
1612 * @priv: private structure
1613  * Description: according to which descriptor can be used (extended or basic)
1614  * this function allocates the resources for the TX and RX paths. In case of
1615  * reception, for example, it pre-allocates the RX socket buffers in order to
1616  * allow a zero-copy mechanism.
1617 */
1618 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1619 {
1620 /* RX Allocation */
1621 int ret = alloc_dma_rx_desc_resources(priv);
1622
1623 if (ret)
1624 return ret;
1625
1626 ret = alloc_dma_tx_desc_resources(priv);
1627
1628 return ret;
1629 }
1630
1631 /**
1632 * free_dma_desc_resources - free dma desc resources
1633 * @priv: private structure
1634 */
1635 static void free_dma_desc_resources(struct stmmac_priv *priv)
1636 {
1637 /* Release the DMA RX socket buffers */
1638 free_dma_rx_desc_resources(priv);
1639
1640 /* Release the DMA TX socket buffers */
1641 free_dma_tx_desc_resources(priv);
1642 }
1643
1644 /**
1645 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
1646 * @priv: driver private structure
1647 * Description: It is used for enabling the rx queues in the MAC
1648 */
1649 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1650 {
1651 u32 rx_queues_count = priv->plat->rx_queues_to_use;
1652 int queue;
1653 u8 mode;
1654
1655 for (queue = 0; queue < rx_queues_count; queue++) {
1656 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1657 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1658 }
1659 }
1660
1661 /**
1662 * stmmac_start_rx_dma - start RX DMA channel
1663 * @priv: driver private structure
1664 * @chan: RX channel index
1665 * Description:
1666 * This starts a RX DMA channel
1667 */
1668 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1669 {
1670 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1671 stmmac_start_rx(priv, priv->ioaddr, chan);
1672 }
1673
1674 /**
1675 * stmmac_start_tx_dma - start TX DMA channel
1676 * @priv: driver private structure
1677 * @chan: TX channel index
1678 * Description:
1679 * This starts a TX DMA channel
1680 */
1681 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1682 {
1683 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1684 stmmac_start_tx(priv, priv->ioaddr, chan);
1685 }
1686
1687 /**
1688 * stmmac_stop_rx_dma - stop RX DMA channel
1689 * @priv: driver private structure
1690 * @chan: RX channel index
1691 * Description:
1692 * This stops a RX DMA channel
1693 */
1694 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1695 {
1696 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1697 stmmac_stop_rx(priv, priv->ioaddr, chan);
1698 }
1699
1700 /**
1701 * stmmac_stop_tx_dma - stop TX DMA channel
1702 * @priv: driver private structure
1703 * @chan: TX channel index
1704 * Description:
1705 * This stops a TX DMA channel
1706 */
1707 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1708 {
1709 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1710 stmmac_stop_tx(priv, priv->ioaddr, chan);
1711 }
1712
1713 /**
1714 * stmmac_start_all_dma - start all RX and TX DMA channels
1715 * @priv: driver private structure
1716 * Description:
1717 * This starts all the RX and TX DMA channels
1718 */
1719 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1720 {
1721 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1722 u32 tx_channels_count = priv->plat->tx_queues_to_use;
1723 u32 chan = 0;
1724
1725 for (chan = 0; chan < rx_channels_count; chan++)
1726 stmmac_start_rx_dma(priv, chan);
1727
1728 for (chan = 0; chan < tx_channels_count; chan++)
1729 stmmac_start_tx_dma(priv, chan);
1730 }
1731
1732 /**
1733 * stmmac_stop_all_dma - stop all RX and TX DMA channels
1734 * @priv: driver private structure
1735 * Description:
1736 * This stops the RX and TX DMA channels
1737 */
1738 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1739 {
1740 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1741 u32 tx_channels_count = priv->plat->tx_queues_to_use;
1742 u32 chan = 0;
1743
1744 for (chan = 0; chan < rx_channels_count; chan++)
1745 stmmac_stop_rx_dma(priv, chan);
1746
1747 for (chan = 0; chan < tx_channels_count; chan++)
1748 stmmac_stop_tx_dma(priv, chan);
1749 }
1750
1751 /**
1752 * stmmac_dma_operation_mode - HW DMA operation mode
1753 * @priv: driver private structure
1754 * Description: it is used for configuring the DMA operation mode register in
1755 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1756 */
1757 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1758 {
1759 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1760 u32 tx_channels_count = priv->plat->tx_queues_to_use;
1761 int rxfifosz = priv->plat->rx_fifo_size;
1762 int txfifosz = priv->plat->tx_fifo_size;
1763 u32 txmode = 0;
1764 u32 rxmode = 0;
1765 u32 chan = 0;
1766 u8 qmode = 0;
1767
1768 if (rxfifosz == 0)
1769 rxfifosz = priv->dma_cap.rx_fifo_size;
1770 if (txfifosz == 0)
1771 txfifosz = priv->dma_cap.tx_fifo_size;
1772
1773 /* Adjust for real per queue fifo size */
1774 rxfifosz /= rx_channels_count;
1775 txfifosz /= tx_channels_count;
1776
1777 if (priv->plat->force_thresh_dma_mode) {
1778 txmode = tc;
1779 rxmode = tc;
1780 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1781 /*
1782 * In case of GMAC, SF mode can be enabled
1783 * to perform the TX COE in HW. This depends on:
1784                  * 1) TX COE being actually supported
1785                  * 2) there being no buggy Jumbo frame support
1786                  *    that requires not inserting the csum in the TDES.
1787 */
1788 txmode = SF_DMA_MODE;
1789 rxmode = SF_DMA_MODE;
1790 priv->xstats.threshold = SF_DMA_MODE;
1791 } else {
1792 txmode = tc;
1793 rxmode = SF_DMA_MODE;
1794 }
1795
1796 /* configure all channels */
1797 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1798 for (chan = 0; chan < rx_channels_count; chan++) {
1799 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1800
1801 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1802 rxfifosz, qmode);
1803 }
1804
1805 for (chan = 0; chan < tx_channels_count; chan++) {
1806 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1807
1808 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1809 txfifosz, qmode);
1810 }
1811 } else {
1812 stmmac_dma_mode(priv, priv->ioaddr, txmode, rxmode, rxfifosz);
1813 }
1814 }
1815
1816 /**
1817 * stmmac_tx_clean - to manage the transmission completion
1818 * @priv: driver private structure
1819 * @queue: TX queue index
1820 * Description: it reclaims the transmit resources after transmission completes.
1821 */
1822 static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1823 {
1824 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1825 unsigned int bytes_compl = 0, pkts_compl = 0;
1826 unsigned int entry;
1827
1828 netif_tx_lock(priv->dev);
1829
1830 priv->xstats.tx_clean++;
1831
1832 entry = tx_q->dirty_tx;
1833 while (entry != tx_q->cur_tx) {
1834 struct sk_buff *skb = tx_q->tx_skbuff[entry];
1835 struct dma_desc *p;
1836 int status;
1837
1838 if (priv->extend_desc)
1839 p = (struct dma_desc *)(tx_q->dma_etx + entry);
1840 else
1841 p = tx_q->dma_tx + entry;
1842
1843 status = stmmac_tx_status(priv, &priv->dev->stats,
1844 &priv->xstats, p, priv->ioaddr);
1845 /* Check if the descriptor is owned by the DMA */
1846 if (unlikely(status & tx_dma_own))
1847 break;
1848
1849 /* Make sure descriptor fields are read after reading
1850 * the own bit.
1851 */
1852 dma_rmb();
1853
1854 /* Just consider the last segment and ...*/
1855 if (likely(!(status & tx_not_ls))) {
1856 /* ... verify the status error condition */
1857 if (unlikely(status & tx_err)) {
1858 priv->dev->stats.tx_errors++;
1859 } else {
1860 priv->dev->stats.tx_packets++;
1861 priv->xstats.tx_pkt_n++;
1862 }
1863 stmmac_get_tx_hwtstamp(priv, p, skb);
1864 }
1865
1866 if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1867 if (tx_q->tx_skbuff_dma[entry].map_as_page)
1868 dma_unmap_page(priv->device,
1869 tx_q->tx_skbuff_dma[entry].buf,
1870 tx_q->tx_skbuff_dma[entry].len,
1871 DMA_TO_DEVICE);
1872 else
1873 dma_unmap_single(priv->device,
1874 tx_q->tx_skbuff_dma[entry].buf,
1875 tx_q->tx_skbuff_dma[entry].len,
1876 DMA_TO_DEVICE);
1877 tx_q->tx_skbuff_dma[entry].buf = 0;
1878 tx_q->tx_skbuff_dma[entry].len = 0;
1879 tx_q->tx_skbuff_dma[entry].map_as_page = false;
1880 }
1881
1882 stmmac_clean_desc3(priv, tx_q, p);
1883
1884 tx_q->tx_skbuff_dma[entry].last_segment = false;
1885 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1886
1887 if (likely(skb != NULL)) {
1888 pkts_compl++;
1889 bytes_compl += skb->len;
1890 dev_consume_skb_any(skb);
1891 tx_q->tx_skbuff[entry] = NULL;
1892 }
1893
1894 stmmac_release_tx_desc(priv, p, priv->mode);
1895
1896 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1897 }
1898 tx_q->dirty_tx = entry;
1899
1900 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1901 pkts_compl, bytes_compl);
1902
1903 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1904 queue))) &&
1905 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1906
1907 netif_dbg(priv, tx_done, priv->dev,
1908 "%s: restart transmit\n", __func__);
1909 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1910 }
1911
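/* Descriptors were reclaimed: if EEE is enabled and the TX path is not
 * already in LPI, try to enter low-power mode again and re-arm the EEE
 * control timer.
 */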
1912 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1913 stmmac_enable_eee_mode(priv);
1914 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1915 }
1916 netif_tx_unlock(priv->dev);
1917 }
1918
1919 /**
1920 * stmmac_tx_err - to manage the tx error
1921 * @priv: driver private structure
1922 * @chan: channel index
1923 * Description: it cleans the descriptors and restarts the transmission
1924 * in case of transmission errors.
1925 */
1926 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1927 {
1928 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1929 int i;
1930
1931 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1932
1933 stmmac_stop_tx_dma(priv, chan);
1934 dma_free_tx_skbufs(priv, chan);
1935 for (i = 0; i < DMA_TX_SIZE; i++)
1936 if (priv->extend_desc)
1937 stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1938 priv->mode, (i == DMA_TX_SIZE - 1));
1939 else
1940 stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1941 priv->mode, (i == DMA_TX_SIZE - 1));
1942 tx_q->dirty_tx = 0;
1943 tx_q->cur_tx = 0;
1944 tx_q->mss = 0;
1945 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1946 stmmac_start_tx_dma(priv, chan);
1947
1948 priv->dev->stats.tx_errors++;
1949 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1950 }
1951
1952 /**
1953 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1954 * @priv: driver private structure
1955 * @txmode: TX operating mode
1956 * @rxmode: RX operating mode
1957 * @chan: channel index
1958 * Description: it is used for configuring the DMA operation mode at
1959 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1960 * mode.
1961 */
1962 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1963 u32 rxmode, u32 chan)
1964 {
1965 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1966 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1967 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1968 u32 tx_channels_count = priv->plat->tx_queues_to_use;
1969 int rxfifosz = priv->plat->rx_fifo_size;
1970 int txfifosz = priv->plat->tx_fifo_size;
1971
1972 if (rxfifosz == 0)
1973 rxfifosz = priv->dma_cap.rx_fifo_size;
1974 if (txfifosz == 0)
1975 txfifosz = priv->dma_cap.tx_fifo_size;
1976
1977 /* Adjust for real per queue fifo size */
1978 rxfifosz /= rx_channels_count;
1979 txfifosz /= tx_channels_count;
1980
1981 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1982 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz,
1983 rxqmode);
1984 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz,
1985 txqmode);
1986 } else {
1987 stmmac_dma_mode(priv, priv->ioaddr, txmode, rxmode, rxfifosz);
1988 }
1989 }
1990
1991 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
1992 {
1993 int ret = false;
1994
1995 /* Safety features are only available in cores >= 5.10 */
1996 if (priv->synopsys_id < DWMAC_CORE_5_10)
1997 return ret;
1998 ret = stmmac_safety_feat_irq_status(priv, priv->dev,
1999 priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2000 if (ret && (ret != -EINVAL)) {
2001 stmmac_global_err(priv);
2002 return true;
2003 }
2004
2005 return false;
2006 }
2007
2008 /**
2009 * stmmac_dma_interrupt - DMA ISR
2010 * @priv: driver private structure
2011 * Description: this is the DMA ISR. It is called by the main ISR.
2012 * It calls the dwmac dma routine and schedule poll method in case of some
2013 * work can be done.
2014 */
2015 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2016 {
2017 u32 tx_channel_count = priv->plat->tx_queues_to_use;
2018 u32 rx_channel_count = priv->plat->rx_queues_to_use;
2019 u32 channels_to_check = tx_channel_count > rx_channel_count ?
2020 tx_channel_count : rx_channel_count;
2021 u32 chan;
2022 bool poll_scheduled = false;
2023 int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2024
2025 /* Make sure we never check beyond our status buffer. */
2026 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2027 channels_to_check = ARRAY_SIZE(status);
2028
2029 /* Each DMA channel can be used for rx and tx simultaneously, yet
2030 * napi_struct is embedded in struct stmmac_rx_queue rather than in a
2031 * stmmac_channel struct.
2032 * Because of this, stmmac_poll currently checks (and possibly wakes)
2033 * all tx queues rather than just a single tx queue.
2034 */
2035 for (chan = 0; chan < channels_to_check; chan++)
2036 status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2037 &priv->xstats, chan);
2038
2039 for (chan = 0; chan < rx_channel_count; chan++) {
2040 if (likely(status[chan] & handle_rx)) {
2041 struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2042
2043 if (likely(napi_schedule_prep(&rx_q->napi))) {
2044 stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2045 __napi_schedule(&rx_q->napi);
2046 poll_scheduled = true;
2047 }
2048 }
2049 }
2050
2051 /* If we scheduled poll, we already know that tx queues will be checked.
2052 * If we didn't schedule poll, see if any DMA channel (used by tx) has a
2053 * completed transmission, if so, call stmmac_poll (once).
2054 */
2055 if (!poll_scheduled) {
2056 for (chan = 0; chan < tx_channel_count; chan++) {
2057 if (status[chan] & handle_tx) {
2058 /* It doesn't matter what rx queue we choose
2059 * here. We use 0 since it always exists.
2060 */
2061 struct stmmac_rx_queue *rx_q =
2062 &priv->rx_queue[0];
2063
2064 if (likely(napi_schedule_prep(&rx_q->napi))) {
2065 stmmac_disable_dma_irq(priv,
2066 priv->ioaddr, chan);
2067 __napi_schedule(&rx_q->napi);
2068 }
2069 break;
2070 }
2071 }
2072 }
2073
2074 for (chan = 0; chan < tx_channel_count; chan++) {
2075 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2076 /* Try to bump up the dma threshold on this failure */
2077 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2078 (tc <= 256)) {
2079 tc += 64;
2080 if (priv->plat->force_thresh_dma_mode)
2081 stmmac_set_dma_operation_mode(priv,
2082 tc,
2083 tc,
2084 chan);
2085 else
2086 stmmac_set_dma_operation_mode(priv,
2087 tc,
2088 SF_DMA_MODE,
2089 chan);
2090 priv->xstats.threshold = tc;
2091 }
2092 } else if (unlikely(status[chan] == tx_hard_error)) {
2093 stmmac_tx_err(priv, chan);
2094 }
2095 }
2096 }
2097
2098 /**
2099 * stmmac_mmc_setup - setup the Mac Management Counters (MMC)
2100 * @priv: driver private structure
2101 * Description: this masks the MMC irq, in fact, the counters are managed in SW.
2102 */
2103 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2104 {
2105 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2106 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2107
2108 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2109 priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
2110 priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
2111 } else {
2112 priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
2113 priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
2114 }
2115
2116 dwmac_mmc_intr_all_mask(priv->mmcaddr);
2117
2118 if (priv->dma_cap.rmon) {
2119 dwmac_mmc_ctrl(priv->mmcaddr, mode);
2120 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2121 } else
2122 netdev_info(priv->dev, "No MAC Management Counters available\n");
2123 }
2124
2125 /**
2126 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2127 * @priv: driver private structure
2128 * Description:
2129 * new GMAC chip generations have a new register to indicate the
2130 * presence of the optional feature/functions.
2131 * This can be also used to override the value passed through the
2132 * platform and necessary for old MAC10/100 and GMAC chips.
2133 */
2134 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2135 {
2136 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2137 }
2138
2139 /**
2140 * stmmac_check_ether_addr - check if the MAC addr is valid
2141 * @priv: driver private structure
2142 * Description:
2143 * it verifies whether the MAC address is valid; in case of failure, it
2144 * generates a random MAC address
2145 */
2146 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2147 {
2148 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2149 stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2150 if (!is_valid_ether_addr(priv->dev->dev_addr))
2151 eth_hw_addr_random(priv->dev);
2152 netdev_info(priv->dev, "device MAC address %pM\n",
2153 priv->dev->dev_addr);
2154 }
2155 }
2156
2157 /**
2158 * stmmac_init_dma_engine - DMA init.
2159 * @priv: driver private structure
2160 * Description:
2161 * It inits the DMA invoking the specific MAC/GMAC callback.
2162 * Some DMA parameters can be passed from the platform;
2163 * in case these are not passed, a default is used for the MAC or GMAC.
2164 */
2165 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2166 {
2167 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2168 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2169 struct stmmac_rx_queue *rx_q;
2170 struct stmmac_tx_queue *tx_q;
2171 u32 dummy_dma_rx_phy = 0;
2172 u32 dummy_dma_tx_phy = 0;
2173 u32 chan = 0;
2174 int atds = 0;
2175 int ret = 0;
2176
2177 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2178 dev_err(priv->device, "Invalid DMA configuration\n");
2179 return -EINVAL;
2180 }
2181
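/* ATDS (Alternate Descriptor Size) tells the DMA engine that the larger,
 * extended descriptor layout is in use; it is only set when extended
 * descriptors are used in ring mode.
 */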
2182 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2183 atds = 1;
2184
2185 ret = stmmac_reset(priv, priv->ioaddr);
2186 if (ret) {
2187 dev_err(priv->device, "Failed to reset the dma\n");
2188 return ret;
2189 }
2190
2191 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2192 /* DMA Configuration */
2193 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg,
2194 dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
2195
2196 /* DMA RX Channel Configuration */
2197 for (chan = 0; chan < rx_channels_count; chan++) {
2198 rx_q = &priv->rx_queue[chan];
2199
2200 stmmac_init_rx_chan(priv, priv->ioaddr,
2201 priv->plat->dma_cfg, rx_q->dma_rx_phy,
2202 chan);
2203
2204 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2205 (DMA_RX_SIZE * sizeof(struct dma_desc));
2206 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2207 rx_q->rx_tail_addr, chan);
2208 }
2209
2210 /* DMA TX Channel Configuration */
2211 for (chan = 0; chan < tx_channels_count; chan++) {
2212 tx_q = &priv->tx_queue[chan];
2213
2214 stmmac_init_chan(priv, priv->ioaddr,
2215 priv->plat->dma_cfg, chan);
2216
2217 stmmac_init_tx_chan(priv, priv->ioaddr,
2218 priv->plat->dma_cfg, tx_q->dma_tx_phy,
2219 chan);
2220
2221 tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2222 (DMA_TX_SIZE * sizeof(struct dma_desc));
2223 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2224 tx_q->tx_tail_addr, chan);
2225 }
2226 } else {
2227 rx_q = &priv->rx_queue[chan];
2228 tx_q = &priv->tx_queue[chan];
2229 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg,
2230 tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
2231 }
2232
2233 if (priv->plat->axi)
2234 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2235
2236 return ret;
2237 }
2238
2239 /**
2240 * stmmac_tx_timer - mitigation sw timer for tx.
2241 * @t: pointer to the timer_list (priv->txtimer)
2242 * Description:
2243 * This is the timer handler to directly invoke the stmmac_tx_clean.
2244 */
2245 static void stmmac_tx_timer(struct timer_list *t)
2246 {
2247 struct stmmac_priv *priv = from_timer(priv, t, txtimer);
2248 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2249 u32 queue;
2250
2251 /* let's scan all the tx queues */
2252 for (queue = 0; queue < tx_queues_count; queue++)
2253 stmmac_tx_clean(priv, queue);
2254 }
2255
2256 /**
2257 * stmmac_init_tx_coalesce - init tx mitigation options.
2258 * @priv: driver private structure
2259 * Description:
2260 * This inits the transmit coalesce parameters: i.e. timer rate,
2261 * timer handler and default threshold used for enabling the
2262 * interrupt on completion bit.
2263 */
2264 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2265 {
2266 priv->tx_coal_frames = STMMAC_TX_FRAMES;
2267 priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2268 timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
2269 priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
2270 add_timer(&priv->txtimer);
2271 }
2272
2273 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2274 {
2275 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2276 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2277 u32 chan;
2278
2279 /* set TX ring length */
2280 for (chan = 0; chan < tx_channels_count; chan++)
2281 stmmac_set_tx_ring_len(priv, priv->ioaddr,
2282 (DMA_TX_SIZE - 1), chan);
2283
2284 /* set RX ring length */
2285 for (chan = 0; chan < rx_channels_count; chan++)
2286 stmmac_set_rx_ring_len(priv, priv->ioaddr,
2287 (DMA_RX_SIZE - 1), chan);
2288 }
2289
2290 /**
2291 * stmmac_set_tx_queue_weight - Set TX queue weight
2292 * @priv: driver private structure
2293 * Description: It is used for setting the TX queue weights
2294 */
2295 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2296 {
2297 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2298 u32 weight;
2299 u32 queue;
2300
2301 for (queue = 0; queue < tx_queues_count; queue++) {
2302 weight = priv->plat->tx_queues_cfg[queue].weight;
2303 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2304 }
2305 }
2306
2307 /**
2308 * stmmac_configure_cbs - Configure CBS in TX queue
2309 * @priv: driver private structure
2310 * Description: It is used for configuring CBS in AVB TX queues
2311 */
2312 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2313 {
2314 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2315 u32 mode_to_use;
2316 u32 queue;
2317
2318 /* queue 0 is reserved for legacy traffic */
2319 for (queue = 1; queue < tx_queues_count; queue++) {
2320 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2321 if (mode_to_use == MTL_QUEUE_DCB)
2322 continue;
2323
2324 stmmac_config_cbs(priv, priv->hw,
2325 priv->plat->tx_queues_cfg[queue].send_slope,
2326 priv->plat->tx_queues_cfg[queue].idle_slope,
2327 priv->plat->tx_queues_cfg[queue].high_credit,
2328 priv->plat->tx_queues_cfg[queue].low_credit,
2329 queue);
2330 }
2331 }
2332
2333 /**
2334 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2335 * @priv: driver private structure
2336 * Description: It is used for mapping RX queues to RX dma channels
2337 */
2338 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2339 {
2340 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2341 u32 queue;
2342 u32 chan;
2343
2344 for (queue = 0; queue < rx_queues_count; queue++) {
2345 chan = priv->plat->rx_queues_cfg[queue].chan;
2346 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2347 }
2348 }
2349
2350 /**
2351 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2352 * @priv: driver private structure
2353 * Description: It is used for configuring the RX Queue Priority
2354 */
2355 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2356 {
2357 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2358 u32 queue;
2359 u32 prio;
2360
2361 for (queue = 0; queue < rx_queues_count; queue++) {
2362 if (!priv->plat->rx_queues_cfg[queue].use_prio)
2363 continue;
2364
2365 prio = priv->plat->rx_queues_cfg[queue].prio;
2366 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2367 }
2368 }
2369
2370 /**
2371 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2372 * @priv: driver private structure
2373 * Description: It is used for configuring the TX Queue Priority
2374 */
2375 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2376 {
2377 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2378 u32 queue;
2379 u32 prio;
2380
2381 for (queue = 0; queue < tx_queues_count; queue++) {
2382 if (!priv->plat->tx_queues_cfg[queue].use_prio)
2383 continue;
2384
2385 prio = priv->plat->tx_queues_cfg[queue].prio;
2386 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2387 }
2388 }
2389
2390 /**
2391 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2392 * @priv: driver private structure
2393 * Description: It is used for configuring the RX queue routing
2394 */
2395 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2396 {
2397 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2398 u32 queue;
2399 u8 packet;
2400
2401 for (queue = 0; queue < rx_queues_count; queue++) {
2402 /* no specific packet type routing specified for the queue */
2403 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2404 continue;
2405
2406 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2407 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2408 }
2409 }
2410
2411 /**
2412 * stmmac_mtl_configuration - Configure MTL
2413 * @priv: driver private structure
2414 * Description: It is used for configuring MTL
2415 */
2416 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2417 {
2418 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2419 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2420
2421 if (tx_queues_count > 1)
2422 stmmac_set_tx_queue_weight(priv);
2423
2424 /* Configure MTL RX algorithms */
2425 if (rx_queues_count > 1)
2426 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2427 priv->plat->rx_sched_algorithm);
2428
2429 /* Configure MTL TX algorithms */
2430 if (tx_queues_count > 1)
2431 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2432 priv->plat->tx_sched_algorithm);
2433
2434 /* Configure CBS in AVB TX queues */
2435 if (tx_queues_count > 1)
2436 stmmac_configure_cbs(priv);
2437
2438 /* Map RX MTL to DMA channels */
2439 stmmac_rx_queue_dma_chan_map(priv);
2440
2441 /* Enable MAC RX Queues */
2442 stmmac_mac_enable_rx_queues(priv);
2443
2444 /* Set RX priorities */
2445 if (rx_queues_count > 1)
2446 stmmac_mac_config_rx_queues_prio(priv);
2447
2448 /* Set TX priorities */
2449 if (tx_queues_count > 1)
2450 stmmac_mac_config_tx_queues_prio(priv);
2451
2452 /* Set RX routing */
2453 if (rx_queues_count > 1)
2454 stmmac_mac_config_rx_queues_routing(priv);
2455 }
2456
2457 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2458 {
2459 if (priv->dma_cap.asp) {
2460 netdev_info(priv->dev, "Enabling Safety Features\n");
2461 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2462 } else {
2463 netdev_info(priv->dev, "No Safety Features support found\n");
2464 }
2465 }
2466
2467 /**
2468 * stmmac_hw_setup - setup mac in a usable state.
2469 * @dev : pointer to the device structure.
2470 * @init_ptp: initialize the PTP clock and timestamping if set
2471 * Description: this is the main function to setup the HW in a usable
2472 * state: the dma engine is reset, the core registers are configured
2473 * (e.g. AXI, Checksum features, timers) and the DMA is ready to start
2474 * receiving and transmitting.
2475 * Return value:
2476 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2477 * file on failure.
2478 */
2479 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2480 {
2481 struct stmmac_priv *priv = netdev_priv(dev);
2482 u32 rx_cnt = priv->plat->rx_queues_to_use;
2483 u32 tx_cnt = priv->plat->tx_queues_to_use;
2484 u32 chan;
2485 int ret;
2486
2487 /* DMA initialization and SW reset */
2488 ret = stmmac_init_dma_engine(priv);
2489 if (ret < 0) {
2490 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2491 __func__);
2492 return ret;
2493 }
2494
2495 /* Copy the MAC addr into the HW */
2496 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2497
2498 /* PS and related bits will be programmed according to the speed */
2499 if (priv->hw->pcs) {
2500 int speed = priv->plat->mac_port_sel_speed;
2501
2502 if ((speed == SPEED_10) || (speed == SPEED_100) ||
2503 (speed == SPEED_1000)) {
2504 priv->hw->ps = speed;
2505 } else {
2506 dev_warn(priv->device, "invalid port speed\n");
2507 priv->hw->ps = 0;
2508 }
2509 }
2510
2511 /* Initialize the MAC Core */
2512 stmmac_core_init(priv, priv->hw, dev);
2513
2514 /* Initialize MTL*/
2515 if (priv->synopsys_id >= DWMAC_CORE_4_00)
2516 stmmac_mtl_configuration(priv);
2517
2518 /* Initialize Safety Features */
2519 if (priv->synopsys_id >= DWMAC_CORE_5_10)
2520 stmmac_safety_feat_configuration(priv);
2521
2522 ret = stmmac_rx_ipc(priv, priv->hw);
2523 if (!ret) {
2524 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2525 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2526 priv->hw->rx_csum = 0;
2527 }
2528
2529 /* Enable the MAC Rx/Tx */
2530 stmmac_mac_set(priv, priv->ioaddr, true);
2531
2532 /* Set the HW DMA mode and the COE */
2533 stmmac_dma_operation_mode(priv);
2534
2535 stmmac_mmc_setup(priv);
2536
2537 if (init_ptp) {
2538 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2539 if (ret < 0)
2540 netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2541
2542 ret = stmmac_init_ptp(priv);
2543 if (ret == -EOPNOTSUPP)
2544 netdev_warn(priv->dev, "PTP not supported by HW\n");
2545 else if (ret)
2546 netdev_warn(priv->dev, "PTP init failed\n");
2547 }
2548
2549 #ifdef CONFIG_DEBUG_FS
2550 ret = stmmac_init_fs(dev);
2551 if (ret < 0)
2552 netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2553 __func__);
2554 #endif
2555 /* Start the ball rolling... */
2556 stmmac_start_all_dma(priv);
2557
2558 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2559
2560 if (priv->use_riwt) {
2561 ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2562 if (!ret)
2563 priv->rx_riwt = MAX_DMA_RIWT;
2564 }
2565
2566 if (priv->hw->pcs)
2567 stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
2568
2569 /* set TX and RX rings length */
2570 stmmac_set_rings_length(priv);
2571
2572 /* Enable TSO */
2573 if (priv->tso) {
2574 for (chan = 0; chan < tx_cnt; chan++)
2575 stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2576 }
2577
2578 return 0;
2579 }
2580
2581 static void stmmac_hw_teardown(struct net_device *dev)
2582 {
2583 struct stmmac_priv *priv = netdev_priv(dev);
2584
2585 clk_disable_unprepare(priv->plat->clk_ptp_ref);
2586 }
2587
2588 /**
2589 * stmmac_open - open entry point of the driver
2590 * @dev : pointer to the device structure.
2591 * Description:
2592 * This function is the open entry point of the driver.
2593 * Return value:
2594 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2595 * file on failure.
2596 */
2597 static int stmmac_open(struct net_device *dev)
2598 {
2599 struct stmmac_priv *priv = netdev_priv(dev);
2600 int ret;
2601
2602 stmmac_check_ether_addr(priv);
2603
2604 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2605 priv->hw->pcs != STMMAC_PCS_TBI &&
2606 priv->hw->pcs != STMMAC_PCS_RTBI) {
2607 ret = stmmac_init_phy(dev);
2608 if (ret) {
2609 netdev_err(priv->dev,
2610 "%s: Cannot attach to PHY (error: %d)\n",
2611 __func__, ret);
2612 return ret;
2613 }
2614 }
2615
2616 /* Extra statistics */
2617 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2618 priv->xstats.threshold = tc;
2619
2620 priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2621 priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2622
2623 ret = alloc_dma_desc_resources(priv);
2624 if (ret < 0) {
2625 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2626 __func__);
2627 goto dma_desc_error;
2628 }
2629
2630 ret = init_dma_desc_rings(dev, GFP_KERNEL);
2631 if (ret < 0) {
2632 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2633 __func__);
2634 goto init_error;
2635 }
2636
2637 ret = stmmac_hw_setup(dev, true);
2638 if (ret < 0) {
2639 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2640 goto init_error;
2641 }
2642
2643 stmmac_init_tx_coalesce(priv);
2644
2645 if (dev->phydev)
2646 phy_start(dev->phydev);
2647
2648 /* Request the IRQ lines */
2649 ret = request_irq(dev->irq, stmmac_interrupt,
2650 IRQF_SHARED, dev->name, dev);
2651 if (unlikely(ret < 0)) {
2652 netdev_err(priv->dev,
2653 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2654 __func__, dev->irq, ret);
2655 goto irq_error;
2656 }
2657
2658 /* Request the Wake IRQ in case another line is used for WoL */
2659 if (priv->wol_irq != dev->irq) {
2660 ret = request_irq(priv->wol_irq, stmmac_interrupt,
2661 IRQF_SHARED, dev->name, dev);
2662 if (unlikely(ret < 0)) {
2663 netdev_err(priv->dev,
2664 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2665 __func__, priv->wol_irq, ret);
2666 goto wolirq_error;
2667 }
2668 }
2669
2670 /* Request the LPI IRQ in case a separate line is used for it */
2671 if (priv->lpi_irq > 0) {
2672 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2673 dev->name, dev);
2674 if (unlikely(ret < 0)) {
2675 netdev_err(priv->dev,
2676 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2677 __func__, priv->lpi_irq, ret);
2678 goto lpiirq_error;
2679 }
2680 }
2681
2682 stmmac_enable_all_queues(priv);
2683 stmmac_start_all_queues(priv);
2684
2685 return 0;
2686
2687 lpiirq_error:
2688 if (priv->wol_irq != dev->irq)
2689 free_irq(priv->wol_irq, dev);
2690 wolirq_error:
2691 free_irq(dev->irq, dev);
2692 irq_error:
2693 if (dev->phydev)
2694 phy_stop(dev->phydev);
2695
2696 del_timer_sync(&priv->txtimer);
2697 stmmac_hw_teardown(dev);
2698 init_error:
2699 free_dma_desc_resources(priv);
2700 dma_desc_error:
2701 if (dev->phydev)
2702 phy_disconnect(dev->phydev);
2703
2704 return ret;
2705 }
2706
2707 /**
2708 * stmmac_release - close entry point of the driver
2709 * @dev : device pointer.
2710 * Description:
2711 * This is the stop entry point of the driver.
2712 */
2713 static int stmmac_release(struct net_device *dev)
2714 {
2715 struct stmmac_priv *priv = netdev_priv(dev);
2716
2717 if (priv->eee_enabled)
2718 del_timer_sync(&priv->eee_ctrl_timer);
2719
2720 /* Stop and disconnect the PHY */
2721 if (dev->phydev) {
2722 phy_stop(dev->phydev);
2723 phy_disconnect(dev->phydev);
2724 }
2725
2726 stmmac_stop_all_queues(priv);
2727
2728 stmmac_disable_all_queues(priv);
2729
2730 del_timer_sync(&priv->txtimer);
2731
2732 /* Free the IRQ lines */
2733 free_irq(dev->irq, dev);
2734 if (priv->wol_irq != dev->irq)
2735 free_irq(priv->wol_irq, dev);
2736 if (priv->lpi_irq > 0)
2737 free_irq(priv->lpi_irq, dev);
2738
2739 /* Stop TX/RX DMA and clear the descriptors */
2740 stmmac_stop_all_dma(priv);
2741
2742 /* Release and free the Rx/Tx resources */
2743 free_dma_desc_resources(priv);
2744
2745 /* Disable the MAC Rx/Tx */
2746 stmmac_mac_set(priv, priv->ioaddr, false);
2747
2748 netif_carrier_off(dev);
2749
2750 #ifdef CONFIG_DEBUG_FS
2751 stmmac_exit_fs(dev);
2752 #endif
2753
2754 stmmac_release_ptp(priv);
2755
2756 return 0;
2757 }
2758
2759 /**
2760 * stmmac_tso_allocator - fill descriptors with the TSO payload
2761 * @priv: driver private structure
2762 * @des: buffer start address
2763 * @total_len: total length to fill in descriptors
2764 * @last_segment: condition for the last descriptor
2765 * @queue: TX queue index
2766 * Description:
2767 * This function fills as many descriptors as needed, splitting the payload
2768 * into chunks of at most TSO_MAX_BUFF_SIZE bytes.
2769 */
2770 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2771 int total_len, bool last_segment, u32 queue)
2772 {
2773 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2774 struct dma_desc *desc;
2775 u32 buff_size;
2776 int tmp_len;
2777
2778 tmp_len = total_len;
2779
2780 while (tmp_len > 0) {
2781 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2782 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2783 desc = tx_q->dma_tx + tx_q->cur_tx;
2784
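/* Each descriptor covers at most TSO_MAX_BUFF_SIZE bytes of the payload,
 * walking through the buffer mapped at @des.
 */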
2785 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2786 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2787 TSO_MAX_BUFF_SIZE : tmp_len;
2788
2789 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2790 0, 1,
2791 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2792 0, 0);
2793
2794 tmp_len -= TSO_MAX_BUFF_SIZE;
2795 }
2796 }
2797
2798 /**
2799 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2800 * @skb : the socket buffer
2801 * @dev : device pointer
2802 * Description: this is the transmit function that is called on TSO frames
2803 * (support available on GMAC4 and newer chips).
2804 * The diagram below shows the ring programming in case of TSO frames:
2805 *
2806 * First Descriptor
2807 * --------
2808 * | DES0 |---> buffer1 = L2/L3/L4 header
2809 * | DES1 |---> TCP Payload (can continue on next descr...)
2810 * | DES2 |---> buffer 1 and 2 len
2811 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2812 * --------
2813 * |
2814 * ...
2815 * |
2816 * --------
2817 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
2818 * | DES1 | --|
2819 * | DES2 | --> buffer 1 and 2 len
2820 * | DES3 |
2821 * --------
2822 *
2823 * The MSS is fixed while TSO is enabled, so the TDES3 ctx field only needs programming when it changes.
2824 */
2825 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2826 {
2827 struct dma_desc *desc, *first, *mss_desc = NULL;
2828 struct stmmac_priv *priv = netdev_priv(dev);
2829 int nfrags = skb_shinfo(skb)->nr_frags;
2830 u32 queue = skb_get_queue_mapping(skb);
2831 unsigned int first_entry, des;
2832 struct stmmac_tx_queue *tx_q;
2833 int tmp_pay_len = 0;
2834 u32 pay_len, mss;
2835 u8 proto_hdr_len;
2836 int i;
2837
2838 tx_q = &priv->tx_queue[queue];
2839
2840 /* Compute header lengths */
2841 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2842
2843 /* Desc availability based on the threshold should be safe enough */
2844 if (unlikely(stmmac_tx_avail(priv, queue) <
2845 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2846 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2847 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2848 queue));
2849 /* This is a hard error, log it. */
2850 netdev_err(priv->dev,
2851 "%s: Tx Ring full when queue awake\n",
2852 __func__);
2853 }
2854 return NETDEV_TX_BUSY;
2855 }
2856
2857 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2858
2859 mss = skb_shinfo(skb)->gso_size;
2860
2861 /* set new MSS value if needed */
2862 if (mss != tx_q->mss) {
2863 mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2864 stmmac_set_mss(priv, mss_desc, mss);
2865 tx_q->mss = mss;
2866 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2867 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2868 }
2869
2870 if (netif_msg_tx_queued(priv)) {
2871 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2872 __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2873 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2874 skb->data_len);
2875 }
2876
2877 first_entry = tx_q->cur_tx;
2878 WARN_ON(tx_q->tx_skbuff[first_entry]);
2879
2880 desc = tx_q->dma_tx + first_entry;
2881 first = desc;
2882
2883 /* first descriptor: fill Headers on Buf1 */
2884 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2885 DMA_TO_DEVICE);
2886 if (dma_mapping_error(priv->device, des))
2887 goto dma_map_err;
2888
2889 tx_q->tx_skbuff_dma[first_entry].buf = des;
2890 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2891
2892 first->des0 = cpu_to_le32(des);
2893
2894 /* Fill start of payload in buff2 of first descriptor */
2895 if (pay_len)
2896 first->des1 = cpu_to_le32(des + proto_hdr_len);
2897
2898 /* If needed take extra descriptors to fill the remaining payload */
2899 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2900
2901 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2902
2903 /* Prepare fragments */
2904 for (i = 0; i < nfrags; i++) {
2905 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2906
2907 des = skb_frag_dma_map(priv->device, frag, 0,
2908 skb_frag_size(frag),
2909 DMA_TO_DEVICE);
2910 if (dma_mapping_error(priv->device, des))
2911 goto dma_map_err;
2912
2913 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2914 (i == nfrags - 1), queue);
2915
2916 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2917 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2918 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2919 }
2920
2921 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2922
2923 /* Only the last descriptor gets to point to the skb. */
2924 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2925
2926 /* We've used all descriptors we need for this skb, however,
2927 * advance cur_tx so that it references a fresh descriptor.
2928 * ndo_start_xmit will fill this descriptor the next time it's
2929 * called and stmmac_tx_clean may clean up to this descriptor.
2930 */
2931 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2932
2933 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2934 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2935 __func__);
2936 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2937 }
2938
2939 dev->stats.tx_bytes += skb->len;
2940 priv->xstats.tx_tso_frames++;
2941 priv->xstats.tx_tso_nfrags += nfrags;
2942
2943 /* Manage tx mitigation */
2944 priv->tx_count_frames += nfrags + 1;
2945 if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2946 mod_timer(&priv->txtimer,
2947 STMMAC_COAL_TIMER(priv->tx_coal_timer));
2948 } else {
2949 priv->tx_count_frames = 0;
2950 stmmac_set_tx_ic(priv, desc);
2951 priv->xstats.tx_set_ic_bit++;
2952 }
2953
2954 skb_tx_timestamp(skb);
2955
2956 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2957 priv->hwts_tx_en)) {
2958 /* declare that device is doing timestamping */
2959 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2960 stmmac_enable_tx_timestamp(priv, first);
2961 }
2962
2963 /* Complete the first descriptor before granting the DMA */
2964 stmmac_prepare_tso_tx_desc(priv, first, 1,
2965 proto_hdr_len,
2966 pay_len,
2967 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2968 tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2969
2970 /* If context desc is used to change MSS */
2971 if (mss_desc) {
2972 /* Make sure that first descriptor has been completely
2973 * written, including its own bit. This is because MSS is
2974 * actually before first descriptor, so we need to make
2975 * sure that MSS's own bit is the last thing written.
2976 */
2977 dma_wmb();
2978 stmmac_set_tx_owner(priv, mss_desc);
2979 }
2980
2981 /* The own bit must be the latest setting done when preparing the
2982 * descriptor and then barrier is needed to make sure that
2983 * all is coherent before granting the DMA engine.
2984 */
2985 wmb();
2986
2987 if (netif_msg_pktdata(priv)) {
2988 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2989 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2990 tx_q->cur_tx, first, nfrags);
2991
2992 stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
2993
2994 pr_info(">>> frame to be transmitted: ");
2995 print_pkt(skb->data, skb_headlen(skb));
2996 }
2997
2998 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2999
3000 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3001
3002 return NETDEV_TX_OK;
3003
3004 dma_map_err:
3005 dev_err(priv->device, "Tx dma map failed\n");
3006 dev_kfree_skb(skb);
3007 priv->dev->stats.tx_dropped++;
3008 return NETDEV_TX_OK;
3009 }
3010
3011 /**
3012 * stmmac_xmit - Tx entry point of the driver
3013 * @skb : the socket buffer
3014 * @dev : device pointer
3015 * Description : this is the tx entry point of the driver.
3016 * It programs the chain or the ring and supports oversized frames
3017 * and SG feature.
3018 */
3019 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3020 {
3021 struct stmmac_priv *priv = netdev_priv(dev);
3022 unsigned int nopaged_len = skb_headlen(skb);
3023 int i, csum_insertion = 0, is_jumbo = 0;
3024 u32 queue = skb_get_queue_mapping(skb);
3025 int nfrags = skb_shinfo(skb)->nr_frags;
3026 int entry;
3027 unsigned int first_entry;
3028 struct dma_desc *desc, *first;
3029 struct stmmac_tx_queue *tx_q;
3030 unsigned int enh_desc;
3031 unsigned int des;
3032
3033 tx_q = &priv->tx_queue[queue];
3034
3035 /* Manage oversized TCP frames for GMAC4 device */
3036 if (skb_is_gso(skb) && priv->tso) {
3037 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3038 return stmmac_tso_xmit(skb, dev);
3039 }
3040
3041 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3042 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3043 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3044 queue));
3045 /* This is a hard error, log it. */
3046 netdev_err(priv->dev,
3047 "%s: Tx Ring full when queue awake\n",
3048 __func__);
3049 }
3050 return NETDEV_TX_BUSY;
3051 }
3052
3053 if (priv->tx_path_in_lpi_mode)
3054 stmmac_disable_eee_mode(priv);
3055
3056 entry = tx_q->cur_tx;
3057 first_entry = entry;
3058 WARN_ON(tx_q->tx_skbuff[first_entry]);
3059
3060 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3061
3062 if (likely(priv->extend_desc))
3063 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3064 else
3065 desc = tx_q->dma_tx + entry;
3066
3067 first = desc;
3068
3069 enh_desc = priv->plat->enh_desc;
3070 /* To program the descriptors according to the size of the frame */
3071 if (enh_desc)
3072 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
3073
3074 if (unlikely(is_jumbo) && likely(priv->synopsys_id <
3075 DWMAC_CORE_4_00)) {
3076 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
3077 if (unlikely(entry < 0))
3078 goto dma_map_err;
3079 }
3080
3081 for (i = 0; i < nfrags; i++) {
3082 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3083 int len = skb_frag_size(frag);
3084 bool last_segment = (i == (nfrags - 1));
3085
3086 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3087 WARN_ON(tx_q->tx_skbuff[entry]);
3088
3089 if (likely(priv->extend_desc))
3090 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3091 else
3092 desc = tx_q->dma_tx + entry;
3093
3094 des = skb_frag_dma_map(priv->device, frag, 0, len,
3095 DMA_TO_DEVICE);
3096 if (dma_mapping_error(priv->device, des))
3097 goto dma_map_err; /* should reuse desc w/o issues */
3098
3099 tx_q->tx_skbuff_dma[entry].buf = des;
3100
3101 stmmac_set_desc_addr(priv, desc, des);
3102
3103 tx_q->tx_skbuff_dma[entry].map_as_page = true;
3104 tx_q->tx_skbuff_dma[entry].len = len;
3105 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3106
3107 /* Prepare the descriptor and set the own bit too */
3108 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3109 priv->mode, 1, last_segment, skb->len);
3110 }
3111
3112 /* Only the last descriptor gets to point to the skb. */
3113 tx_q->tx_skbuff[entry] = skb;
3114
3115 /* We've used all descriptors we need for this skb, however,
3116 * advance cur_tx so that it references a fresh descriptor.
3117 * ndo_start_xmit will fill this descriptor the next time it's
3118 * called and stmmac_tx_clean may clean up to this descriptor.
3119 */
3120 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3121 tx_q->cur_tx = entry;
3122
3123 if (netif_msg_pktdata(priv)) {
3124 void *tx_head;
3125
3126 netdev_dbg(priv->dev,
3127 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3128 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3129 entry, first, nfrags);
3130
3131 if (priv->extend_desc)
3132 tx_head = (void *)tx_q->dma_etx;
3133 else
3134 tx_head = (void *)tx_q->dma_tx;
3135
3136 stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
3137
3138 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3139 print_pkt(skb->data, skb->len);
3140 }
3141
3142 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3143 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3144 __func__);
3145 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3146 }
3147
3148 dev->stats.tx_bytes += skb->len;
3149
3150 /* According to the coalesce parameter the IC bit for the latest
3151 * segment is reset and the timer re-started to clean the tx status.
3152 * This approach takes care of the fragments: desc is the first
3153 * element in case of no SG.
3154 */
3155 priv->tx_count_frames += nfrags + 1;
3156 if (likely(priv->tx_coal_frames > priv->tx_count_frames) &&
3157 !priv->tx_timer_armed) {
3158 mod_timer(&priv->txtimer,
3159 STMMAC_COAL_TIMER(priv->tx_coal_timer));
3160 priv->tx_timer_armed = true;
3161 } else {
3162 priv->tx_count_frames = 0;
3163 stmmac_set_tx_ic(priv, desc);
3164 priv->xstats.tx_set_ic_bit++;
3165 priv->tx_timer_armed = false;
3166 }
3167
3168 skb_tx_timestamp(skb);
3169
3170 /* Ready to fill the first descriptor and set the OWN bit w/o any
3171 * problems because all the descriptors are actually ready to be
3172 * passed to the DMA engine.
3173 */
3174 if (likely(!is_jumbo)) {
3175 bool last_segment = (nfrags == 0);
3176
3177 des = dma_map_single(priv->device, skb->data,
3178 nopaged_len, DMA_TO_DEVICE);
3179 if (dma_mapping_error(priv->device, des))
3180 goto dma_map_err;
3181
3182 tx_q->tx_skbuff_dma[first_entry].buf = des;
3183
3184 stmmac_set_desc_addr(priv, first, des);
3185
3186 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3187 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3188
3189 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3190 priv->hwts_tx_en)) {
3191 /* declare that device is doing timestamping */
3192 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3193 stmmac_enable_tx_timestamp(priv, first);
3194 }
3195
3196 /* Prepare the first descriptor setting the OWN bit too */
3197 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3198 csum_insertion, priv->mode, 1, last_segment,
3199 skb->len);
3200
3201 /* The own bit must be the latest setting done when preparing the
3202 * descriptor and then barrier is needed to make sure that
3203 * all is coherent before granting the DMA engine.
3204 */
3205 wmb();
3206 }
3207
3208 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3209
3210 if (priv->synopsys_id < DWMAC_CORE_4_00)
3211 stmmac_enable_dma_transmission(priv, priv->ioaddr);
3212 else
3213 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr,
3214 queue);
3215
3216 return NETDEV_TX_OK;
3217
3218 dma_map_err:
3219 netdev_err(priv->dev, "Tx DMA map failed\n");
3220 dev_kfree_skb(skb);
3221 priv->dev->stats.tx_dropped++;
3222 return NETDEV_TX_OK;
3223 }
3224
3225 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3226 {
3227 struct ethhdr *ehdr;
3228 u16 vlanid;
3229
3230 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
3231 NETIF_F_HW_VLAN_CTAG_RX &&
3232 !__vlan_get_tag(skb, &vlanid)) {
3233 /* pop the vlan tag */
3234 ehdr = (struct ethhdr *)skb->data;
3235 memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
3236 skb_pull(skb, VLAN_HLEN);
3237 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
3238 }
3239 }
3240
3241
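/* Returns 1 once rx_zeroc_thresh has reached STMMAC_RX_THRESH (it is raised
 * when skb allocation fails in stmmac_rx_refill), which makes the RX path
 * fall back to copying frames for a while.
 */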
3242 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3243 {
3244 if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3245 return 0;
3246
3247 return 1;
3248 }
3249
3250 /**
3251 * stmmac_rx_refill - refill used skb preallocated buffers
3252 * @priv: driver private structure
3253 * @queue: RX queue index
3254 * Description : this reallocates the skbs used by the reception process
3255 * that is based on zero-copy.
3256 */
3257 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3258 {
3259 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3260 int dirty = stmmac_rx_dirty(priv, queue);
3261 unsigned int entry = rx_q->dirty_rx;
3262
3263 int bfsize = priv->dma_buf_sz;
3264
3265 while (dirty-- > 0) {
3266 struct dma_desc *p;
3267
3268 if (priv->extend_desc)
3269 p = (struct dma_desc *)(rx_q->dma_erx + entry);
3270 else
3271 p = rx_q->dma_rx + entry;
3272
3273 if (likely(!rx_q->rx_skbuff[entry])) {
3274 struct sk_buff *skb;
3275
3276 skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3277 if (unlikely(!skb)) {
3278 /* so for a while no zero-copy! */
3279 rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3280 if (unlikely(net_ratelimit()))
3281 dev_err(priv->device,
3282 "fail to alloc skb entry %d\n",
3283 entry);
3284 break;
3285 }
3286
3287 rx_q->rx_skbuff[entry] = skb;
3288 rx_q->rx_skbuff_dma[entry] =
3289 dma_map_single(priv->device, skb->data, bfsize,
3290 DMA_FROM_DEVICE);
3291 if (dma_mapping_error(priv->device,
3292 rx_q->rx_skbuff_dma[entry])) {
3293 netdev_err(priv->dev, "Rx DMA map failed\n");
3294 dev_kfree_skb(skb);
3295 break;
3296 }
3297
3298 stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
3299 stmmac_refill_desc3(priv, rx_q, p);
3300
3301 if (rx_q->rx_zeroc_thresh > 0)
3302 rx_q->rx_zeroc_thresh--;
3303
3304 netif_dbg(priv, rx_status, priv->dev,
3305 "refill entry #%d\n", entry);
3306 }
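/* Ensure the (re)filled buffer address is written to the descriptor
 * before ownership is handed back to the DMA below.
 */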
3307 dma_wmb();
3308
3309 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3310 stmmac_init_rx_desc(priv, p, priv->use_riwt, 0, 0);
3311 else
3312 stmmac_set_rx_owner(priv, p);
3313
3314 dma_wmb();
3315
3316 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3317 }
3318 rx_q->dirty_rx = entry;
3319 }
3320
3321 /**
3322 * stmmac_rx - manage the receive process
3323 * @priv: driver private structure
3324 * @limit: napi budget
3325 * @queue: RX queue index.
3326 * Description : this is the function called by the napi poll method.
3327 * It gets all the frames inside the ring.
3328 */
3329 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3330 {
3331 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3332 unsigned int entry = rx_q->cur_rx;
3333 int coe = priv->hw->rx_csum;
3334 unsigned int next_entry;
3335 unsigned int count = 0;
3336
3337 if (netif_msg_rx_status(priv)) {
3338 void *rx_head;
3339
3340 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3341 if (priv->extend_desc)
3342 rx_head = (void *)rx_q->dma_erx;
3343 else
3344 rx_head = (void *)rx_q->dma_rx;
3345
3346 stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3347 }
3348 while (count < limit) {
3349 int status;
3350 struct dma_desc *p;
3351 struct dma_desc *np;
3352
3353 if (priv->extend_desc)
3354 p = (struct dma_desc *)(rx_q->dma_erx + entry);
3355 else
3356 p = rx_q->dma_rx + entry;
3357
3358 /* read the status of the incoming frame */
3359 status = stmmac_rx_status(priv, &priv->dev->stats,
3360 &priv->xstats, p);
3361 /* check if managed by the DMA otherwise go ahead */
3362 if (unlikely(status & dma_own))
3363 break;
3364
3365 count++;
3366
3367 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3368 next_entry = rx_q->cur_rx;
3369
3370 if (priv->extend_desc)
3371 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3372 else
3373 np = rx_q->dma_rx + next_entry;
3374
3375 prefetch(np);
3376
3377 if (priv->extend_desc)
3378 stmmac_rx_extended_status(priv, &priv->dev->stats,
3379 &priv->xstats, rx_q->dma_erx + entry);
3380 if (unlikely(status == discard_frame)) {
3381 priv->dev->stats.rx_errors++;
3382 if (priv->hwts_rx_en && !priv->extend_desc) {
3383 /* DESC2 & DESC3 will be overwritten by device
3384 * with timestamp value, hence reinitialize
3385 * them in stmmac_rx_refill() function so that
3386 * device can reuse it.
3387 */
3388 dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3389 rx_q->rx_skbuff[entry] = NULL;
3390 dma_unmap_single(priv->device,
3391 rx_q->rx_skbuff_dma[entry],
3392 priv->dma_buf_sz,
3393 DMA_FROM_DEVICE);
3394 }
3395 } else {
3396 struct sk_buff *skb;
3397 int frame_len;
3398 unsigned int des;
3399
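/* Buffer address as programmed in the descriptor; only used by the
 * debug print below.
 */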
3400 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3401 des = le32_to_cpu(p->des0);
3402 else
3403 des = le32_to_cpu(p->des2);
3404
3405 frame_len = stmmac_get_rx_frame_len(priv, p, coe);
3406
3407 /* If frame length is greater than skb buffer size
3408 * (preallocated during init) then the packet is
3409 * ignored
3410 */
3411 if (frame_len > priv->dma_buf_sz) {
3412 netdev_err(priv->dev,
3413 "len %d larger than size (%d)\n",
3414 frame_len, priv->dma_buf_sz);
3415 priv->dev->stats.rx_length_errors++;
3416 break;
3417 }
3418
3419 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3420 * Type frames (LLC/LLC-SNAP)
3421 *
3422 * llc_snap is never checked in GMAC >= 4, so this ACS
3423 * feature is always disabled and packets need to be
3424 * stripped manually.
3425 */
3426 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3427 unlikely(status != llc_snap))
3428 frame_len -= ETH_FCS_LEN;
3429
3430 if (netif_msg_rx_status(priv)) {
3431 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3432 p, entry, des);
3433 netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3434 frame_len, status);
3435 }
3436
3437 /* The zero-copy is always used for all the sizes
3438 * in case of GMAC4 because it needs
3439 * to refill the used descriptors, always.
3440 */
3441 if (unlikely(!priv->plat->has_gmac4 &&
3442 ((frame_len < priv->rx_copybreak) ||
3443 stmmac_rx_threshold_count(rx_q)))) {
3444 skb = netdev_alloc_skb_ip_align(priv->dev,
3445 frame_len);
3446 if (unlikely(!skb)) {
3447 if (net_ratelimit())
3448 dev_warn(priv->device,
3449 "packet dropped\n");
3450 priv->dev->stats.rx_dropped++;
3451 break;
3452 }
3453
3454 dma_sync_single_for_cpu(priv->device,
3455 rx_q->rx_skbuff_dma
3456 [entry], frame_len,
3457 DMA_FROM_DEVICE);
3458 skb_copy_to_linear_data(skb,
3459 rx_q->
3460 rx_skbuff[entry]->data,
3461 frame_len);
3462
3463 skb_put(skb, frame_len);
3464 dma_sync_single_for_device(priv->device,
3465 rx_q->rx_skbuff_dma
3466 [entry], frame_len,
3467 DMA_FROM_DEVICE);
3468 } else {
3469 skb = rx_q->rx_skbuff[entry];
3470 if (unlikely(!skb)) {
3471 netdev_err(priv->dev,
3472 "%s: Inconsistent Rx chain\n",
3473 priv->dev->name);
3474 priv->dev->stats.rx_dropped++;
3475 break;
3476 }
3477 prefetch(skb->data - NET_IP_ALIGN);
3478 rx_q->rx_skbuff[entry] = NULL;
3479 rx_q->rx_zeroc_thresh++;
3480
3481 skb_put(skb, frame_len);
3482 dma_unmap_single(priv->device,
3483 rx_q->rx_skbuff_dma[entry],
3484 priv->dma_buf_sz,
3485 DMA_FROM_DEVICE);
3486 }
3487
3488 if (netif_msg_pktdata(priv)) {
3489 netdev_dbg(priv->dev, "frame received (%dbytes)",
3490 frame_len);
3491 print_pkt(skb->data, frame_len);
3492 }
3493
3494 stmmac_get_rx_hwtstamp(priv, p, np, skb);
3495
3496 stmmac_rx_vlan(priv->dev, skb);
3497
3498 skb->protocol = eth_type_trans(skb, priv->dev);
3499
3500 if (unlikely(!coe))
3501 skb_checksum_none_assert(skb);
3502 else
3503 skb->ip_summed = CHECKSUM_UNNECESSARY;
3504
3505 napi_gro_receive(&rx_q->napi, skb);
3506
3507 priv->dev->stats.rx_packets++;
3508 priv->dev->stats.rx_bytes += frame_len;
3509 }
3510 entry = next_entry;
3511 }
3512
3513 stmmac_rx_refill(priv, queue);
3514
3515 priv->xstats.rx_pkt_n += count;
3516
3517 return count;
3518 }
3519
3520 /**
3521 * stmmac_poll - stmmac poll method (NAPI)
3522 * @napi : pointer to the napi structure.
3523 * @budget : maximum number of packets that the current CPU can receive from
3524 * all interfaces.
3525 * Description :
3526 * To look at the incoming frames and clear the tx resources.
3527 */
3528 static int stmmac_poll(struct napi_struct *napi, int budget)
3529 {
3530 struct stmmac_rx_queue *rx_q =
3531 container_of(napi, struct stmmac_rx_queue, napi);
3532 struct stmmac_priv *priv = rx_q->priv_data;
3533 u32 tx_count = priv->plat->tx_queues_to_use;
3534 u32 chan = rx_q->queue_index;
3535 int work_done = 0;
3536 u32 queue;
3537
3538 priv->xstats.napi_poll++;
3539
3540 /* check all the queues */
3541 for (queue = 0; queue < tx_count; queue++)
3542 stmmac_tx_clean(priv, queue);
3543
3544 work_done = stmmac_rx(priv, budget, rx_q->queue_index);
3545 if (work_done < budget) {
3546 napi_complete_done(napi, work_done);
3547 stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3548 }
3549 return work_done;
3550 }
3551
3552 /**
3553 * stmmac_tx_timeout - transmit timeout callback
3554 * @dev : Pointer to net device structure
3555 * Description: this function is called when a packet transmission fails to
3556 * complete within a reasonable time. The driver will mark the error in the
3557 * netdev structure and arrange for the device to be reset to a sane state
3558 * in order to transmit a new packet.
3559 */
3560 static void stmmac_tx_timeout(struct net_device *dev)
3561 {
3562 struct stmmac_priv *priv = netdev_priv(dev);
3563
3564 stmmac_global_err(priv);
3565 }
3566
3567 /**
3568 * stmmac_set_rx_mode - entry point for multicast addressing
3569 * @dev : pointer to the device structure
3570 * Description:
3571 * This function is a driver entry point which gets called by the kernel
3572 * whenever multicast addresses must be enabled/disabled.
3573 * Return value:
3574 * void.
3575 */
3576 static void stmmac_set_rx_mode(struct net_device *dev)
3577 {
3578 struct stmmac_priv *priv = netdev_priv(dev);
3579
3580 stmmac_set_filter(priv, priv->hw, dev);
3581 }
3582
3583 /**
3584 * stmmac_change_mtu - entry point to change MTU size for the device.
3585 * @dev : device pointer.
3586 * @new_mtu : the new MTU size for the device.
3587 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
3588 * to drive packet transmission. Ethernet has an MTU of 1500 octets
3589 * (ETH_DATA_LEN). This value can be changed with ifconfig.
3590 * Return value:
3591 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3592 * file on failure.
3593 */
3594 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3595 {
3596 struct stmmac_priv *priv = netdev_priv(dev);
3597
3598 if (netif_running(dev)) {
3599 netdev_err(priv->dev, "must be stopped to change its MTU\n");
3600 return -EBUSY;
3601 }
3602
3603 dev->mtu = new_mtu;
3604
3605 netdev_update_features(dev);
3606
3607 return 0;
3608 }
3609
3610 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3611 netdev_features_t features)
3612 {
3613 struct stmmac_priv *priv = netdev_priv(dev);
3614
3615 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3616 features &= ~NETIF_F_RXCSUM;
3617
3618 if (!priv->plat->tx_coe)
3619 features &= ~NETIF_F_CSUM_MASK;
3620
3621 /* Some GMAC devices have a bugged Jumbo frame support that
3622 * needs to have the Tx COE disabled for oversized frames
3623 * (due to limited buffer sizes). In this case we disable
3624 * the TX csum insertion in the TDES and not use SF.
3625 */
3626 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3627 features &= ~NETIF_F_CSUM_MASK;
3628
3629 /* Disable tso if asked by ethtool */
3630 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3631 if (features & NETIF_F_TSO)
3632 priv->tso = true;
3633 else
3634 priv->tso = false;
3635 }
3636
3637 return features;
3638 }
3639
3640 static int stmmac_set_features(struct net_device *netdev,
3641 netdev_features_t features)
3642 {
3643 struct stmmac_priv *priv = netdev_priv(netdev);
3644
3645 /* Keep the COE Type in case csum offload is supported */
3646 if (features & NETIF_F_RXCSUM)
3647 priv->hw->rx_csum = priv->plat->rx_coe;
3648 else
3649 priv->hw->rx_csum = 0;
3650 /* No check needed because rx_coe has been set before and it will be
3651 * fixed in case of issue.
3652 */
3653 stmmac_rx_ipc(priv, priv->hw);
3654
3655 return 0;
3656 }
3657
3658 /**
3659 * stmmac_interrupt - main ISR
3660 * @irq: interrupt number.
3661 * @dev_id: to pass the net device pointer.
3662 * Description: this is the main driver interrupt service routine.
3663 * It can call:
3664 * o DMA service routine (to manage incoming frame reception and transmission
3665 * status)
3666 * o Core interrupts to manage: remote wake-up, management counter, LPI
3667 * interrupts.
3668 */
3669 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3670 {
3671 struct net_device *dev = (struct net_device *)dev_id;
3672 struct stmmac_priv *priv;
3673 u32 rx_cnt, tx_cnt, queues_count;
3674 u32 queue;
3675
3676 if (unlikely(!dev)) {
3677 pr_err("%s: invalid dev pointer\n", __func__);
3678 return IRQ_NONE;
3679 }
3680
3681 priv = netdev_priv(dev);
3682 rx_cnt = priv->plat->rx_queues_to_use;
3683 tx_cnt = priv->plat->tx_queues_to_use;
3684 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3685
3686 if (priv->irq_wake)
3686 pm_wakeup_event(priv->device, 0);
3687
3688 /* Check if adapter is up */
3689 if (test_bit(STMMAC_DOWN, &priv->state))
3690 return IRQ_HANDLED;
3691 /* Check if a fatal error happened */
3692 if (stmmac_safety_feat_interrupt(priv))
3693 return IRQ_HANDLED;
3694
3695 /* Handle the GMAC's own interrupts */
3696 if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
3697 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
3698
3699 if (unlikely(status)) {
3700 /* For LPI we need to save the tx status */
3701 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3702 priv->tx_path_in_lpi_mode = true;
3703 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3704 priv->tx_path_in_lpi_mode = false;
3705 }
3706
3707 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3708 for (queue = 0; queue < queues_count; queue++) {
3709 struct stmmac_rx_queue *rx_q =
3710 &priv->rx_queue[queue];
3711
3712 status |= stmmac_host_mtl_irq_status(priv,
3713 priv->hw, queue);
3714
3715 if (status & CORE_IRQ_MTL_RX_OVERFLOW)
3716 stmmac_set_rx_tail_ptr(priv,
3717 priv->ioaddr,
3718 rx_q->rx_tail_addr,
3719 queue);
3720 }
3721 }
3722
3723 /* PCS link status */
3724 if (priv->hw->pcs) {
3725 if (priv->xstats.pcs_link)
3726 netif_carrier_on(dev);
3727 else
3728 netif_carrier_off(dev);
3729 }
3730 }
3731
3732 /* To handle DMA interrupts */
3733 stmmac_dma_interrupt(priv);
3734
3735 return IRQ_HANDLED;
3736 }
3737
3738 #ifdef CONFIG_NET_POLL_CONTROLLER
3739 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3740 * to allow network I/O with interrupts disabled.
3741 */
3742 static void stmmac_poll_controller(struct net_device *dev)
3743 {
3744 disable_irq(dev->irq);
3745 stmmac_interrupt(dev->irq, dev);
3746 enable_irq(dev->irq);
3747 }
3748 #endif
3749
3750 /**
3751 * stmmac_ioctl - Entry point for the Ioctl
3752 * @dev: Device pointer.
3753 * @rq: An IOCTL-specific structure that can contain a pointer to
3754 * a proprietary structure used to pass information to the driver.
3755 * @cmd: IOCTL command
3756 * Description:
3757 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3758 */
3759 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3760 {
3761 int ret = -EOPNOTSUPP;
3762
3763 if (!netif_running(dev))
3764 return -EINVAL;
3765
3766 switch (cmd) {
3767 case SIOCGMIIPHY:
3768 case SIOCGMIIREG:
3769 case SIOCSMIIREG:
3770 if (!dev->phydev)
3771 return -EINVAL;
3772 ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3773 break;
3774 case SIOCSHWTSTAMP:
3775 ret = stmmac_hwtstamp_ioctl(dev, rq);
3776 break;
3777 default:
3778 break;
3779 }
3780
3781 return ret;
3782 }
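
/* Illustrative userspace sketch (not part of this driver): hardware
 * timestamping is enabled through the SIOCSHWTSTAMP path handled above.
 * The socket, interface name and filter choices below are assumptions:
 *
 *	struct hwtstamp_config cfg = { 0 };
 *	struct ifreq ifr = { 0 };
 *
 *	cfg.tx_type = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)	/- fd: any AF_INET socket -/
 *		perror("SIOCSHWTSTAMP");
 */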
3783
3784 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3785 void *cb_priv)
3786 {
3787 struct stmmac_priv *priv = cb_priv;
3788 int ret = -EOPNOTSUPP;
3789
3790 stmmac_disable_all_queues(priv);
3791
3792 switch (type) {
3793 case TC_SETUP_CLSU32:
3794 if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
3795 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
3796 break;
3797 default:
3798 break;
3799 }
3800
3801 stmmac_enable_all_queues(priv);
3802 return ret;
3803 }
3804
3805 static int stmmac_setup_tc_block(struct stmmac_priv *priv,
3806 struct tc_block_offload *f)
3807 {
3808 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
3809 return -EOPNOTSUPP;
3810
3811 switch (f->command) {
3812 case TC_BLOCK_BIND:
3813 return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
3814 priv, priv);
3815 case TC_BLOCK_UNBIND:
3816 tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
3817 return 0;
3818 default:
3819 return -EOPNOTSUPP;
3820 }
3821 }
3822
3823 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
3824 void *type_data)
3825 {
3826 struct stmmac_priv *priv = netdev_priv(ndev);
3827
3828 switch (type) {
3829 case TC_SETUP_BLOCK:
3830 return stmmac_setup_tc_block(priv, type_data);
3831 default:
3832 return -EOPNOTSUPP;
3833 }
3834 }
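
/* Illustrative only: the CLSU32 block callback above is reached through
 * the standard tc ingress offload path once hw-tc-offload is enabled,
 * e.g. (interface name and match values are assumptions):
 *
 *	ethtool -K eth0 hw-tc-offload on
 *	tc qdisc add dev eth0 ingress
 *	tc filter add dev eth0 parent ffff: protocol ip u32 \
 *		match ip dst 192.168.0.1 action drop
 */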
3835
3836 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3837 {
3838 struct stmmac_priv *priv = netdev_priv(ndev);
3839 int ret = 0;
3840
3841 ret = eth_mac_addr(ndev, addr);
3842 if (ret)
3843 return ret;
3844
3845 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
3846
3847 return ret;
3848 }
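
/* Illustrative only: this hook keeps the unicast MAC address register in
 * sync when the address is changed from userspace, e.g. (values are
 * examples):
 *
 *	ip link set dev eth0 address 02:11:22:33:44:55
 */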
3849
3850 #ifdef CONFIG_DEBUG_FS
3851 static struct dentry *stmmac_fs_dir;
3852
3853 static void sysfs_display_ring(void *head, int size, int extend_desc,
3854 struct seq_file *seq)
3855 {
3856 int i;
3857 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3858 struct dma_desc *p = (struct dma_desc *)head;
3859
3860 for (i = 0; i < size; i++) {
3861 if (extend_desc) {
3862 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3863 i, (unsigned int)virt_to_phys(ep),
3864 le32_to_cpu(ep->basic.des0),
3865 le32_to_cpu(ep->basic.des1),
3866 le32_to_cpu(ep->basic.des2),
3867 le32_to_cpu(ep->basic.des3));
3868 ep++;
3869 } else {
3870 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3871 i, (unsigned int)virt_to_phys(p),
3872 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3873 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3874 p++;
3875 }
3876 seq_printf(seq, "\n");
3877 }
3878 }
3879
3880 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3881 {
3882 struct net_device *dev = seq->private;
3883 struct stmmac_priv *priv = netdev_priv(dev);
3884 u32 rx_count = priv->plat->rx_queues_to_use;
3885 u32 tx_count = priv->plat->tx_queues_to_use;
3886 u32 queue;
3887
3888 for (queue = 0; queue < rx_count; queue++) {
3889 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3890
3891 seq_printf(seq, "RX Queue %d:\n", queue);
3892
3893 if (priv->extend_desc) {
3894 seq_printf(seq, "Extended descriptor ring:\n");
3895 sysfs_display_ring((void *)rx_q->dma_erx,
3896 DMA_RX_SIZE, 1, seq);
3897 } else {
3898 seq_printf(seq, "Descriptor ring:\n");
3899 sysfs_display_ring((void *)rx_q->dma_rx,
3900 DMA_RX_SIZE, 0, seq);
3901 }
3902 }
3903
3904 for (queue = 0; queue < tx_count; queue++) {
3905 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3906
3907 seq_printf(seq, "TX Queue %d:\n", queue);
3908
3909 if (priv->extend_desc) {
3910 seq_printf(seq, "Extended descriptor ring:\n");
3911 sysfs_display_ring((void *)tx_q->dma_etx,
3912 DMA_TX_SIZE, 1, seq);
3913 } else {
3914 seq_printf(seq, "Descriptor ring:\n");
3915 sysfs_display_ring((void *)tx_q->dma_tx,
3916 DMA_TX_SIZE, 0, seq);
3917 }
3918 }
3919
3920 return 0;
3921 }
3922
3923 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3924 {
3925 return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3926 }
3927
3928 /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3929
3930 static const struct file_operations stmmac_rings_status_fops = {
3931 .owner = THIS_MODULE,
3932 .open = stmmac_sysfs_ring_open,
3933 .read = seq_read,
3934 .llseek = seq_lseek,
3935 .release = single_release,
3936 };
3937
3938 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3939 {
3940 struct net_device *dev = seq->private;
3941 struct stmmac_priv *priv = netdev_priv(dev);
3942
3943 if (!priv->hw_cap_support) {
3944 seq_printf(seq, "DMA HW features not supported\n");
3945 return 0;
3946 }
3947
3948 seq_printf(seq, "==============================\n");
3949 seq_printf(seq, "\tDMA HW features\n");
3950 seq_printf(seq, "==============================\n");
3951
3952 seq_printf(seq, "\t10/100 Mbps: %s\n",
3953 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3954 seq_printf(seq, "\t1000 Mbps: %s\n",
3955 (priv->dma_cap.mbps_1000) ? "Y" : "N");
3956 seq_printf(seq, "\tHalf duplex: %s\n",
3957 (priv->dma_cap.half_duplex) ? "Y" : "N");
3958 seq_printf(seq, "\tHash Filter: %s\n",
3959 (priv->dma_cap.hash_filter) ? "Y" : "N");
3960 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3961 (priv->dma_cap.multi_addr) ? "Y" : "N");
3962 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3963 (priv->dma_cap.pcs) ? "Y" : "N");
3964 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3965 (priv->dma_cap.sma_mdio) ? "Y" : "N");
3966 seq_printf(seq, "\tPMT Remote wake up: %s\n",
3967 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3968 seq_printf(seq, "\tPMT Magic Frame: %s\n",
3969 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3970 seq_printf(seq, "\tRMON module: %s\n",
3971 (priv->dma_cap.rmon) ? "Y" : "N");
3972 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3973 (priv->dma_cap.time_stamp) ? "Y" : "N");
3974 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3975 (priv->dma_cap.atime_stamp) ? "Y" : "N");
3976 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3977 (priv->dma_cap.eee) ? "Y" : "N");
3978 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3979 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3980 (priv->dma_cap.tx_coe) ? "Y" : "N");
3981 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3982 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3983 (priv->dma_cap.rx_coe) ? "Y" : "N");
3984 } else {
3985 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3986 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3987 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3988 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3989 }
3990 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3991 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3992 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
3993 priv->dma_cap.number_rx_channel);
3994 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
3995 priv->dma_cap.number_tx_channel);
3996 seq_printf(seq, "\tEnhanced descriptors: %s\n",
3997 (priv->dma_cap.enh_desc) ? "Y" : "N");
3998
3999 return 0;
4000 }
4001
4002 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
4003 {
4004 return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
4005 }
4006
4007 static const struct file_operations stmmac_dma_cap_fops = {
4008 .owner = THIS_MODULE,
4009 .open = stmmac_sysfs_dma_cap_open,
4010 .read = seq_read,
4011 .llseek = seq_lseek,
4012 .release = single_release,
4013 };
4014
4015 static int stmmac_init_fs(struct net_device *dev)
4016 {
4017 struct stmmac_priv *priv = netdev_priv(dev);
4018
4019 /* Create per netdev entries */
4020 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4021
4022 if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
4023 netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
4024
4025 return -ENOMEM;
4026 }
4027
4028 /* Entry to report DMA RX/TX rings */
4029 priv->dbgfs_rings_status =
4030 debugfs_create_file("descriptors_status", 0444,
4031 priv->dbgfs_dir, dev,
4032 &stmmac_rings_status_fops);
4033
4034 if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
4035 netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
4036 debugfs_remove_recursive(priv->dbgfs_dir);
4037
4038 return -ENOMEM;
4039 }
4040
4041 /* Entry to report the DMA HW features */
4042 priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
4043 priv->dbgfs_dir,
4044 dev, &stmmac_dma_cap_fops);
4045
4046 if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
4047 netdev_err(priv->dev, "ERROR creating stmmac DMA Capabilities debugfs file\n");
4048 debugfs_remove_recursive(priv->dbgfs_dir);
4049
4050 return -ENOMEM;
4051 }
4052
4053 return 0;
4054 }
4055
4056 static void stmmac_exit_fs(struct net_device *dev)
4057 {
4058 struct stmmac_priv *priv = netdev_priv(dev);
4059
4060 debugfs_remove_recursive(priv->dbgfs_dir);
4061 }
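
/* Illustrative only: with CONFIG_DEBUG_FS enabled, the entries created by
 * stmmac_init_fs() can be read from userspace (interface name is an
 * example):
 *
 *	cat /sys/kernel/debug/stmmaceth/eth0/descriptors_status
 *	cat /sys/kernel/debug/stmmaceth/eth0/dma_cap
 */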
4062 #endif /* CONFIG_DEBUG_FS */
4063
4064 static const struct net_device_ops stmmac_netdev_ops = {
4065 .ndo_open = stmmac_open,
4066 .ndo_start_xmit = stmmac_xmit,
4067 .ndo_stop = stmmac_release,
4068 .ndo_change_mtu = stmmac_change_mtu,
4069 .ndo_fix_features = stmmac_fix_features,
4070 .ndo_set_features = stmmac_set_features,
4071 .ndo_set_rx_mode = stmmac_set_rx_mode,
4072 .ndo_tx_timeout = stmmac_tx_timeout,
4073 .ndo_do_ioctl = stmmac_ioctl,
4074 .ndo_setup_tc = stmmac_setup_tc,
4075 #ifdef CONFIG_NET_POLL_CONTROLLER
4076 .ndo_poll_controller = stmmac_poll_controller,
4077 #endif
4078 .ndo_set_mac_address = stmmac_set_mac_address,
4079 };
4080
4081 static void stmmac_reset_subtask(struct stmmac_priv *priv)
4082 {
4083 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
4084 return;
4085 if (test_bit(STMMAC_DOWN, &priv->state))
4086 return;
4087
4088 netdev_err(priv->dev, "Reset adapter.\n");
4089
4090 rtnl_lock();
4091 netif_trans_update(priv->dev);
4092 while (test_and_set_bit(STMMAC_RESETING, &priv->state))
4093 usleep_range(1000, 2000);
4094
4095 set_bit(STMMAC_DOWN, &priv->state);
4096 dev_close(priv->dev);
4097 dev_open(priv->dev);
4098 clear_bit(STMMAC_DOWN, &priv->state);
4099 clear_bit(STMMAC_RESETING, &priv->state);
4100 rtnl_unlock();
4101 }
4102
4103 static void stmmac_service_task(struct work_struct *work)
4104 {
4105 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
4106 service_task);
4107
4108 stmmac_reset_subtask(priv);
4109 clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
4110 }
4111
4112 /**
4113 * stmmac_hw_init - Init the MAC device
4114 * @priv: driver private structure
4115 * Description: this function is to configure the MAC device according to
4116 * some platform parameters or the HW capability register. It prepares the
4117 * driver to use either ring or chain modes and to setup either enhanced or
4118 * normal descriptors.
4119 */
4120 static int stmmac_hw_init(struct stmmac_priv *priv)
4121 {
4122 int ret;
4123
4124 /* dwmac-sun8i only works in chain mode */
4125 if (priv->plat->has_sun8i)
4126 chain_mode = 1;
4127 priv->chain_mode = chain_mode;
4128
4129 /* Initialize HW Interface */
4130 ret = stmmac_hwif_init(priv);
4131 if (ret)
4132 return ret;
4133
4134 /* Get the HW capability register (GMAC cores newer than 3.50a) */
4135 priv->hw_cap_support = stmmac_get_hw_features(priv);
4136 if (priv->hw_cap_support) {
4137 dev_info(priv->device, "DMA HW capability register supported\n");
4138
4139 /* We can override some gmac/dma configuration fields (e.g.
4140 * enh_desc, tx_coe) that are passed through the platform
4141 * data with the values from the HW capability register
4142 * (if supported).
4143 */
4144 priv->plat->enh_desc = priv->dma_cap.enh_desc;
4145 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4146 priv->hw->pmt = priv->plat->pmt;
4147
4148 /* TXCOE doesn't work in thresh DMA mode */
4149 if (priv->plat->force_thresh_dma_mode)
4150 priv->plat->tx_coe = 0;
4151 else
4152 priv->plat->tx_coe = priv->dma_cap.tx_coe;
4153
4154 /* In the GMAC4 case, rx_coe comes from the HW capability register. */
4155 priv->plat->rx_coe = priv->dma_cap.rx_coe;
4156
4157 if (priv->dma_cap.rx_coe_type2)
4158 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4159 else if (priv->dma_cap.rx_coe_type1)
4160 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4161
4162 } else {
4163 dev_info(priv->device, "No HW DMA feature register supported\n");
4164 }
4165
4166 if (priv->plat->rx_coe) {
4167 priv->hw->rx_csum = priv->plat->rx_coe;
4168 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4169 if (priv->synopsys_id < DWMAC_CORE_4_00)
4170 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4171 }
4172 if (priv->plat->tx_coe)
4173 dev_info(priv->device, "TX Checksum insertion supported\n");
4174
4175 if (priv->plat->pmt) {
4176 dev_info(priv->device, "Wake-Up On Lan supported\n");
4177 device_set_wakeup_capable(priv->device, 1);
4178 }
4179
4180 if (priv->dma_cap.tsoen)
4181 dev_info(priv->device, "TSO supported\n");
4182
4183 return 0;
4184 }
4185
4186 /**
4187 * stmmac_dvr_probe
4188 * @device: device pointer
4189 * @plat_dat: platform data pointer
4190 * @res: stmmac resource pointer
4191 * Description: this is the main probe function used to
4192 * call alloc_etherdev and allocate the priv structure.
4193 * Return:
4194 * returns 0 on success, otherwise errno.
4195 */
4196 int stmmac_dvr_probe(struct device *device,
4197 struct plat_stmmacenet_data *plat_dat,
4198 struct stmmac_resources *res)
4199 {
4200 struct net_device *ndev = NULL;
4201 struct stmmac_priv *priv;
4202 int ret = 0;
4203 u32 queue;
4204
4205 ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4206 MTL_MAX_TX_QUEUES,
4207 MTL_MAX_RX_QUEUES);
4208 if (!ndev)
4209 return -ENOMEM;
4210
4211 SET_NETDEV_DEV(ndev, device);
4212
4213 priv = netdev_priv(ndev);
4214 priv->device = device;
4215 priv->dev = ndev;
4216
4217 stmmac_set_ethtool_ops(ndev);
4218 priv->pause = pause;
4219 priv->plat = plat_dat;
4220 priv->ioaddr = res->addr;
4221 priv->dev->base_addr = (unsigned long)res->addr;
4222
4223 priv->dev->irq = res->irq;
4224 priv->wol_irq = res->wol_irq;
4225 priv->lpi_irq = res->lpi_irq;
4226
4227 if (res->mac)
4228 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4229
4230 dev_set_drvdata(device, priv->dev);
4231
4232 /* Verify driver arguments */
4233 stmmac_verify_args();
4234
4235 /* Allocate workqueue */
4236 priv->wq = create_singlethread_workqueue("stmmac_wq");
4237 if (!priv->wq) {
4238 dev_err(priv->device, "failed to create workqueue\n");
4239 goto error_wq;
4240 }
4241
4242 INIT_WORK(&priv->service_task, stmmac_service_task);
4243
4244 /* Override with kernel parameters if supplied XXX CRS XXX
4245 * this needs to have multiple instances
4246 */
4247 if ((phyaddr >= 0) && (phyaddr <= 31))
4248 priv->plat->phy_addr = phyaddr;
4249
4250 if (priv->plat->stmmac_rst) {
4251 ret = reset_control_assert(priv->plat->stmmac_rst);
4252 reset_control_deassert(priv->plat->stmmac_rst);
4253 /* Some reset controllers have only reset callback instead of
4254 * assert + deassert callbacks pair.
4255 */
4256 if (ret == -ENOTSUPP)
4257 reset_control_reset(priv->plat->stmmac_rst);
4258 }
4259
4260 /* Init MAC and get the capabilities */
4261 ret = stmmac_hw_init(priv);
4262 if (ret)
4263 goto error_hw_init;
4264
4265 /* Configure real RX and TX queues */
4266 netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4267 netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4268
4269 ndev->netdev_ops = &stmmac_netdev_ops;
4270
4271 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4272 NETIF_F_RXCSUM;
4273
4274 ret = stmmac_tc_init(priv, priv);
4275 if (!ret)
4276 ndev->hw_features |= NETIF_F_HW_TC;
4278
4279 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4280 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4281 priv->tso = true;
4282 dev_info(priv->device, "TSO feature enabled\n");
4283 }
4284 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4285 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4286 #ifdef STMMAC_VLAN_TAG_USED
4287 /* Both mac100 and gmac support receive VLAN tag detection */
4288 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
4289 #endif
4290 priv->msg_enable = netif_msg_init(debug, default_msg_level);
4291
4292 /* MTU range: 46 - hw-specific max */
4293 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4294 if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4295 ndev->max_mtu = JUMBO_LEN;
4296 else
4297 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4298 /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
4299 * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
4300 */
4301 if ((priv->plat->maxmtu < ndev->max_mtu) &&
4302 (priv->plat->maxmtu >= ndev->min_mtu))
4303 ndev->max_mtu = priv->plat->maxmtu;
4304 else if (priv->plat->maxmtu < ndev->min_mtu)
4305 dev_warn(priv->device,
4306 "%s: warning: maxmtu having invalid value (%d)\n",
4307 __func__, priv->plat->maxmtu);
4308
4309 if (flow_ctrl)
4310 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
4311
4312 /* The Rx Watchdog is available in cores newer than 3.40.
4313 * In some cases, for example on buggy HW, this feature
4314 * has to be disabled; this can be done by passing the
4315 * riwt_off field from the platform.
4316 */
4317 if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
4318 priv->use_riwt = 1;
4319 dev_info(priv->device,
4320 "Enable RX Mitigation via HW Watchdog Timer\n");
4321 }
4322
4323 for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4324 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4325
4326 netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4327 (8 * priv->plat->rx_queues_to_use));
4328 }
4329
4330 spin_lock_init(&priv->lock);
4331
4332 /* If a specific clk_csr value is passed from the platform,
4333 * the CSR Clock Range selection cannot be changed at run-time
4334 * and is fixed. Otherwise the driver will try to set the MDC
4335 * clock dynamically according to the actual csr clock input.
4336 */
4338 if (!priv->plat->clk_csr)
4339 stmmac_clk_csr_set(priv);
4340 else
4341 priv->clk_csr = priv->plat->clk_csr;
4342
4343 stmmac_check_pcs_mode(priv);
4344
4345 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4346 priv->hw->pcs != STMMAC_PCS_TBI &&
4347 priv->hw->pcs != STMMAC_PCS_RTBI) {
4348 /* MDIO bus Registration */
4349 ret = stmmac_mdio_register(ndev);
4350 if (ret < 0) {
4351 dev_err(priv->device,
4352 "%s: MDIO bus (id: %d) registration failed",
4353 __func__, priv->plat->bus_id);
4354 goto error_mdio_register;
4355 }
4356 }
4357
4358 ret = register_netdev(ndev);
4359 if (ret) {
4360 dev_err(priv->device, "%s: ERROR %i registering the device\n",
4361 __func__, ret);
4362 goto error_netdev_register;
4363 }
4364
4365 return ret;
4366
4367 error_netdev_register:
4368 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4369 priv->hw->pcs != STMMAC_PCS_TBI &&
4370 priv->hw->pcs != STMMAC_PCS_RTBI)
4371 stmmac_mdio_unregister(ndev);
4372 error_mdio_register:
4373 for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4374 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4375
4376 netif_napi_del(&rx_q->napi);
4377 }
4378 error_hw_init:
4379 destroy_workqueue(priv->wq);
4380 error_wq:
4381 free_netdev(ndev);
4382
4383 return ret;
4384 }
4385 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
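
/* Minimal glue-layer sketch (illustrative only): platform/PCI back-ends in
 * this directory call stmmac_dvr_probe() roughly as below. The helper names
 * (stmmac_get_platform_resources, stmmac_probe_config_dt) and the device
 * name are assumptions about the dwmac-* glue code, not definitions made
 * in this file:
 *
 *	static int foo_dwmac_probe(struct platform_device *pdev)
 *	{
 *		struct plat_stmmacenet_data *plat_dat;
 *		struct stmmac_resources stmmac_res;
 *		int ret;
 *
 *		ret = stmmac_get_platform_resources(pdev, &stmmac_res);
 *		if (ret)
 *			return ret;
 *
 *		plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
 *		if (IS_ERR(plat_dat))
 *			return PTR_ERR(plat_dat);
 *
 *		return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 *	}
 *
 * The matching remove path would typically end with a call to
 * stmmac_dvr_remove(&pdev->dev).
 */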
4386
4387 /**
4388 * stmmac_dvr_remove
4389 * @dev: device pointer
4390 * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
4391 * changes the link status and releases the DMA descriptor rings.
4392 */
4393 int stmmac_dvr_remove(struct device *dev)
4394 {
4395 struct net_device *ndev = dev_get_drvdata(dev);
4396 struct stmmac_priv *priv = netdev_priv(ndev);
4397
4398 netdev_info(priv->dev, "%s: removing driver", __func__);
4399
4400 stmmac_stop_all_dma(priv);
4401
4402 stmmac_mac_set(priv, priv->ioaddr, false);
4403 netif_carrier_off(ndev);
4404 unregister_netdev(ndev);
4405 if (priv->plat->stmmac_rst)
4406 reset_control_assert(priv->plat->stmmac_rst);
4407 clk_disable_unprepare(priv->plat->pclk);
4408 clk_disable_unprepare(priv->plat->stmmac_clk);
4409 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4410 priv->hw->pcs != STMMAC_PCS_TBI &&
4411 priv->hw->pcs != STMMAC_PCS_RTBI)
4412 stmmac_mdio_unregister(ndev);
4413 destroy_workqueue(priv->wq);
4414 free_netdev(ndev);
4415
4416 return 0;
4417 }
4418 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4419
4420 /**
4421 * stmmac_suspend - suspend callback
4422 * @dev: device pointer
4423 * Description: this is the function to suspend the device; it is called
4424 * by the platform driver to stop the network queues, program the PMT
4425 * register (for WoL) and clean up and release the driver resources.
4426 */
4427 int stmmac_suspend(struct device *dev)
4428 {
4429 struct net_device *ndev = dev_get_drvdata(dev);
4430 struct stmmac_priv *priv = netdev_priv(ndev);
4431 unsigned long flags;
4432
4433 if (!ndev || !netif_running(ndev))
4434 return 0;
4435
4436 if (ndev->phydev)
4437 phy_stop(ndev->phydev);
4438
4439 spin_lock_irqsave(&priv->lock, flags);
4440
4441 netif_device_detach(ndev);
4442 stmmac_stop_all_queues(priv);
4443
4444 stmmac_disable_all_queues(priv);
4445
4446 /* Stop TX/RX DMA */
4447 stmmac_stop_all_dma(priv);
4448
4449 /* Enable Power down mode by programming the PMT regs */
4450 if (device_may_wakeup(priv->device)) {
4451 stmmac_pmt(priv, priv->hw, priv->wolopts);
4452 priv->irq_wake = 1;
4453 } else {
4454 stmmac_mac_set(priv, priv->ioaddr, false);
4455 pinctrl_pm_select_sleep_state(priv->device);
4456 /* Disable clocks in case PWM is off */
4457 clk_disable(priv->plat->pclk);
4458 clk_disable(priv->plat->stmmac_clk);
4459 }
4460 spin_unlock_irqrestore(&priv->lock, flags);
4461
4462 priv->oldlink = false;
4463 priv->speed = SPEED_UNKNOWN;
4464 priv->oldduplex = DUPLEX_UNKNOWN;
4465 return 0;
4466 }
4467 EXPORT_SYMBOL_GPL(stmmac_suspend);
4468
4469 /**
4470 * stmmac_reset_queues_param - reset queue parameters
4471 * @dev: device pointer
4472 */
4473 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4474 {
4475 u32 rx_cnt = priv->plat->rx_queues_to_use;
4476 u32 tx_cnt = priv->plat->tx_queues_to_use;
4477 u32 queue;
4478
4479 for (queue = 0; queue < rx_cnt; queue++) {
4480 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4481
4482 rx_q->cur_rx = 0;
4483 rx_q->dirty_rx = 0;
4484 }
4485
4486 for (queue = 0; queue < tx_cnt; queue++) {
4487 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4488
4489 tx_q->cur_tx = 0;
4490 tx_q->dirty_tx = 0;
4491 tx_q->mss = 0;
4492 }
4493 }
4494
4495 /**
4496 * stmmac_resume - resume callback
4497 * @dev: device pointer
4498 * Description: when resume this function is invoked to setup the DMA and CORE
4499 * in a usable state.
4500 */
4501 int stmmac_resume(struct device *dev)
4502 {
4503 struct net_device *ndev = dev_get_drvdata(dev);
4504 struct stmmac_priv *priv = netdev_priv(ndev);
4505 unsigned long flags;
4506
4507 if (!netif_running(ndev))
4508 return 0;
4509
4510 /* The Power Down bit in the PM register is cleared
4511 * automatically as soon as a magic packet or a Wake-up frame
4512 * is received. It is still better to clear this bit manually
4513 * because it can cause problems when resuming from other
4514 * devices (e.g. a serial console).
4515 */
4516 if (device_may_wakeup(priv->device)) {
4517 spin_lock_irqsave(&priv->lock, flags);
4518 stmmac_pmt(priv, priv->hw, 0);
4519 spin_unlock_irqrestore(&priv->lock, flags);
4520 priv->irq_wake = 0;
4521 } else {
4522 pinctrl_pm_select_default_state(priv->device);
4523 /* enable the clk previously disabled */
4524 clk_enable(priv->plat->stmmac_clk);
4525 clk_enable(priv->plat->pclk);
4526 /* reset the phy so that it's ready */
4527 if (priv->mii)
4528 stmmac_mdio_reset(priv->mii);
4529 }
4530
4531 netif_device_attach(ndev);
4532
4533 spin_lock_irqsave(&priv->lock, flags);
4534
4535 stmmac_reset_queues_param(priv);
4536
4537 stmmac_clear_descriptors(priv);
4538
4539 stmmac_hw_setup(ndev, false);
4540 stmmac_init_tx_coalesce(priv);
4541 stmmac_set_rx_mode(ndev);
4542
4543 stmmac_enable_all_queues(priv);
4544
4545 stmmac_start_all_queues(priv);
4546
4547 spin_unlock_irqrestore(&priv->lock, flags);
4548
4549 if (ndev->phydev)
4550 phy_start(ndev->phydev);
4551
4552 return 0;
4553 }
4554 EXPORT_SYMBOL_GPL(stmmac_resume);
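
/* Illustrative only: bus glue normally wires the two callbacks above into
 * a dev_pm_ops instance so the PM core invokes them on system sleep; the
 * ops name below is an assumption:
 *
 *	static SIMPLE_DEV_PM_OPS(foo_dwmac_pm_ops, stmmac_suspend,
 *				 stmmac_resume);
 */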
4555
4556 #ifndef MODULE
4557 static int __init stmmac_cmdline_opt(char *str)
4558 {
4559 char *opt;
4560
4561 if (!str || !*str)
4562 return -EINVAL;
4563 while ((opt = strsep(&str, ",")) != NULL) {
4564 if (!strncmp(opt, "debug:", 6)) {
4565 if (kstrtoint(opt + 6, 0, &debug))
4566 goto err;
4567 } else if (!strncmp(opt, "phyaddr:", 8)) {
4568 if (kstrtoint(opt + 8, 0, &phyaddr))
4569 goto err;
4570 } else if (!strncmp(opt, "buf_sz:", 7)) {
4571 if (kstrtoint(opt + 7, 0, &buf_sz))
4572 goto err;
4573 } else if (!strncmp(opt, "tc:", 3)) {
4574 if (kstrtoint(opt + 3, 0, &tc))
4575 goto err;
4576 } else if (!strncmp(opt, "watchdog:", 9)) {
4577 if (kstrtoint(opt + 9, 0, &watchdog))
4578 goto err;
4579 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
4580 if (kstrtoint(opt + 10, 0, &flow_ctrl))
4581 goto err;
4582 } else if (!strncmp(opt, "pause:", 6)) {
4583 if (kstrtoint(opt + 6, 0, &pause))
4584 goto err;
4585 } else if (!strncmp(opt, "eee_timer:", 10)) {
4586 if (kstrtoint(opt + 10, 0, &eee_timer))
4587 goto err;
4588 } else if (!strncmp(opt, "chain_mode:", 11)) {
4589 if (kstrtoint(opt + 11, 0, &chain_mode))
4590 goto err;
4591 }
4592 }
4593 return 0;
4594
4595 err:
4596 pr_err("%s: ERROR broken module parameter conversion", __func__);
4597 return -EINVAL;
4598 }
4599
4600 __setup("stmmaceth=", stmmac_cmdline_opt);
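
/* Illustrative only: when the driver is built in, the parser above accepts
 * a comma-separated option string on the kernel command line, e.g.:
 *
 *	stmmaceth=debug:16,phyaddr:1,watchdog:4000,chain_mode:1
 */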
4601 #endif /* MODULE */
4602
4603 static int __init stmmac_init(void)
4604 {
4605 #ifdef CONFIG_DEBUG_FS
4606 /* Create debugfs main directory if it doesn't exist yet */
4607 if (!stmmac_fs_dir) {
4608 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4609
4610 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4611 pr_err("ERROR %s, debugfs create directory failed\n",
4612 STMMAC_RESOURCE_NAME);
4613
4614 return -ENOMEM;
4615 }
4616 }
4617 #endif
4618
4619 return 0;
4620 }
4621
4622 static void __exit stmmac_exit(void)
4623 {
4624 #ifdef CONFIG_DEBUG_FS
4625 debugfs_remove_recursive(stmmac_fs_dir);
4626 #endif
4627 }
4628
4629 module_init(stmmac_init)
4630 module_exit(stmmac_exit)
4631
4632 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4633 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4634 MODULE_LICENSE("GPL");