]> git.ipfire.org Git - thirdparty/linux.git/blame - drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
net: stmmac: Enable 16KB buffer size
[thirdparty/linux.git] / drivers / net / ethernet / stmicro / stmmac / stmmac_main.c
CommitLineData
4fa9c49f 1// SPDX-License-Identifier: GPL-2.0-only
47dd7a54
GC
2/*******************************************************************************
3 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4 ST Ethernet IPs are built around a Synopsys IP Core.
5
286a8372 6 Copyright(C) 2007-2011 STMicroelectronics Ltd
47dd7a54 7
47dd7a54
GC
8
9 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10
11 Documentation available at:
12 http://www.stlinux.com
13 Support available at:
14 https://bugzilla.stlinux.com/
15*******************************************************************************/
16
6a81c26f 17#include <linux/clk.h>
47dd7a54
GC
18#include <linux/kernel.h>
19#include <linux/interrupt.h>
47dd7a54
GC
20#include <linux/ip.h>
21#include <linux/tcp.h>
22#include <linux/skbuff.h>
23#include <linux/ethtool.h>
24#include <linux/if_ether.h>
25#include <linux/crc32.h>
26#include <linux/mii.h>
01789349 27#include <linux/if.h>
47dd7a54
GC
28#include <linux/if_vlan.h>
29#include <linux/dma-mapping.h>
5a0e3ad6 30#include <linux/slab.h>
70c71606 31#include <linux/prefetch.h>
db88f10a 32#include <linux/pinctrl/consumer.h>
50fb4f74 33#ifdef CONFIG_DEBUG_FS
7ac29055
GC
34#include <linux/debugfs.h>
35#include <linux/seq_file.h>
50fb4f74 36#endif /* CONFIG_DEBUG_FS */
891434b1 37#include <linux/net_tstamp.h>
eeef2f6b 38#include <linux/phylink.h>
b7766206 39#include <linux/udp.h>
4dbbe8dd 40#include <net/pkt_cls.h>
891434b1 41#include "stmmac_ptp.h"
286a8372 42#include "stmmac.h"
c5e4ddbd 43#include <linux/reset.h>
5790cf3c 44#include <linux/of_mdio.h>
19d857c9 45#include "dwmac1000.h"
7d9e6c5a 46#include "dwxgmac2.h"
42de047d 47#include "hwif.h"
47dd7a54 48
8d558f02 49#define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
f748be53 50#define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
47dd7a54
GC
51
52/* Module parameters */
32ceabca 53#define TX_TIMEO 5000
47dd7a54 54static int watchdog = TX_TIMEO;
d3757ba4 55module_param(watchdog, int, 0644);
32ceabca 56MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
47dd7a54 57
32ceabca 58static int debug = -1;
d3757ba4 59module_param(debug, int, 0644);
32ceabca 60MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
47dd7a54 61
47d1f71f 62static int phyaddr = -1;
d3757ba4 63module_param(phyaddr, int, 0444);
47dd7a54
GC
64MODULE_PARM_DESC(phyaddr, "Physical device address");
65
e3ad57c9 66#define STMMAC_TX_THRESH (DMA_TX_SIZE / 4)
120e87f9 67#define STMMAC_RX_THRESH (DMA_RX_SIZE / 4)
47dd7a54 68
e9989339 69static int flow_ctrl = FLOW_AUTO;
d3757ba4 70module_param(flow_ctrl, int, 0644);
47dd7a54
GC
71MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
72
73static int pause = PAUSE_TIME;
d3757ba4 74module_param(pause, int, 0644);
47dd7a54
GC
75MODULE_PARM_DESC(pause, "Flow Control Pause Time");
76
77#define TC_DEFAULT 64
78static int tc = TC_DEFAULT;
d3757ba4 79module_param(tc, int, 0644);
47dd7a54
GC
80MODULE_PARM_DESC(tc, "DMA threshold control value");
81
d916701c
GC
82#define DEFAULT_BUFSIZE 1536
83static int buf_sz = DEFAULT_BUFSIZE;
d3757ba4 84module_param(buf_sz, int, 0644);
47dd7a54
GC
85MODULE_PARM_DESC(buf_sz, "DMA buffer size");
86
22ad3838
GC
87#define STMMAC_RX_COPYBREAK 256
88
47dd7a54
GC
89static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
90 NETIF_MSG_LINK | NETIF_MSG_IFUP |
91 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
92
d765955d
GC
93#define STMMAC_DEFAULT_LPI_TIMER 1000
94static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
d3757ba4 95module_param(eee_timer, int, 0644);
d765955d 96MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
f5351ef7 97#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
d765955d 98
22d3efe5
PM
99/* By default the driver will use the ring mode to manage tx and rx descriptors,
100 * but allow user to force to use the chain instead of the ring
4a7d666a
GC
101 */
102static unsigned int chain_mode;
d3757ba4 103module_param(chain_mode, int, 0444);
4a7d666a
GC
104MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
105
47dd7a54 106static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
47dd7a54 107
50fb4f74 108#ifdef CONFIG_DEBUG_FS
8d72ab11 109static void stmmac_init_fs(struct net_device *dev);
466c5ac8 110static void stmmac_exit_fs(struct net_device *dev);
bfab27a1
GC
111#endif
112
9125cdd1
GC
113#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
114
47dd7a54
GC
115/**
116 * stmmac_verify_args - verify the driver parameters.
732fdf0e
GC
117 * Description: it checks the driver parameters and set a default in case of
118 * errors.
47dd7a54
GC
119 */
120static void stmmac_verify_args(void)
121{
122 if (unlikely(watchdog < 0))
123 watchdog = TX_TIMEO;
d916701c
GC
124 if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
125 buf_sz = DEFAULT_BUFSIZE;
47dd7a54
GC
126 if (unlikely(flow_ctrl > 1))
127 flow_ctrl = FLOW_AUTO;
128 else if (likely(flow_ctrl < 0))
129 flow_ctrl = FLOW_OFF;
130 if (unlikely((pause < 0) || (pause > 0xffff)))
131 pause = PAUSE_TIME;
d765955d
GC
132 if (eee_timer < 0)
133 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
47dd7a54
GC
134}
135
c22a3f48
JP
136/**
137 * stmmac_disable_all_queues - Disable all queues
138 * @priv: driver private structure
139 */
140static void stmmac_disable_all_queues(struct stmmac_priv *priv)
141{
142 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
8fce3331
JA
143 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
144 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
c22a3f48
JP
145 u32 queue;
146
8fce3331
JA
147 for (queue = 0; queue < maxq; queue++) {
148 struct stmmac_channel *ch = &priv->channel[queue];
c22a3f48 149
4ccb4585
JA
150 if (queue < rx_queues_cnt)
151 napi_disable(&ch->rx_napi);
152 if (queue < tx_queues_cnt)
153 napi_disable(&ch->tx_napi);
c22a3f48
JP
154 }
155}
156
157/**
158 * stmmac_enable_all_queues - Enable all queues
159 * @priv: driver private structure
160 */
161static void stmmac_enable_all_queues(struct stmmac_priv *priv)
162{
163 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
8fce3331
JA
164 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
165 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
c22a3f48
JP
166 u32 queue;
167
8fce3331
JA
168 for (queue = 0; queue < maxq; queue++) {
169 struct stmmac_channel *ch = &priv->channel[queue];
c22a3f48 170
4ccb4585
JA
171 if (queue < rx_queues_cnt)
172 napi_enable(&ch->rx_napi);
173 if (queue < tx_queues_cnt)
174 napi_enable(&ch->tx_napi);
c22a3f48
JP
175 }
176}
177
178/**
179 * stmmac_stop_all_queues - Stop all queues
180 * @priv: driver private structure
181 */
182static void stmmac_stop_all_queues(struct stmmac_priv *priv)
183{
184 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
185 u32 queue;
186
187 for (queue = 0; queue < tx_queues_cnt; queue++)
188 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
189}
190
191/**
192 * stmmac_start_all_queues - Start all queues
193 * @priv: driver private structure
194 */
195static void stmmac_start_all_queues(struct stmmac_priv *priv)
196{
197 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
198 u32 queue;
199
200 for (queue = 0; queue < tx_queues_cnt; queue++)
201 netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
202}
203
34877a15
JA
204static void stmmac_service_event_schedule(struct stmmac_priv *priv)
205{
206 if (!test_bit(STMMAC_DOWN, &priv->state) &&
207 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
208 queue_work(priv->wq, &priv->service_task);
209}
210
211static void stmmac_global_err(struct stmmac_priv *priv)
212{
213 netif_carrier_off(priv->dev);
214 set_bit(STMMAC_RESET_REQUESTED, &priv->state);
215 stmmac_service_event_schedule(priv);
216}
217
32ceabca
GC
218/**
219 * stmmac_clk_csr_set - dynamically set the MDC clock
220 * @priv: driver private structure
221 * Description: this is to dynamically set the MDC clock according to the csr
222 * clock input.
223 * Note:
224 * If a specific clk_csr value is passed from the platform
225 * this means that the CSR Clock Range selection cannot be
226 * changed at run-time and it is fixed (as reported in the driver
227 * documentation). Viceversa the driver will try to set the MDC
228 * clock dynamically according to the actual clock input.
229 */
cd7201f4
GC
230static void stmmac_clk_csr_set(struct stmmac_priv *priv)
231{
cd7201f4
GC
232 u32 clk_rate;
233
f573c0b9 234 clk_rate = clk_get_rate(priv->plat->stmmac_clk);
cd7201f4
GC
235
236 /* Platform provided default clk_csr would be assumed valid
ceb69499
GC
237 * for all other cases except for the below mentioned ones.
238 * For values higher than the IEEE 802.3 specified frequency
239 * we can not estimate the proper divider as it is not known
240 * the frequency of clk_csr_i. So we do not change the default
241 * divider.
242 */
cd7201f4
GC
243 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
244 if (clk_rate < CSR_F_35M)
245 priv->clk_csr = STMMAC_CSR_20_35M;
246 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
247 priv->clk_csr = STMMAC_CSR_35_60M;
248 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
249 priv->clk_csr = STMMAC_CSR_60_100M;
250 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
251 priv->clk_csr = STMMAC_CSR_100_150M;
252 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
253 priv->clk_csr = STMMAC_CSR_150_250M;
19d857c9 254 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
cd7201f4 255 priv->clk_csr = STMMAC_CSR_250_300M;
ceb69499 256 }
9f93ac8d
LC
257
258 if (priv->plat->has_sun8i) {
259 if (clk_rate > 160000000)
260 priv->clk_csr = 0x03;
261 else if (clk_rate > 80000000)
262 priv->clk_csr = 0x02;
263 else if (clk_rate > 40000000)
264 priv->clk_csr = 0x01;
265 else
266 priv->clk_csr = 0;
267 }
7d9e6c5a
JA
268
269 if (priv->plat->has_xgmac) {
270 if (clk_rate > 400000000)
271 priv->clk_csr = 0x5;
272 else if (clk_rate > 350000000)
273 priv->clk_csr = 0x4;
274 else if (clk_rate > 300000000)
275 priv->clk_csr = 0x3;
276 else if (clk_rate > 250000000)
277 priv->clk_csr = 0x2;
278 else if (clk_rate > 150000000)
279 priv->clk_csr = 0x1;
280 else
281 priv->clk_csr = 0x0;
282 }
cd7201f4
GC
283}
284
47dd7a54
GC
285static void print_pkt(unsigned char *buf, int len)
286{
424c4f78
AS
287 pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
288 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
47dd7a54 289}
47dd7a54 290
ce736788 291static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
47dd7a54 292{
ce736788 293 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
a6a3e026 294 u32 avail;
e3ad57c9 295
ce736788
JP
296 if (tx_q->dirty_tx > tx_q->cur_tx)
297 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
e3ad57c9 298 else
ce736788 299 avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
e3ad57c9
GC
300
301 return avail;
302}
303
54139cf3
JP
304/**
305 * stmmac_rx_dirty - Get RX queue dirty
306 * @priv: driver private structure
307 * @queue: RX queue index
308 */
309static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
e3ad57c9 310{
54139cf3 311 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
a6a3e026 312 u32 dirty;
e3ad57c9 313
54139cf3
JP
314 if (rx_q->dirty_rx <= rx_q->cur_rx)
315 dirty = rx_q->cur_rx - rx_q->dirty_rx;
e3ad57c9 316 else
54139cf3 317 dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
e3ad57c9
GC
318
319 return dirty;
47dd7a54
GC
320}
321
32ceabca 322/**
732fdf0e 323 * stmmac_enable_eee_mode - check and enter in LPI mode
32ceabca 324 * @priv: driver private structure
732fdf0e
GC
325 * Description: this function is to verify and enter in LPI mode in case of
326 * EEE.
32ceabca 327 */
d765955d
GC
328static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
329{
ce736788
JP
330 u32 tx_cnt = priv->plat->tx_queues_to_use;
331 u32 queue;
332
333 /* check if all TX queues have the work finished */
334 for (queue = 0; queue < tx_cnt; queue++) {
335 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
336
337 if (tx_q->dirty_tx != tx_q->cur_tx)
338 return; /* still unfinished work */
339 }
340
d765955d 341 /* Check and enter in LPI mode */
ce736788 342 if (!priv->tx_path_in_lpi_mode)
c10d4c82
JA
343 stmmac_set_eee_mode(priv, priv->hw,
344 priv->plat->en_tx_lpi_clockgating);
d765955d
GC
345}
346
32ceabca 347/**
732fdf0e 348 * stmmac_disable_eee_mode - disable and exit from LPI mode
32ceabca
GC
349 * @priv: driver private structure
350 * Description: this function is to exit and disable EEE in case of
351 * LPI state is true. This is called by the xmit.
352 */
d765955d
GC
353void stmmac_disable_eee_mode(struct stmmac_priv *priv)
354{
c10d4c82 355 stmmac_reset_eee_mode(priv, priv->hw);
d765955d
GC
356 del_timer_sync(&priv->eee_ctrl_timer);
357 priv->tx_path_in_lpi_mode = false;
358}
359
360/**
732fdf0e 361 * stmmac_eee_ctrl_timer - EEE TX SW timer.
d765955d
GC
362 * @arg : data hook
363 * Description:
32ceabca 364 * if there is no data transfer and if we are not in LPI state,
d765955d
GC
365 * then MAC Transmitter can be moved to LPI state.
366 */
e99e88a9 367static void stmmac_eee_ctrl_timer(struct timer_list *t)
d765955d 368{
e99e88a9 369 struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
d765955d
GC
370
371 stmmac_enable_eee_mode(priv);
f5351ef7 372 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
d765955d
GC
373}
374
375/**
732fdf0e 376 * stmmac_eee_init - init EEE
32ceabca 377 * @priv: driver private structure
d765955d 378 * Description:
732fdf0e
GC
379 * if the GMAC supports the EEE (from the HW cap reg) and the phy device
380 * can also manage EEE, this function enable the LPI state and start related
381 * timer.
d765955d
GC
382 */
383bool stmmac_eee_init(struct stmmac_priv *priv)
384{
74371272 385 int tx_lpi_timer = priv->tx_lpi_timer;
879626e3 386
f5351ef7
GC
387 /* Using PCS we cannot dial with the phy registers at this stage
388 * so we do not support extra feature like EEE.
389 */
3fe5cadb
GC
390 if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
391 (priv->hw->pcs == STMMAC_PCS_TBI) ||
392 (priv->hw->pcs == STMMAC_PCS_RTBI))
74371272 393 return false;
d765955d 394
74371272
JA
395 /* Check if MAC core supports the EEE feature. */
396 if (!priv->dma_cap.eee)
397 return false;
398
399 mutex_lock(&priv->lock);
4741cf9c 400
74371272 401 /* Check if it needs to be deactivated */
177d935a
JH
402 if (!priv->eee_active) {
403 if (priv->eee_enabled) {
404 netdev_dbg(priv->dev, "disable EEE\n");
405 del_timer_sync(&priv->eee_ctrl_timer);
406 stmmac_set_eee_timer(priv, priv->hw, 0, tx_lpi_timer);
407 }
0867bb97 408 mutex_unlock(&priv->lock);
74371272 409 return false;
d765955d 410 }
74371272
JA
411
412 if (priv->eee_active && !priv->eee_enabled) {
413 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
414 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
415 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
416 tx_lpi_timer);
417 }
418
419 mutex_unlock(&priv->lock);
420 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
421 return true;
d765955d
GC
422}
423
732fdf0e 424/* stmmac_get_tx_hwtstamp - get HW TX timestamps
32ceabca 425 * @priv: driver private structure
ba1ffd74 426 * @p : descriptor pointer
891434b1
RK
427 * @skb : the socket buffer
428 * Description :
429 * This function will read timestamp from the descriptor & pass it to stack.
430 * and also perform some sanity checks.
431 */
432static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
ba1ffd74 433 struct dma_desc *p, struct sk_buff *skb)
891434b1
RK
434{
435 struct skb_shared_hwtstamps shhwtstamp;
25e80cd0 436 bool found = false;
df103170 437 u64 ns = 0;
891434b1
RK
438
439 if (!priv->hwts_tx_en)
440 return;
441
ceb69499 442 /* exit if skb doesn't support hw tstamp */
75e4364f 443 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
891434b1
RK
444 return;
445
891434b1 446 /* check tx tstamp status */
42de047d 447 if (stmmac_get_tx_timestamp_status(priv, p)) {
42de047d 448 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
25e80cd0
JA
449 found = true;
450 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
451 found = true;
452 }
891434b1 453
25e80cd0 454 if (found) {
ba1ffd74
GC
455 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
456 shhwtstamp.hwtstamp = ns_to_ktime(ns);
891434b1 457
33d4c482 458 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
ba1ffd74
GC
459 /* pass tstamp to stack */
460 skb_tstamp_tx(skb, &shhwtstamp);
461 }
891434b1
RK
462}
463
732fdf0e 464/* stmmac_get_rx_hwtstamp - get HW RX timestamps
32ceabca 465 * @priv: driver private structure
ba1ffd74
GC
466 * @p : descriptor pointer
467 * @np : next descriptor pointer
891434b1
RK
468 * @skb : the socket buffer
469 * Description :
470 * This function will read received packet's timestamp from the descriptor
471 * and pass it to stack. It also perform some sanity checks.
472 */
ba1ffd74
GC
473static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
474 struct dma_desc *np, struct sk_buff *skb)
891434b1
RK
475{
476 struct skb_shared_hwtstamps *shhwtstamp = NULL;
98870943 477 struct dma_desc *desc = p;
df103170 478 u64 ns = 0;
891434b1
RK
479
480 if (!priv->hwts_rx_en)
481 return;
98870943 482 /* For GMAC4, the valid timestamp is from CTX next desc. */
7d9e6c5a 483 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
98870943 484 desc = np;
891434b1 485
ba1ffd74 486 /* Check if timestamp is available */
42de047d
JA
487 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
488 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
33d4c482 489 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
ba1ffd74
GC
490 shhwtstamp = skb_hwtstamps(skb);
491 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
492 shhwtstamp->hwtstamp = ns_to_ktime(ns);
493 } else {
33d4c482 494 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
ba1ffd74 495 }
891434b1
RK
496}
497
498/**
d6228b7c 499 * stmmac_hwtstamp_set - control hardware timestamping.
891434b1 500 * @dev: device pointer.
8d45e42b 501 * @ifr: An IOCTL specific structure, that can contain a pointer to
891434b1
RK
502 * a proprietary structure used to pass information to the driver.
503 * Description:
504 * This function configures the MAC to enable/disable both outgoing(TX)
505 * and incoming(RX) packets time stamping based on user input.
506 * Return Value:
507 * 0 on success and an appropriate -ve integer on failure.
508 */
d6228b7c 509static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
891434b1
RK
510{
511 struct stmmac_priv *priv = netdev_priv(dev);
512 struct hwtstamp_config config;
0a624155 513 struct timespec64 now;
891434b1
RK
514 u64 temp = 0;
515 u32 ptp_v2 = 0;
516 u32 tstamp_all = 0;
517 u32 ptp_over_ipv4_udp = 0;
518 u32 ptp_over_ipv6_udp = 0;
519 u32 ptp_over_ethernet = 0;
520 u32 snap_type_sel = 0;
521 u32 ts_master_en = 0;
522 u32 ts_event_en = 0;
df103170 523 u32 sec_inc = 0;
891434b1 524 u32 value = 0;
7d9e6c5a
JA
525 bool xmac;
526
527 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
891434b1
RK
528
529 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
530 netdev_alert(priv->dev, "No support for HW time stamping\n");
531 priv->hwts_tx_en = 0;
532 priv->hwts_rx_en = 0;
533
534 return -EOPNOTSUPP;
535 }
536
537 if (copy_from_user(&config, ifr->ifr_data,
d6228b7c 538 sizeof(config)))
891434b1
RK
539 return -EFAULT;
540
38ddc59d
LC
541 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
542 __func__, config.flags, config.tx_type, config.rx_filter);
891434b1
RK
543
544 /* reserved for future extensions */
545 if (config.flags)
546 return -EINVAL;
547
5f3da328
BH
548 if (config.tx_type != HWTSTAMP_TX_OFF &&
549 config.tx_type != HWTSTAMP_TX_ON)
891434b1 550 return -ERANGE;
891434b1
RK
551
552 if (priv->adv_ts) {
553 switch (config.rx_filter) {
891434b1 554 case HWTSTAMP_FILTER_NONE:
ceb69499 555 /* time stamp no incoming packet at all */
891434b1
RK
556 config.rx_filter = HWTSTAMP_FILTER_NONE;
557 break;
558
891434b1 559 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
ceb69499 560 /* PTP v1, UDP, any kind of event packet */
891434b1 561 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
7d8e249f
IA
562 /* 'xmac' hardware can support Sync, Pdelay_Req and
563 * Pdelay_resp by setting bit14 and bits17/16 to 01
564 * This leaves Delay_Req timestamps out.
565 * Enable all events *and* general purpose message
566 * timestamping
567 */
568 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
891434b1
RK
569 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
570 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
571 break;
572
891434b1 573 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
ceb69499 574 /* PTP v1, UDP, Sync packet */
891434b1
RK
575 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
576 /* take time stamp for SYNC messages only */
577 ts_event_en = PTP_TCR_TSEVNTENA;
578
579 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
580 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
581 break;
582
891434b1 583 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
ceb69499 584 /* PTP v1, UDP, Delay_req packet */
891434b1
RK
585 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
586 /* take time stamp for Delay_Req messages only */
587 ts_master_en = PTP_TCR_TSMSTRENA;
588 ts_event_en = PTP_TCR_TSEVNTENA;
589
590 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
591 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
592 break;
593
891434b1 594 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
ceb69499 595 /* PTP v2, UDP, any kind of event packet */
891434b1
RK
596 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
597 ptp_v2 = PTP_TCR_TSVER2ENA;
598 /* take time stamp for all event messages */
7d8e249f 599 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
891434b1
RK
600
601 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
602 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
603 break;
604
891434b1 605 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
ceb69499 606 /* PTP v2, UDP, Sync packet */
891434b1
RK
607 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
608 ptp_v2 = PTP_TCR_TSVER2ENA;
609 /* take time stamp for SYNC messages only */
610 ts_event_en = PTP_TCR_TSEVNTENA;
611
612 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
613 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
614 break;
615
891434b1 616 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
ceb69499 617 /* PTP v2, UDP, Delay_req packet */
891434b1
RK
618 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
619 ptp_v2 = PTP_TCR_TSVER2ENA;
620 /* take time stamp for Delay_Req messages only */
621 ts_master_en = PTP_TCR_TSMSTRENA;
622 ts_event_en = PTP_TCR_TSEVNTENA;
623
624 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
625 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
626 break;
627
891434b1 628 case HWTSTAMP_FILTER_PTP_V2_EVENT:
ceb69499 629 /* PTP v2/802.AS1 any layer, any kind of event packet */
891434b1
RK
630 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
631 ptp_v2 = PTP_TCR_TSVER2ENA;
7d8e249f 632 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
14f34733 633 ts_event_en = PTP_TCR_TSEVNTENA;
891434b1
RK
634 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
635 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
636 ptp_over_ethernet = PTP_TCR_TSIPENA;
637 break;
638
891434b1 639 case HWTSTAMP_FILTER_PTP_V2_SYNC:
ceb69499 640 /* PTP v2/802.AS1, any layer, Sync packet */
891434b1
RK
641 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
642 ptp_v2 = PTP_TCR_TSVER2ENA;
643 /* take time stamp for SYNC messages only */
644 ts_event_en = PTP_TCR_TSEVNTENA;
645
646 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
647 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
648 ptp_over_ethernet = PTP_TCR_TSIPENA;
649 break;
650
891434b1 651 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
ceb69499 652 /* PTP v2/802.AS1, any layer, Delay_req packet */
891434b1
RK
653 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
654 ptp_v2 = PTP_TCR_TSVER2ENA;
655 /* take time stamp for Delay_Req messages only */
656 ts_master_en = PTP_TCR_TSMSTRENA;
657 ts_event_en = PTP_TCR_TSEVNTENA;
658
659 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
660 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
661 ptp_over_ethernet = PTP_TCR_TSIPENA;
662 break;
663
e3412575 664 case HWTSTAMP_FILTER_NTP_ALL:
891434b1 665 case HWTSTAMP_FILTER_ALL:
ceb69499 666 /* time stamp any incoming packet */
891434b1
RK
667 config.rx_filter = HWTSTAMP_FILTER_ALL;
668 tstamp_all = PTP_TCR_TSENALL;
669 break;
670
671 default:
672 return -ERANGE;
673 }
674 } else {
675 switch (config.rx_filter) {
676 case HWTSTAMP_FILTER_NONE:
677 config.rx_filter = HWTSTAMP_FILTER_NONE;
678 break;
679 default:
680 /* PTP v1, UDP, any kind of event packet */
681 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
682 break;
683 }
684 }
685 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
5f3da328 686 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
891434b1
RK
687
688 if (!priv->hwts_tx_en && !priv->hwts_rx_en)
cc4c9001 689 stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
891434b1
RK
690 else {
691 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
ceb69499
GC
692 tstamp_all | ptp_v2 | ptp_over_ethernet |
693 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
694 ts_master_en | snap_type_sel);
cc4c9001 695 stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
891434b1
RK
696
697 /* program Sub Second Increment reg */
cc4c9001
JA
698 stmmac_config_sub_second_increment(priv,
699 priv->ptpaddr, priv->plat->clk_ptp_rate,
7d9e6c5a 700 xmac, &sec_inc);
19d857c9 701 temp = div_u64(1000000000ULL, sec_inc);
891434b1 702
9a8a02c9
JA
703 /* Store sub second increment and flags for later use */
704 priv->sub_second_inc = sec_inc;
705 priv->systime_flags = value;
706
891434b1
RK
707 /* calculate default added value:
708 * formula is :
709 * addend = (2^32)/freq_div_ratio;
19d857c9 710 * where, freq_div_ratio = 1e9ns/sec_inc
891434b1 711 */
19d857c9 712 temp = (u64)(temp << 32);
f573c0b9 713 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
cc4c9001 714 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
891434b1
RK
715
716 /* initialize system time */
0a624155
AB
717 ktime_get_real_ts64(&now);
718
719 /* lower 32 bits of tv_sec are safe until y2106 */
cc4c9001
JA
720 stmmac_init_systime(priv, priv->ptpaddr,
721 (u32)now.tv_sec, now.tv_nsec);
891434b1
RK
722 }
723
d6228b7c
AP
724 memcpy(&priv->tstamp_config, &config, sizeof(config));
725
891434b1 726 return copy_to_user(ifr->ifr_data, &config,
d6228b7c
AP
727 sizeof(config)) ? -EFAULT : 0;
728}
729
730/**
731 * stmmac_hwtstamp_get - read hardware timestamping.
732 * @dev: device pointer.
733 * @ifr: An IOCTL specific structure, that can contain a pointer to
734 * a proprietary structure used to pass information to the driver.
735 * Description:
736 * This function obtain the current hardware timestamping settings
737 as requested.
738 */
739static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
740{
741 struct stmmac_priv *priv = netdev_priv(dev);
742 struct hwtstamp_config *config = &priv->tstamp_config;
743
744 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
745 return -EOPNOTSUPP;
746
747 return copy_to_user(ifr->ifr_data, config,
748 sizeof(*config)) ? -EFAULT : 0;
891434b1
RK
749}
750
32ceabca 751/**
732fdf0e 752 * stmmac_init_ptp - init PTP
32ceabca 753 * @priv: driver private structure
732fdf0e 754 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
32ceabca 755 * This is done by looking at the HW cap. register.
732fdf0e 756 * This function also registers the ptp driver.
32ceabca 757 */
92ba6888 758static int stmmac_init_ptp(struct stmmac_priv *priv)
891434b1 759{
7d9e6c5a
JA
760 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
761
92ba6888
RK
762 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
763 return -EOPNOTSUPP;
764
7cd01399 765 priv->adv_ts = 0;
7d9e6c5a
JA
766 /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
767 if (xmac && priv->dma_cap.atime_stamp)
be9b3174
GC
768 priv->adv_ts = 1;
769 /* Dwmac 3.x core with extend_desc can support adv_ts */
770 else if (priv->extend_desc && priv->dma_cap.atime_stamp)
7cd01399
VB
771 priv->adv_ts = 1;
772
be9b3174
GC
773 if (priv->dma_cap.time_stamp)
774 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
7cd01399 775
be9b3174
GC
776 if (priv->adv_ts)
777 netdev_info(priv->dev,
778 "IEEE 1588-2008 Advanced Timestamp supported\n");
891434b1 779
891434b1
RK
780 priv->hwts_tx_en = 0;
781 priv->hwts_rx_en = 0;
92ba6888 782
c30a70d3
GC
783 stmmac_ptp_register(priv);
784
785 return 0;
92ba6888
RK
786}
787
788static void stmmac_release_ptp(struct stmmac_priv *priv)
789{
f573c0b9 790 if (priv->plat->clk_ptp_ref)
791 clk_disable_unprepare(priv->plat->clk_ptp_ref);
92ba6888 792 stmmac_ptp_unregister(priv);
891434b1
RK
793}
794
29feff39
JP
795/**
796 * stmmac_mac_flow_ctrl - Configure flow control in all queues
797 * @priv: driver private structure
798 * Description: It is used for configuring the flow control in all queues
799 */
800static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
801{
802 u32 tx_cnt = priv->plat->tx_queues_to_use;
803
c10d4c82
JA
804 stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
805 priv->pause, tx_cnt);
29feff39
JP
806}
807
eeef2f6b
JA
808static void stmmac_validate(struct phylink_config *config,
809 unsigned long *supported,
810 struct phylink_link_state *state)
811{
812 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
5b0d7d7d 813 __ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
eeef2f6b
JA
814 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
815 int tx_cnt = priv->plat->tx_queues_to_use;
816 int max_speed = priv->plat->max_speed;
817
5b0d7d7d
JA
818 phylink_set(mac_supported, 10baseT_Half);
819 phylink_set(mac_supported, 10baseT_Full);
820 phylink_set(mac_supported, 100baseT_Half);
821 phylink_set(mac_supported, 100baseT_Full);
df7699c7
JA
822 phylink_set(mac_supported, 1000baseT_Half);
823 phylink_set(mac_supported, 1000baseT_Full);
824 phylink_set(mac_supported, 1000baseKX_Full);
5b0d7d7d
JA
825
826 phylink_set(mac_supported, Autoneg);
827 phylink_set(mac_supported, Pause);
828 phylink_set(mac_supported, Asym_Pause);
829 phylink_set_port_modes(mac_supported);
830
eeef2f6b
JA
831 /* Cut down 1G if asked to */
832 if ((max_speed > 0) && (max_speed < 1000)) {
833 phylink_set(mask, 1000baseT_Full);
834 phylink_set(mask, 1000baseX_Full);
5b0d7d7d 835 } else if (priv->plat->has_xgmac) {
d9da2c87
JA
836 if (!max_speed || (max_speed >= 2500)) {
837 phylink_set(mac_supported, 2500baseT_Full);
838 phylink_set(mac_supported, 2500baseX_Full);
839 }
840 if (!max_speed || (max_speed >= 5000)) {
841 phylink_set(mac_supported, 5000baseT_Full);
842 }
843 if (!max_speed || (max_speed >= 10000)) {
844 phylink_set(mac_supported, 10000baseSR_Full);
845 phylink_set(mac_supported, 10000baseLR_Full);
846 phylink_set(mac_supported, 10000baseER_Full);
847 phylink_set(mac_supported, 10000baseLRM_Full);
848 phylink_set(mac_supported, 10000baseT_Full);
849 phylink_set(mac_supported, 10000baseKX4_Full);
850 phylink_set(mac_supported, 10000baseKR_Full);
851 }
eeef2f6b
JA
852 }
853
854 /* Half-Duplex can only work with single queue */
855 if (tx_cnt > 1) {
856 phylink_set(mask, 10baseT_Half);
857 phylink_set(mask, 100baseT_Half);
858 phylink_set(mask, 1000baseT_Half);
859 }
860
5b0d7d7d
JA
861 bitmap_and(supported, supported, mac_supported,
862 __ETHTOOL_LINK_MODE_MASK_NBITS);
863 bitmap_andnot(supported, supported, mask,
864 __ETHTOOL_LINK_MODE_MASK_NBITS);
865 bitmap_and(state->advertising, state->advertising, mac_supported,
866 __ETHTOOL_LINK_MODE_MASK_NBITS);
eeef2f6b
JA
867 bitmap_andnot(state->advertising, state->advertising, mask,
868 __ETHTOOL_LINK_MODE_MASK_NBITS);
869}
870
d46b7e4f
RK
871static void stmmac_mac_pcs_get_state(struct phylink_config *config,
872 struct phylink_link_state *state)
eeef2f6b 873{
d46b7e4f 874 state->link = 0;
eeef2f6b
JA
875}
876
74371272
JA
877static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
878 const struct phylink_link_state *state)
9ad372fc 879{
74371272 880 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
9ad372fc
JA
881 u32 ctrl;
882
883 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
74371272 884 ctrl &= ~priv->hw->link.speed_mask;
9ad372fc 885
5b0d7d7d
JA
886 if (state->interface == PHY_INTERFACE_MODE_USXGMII) {
887 switch (state->speed) {
888 case SPEED_10000:
889 ctrl |= priv->hw->link.xgmii.speed10000;
890 break;
891 case SPEED_5000:
892 ctrl |= priv->hw->link.xgmii.speed5000;
893 break;
894 case SPEED_2500:
895 ctrl |= priv->hw->link.xgmii.speed2500;
896 break;
897 default:
898 return;
899 }
900 } else {
901 switch (state->speed) {
902 case SPEED_2500:
903 ctrl |= priv->hw->link.speed2500;
904 break;
905 case SPEED_1000:
906 ctrl |= priv->hw->link.speed1000;
907 break;
908 case SPEED_100:
909 ctrl |= priv->hw->link.speed100;
910 break;
911 case SPEED_10:
912 ctrl |= priv->hw->link.speed10;
913 break;
914 default:
915 return;
916 }
9ad372fc
JA
917 }
918
74371272 919 priv->speed = state->speed;
9ad372fc 920
74371272
JA
921 if (priv->plat->fix_mac_speed)
922 priv->plat->fix_mac_speed(priv->plat->bsp_priv, state->speed);
923
924 if (!state->duplex)
925 ctrl &= ~priv->hw->link.duplex;
926 else
927 ctrl |= priv->hw->link.duplex;
9ad372fc
JA
928
929 /* Flow Control operation */
74371272
JA
930 if (state->pause)
931 stmmac_mac_flow_ctrl(priv, state->duplex);
9ad372fc
JA
932
933 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
934}
935
eeef2f6b
JA
936static void stmmac_mac_an_restart(struct phylink_config *config)
937{
938 /* Not Supported */
939}
940
74371272
JA
941static void stmmac_mac_link_down(struct phylink_config *config,
942 unsigned int mode, phy_interface_t interface)
9ad372fc 943{
74371272 944 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
9ad372fc
JA
945
946 stmmac_mac_set(priv, priv->ioaddr, false);
74371272
JA
947 priv->eee_active = false;
948 stmmac_eee_init(priv);
949 stmmac_set_eee_pls(priv, priv->hw, false);
9ad372fc
JA
950}
951
74371272
JA
952static void stmmac_mac_link_up(struct phylink_config *config,
953 unsigned int mode, phy_interface_t interface,
954 struct phy_device *phy)
9ad372fc 955{
74371272 956 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
9ad372fc
JA
957
958 stmmac_mac_set(priv, priv->ioaddr, true);
5b111770 959 if (phy && priv->dma_cap.eee) {
74371272
JA
960 priv->eee_active = phy_init_eee(phy, 1) >= 0;
961 priv->eee_enabled = stmmac_eee_init(priv);
962 stmmac_set_eee_pls(priv, priv->hw, true);
963 }
9ad372fc
JA
964}
965
74371272 966static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
eeef2f6b 967 .validate = stmmac_validate,
d46b7e4f 968 .mac_pcs_get_state = stmmac_mac_pcs_get_state,
74371272 969 .mac_config = stmmac_mac_config,
eeef2f6b 970 .mac_an_restart = stmmac_mac_an_restart,
74371272
JA
971 .mac_link_down = stmmac_mac_link_down,
972 .mac_link_up = stmmac_mac_link_up,
eeef2f6b
JA
973};
974
32ceabca 975/**
732fdf0e 976 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
32ceabca
GC
977 * @priv: driver private structure
978 * Description: this is to verify if the HW supports the PCS.
979 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
980 * configured for the TBI, RTBI, or SGMII PHY interface.
981 */
e58bb43f
GC
982static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
983{
984 int interface = priv->plat->interface;
985
986 if (priv->dma_cap.pcs) {
0d909dcd
BA
987 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
988 (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
989 (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
990 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
38ddc59d 991 netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
3fe5cadb 992 priv->hw->pcs = STMMAC_PCS_RGMII;
0d909dcd 993 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
38ddc59d 994 netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
3fe5cadb 995 priv->hw->pcs = STMMAC_PCS_SGMII;
e58bb43f
GC
996 }
997 }
998}
999
47dd7a54
GC
1000/**
1001 * stmmac_init_phy - PHY initialization
1002 * @dev: net device structure
1003 * Description: it initializes the driver's PHY state, and attaches the PHY
1004 * to the mac driver.
1005 * Return value:
1006 * 0 on success
1007 */
1008static int stmmac_init_phy(struct net_device *dev)
1009{
1010 struct stmmac_priv *priv = netdev_priv(dev);
74371272
JA
1011 struct device_node *node;
1012 int ret;
5790cf3c 1013
4838a540 1014 node = priv->plat->phylink_node;
5790cf3c 1015
42e87024 1016 if (node)
74371272 1017 ret = phylink_of_phy_connect(priv->phylink, node, 0);
42e87024
JA
1018
1019 /* Some DT bindings do not set-up the PHY handle. Let's try to
1020 * manually parse it
1021 */
1022 if (!node || ret) {
74371272
JA
1023 int addr = priv->plat->phy_addr;
1024 struct phy_device *phydev;
47dd7a54 1025
74371272
JA
1026 phydev = mdiobus_get_phy(priv->mii, addr);
1027 if (!phydev) {
1028 netdev_err(priv->dev, "no phy at addr %d\n", addr);
dfc50fca 1029 return -ENODEV;
74371272 1030 }
dfc50fca 1031
74371272 1032 ret = phylink_connect_phy(priv->phylink, phydev);
47dd7a54
GC
1033 }
1034
74371272
JA
1035 return ret;
1036}
79ee1dc3 1037
74371272
JA
1038static int stmmac_phy_setup(struct stmmac_priv *priv)
1039{
c63d1e5c 1040 struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
0060c878 1041 int mode = priv->plat->phy_interface;
74371272 1042 struct phylink *phylink;
b6cfffa7 1043
74371272
JA
1044 priv->phylink_config.dev = &priv->dev->dev;
1045 priv->phylink_config.type = PHYLINK_NETDEV;
8e99fc5f 1046
c63d1e5c 1047 phylink = phylink_create(&priv->phylink_config, fwnode,
74371272
JA
1048 mode, &stmmac_phylink_mac_ops);
1049 if (IS_ERR(phylink))
1050 return PTR_ERR(phylink);
c51e424d 1051
74371272 1052 priv->phylink = phylink;
47dd7a54
GC
1053 return 0;
1054}
1055
71fedb01 1056static void stmmac_display_rx_rings(struct stmmac_priv *priv)
c24602ef 1057{
54139cf3 1058 u32 rx_cnt = priv->plat->rx_queues_to_use;
71fedb01 1059 void *head_rx;
54139cf3 1060 u32 queue;
aff3d9ef 1061
54139cf3
JP
1062 /* Display RX rings */
1063 for (queue = 0; queue < rx_cnt; queue++) {
1064 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
d0225e7d 1065
54139cf3
JP
1066 pr_info("\tRX Queue %u rings\n", queue);
1067
1068 if (priv->extend_desc)
1069 head_rx = (void *)rx_q->dma_erx;
1070 else
1071 head_rx = (void *)rx_q->dma_rx;
1072
1073 /* Display RX ring */
42de047d 1074 stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
54139cf3 1075 }
71fedb01
JP
1076}
1077
1078static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1079{
ce736788 1080 u32 tx_cnt = priv->plat->tx_queues_to_use;
71fedb01 1081 void *head_tx;
ce736788 1082 u32 queue;
71fedb01 1083
ce736788
JP
1084 /* Display TX rings */
1085 for (queue = 0; queue < tx_cnt; queue++) {
1086 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
71fedb01 1087
ce736788
JP
1088 pr_info("\tTX Queue %d rings\n", queue);
1089
1090 if (priv->extend_desc)
1091 head_tx = (void *)tx_q->dma_etx;
1092 else
1093 head_tx = (void *)tx_q->dma_tx;
1094
42de047d 1095 stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
ce736788 1096 }
c24602ef
GC
1097}
1098
71fedb01
JP
1099static void stmmac_display_rings(struct stmmac_priv *priv)
1100{
1101 /* Display RX ring */
1102 stmmac_display_rx_rings(priv);
1103
1104 /* Display TX ring */
1105 stmmac_display_tx_rings(priv);
1106}
1107
286a8372
GC
1108static int stmmac_set_bfsize(int mtu, int bufsize)
1109{
1110 int ret = bufsize;
1111
b2f3a481
JA
1112 if (mtu >= BUF_SIZE_8KiB)
1113 ret = BUF_SIZE_16KiB;
1114 else if (mtu >= BUF_SIZE_4KiB)
286a8372
GC
1115 ret = BUF_SIZE_8KiB;
1116 else if (mtu >= BUF_SIZE_2KiB)
1117 ret = BUF_SIZE_4KiB;
d916701c 1118 else if (mtu > DEFAULT_BUFSIZE)
286a8372
GC
1119 ret = BUF_SIZE_2KiB;
1120 else
d916701c 1121 ret = DEFAULT_BUFSIZE;
286a8372
GC
1122
1123 return ret;
1124}
1125
32ceabca 1126/**
71fedb01 1127 * stmmac_clear_rx_descriptors - clear RX descriptors
32ceabca 1128 * @priv: driver private structure
54139cf3 1129 * @queue: RX queue index
71fedb01 1130 * Description: this function is called to clear the RX descriptors
32ceabca
GC
1131 * in case of both basic and extended descriptors are used.
1132 */
54139cf3 1133static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
c24602ef 1134{
54139cf3 1135 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5bacd778 1136 int i;
c24602ef 1137
71fedb01 1138 /* Clear the RX descriptors */
e3ad57c9 1139 for (i = 0; i < DMA_RX_SIZE; i++)
c24602ef 1140 if (priv->extend_desc)
42de047d
JA
1141 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1142 priv->use_riwt, priv->mode,
583e6361
AK
1143 (i == DMA_RX_SIZE - 1),
1144 priv->dma_buf_sz);
c24602ef 1145 else
42de047d
JA
1146 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1147 priv->use_riwt, priv->mode,
583e6361
AK
1148 (i == DMA_RX_SIZE - 1),
1149 priv->dma_buf_sz);
71fedb01
JP
1150}
1151
1152/**
1153 * stmmac_clear_tx_descriptors - clear tx descriptors
1154 * @priv: driver private structure
ce736788 1155 * @queue: TX queue index.
71fedb01
JP
1156 * Description: this function is called to clear the TX descriptors
1157 * in case of both basic and extended descriptors are used.
1158 */
ce736788 1159static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
71fedb01 1160{
ce736788 1161 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
71fedb01
JP
1162 int i;
1163
1164 /* Clear the TX descriptors */
e3ad57c9 1165 for (i = 0; i < DMA_TX_SIZE; i++)
c24602ef 1166 if (priv->extend_desc)
42de047d
JA
1167 stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1168 priv->mode, (i == DMA_TX_SIZE - 1));
c24602ef 1169 else
42de047d
JA
1170 stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1171 priv->mode, (i == DMA_TX_SIZE - 1));
c24602ef
GC
1172}
1173
71fedb01
JP
1174/**
1175 * stmmac_clear_descriptors - clear descriptors
1176 * @priv: driver private structure
1177 * Description: this function is called to clear the TX and RX descriptors
1178 * in case of both basic and extended descriptors are used.
1179 */
1180static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1181{
54139cf3 1182 u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
ce736788 1183 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
54139cf3
JP
1184 u32 queue;
1185
71fedb01 1186 /* Clear the RX descriptors */
54139cf3
JP
1187 for (queue = 0; queue < rx_queue_cnt; queue++)
1188 stmmac_clear_rx_descriptors(priv, queue);
71fedb01
JP
1189
1190 /* Clear the TX descriptors */
ce736788
JP
1191 for (queue = 0; queue < tx_queue_cnt; queue++)
1192 stmmac_clear_tx_descriptors(priv, queue);
71fedb01
JP
1193}
1194
732fdf0e
GC
1195/**
1196 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1197 * @priv: driver private structure
1198 * @p: descriptor pointer
1199 * @i: descriptor index
54139cf3
JP
1200 * @flags: gfp flag
1201 * @queue: RX queue index
732fdf0e
GC
1202 * Description: this function is called to allocate a receive buffer, perform
1203 * the DMA mapping and init the descriptor.
1204 */
c24602ef 1205static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
54139cf3 1206 int i, gfp_t flags, u32 queue)
c24602ef 1207{
54139cf3 1208 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
2af6106a 1209 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
c24602ef 1210
2af6106a
JA
1211 buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
1212 if (!buf->page)
56329137 1213 return -ENOMEM;
c24602ef 1214
67afd6d1
JA
1215 if (priv->sph) {
1216 buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
1217 if (!buf->sec_page)
1218 return -ENOMEM;
1219
1220 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1221 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
1222 } else {
1223 buf->sec_page = NULL;
1224 }
1225
2af6106a
JA
1226 buf->addr = page_pool_get_dma_addr(buf->page);
1227 stmmac_set_desc_addr(priv, p, buf->addr);
2c520b1c
JA
1228 if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1229 stmmac_init_desc3(priv, p);
c24602ef
GC
1230
1231 return 0;
1232}
1233
71fedb01
JP
1234/**
1235 * stmmac_free_rx_buffer - free RX dma buffers
1236 * @priv: private structure
54139cf3 1237 * @queue: RX queue index
71fedb01
JP
1238 * @i: buffer index.
1239 */
54139cf3 1240static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
56329137 1241{
54139cf3 1242 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
2af6106a 1243 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
54139cf3 1244
2af6106a
JA
1245 if (buf->page)
1246 page_pool_put_page(rx_q->page_pool, buf->page, false);
1247 buf->page = NULL;
67afd6d1
JA
1248
1249 if (buf->sec_page)
1250 page_pool_put_page(rx_q->page_pool, buf->sec_page, false);
1251 buf->sec_page = NULL;
aff3d9ef
JP
1252}
1253
1254/**
71fedb01
JP
1255 * stmmac_free_tx_buffer - free RX dma buffers
1256 * @priv: private structure
ce736788 1257 * @queue: RX queue index
71fedb01
JP
1258 * @i: buffer index.
1259 */
ce736788 1260static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
71fedb01 1261{
ce736788
JP
1262 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1263
1264 if (tx_q->tx_skbuff_dma[i].buf) {
1265 if (tx_q->tx_skbuff_dma[i].map_as_page)
71fedb01 1266 dma_unmap_page(priv->device,
ce736788
JP
1267 tx_q->tx_skbuff_dma[i].buf,
1268 tx_q->tx_skbuff_dma[i].len,
71fedb01
JP
1269 DMA_TO_DEVICE);
1270 else
1271 dma_unmap_single(priv->device,
ce736788
JP
1272 tx_q->tx_skbuff_dma[i].buf,
1273 tx_q->tx_skbuff_dma[i].len,
71fedb01
JP
1274 DMA_TO_DEVICE);
1275 }
1276
ce736788
JP
1277 if (tx_q->tx_skbuff[i]) {
1278 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1279 tx_q->tx_skbuff[i] = NULL;
1280 tx_q->tx_skbuff_dma[i].buf = 0;
1281 tx_q->tx_skbuff_dma[i].map_as_page = false;
71fedb01
JP
1282 }
1283}
1284
1285/**
1286 * init_dma_rx_desc_rings - init the RX descriptor rings
47dd7a54 1287 * @dev: net device structure
732fdf0e 1288 * @flags: gfp flag.
71fedb01 1289 * Description: this function initializes the DMA RX descriptors
5bacd778 1290 * and allocates the socket buffers. It supports the chained and ring
286a8372 1291 * modes.
47dd7a54 1292 */
71fedb01 1293static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
47dd7a54 1294{
47dd7a54 1295 struct stmmac_priv *priv = netdev_priv(dev);
54139cf3 1296 u32 rx_count = priv->plat->rx_queues_to_use;
56329137 1297 int ret = -ENOMEM;
1d3028f4 1298 int queue;
54139cf3 1299 int i;
47dd7a54 1300
54139cf3 1301 /* RX INITIALIZATION */
b3e51069
LC
1302 netif_dbg(priv, probe, priv->dev,
1303 "SKB addresses:\nskb\t\tskb data\tdma data\n");
47dd7a54 1304
54139cf3
JP
1305 for (queue = 0; queue < rx_count; queue++) {
1306 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
c24602ef 1307
54139cf3
JP
1308 netif_dbg(priv, probe, priv->dev,
1309 "(%s) dma_rx_phy=0x%08x\n", __func__,
1310 (u32)rx_q->dma_rx_phy);
f748be53 1311
cbcf0999
JA
1312 stmmac_clear_rx_descriptors(priv, queue);
1313
54139cf3
JP
1314 for (i = 0; i < DMA_RX_SIZE; i++) {
1315 struct dma_desc *p;
aff3d9ef 1316
54139cf3
JP
1317 if (priv->extend_desc)
1318 p = &((rx_q->dma_erx + i)->basic);
1319 else
1320 p = rx_q->dma_rx + i;
1321
1322 ret = stmmac_init_rx_buffers(priv, p, i, flags,
1323 queue);
1324 if (ret)
1325 goto err_init_rx_buffers;
54139cf3
JP
1326 }
1327
1328 rx_q->cur_rx = 0;
1329 rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1330
54139cf3
JP
1331 /* Setup the chained descriptor addresses */
1332 if (priv->mode == STMMAC_CHAIN_MODE) {
1333 if (priv->extend_desc)
2c520b1c
JA
1334 stmmac_mode_init(priv, rx_q->dma_erx,
1335 rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
54139cf3 1336 else
2c520b1c
JA
1337 stmmac_mode_init(priv, rx_q->dma_rx,
1338 rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
54139cf3 1339 }
71fedb01
JP
1340 }
1341
1342 return 0;
54139cf3 1343
71fedb01 1344err_init_rx_buffers:
54139cf3
JP
1345 while (queue >= 0) {
1346 while (--i >= 0)
1347 stmmac_free_rx_buffer(priv, queue, i);
1348
1349 if (queue == 0)
1350 break;
1351
1352 i = DMA_RX_SIZE;
1353 queue--;
1354 }
1355
71fedb01
JP
1356 return ret;
1357}
1358
1359/**
1360 * init_dma_tx_desc_rings - init the TX descriptor rings
1361 * @dev: net device structure.
1362 * Description: this function initializes the DMA TX descriptors
1363 * and allocates the socket buffers. It supports the chained and ring
1364 * modes.
1365 */
1366static int init_dma_tx_desc_rings(struct net_device *dev)
1367{
1368 struct stmmac_priv *priv = netdev_priv(dev);
ce736788
JP
1369 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1370 u32 queue;
71fedb01
JP
1371 int i;
1372
ce736788
JP
1373 for (queue = 0; queue < tx_queue_cnt; queue++) {
1374 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
71fedb01 1375
ce736788
JP
1376 netif_dbg(priv, probe, priv->dev,
1377 "(%s) dma_tx_phy=0x%08x\n", __func__,
1378 (u32)tx_q->dma_tx_phy);
f748be53 1379
ce736788
JP
1380 /* Setup the chained descriptor addresses */
1381 if (priv->mode == STMMAC_CHAIN_MODE) {
1382 if (priv->extend_desc)
2c520b1c
JA
1383 stmmac_mode_init(priv, tx_q->dma_etx,
1384 tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
ce736788 1385 else
2c520b1c
JA
1386 stmmac_mode_init(priv, tx_q->dma_tx,
1387 tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
ce736788 1388 }
aff3d9ef 1389
ce736788
JP
1390 for (i = 0; i < DMA_TX_SIZE; i++) {
1391 struct dma_desc *p;
ce736788
JP
1392 if (priv->extend_desc)
1393 p = &((tx_q->dma_etx + i)->basic);
1394 else
1395 p = tx_q->dma_tx + i;
1396
44c67f85 1397 stmmac_clear_desc(priv, p);
ce736788
JP
1398
1399 tx_q->tx_skbuff_dma[i].buf = 0;
1400 tx_q->tx_skbuff_dma[i].map_as_page = false;
1401 tx_q->tx_skbuff_dma[i].len = 0;
1402 tx_q->tx_skbuff_dma[i].last_segment = false;
1403 tx_q->tx_skbuff[i] = NULL;
5bacd778 1404 }
aff3d9ef 1405
ce736788
JP
1406 tx_q->dirty_tx = 0;
1407 tx_q->cur_tx = 0;
8d212a9e 1408 tx_q->mss = 0;
286a8372 1409
c22a3f48
JP
1410 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1411 }
aff3d9ef 1412
71fedb01
JP
1413 return 0;
1414}
1415
1416/**
1417 * init_dma_desc_rings - init the RX/TX descriptor rings
1418 * @dev: net device structure
1419 * @flags: gfp flag.
1420 * Description: this function initializes the DMA RX/TX descriptors
1421 * and allocates the socket buffers. It supports the chained and ring
1422 * modes.
1423 */
1424static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1425{
1426 struct stmmac_priv *priv = netdev_priv(dev);
1427 int ret;
1428
1429 ret = init_dma_rx_desc_rings(dev, flags);
1430 if (ret)
1431 return ret;
1432
1433 ret = init_dma_tx_desc_rings(dev);
1434
5bacd778 1435 stmmac_clear_descriptors(priv);
47dd7a54 1436
c24602ef
GC
1437 if (netif_msg_hw(priv))
1438 stmmac_display_rings(priv);
56329137 1439
56329137 1440 return ret;
47dd7a54
GC
1441}
1442
71fedb01
JP
1443/**
1444 * dma_free_rx_skbufs - free RX dma buffers
1445 * @priv: private structure
54139cf3 1446 * @queue: RX queue index
71fedb01 1447 */
54139cf3 1448static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
47dd7a54
GC
1449{
1450 int i;
1451
e3ad57c9 1452 for (i = 0; i < DMA_RX_SIZE; i++)
54139cf3 1453 stmmac_free_rx_buffer(priv, queue, i);
47dd7a54
GC
1454}
1455
71fedb01
JP
1456/**
1457 * dma_free_tx_skbufs - free TX dma buffers
1458 * @priv: private structure
ce736788 1459 * @queue: TX queue index
71fedb01 1460 */
ce736788 1461static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
47dd7a54
GC
1462{
1463 int i;
1464
71fedb01 1465 for (i = 0; i < DMA_TX_SIZE; i++)
ce736788 1466 stmmac_free_tx_buffer(priv, queue, i);
47dd7a54
GC
1467}
1468
54139cf3
JP
1469/**
1470 * free_dma_rx_desc_resources - free RX dma desc resources
1471 * @priv: private structure
1472 */
1473static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1474{
1475 u32 rx_count = priv->plat->rx_queues_to_use;
1476 u32 queue;
1477
1478 /* Free RX queue resources */
1479 for (queue = 0; queue < rx_count; queue++) {
1480 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1481
1482 /* Release the DMA RX socket buffers */
1483 dma_free_rx_skbufs(priv, queue);
1484
1485 /* Free DMA regions of consistent memory previously allocated */
1486 if (!priv->extend_desc)
1487 dma_free_coherent(priv->device,
1488 DMA_RX_SIZE * sizeof(struct dma_desc),
1489 rx_q->dma_rx, rx_q->dma_rx_phy);
1490 else
1491 dma_free_coherent(priv->device, DMA_RX_SIZE *
1492 sizeof(struct dma_extended_desc),
1493 rx_q->dma_erx, rx_q->dma_rx_phy);
1494
2af6106a 1495 kfree(rx_q->buf_pool);
c3f812ce 1496 if (rx_q->page_pool)
2af6106a 1497 page_pool_destroy(rx_q->page_pool);
54139cf3
JP
1498 }
1499}
1500
ce736788
JP
1501/**
1502 * free_dma_tx_desc_resources - free TX dma desc resources
1503 * @priv: private structure
1504 */
1505static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1506{
1507 u32 tx_count = priv->plat->tx_queues_to_use;
62242260 1508 u32 queue;
ce736788
JP
1509
1510 /* Free TX queue resources */
1511 for (queue = 0; queue < tx_count; queue++) {
1512 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1513
1514 /* Release the DMA TX socket buffers */
1515 dma_free_tx_skbufs(priv, queue);
1516
1517 /* Free DMA regions of consistent memory previously allocated */
1518 if (!priv->extend_desc)
1519 dma_free_coherent(priv->device,
1520 DMA_TX_SIZE * sizeof(struct dma_desc),
1521 tx_q->dma_tx, tx_q->dma_tx_phy);
1522 else
1523 dma_free_coherent(priv->device, DMA_TX_SIZE *
1524 sizeof(struct dma_extended_desc),
1525 tx_q->dma_etx, tx_q->dma_tx_phy);
1526
1527 kfree(tx_q->tx_skbuff_dma);
1528 kfree(tx_q->tx_skbuff);
1529 }
1530}
1531
732fdf0e 1532/**
71fedb01 1533 * alloc_dma_rx_desc_resources - alloc RX resources.
732fdf0e
GC
1534 * @priv: private structure
1535 * Description: according to which descriptor can be used (extend or basic)
5bacd778
LC
1536 * this function allocates the resources for TX and RX paths. In case of
1537 * reception, for example, it pre-allocated the RX socket buffer in order to
1538 * allow zero-copy mechanism.
732fdf0e 1539 */
71fedb01 1540static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
09f8d696 1541{
54139cf3 1542 u32 rx_count = priv->plat->rx_queues_to_use;
09f8d696 1543 int ret = -ENOMEM;
54139cf3 1544 u32 queue;
09f8d696 1545
54139cf3
JP
1546 /* RX queues buffers and DMA */
1547 for (queue = 0; queue < rx_count; queue++) {
1548 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
2af6106a 1549 struct page_pool_params pp_params = { 0 };
4f28bd95 1550 unsigned int num_pages;
09f8d696 1551
54139cf3
JP
1552 rx_q->queue_index = queue;
1553 rx_q->priv_data = priv;
5bacd778 1554
2af6106a
JA
1555 pp_params.flags = PP_FLAG_DMA_MAP;
1556 pp_params.pool_size = DMA_RX_SIZE;
4f28bd95
TR
1557 num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
1558 pp_params.order = ilog2(num_pages);
2af6106a
JA
1559 pp_params.nid = dev_to_node(priv->device);
1560 pp_params.dev = priv->device;
1561 pp_params.dma_dir = DMA_FROM_DEVICE;
1562
1563 rx_q->page_pool = page_pool_create(&pp_params);
1564 if (IS_ERR(rx_q->page_pool)) {
1565 ret = PTR_ERR(rx_q->page_pool);
1566 rx_q->page_pool = NULL;
63c3aa6b 1567 goto err_dma;
2af6106a 1568 }
71fedb01 1569
ec5e5ce1
JA
1570 rx_q->buf_pool = kcalloc(DMA_RX_SIZE, sizeof(*rx_q->buf_pool),
1571 GFP_KERNEL);
2af6106a 1572 if (!rx_q->buf_pool)
71fedb01 1573 goto err_dma;
54139cf3
JP
1574
1575 if (priv->extend_desc) {
750afb08
LC
1576 rx_q->dma_erx = dma_alloc_coherent(priv->device,
1577 DMA_RX_SIZE * sizeof(struct dma_extended_desc),
1578 &rx_q->dma_rx_phy,
1579 GFP_KERNEL);
54139cf3
JP
1580 if (!rx_q->dma_erx)
1581 goto err_dma;
1582
1583 } else {
750afb08
LC
1584 rx_q->dma_rx = dma_alloc_coherent(priv->device,
1585 DMA_RX_SIZE * sizeof(struct dma_desc),
1586 &rx_q->dma_rx_phy,
1587 GFP_KERNEL);
54139cf3
JP
1588 if (!rx_q->dma_rx)
1589 goto err_dma;
1590 }
71fedb01
JP
1591 }
1592
1593 return 0;
1594
1595err_dma:
54139cf3
JP
1596 free_dma_rx_desc_resources(priv);
1597
71fedb01
JP
1598 return ret;
1599}
1600
1601/**
1602 * alloc_dma_tx_desc_resources - alloc TX resources.
1603 * @priv: private structure
1604 * Description: according to which descriptor can be used (extend or basic)
1605 * this function allocates the resources for TX and RX paths. In case of
1606 * reception, for example, it pre-allocated the RX socket buffer in order to
1607 * allow zero-copy mechanism.
1608 */
1609static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1610{
ce736788 1611 u32 tx_count = priv->plat->tx_queues_to_use;
71fedb01 1612 int ret = -ENOMEM;
ce736788 1613 u32 queue;
71fedb01 1614
ce736788
JP
1615 /* TX queues buffers and DMA */
1616 for (queue = 0; queue < tx_count; queue++) {
1617 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
5bacd778 1618
ce736788
JP
1619 tx_q->queue_index = queue;
1620 tx_q->priv_data = priv;
5bacd778 1621
ec5e5ce1
JA
1622 tx_q->tx_skbuff_dma = kcalloc(DMA_TX_SIZE,
1623 sizeof(*tx_q->tx_skbuff_dma),
1624 GFP_KERNEL);
ce736788 1625 if (!tx_q->tx_skbuff_dma)
62242260 1626 goto err_dma;
ce736788 1627
ec5e5ce1
JA
1628 tx_q->tx_skbuff = kcalloc(DMA_TX_SIZE,
1629 sizeof(struct sk_buff *),
1630 GFP_KERNEL);
ce736788 1631 if (!tx_q->tx_skbuff)
62242260 1632 goto err_dma;
ce736788
JP
1633
1634 if (priv->extend_desc) {
750afb08
LC
1635 tx_q->dma_etx = dma_alloc_coherent(priv->device,
1636 DMA_TX_SIZE * sizeof(struct dma_extended_desc),
1637 &tx_q->dma_tx_phy,
1638 GFP_KERNEL);
ce736788 1639 if (!tx_q->dma_etx)
62242260 1640 goto err_dma;
ce736788 1641 } else {
750afb08
LC
1642 tx_q->dma_tx = dma_alloc_coherent(priv->device,
1643 DMA_TX_SIZE * sizeof(struct dma_desc),
1644 &tx_q->dma_tx_phy,
1645 GFP_KERNEL);
ce736788 1646 if (!tx_q->dma_tx)
62242260 1647 goto err_dma;
ce736788 1648 }
09f8d696
SK
1649 }
1650
1651 return 0;
1652
62242260 1653err_dma:
ce736788
JP
1654 free_dma_tx_desc_resources(priv);
1655
09f8d696
SK
1656 return ret;
1657}
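/* Editorial note (rough sketch, assuming the usual 512-entry ring and the
 * 4-word basic / 8-word extended descriptor layouts): the coherent memory
 * consumed per TX queue by the dma_alloc_coherent() calls above is about
 *
 *	512 * sizeof(struct dma_desc)          = 512 * 16 B =  8 KiB
 *	512 * sizeof(struct dma_extended_desc) = 512 * 32 B = 16 KiB
 */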
1658
71fedb01
JP
1659/**
1660 * alloc_dma_desc_resources - alloc TX/RX resources.
1661 * @priv: private structure
1662 * Description: according to which descriptor can be used (extended or basic)
1663 * this function allocates the resources for the TX and RX paths. In case of
1664 * reception, for example, it pre-allocates the RX buffers in order to
1665 * allow a zero-copy mechanism.
1666 */
1667static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1668{
54139cf3 1669 /* RX Allocation */
71fedb01
JP
1670 int ret = alloc_dma_rx_desc_resources(priv);
1671
1672 if (ret)
1673 return ret;
1674
1675 ret = alloc_dma_tx_desc_resources(priv);
1676
1677 return ret;
1678}
1679
71fedb01
JP
1680/**
1681 * free_dma_desc_resources - free dma desc resources
1682 * @priv: private structure
1683 */
1684static void free_dma_desc_resources(struct stmmac_priv *priv)
1685{
1686 /* Release the DMA RX socket buffers */
1687 free_dma_rx_desc_resources(priv);
1688
1689 /* Release the DMA TX socket buffers */
1690 free_dma_tx_desc_resources(priv);
1691}
1692
9eb12474 1693/**
1694 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
1695 * @priv: driver private structure
1696 * Description: It is used for enabling the rx queues in the MAC
1697 */
1698static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1699{
4f6046f5
JP
1700 u32 rx_queues_count = priv->plat->rx_queues_to_use;
1701 int queue;
1702 u8 mode;
9eb12474 1703
4f6046f5
JP
1704 for (queue = 0; queue < rx_queues_count; queue++) {
1705 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
c10d4c82 1706 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
4f6046f5 1707 }
9eb12474 1708}
1709
ae4f0d46
JP
1710/**
1711 * stmmac_start_rx_dma - start RX DMA channel
1712 * @priv: driver private structure
1713 * @chan: RX channel index
1714 * Description:
1715 * This starts an RX DMA channel
1716 */
1717static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1718{
1719 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
a4e887fa 1720 stmmac_start_rx(priv, priv->ioaddr, chan);
ae4f0d46
JP
1721}
1722
1723/**
1724 * stmmac_start_tx_dma - start TX DMA channel
1725 * @priv: driver private structure
1726 * @chan: TX channel index
1727 * Description:
1728 * This starts a TX DMA channel
1729 */
1730static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1731{
1732 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
a4e887fa 1733 stmmac_start_tx(priv, priv->ioaddr, chan);
ae4f0d46
JP
1734}
1735
1736/**
1737 * stmmac_stop_rx_dma - stop RX DMA channel
1738 * @priv: driver private structure
1739 * @chan: RX channel index
1740 * Description:
1741 * This stops an RX DMA channel
1742 */
1743static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1744{
1745 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
a4e887fa 1746 stmmac_stop_rx(priv, priv->ioaddr, chan);
ae4f0d46
JP
1747}
1748
1749/**
1750 * stmmac_stop_tx_dma - stop TX DMA channel
1751 * @priv: driver private structure
1752 * @chan: TX channel index
1753 * Description:
1754 * This stops a TX DMA channel
1755 */
1756static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1757{
1758 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
a4e887fa 1759 stmmac_stop_tx(priv, priv->ioaddr, chan);
ae4f0d46
JP
1760}
1761
1762/**
1763 * stmmac_start_all_dma - start all RX and TX DMA channels
1764 * @priv: driver private structure
1765 * Description:
1766 * This starts all the RX and TX DMA channels
1767 */
1768static void stmmac_start_all_dma(struct stmmac_priv *priv)
1769{
1770 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1771 u32 tx_channels_count = priv->plat->tx_queues_to_use;
1772 u32 chan = 0;
1773
1774 for (chan = 0; chan < rx_channels_count; chan++)
1775 stmmac_start_rx_dma(priv, chan);
1776
1777 for (chan = 0; chan < tx_channels_count; chan++)
1778 stmmac_start_tx_dma(priv, chan);
1779}
1780
1781/**
1782 * stmmac_stop_all_dma - stop all RX and TX DMA channels
1783 * @priv: driver private structure
1784 * Description:
1785 * This stops the RX and TX DMA channels
1786 */
1787static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1788{
1789 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1790 u32 tx_channels_count = priv->plat->tx_queues_to_use;
1791 u32 chan = 0;
1792
1793 for (chan = 0; chan < rx_channels_count; chan++)
1794 stmmac_stop_rx_dma(priv, chan);
1795
1796 for (chan = 0; chan < tx_channels_count; chan++)
1797 stmmac_stop_tx_dma(priv, chan);
1798}
1799
47dd7a54
GC
1800/**
1801 * stmmac_dma_operation_mode - HW DMA operation mode
32ceabca 1802 * @priv: driver private structure
732fdf0e
GC
1803 * Description: it is used for configuring the DMA operation mode register in
1804 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
47dd7a54
GC
1805 */
1806static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1807{
6deee222
JP
1808 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1809 u32 tx_channels_count = priv->plat->tx_queues_to_use;
f88203a2 1810 int rxfifosz = priv->plat->rx_fifo_size;
52a76235 1811 int txfifosz = priv->plat->tx_fifo_size;
6deee222
JP
1812 u32 txmode = 0;
1813 u32 rxmode = 0;
1814 u32 chan = 0;
a0daae13 1815 u8 qmode = 0;
f88203a2 1816
11fbf811
TR
1817 if (rxfifosz == 0)
1818 rxfifosz = priv->dma_cap.rx_fifo_size;
52a76235
JA
1819 if (txfifosz == 0)
1820 txfifosz = priv->dma_cap.tx_fifo_size;
1821
1822 /* Adjust for real per queue fifo size */
1823 rxfifosz /= rx_channels_count;
1824 txfifosz /= tx_channels_count;
11fbf811 1825
6deee222
JP
1826 if (priv->plat->force_thresh_dma_mode) {
1827 txmode = tc;
1828 rxmode = tc;
1829 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
61b8013a
SK
1830 /*
1831 * In case of GMAC, SF mode can be enabled
1832 * to perform the TX COE in HW. This depends on:
ebbb293f
GC
1833 * 1) TX COE being actually supported
1834 * 2) there being no buggy Jumbo frame support
1835 * that would require not inserting the csum in the TDES.
1836 */
6deee222
JP
1837 txmode = SF_DMA_MODE;
1838 rxmode = SF_DMA_MODE;
b2dec116 1839 priv->xstats.threshold = SF_DMA_MODE;
6deee222
JP
1840 } else {
1841 txmode = tc;
1842 rxmode = SF_DMA_MODE;
1843 }
1844
1845 /* configure all channels */
ab0204e3
JA
1846 for (chan = 0; chan < rx_channels_count; chan++) {
1847 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
a0daae13 1848
ab0204e3
JA
1849 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1850 rxfifosz, qmode);
4205c88e
JA
1851 stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
1852 chan);
ab0204e3 1853 }
a0daae13 1854
ab0204e3
JA
1855 for (chan = 0; chan < tx_channels_count; chan++) {
1856 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
6deee222 1857
ab0204e3
JA
1858 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1859 txfifosz, qmode);
6deee222 1860 }
47dd7a54
GC
1861}
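/* Editorial worked example (numbers illustrative): with an 8192-byte RX FIFO
 * reported in dma_cap and rx_channels_count = 4, the adjustment above
 * programs each RX queue with rxfifosz = 8192 / 4 = 2048 bytes; the TX FIFO
 * is split across the TX queues in the same way.
 */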
1862
47dd7a54 1863/**
732fdf0e 1864 * stmmac_tx_clean - to manage the transmission completion
32ceabca 1865 * @priv: driver private structure
ce736788 1866 * @queue: TX queue index
732fdf0e 1867 * Description: it reclaims the transmit resources after transmission completes.
47dd7a54 1868 */
8fce3331 1869static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
47dd7a54 1870{
ce736788 1871 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
38979574 1872 unsigned int bytes_compl = 0, pkts_compl = 0;
8fce3331 1873 unsigned int entry, count = 0;
47dd7a54 1874
8fce3331 1875 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
a9097a96 1876
9125cdd1
GC
1877 priv->xstats.tx_clean++;
1878
8d5f4b07 1879 entry = tx_q->dirty_tx;
8fce3331 1880 while ((entry != tx_q->cur_tx) && (count < budget)) {
ce736788 1881 struct sk_buff *skb = tx_q->tx_skbuff[entry];
c24602ef 1882 struct dma_desc *p;
c363b658 1883 int status;
c24602ef
GC
1884
1885 if (priv->extend_desc)
ce736788 1886 p = (struct dma_desc *)(tx_q->dma_etx + entry);
c24602ef 1887 else
ce736788 1888 p = tx_q->dma_tx + entry;
47dd7a54 1889
42de047d
JA
1890 status = stmmac_tx_status(priv, &priv->dev->stats,
1891 &priv->xstats, p, priv->ioaddr);
c363b658
FG
1892 /* Check if the descriptor is owned by the DMA */
1893 if (unlikely(status & tx_dma_own))
1894 break;
1895
8fce3331
JA
1896 count++;
1897
a6b25da5
NC
1898 /* Make sure descriptor fields are read after reading
1899 * the own bit.
1900 */
1901 dma_rmb();
1902
c363b658
FG
1903 /* Just consider the last segment and ...*/
1904 if (likely(!(status & tx_not_ls))) {
1905 /* ... verify the status error condition */
1906 if (unlikely(status & tx_err)) {
1907 priv->dev->stats.tx_errors++;
1908 } else {
47dd7a54
GC
1909 priv->dev->stats.tx_packets++;
1910 priv->xstats.tx_pkt_n++;
c363b658 1911 }
ba1ffd74 1912 stmmac_get_tx_hwtstamp(priv, p, skb);
47dd7a54 1913 }
47dd7a54 1914
ce736788
JP
1915 if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1916 if (tx_q->tx_skbuff_dma[entry].map_as_page)
362b37be 1917 dma_unmap_page(priv->device,
ce736788
JP
1918 tx_q->tx_skbuff_dma[entry].buf,
1919 tx_q->tx_skbuff_dma[entry].len,
362b37be
GC
1920 DMA_TO_DEVICE);
1921 else
1922 dma_unmap_single(priv->device,
ce736788
JP
1923 tx_q->tx_skbuff_dma[entry].buf,
1924 tx_q->tx_skbuff_dma[entry].len,
362b37be 1925 DMA_TO_DEVICE);
ce736788
JP
1926 tx_q->tx_skbuff_dma[entry].buf = 0;
1927 tx_q->tx_skbuff_dma[entry].len = 0;
1928 tx_q->tx_skbuff_dma[entry].map_as_page = false;
cf32deec 1929 }
f748be53 1930
2c520b1c 1931 stmmac_clean_desc3(priv, tx_q, p);
f748be53 1932
ce736788
JP
1933 tx_q->tx_skbuff_dma[entry].last_segment = false;
1934 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
47dd7a54
GC
1935
1936 if (likely(skb != NULL)) {
38979574
BG
1937 pkts_compl++;
1938 bytes_compl += skb->len;
7c565c33 1939 dev_consume_skb_any(skb);
ce736788 1940 tx_q->tx_skbuff[entry] = NULL;
47dd7a54
GC
1941 }
1942
42de047d 1943 stmmac_release_tx_desc(priv, p, priv->mode);
47dd7a54 1944
e3ad57c9 1945 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
47dd7a54 1946 }
ce736788 1947 tx_q->dirty_tx = entry;
38979574 1948
c22a3f48
JP
1949 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1950 pkts_compl, bytes_compl);
1951
1952 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1953 queue))) &&
1954 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
38979574 1955
739c8e14
LS
1956 netif_dbg(priv, tx_done, priv->dev,
1957 "%s: restart transmit\n", __func__);
c22a3f48 1958 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
47dd7a54 1959 }
d765955d
GC
1960
1961 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1962 stmmac_enable_eee_mode(priv);
f5351ef7 1963 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
d765955d 1964 }
8fce3331 1965
4ccb4585
JA
1966 /* We still have pending packets, let's call for a new scheduling */
1967 if (tx_q->dirty_tx != tx_q->cur_tx)
1968 mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
1969
8fce3331
JA
1970 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
1971
1972 return count;
47dd7a54
GC
1973}
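/* Editorial sketch, not the driver's stmmac_tx_avail(): the restart test
 * above relies on the usual producer/consumer ring accounting, roughly:
 */
static inline u32 example_tx_ring_avail(u32 dirty, u32 cur, u32 size)
{
	/* Free slots between producer (cur) and consumer (dirty), keeping
	 * one slot unused so that cur == dirty unambiguously means "empty".
	 */
	if (dirty > cur)
		return dirty - cur - 1;

	return size - cur + dirty - 1;
}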
1974
47dd7a54 1975/**
732fdf0e 1976 * stmmac_tx_err - to manage the tx error
32ceabca 1977 * @priv: driver private structure
5bacd778 1978 * @chan: channel index
47dd7a54 1979 * Description: it cleans the descriptors and restarts the transmission
732fdf0e 1980 * in case of transmission errors.
47dd7a54 1981 */
5bacd778 1982static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
47dd7a54 1983{
ce736788 1984 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
c24602ef 1985 int i;
ce736788 1986
c22a3f48 1987 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
47dd7a54 1988
ae4f0d46 1989 stmmac_stop_tx_dma(priv, chan);
ce736788 1990 dma_free_tx_skbufs(priv, chan);
e3ad57c9 1991 for (i = 0; i < DMA_TX_SIZE; i++)
c24602ef 1992 if (priv->extend_desc)
42de047d
JA
1993 stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1994 priv->mode, (i == DMA_TX_SIZE - 1));
c24602ef 1995 else
42de047d
JA
1996 stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1997 priv->mode, (i == DMA_TX_SIZE - 1));
ce736788
JP
1998 tx_q->dirty_tx = 0;
1999 tx_q->cur_tx = 0;
8d212a9e 2000 tx_q->mss = 0;
c22a3f48 2001 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
f421031e
JK
2002 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2003 tx_q->dma_tx_phy, chan);
ae4f0d46 2004 stmmac_start_tx_dma(priv, chan);
47dd7a54
GC
2005
2006 priv->dev->stats.tx_errors++;
c22a3f48 2007 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
47dd7a54
GC
2008}
2009
6deee222
JP
2010/**
2011 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2012 * @priv: driver private structure
2013 * @txmode: TX operating mode
2014 * @rxmode: RX operating mode
2015 * @chan: channel index
2016 * Description: it is used for configuring the DMA operation mode at
2017 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2018 * mode.
2019 */
2020static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2021 u32 rxmode, u32 chan)
2022{
a0daae13
JA
2023 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2024 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
52a76235
JA
2025 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2026 u32 tx_channels_count = priv->plat->tx_queues_to_use;
6deee222 2027 int rxfifosz = priv->plat->rx_fifo_size;
52a76235 2028 int txfifosz = priv->plat->tx_fifo_size;
6deee222
JP
2029
2030 if (rxfifosz == 0)
2031 rxfifosz = priv->dma_cap.rx_fifo_size;
52a76235
JA
2032 if (txfifosz == 0)
2033 txfifosz = priv->dma_cap.tx_fifo_size;
2034
2035 /* Adjust for real per queue fifo size */
2036 rxfifosz /= rx_channels_count;
2037 txfifosz /= tx_channels_count;
6deee222 2038
ab0204e3
JA
2039 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2040 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
6deee222
JP
2041}
2042
8bf993a5
JA
2043static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2044{
63a550fc 2045 int ret;
8bf993a5 2046
c10d4c82
JA
2047 ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2048 priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2049 if (ret && (ret != -EINVAL)) {
8bf993a5 2050 stmmac_global_err(priv);
c10d4c82
JA
2051 return true;
2052 }
2053
2054 return false;
8bf993a5
JA
2055}
2056
8fce3331
JA
2057static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
2058{
2059 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2060 &priv->xstats, chan);
2061 struct stmmac_channel *ch = &priv->channel[chan];
8fce3331 2062
4ccb4585 2063 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
3ba07deb
JA
2064 if (napi_schedule_prep(&ch->rx_napi)) {
2065 stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2066 __napi_schedule_irqoff(&ch->rx_napi);
2067 status |= handle_tx;
2068 }
8fce3331
JA
2069 }
2070
a66b5884 2071 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use))
4ccb4585 2072 napi_schedule_irqoff(&ch->tx_napi);
8fce3331
JA
2073
2074 return status;
2075}
2076
32ceabca 2077/**
732fdf0e 2078 * stmmac_dma_interrupt - DMA ISR
32ceabca
GC
2079 * @priv: driver private structure
2080 * Description: this is the DMA ISR. It is called by the main ISR.
732fdf0e
GC
2081 * It calls the dwmac dma routine and schedules the poll method in case
2082 * some work can be done.
32ceabca 2083 */
aec7ff27
GC
2084static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2085{
d62a107a 2086 u32 tx_channel_count = priv->plat->tx_queues_to_use;
5a6a0445
NC
2087 u32 rx_channel_count = priv->plat->rx_queues_to_use;
2088 u32 channels_to_check = tx_channel_count > rx_channel_count ?
2089 tx_channel_count : rx_channel_count;
d62a107a 2090 u32 chan;
8ac60ffb
KC
2091 int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2092
2093 /* Make sure we never check beyond our status buffer. */
2094 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2095 channels_to_check = ARRAY_SIZE(status);
5a6a0445 2096
5a6a0445 2097 for (chan = 0; chan < channels_to_check; chan++)
8fce3331 2098 status[chan] = stmmac_napi_check(priv, chan);
6deee222 2099
5a6a0445
NC
2100 for (chan = 0; chan < tx_channel_count; chan++) {
2101 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
d62a107a
JP
2102 /* Try to bump up the dma threshold on this failure */
2103 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2104 (tc <= 256)) {
2105 tc += 64;
2106 if (priv->plat->force_thresh_dma_mode)
2107 stmmac_set_dma_operation_mode(priv,
2108 tc,
2109 tc,
2110 chan);
2111 else
2112 stmmac_set_dma_operation_mode(priv,
2113 tc,
2114 SF_DMA_MODE,
2115 chan);
2116 priv->xstats.threshold = tc;
2117 }
5a6a0445 2118 } else if (unlikely(status[chan] == tx_hard_error)) {
d62a107a 2119 stmmac_tx_err(priv, chan);
47dd7a54 2120 }
d62a107a 2121 }
47dd7a54
GC
2122}
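/* Editorial worked example: with the default tc = 64, each
 * tx_hard_error_bump_tc event above raises the programmed threshold by 64
 * (128, 192, 256 and finally 320, since the (tc <= 256) check is done on the
 * pre-bump value); after that no further bumps are applied and
 * priv->xstats.threshold keeps the last programmed value.
 */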
2123
32ceabca
GC
2124/**
2125 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2126 * @priv: driver private structure
2127 * Description: this masks the MMC irq; in fact, the counters are managed in SW.
2128 */
1c901a46
GC
2129static void stmmac_mmc_setup(struct stmmac_priv *priv)
2130{
2131 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
36ff7c1e 2132 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
1c901a46 2133
3b1dd2c5 2134 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
4f795b25
GC
2135
2136 if (priv->dma_cap.rmon) {
3b1dd2c5 2137 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
4f795b25
GC
2138 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2139 } else
38ddc59d 2140 netdev_info(priv->dev, "No MAC Management Counters available\n");
1c901a46
GC
2141}
2142
19e30c14 2143/**
732fdf0e 2144 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
32ceabca 2145 * @priv: driver private structure
19e30c14
GC
2146 * Description:
2147 * newer GMAC chip generations have a register that indicates the
2148 * presence of the optional features/functions.
2149 * This can also be used to override the values passed through the
2150 * platform, which is necessary for old MAC10/100 and GMAC chips.
e7434821
GC
2151 */
2152static int stmmac_get_hw_features(struct stmmac_priv *priv)
2153{
a4e887fa 2154 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
e7434821
GC
2155}
2156
32ceabca 2157/**
732fdf0e 2158 * stmmac_check_ether_addr - check if the MAC addr is valid
32ceabca
GC
2159 * @priv: driver private structure
2160 * Description:
2161 * it verifies that the MAC address is valid; in case of failure it
2162 * generates a random MAC address
2163 */
bfab27a1
GC
2164static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2165{
bfab27a1 2166 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
c10d4c82 2167 stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
ceb69499 2168 if (!is_valid_ether_addr(priv->dev->dev_addr))
f2cedb63 2169 eth_hw_addr_random(priv->dev);
af649352
JZ
2170 dev_info(priv->device, "device MAC address %pM\n",
2171 priv->dev->dev_addr);
bfab27a1 2172 }
bfab27a1
GC
2173}
2174
32ceabca 2175/**
732fdf0e 2176 * stmmac_init_dma_engine - DMA init.
32ceabca
GC
2177 * @priv: driver private structure
2178 * Description:
2179 * It inits the DMA invoking the specific MAC/GMAC callback.
2180 * Some DMA parameters can be passed from the platform;
2181 * if they are not passed, a default is kept for the MAC or GMAC.
2182 */
0f1f88a8
GC
2183static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2184{
47f2a9ce
JP
2185 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2186 u32 tx_channels_count = priv->plat->tx_queues_to_use;
24aaed0c 2187 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
54139cf3 2188 struct stmmac_rx_queue *rx_q;
ce736788 2189 struct stmmac_tx_queue *tx_q;
47f2a9ce 2190 u32 chan = 0;
c24602ef 2191 int atds = 0;
495db273 2192 int ret = 0;
0f1f88a8 2193
a332e2fa
NC
2194 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2195 dev_err(priv->device, "Invalid DMA configuration\n");
89ab75bf 2196 return -EINVAL;
0f1f88a8
GC
2197 }
2198
c24602ef
GC
2199 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2200 atds = 1;
2201
a4e887fa 2202 ret = stmmac_reset(priv, priv->ioaddr);
495db273
GC
2203 if (ret) {
2204 dev_err(priv->device, "Failed to reset the dma\n");
2205 return ret;
2206 }
2207
7d9e6c5a
JA
2208 /* DMA Configuration */
2209 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2210
2211 if (priv->plat->axi)
2212 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2213
af8f3fb7
WV
2214 /* DMA CSR Channel configuration */
2215 for (chan = 0; chan < dma_csr_ch; chan++)
2216 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2217
24aaed0c
JA
2218 /* DMA RX Channel Configuration */
2219 for (chan = 0; chan < rx_channels_count; chan++) {
2220 rx_q = &priv->rx_queue[chan];
47f2a9ce 2221
24aaed0c
JA
2222 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2223 rx_q->dma_rx_phy, chan);
54139cf3 2224
24aaed0c
JA
2225 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2226 (DMA_RX_SIZE * sizeof(struct dma_desc));
2227 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2228 rx_q->rx_tail_addr, chan);
2229 }
47f2a9ce 2230
24aaed0c
JA
2231 /* DMA TX Channel Configuration */
2232 for (chan = 0; chan < tx_channels_count; chan++) {
2233 tx_q = &priv->tx_queue[chan];
47f2a9ce 2234
24aaed0c
JA
2235 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2236 tx_q->dma_tx_phy, chan);
ce736788 2237
0431100b 2238 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
24aaed0c
JA
2239 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2240 tx_q->tx_tail_addr, chan);
2241 }
47f2a9ce 2242
495db273 2243 return ret;
0f1f88a8
GC
2244}
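/* Editorial note: the RX tail pointer programmed above is dma_rx_phy +
 * DMA_RX_SIZE * sizeof(struct dma_desc), i.e. one descriptor past the last
 * ring entry, so the DMA initially owns the whole RX ring (with a 512-entry
 * ring of 16-byte basic descriptors that is 8192 bytes past the ring base;
 * sizes assumed here for illustration). The TX tail starts at dma_tx_phy
 * itself because nothing has been queued yet.
 */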
2245
8fce3331
JA
2246static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2247{
2248 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2249
2250 mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
2251}
2252
9125cdd1 2253/**
732fdf0e 2254 * stmmac_tx_timer - mitigation sw timer for tx.
9125cdd1
GC
2255 * @t: timer_list pointer
2256 * Description:
2257 * This is the timer handler used to schedule the TX NAPI context, which then runs stmmac_tx_clean.
2258 */
e99e88a9 2259static void stmmac_tx_timer(struct timer_list *t)
9125cdd1 2260{
8fce3331
JA
2261 struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
2262 struct stmmac_priv *priv = tx_q->priv_data;
2263 struct stmmac_channel *ch;
2264
2265 ch = &priv->channel[tx_q->queue_index];
9125cdd1 2266
4ccb4585
JA
2267 /*
2268 * If NAPI is already running we can miss some events. Let's rearm
2269 * the timer and try again.
2270 */
2271 if (likely(napi_schedule_prep(&ch->tx_napi)))
2272 __napi_schedule(&ch->tx_napi);
2273 else
2274 mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
9125cdd1
GC
2275}
2276
2277/**
d429b66e 2278 * stmmac_init_coalesce - init mitigation options.
32ceabca 2279 * @priv: driver private structure
9125cdd1 2280 * Description:
d429b66e 2281 * This inits the coalesce parameters: i.e. timer rate,
9125cdd1
GC
2282 * timer handler and default threshold used for enabling the
2283 * interrupt on completion bit.
2284 */
d429b66e 2285static void stmmac_init_coalesce(struct stmmac_priv *priv)
9125cdd1 2286{
8fce3331
JA
2287 u32 tx_channel_count = priv->plat->tx_queues_to_use;
2288 u32 chan;
2289
9125cdd1
GC
2290 priv->tx_coal_frames = STMMAC_TX_FRAMES;
2291 priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
d429b66e 2292 priv->rx_coal_frames = STMMAC_RX_FRAMES;
8fce3331
JA
2293
2294 for (chan = 0; chan < tx_channel_count; chan++) {
2295 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2296
2297 timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
2298 }
9125cdd1
GC
2299}
2300
4854ab99
JP
2301static void stmmac_set_rings_length(struct stmmac_priv *priv)
2302{
2303 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2304 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2305 u32 chan;
2306
2307 /* set TX ring length */
a4e887fa
JA
2308 for (chan = 0; chan < tx_channels_count; chan++)
2309 stmmac_set_tx_ring_len(priv, priv->ioaddr,
2310 (DMA_TX_SIZE - 1), chan);
4854ab99
JP
2311
2312 /* set RX ring length */
a4e887fa
JA
2313 for (chan = 0; chan < rx_channels_count; chan++)
2314 stmmac_set_rx_ring_len(priv, priv->ioaddr,
2315 (DMA_RX_SIZE - 1), chan);
4854ab99
JP
2316}
2317
6a3a7193
JP
2318/**
2319 * stmmac_set_tx_queue_weight - Set TX queue weight
2320 * @priv: driver private structure
2321 * Description: It is used for setting the TX queue weights
2322 */
2323static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2324{
2325 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2326 u32 weight;
2327 u32 queue;
2328
2329 for (queue = 0; queue < tx_queues_count; queue++) {
2330 weight = priv->plat->tx_queues_cfg[queue].weight;
c10d4c82 2331 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
6a3a7193
JP
2332 }
2333}
2334
19d91873
JP
2335/**
2336 * stmmac_configure_cbs - Configure CBS in TX queue
2337 * @priv: driver private structure
2338 * Description: It is used for configuring CBS in AVB TX queues
2339 */
2340static void stmmac_configure_cbs(struct stmmac_priv *priv)
2341{
2342 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2343 u32 mode_to_use;
2344 u32 queue;
2345
44781fef
JP
2346 /* queue 0 is reserved for legacy traffic */
2347 for (queue = 1; queue < tx_queues_count; queue++) {
19d91873
JP
2348 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2349 if (mode_to_use == MTL_QUEUE_DCB)
2350 continue;
2351
c10d4c82 2352 stmmac_config_cbs(priv, priv->hw,
19d91873
JP
2353 priv->plat->tx_queues_cfg[queue].send_slope,
2354 priv->plat->tx_queues_cfg[queue].idle_slope,
2355 priv->plat->tx_queues_cfg[queue].high_credit,
2356 priv->plat->tx_queues_cfg[queue].low_credit,
2357 queue);
2358 }
2359}
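/* Editorial sketch of platform data that would make the loop above program
 * CBS on TX queue 1. The fields are those of the platform tx_queues_cfg[]
 * array used above; the numeric slope/credit values are placeholders only,
 * not recommendations.
 */
static void example_cbs_platform_cfg(struct plat_stmmacenet_data *plat)
{
	plat->tx_queues_cfg[1].mode_to_use = MTL_QUEUE_AVB; /* not DCB, so CBS applies */
	plat->tx_queues_cfg[1].send_slope  = 0x0;            /* placeholder */
	plat->tx_queues_cfg[1].idle_slope  = 0x1000;         /* placeholder */
	plat->tx_queues_cfg[1].high_credit = 0x10000;        /* placeholder */
	plat->tx_queues_cfg[1].low_credit  = 0x0;            /* placeholder */
}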
2360
d43042f4
JP
2361/**
2362 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2363 * @priv: driver private structure
2364 * Description: It is used for mapping RX queues to RX dma channels
2365 */
2366static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2367{
2368 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2369 u32 queue;
2370 u32 chan;
2371
2372 for (queue = 0; queue < rx_queues_count; queue++) {
2373 chan = priv->plat->rx_queues_cfg[queue].chan;
c10d4c82 2374 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
d43042f4
JP
2375 }
2376}
2377
a8f5102a
JP
2378/**
2379 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2380 * @priv: driver private structure
2381 * Description: It is used for configuring the RX Queue Priority
2382 */
2383static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2384{
2385 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2386 u32 queue;
2387 u32 prio;
2388
2389 for (queue = 0; queue < rx_queues_count; queue++) {
2390 if (!priv->plat->rx_queues_cfg[queue].use_prio)
2391 continue;
2392
2393 prio = priv->plat->rx_queues_cfg[queue].prio;
c10d4c82 2394 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
a8f5102a
JP
2395 }
2396}
2397
2398/**
2399 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2400 * @priv: driver private structure
2401 * Description: It is used for configuring the TX Queue Priority
2402 */
2403static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2404{
2405 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2406 u32 queue;
2407 u32 prio;
2408
2409 for (queue = 0; queue < tx_queues_count; queue++) {
2410 if (!priv->plat->tx_queues_cfg[queue].use_prio)
2411 continue;
2412
2413 prio = priv->plat->tx_queues_cfg[queue].prio;
c10d4c82 2414 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
a8f5102a
JP
2415 }
2416}
2417
abe80fdc
JP
2418/**
2419 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2420 * @priv: driver private structure
2421 * Description: It is used for configuring the RX queue routing
2422 */
2423static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2424{
2425 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2426 u32 queue;
2427 u8 packet;
2428
2429 for (queue = 0; queue < rx_queues_count; queue++) {
2430 /* no specific packet type routing specified for the queue */
2431 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2432 continue;
2433
2434 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
c10d4c82 2435 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
abe80fdc
JP
2436 }
2437}
2438
76067459
JA
2439static void stmmac_mac_config_rss(struct stmmac_priv *priv)
2440{
2441 if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
2442 priv->rss.enable = false;
2443 return;
2444 }
2445
2446 if (priv->dev->features & NETIF_F_RXHASH)
2447 priv->rss.enable = true;
2448 else
2449 priv->rss.enable = false;
2450
2451 stmmac_rss_configure(priv, priv->hw, &priv->rss,
2452 priv->plat->rx_queues_to_use);
2453}
2454
d0a9c9f9
JP
2455/**
2456 * stmmac_mtl_configuration - Configure MTL
2457 * @priv: driver private structure
2458 * Description: It is used for configuring MTL
2459 */
2460static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2461{
2462 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2463 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2464
c10d4c82 2465 if (tx_queues_count > 1)
6a3a7193
JP
2466 stmmac_set_tx_queue_weight(priv);
2467
d0a9c9f9 2468 /* Configure MTL RX algorithms */
c10d4c82
JA
2469 if (rx_queues_count > 1)
2470 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2471 priv->plat->rx_sched_algorithm);
d0a9c9f9
JP
2472
2473 /* Configure MTL TX algorithms */
c10d4c82
JA
2474 if (tx_queues_count > 1)
2475 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2476 priv->plat->tx_sched_algorithm);
d0a9c9f9 2477
19d91873 2478 /* Configure CBS in AVB TX queues */
c10d4c82 2479 if (tx_queues_count > 1)
19d91873
JP
2480 stmmac_configure_cbs(priv);
2481
d43042f4 2482 /* Map RX MTL to DMA channels */
c10d4c82 2483 stmmac_rx_queue_dma_chan_map(priv);
d43042f4 2484
d0a9c9f9 2485 /* Enable MAC RX Queues */
c10d4c82 2486 stmmac_mac_enable_rx_queues(priv);
6deee222 2487
a8f5102a 2488 /* Set RX priorities */
c10d4c82 2489 if (rx_queues_count > 1)
a8f5102a
JP
2490 stmmac_mac_config_rx_queues_prio(priv);
2491
2492 /* Set TX priorities */
c10d4c82 2493 if (tx_queues_count > 1)
a8f5102a 2494 stmmac_mac_config_tx_queues_prio(priv);
abe80fdc
JP
2495
2496 /* Set RX routing */
c10d4c82 2497 if (rx_queues_count > 1)
abe80fdc 2498 stmmac_mac_config_rx_queues_routing(priv);
76067459
JA
2499
2500 /* Receive Side Scaling */
2501 if (rx_queues_count > 1)
2502 stmmac_mac_config_rss(priv);
d0a9c9f9
JP
2503}
2504
8bf993a5
JA
2505static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2506{
c10d4c82 2507 if (priv->dma_cap.asp) {
8bf993a5 2508 netdev_info(priv->dev, "Enabling Safety Features\n");
c10d4c82 2509 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
8bf993a5
JA
2510 } else {
2511 netdev_info(priv->dev, "No Safety Features support found\n");
2512 }
2513}
2514
523f11b5 2515/**
732fdf0e 2516 * stmmac_hw_setup - setup mac in a usable state.
523f11b5
SK
2517 * @dev : pointer to the device structure.
2518 * Description:
732fdf0e
GC
2519 * this is the main function to set up the HW in a usable state: the
2520 * dma engine is reset, the core registers are configured (e.g. AXI,
2521 * checksum features, timers) and the DMA is ready to start receiving and
2522 * transmitting.
523f11b5
SK
2523 * Return value:
2524 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2525 * file on failure.
2526 */
fe131929 2527static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
523f11b5
SK
2528{
2529 struct stmmac_priv *priv = netdev_priv(dev);
3c55d4d0 2530 u32 rx_cnt = priv->plat->rx_queues_to_use;
146617b8
JP
2531 u32 tx_cnt = priv->plat->tx_queues_to_use;
2532 u32 chan;
523f11b5
SK
2533 int ret;
2534
523f11b5
SK
2535 /* DMA initialization and SW reset */
2536 ret = stmmac_init_dma_engine(priv);
2537 if (ret < 0) {
38ddc59d
LC
2538 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2539 __func__);
523f11b5
SK
2540 return ret;
2541 }
2542
2543 /* Copy the MAC addr into the HW */
c10d4c82 2544 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
523f11b5 2545
02e57b9d
GC
2546 /* PS and related bits will be programmed according to the speed */
2547 if (priv->hw->pcs) {
2548 int speed = priv->plat->mac_port_sel_speed;
2549
2550 if ((speed == SPEED_10) || (speed == SPEED_100) ||
2551 (speed == SPEED_1000)) {
2552 priv->hw->ps = speed;
2553 } else {
2554 dev_warn(priv->device, "invalid port speed\n");
2555 priv->hw->ps = 0;
2556 }
2557 }
2558
523f11b5 2559 /* Initialize the MAC Core */
c10d4c82 2560 stmmac_core_init(priv, priv->hw, dev);
523f11b5 2561
d0a9c9f9 2562 /* Initialize MTL*/
63a550fc 2563 stmmac_mtl_configuration(priv);
9eb12474 2564
8bf993a5 2565 /* Initialize Safety Features */
63a550fc 2566 stmmac_safety_feat_configuration(priv);
8bf993a5 2567
c10d4c82 2568 ret = stmmac_rx_ipc(priv, priv->hw);
978aded4 2569 if (!ret) {
38ddc59d 2570 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
978aded4 2571 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
d2afb5bd 2572 priv->hw->rx_csum = 0;
978aded4
GC
2573 }
2574
523f11b5 2575 /* Enable the MAC Rx/Tx */
c10d4c82 2576 stmmac_mac_set(priv, priv->ioaddr, true);
523f11b5 2577
b4f0a661
JP
2578 /* Set the HW DMA mode and the COE */
2579 stmmac_dma_operation_mode(priv);
2580
523f11b5
SK
2581 stmmac_mmc_setup(priv);
2582
fe131929 2583 if (init_ptp) {
0ad2be79
TR
2584 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2585 if (ret < 0)
2586 netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2587
fe131929 2588 ret = stmmac_init_ptp(priv);
722eef28
HK
2589 if (ret == -EOPNOTSUPP)
2590 netdev_warn(priv->dev, "PTP not supported by HW\n");
2591 else if (ret)
2592 netdev_warn(priv->dev, "PTP init failed\n");
fe131929 2593 }
523f11b5 2594
523f11b5
SK
2595 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2596
a4e887fa 2597 if (priv->use_riwt) {
4e4337cc
JA
2598 if (!priv->rx_riwt)
2599 priv->rx_riwt = DEF_DMA_RIWT;
2600
2601 ret = stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt);
523f11b5
SK
2602 }
2603
c10d4c82 2604 if (priv->hw->pcs)
c9ad4c10 2605 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
523f11b5 2606
4854ab99
JP
2607 /* set TX and RX rings length */
2608 stmmac_set_rings_length(priv);
2609
f748be53 2610 /* Enable TSO */
146617b8
JP
2611 if (priv->tso) {
2612 for (chan = 0; chan < tx_cnt; chan++)
a4e887fa 2613 stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
146617b8 2614 }
f748be53 2615
67afd6d1
JA
2616 /* Enable Split Header */
2617 if (priv->sph && priv->hw->rx_csum) {
2618 for (chan = 0; chan < rx_cnt; chan++)
2619 stmmac_enable_sph(priv, priv->ioaddr, 1, chan);
2620 }
2621
30d93227
JA
2622 /* VLAN Tag Insertion */
2623 if (priv->dma_cap.vlins)
2624 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
2625
7d9e6c5a
JA
2626 /* Start the ball rolling... */
2627 stmmac_start_all_dma(priv);
2628
523f11b5
SK
2629 return 0;
2630}
2631
c66f6c37
TR
2632static void stmmac_hw_teardown(struct net_device *dev)
2633{
2634 struct stmmac_priv *priv = netdev_priv(dev);
2635
2636 clk_disable_unprepare(priv->plat->clk_ptp_ref);
2637}
2638
47dd7a54
GC
2639/**
2640 * stmmac_open - open entry point of the driver
2641 * @dev : pointer to the device structure.
2642 * Description:
2643 * This function is the open entry point of the driver.
2644 * Return value:
2645 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2646 * file on failure.
2647 */
2648static int stmmac_open(struct net_device *dev)
2649{
2650 struct stmmac_priv *priv = netdev_priv(dev);
5d626c87 2651 int bfsize = 0;
8fce3331 2652 u32 chan;
47dd7a54
GC
2653 int ret;
2654
3fe5cadb
GC
2655 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2656 priv->hw->pcs != STMMAC_PCS_TBI &&
2657 priv->hw->pcs != STMMAC_PCS_RTBI) {
e58bb43f
GC
2658 ret = stmmac_init_phy(dev);
2659 if (ret) {
38ddc59d
LC
2660 netdev_err(priv->dev,
2661 "%s: Cannot attach to PHY (error: %d)\n",
2662 __func__, ret);
89df20d9 2663 return ret;
e58bb43f 2664 }
f66ffe28 2665 }
47dd7a54 2666
523f11b5
SK
2667 /* Extra statistics */
2668 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2669 priv->xstats.threshold = tc;
2670
5d626c87
JA
2671 bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
2672 if (bfsize < 0)
2673 bfsize = 0;
2674
2675 if (bfsize < BUF_SIZE_16KiB)
2676 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
2677
2678 priv->dma_buf_sz = bfsize;
2679 buf_sz = bfsize;
2680
22ad3838 2681 priv->rx_copybreak = STMMAC_RX_COPYBREAK;
56329137 2682
5bacd778
LC
2683 ret = alloc_dma_desc_resources(priv);
2684 if (ret < 0) {
2685 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2686 __func__);
2687 goto dma_desc_error;
2688 }
2689
2690 ret = init_dma_desc_rings(dev, GFP_KERNEL);
2691 if (ret < 0) {
2692 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2693 __func__);
2694 goto init_error;
2695 }
2696
fe131929 2697 ret = stmmac_hw_setup(dev, true);
56329137 2698 if (ret < 0) {
38ddc59d 2699 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
c9324d18 2700 goto init_error;
47dd7a54
GC
2701 }
2702
d429b66e 2703 stmmac_init_coalesce(priv);
777da230 2704
74371272 2705 phylink_start(priv->phylink);
47dd7a54 2706
f66ffe28
GC
2707 /* Request the IRQ lines */
2708 ret = request_irq(dev->irq, stmmac_interrupt,
ceb69499 2709 IRQF_SHARED, dev->name, dev);
f66ffe28 2710 if (unlikely(ret < 0)) {
38ddc59d
LC
2711 netdev_err(priv->dev,
2712 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2713 __func__, dev->irq, ret);
6c1e5abe 2714 goto irq_error;
f66ffe28
GC
2715 }
2716
7a13f8f5
FV
2717 /* Request the Wake IRQ in case of another line is used for WoL */
2718 if (priv->wol_irq != dev->irq) {
2719 ret = request_irq(priv->wol_irq, stmmac_interrupt,
2720 IRQF_SHARED, dev->name, dev);
2721 if (unlikely(ret < 0)) {
38ddc59d
LC
2722 netdev_err(priv->dev,
2723 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2724 __func__, priv->wol_irq, ret);
c9324d18 2725 goto wolirq_error;
7a13f8f5
FV
2726 }
2727 }
2728
d765955d 2729 /* Request the IRQ lines */
d7ec8584 2730 if (priv->lpi_irq > 0) {
d765955d
GC
2731 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2732 dev->name, dev);
2733 if (unlikely(ret < 0)) {
38ddc59d
LC
2734 netdev_err(priv->dev,
2735 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2736 __func__, priv->lpi_irq, ret);
c9324d18 2737 goto lpiirq_error;
d765955d
GC
2738 }
2739 }
2740
c22a3f48
JP
2741 stmmac_enable_all_queues(priv);
2742 stmmac_start_all_queues(priv);
f66ffe28 2743
47dd7a54 2744 return 0;
f66ffe28 2745
c9324d18 2746lpiirq_error:
d765955d
GC
2747 if (priv->wol_irq != dev->irq)
2748 free_irq(priv->wol_irq, dev);
c9324d18 2749wolirq_error:
7a13f8f5 2750 free_irq(dev->irq, dev);
6c1e5abe 2751irq_error:
74371272 2752 phylink_stop(priv->phylink);
7a13f8f5 2753
8fce3331
JA
2754 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2755 del_timer_sync(&priv->tx_queue[chan].txtimer);
2756
c66f6c37 2757 stmmac_hw_teardown(dev);
c9324d18
GC
2758init_error:
2759 free_dma_desc_resources(priv);
5bacd778 2760dma_desc_error:
74371272 2761 phylink_disconnect_phy(priv->phylink);
f66ffe28 2762 return ret;
47dd7a54
GC
2763}
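/* Editorial note on the bfsize selection at the top of stmmac_open(): with
 * the standard 1500-byte MTU the fallback path typically lands on the
 * 1536-byte DEFAULT_BUFSIZE, while a large (jumbo) MTU on hardware whose
 * descriptors can address 16KB buffers lets stmmac_set_16kib_bfsize()
 * return BUF_SIZE_16KiB, growing priv->dma_buf_sz and, in turn, the
 * page_pool order computed when the RX resources are allocated.
 * (Exact thresholds depend on the descriptor mode; rough sketch only.)
 */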
2764
2765/**
2766 * stmmac_release - close entry point of the driver
2767 * @dev : device pointer.
2768 * Description:
2769 * This is the stop entry point of the driver.
2770 */
2771static int stmmac_release(struct net_device *dev)
2772{
2773 struct stmmac_priv *priv = netdev_priv(dev);
8fce3331 2774 u32 chan;
47dd7a54 2775
d765955d
GC
2776 if (priv->eee_enabled)
2777 del_timer_sync(&priv->eee_ctrl_timer);
2778
47dd7a54 2779 /* Stop and disconnect the PHY */
74371272
JA
2780 phylink_stop(priv->phylink);
2781 phylink_disconnect_phy(priv->phylink);
47dd7a54 2782
c22a3f48 2783 stmmac_stop_all_queues(priv);
47dd7a54 2784
c22a3f48 2785 stmmac_disable_all_queues(priv);
47dd7a54 2786
8fce3331
JA
2787 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2788 del_timer_sync(&priv->tx_queue[chan].txtimer);
9125cdd1 2789
47dd7a54
GC
2790 /* Free the IRQ lines */
2791 free_irq(dev->irq, dev);
7a13f8f5
FV
2792 if (priv->wol_irq != dev->irq)
2793 free_irq(priv->wol_irq, dev);
d7ec8584 2794 if (priv->lpi_irq > 0)
d765955d 2795 free_irq(priv->lpi_irq, dev);
47dd7a54
GC
2796
2797 /* Stop TX/RX DMA and clear the descriptors */
ae4f0d46 2798 stmmac_stop_all_dma(priv);
47dd7a54
GC
2799
2800 /* Release and free the Rx/Tx resources */
2801 free_dma_desc_resources(priv);
2802
19449bfc 2803 /* Disable the MAC Rx/Tx */
c10d4c82 2804 stmmac_mac_set(priv, priv->ioaddr, false);
47dd7a54
GC
2805
2806 netif_carrier_off(dev);
2807
92ba6888
RK
2808 stmmac_release_ptp(priv);
2809
47dd7a54
GC
2810 return 0;
2811}
2812
30d93227
JA
2813static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
2814 struct stmmac_tx_queue *tx_q)
2815{
2816 u16 tag = 0x0, inner_tag = 0x0;
2817 u32 inner_type = 0x0;
2818 struct dma_desc *p;
2819
2820 if (!priv->dma_cap.vlins)
2821 return false;
2822 if (!skb_vlan_tag_present(skb))
2823 return false;
2824 if (skb->vlan_proto == htons(ETH_P_8021AD)) {
2825 inner_tag = skb_vlan_tag_get(skb);
2826 inner_type = STMMAC_VLAN_INSERT;
2827 }
2828
2829 tag = skb_vlan_tag_get(skb);
2830
2831 p = tx_q->dma_tx + tx_q->cur_tx;
2832 if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
2833 return false;
2834
2835 stmmac_set_tx_owner(priv, p);
2836 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2837 return true;
2838}
2839
f748be53
AT
2840/**
2841 * stmmac_tso_allocator - allocate and fill TSO descriptors for a buffer
2842 * @priv: driver private structure
2843 * @des: buffer start address
2844 * @total_len: total length to fill in descriptors
2845 * @last_segment: condition for the last descriptor
ce736788 2846 * @queue: TX queue index
f748be53
AT
2847 * Description:
2848 * This function fills the current descriptor and requests new descriptors
2849 * according to the buffer length to fill
2850 */
a993db88 2851static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
ce736788 2852 int total_len, bool last_segment, u32 queue)
f748be53 2853{
ce736788 2854 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
f748be53 2855 struct dma_desc *desc;
5bacd778 2856 u32 buff_size;
ce736788 2857 int tmp_len;
f748be53
AT
2858
2859 tmp_len = total_len;
2860
2861 while (tmp_len > 0) {
a993db88
JA
2862 dma_addr_t curr_addr;
2863
ce736788 2864 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
b4c9784c 2865 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
ce736788 2866 desc = tx_q->dma_tx + tx_q->cur_tx;
f748be53 2867
a993db88
JA
2868 curr_addr = des + (total_len - tmp_len);
2869 if (priv->dma_cap.addr64 <= 32)
2870 desc->des0 = cpu_to_le32(curr_addr);
2871 else
2872 stmmac_set_desc_addr(priv, desc, curr_addr);
2873
f748be53
AT
2874 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2875 TSO_MAX_BUFF_SIZE : tmp_len;
2876
42de047d
JA
2877 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2878 0, 1,
2879 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2880 0, 0);
f748be53
AT
2881
2882 tmp_len -= TSO_MAX_BUFF_SIZE;
2883 }
2884}
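/* Editorial worked example: TSO_MAX_BUFF_SIZE is 16383 bytes (SZ_16K - 1),
 * so a 40000-byte payload passed to stmmac_tso_allocator() is spread over
 * three descriptors carrying 16383, 16383 and 7234 bytes, and only the last
 * one can satisfy the last_segment condition.
 */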
2885
2886/**
2887 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2888 * @skb : the socket buffer
2889 * @dev : device pointer
2890 * Description: this is the transmit function that is called on TSO frames
2891 * (support available on GMAC4 and newer chips).
2892 * Diagram below show the ring programming in case of TSO frames:
2893 *
2894 * First Descriptor
2895 * --------
2896 * | DES0 |---> buffer1 = L2/L3/L4 header
2897 * | DES1 |---> TCP Payload (can continue on next descr...)
2898 * | DES2 |---> buffer 1 and 2 len
2899 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2900 * --------
2901 * |
2902 * ...
2903 * |
2904 * --------
2905 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
2906 * | DES1 | --|
2907 * | DES2 | --> buffer 1 and 2 len
2908 * | DES3 |
2909 * --------
2910 *
2911 * mss is fixed while TSO is enabled, so the TDES3 ctx field is not reprogrammed for every frame.
2912 */
2913static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2914{
ce736788 2915 struct dma_desc *desc, *first, *mss_desc = NULL;
f748be53
AT
2916 struct stmmac_priv *priv = netdev_priv(dev);
2917 int nfrags = skb_shinfo(skb)->nr_frags;
ce736788 2918 u32 queue = skb_get_queue_mapping(skb);
c2837423
JA
2919 unsigned int first_entry, tx_packets;
2920 int tmp_pay_len = 0, first_tx;
ce736788 2921 struct stmmac_tx_queue *tx_q;
b7766206 2922 u8 proto_hdr_len, hdr;
c2837423 2923 bool has_vlan, set_ic;
ce736788 2924 u32 pay_len, mss;
a993db88 2925 dma_addr_t des;
f748be53
AT
2926 int i;
2927
ce736788 2928 tx_q = &priv->tx_queue[queue];
c2837423 2929 first_tx = tx_q->cur_tx;
ce736788 2930
f748be53 2931 /* Compute header lengths */
b7766206
JA
2932 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
2933 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
2934 hdr = sizeof(struct udphdr);
2935 } else {
2936 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2937 hdr = tcp_hdrlen(skb);
2938 }
f748be53
AT
2939
2940 /* Desc availability based on threshold should be enough safe */
ce736788 2941 if (unlikely(stmmac_tx_avail(priv, queue) <
f748be53 2942 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
c22a3f48
JP
2943 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2944 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2945 queue));
f748be53 2946 /* This is a hard error, log it. */
38ddc59d
LC
2947 netdev_err(priv->dev,
2948 "%s: Tx Ring full when queue awake\n",
2949 __func__);
f748be53 2950 }
f748be53
AT
2951 return NETDEV_TX_BUSY;
2952 }
2953
2954 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2955
2956 mss = skb_shinfo(skb)->gso_size;
2957
2958 /* set new MSS value if needed */
8d212a9e 2959 if (mss != tx_q->mss) {
ce736788 2960 mss_desc = tx_q->dma_tx + tx_q->cur_tx;
42de047d 2961 stmmac_set_mss(priv, mss_desc, mss);
8d212a9e 2962 tx_q->mss = mss;
ce736788 2963 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
b4c9784c 2964 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
f748be53
AT
2965 }
2966
2967 if (netif_msg_tx_queued(priv)) {
b7766206
JA
2968 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2969 __func__, hdr, proto_hdr_len, pay_len, mss);
f748be53
AT
2970 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2971 skb->data_len);
2972 }
2973
30d93227
JA
2974 /* Check if VLAN can be inserted by HW */
2975 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
2976
ce736788 2977 first_entry = tx_q->cur_tx;
b4c9784c 2978 WARN_ON(tx_q->tx_skbuff[first_entry]);
f748be53 2979
ce736788 2980 desc = tx_q->dma_tx + first_entry;
f748be53
AT
2981 first = desc;
2982
30d93227
JA
2983 if (has_vlan)
2984 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
2985
f748be53
AT
2986 /* first descriptor: fill Headers on Buf1 */
2987 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2988 DMA_TO_DEVICE);
2989 if (dma_mapping_error(priv->device, des))
2990 goto dma_map_err;
2991
ce736788
JP
2992 tx_q->tx_skbuff_dma[first_entry].buf = des;
2993 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
f748be53 2994
a993db88
JA
2995 if (priv->dma_cap.addr64 <= 32) {
2996 first->des0 = cpu_to_le32(des);
f748be53 2997
a993db88
JA
2998 /* Fill start of payload in buff2 of first descriptor */
2999 if (pay_len)
3000 first->des1 = cpu_to_le32(des + proto_hdr_len);
f748be53 3001
a993db88
JA
3002 /* If needed take extra descriptors to fill the remaining payload */
3003 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
3004 } else {
3005 stmmac_set_desc_addr(priv, first, des);
3006 tmp_pay_len = pay_len;
34c15202 3007 des += proto_hdr_len;
b2f07199 3008 pay_len = 0;
a993db88 3009 }
f748be53 3010
ce736788 3011 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
f748be53
AT
3012
3013 /* Prepare fragments */
3014 for (i = 0; i < nfrags; i++) {
3015 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3016
3017 des = skb_frag_dma_map(priv->device, frag, 0,
3018 skb_frag_size(frag),
3019 DMA_TO_DEVICE);
937071c1
TR
3020 if (dma_mapping_error(priv->device, des))
3021 goto dma_map_err;
f748be53
AT
3022
3023 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
ce736788 3024 (i == nfrags - 1), queue);
f748be53 3025
ce736788
JP
3026 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
3027 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
ce736788 3028 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
f748be53
AT
3029 }
3030
ce736788 3031 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
f748be53 3032
05cf0d1b
NC
3033 /* Only the last descriptor gets to point to the skb. */
3034 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
3035
7df4a3a7 3036 /* Manage tx mitigation */
c2837423
JA
3037 tx_packets = (tx_q->cur_tx + 1) - first_tx;
3038 tx_q->tx_count_frames += tx_packets;
3039
3040 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
3041 set_ic = true;
3042 else if (!priv->tx_coal_frames)
3043 set_ic = false;
3044 else if (tx_packets > priv->tx_coal_frames)
3045 set_ic = true;
3046 else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
3047 set_ic = true;
3048 else
3049 set_ic = false;
3050
3051 if (set_ic) {
7df4a3a7
JA
3052 desc = &tx_q->dma_tx[tx_q->cur_tx];
3053 tx_q->tx_count_frames = 0;
3054 stmmac_set_tx_ic(priv, desc);
3055 priv->xstats.tx_set_ic_bit++;
c2837423
JA
3056 } else {
3057 stmmac_tx_timer_arm(priv, queue);
7df4a3a7
JA
3058 }
3059
05cf0d1b
NC
3060 /* We've used all descriptors we need for this skb, however,
3061 * advance cur_tx so that it references a fresh descriptor.
3062 * ndo_start_xmit will fill this descriptor the next time it's
3063 * called and stmmac_tx_clean may clean up to this descriptor.
3064 */
ce736788 3065 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
f748be53 3066
ce736788 3067 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
b3e51069
LC
3068 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3069 __func__);
c22a3f48 3070 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
f748be53
AT
3071 }
3072
3073 dev->stats.tx_bytes += skb->len;
3074 priv->xstats.tx_tso_frames++;
3075 priv->xstats.tx_tso_nfrags += nfrags;
3076
8000ddc0
JA
3077 if (priv->sarc_type)
3078 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
3079
74abc9b1 3080 skb_tx_timestamp(skb);
f748be53
AT
3081
3082 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3083 priv->hwts_tx_en)) {
3084 /* declare that device is doing timestamping */
3085 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
42de047d 3086 stmmac_enable_tx_timestamp(priv, first);
f748be53
AT
3087 }
3088
3089 /* Complete the first descriptor before granting the DMA */
42de047d 3090 stmmac_prepare_tso_tx_desc(priv, first, 1,
f748be53
AT
3091 proto_hdr_len,
3092 pay_len,
ce736788 3093 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
b7766206 3094 hdr / 4, (skb->len - proto_hdr_len));
f748be53
AT
3095
3096 /* If context desc is used to change MSS */
15d2ee42
NC
3097 if (mss_desc) {
3098 /* Make sure that first descriptor has been completely
3099 * written, including its own bit. This is because MSS is
3100 * actually before first descriptor, so we need to make
3101 * sure that MSS's own bit is the last thing written.
3102 */
3103 dma_wmb();
42de047d 3104 stmmac_set_tx_owner(priv, mss_desc);
15d2ee42 3105 }
f748be53
AT
3106
3107 /* The own bit must be the latest setting done when prepare the
3108 * descriptor and then barrier is needed to make sure that
3109 * all is coherent before granting the DMA engine.
3110 */
95eb930a 3111 wmb();
f748be53
AT
3112
3113 if (netif_msg_pktdata(priv)) {
3114 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
ce736788
JP
3115 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3116 tx_q->cur_tx, first, nfrags);
f748be53 3117
42de047d 3118 stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
f748be53
AT
3119
3120 pr_info(">>> frame to be transmitted: ");
3121 print_pkt(skb->data, skb_headlen(skb));
3122 }
3123
c22a3f48 3124 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
f748be53 3125
0431100b 3126 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
a4e887fa 3127 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
f748be53 3128
f748be53
AT
3129 return NETDEV_TX_OK;
3130
3131dma_map_err:
f748be53
AT
3132 dev_err(priv->device, "Tx dma map failed\n");
3133 dev_kfree_skb(skb);
3134 priv->dev->stats.tx_dropped++;
3135 return NETDEV_TX_OK;
3136}
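/* Editorial worked example for the header split above: for a TCP segment
 * over IPv4 on plain Ethernet, skb_transport_offset() is 14 + 20 = 34 and
 * tcp_hdrlen() is 20 without options, so proto_hdr_len = 54; those 54 bytes
 * go into buffer 1 of the first descriptor and the payload starts at
 * des + proto_hdr_len. The "hdr / 4" passed to stmmac_prepare_tso_tx_desc()
 * encodes the L4 header length in 32-bit words (20 / 4 = 5).
 */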
3137
47dd7a54 3138/**
732fdf0e 3139 * stmmac_xmit - Tx entry point of the driver
47dd7a54
GC
3140 * @skb : the socket buffer
3141 * @dev : device pointer
32ceabca
GC
3142 * Description : this is the tx entry point of the driver.
3143 * It programs the chain or the ring and supports oversized frames
3144 * and SG feature.
47dd7a54
GC
3145 */
3146static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3147{
c2837423 3148 unsigned int first_entry, tx_packets, enh_desc;
47dd7a54 3149 struct stmmac_priv *priv = netdev_priv(dev);
0e80bdc9 3150 unsigned int nopaged_len = skb_headlen(skb);
4a7d666a 3151 int i, csum_insertion = 0, is_jumbo = 0;
ce736788 3152 u32 queue = skb_get_queue_mapping(skb);
47dd7a54 3153 int nfrags = skb_shinfo(skb)->nr_frags;
b7766206 3154 int gso = skb_shinfo(skb)->gso_type;
47dd7a54 3155 struct dma_desc *desc, *first;
ce736788 3156 struct stmmac_tx_queue *tx_q;
c2837423
JA
3157 bool has_vlan, set_ic;
3158 int entry, first_tx;
a993db88 3159 dma_addr_t des;
f748be53 3160
ce736788 3161 tx_q = &priv->tx_queue[queue];
c2837423 3162 first_tx = tx_q->cur_tx;
ce736788 3163
e2cd682d
JA
3164 if (priv->tx_path_in_lpi_mode)
3165 stmmac_disable_eee_mode(priv);
3166
f748be53
AT
3167 /* Manage oversized TCP frames for GMAC4 device */
3168 if (skb_is_gso(skb) && priv->tso) {
b7766206
JA
3169 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3170 return stmmac_tso_xmit(skb, dev);
3171 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
f748be53
AT
3172 return stmmac_tso_xmit(skb, dev);
3173 }
47dd7a54 3174
ce736788 3175 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
c22a3f48
JP
3176 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3177 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3178 queue));
47dd7a54 3179 /* This is a hard error, log it. */
38ddc59d
LC
3180 netdev_err(priv->dev,
3181 "%s: Tx Ring full when queue awake\n",
3182 __func__);
47dd7a54
GC
3183 }
3184 return NETDEV_TX_BUSY;
3185 }
3186
30d93227
JA
3187 /* Check if VLAN can be inserted by HW */
3188 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
3189
ce736788 3190 entry = tx_q->cur_tx;
0e80bdc9 3191 first_entry = entry;
b4c9784c 3192 WARN_ON(tx_q->tx_skbuff[first_entry]);
47dd7a54 3193
5e982f3b 3194 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
47dd7a54 3195
0e80bdc9 3196 if (likely(priv->extend_desc))
ce736788 3197 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
c24602ef 3198 else
ce736788 3199 desc = tx_q->dma_tx + entry;
c24602ef 3200
47dd7a54
GC
3201 first = desc;
3202
30d93227
JA
3203 if (has_vlan)
3204 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
3205
0e80bdc9 3206 enh_desc = priv->plat->enh_desc;
4a7d666a 3207 /* To program the descriptors according to the size of the frame */
29896a67 3208 if (enh_desc)
2c520b1c 3209 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
29896a67 3210
63a550fc 3211 if (unlikely(is_jumbo)) {
2c520b1c 3212 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
63a550fc 3213 if (unlikely(entry < 0) && (entry != -EINVAL))
362b37be 3214 goto dma_map_err;
29896a67 3215 }
47dd7a54
GC
3216
3217 for (i = 0; i < nfrags; i++) {
9e903e08
ED
3218 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3219 int len = skb_frag_size(frag);
be434d50 3220 bool last_segment = (i == (nfrags - 1));
47dd7a54 3221
e3ad57c9 3222 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
b4c9784c 3223 WARN_ON(tx_q->tx_skbuff[entry]);
e3ad57c9 3224
0e80bdc9 3225 if (likely(priv->extend_desc))
ce736788 3226 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
c24602ef 3227 else
ce736788 3228 desc = tx_q->dma_tx + entry;
47dd7a54 3229
f748be53
AT
3230 des = skb_frag_dma_map(priv->device, frag, 0, len,
3231 DMA_TO_DEVICE);
3232 if (dma_mapping_error(priv->device, des))
362b37be
GC
3233 goto dma_map_err; /* should reuse desc w/o issues */
3234
ce736788 3235 tx_q->tx_skbuff_dma[entry].buf = des;
6844171d
JA
3236
3237 stmmac_set_desc_addr(priv, desc, des);
f748be53 3238
ce736788
JP
3239 tx_q->tx_skbuff_dma[entry].map_as_page = true;
3240 tx_q->tx_skbuff_dma[entry].len = len;
3241 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
0e80bdc9
GC
3242
3243 /* Prepare the descriptor and set the own bit too */
42de047d
JA
3244 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3245 priv->mode, 1, last_segment, skb->len);
47dd7a54
GC
3246 }
3247
05cf0d1b
NC
3248 /* Only the last descriptor gets to point to the skb. */
3249 tx_q->tx_skbuff[entry] = skb;
e3ad57c9 3250
7df4a3a7
JA
3251 /* According to the coalesce parameter the IC bit for the latest
3252 * segment is reset and the timer re-started to clean the tx status.
3253 * This approach takes care of the fragments: desc is the first
3254 * element in case of no SG.
3255 */
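	/* Concretely, set_ic below requests a Tx-complete interrupt for
	 * HW-timestamped skbs, never when tx_coal_frames is zero, and
	 * otherwise roughly whenever the running frame count crosses a
	 * multiple of tx_coal_frames; when no interrupt is requested the
	 * cleanup timer is re-armed instead (see the else branch below).
	 */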
c2837423
JA
3256 tx_packets = (entry + 1) - first_tx;
3257 tx_q->tx_count_frames += tx_packets;
3258
3259 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
3260 set_ic = true;
3261 else if (!priv->tx_coal_frames)
3262 set_ic = false;
3263 else if (tx_packets > priv->tx_coal_frames)
3264 set_ic = true;
3265 else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
3266 set_ic = true;
3267 else
3268 set_ic = false;
3269
3270 if (set_ic) {
7df4a3a7
JA
3271 if (likely(priv->extend_desc))
3272 desc = &tx_q->dma_etx[entry].basic;
3273 else
3274 desc = &tx_q->dma_tx[entry];
3275
3276 tx_q->tx_count_frames = 0;
3277 stmmac_set_tx_ic(priv, desc);
3278 priv->xstats.tx_set_ic_bit++;
c2837423
JA
3279 } else {
3280 stmmac_tx_timer_arm(priv, queue);
7df4a3a7
JA
3281 }
3282
05cf0d1b
NC
3283 /* We've used all descriptors we need for this skb, however,
3284 * advance cur_tx so that it references a fresh descriptor.
3285 * ndo_start_xmit will fill this descriptor the next time it's
3286 * called and stmmac_tx_clean may clean up to this descriptor.
3287 */
3288 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
ce736788 3289 tx_q->cur_tx = entry;
47dd7a54 3290
47dd7a54 3291 if (netif_msg_pktdata(priv)) {
d0225e7d
AT
3292 void *tx_head;
3293
38ddc59d
LC
3294 netdev_dbg(priv->dev,
3295 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
ce736788 3296 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
38ddc59d 3297 entry, first, nfrags);
83d7af64 3298
c24602ef 3299 if (priv->extend_desc)
ce736788 3300 tx_head = (void *)tx_q->dma_etx;
c24602ef 3301 else
ce736788 3302 tx_head = (void *)tx_q->dma_tx;
d0225e7d 3303
42de047d 3304 stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
c24602ef 3305
38ddc59d 3306 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
47dd7a54
GC
3307 print_pkt(skb->data, skb->len);
3308 }
0e80bdc9 3309
ce736788 3310 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
b3e51069
LC
3311 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3312 __func__);
c22a3f48 3313 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
47dd7a54
GC
3314 }
3315
3316 dev->stats.tx_bytes += skb->len;
3317
8000ddc0
JA
3318 if (priv->sarc_type)
3319 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
3320
74abc9b1 3321 skb_tx_timestamp(skb);
3e82ce12 3322
0e80bdc9
GC
3323 /* Ready to fill the first descriptor and set the OWN bit w/o any
3324 * problems because all the descriptors are actually ready to be
3325 * passed to the DMA engine.
3326 */
3327 if (likely(!is_jumbo)) {
3328 bool last_segment = (nfrags == 0);
3329
f748be53
AT
3330 des = dma_map_single(priv->device, skb->data,
3331 nopaged_len, DMA_TO_DEVICE);
3332 if (dma_mapping_error(priv->device, des))
0e80bdc9
GC
3333 goto dma_map_err;
3334
ce736788 3335 tx_q->tx_skbuff_dma[first_entry].buf = des;
6844171d
JA
3336
3337 stmmac_set_desc_addr(priv, first, des);
f748be53 3338
ce736788
JP
3339 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3340 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
0e80bdc9
GC
3341
3342 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3343 priv->hwts_tx_en)) {
3344 /* declare that device is doing timestamping */
3345 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
42de047d 3346 stmmac_enable_tx_timestamp(priv, first);
0e80bdc9
GC
3347 }
3348
3349 /* Prepare the first descriptor setting the OWN bit too */
42de047d
JA
3350 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3351 csum_insertion, priv->mode, 1, last_segment,
3352 skb->len);
80acbed9
AK
3353 } else {
3354 stmmac_set_tx_owner(priv, first);
0e80bdc9
GC
3355 }
3356
80acbed9
AK
3357 /* The own bit must be the last setting done when preparing the
3358 * descriptor, and then a barrier is needed to make sure that
3359 * everything is coherent before granting the DMA engine.
3360 */
3361 wmb();
3362
c22a3f48 3363 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
f748be53 3364
f1565c60 3365 stmmac_enable_dma_transmission(priv, priv->ioaddr);
8fce3331 3366
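	/* Advance the DMA tail pointer past the descriptors just queued so
	 * the engine fetches them; cores without a tail pointer are kicked
	 * by the transmit poll demand issued just above instead.
	 */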
0431100b 3367 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
f1565c60 3368 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
52f64fae 3369
362b37be 3370 return NETDEV_TX_OK;
a9097a96 3371
362b37be 3372dma_map_err:
38ddc59d 3373 netdev_err(priv->dev, "Tx DMA map failed\n");
362b37be
GC
3374 dev_kfree_skb(skb);
3375 priv->dev->stats.tx_dropped++;
47dd7a54
GC
3376 return NETDEV_TX_OK;
3377}
3378
b9381985
VB
3379static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3380{
ab188e8f
EN
3381 struct vlan_ethhdr *veth;
3382 __be16 vlan_proto;
b9381985
VB
3383 u16 vlanid;
3384
ab188e8f
EN
3385 veth = (struct vlan_ethhdr *)skb->data;
3386 vlan_proto = veth->h_vlan_proto;
3387
3388 if ((vlan_proto == htons(ETH_P_8021Q) &&
3389 dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3390 (vlan_proto == htons(ETH_P_8021AD) &&
3391 dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
b9381985 3392 /* pop the vlan tag */
ab188e8f
EN
3393 vlanid = ntohs(veth->h_vlan_TCI);
3394 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
b9381985 3395 skb_pull(skb, VLAN_HLEN);
ab188e8f 3396 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
b9381985
VB
3397 }
3398}
3399
3400
54139cf3 3401static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
120e87f9 3402{
54139cf3 3403 if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
120e87f9
GC
3404 return 0;
3405
3406 return 1;
3407}
3408
32ceabca 3409/**
732fdf0e 3410 * stmmac_rx_refill - refill used skb preallocated buffers
32ceabca 3411 * @priv: driver private structure
54139cf3 3412 * @queue: RX queue index
32ceabca
GC
3413 * Description : this is to reallocate the skb for the reception process
3414 * that is based on zero-copy.
3415 */
54139cf3 3416static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
47dd7a54 3417{
54139cf3 3418 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3caa61c2 3419 int len, dirty = stmmac_rx_dirty(priv, queue);
54139cf3
JP
3420 unsigned int entry = rx_q->dirty_rx;
3421
3caa61c2
JA
3422 len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
3423
e3ad57c9 3424 while (dirty-- > 0) {
2af6106a 3425 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
c24602ef 3426 struct dma_desc *p;
d429b66e 3427 bool use_rx_wd;
c24602ef
GC
3428
3429 if (priv->extend_desc)
54139cf3 3430 p = (struct dma_desc *)(rx_q->dma_erx + entry);
c24602ef 3431 else
54139cf3 3432 p = rx_q->dma_rx + entry;
c24602ef 3433
2af6106a
JA
3434 if (!buf->page) {
3435 buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
3436 if (!buf->page)
362b37be 3437 break;
47dd7a54 3438 }
2af6106a 3439
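		/* With Split Header (SPH) enabled each descriptor carries a
		 * second buffer: the HW writes the packet headers to the
		 * first page and the payload to this secondary page.
		 */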
67afd6d1
JA
3440 if (priv->sph && !buf->sec_page) {
3441 buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
3442 if (!buf->sec_page)
3443 break;
3444
3445 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
3446
3447 dma_sync_single_for_device(priv->device, buf->sec_addr,
3448 len, DMA_FROM_DEVICE);
3449 }
3450
2af6106a 3451 buf->addr = page_pool_get_dma_addr(buf->page);
3caa61c2
JA
3452
3453 /* Sync whole allocation to device. This will invalidate old
3454 * data.
3455 */
3456 dma_sync_single_for_device(priv->device, buf->addr, len,
3457 DMA_FROM_DEVICE);
3458
2af6106a 3459 stmmac_set_desc_addr(priv, p, buf->addr);
67afd6d1 3460 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
2af6106a 3461 stmmac_refill_desc3(priv, rx_q, p);
f748be53 3462
d429b66e 3463 rx_q->rx_count_frames++;
6fa9d691
JA
3464 rx_q->rx_count_frames += priv->rx_coal_frames;
3465 if (rx_q->rx_count_frames > priv->rx_coal_frames)
3466 rx_q->rx_count_frames = 0;
09146abe
JA
3467
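		/* use_rx_wd selects between a per-descriptor completion
		 * interrupt and the RIWT RX watchdog timer for this
		 * descriptor; it is forced off when the RX watchdog is not
		 * in use.
		 */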
3468 use_rx_wd = !priv->rx_coal_frames;
3469 use_rx_wd |= rx_q->rx_count_frames > 0;
3470 if (!priv->use_riwt)
3471 use_rx_wd = false;
d429b66e 3472
ad688cdb 3473 dma_wmb();
2af6106a 3474 stmmac_set_rx_owner(priv, p, use_rx_wd);
e3ad57c9
GC
3475
3476 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
47dd7a54 3477 }
54139cf3 3478 rx_q->dirty_rx = entry;
858a31ff
JA
3479 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3480 (rx_q->dirty_rx * sizeof(struct dma_desc));
4523a561 3481 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
47dd7a54
GC
3482}
3483
88ebe2cf
JA
3484static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
3485 struct dma_desc *p,
3486 int status, unsigned int len)
3487{
3488 int ret, coe = priv->hw->rx_csum;
3489 unsigned int plen = 0, hlen = 0;
3490
3491 /* Not first descriptor, buffer is always zero */
3492 if (priv->sph && len)
3493 return 0;
3494
3495 /* First descriptor, get split header length */
3496 ret = stmmac_get_rx_header_len(priv, p, &hlen);
3497 if (priv->sph && hlen) {
3498 priv->xstats.rx_split_hdr_pkt_n++;
3499 return hlen;
3500 }
3501
3502 /* First descriptor, not last descriptor and not split header */
3503 if (status & rx_not_ls)
3504 return priv->dma_buf_sz;
3505
3506 plen = stmmac_get_rx_frame_len(priv, p, coe);
3507
3508 /* First descriptor and last descriptor and not split header */
3509 return min_t(unsigned int, priv->dma_buf_sz, plen);
3510}
3511
3512static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
3513 struct dma_desc *p,
3514 int status, unsigned int len)
3515{
3516 int coe = priv->hw->rx_csum;
3517 unsigned int plen = 0;
3518
3519 /* Not split header, buffer is not available */
3520 if (!priv->sph)
3521 return 0;
3522
3523 /* Not last descriptor */
3524 if (status & rx_not_ls)
3525 return priv->dma_buf_sz;
3526
3527 plen = stmmac_get_rx_frame_len(priv, p, coe);
3528
3529 /* Last descriptor */
3530 return plen - len;
3531}
3532
32ceabca 3533/**
732fdf0e 3534 * stmmac_rx - manage the receive process
32ceabca 3535 * @priv: driver private structure
54139cf3
JP
3536 * @limit: napi budget
3537 * @queue: RX queue index.
32ceabca
GC
3538 * Description : this is the function called by the napi poll method.
3539 * It gets all the frames inside the ring.
3540 */
54139cf3 3541static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
47dd7a54 3542{
54139cf3 3543 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
8fce3331 3544 struct stmmac_channel *ch = &priv->channel[queue];
ec222003
JA
3545 unsigned int count = 0, error = 0, len = 0;
3546 int status = 0, coe = priv->hw->rx_csum;
07b39753 3547 unsigned int next_entry = rx_q->cur_rx;
ec222003 3548 struct sk_buff *skb = NULL;
47dd7a54 3549
83d7af64 3550 if (netif_msg_rx_status(priv)) {
d0225e7d
AT
3551 void *rx_head;
3552
38ddc59d 3553 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
c24602ef 3554 if (priv->extend_desc)
54139cf3 3555 rx_head = (void *)rx_q->dma_erx;
c24602ef 3556 else
54139cf3 3557 rx_head = (void *)rx_q->dma_rx;
d0225e7d 3558
42de047d 3559 stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
47dd7a54 3560 }
c24602ef 3561 while (count < limit) {
88ebe2cf 3562 unsigned int buf1_len = 0, buf2_len = 0;
ec222003 3563 enum pkt_hash_types hash_type;
2af6106a
JA
3564 struct stmmac_rx_buffer *buf;
3565 struct dma_desc *np, *p;
ec222003
JA
3566 int entry;
3567 u32 hash;
47dd7a54 3568
ec222003
JA
3569 if (!count && rx_q->state_saved) {
3570 skb = rx_q->state.skb;
3571 error = rx_q->state.error;
3572 len = rx_q->state.len;
3573 } else {
3574 rx_q->state_saved = false;
3575 skb = NULL;
3576 error = 0;
3577 len = 0;
3578 }
3579
3580 if (count >= limit)
3581 break;
3582
3583read_again:
88ebe2cf
JA
3584 buf1_len = 0;
3585 buf2_len = 0;
07b39753 3586 entry = next_entry;
2af6106a 3587 buf = &rx_q->buf_pool[entry];
07b39753 3588
c24602ef 3589 if (priv->extend_desc)
54139cf3 3590 p = (struct dma_desc *)(rx_q->dma_erx + entry);
c24602ef 3591 else
54139cf3 3592 p = rx_q->dma_rx + entry;
c24602ef 3593
c1fa3212 3594 /* read the status of the incoming frame */
42de047d
JA
3595 status = stmmac_rx_status(priv, &priv->dev->stats,
3596 &priv->xstats, p);
c1fa3212
FG
3597 /* check if managed by the DMA otherwise go ahead */
3598 if (unlikely(status & dma_own))
47dd7a54
GC
3599 break;
3600
54139cf3
JP
3601 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3602 next_entry = rx_q->cur_rx;
e3ad57c9 3603
c24602ef 3604 if (priv->extend_desc)
54139cf3 3605 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
c24602ef 3606 else
54139cf3 3607 np = rx_q->dma_rx + next_entry;
ba1ffd74
GC
3608
3609 prefetch(np);
47dd7a54 3610
42de047d
JA
3611 if (priv->extend_desc)
3612 stmmac_rx_extended_status(priv, &priv->dev->stats,
3613 &priv->xstats, rx_q->dma_erx + entry);
891434b1 3614 if (unlikely(status == discard_frame)) {
2af6106a 3615 page_pool_recycle_direct(rx_q->page_pool, buf->page);
2af6106a 3616 buf->page = NULL;
ec222003 3617 error = 1;
0b273ca4
JA
3618 if (!priv->hwts_rx_en)
3619 priv->dev->stats.rx_errors++;
ec222003
JA
3620 }
3621
3622 if (unlikely(error && (status & rx_not_ls)))
3623 goto read_again;
3624 if (unlikely(error)) {
399e06a5 3625 dev_kfree_skb(skb);
88ebe2cf 3626 skb = NULL;
cda4985a 3627 count++;
ec222003
JA
3628 continue;
3629 }
3630
3631 /* Buffer is good. Go on. */
3632
88ebe2cf
JA
3633 prefetch(page_address(buf->page));
3634 if (buf->sec_page)
3635 prefetch(page_address(buf->sec_page));
3636
3637 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
3638 len += buf1_len;
3639 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
3640 len += buf2_len;
3641
3642 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3643 * Type frames (LLC/LLC-SNAP)
3644 *
3645 * llc_snap is never checked in GMAC >= 4, so this ACS
3646 * feature is always disabled and packets need to be
3647 * stripped manually.
3648 */
93b5dce4
JA
3649 if (likely(!(status & rx_not_ls)) &&
3650 (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3651 unlikely(status != llc_snap))) {
88ebe2cf
JA
3652 if (buf2_len)
3653 buf2_len -= ETH_FCS_LEN;
3654 else
3655 buf1_len -= ETH_FCS_LEN;
3656
3657 len -= ETH_FCS_LEN;
ec222003 3658 }
22ad3838 3659
ec222003 3660 if (!skb) {
88ebe2cf 3661 skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
ec222003 3662 if (!skb) {
2af6106a 3663 priv->dev->stats.rx_dropped++;
cda4985a 3664 count++;
88ebe2cf 3665 goto drain_data;
47dd7a54 3666 }
47dd7a54 3667
88ebe2cf
JA
3668 dma_sync_single_for_cpu(priv->device, buf->addr,
3669 buf1_len, DMA_FROM_DEVICE);
2af6106a 3670 skb_copy_to_linear_data(skb, page_address(buf->page),
88ebe2cf
JA
3671 buf1_len);
3672 skb_put(skb, buf1_len);
2af6106a 3673
ec222003
JA
3674 /* Data payload copied into SKB, page ready for recycle */
3675 page_pool_recycle_direct(rx_q->page_pool, buf->page);
3676 buf->page = NULL;
88ebe2cf 3677 } else if (buf1_len) {
ec222003 3678 dma_sync_single_for_cpu(priv->device, buf->addr,
88ebe2cf 3679 buf1_len, DMA_FROM_DEVICE);
ec222003 3680 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
88ebe2cf 3681 buf->page, 0, buf1_len,
ec222003 3682 priv->dma_buf_sz);
b9381985 3683
ec222003
JA
3684 /* Data payload appended into SKB */
3685 page_pool_release_page(rx_q->page_pool, buf->page);
3686 buf->page = NULL;
3687 }
47dd7a54 3688
88ebe2cf 3689 if (buf2_len) {
67afd6d1 3690 dma_sync_single_for_cpu(priv->device, buf->sec_addr,
88ebe2cf 3691 buf2_len, DMA_FROM_DEVICE);
67afd6d1 3692 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
88ebe2cf 3693 buf->sec_page, 0, buf2_len,
67afd6d1
JA
3694 priv->dma_buf_sz);
3695
67afd6d1
JA
3696 /* Data payload appended into SKB */
3697 page_pool_release_page(rx_q->page_pool, buf->sec_page);
3698 buf->sec_page = NULL;
3699 }
3700
88ebe2cf 3701drain_data:
ec222003
JA
3702 if (likely(status & rx_not_ls))
3703 goto read_again;
88ebe2cf
JA
3704 if (!skb)
3705 continue;
62a2ab93 3706
ec222003 3707 /* Got entire packet into SKB. Finish it. */
76067459 3708
ec222003
JA
3709 stmmac_get_rx_hwtstamp(priv, p, np, skb);
3710 stmmac_rx_vlan(priv->dev, skb);
3711 skb->protocol = eth_type_trans(skb, priv->dev);
47dd7a54 3712
ec222003
JA
3713 if (unlikely(!coe))
3714 skb_checksum_none_assert(skb);
3715 else
3716 skb->ip_summed = CHECKSUM_UNNECESSARY;
2af6106a 3717
ec222003
JA
3718 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
3719 skb_set_hash(skb, hash, hash_type);
3720
3721 skb_record_rx_queue(skb, queue);
3722 napi_gro_receive(&ch->rx_napi, skb);
88ebe2cf 3723 skb = NULL;
ec222003
JA
3724
3725 priv->dev->stats.rx_packets++;
3726 priv->dev->stats.rx_bytes += len;
cda4985a 3727 count++;
ec222003
JA
3728 }
3729
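	/* The frame did not complete within this NAPI budget: stash the
	 * partially built skb, the running length and the error flag so the
	 * next poll can resume exactly where this one stopped.
	 */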
88ebe2cf 3730 if (status & rx_not_ls || skb) {
ec222003
JA
3731 rx_q->state_saved = true;
3732 rx_q->state.skb = skb;
3733 rx_q->state.error = error;
3734 rx_q->state.len = len;
47dd7a54
GC
3735 }
3736
54139cf3 3737 stmmac_rx_refill(priv, queue);
47dd7a54
GC
3738
3739 priv->xstats.rx_pkt_n += count;
3740
3741 return count;
3742}
3743
4ccb4585 3744static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
47dd7a54 3745{
8fce3331 3746 struct stmmac_channel *ch =
4ccb4585 3747 container_of(napi, struct stmmac_channel, rx_napi);
8fce3331 3748 struct stmmac_priv *priv = ch->priv_data;
8fce3331 3749 u32 chan = ch->index;
4ccb4585 3750 int work_done;
47dd7a54 3751
9125cdd1 3752 priv->xstats.napi_poll++;
ce736788 3753
4ccb4585
JA
3754 work_done = stmmac_rx(priv, budget, chan);
3755 if (work_done < budget && napi_complete_done(napi, work_done))
3756 stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3757 return work_done;
3758}
ce736788 3759
4ccb4585
JA
3760static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
3761{
3762 struct stmmac_channel *ch =
3763 container_of(napi, struct stmmac_channel, tx_napi);
3764 struct stmmac_priv *priv = ch->priv_data;
3765 struct stmmac_tx_queue *tx_q;
3766 u32 chan = ch->index;
3767 int work_done;
8fce3331 3768
4ccb4585
JA
3769 priv->xstats.napi_poll++;
3770
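	/* stmmac_tx_clean() may walk up to the whole ring regardless of the
	 * NAPI budget, so clamp its return value before reporting it.
	 */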
3771 work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
3772 work_done = min(work_done, budget);
8fce3331 3773
a66b5884
JA
3774 if (work_done < budget)
3775 napi_complete_done(napi, work_done);
4ccb4585
JA
3776
3777 /* Force transmission restart */
3778 tx_q = &priv->tx_queue[chan];
3779 if (tx_q->cur_tx != tx_q->dirty_tx) {
3780 stmmac_enable_dma_transmission(priv, priv->ioaddr);
3781 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr,
3782 chan);
fa0be0a4 3783 }
8fce3331 3784
47dd7a54
GC
3785 return work_done;
3786}
3787
3788/**
3789 * stmmac_tx_timeout
3790 * @dev : Pointer to net device structure
3791 * Description: this function is called when a packet transmission fails to
7284a3f1 3792 * complete within a reasonable time. The driver will mark the error in the
47dd7a54
GC
3793 * netdev structure and arrange for the device to be reset to a sane state
3794 * in order to transmit a new packet.
3795 */
3796static void stmmac_tx_timeout(struct net_device *dev)
3797{
3798 struct stmmac_priv *priv = netdev_priv(dev);
3799
34877a15 3800 stmmac_global_err(priv);
47dd7a54
GC
3801}
3802
47dd7a54 3803/**
01789349 3804 * stmmac_set_rx_mode - entry point for multicast addressing
47dd7a54
GC
3805 * @dev : pointer to the device structure
3806 * Description:
3807 * This function is a driver entry point which gets called by the kernel
3808 * whenever multicast addresses must be enabled/disabled.
3809 * Return value:
3810 * void.
3811 */
01789349 3812static void stmmac_set_rx_mode(struct net_device *dev)
47dd7a54
GC
3813{
3814 struct stmmac_priv *priv = netdev_priv(dev);
3815
c10d4c82 3816 stmmac_set_filter(priv, priv->hw, dev);
47dd7a54
GC
3817}
3818
3819/**
3820 * stmmac_change_mtu - entry point to change MTU size for the device.
3821 * @dev : device pointer.
3822 * @new_mtu : the new MTU size for the device.
3823 * Description: the Maximum Transmission Unit (MTU) is used by the network layer
3824 * to drive packet transmission. Ethernet has an MTU of 1500 octets
3825 * (ETH_DATA_LEN). This value can be changed with ifconfig.
3826 * Return value:
3827 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3828 * file on failure.
3829 */
3830static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3831{
38ddc59d 3832 struct stmmac_priv *priv = netdev_priv(dev);
eaf4fac4
JA
3833 int txfifosz = priv->plat->tx_fifo_size;
3834
3835 if (txfifosz == 0)
3836 txfifosz = priv->dma_cap.tx_fifo_size;
3837
3838 txfifosz /= priv->plat->tx_queues_to_use;
38ddc59d 3839
47dd7a54 3840 if (netif_running(dev)) {
38ddc59d 3841 netdev_err(priv->dev, "must be stopped to change its MTU\n");
47dd7a54
GC
3842 return -EBUSY;
3843 }
3844
eaf4fac4
JA
3845 new_mtu = STMMAC_ALIGN(new_mtu);
3846
3847 /* If condition true, FIFO is too small or MTU too large */
3848 if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
3849 return -EINVAL;
3850
5e982f3b 3851 dev->mtu = new_mtu;
f748be53 3852
5e982f3b
MM
3853 netdev_update_features(dev);
3854
3855 return 0;
3856}
3857
c8f44aff 3858static netdev_features_t stmmac_fix_features(struct net_device *dev,
ceb69499 3859 netdev_features_t features)
5e982f3b
MM
3860{
3861 struct stmmac_priv *priv = netdev_priv(dev);
3862
38912bdb 3863 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5e982f3b 3864 features &= ~NETIF_F_RXCSUM;
d2afb5bd 3865
5e982f3b 3866 if (!priv->plat->tx_coe)
a188222b 3867 features &= ~NETIF_F_CSUM_MASK;
5e982f3b 3868
ebbb293f
GC
3869 /* Some GMAC devices have bugged Jumbo frame support that
3870 * needs to have the Tx COE disabled for oversized frames
3871 * (due to limited buffer sizes). In this case we disable
8d45e42b 3872 * the TX csum insertion in the TDES and do not use SF.
ceb69499 3873 */
5e982f3b 3874 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
a188222b 3875 features &= ~NETIF_F_CSUM_MASK;
ebbb293f 3876
f748be53
AT
3877 /* Disable tso if asked by ethtool */
3878 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3879 if (features & NETIF_F_TSO)
3880 priv->tso = true;
3881 else
3882 priv->tso = false;
3883 }
3884
5e982f3b 3885 return features;
47dd7a54
GC
3886}
3887
d2afb5bd
GC
3888static int stmmac_set_features(struct net_device *netdev,
3889 netdev_features_t features)
3890{
3891 struct stmmac_priv *priv = netdev_priv(netdev);
67afd6d1
JA
3892 bool sph_en;
3893 u32 chan;
d2afb5bd
GC
3894
3895 /* Keep the COE Type in case of csum is supporting */
3896 if (features & NETIF_F_RXCSUM)
3897 priv->hw->rx_csum = priv->plat->rx_coe;
3898 else
3899 priv->hw->rx_csum = 0;
3900 /* No check needed because rx_coe has been set before and it will be
3901 * fixed in case of issue.
3902 */
c10d4c82 3903 stmmac_rx_ipc(priv, priv->hw);
d2afb5bd 3904
67afd6d1
JA
3905 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3906 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
3907 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3908
d2afb5bd
GC
3909 return 0;
3910}
3911
32ceabca
GC
3912/**
3913 * stmmac_interrupt - main ISR
3914 * @irq: interrupt number.
3915 * @dev_id: to pass the net device pointer.
3916 * Description: this is the main driver interrupt service routine.
732fdf0e
GC
3917 * It can call:
3918 * o DMA service routine (to manage incoming frame reception and transmission
3919 * status)
3920 * o Core interrupts to manage: remote wake-up, management counter, LPI
3921 * interrupts.
32ceabca 3922 */
47dd7a54
GC
3923static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3924{
3925 struct net_device *dev = (struct net_device *)dev_id;
3926 struct stmmac_priv *priv = netdev_priv(dev);
7bac4e1e
JP
3927 u32 rx_cnt = priv->plat->rx_queues_to_use;
3928 u32 tx_cnt = priv->plat->tx_queues_to_use;
3929 u32 queues_count;
3930 u32 queue;
7d9e6c5a 3931 bool xmac;
7bac4e1e 3932
7d9e6c5a 3933 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
7bac4e1e 3934 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
47dd7a54 3935
89f7f2cf
SK
3936 if (priv->irq_wake)
3937 pm_wakeup_event(priv->device, 0);
3938
47dd7a54 3939 if (unlikely(!dev)) {
38ddc59d 3940 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
47dd7a54
GC
3941 return IRQ_NONE;
3942 }
3943
34877a15
JA
3944 /* Check if adapter is up */
3945 if (test_bit(STMMAC_DOWN, &priv->state))
3946 return IRQ_HANDLED;
8bf993a5
JA
3947 /* Check if a fatal error happened */
3948 if (stmmac_safety_feat_interrupt(priv))
3949 return IRQ_HANDLED;
34877a15 3950
d765955d 3951 /* To handle GMAC own interrupts */
7d9e6c5a 3952 if ((priv->plat->has_gmac) || xmac) {
c10d4c82 3953 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
61fac60a 3954 int mtl_status;
8f71a88d 3955
d765955d 3956 if (unlikely(status)) {
d765955d 3957 /* For LPI we need to save the tx status */
0982a0f6 3958 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
d765955d 3959 priv->tx_path_in_lpi_mode = true;
0982a0f6 3960 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
d765955d 3961 priv->tx_path_in_lpi_mode = false;
7bac4e1e
JP
3962 }
3963
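		/* Walk every queue for MTL interrupts; on an RX FIFO overflow
		 * the RX tail pointer is rewritten to resume the DMA on that
		 * queue.
		 */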
61fac60a
JA
3964 for (queue = 0; queue < queues_count; queue++) {
3965 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
54139cf3 3966
61fac60a
JA
3967 mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
3968 queue);
3969 if (mtl_status != -EINVAL)
3970 status |= mtl_status;
7bac4e1e 3971
61fac60a
JA
3972 if (status & CORE_IRQ_MTL_RX_OVERFLOW)
3973 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3974 rx_q->rx_tail_addr,
3975 queue);
d765955d 3976 }
70523e63
GC
3977
3978 /* PCS link status */
3fe5cadb 3979 if (priv->hw->pcs) {
70523e63
GC
3980 if (priv->xstats.pcs_link)
3981 netif_carrier_on(dev);
3982 else
3983 netif_carrier_off(dev);
3984 }
d765955d 3985 }
aec7ff27 3986
d765955d 3987 /* To handle DMA interrupts */
aec7ff27 3988 stmmac_dma_interrupt(priv);
47dd7a54
GC
3989
3990 return IRQ_HANDLED;
3991}
3992
3993#ifdef CONFIG_NET_POLL_CONTROLLER
3994/* Polling receive - used by NETCONSOLE and other diagnostic tools
ceb69499
GC
3995 * to allow network I/O with interrupts disabled.
3996 */
47dd7a54
GC
3997static void stmmac_poll_controller(struct net_device *dev)
3998{
3999 disable_irq(dev->irq);
4000 stmmac_interrupt(dev->irq, dev);
4001 enable_irq(dev->irq);
4002}
4003#endif
4004
4005/**
4006 * stmmac_ioctl - Entry point for the Ioctl
4007 * @dev: Device pointer.
4008 * @rq: An IOCTL-specific structure that can contain a pointer to
4009 * a proprietary structure used to pass information to the driver.
4010 * @cmd: IOCTL command
4011 * Description:
32ceabca 4012 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
47dd7a54
GC
4013 */
4014static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4015{
74371272 4016 struct stmmac_priv *priv = netdev_priv (dev);
891434b1 4017 int ret = -EOPNOTSUPP;
47dd7a54
GC
4018
4019 if (!netif_running(dev))
4020 return -EINVAL;
4021
891434b1
RK
4022 switch (cmd) {
4023 case SIOCGMIIPHY:
4024 case SIOCGMIIREG:
4025 case SIOCSMIIREG:
74371272 4026 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
891434b1
RK
4027 break;
4028 case SIOCSHWTSTAMP:
d6228b7c
AP
4029 ret = stmmac_hwtstamp_set(dev, rq);
4030 break;
4031 case SIOCGHWTSTAMP:
4032 ret = stmmac_hwtstamp_get(dev, rq);
891434b1
RK
4033 break;
4034 default:
4035 break;
4036 }
28b04113 4037
47dd7a54
GC
4038 return ret;
4039}
4040
4dbbe8dd
JA
4041static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
4042 void *cb_priv)
4043{
4044 struct stmmac_priv *priv = cb_priv;
4045 int ret = -EOPNOTSUPP;
4046
425eabdd
JA
4047 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
4048 return ret;
4049
4dbbe8dd
JA
4050 stmmac_disable_all_queues(priv);
4051
4052 switch (type) {
4053 case TC_SETUP_CLSU32:
425eabdd
JA
4054 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
4055 break;
4056 case TC_SETUP_CLSFLOWER:
4057 ret = stmmac_tc_setup_cls(priv, priv, type_data);
4dbbe8dd
JA
4058 break;
4059 default:
4060 break;
4061 }
4062
4063 stmmac_enable_all_queues(priv);
4064 return ret;
4065}
4066
955bcb6e
PNA
4067static LIST_HEAD(stmmac_block_cb_list);
4068
4dbbe8dd
JA
4069static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
4070 void *type_data)
4071{
4072 struct stmmac_priv *priv = netdev_priv(ndev);
4073
4074 switch (type) {
4075 case TC_SETUP_BLOCK:
955bcb6e
PNA
4076 return flow_block_cb_setup_simple(type_data,
4077 &stmmac_block_cb_list,
4e95bc26
PNA
4078 stmmac_setup_tc_block_cb,
4079 priv, priv, true);
1f705bc6
JA
4080 case TC_SETUP_QDISC_CBS:
4081 return stmmac_tc_setup_cbs(priv, priv, type_data);
4dbbe8dd
JA
4082 default:
4083 return -EOPNOTSUPP;
4084 }
4085}
4086
4993e5b3
JA
4087static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
4088 struct net_device *sb_dev)
4089{
b7766206
JA
4090 int gso = skb_shinfo(skb)->gso_type;
4091
4092 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
4993e5b3 4093 /*
b7766206 4094 * There is no way to determine the number of TSO/USO
4993e5b3 4095 * capable Queues. Let's always use Queue 0
b7766206 4096 * because if TSO/USO is supported then at least this
4993e5b3
JA
4097 * one will be capable.
4098 */
4099 return 0;
4100 }
4101
4102 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
4103}
4104
a830405e
BV
4105static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
4106{
4107 struct stmmac_priv *priv = netdev_priv(ndev);
4108 int ret = 0;
4109
4110 ret = eth_mac_addr(ndev, addr);
4111 if (ret)
4112 return ret;
4113
c10d4c82 4114 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
a830405e
BV
4115
4116 return ret;
4117}
4118
50fb4f74 4119#ifdef CONFIG_DEBUG_FS
7ac29055 4120static struct dentry *stmmac_fs_dir;
7ac29055 4121
c24602ef 4122static void sysfs_display_ring(void *head, int size, int extend_desc,
ceb69499 4123 struct seq_file *seq)
7ac29055 4124{
7ac29055 4125 int i;
ceb69499
GC
4126 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
4127 struct dma_desc *p = (struct dma_desc *)head;
7ac29055 4128
c24602ef 4129 for (i = 0; i < size; i++) {
c24602ef 4130 if (extend_desc) {
c24602ef 4131 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
ceb69499 4132 i, (unsigned int)virt_to_phys(ep),
f8be0d78
MW
4133 le32_to_cpu(ep->basic.des0),
4134 le32_to_cpu(ep->basic.des1),
4135 le32_to_cpu(ep->basic.des2),
4136 le32_to_cpu(ep->basic.des3));
c24602ef
GC
4137 ep++;
4138 } else {
c24602ef 4139 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
66c25f6e 4140 i, (unsigned int)virt_to_phys(p),
f8be0d78
MW
4141 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
4142 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
c24602ef
GC
4143 p++;
4144 }
7ac29055
GC
4145 seq_printf(seq, "\n");
4146 }
c24602ef 4147}
7ac29055 4148
fb0d9c63 4149static int stmmac_rings_status_show(struct seq_file *seq, void *v)
c24602ef
GC
4150{
4151 struct net_device *dev = seq->private;
4152 struct stmmac_priv *priv = netdev_priv(dev);
54139cf3 4153 u32 rx_count = priv->plat->rx_queues_to_use;
ce736788 4154 u32 tx_count = priv->plat->tx_queues_to_use;
54139cf3
JP
4155 u32 queue;
4156
5f2b8b62
TR
4157 if ((dev->flags & IFF_UP) == 0)
4158 return 0;
4159
54139cf3
JP
4160 for (queue = 0; queue < rx_count; queue++) {
4161 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4162
4163 seq_printf(seq, "RX Queue %d:\n", queue);
4164
4165 if (priv->extend_desc) {
4166 seq_printf(seq, "Extended descriptor ring:\n");
4167 sysfs_display_ring((void *)rx_q->dma_erx,
4168 DMA_RX_SIZE, 1, seq);
4169 } else {
4170 seq_printf(seq, "Descriptor ring:\n");
4171 sysfs_display_ring((void *)rx_q->dma_rx,
4172 DMA_RX_SIZE, 0, seq);
4173 }
4174 }
aff3d9ef 4175
ce736788
JP
4176 for (queue = 0; queue < tx_count; queue++) {
4177 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4178
4179 seq_printf(seq, "TX Queue %d:\n", queue);
4180
4181 if (priv->extend_desc) {
4182 seq_printf(seq, "Extended descriptor ring:\n");
4183 sysfs_display_ring((void *)tx_q->dma_etx,
4184 DMA_TX_SIZE, 1, seq);
4185 } else {
4186 seq_printf(seq, "Descriptor ring:\n");
4187 sysfs_display_ring((void *)tx_q->dma_tx,
4188 DMA_TX_SIZE, 0, seq);
4189 }
7ac29055
GC
4190 }
4191
4192 return 0;
4193}
fb0d9c63 4194DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
7ac29055 4195
fb0d9c63 4196static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
e7434821
GC
4197{
4198 struct net_device *dev = seq->private;
4199 struct stmmac_priv *priv = netdev_priv(dev);
4200
19e30c14 4201 if (!priv->hw_cap_support) {
e7434821
GC
4202 seq_printf(seq, "DMA HW features not supported\n");
4203 return 0;
4204 }
4205
4206 seq_printf(seq, "==============================\n");
4207 seq_printf(seq, "\tDMA HW features\n");
4208 seq_printf(seq, "==============================\n");
4209
22d3efe5 4210 seq_printf(seq, "\t10/100 Mbps: %s\n",
e7434821 4211 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
22d3efe5 4212 seq_printf(seq, "\t1000 Mbps: %s\n",
e7434821 4213 (priv->dma_cap.mbps_1000) ? "Y" : "N");
22d3efe5 4214 seq_printf(seq, "\tHalf duplex: %s\n",
e7434821
GC
4215 (priv->dma_cap.half_duplex) ? "Y" : "N");
4216 seq_printf(seq, "\tHash Filter: %s\n",
4217 (priv->dma_cap.hash_filter) ? "Y" : "N");
4218 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
4219 (priv->dma_cap.multi_addr) ? "Y" : "N");
8d45e42b 4220 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
e7434821
GC
4221 (priv->dma_cap.pcs) ? "Y" : "N");
4222 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
4223 (priv->dma_cap.sma_mdio) ? "Y" : "N");
4224 seq_printf(seq, "\tPMT Remote wake up: %s\n",
4225 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
4226 seq_printf(seq, "\tPMT Magic Frame: %s\n",
4227 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
4228 seq_printf(seq, "\tRMON module: %s\n",
4229 (priv->dma_cap.rmon) ? "Y" : "N");
4230 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
4231 (priv->dma_cap.time_stamp) ? "Y" : "N");
22d3efe5 4232 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
e7434821 4233 (priv->dma_cap.atime_stamp) ? "Y" : "N");
22d3efe5 4234 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
e7434821
GC
4235 (priv->dma_cap.eee) ? "Y" : "N");
4236 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
4237 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
4238 (priv->dma_cap.tx_coe) ? "Y" : "N");
f748be53
AT
4239 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4240 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
4241 (priv->dma_cap.rx_coe) ? "Y" : "N");
4242 } else {
4243 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
4244 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
4245 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
4246 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
4247 }
e7434821
GC
4248 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
4249 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
4250 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
4251 priv->dma_cap.number_rx_channel);
4252 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
4253 priv->dma_cap.number_tx_channel);
4254 seq_printf(seq, "\tEnhanced descriptors: %s\n",
4255 (priv->dma_cap.enh_desc) ? "Y" : "N");
4256
4257 return 0;
4258}
fb0d9c63 4259DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
e7434821 4260
8d72ab11 4261static void stmmac_init_fs(struct net_device *dev)
7ac29055 4262{
466c5ac8
MO
4263 struct stmmac_priv *priv = netdev_priv(dev);
4264
4265 /* Create per netdev entries */
4266 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
7ac29055 4267
7ac29055 4268 /* Entry to report DMA RX/TX rings */
8d72ab11
GKH
4269 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
4270 &stmmac_rings_status_fops);
7ac29055 4271
e7434821 4272 /* Entry to report the DMA HW features */
8d72ab11
GKH
4273 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
4274 &stmmac_dma_cap_fops);
7ac29055
GC
4275}
4276
466c5ac8 4277static void stmmac_exit_fs(struct net_device *dev)
7ac29055 4278{
466c5ac8
MO
4279 struct stmmac_priv *priv = netdev_priv(dev);
4280
4281 debugfs_remove_recursive(priv->dbgfs_dir);
7ac29055 4282}
50fb4f74 4283#endif /* CONFIG_DEBUG_FS */
7ac29055 4284
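/* Bit-reversed CRC-32 (reflected polynomial 0xedb88320) computed over the 12
 * VID bits of a VLAN tag. stmmac_vlan_update() below folds the inverted,
 * bit-reversed result down to its top four bits to pick one of 16 hash bits
 * passed to the HW VLAN hash filter.
 */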
3cd1cfcb
JA
4285static u32 stmmac_vid_crc32_le(__le16 vid_le)
4286{
4287 unsigned char *data = (unsigned char *)&vid_le;
4288 unsigned char data_byte = 0;
4289 u32 crc = ~0x0;
4290 u32 temp = 0;
4291 int i, bits;
4292
4293 bits = get_bitmask_order(VLAN_VID_MASK);
4294 for (i = 0; i < bits; i++) {
4295 if ((i % 8) == 0)
4296 data_byte = data[i / 8];
4297
4298 temp = ((crc & 1) ^ data_byte) & 1;
4299 crc >>= 1;
4300 data_byte >>= 1;
4301
4302 if (temp)
4303 crc ^= 0xedb88320;
4304 }
4305
4306 return crc;
4307}
4308
4309static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
4310{
4311 u32 crc, hash = 0;
a24cae70 4312 __le16 pmatch = 0;
c7ab0b80
JA
4313 int count = 0;
4314 u16 vid = 0;
3cd1cfcb
JA
4315
4316 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
4317 __le16 vid_le = cpu_to_le16(vid);
4318 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
4319 hash |= (1 << crc);
c7ab0b80
JA
4320 count++;
4321 }
4322
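	/* Without VLAN hash filtering fall back to a single perfect-match
	 * entry: besides VID 0, which always passes the filter, only one VID
	 * can be matched exactly, so more than two active VIDs cannot be
	 * supported.
	 */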
4323 if (!priv->dma_cap.vlhash) {
4324 if (count > 2) /* VID = 0 always passes filter */
4325 return -EOPNOTSUPP;
4326
a24cae70 4327 pmatch = cpu_to_le16(vid);
c7ab0b80 4328 hash = 0;
3cd1cfcb
JA
4329 }
4330
a24cae70 4331 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
3cd1cfcb
JA
4332}
4333
4334static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
4335{
4336 struct stmmac_priv *priv = netdev_priv(ndev);
4337 bool is_double = false;
4338 int ret;
4339
3cd1cfcb
JA
4340 if (be16_to_cpu(proto) == ETH_P_8021AD)
4341 is_double = true;
4342
4343 set_bit(vid, priv->active_vlans);
4344 ret = stmmac_vlan_update(priv, is_double);
4345 if (ret) {
4346 clear_bit(vid, priv->active_vlans);
4347 return ret;
4348 }
4349
4350 return ret;
4351}
4352
4353static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
4354{
4355 struct stmmac_priv *priv = netdev_priv(ndev);
4356 bool is_double = false;
4357
3cd1cfcb
JA
4358 if (be16_to_cpu(proto) == ETH_P_8021AD)
4359 is_double = true;
4360
4361 clear_bit(vid, priv->active_vlans);
4362 return stmmac_vlan_update(priv, is_double);
4363}
4364
47dd7a54
GC
4365static const struct net_device_ops stmmac_netdev_ops = {
4366 .ndo_open = stmmac_open,
4367 .ndo_start_xmit = stmmac_xmit,
4368 .ndo_stop = stmmac_release,
4369 .ndo_change_mtu = stmmac_change_mtu,
5e982f3b 4370 .ndo_fix_features = stmmac_fix_features,
d2afb5bd 4371 .ndo_set_features = stmmac_set_features,
01789349 4372 .ndo_set_rx_mode = stmmac_set_rx_mode,
47dd7a54
GC
4373 .ndo_tx_timeout = stmmac_tx_timeout,
4374 .ndo_do_ioctl = stmmac_ioctl,
4dbbe8dd 4375 .ndo_setup_tc = stmmac_setup_tc,
4993e5b3 4376 .ndo_select_queue = stmmac_select_queue,
47dd7a54
GC
4377#ifdef CONFIG_NET_POLL_CONTROLLER
4378 .ndo_poll_controller = stmmac_poll_controller,
4379#endif
a830405e 4380 .ndo_set_mac_address = stmmac_set_mac_address,
3cd1cfcb
JA
4381 .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
4382 .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
47dd7a54
GC
4383};
4384
34877a15
JA
4385static void stmmac_reset_subtask(struct stmmac_priv *priv)
4386{
4387 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
4388 return;
4389 if (test_bit(STMMAC_DOWN, &priv->state))
4390 return;
4391
4392 netdev_err(priv->dev, "Reset adapter.\n");
4393
4394 rtnl_lock();
4395 netif_trans_update(priv->dev);
4396 while (test_and_set_bit(STMMAC_RESETING, &priv->state))
4397 usleep_range(1000, 2000);
4398
4399 set_bit(STMMAC_DOWN, &priv->state);
4400 dev_close(priv->dev);
00f54e68 4401 dev_open(priv->dev, NULL);
34877a15
JA
4402 clear_bit(STMMAC_DOWN, &priv->state);
4403 clear_bit(STMMAC_RESETING, &priv->state);
4404 rtnl_unlock();
4405}
4406
4407static void stmmac_service_task(struct work_struct *work)
4408{
4409 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
4410 service_task);
4411
4412 stmmac_reset_subtask(priv);
4413 clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
4414}
4415
cf3f047b
GC
4416/**
4417 * stmmac_hw_init - Init the MAC device
32ceabca 4418 * @priv: driver private structure
732fdf0e
GC
4419 * Description: this function is to configure the MAC device according to
4420 * some platform parameters or the HW capability register. It prepares the
4421 * driver to use either ring or chain modes and to setup either enhanced or
4422 * normal descriptors.
cf3f047b
GC
4423 */
4424static int stmmac_hw_init(struct stmmac_priv *priv)
4425{
5f0456b4 4426 int ret;
cf3f047b 4427
9f93ac8d
LC
4428 /* dwmac-sun8i only work in chain mode */
4429 if (priv->plat->has_sun8i)
4430 chain_mode = 1;
5f0456b4 4431 priv->chain_mode = chain_mode;
9f93ac8d 4432
5f0456b4
JA
4433 /* Initialize HW Interface */
4434 ret = stmmac_hwif_init(priv);
4435 if (ret)
4436 return ret;
4a7d666a 4437
cf3f047b
GC
4438 /* Get the HW capability (new GMAC newer than 3.50a) */
4439 priv->hw_cap_support = stmmac_get_hw_features(priv);
4440 if (priv->hw_cap_support) {
38ddc59d 4441 dev_info(priv->device, "DMA HW capability register supported\n");
cf3f047b
GC
4442
4443 /* We can override some gmac/dma configuration fields: e.g.
4444 * enh_desc, tx_coe (e.g. that are passed through the
4445 * platform) with the values from the HW capability
4446 * register (if supported).
4447 */
4448 priv->plat->enh_desc = priv->dma_cap.enh_desc;
cf3f047b 4449 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
3fe5cadb 4450 priv->hw->pmt = priv->plat->pmt;
b8ef7020
BH
4451 if (priv->dma_cap.hash_tb_sz) {
4452 priv->hw->multicast_filter_bins =
4453 (BIT(priv->dma_cap.hash_tb_sz) << 5);
4454 priv->hw->mcast_bits_log2 =
4455 ilog2(priv->hw->multicast_filter_bins);
4456 }
38912bdb 4457
a8df35d4
EG
4458 /* TXCOE doesn't work in thresh DMA mode */
4459 if (priv->plat->force_thresh_dma_mode)
4460 priv->plat->tx_coe = 0;
4461 else
4462 priv->plat->tx_coe = priv->dma_cap.tx_coe;
4463
f748be53
AT
4464 /* In case of GMAC4 rx_coe is from HW cap register. */
4465 priv->plat->rx_coe = priv->dma_cap.rx_coe;
38912bdb
DS
4466
4467 if (priv->dma_cap.rx_coe_type2)
4468 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4469 else if (priv->dma_cap.rx_coe_type1)
4470 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4471
38ddc59d
LC
4472 } else {
4473 dev_info(priv->device, "No HW DMA feature register supported\n");
4474 }
cf3f047b 4475
d2afb5bd
GC
4476 if (priv->plat->rx_coe) {
4477 priv->hw->rx_csum = priv->plat->rx_coe;
38ddc59d 4478 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
f748be53 4479 if (priv->synopsys_id < DWMAC_CORE_4_00)
38ddc59d 4480 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
d2afb5bd 4481 }
cf3f047b 4482 if (priv->plat->tx_coe)
38ddc59d 4483 dev_info(priv->device, "TX Checksum insertion supported\n");
cf3f047b
GC
4484
4485 if (priv->plat->pmt) {
38ddc59d 4486 dev_info(priv->device, "Wake-Up On Lan supported\n");
cf3f047b
GC
4487 device_set_wakeup_capable(priv->device, 1);
4488 }
4489
f748be53 4490 if (priv->dma_cap.tsoen)
38ddc59d 4491 dev_info(priv->device, "TSO supported\n");
f748be53 4492
7cfde0af
JA
4493 /* Run HW quirks, if any */
4494 if (priv->hwif_quirks) {
4495 ret = priv->hwif_quirks(priv);
4496 if (ret)
4497 return ret;
4498 }
4499
3b509466
JA
4500 /* Rx Watchdog is available in the COREs newer than the 3.40.
4501 * In some case, for example on bugged HW this feature
4502 * has to be disable and this can be done by passing the
4503 * riwt_off field from the platform.
4504 */
4505 if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
4506 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
4507 priv->use_riwt = 1;
4508 dev_info(priv->device,
4509 "Enable RX Mitigation via HW Watchdog Timer\n");
4510 }
4511
c24602ef 4512 return 0;
cf3f047b
GC
4513}
4514
47dd7a54 4515/**
bfab27a1
GC
4516 * stmmac_dvr_probe
4517 * @device: device pointer
ff3dd78c 4518 * @plat_dat: platform data pointer
e56788cf 4519 * @res: stmmac resource pointer
bfab27a1
GC
4520 * Description: this is the main probe function used to
4521 * call the alloc_etherdev, allocate the priv structure.
9afec6ef 4522 * Return:
15ffac73 4523 * returns 0 on success, otherwise errno.
47dd7a54 4524 */
15ffac73
JE
4525int stmmac_dvr_probe(struct device *device,
4526 struct plat_stmmacenet_data *plat_dat,
4527 struct stmmac_resources *res)
47dd7a54 4528{
bfab27a1
GC
4529 struct net_device *ndev = NULL;
4530 struct stmmac_priv *priv;
76067459
JA
4531 u32 queue, rxq, maxq;
4532 int i, ret = 0;
47dd7a54 4533
9737070c
JZ
4534 ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
4535 MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
41de8d4c 4536 if (!ndev)
15ffac73 4537 return -ENOMEM;
bfab27a1
GC
4538
4539 SET_NETDEV_DEV(ndev, device);
4540
4541 priv = netdev_priv(ndev);
4542 priv->device = device;
4543 priv->dev = ndev;
47dd7a54 4544
bfab27a1 4545 stmmac_set_ethtool_ops(ndev);
cf3f047b
GC
4546 priv->pause = pause;
4547 priv->plat = plat_dat;
e56788cf
JE
4548 priv->ioaddr = res->addr;
4549 priv->dev->base_addr = (unsigned long)res->addr;
4550
4551 priv->dev->irq = res->irq;
4552 priv->wol_irq = res->wol_irq;
4553 priv->lpi_irq = res->lpi_irq;
4554
a51645f7 4555 if (!IS_ERR_OR_NULL(res->mac))
e56788cf 4556 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
cf3f047b 4557
a7a62685 4558 dev_set_drvdata(device, priv->dev);
803f8fc4 4559
cf3f047b
GC
4560 /* Verify driver arguments */
4561 stmmac_verify_args();
bfab27a1 4562
34877a15
JA
4563 /* Allocate workqueue */
4564 priv->wq = create_singlethread_workqueue("stmmac_wq");
4565 if (!priv->wq) {
4566 dev_err(priv->device, "failed to create workqueue\n");
9737070c 4567 return -ENOMEM;
34877a15
JA
4568 }
4569
4570 INIT_WORK(&priv->service_task, stmmac_service_task);
4571
cf3f047b 4572 /* Override with kernel parameters if supplied XXX CRS XXX
ceb69499
GC
4573 * this needs to have multiple instances
4574 */
cf3f047b
GC
4575 if ((phyaddr >= 0) && (phyaddr <= 31))
4576 priv->plat->phy_addr = phyaddr;
4577
90f522a2
EP
4578 if (priv->plat->stmmac_rst) {
4579 ret = reset_control_assert(priv->plat->stmmac_rst);
f573c0b9 4580 reset_control_deassert(priv->plat->stmmac_rst);
90f522a2
EP
4581 /* Some reset controllers have only a reset callback instead of
4582 * an assert + deassert callback pair.
4583 */
4584 if (ret == -ENOTSUPP)
4585 reset_control_reset(priv->plat->stmmac_rst);
4586 }
c5e4ddbd 4587
cf3f047b 4588 /* Init MAC and get the capabilities */
c24602ef
GC
4589 ret = stmmac_hw_init(priv);
4590 if (ret)
62866e98 4591 goto error_hw_init;
cf3f047b 4592
b561af36
VK
4593 stmmac_check_ether_addr(priv);
4594
c22a3f48 4595 /* Configure real RX and TX queues */
c02b7a91
JP
4596 netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4597 netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
c22a3f48 4598
cf3f047b 4599 ndev->netdev_ops = &stmmac_netdev_ops;
bfab27a1 4600
cf3f047b
GC
4601 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4602 NETIF_F_RXCSUM;
f748be53 4603
4dbbe8dd
JA
4604 ret = stmmac_tc_init(priv, priv);
4605 if (!ret) {
4606 ndev->hw_features |= NETIF_F_HW_TC;
4607 }
4608
f748be53 4609 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
9edfa7da 4610 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
b7766206
JA
4611 if (priv->plat->has_gmac4)
4612 ndev->hw_features |= NETIF_F_GSO_UDP_L4;
f748be53 4613 priv->tso = true;
38ddc59d 4614 dev_info(priv->device, "TSO feature enabled\n");
f748be53 4615 }
a993db88 4616
67afd6d1
JA
4617 if (priv->dma_cap.sphen) {
4618 ndev->hw_features |= NETIF_F_GRO;
4619 priv->sph = true;
4620 dev_info(priv->device, "SPH feature enabled\n");
4621 }
4622
a993db88
JA
4623 if (priv->dma_cap.addr64) {
4624 ret = dma_set_mask_and_coherent(device,
4625 DMA_BIT_MASK(priv->dma_cap.addr64));
4626 if (!ret) {
4627 dev_info(priv->device, "Using %d bits DMA width\n",
4628 priv->dma_cap.addr64);
968a2978
TR
4629
4630 /*
4631 * If more than 32 bits can be addressed, make sure to
4632 * enable enhanced addressing mode.
4633 */
4634 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
4635 priv->plat->dma_cfg->eame = true;
a993db88
JA
4636 } else {
4637 ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
4638 if (ret) {
4639 dev_err(priv->device, "Failed to set DMA Mask\n");
4640 goto error_hw_init;
4641 }
4642
4643 priv->dma_cap.addr64 = 32;
4644 }
4645 }
4646
bfab27a1
GC
4647 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4648 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
47dd7a54
GC
4649#ifdef STMMAC_VLAN_TAG_USED
4650 /* Both mac100 and gmac support receive VLAN tag detection */
ab188e8f 4651 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
3cd1cfcb
JA
4652 if (priv->dma_cap.vlhash) {
4653 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4654 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
4655 }
30d93227
JA
4656 if (priv->dma_cap.vlins) {
4657 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
4658 if (priv->dma_cap.dvlan)
4659 ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
4660 }
47dd7a54
GC
4661#endif
4662 priv->msg_enable = netif_msg_init(debug, default_msg_level);
4663
76067459
JA
4664 /* Initialize RSS */
4665 rxq = priv->plat->rx_queues_to_use;
4666 netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
4667 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
4668 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
4669
4670 if (priv->dma_cap.rssen && priv->plat->rss_en)
4671 ndev->features |= NETIF_F_RXHASH;
4672
44770e11
JW
4673 /* MTU range: 46 - hw-specific max */
4674 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
56bcd591 4675 if (priv->plat->has_xgmac)
7d9e6c5a 4676 ndev->max_mtu = XGMAC_JUMBO_LEN;
56bcd591
JA
4677 else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4678 ndev->max_mtu = JUMBO_LEN;
44770e11
JW
4679 else
4680 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
a2cd64f3
KHL
4681 /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
4682 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
4683 */
4684 if ((priv->plat->maxmtu < ndev->max_mtu) &&
4685 (priv->plat->maxmtu >= ndev->min_mtu))
44770e11 4686 ndev->max_mtu = priv->plat->maxmtu;
a2cd64f3 4687 else if (priv->plat->maxmtu < ndev->min_mtu)
b618ab45
HK
4688 dev_warn(priv->device,
4689 "%s: warning: maxmtu having invalid value (%d)\n",
4690 __func__, priv->plat->maxmtu);
44770e11 4691
47dd7a54
GC
4692 if (flow_ctrl)
4693 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
4694
8fce3331
JA
4695 /* Setup channels NAPI */
4696 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
c22a3f48 4697
8fce3331
JA
4698 for (queue = 0; queue < maxq; queue++) {
4699 struct stmmac_channel *ch = &priv->channel[queue];
4700
4701 ch->priv_data = priv;
4702 ch->index = queue;
4703
4ccb4585
JA
4704 if (queue < priv->plat->rx_queues_to_use) {
4705 netif_napi_add(ndev, &ch->rx_napi, stmmac_napi_poll_rx,
4706 NAPI_POLL_WEIGHT);
4707 }
4708 if (queue < priv->plat->tx_queues_to_use) {
4d97972b
FI
4709 netif_tx_napi_add(ndev, &ch->tx_napi,
4710 stmmac_napi_poll_tx,
4711 NAPI_POLL_WEIGHT);
4ccb4585 4712 }
c22a3f48 4713 }
47dd7a54 4714
29555fa3 4715 mutex_init(&priv->lock);
f8e96161 4716
cd7201f4
GC
4717 /* If a specific clk_csr value is passed from the platform
4718 * this means that the CSR Clock Range selection cannot be
4719 * changed at run-time and it is fixed. Otherwise the driver will try to
4720 * set the MDC clock dynamically according to the actual csr
4721 * clock input.
4722 */
5e7f7fc5 4723 if (priv->plat->clk_csr >= 0)
cd7201f4 4724 priv->clk_csr = priv->plat->clk_csr;
5e7f7fc5
BH
4725 else
4726 stmmac_clk_csr_set(priv);
cd7201f4 4727
e58bb43f
GC
4728 stmmac_check_pcs_mode(priv);
4729
3fe5cadb
GC
4730 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4731 priv->hw->pcs != STMMAC_PCS_TBI &&
4732 priv->hw->pcs != STMMAC_PCS_RTBI) {
e58bb43f
GC
4733 /* MDIO bus Registration */
4734 ret = stmmac_mdio_register(ndev);
4735 if (ret < 0) {
b618ab45
HK
4736 dev_err(priv->device,
4737 "%s: MDIO bus (id: %d) registration failed",
4738 __func__, priv->plat->bus_id);
e58bb43f
GC
4739 goto error_mdio_register;
4740 }
4bfcbd7a
FV
4741 }
4742
74371272
JA
4743 ret = stmmac_phy_setup(priv);
4744 if (ret) {
4745 netdev_err(ndev, "failed to setup phy (%d)\n", ret);
4746 goto error_phy_setup;
4747 }
4748
57016590 4749 ret = register_netdev(ndev);
b2eb09af 4750 if (ret) {
b618ab45
HK
4751 dev_err(priv->device, "%s: ERROR %i registering the device\n",
4752 __func__, ret);
b2eb09af
FF
4753 goto error_netdev_register;
4754 }
57016590 4755
5f2b8b62 4756#ifdef CONFIG_DEBUG_FS
8d72ab11 4757 stmmac_init_fs(ndev);
5f2b8b62
TR
4758#endif
4759
57016590 4760 return ret;
47dd7a54 4761
6a81c26f 4762error_netdev_register:
74371272
JA
4763 phylink_destroy(priv->phylink);
4764error_phy_setup:
b2eb09af
FF
4765 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4766 priv->hw->pcs != STMMAC_PCS_TBI &&
4767 priv->hw->pcs != STMMAC_PCS_RTBI)
4768 stmmac_mdio_unregister(ndev);
6a81c26f 4769error_mdio_register:
8fce3331
JA
4770 for (queue = 0; queue < maxq; queue++) {
4771 struct stmmac_channel *ch = &priv->channel[queue];
c22a3f48 4772
4ccb4585
JA
4773 if (queue < priv->plat->rx_queues_to_use)
4774 netif_napi_del(&ch->rx_napi);
4775 if (queue < priv->plat->tx_queues_to_use)
4776 netif_napi_del(&ch->tx_napi);
c22a3f48 4777 }
62866e98 4778error_hw_init:
34877a15 4779 destroy_workqueue(priv->wq);
47dd7a54 4780
15ffac73 4781 return ret;
47dd7a54 4782}
b2e2f0c7 4783EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
47dd7a54
GC
4784
4785/**
4786 * stmmac_dvr_remove
f4e7bd81 4787 * @dev: device pointer
47dd7a54 4788 * Description: this function resets the TX/RX processes, disables the MAC RX/TX
bfab27a1 4789 * changes the link status, releases the DMA descriptor rings.
47dd7a54 4790 */
f4e7bd81 4791int stmmac_dvr_remove(struct device *dev)
47dd7a54 4792{
f4e7bd81 4793 struct net_device *ndev = dev_get_drvdata(dev);
aec7ff27 4794 struct stmmac_priv *priv = netdev_priv(ndev);
47dd7a54 4795
38ddc59d 4796 netdev_info(priv->dev, "%s: removing driver", __func__);
47dd7a54 4797
5f2b8b62
TR
4798#ifdef CONFIG_DEBUG_FS
4799 stmmac_exit_fs(ndev);
4800#endif
ae4f0d46 4801 stmmac_stop_all_dma(priv);
47dd7a54 4802
c10d4c82 4803 stmmac_mac_set(priv, priv->ioaddr, false);
47dd7a54 4804 netif_carrier_off(ndev);
47dd7a54 4805 unregister_netdev(ndev);
74371272 4806 phylink_destroy(priv->phylink);
f573c0b9 4807 if (priv->plat->stmmac_rst)
4808 reset_control_assert(priv->plat->stmmac_rst);
4809 clk_disable_unprepare(priv->plat->pclk);
4810 clk_disable_unprepare(priv->plat->stmmac_clk);
3fe5cadb
GC
4811 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4812 priv->hw->pcs != STMMAC_PCS_TBI &&
4813 priv->hw->pcs != STMMAC_PCS_RTBI)
e743471f 4814 stmmac_mdio_unregister(ndev);
34877a15 4815 destroy_workqueue(priv->wq);
29555fa3 4816 mutex_destroy(&priv->lock);
47dd7a54
GC
4817
4818 return 0;
4819}
b2e2f0c7 4820EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
47dd7a54 4821
732fdf0e
GC
4822/**
4823 * stmmac_suspend - suspend callback
f4e7bd81 4824 * @dev: device pointer
732fdf0e
GC
4825 * Description: this is the function to suspend the device and it is called
4826 * by the platform driver to stop the network queue, release the resources,
4827 * program the PMT register (for WoL), clean and release driver resources.
4828 */
f4e7bd81 4829int stmmac_suspend(struct device *dev)
47dd7a54 4830{
f4e7bd81 4831 struct net_device *ndev = dev_get_drvdata(dev);
874bd42d 4832 struct stmmac_priv *priv = netdev_priv(ndev);
47dd7a54 4833
874bd42d 4834 if (!ndev || !netif_running(ndev))
47dd7a54
GC
4835 return 0;
4836
3e2bf04f 4837 phylink_mac_change(priv->phylink, false);
47dd7a54 4838
134cc4ce 4839 mutex_lock(&priv->lock);
19e13cb2 4840
874bd42d 4841 netif_device_detach(ndev);
c22a3f48 4842 stmmac_stop_all_queues(priv);
47dd7a54 4843
c22a3f48 4844 stmmac_disable_all_queues(priv);
874bd42d
GC
4845
4846 /* Stop TX/RX DMA */
ae4f0d46 4847 stmmac_stop_all_dma(priv);
c24602ef 4848
874bd42d 4849 /* Enable Power down mode by programming the PMT regs */
89f7f2cf 4850 if (device_may_wakeup(priv->device)) {
c10d4c82 4851 stmmac_pmt(priv, priv->hw, priv->wolopts);
89f7f2cf
SK
4852 priv->irq_wake = 1;
4853 } else {
134cc4ce 4854 mutex_unlock(&priv->lock);
3e2bf04f
JA
4855 rtnl_lock();
4856 phylink_stop(priv->phylink);
4857 rtnl_unlock();
134cc4ce 4858 mutex_lock(&priv->lock);
3e2bf04f 4859
c10d4c82 4860 stmmac_mac_set(priv, priv->ioaddr, false);
db88f10a 4861 pinctrl_pm_select_sleep_state(priv->device);
ba1377ff 4862 /* Disable the clocks since wake-up (WoL) is not in use */
e497c20e
BH
4863 if (priv->plat->clk_ptp_ref)
4864 clk_disable_unprepare(priv->plat->clk_ptp_ref);
4865 clk_disable_unprepare(priv->plat->pclk);
4866 clk_disable_unprepare(priv->plat->stmmac_clk);
ba1377ff 4867 }
29555fa3 4868 mutex_unlock(&priv->lock);
2d871aa0 4869
bd00632c 4870 priv->speed = SPEED_UNKNOWN;
47dd7a54
GC
4871 return 0;
4872}
b2e2f0c7 4873EXPORT_SYMBOL_GPL(stmmac_suspend);
47dd7a54 4874
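/*
 * Illustrative sketch only: a glue driver that needs SoC specific work
 * around system sleep can wrap stmmac_suspend() instead of using it
 * directly; foo_glue_suspend and foo_glue_gate_phy_clk are hypothetical.
 */
static int foo_glue_suspend(struct device *dev)
{
	int ret = stmmac_suspend(dev);

	/* Hypothetical hook: gate an extra SoC clock once the MAC is down */
	foo_glue_gate_phy_clk(dev);

	return ret;
}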
54139cf3
JP
4875/**
4876 * stmmac_reset_queues_param - reset queue parameters
 4877 * @priv: driver private structure
4878 */
4879static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4880{
4881 u32 rx_cnt = priv->plat->rx_queues_to_use;
ce736788 4882 u32 tx_cnt = priv->plat->tx_queues_to_use;
54139cf3
JP
4883 u32 queue;
4884
4885 for (queue = 0; queue < rx_cnt; queue++) {
4886 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4887
4888 rx_q->cur_rx = 0;
4889 rx_q->dirty_rx = 0;
4890 }
4891
ce736788
JP
4892 for (queue = 0; queue < tx_cnt; queue++) {
4893 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4894
4895 tx_q->cur_tx = 0;
4896 tx_q->dirty_tx = 0;
8d212a9e 4897 tx_q->mss = 0;
ce736788 4898 }
54139cf3
JP
4899}
4900
732fdf0e
GC
4901/**
4902 * stmmac_resume - resume callback
f4e7bd81 4903 * @dev: device pointer
732fdf0e
GC
 4904 * Description: on resume, this function is invoked to set up the DMA and CORE
 4905 * in a usable state.
4906 */
f4e7bd81 4907int stmmac_resume(struct device *dev)
47dd7a54 4908{
f4e7bd81 4909 struct net_device *ndev = dev_get_drvdata(dev);
874bd42d 4910 struct stmmac_priv *priv = netdev_priv(ndev);
47dd7a54 4911
874bd42d 4912 if (!netif_running(ndev))
47dd7a54
GC
4913 return 0;
4914
47dd7a54
GC
 4915 /* The Power Down bit in the PM register is cleared
 4916 * automatically as soon as a magic packet or a Wake-up frame
 4917 * is received. Even so, it is better to clear this bit manually,
 4918 * because it can cause problems when resuming
ceb69499
GC
 4919 * from another device (e.g. a serial console).
 4920 */
623997fb 4921 if (device_may_wakeup(priv->device)) {
29555fa3 4922 mutex_lock(&priv->lock);
c10d4c82 4923 stmmac_pmt(priv, priv->hw, 0);
29555fa3 4924 mutex_unlock(&priv->lock);
89f7f2cf 4925 priv->irq_wake = 0;
623997fb 4926 } else {
db88f10a 4927 pinctrl_pm_select_default_state(priv->device);
8d45e42b 4928 /* Enable the clocks that were previously disabled */
e497c20e
BH
4929 clk_prepare_enable(priv->plat->stmmac_clk);
4930 clk_prepare_enable(priv->plat->pclk);
4931 if (priv->plat->clk_ptp_ref)
4932 clk_prepare_enable(priv->plat->clk_ptp_ref);
623997fb
SK
4933 /* reset the phy so that it's ready */
4934 if (priv->mii)
4935 stmmac_mdio_reset(priv->mii);
4936 }
47dd7a54 4937
874bd42d 4938 netif_device_attach(ndev);
47dd7a54 4939
29555fa3 4940 mutex_lock(&priv->lock);
f55d84b0 4941
54139cf3
JP
4942 stmmac_reset_queues_param(priv);
4943
ae79a639
GC
4944 stmmac_clear_descriptors(priv);
4945
fe131929 4946 stmmac_hw_setup(ndev, false);
d429b66e 4947 stmmac_init_coalesce(priv);
ac316c78 4948 stmmac_set_rx_mode(ndev);
47dd7a54 4949
c22a3f48 4950 stmmac_enable_all_queues(priv);
47dd7a54 4951
c22a3f48 4952 stmmac_start_all_queues(priv);
47dd7a54 4953
19e13cb2 4954 mutex_unlock(&priv->lock);
102463b1 4955
3e2bf04f
JA
4956 if (!device_may_wakeup(priv->device)) {
4957 rtnl_lock();
4958 phylink_start(priv->phylink);
4959 rtnl_unlock();
4960 }
4961
4962 phylink_mac_change(priv->phylink, true);
19e13cb2 4963
47dd7a54
GC
4964 return 0;
4965}
b2e2f0c7 4966EXPORT_SYMBOL_GPL(stmmac_resume);
ba27ec66 4967
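/*
 * Illustrative sketch only: stmmac_suspend()/stmmac_resume() (or glue
 * wrappers around them, as sketched after stmmac_suspend above) are
 * normally exposed through a dev_pm_ops table so the PM core calls them
 * on system sleep. foo_glue_pm_ops and foo_glue_driver are hypothetical.
 */
static SIMPLE_DEV_PM_OPS(foo_glue_pm_ops, stmmac_suspend, stmmac_resume);

static struct platform_driver foo_glue_driver = {
	.probe	= foo_glue_probe,
	.remove	= foo_glue_remove,
	.driver	= {
		.name	= "foo-glue-dwmac",
		.pm	= &foo_glue_pm_ops,
	},
};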
47dd7a54
GC
4968#ifndef MODULE
4969static int __init stmmac_cmdline_opt(char *str)
4970{
4971 char *opt;
4972
4973 if (!str || !*str)
4974 return -EINVAL;
4975 while ((opt = strsep(&str, ",")) != NULL) {
f3240e28 4976 if (!strncmp(opt, "debug:", 6)) {
ea2ab871 4977 if (kstrtoint(opt + 6, 0, &debug))
f3240e28
GC
4978 goto err;
4979 } else if (!strncmp(opt, "phyaddr:", 8)) {
ea2ab871 4980 if (kstrtoint(opt + 8, 0, &phyaddr))
f3240e28 4981 goto err;
f3240e28 4982 } else if (!strncmp(opt, "buf_sz:", 7)) {
ea2ab871 4983 if (kstrtoint(opt + 7, 0, &buf_sz))
f3240e28
GC
4984 goto err;
4985 } else if (!strncmp(opt, "tc:", 3)) {
ea2ab871 4986 if (kstrtoint(opt + 3, 0, &tc))
f3240e28
GC
4987 goto err;
4988 } else if (!strncmp(opt, "watchdog:", 9)) {
ea2ab871 4989 if (kstrtoint(opt + 9, 0, &watchdog))
f3240e28
GC
4990 goto err;
4991 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
ea2ab871 4992 if (kstrtoint(opt + 10, 0, &flow_ctrl))
f3240e28
GC
4993 goto err;
4994 } else if (!strncmp(opt, "pause:", 6)) {
ea2ab871 4995 if (kstrtoint(opt + 6, 0, &pause))
f3240e28 4996 goto err;
506f669c 4997 } else if (!strncmp(opt, "eee_timer:", 10)) {
d765955d
GC
4998 if (kstrtoint(opt + 10, 0, &eee_timer))
4999 goto err;
4a7d666a
GC
5000 } else if (!strncmp(opt, "chain_mode:", 11)) {
5001 if (kstrtoint(opt + 11, 0, &chain_mode))
5002 goto err;
f3240e28 5003 }
47dd7a54
GC
5004 }
5005 return 0;
f3240e28
GC
5006
5007err:
 5008 pr_err("%s: ERROR: broken module parameter conversion\n", __func__);
5009 return -EINVAL;
47dd7a54
GC
5010}
5011
5012__setup("stmmaceth=", stmmac_cmdline_opt);
ceb69499 5013#endif /* MODULE */
6fc0d0f2 5014
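/*
 * Usage note (values are only examples): when the driver is built in,
 * the options handled by stmmac_cmdline_opt() above are passed as one
 * comma separated kernel command line argument, e.g.:
 *
 *   stmmaceth=debug:16,buf_sz:4096,watchdog:10000,eee_timer:2000
 *
 * Each key matches one of the strncmp() checks in that parser.
 */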
466c5ac8
MO
5015static int __init stmmac_init(void)
5016{
5017#ifdef CONFIG_DEBUG_FS
5018 /* Create debugfs main directory if it doesn't exist yet */
8d72ab11 5019 if (!stmmac_fs_dir)
466c5ac8 5020 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
466c5ac8
MO
5021#endif
5022
5023 return 0;
5024}
5025
5026static void __exit stmmac_exit(void)
5027{
5028#ifdef CONFIG_DEBUG_FS
5029 debugfs_remove_recursive(stmmac_fs_dir);
5030#endif
5031}
5032
5033module_init(stmmac_init)
5034module_exit(stmmac_exit)
5035
6fc0d0f2
GC
5036MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
5037MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
5038MODULE_LICENSE("GPL");