// SPDX-License-Identifier: GPL-2.0-only
/*
 * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
 * DWC Ether MAC version 4.00 has been used for developing this code.
 *
 * This only implements the mac core functions for this chip.
 *
 * Copyright (C) 2015 STMicroelectronics Ltd
 *
 * Author: Alexandre Torgue <alexandre.torgue@st.com>
 */

#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/io.h>
#include <net/dsa.h>
#include "stmmac.h"
#include "stmmac_pcs.h"
#include "dwmac4.h"
#include "dwmac5.h"

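/* Program the base MAC configuration: apply the GMAC_CORE_INIT defaults,
 * select 2K/jumbo frame support from the interface MTU, force the port
 * speed when a fixed speed (hw->ps) is set, and enable the default (and,
 * when a PCS is present, the PCS) interrupt sources.
 */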
static void dwmac4_core_init(struct mac_device_info *hw,
                             struct net_device *dev)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value = readl(ioaddr + GMAC_CONFIG);
        int mtu = dev->mtu;

        value |= GMAC_CORE_INIT;

        if (mtu > 1500)
                value |= GMAC_CONFIG_2K;
        if (mtu > 2000)
                value |= GMAC_CONFIG_JE;

        if (hw->ps) {
                value |= GMAC_CONFIG_TE;

                value &= hw->link.speed_mask;
                switch (hw->ps) {
                case SPEED_1000:
                        value |= hw->link.speed1000;
                        break;
                case SPEED_100:
                        value |= hw->link.speed100;
                        break;
                case SPEED_10:
                        value |= hw->link.speed10;
                        break;
                }
        }

        writel(value, ioaddr + GMAC_CONFIG);

        /* Enable GMAC interrupts */
        value = GMAC_INT_DEFAULT_ENABLE;

        if (hw->pcs)
                value |= GMAC_PCS_IRQ_DEFAULT;

        writel(value, ioaddr + GMAC_INT_EN);
}

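/* Map an RX queue to the AVB or DCB traffic class in GMAC_RXQ_CTRL0;
 * any other mode leaves the queue field cleared, i.e. the queue disabled.
 */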
static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
                                   u8 mode, u32 queue)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value = readl(ioaddr + GMAC_RXQ_CTRL0);

        value &= GMAC_RX_QUEUE_CLEAR(queue);
        if (mode == MTL_QUEUE_AVB)
                value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
        else if (mode == MTL_QUEUE_DCB)
                value |= GMAC_RX_DCB_QUEUE_ENABLE(queue);

        writel(value, ioaddr + GMAC_RXQ_CTRL0);
}

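/* Program the priorities routed to an RX queue: queues 0-3 live in
 * GMAC_RXQ_CTRL2 and queues 4-7 in GMAC_RXQ_CTRL3, each queue owning its
 * own PSRQ field. dwmac4_tx_queue_priority does the same for the TX
 * priority map registers.
 */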
static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
                                     u32 prio, u32 queue)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 base_register;
        u32 value;

        base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
        if (queue >= 4)
                queue -= 4;

        value = readl(ioaddr + base_register);

        value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue);
        value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
                 GMAC_RXQCTRL_PSRQX_MASK(queue);
        writel(value, ioaddr + base_register);
}

static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
                                     u32 prio, u32 queue)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 base_register;
        u32 value;

        base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
        if (queue >= 4)
                queue -= 4;

        value = readl(ioaddr + base_register);

        value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue);
        value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) &
                 GMAC_TXQCTRL_PSTQX_MASK(queue);

        writel(value, ioaddr + base_register);
}

static void dwmac4_rx_queue_routing(struct mac_device_info *hw,
                                    u8 packet, u32 queue)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value;

        static const struct stmmac_rx_routing route_possibilities[] = {
                { GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
                { GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
                { GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
                { GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT },
                { GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT },
        };

        value = readl(ioaddr + GMAC_RXQ_CTRL1);

        /* routing configuration */
        value &= ~route_possibilities[packet - 1].reg_mask;
        value |= (queue << route_possibilities[packet - 1].reg_shift) &
                 route_possibilities[packet - 1].reg_mask;

        /* some packets require extra ops */
        if (packet == PACKET_AVCPQ) {
                value &= ~GMAC_RXQCTRL_TACPQE;
                value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT;
        } else if (packet == PACKET_MCBCQ) {
                value &= ~GMAC_RXQCTRL_MCBCQEN;
                value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT;
        }

        writel(value, ioaddr + GMAC_RXQ_CTRL1);
}

static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw,
                                          u32 rx_alg)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value = readl(ioaddr + MTL_OPERATION_MODE);

        value &= ~MTL_OPERATION_RAA;
        switch (rx_alg) {
        case MTL_RX_ALGORITHM_SP:
                value |= MTL_OPERATION_RAA_SP;
                break;
        case MTL_RX_ALGORITHM_WSP:
                value |= MTL_OPERATION_RAA_WSP;
                break;
        default:
                break;
        }

        writel(value, ioaddr + MTL_OPERATION_MODE);
}

static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw,
                                          u32 tx_alg)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value = readl(ioaddr + MTL_OPERATION_MODE);

        value &= ~MTL_OPERATION_SCHALG_MASK;
        switch (tx_alg) {
        case MTL_TX_ALGORITHM_WRR:
                value |= MTL_OPERATION_SCHALG_WRR;
                break;
        case MTL_TX_ALGORITHM_WFQ:
                value |= MTL_OPERATION_SCHALG_WFQ;
                break;
        case MTL_TX_ALGORITHM_DWRR:
                value |= MTL_OPERATION_SCHALG_DWRR;
                break;
        case MTL_TX_ALGORITHM_SP:
                value |= MTL_OPERATION_SCHALG_SP;
                break;
        default:
                break;
        }

        writel(value, ioaddr + MTL_OPERATION_MODE);
}

static void dwmac4_set_mtl_tx_queue_weight(struct mac_device_info *hw,
                                           u32 weight, u32 queue)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value = readl(ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));

        value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK;
        value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK;
        writel(value, ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
}

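/* Map an RX queue to a DMA channel: queues 0-3 are programmed in
 * MTL_RXQ_DMA_MAP0 and queues 4-7 in MTL_RXQ_DMA_MAP1; queues 0 and 4
 * use the dedicated Q04MDMACH field of their map register.
 */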
static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value;

        if (queue < 4)
                value = readl(ioaddr + MTL_RXQ_DMA_MAP0);
        else
                value = readl(ioaddr + MTL_RXQ_DMA_MAP1);

        if (queue == 0 || queue == 4) {
                value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK;
                value |= MTL_RXQ_DMA_Q04MDMACH(chan);
        } else {
                value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
                value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
        }

        if (queue < 4)
                writel(value, ioaddr + MTL_RXQ_DMA_MAP0);
        else
                writel(value, ioaddr + MTL_RXQ_DMA_MAP1);
}

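/* Configure the Credit Based Shaper for an AVB queue: enable the AV
 * algorithm with credit control, then program the send slope, the idle
 * slope (which shares a register with the TX queue weight), and the
 * high/low credit limits.
 */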
static void dwmac4_config_cbs(struct mac_device_info *hw,
                              u32 send_slope, u32 idle_slope,
                              u32 high_credit, u32 low_credit, u32 queue)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value;

        pr_debug("Queue %d configured as AVB. Parameters:\n", queue);
        pr_debug("\tsend_slope: 0x%08x\n", send_slope);
        pr_debug("\tidle_slope: 0x%08x\n", idle_slope);
        pr_debug("\thigh_credit: 0x%08x\n", high_credit);
        pr_debug("\tlow_credit: 0x%08x\n", low_credit);

        /* enable AV algorithm */
        value = readl(ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
        value |= MTL_ETS_CTRL_AVALG;
        value |= MTL_ETS_CTRL_CC;
        writel(value, ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));

        /* configure send slope */
        value = readl(ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
        value &= ~MTL_SEND_SLP_CRED_SSC_MASK;
        value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK;
        writel(value, ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));

        /* configure idle slope (same register as tx weight) */
        dwmac4_set_mtl_tx_queue_weight(hw, idle_slope, queue);

        /* configure high credit */
        value = readl(ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
        value &= ~MTL_HIGH_CRED_HC_MASK;
        value |= high_credit & MTL_HIGH_CRED_HC_MASK;
        writel(value, ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));

        /* configure low credit */
        value = readl(ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
        value &= ~MTL_HIGH_CRED_LC_MASK;
        value |= low_credit & MTL_HIGH_CRED_LC_MASK;
        writel(value, ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
}

static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
{
        void __iomem *ioaddr = hw->pcsr;
        int i;

        for (i = 0; i < GMAC_REG_NUM; i++)
                reg_space[i] = readl(ioaddr + i * 4);
}

static int dwmac4_rx_ipc_enable(struct mac_device_info *hw)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value = readl(ioaddr + GMAC_CONFIG);

        if (hw->rx_csum)
                value |= GMAC_CONFIG_IPC;
        else
                value &= ~GMAC_CONFIG_IPC;

        writel(value, ioaddr + GMAC_CONFIG);

        value = readl(ioaddr + GMAC_CONFIG);

        return !!(value & GMAC_CONFIG_IPC);
}

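/* Program the PMT register for Wake-on-LAN: magic packet and/or global
 * unicast wake-up frames. When any wake-up source is enabled, the
 * receiver is turned on so that wake-up frames can be detected while
 * the MAC is powered down.
 */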
static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
{
        void __iomem *ioaddr = hw->pcsr;
        unsigned int pmt = 0;
        u32 config;

        if (mode & WAKE_MAGIC) {
                pr_debug("GMAC: WOL Magic frame\n");
                pmt |= power_down | magic_pkt_en;
        }
        if (mode & WAKE_UCAST) {
                pr_debug("GMAC: WOL on global unicast\n");
                pmt |= power_down | global_unicast | wake_up_frame_en;
        }

        if (pmt) {
                /* The receiver must be enabled for WOL before powering down */
                config = readl(ioaddr + GMAC_CONFIG);
                config |= GMAC_CONFIG_RE;
                writel(config, ioaddr + GMAC_CONFIG);
        }
        writel(pmt, ioaddr + GMAC_PMT);
}

static void dwmac4_set_umac_addr(struct mac_device_info *hw,
                                 unsigned char *addr, unsigned int reg_n)
{
        void __iomem *ioaddr = hw->pcsr;

        stmmac_dwmac4_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
                                   GMAC_ADDR_LOW(reg_n));
}

static void dwmac4_get_umac_addr(struct mac_device_info *hw,
                                 unsigned char *addr, unsigned int reg_n)
{
        void __iomem *ioaddr = hw->pcsr;

        stmmac_dwmac4_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
                                   GMAC_ADDR_LOW(reg_n));
}

static void dwmac4_set_eee_mode(struct mac_device_info *hw,
                                bool en_tx_lpi_clockgating)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value;

        /* Enable the link status receive on RGMII, SGMII or SMII
         * receive path and instruct the transmit path to enter the
         * LPI state.
         */
        value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
        value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;

        if (en_tx_lpi_clockgating)
                value |= GMAC4_LPI_CTRL_STATUS_LPITCSE;

        writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_reset_eee_mode(struct mac_device_info *hw)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value;

        value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
        value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA);
        writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value;

        value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);

        if (link)
                value |= GMAC4_LPI_CTRL_STATUS_PLS;
        else
                value &= ~GMAC4_LPI_CTRL_STATUS_PLS;

        writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
{
        void __iomem *ioaddr = hw->pcsr;
        int value = (tw & 0xffff) | ((ls & 0x3ff) << 16);

        /* Program the timers in the LPI timer control register:
         * LS: minimum time (ms) for which the link
         *     status from PHY should be ok before transmitting
         *     the LPI pattern.
         * TW: minimum time (us) for which the core waits
         *     after it has stopped transmitting the LPI pattern.
         */
        writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL);
}

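/* Program the RX packet filter: promiscuous mode passes everything
 * (including control frames); IFF_ALLMULTI, or more multicast addresses
 * than hash bins, passes all multicast; otherwise multicast frames are
 * hash-filtered on the upper bits of the CRC32 of each address. Unicast
 * addresses use the perfect filter entries, falling back to promiscuous
 * mode when more than GMAC_MAX_PERFECT_ADDRESSES are requested.
 */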
static void dwmac4_set_filter(struct mac_device_info *hw,
                              struct net_device *dev)
{
        void __iomem *ioaddr = (void __iomem *)dev->base_addr;
        int numhashregs = (hw->multicast_filter_bins >> 5);
        int mcbitslog2 = hw->mcast_bits_log2;
        unsigned int value;
        int i;

        value = readl(ioaddr + GMAC_PACKET_FILTER);
        value &= ~GMAC_PACKET_FILTER_HMC;
        value &= ~GMAC_PACKET_FILTER_HPF;
        value &= ~GMAC_PACKET_FILTER_PCF;
        value &= ~GMAC_PACKET_FILTER_PM;
        value &= ~GMAC_PACKET_FILTER_PR;
        if (dev->flags & IFF_PROMISC) {
                value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_PCF;
        } else if ((dev->flags & IFF_ALLMULTI) ||
                   (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
                /* Pass all multi */
                value |= GMAC_PACKET_FILTER_PM;
                /* Set all the bits of the HASH tab */
                for (i = 0; i < numhashregs; i++)
                        writel(0xffffffff, ioaddr + GMAC_HASH_TAB(i));
        } else if (!netdev_mc_empty(dev)) {
                struct netdev_hw_addr *ha;
                u32 mc_filter[8];

                /* Hash filter for multicast */
                value |= GMAC_PACKET_FILTER_HMC;

                memset(mc_filter, 0, sizeof(mc_filter));
                netdev_for_each_mc_addr(ha, dev) {
                        /* The upper n bits of the calculated CRC are used to
                         * index the contents of the hash table. The number of
                         * bits used depends on the hardware configuration
                         * selected at core configuration time.
                         */
                        int bit_nr = bitrev32(~crc32_le(~0, ha->addr,
                                        ETH_ALEN)) >> (32 - mcbitslog2);
                        /* The most significant bit determines the register to
                         * use (H/L) while the other 5 bits determine the bit
                         * within the register.
                         */
                        mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1f));
                }
                for (i = 0; i < numhashregs; i++)
                        writel(mc_filter[i], ioaddr + GMAC_HASH_TAB(i));
        }

        value |= GMAC_PACKET_FILTER_HPF;

        /* Handle multiple unicast addresses */
        if (netdev_uc_count(dev) > GMAC_MAX_PERFECT_ADDRESSES) {
                /* Switch to promiscuous mode if more than 128 addrs
                 * are required
                 */
                value |= GMAC_PACKET_FILTER_PR;
        } else {
                struct netdev_hw_addr *ha;
                int reg = 1;

                netdev_for_each_uc_addr(ha, dev) {
                        dwmac4_set_umac_addr(hw, ha->addr, reg);
                        reg++;
                }

                while (reg < GMAC_MAX_PERFECT_ADDRESSES) {
                        writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
                        writel(0, ioaddr + GMAC_ADDR_LOW(reg));
                        reg++;
                }
        }

        writel(value, ioaddr + GMAC_PACKET_FILTER);
}

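/* Program flow control: enable/disable RX flow control in
 * GMAC_RX_FLOW_CTRL and, per TX queue, transmit flow control with the
 * requested pause time (only meaningful in full duplex).
 */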
static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
                             unsigned int fc, unsigned int pause_time,
                             u32 tx_cnt)
{
        void __iomem *ioaddr = hw->pcsr;
        unsigned int flow = 0;
        u32 queue = 0;

        pr_debug("GMAC Flow-Control:\n");
        if (fc & FLOW_RX) {
                pr_debug("\tReceive Flow-Control ON\n");
                flow |= GMAC_RX_FLOW_CTRL_RFE;
        }
        writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);

        if (fc & FLOW_TX) {
                pr_debug("\tTransmit Flow-Control ON\n");

                if (duplex)
                        pr_debug("\tduplex mode: PAUSE %d\n", pause_time);

                for (queue = 0; queue < tx_cnt; queue++) {
                        flow = GMAC_TX_FLOW_CTRL_TFE;

                        if (duplex)
                                flow |=
                                (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);

                        writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
                }
        } else {
                for (queue = 0; queue < tx_cnt; queue++)
                        writel(0, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
        }
}

static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
                            bool loopback)
{
        dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
}

static void dwmac4_rane(void __iomem *ioaddr, bool restart)
{
        dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
}

static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
{
        dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
}

/* RGMII or SMII interface */
static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
{
        u32 status;

        status = readl(ioaddr + GMAC_PHYIF_CONTROL_STATUS);
        x->irq_rgmii_n++;

        /* Check the link status */
        if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) {
                int speed_value;

                x->pcs_link = 1;

                speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >>
                               GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT);
                if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125)
                        x->pcs_speed = SPEED_1000;
                else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25)
                        x->pcs_speed = SPEED_100;
                else
                        x->pcs_speed = SPEED_10;

                x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK);

                pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
                        x->pcs_duplex ? "Full" : "Half");
        } else {
                x->pcs_link = 0;
                pr_info("Link is Down\n");
        }
}

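/* Check the per-queue MTL interrupt status; on an RX overflow event,
 * clear it and report CORE_IRQ_MTL_RX_OVERFLOW to the caller.
 */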
static int dwmac4_irq_mtl_status(struct mac_device_info *hw, u32 chan)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 mtl_int_qx_status;
        int ret = 0;

        mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);

        /* Check MTL Interrupt */
        if (mtl_int_qx_status & MTL_INT_QX(chan)) {
                /* read Queue x Interrupt status */
                u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(chan));

                if (status & MTL_RX_OVERFLOW_INT) {
                        /* clear Interrupt */
                        writel(status | MTL_RX_OVERFLOW_INT,
                               ioaddr + MTL_CHAN_INT_CTRL(chan));
                        ret = CORE_IRQ_MTL_RX_OVERFLOW;
                }
        }

        return ret;
}

static int dwmac4_irq_status(struct mac_device_info *hw,
                             struct stmmac_extra_stats *x)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
        u32 intr_enable = readl(ioaddr + GMAC_INT_EN);
        int ret = 0;

        /* Discard disabled bits */
        intr_status &= intr_enable;

        /* Unused events (e.g. MMC interrupts) are not handled. */
        if (intr_status & mmc_tx_irq)
                x->mmc_tx_irq_n++;
        if (unlikely(intr_status & mmc_rx_irq))
                x->mmc_rx_irq_n++;
        if (unlikely(intr_status & mmc_rx_csum_offload_irq))
                x->mmc_rx_csum_offload_irq_n++;
        /* Clear the PMT bits 5 and 6 by reading the PMT status reg */
        if (unlikely(intr_status & pmt_irq)) {
                readl(ioaddr + GMAC_PMT);
                x->irq_receive_pmt_irq_n++;
        }

        /* MAC tx/rx EEE LPI entry/exit interrupts */
        if (intr_status & lpi_irq) {
                /* Clear LPI interrupt by reading MAC_LPI_Control_Status */
                u32 status = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);

                if (status & GMAC4_LPI_CTRL_STATUS_TLPIEN) {
                        ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
                        x->irq_tx_path_in_lpi_mode_n++;
                }
                if (status & GMAC4_LPI_CTRL_STATUS_TLPIEX) {
                        ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
                        x->irq_tx_path_exit_lpi_mode_n++;
                }
                if (status & GMAC4_LPI_CTRL_STATUS_RLPIEN)
                        x->irq_rx_path_in_lpi_mode_n++;
                if (status & GMAC4_LPI_CTRL_STATUS_RLPIEX)
                        x->irq_rx_path_exit_lpi_mode_n++;
        }

        dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
        if (intr_status & PCS_RGSMIIIS_IRQ)
                dwmac4_phystatus(ioaddr, x);

        return ret;
}

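/* Decode the per-queue MTL TX/RX debug registers and the GMAC debug
 * register into the ethtool extra statistics counters.
 */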
static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
                         u32 rx_queues, u32 tx_queues)
{
        u32 value;
        u32 queue;

        for (queue = 0; queue < tx_queues; queue++) {
                value = readl(ioaddr + MTL_CHAN_TX_DEBUG(queue));

                if (value & MTL_DEBUG_TXSTSFSTS)
                        x->mtl_tx_status_fifo_full++;
                if (value & MTL_DEBUG_TXFSTS)
                        x->mtl_tx_fifo_not_empty++;
                if (value & MTL_DEBUG_TWCSTS)
                        x->mmtl_fifo_ctrl++;
                if (value & MTL_DEBUG_TRCSTS_MASK) {
                        u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
                                     >> MTL_DEBUG_TRCSTS_SHIFT;
                        if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
                                x->mtl_tx_fifo_read_ctrl_write++;
                        else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
                                x->mtl_tx_fifo_read_ctrl_wait++;
                        else if (trcsts == MTL_DEBUG_TRCSTS_READ)
                                x->mtl_tx_fifo_read_ctrl_read++;
                        else
                                x->mtl_tx_fifo_read_ctrl_idle++;
                }
                if (value & MTL_DEBUG_TXPAUSED)
                        x->mac_tx_in_pause++;
        }

        for (queue = 0; queue < rx_queues; queue++) {
                value = readl(ioaddr + MTL_CHAN_RX_DEBUG(queue));

                if (value & MTL_DEBUG_RXFSTS_MASK) {
                        u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
                                     >> MTL_DEBUG_RRCSTS_SHIFT;

                        if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
                                x->mtl_rx_fifo_fill_level_full++;
                        else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
                                x->mtl_rx_fifo_fill_above_thresh++;
                        else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
                                x->mtl_rx_fifo_fill_below_thresh++;
                        else
                                x->mtl_rx_fifo_fill_level_empty++;
                }
                if (value & MTL_DEBUG_RRCSTS_MASK) {
                        u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
                                     MTL_DEBUG_RRCSTS_SHIFT;

                        if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
                                x->mtl_rx_fifo_read_ctrl_flush++;
                        else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
                                x->mtl_rx_fifo_read_ctrl_read_data++;
                        else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
                                x->mtl_rx_fifo_read_ctrl_status++;
                        else
                                x->mtl_rx_fifo_read_ctrl_idle++;
                }
                if (value & MTL_DEBUG_RWCSTS)
                        x->mtl_rx_fifo_ctrl_active++;
        }

        /* GMAC debug */
        value = readl(ioaddr + GMAC_DEBUG);

        if (value & GMAC_DEBUG_TFCSTS_MASK) {
                u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK)
                             >> GMAC_DEBUG_TFCSTS_SHIFT;

                if (tfcsts == GMAC_DEBUG_TFCSTS_XFER)
                        x->mac_tx_frame_ctrl_xfer++;
                else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE)
                        x->mac_tx_frame_ctrl_pause++;
                else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT)
                        x->mac_tx_frame_ctrl_wait++;
                else
                        x->mac_tx_frame_ctrl_idle++;
        }
        if (value & GMAC_DEBUG_TPESTS)
                x->mac_gmii_tx_proto_engine++;
        if (value & GMAC_DEBUG_RFCFCSTS_MASK)
                x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK)
                                            >> GMAC_DEBUG_RFCFCSTS_SHIFT;
        if (value & GMAC_DEBUG_RPESTS)
                x->mac_gmii_rx_proto_engine++;
}

static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable)
{
        u32 value = readl(ioaddr + GMAC_CONFIG);

        if (enable)
                value |= GMAC_CONFIG_LM;
        else
                value &= ~GMAC_CONFIG_LM;

        writel(value, ioaddr + GMAC_CONFIG);
}

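/* The three stmmac_ops tables below differ only in .set_mac
 * (stmmac_set_mac vs. stmmac_dwmac4_set_mac) and in the extra dwmac5
 * callbacks (safety features, RX parser, flexible PPS) that
 * dwmac510_ops adds.
 */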
const struct stmmac_ops dwmac4_ops = {
        .core_init = dwmac4_core_init,
        .set_mac = stmmac_set_mac,
        .rx_ipc = dwmac4_rx_ipc_enable,
        .rx_queue_enable = dwmac4_rx_queue_enable,
        .rx_queue_prio = dwmac4_rx_queue_priority,
        .tx_queue_prio = dwmac4_tx_queue_priority,
        .rx_queue_routing = dwmac4_rx_queue_routing,
        .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
        .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
        .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
        .map_mtl_to_dma = dwmac4_map_mtl_dma,
        .config_cbs = dwmac4_config_cbs,
        .dump_regs = dwmac4_dump_regs,
        .host_irq_status = dwmac4_irq_status,
        .host_mtl_irq_status = dwmac4_irq_mtl_status,
        .flow_ctrl = dwmac4_flow_ctrl,
        .pmt = dwmac4_pmt,
        .set_umac_addr = dwmac4_set_umac_addr,
        .get_umac_addr = dwmac4_get_umac_addr,
        .set_eee_mode = dwmac4_set_eee_mode,
        .reset_eee_mode = dwmac4_reset_eee_mode,
        .set_eee_timer = dwmac4_set_eee_timer,
        .set_eee_pls = dwmac4_set_eee_pls,
        .pcs_ctrl_ane = dwmac4_ctrl_ane,
        .pcs_rane = dwmac4_rane,
        .pcs_get_adv_lp = dwmac4_get_adv_lp,
        .debug = dwmac4_debug,
        .set_filter = dwmac4_set_filter,
        .set_mac_loopback = dwmac4_set_mac_loopback,
};

const struct stmmac_ops dwmac410_ops = {
        .core_init = dwmac4_core_init,
        .set_mac = stmmac_dwmac4_set_mac,
        .rx_ipc = dwmac4_rx_ipc_enable,
        .rx_queue_enable = dwmac4_rx_queue_enable,
        .rx_queue_prio = dwmac4_rx_queue_priority,
        .tx_queue_prio = dwmac4_tx_queue_priority,
        .rx_queue_routing = dwmac4_rx_queue_routing,
        .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
        .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
        .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
        .map_mtl_to_dma = dwmac4_map_mtl_dma,
        .config_cbs = dwmac4_config_cbs,
        .dump_regs = dwmac4_dump_regs,
        .host_irq_status = dwmac4_irq_status,
        .host_mtl_irq_status = dwmac4_irq_mtl_status,
        .flow_ctrl = dwmac4_flow_ctrl,
        .pmt = dwmac4_pmt,
        .set_umac_addr = dwmac4_set_umac_addr,
        .get_umac_addr = dwmac4_get_umac_addr,
        .set_eee_mode = dwmac4_set_eee_mode,
        .reset_eee_mode = dwmac4_reset_eee_mode,
        .set_eee_timer = dwmac4_set_eee_timer,
        .set_eee_pls = dwmac4_set_eee_pls,
        .pcs_ctrl_ane = dwmac4_ctrl_ane,
        .pcs_rane = dwmac4_rane,
        .pcs_get_adv_lp = dwmac4_get_adv_lp,
        .debug = dwmac4_debug,
        .set_filter = dwmac4_set_filter,
        .set_mac_loopback = dwmac4_set_mac_loopback,
};

const struct stmmac_ops dwmac510_ops = {
        .core_init = dwmac4_core_init,
        .set_mac = stmmac_dwmac4_set_mac,
        .rx_ipc = dwmac4_rx_ipc_enable,
        .rx_queue_enable = dwmac4_rx_queue_enable,
        .rx_queue_prio = dwmac4_rx_queue_priority,
        .tx_queue_prio = dwmac4_tx_queue_priority,
        .rx_queue_routing = dwmac4_rx_queue_routing,
        .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
        .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
        .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
        .map_mtl_to_dma = dwmac4_map_mtl_dma,
        .config_cbs = dwmac4_config_cbs,
        .dump_regs = dwmac4_dump_regs,
        .host_irq_status = dwmac4_irq_status,
        .host_mtl_irq_status = dwmac4_irq_mtl_status,
        .flow_ctrl = dwmac4_flow_ctrl,
        .pmt = dwmac4_pmt,
        .set_umac_addr = dwmac4_set_umac_addr,
        .get_umac_addr = dwmac4_get_umac_addr,
        .set_eee_mode = dwmac4_set_eee_mode,
        .reset_eee_mode = dwmac4_reset_eee_mode,
        .set_eee_timer = dwmac4_set_eee_timer,
        .set_eee_pls = dwmac4_set_eee_pls,
        .pcs_ctrl_ane = dwmac4_ctrl_ane,
        .pcs_rane = dwmac4_rane,
        .pcs_get_adv_lp = dwmac4_get_adv_lp,
        .debug = dwmac4_debug,
        .set_filter = dwmac4_set_filter,
        .safety_feat_config = dwmac5_safety_feat_config,
        .safety_feat_irq_status = dwmac5_safety_feat_irq_status,
        .safety_feat_dump = dwmac5_safety_feat_dump,
        .rxp_config = dwmac5_rxp_config,
        .flex_pps_config = dwmac5_flex_pps_config,
        .set_mac_loopback = dwmac4_set_mac_loopback,
};

int dwmac4_setup(struct stmmac_priv *priv)
{
        struct mac_device_info *mac = priv->hw;

        dev_info(priv->device, "\tDWMAC4/5\n");

        priv->dev->priv_flags |= IFF_UNICAST_FLT;
        mac->pcsr = priv->ioaddr;
        mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
        mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
        mac->mcast_bits_log2 = 0;

        if (mac->multicast_filter_bins)
                mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);

        mac->link.duplex = GMAC_CONFIG_DM;
        mac->link.speed10 = GMAC_CONFIG_PS;
        mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
        mac->link.speed1000 = 0;
        mac->link.speed_mask = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
        mac->mii.addr = GMAC_MDIO_ADDR;
        mac->mii.data = GMAC_MDIO_DATA;
        mac->mii.addr_shift = 21;
        mac->mii.addr_mask = GENMASK(25, 21);
        mac->mii.reg_shift = 16;
        mac->mii.reg_mask = GENMASK(20, 16);
        mac->mii.clk_csr_shift = 8;
        mac->mii.clk_csr_mask = GENMASK(11, 8);

        return 0;
}