// SPDX-License-Identifier: GPL-2.0-only
/*
 * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
 * DWC Ether MAC version 4.00 has been used for developing this code.
 *
 * This only implements the mac core functions for this chip.
 *
 * Copyright (C) 2015 STMicroelectronics Ltd
 *
 * Author: Alexandre Torgue <alexandre.torgue@st.com>
 */
13 #include <linux/crc32.h>
14 #include <linux/slab.h>
15 #include <linux/ethtool.h>
19 #include "stmmac_pcs.h"
23 static void dwmac4_core_init(struct mac_device_info
*hw
,
24 struct net_device
*dev
)
26 void __iomem
*ioaddr
= hw
->pcsr
;
27 u32 value
= readl(ioaddr
+ GMAC_CONFIG
);
30 value
|= GMAC_CORE_INIT
;
33 value
|= GMAC_CONFIG_2K
;
35 value
|= GMAC_CONFIG_JE
;
38 value
|= GMAC_CONFIG_TE
;
40 value
&= hw
->link
.speed_mask
;
43 value
|= hw
->link
.speed1000
;
46 value
|= hw
->link
.speed100
;
49 value
|= hw
->link
.speed10
;
54 writel(value
, ioaddr
+ GMAC_CONFIG
);
56 /* Enable GMAC interrupts */
57 value
= GMAC_INT_DEFAULT_ENABLE
;
60 value
|= GMAC_PCS_IRQ_DEFAULT
;
62 writel(value
, ioaddr
+ GMAC_INT_EN
);
65 static void dwmac4_rx_queue_enable(struct mac_device_info
*hw
,
68 void __iomem
*ioaddr
= hw
->pcsr
;
69 u32 value
= readl(ioaddr
+ GMAC_RXQ_CTRL0
);
71 value
&= GMAC_RX_QUEUE_CLEAR(queue
);
72 if (mode
== MTL_QUEUE_AVB
)
73 value
|= GMAC_RX_AV_QUEUE_ENABLE(queue
);
74 else if (mode
== MTL_QUEUE_DCB
)
75 value
|= GMAC_RX_DCB_QUEUE_ENABLE(queue
);
77 writel(value
, ioaddr
+ GMAC_RXQ_CTRL0
);
80 static void dwmac4_rx_queue_priority(struct mac_device_info
*hw
,
83 void __iomem
*ioaddr
= hw
->pcsr
;
87 base_register
= (queue
< 4) ? GMAC_RXQ_CTRL2
: GMAC_RXQ_CTRL3
;
91 value
= readl(ioaddr
+ base_register
);
93 value
&= ~GMAC_RXQCTRL_PSRQX_MASK(queue
);
94 value
|= (prio
<< GMAC_RXQCTRL_PSRQX_SHIFT(queue
)) &
95 GMAC_RXQCTRL_PSRQX_MASK(queue
);
96 writel(value
, ioaddr
+ base_register
);
99 static void dwmac4_tx_queue_priority(struct mac_device_info
*hw
,
102 void __iomem
*ioaddr
= hw
->pcsr
;
106 base_register
= (queue
< 4) ? GMAC_TXQ_PRTY_MAP0
: GMAC_TXQ_PRTY_MAP1
;
110 value
= readl(ioaddr
+ base_register
);
112 value
&= ~GMAC_TXQCTRL_PSTQX_MASK(queue
);
113 value
|= (prio
<< GMAC_TXQCTRL_PSTQX_SHIFT(queue
)) &
114 GMAC_TXQCTRL_PSTQX_MASK(queue
);
116 writel(value
, ioaddr
+ base_register
);
119 static void dwmac4_rx_queue_routing(struct mac_device_info
*hw
,
120 u8 packet
, u32 queue
)
122 void __iomem
*ioaddr
= hw
->pcsr
;
125 static const struct stmmac_rx_routing route_possibilities
[] = {
126 { GMAC_RXQCTRL_AVCPQ_MASK
, GMAC_RXQCTRL_AVCPQ_SHIFT
},
127 { GMAC_RXQCTRL_PTPQ_MASK
, GMAC_RXQCTRL_PTPQ_SHIFT
},
128 { GMAC_RXQCTRL_DCBCPQ_MASK
, GMAC_RXQCTRL_DCBCPQ_SHIFT
},
129 { GMAC_RXQCTRL_UPQ_MASK
, GMAC_RXQCTRL_UPQ_SHIFT
},
130 { GMAC_RXQCTRL_MCBCQ_MASK
, GMAC_RXQCTRL_MCBCQ_SHIFT
},
133 value
= readl(ioaddr
+ GMAC_RXQ_CTRL1
);
135 /* routing configuration */
136 value
&= ~route_possibilities
[packet
- 1].reg_mask
;
137 value
|= (queue
<< route_possibilities
[packet
-1].reg_shift
) &
138 route_possibilities
[packet
- 1].reg_mask
;
140 /* some packets require extra ops */
141 if (packet
== PACKET_AVCPQ
) {
142 value
&= ~GMAC_RXQCTRL_TACPQE
;
143 value
|= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT
;
144 } else if (packet
== PACKET_MCBCQ
) {
145 value
&= ~GMAC_RXQCTRL_MCBCQEN
;
146 value
|= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT
;
149 writel(value
, ioaddr
+ GMAC_RXQ_CTRL1
);
152 static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info
*hw
,
155 void __iomem
*ioaddr
= hw
->pcsr
;
156 u32 value
= readl(ioaddr
+ MTL_OPERATION_MODE
);
158 value
&= ~MTL_OPERATION_RAA
;
160 case MTL_RX_ALGORITHM_SP
:
161 value
|= MTL_OPERATION_RAA_SP
;
163 case MTL_RX_ALGORITHM_WSP
:
164 value
|= MTL_OPERATION_RAA_WSP
;
170 writel(value
, ioaddr
+ MTL_OPERATION_MODE
);
173 static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info
*hw
,
176 void __iomem
*ioaddr
= hw
->pcsr
;
177 u32 value
= readl(ioaddr
+ MTL_OPERATION_MODE
);
179 value
&= ~MTL_OPERATION_SCHALG_MASK
;
181 case MTL_TX_ALGORITHM_WRR
:
182 value
|= MTL_OPERATION_SCHALG_WRR
;
184 case MTL_TX_ALGORITHM_WFQ
:
185 value
|= MTL_OPERATION_SCHALG_WFQ
;
187 case MTL_TX_ALGORITHM_DWRR
:
188 value
|= MTL_OPERATION_SCHALG_DWRR
;
190 case MTL_TX_ALGORITHM_SP
:
191 value
|= MTL_OPERATION_SCHALG_SP
;
197 writel(value
, ioaddr
+ MTL_OPERATION_MODE
);
200 static void dwmac4_set_mtl_tx_queue_weight(struct mac_device_info
*hw
,
201 u32 weight
, u32 queue
)
203 void __iomem
*ioaddr
= hw
->pcsr
;
204 u32 value
= readl(ioaddr
+ MTL_TXQX_WEIGHT_BASE_ADDR(queue
));
206 value
&= ~MTL_TXQ_WEIGHT_ISCQW_MASK
;
207 value
|= weight
& MTL_TXQ_WEIGHT_ISCQW_MASK
;
208 writel(value
, ioaddr
+ MTL_TXQX_WEIGHT_BASE_ADDR(queue
));
211 static void dwmac4_map_mtl_dma(struct mac_device_info
*hw
, u32 queue
, u32 chan
)
213 void __iomem
*ioaddr
= hw
->pcsr
;
217 value
= readl(ioaddr
+ MTL_RXQ_DMA_MAP0
);
219 value
= readl(ioaddr
+ MTL_RXQ_DMA_MAP1
);
221 if (queue
== 0 || queue
== 4) {
222 value
&= ~MTL_RXQ_DMA_Q04MDMACH_MASK
;
223 value
|= MTL_RXQ_DMA_Q04MDMACH(chan
);
225 value
&= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue
);
226 value
|= MTL_RXQ_DMA_QXMDMACH(chan
, queue
);
230 writel(value
, ioaddr
+ MTL_RXQ_DMA_MAP0
);
232 writel(value
, ioaddr
+ MTL_RXQ_DMA_MAP1
);
235 static void dwmac4_config_cbs(struct mac_device_info
*hw
,
236 u32 send_slope
, u32 idle_slope
,
237 u32 high_credit
, u32 low_credit
, u32 queue
)
239 void __iomem
*ioaddr
= hw
->pcsr
;
242 pr_debug("Queue %d configured as AVB. Parameters:\n", queue
);
243 pr_debug("\tsend_slope: 0x%08x\n", send_slope
);
244 pr_debug("\tidle_slope: 0x%08x\n", idle_slope
);
245 pr_debug("\thigh_credit: 0x%08x\n", high_credit
);
246 pr_debug("\tlow_credit: 0x%08x\n", low_credit
);
248 /* enable AV algorithm */
249 value
= readl(ioaddr
+ MTL_ETSX_CTRL_BASE_ADDR(queue
));
250 value
|= MTL_ETS_CTRL_AVALG
;
251 value
|= MTL_ETS_CTRL_CC
;
252 writel(value
, ioaddr
+ MTL_ETSX_CTRL_BASE_ADDR(queue
));
254 /* configure send slope */
255 value
= readl(ioaddr
+ MTL_SEND_SLP_CREDX_BASE_ADDR(queue
));
256 value
&= ~MTL_SEND_SLP_CRED_SSC_MASK
;
257 value
|= send_slope
& MTL_SEND_SLP_CRED_SSC_MASK
;
258 writel(value
, ioaddr
+ MTL_SEND_SLP_CREDX_BASE_ADDR(queue
));
260 /* configure idle slope (same register as tx weight) */
261 dwmac4_set_mtl_tx_queue_weight(hw
, idle_slope
, queue
);
263 /* configure high credit */
264 value
= readl(ioaddr
+ MTL_HIGH_CREDX_BASE_ADDR(queue
));
265 value
&= ~MTL_HIGH_CRED_HC_MASK
;
266 value
|= high_credit
& MTL_HIGH_CRED_HC_MASK
;
267 writel(value
, ioaddr
+ MTL_HIGH_CREDX_BASE_ADDR(queue
));
269 /* configure high credit */
270 value
= readl(ioaddr
+ MTL_LOW_CREDX_BASE_ADDR(queue
));
271 value
&= ~MTL_HIGH_CRED_LC_MASK
;
272 value
|= low_credit
& MTL_HIGH_CRED_LC_MASK
;
273 writel(value
, ioaddr
+ MTL_LOW_CREDX_BASE_ADDR(queue
));
276 static void dwmac4_dump_regs(struct mac_device_info
*hw
, u32
*reg_space
)
278 void __iomem
*ioaddr
= hw
->pcsr
;
281 for (i
= 0; i
< GMAC_REG_NUM
; i
++)
282 reg_space
[i
] = readl(ioaddr
+ i
* 4);
285 static int dwmac4_rx_ipc_enable(struct mac_device_info
*hw
)
287 void __iomem
*ioaddr
= hw
->pcsr
;
288 u32 value
= readl(ioaddr
+ GMAC_CONFIG
);
291 value
|= GMAC_CONFIG_IPC
;
293 value
&= ~GMAC_CONFIG_IPC
;
295 writel(value
, ioaddr
+ GMAC_CONFIG
);
297 value
= readl(ioaddr
+ GMAC_CONFIG
);
299 return !!(value
& GMAC_CONFIG_IPC
);
302 static void dwmac4_pmt(struct mac_device_info
*hw
, unsigned long mode
)
304 void __iomem
*ioaddr
= hw
->pcsr
;
305 unsigned int pmt
= 0;
308 if (mode
& WAKE_MAGIC
) {
309 pr_debug("GMAC: WOL Magic frame\n");
310 pmt
|= power_down
| magic_pkt_en
;
312 if (mode
& WAKE_UCAST
) {
313 pr_debug("GMAC: WOL on global unicast\n");
314 pmt
|= power_down
| global_unicast
| wake_up_frame_en
;
318 /* The receiver must be enabled for WOL before powering down */
319 config
= readl(ioaddr
+ GMAC_CONFIG
);
320 config
|= GMAC_CONFIG_RE
;
321 writel(config
, ioaddr
+ GMAC_CONFIG
);
323 writel(pmt
, ioaddr
+ GMAC_PMT
);
326 static void dwmac4_set_umac_addr(struct mac_device_info
*hw
,
327 unsigned char *addr
, unsigned int reg_n
)
329 void __iomem
*ioaddr
= hw
->pcsr
;
331 stmmac_dwmac4_set_mac_addr(ioaddr
, addr
, GMAC_ADDR_HIGH(reg_n
),
332 GMAC_ADDR_LOW(reg_n
));
335 static void dwmac4_get_umac_addr(struct mac_device_info
*hw
,
336 unsigned char *addr
, unsigned int reg_n
)
338 void __iomem
*ioaddr
= hw
->pcsr
;
340 stmmac_dwmac4_get_mac_addr(ioaddr
, addr
, GMAC_ADDR_HIGH(reg_n
),
341 GMAC_ADDR_LOW(reg_n
));
344 static void dwmac4_set_eee_mode(struct mac_device_info
*hw
,
345 bool en_tx_lpi_clockgating
)
347 void __iomem
*ioaddr
= hw
->pcsr
;
350 /* Enable the link status receive on RGMII, SGMII ore SMII
351 * receive path and instruct the transmit to enter in LPI
354 value
= readl(ioaddr
+ GMAC4_LPI_CTRL_STATUS
);
355 value
|= GMAC4_LPI_CTRL_STATUS_LPIEN
| GMAC4_LPI_CTRL_STATUS_LPITXA
;
357 if (en_tx_lpi_clockgating
)
358 value
|= GMAC4_LPI_CTRL_STATUS_LPITCSE
;
360 writel(value
, ioaddr
+ GMAC4_LPI_CTRL_STATUS
);
363 static void dwmac4_reset_eee_mode(struct mac_device_info
*hw
)
365 void __iomem
*ioaddr
= hw
->pcsr
;
368 value
= readl(ioaddr
+ GMAC4_LPI_CTRL_STATUS
);
369 value
&= ~(GMAC4_LPI_CTRL_STATUS_LPIEN
| GMAC4_LPI_CTRL_STATUS_LPITXA
);
370 writel(value
, ioaddr
+ GMAC4_LPI_CTRL_STATUS
);
373 static void dwmac4_set_eee_pls(struct mac_device_info
*hw
, int link
)
375 void __iomem
*ioaddr
= hw
->pcsr
;
378 value
= readl(ioaddr
+ GMAC4_LPI_CTRL_STATUS
);
381 value
|= GMAC4_LPI_CTRL_STATUS_PLS
;
383 value
&= ~GMAC4_LPI_CTRL_STATUS_PLS
;
385 writel(value
, ioaddr
+ GMAC4_LPI_CTRL_STATUS
);
388 static void dwmac4_set_eee_timer(struct mac_device_info
*hw
, int ls
, int tw
)
390 void __iomem
*ioaddr
= hw
->pcsr
;
391 int value
= ((tw
& 0xffff)) | ((ls
& 0x3ff) << 16);
393 /* Program the timers in the LPI timer control register:
394 * LS: minimum time (ms) for which the link
395 * status from PHY should be ok before transmitting
397 * TW: minimum time (us) for which the core waits
398 * after it has stopped transmitting the LPI pattern.
400 writel(value
, ioaddr
+ GMAC4_LPI_TIMER_CTRL
);
403 static void dwmac4_set_filter(struct mac_device_info
*hw
,
404 struct net_device
*dev
)
406 void __iomem
*ioaddr
= (void __iomem
*)dev
->base_addr
;
407 int numhashregs
= (hw
->multicast_filter_bins
>> 5);
408 int mcbitslog2
= hw
->mcast_bits_log2
;
412 value
= readl(ioaddr
+ GMAC_PACKET_FILTER
);
413 value
&= ~GMAC_PACKET_FILTER_HMC
;
414 value
&= ~GMAC_PACKET_FILTER_HPF
;
415 value
&= ~GMAC_PACKET_FILTER_PCF
;
416 value
&= ~GMAC_PACKET_FILTER_PM
;
417 value
&= ~GMAC_PACKET_FILTER_PR
;
418 if (dev
->flags
& IFF_PROMISC
) {
419 value
= GMAC_PACKET_FILTER_PR
| GMAC_PACKET_FILTER_PCF
;
420 } else if ((dev
->flags
& IFF_ALLMULTI
) ||
421 (netdev_mc_count(dev
) > hw
->multicast_filter_bins
)) {
423 value
|= GMAC_PACKET_FILTER_PM
;
424 /* Set all the bits of the HASH tab */
425 for (i
= 0; i
< numhashregs
; i
++)
426 writel(0xffffffff, ioaddr
+ GMAC_HASH_TAB(i
));
427 } else if (!netdev_mc_empty(dev
)) {
428 struct netdev_hw_addr
*ha
;
431 /* Hash filter for multicast */
432 value
|= GMAC_PACKET_FILTER_HMC
;
434 memset(mc_filter
, 0, sizeof(mc_filter
));
435 netdev_for_each_mc_addr(ha
, dev
) {
436 /* The upper n bits of the calculated CRC are used to
437 * index the contents of the hash table. The number of
438 * bits used depends on the hardware configuration
439 * selected at core configuration time.
441 int bit_nr
= bitrev32(~crc32_le(~0, ha
->addr
,
442 ETH_ALEN
)) >> (32 - mcbitslog2
);
443 /* The most significant bit determines the register to
444 * use (H/L) while the other 5 bits determine the bit
445 * within the register.
447 mc_filter
[bit_nr
>> 5] |= (1 << (bit_nr
& 0x1f));
449 for (i
= 0; i
< numhashregs
; i
++)
450 writel(mc_filter
[i
], ioaddr
+ GMAC_HASH_TAB(i
));
453 value
|= GMAC_PACKET_FILTER_HPF
;
455 /* Handle multiple unicast addresses */
456 if (netdev_uc_count(dev
) > GMAC_MAX_PERFECT_ADDRESSES
) {
457 /* Switch to promiscuous mode if more than 128 addrs
460 value
|= GMAC_PACKET_FILTER_PR
;
462 struct netdev_hw_addr
*ha
;
465 netdev_for_each_uc_addr(ha
, dev
) {
466 dwmac4_set_umac_addr(hw
, ha
->addr
, reg
);
470 while (reg
< GMAC_MAX_PERFECT_ADDRESSES
) {
471 writel(0, ioaddr
+ GMAC_ADDR_HIGH(reg
));
472 writel(0, ioaddr
+ GMAC_ADDR_LOW(reg
));
477 writel(value
, ioaddr
+ GMAC_PACKET_FILTER
);
480 static void dwmac4_flow_ctrl(struct mac_device_info
*hw
, unsigned int duplex
,
481 unsigned int fc
, unsigned int pause_time
,
484 void __iomem
*ioaddr
= hw
->pcsr
;
485 unsigned int flow
= 0;
488 pr_debug("GMAC Flow-Control:\n");
490 pr_debug("\tReceive Flow-Control ON\n");
491 flow
|= GMAC_RX_FLOW_CTRL_RFE
;
493 writel(flow
, ioaddr
+ GMAC_RX_FLOW_CTRL
);
496 pr_debug("\tTransmit Flow-Control ON\n");
499 pr_debug("\tduplex mode: PAUSE %d\n", pause_time
);
501 for (queue
= 0; queue
< tx_cnt
; queue
++) {
502 flow
= GMAC_TX_FLOW_CTRL_TFE
;
506 (pause_time
<< GMAC_TX_FLOW_CTRL_PT_SHIFT
);
508 writel(flow
, ioaddr
+ GMAC_QX_TX_FLOW_CTRL(queue
));
511 for (queue
= 0; queue
< tx_cnt
; queue
++)
512 writel(0, ioaddr
+ GMAC_QX_TX_FLOW_CTRL(queue
));
516 static void dwmac4_ctrl_ane(void __iomem
*ioaddr
, bool ane
, bool srgmi_ral
,
519 dwmac_ctrl_ane(ioaddr
, GMAC_PCS_BASE
, ane
, srgmi_ral
, loopback
);
522 static void dwmac4_rane(void __iomem
*ioaddr
, bool restart
)
524 dwmac_rane(ioaddr
, GMAC_PCS_BASE
, restart
);
527 static void dwmac4_get_adv_lp(void __iomem
*ioaddr
, struct rgmii_adv
*adv
)
529 dwmac_get_adv_lp(ioaddr
, GMAC_PCS_BASE
, adv
);
532 /* RGMII or SMII interface */
533 static void dwmac4_phystatus(void __iomem
*ioaddr
, struct stmmac_extra_stats
*x
)
537 status
= readl(ioaddr
+ GMAC_PHYIF_CONTROL_STATUS
);
540 /* Check the link status */
541 if (status
& GMAC_PHYIF_CTRLSTATUS_LNKSTS
) {
546 speed_value
= ((status
& GMAC_PHYIF_CTRLSTATUS_SPEED
) >>
547 GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT
);
548 if (speed_value
== GMAC_PHYIF_CTRLSTATUS_SPEED_125
)
549 x
->pcs_speed
= SPEED_1000
;
550 else if (speed_value
== GMAC_PHYIF_CTRLSTATUS_SPEED_25
)
551 x
->pcs_speed
= SPEED_100
;
553 x
->pcs_speed
= SPEED_10
;
555 x
->pcs_duplex
= (status
& GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK
);
557 pr_info("Link is Up - %d/%s\n", (int)x
->pcs_speed
,
558 x
->pcs_duplex
? "Full" : "Half");
561 pr_info("Link is Down\n");
565 static int dwmac4_irq_mtl_status(struct mac_device_info
*hw
, u32 chan
)
567 void __iomem
*ioaddr
= hw
->pcsr
;
568 u32 mtl_int_qx_status
;
571 mtl_int_qx_status
= readl(ioaddr
+ MTL_INT_STATUS
);
573 /* Check MTL Interrupt */
574 if (mtl_int_qx_status
& MTL_INT_QX(chan
)) {
575 /* read Queue x Interrupt status */
576 u32 status
= readl(ioaddr
+ MTL_CHAN_INT_CTRL(chan
));
578 if (status
& MTL_RX_OVERFLOW_INT
) {
579 /* clear Interrupt */
580 writel(status
| MTL_RX_OVERFLOW_INT
,
581 ioaddr
+ MTL_CHAN_INT_CTRL(chan
));
582 ret
= CORE_IRQ_MTL_RX_OVERFLOW
;
589 static int dwmac4_irq_status(struct mac_device_info
*hw
,
590 struct stmmac_extra_stats
*x
)
592 void __iomem
*ioaddr
= hw
->pcsr
;
593 u32 intr_status
= readl(ioaddr
+ GMAC_INT_STATUS
);
594 u32 intr_enable
= readl(ioaddr
+ GMAC_INT_EN
);
597 /* Discard disabled bits */
598 intr_status
&= intr_enable
;
600 /* Not used events (e.g. MMC interrupts) are not handled. */
601 if ((intr_status
& mmc_tx_irq
))
603 if (unlikely(intr_status
& mmc_rx_irq
))
605 if (unlikely(intr_status
& mmc_rx_csum_offload_irq
))
606 x
->mmc_rx_csum_offload_irq_n
++;
607 /* Clear the PMT bits 5 and 6 by reading the PMT status reg */
608 if (unlikely(intr_status
& pmt_irq
)) {
609 readl(ioaddr
+ GMAC_PMT
);
610 x
->irq_receive_pmt_irq_n
++;
613 /* MAC tx/rx EEE LPI entry/exit interrupts */
614 if (intr_status
& lpi_irq
) {
615 /* Clear LPI interrupt by reading MAC_LPI_Control_Status */
616 u32 status
= readl(ioaddr
+ GMAC4_LPI_CTRL_STATUS
);
618 if (status
& GMAC4_LPI_CTRL_STATUS_TLPIEN
) {
619 ret
|= CORE_IRQ_TX_PATH_IN_LPI_MODE
;
620 x
->irq_tx_path_in_lpi_mode_n
++;
622 if (status
& GMAC4_LPI_CTRL_STATUS_TLPIEX
) {
623 ret
|= CORE_IRQ_TX_PATH_EXIT_LPI_MODE
;
624 x
->irq_tx_path_exit_lpi_mode_n
++;
626 if (status
& GMAC4_LPI_CTRL_STATUS_RLPIEN
)
627 x
->irq_rx_path_in_lpi_mode_n
++;
628 if (status
& GMAC4_LPI_CTRL_STATUS_RLPIEX
)
629 x
->irq_rx_path_exit_lpi_mode_n
++;
632 dwmac_pcs_isr(ioaddr
, GMAC_PCS_BASE
, intr_status
, x
);
633 if (intr_status
& PCS_RGSMIIIS_IRQ
)
634 dwmac4_phystatus(ioaddr
, x
);
639 static void dwmac4_debug(void __iomem
*ioaddr
, struct stmmac_extra_stats
*x
,
640 u32 rx_queues
, u32 tx_queues
)
645 for (queue
= 0; queue
< tx_queues
; queue
++) {
646 value
= readl(ioaddr
+ MTL_CHAN_TX_DEBUG(queue
));
648 if (value
& MTL_DEBUG_TXSTSFSTS
)
649 x
->mtl_tx_status_fifo_full
++;
650 if (value
& MTL_DEBUG_TXFSTS
)
651 x
->mtl_tx_fifo_not_empty
++;
652 if (value
& MTL_DEBUG_TWCSTS
)
654 if (value
& MTL_DEBUG_TRCSTS_MASK
) {
655 u32 trcsts
= (value
& MTL_DEBUG_TRCSTS_MASK
)
656 >> MTL_DEBUG_TRCSTS_SHIFT
;
657 if (trcsts
== MTL_DEBUG_TRCSTS_WRITE
)
658 x
->mtl_tx_fifo_read_ctrl_write
++;
659 else if (trcsts
== MTL_DEBUG_TRCSTS_TXW
)
660 x
->mtl_tx_fifo_read_ctrl_wait
++;
661 else if (trcsts
== MTL_DEBUG_TRCSTS_READ
)
662 x
->mtl_tx_fifo_read_ctrl_read
++;
664 x
->mtl_tx_fifo_read_ctrl_idle
++;
666 if (value
& MTL_DEBUG_TXPAUSED
)
667 x
->mac_tx_in_pause
++;
670 for (queue
= 0; queue
< rx_queues
; queue
++) {
671 value
= readl(ioaddr
+ MTL_CHAN_RX_DEBUG(queue
));
673 if (value
& MTL_DEBUG_RXFSTS_MASK
) {
674 u32 rxfsts
= (value
& MTL_DEBUG_RXFSTS_MASK
)
675 >> MTL_DEBUG_RRCSTS_SHIFT
;
677 if (rxfsts
== MTL_DEBUG_RXFSTS_FULL
)
678 x
->mtl_rx_fifo_fill_level_full
++;
679 else if (rxfsts
== MTL_DEBUG_RXFSTS_AT
)
680 x
->mtl_rx_fifo_fill_above_thresh
++;
681 else if (rxfsts
== MTL_DEBUG_RXFSTS_BT
)
682 x
->mtl_rx_fifo_fill_below_thresh
++;
684 x
->mtl_rx_fifo_fill_level_empty
++;
686 if (value
& MTL_DEBUG_RRCSTS_MASK
) {
687 u32 rrcsts
= (value
& MTL_DEBUG_RRCSTS_MASK
) >>
688 MTL_DEBUG_RRCSTS_SHIFT
;
690 if (rrcsts
== MTL_DEBUG_RRCSTS_FLUSH
)
691 x
->mtl_rx_fifo_read_ctrl_flush
++;
692 else if (rrcsts
== MTL_DEBUG_RRCSTS_RSTAT
)
693 x
->mtl_rx_fifo_read_ctrl_read_data
++;
694 else if (rrcsts
== MTL_DEBUG_RRCSTS_RDATA
)
695 x
->mtl_rx_fifo_read_ctrl_status
++;
697 x
->mtl_rx_fifo_read_ctrl_idle
++;
699 if (value
& MTL_DEBUG_RWCSTS
)
700 x
->mtl_rx_fifo_ctrl_active
++;
704 value
= readl(ioaddr
+ GMAC_DEBUG
);
706 if (value
& GMAC_DEBUG_TFCSTS_MASK
) {
707 u32 tfcsts
= (value
& GMAC_DEBUG_TFCSTS_MASK
)
708 >> GMAC_DEBUG_TFCSTS_SHIFT
;
710 if (tfcsts
== GMAC_DEBUG_TFCSTS_XFER
)
711 x
->mac_tx_frame_ctrl_xfer
++;
712 else if (tfcsts
== GMAC_DEBUG_TFCSTS_GEN_PAUSE
)
713 x
->mac_tx_frame_ctrl_pause
++;
714 else if (tfcsts
== GMAC_DEBUG_TFCSTS_WAIT
)
715 x
->mac_tx_frame_ctrl_wait
++;
717 x
->mac_tx_frame_ctrl_idle
++;
719 if (value
& GMAC_DEBUG_TPESTS
)
720 x
->mac_gmii_tx_proto_engine
++;
721 if (value
& GMAC_DEBUG_RFCFCSTS_MASK
)
722 x
->mac_rx_frame_ctrl_fifo
= (value
& GMAC_DEBUG_RFCFCSTS_MASK
)
723 >> GMAC_DEBUG_RFCFCSTS_SHIFT
;
724 if (value
& GMAC_DEBUG_RPESTS
)
725 x
->mac_gmii_rx_proto_engine
++;
728 static void dwmac4_set_mac_loopback(void __iomem
*ioaddr
, bool enable
)
730 u32 value
= readl(ioaddr
+ GMAC_CONFIG
);
733 value
|= GMAC_CONFIG_LM
;
735 value
&= ~GMAC_CONFIG_LM
;
737 writel(value
, ioaddr
+ GMAC_CONFIG
);
740 const struct stmmac_ops dwmac4_ops
= {
741 .core_init
= dwmac4_core_init
,
742 .set_mac
= stmmac_set_mac
,
743 .rx_ipc
= dwmac4_rx_ipc_enable
,
744 .rx_queue_enable
= dwmac4_rx_queue_enable
,
745 .rx_queue_prio
= dwmac4_rx_queue_priority
,
746 .tx_queue_prio
= dwmac4_tx_queue_priority
,
747 .rx_queue_routing
= dwmac4_rx_queue_routing
,
748 .prog_mtl_rx_algorithms
= dwmac4_prog_mtl_rx_algorithms
,
749 .prog_mtl_tx_algorithms
= dwmac4_prog_mtl_tx_algorithms
,
750 .set_mtl_tx_queue_weight
= dwmac4_set_mtl_tx_queue_weight
,
751 .map_mtl_to_dma
= dwmac4_map_mtl_dma
,
752 .config_cbs
= dwmac4_config_cbs
,
753 .dump_regs
= dwmac4_dump_regs
,
754 .host_irq_status
= dwmac4_irq_status
,
755 .host_mtl_irq_status
= dwmac4_irq_mtl_status
,
756 .flow_ctrl
= dwmac4_flow_ctrl
,
758 .set_umac_addr
= dwmac4_set_umac_addr
,
759 .get_umac_addr
= dwmac4_get_umac_addr
,
760 .set_eee_mode
= dwmac4_set_eee_mode
,
761 .reset_eee_mode
= dwmac4_reset_eee_mode
,
762 .set_eee_timer
= dwmac4_set_eee_timer
,
763 .set_eee_pls
= dwmac4_set_eee_pls
,
764 .pcs_ctrl_ane
= dwmac4_ctrl_ane
,
765 .pcs_rane
= dwmac4_rane
,
766 .pcs_get_adv_lp
= dwmac4_get_adv_lp
,
767 .debug
= dwmac4_debug
,
768 .set_filter
= dwmac4_set_filter
,
769 .set_mac_loopback
= dwmac4_set_mac_loopback
,
772 const struct stmmac_ops dwmac410_ops
= {
773 .core_init
= dwmac4_core_init
,
774 .set_mac
= stmmac_dwmac4_set_mac
,
775 .rx_ipc
= dwmac4_rx_ipc_enable
,
776 .rx_queue_enable
= dwmac4_rx_queue_enable
,
777 .rx_queue_prio
= dwmac4_rx_queue_priority
,
778 .tx_queue_prio
= dwmac4_tx_queue_priority
,
779 .rx_queue_routing
= dwmac4_rx_queue_routing
,
780 .prog_mtl_rx_algorithms
= dwmac4_prog_mtl_rx_algorithms
,
781 .prog_mtl_tx_algorithms
= dwmac4_prog_mtl_tx_algorithms
,
782 .set_mtl_tx_queue_weight
= dwmac4_set_mtl_tx_queue_weight
,
783 .map_mtl_to_dma
= dwmac4_map_mtl_dma
,
784 .config_cbs
= dwmac4_config_cbs
,
785 .dump_regs
= dwmac4_dump_regs
,
786 .host_irq_status
= dwmac4_irq_status
,
787 .host_mtl_irq_status
= dwmac4_irq_mtl_status
,
788 .flow_ctrl
= dwmac4_flow_ctrl
,
790 .set_umac_addr
= dwmac4_set_umac_addr
,
791 .get_umac_addr
= dwmac4_get_umac_addr
,
792 .set_eee_mode
= dwmac4_set_eee_mode
,
793 .reset_eee_mode
= dwmac4_reset_eee_mode
,
794 .set_eee_timer
= dwmac4_set_eee_timer
,
795 .set_eee_pls
= dwmac4_set_eee_pls
,
796 .pcs_ctrl_ane
= dwmac4_ctrl_ane
,
797 .pcs_rane
= dwmac4_rane
,
798 .pcs_get_adv_lp
= dwmac4_get_adv_lp
,
799 .debug
= dwmac4_debug
,
800 .set_filter
= dwmac4_set_filter
,
801 .set_mac_loopback
= dwmac4_set_mac_loopback
,
804 const struct stmmac_ops dwmac510_ops
= {
805 .core_init
= dwmac4_core_init
,
806 .set_mac
= stmmac_dwmac4_set_mac
,
807 .rx_ipc
= dwmac4_rx_ipc_enable
,
808 .rx_queue_enable
= dwmac4_rx_queue_enable
,
809 .rx_queue_prio
= dwmac4_rx_queue_priority
,
810 .tx_queue_prio
= dwmac4_tx_queue_priority
,
811 .rx_queue_routing
= dwmac4_rx_queue_routing
,
812 .prog_mtl_rx_algorithms
= dwmac4_prog_mtl_rx_algorithms
,
813 .prog_mtl_tx_algorithms
= dwmac4_prog_mtl_tx_algorithms
,
814 .set_mtl_tx_queue_weight
= dwmac4_set_mtl_tx_queue_weight
,
815 .map_mtl_to_dma
= dwmac4_map_mtl_dma
,
816 .config_cbs
= dwmac4_config_cbs
,
817 .dump_regs
= dwmac4_dump_regs
,
818 .host_irq_status
= dwmac4_irq_status
,
819 .host_mtl_irq_status
= dwmac4_irq_mtl_status
,
820 .flow_ctrl
= dwmac4_flow_ctrl
,
822 .set_umac_addr
= dwmac4_set_umac_addr
,
823 .get_umac_addr
= dwmac4_get_umac_addr
,
824 .set_eee_mode
= dwmac4_set_eee_mode
,
825 .reset_eee_mode
= dwmac4_reset_eee_mode
,
826 .set_eee_timer
= dwmac4_set_eee_timer
,
827 .set_eee_pls
= dwmac4_set_eee_pls
,
828 .pcs_ctrl_ane
= dwmac4_ctrl_ane
,
829 .pcs_rane
= dwmac4_rane
,
830 .pcs_get_adv_lp
= dwmac4_get_adv_lp
,
831 .debug
= dwmac4_debug
,
832 .set_filter
= dwmac4_set_filter
,
833 .safety_feat_config
= dwmac5_safety_feat_config
,
834 .safety_feat_irq_status
= dwmac5_safety_feat_irq_status
,
835 .safety_feat_dump
= dwmac5_safety_feat_dump
,
836 .rxp_config
= dwmac5_rxp_config
,
837 .flex_pps_config
= dwmac5_flex_pps_config
,
838 .set_mac_loopback
= dwmac4_set_mac_loopback
,
841 int dwmac4_setup(struct stmmac_priv
*priv
)
843 struct mac_device_info
*mac
= priv
->hw
;
845 dev_info(priv
->device
, "\tDWMAC4/5\n");
847 priv
->dev
->priv_flags
|= IFF_UNICAST_FLT
;
848 mac
->pcsr
= priv
->ioaddr
;
849 mac
->multicast_filter_bins
= priv
->plat
->multicast_filter_bins
;
850 mac
->unicast_filter_entries
= priv
->plat
->unicast_filter_entries
;
851 mac
->mcast_bits_log2
= 0;
853 if (mac
->multicast_filter_bins
)
854 mac
->mcast_bits_log2
= ilog2(mac
->multicast_filter_bins
);
856 mac
->link
.duplex
= GMAC_CONFIG_DM
;
857 mac
->link
.speed10
= GMAC_CONFIG_PS
;
858 mac
->link
.speed100
= GMAC_CONFIG_FES
| GMAC_CONFIG_PS
;
859 mac
->link
.speed1000
= 0;
860 mac
->link
.speed_mask
= GMAC_CONFIG_FES
| GMAC_CONFIG_PS
;
861 mac
->mii
.addr
= GMAC_MDIO_ADDR
;
862 mac
->mii
.data
= GMAC_MDIO_DATA
;
863 mac
->mii
.addr_shift
= 21;
864 mac
->mii
.addr_mask
= GENMASK(25, 21);
865 mac
->mii
.reg_shift
= 16;
866 mac
->mii
.reg_mask
= GENMASK(20, 16);
867 mac
->mii
.clk_csr_shift
= 8;
868 mac
->mii
.clk_csr_mask
= GENMASK(11, 8);