/*
 * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
 * DWC Ether MAC version 4.xx has been used for developing this code.
 *
 * This contains the functions to handle the dma.
 *
 * Copyright (C) 2015 STMicroelectronics Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * Author: Alexandre Torgue <alexandre.torgue@st.com>
 */
18 #include "dwmac4_dma.h"
20 static void dwmac4_dma_axi(void __iomem
*ioaddr
, struct stmmac_axi
*axi
)
22 u32 value
= readl(ioaddr
+ DMA_SYS_BUS_MODE
);
25 pr_info("dwmac4: Master AXI performs %s burst length\n",
26 (value
& DMA_SYS_BUS_FB
) ? "fixed" : "any");
29 value
|= DMA_AXI_EN_LPI
;
31 value
|= DMA_AXI_LPI_XIT_FRM
;
33 value
&= ~DMA_AXI_WR_OSR_LMT
;
34 value
|= (axi
->axi_wr_osr_lmt
& DMA_AXI_OSR_MAX
) <<
35 DMA_AXI_WR_OSR_LMT_SHIFT
;
37 value
&= ~DMA_AXI_RD_OSR_LMT
;
38 value
|= (axi
->axi_rd_osr_lmt
& DMA_AXI_OSR_MAX
) <<
39 DMA_AXI_RD_OSR_LMT_SHIFT
;
41 /* Depending on the UNDEF bit the Master AXI will perform any burst
42 * length according to the BLEN programmed (by default all BLEN are
45 for (i
= 0; i
< AXI_BLEN
; i
++) {
46 switch (axi
->axi_blen
[i
]) {
48 value
|= DMA_AXI_BLEN256
;
51 value
|= DMA_AXI_BLEN128
;
54 value
|= DMA_AXI_BLEN64
;
57 value
|= DMA_AXI_BLEN32
;
60 value
|= DMA_AXI_BLEN16
;
63 value
|= DMA_AXI_BLEN8
;
66 value
|= DMA_AXI_BLEN4
;
71 writel(value
, ioaddr
+ DMA_SYS_BUS_MODE
);
74 static void dwmac4_dma_init_rx_chan(void __iomem
*ioaddr
,
75 struct stmmac_dma_cfg
*dma_cfg
,
76 u32 dma_rx_phy
, u32 chan
)
79 u32 rxpbl
= dma_cfg
->rxpbl
?: dma_cfg
->pbl
;
81 value
= readl(ioaddr
+ DMA_CHAN_RX_CONTROL(chan
));
82 value
= value
| (rxpbl
<< DMA_BUS_MODE_RPBL_SHIFT
);
83 writel(value
, ioaddr
+ DMA_CHAN_RX_CONTROL(chan
));
85 writel(dma_rx_phy
, ioaddr
+ DMA_CHAN_RX_BASE_ADDR(chan
));
88 static void dwmac4_dma_init_tx_chan(void __iomem
*ioaddr
,
89 struct stmmac_dma_cfg
*dma_cfg
,
90 u32 dma_tx_phy
, u32 chan
)
93 u32 txpbl
= dma_cfg
->txpbl
?: dma_cfg
->pbl
;
95 value
= readl(ioaddr
+ DMA_CHAN_TX_CONTROL(chan
));
96 value
= value
| (txpbl
<< DMA_BUS_MODE_PBL_SHIFT
);
97 writel(value
, ioaddr
+ DMA_CHAN_TX_CONTROL(chan
));
99 writel(dma_tx_phy
, ioaddr
+ DMA_CHAN_TX_BASE_ADDR(chan
));
102 static void dwmac4_dma_init_channel(void __iomem
*ioaddr
,
103 struct stmmac_dma_cfg
*dma_cfg
, u32 chan
)
107 /* common channel control register config */
108 value
= readl(ioaddr
+ DMA_CHAN_CONTROL(chan
));
110 value
= value
| DMA_BUS_MODE_PBL
;
111 writel(value
, ioaddr
+ DMA_CHAN_CONTROL(chan
));
113 /* Mask interrupts by writing to CSR7 */
114 writel(DMA_CHAN_INTR_DEFAULT_MASK
,
115 ioaddr
+ DMA_CHAN_INTR_ENA(chan
));
118 static void dwmac4_dma_init(void __iomem
*ioaddr
,
119 struct stmmac_dma_cfg
*dma_cfg
,
120 u32 dma_tx
, u32 dma_rx
, int atds
)
122 u32 value
= readl(ioaddr
+ DMA_SYS_BUS_MODE
);
124 /* Set the Fixed burst mode */
125 if (dma_cfg
->fixed_burst
)
126 value
|= DMA_SYS_BUS_FB
;
128 /* Mixed Burst has no effect when fb is set */
129 if (dma_cfg
->mixed_burst
)
130 value
|= DMA_SYS_BUS_MB
;
133 value
|= DMA_SYS_BUS_AAL
;
135 writel(value
, ioaddr
+ DMA_SYS_BUS_MODE
);
138 static void _dwmac4_dump_dma_regs(void __iomem
*ioaddr
, u32 channel
,
141 reg_space
[DMA_CHAN_CONTROL(channel
) / 4] =
142 readl(ioaddr
+ DMA_CHAN_CONTROL(channel
));
143 reg_space
[DMA_CHAN_TX_CONTROL(channel
) / 4] =
144 readl(ioaddr
+ DMA_CHAN_TX_CONTROL(channel
));
145 reg_space
[DMA_CHAN_RX_CONTROL(channel
) / 4] =
146 readl(ioaddr
+ DMA_CHAN_RX_CONTROL(channel
));
147 reg_space
[DMA_CHAN_TX_BASE_ADDR(channel
) / 4] =
148 readl(ioaddr
+ DMA_CHAN_TX_BASE_ADDR(channel
));
149 reg_space
[DMA_CHAN_RX_BASE_ADDR(channel
) / 4] =
150 readl(ioaddr
+ DMA_CHAN_RX_BASE_ADDR(channel
));
151 reg_space
[DMA_CHAN_TX_END_ADDR(channel
) / 4] =
152 readl(ioaddr
+ DMA_CHAN_TX_END_ADDR(channel
));
153 reg_space
[DMA_CHAN_RX_END_ADDR(channel
) / 4] =
154 readl(ioaddr
+ DMA_CHAN_RX_END_ADDR(channel
));
155 reg_space
[DMA_CHAN_TX_RING_LEN(channel
) / 4] =
156 readl(ioaddr
+ DMA_CHAN_TX_RING_LEN(channel
));
157 reg_space
[DMA_CHAN_RX_RING_LEN(channel
) / 4] =
158 readl(ioaddr
+ DMA_CHAN_RX_RING_LEN(channel
));
159 reg_space
[DMA_CHAN_INTR_ENA(channel
) / 4] =
160 readl(ioaddr
+ DMA_CHAN_INTR_ENA(channel
));
161 reg_space
[DMA_CHAN_RX_WATCHDOG(channel
) / 4] =
162 readl(ioaddr
+ DMA_CHAN_RX_WATCHDOG(channel
));
163 reg_space
[DMA_CHAN_SLOT_CTRL_STATUS(channel
) / 4] =
164 readl(ioaddr
+ DMA_CHAN_SLOT_CTRL_STATUS(channel
));
165 reg_space
[DMA_CHAN_CUR_TX_DESC(channel
) / 4] =
166 readl(ioaddr
+ DMA_CHAN_CUR_TX_DESC(channel
));
167 reg_space
[DMA_CHAN_CUR_RX_DESC(channel
) / 4] =
168 readl(ioaddr
+ DMA_CHAN_CUR_RX_DESC(channel
));
169 reg_space
[DMA_CHAN_CUR_TX_BUF_ADDR(channel
) / 4] =
170 readl(ioaddr
+ DMA_CHAN_CUR_TX_BUF_ADDR(channel
));
171 reg_space
[DMA_CHAN_CUR_RX_BUF_ADDR(channel
) / 4] =
172 readl(ioaddr
+ DMA_CHAN_CUR_RX_BUF_ADDR(channel
));
173 reg_space
[DMA_CHAN_STATUS(channel
) / 4] =
174 readl(ioaddr
+ DMA_CHAN_STATUS(channel
));
177 static void dwmac4_dump_dma_regs(void __iomem
*ioaddr
, u32
*reg_space
)
181 for (i
= 0; i
< DMA_CHANNEL_NB_MAX
; i
++)
182 _dwmac4_dump_dma_regs(ioaddr
, i
, reg_space
);
185 static void dwmac4_rx_watchdog(void __iomem
*ioaddr
, u32 riwt
, u32 number_chan
)
189 for (chan
= 0; chan
< number_chan
; chan
++)
190 writel(riwt
, ioaddr
+ DMA_CHAN_RX_WATCHDOG(chan
));
193 static void dwmac4_dma_rx_chan_op_mode(void __iomem
*ioaddr
, int mode
,
194 u32 channel
, int fifosz
, u8 qmode
)
196 unsigned int rqs
= fifosz
/ 256 - 1;
197 u32 mtl_rx_op
, mtl_rx_int
;
199 mtl_rx_op
= readl(ioaddr
+ MTL_CHAN_RX_OP_MODE(channel
));
201 if (mode
== SF_DMA_MODE
) {
202 pr_debug("GMAC: enable RX store and forward mode\n");
203 mtl_rx_op
|= MTL_OP_MODE_RSF
;
205 pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode
);
206 mtl_rx_op
&= ~MTL_OP_MODE_RSF
;
207 mtl_rx_op
&= MTL_OP_MODE_RTC_MASK
;
209 mtl_rx_op
|= MTL_OP_MODE_RTC_32
;
211 mtl_rx_op
|= MTL_OP_MODE_RTC_64
;
213 mtl_rx_op
|= MTL_OP_MODE_RTC_96
;
215 mtl_rx_op
|= MTL_OP_MODE_RTC_128
;
218 mtl_rx_op
&= ~MTL_OP_MODE_RQS_MASK
;
219 mtl_rx_op
|= rqs
<< MTL_OP_MODE_RQS_SHIFT
;
221 /* Enable flow control only if each channel gets 4 KiB or more FIFO and
222 * only if channel is not an AVB channel.
224 if ((fifosz
>= 4096) && (qmode
!= MTL_QUEUE_AVB
)) {
225 unsigned int rfd
, rfa
;
227 mtl_rx_op
|= MTL_OP_MODE_EHFC
;
229 /* Set Threshold for Activating Flow Control to min 2 frames,
230 * i.e. 1500 * 2 = 3000 bytes.
232 * Set Threshold for Deactivating Flow Control to min 1 frame,
237 /* This violates the above formula because of FIFO size
238 * limit therefore overflow may occur in spite of this.
240 rfd
= 0x03; /* Full-2.5K */
241 rfa
= 0x01; /* Full-1.5K */
245 rfd
= 0x06; /* Full-4K */
246 rfa
= 0x0a; /* Full-6K */
250 rfd
= 0x06; /* Full-4K */
251 rfa
= 0x12; /* Full-10K */
255 rfd
= 0x06; /* Full-4K */
256 rfa
= 0x1e; /* Full-16K */
260 mtl_rx_op
&= ~MTL_OP_MODE_RFD_MASK
;
261 mtl_rx_op
|= rfd
<< MTL_OP_MODE_RFD_SHIFT
;
263 mtl_rx_op
&= ~MTL_OP_MODE_RFA_MASK
;
264 mtl_rx_op
|= rfa
<< MTL_OP_MODE_RFA_SHIFT
;
267 writel(mtl_rx_op
, ioaddr
+ MTL_CHAN_RX_OP_MODE(channel
));
269 /* Enable MTL RX overflow */
270 mtl_rx_int
= readl(ioaddr
+ MTL_CHAN_INT_CTRL(channel
));
271 writel(mtl_rx_int
| MTL_RX_OVERFLOW_INT_EN
,
272 ioaddr
+ MTL_CHAN_INT_CTRL(channel
));
275 static void dwmac4_dma_tx_chan_op_mode(void __iomem
*ioaddr
, int mode
,
276 u32 channel
, int fifosz
, u8 qmode
)
278 u32 mtl_tx_op
= readl(ioaddr
+ MTL_CHAN_TX_OP_MODE(channel
));
279 unsigned int tqs
= fifosz
/ 256 - 1;
281 if (mode
== SF_DMA_MODE
) {
282 pr_debug("GMAC: enable TX store and forward mode\n");
283 /* Transmit COE type 2 cannot be done in cut-through mode. */
284 mtl_tx_op
|= MTL_OP_MODE_TSF
;
286 pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode
);
287 mtl_tx_op
&= ~MTL_OP_MODE_TSF
;
288 mtl_tx_op
&= MTL_OP_MODE_TTC_MASK
;
289 /* Set the transmit threshold */
291 mtl_tx_op
|= MTL_OP_MODE_TTC_32
;
293 mtl_tx_op
|= MTL_OP_MODE_TTC_64
;
295 mtl_tx_op
|= MTL_OP_MODE_TTC_96
;
296 else if (mode
<= 128)
297 mtl_tx_op
|= MTL_OP_MODE_TTC_128
;
298 else if (mode
<= 192)
299 mtl_tx_op
|= MTL_OP_MODE_TTC_192
;
300 else if (mode
<= 256)
301 mtl_tx_op
|= MTL_OP_MODE_TTC_256
;
302 else if (mode
<= 384)
303 mtl_tx_op
|= MTL_OP_MODE_TTC_384
;
305 mtl_tx_op
|= MTL_OP_MODE_TTC_512
;
307 /* For an IP with DWC_EQOS_NUM_TXQ == 1, the fields TXQEN and TQS are RO
308 * with reset values: TXQEN on, TQS == DWC_EQOS_TXFIFO_SIZE.
309 * For an IP with DWC_EQOS_NUM_TXQ > 1, the fields TXQEN and TQS are R/W
310 * with reset values: TXQEN off, TQS 256 bytes.
312 * TXQEN must be written for multi-channel operation and TQS must
313 * reflect the available fifo size per queue (total fifo size / number
314 * of enabled queues).
316 mtl_tx_op
&= ~MTL_OP_MODE_TXQEN_MASK
;
317 if (qmode
!= MTL_QUEUE_AVB
)
318 mtl_tx_op
|= MTL_OP_MODE_TXQEN
;
320 mtl_tx_op
|= MTL_OP_MODE_TXQEN_AV
;
321 mtl_tx_op
&= ~MTL_OP_MODE_TQS_MASK
;
322 mtl_tx_op
|= tqs
<< MTL_OP_MODE_TQS_SHIFT
;
324 writel(mtl_tx_op
, ioaddr
+ MTL_CHAN_TX_OP_MODE(channel
));
327 static void dwmac4_get_hw_feature(void __iomem
*ioaddr
,
328 struct dma_features
*dma_cap
)
330 u32 hw_cap
= readl(ioaddr
+ GMAC_HW_FEATURE0
);
332 /* MAC HW feature0 */
333 dma_cap
->mbps_10_100
= (hw_cap
& GMAC_HW_FEAT_MIISEL
);
334 dma_cap
->mbps_1000
= (hw_cap
& GMAC_HW_FEAT_GMIISEL
) >> 1;
335 dma_cap
->half_duplex
= (hw_cap
& GMAC_HW_FEAT_HDSEL
) >> 2;
336 dma_cap
->hash_filter
= (hw_cap
& GMAC_HW_FEAT_VLHASH
) >> 4;
337 dma_cap
->multi_addr
= (hw_cap
& GMAC_HW_FEAT_ADDMAC
) >> 18;
338 dma_cap
->pcs
= (hw_cap
& GMAC_HW_FEAT_PCSSEL
) >> 3;
339 dma_cap
->sma_mdio
= (hw_cap
& GMAC_HW_FEAT_SMASEL
) >> 5;
340 dma_cap
->pmt_remote_wake_up
= (hw_cap
& GMAC_HW_FEAT_RWKSEL
) >> 6;
341 dma_cap
->pmt_magic_frame
= (hw_cap
& GMAC_HW_FEAT_MGKSEL
) >> 7;
343 dma_cap
->rmon
= (hw_cap
& GMAC_HW_FEAT_MMCSEL
) >> 8;
345 dma_cap
->atime_stamp
= (hw_cap
& GMAC_HW_FEAT_TSSEL
) >> 12;
346 /* 802.3az - Energy-Efficient Ethernet (EEE) */
347 dma_cap
->eee
= (hw_cap
& GMAC_HW_FEAT_EEESEL
) >> 13;
349 dma_cap
->tx_coe
= (hw_cap
& GMAC_HW_FEAT_TXCOSEL
) >> 14;
350 dma_cap
->rx_coe
= (hw_cap
& GMAC_HW_FEAT_RXCOESEL
) >> 16;
352 /* MAC HW feature1 */
353 hw_cap
= readl(ioaddr
+ GMAC_HW_FEATURE1
);
354 dma_cap
->av
= (hw_cap
& GMAC_HW_FEAT_AVSEL
) >> 20;
355 dma_cap
->tsoen
= (hw_cap
& GMAC_HW_TSOEN
) >> 18;
356 /* RX and TX FIFO sizes are encoded as log2(n / 128). Undo that by
357 * shifting and store the sizes in bytes.
359 dma_cap
->tx_fifo_size
= 128 << ((hw_cap
& GMAC_HW_TXFIFOSIZE
) >> 6);
360 dma_cap
->rx_fifo_size
= 128 << ((hw_cap
& GMAC_HW_RXFIFOSIZE
) >> 0);
361 /* MAC HW feature2 */
362 hw_cap
= readl(ioaddr
+ GMAC_HW_FEATURE2
);
363 /* TX and RX number of channels */
364 dma_cap
->number_rx_channel
=
365 ((hw_cap
& GMAC_HW_FEAT_RXCHCNT
) >> 12) + 1;
366 dma_cap
->number_tx_channel
=
367 ((hw_cap
& GMAC_HW_FEAT_TXCHCNT
) >> 18) + 1;
368 /* TX and RX number of queues */
369 dma_cap
->number_rx_queues
=
370 ((hw_cap
& GMAC_HW_FEAT_RXQCNT
) >> 0) + 1;
371 dma_cap
->number_tx_queues
=
372 ((hw_cap
& GMAC_HW_FEAT_TXQCNT
) >> 6) + 1;
375 dma_cap
->time_stamp
= 0;
377 /* MAC HW feature3 */
378 hw_cap
= readl(ioaddr
+ GMAC_HW_FEATURE3
);
381 dma_cap
->asp
= (hw_cap
& GMAC_HW_FEAT_ASP
) >> 28;
384 /* Enable/disable TSO feature and set MSS */
385 static void dwmac4_enable_tso(void __iomem
*ioaddr
, bool en
, u32 chan
)
391 value
= readl(ioaddr
+ DMA_CHAN_TX_CONTROL(chan
));
392 writel(value
| DMA_CONTROL_TSE
,
393 ioaddr
+ DMA_CHAN_TX_CONTROL(chan
));
396 value
= readl(ioaddr
+ DMA_CHAN_TX_CONTROL(chan
));
397 writel(value
& ~DMA_CONTROL_TSE
,
398 ioaddr
+ DMA_CHAN_TX_CONTROL(chan
));
402 const struct stmmac_dma_ops dwmac4_dma_ops
= {
403 .reset
= dwmac4_dma_reset
,
404 .init
= dwmac4_dma_init
,
405 .init_chan
= dwmac4_dma_init_channel
,
406 .init_rx_chan
= dwmac4_dma_init_rx_chan
,
407 .init_tx_chan
= dwmac4_dma_init_tx_chan
,
408 .axi
= dwmac4_dma_axi
,
409 .dump_regs
= dwmac4_dump_dma_regs
,
410 .dma_rx_mode
= dwmac4_dma_rx_chan_op_mode
,
411 .dma_tx_mode
= dwmac4_dma_tx_chan_op_mode
,
412 .enable_dma_irq
= dwmac4_enable_dma_irq
,
413 .disable_dma_irq
= dwmac4_disable_dma_irq
,
414 .start_tx
= dwmac4_dma_start_tx
,
415 .stop_tx
= dwmac4_dma_stop_tx
,
416 .start_rx
= dwmac4_dma_start_rx
,
417 .stop_rx
= dwmac4_dma_stop_rx
,
418 .dma_interrupt
= dwmac4_dma_interrupt
,
419 .get_hw_feature
= dwmac4_get_hw_feature
,
420 .rx_watchdog
= dwmac4_rx_watchdog
,
421 .set_rx_ring_len
= dwmac4_set_rx_ring_len
,
422 .set_tx_ring_len
= dwmac4_set_tx_ring_len
,
423 .set_rx_tail_ptr
= dwmac4_set_rx_tail_ptr
,
424 .set_tx_tail_ptr
= dwmac4_set_tx_tail_ptr
,
425 .enable_tso
= dwmac4_enable_tso
,
428 const struct stmmac_dma_ops dwmac410_dma_ops
= {
429 .reset
= dwmac4_dma_reset
,
430 .init
= dwmac4_dma_init
,
431 .init_chan
= dwmac4_dma_init_channel
,
432 .init_rx_chan
= dwmac4_dma_init_rx_chan
,
433 .init_tx_chan
= dwmac4_dma_init_tx_chan
,
434 .axi
= dwmac4_dma_axi
,
435 .dump_regs
= dwmac4_dump_dma_regs
,
436 .dma_rx_mode
= dwmac4_dma_rx_chan_op_mode
,
437 .dma_tx_mode
= dwmac4_dma_tx_chan_op_mode
,
438 .enable_dma_irq
= dwmac410_enable_dma_irq
,
439 .disable_dma_irq
= dwmac4_disable_dma_irq
,
440 .start_tx
= dwmac4_dma_start_tx
,
441 .stop_tx
= dwmac4_dma_stop_tx
,
442 .start_rx
= dwmac4_dma_start_rx
,
443 .stop_rx
= dwmac4_dma_stop_rx
,
444 .dma_interrupt
= dwmac4_dma_interrupt
,
445 .get_hw_feature
= dwmac4_get_hw_feature
,
446 .rx_watchdog
= dwmac4_rx_watchdog
,
447 .set_rx_ring_len
= dwmac4_set_rx_ring_len
,
448 .set_tx_ring_len
= dwmac4_set_tx_ring_len
,
449 .set_rx_tail_ptr
= dwmac4_set_rx_tail_ptr
,
450 .set_tx_tail_ptr
= dwmac4_set_tx_tail_ptr
,
451 .enable_tso
= dwmac4_enable_tso
,