drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
/*
 * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
 * DWC Ether MAC version 4.xx has been used for developing this code.
 *
 * This contains the functions to handle the dma.
 *
 * Copyright (C) 2015 STMicroelectronics Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * Author: Alexandre Torgue <alexandre.torgue@st.com>
 */

#include <linux/io.h>
#include "dwmac4.h"
#include "dwmac4_dma.h"

static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
{
	u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
	int i;

	pr_info("dwmac4: Master AXI performs %s burst length\n",
		(value & DMA_SYS_BUS_FB) ? "fixed" : "any");

	if (axi->axi_lpi_en)
		value |= DMA_AXI_EN_LPI;
	if (axi->axi_xit_frm)
		value |= DMA_AXI_LPI_XIT_FRM;

	value &= ~DMA_AXI_WR_OSR_LMT;
	value |= (axi->axi_wr_osr_lmt & DMA_AXI_OSR_MAX) <<
		 DMA_AXI_WR_OSR_LMT_SHIFT;

	value &= ~DMA_AXI_RD_OSR_LMT;
	value |= (axi->axi_rd_osr_lmt & DMA_AXI_OSR_MAX) <<
		 DMA_AXI_RD_OSR_LMT_SHIFT;

	/* Depending on the UNDEF bit, the AXI master performs any burst
	 * length according to the BLEN values programmed below (by default
	 * all BLEN bits are set).
	 */
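	/* axi_blen[] is normally filled from the "snps,blen" DT property;
	 * e.g. snps,blen = <0 0 0 0 16 8 4> would set only the BLEN16,
	 * BLEN8 and BLEN4 bits in the loop below (illustrative values, not
	 * taken from this file).
	 */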
	for (i = 0; i < AXI_BLEN; i++) {
		switch (axi->axi_blen[i]) {
		case 256:
			value |= DMA_AXI_BLEN256;
			break;
		case 128:
			value |= DMA_AXI_BLEN128;
			break;
		case 64:
			value |= DMA_AXI_BLEN64;
			break;
		case 32:
			value |= DMA_AXI_BLEN32;
			break;
		case 16:
			value |= DMA_AXI_BLEN16;
			break;
		case 8:
			value |= DMA_AXI_BLEN8;
			break;
		case 4:
			value |= DMA_AXI_BLEN4;
			break;
		}
	}

	writel(value, ioaddr + DMA_SYS_BUS_MODE);
}

static void dwmac4_dma_init_rx_chan(void __iomem *ioaddr,
				    struct stmmac_dma_cfg *dma_cfg,
				    u32 dma_rx_phy, u32 chan)
{
	u32 value;
	u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;

	value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
	value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
	writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));

	writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(chan));
}

static void dwmac4_dma_init_tx_chan(void __iomem *ioaddr,
				    struct stmmac_dma_cfg *dma_cfg,
				    u32 dma_tx_phy, u32 chan)
{
	u32 value;
	u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;

	value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
	value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT);

	/* Enable OSP to get best performance */
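	/* OSP (Operate on Second Packet) lets the TX DMA fetch and process
	 * the next descriptor while the current packet is still being
	 * transferred, instead of stalling until transmission completes.
	 */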
	value |= DMA_CONTROL_OSP;

	writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));

	writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
}

static void dwmac4_dma_init_channel(void __iomem *ioaddr,
				    struct stmmac_dma_cfg *dma_cfg, u32 chan)
{
	u32 value;

	/* common channel control register config */
	value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
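	/* DMA_BUS_MODE_PBL here acts as the PBLx8 bit: when set, the
	 * TxPBL/RxPBL values programmed in the per-channel TX/RX control
	 * registers are multiplied by 8.
	 */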
	if (dma_cfg->pblx8)
		value = value | DMA_BUS_MODE_PBL;
	writel(value, ioaddr + DMA_CHAN_CONTROL(chan));

	/* Program the default interrupt mask into the per-channel
	 * interrupt enable register
	 */
	writel(DMA_CHAN_INTR_DEFAULT_MASK,
	       ioaddr + DMA_CHAN_INTR_ENA(chan));
}

static void dwmac4_dma_init(void __iomem *ioaddr,
			    struct stmmac_dma_cfg *dma_cfg,
			    u32 dma_tx, u32 dma_rx, int atds)
{
	u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);

	/* Set the Fixed burst mode */
	if (dma_cfg->fixed_burst)
		value |= DMA_SYS_BUS_FB;

	/* Mixed Burst has no effect when fb is set */
	if (dma_cfg->mixed_burst)
		value |= DMA_SYS_BUS_MB;

	if (dma_cfg->aal)
		value |= DMA_SYS_BUS_AAL;

	writel(value, ioaddr + DMA_SYS_BUS_MODE);
}

static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel,
				  u32 *reg_space)
{
	reg_space[DMA_CHAN_CONTROL(channel) / 4] =
		readl(ioaddr + DMA_CHAN_CONTROL(channel));
	reg_space[DMA_CHAN_TX_CONTROL(channel) / 4] =
		readl(ioaddr + DMA_CHAN_TX_CONTROL(channel));
	reg_space[DMA_CHAN_RX_CONTROL(channel) / 4] =
		readl(ioaddr + DMA_CHAN_RX_CONTROL(channel));
	reg_space[DMA_CHAN_TX_BASE_ADDR(channel) / 4] =
		readl(ioaddr + DMA_CHAN_TX_BASE_ADDR(channel));
	reg_space[DMA_CHAN_RX_BASE_ADDR(channel) / 4] =
		readl(ioaddr + DMA_CHAN_RX_BASE_ADDR(channel));
	reg_space[DMA_CHAN_TX_END_ADDR(channel) / 4] =
		readl(ioaddr + DMA_CHAN_TX_END_ADDR(channel));
	reg_space[DMA_CHAN_RX_END_ADDR(channel) / 4] =
		readl(ioaddr + DMA_CHAN_RX_END_ADDR(channel));
	reg_space[DMA_CHAN_TX_RING_LEN(channel) / 4] =
		readl(ioaddr + DMA_CHAN_TX_RING_LEN(channel));
	reg_space[DMA_CHAN_RX_RING_LEN(channel) / 4] =
		readl(ioaddr + DMA_CHAN_RX_RING_LEN(channel));
	reg_space[DMA_CHAN_INTR_ENA(channel) / 4] =
		readl(ioaddr + DMA_CHAN_INTR_ENA(channel));
	reg_space[DMA_CHAN_RX_WATCHDOG(channel) / 4] =
		readl(ioaddr + DMA_CHAN_RX_WATCHDOG(channel));
	reg_space[DMA_CHAN_SLOT_CTRL_STATUS(channel) / 4] =
		readl(ioaddr + DMA_CHAN_SLOT_CTRL_STATUS(channel));
	reg_space[DMA_CHAN_CUR_TX_DESC(channel) / 4] =
		readl(ioaddr + DMA_CHAN_CUR_TX_DESC(channel));
	reg_space[DMA_CHAN_CUR_RX_DESC(channel) / 4] =
		readl(ioaddr + DMA_CHAN_CUR_RX_DESC(channel));
	reg_space[DMA_CHAN_CUR_TX_BUF_ADDR(channel) / 4] =
		readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(channel));
	reg_space[DMA_CHAN_CUR_RX_BUF_ADDR(channel) / 4] =
		readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(channel));
	reg_space[DMA_CHAN_STATUS(channel) / 4] =
		readl(ioaddr + DMA_CHAN_STATUS(channel));
}

static void dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
{
	int i;

	for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
		_dwmac4_dump_dma_regs(ioaddr, i, reg_space);
}

static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 number_chan)
{
	u32 chan;

	for (chan = 0; chan < number_chan; chan++)
		writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(chan));
}

static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
				       u32 channel, int fifosz, u8 qmode)
{
	unsigned int rqs = fifosz / 256 - 1;
	u32 mtl_rx_op, mtl_rx_int;

	mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));

	if (mode == SF_DMA_MODE) {
		pr_debug("GMAC: enable RX store and forward mode\n");
		mtl_rx_op |= MTL_OP_MODE_RSF;
	} else {
		pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode);
		mtl_rx_op &= ~MTL_OP_MODE_RSF;
		mtl_rx_op &= MTL_OP_MODE_RTC_MASK;
		if (mode <= 32)
			mtl_rx_op |= MTL_OP_MODE_RTC_32;
		else if (mode <= 64)
			mtl_rx_op |= MTL_OP_MODE_RTC_64;
		else if (mode <= 96)
			mtl_rx_op |= MTL_OP_MODE_RTC_96;
		else
			mtl_rx_op |= MTL_OP_MODE_RTC_128;
	}

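	/* RQS holds the RX queue size in units of 256 bytes, minus one
	 * (rqs = fifosz / 256 - 1 above); e.g. an 8 KiB queue gives rqs = 31.
	 */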
	mtl_rx_op &= ~MTL_OP_MODE_RQS_MASK;
	mtl_rx_op |= rqs << MTL_OP_MODE_RQS_SHIFT;

	/* Enable flow control only if each channel gets 4 KiB or more of
	 * FIFO and only if the channel is not an AVB channel.
	 */
	if ((fifosz >= 4096) && (qmode != MTL_QUEUE_AVB)) {
		unsigned int rfd, rfa;

		mtl_rx_op |= MTL_OP_MODE_EHFC;

		/* Set Threshold for Activating Flow Control to min 2 frames,
		 * i.e. 1500 * 2 = 3000 bytes.
		 *
		 * Set Threshold for Deactivating Flow Control to min 1 frame,
		 * i.e. 1500 bytes.
		 */
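		/* RFD/RFA are encoded in 512-byte steps below a full FIFO,
		 * starting at "Full - 1 KiB" for 0x00: 0x03 = Full - 2.5 KiB,
		 * 0x06 = Full - 4 KiB, 0x1e = Full - 16 KiB, matching the
		 * annotations below (encoding per the DWC EQOS databook).
		 */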
		switch (fifosz) {
		case 4096:
			/* This violates the above rule because of the FIFO
			 * size limit, so an overflow may still occur.
			 */
			rfd = 0x03; /* Full-2.5K */
			rfa = 0x01; /* Full-1.5K */
			break;

		case 8192:
			rfd = 0x06; /* Full-4K */
			rfa = 0x0a; /* Full-6K */
			break;

		case 16384:
			rfd = 0x06; /* Full-4K */
			rfa = 0x12; /* Full-10K */
			break;

		default:
			rfd = 0x06; /* Full-4K */
			rfa = 0x1e; /* Full-16K */
			break;
		}

		mtl_rx_op &= ~MTL_OP_MODE_RFD_MASK;
		mtl_rx_op |= rfd << MTL_OP_MODE_RFD_SHIFT;

		mtl_rx_op &= ~MTL_OP_MODE_RFA_MASK;
		mtl_rx_op |= rfa << MTL_OP_MODE_RFA_SHIFT;
	}

	writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));

	/* Enable the MTL RX overflow interrupt */
	mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel));
	writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN,
	       ioaddr + MTL_CHAN_INT_CTRL(channel));
}

static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
				       u32 channel, int fifosz, u8 qmode)
{
	u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
	unsigned int tqs = fifosz / 256 - 1;

	if (mode == SF_DMA_MODE) {
		pr_debug("GMAC: enable TX store and forward mode\n");
		/* Transmit COE type 2 cannot be done in cut-through mode. */
		mtl_tx_op |= MTL_OP_MODE_TSF;
	} else {
		pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode);
		mtl_tx_op &= ~MTL_OP_MODE_TSF;
		mtl_tx_op &= MTL_OP_MODE_TTC_MASK;
		/* Set the transmit threshold */
		if (mode <= 32)
			mtl_tx_op |= MTL_OP_MODE_TTC_32;
		else if (mode <= 64)
			mtl_tx_op |= MTL_OP_MODE_TTC_64;
		else if (mode <= 96)
			mtl_tx_op |= MTL_OP_MODE_TTC_96;
		else if (mode <= 128)
			mtl_tx_op |= MTL_OP_MODE_TTC_128;
		else if (mode <= 192)
			mtl_tx_op |= MTL_OP_MODE_TTC_192;
		else if (mode <= 256)
			mtl_tx_op |= MTL_OP_MODE_TTC_256;
		else if (mode <= 384)
			mtl_tx_op |= MTL_OP_MODE_TTC_384;
		else
			mtl_tx_op |= MTL_OP_MODE_TTC_512;
	}
	/* For an IP with DWC_EQOS_NUM_TXQ == 1, the fields TXQEN and TQS are RO
	 * with reset values: TXQEN on, TQS == DWC_EQOS_TXFIFO_SIZE.
	 * For an IP with DWC_EQOS_NUM_TXQ > 1, the fields TXQEN and TQS are R/W
	 * with reset values: TXQEN off, TQS 256 bytes.
	 *
	 * TXQEN must be written for multi-channel operation and TQS must
	 * reflect the available fifo size per queue (total fifo size / number
	 * of enabled queues).
	 */
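	/* TQS is encoded like RQS: queue size in 256-byte blocks, minus one.
	 * E.g. a 16 KiB TX FIFO shared by two queues gives fifosz = 8192 per
	 * queue, hence tqs = 8192 / 256 - 1 = 31 (illustrative figures only).
	 */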
	mtl_tx_op &= ~MTL_OP_MODE_TXQEN_MASK;
	if (qmode != MTL_QUEUE_AVB)
		mtl_tx_op |= MTL_OP_MODE_TXQEN;
	else
		mtl_tx_op |= MTL_OP_MODE_TXQEN_AV;
	mtl_tx_op &= ~MTL_OP_MODE_TQS_MASK;
	mtl_tx_op |= tqs << MTL_OP_MODE_TQS_SHIFT;

	writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
}

static void dwmac4_get_hw_feature(void __iomem *ioaddr,
				  struct dma_features *dma_cap)
{
	u32 hw_cap = readl(ioaddr + GMAC_HW_FEATURE0);

	/* MAC HW feature0 */
	dma_cap->mbps_10_100 = (hw_cap & GMAC_HW_FEAT_MIISEL);
	dma_cap->mbps_1000 = (hw_cap & GMAC_HW_FEAT_GMIISEL) >> 1;
	dma_cap->half_duplex = (hw_cap & GMAC_HW_FEAT_HDSEL) >> 2;
	dma_cap->hash_filter = (hw_cap & GMAC_HW_FEAT_VLHASH) >> 4;
	dma_cap->multi_addr = (hw_cap & GMAC_HW_FEAT_ADDMAC) >> 18;
	dma_cap->pcs = (hw_cap & GMAC_HW_FEAT_PCSSEL) >> 3;
	dma_cap->sma_mdio = (hw_cap & GMAC_HW_FEAT_SMASEL) >> 5;
	dma_cap->pmt_remote_wake_up = (hw_cap & GMAC_HW_FEAT_RWKSEL) >> 6;
	dma_cap->pmt_magic_frame = (hw_cap & GMAC_HW_FEAT_MGKSEL) >> 7;
	/* MMC */
	dma_cap->rmon = (hw_cap & GMAC_HW_FEAT_MMCSEL) >> 8;
	/* IEEE 1588-2008 */
	dma_cap->atime_stamp = (hw_cap & GMAC_HW_FEAT_TSSEL) >> 12;
	/* 802.3az - Energy-Efficient Ethernet (EEE) */
	dma_cap->eee = (hw_cap & GMAC_HW_FEAT_EEESEL) >> 13;
	/* TX and RX csum */
	dma_cap->tx_coe = (hw_cap & GMAC_HW_FEAT_TXCOSEL) >> 14;
	dma_cap->rx_coe = (hw_cap & GMAC_HW_FEAT_RXCOESEL) >> 16;

	/* MAC HW feature1 */
	hw_cap = readl(ioaddr + GMAC_HW_FEATURE1);
	dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20;
	dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18;
	/* RX and TX FIFO sizes are encoded as log2(n / 128). Undo that by
	 * shifting and store the sizes in bytes.
	 */
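	/* E.g. an encoded value of 7 decodes to 128 << 7 = 16384 bytes. */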
	dma_cap->tx_fifo_size = 128 << ((hw_cap & GMAC_HW_TXFIFOSIZE) >> 6);
	dma_cap->rx_fifo_size = 128 << ((hw_cap & GMAC_HW_RXFIFOSIZE) >> 0);
	/* MAC HW feature2 */
	hw_cap = readl(ioaddr + GMAC_HW_FEATURE2);
	/* TX and RX number of channels */
	dma_cap->number_rx_channel =
		((hw_cap & GMAC_HW_FEAT_RXCHCNT) >> 12) + 1;
	dma_cap->number_tx_channel =
		((hw_cap & GMAC_HW_FEAT_TXCHCNT) >> 18) + 1;
	/* TX and RX number of queues */
	dma_cap->number_rx_queues =
		((hw_cap & GMAC_HW_FEAT_RXQCNT) >> 0) + 1;
	dma_cap->number_tx_queues =
		((hw_cap & GMAC_HW_FEAT_TXQCNT) >> 6) + 1;

	/* IEEE 1588-2002 */
	dma_cap->time_stamp = 0;

	/* MAC HW feature3 */
	hw_cap = readl(ioaddr + GMAC_HW_FEATURE3);

	/* 5.10 Features */
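	/* asp: Automotive Safety Package level; frpsel/frpbs/frpes: Flexible
	 * Receive Parser present, its buffer size and number of entries
	 * (field meanings as commonly documented for the DWC EQOS core;
	 * treat the exact semantics as an assumption here).
	 */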
	dma_cap->asp = (hw_cap & GMAC_HW_FEAT_ASP) >> 28;
	dma_cap->frpes = (hw_cap & GMAC_HW_FEAT_FRPES) >> 13;
	dma_cap->frpbs = (hw_cap & GMAC_HW_FEAT_FRPBS) >> 11;
	dma_cap->frpsel = (hw_cap & GMAC_HW_FEAT_FRPSEL) >> 10;
}

/* Enable/disable the TSO feature for a DMA channel */
static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
{
	u32 value;

	if (en) {
		/* enable TSO */
		value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
		writel(value | DMA_CONTROL_TSE,
		       ioaddr + DMA_CHAN_TX_CONTROL(chan));
	} else {
		/* disable TSO */
		value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
		writel(value & ~DMA_CONTROL_TSE,
		       ioaddr + DMA_CHAN_TX_CONTROL(chan));
	}
}

const struct stmmac_dma_ops dwmac4_dma_ops = {
	.reset = dwmac4_dma_reset,
	.init = dwmac4_dma_init,
	.init_chan = dwmac4_dma_init_channel,
	.init_rx_chan = dwmac4_dma_init_rx_chan,
	.init_tx_chan = dwmac4_dma_init_tx_chan,
	.axi = dwmac4_dma_axi,
	.dump_regs = dwmac4_dump_dma_regs,
	.dma_rx_mode = dwmac4_dma_rx_chan_op_mode,
	.dma_tx_mode = dwmac4_dma_tx_chan_op_mode,
	.enable_dma_irq = dwmac4_enable_dma_irq,
	.disable_dma_irq = dwmac4_disable_dma_irq,
	.start_tx = dwmac4_dma_start_tx,
	.stop_tx = dwmac4_dma_stop_tx,
	.start_rx = dwmac4_dma_start_rx,
	.stop_rx = dwmac4_dma_stop_rx,
	.dma_interrupt = dwmac4_dma_interrupt,
	.get_hw_feature = dwmac4_get_hw_feature,
	.rx_watchdog = dwmac4_rx_watchdog,
	.set_rx_ring_len = dwmac4_set_rx_ring_len,
	.set_tx_ring_len = dwmac4_set_tx_ring_len,
	.set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
	.set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
	.enable_tso = dwmac4_enable_tso,
};

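/* The GMAC 4.10 ops below differ from dwmac4_dma_ops only in enable_dma_irq:
 * dwmac410_enable_dma_irq is assumed to account for the different per-channel
 * interrupt-enable bit layout of the 4.10 core.
 */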
const struct stmmac_dma_ops dwmac410_dma_ops = {
	.reset = dwmac4_dma_reset,
	.init = dwmac4_dma_init,
	.init_chan = dwmac4_dma_init_channel,
	.init_rx_chan = dwmac4_dma_init_rx_chan,
	.init_tx_chan = dwmac4_dma_init_tx_chan,
	.axi = dwmac4_dma_axi,
	.dump_regs = dwmac4_dump_dma_regs,
	.dma_rx_mode = dwmac4_dma_rx_chan_op_mode,
	.dma_tx_mode = dwmac4_dma_tx_chan_op_mode,
	.enable_dma_irq = dwmac410_enable_dma_irq,
	.disable_dma_irq = dwmac4_disable_dma_irq,
	.start_tx = dwmac4_dma_start_tx,
	.stop_tx = dwmac4_dma_stop_tx,
	.start_rx = dwmac4_dma_start_rx,
	.stop_rx = dwmac4_dma_stop_rx,
	.dma_interrupt = dwmac4_dma_interrupt,
	.get_hw_feature = dwmac4_get_hw_feature,
	.rx_watchdog = dwmac4_rx_watchdog,
	.set_rx_ring_len = dwmac4_set_rx_ring_len,
	.set_tx_ring_len = dwmac4_set_tx_ring_len,
	.set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
	.set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
	.enable_tso = dwmac4_enable_tso,
};