// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Álvaro Fernández Rojas <noltari@gmail.com>
 *
 * Derived from linux/drivers/dma/bcm63xx-iudma.c:
 * Copyright (C) 2015 Simon Arlott <simon@fire.lp0.eu>
 *
 * Derived from linux/drivers/net/ethernet/broadcom/bcm63xx_enet.c:
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 *
 * Derived from bcm963xx_4.12L.06B_consumer/shared/opensource/include/bcm963xx/63268_map_part.h:
 * Copyright (C) 2000-2010 Broadcom Corporation
 *
 * Derived from bcm963xx_4.12L.06B_consumer/bcmdrivers/opensource/net/enet/impl4/bcmenet.c:
 * Copyright (C) 2010 Broadcom Corporation
 */
d678a59d | 18 | #include <common.h> |
ccfd6988 | 19 | #include <clk.h> |
1eb69ae4 | 20 | #include <cpu_func.h> |
ccfd6988 ÁFR |
21 | #include <dm.h> |
22 | #include <dma-uclass.h> | |
f7ae49fc | 23 | #include <log.h> |
336d4615 | 24 | #include <malloc.h> |
ccfd6988 | 25 | #include <memalign.h> |
90526e9f | 26 | #include <net.h> |
ccfd6988 ÁFR |
27 | #include <reset.h> |
28 | #include <asm/io.h> | |
cd93d625 | 29 | #include <linux/bitops.h> |
c05ed00a | 30 | #include <linux/delay.h> |
1e94b46f | 31 | #include <linux/printk.h> |
ccfd6988 ÁFR |
32 | |
#define DMA_RX_DESC	6
#define DMA_TX_DESC	1

/* DMA Channels */
#define DMA_CHAN_FLOWC(x)		((x) >> 1)
#define DMA_CHAN_MAX			16
#define DMA_CHAN_SIZE			0x10
#define DMA_CHAN_TOUT			500

/* DMA Global Configuration register */
#define DMA_CFG_REG			0x00
#define DMA_CFG_ENABLE_SHIFT		0
#define DMA_CFG_ENABLE_MASK		(1 << DMA_CFG_ENABLE_SHIFT)
#define DMA_CFG_FLOWC_ENABLE(x)		BIT(DMA_CHAN_FLOWC(x) + 1)
#define DMA_CFG_NCHANS_SHIFT		24
#define DMA_CFG_NCHANS_MASK		(0xf << DMA_CFG_NCHANS_SHIFT)

/* DMA Global Flow Control registers */
#define DMA_FLOWC_THR_LO_REG(x)		(0x04 + DMA_CHAN_FLOWC(x) * 0x0c)
#define DMA_FLOWC_THR_HI_REG(x)		(0x08 + DMA_CHAN_FLOWC(x) * 0x0c)
#define DMA_FLOWC_ALLOC_REG(x)		(0x0c + DMA_CHAN_FLOWC(x) * 0x0c)
#define DMA_FLOWC_ALLOC_FORCE_SHIFT	31
#define DMA_FLOWC_ALLOC_FORCE_MASK	(1 << DMA_FLOWC_ALLOC_FORCE_SHIFT)

/* DMA Global Reset register */
#define DMA_RST_REG			0x34
#define DMA_RST_CHAN_SHIFT		0
/* fix: parenthesize the macro argument so expressions expand correctly */
#define DMA_RST_CHAN_MASK(x)		(1 << (x))

/* DMA Channel Configuration register */
#define DMAC_CFG_REG(x)			(DMA_CHAN_SIZE * (x) + 0x00)
#define DMAC_CFG_ENABLE_SHIFT		0
#define DMAC_CFG_ENABLE_MASK		(1 << DMAC_CFG_ENABLE_SHIFT)
#define DMAC_CFG_PKT_HALT_SHIFT		1
#define DMAC_CFG_PKT_HALT_MASK		(1 << DMAC_CFG_PKT_HALT_SHIFT)
#define DMAC_CFG_BRST_HALT_SHIFT	2
#define DMAC_CFG_BRST_HALT_MASK		(1 << DMAC_CFG_BRST_HALT_SHIFT)

/* DMA Channel Max Burst Length register */
#define DMAC_BURST_REG(x)		(DMA_CHAN_SIZE * (x) + 0x0c)

/* DMA SRAM Descriptor Ring Start register */
#define DMAS_RSTART_REG(x)		(DMA_CHAN_SIZE * (x) + 0x00)

/* DMA SRAM State/Bytes done/ring offset register */
#define DMAS_STATE_DATA_REG(x)		(DMA_CHAN_SIZE * (x) + 0x04)

/* DMA SRAM Buffer Descriptor status and length register */
#define DMAS_DESC_LEN_STATUS_REG(x)	(DMA_CHAN_SIZE * (x) + 0x08)

/* DMA SRAM Buffer Descriptor base buffer pointer register */
#define DMAS_DESC_BASE_BUFPTR_REG(x)	(DMA_CHAN_SIZE * (x) + 0x0c)

/* DMA Descriptor Status */
#define DMAD_ST_CRC_SHIFT		8
#define DMAD_ST_CRC_MASK		(1 << DMAD_ST_CRC_SHIFT)
#define DMAD_ST_WRAP_SHIFT		12
#define DMAD_ST_WRAP_MASK		(1 << DMAD_ST_WRAP_SHIFT)
#define DMAD_ST_SOP_SHIFT		13
#define DMAD_ST_SOP_MASK		(1 << DMAD_ST_SOP_SHIFT)
#define DMAD_ST_EOP_SHIFT		14
#define DMAD_ST_EOP_MASK		(1 << DMAD_ST_EOP_SHIFT)
#define DMAD_ST_OWN_SHIFT		15
#define DMAD_ST_OWN_MASK		(1 << DMAD_ST_OWN_SHIFT)

/* BCM6348-only rx error status bits (see bcm6348_hw.err_mask) */
#define DMAD6348_ST_OV_ERR_SHIFT	0
#define DMAD6348_ST_OV_ERR_MASK		(1 << DMAD6348_ST_OV_ERR_SHIFT)
#define DMAD6348_ST_CRC_ERR_SHIFT	1
#define DMAD6348_ST_CRC_ERR_MASK	(1 << DMAD6348_ST_CRC_ERR_SHIFT)
#define DMAD6348_ST_RX_ERR_SHIFT	2
#define DMAD6348_ST_RX_ERR_MASK		(1 << DMAD6348_ST_RX_ERR_SHIFT)
#define DMAD6348_ST_OS_ERR_SHIFT	4
#define DMAD6348_ST_OS_ERR_MASK		(1 << DMAD6348_ST_OS_ERR_SHIFT)
#define DMAD6348_ST_UN_ERR_SHIFT	9
#define DMAD6348_ST_UN_ERR_MASK		(1 << DMAD6348_ST_UN_ERR_SHIFT)
108 | ||
/* Hardware buffer descriptor layout shared with the IUDMA engine. */
struct bcm6348_dma_desc {
	uint16_t length;	/* buffer length in bytes */
	uint16_t status;	/* DMAD_ST_* flag bits */
	uint32_t address;	/* physical address of the data buffer */
};
114 | ||
/* Per-channel driver state. */
struct bcm6348_chan_priv {
	void __iomem *dma_ring;	/* cache-aligned descriptor ring */
	uint8_t dma_ring_size;	/* total descriptors in the ring */
	uint8_t desc_id;	/* index of next descriptor to process */
	uint8_t desc_cnt;	/* descriptors currently in use */
	bool *busy_desc;	/* per-descriptor busy flags (used on rx paths) */
	bool running;		/* set once the channel has been enabled */
};
123 | ||
/* SoC-specific parameters selected via compatible string match data. */
struct bcm6348_iudma_hw {
	uint16_t err_mask;	/* rx descriptor status bits that mark a bad packet */
};
127 | ||
/* Controller-wide driver state. */
struct bcm6348_iudma_priv {
	const struct bcm6348_iudma_hw *hw;	/* SoC-specific data */
	void __iomem *base;	/* global DMA configuration registers */
	void __iomem *chan;	/* per-channel control registers */
	void __iomem *sram;	/* per-channel SRAM state registers */
	struct bcm6348_chan_priv **ch_priv;	/* indexed by channel id */
	uint8_t n_channels;	/* channel count from device tree */
};
136 | ||
/* Even channel ids are rx channels, odd ids are tx channels. */
static inline bool bcm6348_iudma_chan_is_rx(uint8_t ch)
{
	return (ch % 2) == 0;
}
141 | ||
142 | static inline void bcm6348_iudma_fdc(void *ptr, ulong size) | |
143 | { | |
144 | ulong start = (ulong) ptr; | |
145 | ||
146 | flush_dcache_range(start, start + size); | |
147 | } | |
148 | ||
149 | static inline void bcm6348_iudma_idc(void *ptr, ulong size) | |
150 | { | |
151 | ulong start = (ulong) ptr; | |
152 | ||
153 | invalidate_dcache_range(start, start + size); | |
154 | } | |
155 | ||
/*
 * Halt and then hard-reset one DMA channel.
 *
 * Polls up to DMA_CHAN_TOUT iterations for the channel's enable bit to
 * clear; the channel is reset afterwards regardless of the poll outcome.
 */
static void bcm6348_iudma_chan_stop(struct bcm6348_iudma_priv *priv,
				    uint8_t ch)
{
	unsigned int timeout = DMA_CHAN_TOUT;

	do {
		uint32_t cfg, halt;

		/*
		 * First half of the timeout budget: request a gentle halt at
		 * a packet boundary; second half: escalate to halting at a
		 * burst boundary.
		 */
		if (timeout > DMA_CHAN_TOUT / 2)
			halt = DMAC_CFG_PKT_HALT_MASK;
		else
			halt = DMAC_CFG_BRST_HALT_MASK;

		/* try to stop dma channel */
		writel_be(halt, priv->chan + DMAC_CFG_REG(ch));
		mb();

		/* check if channel was stopped */
		cfg = readl_be(priv->chan + DMAC_CFG_REG(ch));
		if (!(cfg & DMAC_CFG_ENABLE_MASK))
			break;

		udelay(1);
	} while (--timeout);

	if (!timeout)
		pr_err("unable to stop channel %u\n", ch);

	/* reset dma channel (pulse the per-channel reset bit) */
	setbits_be32(priv->base + DMA_RST_REG, DMA_RST_CHAN_MASK(ch));
	mb();
	clrbits_be32(priv->base + DMA_RST_REG, DMA_RST_CHAN_MASK(ch));
}
189 | ||
190 | static int bcm6348_iudma_disable(struct dma *dma) | |
191 | { | |
192 | struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev); | |
193 | struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id]; | |
194 | ||
195 | /* stop dma channel */ | |
196 | bcm6348_iudma_chan_stop(priv, dma->id); | |
197 | ||
198 | /* dma flow control */ | |
199 | if (bcm6348_iudma_chan_is_rx(dma->id)) | |
200 | writel_be(DMA_FLOWC_ALLOC_FORCE_MASK, | |
201 | DMA_FLOWC_ALLOC_REG(dma->id)); | |
202 | ||
203 | /* init channel config */ | |
204 | ch_priv->running = false; | |
205 | ch_priv->desc_id = 0; | |
206 | if (bcm6348_iudma_chan_is_rx(dma->id)) | |
207 | ch_priv->desc_cnt = 0; | |
208 | else | |
209 | ch_priv->desc_cnt = ch_priv->dma_ring_size; | |
210 | ||
211 | return 0; | |
212 | } | |
213 | ||
/*
 * DMA uclass .enable hook: initialize the descriptor ring, program the
 * SRAM/flow-control registers and (for rx) kick the channel.
 *
 * Returns 0.
 */
static int bcm6348_iudma_enable(struct dma *dma)
{
	const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
	struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];
	struct bcm6348_dma_desc *dma_desc = ch_priv->dma_ring;
	uint8_t i;

	/* dma ring init */
	for (i = 0; i < ch_priv->desc_cnt; i++) {
		if (bcm6348_iudma_chan_is_rx(dma->id)) {
			/* rx: hand the descriptor (and its buffer) to the hw */
			ch_priv->busy_desc[i] = false;
			dma_desc->status |= DMAD_ST_OWN_MASK;
		} else {
			/* tx: descriptors are filled at send time */
			dma_desc->status = 0;
			dma_desc->length = 0;
			dma_desc->address = 0;
		}

		/* last descriptor wraps the ring back to its start */
		if (i == ch_priv->desc_cnt - 1)
			dma_desc->status |= DMAD_ST_WRAP_MASK;

		dma_desc++;
	}

	/* init to first descriptor */
	ch_priv->desc_id = 0;

	/* force cache writeback so the hw sees the initialized ring */
	bcm6348_iudma_fdc(ch_priv->dma_ring,
			  sizeof(*dma_desc) * ch_priv->desc_cnt);

	/* clear sram */
	writel_be(0, priv->sram + DMAS_STATE_DATA_REG(dma->id));
	writel_be(0, priv->sram + DMAS_DESC_LEN_STATUS_REG(dma->id));
	writel_be(0, priv->sram + DMAS_DESC_BASE_BUFPTR_REG(dma->id));

	/* set dma ring start (physical address) */
	writel_be(virt_to_phys(ch_priv->dma_ring),
		  priv->sram + DMAS_RSTART_REG(dma->id));

	/* set flow control: low threshold at 1/3 of the ring, high at 2/3 */
	if (bcm6348_iudma_chan_is_rx(dma->id)) {
		u32 val;

		setbits_be32(priv->base + DMA_CFG_REG,
			     DMA_CFG_FLOWC_ENABLE(dma->id));

		val = ch_priv->desc_cnt / 3;
		writel_be(val, priv->base + DMA_FLOWC_THR_LO_REG(dma->id));

		val = (ch_priv->desc_cnt * 2) / 3;
		writel_be(val, priv->base + DMA_FLOWC_THR_HI_REG(dma->id));

		writel_be(0, priv->base + DMA_FLOWC_ALLOC_REG(dma->id));
	}

	/* set dma max burst */
	writel_be(ch_priv->desc_cnt,
		  priv->chan + DMAC_BURST_REG(dma->id));

	/* kick rx dma channel (tx channels are kicked per-send) */
	if (bcm6348_iudma_chan_is_rx(dma->id))
		setbits_be32(priv->chan + DMAC_CFG_REG(dma->id),
			     DMAC_CFG_ENABLE_MASK);

	/* channel is now enabled */
	ch_priv->running = true;

	return 0;
}
284 | ||
285 | static int bcm6348_iudma_request(struct dma *dma) | |
286 | { | |
287 | const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev); | |
288 | struct bcm6348_chan_priv *ch_priv; | |
289 | ||
290 | /* check if channel is valid */ | |
291 | if (dma->id >= priv->n_channels) | |
292 | return -ENODEV; | |
293 | ||
294 | /* alloc channel private data */ | |
295 | priv->ch_priv[dma->id] = calloc(1, sizeof(struct bcm6348_chan_priv)); | |
296 | if (!priv->ch_priv[dma->id]) | |
297 | return -ENOMEM; | |
298 | ch_priv = priv->ch_priv[dma->id]; | |
299 | ||
300 | /* alloc dma ring */ | |
301 | if (bcm6348_iudma_chan_is_rx(dma->id)) | |
302 | ch_priv->dma_ring_size = DMA_RX_DESC; | |
303 | else | |
304 | ch_priv->dma_ring_size = DMA_TX_DESC; | |
305 | ||
306 | ch_priv->dma_ring = | |
307 | malloc_cache_aligned(sizeof(struct bcm6348_dma_desc) * | |
308 | ch_priv->dma_ring_size); | |
309 | if (!ch_priv->dma_ring) | |
310 | return -ENOMEM; | |
311 | ||
312 | /* init channel config */ | |
313 | ch_priv->running = false; | |
314 | ch_priv->desc_id = 0; | |
315 | if (bcm6348_iudma_chan_is_rx(dma->id)) { | |
316 | ch_priv->desc_cnt = 0; | |
4908067b | 317 | ch_priv->busy_desc = NULL; |
ccfd6988 ÁFR |
318 | } else { |
319 | ch_priv->desc_cnt = ch_priv->dma_ring_size; | |
4908067b | 320 | ch_priv->busy_desc = calloc(ch_priv->desc_cnt, sizeof(bool)); |
ccfd6988 ÁFR |
321 | } |
322 | ||
323 | return 0; | |
324 | } | |
325 | ||
326 | static int bcm6348_iudma_receive(struct dma *dma, void **dst, void *metadata) | |
327 | { | |
328 | const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev); | |
329 | const struct bcm6348_iudma_hw *hw = priv->hw; | |
330 | struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id]; | |
331 | struct bcm6348_dma_desc *dma_desc = dma_desc = ch_priv->dma_ring; | |
332 | int ret; | |
333 | ||
e4f907e9 ÁFR |
334 | if (!ch_priv->running) |
335 | return -EINVAL; | |
336 | ||
ccfd6988 ÁFR |
337 | /* get dma ring descriptor address */ |
338 | dma_desc += ch_priv->desc_id; | |
339 | ||
340 | /* invalidate cache data */ | |
341 | bcm6348_iudma_idc(dma_desc, sizeof(*dma_desc)); | |
342 | ||
343 | /* check dma own */ | |
344 | if (dma_desc->status & DMAD_ST_OWN_MASK) | |
345 | return -EAGAIN; | |
346 | ||
347 | /* check pkt */ | |
348 | if (!(dma_desc->status & DMAD_ST_EOP_MASK) || | |
349 | !(dma_desc->status & DMAD_ST_SOP_MASK) || | |
350 | (dma_desc->status & hw->err_mask)) { | |
351 | pr_err("invalid pkt received (ch=%ld desc=%u) (st=%04x)\n", | |
352 | dma->id, ch_priv->desc_id, dma_desc->status); | |
353 | ret = -EAGAIN; | |
354 | } else { | |
355 | /* set dma buffer address */ | |
356 | *dst = phys_to_virt(dma_desc->address); | |
357 | ||
358 | /* invalidate cache data */ | |
359 | bcm6348_iudma_idc(*dst, dma_desc->length); | |
360 | ||
361 | /* return packet length */ | |
362 | ret = dma_desc->length; | |
363 | } | |
364 | ||
365 | /* busy dma descriptor */ | |
366 | ch_priv->busy_desc[ch_priv->desc_id] = true; | |
367 | ||
368 | /* increment dma descriptor */ | |
369 | ch_priv->desc_id = (ch_priv->desc_id + 1) % ch_priv->desc_cnt; | |
370 | ||
371 | return ret; | |
372 | } | |
373 | ||
/*
 * DMA uclass .send hook: hand one buffer to the tx channel and busy-wait
 * until the hardware releases the descriptor.
 *
 * Returns 0 on success or -EINVAL if the channel is not running.
 */
static int bcm6348_iudma_send(struct dma *dma, void *src, size_t len,
			      void *metadata)
{
	const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
	struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];
	struct bcm6348_dma_desc *dma_desc;
	uint16_t status;

	if (!ch_priv->running)
		return -EINVAL;

	/* flush cache so the hw reads the payload from memory */
	bcm6348_iudma_fdc(src, len);

	/* get dma ring descriptor address */
	dma_desc = ch_priv->dma_ring;
	dma_desc += ch_priv->desc_id;

	/* config dma descriptor: whole packet in one descriptor, hw-owned */
	status = (DMAD_ST_OWN_MASK |
		  DMAD_ST_EOP_MASK |
		  DMAD_ST_CRC_MASK |
		  DMAD_ST_SOP_MASK);
	if (ch_priv->desc_id == ch_priv->desc_cnt - 1)
		status |= DMAD_ST_WRAP_MASK;

	/* set dma descriptor */
	dma_desc->address = virt_to_phys(src);
	dma_desc->length = len;
	dma_desc->status = status;

	/* flush cache so the hw sees the descriptor */
	bcm6348_iudma_fdc(dma_desc, sizeof(*dma_desc));

	/* kick tx dma channel */
	setbits_be32(priv->chan + DMAC_CFG_REG(dma->id), DMAC_CFG_ENABLE_MASK);

	/*
	 * Poll until the hardware clears the OWN bit.
	 * NOTE(review): this loop has no timeout — it hangs forever if the
	 * hardware never completes; consider bounding it like chan_stop().
	 */
	do {
		/* invalidate cache to re-read the descriptor from memory */
		bcm6348_iudma_idc(dma_desc, sizeof(*dma_desc));

		if (!(dma_desc->status & DMAD_ST_OWN_MASK))
			break;
	} while(1);

	/* increment dma descriptor */
	ch_priv->desc_id = (ch_priv->desc_id + 1) % ch_priv->desc_cnt;

	return 0;
}
425 | ||
426 | static int bcm6348_iudma_free_rcv_buf(struct dma *dma, void *dst, size_t size) | |
427 | { | |
428 | const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev); | |
429 | struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id]; | |
430 | struct bcm6348_dma_desc *dma_desc = ch_priv->dma_ring; | |
431 | uint16_t status; | |
432 | uint8_t i; | |
433 | u32 cfg; | |
434 | ||
435 | /* get dirty dma descriptor */ | |
436 | for (i = 0; i < ch_priv->desc_cnt; i++) { | |
437 | if (phys_to_virt(dma_desc->address) == dst) | |
438 | break; | |
439 | ||
440 | dma_desc++; | |
441 | } | |
442 | ||
443 | /* dma descriptor not found */ | |
444 | if (i == ch_priv->desc_cnt) { | |
445 | pr_err("dirty dma descriptor not found\n"); | |
446 | return -ENOENT; | |
447 | } | |
448 | ||
449 | /* invalidate cache */ | |
450 | bcm6348_iudma_idc(ch_priv->dma_ring, | |
451 | sizeof(*dma_desc) * ch_priv->desc_cnt); | |
452 | ||
453 | /* free dma descriptor */ | |
454 | ch_priv->busy_desc[i] = false; | |
455 | ||
456 | status = DMAD_ST_OWN_MASK; | |
457 | if (i == ch_priv->desc_cnt - 1) | |
458 | status |= DMAD_ST_WRAP_MASK; | |
459 | ||
460 | dma_desc->status |= status; | |
461 | dma_desc->length = PKTSIZE_ALIGN; | |
462 | ||
463 | /* tell dma we allocated one buffer */ | |
464 | writel_be(1, DMA_FLOWC_ALLOC_REG(dma->id)); | |
465 | ||
466 | /* flush cache */ | |
467 | bcm6348_iudma_fdc(ch_priv->dma_ring, | |
468 | sizeof(*dma_desc) * ch_priv->desc_cnt); | |
469 | ||
470 | /* kick rx dma channel if disabled */ | |
471 | cfg = readl_be(priv->chan + DMAC_CFG_REG(dma->id)); | |
472 | if (!(cfg & DMAC_CFG_ENABLE_MASK)) | |
473 | setbits_be32(priv->chan + DMAC_CFG_REG(dma->id), | |
474 | DMAC_CFG_ENABLE_MASK); | |
475 | ||
476 | return 0; | |
477 | } | |
478 | ||
479 | static int bcm6348_iudma_add_rcv_buf(struct dma *dma, void *dst, size_t size) | |
480 | { | |
481 | const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev); | |
482 | struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id]; | |
483 | struct bcm6348_dma_desc *dma_desc = ch_priv->dma_ring; | |
484 | ||
485 | /* no more dma descriptors available */ | |
486 | if (ch_priv->desc_cnt == ch_priv->dma_ring_size) { | |
487 | pr_err("max number of buffers reached\n"); | |
488 | return -EINVAL; | |
489 | } | |
490 | ||
491 | /* get next dma descriptor */ | |
492 | dma_desc += ch_priv->desc_cnt; | |
493 | ||
494 | /* init dma descriptor */ | |
495 | dma_desc->address = virt_to_phys(dst); | |
496 | dma_desc->length = size; | |
497 | dma_desc->status = 0; | |
498 | ||
499 | /* flush cache */ | |
500 | bcm6348_iudma_fdc(dma_desc, sizeof(*dma_desc)); | |
501 | ||
502 | /* increment dma descriptors */ | |
503 | ch_priv->desc_cnt++; | |
504 | ||
505 | return 0; | |
506 | } | |
507 | ||
508 | static int bcm6348_iudma_prepare_rcv_buf(struct dma *dma, void *dst, | |
509 | size_t size) | |
510 | { | |
511 | const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev); | |
512 | struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id]; | |
513 | ||
514 | /* only add new rx buffers if channel isn't running */ | |
515 | if (ch_priv->running) | |
516 | return bcm6348_iudma_free_rcv_buf(dma, dst, size); | |
517 | else | |
518 | return bcm6348_iudma_add_rcv_buf(dma, dst, size); | |
519 | } | |
520 | ||
/* DMA uclass operations implemented by this driver. */
static const struct dma_ops bcm6348_iudma_ops = {
	.disable = bcm6348_iudma_disable,
	.enable = bcm6348_iudma_enable,
	.prepare_rcv_buf = bcm6348_iudma_prepare_rcv_buf,
	.request = bcm6348_iudma_request,
	.receive = bcm6348_iudma_receive,
	.send = bcm6348_iudma_send,
};
529 | ||
/* BCM6348: rx descriptors report errors via dedicated status bits. */
static const struct bcm6348_iudma_hw bcm6348_hw = {
	.err_mask = (DMAD6348_ST_OV_ERR_MASK |
		     DMAD6348_ST_CRC_ERR_MASK |
		     DMAD6348_ST_RX_ERR_MASK |
		     DMAD6348_ST_OS_ERR_MASK |
		     DMAD6348_ST_UN_ERR_MASK),
};
537 | ||
/* BCM6368: no per-descriptor error bits to check. */
static const struct bcm6348_iudma_hw bcm6368_hw = {
	.err_mask = 0,
};
541 | ||
/* Device tree compatibles, each carrying its SoC-specific data. */
static const struct udevice_id bcm6348_iudma_ids[] = {
	{
		.compatible = "brcm,bcm6348-iudma",
		.data = (ulong)&bcm6348_hw,
	}, {
		.compatible = "brcm,bcm6368-iudma",
		.data = (ulong)&bcm6368_hw,
	}, { /* sentinel */ }
};
551 | ||
/*
 * Driver probe: map register regions, enable clocks, deassert resets,
 * stop every channel and enable the controller.
 *
 * Returns 0 on success or a negative error code.
 */
static int bcm6348_iudma_probe(struct udevice *dev)
{
	struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
	struct bcm6348_iudma_priv *priv = dev_get_priv(dev);
	const struct bcm6348_iudma_hw *hw =
		(const struct bcm6348_iudma_hw *)dev_get_driver_data(dev);
	uint8_t ch;
	int i;

	/* advertise both directions to the DMA uclass */
	uc_priv->supported = (DMA_SUPPORTS_DEV_TO_MEM |
			      DMA_SUPPORTS_MEM_TO_DEV);
	priv->hw = hw;

	/* dma global base address */
	priv->base = dev_remap_addr_name(dev, "dma");
	if (!priv->base)
		return -EINVAL;

	/* dma channels base address */
	priv->chan = dev_remap_addr_name(dev, "dma-channels");
	if (!priv->chan)
		return -EINVAL;

	/* dma sram base address */
	priv->sram = dev_remap_addr_name(dev, "dma-sram");
	if (!priv->sram)
		return -EINVAL;

	/* get number of channels (default 8, bounded by DMA_CHAN_MAX) */
	priv->n_channels = dev_read_u32_default(dev, "dma-channels", 8);
	if (priv->n_channels > DMA_CHAN_MAX)
		return -EINVAL;

	/* try to enable clocks; iterate until the DT list is exhausted */
	for (i = 0; ; i++) {
		struct clk clk;
		int ret;

		ret = clk_get_by_index(dev, i, &clk);
		if (ret < 0)
			break;

		ret = clk_enable(&clk);
		if (ret < 0) {
			pr_err("error enabling clock %d\n", i);
			return ret;
		}
	}

	/* try to perform resets; iterate until the DT list is exhausted */
	for (i = 0; ; i++) {
		struct reset_ctl reset;
		int ret;

		ret = reset_get_by_index(dev, i, &reset);
		if (ret < 0)
			break;

		ret = reset_deassert(&reset);
		if (ret < 0) {
			pr_err("error deasserting reset %d\n", i);
			return ret;
		}

		ret = reset_free(&reset);
		if (ret < 0) {
			pr_err("error freeing reset %d\n", i);
			return ret;
		}
	}

	/* disable dma controller while the channels are reset */
	clrbits_be32(priv->base + DMA_CFG_REG, DMA_CFG_ENABLE_MASK);

	/* alloc channel private data pointers (filled in by .request) */
	priv->ch_priv = calloc(priv->n_channels,
			       sizeof(struct bcm6348_chan_priv*));
	if (!priv->ch_priv)
		return -ENOMEM;

	/* stop dma channels */
	for (ch = 0; ch < priv->n_channels; ch++)
		bcm6348_iudma_chan_stop(priv, ch);

	/* enable dma controller */
	setbits_be32(priv->base + DMA_CFG_REG, DMA_CFG_ENABLE_MASK);

	return 0;
}
641 | ||
/* Driver model registration for the BCM6348 IUDMA controller. */
U_BOOT_DRIVER(bcm6348_iudma) = {
	.name = "bcm6348_iudma",
	.id = UCLASS_DMA,
	.of_match = bcm6348_iudma_ids,
	.ops = &bcm6348_iudma_ops,
	.priv_auto = sizeof(struct bcm6348_iudma_priv),
	.probe = bcm6348_iudma_probe,
};