]>
Commit | Line | Data |
---|---|---|
ccfd6988 ÁFR |
1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | /* | |
3 | * Copyright (C) 2018 Álvaro Fernández Rojas <noltari@gmail.com> | |
4 | * | |
5 | * Derived from linux/drivers/dma/bcm63xx-iudma.c: | |
6 | * Copyright (C) 2015 Simon Arlott <simon@fire.lp0.eu> | |
7 | * | |
8 | * Derived from linux/drivers/net/ethernet/broadcom/bcm63xx_enet.c: | |
9 | * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr> | |
10 | * | |
11 | * Derived from bcm963xx_4.12L.06B_consumer/shared/opensource/include/bcm963xx/63268_map_part.h: | |
12 | * Copyright (C) 2000-2010 Broadcom Corporation | |
13 | * | |
14 | * Derived from bcm963xx_4.12L.06B_consumer/bcmdrivers/opensource/net/enet/impl4/bcmenet.c: | |
15 | * Copyright (C) 2010 Broadcom Corporation | |
16 | */ | |
17 | ||
18 | #include <common.h> | |
19 | #include <clk.h> | |
1eb69ae4 | 20 | #include <cpu_func.h> |
ccfd6988 ÁFR |
21 | #include <dm.h> |
22 | #include <dma-uclass.h> | |
336d4615 | 23 | #include <malloc.h> |
ccfd6988 | 24 | #include <memalign.h> |
90526e9f | 25 | #include <net.h> |
ccfd6988 ÁFR |
26 | #include <reset.h> |
27 | #include <asm/io.h> | |
28 | ||
/* number of rx/tx descriptors in each dma ring */
#define DMA_RX_DESC			6
#define DMA_TX_DESC			1

/* DMA Channels (even = rx, odd = tx; a flow control slot covers a pair) */
#define DMA_CHAN_FLOWC(x)		((x) >> 1)
#define DMA_CHAN_MAX			16
#define DMA_CHAN_SIZE			0x10
#define DMA_CHAN_TOUT			500

/* DMA Global Configuration register */
#define DMA_CFG_REG			0x00
#define DMA_CFG_ENABLE_SHIFT		0
#define DMA_CFG_ENABLE_MASK		(1 << DMA_CFG_ENABLE_SHIFT)
#define DMA_CFG_FLOWC_ENABLE(x)		BIT(DMA_CHAN_FLOWC(x) + 1)
#define DMA_CFG_NCHANS_SHIFT		24
#define DMA_CFG_NCHANS_MASK		(0xf << DMA_CFG_NCHANS_SHIFT)

/* DMA Global Flow Control registers */
#define DMA_FLOWC_THR_LO_REG(x)		(0x04 + DMA_CHAN_FLOWC(x) * 0x0c)
#define DMA_FLOWC_THR_HI_REG(x)		(0x08 + DMA_CHAN_FLOWC(x) * 0x0c)
#define DMA_FLOWC_ALLOC_REG(x)		(0x0c + DMA_CHAN_FLOWC(x) * 0x0c)
#define DMA_FLOWC_ALLOC_FORCE_SHIFT	31
/* 1U: left-shifting a signed 1 by 31 is undefined behavior */
#define DMA_FLOWC_ALLOC_FORCE_MASK	(1U << DMA_FLOWC_ALLOC_FORCE_SHIFT)

/* DMA Global Reset register */
#define DMA_RST_REG			0x34
#define DMA_RST_CHAN_SHIFT		0
/* parenthesize the argument so non-trivial expressions expand correctly */
#define DMA_RST_CHAN_MASK(x)		(1 << (x))

/* DMA Channel Configuration register */
#define DMAC_CFG_REG(x)			(DMA_CHAN_SIZE * (x) + 0x00)
#define DMAC_CFG_ENABLE_SHIFT		0
#define DMAC_CFG_ENABLE_MASK		(1 << DMAC_CFG_ENABLE_SHIFT)
#define DMAC_CFG_PKT_HALT_SHIFT		1
#define DMAC_CFG_PKT_HALT_MASK		(1 << DMAC_CFG_PKT_HALT_SHIFT)
#define DMAC_CFG_BRST_HALT_SHIFT	2
#define DMAC_CFG_BRST_HALT_MASK		(1 << DMAC_CFG_BRST_HALT_SHIFT)

/* DMA Channel Max Burst Length register */
#define DMAC_BURST_REG(x)		(DMA_CHAN_SIZE * (x) + 0x0c)

/* DMA SRAM Descriptor Ring Start register */
#define DMAS_RSTART_REG(x)		(DMA_CHAN_SIZE * (x) + 0x00)

/* DMA SRAM State/Bytes done/ring offset register */
#define DMAS_STATE_DATA_REG(x)		(DMA_CHAN_SIZE * (x) + 0x04)

/* DMA SRAM Buffer Descriptor status and length register */
#define DMAS_DESC_LEN_STATUS_REG(x)	(DMA_CHAN_SIZE * (x) + 0x08)

/* DMA SRAM Buffer Descriptor base buffer pointer register */
#define DMAS_DESC_BASE_BUFPTR_REG(x)	(DMA_CHAN_SIZE * (x) + 0x0c)

/* DMA Descriptor Status (generic DMAD_ST_* bits in desc->status) */
#define DMAD_ST_CRC_SHIFT		8
#define DMAD_ST_CRC_MASK		(1 << DMAD_ST_CRC_SHIFT)
#define DMAD_ST_WRAP_SHIFT		12
#define DMAD_ST_WRAP_MASK		(1 << DMAD_ST_WRAP_SHIFT)
#define DMAD_ST_SOP_SHIFT		13
#define DMAD_ST_SOP_MASK		(1 << DMAD_ST_SOP_SHIFT)
#define DMAD_ST_EOP_SHIFT		14
#define DMAD_ST_EOP_MASK		(1 << DMAD_ST_EOP_SHIFT)
#define DMAD_ST_OWN_SHIFT		15
#define DMAD_ST_OWN_MASK		(1 << DMAD_ST_OWN_SHIFT)

/* bcm6348-specific rx error bits in desc->status */
#define DMAD6348_ST_OV_ERR_SHIFT	0
#define DMAD6348_ST_OV_ERR_MASK		(1 << DMAD6348_ST_OV_ERR_SHIFT)
#define DMAD6348_ST_CRC_ERR_SHIFT	1
#define DMAD6348_ST_CRC_ERR_MASK	(1 << DMAD6348_ST_CRC_ERR_SHIFT)
#define DMAD6348_ST_RX_ERR_SHIFT	2
#define DMAD6348_ST_RX_ERR_MASK		(1 << DMAD6348_ST_RX_ERR_SHIFT)
#define DMAD6348_ST_OS_ERR_SHIFT	4
#define DMAD6348_ST_OS_ERR_MASK		(1 << DMAD6348_ST_OS_ERR_SHIFT)
#define DMAD6348_ST_UN_ERR_SHIFT	9
#define DMAD6348_ST_UN_ERR_MASK		(1 << DMAD6348_ST_UN_ERR_SHIFT)
/*
 * Hardware buffer descriptor as read/written by the iudma engine.
 * Layout must match the hardware exactly; do not reorder fields.
 */
struct bcm6348_dma_desc {
	uint16_t length;	/* buffer length in bytes */
	uint16_t status;	/* DMAD_ST_* / DMAD6348_ST_* flags */
	uint32_t address;	/* physical address of the data buffer */
};
110 | ||
/* Per-channel driver state. */
struct bcm6348_chan_priv {
	void __iomem *dma_ring;	/* cache-aligned descriptor ring */
	uint8_t dma_ring_size;	/* total descriptors in the ring */
	uint8_t desc_id;	/* next descriptor index to process */
	uint8_t desc_cnt;	/* descriptors currently populated */
	bool *busy_desc;	/* rx only: per-descriptor "awaiting recycle" flags */
	bool running;		/* set once the channel has been enabled */
};
119 | ||
/* Per-SoC match data. */
struct bcm6348_iudma_hw {
	uint16_t err_mask;	/* rx descriptor status bits treated as errors */
};
123 | ||
/* Controller-wide driver state. */
struct bcm6348_iudma_priv {
	const struct bcm6348_iudma_hw *hw;	/* SoC quirks from match data */
	void __iomem *base;	/* global dma register block ("dma") */
	void __iomem *chan;	/* per-channel registers ("dma-channels") */
	void __iomem *sram;	/* per-channel sram state ("dma-sram") */
	struct bcm6348_chan_priv **ch_priv;	/* lazily allocated channel state */
	uint8_t n_channels;	/* number of hardware channels */
};
132 | ||
/* Even channel ids are rx, odd ids are tx. */
static inline bool bcm6348_iudma_chan_is_rx(uint8_t ch)
{
	return (ch & 1) == 0;
}
137 | ||
138 | static inline void bcm6348_iudma_fdc(void *ptr, ulong size) | |
139 | { | |
140 | ulong start = (ulong) ptr; | |
141 | ||
142 | flush_dcache_range(start, start + size); | |
143 | } | |
144 | ||
145 | static inline void bcm6348_iudma_idc(void *ptr, ulong size) | |
146 | { | |
147 | ulong start = (ulong) ptr; | |
148 | ||
149 | invalidate_dcache_range(start, start + size); | |
150 | } | |
151 | ||
152 | static void bcm6348_iudma_chan_stop(struct bcm6348_iudma_priv *priv, | |
153 | uint8_t ch) | |
154 | { | |
155 | unsigned int timeout = DMA_CHAN_TOUT; | |
156 | ||
157 | do { | |
158 | uint32_t cfg, halt; | |
159 | ||
160 | if (timeout > DMA_CHAN_TOUT / 2) | |
161 | halt = DMAC_CFG_PKT_HALT_MASK; | |
162 | else | |
163 | halt = DMAC_CFG_BRST_HALT_MASK; | |
164 | ||
165 | /* try to stop dma channel */ | |
166 | writel_be(halt, priv->chan + DMAC_CFG_REG(ch)); | |
167 | mb(); | |
168 | ||
169 | /* check if channel was stopped */ | |
170 | cfg = readl_be(priv->chan + DMAC_CFG_REG(ch)); | |
171 | if (!(cfg & DMAC_CFG_ENABLE_MASK)) | |
172 | break; | |
173 | ||
174 | udelay(1); | |
175 | } while (--timeout); | |
176 | ||
177 | if (!timeout) | |
178 | pr_err("unable to stop channel %u\n", ch); | |
179 | ||
180 | /* reset dma channel */ | |
181 | setbits_be32(priv->base + DMA_RST_REG, DMA_RST_CHAN_MASK(ch)); | |
182 | mb(); | |
183 | clrbits_be32(priv->base + DMA_RST_REG, DMA_RST_CHAN_MASK(ch)); | |
184 | } | |
185 | ||
186 | static int bcm6348_iudma_disable(struct dma *dma) | |
187 | { | |
188 | struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev); | |
189 | struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id]; | |
190 | ||
191 | /* stop dma channel */ | |
192 | bcm6348_iudma_chan_stop(priv, dma->id); | |
193 | ||
194 | /* dma flow control */ | |
195 | if (bcm6348_iudma_chan_is_rx(dma->id)) | |
196 | writel_be(DMA_FLOWC_ALLOC_FORCE_MASK, | |
197 | DMA_FLOWC_ALLOC_REG(dma->id)); | |
198 | ||
199 | /* init channel config */ | |
200 | ch_priv->running = false; | |
201 | ch_priv->desc_id = 0; | |
202 | if (bcm6348_iudma_chan_is_rx(dma->id)) | |
203 | ch_priv->desc_cnt = 0; | |
204 | else | |
205 | ch_priv->desc_cnt = ch_priv->dma_ring_size; | |
206 | ||
207 | return 0; | |
208 | } | |
209 | ||
/*
 * Enable a dma channel: initialize the descriptor ring, clear the
 * channel's sram state, program the ring base and (for rx) the flow
 * control thresholds, then start rx dma.
 *
 * @dma: dma channel handle
 * Return: 0 on success
 */
static int bcm6348_iudma_enable(struct dma *dma)
{
	const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
	struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];
	struct bcm6348_dma_desc *dma_desc = ch_priv->dma_ring;
	uint8_t i;

	/* dma ring init */
	for (i = 0; i < ch_priv->desc_cnt; i++) {
		if (bcm6348_iudma_chan_is_rx(dma->id)) {
			ch_priv->busy_desc[i] = false;
			/* hand the rx descriptor to the hardware */
			dma_desc->status |= DMAD_ST_OWN_MASK;
		} else {
			dma_desc->status = 0;
			dma_desc->length = 0;
			dma_desc->address = 0;
		}

		/* last descriptor wraps back to the ring start */
		if (i == ch_priv->desc_cnt - 1)
			dma_desc->status |= DMAD_ST_WRAP_MASK;

		dma_desc++;
	}

	/* init to first descriptor */
	ch_priv->desc_id = 0;

	/* force cache writeback so the engine sees the initialized ring */
	bcm6348_iudma_fdc(ch_priv->dma_ring,
			  sizeof(*dma_desc) * ch_priv->desc_cnt);

	/* clear sram */
	writel_be(0, priv->sram + DMAS_STATE_DATA_REG(dma->id));
	writel_be(0, priv->sram + DMAS_DESC_LEN_STATUS_REG(dma->id));
	writel_be(0, priv->sram + DMAS_DESC_BASE_BUFPTR_REG(dma->id));

	/* set dma ring start */
	writel_be(virt_to_phys(ch_priv->dma_ring),
		  priv->sram + DMAS_RSTART_REG(dma->id));

	/* set flow control: thresholds at 1/3 and 2/3 of the ring */
	if (bcm6348_iudma_chan_is_rx(dma->id)) {
		u32 val;

		setbits_be32(priv->base + DMA_CFG_REG,
			     DMA_CFG_FLOWC_ENABLE(dma->id));

		val = ch_priv->desc_cnt / 3;
		writel_be(val, priv->base + DMA_FLOWC_THR_LO_REG(dma->id));

		val = (ch_priv->desc_cnt * 2) / 3;
		writel_be(val, priv->base + DMA_FLOWC_THR_HI_REG(dma->id));

		writel_be(0, priv->base + DMA_FLOWC_ALLOC_REG(dma->id));
	}

	/* set dma max burst */
	writel_be(ch_priv->desc_cnt,
		  priv->chan + DMAC_BURST_REG(dma->id));

	/* kick rx dma channel (tx channels are kicked per-packet in send) */
	if (bcm6348_iudma_chan_is_rx(dma->id))
		setbits_be32(priv->chan + DMAC_CFG_REG(dma->id),
			     DMAC_CFG_ENABLE_MASK);

	/* channel is now enabled */
	ch_priv->running = true;

	return 0;
}
280 | ||
281 | static int bcm6348_iudma_request(struct dma *dma) | |
282 | { | |
283 | const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev); | |
284 | struct bcm6348_chan_priv *ch_priv; | |
285 | ||
286 | /* check if channel is valid */ | |
287 | if (dma->id >= priv->n_channels) | |
288 | return -ENODEV; | |
289 | ||
290 | /* alloc channel private data */ | |
291 | priv->ch_priv[dma->id] = calloc(1, sizeof(struct bcm6348_chan_priv)); | |
292 | if (!priv->ch_priv[dma->id]) | |
293 | return -ENOMEM; | |
294 | ch_priv = priv->ch_priv[dma->id]; | |
295 | ||
296 | /* alloc dma ring */ | |
297 | if (bcm6348_iudma_chan_is_rx(dma->id)) | |
298 | ch_priv->dma_ring_size = DMA_RX_DESC; | |
299 | else | |
300 | ch_priv->dma_ring_size = DMA_TX_DESC; | |
301 | ||
302 | ch_priv->dma_ring = | |
303 | malloc_cache_aligned(sizeof(struct bcm6348_dma_desc) * | |
304 | ch_priv->dma_ring_size); | |
305 | if (!ch_priv->dma_ring) | |
306 | return -ENOMEM; | |
307 | ||
308 | /* init channel config */ | |
309 | ch_priv->running = false; | |
310 | ch_priv->desc_id = 0; | |
311 | if (bcm6348_iudma_chan_is_rx(dma->id)) { | |
312 | ch_priv->desc_cnt = 0; | |
313 | ch_priv->busy_desc = calloc(ch_priv->desc_cnt, sizeof(bool)); | |
314 | } else { | |
315 | ch_priv->desc_cnt = ch_priv->dma_ring_size; | |
316 | ch_priv->busy_desc = NULL; | |
317 | } | |
318 | ||
319 | return 0; | |
320 | } | |
321 | ||
322 | static int bcm6348_iudma_receive(struct dma *dma, void **dst, void *metadata) | |
323 | { | |
324 | const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev); | |
325 | const struct bcm6348_iudma_hw *hw = priv->hw; | |
326 | struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id]; | |
327 | struct bcm6348_dma_desc *dma_desc = dma_desc = ch_priv->dma_ring; | |
328 | int ret; | |
329 | ||
e4f907e9 ÁFR |
330 | if (!ch_priv->running) |
331 | return -EINVAL; | |
332 | ||
ccfd6988 ÁFR |
333 | /* get dma ring descriptor address */ |
334 | dma_desc += ch_priv->desc_id; | |
335 | ||
336 | /* invalidate cache data */ | |
337 | bcm6348_iudma_idc(dma_desc, sizeof(*dma_desc)); | |
338 | ||
339 | /* check dma own */ | |
340 | if (dma_desc->status & DMAD_ST_OWN_MASK) | |
341 | return -EAGAIN; | |
342 | ||
343 | /* check pkt */ | |
344 | if (!(dma_desc->status & DMAD_ST_EOP_MASK) || | |
345 | !(dma_desc->status & DMAD_ST_SOP_MASK) || | |
346 | (dma_desc->status & hw->err_mask)) { | |
347 | pr_err("invalid pkt received (ch=%ld desc=%u) (st=%04x)\n", | |
348 | dma->id, ch_priv->desc_id, dma_desc->status); | |
349 | ret = -EAGAIN; | |
350 | } else { | |
351 | /* set dma buffer address */ | |
352 | *dst = phys_to_virt(dma_desc->address); | |
353 | ||
354 | /* invalidate cache data */ | |
355 | bcm6348_iudma_idc(*dst, dma_desc->length); | |
356 | ||
357 | /* return packet length */ | |
358 | ret = dma_desc->length; | |
359 | } | |
360 | ||
361 | /* busy dma descriptor */ | |
362 | ch_priv->busy_desc[ch_priv->desc_id] = true; | |
363 | ||
364 | /* increment dma descriptor */ | |
365 | ch_priv->desc_id = (ch_priv->desc_id + 1) % ch_priv->desc_cnt; | |
366 | ||
367 | return ret; | |
368 | } | |
369 | ||
370 | static int bcm6348_iudma_send(struct dma *dma, void *src, size_t len, | |
371 | void *metadata) | |
372 | { | |
373 | const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev); | |
374 | struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id]; | |
375 | struct bcm6348_dma_desc *dma_desc; | |
376 | uint16_t status; | |
377 | ||
e4f907e9 ÁFR |
378 | if (!ch_priv->running) |
379 | return -EINVAL; | |
380 | ||
ccfd6988 ÁFR |
381 | /* flush cache */ |
382 | bcm6348_iudma_fdc(src, len); | |
383 | ||
384 | /* get dma ring descriptor address */ | |
385 | dma_desc = ch_priv->dma_ring; | |
386 | dma_desc += ch_priv->desc_id; | |
387 | ||
388 | /* config dma descriptor */ | |
389 | status = (DMAD_ST_OWN_MASK | | |
390 | DMAD_ST_EOP_MASK | | |
391 | DMAD_ST_CRC_MASK | | |
392 | DMAD_ST_SOP_MASK); | |
393 | if (ch_priv->desc_id == ch_priv->desc_cnt - 1) | |
394 | status |= DMAD_ST_WRAP_MASK; | |
395 | ||
396 | /* set dma descriptor */ | |
397 | dma_desc->address = virt_to_phys(src); | |
398 | dma_desc->length = len; | |
399 | dma_desc->status = status; | |
400 | ||
401 | /* flush cache */ | |
402 | bcm6348_iudma_fdc(dma_desc, sizeof(*dma_desc)); | |
403 | ||
404 | /* kick tx dma channel */ | |
405 | setbits_be32(priv->chan + DMAC_CFG_REG(dma->id), DMAC_CFG_ENABLE_MASK); | |
406 | ||
407 | /* poll dma status */ | |
408 | do { | |
409 | /* invalidate cache */ | |
410 | bcm6348_iudma_idc(dma_desc, sizeof(*dma_desc)); | |
411 | ||
412 | if (!(dma_desc->status & DMAD_ST_OWN_MASK)) | |
413 | break; | |
414 | } while(1); | |
415 | ||
416 | /* increment dma descriptor */ | |
417 | ch_priv->desc_id = (ch_priv->desc_id + 1) % ch_priv->desc_cnt; | |
418 | ||
419 | return 0; | |
420 | } | |
421 | ||
422 | static int bcm6348_iudma_free_rcv_buf(struct dma *dma, void *dst, size_t size) | |
423 | { | |
424 | const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev); | |
425 | struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id]; | |
426 | struct bcm6348_dma_desc *dma_desc = ch_priv->dma_ring; | |
427 | uint16_t status; | |
428 | uint8_t i; | |
429 | u32 cfg; | |
430 | ||
431 | /* get dirty dma descriptor */ | |
432 | for (i = 0; i < ch_priv->desc_cnt; i++) { | |
433 | if (phys_to_virt(dma_desc->address) == dst) | |
434 | break; | |
435 | ||
436 | dma_desc++; | |
437 | } | |
438 | ||
439 | /* dma descriptor not found */ | |
440 | if (i == ch_priv->desc_cnt) { | |
441 | pr_err("dirty dma descriptor not found\n"); | |
442 | return -ENOENT; | |
443 | } | |
444 | ||
445 | /* invalidate cache */ | |
446 | bcm6348_iudma_idc(ch_priv->dma_ring, | |
447 | sizeof(*dma_desc) * ch_priv->desc_cnt); | |
448 | ||
449 | /* free dma descriptor */ | |
450 | ch_priv->busy_desc[i] = false; | |
451 | ||
452 | status = DMAD_ST_OWN_MASK; | |
453 | if (i == ch_priv->desc_cnt - 1) | |
454 | status |= DMAD_ST_WRAP_MASK; | |
455 | ||
456 | dma_desc->status |= status; | |
457 | dma_desc->length = PKTSIZE_ALIGN; | |
458 | ||
459 | /* tell dma we allocated one buffer */ | |
460 | writel_be(1, DMA_FLOWC_ALLOC_REG(dma->id)); | |
461 | ||
462 | /* flush cache */ | |
463 | bcm6348_iudma_fdc(ch_priv->dma_ring, | |
464 | sizeof(*dma_desc) * ch_priv->desc_cnt); | |
465 | ||
466 | /* kick rx dma channel if disabled */ | |
467 | cfg = readl_be(priv->chan + DMAC_CFG_REG(dma->id)); | |
468 | if (!(cfg & DMAC_CFG_ENABLE_MASK)) | |
469 | setbits_be32(priv->chan + DMAC_CFG_REG(dma->id), | |
470 | DMAC_CFG_ENABLE_MASK); | |
471 | ||
472 | return 0; | |
473 | } | |
474 | ||
475 | static int bcm6348_iudma_add_rcv_buf(struct dma *dma, void *dst, size_t size) | |
476 | { | |
477 | const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev); | |
478 | struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id]; | |
479 | struct bcm6348_dma_desc *dma_desc = ch_priv->dma_ring; | |
480 | ||
481 | /* no more dma descriptors available */ | |
482 | if (ch_priv->desc_cnt == ch_priv->dma_ring_size) { | |
483 | pr_err("max number of buffers reached\n"); | |
484 | return -EINVAL; | |
485 | } | |
486 | ||
487 | /* get next dma descriptor */ | |
488 | dma_desc += ch_priv->desc_cnt; | |
489 | ||
490 | /* init dma descriptor */ | |
491 | dma_desc->address = virt_to_phys(dst); | |
492 | dma_desc->length = size; | |
493 | dma_desc->status = 0; | |
494 | ||
495 | /* flush cache */ | |
496 | bcm6348_iudma_fdc(dma_desc, sizeof(*dma_desc)); | |
497 | ||
498 | /* increment dma descriptors */ | |
499 | ch_priv->desc_cnt++; | |
500 | ||
501 | return 0; | |
502 | } | |
503 | ||
504 | static int bcm6348_iudma_prepare_rcv_buf(struct dma *dma, void *dst, | |
505 | size_t size) | |
506 | { | |
507 | const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev); | |
508 | struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id]; | |
509 | ||
510 | /* only add new rx buffers if channel isn't running */ | |
511 | if (ch_priv->running) | |
512 | return bcm6348_iudma_free_rcv_buf(dma, dst, size); | |
513 | else | |
514 | return bcm6348_iudma_add_rcv_buf(dma, dst, size); | |
515 | } | |
516 | ||
/* dma uclass operations exported by this driver */
static const struct dma_ops bcm6348_iudma_ops = {
	.disable = bcm6348_iudma_disable,
	.enable = bcm6348_iudma_enable,
	.prepare_rcv_buf = bcm6348_iudma_prepare_rcv_buf,
	.request = bcm6348_iudma_request,
	.receive = bcm6348_iudma_receive,
	.send = bcm6348_iudma_send,
};
525 | ||
/* bcm6348: rx descriptors carry per-packet error flags */
static const struct bcm6348_iudma_hw bcm6348_hw = {
	.err_mask = (DMAD6348_ST_OV_ERR_MASK |
		     DMAD6348_ST_CRC_ERR_MASK |
		     DMAD6348_ST_RX_ERR_MASK |
		     DMAD6348_ST_OS_ERR_MASK |
		     DMAD6348_ST_UN_ERR_MASK),
};
533 | ||
/* bcm6368: no error bits in the rx descriptor status */
static const struct bcm6348_iudma_hw bcm6368_hw = {
	.err_mask = 0,
};
537 | ||
/* device tree match table; .data selects the per-SoC error mask */
static const struct udevice_id bcm6348_iudma_ids[] = {
	{
		.compatible = "brcm,bcm6348-iudma",
		.data = (ulong)&bcm6348_hw,
	}, {
		.compatible = "brcm,bcm6368-iudma",
		.data = (ulong)&bcm6368_hw,
	}, { /* sentinel */ }
};
547 | ||
/*
 * Probe the iudma controller: map the three register ranges, enable
 * clocks, deassert resets, and leave the global engine enabled with
 * every channel stopped.
 *
 * @dev: controller device
 * Return: 0 on success, negative errno otherwise
 */
static int bcm6348_iudma_probe(struct udevice *dev)
{
	struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
	struct bcm6348_iudma_priv *priv = dev_get_priv(dev);
	const struct bcm6348_iudma_hw *hw =
		(const struct bcm6348_iudma_hw *)dev_get_driver_data(dev);
	uint8_t ch;
	int i;

	uc_priv->supported = (DMA_SUPPORTS_DEV_TO_MEM |
			      DMA_SUPPORTS_MEM_TO_DEV);
	priv->hw = hw;

	/* dma global base address */
	priv->base = dev_remap_addr_name(dev, "dma");
	if (!priv->base)
		return -EINVAL;

	/* dma channels base address */
	priv->chan = dev_remap_addr_name(dev, "dma-channels");
	if (!priv->chan)
		return -EINVAL;

	/* dma sram base address */
	priv->sram = dev_remap_addr_name(dev, "dma-sram");
	if (!priv->sram)
		return -EINVAL;

	/* get number of channels (DT default 8, capped at DMA_CHAN_MAX) */
	priv->n_channels = dev_read_u32_default(dev, "dma-channels", 8);
	if (priv->n_channels > DMA_CHAN_MAX)
		return -EINVAL;

	/* try to enable clocks; stop at the first missing index */
	for (i = 0; ; i++) {
		struct clk clk;
		int ret;

		ret = clk_get_by_index(dev, i, &clk);
		if (ret < 0)
			break;

		ret = clk_enable(&clk);
		if (ret < 0) {
			pr_err("error enabling clock %d\n", i);
			return ret;
		}

		ret = clk_free(&clk);
		if (ret < 0) {
			pr_err("error freeing clock %d\n", i);
			return ret;
		}
	}

	/* try to perform resets; stop at the first missing index */
	for (i = 0; ; i++) {
		struct reset_ctl reset;
		int ret;

		ret = reset_get_by_index(dev, i, &reset);
		if (ret < 0)
			break;

		ret = reset_deassert(&reset);
		if (ret < 0) {
			pr_err("error deasserting reset %d\n", i);
			return ret;
		}

		ret = reset_free(&reset);
		if (ret < 0) {
			pr_err("error freeing reset %d\n", i);
			return ret;
		}
	}

	/* disable dma controller while the channels are stopped */
	clrbits_be32(priv->base + DMA_CFG_REG, DMA_CFG_ENABLE_MASK);

	/* alloc channel private data pointers */
	priv->ch_priv = calloc(priv->n_channels,
			       sizeof(struct bcm6348_chan_priv*));
	if (!priv->ch_priv)
		return -ENOMEM;

	/* stop dma channels */
	for (ch = 0; ch < priv->n_channels; ch++)
		bcm6348_iudma_chan_stop(priv, ch);

	/* enable dma controller */
	setbits_be32(priv->base + DMA_CFG_REG, DMA_CFG_ENABLE_MASK);

	return 0;
}
643 | ||
/* driver binding for the bcm6348/bcm6368 iudma controller */
U_BOOT_DRIVER(bcm6348_iudma) = {
	.name = "bcm6348_iudma",
	.id = UCLASS_DMA,
	.of_match = bcm6348_iudma_ids,
	.ops = &bcm6348_iudma_ops,
	.priv_auto_alloc_size = sizeof(struct bcm6348_iudma_priv),
	.probe = bcm6348_iudma_probe,
};