// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Álvaro Fernández Rojas <noltari@gmail.com>
 *
 * Derived from linux/drivers/dma/bcm63xx-iudma.c:
 * Copyright (C) 2015 Simon Arlott <simon@fire.lp0.eu>
 *
 * Derived from linux/drivers/net/ethernet/broadcom/bcm63xx_enet.c:
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 *
 * Derived from bcm963xx_4.12L.06B_consumer/shared/opensource/include/bcm963xx/63268_map_part.h:
 * Copyright (C) 2000-2010 Broadcom Corporation
 *
 * Derived from bcm963xx_4.12L.06B_consumer/bcmdrivers/opensource/net/enet/impl4/bcmenet.c:
 * Copyright (C) 2010 Broadcom Corporation
 */

#include <common.h>
#include <clk.h>
#include <dm.h>
#include <dma-uclass.h>
#include <memalign.h>
#include <reset.h>
#include <asm/io.h>

#define DMA_RX_DESC	6
#define DMA_TX_DESC	1

/* DMA Channels */
#define DMA_CHAN_FLOWC(x)	((x) >> 1)
#define DMA_CHAN_MAX	16
#define DMA_CHAN_SIZE	0x10
#define DMA_CHAN_TOUT	500

/* DMA Global Configuration register */
#define DMA_CFG_REG	0x00
#define DMA_CFG_ENABLE_SHIFT	0
#define DMA_CFG_ENABLE_MASK	(1 << DMA_CFG_ENABLE_SHIFT)
#define DMA_CFG_FLOWC_ENABLE(x)	BIT(DMA_CHAN_FLOWC(x) + 1)
#define DMA_CFG_NCHANS_SHIFT	24
#define DMA_CFG_NCHANS_MASK	(0xf << DMA_CFG_NCHANS_SHIFT)

/* DMA Global Flow Control registers */
#define DMA_FLOWC_THR_LO_REG(x)	(0x04 + DMA_CHAN_FLOWC(x) * 0x0c)
#define DMA_FLOWC_THR_HI_REG(x)	(0x08 + DMA_CHAN_FLOWC(x) * 0x0c)
#define DMA_FLOWC_ALLOC_REG(x)	(0x0c + DMA_CHAN_FLOWC(x) * 0x0c)
#define DMA_FLOWC_ALLOC_FORCE_SHIFT	31
#define DMA_FLOWC_ALLOC_FORCE_MASK	(1 << DMA_FLOWC_ALLOC_FORCE_SHIFT)

/* DMA Global Reset register */
#define DMA_RST_REG	0x34
#define DMA_RST_CHAN_SHIFT	0
#define DMA_RST_CHAN_MASK(x)	(1 << (x))
54 | ||
55 | /* DMA Channel Configuration register */ | |
56 | #define DMAC_CFG_REG(x) (DMA_CHAN_SIZE * (x) + 0x00) | |
57 | #define DMAC_CFG_ENABLE_SHIFT 0 | |
58 | #define DMAC_CFG_ENABLE_MASK (1 << DMAC_CFG_ENABLE_SHIFT) | |
59 | #define DMAC_CFG_PKT_HALT_SHIFT 1 | |
60 | #define DMAC_CFG_PKT_HALT_MASK (1 << DMAC_CFG_PKT_HALT_SHIFT) | |
61 | #define DMAC_CFG_BRST_HALT_SHIFT 2 | |
62 | #define DMAC_CFG_BRST_HALT_MASK (1 << DMAC_CFG_BRST_HALT_SHIFT) | |
63 | ||
64 | /* DMA Channel Max Burst Length register */ | |
65 | #define DMAC_BURST_REG(x) (DMA_CHAN_SIZE * (x) + 0x0c) | |
66 | ||
67 | /* DMA SRAM Descriptor Ring Start register */ | |
68 | #define DMAS_RSTART_REG(x) (DMA_CHAN_SIZE * (x) + 0x00) | |
69 | ||
70 | /* DMA SRAM State/Bytes done/ring offset register */ | |
71 | #define DMAS_STATE_DATA_REG(x) (DMA_CHAN_SIZE * (x) + 0x04) | |
72 | ||
73 | /* DMA SRAM Buffer Descriptor status and length register */ | |
74 | #define DMAS_DESC_LEN_STATUS_REG(x) (DMA_CHAN_SIZE * (x) + 0x08) | |
75 | ||
/* DMA SRAM Buffer Descriptor base buffer pointer register */
#define DMAS_DESC_BASE_BUFPTR_REG(x)	(DMA_CHAN_SIZE * (x) + 0x0c)

/* DMA Descriptor Status */
#define DMAD_ST_CRC_SHIFT	8
#define DMAD_ST_CRC_MASK	(1 << DMAD_ST_CRC_SHIFT)
#define DMAD_ST_WRAP_SHIFT	12
#define DMAD_ST_WRAP_MASK	(1 << DMAD_ST_WRAP_SHIFT)
#define DMAD_ST_SOP_SHIFT	13
#define DMAD_ST_SOP_MASK	(1 << DMAD_ST_SOP_SHIFT)
#define DMAD_ST_EOP_SHIFT	14
#define DMAD_ST_EOP_MASK	(1 << DMAD_ST_EOP_SHIFT)
#define DMAD_ST_OWN_SHIFT	15
#define DMAD_ST_OWN_MASK	(1 << DMAD_ST_OWN_SHIFT)

#define DMAD6348_ST_OV_ERR_SHIFT	0
#define DMAD6348_ST_OV_ERR_MASK	(1 << DMAD6348_ST_OV_ERR_SHIFT)
#define DMAD6348_ST_CRC_ERR_SHIFT	1
#define DMAD6348_ST_CRC_ERR_MASK	(1 << DMAD6348_ST_CRC_ERR_SHIFT)
#define DMAD6348_ST_RX_ERR_SHIFT	2
#define DMAD6348_ST_RX_ERR_MASK	(1 << DMAD6348_ST_RX_ERR_SHIFT)
#define DMAD6348_ST_OS_ERR_SHIFT	4
#define DMAD6348_ST_OS_ERR_MASK	(1 << DMAD6348_ST_OS_ERR_SHIFT)
#define DMAD6348_ST_UN_ERR_SHIFT	9
#define DMAD6348_ST_UN_ERR_MASK	(1 << DMAD6348_ST_UN_ERR_SHIFT)

struct bcm6348_dma_desc {
	uint16_t length;
	uint16_t status;
	uint32_t address;
};

struct bcm6348_chan_priv {
	void __iomem *dma_ring;
	uint8_t dma_ring_size;
	uint8_t desc_id;
	uint8_t desc_cnt;
	bool *busy_desc;
	bool running;
};

struct bcm6348_iudma_hw {
	uint16_t err_mask;
};

struct bcm6348_iudma_priv {
	const struct bcm6348_iudma_hw *hw;
	void __iomem *base;
	void __iomem *chan;
	void __iomem *sram;
	struct bcm6348_chan_priv **ch_priv;
	uint8_t n_channels;
};

static inline bool bcm6348_iudma_chan_is_rx(uint8_t ch)
{
	return !(ch & 1);
}

static inline void bcm6348_iudma_fdc(void *ptr, ulong size)
{
	ulong start = (ulong)ptr;

	flush_dcache_range(start, start + size);
}

static inline void bcm6348_iudma_idc(void *ptr, ulong size)
{
	ulong start = (ulong)ptr;

	invalidate_dcache_range(start, start + size);
}

static void bcm6348_iudma_chan_stop(struct bcm6348_iudma_priv *priv,
				    uint8_t ch)
{
	unsigned int timeout = DMA_CHAN_TOUT;

	do {
		uint32_t cfg, halt;

		if (timeout > DMA_CHAN_TOUT / 2)
			halt = DMAC_CFG_PKT_HALT_MASK;
		else
			halt = DMAC_CFG_BRST_HALT_MASK;

		/* try to stop dma channel */
		writel_be(halt, priv->chan + DMAC_CFG_REG(ch));
		mb();

		/* check if channel was stopped */
		cfg = readl_be(priv->chan + DMAC_CFG_REG(ch));
		if (!(cfg & DMAC_CFG_ENABLE_MASK))
			break;

		udelay(1);
	} while (--timeout);

	if (!timeout)
		pr_err("unable to stop channel %u\n", ch);

	/* reset dma channel */
	setbits_be32(priv->base + DMA_RST_REG, DMA_RST_CHAN_MASK(ch));
	mb();
	clrbits_be32(priv->base + DMA_RST_REG, DMA_RST_CHAN_MASK(ch));
}

static int bcm6348_iudma_disable(struct dma *dma)
{
	struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
	struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];

	/* stop dma channel */
	bcm6348_iudma_chan_stop(priv, dma->id);

	/* dma flow control */
	if (bcm6348_iudma_chan_is_rx(dma->id))
		writel_be(DMA_FLOWC_ALLOC_FORCE_MASK,
			  priv->base + DMA_FLOWC_ALLOC_REG(dma->id));
195 | ||
196 | /* init channel config */ | |
197 | ch_priv->running = false; | |
198 | ch_priv->desc_id = 0; | |
199 | if (bcm6348_iudma_chan_is_rx(dma->id)) | |
200 | ch_priv->desc_cnt = 0; | |
201 | else | |
202 | ch_priv->desc_cnt = ch_priv->dma_ring_size; | |
203 | ||
204 | return 0; | |
205 | } | |
206 | ||
207 | static int bcm6348_iudma_enable(struct dma *dma) | |
208 | { | |
209 | const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev); | |
210 | struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id]; | |
211 | struct bcm6348_dma_desc *dma_desc = ch_priv->dma_ring; | |
212 | uint8_t i; | |
213 | ||
214 | /* dma ring init */ | |
215 | for (i = 0; i < ch_priv->desc_cnt; i++) { | |
216 | if (bcm6348_iudma_chan_is_rx(dma->id)) { | |
217 | ch_priv->busy_desc[i] = false; | |
218 | dma_desc->status |= DMAD_ST_OWN_MASK; | |
219 | } else { | |
220 | dma_desc->status = 0; | |
221 | dma_desc->length = 0; | |
222 | dma_desc->address = 0; | |
223 | } | |
224 | ||
225 | if (i == ch_priv->desc_cnt - 1) | |
226 | dma_desc->status |= DMAD_ST_WRAP_MASK; | |
227 | ||
228 | dma_desc++; | |
229 | } | |
230 | ||
231 | /* init to first descriptor */ | |
232 | ch_priv->desc_id = 0; | |
233 | ||
234 | /* force cache writeback */ | |
235 | bcm6348_iudma_fdc(ch_priv->dma_ring, | |
236 | sizeof(*dma_desc) * ch_priv->desc_cnt); | |
237 | ||
238 | /* clear sram */ | |
239 | writel_be(0, priv->sram + DMAS_STATE_DATA_REG(dma->id)); | |
240 | writel_be(0, priv->sram + DMAS_DESC_LEN_STATUS_REG(dma->id)); | |
241 | writel_be(0, priv->sram + DMAS_DESC_BASE_BUFPTR_REG(dma->id)); | |
242 | ||
243 | /* set dma ring start */ | |
244 | writel_be(virt_to_phys(ch_priv->dma_ring), | |
245 | priv->sram + DMAS_RSTART_REG(dma->id)); | |
246 | ||
	/* set flow control */
	if (bcm6348_iudma_chan_is_rx(dma->id)) {
		u32 val;

		setbits_be32(priv->base + DMA_CFG_REG,
			     DMA_CFG_FLOWC_ENABLE(dma->id));

		val = ch_priv->desc_cnt / 3;
		writel_be(val, priv->base + DMA_FLOWC_THR_LO_REG(dma->id));

		val = (ch_priv->desc_cnt * 2) / 3;
		writel_be(val, priv->base + DMA_FLOWC_THR_HI_REG(dma->id));

		writel_be(0, priv->base + DMA_FLOWC_ALLOC_REG(dma->id));
	}

	/* set dma max burst */
	writel_be(ch_priv->desc_cnt,
		  priv->chan + DMAC_BURST_REG(dma->id));

	/* kick rx dma channel */
	if (bcm6348_iudma_chan_is_rx(dma->id))
		setbits_be32(priv->chan + DMAC_CFG_REG(dma->id),
			     DMAC_CFG_ENABLE_MASK);

	/* channel is now enabled */
	ch_priv->running = true;

	return 0;
}

static int bcm6348_iudma_request(struct dma *dma)
{
	const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
	struct bcm6348_chan_priv *ch_priv;

	/* check if channel is valid */
	if (dma->id >= priv->n_channels)
		return -ENODEV;

	/* alloc channel private data */
	priv->ch_priv[dma->id] = calloc(1, sizeof(struct bcm6348_chan_priv));
	if (!priv->ch_priv[dma->id])
		return -ENOMEM;
	ch_priv = priv->ch_priv[dma->id];

	/* alloc dma ring */
	if (bcm6348_iudma_chan_is_rx(dma->id))
		ch_priv->dma_ring_size = DMA_RX_DESC;
	else
		ch_priv->dma_ring_size = DMA_TX_DESC;

	ch_priv->dma_ring =
		malloc_cache_aligned(sizeof(struct bcm6348_dma_desc) *
				     ch_priv->dma_ring_size);
	if (!ch_priv->dma_ring)
		return -ENOMEM;

	/* init channel config */
	ch_priv->running = false;
	ch_priv->desc_id = 0;
	if (bcm6348_iudma_chan_is_rx(dma->id)) {
		ch_priv->desc_cnt = 0;
		ch_priv->busy_desc = calloc(ch_priv->dma_ring_size,
					    sizeof(bool));
		if (!ch_priv->busy_desc)
			return -ENOMEM;
	} else {
		ch_priv->desc_cnt = ch_priv->dma_ring_size;
		ch_priv->busy_desc = NULL;
	}

	return 0;
}

static int bcm6348_iudma_receive(struct dma *dma, void **dst, void *metadata)
{
	const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
	const struct bcm6348_iudma_hw *hw = priv->hw;
	struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];
	struct bcm6348_dma_desc *dma_desc = ch_priv->dma_ring;
	int ret;

	/* get dma ring descriptor address */
	dma_desc += ch_priv->desc_id;

	/* invalidate cache data */
	bcm6348_iudma_idc(dma_desc, sizeof(*dma_desc));

	/* check dma own */
	if (dma_desc->status & DMAD_ST_OWN_MASK)
		return -EAGAIN;

	/* check pkt */
	if (!(dma_desc->status & DMAD_ST_EOP_MASK) ||
	    !(dma_desc->status & DMAD_ST_SOP_MASK) ||
	    (dma_desc->status & hw->err_mask)) {
341 | pr_err("invalid pkt received (ch=%ld desc=%u) (st=%04x)\n", | |
342 | dma->id, ch_priv->desc_id, dma_desc->status); | |
		ret = -EAGAIN;
	} else {
		/* set dma buffer address */
		*dst = phys_to_virt(dma_desc->address);

		/* invalidate cache data */
		bcm6348_iudma_idc(*dst, dma_desc->length);

		/* return packet length */
		ret = dma_desc->length;
	}

	/* mark descriptor as busy: consumed but not yet recycled */
	ch_priv->busy_desc[ch_priv->desc_id] = true;

	/* increment dma descriptor */
	ch_priv->desc_id = (ch_priv->desc_id + 1) % ch_priv->desc_cnt;

	return ret;
}
363 | ||
364 | static int bcm6348_iudma_send(struct dma *dma, void *src, size_t len, | |
365 | void *metadata) | |
366 | { | |
367 | const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev); | |
368 | struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id]; | |
369 | struct bcm6348_dma_desc *dma_desc; | |
370 | uint16_t status; | |
371 | ||
372 | /* flush cache */ | |
373 | bcm6348_iudma_fdc(src, len); | |
374 | ||
375 | /* get dma ring descriptor address */ | |
376 | dma_desc = ch_priv->dma_ring; | |
377 | dma_desc += ch_priv->desc_id; | |
378 | ||
379 | /* config dma descriptor */ | |
380 | status = (DMAD_ST_OWN_MASK | | |
381 | DMAD_ST_EOP_MASK | | |
382 | DMAD_ST_CRC_MASK | | |
383 | DMAD_ST_SOP_MASK); | |
384 | if (ch_priv->desc_id == ch_priv->desc_cnt - 1) | |
385 | status |= DMAD_ST_WRAP_MASK; | |
386 | ||
387 | /* set dma descriptor */ | |
388 | dma_desc->address = virt_to_phys(src); | |
389 | dma_desc->length = len; | |
390 | dma_desc->status = status; | |
391 | ||
392 | /* flush cache */ | |
393 | bcm6348_iudma_fdc(dma_desc, sizeof(*dma_desc)); | |
394 | ||
395 | /* kick tx dma channel */ | |
396 | setbits_be32(priv->chan + DMAC_CFG_REG(dma->id), DMAC_CFG_ENABLE_MASK); | |
397 | ||
	do {
		/* invalidate cache */
		bcm6348_iudma_idc(dma_desc, sizeof(*dma_desc));

		if (!(dma_desc->status & DMAD_ST_OWN_MASK))
			break;
	} while (1);
406 | ||
407 | /* increment dma descriptor */ | |
408 | ch_priv->desc_id = (ch_priv->desc_id + 1) % ch_priv->desc_cnt; | |
409 | ||
410 | return 0; | |
411 | } | |
412 | ||
413 | static int bcm6348_iudma_free_rcv_buf(struct dma *dma, void *dst, size_t size) | |
414 | { | |
415 | const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev); | |
416 | struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id]; | |
417 | struct bcm6348_dma_desc *dma_desc = ch_priv->dma_ring; | |
418 | uint16_t status; | |
419 | uint8_t i; | |
420 | u32 cfg; | |
421 | ||
422 | /* get dirty dma descriptor */ | |
423 | for (i = 0; i < ch_priv->desc_cnt; i++) { | |
424 | if (phys_to_virt(dma_desc->address) == dst) | |
425 | break; | |
426 | ||
427 | dma_desc++; | |
428 | } | |
429 | ||
430 | /* dma descriptor not found */ | |
431 | if (i == ch_priv->desc_cnt) { | |
432 | pr_err("dirty dma descriptor not found\n"); | |
433 | return -ENOENT; | |
434 | } | |
435 | ||
436 | /* invalidate cache */ | |
437 | bcm6348_iudma_idc(ch_priv->dma_ring, | |
438 | sizeof(*dma_desc) * ch_priv->desc_cnt); | |
439 | ||
440 | /* free dma descriptor */ | |
441 | ch_priv->busy_desc[i] = false; | |
442 | ||
443 | status = DMAD_ST_OWN_MASK; | |
444 | if (i == ch_priv->desc_cnt - 1) | |
445 | status |= DMAD_ST_WRAP_MASK; | |
446 | ||
447 | dma_desc->status |= status; | |
448 | dma_desc->length = PKTSIZE_ALIGN; | |
449 | ||
	/* tell dma we allocated one buffer */
	writel_be(1, priv->base + DMA_FLOWC_ALLOC_REG(dma->id));
452 | ||
453 | /* flush cache */ | |
454 | bcm6348_iudma_fdc(ch_priv->dma_ring, | |
455 | sizeof(*dma_desc) * ch_priv->desc_cnt); | |
456 | ||
457 | /* kick rx dma channel if disabled */ | |
458 | cfg = readl_be(priv->chan + DMAC_CFG_REG(dma->id)); | |
459 | if (!(cfg & DMAC_CFG_ENABLE_MASK)) | |
460 | setbits_be32(priv->chan + DMAC_CFG_REG(dma->id), | |
461 | DMAC_CFG_ENABLE_MASK); | |
462 | ||
463 | return 0; | |
464 | } | |
465 | ||
466 | static int bcm6348_iudma_add_rcv_buf(struct dma *dma, void *dst, size_t size) | |
467 | { | |
468 | const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev); | |
469 | struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id]; | |
470 | struct bcm6348_dma_desc *dma_desc = ch_priv->dma_ring; | |
471 | ||
472 | /* no more dma descriptors available */ | |
473 | if (ch_priv->desc_cnt == ch_priv->dma_ring_size) { | |
474 | pr_err("max number of buffers reached\n"); | |
475 | return -EINVAL; | |
476 | } | |
477 | ||
478 | /* get next dma descriptor */ | |
479 | dma_desc += ch_priv->desc_cnt; | |
480 | ||
481 | /* init dma descriptor */ | |
482 | dma_desc->address = virt_to_phys(dst); | |
483 | dma_desc->length = size; | |
484 | dma_desc->status = 0; | |
485 | ||
486 | /* flush cache */ | |
487 | bcm6348_iudma_fdc(dma_desc, sizeof(*dma_desc)); | |
488 | ||
489 | /* increment dma descriptors */ | |
490 | ch_priv->desc_cnt++; | |
491 | ||
492 | return 0; | |
493 | } | |
494 | ||
static int bcm6348_iudma_prepare_rcv_buf(struct dma *dma, void *dst,
					 size_t size)
{
	const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
	struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];

	/* only add new rx buffers if channel isn't running */
	if (ch_priv->running)
		return bcm6348_iudma_free_rcv_buf(dma, dst, size);
	else
		return bcm6348_iudma_add_rcv_buf(dma, dst, size);
}

static const struct dma_ops bcm6348_iudma_ops = {
	.disable = bcm6348_iudma_disable,
	.enable = bcm6348_iudma_enable,
	.prepare_rcv_buf = bcm6348_iudma_prepare_rcv_buf,
	.request = bcm6348_iudma_request,
	.receive = bcm6348_iudma_receive,
	.send = bcm6348_iudma_send,
};

static const struct bcm6348_iudma_hw bcm6348_hw = {
	.err_mask = (DMAD6348_ST_OV_ERR_MASK |
		     DMAD6348_ST_CRC_ERR_MASK |
		     DMAD6348_ST_RX_ERR_MASK |
		     DMAD6348_ST_OS_ERR_MASK |
		     DMAD6348_ST_UN_ERR_MASK),
};

static const struct bcm6348_iudma_hw bcm6368_hw = {
	.err_mask = 0,
};

static const struct udevice_id bcm6348_iudma_ids[] = {
	{
		.compatible = "brcm,bcm6348-iudma",
		.data = (ulong)&bcm6348_hw,
	}, {
		.compatible = "brcm,bcm6368-iudma",
		.data = (ulong)&bcm6368_hw,
	}, { /* sentinel */ }
};

static int bcm6348_iudma_probe(struct udevice *dev)
{
	struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
	struct bcm6348_iudma_priv *priv = dev_get_priv(dev);
	const struct bcm6348_iudma_hw *hw =
		(const struct bcm6348_iudma_hw *)dev_get_driver_data(dev);
	uint8_t ch;
	int i;

	uc_priv->supported = (DMA_SUPPORTS_DEV_TO_MEM |
			      DMA_SUPPORTS_MEM_TO_DEV);
	priv->hw = hw;

	/* dma global base address */
	priv->base = dev_remap_addr_name(dev, "dma");
	if (!priv->base)
		return -EINVAL;

	/* dma channels base address */
	priv->chan = dev_remap_addr_name(dev, "dma-channels");
	if (!priv->chan)
		return -EINVAL;

	/* dma sram base address */
	priv->sram = dev_remap_addr_name(dev, "dma-sram");
	if (!priv->sram)
		return -EINVAL;

	/* get number of channels */
	priv->n_channels = dev_read_u32_default(dev, "dma-channels", 8);
	if (priv->n_channels > DMA_CHAN_MAX)
		return -EINVAL;

	/* try to enable clocks */
	for (i = 0; ; i++) {
		struct clk clk;
		int ret;

		ret = clk_get_by_index(dev, i, &clk);
		if (ret < 0)
			break;

		ret = clk_enable(&clk);
		if (ret < 0) {
			pr_err("error enabling clock %d\n", i);
			return ret;
		}

		ret = clk_free(&clk);
		if (ret < 0) {
			pr_err("error freeing clock %d\n", i);
			return ret;
		}
	}

	/* try to perform resets */
	for (i = 0; ; i++) {
		struct reset_ctl reset;
		int ret;

		ret = reset_get_by_index(dev, i, &reset);
		if (ret < 0)
			break;

		ret = reset_deassert(&reset);
		if (ret < 0) {
			pr_err("error deasserting reset %d\n", i);
			return ret;
		}

		ret = reset_free(&reset);
		if (ret < 0) {
			pr_err("error freeing reset %d\n", i);
			return ret;
		}
	}

	/* disable dma controller */
	clrbits_be32(priv->base + DMA_CFG_REG, DMA_CFG_ENABLE_MASK);

	/* alloc channel private data pointers */
	priv->ch_priv = calloc(priv->n_channels,
			       sizeof(struct bcm6348_chan_priv *));
	if (!priv->ch_priv)
		return -ENOMEM;
624 | ||
625 | /* stop dma channels */ | |
626 | for (ch = 0; ch < priv->n_channels; ch++) | |
627 | bcm6348_iudma_chan_stop(priv, ch); | |
628 | ||
629 | /* enable dma controller */ | |
630 | setbits_be32(priv->base + DMA_CFG_REG, DMA_CFG_ENABLE_MASK); | |
631 | ||
632 | return 0; | |
633 | } | |
634 | ||
635 | U_BOOT_DRIVER(bcm6348_iudma) = { | |
636 | .name = "bcm6348_iudma", | |
637 | .id = UCLASS_DMA, | |
638 | .of_match = bcm6348_iudma_ids, | |
639 | .ops = &bcm6348_iudma_ops, | |
640 | .priv_auto_alloc_size = sizeof(struct bcm6348_iudma_priv), | |
641 | .probe = bcm6348_iudma_probe, | |
642 | }; |