]>
Commit | Line | Data |
---|---|---|
83d290c5 | 1 | // SPDX-License-Identifier: GPL-2.0+ |
31650d64 MV |
2 | /* |
3 | * Freescale i.MX28 APBH DMA driver | |
4 | * | |
5 | * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com> | |
6 | * on behalf of DENX Software Engineering GmbH | |
7 | * | |
8 | * Based on code from LTIB: | |
9 | * Copyright (C) 2010 Freescale Semiconductor, Inc. All Rights Reserved. | |
39320e72 PF |
10 | * Copyright 2017 NXP |
11 | * | |
31650d64 MV |
12 | */ |
13 | ||
1eb69ae4 | 14 | #include <cpu_func.h> |
90526e9f | 15 | #include <asm/cache.h> |
31650d64 MV |
16 | #include <linux/list.h> |
17 | ||
d678a59d | 18 | #include <common.h> |
31650d64 | 19 | #include <malloc.h> |
1221ce45 | 20 | #include <linux/errno.h> |
31650d64 MV |
21 | #include <asm/io.h> |
22 | #include <asm/arch/clock.h> | |
23 | #include <asm/arch/imx-regs.h> | |
24 | #include <asm/arch/sys_proto.h> | |
552a848e SB |
25 | #include <asm/mach-imx/dma.h> |
26 | #include <asm/mach-imx/regs-apbh.h> | |
31650d64 MV |
27 | |
28 | static struct mxs_dma_chan mxs_dma_channels[MXS_MAX_DMA_CHANNELS]; | |
29 | ||
30 | /* | |
31 | * Test is the DMA channel is valid channel | |
32 | */ | |
33 | int mxs_dma_validate_chan(int channel) | |
34 | { | |
35 | struct mxs_dma_chan *pchan; | |
36 | ||
37 | if ((channel < 0) || (channel >= MXS_MAX_DMA_CHANNELS)) | |
38 | return -EINVAL; | |
39 | ||
40 | pchan = mxs_dma_channels + channel; | |
41 | if (!(pchan->flags & MXS_DMA_FLAGS_ALLOCATED)) | |
42 | return -EINVAL; | |
43 | ||
44 | return 0; | |
45 | } | |
46 | ||
aa72e43b MV |
47 | /* |
48 | * Return the address of the command within a descriptor. | |
49 | */ | |
50 | static unsigned int mxs_dma_cmd_address(struct mxs_dma_desc *desc) | |
51 | { | |
52 | return desc->address + offsetof(struct mxs_dma_desc, cmd); | |
53 | } | |
54 | ||
55 | /* | |
56 | * Read a DMA channel's hardware semaphore. | |
57 | * | |
58 | * As used by the MXS platform's DMA software, the DMA channel's hardware | |
59 | * semaphore reflects the number of DMA commands the hardware will process, but | |
60 | * has not yet finished. This is a volatile value read directly from hardware, | |
61 | * so it must be be viewed as immediately stale. | |
62 | * | |
63 | * If the channel is not marked busy, or has finished processing all its | |
64 | * commands, this value should be zero. | |
65 | * | |
66 | * See mxs_dma_append() for details on how DMA command blocks must be configured | |
67 | * to maintain the expected behavior of the semaphore's value. | |
68 | */ | |
69 | static int mxs_dma_read_semaphore(int channel) | |
70 | { | |
9c471142 OS |
71 | struct mxs_apbh_regs *apbh_regs = |
72 | (struct mxs_apbh_regs *)MXS_APBH_BASE; | |
aa72e43b MV |
73 | uint32_t tmp; |
74 | int ret; | |
75 | ||
76 | ret = mxs_dma_validate_chan(channel); | |
77 | if (ret) | |
78 | return ret; | |
79 | ||
80 | tmp = readl(&apbh_regs->ch[channel].hw_apbh_ch_sema); | |
81 | ||
82 | tmp &= APBH_CHn_SEMA_PHORE_MASK; | |
83 | tmp >>= APBH_CHn_SEMA_PHORE_OFFSET; | |
84 | ||
85 | return tmp; | |
86 | } | |
87 | ||
10015025 | 88 | #if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) |
c3dfe707 MV |
89 | void mxs_dma_flush_desc(struct mxs_dma_desc *desc) |
90 | { | |
91 | uint32_t addr; | |
92 | uint32_t size; | |
93 | ||
39320e72 | 94 | addr = (uintptr_t)desc; |
c3dfe707 MV |
95 | size = roundup(sizeof(struct mxs_dma_desc), MXS_DMA_ALIGNMENT); |
96 | ||
97 | flush_dcache_range(addr, addr + size); | |
98 | } | |
99 | #else | |
100 | inline void mxs_dma_flush_desc(struct mxs_dma_desc *desc) {} | |
101 | #endif | |
102 | ||
31650d64 MV |
/*
 * Enable a DMA channel.
 *
 * If the given channel has any DMA descriptors on its active list, this
 * function causes the DMA hardware to begin processing them.
 *
 * This function marks the DMA channel as "busy," whether or not there are any
 * descriptors to process.
 */
static int mxs_dma_enable(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	unsigned int sem;
	struct mxs_dma_chan *pchan;
	struct mxs_dma_desc *pdesc;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	/* Nothing queued: just mark the channel busy and return. */
	if (pchan->pending_num == 0) {
		pchan->flags |= MXS_DMA_FLAGS_BUSY;
		return 0;
	}

	pdesc = list_first_entry(&pchan->active, struct mxs_dma_desc, node);
	if (pdesc == NULL)
		return -EFAULT;

	if (pchan->flags & MXS_DMA_FLAGS_BUSY) {
		/*
		 * Channel is already running: we can only hand it more work
		 * if the first active descriptor chains onward.
		 */
		if (!(pdesc->cmd.data & MXS_DMA_DESC_CHAIN))
			return 0;

		sem = mxs_dma_read_semaphore(channel);
		if (sem == 0)
			return 0;

		if (sem == 1) {
			/*
			 * Only one command still in flight: point the
			 * hardware's next-command register at the following
			 * descriptor so the chain continues seamlessly.
			 */
			pdesc = list_entry(pdesc->node.next,
					   struct mxs_dma_desc, node);
			writel(mxs_dma_cmd_address(pdesc),
				&apbh_regs->ch[channel].hw_apbh_ch_nxtcmdar);
		}
		/* Post the newly queued commands on the channel semaphore. */
		writel(pchan->pending_num,
			&apbh_regs->ch[channel].hw_apbh_ch_sema);
		pchan->active_num += pchan->pending_num;
		pchan->pending_num = 0;
	} else {
		/*
		 * Channel is idle: program the first descriptor, post the
		 * command count, and ungate the channel clock to start it.
		 */
		pchan->active_num += pchan->pending_num;
		pchan->pending_num = 0;
		writel(mxs_dma_cmd_address(pdesc),
			&apbh_regs->ch[channel].hw_apbh_ch_nxtcmdar);
		writel(pchan->active_num,
			&apbh_regs->ch[channel].hw_apbh_ch_sema);
		writel(1 << (channel + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET),
			&apbh_regs->hw_apbh_ctrl0_clr);
	}

	pchan->flags |= MXS_DMA_FLAGS_BUSY;
	return 0;
}
168 | ||
169 | /* | |
170 | * Disable a DMA channel. | |
171 | * | |
172 | * This function shuts down a DMA channel and marks it as "not busy." Any | |
173 | * descriptors on the active list are immediately moved to the head of the | |
174 | * "done" list, whether or not they have actually been processed by the | |
175 | * hardware. The "ready" flags of these descriptors are NOT cleared, so they | |
176 | * still appear to be active. | |
177 | * | |
178 | * This function immediately shuts down a DMA channel's hardware, aborting any | |
179 | * I/O that may be in progress, potentially leaving I/O hardware in an undefined | |
180 | * state. It is unwise to call this function if there is ANY chance the hardware | |
181 | * is still processing a command. | |
182 | */ | |
aa72e43b | 183 | static int mxs_dma_disable(int channel) |
31650d64 MV |
184 | { |
185 | struct mxs_dma_chan *pchan; | |
9c471142 OS |
186 | struct mxs_apbh_regs *apbh_regs = |
187 | (struct mxs_apbh_regs *)MXS_APBH_BASE; | |
31650d64 MV |
188 | int ret; |
189 | ||
190 | ret = mxs_dma_validate_chan(channel); | |
191 | if (ret) | |
192 | return ret; | |
193 | ||
194 | pchan = mxs_dma_channels + channel; | |
195 | ||
196 | if (!(pchan->flags & MXS_DMA_FLAGS_BUSY)) | |
197 | return -EINVAL; | |
198 | ||
199 | writel(1 << (channel + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET), | |
200 | &apbh_regs->hw_apbh_ctrl0_set); | |
201 | ||
202 | pchan->flags &= ~MXS_DMA_FLAGS_BUSY; | |
203 | pchan->active_num = 0; | |
204 | pchan->pending_num = 0; | |
205 | list_splice_init(&pchan->active, &pchan->done); | |
206 | ||
207 | return 0; | |
208 | } | |
209 | ||
/*
 * Resets the DMA channel hardware.
 *
 * The per-channel reset bit lives in a different register on i.MX23
 * (CTRL0) than on the later SoCs (CHANNEL_CTRL), hence the compile-time
 * selection of the SET register address and the bit offset below.
 */
static int mxs_dma_reset(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;
#if defined(CONFIG_MX23)
	uint32_t setreg = (uint32_t)(&apbh_regs->hw_apbh_ctrl0_set);
	uint32_t offset = APBH_CTRL0_RESET_CHANNEL_OFFSET;
#elif defined(CONFIG_MX28) || defined(CONFIG_MX6) || defined(CONFIG_MX7) || \
	defined(CONFIG_IMX8) || defined(CONFIG_IMX8M)
	u32 setreg = (uintptr_t)(&apbh_regs->hw_apbh_channel_ctrl_set);
	u32 offset = APBH_CHANNEL_CTRL_RESET_CHANNEL_OFFSET;
#endif

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	/* Write the channel's reset bit into the selected SET register. */
	writel(1 << (channel + offset), (uintptr_t)setreg);

	return 0;
}
235 | ||
31650d64 MV |
236 | /* |
237 | * Enable or disable DMA interrupt. | |
238 | * | |
239 | * This function enables the given DMA channel to interrupt the CPU. | |
240 | */ | |
aa72e43b | 241 | static int mxs_dma_enable_irq(int channel, int enable) |
31650d64 | 242 | { |
9c471142 OS |
243 | struct mxs_apbh_regs *apbh_regs = |
244 | (struct mxs_apbh_regs *)MXS_APBH_BASE; | |
31650d64 MV |
245 | int ret; |
246 | ||
247 | ret = mxs_dma_validate_chan(channel); | |
248 | if (ret) | |
249 | return ret; | |
250 | ||
251 | if (enable) | |
252 | writel(1 << (channel + APBH_CTRL1_CH_CMDCMPLT_IRQ_EN_OFFSET), | |
253 | &apbh_regs->hw_apbh_ctrl1_set); | |
254 | else | |
255 | writel(1 << (channel + APBH_CTRL1_CH_CMDCMPLT_IRQ_EN_OFFSET), | |
256 | &apbh_regs->hw_apbh_ctrl1_clr); | |
257 | ||
258 | return 0; | |
259 | } | |
260 | ||
31650d64 MV |
261 | /* |
262 | * Clear DMA interrupt. | |
263 | * | |
264 | * The software that is using the DMA channel must register to receive its | |
265 | * interrupts and, when they arrive, must call this function to clear them. | |
266 | */ | |
aa72e43b | 267 | static int mxs_dma_ack_irq(int channel) |
31650d64 | 268 | { |
9c471142 OS |
269 | struct mxs_apbh_regs *apbh_regs = |
270 | (struct mxs_apbh_regs *)MXS_APBH_BASE; | |
31650d64 MV |
271 | int ret; |
272 | ||
273 | ret = mxs_dma_validate_chan(channel); | |
274 | if (ret) | |
275 | return ret; | |
276 | ||
277 | writel(1 << channel, &apbh_regs->hw_apbh_ctrl1_clr); | |
278 | writel(1 << channel, &apbh_regs->hw_apbh_ctrl2_clr); | |
279 | ||
280 | return 0; | |
281 | } | |
282 | ||
283 | /* | |
284 | * Request to reserve a DMA channel | |
285 | */ | |
aa72e43b | 286 | static int mxs_dma_request(int channel) |
31650d64 MV |
287 | { |
288 | struct mxs_dma_chan *pchan; | |
289 | ||
290 | if ((channel < 0) || (channel >= MXS_MAX_DMA_CHANNELS)) | |
291 | return -EINVAL; | |
292 | ||
293 | pchan = mxs_dma_channels + channel; | |
294 | if ((pchan->flags & MXS_DMA_FLAGS_VALID) != MXS_DMA_FLAGS_VALID) | |
295 | return -ENODEV; | |
296 | ||
297 | if (pchan->flags & MXS_DMA_FLAGS_ALLOCATED) | |
298 | return -EBUSY; | |
299 | ||
300 | pchan->flags |= MXS_DMA_FLAGS_ALLOCATED; | |
301 | pchan->active_num = 0; | |
302 | pchan->pending_num = 0; | |
303 | ||
304 | INIT_LIST_HEAD(&pchan->active); | |
305 | INIT_LIST_HEAD(&pchan->done); | |
306 | ||
307 | return 0; | |
308 | } | |
309 | ||
310 | /* | |
311 | * Release a DMA channel. | |
312 | * | |
313 | * This function releases a DMA channel from its current owner. | |
314 | * | |
315 | * The channel will NOT be released if it's marked "busy" (see | |
316 | * mxs_dma_enable()). | |
317 | */ | |
96666a39 | 318 | int mxs_dma_release(int channel) |
31650d64 MV |
319 | { |
320 | struct mxs_dma_chan *pchan; | |
321 | int ret; | |
322 | ||
323 | ret = mxs_dma_validate_chan(channel); | |
324 | if (ret) | |
325 | return ret; | |
326 | ||
327 | pchan = mxs_dma_channels + channel; | |
328 | ||
329 | if (pchan->flags & MXS_DMA_FLAGS_BUSY) | |
330 | return -EBUSY; | |
331 | ||
332 | pchan->dev = 0; | |
333 | pchan->active_num = 0; | |
334 | pchan->pending_num = 0; | |
335 | pchan->flags &= ~MXS_DMA_FLAGS_ALLOCATED; | |
336 | ||
337 | return 0; | |
338 | } | |
339 | ||
340 | /* | |
341 | * Allocate DMA descriptor | |
342 | */ | |
343 | struct mxs_dma_desc *mxs_dma_desc_alloc(void) | |
344 | { | |
345 | struct mxs_dma_desc *pdesc; | |
c3dfe707 | 346 | uint32_t size; |
31650d64 | 347 | |
c3dfe707 MV |
348 | size = roundup(sizeof(struct mxs_dma_desc), MXS_DMA_ALIGNMENT); |
349 | pdesc = memalign(MXS_DMA_ALIGNMENT, size); | |
31650d64 MV |
350 | |
351 | if (pdesc == NULL) | |
352 | return NULL; | |
353 | ||
354 | memset(pdesc, 0, sizeof(*pdesc)); | |
355 | pdesc->address = (dma_addr_t)pdesc; | |
356 | ||
357 | return pdesc; | |
358 | }; | |
359 | ||
/*
 * Free a DMA descriptor previously obtained from mxs_dma_desc_alloc().
 *
 * Accepts NULL, which is a no-op: free(NULL) is defined to do nothing,
 * so the explicit NULL guard the old code carried was redundant.
 */
void mxs_dma_desc_free(struct mxs_dma_desc *pdesc)
{
	free(pdesc);
}
370 | ||
31650d64 MV |
/*
 * Add a DMA descriptor to a channel.
 *
 * If the descriptor list for this channel is not empty, this function sets the
 * CHAIN bit and the NEXTCMD_ADDR fields in the last descriptor's DMA command so
 * it will chain to the new descriptor's command.
 *
 * Then, this function marks the new descriptor as "ready," adds it to the end
 * of the active descriptor list, and increments the count of pending
 * descriptors.
 *
 * The MXS platform DMA software imposes some rules on DMA commands to maintain
 * important invariants. These rules are NOT checked, but they must be carefully
 * applied by software that uses MXS DMA channels.
 *
 * Invariant:
 *     The DMA channel's hardware semaphore must reflect the number of DMA
 *     commands the hardware will process, but has not yet finished.
 *
 * Explanation:
 *     A DMA channel begins processing commands when its hardware semaphore is
 *     written with a value greater than zero, and it stops processing commands
 *     when the semaphore returns to zero.
 *
 *     When a channel finishes a DMA command, it will decrement its semaphore if
 *     the DECREMENT_SEMAPHORE bit is set in that command's flags bits.
 *
 *     In principle, it's not necessary for the DECREMENT_SEMAPHORE to be set,
 *     unless it suits the purposes of the software. For example, one could
 *     construct a series of five DMA commands, with the DECREMENT_SEMAPHORE
 *     bit set only in the last one. Then, setting the DMA channel's hardware
 *     semaphore to one would cause the entire series of five commands to be
 *     processed. However, this example would violate the invariant given above.
 *
 * Rule:
 *     ALL DMA commands MUST have the DECREMENT_SEMAPHORE bit set so that the
 *     DMA channel's hardware semaphore will be decremented EVERY time a command
 *     is processed.
 */
int mxs_dma_desc_append(int channel, struct mxs_dma_desc *pdesc)
{
	struct mxs_dma_chan *pchan;
	struct mxs_dma_desc *last;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	/* Start by assuming the descriptor stands alone (first AND last). */
	pdesc->cmd.next = mxs_dma_cmd_address(pdesc);
	pdesc->flags |= MXS_DMA_DESC_FIRST | MXS_DMA_DESC_LAST;

	if (!list_empty(&pchan->active)) {
		/*
		 * There is a predecessor: chain its command to the new
		 * descriptor and shift the FIRST/LAST markers accordingly.
		 */
		last = list_entry(pchan->active.prev, struct mxs_dma_desc,
					node);

		pdesc->flags &= ~MXS_DMA_DESC_FIRST;
		last->flags &= ~MXS_DMA_DESC_LAST;

		last->cmd.next = mxs_dma_cmd_address(pdesc);
		last->cmd.data |= MXS_DMA_DESC_CHAIN;

		/* The predecessor changed; push it out of the cache. */
		mxs_dma_flush_desc(last);
	}
	pdesc->flags |= MXS_DMA_DESC_READY;
	/* Only the head of a chain counts as a new pending command. */
	if (pdesc->flags & MXS_DMA_DESC_FIRST)
		pchan->pending_num++;
	list_add_tail(&pdesc->node, &pchan->active);

	mxs_dma_flush_desc(pdesc);

	return ret;
}
446 | ||
31650d64 MV |
/*
 * Clean up processed DMA descriptors.
 *
 * This function removes processed DMA descriptors from the "active" list. Pass
 * in a non-NULL list head to get the descriptors moved to your list. Pass NULL
 * to get the descriptors moved to the channel's "done" list. Descriptors on
 * the "done" list can be retrieved with mxs_dma_get_finished().
 *
 * This function marks the DMA channel as "not busy" if no unprocessed
 * descriptors remain on the "active" list.
 */
static int mxs_dma_finish(int channel, struct list_head *head)
{
	int sem;
	struct mxs_dma_chan *pchan;
	struct list_head *p, *q;
	struct mxs_dma_desc *pdesc;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	sem = mxs_dma_read_semaphore(channel);
	if (sem < 0)
		return sem;

	/* Semaphore equals our count: nothing has completed since last time. */
	if (sem == pchan->active_num)
		return 0;

	/*
	 * Move completed descriptors off the active list until our
	 * bookkeeping count drops to the hardware's remaining count.
	 */
	list_for_each_safe(p, q, &pchan->active) {
		if ((pchan->active_num) <= sem)
			break;

		pdesc = list_entry(p, struct mxs_dma_desc, node);
		pdesc->flags &= ~MXS_DMA_DESC_READY;

		if (head)
			list_move_tail(p, head);
		else
			list_move_tail(p, &pchan->done);

		/* Only chain tails (LAST) decrement the command count. */
		if (pdesc->flags & MXS_DMA_DESC_LAST)
			pchan->active_num--;
	}

	/* Hardware has nothing left in flight: the channel is idle. */
	if (sem == 0)
		pchan->flags &= ~MXS_DMA_FLAGS_BUSY;

	return 0;
}
500 | ||
501 | /* | |
502 | * Wait for DMA channel to complete | |
503 | */ | |
aa72e43b | 504 | static int mxs_dma_wait_complete(uint32_t timeout, unsigned int chan) |
31650d64 | 505 | { |
9c471142 OS |
506 | struct mxs_apbh_regs *apbh_regs = |
507 | (struct mxs_apbh_regs *)MXS_APBH_BASE; | |
31650d64 MV |
508 | int ret; |
509 | ||
510 | ret = mxs_dma_validate_chan(chan); | |
511 | if (ret) | |
512 | return ret; | |
513 | ||
fa7a51cb | 514 | if (mxs_wait_mask_set(&apbh_regs->hw_apbh_ctrl1_reg, |
31650d64 MV |
515 | 1 << chan, timeout)) { |
516 | ret = -ETIMEDOUT; | |
517 | mxs_dma_reset(chan); | |
518 | } | |
519 | ||
12dab4ce | 520 | return ret; |
31650d64 MV |
521 | } |
522 | ||
523 | /* | |
524 | * Execute the DMA channel | |
525 | */ | |
526 | int mxs_dma_go(int chan) | |
527 | { | |
1375f044 | 528 | uint32_t timeout = 10000000; |
31650d64 MV |
529 | int ret; |
530 | ||
531 | LIST_HEAD(tmp_desc_list); | |
532 | ||
533 | mxs_dma_enable_irq(chan, 1); | |
534 | mxs_dma_enable(chan); | |
535 | ||
536 | /* Wait for DMA to finish. */ | |
537 | ret = mxs_dma_wait_complete(timeout, chan); | |
538 | ||
539 | /* Clear out the descriptors we just ran. */ | |
540 | mxs_dma_finish(chan, &tmp_desc_list); | |
541 | ||
542 | /* Shut the DMA channel down. */ | |
543 | mxs_dma_ack_irq(chan); | |
544 | mxs_dma_reset(chan); | |
545 | mxs_dma_enable_irq(chan, 0); | |
546 | mxs_dma_disable(chan); | |
547 | ||
548 | return ret; | |
549 | } | |
550 | ||
69f7345c MV |
/*
 * Execute a continuously running circular DMA descriptor.
 * NOTE: This is not intended for general use, but rather
 *	 for the LCD driver in Smart-LCD mode. It allows
 *	 continuous triggering of the RUN bit there.
 *
 * No channel validation is performed here, and the channel bookkeeping
 * (flags, active/pending counters) is deliberately bypassed.
 */
void mxs_dma_circ_start(int chan, struct mxs_dma_desc *pdesc)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;

	/* Make sure the engine sees the descriptor's latest contents. */
	mxs_dma_flush_desc(pdesc);

	mxs_dma_enable_irq(chan, 1);

	/* Point the channel at the descriptor's command... */
	writel(mxs_dma_cmd_address(pdesc),
	       &apbh_regs->ch[chan].hw_apbh_ch_nxtcmdar);
	/* ...post one command on the channel semaphore... */
	writel(1, &apbh_regs->ch[chan].hw_apbh_ch_sema);
	/* ...and ungate the channel clock so it starts running. */
	writel(1 << (chan + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET),
	       &apbh_regs->hw_apbh_ctrl0_clr);
}
572 | ||
31650d64 MV |
573 | /* |
574 | * Initialize the DMA hardware | |
575 | */ | |
96666a39 | 576 | void mxs_dma_init(void) |
31650d64 | 577 | { |
9c471142 OS |
578 | struct mxs_apbh_regs *apbh_regs = |
579 | (struct mxs_apbh_regs *)MXS_APBH_BASE; | |
31650d64 | 580 | |
fa7a51cb | 581 | mxs_reset_block(&apbh_regs->hw_apbh_ctrl0_reg); |
31650d64 MV |
582 | |
583 | #ifdef CONFIG_APBH_DMA_BURST8 | |
584 | writel(APBH_CTRL0_AHB_BURST8_EN, | |
585 | &apbh_regs->hw_apbh_ctrl0_set); | |
586 | #else | |
587 | writel(APBH_CTRL0_AHB_BURST8_EN, | |
588 | &apbh_regs->hw_apbh_ctrl0_clr); | |
589 | #endif | |
590 | ||
591 | #ifdef CONFIG_APBH_DMA_BURST | |
592 | writel(APBH_CTRL0_APB_BURST_EN, | |
593 | &apbh_regs->hw_apbh_ctrl0_set); | |
594 | #else | |
595 | writel(APBH_CTRL0_APB_BURST_EN, | |
596 | &apbh_regs->hw_apbh_ctrl0_clr); | |
597 | #endif | |
96666a39 | 598 | } |
31650d64 | 599 | |
96666a39 MV |
600 | int mxs_dma_init_channel(int channel) |
601 | { | |
602 | struct mxs_dma_chan *pchan; | |
603 | int ret; | |
31650d64 | 604 | |
96666a39 MV |
605 | pchan = mxs_dma_channels + channel; |
606 | pchan->flags = MXS_DMA_FLAGS_VALID; | |
31650d64 | 607 | |
96666a39 | 608 | ret = mxs_dma_request(channel); |
31650d64 | 609 | |
96666a39 MV |
610 | if (ret) { |
611 | printf("MXS DMA: Can't acquire DMA channel %i\n", | |
612 | channel); | |
613 | return ret; | |
31650d64 MV |
614 | } |
615 | ||
96666a39 MV |
616 | mxs_dma_reset(channel); |
617 | mxs_dma_ack_irq(channel); | |
31650d64 | 618 | |
96666a39 | 619 | return 0; |
31650d64 | 620 | } |