// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
// Copyright (c) 2017 Sysam, Angelo Dureghello <angelo@sysam.it>

#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include "fsl-edma-common.h"

#define EDMA_CR			0x00
#define EDMA_ES			0x04
#define EDMA_ERQ		0x0C
#define EDMA_EEI		0x14
#define EDMA_SERQ		0x1B
#define EDMA_CERQ		0x1A
#define EDMA_SEEI		0x19
#define EDMA_CEEI		0x18
#define EDMA_CINT		0x1F
#define EDMA_CERR		0x1E
#define EDMA_SSRT		0x1D
#define EDMA_CDNE		0x1C
#define EDMA_INTR		0x24
#define EDMA_ERR		0x2C

#define EDMA64_ERQH		0x08
#define EDMA64_EEIH		0x10
#define EDMA64_SERQ		0x18
#define EDMA64_CERQ		0x19
#define EDMA64_SEEI		0x1a
#define EDMA64_CEEI		0x1b
#define EDMA64_CINT		0x1c
#define EDMA64_CERR		0x1d
#define EDMA64_SSRT		0x1e
#define EDMA64_CDNE		0x1f
#define EDMA64_INTH		0x20
#define EDMA64_INTL		0x24
#define EDMA64_ERRH		0x28
#define EDMA64_ERRL		0x2c

#define EDMA_TCD		0x1000

static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
{
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	if (fsl_chan->edma->version == v1) {
		edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
		edma_writeb(fsl_chan->edma, ch, regs->serq);
	} else {
		/* ColdFire is big endian, and accesses natively
		 * big endian I/O peripherals
		 */
		iowrite8(EDMA_SEEI_SEEI(ch), regs->seei);
		iowrite8(ch, regs->serq);
	}
}

void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
{
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	if (fsl_chan->edma->version == v1) {
		edma_writeb(fsl_chan->edma, ch, regs->cerq);
		edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
	} else {
		/* ColdFire is big endian, and accesses natively
		 * big endian I/O peripherals
		 */
		iowrite8(ch, regs->cerq);
		iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei);
	}
}
EXPORT_SYMBOL_GPL(fsl_edma_disable_request);

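/*
 * Route a DMAMUX request "slot" to this channel, or disconnect it.
 * Channels are split evenly across the DMAMUX blocks: for example
 * (illustrative numbers), with n_chans == 32 spread over two muxes,
 * chans_per_mux is 16, so channel 20 is programmed through muxbase[1]
 * at byte offset 4.
 */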
void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
		       unsigned int slot, bool enable)
{
	u32 ch = fsl_chan->vchan.chan.chan_id;
	void __iomem *muxaddr;
	unsigned int chans_per_mux, ch_off;

	chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR;
	ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
	muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
	slot = EDMAMUX_CHCFG_SOURCE(slot);

	if (enable)
		iowrite8(EDMAMUX_CHCFG_ENBL | slot, muxaddr + ch_off);
	else
		iowrite8(EDMAMUX_CHCFG_DIS, muxaddr + ch_off);
}
EXPORT_SYMBOL_GPL(fsl_edma_chan_mux);

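/*
 * Translate a dma_slave_buswidth into the TCD ATTR source/destination
 * size fields; unsupported widths fall back to 32-bit accesses.
 */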
static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
{
	switch (addr_width) {
	case 1:
		return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
	case 2:
		return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
	case 4:
		return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
	case 8:
		return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
	default:
		return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
	}
}

void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = to_fsl_edma_desc(vdesc);
	for (i = 0; i < fsl_desc->n_tcds; i++)
		dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
			      fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
}
EXPORT_SYMBOL_GPL(fsl_edma_free_desc);

int fsl_edma_terminate_all(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	fsl_chan->edesc = NULL;
	fsl_chan->idle = true;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_terminate_all);

int fsl_edma_pause(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	if (fsl_chan->edesc) {
		fsl_edma_disable_request(fsl_chan);
		fsl_chan->status = DMA_PAUSED;
		fsl_chan->idle = true;
	}
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_pause);

int fsl_edma_resume(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	if (fsl_chan->edesc) {
		fsl_edma_enable_request(fsl_chan);
		fsl_chan->status = DMA_IN_PROGRESS;
		fsl_chan->idle = false;
	}
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_resume);

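/*
 * The slave-side FIFO address from dma_slave_config is a CPU physical
 * address; it is mapped with dma_map_resource() once per direction and
 * the cached mapping is reused until the direction (or the config, via
 * fsl_edma_slave_config()) changes.
 */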
static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
{
	if (fsl_chan->dma_dir != DMA_NONE)
		dma_unmap_resource(fsl_chan->vchan.chan.device->dev,
				   fsl_chan->dma_dev_addr,
				   fsl_chan->dma_dev_size,
				   fsl_chan->dma_dir, 0);
	fsl_chan->dma_dir = DMA_NONE;
}

static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan,
				    enum dma_transfer_direction dir)
{
	struct device *dev = fsl_chan->vchan.chan.device->dev;
	enum dma_data_direction dma_dir;
	phys_addr_t addr = 0;
	u32 size = 0;

	switch (dir) {
	case DMA_MEM_TO_DEV:
		dma_dir = DMA_FROM_DEVICE;
		addr = fsl_chan->cfg.dst_addr;
		size = fsl_chan->cfg.dst_maxburst;
		break;
	case DMA_DEV_TO_MEM:
		dma_dir = DMA_TO_DEVICE;
		addr = fsl_chan->cfg.src_addr;
		size = fsl_chan->cfg.src_maxburst;
		break;
	default:
		dma_dir = DMA_NONE;
		break;
	}

	/* Already mapped for this config? */
	if (fsl_chan->dma_dir == dma_dir)
		return true;

	fsl_edma_unprep_slave_dma(fsl_chan);

	fsl_chan->dma_dev_addr = dma_map_resource(dev, addr, size, dma_dir, 0);
	if (dma_mapping_error(dev, fsl_chan->dma_dev_addr))
		return false;
	fsl_chan->dma_dev_size = size;
	fsl_chan->dma_dir = dma_dir;

	return true;
}

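/*
 * A client driver feeds this through the generic dmaengine API.
 * Minimal sketch (hypothetical channel and FIFO address, error
 * handling omitted):
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_DEV_TO_MEM,
 *		.src_addr	= fifo_phys_addr,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 1,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 */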
int fsl_edma_slave_config(struct dma_chan *chan,
			  struct dma_slave_config *cfg)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg));
	fsl_edma_unprep_slave_dma(fsl_chan);

	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_slave_config);

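/*
 * Residue is computed in two passes: first sum nbytes * biter over all
 * TCDs of the descriptor, then, if the transfer is in flight, walk the
 * TCDs again and subtract what has already completed, using the
 * channel's current source (MEM_TO_DEV) or destination (DEV_TO_MEM)
 * address to locate the TCD being executed.
 */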
static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
				    struct virt_dma_desc *vdesc, bool in_progress)
{
	struct fsl_edma_desc *edesc = fsl_chan->edesc;
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;
	enum dma_transfer_direction dir = edesc->dirn;
	dma_addr_t cur_addr, dma_addr;
	size_t len, size;
	int i;

	/* calculate the total size in this desc */
	for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
		len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
			* le16_to_cpu(edesc->tcd[i].vtcd->biter);

	if (!in_progress)
		return len;

	if (dir == DMA_MEM_TO_DEV)
		cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].saddr);
	else
		cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].daddr);

	/* figure out the finished and calculate the residue */
	for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
		size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
			* le16_to_cpu(edesc->tcd[i].vtcd->biter);
		if (dir == DMA_MEM_TO_DEV)
			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
		else
			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);

		len -= size;
		if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
			len += dma_addr + size - cur_addr;
			break;
		}
	}

	return len;
}

enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
				   dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, txstate);
	if (status == DMA_COMPLETE)
		return status;

	if (!txstate)
		return fsl_chan->status;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
	if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
		txstate->residue =
			fsl_edma_desc_residue(fsl_chan, vdesc, true);
	else if (vdesc)
		txstate->residue =
			fsl_edma_desc_residue(fsl_chan, vdesc, false);
	else
		txstate->residue = 0;

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	return fsl_chan->status;
}
EXPORT_SYMBOL_GPL(fsl_edma_tx_status);

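/*
 * Note the CSR write ordering below: csr is cleared first and written
 * last, so the channel cannot start (or chain via E_SG) on a partially
 * programmed TCD.
 */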
static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
				  struct fsl_edma_hw_tcd *tcd)
{
	struct fsl_edma_engine *edma = fsl_chan->edma;
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	/*
	 * TCD parameters are stored in struct fsl_edma_hw_tcd in little
	 * endian format. However, we need to load the TCD registers in
	 * big- or little-endian obeying the eDMA engine model endian.
	 */
	edma_writew(edma, 0, &regs->tcd[ch].csr);
	edma_writel(edma, le32_to_cpu(tcd->saddr), &regs->tcd[ch].saddr);
	edma_writel(edma, le32_to_cpu(tcd->daddr), &regs->tcd[ch].daddr);

	edma_writew(edma, le16_to_cpu(tcd->attr), &regs->tcd[ch].attr);
	edma_writew(edma, le16_to_cpu(tcd->soff), &regs->tcd[ch].soff);

	edma_writel(edma, le32_to_cpu(tcd->nbytes), &regs->tcd[ch].nbytes);
	edma_writel(edma, le32_to_cpu(tcd->slast), &regs->tcd[ch].slast);

	edma_writew(edma, le16_to_cpu(tcd->citer), &regs->tcd[ch].citer);
	edma_writew(edma, le16_to_cpu(tcd->biter), &regs->tcd[ch].biter);
	edma_writew(edma, le16_to_cpu(tcd->doff), &regs->tcd[ch].doff);

	edma_writel(edma, le32_to_cpu(tcd->dlast_sga),
		    &regs->tcd[ch].dlast_sga);

	edma_writew(edma, le16_to_cpu(tcd->csr), &regs->tcd[ch].csr);
}

static inline
void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
		       u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
		       u16 biter, u16 doff, u32 dlast_sga, bool major_int,
		       bool disable_req, bool enable_sg)
{
	u16 csr = 0;

	/*
	 * eDMA hardware SGs require the TCDs to be stored in little
	 * endian format irrespective of the register endian model.
	 * So we put the value in little endian in memory, waiting
	 * for fsl_edma_set_tcd_regs doing the swap.
	 */
	tcd->saddr = cpu_to_le32(src);
	tcd->daddr = cpu_to_le32(dst);

	tcd->attr = cpu_to_le16(attr);

	tcd->soff = cpu_to_le16(soff);

	tcd->nbytes = cpu_to_le32(nbytes);
	tcd->slast = cpu_to_le32(slast);

	tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
	tcd->doff = cpu_to_le16(doff);

	tcd->dlast_sga = cpu_to_le32(dlast_sga);

	tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));
	if (major_int)
		csr |= EDMA_TCD_CSR_INT_MAJOR;

	if (disable_req)
		csr |= EDMA_TCD_CSR_D_REQ;

	if (enable_sg)
		csr |= EDMA_TCD_CSR_E_SG;

	tcd->csr = cpu_to_le16(csr);
}

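/*
 * TCDs are allocated from a dma_pool rather than kmalloc'ed because
 * hardware scatter-gather follows the *physical* dlast_sga links, so
 * each TCD needs a stable DMA address (tcd[i].ptcd) alongside its CPU
 * mapping (tcd[i].vtcd).
 */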
static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
						 int sg_len)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = kzalloc(struct_size(fsl_desc, tcd, sg_len), GFP_NOWAIT);
	if (!fsl_desc)
		return NULL;

	fsl_desc->echan = fsl_chan;
	fsl_desc->n_tcds = sg_len;
	for (i = 0; i < sg_len; i++) {
		fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
					GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
		if (!fsl_desc->tcd[i].vtcd)
			goto err;
	}
	return fsl_desc;

err:
	while (--i >= 0)
		dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
			      fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
	return NULL;
}

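/*
 * Cyclic transfers map each period onto one TCD; every TCD raises a
 * major-loop interrupt and its dlast_sga points at the next TCD, with
 * the last one linking back to the first, so the ring runs until the
 * channel is stopped.
 */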
struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	dma_addr_t dma_buf_next;
	int sg_len, i;
	u32 src_addr, dst_addr, last_sg, nbytes;
	u16 soff, doff, iter;

	if (!is_slave_direction(direction))
		return NULL;

	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
		return NULL;

	sg_len = buf_len / period_len;
	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = true;
	fsl_desc->dirn = direction;

	dma_buf_next = dma_addr;
	if (direction == DMA_MEM_TO_DEV) {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
		nbytes = fsl_chan->cfg.dst_addr_width *
			fsl_chan->cfg.dst_maxburst;
	} else {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
		nbytes = fsl_chan->cfg.src_addr_width *
			fsl_chan->cfg.src_maxburst;
	}

	iter = period_len / nbytes;

	for (i = 0; i < sg_len; i++) {
		if (dma_buf_next >= dma_addr + buf_len)
			dma_buf_next = dma_addr;

		/* get next sg's physical address */
		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

		if (direction == DMA_MEM_TO_DEV) {
			src_addr = dma_buf_next;
			dst_addr = fsl_chan->dma_dev_addr;
			soff = fsl_chan->cfg.dst_addr_width;
			doff = 0;
		} else {
			src_addr = fsl_chan->dma_dev_addr;
			dst_addr = dma_buf_next;
			soff = 0;
			doff = fsl_chan->cfg.src_addr_width;
		}

		fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
				  fsl_chan->attr, soff, nbytes, 0, iter,
				  iter, doff, last_sg, true, false, true);
		dma_buf_next += period_len;
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_prep_dma_cyclic);

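/*
 * For scatter-gather, every TCD except the last enables E_SG so the
 * engine chains to the next TCD by itself; the final TCD instead sets
 * INT_MAJOR and D_REQ to raise the completion interrupt and drop the
 * hardware request once the list is done.
 */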
struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	struct scatterlist *sg;
	u32 src_addr, dst_addr, last_sg, nbytes;
	u16 soff, doff, iter;
	int i;

	if (!is_slave_direction(direction))
		return NULL;

	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
		return NULL;

	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = false;
	fsl_desc->dirn = direction;

	if (direction == DMA_MEM_TO_DEV) {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
		nbytes = fsl_chan->cfg.dst_addr_width *
			fsl_chan->cfg.dst_maxburst;
	} else {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
		nbytes = fsl_chan->cfg.src_addr_width *
			fsl_chan->cfg.src_maxburst;
	}

	for_each_sg(sgl, sg, sg_len, i) {
		/* get next sg's physical address */
		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

		if (direction == DMA_MEM_TO_DEV) {
			src_addr = sg_dma_address(sg);
			dst_addr = fsl_chan->dma_dev_addr;
			soff = fsl_chan->cfg.dst_addr_width;
			doff = 0;
		} else {
			src_addr = fsl_chan->dma_dev_addr;
			dst_addr = sg_dma_address(sg);
			soff = 0;
			doff = fsl_chan->cfg.src_addr_width;
		}

		iter = sg_dma_len(sg) / nbytes;
		if (i < sg_len - 1) {
			last_sg = fsl_desc->tcd[(i + 1)].ptcd;
			fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
					  dst_addr, fsl_chan->attr, soff,
					  nbytes, 0, iter, iter, doff, last_sg,
					  false, false, true);
		} else {
			last_sg = 0;
			fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
					  dst_addr, fsl_chan->attr, soff,
					  nbytes, 0, iter, iter, doff, last_sg,
					  true, true, false);
		}
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_prep_slave_sg);

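/*
 * Start the next queued descriptor: load its first TCD into the
 * channel registers and enable the hardware request. Callers hold
 * the vchan lock.
 */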
void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
	struct virt_dma_desc *vdesc;

	vdesc = vchan_next_desc(&fsl_chan->vchan);
	if (!vdesc)
		return;
	fsl_chan->edesc = to_fsl_edma_desc(vdesc);
	fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
	fsl_edma_enable_request(fsl_chan);
	fsl_chan->status = DMA_IN_PROGRESS;
	fsl_chan->idle = false;
}
EXPORT_SYMBOL_GPL(fsl_edma_xfer_desc);

void fsl_edma_issue_pending(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);

	if (unlikely(fsl_chan->pm_state != RUNNING)) {
		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
		/* cannot submit due to suspend */
		return;
	}

	if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
		fsl_edma_xfer_desc(fsl_chan);

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_issue_pending);

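/*
 * The per-channel TCD pool is created with 32-byte alignment, matching
 * the alignment the eDMA engine requires for hardware-linked TCDs.
 */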
int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
				sizeof(struct fsl_edma_hw_tcd),
				32, 0);
	if (!fsl_chan->tcd_pool)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_alloc_chan_resources);

void fsl_edma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	fsl_edma_chan_mux(fsl_chan, 0, false);
	fsl_chan->edesc = NULL;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	fsl_edma_unprep_slave_dma(fsl_chan);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	dma_pool_destroy(fsl_chan->tcd_pool);
	fsl_chan->tcd_pool = NULL;
}
EXPORT_SYMBOL_GPL(fsl_edma_free_chan_resources);

void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
{
	struct fsl_edma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan,
				 &dmadev->channels, vchan.chan.device_node) {
		list_del(&chan->vchan.chan.device_node);
		tasklet_kill(&chan->vchan.task);
	}
}
EXPORT_SYMBOL_GPL(fsl_edma_cleanup_vchan);

/*
 * On the 32-channel Vybrid/mpc577x eDMA version (here called "v1"),
 * register offsets differ from those of the 64-channel ColdFire
 * mcf5441x eDMA (here called "v2").
 *
 * This function sets up the register offsets according to the declared
 * version, so it must be called in xxx_edma_probe() right after setting
 * the edma "version" and "membase" appropriately.
 */
void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
{
	edma->regs.cr = edma->membase + EDMA_CR;
	edma->regs.es = edma->membase + EDMA_ES;
	edma->regs.erql = edma->membase + EDMA_ERQ;
	edma->regs.eeil = edma->membase + EDMA_EEI;

	edma->regs.serq = edma->membase + ((edma->version == v1) ?
			EDMA_SERQ : EDMA64_SERQ);
	edma->regs.cerq = edma->membase + ((edma->version == v1) ?
			EDMA_CERQ : EDMA64_CERQ);
	edma->regs.seei = edma->membase + ((edma->version == v1) ?
			EDMA_SEEI : EDMA64_SEEI);
	edma->regs.ceei = edma->membase + ((edma->version == v1) ?
			EDMA_CEEI : EDMA64_CEEI);
	edma->regs.cint = edma->membase + ((edma->version == v1) ?
			EDMA_CINT : EDMA64_CINT);
	edma->regs.cerr = edma->membase + ((edma->version == v1) ?
			EDMA_CERR : EDMA64_CERR);
	edma->regs.ssrt = edma->membase + ((edma->version == v1) ?
			EDMA_SSRT : EDMA64_SSRT);
	edma->regs.cdne = edma->membase + ((edma->version == v1) ?
			EDMA_CDNE : EDMA64_CDNE);
	edma->regs.intl = edma->membase + ((edma->version == v1) ?
			EDMA_INTR : EDMA64_INTL);
	edma->regs.errl = edma->membase + ((edma->version == v1) ?
			EDMA_ERR : EDMA64_ERRL);

	if (edma->version == v2) {
		edma->regs.erqh = edma->membase + EDMA64_ERQH;
		edma->regs.eeih = edma->membase + EDMA64_EEIH;
		edma->regs.errh = edma->membase + EDMA64_ERRH;
		edma->regs.inth = edma->membase + EDMA64_INTH;
	}

	edma->regs.tcd = edma->membase + EDMA_TCD;
}
EXPORT_SYMBOL_GPL(fsl_edma_setup_regs);

MODULE_LICENSE("GPL v2");