// SPDX-License-Identifier: GPL-2.0+
//
// Actions Semi Owl SoCs DMA driver
//
// Copyright (c) 2014 Actions Semi Inc.
// Author: David Liu <liuwei@actions-semi.com>
//
// Copyright (c) 2018 Linaro Ltd.
// Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/slab.h>
#include "virt-dma.h"

#define OWL_DMA_FRAME_MAX_LENGTH		0xfffff

/* Global DMA Controller Registers */
#define OWL_DMA_IRQ_PD0				0x00
#define OWL_DMA_IRQ_PD1				0x04
#define OWL_DMA_IRQ_PD2				0x08
#define OWL_DMA_IRQ_PD3				0x0C
#define OWL_DMA_IRQ_EN0				0x10
#define OWL_DMA_IRQ_EN1				0x14
#define OWL_DMA_IRQ_EN2				0x18
#define OWL_DMA_IRQ_EN3				0x1C
#define OWL_DMA_SECURE_ACCESS_CTL		0x20
#define OWL_DMA_NIC_QOS				0x24
#define OWL_DMA_DBGSEL				0x28
#define OWL_DMA_IDLE_STAT			0x2C

/* Channel Registers */
#define OWL_DMA_CHAN_BASE(i)			(0x100 + (i) * 0x100)
#define OWL_DMAX_MODE				0x00
#define OWL_DMAX_SOURCE				0x04
#define OWL_DMAX_DESTINATION			0x08
#define OWL_DMAX_FRAME_LEN			0x0C
#define OWL_DMAX_FRAME_CNT			0x10
#define OWL_DMAX_REMAIN_FRAME_CNT		0x14
#define OWL_DMAX_REMAIN_CNT			0x18
#define OWL_DMAX_SOURCE_STRIDE			0x1C
#define OWL_DMAX_DESTINATION_STRIDE		0x20
#define OWL_DMAX_START				0x24
#define OWL_DMAX_PAUSE				0x28
#define OWL_DMAX_CHAINED_CTL			0x2C
#define OWL_DMAX_CONSTANT			0x30
#define OWL_DMAX_LINKLIST_CTL			0x34
#define OWL_DMAX_NEXT_DESCRIPTOR		0x38
#define OWL_DMAX_CURRENT_DESCRIPTOR_NUM		0x3C
#define OWL_DMAX_INT_CTL			0x40
#define OWL_DMAX_INT_STATUS			0x44
#define OWL_DMAX_CURRENT_SOURCE_POINTER		0x48
#define OWL_DMAX_CURRENT_DESTINATION_POINTER	0x4C

/* OWL_DMAX_MODE Bits */
#define OWL_DMA_MODE_TS(x)			(((x) & GENMASK(5, 0)) << 0)
#define OWL_DMA_MODE_ST(x)			(((x) & GENMASK(1, 0)) << 8)
#define OWL_DMA_MODE_ST_DEV			OWL_DMA_MODE_ST(0)
#define OWL_DMA_MODE_ST_DCU			OWL_DMA_MODE_ST(2)
#define OWL_DMA_MODE_ST_SRAM			OWL_DMA_MODE_ST(3)
#define OWL_DMA_MODE_DT(x)			(((x) & GENMASK(1, 0)) << 10)
#define OWL_DMA_MODE_DT_DEV			OWL_DMA_MODE_DT(0)
#define OWL_DMA_MODE_DT_DCU			OWL_DMA_MODE_DT(2)
#define OWL_DMA_MODE_DT_SRAM			OWL_DMA_MODE_DT(3)
#define OWL_DMA_MODE_SAM(x)			(((x) & GENMASK(1, 0)) << 16)
#define OWL_DMA_MODE_SAM_CONST			OWL_DMA_MODE_SAM(0)
#define OWL_DMA_MODE_SAM_INC			OWL_DMA_MODE_SAM(1)
#define OWL_DMA_MODE_SAM_STRIDE			OWL_DMA_MODE_SAM(2)
#define OWL_DMA_MODE_DAM(x)			(((x) & GENMASK(1, 0)) << 18)
#define OWL_DMA_MODE_DAM_CONST			OWL_DMA_MODE_DAM(0)
#define OWL_DMA_MODE_DAM_INC			OWL_DMA_MODE_DAM(1)
#define OWL_DMA_MODE_DAM_STRIDE			OWL_DMA_MODE_DAM(2)
#define OWL_DMA_MODE_PW(x)			(((x) & GENMASK(2, 0)) << 20)
#define OWL_DMA_MODE_CB				BIT(23)
#define OWL_DMA_MODE_NDDBW(x)			(((x) & 0x1) << 28)
#define OWL_DMA_MODE_NDDBW_32BIT		OWL_DMA_MODE_NDDBW(0)
#define OWL_DMA_MODE_NDDBW_8BIT			OWL_DMA_MODE_NDDBW(1)
#define OWL_DMA_MODE_CFE			BIT(29)
#define OWL_DMA_MODE_LME			BIT(30)
#define OWL_DMA_MODE_CME			BIT(31)

/* OWL_DMAX_LINKLIST_CTL Bits */
#define OWL_DMA_LLC_SAV(x)			(((x) & GENMASK(1, 0)) << 8)
#define OWL_DMA_LLC_SAV_INC			OWL_DMA_LLC_SAV(0)
#define OWL_DMA_LLC_SAV_LOAD_NEXT		OWL_DMA_LLC_SAV(1)
#define OWL_DMA_LLC_SAV_LOAD_PREV		OWL_DMA_LLC_SAV(2)
#define OWL_DMA_LLC_DAV(x)			(((x) & GENMASK(1, 0)) << 10)
#define OWL_DMA_LLC_DAV_INC			OWL_DMA_LLC_DAV(0)
#define OWL_DMA_LLC_DAV_LOAD_NEXT		OWL_DMA_LLC_DAV(1)
#define OWL_DMA_LLC_DAV_LOAD_PREV		OWL_DMA_LLC_DAV(2)
#define OWL_DMA_LLC_SUSPEND			BIT(16)

/* OWL_DMAX_INT_CTL Bits */
#define OWL_DMA_INTCTL_BLOCK			BIT(0)
#define OWL_DMA_INTCTL_SUPER_BLOCK		BIT(1)
#define OWL_DMA_INTCTL_FRAME			BIT(2)
#define OWL_DMA_INTCTL_HALF_FRAME		BIT(3)
#define OWL_DMA_INTCTL_LAST_FRAME		BIT(4)

/* OWL_DMAX_INT_STATUS Bits */
#define OWL_DMA_INTSTAT_BLOCK			BIT(0)
#define OWL_DMA_INTSTAT_SUPER_BLOCK		BIT(1)
#define OWL_DMA_INTSTAT_FRAME			BIT(2)
#define OWL_DMA_INTSTAT_HALF_FRAME		BIT(3)
#define OWL_DMA_INTSTAT_LAST_FRAME		BIT(4)

/* Extract the field of @width bits at @shift and move it to @newshift */
#define BIT_FIELD(val, width, shift, newshift)	\
		((((val) >> (shift)) & ((BIT(width)) - 1)) << (newshift))

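/*
 * Worked example: BIT_FIELD(0x30, 2, 4, 0) extracts the two bits at
 * position 4 of 0x30 (value 0x3) and places them at bit 0, giving 0x3.
 */
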
/**
 * struct owl_dma_lli_hw - Hardware link list for dma transfer
 * @next_lli: physical address of the next link list
 * @saddr: source physical address
 * @daddr: destination physical address
 * @flen: frame length
 * @fcnt: frame count
 * @src_stride: source stride
 * @dst_stride: destination stride
 * @ctrla: dma_mode and linklist ctrl config
 * @ctrlb: interrupt config
 * @const_num: data for constant fill
 */
struct owl_dma_lli_hw {
	u32	next_lli;
	u32	saddr;
	u32	daddr;
	u32	flen:20;
	u32	fcnt:12;
	u32	src_stride;
	u32	dst_stride;
	u32	ctrla;
	u32	ctrlb;
	u32	const_num;
};

/**
 * struct owl_dma_lli - Link list for dma transfer
 * @hw: hardware link list
 * @phys: physical address of hardware link list
 * @node: node for txd's lli_list
 */
struct owl_dma_lli {
	struct owl_dma_lli_hw	hw;
	dma_addr_t		phys;
	struct list_head	node;
};

/**
 * struct owl_dma_txd - Wrapper for struct dma_async_tx_descriptor
 * @vd: virtual DMA descriptor
 * @lli_list: link list of lli nodes
 * @cyclic: flag to indicate cyclic transfers
 */
struct owl_dma_txd {
	struct virt_dma_desc	vd;
	struct list_head	lli_list;
	bool			cyclic;
};

/**
 * struct owl_dma_pchan - Holder for the physical channels
 * @id: physical index to this channel
 * @base: virtual memory base for the dma channel
 * @vchan: the virtual channel currently being served by this physical channel
 */
struct owl_dma_pchan {
	u32			id;
	void __iomem		*base;
	struct owl_dma_vchan	*vchan;
};

/**
 * struct owl_dma_vchan - Wrapper for DMA ENGINE channel
 * @vc: wrapped virtual channel
 * @pchan: the physical channel utilized by this channel
 * @txd: active transaction on this channel
 * @cfg: slave configuration for this channel
 * @drq: physical DMA request ID for this channel
 */
struct owl_dma_vchan {
	struct virt_dma_chan	vc;
	struct owl_dma_pchan	*pchan;
	struct owl_dma_txd	*txd;
	struct dma_slave_config	cfg;
	u8			drq;
};

/**
 * struct owl_dma - Holder for the Owl DMA controller
 * @dma: dma engine for this instance
 * @base: virtual memory base for the DMA controller
 * @clk: clock for the DMA controller
 * @lock: a lock to use when changing DMA controller global registers
 * @lli_pool: a pool for the LLI descriptors
 * @irq: interrupt ID for the DMA controller
 * @nr_pchans: the number of physical channels
 * @pchans: array of data for the physical channels
 * @nr_vchans: the number of virtual channels
 * @vchans: array of data for the virtual channels
 */
struct owl_dma {
	struct dma_device	dma;
	void __iomem		*base;
	struct clk		*clk;
	spinlock_t		lock;
	struct dma_pool		*lli_pool;
	int			irq;

	unsigned int		nr_pchans;
	struct owl_dma_pchan	*pchans;

	unsigned int		nr_vchans;
	struct owl_dma_vchan	*vchans;
};

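/* Read-modify-write helper: set or clear the @val bits of a channel register */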
static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
			 u32 val, bool state)
{
	u32 regval;

	regval = readl(pchan->base + reg);

	if (state)
		regval |= val;
	else
		regval &= ~val;

	writel(regval, pchan->base + reg);
}

static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data)
{
	writel(data, pchan->base + reg);
}

static u32 pchan_readl(struct owl_dma_pchan *pchan, u32 reg)
{
	return readl(pchan->base + reg);
}

static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state)
{
	u32 regval;

	regval = readl(od->base + reg);

	if (state)
		regval |= val;
	else
		regval &= ~val;

	writel(regval, od->base + reg);
}

static void dma_writel(struct owl_dma *od, u32 reg, u32 data)
{
	writel(data, od->base + reg);
}

static u32 dma_readl(struct owl_dma *od, u32 reg)
{
	return readl(od->base + reg);
}

static inline struct owl_dma *to_owl_dma(struct dma_device *dd)
{
	return container_of(dd, struct owl_dma, dma);
}

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct owl_dma_vchan *to_owl_vchan(struct dma_chan *chan)
{
	return container_of(chan, struct owl_dma_vchan, vc.chan);
}

static inline struct owl_dma_txd *to_owl_txd(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct owl_dma_txd, vd.tx);
}

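/*
 * Repack the OWL_DMAX_MODE and OWL_DMAX_LINKLIST_CTL register fields
 * into the single ctrla word used by the hardware link list; e.g. the
 * 6-bit transfer source field at bit 0 of the mode value lands at
 * bit 10 of ctrla.
 */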
static inline u32 llc_hw_ctrla(u32 mode, u32 llc_ctl)
{
	u32 ctl;

	ctl = BIT_FIELD(mode, 4, 28, 28) |
	      BIT_FIELD(mode, 8, 16, 20) |
	      BIT_FIELD(mode, 4, 8, 16) |
	      BIT_FIELD(mode, 6, 0, 10) |
	      BIT_FIELD(llc_ctl, 2, 10, 8) |
	      BIT_FIELD(llc_ctl, 2, 8, 6);

	return ctl;
}

static inline u32 llc_hw_ctrlb(u32 int_ctl)
{
	u32 ctl;

	ctl = BIT_FIELD(int_ctl, 7, 0, 18);

	return ctl;
}

static void owl_dma_free_lli(struct owl_dma *od,
			     struct owl_dma_lli *lli)
{
	list_del(&lli->node);
	dma_pool_free(od->lli_pool, lli, lli->phys);
}

static struct owl_dma_lli *owl_dma_alloc_lli(struct owl_dma *od)
{
	struct owl_dma_lli *lli;
	dma_addr_t phys;

	lli = dma_pool_alloc(od->lli_pool, GFP_NOWAIT, &phys);
	if (!lli)
		return NULL;

	INIT_LIST_HEAD(&lli->node);
	lli->phys = phys;

	return lli;
}

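/*
 * Chain @next after @prev: point prev's hardware next_lli at it and set
 * the LME (link-list mode) bit in prev's ctrla. When closing a cyclic
 * list (@is_cyclic), the node is only linked in hardware and not added
 * to the software lli_list again.
 */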
static struct owl_dma_lli *owl_dma_add_lli(struct owl_dma_txd *txd,
					   struct owl_dma_lli *prev,
					   struct owl_dma_lli *next,
					   bool is_cyclic)
{
	if (!is_cyclic)
		list_add_tail(&next->node, &txd->lli_list);

	if (prev) {
		prev->hw.next_lli = next->phys;
		prev->hw.ctrla |= llc_hw_ctrla(OWL_DMA_MODE_LME, 0);
	}

	return next;
}

static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,
				  struct owl_dma_lli *lli,
				  dma_addr_t src, dma_addr_t dst,
				  u32 len, enum dma_transfer_direction dir,
				  struct dma_slave_config *sconfig,
				  bool is_cyclic)
{
	struct owl_dma_lli_hw *hw = &lli->hw;
	u32 mode;

	mode = OWL_DMA_MODE_PW(0);

	switch (dir) {
	case DMA_MEM_TO_MEM:
		mode |= OWL_DMA_MODE_TS(0) | OWL_DMA_MODE_ST_DCU |
			OWL_DMA_MODE_DT_DCU | OWL_DMA_MODE_SAM_INC |
			OWL_DMA_MODE_DAM_INC;

		break;
	case DMA_MEM_TO_DEV:
		mode |= OWL_DMA_MODE_TS(vchan->drq)
			| OWL_DMA_MODE_ST_DCU | OWL_DMA_MODE_DT_DEV
			| OWL_DMA_MODE_SAM_INC | OWL_DMA_MODE_DAM_CONST;

		/*
		 * Hardware only supports 32bit and 8bit buswidth. Since the
		 * default is 32bit, select 8bit only when requested.
		 */
		if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE)
			mode |= OWL_DMA_MODE_NDDBW_8BIT;

		break;
	case DMA_DEV_TO_MEM:
		mode |= OWL_DMA_MODE_TS(vchan->drq)
			| OWL_DMA_MODE_ST_DEV | OWL_DMA_MODE_DT_DCU
			| OWL_DMA_MODE_SAM_CONST | OWL_DMA_MODE_DAM_INC;

		/*
		 * Hardware only supports 32bit and 8bit buswidth. Since the
		 * default is 32bit, select 8bit only when requested.
		 */
		if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE)
			mode |= OWL_DMA_MODE_NDDBW_8BIT;

		break;
	default:
		return -EINVAL;
	}

	hw->next_lli = 0; /* One link list by default */
	hw->saddr = src;
	hw->daddr = dst;

	hw->fcnt = 1; /* Frame count fixed as 1 */
	hw->flen = len; /* Max frame length is 1MiB - 1 (OWL_DMA_FRAME_MAX_LENGTH) */
	hw->src_stride = 0;
	hw->dst_stride = 0;
	hw->ctrla = llc_hw_ctrla(mode,
				 OWL_DMA_LLC_SAV_LOAD_NEXT |
				 OWL_DMA_LLC_DAV_LOAD_NEXT);

	if (is_cyclic)
		hw->ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_BLOCK);
	else
		hw->ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_SUPER_BLOCK);

	return 0;
}

static struct owl_dma_pchan *owl_dma_get_pchan(struct owl_dma *od,
					       struct owl_dma_vchan *vchan)
{
	struct owl_dma_pchan *pchan = NULL;
	unsigned long flags;
	int i;

	for (i = 0; i < od->nr_pchans; i++) {
		pchan = &od->pchans[i];

		spin_lock_irqsave(&od->lock, flags);
		if (!pchan->vchan) {
			pchan->vchan = vchan;
			spin_unlock_irqrestore(&od->lock, flags);
			break;
		}

		spin_unlock_irqrestore(&od->lock, flags);

		/* This pchan is busy; return NULL if none is left */
		pchan = NULL;
	}

	return pchan;
}

static int owl_dma_pchan_busy(struct owl_dma *od, struct owl_dma_pchan *pchan)
{
	unsigned int val;

	val = dma_readl(od, OWL_DMA_IDLE_STAT);

	return !(val & (1 << pchan->id));
}

static void owl_dma_terminate_pchan(struct owl_dma *od,
				    struct owl_dma_pchan *pchan)
{
	unsigned long flags;
	u32 irq_pd;

	pchan_writel(pchan, OWL_DMAX_START, 0);
	pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);

	spin_lock_irqsave(&od->lock, flags);
	dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), false);

	irq_pd = dma_readl(od, OWL_DMA_IRQ_PD0);
	if (irq_pd & (1 << pchan->id)) {
		dev_warn(od->dma.dev,
			 "terminating pchan %d that still has pending irq\n",
			 pchan->id);
		dma_writel(od, OWL_DMA_IRQ_PD0, (1 << pchan->id));
	}

	pchan->vchan = NULL;

	spin_unlock_irqrestore(&od->lock, flags);
}

static void owl_dma_pause_pchan(struct owl_dma_pchan *pchan)
{
	pchan_writel(pchan, OWL_DMAX_PAUSE, 1);
}

static void owl_dma_resume_pchan(struct owl_dma_pchan *pchan)
{
	pchan_writel(pchan, OWL_DMAX_PAUSE, 0);
}

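/*
 * Cyclic descriptors interrupt on every block so that each completed
 * period can be signalled; one-shot descriptors only interrupt on the
 * super block, i.e. once the whole transfer has finished.
 */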
static int owl_dma_start_next_txd(struct owl_dma_vchan *vchan)
{
	struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
	struct virt_dma_desc *vd = vchan_next_desc(&vchan->vc);
	struct owl_dma_pchan *pchan = vchan->pchan;
	struct owl_dma_txd *txd = to_owl_txd(&vd->tx);
	struct owl_dma_lli *lli;
	unsigned long flags;
	u32 int_ctl;

	list_del(&vd->node);

	vchan->txd = txd;

	/* Wait for channel inactive */
	while (owl_dma_pchan_busy(od, pchan))
		cpu_relax();

	lli = list_first_entry(&txd->lli_list,
			       struct owl_dma_lli, node);

	if (txd->cyclic)
		int_ctl = OWL_DMA_INTCTL_BLOCK;
	else
		int_ctl = OWL_DMA_INTCTL_SUPER_BLOCK;

	pchan_writel(pchan, OWL_DMAX_MODE, OWL_DMA_MODE_LME);
	pchan_writel(pchan, OWL_DMAX_LINKLIST_CTL,
		     OWL_DMA_LLC_SAV_LOAD_NEXT | OWL_DMA_LLC_DAV_LOAD_NEXT);
	pchan_writel(pchan, OWL_DMAX_NEXT_DESCRIPTOR, lli->phys);
	pchan_writel(pchan, OWL_DMAX_INT_CTL, int_ctl);

	/* Clear IRQ status for this pchan */
	pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);

	spin_lock_irqsave(&od->lock, flags);

	dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), true);

	spin_unlock_irqrestore(&od->lock, flags);

	dev_dbg(chan2dev(&vchan->vc.chan), "starting pchan %d\n", pchan->id);

	/* Start DMA transfer for this pchan */
	pchan_writel(pchan, OWL_DMAX_START, 0x1);

	return 0;
}

static void owl_dma_phy_free(struct owl_dma *od, struct owl_dma_vchan *vchan)
{
	/* Ensure that the physical channel is stopped */
	owl_dma_terminate_pchan(od, vchan->pchan);

	vchan->pchan = NULL;
}

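/*
 * The controller latches per-channel status in OWL_DMAX_INT_STATUS and
 * a global pending word in OWL_DMA_IRQ_PD0. A channel interrupt can be
 * latched without its global pending bit, so after clearing PD0 the
 * handler re-checks every channel and folds any missed interrupt back
 * into the pending mask before completing descriptors.
 */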
static irqreturn_t owl_dma_interrupt(int irq, void *dev_id)
{
	struct owl_dma *od = dev_id;
	struct owl_dma_vchan *vchan;
	struct owl_dma_pchan *pchan;
	unsigned long pending;
	int i;
	unsigned int global_irq_pending, chan_irq_pending;

	spin_lock(&od->lock);

	pending = dma_readl(od, OWL_DMA_IRQ_PD0);

	/* Clear IRQ status for each pchan */
	for_each_set_bit(i, &pending, od->nr_pchans) {
		pchan = &od->pchans[i];
		pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);
	}

	/* Clear pending IRQ */
	dma_writel(od, OWL_DMA_IRQ_PD0, pending);

	/* Check missed pending IRQ */
	for (i = 0; i < od->nr_pchans; i++) {
		pchan = &od->pchans[i];
		chan_irq_pending = pchan_readl(pchan, OWL_DMAX_INT_CTL) &
				   pchan_readl(pchan, OWL_DMAX_INT_STATUS);

		/* Dummy read to ensure OWL_DMA_IRQ_PD0 value is updated */
		dma_readl(od, OWL_DMA_IRQ_PD0);

		global_irq_pending = dma_readl(od, OWL_DMA_IRQ_PD0);

		if (chan_irq_pending && !(global_irq_pending & BIT(i))) {
			dev_dbg(od->dma.dev,
				"global and channel IRQ pending match err\n");

			/* Clear IRQ status for this pchan */
			pchan_update(pchan, OWL_DMAX_INT_STATUS,
				     0xff, false);

			/* Update global IRQ pending */
			pending |= BIT(i);
		}
	}

	spin_unlock(&od->lock);

	for_each_set_bit(i, &pending, od->nr_pchans) {
		struct owl_dma_txd *txd;

		pchan = &od->pchans[i];

		vchan = pchan->vchan;
		if (!vchan) {
			dev_warn(od->dma.dev, "no vchan attached on pchan %d\n",
				 pchan->id);
			continue;
		}

		spin_lock(&vchan->vc.lock);

		txd = vchan->txd;
		if (txd) {
			vchan->txd = NULL;

			vchan_cookie_complete(&txd->vd);

			/*
			 * Start the next descriptor (if any),
			 * otherwise free this channel.
			 */
			if (vchan_next_desc(&vchan->vc))
				owl_dma_start_next_txd(vchan);
			else
				owl_dma_phy_free(od, vchan);
		}

		spin_unlock(&vchan->vc.lock);
	}

	return IRQ_HANDLED;
}

static void owl_dma_free_txd(struct owl_dma *od, struct owl_dma_txd *txd)
{
	struct owl_dma_lli *lli, *_lli;

	if (unlikely(!txd))
		return;

	list_for_each_entry_safe(lli, _lli, &txd->lli_list, node)
		owl_dma_free_lli(od, lli);

	kfree(txd);
}

static void owl_dma_desc_free(struct virt_dma_desc *vd)
{
	struct owl_dma *od = to_owl_dma(vd->tx.chan->device);
	struct owl_dma_txd *txd = to_owl_txd(&vd->tx);

	owl_dma_free_txd(od, txd);
}

static int owl_dma_terminate_all(struct dma_chan *chan)
{
	struct owl_dma *od = to_owl_dma(chan->device);
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&vchan->vc.lock, flags);

	if (vchan->pchan)
		owl_dma_phy_free(od, vchan);

	if (vchan->txd) {
		owl_dma_desc_free(&vchan->txd->vd);
		vchan->txd = NULL;
	}

	vchan_get_all_descriptors(&vchan->vc, &head);

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	vchan_dma_desc_free_list(&vchan->vc, &head);

	return 0;
}

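/*
 * The hardware only implements 32-bit (default) and 8-bit bus widths
 * (see owl_dma_cfg_lli()), so 8-byte slave widths can be rejected
 * outright at configuration time.
 */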
static int owl_dma_config(struct dma_chan *chan,
			  struct dma_slave_config *config)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);

	/* Reject definitely invalid configurations */
	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&vchan->cfg, config, sizeof(struct dma_slave_config));

	return 0;
}

static int owl_dma_pause(struct dma_chan *chan)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	unsigned long flags;

	/* Nothing to pause without an active pchan (mirrors owl_dma_resume) */
	if (!vchan->pchan && !vchan->txd)
		return 0;

	spin_lock_irqsave(&vchan->vc.lock, flags);

	owl_dma_pause_pchan(vchan->pchan);

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	return 0;
}

static int owl_dma_resume(struct dma_chan *chan)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	unsigned long flags;

	if (!vchan->pchan && !vchan->txd)
		return 0;

	dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc);

	spin_lock_irqsave(&vchan->vc.lock, flags);

	owl_dma_resume_pchan(vchan->pchan);

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	return 0;
}

static u32 owl_dma_getbytes_chan(struct owl_dma_vchan *vchan)
{
	struct owl_dma_pchan *pchan;
	struct owl_dma_txd *txd;
	struct owl_dma_lli *lli;
	unsigned int next_lli_phy;
	size_t bytes;

	pchan = vchan->pchan;
	txd = vchan->txd;

	if (!pchan || !txd)
		return 0;

	/* Get remaining count of the current node in the link list */
	bytes = pchan_readl(pchan, OWL_DMAX_REMAIN_CNT);

	/* Loop through the preceding nodes to get total remaining bytes */
	if (pchan_readl(pchan, OWL_DMAX_MODE) & OWL_DMA_MODE_LME) {
		next_lli_phy = pchan_readl(pchan, OWL_DMAX_NEXT_DESCRIPTOR);
		list_for_each_entry(lli, &txd->lli_list, node) {
			/* Start from the next active node */
			if (lli->phys == next_lli_phy) {
				list_for_each_entry(lli, &txd->lli_list, node)
					bytes += lli->hw.flen;
				break;
			}
		}
	}

	return bytes;
}

static enum dma_status owl_dma_tx_status(struct dma_chan *chan,
					 dma_cookie_t cookie,
					 struct dma_tx_state *state)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	struct owl_dma_lli *lli;
	struct virt_dma_desc *vd;
	struct owl_dma_txd *txd;
	enum dma_status ret;
	unsigned long flags;
	size_t bytes = 0;

	ret = dma_cookie_status(chan, cookie, state);
	if (ret == DMA_COMPLETE || !state)
		return ret;

	spin_lock_irqsave(&vchan->vc.lock, flags);

	vd = vchan_find_desc(&vchan->vc, cookie);
	if (vd) {
		txd = to_owl_txd(&vd->tx);
		list_for_each_entry(lli, &txd->lli_list, node)
			bytes += lli->hw.flen;
	} else {
		bytes = owl_dma_getbytes_chan(vchan);
	}

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	dma_set_residue(state, bytes);

	return ret;
}

static void owl_dma_phy_alloc_and_start(struct owl_dma_vchan *vchan)
{
	struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
	struct owl_dma_pchan *pchan;

	pchan = owl_dma_get_pchan(od, vchan);
	if (!pchan)
		return;

	dev_dbg(od->dma.dev, "allocated pchan %d\n", pchan->id);

	vchan->pchan = pchan;
	owl_dma_start_next_txd(vchan);
}

static void owl_dma_issue_pending(struct dma_chan *chan)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&vchan->vc.lock, flags);
	if (vchan_issue_pending(&vchan->vc)) {
		if (!vchan->pchan)
			owl_dma_phy_alloc_and_start(vchan);
	}
	spin_unlock_irqrestore(&vchan->vc.lock, flags);
}

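/*
 * Transfers longer than OWL_DMA_FRAME_MAX_LENGTH are split into a chain
 * of link-list nodes, each carrying one frame of at most 0xfffff bytes.
 */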
static struct dma_async_tx_descriptor
		*owl_dma_prep_memcpy(struct dma_chan *chan,
				     dma_addr_t dst, dma_addr_t src,
				     size_t len, unsigned long flags)
{
	struct owl_dma *od = to_owl_dma(chan->device);
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	struct owl_dma_txd *txd;
	struct owl_dma_lli *lli, *prev = NULL;
	size_t offset, bytes;
	int ret;

	if (!len)
		return NULL;

	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
	if (!txd)
		return NULL;

	INIT_LIST_HEAD(&txd->lli_list);

	/* Process the transfer as frame by frame */
	for (offset = 0; offset < len; offset += bytes) {
		lli = owl_dma_alloc_lli(od);
		if (!lli) {
			dev_warn(chan2dev(chan), "failed to allocate lli\n");
			goto err_txd_free;
		}

		bytes = min_t(size_t, (len - offset), OWL_DMA_FRAME_MAX_LENGTH);

		ret = owl_dma_cfg_lli(vchan, lli, src + offset, dst + offset,
				      bytes, DMA_MEM_TO_MEM,
				      &vchan->cfg, txd->cyclic);
		if (ret) {
			dev_warn(chan2dev(chan), "failed to config lli\n");
			goto err_txd_free;
		}

		prev = owl_dma_add_lli(txd, prev, lli, false);
	}

	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

err_txd_free:
	owl_dma_free_txd(od, txd);
	return NULL;
}

static struct dma_async_tx_descriptor
		*owl_dma_prep_slave_sg(struct dma_chan *chan,
				       struct scatterlist *sgl,
				       unsigned int sg_len,
				       enum dma_transfer_direction dir,
				       unsigned long flags, void *context)
{
	struct owl_dma *od = to_owl_dma(chan->device);
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct owl_dma_txd *txd;
	struct owl_dma_lli *lli, *prev = NULL;
	struct scatterlist *sg;
	dma_addr_t addr, src = 0, dst = 0;
	size_t len;
	int ret, i;

	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
	if (!txd)
		return NULL;

	INIT_LIST_HEAD(&txd->lli_list);

	for_each_sg(sgl, sg, sg_len, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if (len > OWL_DMA_FRAME_MAX_LENGTH) {
			dev_err(od->dma.dev,
				"frame length exceeds max supported length\n");
			goto err_txd_free;
		}

		lli = owl_dma_alloc_lli(od);
		if (!lli) {
			dev_err(chan2dev(chan), "failed to allocate lli\n");
			goto err_txd_free;
		}

		if (dir == DMA_MEM_TO_DEV) {
			src = addr;
			dst = sconfig->dst_addr;
		} else {
			src = sconfig->src_addr;
			dst = addr;
		}

		ret = owl_dma_cfg_lli(vchan, lli, src, dst, len, dir, sconfig,
				      txd->cyclic);
		if (ret) {
			dev_warn(chan2dev(chan), "failed to config lli\n");
			goto err_txd_free;
		}

		prev = owl_dma_add_lli(txd, prev, lli, false);
	}

	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

err_txd_free:
	owl_dma_free_txd(od, txd);

	return NULL;
}

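/*
 * A cyclic descriptor is built as one link-list node per period; the
 * final owl_dma_add_lli() call links the last node back to the first,
 * closing the hardware ring.
 */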
static struct dma_async_tx_descriptor
		*owl_prep_dma_cyclic(struct dma_chan *chan,
				     dma_addr_t buf_addr, size_t buf_len,
				     size_t period_len,
				     enum dma_transfer_direction dir,
				     unsigned long flags)
{
	struct owl_dma *od = to_owl_dma(chan->device);
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct owl_dma_txd *txd;
	struct owl_dma_lli *lli, *prev = NULL, *first = NULL;
	dma_addr_t src = 0, dst = 0;
	unsigned int periods = buf_len / period_len;
	int ret, i;

	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
	if (!txd)
		return NULL;

	INIT_LIST_HEAD(&txd->lli_list);
	txd->cyclic = true;

	for (i = 0; i < periods; i++) {
		lli = owl_dma_alloc_lli(od);
		if (!lli) {
			dev_warn(chan2dev(chan), "failed to allocate lli\n");
			goto err_txd_free;
		}

		if (dir == DMA_MEM_TO_DEV) {
			src = buf_addr + (period_len * i);
			dst = sconfig->dst_addr;
		} else if (dir == DMA_DEV_TO_MEM) {
			src = sconfig->src_addr;
			dst = buf_addr + (period_len * i);
		}

		ret = owl_dma_cfg_lli(vchan, lli, src, dst, period_len,
				      dir, sconfig, txd->cyclic);
		if (ret) {
			dev_warn(chan2dev(chan), "failed to config lli\n");
			goto err_txd_free;
		}

		if (!first)
			first = lli;

		prev = owl_dma_add_lli(txd, prev, lli, false);
	}

	/* close the cyclic list */
	owl_dma_add_lli(txd, prev, first, true);

	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

err_txd_free:
	owl_dma_free_txd(od, txd);

	return NULL;
}

static void owl_dma_free_chan_resources(struct dma_chan *chan)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);

	/* Ensure all queued descriptors are freed */
	vchan_free_chan_resources(&vchan->vc);
}

static inline void owl_dma_free(struct owl_dma *od)
{
	struct owl_dma_vchan *vchan = NULL;
	struct owl_dma_vchan *next;

	list_for_each_entry_safe(vchan,
				 next, &od->dma.channels, vc.chan.device_node) {
		list_del(&vchan->vc.chan.device_node);
		tasklet_kill(&vchan->vc.task);
	}
}

static struct dma_chan *owl_dma_of_xlate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	struct owl_dma *od = ofdma->of_dma_data;
	struct owl_dma_vchan *vchan;
	struct dma_chan *chan;
	u8 drq = dma_spec->args[0];

	if (drq > od->nr_vchans)
		return NULL;

	chan = dma_get_any_slave_channel(&od->dma);
	if (!chan)
		return NULL;

	vchan = to_owl_vchan(chan);
	vchan->drq = drq;

	return chan;
}

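/*
 * Hypothetical consumer node for illustration, assuming the single-cell
 * specifier consumed by owl_dma_of_xlate() above (the cell being the
 * DRQ/request line number):
 *
 *	mmc0: mmc@e0330000 {
 *		...
 *		dmas = <&dma 2>;
 *		dma-names = "mmc";
 *	};
 */
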
static int owl_dma_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct owl_dma *od;
	int ret, i, nr_channels, nr_requests;

	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	od->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(od->base))
		return PTR_ERR(od->base);

	ret = of_property_read_u32(np, "dma-channels", &nr_channels);
	if (ret) {
		dev_err(&pdev->dev, "can't get dma-channels\n");
		return ret;
	}

	ret = of_property_read_u32(np, "dma-requests", &nr_requests);
	if (ret) {
		dev_err(&pdev->dev, "can't get dma-requests\n");
		return ret;
	}

	dev_info(&pdev->dev, "dma-channels %d, dma-requests %d\n",
		 nr_channels, nr_requests);

	od->nr_pchans = nr_channels;
	od->nr_vchans = nr_requests;

	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);

	platform_set_drvdata(pdev, od);
	spin_lock_init(&od->lock);

	dma_cap_set(DMA_MEMCPY, od->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, od->dma.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->dma.cap_mask);

	od->dma.dev = &pdev->dev;
	od->dma.device_free_chan_resources = owl_dma_free_chan_resources;
	od->dma.device_tx_status = owl_dma_tx_status;
	od->dma.device_issue_pending = owl_dma_issue_pending;
	od->dma.device_prep_dma_memcpy = owl_dma_prep_memcpy;
	od->dma.device_prep_slave_sg = owl_dma_prep_slave_sg;
	od->dma.device_prep_dma_cyclic = owl_prep_dma_cyclic;
	od->dma.device_config = owl_dma_config;
	od->dma.device_pause = owl_dma_pause;
	od->dma.device_resume = owl_dma_resume;
	od->dma.device_terminate_all = owl_dma_terminate_all;
	od->dma.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	od->dma.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	od->dma.directions = BIT(DMA_MEM_TO_MEM);
	od->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	INIT_LIST_HEAD(&od->dma.channels);

	od->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(od->clk)) {
		dev_err(&pdev->dev, "unable to get clock\n");
		return PTR_ERR(od->clk);
	}

	/*
	 * Even though the DMA controller is capable of generating 4
	 * IRQs for the DMA priority feature, we only use 1 IRQ for
	 * simplification.
	 */
	od->irq = platform_get_irq(pdev, 0);
	if (od->irq < 0)
		return od->irq;

	ret = devm_request_irq(&pdev->dev, od->irq, owl_dma_interrupt, 0,
			       dev_name(&pdev->dev), od);
	if (ret) {
		dev_err(&pdev->dev, "unable to request IRQ\n");
		return ret;
	}

	/* Init physical channel */
	od->pchans = devm_kcalloc(&pdev->dev, od->nr_pchans,
				  sizeof(struct owl_dma_pchan), GFP_KERNEL);
	if (!od->pchans)
		return -ENOMEM;

	for (i = 0; i < od->nr_pchans; i++) {
		struct owl_dma_pchan *pchan = &od->pchans[i];

		pchan->id = i;
		pchan->base = od->base + OWL_DMA_CHAN_BASE(i);
	}

	/* Init virtual channel */
	od->vchans = devm_kcalloc(&pdev->dev, od->nr_vchans,
				  sizeof(struct owl_dma_vchan), GFP_KERNEL);
	if (!od->vchans)
		return -ENOMEM;

	for (i = 0; i < od->nr_vchans; i++) {
		struct owl_dma_vchan *vchan = &od->vchans[i];

		vchan->vc.desc_free = owl_dma_desc_free;
		vchan_init(&vchan->vc, &od->dma);
	}

	/* Create a pool of consistent memory blocks for hardware descriptors */
	od->lli_pool = dma_pool_create(dev_name(od->dma.dev), od->dma.dev,
				       sizeof(struct owl_dma_lli),
				       __alignof__(struct owl_dma_lli),
				       0);
	if (!od->lli_pool) {
		dev_err(&pdev->dev, "unable to allocate DMA descriptor pool\n");
		return -ENOMEM;
	}

	clk_prepare_enable(od->clk);

	ret = dma_async_device_register(&od->dma);
	if (ret) {
		dev_err(&pdev->dev, "failed to register DMA engine device\n");
		goto err_pool_free;
	}

	/* Device-tree DMA controller registration */
	ret = of_dma_controller_register(pdev->dev.of_node,
					 owl_dma_of_xlate, od);
	if (ret) {
		dev_err(&pdev->dev, "of_dma_controller_register failed\n");
		goto err_dma_unregister;
	}

	return 0;

err_dma_unregister:
	dma_async_device_unregister(&od->dma);
err_pool_free:
	clk_disable_unprepare(od->clk);
	dma_pool_destroy(od->lli_pool);

	return ret;
}

static int owl_dma_remove(struct platform_device *pdev)
{
	struct owl_dma *od = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&od->dma);

	/* Mask all interrupts for this execution environment */
	dma_writel(od, OWL_DMA_IRQ_EN0, 0x0);

	/* Make sure we won't have any further interrupts */
	devm_free_irq(od->dma.dev, od->irq, od);

	owl_dma_free(od);

	clk_disable_unprepare(od->clk);

	return 0;
}

static const struct of_device_id owl_dma_match[] = {
	{ .compatible = "actions,s900-dma", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, owl_dma_match);

static struct platform_driver owl_dma_driver = {
	.probe	= owl_dma_probe,
	.remove	= owl_dma_remove,
	.driver = {
		.name = "dma-owl",
		.of_match_table = of_match_ptr(owl_dma_match),
	},
};

static int owl_dma_init(void)
{
	return platform_driver_register(&owl_dma_driver);
}
subsys_initcall(owl_dma_init);

static void __exit owl_dma_exit(void)
{
	platform_driver_unregister(&owl_dma_driver);
}
module_exit(owl_dma_exit);

MODULE_AUTHOR("David Liu <liuwei@actions-semi.com>");
MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
MODULE_DESCRIPTION("Actions Semi Owl SoCs DMA driver");
MODULE_LICENSE("GPL");