// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver For Marvell Two-channel DMA Engine
 *
 * Copyright: Marvell International Ltd.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/dma-mmp_tdma.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#include "dmaengine.h"

/*
 * Two-Channel DMA registers
 */
#define TDBCR		0x00	/* Byte Count */
#define TDSAR		0x10	/* Src Addr */
#define TDDAR		0x20	/* Dst Addr */
#define TDNDPR		0x30	/* Next Desc */
#define TDCR		0x40	/* Control */
#define TDCP		0x60	/* Priority */
#define TDCDPR		0x70	/* Current Desc */
#define TDIMR		0x80	/* Int Mask */
#define TDISR		0xa0	/* Int Status */

/* Two-Channel DMA Control Register */
#define TDCR_SSZ_8_BITS		(0x0 << 22)	/* Sample Size */
#define TDCR_SSZ_12_BITS	(0x1 << 22)
#define TDCR_SSZ_16_BITS	(0x2 << 22)
#define TDCR_SSZ_20_BITS	(0x3 << 22)
#define TDCR_SSZ_24_BITS	(0x4 << 22)
#define TDCR_SSZ_32_BITS	(0x5 << 22)
#define TDCR_SSZ_SHIFT		(0x1 << 22)
#define TDCR_SSZ_MASK		(0x7 << 22)
#define TDCR_SSPMOD		(0x1 << 21)	/* SSP MOD */
#define TDCR_ABR		(0x1 << 20)	/* Channel Abort */
#define TDCR_CDE		(0x1 << 17)	/* Close Desc Enable */
#define TDCR_PACKMOD		(0x1 << 16)	/* Pack Mode (ADMA Only) */
#define TDCR_CHANACT		(0x1 << 14)	/* Channel Active */
#define TDCR_FETCHND		(0x1 << 13)	/* Fetch Next Desc */
#define TDCR_CHANEN		(0x1 << 12)	/* Channel Enable */
#define TDCR_INTMODE		(0x1 << 10)	/* Interrupt Mode */
#define TDCR_CHAINMOD		(0x1 << 9)	/* Chain Mode */
#define TDCR_BURSTSZ_MSK	(0x7 << 6)	/* Burst Size */
#define TDCR_BURSTSZ_4B		(0x0 << 6)
#define TDCR_BURSTSZ_8B		(0x1 << 6)
#define TDCR_BURSTSZ_16B	(0x3 << 6)
#define TDCR_BURSTSZ_32B	(0x6 << 6)
#define TDCR_BURSTSZ_64B	(0x7 << 6)
#define TDCR_BURSTSZ_SQU_1B	(0x5 << 6)
#define TDCR_BURSTSZ_SQU_2B	(0x6 << 6)
#define TDCR_BURSTSZ_SQU_4B	(0x0 << 6)
#define TDCR_BURSTSZ_SQU_8B	(0x1 << 6)
#define TDCR_BURSTSZ_SQU_16B	(0x3 << 6)
#define TDCR_BURSTSZ_SQU_32B	(0x7 << 6)
#define TDCR_BURSTSZ_128B	(0x5 << 6)
#define TDCR_DSTDIR_MSK		(0x3 << 4)	/* Dst Direction */
#define TDCR_DSTDIR_ADDR_HOLD	(0x2 << 4)	/* Dst Addr Hold */
#define TDCR_DSTDIR_ADDR_INC	(0x0 << 4)	/* Dst Addr Increment */
#define TDCR_SRCDIR_MSK		(0x3 << 2)	/* Src Direction */
#define TDCR_SRCDIR_ADDR_HOLD	(0x2 << 2)	/* Src Addr Hold */
#define TDCR_SRCDIR_ADDR_INC	(0x0 << 2)	/* Src Addr Increment */
#define TDCR_DSTDESCCONT	(0x1 << 1)
#define TDCR_SRCDESTCONT	(0x1 << 0)

/* Two-Channel DMA Int Mask Register */
#define TDIMR_COMP		(0x1 << 0)

/* Two-Channel DMA Int Status Register */
#define TDISR_COMP		(0x1 << 0)

/*
 * Two-Channel DMA Descriptor Struct
 * NOTE: desc's buf must be aligned to 16 bytes.
 */
struct mmp_tdma_desc {
	u32 byte_cnt;
	u32 src_addr;
	u32 dst_addr;
	u32 nxt_desc;
};

enum mmp_tdma_type {
	MMP_AUD_TDMA = 0,
	PXA910_SQU,
};

#define TDMA_MAX_XFER_BYTES    SZ_64K

struct mmp_tdma_chan {
	struct device *dev;
	struct dma_chan chan;
	struct dma_async_tx_descriptor desc;
	struct tasklet_struct tasklet;

	struct mmp_tdma_desc *desc_arr;
	dma_addr_t desc_arr_phys;
	int desc_num;
	enum dma_transfer_direction dir;
	dma_addr_t dev_addr;
	u32 burst_sz;
	enum dma_slave_buswidth buswidth;
	enum dma_status status;
	struct dma_slave_config slave_config;

	int idx;
	enum mmp_tdma_type type;
	int irq;
	void __iomem *reg_base;

	size_t buf_len;
	size_t period_len;
	size_t pos;

	struct gen_pool *pool;
};

#define TDMA_CHANNEL_NUM 2
struct mmp_tdma_device {
	struct device *dev;
	void __iomem *base;
	struct dma_device device;
	struct mmp_tdma_chan *tdmac[TDMA_CHANNEL_NUM];
};

#define to_mmp_tdma_chan(dchan) container_of(dchan, struct mmp_tdma_chan, chan)

static int mmp_tdma_config_write(struct dma_chan *chan,
				 enum dma_transfer_direction dir,
				 struct dma_slave_config *dmaengine_cfg);
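
/*
 * Point the controller at the first descriptor (TDNDPR) and request a
 * fetch; the channel itself is enabled separately via TDCR_CHANEN.
 */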
static void mmp_tdma_chan_set_desc(struct mmp_tdma_chan *tdmac, dma_addr_t phys)
{
	writel(phys, tdmac->reg_base + TDNDPR);
	writel(readl(tdmac->reg_base + TDCR) | TDCR_FETCHND,
					tdmac->reg_base + TDCR);
}
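
/* Mask or unmask the transfer-complete interrupt for this channel. */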
static void mmp_tdma_enable_irq(struct mmp_tdma_chan *tdmac, bool enable)
{
	if (enable)
		writel(TDIMR_COMP, tdmac->reg_base + TDIMR);
	else
		writel(0, tdmac->reg_base + TDIMR);
}

static void mmp_tdma_enable_chan(struct mmp_tdma_chan *tdmac)
{
	/* enable dma chan */
	writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN,
					tdmac->reg_base + TDCR);
	tdmac->status = DMA_IN_PROGRESS;
}
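
/* Abort any transfer in flight and stop the channel. */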
static int mmp_tdma_disable_chan(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
	u32 tdcr;

	tdcr = readl(tdmac->reg_base + TDCR);
	tdcr |= TDCR_ABR;
	tdcr &= ~TDCR_CHANEN;
	writel(tdcr, tdmac->reg_base + TDCR);

	tdmac->status = DMA_COMPLETE;

	return 0;
}

static int mmp_tdma_resume_chan(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN,
					tdmac->reg_base + TDCR);
	tdmac->status = DMA_IN_PROGRESS;

	return 0;
}

static int mmp_tdma_pause_chan(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN,
					tdmac->reg_base + TDCR);
	tdmac->status = DMA_PAUSED;

	return 0;
}
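
/*
 * Translate the cached slave parameters (direction, burst size, bus
 * width) into a TDCR value; the channel is stopped first since TDCR
 * is rewritten wholesale.
 */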
static int mmp_tdma_config_chan(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
	unsigned int tdcr = 0;

	mmp_tdma_disable_chan(chan);

	if (tdmac->dir == DMA_MEM_TO_DEV)
		tdcr = TDCR_DSTDIR_ADDR_HOLD | TDCR_SRCDIR_ADDR_INC;
	else if (tdmac->dir == DMA_DEV_TO_MEM)
		tdcr = TDCR_SRCDIR_ADDR_HOLD | TDCR_DSTDIR_ADDR_INC;

	if (tdmac->type == MMP_AUD_TDMA) {
		tdcr |= TDCR_PACKMOD;

		switch (tdmac->burst_sz) {
		case 4:
			tdcr |= TDCR_BURSTSZ_4B;
			break;
		case 8:
			tdcr |= TDCR_BURSTSZ_8B;
			break;
		case 16:
			tdcr |= TDCR_BURSTSZ_16B;
			break;
		case 32:
			tdcr |= TDCR_BURSTSZ_32B;
			break;
		case 64:
			tdcr |= TDCR_BURSTSZ_64B;
			break;
		case 128:
			tdcr |= TDCR_BURSTSZ_128B;
			break;
		default:
			dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n");
			return -EINVAL;
		}

		switch (tdmac->buswidth) {
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			tdcr |= TDCR_SSZ_8_BITS;
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			tdcr |= TDCR_SSZ_16_BITS;
			break;
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			tdcr |= TDCR_SSZ_32_BITS;
			break;
		default:
			dev_err(tdmac->dev, "mmp_tdma: unknown bus size.\n");
			return -EINVAL;
		}
	} else if (tdmac->type == PXA910_SQU) {
		tdcr |= TDCR_SSPMOD;

		switch (tdmac->burst_sz) {
		case 1:
			tdcr |= TDCR_BURSTSZ_SQU_1B;
			break;
		case 2:
			tdcr |= TDCR_BURSTSZ_SQU_2B;
			break;
		case 4:
			tdcr |= TDCR_BURSTSZ_SQU_4B;
			break;
		case 8:
			tdcr |= TDCR_BURSTSZ_SQU_8B;
			break;
		case 16:
			tdcr |= TDCR_BURSTSZ_SQU_16B;
			break;
		case 32:
			tdcr |= TDCR_BURSTSZ_SQU_32B;
			break;
		default:
			dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n");
			return -EINVAL;
		}
	}

	writel(tdcr, tdmac->reg_base + TDCR);
	return 0;
}

static int mmp_tdma_clear_chan_irq(struct mmp_tdma_chan *tdmac)
{
	u32 reg = readl(tdmac->reg_base + TDISR);

	if (reg & TDISR_COMP) {
		/* clear irq */
		reg &= ~TDISR_COMP;
		writel(reg, tdmac->reg_base + TDISR);

		return 0;
	}
	return -EAGAIN;
}
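
/*
 * Progress is the distance the hardware's current address has
 * advanced from the first descriptor's: TDSAR (source) on channel 0,
 * TDDAR (destination) on channel 1.
 */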
static size_t mmp_tdma_get_pos(struct mmp_tdma_chan *tdmac)
{
	size_t reg;

	if (tdmac->idx == 0) {
		reg = __raw_readl(tdmac->reg_base + TDSAR);
		reg -= tdmac->desc_arr[0].src_addr;
	} else if (tdmac->idx == 1) {
		reg = __raw_readl(tdmac->reg_base + TDDAR);
		reg -= tdmac->desc_arr[0].dst_addr;
	} else
		return -EINVAL;

	return reg;
}

static irqreturn_t mmp_tdma_chan_handler(int irq, void *dev_id)
{
	struct mmp_tdma_chan *tdmac = dev_id;

	if (mmp_tdma_clear_chan_irq(tdmac) == 0) {
		tasklet_schedule(&tdmac->tasklet);
		return IRQ_HANDLED;
	} else
		return IRQ_NONE;
}
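
/*
 * Device-level handler used when the channels share one IRQ line:
 * poll both channels and claim the interrupt if either had a
 * completion pending.
 */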
static irqreturn_t mmp_tdma_int_handler(int irq, void *dev_id)
{
	struct mmp_tdma_device *tdev = dev_id;
	int i, ret;
	int irq_num = 0;

	for (i = 0; i < TDMA_CHANNEL_NUM; i++) {
		struct mmp_tdma_chan *tdmac = tdev->tdmac[i];

		ret = mmp_tdma_chan_handler(irq, tdmac);
		if (ret == IRQ_HANDLED)
			irq_num++;
	}

	if (irq_num)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static void dma_do_tasklet(unsigned long data)
{
	struct mmp_tdma_chan *tdmac = (struct mmp_tdma_chan *)data;

	dmaengine_desc_get_callback_invoke(&tdmac->desc, NULL);
}

static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
{
	struct gen_pool *gpool;
	int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);

	gpool = tdmac->pool;
	if (gpool && tdmac->desc_arr)
		gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
				size);
	tdmac->desc_arr = NULL;
	if (tdmac->status == DMA_ERROR)
		tdmac->status = DMA_COMPLETE;

	return;
}
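
/*
 * Each channel reuses a single cyclic descriptor ring, so submit only
 * programs its address; the transfer starts from issue_pending.
 */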
static dma_cookie_t mmp_tdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(tx->chan);

	mmp_tdma_chan_set_desc(tdmac, tdmac->desc_arr_phys);

	return 0;
}

static int mmp_tdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
	int ret;

	dma_async_tx_descriptor_init(&tdmac->desc, chan);
	tdmac->desc.tx_submit = mmp_tdma_tx_submit;

	if (tdmac->irq) {
		ret = devm_request_irq(tdmac->dev, tdmac->irq,
			mmp_tdma_chan_handler, 0, "tdma", tdmac);
		if (ret)
			return ret;
	}
	return 1;
}

static void mmp_tdma_free_chan_resources(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	if (tdmac->irq)
		devm_free_irq(tdmac->dev, tdmac->irq, tdmac);
	mmp_tdma_free_descriptor(tdmac);
	return;
}
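
/*
 * Descriptor rings are carved out of the "asram" gen_pool looked up
 * at probe time; gen_pool_dma_alloc() returns both the CPU and DMA
 * addresses of the block.
 */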
static struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac)
{
	struct gen_pool *gpool;
	int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);

	gpool = tdmac->pool;
	if (!gpool)
		return NULL;

	tdmac->desc_arr = gen_pool_dma_alloc(gpool, size, &tdmac->desc_arr_phys);

	return tdmac->desc_arr;
}
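
/*
 * Build one descriptor per period and chain them into a ring: the
 * last descriptor's nxt_desc points back to the first, so the
 * transfer wraps until it is terminated.
 */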
static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
	struct mmp_tdma_desc *desc;
	int num_periods = buf_len / period_len;
	int i = 0, buf = 0;

	if (tdmac->status != DMA_COMPLETE)
		return NULL;

	if (period_len > TDMA_MAX_XFER_BYTES) {
		dev_err(tdmac->dev,
				"maximum period size exceeded: %zu > %d\n",
				period_len, TDMA_MAX_XFER_BYTES);
		goto err_out;
	}

	tdmac->status = DMA_IN_PROGRESS;
	tdmac->desc_num = num_periods;
	desc = mmp_tdma_alloc_descriptor(tdmac);
	if (!desc)
		goto err_out;

	if (mmp_tdma_config_write(chan, direction, &tdmac->slave_config))
		goto err_out;

	while (buf < buf_len) {
		desc = &tdmac->desc_arr[i];

		if (i + 1 == num_periods)
			desc->nxt_desc = tdmac->desc_arr_phys;
		else
			desc->nxt_desc = tdmac->desc_arr_phys +
				sizeof(*desc) * (i + 1);

		if (direction == DMA_MEM_TO_DEV) {
			desc->src_addr = dma_addr;
			desc->dst_addr = tdmac->dev_addr;
		} else {
			desc->src_addr = tdmac->dev_addr;
			desc->dst_addr = dma_addr;
		}
		desc->byte_cnt = period_len;
		dma_addr += period_len;
		buf += period_len;
		i++;
	}

	/* enable interrupt */
	if (flags & DMA_PREP_INTERRUPT)
		mmp_tdma_enable_irq(tdmac, true);

	tdmac->buf_len = buf_len;
	tdmac->period_len = period_len;
	tdmac->pos = 0;

	return &tdmac->desc;

err_out:
	tdmac->status = DMA_ERROR;
	return NULL;
}

static int mmp_tdma_terminate_all(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	mmp_tdma_disable_chan(chan);
	/* disable interrupt */
	mmp_tdma_enable_irq(tdmac, false);

	return 0;
}
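
/*
 * device_config only caches the slave parameters; they are applied by
 * mmp_tdma_config_write() once a descriptor is prepared and the
 * transfer direction is known.
 */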
static int mmp_tdma_config(struct dma_chan *chan,
			   struct dma_slave_config *dmaengine_cfg)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	memcpy(&tdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));

	return 0;
}

static int mmp_tdma_config_write(struct dma_chan *chan,
				 enum dma_transfer_direction dir,
				 struct dma_slave_config *dmaengine_cfg)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	if (dir == DMA_DEV_TO_MEM) {
		tdmac->dev_addr = dmaengine_cfg->src_addr;
		tdmac->burst_sz = dmaengine_cfg->src_maxburst;
		tdmac->buswidth = dmaengine_cfg->src_addr_width;
	} else {
		tdmac->dev_addr = dmaengine_cfg->dst_addr;
		tdmac->burst_sz = dmaengine_cfg->dst_maxburst;
		tdmac->buswidth = dmaengine_cfg->dst_addr_width;
	}
	tdmac->dir = dir;

	return mmp_tdma_config_chan(chan);
}

static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan,
			dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	tdmac->pos = mmp_tdma_get_pos(tdmac);
	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
			 tdmac->buf_len - tdmac->pos);

	return tdmac->status;
}

static void mmp_tdma_issue_pending(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	mmp_tdma_enable_chan(tdmac);
}

static int mmp_tdma_remove(struct platform_device *pdev)
{
	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	return 0;
}

static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
			      int idx, int irq,
			      int type, struct gen_pool *pool)
{
	struct mmp_tdma_chan *tdmac;

	if (idx >= TDMA_CHANNEL_NUM) {
		dev_err(tdev->dev, "too many channels for device!\n");
		return -EINVAL;
	}

	/* alloc channel */
	tdmac = devm_kzalloc(tdev->dev, sizeof(*tdmac), GFP_KERNEL);
	if (!tdmac)
		return -ENOMEM;

	if (irq)
		tdmac->irq = irq;
	tdmac->dev = tdev->dev;
	tdmac->chan.device = &tdev->device;
	tdmac->idx = idx;
	tdmac->type = type;
	tdmac->reg_base = tdev->base + idx * 4;
	tdmac->pool = pool;
	tdmac->status = DMA_COMPLETE;
	tdev->tdmac[tdmac->idx] = tdmac;
	tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac);

	/* add the channel to tdma_chan list */
	list_add_tail(&tdmac->chan.device_node,
			&tdev->device.channels);
	return 0;
}

struct mmp_tdma_filter_param {
	unsigned int chan_id;
};

static bool mmp_tdma_filter_fn(struct dma_chan *chan, void *fn_param)
{
	struct mmp_tdma_filter_param *param = fn_param;

	if (chan->chan_id != param->chan_id)
		return false;

	return true;
}
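
/*
 * OF translation: the single cell in the "dmas" specifier selects the
 * channel index, matched against chan_id by mmp_tdma_filter_fn().
 */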
static struct dma_chan *mmp_tdma_xlate(struct of_phandle_args *dma_spec,
			       struct of_dma *ofdma)
{
	struct mmp_tdma_device *tdev = ofdma->of_dma_data;
	dma_cap_mask_t mask = tdev->device.cap_mask;
	struct mmp_tdma_filter_param param;

	if (dma_spec->args_count != 1)
		return NULL;

	param.chan_id = dma_spec->args[0];

	if (param.chan_id >= TDMA_CHANNEL_NUM)
		return NULL;

	return __dma_request_channel(&mask, mmp_tdma_filter_fn, &param,
				     ofdma->of_node);
}

static const struct of_device_id mmp_tdma_dt_ids[] = {
	{ .compatible = "marvell,adma-1.0", .data = (void *)MMP_AUD_TDMA},
	{ .compatible = "marvell,pxa910-squ", .data = (void *)PXA910_SQU},
	{}
};
MODULE_DEVICE_TABLE(of, mmp_tdma_dt_ids);
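
/*
 * When the platform supplies one IRQ per channel, each channel
 * requests its own line in alloc_chan_resources; with fewer IRQs a
 * single shared line is requested here with mmp_tdma_int_handler().
 */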
static int mmp_tdma_probe(struct platform_device *pdev)
{
	enum mmp_tdma_type type;
	const struct of_device_id *of_id;
	struct mmp_tdma_device *tdev;
	struct resource *iores;
	int i, ret;
	int irq = 0, irq_num = 0;
	int chan_num = TDMA_CHANNEL_NUM;
	struct gen_pool *pool = NULL;

	of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev);
	if (of_id)
		type = (enum mmp_tdma_type) of_id->data;
	else
		type = platform_get_device_id(pdev)->driver_data;

	/* always have couple channels */
	tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
	if (!tdev)
		return -ENOMEM;

	tdev->dev = &pdev->dev;

	for (i = 0; i < chan_num; i++) {
		if (platform_get_irq(pdev, i) > 0)
			irq_num++;
	}

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	tdev->base = devm_ioremap_resource(&pdev->dev, iores);
	if (IS_ERR(tdev->base))
		return PTR_ERR(tdev->base);

	INIT_LIST_HEAD(&tdev->device.channels);

	if (pdev->dev.of_node)
		pool = of_gen_pool_get(pdev->dev.of_node, "asram", 0);
	else
		pool = sram_get_gpool("asram");
	if (!pool) {
		dev_err(&pdev->dev, "asram pool not available\n");
		return -ENOMEM;
	}

	if (irq_num != chan_num) {
		irq = platform_get_irq(pdev, 0);
		ret = devm_request_irq(&pdev->dev, irq,
			mmp_tdma_int_handler, 0, "tdma", tdev);
		if (ret)
			return ret;
	}

	/* initialize channel parameters */
	for (i = 0; i < chan_num; i++) {
		irq = (irq_num != chan_num) ? 0 : platform_get_irq(pdev, i);
		ret = mmp_tdma_chan_init(tdev, i, irq, type, pool);
		if (ret)
			return ret;
	}

	dma_cap_set(DMA_SLAVE, tdev->device.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdev->device.cap_mask);
	tdev->device.dev = &pdev->dev;
	tdev->device.device_alloc_chan_resources =
					mmp_tdma_alloc_chan_resources;
	tdev->device.device_free_chan_resources =
					mmp_tdma_free_chan_resources;
	tdev->device.device_prep_dma_cyclic = mmp_tdma_prep_dma_cyclic;
	tdev->device.device_tx_status = mmp_tdma_tx_status;
	tdev->device.device_issue_pending = mmp_tdma_issue_pending;
	tdev->device.device_config = mmp_tdma_config;
	tdev->device.device_pause = mmp_tdma_pause_chan;
	tdev->device.device_resume = mmp_tdma_resume_chan;
	tdev->device.device_terminate_all = mmp_tdma_terminate_all;
	tdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES;

	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	platform_set_drvdata(pdev, tdev);

	ret = dmaenginem_async_device_register(&tdev->device);
	if (ret) {
		dev_err(tdev->device.dev, "unable to register\n");
		return ret;
	}

	if (pdev->dev.of_node) {
		ret = of_dma_controller_register(pdev->dev.of_node,
							mmp_tdma_xlate, tdev);
		if (ret) {
			dev_err(tdev->device.dev,
				"failed to register controller\n");
			return ret;
		}
	}

	dev_info(tdev->device.dev, "initialized\n");
	return 0;
}

static const struct platform_device_id mmp_tdma_id_table[] = {
	{ "mmp-adma", MMP_AUD_TDMA },
	{ "pxa910-squ", PXA910_SQU },
	{ },
};

static struct platform_driver mmp_tdma_driver = {
	.driver		= {
		.name	= "mmp-tdma",
		.of_match_table = mmp_tdma_dt_ids,
	},
	.id_table	= mmp_tdma_id_table,
	.probe		= mmp_tdma_probe,
	.remove		= mmp_tdma_remove,
};

module_platform_driver(mmp_tdma_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MMP Two-Channel DMA Driver");
MODULE_ALIAS("platform:mmp-tdma");
MODULE_AUTHOR("Leo Yan <leoy@marvell.com>");
MODULE_AUTHOR("Zhangfei Gao <zhangfei.gao@marvell.com>");