/*
 * Qualcomm Technologies HIDMA DMA engine interface
 *
 * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/* Linux Foundation elects GPLv2 license only. */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/property.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>

#include "../dmaengine.h"
#include "hidma.h"

/*
 * Default idle time is 2 seconds. This parameter can
 * be overridden by changing the following
 * /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms
 * during kernel boot.
 */
#define HIDMA_AUTOSUSPEND_TIMEOUT               2000
#define HIDMA_ERR_INFO_SW                       0xFF
#define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE     0x0
#define HIDMA_NR_DEFAULT_DESC                   10

static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
{
        return container_of(dmadev, struct hidma_dev, ddev);
}

static inline
struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp)
{
        return container_of(_lldevp, struct hidma_dev, lldev);
}

static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach)
{
        return container_of(dmach, struct hidma_chan, chan);
}

static inline
struct hidma_desc *to_hidma_desc(struct dma_async_tx_descriptor *t)
{
        return container_of(t, struct hidma_desc, desc);
}

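/*
 * Channel objects are devm-allocated in hidma_chan_init(), so releasing the
 * device only needs to forget the dmaengine channel list.
 */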
static void hidma_free(struct hidma_dev *dmadev)
{
        INIT_LIST_HEAD(&dmadev->ddev.channels);
}

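/*
 * The descriptor count can also be given as a module parameter; it is used
 * as a fallback in hidma_probe() when the "desc-count" device property is
 * missing or zero.
 */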
static unsigned int nr_desc_prm;
module_param(nr_desc_prm, uint, 0644);
MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)");


/* process completed descriptors */
static void hidma_process_completed(struct hidma_chan *mchan)
{
        struct dma_device *ddev = mchan->chan.device;
        struct hidma_dev *mdma = to_hidma_dev(ddev);
        struct dma_async_tx_descriptor *desc;
        dma_cookie_t last_cookie;
        struct hidma_desc *mdesc;
        struct hidma_desc *next;
        unsigned long irqflags;
        struct list_head list;

        INIT_LIST_HEAD(&list);

        /* Get all completed descriptors */
        spin_lock_irqsave(&mchan->lock, irqflags);
        list_splice_tail_init(&mchan->completed, &list);
        spin_unlock_irqrestore(&mchan->lock, irqflags);

        /* Execute callbacks and run dependencies */
        list_for_each_entry_safe(mdesc, next, &list, node) {
                enum dma_status llstat;
                struct dmaengine_desc_callback cb;
                struct dmaengine_result result;

                desc = &mdesc->desc;
                last_cookie = desc->cookie;

                llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);

                spin_lock_irqsave(&mchan->lock, irqflags);
                if (llstat == DMA_COMPLETE) {
                        mchan->last_success = last_cookie;
                        result.result = DMA_TRANS_NOERROR;
                } else {
                        result.result = DMA_TRANS_ABORTED;
                }

                dma_cookie_complete(desc);
                spin_unlock_irqrestore(&mchan->lock, irqflags);

                dmaengine_desc_get_callback(desc, &cb);

                dma_run_dependencies(desc);

                spin_lock_irqsave(&mchan->lock, irqflags);
                list_move(&mdesc->node, &mchan->free);
                spin_unlock_irqrestore(&mchan->lock, irqflags);

                dmaengine_desc_callback_invoke(&cb, &result);
        }
}

/*
 * Called once for each submitted descriptor.
 * PM is locked once for each descriptor that is currently
 * in execution.
 */
static void hidma_callback(void *data)
{
        struct hidma_desc *mdesc = data;
        struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan);
        struct dma_device *ddev = mchan->chan.device;
        struct hidma_dev *dmadev = to_hidma_dev(ddev);
        unsigned long irqflags;
        bool queued = false;

        spin_lock_irqsave(&mchan->lock, irqflags);
        if (mdesc->node.next) {
                /* Delete from the active list, add to completed list */
                list_move_tail(&mdesc->node, &mchan->completed);
                queued = true;

                /* calculate the next running descriptor */
                mchan->running = list_first_entry(&mchan->active,
                                                  struct hidma_desc, node);
        }
        spin_unlock_irqrestore(&mchan->lock, irqflags);

        hidma_process_completed(mchan);

        if (queued) {
                pm_runtime_mark_last_busy(dmadev->ddev.dev);
                pm_runtime_put_autosuspend(dmadev->ddev.dev);
        }
}

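/* Allocate one channel object and register it with the dmaengine device. */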
static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
{
        struct hidma_chan *mchan;
        struct dma_device *ddev;

        mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL);
        if (!mchan)
                return -ENOMEM;

        ddev = &dmadev->ddev;
        mchan->dma_sig = dma_sig;
        mchan->dmadev = dmadev;
        mchan->chan.device = ddev;
        dma_cookie_init(&mchan->chan);

        INIT_LIST_HEAD(&mchan->free);
        INIT_LIST_HEAD(&mchan->prepared);
        INIT_LIST_HEAD(&mchan->active);
        INIT_LIST_HEAD(&mchan->completed);

        spin_lock_init(&mchan->lock);
        list_add_tail(&mchan->chan.device_node, &ddev->channels);
        dmadev->ddev.chancnt++;
        return 0;
}

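/*
 * Tasklet used as a fallback by hidma_issue_pending(): if the asynchronous
 * runtime PM get fails there, the tasklet resumes the device synchronously
 * and then starts the channel.
 */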
static void hidma_issue_task(unsigned long arg)
{
        struct hidma_dev *dmadev = (struct hidma_dev *)arg;

        pm_runtime_get_sync(dmadev->ddev.dev);
        hidma_ll_start(dmadev->lldev);
}

static void hidma_issue_pending(struct dma_chan *dmach)
{
        struct hidma_chan *mchan = to_hidma_chan(dmach);
        struct hidma_dev *dmadev = mchan->dmadev;
        unsigned long flags;
        int status;

        spin_lock_irqsave(&mchan->lock, flags);
        if (!mchan->running) {
                struct hidma_desc *desc = list_first_entry(&mchan->active,
                                                           struct hidma_desc,
                                                           node);
                mchan->running = desc;
        }
        spin_unlock_irqrestore(&mchan->lock, flags);

        /* PM will be released in hidma_callback function. */
        status = pm_runtime_get(dmadev->ddev.dev);
        if (status < 0)
                tasklet_schedule(&dmadev->task);
        else
                hidma_ll_start(dmadev->lldev);
}

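/*
 * A transaction is treated as successful if its cookie does not lie in the
 * window between the last successfully completed cookie and the last cookie
 * handed out; the two branches handle wraparound of the cookie counter.
 */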
static inline bool hidma_txn_is_success(dma_cookie_t cookie,
                dma_cookie_t last_success, dma_cookie_t last_used)
{
        if (last_success <= last_used) {
                if ((cookie <= last_success) || (cookie > last_used))
                        return true;
        } else {
                if ((cookie <= last_success) && (cookie > last_used))
                        return true;
        }
        return false;
}

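/*
 * Report descriptor status. dma_cookie_status() only knows whether a cookie
 * has completed; on top of that, last_success is used to distinguish
 * successful completions from aborted ones, and DMA_PAUSED is reported for
 * the descriptor currently held back on a paused channel.
 */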
static enum dma_status hidma_tx_status(struct dma_chan *dmach,
                                       dma_cookie_t cookie,
                                       struct dma_tx_state *txstate)
{
        struct hidma_chan *mchan = to_hidma_chan(dmach);
        enum dma_status ret;

        ret = dma_cookie_status(dmach, cookie, txstate);
        if (ret == DMA_COMPLETE) {
                bool is_success;

                is_success = hidma_txn_is_success(cookie, mchan->last_success,
                                                  dmach->cookie);
                return is_success ? ret : DMA_ERROR;
        }

        if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
                unsigned long flags;
                dma_cookie_t runcookie;

                spin_lock_irqsave(&mchan->lock, flags);
                if (mchan->running)
                        runcookie = mchan->running->desc.cookie;
                else
                        runcookie = -EINVAL;

                if (runcookie == cookie)
                        ret = DMA_PAUSED;

                spin_unlock_irqrestore(&mchan->lock, flags);
        }

        return ret;
}

/*
 * Submit descriptor to hardware.
 * Lock the PM for each descriptor we are sending.
 */
static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd)
{
        struct hidma_chan *mchan = to_hidma_chan(txd->chan);
        struct hidma_dev *dmadev = mchan->dmadev;
        struct hidma_desc *mdesc;
        unsigned long irqflags;
        dma_cookie_t cookie;

        pm_runtime_get_sync(dmadev->ddev.dev);
        if (!hidma_ll_isenabled(dmadev->lldev)) {
                pm_runtime_mark_last_busy(dmadev->ddev.dev);
                pm_runtime_put_autosuspend(dmadev->ddev.dev);
                return -ENODEV;
        }

        mdesc = container_of(txd, struct hidma_desc, desc);
        spin_lock_irqsave(&mchan->lock, irqflags);

        /* Move descriptor to active */
        list_move_tail(&mdesc->node, &mchan->active);

        /* Update cookie */
        cookie = dma_cookie_assign(txd);

        hidma_ll_queue_request(dmadev->lldev, mdesc->tre_ch);
        spin_unlock_irqrestore(&mchan->lock, irqflags);

        return cookie;
}

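/*
 * Allocate the software descriptors for a channel and reserve a hardware
 * descriptor (TRE) for each one; if any step fails, everything allocated so
 * far is released and the error is returned.
 */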
static int hidma_alloc_chan_resources(struct dma_chan *dmach)
{
        struct hidma_chan *mchan = to_hidma_chan(dmach);
        struct hidma_dev *dmadev = mchan->dmadev;
        struct hidma_desc *mdesc, *tmp;
        unsigned long irqflags;
        LIST_HEAD(descs);
        unsigned int i;
        int rc = 0;

        if (mchan->allocated)
                return 0;

        /* Alloc descriptors for this channel */
        for (i = 0; i < dmadev->nr_descriptors; i++) {
                mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT);
                if (!mdesc) {
                        rc = -ENOMEM;
                        break;
                }
                dma_async_tx_descriptor_init(&mdesc->desc, dmach);
                mdesc->desc.tx_submit = hidma_tx_submit;

                rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig,
                                      "DMA engine", hidma_callback, mdesc,
                                      &mdesc->tre_ch);
                if (rc) {
                        dev_err(dmach->device->dev,
                                "channel alloc failed at %u\n", i);
                        kfree(mdesc);
                        break;
                }
                list_add_tail(&mdesc->node, &descs);
        }

        if (rc) {
                /* return the allocated descriptors */
                list_for_each_entry_safe(mdesc, tmp, &descs, node) {
                        hidma_ll_free(dmadev->lldev, mdesc->tre_ch);
                        kfree(mdesc);
                }
                return rc;
        }

        spin_lock_irqsave(&mchan->lock, irqflags);
        list_splice_tail_init(&descs, &mchan->free);
        mchan->allocated = true;
        spin_unlock_irqrestore(&mchan->lock, irqflags);
        return 1;
}

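/*
 * Prepare a memcpy transaction: take a descriptor from the free list,
 * program the transfer parameters into it and park it on the prepared list
 * until tx_submit() moves it to the active list.
 */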
static struct dma_async_tx_descriptor *
hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
                      size_t len, unsigned long flags)
{
        struct hidma_chan *mchan = to_hidma_chan(dmach);
        struct hidma_desc *mdesc = NULL;
        struct hidma_dev *mdma = mchan->dmadev;
        unsigned long irqflags;

        /* Get free descriptor */
        spin_lock_irqsave(&mchan->lock, irqflags);
        if (!list_empty(&mchan->free)) {
                mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
                list_del(&mdesc->node);
        }
        spin_unlock_irqrestore(&mchan->lock, irqflags);

        if (!mdesc)
                return NULL;

        hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
                                     src, dest, len, flags);

        /* Place descriptor in prepared list */
        spin_lock_irqsave(&mchan->lock, irqflags);
        list_add_tail(&mdesc->node, &mchan->prepared);
        spin_unlock_irqrestore(&mchan->lock, irqflags);

        return &mdesc->desc;
}

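/*
 * Flush a channel: pause the hardware, hand every queued descriptor back to
 * its owner (the completion callbacks run without a result), recycle the
 * descriptors onto the free list and re-enable the channel.
 */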
static int hidma_terminate_channel(struct dma_chan *chan)
{
        struct hidma_chan *mchan = to_hidma_chan(chan);
        struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
        struct hidma_desc *tmp, *mdesc;
        unsigned long irqflags;
        LIST_HEAD(list);
        int rc;

        pm_runtime_get_sync(dmadev->ddev.dev);
        /* give completed requests a chance to finish */
        hidma_process_completed(mchan);

        spin_lock_irqsave(&mchan->lock, irqflags);
        mchan->last_success = 0;
        list_splice_init(&mchan->active, &list);
        list_splice_init(&mchan->prepared, &list);
        list_splice_init(&mchan->completed, &list);
        spin_unlock_irqrestore(&mchan->lock, irqflags);

        /* this suspends the existing transfer */
        rc = hidma_ll_disable(dmadev->lldev);
        if (rc) {
                dev_err(dmadev->ddev.dev, "channel did not pause\n");
                goto out;
        }

        /* return all user requests */
        list_for_each_entry_safe(mdesc, tmp, &list, node) {
                struct dma_async_tx_descriptor *txd = &mdesc->desc;

                dma_descriptor_unmap(txd);
                dmaengine_desc_get_callback_invoke(txd, NULL);
                dma_run_dependencies(txd);

                /* move myself to free_list */
                list_move(&mdesc->node, &mchan->free);
        }

        rc = hidma_ll_enable(dmadev->lldev);
out:
        pm_runtime_mark_last_busy(dmadev->ddev.dev);
        pm_runtime_put_autosuspend(dmadev->ddev.dev);
        return rc;
}

static int hidma_terminate_all(struct dma_chan *chan)
{
        struct hidma_chan *mchan = to_hidma_chan(chan);
        struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
        int rc;

        rc = hidma_terminate_channel(chan);
        if (rc)
                return rc;

        /* reinitialize the hardware */
        pm_runtime_get_sync(dmadev->ddev.dev);
        rc = hidma_ll_setup(dmadev->lldev);
        pm_runtime_mark_last_busy(dmadev->ddev.dev);
        pm_runtime_put_autosuspend(dmadev->ddev.dev);
        return rc;
}

static void hidma_free_chan_resources(struct dma_chan *dmach)
{
        struct hidma_chan *mchan = to_hidma_chan(dmach);
        struct hidma_dev *mdma = mchan->dmadev;
        struct hidma_desc *mdesc, *tmp;
        unsigned long irqflags;
        LIST_HEAD(descs);

        /* terminate running transactions and free descriptors */
        hidma_terminate_channel(dmach);

        spin_lock_irqsave(&mchan->lock, irqflags);

        /* Move data */
        list_splice_tail_init(&mchan->free, &descs);

        /* Free descriptors */
        list_for_each_entry_safe(mdesc, tmp, &descs, node) {
                hidma_ll_free(mdma->lldev, mdesc->tre_ch);
                list_del(&mdesc->node);
                kfree(mdesc);
        }

        mchan->allocated = 0;
        spin_unlock_irqrestore(&mchan->lock, irqflags);
}

static int hidma_pause(struct dma_chan *chan)
{
        struct hidma_chan *mchan;
        struct hidma_dev *dmadev;

        mchan = to_hidma_chan(chan);
        dmadev = to_hidma_dev(mchan->chan.device);
        if (!mchan->paused) {
                pm_runtime_get_sync(dmadev->ddev.dev);
                if (hidma_ll_disable(dmadev->lldev))
                        dev_warn(dmadev->ddev.dev, "channel did not stop\n");
                mchan->paused = true;
                pm_runtime_mark_last_busy(dmadev->ddev.dev);
                pm_runtime_put_autosuspend(dmadev->ddev.dev);
        }
        return 0;
}

static int hidma_resume(struct dma_chan *chan)
{
        struct hidma_chan *mchan;
        struct hidma_dev *dmadev;
        int rc = 0;

        mchan = to_hidma_chan(chan);
        dmadev = to_hidma_dev(mchan->chan.device);
        if (mchan->paused) {
                pm_runtime_get_sync(dmadev->ddev.dev);
                rc = hidma_ll_enable(dmadev->lldev);
                if (!rc)
                        mchan->paused = false;
                else
                        dev_err(dmadev->ddev.dev,
                                "failed to resume the channel");
                pm_runtime_mark_last_busy(dmadev->ddev.dev);
                pm_runtime_put_autosuspend(dmadev->ddev.dev);
        }
        return rc;
}

static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
{
        struct hidma_lldev *lldev = arg;

        /*
         * All interrupts are request driven.
         * HW doesn't send an interrupt by itself.
         */
        return hidma_ll_inthandler(chirq, lldev);
}

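/* sysfs "chid" attribute: report the hardware channel index in use. */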
static ssize_t hidma_show_values(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct hidma_dev *mdev = platform_get_drvdata(pdev);

        buf[0] = 0;

        if (strcmp(attr->attr.name, "chid") == 0)
                sprintf(buf, "%d\n", mdev->chidx);

        return strlen(buf);
}

static int hidma_create_sysfs_entry(struct hidma_dev *dev, char *name,
                                    int mode)
{
        struct device_attribute *attrs;
        char *name_copy;

        attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute),
                             GFP_KERNEL);
        if (!attrs)
                return -ENOMEM;

        name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL);
        if (!name_copy)
                return -ENOMEM;

        attrs->attr.name = name_copy;
        attrs->attr.mode = mode;
        attrs->show = hidma_show_values;
        sysfs_attr_init(&attrs->attr);

        return device_create_file(dev->ddev.dev, attrs);
}

static int hidma_probe(struct platform_device *pdev)
{
        struct hidma_dev *dmadev;
        struct resource *trca_resource;
        struct resource *evca_resource;
        int chirq;
        void __iomem *evca;
        void __iomem *trca;
        int rc;

        pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
        pm_runtime_use_autosuspend(&pdev->dev);
        pm_runtime_set_active(&pdev->dev);
        pm_runtime_enable(&pdev->dev);

        trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        trca = devm_ioremap_resource(&pdev->dev, trca_resource);
        if (IS_ERR(trca)) {
                rc = -ENOMEM;
                goto bailout;
        }

        evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        evca = devm_ioremap_resource(&pdev->dev, evca_resource);
        if (IS_ERR(evca)) {
                rc = -ENOMEM;
                goto bailout;
        }

        /*
         * This driver only handles the channel IRQs.
         * Common IRQ is handled by the management driver.
         */
        chirq = platform_get_irq(pdev, 0);
        if (chirq < 0) {
                rc = -ENODEV;
                goto bailout;
        }

        dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
        if (!dmadev) {
                rc = -ENOMEM;
                goto bailout;
        }

        INIT_LIST_HEAD(&dmadev->ddev.channels);
        spin_lock_init(&dmadev->lock);
        dmadev->ddev.dev = &pdev->dev;
        pm_runtime_get_sync(dmadev->ddev.dev);

        dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask);
        if (WARN_ON(!pdev->dev.dma_mask)) {
                rc = -ENXIO;
                goto dmafree;
        }

        dmadev->dev_evca = evca;
        dmadev->evca_resource = evca_resource;
        dmadev->dev_trca = trca;
        dmadev->trca_resource = trca_resource;
        dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy;
        dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources;
        dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources;
        dmadev->ddev.device_tx_status = hidma_tx_status;
        dmadev->ddev.device_issue_pending = hidma_issue_pending;
        dmadev->ddev.device_pause = hidma_pause;
        dmadev->ddev.device_resume = hidma_resume;
        dmadev->ddev.device_terminate_all = hidma_terminate_all;
        dmadev->ddev.copy_align = 8;

        device_property_read_u32(&pdev->dev, "desc-count",
                                 &dmadev->nr_descriptors);

        if (!dmadev->nr_descriptors && nr_desc_prm)
                dmadev->nr_descriptors = nr_desc_prm;

        if (!dmadev->nr_descriptors)
                dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;

        dmadev->chidx = readl(dmadev->dev_trca + 0x28);

        /* Set DMA mask to 64 bits. */
        rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (rc) {
                dev_warn(&pdev->dev, "unable to set coherent mask to 64");
                rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (rc)
                        goto dmafree;
        }

        dmadev->lldev = hidma_ll_init(dmadev->ddev.dev,
                                      dmadev->nr_descriptors, dmadev->dev_trca,
                                      dmadev->dev_evca, dmadev->chidx);
        if (!dmadev->lldev) {
                rc = -EPROBE_DEFER;
                goto dmafree;
        }

        rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler, 0,
                              "qcom-hidma", dmadev->lldev);
        if (rc)
                goto uninit;

        INIT_LIST_HEAD(&dmadev->ddev.channels);
        rc = hidma_chan_init(dmadev, 0);
        if (rc)
                goto uninit;

        rc = dma_async_device_register(&dmadev->ddev);
        if (rc)
                goto uninit;

        dmadev->irq = chirq;
        tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev);
        hidma_debug_init(dmadev);
        hidma_create_sysfs_entry(dmadev, "chid", S_IRUGO);
        dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
        platform_set_drvdata(pdev, dmadev);
        pm_runtime_mark_last_busy(dmadev->ddev.dev);
        pm_runtime_put_autosuspend(dmadev->ddev.dev);
        return 0;

uninit:
        hidma_debug_uninit(dmadev);
        hidma_ll_uninit(dmadev->lldev);
dmafree:
        if (dmadev)
                hidma_free(dmadev);
bailout:
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        return rc;
}

static int hidma_remove(struct platform_device *pdev)
{
        struct hidma_dev *dmadev = platform_get_drvdata(pdev);

        pm_runtime_get_sync(dmadev->ddev.dev);
        dma_async_device_unregister(&dmadev->ddev);
        devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
        tasklet_kill(&dmadev->task);
        hidma_debug_uninit(dmadev);
        hidma_ll_uninit(dmadev->lldev);
        hidma_free(dmadev);

        dev_info(&pdev->dev, "HI-DMA engine removed\n");
        pm_runtime_put_sync_suspend(&pdev->dev);
        pm_runtime_disable(&pdev->dev);

        return 0;
}

#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id hidma_acpi_ids[] = {
        {"QCOM8061"},
        {},
};
#endif

static const struct of_device_id hidma_match[] = {
        {.compatible = "qcom,hidma-1.0",},
        {},
};
MODULE_DEVICE_TABLE(of, hidma_match);

static struct platform_driver hidma_driver = {
        .probe = hidma_probe,
        .remove = hidma_remove,
        .driver = {
                .name = "hidma",
                .of_match_table = hidma_match,
                .acpi_match_table = ACPI_PTR(hidma_acpi_ids),
        },
};

module_platform_driver(hidma_driver);
MODULE_LICENSE("GPL v2");