/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef VIRT_DMA_H
#define VIRT_DMA_H

#include <linux/dmaengine.h>
#include <linux/interrupt.h>

#include "dmaengine.h"

struct virt_dma_desc {
	struct dma_async_tx_descriptor tx;
	/* protected by vc.lock */
	struct list_head node;
};

struct virt_dma_chan {
	struct dma_chan chan;
	struct tasklet_struct task;
	void (*desc_free)(struct virt_dma_desc *);

	spinlock_t lock;

	/* protected by vc.lock */
	struct list_head desc_allocated;
	struct list_head desc_submitted;
	struct list_head desc_issued;
	struct list_head desc_completed;

	struct virt_dma_desc *cyclic;
};

static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
{
	return container_of(chan, struct virt_dma_chan, chan);
}

void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head);
void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev);
struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);
extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *);
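
/*
 * Example: a driver embeds struct virt_dma_desc and struct virt_dma_chan
 * in its own descriptor and channel types, supplies a desc_free callback,
 * and calls vchan_init() at probe time. Illustrative sketch only; the
 * foo_* names (and fc/fd) are hypothetical, not part of this API.
 *
 *	struct foo_desc {
 *		struct virt_dma_desc vd;
 *		... hardware-specific fields ...
 *	};
 *
 *	static void foo_desc_free(struct virt_dma_desc *vd)
 *	{
 *		kfree(container_of(vd, struct foo_desc, vd));
 *	}
 *
 *	... in probe, for each channel:
 *	fc->vc.desc_free = foo_desc_free;
 *	vchan_init(&fc->vc, &fd->ddev);
 */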

/**
 * vchan_tx_prep - prepare a descriptor
 * @vc: virtual channel allocating this descriptor
 * @vd: virtual descriptor to prepare
 * @tx_flags: flags argument passed in to prepare function
 */
static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc,
	struct virt_dma_desc *vd, unsigned long tx_flags)
{
	unsigned long flags;

	dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
	vd->tx.flags = tx_flags;
	vd->tx.tx_submit = vchan_tx_submit;
	vd->tx.desc_free = vchan_tx_desc_free;

	spin_lock_irqsave(&vc->lock, flags);
	list_add_tail(&vd->node, &vc->desc_allocated);
	spin_unlock_irqrestore(&vc->lock, flags);

	return &vd->tx;
}
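
/*
 * Example: a prepare callback allocates a driver descriptor embedding
 * struct virt_dma_desc and returns it through vchan_tx_prep(). Sketch
 * only, reusing the hypothetical struct foo_desc from above;
 * foo_fill_hw_desc() is likewise a hypothetical helper.
 *
 *	static struct dma_async_tx_descriptor *
 *	foo_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
 *			    dma_addr_t src, size_t len, unsigned long flags)
 *	{
 *		struct foo_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);
 *
 *		if (!d)
 *			return NULL;
 *		foo_fill_hw_desc(d, dst, src, len);
 *		return vchan_tx_prep(to_virt_chan(chan), &d->vd, flags);
 *	}
 */
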
/**
 * vchan_issue_pending - move submitted descriptors to issued list
 * @vc: virtual channel to update
 *
 * vc.lock must be held by caller
 */
static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
{
	list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued);
	return !list_empty(&vc->desc_issued);
}
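
/*
 * Example: a typical device_issue_pending callback takes vc.lock, moves
 * submitted work onto the issued list, and starts the hardware if it is
 * idle. Sketch only; foo_hw_busy() and foo_start_next() are hypothetical
 * driver helpers.
 *
 *	static void foo_issue_pending(struct dma_chan *chan)
 *	{
 *		struct virt_dma_chan *vc = to_virt_chan(chan);
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&vc->lock, flags);
 *		if (vchan_issue_pending(vc) && !foo_hw_busy(vc))
 *			foo_start_next(vc);
 *		spin_unlock_irqrestore(&vc->lock, flags);
 *	}
 */
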
/**
 * vchan_cookie_complete - report completion of a descriptor
 * @vd: virtual descriptor to update
 *
 * vc.lock must be held by caller
 */
static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
	dma_cookie_t cookie;

	cookie = vd->tx.cookie;
	dma_cookie_complete(&vd->tx);
	dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
		 vd, cookie);
	list_add_tail(&vd->node, &vc->desc_completed);

	tasklet_schedule(&vc->task);
}
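
/*
 * Example: a transfer-complete interrupt removes the finished descriptor
 * from the issued list and reports it; the client callback then runs from
 * the channel tasklet rather than hard-IRQ context. Sketch only;
 * foo_get_current() is a hypothetical helper returning the descriptor the
 * hardware just finished.
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct virt_dma_chan *vc = data;
 *		struct virt_dma_desc *vd;
 *
 *		spin_lock(&vc->lock);
 *		vd = foo_get_current(vc);
 *		if (vd) {
 *			list_del(&vd->node);
 *			vchan_cookie_complete(vd);
 *		}
 *		spin_unlock(&vc->lock);
 *		return IRQ_HANDLED;
 *	}
 */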

/**
 * vchan_cyclic_callback - report the completion of a period
 * @vd: virtual descriptor
 */
static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

	vc->cyclic = vd;
	tasklet_schedule(&vc->task);
}
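
/*
 * Example: for cyclic transfers the period interrupt does not complete the
 * descriptor; it only records that a period elapsed so the tasklet can run
 * the client's callback once per period. In the period interrupt, with
 * vc.lock held, a driver would simply call:
 *
 *	vchan_cyclic_callback(vd);
 */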

/**
 * vchan_next_desc - peek at the next descriptor to be processed
 * @vc: virtual channel to obtain descriptor from
 *
 * vc.lock must be held by caller
 */
static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
{
	return list_first_entry_or_null(&vc->desc_issued,
					struct virt_dma_desc, node);
}
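
/*
 * Example: a driver's start routine peeks at the head of the issued list
 * and programs the controller from it; the descriptor stays on the issued
 * list until it is completed. Sketch only; foo_hw_program() is a
 * hypothetical helper. Called with vc.lock held.
 *
 *	static void foo_start_next(struct virt_dma_chan *vc)
 *	{
 *		struct virt_dma_desc *vd = vchan_next_desc(vc);
 *
 *		if (vd)
 *			foo_hw_program(vc, vd);
 *	}
 */
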
/**
 * vchan_get_all_descriptors - obtain all allocated, submitted, issued and
 *	completed descriptors
 * @vc: virtual channel to get descriptors from
 * @head: list of descriptors found
 *
 * vc.lock must be held by caller
 *
 * Removes all allocated, submitted, issued and completed descriptors from
 * internal lists, and provides a list of all descriptors found
 */
static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
	struct list_head *head)
{
	list_splice_tail_init(&vc->desc_allocated, head);
	list_splice_tail_init(&vc->desc_submitted, head);
	list_splice_tail_init(&vc->desc_issued, head);
	list_splice_tail_init(&vc->desc_completed, head);
}
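
/*
 * Example: a device_terminate_all callback stops the hardware, collects
 * every descriptor while holding the lock, and frees them after dropping
 * it, just as vchan_free_chan_resources() below does. Sketch only;
 * foo_hw_stop() is hypothetical.
 *
 *	static int foo_terminate_all(struct dma_chan *chan)
 *	{
 *		struct virt_dma_chan *vc = to_virt_chan(chan);
 *		unsigned long flags;
 *		LIST_HEAD(head);
 *
 *		spin_lock_irqsave(&vc->lock, flags);
 *		foo_hw_stop(vc);
 *		vchan_get_all_descriptors(vc, &head);
 *		spin_unlock_irqrestore(&vc->lock, flags);
 *
 *		vchan_dma_desc_free_list(vc, &head);
 *		return 0;
 *	}
 */
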
static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
{
	struct virt_dma_desc *vd;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);
	vchan_get_all_descriptors(vc, &head);
	list_for_each_entry(vd, &head, node)
		dmaengine_desc_clear_reuse(&vd->tx);
	spin_unlock_irqrestore(&vc->lock, flags);

	vchan_dma_desc_free_list(vc, &head);
}

/**
 * vchan_synchronize() - synchronize callback execution to the current context
 * @vc: virtual channel to synchronize
 *
 * Makes sure that all scheduled or active callbacks have finished running. For
 * proper operation the caller has to ensure that no new callbacks are scheduled
 * after the invocation of this function started.
 */
static inline void vchan_synchronize(struct virt_dma_chan *vc)
{
	tasklet_kill(&vc->task);
}
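
/*
 * Example: drivers typically expose this directly as their
 * device_synchronize callback:
 *
 *	static void foo_synchronize(struct dma_chan *chan)
 *	{
 *		vchan_synchronize(to_virt_chan(chan));
 *	}
 */
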
#endif