]>
Commit | Line | Data |
---|---|---|
244ab90e AL |
1 | /* |
2 | * DMA helper functions | |
3 | * | |
9c211ad2 | 4 | * Copyright (c) 2009, 2020 Red Hat |
244ab90e AL |
5 | * |
6 | * This work is licensed under the terms of the GNU General Public License | |
7 | * (GNU GPL), version 2 or later. | |
8 | */ | |
9 | ||
10 | #ifndef DMA_H | |
11 | #define DMA_H | |
12 | ||
022c62cb | 13 | #include "exec/memory.h" |
df32fd1c | 14 | #include "exec/address-spaces.h" |
737e150e | 15 | #include "block/block.h" |
5e5a94b6 | 16 | #include "block/accounting.h" |
244ab90e | 17 | |
10dc8aef PB |
18 | typedef struct ScatterGatherEntry ScatterGatherEntry; |
19 | ||
43cf8ae6 DG |
/*
 * Direction of a DMA transfer, named from the device's point of view:
 * TO_DEVICE reads guest memory into the device, FROM_DEVICE writes
 * device data into guest memory.
 */
typedef enum {
    DMA_DIRECTION_TO_DEVICE = 0,
    DMA_DIRECTION_FROM_DEVICE = 1,
} DMADirection;
24 | ||
fead0c24 PB |
/*
 * A scatter-gather list of bus-address spans, built with qemu_sglist_add()
 * and consumed by the dma_blk_*() / dma_buf_*() helpers below.
 */
struct QEMUSGList {
    ScatterGatherEntry *sg; /* array of (base, len) entries */
    int nsg;                /* number of valid entries in @sg */
    int nalloc;             /* allocated capacity of @sg (see alloc_hint) */
    size_t size;            /* total length in bytes of all entries */
    DeviceState *dev;       /* owning device, as passed to qemu_sglist_init() */
    AddressSpace *as;       /* address space the entries refer to */
};
33 | ||
4be403c8 | 34 | #ifndef CONFIG_USER_ONLY |
d9d1055e | 35 | |
e5332e63 DG |
/*
 * When an IOMMU is present, bus addresses become distinct from
 * CPU/memory physical addresses and may be a different size.  Because
 * the IOVA size depends more on the bus than on the platform, we more
 * or less have to treat these as 64-bit always to cover all (or at
 * least most) cases.
 */
typedef uint64_t dma_addr_t;

#define DMA_ADDR_BITS 64            /* width of dma_addr_t in bits */
#define DMA_ADDR_FMT "%" PRIx64     /* printf conversion for dma_addr_t */
47 | ||
df32fd1c | 48 | static inline void dma_barrier(AddressSpace *as, DMADirection dir) |
7a0bac4d BH |
49 | { |
50 | /* | |
51 | * This is called before DMA read and write operations | |
52 | * unless the _relaxed form is used and is responsible | |
53 | * for providing some sane ordering of accesses vs | |
54 | * concurrently running VCPUs. | |
55 | * | |
56 | * Users of map(), unmap() or lower level st/ld_* | |
57 | * operations are responsible for providing their own | |
58 | * ordering via barriers. | |
59 | * | |
60 | * This primitive implementation does a simple smp_mb() | |
61 | * before each operation which provides pretty much full | |
62 | * ordering. | |
63 | * | |
64 | * A smarter implementation can be devised if needed to | |
65 | * use lighter barriers based on the direction of the | |
66 | * transfer, the DMA context, etc... | |
67 | */ | |
77ac58dd | 68 | smp_mb(); |
7a0bac4d BH |
69 | } |
70 | ||
d86a77f8 DG |
71 | /* Checks that the given range of addresses is valid for DMA. This is |
72 | * useful for certain cases, but usually you should just use | |
73 | * dma_memory_{read,write}() and check for errors */ | |
df32fd1c | 74 | static inline bool dma_memory_valid(AddressSpace *as, |
e5332e63 | 75 | dma_addr_t addr, dma_addr_t len, |
7ccb391c | 76 | DMADirection dir, MemTxAttrs attrs) |
d86a77f8 | 77 | { |
df32fd1c | 78 | return address_space_access_valid(as, addr, len, |
fddffa42 | 79 | dir == DMA_DIRECTION_FROM_DEVICE, |
7ccb391c | 80 | attrs); |
d86a77f8 DG |
81 | } |
82 | ||
9989bcd3 PMD |
83 | static inline MemTxResult dma_memory_rw_relaxed(AddressSpace *as, |
84 | dma_addr_t addr, | |
85 | void *buf, dma_addr_t len, | |
4afd0f2f PMD |
86 | DMADirection dir, |
87 | MemTxAttrs attrs) | |
d86a77f8 | 88 | { |
4afd0f2f | 89 | return address_space_rw(as, addr, attrs, |
9989bcd3 | 90 | buf, len, dir == DMA_DIRECTION_FROM_DEVICE); |
d86a77f8 DG |
91 | } |
92 | ||
b1f51303 PMD |
93 | static inline MemTxResult dma_memory_read_relaxed(AddressSpace *as, |
94 | dma_addr_t addr, | |
95 | void *buf, dma_addr_t len) | |
7a0bac4d | 96 | { |
4afd0f2f PMD |
97 | return dma_memory_rw_relaxed(as, addr, buf, len, |
98 | DMA_DIRECTION_TO_DEVICE, | |
99 | MEMTXATTRS_UNSPECIFIED); | |
7a0bac4d BH |
100 | } |
101 | ||
77c71d1d PMD |
102 | static inline MemTxResult dma_memory_write_relaxed(AddressSpace *as, |
103 | dma_addr_t addr, | |
104 | const void *buf, | |
105 | dma_addr_t len) | |
7a0bac4d | 106 | { |
df32fd1c | 107 | return dma_memory_rw_relaxed(as, addr, (void *)buf, len, |
4afd0f2f PMD |
108 | DMA_DIRECTION_FROM_DEVICE, |
109 | MEMTXATTRS_UNSPECIFIED); | |
7a0bac4d BH |
110 | } |
111 | ||
9989bcd3 PMD |
112 | /** |
113 | * dma_memory_rw: Read from or write to an address space from DMA controller. | |
114 | * | |
115 | * Return a MemTxResult indicating whether the operation succeeded | |
116 | * or failed (eg unassigned memory, device rejected the transaction, | |
117 | * IOMMU fault). | |
118 | * | |
119 | * @as: #AddressSpace to be accessed | |
120 | * @addr: address within that address space | |
121 | * @buf: buffer with the data transferred | |
122 | * @len: the number of bytes to read or write | |
123 | * @dir: indicates the transfer direction | |
23faf569 | 124 | * @attrs: memory transaction attributes |
9989bcd3 PMD |
125 | */ |
126 | static inline MemTxResult dma_memory_rw(AddressSpace *as, dma_addr_t addr, | |
127 | void *buf, dma_addr_t len, | |
23faf569 | 128 | DMADirection dir, MemTxAttrs attrs) |
7a0bac4d | 129 | { |
df32fd1c | 130 | dma_barrier(as, dir); |
7a0bac4d | 131 | |
23faf569 | 132 | return dma_memory_rw_relaxed(as, addr, buf, len, dir, attrs); |
7a0bac4d BH |
133 | } |
134 | ||
b1f51303 PMD |
135 | /** |
136 | * dma_memory_read: Read from an address space from DMA controller. | |
137 | * | |
138 | * Return a MemTxResult indicating whether the operation succeeded | |
139 | * or failed (eg unassigned memory, device rejected the transaction, | |
140 | * IOMMU fault). Called within RCU critical section. | |
141 | * | |
142 | * @as: #AddressSpace to be accessed | |
143 | * @addr: address within that address space | |
144 | * @buf: buffer with the data transferred | |
145 | * @len: length of the data transferred | |
ba06fe8a | 146 | * @attrs: memory transaction attributes |
b1f51303 PMD |
147 | */ |
148 | static inline MemTxResult dma_memory_read(AddressSpace *as, dma_addr_t addr, | |
ba06fe8a PMD |
149 | void *buf, dma_addr_t len, |
150 | MemTxAttrs attrs) | |
d86a77f8 | 151 | { |
23faf569 | 152 | return dma_memory_rw(as, addr, buf, len, |
ba06fe8a | 153 | DMA_DIRECTION_TO_DEVICE, attrs); |
d86a77f8 DG |
154 | } |
155 | ||
77c71d1d PMD |
156 | /** |
157 | * address_space_write: Write to address space from DMA controller. | |
158 | * | |
159 | * Return a MemTxResult indicating whether the operation succeeded | |
160 | * or failed (eg unassigned memory, device rejected the transaction, | |
161 | * IOMMU fault). | |
162 | * | |
163 | * @as: #AddressSpace to be accessed | |
164 | * @addr: address within that address space | |
165 | * @buf: buffer with the data transferred | |
166 | * @len: the number of bytes to write | |
ba06fe8a | 167 | * @attrs: memory transaction attributes |
77c71d1d PMD |
168 | */ |
169 | static inline MemTxResult dma_memory_write(AddressSpace *as, dma_addr_t addr, | |
ba06fe8a PMD |
170 | const void *buf, dma_addr_t len, |
171 | MemTxAttrs attrs) | |
d86a77f8 | 172 | { |
df32fd1c | 173 | return dma_memory_rw(as, addr, (void *)buf, len, |
ba06fe8a | 174 | DMA_DIRECTION_FROM_DEVICE, attrs); |
d86a77f8 DG |
175 | } |
176 | ||
bb755f52 PMD |
/**
 * dma_memory_set: Fill memory with a constant byte from DMA controller.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @c: constant byte to fill the memory
 * @len: the number of bytes to fill with the constant byte
 * @attrs: memory transaction attributes
 */
MemTxResult dma_memory_set(AddressSpace *as, dma_addr_t addr,
                           uint8_t c, dma_addr_t len, MemTxAttrs attrs);
d86a77f8 | 192 | |
9c211ad2 PMD |
193 | /** |
194 | * address_space_map: Map a physical memory region into a host virtual address. | |
195 | * | |
196 | * May map a subset of the requested range, given by and returned in @plen. | |
197 | * May return %NULL and set *@plen to zero(0), if resources needed to perform | |
198 | * the mapping are exhausted. | |
199 | * Use only for reads OR writes - not for read-modify-write operations. | |
200 | * | |
201 | * @as: #AddressSpace to be accessed | |
202 | * @addr: address within that address space | |
203 | * @len: pointer to length of buffer; updated on return | |
204 | * @dir: indicates the transfer direction | |
205 | */ | |
df32fd1c | 206 | static inline void *dma_memory_map(AddressSpace *as, |
d86a77f8 DG |
207 | dma_addr_t addr, dma_addr_t *len, |
208 | DMADirection dir) | |
209 | { | |
24addbc7 PB |
210 | hwaddr xlen = *len; |
211 | void *p; | |
212 | ||
f26404fb PM |
213 | p = address_space_map(as, addr, &xlen, dir == DMA_DIRECTION_FROM_DEVICE, |
214 | MEMTXATTRS_UNSPECIFIED); | |
24addbc7 PB |
215 | *len = xlen; |
216 | return p; | |
d86a77f8 DG |
217 | } |
218 | ||
9c211ad2 PMD |
219 | /** |
220 | * address_space_unmap: Unmaps a memory region previously mapped | |
221 | * by dma_memory_map() | |
222 | * | |
223 | * Will also mark the memory as dirty if @dir == %DMA_DIRECTION_FROM_DEVICE. | |
224 | * @access_len gives the amount of memory that was actually read or written | |
225 | * by the caller. | |
226 | * | |
227 | * @as: #AddressSpace used | |
228 | * @buffer: host pointer as returned by address_space_map() | |
229 | * @len: buffer length as returned by address_space_map() | |
230 | * @dir: indicates the transfer direction | |
231 | * @access_len: amount of data actually transferred | |
232 | */ | |
df32fd1c | 233 | static inline void dma_memory_unmap(AddressSpace *as, |
d86a77f8 DG |
234 | void *buffer, dma_addr_t len, |
235 | DMADirection dir, dma_addr_t access_len) | |
236 | { | |
df32fd1c | 237 | address_space_unmap(as, buffer, (hwaddr)len, |
24addbc7 | 238 | dir == DMA_DIRECTION_FROM_DEVICE, access_len); |
d86a77f8 DG |
239 | } |
240 | ||
/*
 * Define a pair of single-value DMA accessors,
 * ld<_lname>_<_end>_dma() and st<_sname>_<_end>_dma(), that transfer one
 * (_bits)-bit value with the given endianness (_end is "le" or "be")
 * through the barriered dma_memory_{read,write}() helpers.
 */
#define DEFINE_LDST_DMA(_lname, _sname, _bits, _end) \
    static inline uint##_bits##_t ld##_lname##_##_end##_dma(AddressSpace *as, \
                                                            dma_addr_t addr) \
    { \
        uint##_bits##_t val; \
        dma_memory_read(as, addr, &val, (_bits) / 8, MEMTXATTRS_UNSPECIFIED); \
        return _end##_bits##_to_cpu(val); \
    } \
    static inline void st##_sname##_##_end##_dma(AddressSpace *as, \
                                                 dma_addr_t addr, \
                                                 uint##_bits##_t val) \
    { \
        val = cpu_to_##_end##_bits(val); \
        dma_memory_write(as, addr, &val, (_bits) / 8, MEMTXATTRS_UNSPECIFIED); \
    }
256 | ||
df32fd1c | 257 | static inline uint8_t ldub_dma(AddressSpace *as, dma_addr_t addr) |
d86a77f8 DG |
258 | { |
259 | uint8_t val; | |
260 | ||
ba06fe8a | 261 | dma_memory_read(as, addr, &val, 1, MEMTXATTRS_UNSPECIFIED); |
d86a77f8 DG |
262 | return val; |
263 | } | |
264 | ||
df32fd1c | 265 | static inline void stb_dma(AddressSpace *as, dma_addr_t addr, uint8_t val) |
d86a77f8 | 266 | { |
ba06fe8a | 267 | dma_memory_write(as, addr, &val, 1, MEMTXATTRS_UNSPECIFIED); |
d86a77f8 DG |
268 | } |
269 | ||
/* 16/32/64-bit little- and big-endian DMA load/store helpers. */
DEFINE_LDST_DMA(uw, w, 16, le);
DEFINE_LDST_DMA(l, l, 32, le);
DEFINE_LDST_DMA(q, q, 64, le);
DEFINE_LDST_DMA(uw, w, 16, be);
DEFINE_LDST_DMA(l, l, 32, be);
DEFINE_LDST_DMA(q, q, 64, be);

#undef DEFINE_LDST_DMA
278 | ||
/* One contiguous (bus address, length) span within a QEMUSGList. */
struct ScatterGatherEntry {
    dma_addr_t base;
    dma_addr_t len;
};
244ab90e | 283 | |
f487b677 PB |
/* Initialize @qsg for @as, sizing the entry array for @alloc_hint entries. */
void qemu_sglist_init(QEMUSGList *qsg, DeviceState *dev, int alloc_hint,
                      AddressSpace *as);
/* Append the span [@base, @base + @len) to @qsg. */
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len);
/* Free the resources held by @qsg. */
void qemu_sglist_destroy(QEMUSGList *qsg);
10dc8aef | 288 | #endif |
244ab90e | 289 | |
8a8e63eb PB |
/*
 * Callback type used by dma_blk_io() to submit one block-layer request
 * covering @iov at byte @offset; completion is signalled via @cb(cb_opaque).
 */
typedef BlockAIOCB *DMAIOFunc(int64_t offset, QEMUIOVector *iov,
                              BlockCompletionFunc *cb, void *cb_opaque,
                              void *opaque);

/* Asynchronous scatter-gather I/O over @sg in direction @dir via @io_func. */
BlockAIOCB *dma_blk_io(AioContext *ctx,
                       QEMUSGList *sg, uint64_t offset, uint32_t align,
                       DMAIOFunc *io_func, void *io_func_opaque,
                       BlockCompletionFunc *cb, void *opaque, DMADirection dir);
/* Convenience wrappers for plain reads/writes through a BlockBackend. */
BlockAIOCB *dma_blk_read(BlockBackend *blk,
                         QEMUSGList *sg, uint64_t offset, uint32_t align,
                         BlockCompletionFunc *cb, void *opaque);
BlockAIOCB *dma_blk_write(BlockBackend *blk,
                          QEMUSGList *sg, uint64_t offset, uint32_t align,
                          BlockCompletionFunc *cb, void *opaque);
/*
 * Copy between the linear buffer @ptr and the guest memory described by @sg,
 * returning the number of bytes transferred.
 * NOTE(review): the read/write naming is presumably device-centric, matching
 * DMADirection — confirm against the implementation before relying on it.
 */
uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg);
uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg);

/* Start block-accounting of type @type for the total size of @sg. */
void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type);
309 | ||
f14fb6c2 EA |
/**
 * dma_aligned_pow2_mask: Return the address bit mask of the largest
 * power of 2 size less or equal than @end - @start + 1, aligned with @start,
 * and bounded by 1 << @max_addr_bits bits.
 *
 * @start: range start address
 * @end: range end address (greater than @start)
 * @max_addr_bits: max address bits (<= 64)
 */
uint64_t dma_aligned_pow2_mask(uint64_t start, uint64_t end,
                               int max_addr_bits);
321 | ||
244ab90e | 322 | #endif |