// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO: IOMMU DMA mapping support for Type1 IOMMU
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 *
 * We arbitrarily define a Type1 IOMMU as one matching the below code.
 * It could be called the x86 IOMMU as it's designed for AMD-Vi & Intel
 * VT-d, but that makes it harder to re-use as theoretically anyone
 * implementing a similar IOMMU could make use of this.  We expect the
 * IOMMU to support the IOMMU API and have few to no restrictions around
 * the IOVA range that can be mapped.  The Type1 IOMMU is currently
 * optimized for relatively static mappings of a userspace process with
 * userspace pages pinned into memory.  We also assume devices and IOMMU
 * domains are PCI based as the IOMMU API is still centered around a
 * device/bus interface rather than a group interface.
 */

#include <linux/compat.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/workqueue.h>
#include <linux/mdev.h>
#include <linux/notifier.h>
#include <linux/dma-iommu.h>
#include <linux/irqdomain.h>

#define DRIVER_VERSION  "0.2"
#define DRIVER_AUTHOR   "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC     "Type1 IOMMU driver for VFIO"

static bool allow_unsafe_interrupts;
module_param_named(allow_unsafe_interrupts,
                   allow_unsafe_interrupts, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(allow_unsafe_interrupts,
                 "Enable VFIO IOMMU support on platforms without interrupt remapping support.");

static bool disable_hugepages;
module_param_named(disable_hugepages,
                   disable_hugepages, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_hugepages,
                 "Disable VFIO IOMMU support for IOMMU hugepages.");

static unsigned int dma_entry_limit __read_mostly = U16_MAX;
module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644);
MODULE_PARM_DESC(dma_entry_limit,
                 "Maximum number of user DMA mappings per container (65535).");

struct vfio_iommu {
        struct list_head        domain_list;
        struct list_head        iova_list;
        struct vfio_domain      *external_domain; /* domain for external user */
        struct mutex            lock;
        struct rb_root          dma_list;
        struct blocking_notifier_head notifier;
        unsigned int            dma_avail;
        bool                    v2;
        bool                    nesting;
};

struct vfio_domain {
        struct iommu_domain     *domain;
        struct list_head        next;
        struct list_head        group_list;
        int                     prot;           /* IOMMU_CACHE */
        bool                    fgsp;           /* Fine-grained super pages */
};

struct vfio_dma {
        struct rb_node          node;
        dma_addr_t              iova;           /* Device address */
        unsigned long           vaddr;          /* Process virtual addr */
        size_t                  size;           /* Map size (bytes) */
        int                     prot;           /* IOMMU_READ/WRITE */
        bool                    iommu_mapped;
        bool                    lock_cap;       /* capable(CAP_IPC_LOCK) */
        struct task_struct      *task;
        struct rb_root          pfn_list;       /* Ex-user pinned pfn list */
};

struct vfio_group {
        struct iommu_group      *iommu_group;
        struct list_head        next;
        bool                    mdev_group;     /* An mdev group */
};

struct vfio_iova {
        struct list_head        list;
        dma_addr_t              start;
        dma_addr_t              end;
};

/*
 * Guest RAM pinning working set or DMA target
 */
struct vfio_pfn {
        struct rb_node          node;
        dma_addr_t              iova;           /* Device address */
        unsigned long           pfn;            /* Host pfn */
        atomic_t                ref_count;
};

struct vfio_regions {
        struct list_head list;
        dma_addr_t iova;
        phys_addr_t phys;
        size_t len;
};

#define IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu) \
                                        (!list_empty(&iommu->domain_list))

static int put_pfn(unsigned long pfn, int prot);

/*
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU
 */

static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
                                      dma_addr_t start, size_t size)
{
        struct rb_node *node = iommu->dma_list.rb_node;

        while (node) {
                struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node);

                if (start + size <= dma->iova)
                        node = node->rb_left;
                else if (start >= dma->iova + dma->size)
                        node = node->rb_right;
                else
                        return dma;
        }

        return NULL;
}

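/*
 * Illustrative example (editor's sketch, not part of the driver): with a
 * single tracked mapping { .iova = 0x100000, .size = 0x10000 } in the tree,
 *
 *        vfio_find_dma(iommu, 0x100000, 0x1000);  // returns the vfio_dma
 *        vfio_find_dma(iommu, 0x10ffff, 1);       // returns the vfio_dma
 *        vfio_find_dma(iommu, 0x110000, 0x1000);  // returns NULL
 *
 * i.e. any [start, start + size) range overlapping [iova, iova + size)
 * matches, which is what the <= / >= comparisons above implement.
 */
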
static void vfio_link_dma(struct vfio_iommu *iommu, struct vfio_dma *new)
{
        struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL;
        struct vfio_dma *dma;

        while (*link) {
                parent = *link;
                dma = rb_entry(parent, struct vfio_dma, node);

                if (new->iova + new->size <= dma->iova)
                        link = &(*link)->rb_left;
                else
                        link = &(*link)->rb_right;
        }

        rb_link_node(&new->node, parent, link);
        rb_insert_color(&new->node, &iommu->dma_list);
}

static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
{
        rb_erase(&old->node, &iommu->dma_list);
}

/*
 * Helper Functions for host iova-pfn list
 */
static struct vfio_pfn *vfio_find_vpfn(struct vfio_dma *dma, dma_addr_t iova)
{
        struct vfio_pfn *vpfn;
        struct rb_node *node = dma->pfn_list.rb_node;

        while (node) {
                vpfn = rb_entry(node, struct vfio_pfn, node);

                if (iova < vpfn->iova)
                        node = node->rb_left;
                else if (iova > vpfn->iova)
                        node = node->rb_right;
                else
                        return vpfn;
        }
        return NULL;
}

static void vfio_link_pfn(struct vfio_dma *dma,
                          struct vfio_pfn *new)
{
        struct rb_node **link, *parent = NULL;
        struct vfio_pfn *vpfn;

        link = &dma->pfn_list.rb_node;
        while (*link) {
                parent = *link;
                vpfn = rb_entry(parent, struct vfio_pfn, node);

                if (new->iova < vpfn->iova)
                        link = &(*link)->rb_left;
                else
                        link = &(*link)->rb_right;
        }

        rb_link_node(&new->node, parent, link);
        rb_insert_color(&new->node, &dma->pfn_list);
}

static void vfio_unlink_pfn(struct vfio_dma *dma, struct vfio_pfn *old)
{
        rb_erase(&old->node, &dma->pfn_list);
}

static int vfio_add_to_pfn_list(struct vfio_dma *dma, dma_addr_t iova,
                                unsigned long pfn)
{
        struct vfio_pfn *vpfn;

        vpfn = kzalloc(sizeof(*vpfn), GFP_KERNEL);
        if (!vpfn)
                return -ENOMEM;

        vpfn->iova = iova;
        vpfn->pfn = pfn;
        atomic_set(&vpfn->ref_count, 1);
        vfio_link_pfn(dma, vpfn);
        return 0;
}

static void vfio_remove_from_pfn_list(struct vfio_dma *dma,
                                      struct vfio_pfn *vpfn)
{
        vfio_unlink_pfn(dma, vpfn);
        kfree(vpfn);
}

static struct vfio_pfn *vfio_iova_get_vfio_pfn(struct vfio_dma *dma,
                                               unsigned long iova)
{
        struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova);

        if (vpfn)
                atomic_inc(&vpfn->ref_count);
        return vpfn;
}

static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn)
{
        int ret = 0;

        if (atomic_dec_and_test(&vpfn->ref_count)) {
                ret = put_pfn(vpfn->pfn, dma->prot);
                vfio_remove_from_pfn_list(dma, vpfn);
        }
        return ret;
}

static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async)
{
        struct mm_struct *mm;
        int ret;

        if (!npage)
                return 0;

        mm = async ? get_task_mm(dma->task) : dma->task->mm;
        if (!mm)
                return -ESRCH; /* process exited */

        ret = down_write_killable(&mm->mmap_sem);
        if (!ret) {
                ret = __account_locked_vm(mm, abs(npage), npage > 0, dma->task,
                                          dma->lock_cap);
                up_write(&mm->mmap_sem);
        }

        if (async)
                mmput(mm);

        return ret;
}

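/*
 * Usage sketch (illustrative): pinning paths call this with a positive
 * npage to charge pages against dma->task's RLIMIT_MEMLOCK, and unpinning
 * paths with a negative npage to undo the charge, e.g.
 *
 *        vfio_lock_acct(dma, lock_acct, false);  // current == dma->task
 *        vfio_lock_acct(dma, -unlocked, true);   // possibly another task
 *
 * "async" selects get_task_mm(), making the call safe when current is not
 * dma->task (the mdev pin/unpin paths below).
 */
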
/*
 * Some mappings aren't backed by a struct page, for example an mmap'd
 * MMIO range for our own or another device.  These use a different
 * pfn conversion and shouldn't be tracked as locked pages.
 * For compound pages, any driver that sets the reserved bit in the head
 * page needs to set the reserved bit in all subpages to be safe.
 */
static bool is_invalid_reserved_pfn(unsigned long pfn)
{
        if (pfn_valid(pfn))
                return PageReserved(pfn_to_page(pfn));

        return true;
}

static int put_pfn(unsigned long pfn, int prot)
{
        if (!is_invalid_reserved_pfn(pfn)) {
                struct page *page = pfn_to_page(pfn);

                unpin_user_pages_dirty_lock(&page, 1, prot & IOMMU_WRITE);
                return 1;
        }
        return 0;
}

static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
                         int prot, unsigned long *pfn)
{
        struct page *page[1];
        struct vm_area_struct *vma;
        unsigned int flags = 0;
        int ret;

        if (prot & IOMMU_WRITE)
                flags |= FOLL_WRITE;

        down_read(&mm->mmap_sem);
        ret = pin_user_pages_remote(NULL, mm, vaddr, 1, flags | FOLL_LONGTERM,
                                    page, NULL, NULL);
        if (ret == 1) {
                *pfn = page_to_pfn(page[0]);
                ret = 0;
                goto done;
        }

        vaddr = untagged_addr(vaddr);

        vma = find_vma_intersection(mm, vaddr, vaddr + 1);

        if (vma && vma->vm_flags & VM_PFNMAP) {
                if (!follow_pfn(vma, vaddr, pfn) &&
                    is_invalid_reserved_pfn(*pfn))
                        ret = 0;
        }
done:
        up_read(&mm->mmap_sem);
        return ret;
}

/*
 * Attempt to pin pages.  We really don't want to track all the pfns and
 * the iommu can only map chunks of consecutive pfns anyway, so get the
 * first page and all consecutive pages with the same locking.
 */
static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
                                  long npage, unsigned long *pfn_base,
                                  unsigned long limit)
{
        unsigned long pfn = 0;
        long ret, pinned = 0, lock_acct = 0;
        bool rsvd;
        dma_addr_t iova = vaddr - dma->vaddr + dma->iova;

        /* This code path is only user initiated */
        if (!current->mm)
                return -ENODEV;

        ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, pfn_base);
        if (ret)
                return ret;

        pinned++;
        rsvd = is_invalid_reserved_pfn(*pfn_base);

        /*
         * Reserved pages aren't counted against the user, externally pinned
         * pages are already counted against the user.
         */
        if (!rsvd && !vfio_find_vpfn(dma, iova)) {
                if (!dma->lock_cap && current->mm->locked_vm + 1 > limit) {
                        put_pfn(*pfn_base, dma->prot);
                        pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
                                limit << PAGE_SHIFT);
                        return -ENOMEM;
                }
                lock_acct++;
        }

        if (unlikely(disable_hugepages))
                goto out;

        /* Lock all the consecutive pages from pfn_base */
        for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage;
             pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) {
                ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn);
                if (ret)
                        break;

                if (pfn != *pfn_base + pinned ||
                    rsvd != is_invalid_reserved_pfn(pfn)) {
                        put_pfn(pfn, dma->prot);
                        break;
                }

                if (!rsvd && !vfio_find_vpfn(dma, iova)) {
                        if (!dma->lock_cap &&
                            current->mm->locked_vm + lock_acct + 1 > limit) {
                                put_pfn(pfn, dma->prot);
                                pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
                                        __func__, limit << PAGE_SHIFT);
                                ret = -ENOMEM;
                                goto unpin_out;
                        }
                        lock_acct++;
                }
        }

out:
        ret = vfio_lock_acct(dma, lock_acct, false);

unpin_out:
        if (ret) {
                if (!rsvd) {
                        for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
                                put_pfn(pfn, dma->prot);
                }

                return ret;
        }

        return pinned;
}

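/*
 * Illustrative example: for a 2M-aligned, physically contiguous user buffer,
 * a single call here can return pinned = 512 with *pfn_base at the first
 * page, letting the caller hand the IOMMU one 2M mapping rather than 512
 * individual 4K mappings.
 */
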
static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
                                    unsigned long pfn, long npage,
                                    bool do_accounting)
{
        long unlocked = 0, locked = 0;
        long i;

        for (i = 0; i < npage; i++, iova += PAGE_SIZE) {
                if (put_pfn(pfn++, dma->prot)) {
                        unlocked++;
                        if (vfio_find_vpfn(dma, iova))
                                locked++;
                }
        }

        if (do_accounting)
                vfio_lock_acct(dma, locked - unlocked, true);

        return unlocked;
}

static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
                                  unsigned long *pfn_base, bool do_accounting)
{
        struct mm_struct *mm;
        int ret;

        mm = get_task_mm(dma->task);
        if (!mm)
                return -ENODEV;

        ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base);
        if (!ret && do_accounting && !is_invalid_reserved_pfn(*pfn_base)) {
                ret = vfio_lock_acct(dma, 1, true);
                if (ret) {
                        put_pfn(*pfn_base, dma->prot);
                        if (ret == -ENOMEM)
                                pr_warn("%s: Task %s (%d) RLIMIT_MEMLOCK "
                                        "(%ld) exceeded\n", __func__,
                                        dma->task->comm, task_pid_nr(dma->task),
                                        task_rlimit(dma->task, RLIMIT_MEMLOCK));
                }
        }

        mmput(mm);
        return ret;
}

static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova,
                                    bool do_accounting)
{
        int unlocked;
        struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova);

        if (!vpfn)
                return 0;

        unlocked = vfio_iova_put_vfio_pfn(dma, vpfn);

        if (do_accounting)
                vfio_lock_acct(dma, -unlocked, true);

        return unlocked;
}

static int vfio_iommu_type1_pin_pages(void *iommu_data,
                                      unsigned long *user_pfn,
                                      int npage, int prot,
                                      unsigned long *phys_pfn)
{
        struct vfio_iommu *iommu = iommu_data;
        int i, j, ret;
        unsigned long remote_vaddr;
        struct vfio_dma *dma;
        bool do_accounting;

        if (!iommu || !user_pfn || !phys_pfn)
                return -EINVAL;

        /* Supported for v2 version only */
        if (!iommu->v2)
                return -EACCES;

        mutex_lock(&iommu->lock);

        /* Fail if notifier list is empty */
        if (!iommu->notifier.head) {
                ret = -EINVAL;
                goto pin_done;
        }

        /*
         * If an iommu capable domain exists in the container then all pages
         * are already pinned and accounted.  Accounting should be done if
         * there is no iommu capable domain in the container.
         */
        do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu);

        for (i = 0; i < npage; i++) {
                dma_addr_t iova;
                struct vfio_pfn *vpfn;

                iova = user_pfn[i] << PAGE_SHIFT;
                dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
                if (!dma) {
                        ret = -EINVAL;
                        goto pin_unwind;
                }

                if ((dma->prot & prot) != prot) {
                        ret = -EPERM;
                        goto pin_unwind;
                }

                vpfn = vfio_iova_get_vfio_pfn(dma, iova);
                if (vpfn) {
                        phys_pfn[i] = vpfn->pfn;
                        continue;
                }

                remote_vaddr = dma->vaddr + (iova - dma->iova);
                ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn[i],
                                             do_accounting);
                if (ret)
                        goto pin_unwind;

                ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]);
                if (ret) {
                        vfio_unpin_page_external(dma, iova, do_accounting);
                        goto pin_unwind;
                }
        }

        ret = i;
        goto pin_done;

pin_unwind:
        phys_pfn[i] = 0;
        for (j = 0; j < i; j++) {
                dma_addr_t iova;

                iova = user_pfn[j] << PAGE_SHIFT;
                dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
                vfio_unpin_page_external(dma, iova, do_accounting);
                phys_pfn[j] = 0;
        }
pin_done:
        mutex_unlock(&iommu->lock);
        return ret;
}

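/*
 * Illustrative sketch (editor's example): an mdev vendor driver reaches this
 * handler through the exported vfio_pin_pages() wrapper, roughly:
 *
 *        unsigned long gfn = gpa >> PAGE_SHIFT, pfn;  // gpa: caller's guest
 *        int ret = vfio_pin_pages(mdev_dev(mdev), &gfn, 1,
 *                                 IOMMU_READ | IOMMU_WRITE, &pfn);
 *        if (ret != 1)
 *                return ret < 0 ? ret : -EFAULT;
 *
 * On success the return value is the number of pages pinned and pfn holds
 * the host pfn backing the guest page.
 */
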
static int vfio_iommu_type1_unpin_pages(void *iommu_data,
                                        unsigned long *user_pfn,
                                        int npage)
{
        struct vfio_iommu *iommu = iommu_data;
        bool do_accounting;
        int i;

        if (!iommu || !user_pfn)
                return -EINVAL;

        /* Supported for v2 version only */
        if (!iommu->v2)
                return -EACCES;

        mutex_lock(&iommu->lock);

        do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu);
        for (i = 0; i < npage; i++) {
                struct vfio_dma *dma;
                dma_addr_t iova;

                iova = user_pfn[i] << PAGE_SHIFT;
                dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
                if (!dma)
                        goto unpin_exit;
                vfio_unpin_page_external(dma, iova, do_accounting);
        }

unpin_exit:
        mutex_unlock(&iommu->lock);
        return i > npage ? npage : (i > 0 ? i : -EINVAL);
}

static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain,
                            struct list_head *regions,
                            struct iommu_iotlb_gather *iotlb_gather)
{
        long unlocked = 0;
        struct vfio_regions *entry, *next;

        iommu_tlb_sync(domain->domain, iotlb_gather);

        list_for_each_entry_safe(entry, next, regions, list) {
                unlocked += vfio_unpin_pages_remote(dma,
                                                    entry->iova,
                                                    entry->phys >> PAGE_SHIFT,
                                                    entry->len >> PAGE_SHIFT,
                                                    false);
                list_del(&entry->list);
                kfree(entry);
        }

        cond_resched();

        return unlocked;
}

/*
 * Generally, VFIO needs to unpin remote pages after each IOTLB flush.
 * Therefore, when using the IOTLB flush sync interface, VFIO needs to keep
 * track of these regions (currently using a list).
 *
 * This value specifies the maximum number of regions for each IOTLB flush
 * sync.
 */
#define VFIO_IOMMU_TLB_SYNC_MAX         512

static size_t unmap_unpin_fast(struct vfio_domain *domain,
                               struct vfio_dma *dma, dma_addr_t *iova,
                               size_t len, phys_addr_t phys, long *unlocked,
                               struct list_head *unmapped_list,
                               int *unmapped_cnt,
                               struct iommu_iotlb_gather *iotlb_gather)
{
        size_t unmapped = 0;
        struct vfio_regions *entry = kzalloc(sizeof(*entry), GFP_KERNEL);

        if (entry) {
                unmapped = iommu_unmap_fast(domain->domain, *iova, len,
                                            iotlb_gather);

                if (!unmapped) {
                        kfree(entry);
                } else {
                        entry->iova = *iova;
                        entry->phys = phys;
                        entry->len = unmapped;
                        list_add_tail(&entry->list, unmapped_list);

                        *iova += unmapped;
                        (*unmapped_cnt)++;
                }
        }

        /*
         * Sync if the number of fast-unmap regions hits the limit
         * or in case of errors.
         */
        if (*unmapped_cnt >= VFIO_IOMMU_TLB_SYNC_MAX || !unmapped) {
                *unlocked += vfio_sync_unpin(dma, domain, unmapped_list,
                                             iotlb_gather);
                *unmapped_cnt = 0;
        }

        return unmapped;
}

static size_t unmap_unpin_slow(struct vfio_domain *domain,
                               struct vfio_dma *dma, dma_addr_t *iova,
                               size_t len, phys_addr_t phys,
                               long *unlocked)
{
        size_t unmapped = iommu_unmap(domain->domain, *iova, len);

        if (unmapped) {
                *unlocked += vfio_unpin_pages_remote(dma, *iova,
                                                     phys >> PAGE_SHIFT,
                                                     unmapped >> PAGE_SHIFT,
                                                     false);
                *iova += unmapped;
                cond_resched();
        }
        return unmapped;
}

static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
                             bool do_accounting)
{
        dma_addr_t iova = dma->iova, end = dma->iova + dma->size;
        struct vfio_domain *domain, *d;
        LIST_HEAD(unmapped_region_list);
        struct iommu_iotlb_gather iotlb_gather;
        int unmapped_region_cnt = 0;
        long unlocked = 0;

        if (!dma->size)
                return 0;

        if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
                return 0;

        /*
         * We use the IOMMU to track the physical addresses, otherwise we'd
         * need a much more complicated tracking system.  Unfortunately that
         * means we need to use one of the iommu domains to figure out the
         * pfns to unpin.  The rest need to be unmapped in advance so we have
         * no iommu translations remaining when the pages are unpinned.
         */
        domain = d = list_first_entry(&iommu->domain_list,
                                      struct vfio_domain, next);

        list_for_each_entry_continue(d, &iommu->domain_list, next) {
                iommu_unmap(d->domain, dma->iova, dma->size);
                cond_resched();
        }

        iommu_iotlb_gather_init(&iotlb_gather);
        while (iova < end) {
                size_t unmapped, len;
                phys_addr_t phys, next;

                phys = iommu_iova_to_phys(domain->domain, iova);
                if (WARN_ON(!phys)) {
                        iova += PAGE_SIZE;
                        continue;
                }

                /*
                 * To optimize for fewer iommu_unmap() calls, each of which
                 * may require hardware cache flushing, try to find the
                 * largest contiguous physical memory chunk to unmap.
                 */
                for (len = PAGE_SIZE;
                     !domain->fgsp && iova + len < end; len += PAGE_SIZE) {
                        next = iommu_iova_to_phys(domain->domain, iova + len);
                        if (next != phys + len)
                                break;
                }

                /*
                 * First, try to use fast unmap/unpin.  In case of failure,
                 * switch to slow unmap/unpin path.
                 */
                unmapped = unmap_unpin_fast(domain, dma, &iova, len, phys,
                                            &unlocked, &unmapped_region_list,
                                            &unmapped_region_cnt,
                                            &iotlb_gather);
                if (!unmapped) {
                        unmapped = unmap_unpin_slow(domain, dma, &iova, len,
                                                    phys, &unlocked);
                        if (WARN_ON(!unmapped))
                                break;
                }
        }

        dma->iommu_mapped = false;

        if (unmapped_region_cnt) {
                unlocked += vfio_sync_unpin(dma, domain, &unmapped_region_list,
                                            &iotlb_gather);
        }

        if (do_accounting) {
                vfio_lock_acct(dma, -unlocked, true);
                return 0;
        }
        return unlocked;
}

static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
{
        vfio_unmap_unpin(iommu, dma, true);
        vfio_unlink_dma(iommu, dma);
        put_task_struct(dma->task);
        kfree(dma);
        iommu->dma_avail++;
}

static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
{
        struct vfio_domain *domain;
        unsigned long bitmap = ULONG_MAX;

        mutex_lock(&iommu->lock);
        list_for_each_entry(domain, &iommu->domain_list, next)
                bitmap &= domain->domain->pgsize_bitmap;
        mutex_unlock(&iommu->lock);

        /*
         * In case the IOMMU supports page sizes smaller than PAGE_SIZE
         * we pretend PAGE_SIZE is supported and hide sub-PAGE_SIZE sizes.
         * That way the user will be able to map/unmap buffers whose size/
         * start address is aligned with PAGE_SIZE.  Pinning code uses that
         * granularity while iommu driver can use the sub-PAGE_SIZE size
         * to map the buffer.
         */
        if (bitmap & ~PAGE_MASK) {
                bitmap &= PAGE_MASK;
                bitmap |= PAGE_SIZE;
        }

        return bitmap;
}

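/*
 * Example (illustrative): an IOMMU advertising 4K/2M/1G support yields a
 * pgsize_bitmap of 0x40201000.  The map/unmap paths below derive their
 * alignment mask from the smallest set bit:
 *
 *        mask = ((uint64_t)1 << __ffs(bitmap)) - 1;  // 0xfff here
 *
 * so user requests must be aligned to the minimum supported page size.
 */
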
static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
                             struct vfio_iommu_type1_dma_unmap *unmap)
{
        uint64_t mask;
        struct vfio_dma *dma, *dma_last = NULL;
        size_t unmapped = 0;
        int ret = 0, retries = 0;

        mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;

        if (unmap->iova & mask)
                return -EINVAL;
        if (!unmap->size || unmap->size & mask)
                return -EINVAL;
        if (unmap->iova + unmap->size - 1 < unmap->iova ||
            unmap->size > SIZE_MAX)
                return -EINVAL;

        WARN_ON(mask & PAGE_MASK);
again:
        mutex_lock(&iommu->lock);

        /*
         * vfio-iommu-type1 (v1) - User mappings were coalesced together to
         * avoid tracking individual mappings.  This means that the granularity
         * of the original mapping was lost and the user was allowed to attempt
         * to unmap any range.  Depending on the contiguousness of physical
         * memory and page sizes supported by the IOMMU, arbitrary unmaps may
         * or may not have worked.  We only guaranteed unmap granularity
         * matching the original mapping; even though it was untracked here,
         * the original mappings are reflected in IOMMU mappings.  This
         * resulted in a couple unusual behaviors.  First, if a range is not
         * able to be unmapped, ex. a set of 4k pages that was mapped as a
         * 2M hugepage into the IOMMU, the unmap ioctl returns success but with
         * a zero sized unmap.  Also, if an unmap request overlaps the first
         * address of a hugepage, the IOMMU will unmap the entire hugepage.
         * This also returns success and the returned unmap size reflects the
         * actual size unmapped.
         *
         * We attempt to maintain compatibility with this "v1" interface, but
         * we take control out of the hands of the IOMMU.  Therefore, an unmap
         * request offset from the beginning of the original mapping will
         * return success with zero sized unmap.  And an unmap request covering
         * the first iova of a mapping will unmap the entire range.
         *
         * The v2 version of this interface intends to be more deterministic.
         * Unmap requests must fully cover previous mappings.  Multiple
         * mappings may still be unmapped by specifying large ranges, but there
         * must not be any previous mappings bisected by the range.  An error
         * will be returned if these conditions are not met.  The v2 interface
         * will only return success and a size of zero if there were no
         * mappings within the range.
         */
        if (iommu->v2) {
                dma = vfio_find_dma(iommu, unmap->iova, 1);
                if (dma && dma->iova != unmap->iova) {
                        ret = -EINVAL;
                        goto unlock;
                }
                dma = vfio_find_dma(iommu, unmap->iova + unmap->size - 1, 0);
                if (dma && dma->iova + dma->size != unmap->iova + unmap->size) {
                        ret = -EINVAL;
                        goto unlock;
                }
        }

        while ((dma = vfio_find_dma(iommu, unmap->iova, unmap->size))) {
                if (!iommu->v2 && unmap->iova > dma->iova)
                        break;
                /*
                 * Task with same address space who mapped this iova range is
                 * allowed to unmap the iova range.
                 */
                if (dma->task->mm != current->mm)
                        break;

                if (!RB_EMPTY_ROOT(&dma->pfn_list)) {
                        struct vfio_iommu_type1_dma_unmap nb_unmap;

                        if (dma_last == dma) {
                                BUG_ON(++retries > 10);
                        } else {
                                dma_last = dma;
                                retries = 0;
                        }

                        nb_unmap.iova = dma->iova;
                        nb_unmap.size = dma->size;

                        /*
                         * Notify anyone (mdev vendor drivers) to invalidate and
                         * unmap iovas within the range we're about to unmap.
                         * Vendor drivers MUST unpin pages in response to an
                         * invalidation.
                         */
                        mutex_unlock(&iommu->lock);
                        blocking_notifier_call_chain(&iommu->notifier,
                                                     VFIO_IOMMU_NOTIFY_DMA_UNMAP,
                                                     &nb_unmap);
                        goto again;
                }
                unmapped += dma->size;
                vfio_remove_dma(iommu, dma);
        }

unlock:
        mutex_unlock(&iommu->lock);

        /* Report how much was unmapped */
        unmap->size = unmapped;

        return ret;
}

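/*
 * Userspace sketch (illustrative): under the v2 semantics above, an unmap
 * must exactly cover one or more prior mappings, e.g.
 *
 *        struct vfio_iommu_type1_dma_unmap unmap = {
 *                .argsz = sizeof(unmap),
 *                .iova = iova,  // first iova of an earlier MAP_DMA
 *                .size = size,  // at least the full size of that mapping
 *        };
 *        ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, &unmap);
 *        // on return, unmap.size holds the bytes actually unmapped
 */
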
static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
                          unsigned long pfn, long npage, int prot)
{
        struct vfio_domain *d;
        int ret;

        list_for_each_entry(d, &iommu->domain_list, next) {
                ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
                                npage << PAGE_SHIFT, prot | d->prot);
                if (ret)
                        goto unwind;

                cond_resched();
        }

        return 0;

unwind:
        list_for_each_entry_continue_reverse(d, &iommu->domain_list, next)
                iommu_unmap(d->domain, iova, npage << PAGE_SHIFT);

        return ret;
}

static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma,
                            size_t map_size)
{
        dma_addr_t iova = dma->iova;
        unsigned long vaddr = dma->vaddr;
        size_t size = map_size;
        long npage;
        unsigned long pfn, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        int ret = 0;

        while (size) {
                /* Pin a contiguous chunk of memory */
                npage = vfio_pin_pages_remote(dma, vaddr + dma->size,
                                              size >> PAGE_SHIFT, &pfn, limit);
                if (npage <= 0) {
                        WARN_ON(!npage);
                        ret = (int)npage;
                        break;
                }

                /* Map it! */
                ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage,
                                     dma->prot);
                if (ret) {
                        vfio_unpin_pages_remote(dma, iova + dma->size, pfn,
                                                npage, true);
                        break;
                }

                size -= npage << PAGE_SHIFT;
                dma->size += npage << PAGE_SHIFT;
        }

        dma->iommu_mapped = true;

        if (ret)
                vfio_remove_dma(iommu, dma);

        return ret;
}

/*
 * Check dma map request is within a valid iova range
 */
static bool vfio_iommu_iova_dma_valid(struct vfio_iommu *iommu,
                                      dma_addr_t start, dma_addr_t end)
{
        struct list_head *iova = &iommu->iova_list;
        struct vfio_iova *node;

        list_for_each_entry(node, iova, list) {
                if (start >= node->start && end <= node->end)
                        return true;
        }

        /*
         * Check for list_empty() as well since a container with
         * a single mdev device will have an empty list.
         */
        return list_empty(iova);
}

static int vfio_dma_do_map(struct vfio_iommu *iommu,
                           struct vfio_iommu_type1_dma_map *map)
{
        dma_addr_t iova = map->iova;
        unsigned long vaddr = map->vaddr;
        size_t size = map->size;
        int ret = 0, prot = 0;
        uint64_t mask;
        struct vfio_dma *dma;

        /* Verify that none of our __u64 fields overflow */
        if (map->size != size || map->vaddr != vaddr || map->iova != iova)
                return -EINVAL;

        mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;

        WARN_ON(mask & PAGE_MASK);

        /* READ/WRITE from device perspective */
        if (map->flags & VFIO_DMA_MAP_FLAG_WRITE)
                prot |= IOMMU_WRITE;
        if (map->flags & VFIO_DMA_MAP_FLAG_READ)
                prot |= IOMMU_READ;

        if (!prot || !size || (size | iova | vaddr) & mask)
                return -EINVAL;

        /* Don't allow IOVA or virtual address wrap */
        if (iova + size - 1 < iova || vaddr + size - 1 < vaddr)
                return -EINVAL;

        mutex_lock(&iommu->lock);

        if (vfio_find_dma(iommu, iova, size)) {
                ret = -EEXIST;
                goto out_unlock;
        }

        if (!iommu->dma_avail) {
                ret = -ENOSPC;
                goto out_unlock;
        }

        if (!vfio_iommu_iova_dma_valid(iommu, iova, iova + size - 1)) {
                ret = -EINVAL;
                goto out_unlock;
        }

        dma = kzalloc(sizeof(*dma), GFP_KERNEL);
        if (!dma) {
                ret = -ENOMEM;
                goto out_unlock;
        }

        iommu->dma_avail--;
        dma->iova = iova;
        dma->vaddr = vaddr;
        dma->prot = prot;

        /*
         * We need to be able to both add to a task's locked memory and test
         * against the locked memory limit and we need to be able to do both
         * outside of this call path as pinning can be asynchronous via the
         * external interfaces for mdev devices.  RLIMIT_MEMLOCK requires a
         * task_struct and VM locked pages require an mm_struct, however
         * holding an indefinite mm reference is not recommended, therefore we
         * only hold a reference to a task.  We could hold a reference to
         * current, however QEMU uses this call path through vCPU threads,
         * which can be killed resulting in a NULL mm and failure in the unmap
         * path when called via a different thread.  Avoid this problem by
         * using the group_leader as threads within the same group require
         * both CLONE_THREAD and CLONE_VM and will therefore use the same
         * mm_struct.
         *
         * Previously we also used the task for testing CAP_IPC_LOCK at the
         * time of pinning and accounting, however has_capability() makes use
         * of real_cred, a copy-on-write field, so we can't guarantee that it
         * matches group_leader, or in fact that it might not change by the
         * time it's evaluated.  If a process were to call MAP_DMA with
         * CAP_IPC_LOCK but later drop it, it doesn't make sense that they
         * possibly see different results for an iommu_mapped vfio_dma vs
         * externally mapped.  Therefore track CAP_IPC_LOCK in vfio_dma at the
         * time of calling MAP_DMA.
         */
        get_task_struct(current->group_leader);
        dma->task = current->group_leader;
        dma->lock_cap = capable(CAP_IPC_LOCK);

        dma->pfn_list = RB_ROOT;

        /* Insert zero-sized and grow as we map chunks of it */
        vfio_link_dma(iommu, dma);

        /* Don't pin and map if container doesn't contain IOMMU capable domain */
        if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
                dma->size = size;
        else
                ret = vfio_pin_map_dma(iommu, dma, size);

out_unlock:
        mutex_unlock(&iommu->lock);
        return ret;
}

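/*
 * Userspace sketch (illustrative): a typical caller pins and maps a buffer
 * through the MAP_DMA ioctl serviced by the function above, e.g.
 *
 *        struct vfio_iommu_type1_dma_map map = {
 *                .argsz = sizeof(map),
 *                .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *                .vaddr = (__u64)(uintptr_t)buf,  // page aligned
 *                .iova = 0x100000,                // within a valid iova range
 *                .size = len,                     // page aligned, non-zero
 *        };
 *        ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map);
 */
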
static int vfio_bus_type(struct device *dev, void *data)
{
        struct bus_type **bus = data;

        if (*bus && *bus != dev->bus)
                return -EINVAL;

        *bus = dev->bus;

        return 0;
}

static int vfio_iommu_replay(struct vfio_iommu *iommu,
                             struct vfio_domain *domain)
{
        struct vfio_domain *d;
        struct rb_node *n;
        unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        int ret;

        /* Arbitrarily pick the first domain in the list for lookups */
        d = list_first_entry(&iommu->domain_list, struct vfio_domain, next);
        n = rb_first(&iommu->dma_list);

        for (; n; n = rb_next(n)) {
                struct vfio_dma *dma;
                dma_addr_t iova;

                dma = rb_entry(n, struct vfio_dma, node);
                iova = dma->iova;

                while (iova < dma->iova + dma->size) {
                        phys_addr_t phys;
                        size_t size;

                        if (dma->iommu_mapped) {
                                phys_addr_t p;
                                dma_addr_t i;

                                phys = iommu_iova_to_phys(d->domain, iova);

                                if (WARN_ON(!phys)) {
                                        iova += PAGE_SIZE;
                                        continue;
                                }

                                size = PAGE_SIZE;
                                p = phys + size;
                                i = iova + size;
                                while (i < dma->iova + dma->size &&
                                       p == iommu_iova_to_phys(d->domain, i)) {
                                        size += PAGE_SIZE;
                                        p += PAGE_SIZE;
                                        i += PAGE_SIZE;
                                }
                        } else {
                                unsigned long pfn;
                                unsigned long vaddr = dma->vaddr +
                                                      (iova - dma->iova);
                                size_t n = dma->iova + dma->size - iova;
                                long npage;

                                npage = vfio_pin_pages_remote(dma, vaddr,
                                                              n >> PAGE_SHIFT,
                                                              &pfn, limit);
                                if (npage <= 0) {
                                        WARN_ON(!npage);
                                        ret = (int)npage;
                                        return ret;
                                }

                                phys = pfn << PAGE_SHIFT;
                                size = npage << PAGE_SHIFT;
                        }

                        ret = iommu_map(domain->domain, iova, phys,
                                        size, dma->prot | domain->prot);
                        if (ret)
                                return ret;

                        iova += size;
                }
                dma->iommu_mapped = true;
        }
        return 0;
}

/*
 * We change our unmap behavior slightly depending on whether the IOMMU
 * supports fine-grained superpages.  IOMMUs like AMD-Vi will use a superpage
 * for practically any contiguous power-of-two mapping we give it.  This means
 * we don't need to look for contiguous chunks ourselves to make unmapping
 * more efficient.  On IOMMUs with coarse-grained super pages, like Intel VT-d
 * with discrete 2M/1G/512G/1T superpages, identifying contiguous chunks
 * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
 * hugetlbfs is in use.
 */
static void vfio_test_domain_fgsp(struct vfio_domain *domain)
{
        struct page *pages;
        int ret, order = get_order(PAGE_SIZE * 2);

        pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!pages)
                return;

        ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
                        IOMMU_READ | IOMMU_WRITE | domain->prot);
        if (!ret) {
                size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);

                if (unmapped == PAGE_SIZE)
                        iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE);
                else
                        domain->fgsp = true;
        }

        __free_pages(pages, order);
}

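/*
 * Illustrative outcome of the probe above: on AMD-Vi the two-page mapping
 * becomes a superpage, so the PAGE_SIZE unmap tears down both pages
 * (unmapped != PAGE_SIZE) and fgsp is set; on VT-d only the first 4K page
 * is unmapped (unmapped == PAGE_SIZE) and fgsp stays false.
 */
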
static struct vfio_group *find_iommu_group(struct vfio_domain *domain,
                                           struct iommu_group *iommu_group)
{
        struct vfio_group *g;

        list_for_each_entry(g, &domain->group_list, next) {
                if (g->iommu_group == iommu_group)
                        return g;
        }

        return NULL;
}

static bool vfio_iommu_has_sw_msi(struct list_head *group_resv_regions,
                                  phys_addr_t *base)
{
        struct iommu_resv_region *region;
        bool ret = false;

        list_for_each_entry(region, group_resv_regions, list) {
                /*
                 * The presence of any 'real' MSI regions should take
                 * precedence over the software-managed one if the
                 * IOMMU driver happens to advertise both types.
                 */
                if (region->type == IOMMU_RESV_MSI) {
                        ret = false;
                        break;
                }

                if (region->type == IOMMU_RESV_SW_MSI) {
                        *base = region->start;
                        ret = true;
                }
        }

        return ret;
}

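/*
 * Example (illustrative, driver-specific defaults may differ): the ARM SMMU
 * drivers advertise an IOMMU_RESV_SW_MSI region, by default a 1MB window at
 * IOVA 0x08000000, so *base would be set to 0x08000000 and the caller
 * reserves that window for MSI doorbell mappings.
 */
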
static struct device *vfio_mdev_get_iommu_device(struct device *dev)
{
        struct device *(*fn)(struct device *dev);
        struct device *iommu_device;

        fn = symbol_get(mdev_get_iommu_device);
        if (fn) {
                iommu_device = fn(dev);
                symbol_put(mdev_get_iommu_device);

                return iommu_device;
        }

        return NULL;
}

static int vfio_mdev_attach_domain(struct device *dev, void *data)
{
        struct iommu_domain *domain = data;
        struct device *iommu_device;

        iommu_device = vfio_mdev_get_iommu_device(dev);
        if (iommu_device) {
                if (iommu_dev_feature_enabled(iommu_device, IOMMU_DEV_FEAT_AUX))
                        return iommu_aux_attach_device(domain, iommu_device);
                else
                        return iommu_attach_device(domain, iommu_device);
        }

        return -EINVAL;
}

static int vfio_mdev_detach_domain(struct device *dev, void *data)
{
        struct iommu_domain *domain = data;
        struct device *iommu_device;

        iommu_device = vfio_mdev_get_iommu_device(dev);
        if (iommu_device) {
                if (iommu_dev_feature_enabled(iommu_device, IOMMU_DEV_FEAT_AUX))
                        iommu_aux_detach_device(domain, iommu_device);
                else
                        iommu_detach_device(domain, iommu_device);
        }

        return 0;
}

static int vfio_iommu_attach_group(struct vfio_domain *domain,
                                   struct vfio_group *group)
{
        if (group->mdev_group)
                return iommu_group_for_each_dev(group->iommu_group,
                                                domain->domain,
                                                vfio_mdev_attach_domain);
        else
                return iommu_attach_group(domain->domain, group->iommu_group);
}

static void vfio_iommu_detach_group(struct vfio_domain *domain,
                                    struct vfio_group *group)
{
        if (group->mdev_group)
                iommu_group_for_each_dev(group->iommu_group, domain->domain,
                                         vfio_mdev_detach_domain);
        else
                iommu_detach_group(domain->domain, group->iommu_group);
}

static bool vfio_bus_is_mdev(struct bus_type *bus)
{
        struct bus_type *mdev_bus;
        bool ret = false;

        mdev_bus = symbol_get(mdev_bus_type);
        if (mdev_bus) {
                ret = (bus == mdev_bus);
                symbol_put(mdev_bus_type);
        }

        return ret;
}

static int vfio_mdev_iommu_device(struct device *dev, void *data)
{
        struct device **old = data, *new;

        new = vfio_mdev_get_iommu_device(dev);
        if (!new || (*old && *old != new))
                return -EINVAL;

        *old = new;

        return 0;
}

/*
 * This is a helper function to insert an address range into the iova list.
 * The list is initially created with a single entry corresponding to
 * the IOMMU domain geometry to which the device group is attached.
 * The list aperture gets modified when a new domain is added to the
 * container if the new aperture doesn't conflict with the current one
 * or with any existing dma mappings.  The list is also modified to
 * exclude any reserved regions associated with the device group.
 */
static int vfio_iommu_iova_insert(struct list_head *head,
                                  dma_addr_t start, dma_addr_t end)
{
        struct vfio_iova *region;

        region = kmalloc(sizeof(*region), GFP_KERNEL);
        if (!region)
                return -ENOMEM;

        INIT_LIST_HEAD(&region->list);
        region->start = start;
        region->end = end;

        list_add_tail(&region->list, head);
        return 0;
}

/*
 * Check whether the new iommu aperture conflicts with the existing aperture
 * or with any existing dma mappings.
 */
static bool vfio_iommu_aper_conflict(struct vfio_iommu *iommu,
                                     dma_addr_t start, dma_addr_t end)
{
        struct vfio_iova *first, *last;
        struct list_head *iova = &iommu->iova_list;

        if (list_empty(iova))
                return false;

        /* Disjoint sets, return conflict */
        first = list_first_entry(iova, struct vfio_iova, list);
        last = list_last_entry(iova, struct vfio_iova, list);
        if (start > last->end || end < first->start)
                return true;

        /* Check for any existing dma mappings below the new start */
        if (start > first->start) {
                if (vfio_find_dma(iommu, first->start, start - first->start))
                        return true;
        }

        /* Check for any existing dma mappings beyond the new end */
        if (end < last->end) {
                if (vfio_find_dma(iommu, end + 1, last->end - end))
                        return true;
        }

        return false;
}

/*
 * Resize iommu iova aperture window.  This is called only if the new
 * aperture has no conflict with existing aperture and dma mappings.
 */
static int vfio_iommu_aper_resize(struct list_head *iova,
                                  dma_addr_t start, dma_addr_t end)
{
        struct vfio_iova *node, *next;

        if (list_empty(iova))
                return vfio_iommu_iova_insert(iova, start, end);

        /* Adjust iova list start */
        list_for_each_entry_safe(node, next, iova, list) {
                if (start < node->start)
                        break;
                if (start >= node->start && start < node->end) {
                        node->start = start;
                        break;
                }
                /* Delete nodes before new start */
                list_del(&node->list);
                kfree(node);
        }

        /* Adjust iova list end */
        list_for_each_entry_safe(node, next, iova, list) {
                if (end > node->end)
                        continue;
                if (end > node->start && end <= node->end) {
                        node->end = end;
                        continue;
                }
                /* Delete nodes after new end */
                list_del(&node->list);
                kfree(node);
        }

        return 0;
}

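/*
 * Example (illustrative): resizing a single-node list [0x0, 0xffffffff] to
 * the aperture [0x1000, 0xfffff] trims the node to [0x1000, 0xfffff];
 * nodes lying entirely below the new start or above the new end are freed.
 */
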
/*
 * Check reserved region conflicts with existing dma mappings
 */
static bool vfio_iommu_resv_conflict(struct vfio_iommu *iommu,
                                     struct list_head *resv_regions)
{
        struct iommu_resv_region *region;

        /* Check for conflict with existing dma mappings */
        list_for_each_entry(region, resv_regions, list) {
                if (region->type == IOMMU_RESV_DIRECT_RELAXABLE)
                        continue;

                if (vfio_find_dma(iommu, region->start, region->length))
                        return true;
        }

        return false;
}

/*
 * Check iova region overlap with reserved regions and
 * exclude them from the iommu iova range
 */
static int vfio_iommu_resv_exclude(struct list_head *iova,
                                   struct list_head *resv_regions)
{
        struct iommu_resv_region *resv;
        struct vfio_iova *n, *next;

        list_for_each_entry(resv, resv_regions, list) {
                phys_addr_t start, end;

                if (resv->type == IOMMU_RESV_DIRECT_RELAXABLE)
                        continue;

                start = resv->start;
                end = resv->start + resv->length - 1;

                list_for_each_entry_safe(n, next, iova, list) {
                        int ret = 0;

                        /* No overlap */
                        if (start > n->end || end < n->start)
                                continue;
                        /*
                         * Insert a new node if the current node overlaps with
                         * the reserved region, to exclude that from the valid
                         * iova range.  Note that the new node is inserted
                         * before the current node and finally the current node
                         * is deleted, keeping the list updated and sorted.
                         */
                        if (start > n->start)
                                ret = vfio_iommu_iova_insert(&n->list, n->start,
                                                             start - 1);
                        if (!ret && end < n->end)
                                ret = vfio_iommu_iova_insert(&n->list, end + 1,
                                                             n->end);
                        if (ret)
                                return ret;

                        list_del(&n->list);
                        kfree(n);
                }
        }

        if (list_empty(iova))
                return -EINVAL;

        return 0;
}

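/*
 * Example (illustrative): excluding the x86 MSI reserved range
 * [0xfee00000, 0xfeefffff] from a node [0x0, 0xffffffff] splits it into
 * [0x0, 0xfedfffff] and [0xfef00000, 0xffffffff].
 */
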
static void vfio_iommu_resv_free(struct list_head *resv_regions)
{
        struct iommu_resv_region *n, *next;

        list_for_each_entry_safe(n, next, resv_regions, list) {
                list_del(&n->list);
                kfree(n);
        }
}

1108696a SK |
1575 | static void vfio_iommu_iova_free(struct list_head *iova) |
1576 | { | |
1577 | struct vfio_iova *n, *next; | |
1578 | ||
1579 | list_for_each_entry_safe(n, next, iova, list) { | |
1580 | list_del(&n->list); | |
1581 | kfree(n); | |
1582 | } | |
1583 | } | |
1584 | ||
1585 | static int vfio_iommu_iova_get_copy(struct vfio_iommu *iommu, | |
1586 | struct list_head *iova_copy) | |
1587 | { | |
1588 | struct list_head *iova = &iommu->iova_list; | |
1589 | struct vfio_iova *n; | |
1590 | int ret; | |
1591 | ||
1592 | list_for_each_entry(n, iova, list) { | |
1593 | ret = vfio_iommu_iova_insert(iova_copy, n->start, n->end); | |
1594 | if (ret) | |
1595 | goto out_free; | |
1596 | } | |
1597 | ||
1598 | return 0; | |
1599 | ||
1600 | out_free: | |
1601 | vfio_iommu_iova_free(iova_copy); | |
1602 | return ret; | |
1603 | } | |
1604 | ||
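
/*
 * Commit a prepared iova list: free the current list and splice the
 * copy into its place. Callers are expected to hold iommu->lock so the
 * swap appears atomic to other users of the list.
 */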
static void vfio_iommu_iova_insert_copy(struct vfio_iommu *iommu,
					struct list_head *iova_copy)
{
	struct list_head *iova = &iommu->iova_list;

	vfio_iommu_iova_free(iova);

	list_splice_tail(iova_copy, iova);
}

static int vfio_iommu_type1_attach_group(void *iommu_data,
					 struct iommu_group *iommu_group)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_group *group;
	struct vfio_domain *domain, *d;
	struct bus_type *bus = NULL;
	int ret;
	bool resv_msi, msi_remap;
	phys_addr_t resv_msi_base = 0;
	struct iommu_domain_geometry geo;
	LIST_HEAD(iova_copy);
	LIST_HEAD(group_resv_regions);

	mutex_lock(&iommu->lock);

	list_for_each_entry(d, &iommu->domain_list, next) {
		if (find_iommu_group(d, iommu_group)) {
			mutex_unlock(&iommu->lock);
			return -EINVAL;
		}
	}

	if (iommu->external_domain) {
		if (find_iommu_group(iommu->external_domain, iommu_group)) {
			mutex_unlock(&iommu->lock);
			return -EINVAL;
		}
	}

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!group || !domain) {
		ret = -ENOMEM;
		goto out_free;
	}

	group->iommu_group = iommu_group;

	/* Determine bus_type in order to allocate a domain */
	ret = iommu_group_for_each_dev(iommu_group, &bus, vfio_bus_type);
	if (ret)
		goto out_free;

	if (vfio_bus_is_mdev(bus)) {
		struct device *iommu_device = NULL;

		group->mdev_group = true;

		/* Determine the isolation type */
		ret = iommu_group_for_each_dev(iommu_group, &iommu_device,
					       vfio_mdev_iommu_device);
		if (ret || !iommu_device) {
			if (!iommu->external_domain) {
				INIT_LIST_HEAD(&domain->group_list);
				iommu->external_domain = domain;
			} else {
				kfree(domain);
			}

			list_add(&group->next,
				 &iommu->external_domain->group_list);
			mutex_unlock(&iommu->lock);

			return 0;
		}

		bus = iommu_device->bus;
	}

	domain->domain = iommu_domain_alloc(bus);
	if (!domain->domain) {
		ret = -EIO;
		goto out_free;
	}

	if (iommu->nesting) {
		int attr = 1;

		ret = iommu_domain_set_attr(domain->domain, DOMAIN_ATTR_NESTING,
					    &attr);
		if (ret)
			goto out_domain;
	}

	ret = vfio_iommu_attach_group(domain, group);
	if (ret)
		goto out_domain;

	/* Get aperture info */
	iommu_domain_get_attr(domain->domain, DOMAIN_ATTR_GEOMETRY, &geo);

	if (vfio_iommu_aper_conflict(iommu, geo.aperture_start,
				     geo.aperture_end)) {
		ret = -EINVAL;
		goto out_detach;
	}

	ret = iommu_get_group_resv_regions(iommu_group, &group_resv_regions);
	if (ret)
		goto out_detach;

	if (vfio_iommu_resv_conflict(iommu, &group_resv_regions)) {
		ret = -EINVAL;
		goto out_detach;
	}

	/*
	 * We don't want to work on the original iova list as the list
	 * gets modified and in case of failure we have to retain the
	 * original list. Get a copy here.
	 */
	ret = vfio_iommu_iova_get_copy(iommu, &iova_copy);
	if (ret)
		goto out_detach;

	ret = vfio_iommu_aper_resize(&iova_copy, geo.aperture_start,
				     geo.aperture_end);
	if (ret)
		goto out_detach;

	ret = vfio_iommu_resv_exclude(&iova_copy, &group_resv_regions);
	if (ret)
		goto out_detach;

	resv_msi = vfio_iommu_has_sw_msi(&group_resv_regions, &resv_msi_base);

	INIT_LIST_HEAD(&domain->group_list);
	list_add(&group->next, &domain->group_list);

	msi_remap = irq_domain_check_msi_remap() ||
		    iommu_capable(bus, IOMMU_CAP_INTR_REMAP);

	if (!allow_unsafe_interrupts && !msi_remap) {
		pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
			__func__);
		ret = -EPERM;
		goto out_detach;
	}

	if (iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
		domain->prot |= IOMMU_CACHE;

	/*
	 * Try to match an existing compatible domain. We don't want to
	 * preclude an IOMMU driver supporting multiple bus_types and being
	 * able to include different bus_types in the same IOMMU domain, so
	 * we test whether the domains use the same iommu_ops rather than
	 * testing if they're on the same bus_type.
	 */
	list_for_each_entry(d, &iommu->domain_list, next) {
		if (d->domain->ops == domain->domain->ops &&
		    d->prot == domain->prot) {
			vfio_iommu_detach_group(domain, group);
			if (!vfio_iommu_attach_group(d, group)) {
				list_add(&group->next, &d->group_list);
				iommu_domain_free(domain->domain);
				kfree(domain);
				goto done;
			}

			ret = vfio_iommu_attach_group(domain, group);
			if (ret)
				goto out_domain;
		}
	}

	vfio_test_domain_fgsp(domain);

	/* replay mappings on new domains */
	ret = vfio_iommu_replay(iommu, domain);
	if (ret)
		goto out_detach;

	if (resv_msi) {
		ret = iommu_get_msi_cookie(domain->domain, resv_msi_base);
		if (ret && ret != -ENODEV)
			goto out_detach;
	}

	list_add(&domain->next, &iommu->domain_list);
done:
	/* Delete the old one and insert new iova list */
	vfio_iommu_iova_insert_copy(iommu, &iova_copy);
	mutex_unlock(&iommu->lock);
	vfio_iommu_resv_free(&group_resv_regions);

	return 0;

out_detach:
	vfio_iommu_detach_group(domain, group);
out_domain:
	iommu_domain_free(domain->domain);
	vfio_iommu_iova_free(&iova_copy);
	vfio_iommu_resv_free(&group_resv_regions);
out_free:
	kfree(domain);
	kfree(group);
	mutex_unlock(&iommu->lock);
	return ret;
}
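
/*
 * Userspace view (illustrative sketch; the fd variables and group
 * number "26" are assumptions, see Documentation/driver-api/vfio.rst):
 *
 *	int container = open("/dev/vfio/vfio", O_RDWR);
 *	int group = open("/dev/vfio/26", O_RDWR);
 *
 *	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1v2_IOMMU);
 *
 * VFIO_SET_IOMMU attaches every group already in the container through
 * the callback above; groups added to an already-configured container
 * are attached at VFIO_GROUP_SET_CONTAINER time.
 */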

static void vfio_iommu_unmap_unpin_all(struct vfio_iommu *iommu)
{
	struct rb_node *node;

	while ((node = rb_first(&iommu->dma_list)))
		vfio_remove_dma(iommu, rb_entry(node, struct vfio_dma, node));
}
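
/*
 * Unmap and unpin all DMA, then fix up the locked-page accounting:
 * pfns that remain pinned by an external (mdev) user through the
 * pfn_list stay accounted, everything unpinned here is credited back.
 */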
static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu)
{
	struct rb_node *n, *p;

	n = rb_first(&iommu->dma_list);
	for (; n; n = rb_next(n)) {
		struct vfio_dma *dma;
		long locked = 0, unlocked = 0;

		dma = rb_entry(n, struct vfio_dma, node);
		unlocked += vfio_unmap_unpin(iommu, dma, false);
		p = rb_first(&dma->pfn_list);
		for (; p; p = rb_next(p)) {
			struct vfio_pfn *vpfn = rb_entry(p, struct vfio_pfn,
							 node);

			if (!is_invalid_reserved_pfn(vpfn->pfn))
				locked++;
		}
		vfio_lock_acct(dma, locked - unlocked, true);
	}
}

static void vfio_sanity_check_pfn_list(struct vfio_iommu *iommu)
{
	struct rb_node *n;

	n = rb_first(&iommu->dma_list);
	for (; n; n = rb_next(n)) {
		struct vfio_dma *dma;

		dma = rb_entry(n, struct vfio_dma, node);

		if (WARN_ON(!RB_EMPTY_ROOT(&dma->pfn_list)))
			break;
	}
	/* mdev vendor driver must unregister notifier */
	WARN_ON(iommu->notifier.head);
}

/*
 * Called when a domain is removed in detach. The removed domain may
 * have dictated the iova aperture window. Recompute the aperture as
 * the intersection of the apertures of the remaining domains.
 */
static void vfio_iommu_aper_expand(struct vfio_iommu *iommu,
				   struct list_head *iova_copy)
{
	struct vfio_domain *domain;
	struct iommu_domain_geometry geo;
	struct vfio_iova *node;
	dma_addr_t start = 0;
	dma_addr_t end = (dma_addr_t)~0;

	if (list_empty(iova_copy))
		return;

	list_for_each_entry(domain, &iommu->domain_list, next) {
		iommu_domain_get_attr(domain->domain, DOMAIN_ATTR_GEOMETRY,
				      &geo);
		if (geo.aperture_start > start)
			start = geo.aperture_start;
		if (geo.aperture_end < end)
			end = geo.aperture_end;
	}

	/* Modify aperture limits. The new aperture is the same or bigger */
	node = list_first_entry(iova_copy, struct vfio_iova, list);
	node->start = start;
	node = list_last_entry(iova_copy, struct vfio_iova, list);
	node->end = end;
}
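
/*
 * Example (illustrative, widths assumed): if the detached domain capped
 * the aperture at 39 bits while every remaining domain covers 48 bits,
 * the first node's start and the last node's end stretch from
 * [0x0 - 0x7fffffffff] toward [0x0 - 0xffffffffffff]. Holes punched by
 * reserved regions are left untouched here; vfio_iommu_resv_refresh()
 * below rebuilds them.
 */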

/*
 * Called when a group is detached. The reserved regions for that
 * group may now become valid iova. But since reserved regions may be
 * duplicated among groups, populate the list of valid iova regions
 * again.
 */
static int vfio_iommu_resv_refresh(struct vfio_iommu *iommu,
				   struct list_head *iova_copy)
{
	struct vfio_domain *d;
	struct vfio_group *g;
	struct vfio_iova *node;
	dma_addr_t start, end;
	LIST_HEAD(resv_regions);
	int ret;

	if (list_empty(iova_copy))
		return -EINVAL;

	list_for_each_entry(d, &iommu->domain_list, next) {
		list_for_each_entry(g, &d->group_list, next) {
			ret = iommu_get_group_resv_regions(g->iommu_group,
							   &resv_regions);
			if (ret)
				goto done;
		}
	}

	node = list_first_entry(iova_copy, struct vfio_iova, list);
	start = node->start;
	node = list_last_entry(iova_copy, struct vfio_iova, list);
	end = node->end;

	/* purge the iova list and create new one */
	vfio_iommu_iova_free(iova_copy);

	ret = vfio_iommu_aper_resize(iova_copy, start, end);
	if (ret)
		goto done;

	/* Exclude current reserved regions from iova ranges */
	ret = vfio_iommu_resv_exclude(iova_copy, &resv_regions);
done:
	vfio_iommu_resv_free(&resv_regions);
	return ret;
}

static void vfio_iommu_type1_detach_group(void *iommu_data,
					  struct iommu_group *iommu_group)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_domain *domain;
	struct vfio_group *group;
	LIST_HEAD(iova_copy);

	mutex_lock(&iommu->lock);

	if (iommu->external_domain) {
		group = find_iommu_group(iommu->external_domain, iommu_group);
		if (group) {
			list_del(&group->next);
			kfree(group);

			if (list_empty(&iommu->external_domain->group_list)) {
				vfio_sanity_check_pfn_list(iommu);

				if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
					vfio_iommu_unmap_unpin_all(iommu);

				kfree(iommu->external_domain);
				iommu->external_domain = NULL;
			}
			goto detach_group_done;
		}
	}

	/*
	 * Get a copy of iova list. This will be used to update
	 * and to replace the current one later. Please note that
	 * we will leave the original list as it is if update fails.
	 */
	vfio_iommu_iova_get_copy(iommu, &iova_copy);

	list_for_each_entry(domain, &iommu->domain_list, next) {
		group = find_iommu_group(domain, iommu_group);
		if (!group)
			continue;

		vfio_iommu_detach_group(domain, group);
		list_del(&group->next);
		kfree(group);
		/*
		 * Group ownership provides privilege; if the group list is
		 * empty, the domain goes away. If it's the last domain with
		 * iommu and no external domain exists, then all the mappings
		 * go away too. If it's the last domain with iommu and an
		 * external domain does exist, update the accounting instead.
		 */
		if (list_empty(&domain->group_list)) {
			if (list_is_singular(&iommu->domain_list)) {
				if (!iommu->external_domain)
					vfio_iommu_unmap_unpin_all(iommu);
				else
					vfio_iommu_unmap_unpin_reaccount(iommu);
			}
			iommu_domain_free(domain->domain);
			list_del(&domain->next);
			kfree(domain);
			vfio_iommu_aper_expand(iommu, &iova_copy);
		}
		break;
	}

	if (!vfio_iommu_resv_refresh(iommu, &iova_copy))
		vfio_iommu_iova_insert_copy(iommu, &iova_copy);
	else
		vfio_iommu_iova_free(&iova_copy);

detach_group_done:
	mutex_unlock(&iommu->lock);
}

static void *vfio_iommu_type1_open(unsigned long arg)
{
	struct vfio_iommu *iommu;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return ERR_PTR(-ENOMEM);

	switch (arg) {
	case VFIO_TYPE1_IOMMU:
		break;
	case VFIO_TYPE1_NESTING_IOMMU:
		iommu->nesting = true;
		/* fall through */
	case VFIO_TYPE1v2_IOMMU:
		iommu->v2 = true;
		break;
	default:
		kfree(iommu);
		return ERR_PTR(-EINVAL);
	}

	INIT_LIST_HEAD(&iommu->domain_list);
	INIT_LIST_HEAD(&iommu->iova_list);
	iommu->dma_list = RB_ROOT;
	iommu->dma_avail = dma_entry_limit;
	mutex_init(&iommu->lock);
	BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);

	return iommu;
}

static void vfio_release_domain(struct vfio_domain *domain, bool external)
{
	struct vfio_group *group, *group_tmp;

	list_for_each_entry_safe(group, group_tmp,
				 &domain->group_list, next) {
		if (!external)
			vfio_iommu_detach_group(domain, group);
		list_del(&group->next);
		kfree(group);
	}

	if (!external)
		iommu_domain_free(domain->domain);
}

static void vfio_iommu_type1_release(void *iommu_data)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_domain *domain, *domain_tmp;

	if (iommu->external_domain) {
		vfio_release_domain(iommu->external_domain, true);
		vfio_sanity_check_pfn_list(iommu);
		kfree(iommu->external_domain);
	}

	vfio_iommu_unmap_unpin_all(iommu);

	list_for_each_entry_safe(domain, domain_tmp,
				 &iommu->domain_list, next) {
		vfio_release_domain(domain, false);
		list_del(&domain->next);
		kfree(domain);
	}

	vfio_iommu_iova_free(&iommu->iova_list);

	kfree(iommu);
}

static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu)
{
	struct vfio_domain *domain;
	int ret = 1;

	mutex_lock(&iommu->lock);
	list_for_each_entry(domain, &iommu->domain_list, next) {
		if (!(domain->prot & IOMMU_CACHE)) {
			ret = 0;
			break;
		}
	}
	mutex_unlock(&iommu->lock);

	return ret;
}

static int vfio_iommu_iova_add_cap(struct vfio_info_cap *caps,
		 struct vfio_iommu_type1_info_cap_iova_range *cap_iovas,
		 size_t size)
{
	struct vfio_info_cap_header *header;
	struct vfio_iommu_type1_info_cap_iova_range *iova_cap;

	header = vfio_info_cap_add(caps, size,
				   VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE, 1);
	if (IS_ERR(header))
		return PTR_ERR(header);

	iova_cap = container_of(header,
				struct vfio_iommu_type1_info_cap_iova_range,
				header);
	iova_cap->nr_iovas = cap_iovas->nr_iovas;
	memcpy(iova_cap->iova_ranges, cap_iovas->iova_ranges,
	       cap_iovas->nr_iovas * sizeof(*cap_iovas->iova_ranges));
	return 0;
}

static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
				      struct vfio_info_cap *caps)
{
	struct vfio_iommu_type1_info_cap_iova_range *cap_iovas;
	struct vfio_iova *iova;
	size_t size;
	int iovas = 0, i = 0, ret;

	mutex_lock(&iommu->lock);

	list_for_each_entry(iova, &iommu->iova_list, list)
		iovas++;

	if (!iovas) {
		/*
		 * Return 0 as a container with a single mdev device
		 * will have an empty list
		 */
		ret = 0;
		goto out_unlock;
	}

	size = sizeof(*cap_iovas) + (iovas * sizeof(*cap_iovas->iova_ranges));

	cap_iovas = kzalloc(size, GFP_KERNEL);
	if (!cap_iovas) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	cap_iovas->nr_iovas = iovas;

	list_for_each_entry(iova, &iommu->iova_list, list) {
		cap_iovas->iova_ranges[i].start = iova->start;
		cap_iovas->iova_ranges[i].end = iova->end;
		i++;
	}

	ret = vfio_iommu_iova_add_cap(caps, cap_iovas, size);

	kfree(cap_iovas);
out_unlock:
	mutex_unlock(&iommu->lock);
	return ret;
}
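
/*
 * Userspace consumption sketch (illustrative; the "info" pointer is an
 * assumption, a buffer already filled in by VFIO_IOMMU_GET_INFO below):
 *
 *	struct vfio_info_cap_header *hdr;
 *	struct vfio_iommu_type1_info_cap_iova_range *cap = NULL;
 *	__u32 off;
 *
 *	for (off = info->cap_offset; off; off = hdr->next) {
 *		hdr = (void *)info + off;
 *		if (hdr->id == VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE)
 *			cap = (void *)hdr;
 *	}
 *
 * Each header's "next" field is the offset of the next capability from
 * the start of the info buffer; zero terminates the chain, and "cap"
 * then holds cap->nr_iovas usable ranges in cap->iova_ranges[].
 */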

static long vfio_iommu_type1_ioctl(void *iommu_data,
				   unsigned int cmd, unsigned long arg)
{
	struct vfio_iommu *iommu = iommu_data;
	unsigned long minsz;

	if (cmd == VFIO_CHECK_EXTENSION) {
		switch (arg) {
		case VFIO_TYPE1_IOMMU:
		case VFIO_TYPE1v2_IOMMU:
		case VFIO_TYPE1_NESTING_IOMMU:
			return 1;
		case VFIO_DMA_CC_IOMMU:
			if (!iommu)
				return 0;
			return vfio_domains_have_iommu_cache(iommu);
		default:
			return 0;
		}
	} else if (cmd == VFIO_IOMMU_GET_INFO) {
		struct vfio_iommu_type1_info info;
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
		unsigned long capsz;
		int ret;

		minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes);

		/* For backward compatibility, cannot require this */
		capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.argsz >= capsz) {
			minsz = capsz;
			info.cap_offset = 0; /* output, no-recopy necessary */
		}

		info.flags = VFIO_IOMMU_INFO_PGSIZES;

		info.iova_pgsizes = vfio_pgsize_bitmap(iommu);

		ret = vfio_iommu_iova_build_caps(iommu, &caps);
		if (ret)
			return ret;

		if (caps.size) {
			info.flags |= VFIO_IOMMU_INFO_CAPS;

			if (info.argsz < sizeof(info) + caps.size) {
				info.argsz = sizeof(info) + caps.size;
			} else {
				vfio_info_cap_shift(&caps, sizeof(info));
				if (copy_to_user((void __user *)arg +
						sizeof(info), caps.buf,
						caps.size)) {
					kfree(caps.buf);
					return -EFAULT;
				}
				info.cap_offset = sizeof(info);
			}

			kfree(caps.buf);
		}

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_IOMMU_MAP_DMA) {
		struct vfio_iommu_type1_dma_map map;
		uint32_t mask = VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE;

		minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

		if (copy_from_user(&map, (void __user *)arg, minsz))
			return -EFAULT;

		if (map.argsz < minsz || map.flags & ~mask)
			return -EINVAL;

		return vfio_dma_do_map(iommu, &map);

	} else if (cmd == VFIO_IOMMU_UNMAP_DMA) {
		struct vfio_iommu_type1_dma_unmap unmap;
		long ret;

		minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);

		if (copy_from_user(&unmap, (void __user *)arg, minsz))
			return -EFAULT;

		if (unmap.argsz < minsz || unmap.flags)
			return -EINVAL;

		ret = vfio_dma_do_unmap(iommu, &unmap);
		if (ret)
			return ret;

		return copy_to_user((void __user *)arg, &unmap, minsz) ?
			-EFAULT : 0;
	}

	return -ENOTTY;
}
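
/*
 * Userspace sketch of the map/unmap path handled above (illustrative;
 * "container" and "buf" are assumptions, buf a page-aligned user
 * buffer):
 *
 *	struct vfio_iommu_type1_dma_map map = {
 *		.argsz = sizeof(map),
 *		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *		.vaddr = (__u64)(uintptr_t)buf,
 *		.iova  = 0x100000,
 *		.size  = 1024 * 1024,
 *	};
 *	struct vfio_iommu_type1_dma_unmap unmap = {
 *		.argsz = sizeof(unmap),
 *		.iova  = 0x100000,
 *		.size  = 1024 * 1024,
 *	};
 *
 *	ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
 *	...
 *	ioctl(container, VFIO_IOMMU_UNMAP_DMA, &unmap);
 *
 * On return from UNMAP_DMA, unmap.size holds the number of bytes
 * actually unmapped, which is why the handler copies the struct back.
 */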

static int vfio_iommu_type1_register_notifier(void *iommu_data,
					      unsigned long *events,
					      struct notifier_block *nb)
{
	struct vfio_iommu *iommu = iommu_data;

	/* clear known events */
	*events &= ~VFIO_IOMMU_NOTIFY_DMA_UNMAP;

	/* refuse to register if unknown events remain */
	if (*events)
		return -EINVAL;

	return blocking_notifier_chain_register(&iommu->notifier, nb);
}

static int vfio_iommu_type1_unregister_notifier(void *iommu_data,
						struct notifier_block *nb)
{
	struct vfio_iommu *iommu = iommu_data;

	return blocking_notifier_chain_unregister(&iommu->notifier, nb);
}

static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu,
					 dma_addr_t user_iova, void *data,
					 size_t count, bool write,
					 size_t *copied)
{
	struct mm_struct *mm;
	unsigned long vaddr;
	struct vfio_dma *dma;
	bool kthread = current->mm == NULL;
	size_t offset;

	*copied = 0;

	dma = vfio_find_dma(iommu, user_iova, 1);
	if (!dma)
		return -EINVAL;

	if ((write && !(dma->prot & IOMMU_WRITE)) ||
	    !(dma->prot & IOMMU_READ))
		return -EPERM;

	mm = get_task_mm(dma->task);

	if (!mm)
		return -EPERM;

	if (kthread)
		use_mm(mm);
	else if (current->mm != mm)
		goto out;

	offset = user_iova - dma->iova;

	if (count > dma->size - offset)
		count = dma->size - offset;

	vaddr = dma->vaddr + offset;

	if (write)
		*copied = copy_to_user((void __user *)vaddr, data,
				       count) ? 0 : count;
	else
		*copied = copy_from_user(data, (void __user *)vaddr,
					 count) ? 0 : count;
	if (kthread)
		unuse_mm(mm);
out:
	mmput(mm);
	return *copied ? 0 : -EFAULT;
}

static int vfio_iommu_type1_dma_rw(void *iommu_data, dma_addr_t user_iova,
				   void *data, size_t count, bool write)
{
	struct vfio_iommu *iommu = iommu_data;
	int ret = 0;
	size_t done;

	mutex_lock(&iommu->lock);
	while (count > 0) {
		ret = vfio_iommu_type1_dma_rw_chunk(iommu, user_iova, data,
						    count, write, &done);
		if (ret)
			break;

		count -= done;
		data += done;
		user_iova += done;
	}

	mutex_unlock(&iommu->lock);
	return ret;
}
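
/*
 * In-kernel usage sketch (illustrative; "group", "gpa", "buf" and "len"
 * are assumptions, and the wrapper's exact signature should be checked
 * against include/linux/vfio.h): an mdev vendor driver can access a
 * guest buffer by IOVA without pinning it:
 *
 *	ret = vfio_dma_rw(group, gpa, buf, len, write);
 *
 * The VFIO core routes this to the .dma_rw callback above, which walks
 * the copy one vfio_dma mapping at a time and, for kthread callers,
 * temporarily adopts the mapping task's mm via use_mm().
 */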

static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
	.name			= "vfio-iommu-type1",
	.owner			= THIS_MODULE,
	.open			= vfio_iommu_type1_open,
	.release		= vfio_iommu_type1_release,
	.ioctl			= vfio_iommu_type1_ioctl,
	.attach_group		= vfio_iommu_type1_attach_group,
	.detach_group		= vfio_iommu_type1_detach_group,
	.pin_pages		= vfio_iommu_type1_pin_pages,
	.unpin_pages		= vfio_iommu_type1_unpin_pages,
	.register_notifier	= vfio_iommu_type1_register_notifier,
	.unregister_notifier	= vfio_iommu_type1_unregister_notifier,
	.dma_rw			= vfio_iommu_type1_dma_rw,
};

static int __init vfio_iommu_type1_init(void)
{
	return vfio_register_iommu_driver(&vfio_iommu_driver_ops_type1);
}

static void __exit vfio_iommu_type1_cleanup(void)
{
	vfio_unregister_iommu_driver(&vfio_iommu_driver_ops_type1);
}

module_init(vfio_iommu_type1_init);
module_exit(vfio_iommu_type1_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);