/*
 * VFIO: IOMMU DMA mapping support for Type1 IOMMU
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 *
 * We arbitrarily define a Type1 IOMMU as one matching the below code.
 * It could be called the x86 IOMMU as it's designed for AMD-Vi & Intel
 * VT-d, but that makes it harder to re-use as theoretically anyone
 * implementing a similar IOMMU could make use of this.  We expect the
 * IOMMU to support the IOMMU API and have few to no restrictions around
 * the IOVA range that can be mapped.  The Type1 IOMMU is currently
 * optimized for relatively static mappings of a userspace process with
 * userspace pages pinned into memory.  We also assume devices and IOMMU
 * domains are PCI based as the IOMMU API is still centered around a
 * device/bus interface rather than a group interface.
 */

#include <linux/compat.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/workqueue.h>

#define DRIVER_VERSION  "0.2"
#define DRIVER_AUTHOR   "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC     "Type1 IOMMU driver for VFIO"

static bool allow_unsafe_interrupts;
module_param_named(allow_unsafe_interrupts,
                   allow_unsafe_interrupts, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(allow_unsafe_interrupts,
                 "Enable VFIO IOMMU support on platforms without interrupt remapping support.");

static bool disable_hugepages;
module_param_named(disable_hugepages,
                   disable_hugepages, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_hugepages,
                 "Disable VFIO IOMMU support for IOMMU hugepages.");

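/*
 * Note (illustrative, not from the original source): both parameters are
 * declared writable (S_IWUSR), so besides being set at load time they can
 * also be toggled by root at runtime through the usual module_param sysfs
 * path, /sys/module/vfio_iommu_type1/parameters/.
 */
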
struct vfio_iommu {
        struct list_head        domain_list;
        struct mutex            lock;
        struct rb_root          dma_list;
        bool                    v2;
        bool                    nesting;
};

struct vfio_domain {
        struct iommu_domain     *domain;
        struct list_head        next;
        struct list_head        group_list;
        int                     prot;           /* IOMMU_CACHE */
        bool                    fgsp;           /* Fine-grained super pages */
};

struct vfio_dma {
        struct rb_node          node;
        dma_addr_t              iova;           /* Device address */
        unsigned long           vaddr;          /* Process virtual addr */
        size_t                  size;           /* Map size (bytes) */
        int                     prot;           /* IOMMU_READ/WRITE */
};

struct vfio_group {
        struct iommu_group      *iommu_group;
        struct list_head        next;
};

/*
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU
 */

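/*
 * Illustrative sketch (not part of this driver): a userspace consumer
 * fills a struct vfio_iommu_type1_dma_map and issues VFIO_IOMMU_MAP_DMA
 * on the container fd, which ends up in vfio_dma_do_map() below and
 * creates one of the vfio_dma entries tracked here.  The buffer must be
 * page aligned; "container_fd" and "buf" are hypothetical names used
 * only for this example.
 *
 *      struct vfio_iommu_type1_dma_map dma_map = {
 *              .argsz = sizeof(dma_map),
 *              .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *              .vaddr = (__u64)(uintptr_t)buf,   (process virtual address)
 *              .iova  = 0x100000,                (device/IO virtual address)
 *              .size  = 1024 * 1024,             (1 MiB, page aligned)
 *      };
 *
 *      if (ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &dma_map))
 *              perror("VFIO_IOMMU_MAP_DMA");
 */
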
static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
                                      dma_addr_t start, size_t size)
{
        struct rb_node *node = iommu->dma_list.rb_node;

        while (node) {
                struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node);

                if (start + size <= dma->iova)
                        node = node->rb_left;
                else if (start >= dma->iova + dma->size)
                        node = node->rb_right;
                else
                        return dma;
        }

        return NULL;
}

static void vfio_link_dma(struct vfio_iommu *iommu, struct vfio_dma *new)
{
        struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL;
        struct vfio_dma *dma;

        while (*link) {
                parent = *link;
                dma = rb_entry(parent, struct vfio_dma, node);

                if (new->iova + new->size <= dma->iova)
                        link = &(*link)->rb_left;
                else
                        link = &(*link)->rb_right;
        }

        rb_link_node(&new->node, parent, link);
        rb_insert_color(&new->node, &iommu->dma_list);
}

static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
{
        rb_erase(&old->node, &iommu->dma_list);
}

struct vwork {
        struct mm_struct        *mm;
        long                    npage;
        struct work_struct      work;
};

/* delayed decrement/increment for locked_vm */
static void vfio_lock_acct_bg(struct work_struct *work)
{
        struct vwork *vwork = container_of(work, struct vwork, work);
        struct mm_struct *mm;

        mm = vwork->mm;
        down_write(&mm->mmap_sem);
        mm->locked_vm += vwork->npage;
        up_write(&mm->mmap_sem);
        mmput(mm);
        kfree(vwork);
}

static void vfio_lock_acct(struct task_struct *task, long npage)
{
        struct vwork *vwork;
        struct mm_struct *mm;

        if (!npage)
                return;

        mm = get_task_mm(task);
        if (!mm)
                return; /* process exited or nothing to do */

        if (down_write_trylock(&mm->mmap_sem)) {
                mm->locked_vm += npage;
                up_write(&mm->mmap_sem);
                mmput(mm);
                return;
        }

        /*
         * Couldn't get mmap_sem lock, so must setup to update
         * mm->locked_vm later. If locked_vm were atomic, we
         * wouldn't need this silliness
         */
        vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL);
        if (!vwork) {
                mmput(mm);
                return;
        }
        INIT_WORK(&vwork->work, vfio_lock_acct_bg);
        vwork->mm = mm;
        vwork->npage = npage;
        schedule_work(&vwork->work);
}

/*
 * Some mappings aren't backed by a struct page, for example an mmap'd
 * MMIO range for our own or another device.  These use a different
 * pfn conversion and shouldn't be tracked as locked pages.
 */
static bool is_invalid_reserved_pfn(unsigned long pfn)
{
        if (pfn_valid(pfn)) {
                bool reserved;
                struct page *tail = pfn_to_page(pfn);
                struct page *head = compound_head(tail);
                reserved = !!(PageReserved(head));
                if (head != tail) {
                        /*
                         * "head" is not a dangling pointer
                         * (compound_head takes care of that)
                         * but the hugepage may have been split
                         * from under us (and we may not hold a
                         * reference count on the head page so it can
                         * be reused before we run PageReferenced), so
                         * we've to check PageTail before returning
                         * what we just read.
                         */
                        smp_rmb();
                        if (PageTail(tail))
                                return reserved;
                }
                return PageReserved(tail);
        }

        return true;
}

static int put_pfn(unsigned long pfn, int prot)
{
        if (!is_invalid_reserved_pfn(pfn)) {
                struct page *page = pfn_to_page(pfn);
                if (prot & IOMMU_WRITE)
                        SetPageDirty(page);
                put_page(page);
                return 1;
        }
        return 0;
}

static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn)
{
        struct page *page[1];
        struct vm_area_struct *vma;
        int ret = -EFAULT;

        if (get_user_pages_fast(vaddr, 1, !!(prot & IOMMU_WRITE), page) == 1) {
                *pfn = page_to_pfn(page[0]);
                return 0;
        }

        down_read(&current->mm->mmap_sem);

        vma = find_vma_intersection(current->mm, vaddr, vaddr + 1);

        if (vma && vma->vm_flags & VM_PFNMAP) {
                *pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
                if (is_invalid_reserved_pfn(*pfn))
                        ret = 0;
        }

        up_read(&current->mm->mmap_sem);

        return ret;
}

/*
 * Attempt to pin pages.  We really don't want to track all the pfns and
 * the iommu can only map chunks of consecutive pfns anyway, so get the
 * first page and all consecutive pages with the same locking.
 */
static long vfio_pin_pages_remote(unsigned long vaddr, long npage,
                                  int prot, unsigned long *pfn_base)
{
        unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        bool lock_cap = capable(CAP_IPC_LOCK);
        long ret, i;
        bool rsvd;

        ret = vaddr_get_pfn(vaddr, prot, pfn_base);
        if (ret)
                return ret;

        rsvd = is_invalid_reserved_pfn(*pfn_base);

        if (!rsvd && !lock_cap && current->mm->locked_vm + 1 > limit) {
                put_pfn(*pfn_base, prot);
                pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
                        limit << PAGE_SHIFT);
                return -ENOMEM;
        }

        if (unlikely(disable_hugepages)) {
                if (!rsvd)
                        vfio_lock_acct(current, 1);
                return 1;
        }

        /* Lock all the consecutive pages from pfn_base */
        for (i = 1, vaddr += PAGE_SIZE; i < npage; i++, vaddr += PAGE_SIZE) {
                unsigned long pfn = 0;

                ret = vaddr_get_pfn(vaddr, prot, &pfn);
                if (ret)
                        break;

                if (pfn != *pfn_base + i ||
                    rsvd != is_invalid_reserved_pfn(pfn)) {
                        put_pfn(pfn, prot);
                        break;
                }

                if (!rsvd && !lock_cap &&
                    current->mm->locked_vm + i + 1 > limit) {
                        put_pfn(pfn, prot);
                        pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
                                __func__, limit << PAGE_SHIFT);
                        break;
                }
        }

        if (!rsvd)
                vfio_lock_acct(current, i);

        return i;
}

static long vfio_unpin_pages_remote(unsigned long pfn, long npage,
                                    int prot, bool do_accounting)
{
        unsigned long unlocked = 0;
        long i;

        for (i = 0; i < npage; i++)
                unlocked += put_pfn(pfn++, prot);

        if (do_accounting)
                vfio_lock_acct(current, -unlocked);

        return unlocked;
}

static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
{
        dma_addr_t iova = dma->iova, end = dma->iova + dma->size;
        struct vfio_domain *domain, *d;
        long unlocked = 0;

        /*
         * We use the IOMMU to track the physical addresses, otherwise we'd
         * need a much more complicated tracking system.  Unfortunately that
         * means we need to use one of the iommu domains to figure out the
         * pfns to unpin.  The rest need to be unmapped in advance so we have
         * no iommu translations remaining when the pages are unpinned.
         */
        domain = d = list_first_entry(&iommu->domain_list,
                                      struct vfio_domain, next);

        list_for_each_entry_continue(d, &iommu->domain_list, next) {
                iommu_unmap(d->domain, dma->iova, dma->size);
        }

        while (iova < end) {
                size_t unmapped, len;
                phys_addr_t phys, next;

                phys = iommu_iova_to_phys(domain->domain, iova);
                if (WARN_ON(!phys)) {
                        iova += PAGE_SIZE;
                        continue;
                }

                /*
                 * To optimize for fewer iommu_unmap() calls, each of which
                 * may require hardware cache flushing, try to find the
                 * largest contiguous physical memory chunk to unmap.
                 */
                for (len = PAGE_SIZE;
                     !domain->fgsp && iova + len < end; len += PAGE_SIZE) {
                        next = iommu_iova_to_phys(domain->domain, iova + len);
                        if (next != phys + len)
                                break;
                }

                unmapped = iommu_unmap(domain->domain, iova, len);
                if (WARN_ON(!unmapped))
                        break;

                unlocked += vfio_unpin_pages_remote(phys >> PAGE_SHIFT,
                                                    unmapped >> PAGE_SHIFT,
                                                    dma->prot, false);
                iova += unmapped;
        }

        vfio_lock_acct(current, -unlocked);
}

static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
{
        vfio_unmap_unpin(iommu, dma);
        vfio_unlink_dma(iommu, dma);
        kfree(dma);
}

static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
{
        struct vfio_domain *domain;
        unsigned long bitmap = ULONG_MAX;

        mutex_lock(&iommu->lock);
        list_for_each_entry(domain, &iommu->domain_list, next)
                bitmap &= domain->domain->pgsize_bitmap;
        mutex_unlock(&iommu->lock);

        /*
         * In case the IOMMU supports page sizes smaller than PAGE_SIZE
         * we pretend PAGE_SIZE is supported and hide sub-PAGE_SIZE sizes.
         * That way the user will be able to map/unmap buffers whose size/
         * start address is aligned with PAGE_SIZE.  Pinning code uses that
         * granularity while iommu driver can use the sub-PAGE_SIZE size
         * directly.
         */
        if (bitmap & ~PAGE_MASK) {
                bitmap &= PAGE_MASK;
                bitmap |= PAGE_SIZE;
        }

        return bitmap;
}

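/*
 * Worked example (illustrative, not from the original source): with 4K
 * kernel pages, domains advertising 4K|2M|1G page sizes intersect to
 * bitmap 0x40201000 in vfio_pgsize_bitmap() above, which is returned
 * unchanged.  A domain that also advertised sub-PAGE_SIZE sizes would
 * have those low bits cleared and PAGE_SIZE set instead, so userspace
 * only ever sees PAGE_SIZE-or-larger granularity.
 */
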
static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
                             struct vfio_iommu_type1_dma_unmap *unmap)
{
        uint64_t mask;
        struct vfio_dma *dma;
        size_t unmapped = 0;
        int ret = 0;

        mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;

        if (unmap->iova & mask)
                return -EINVAL;
        if (!unmap->size || unmap->size & mask)
                return -EINVAL;

        WARN_ON(mask & PAGE_MASK);

        mutex_lock(&iommu->lock);

        /*
         * vfio-iommu-type1 (v1) - User mappings were coalesced together to
         * avoid tracking individual mappings.  This means that the granularity
         * of the original mapping was lost and the user was allowed to attempt
         * to unmap any range.  Depending on the contiguousness of physical
         * memory and page sizes supported by the IOMMU, arbitrary unmaps may
         * or may not have worked.  We only guaranteed unmap granularity
         * matching the original mapping; even though it was untracked here,
         * the original mappings are reflected in IOMMU mappings.  This
         * resulted in a couple unusual behaviors.  First, if a range is not
         * able to be unmapped, ex. a set of 4k pages that was mapped as a
         * 2M hugepage into the IOMMU, the unmap ioctl returns success but with
         * a zero sized unmap.  Also, if an unmap request overlaps the first
         * address of a hugepage, the IOMMU will unmap the entire hugepage.
         * This also returns success and the returned unmap size reflects the
         * actual size unmapped.
         *
         * We attempt to maintain compatibility with this "v1" interface, but
         * we take control out of the hands of the IOMMU.  Therefore, an unmap
         * request offset from the beginning of the original mapping will
         * return success with zero sized unmap.  And an unmap request covering
         * the first iova of mapping will unmap the entire range.
         *
         * The v2 version of this interface intends to be more deterministic.
         * Unmap requests must fully cover previous mappings.  Multiple
         * mappings may still be unmapped by specifying large ranges, but there
         * must not be any previous mappings bisected by the range.  An error
         * will be returned if these conditions are not met.  The v2 interface
         * will only return success and a size of zero if there were no
         * mappings within the range.
         */
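
        /*
         * Illustrative sketch (not part of this driver): under the v2
         * semantics described above, an unmap must cover whole prior
         * mappings.  E.g. after mapping 1 MiB at iova 0x100000, the
         * following succeeds and reports 1 MiB back in .size, whereas an
         * unmap starting at 0x101000 would bisect the mapping and fail
         * with -EINVAL.  "container_fd" is a hypothetical name used only
         * for this example.
         *
         *      struct vfio_iommu_type1_dma_unmap dma_unmap = {
         *              .argsz = sizeof(dma_unmap),
         *              .iova  = 0x100000,
         *              .size  = 1024 * 1024,
         *      };
         *
         *      ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, &dma_unmap);
         *      printf("unmapped %llu bytes\n",
         *             (unsigned long long)dma_unmap.size);
         */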
        if (iommu->v2) {
                dma = vfio_find_dma(iommu, unmap->iova, 0);
                if (dma && dma->iova != unmap->iova) {
                        ret = -EINVAL;
                        goto unlock;
                }

                dma = vfio_find_dma(iommu, unmap->iova + unmap->size - 1, 0);
                if (dma && dma->iova + dma->size != unmap->iova + unmap->size) {
                        ret = -EINVAL;
                        goto unlock;
                }
        }

        while ((dma = vfio_find_dma(iommu, unmap->iova, unmap->size))) {
                if (!iommu->v2 && unmap->iova > dma->iova)
                        break;
                unmapped += dma->size;
                vfio_remove_dma(iommu, dma);
        }

unlock:
        mutex_unlock(&iommu->lock);

        /* Report how much was unmapped */
        unmap->size = unmapped;

        return ret;
}

/*
 * Turns out AMD IOMMU has a page table bug where it won't map large pages
 * to a region that previously mapped smaller pages.  This should be fixed
 * soon, so this is just a temporary workaround to break mappings down into
 * PAGE_SIZE.  Better to map smaller pages than nothing.
 */
static int map_try_harder(struct vfio_domain *domain, dma_addr_t iova,
                          unsigned long pfn, long npage, int prot)
{
        long i;
        int ret;

        for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) {
                ret = iommu_map(domain->domain, iova,
                                (phys_addr_t)pfn << PAGE_SHIFT,
                                PAGE_SIZE, prot | domain->prot);
                if (ret)
                        break;
        }

        for (; i < npage && i > 0; i--, iova -= PAGE_SIZE)
                iommu_unmap(domain->domain, iova, PAGE_SIZE);

        return ret;
}

static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
                          unsigned long pfn, long npage, int prot)
{
        struct vfio_domain *d;
        int ret;

        list_for_each_entry(d, &iommu->domain_list, next) {
                ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
                                npage << PAGE_SHIFT, prot | d->prot);
                if (ret) {
                        if (ret != -EBUSY ||
                            map_try_harder(d, iova, pfn, npage, prot))
                                goto unwind;
                }
        }

        return 0;

unwind:
        list_for_each_entry_continue_reverse(d, &iommu->domain_list, next)
                iommu_unmap(d->domain, iova, npage << PAGE_SHIFT);

        return ret;
}

static int vfio_dma_do_map(struct vfio_iommu *iommu,
                           struct vfio_iommu_type1_dma_map *map)
{
        dma_addr_t iova = map->iova;
        unsigned long vaddr = map->vaddr;
        size_t size = map->size;
        long npage;
        int ret = 0, prot = 0;
        uint64_t mask;
        struct vfio_dma *dma;
        unsigned long pfn;

        /* Verify that none of our __u64 fields overflow */
        if (map->size != size || map->vaddr != vaddr || map->iova != iova)
                return -EINVAL;

        mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;

        WARN_ON(mask & PAGE_MASK);

        /* READ/WRITE from device perspective */
        if (map->flags & VFIO_DMA_MAP_FLAG_WRITE)
                prot |= IOMMU_WRITE;
        if (map->flags & VFIO_DMA_MAP_FLAG_READ)
                prot |= IOMMU_READ;

        if (!prot || !size || (size | iova | vaddr) & mask)
                return -EINVAL;

        /* Don't allow IOVA or virtual address wrap */
        if (iova + size - 1 < iova || vaddr + size - 1 < vaddr)
                return -EINVAL;

        mutex_lock(&iommu->lock);

        if (vfio_find_dma(iommu, iova, size)) {
                mutex_unlock(&iommu->lock);
                return -EEXIST;
        }

        dma = kzalloc(sizeof(*dma), GFP_KERNEL);
        if (!dma) {
                mutex_unlock(&iommu->lock);
                return -ENOMEM;
        }

        dma->iova = iova;
        dma->vaddr = vaddr;
        dma->prot = prot;

        /* Insert zero-sized and grow as we map chunks of it */
        vfio_link_dma(iommu, dma);

        while (size) {
                /* Pin a contiguous chunk of memory */
                npage = vfio_pin_pages_remote(vaddr + dma->size,
                                              size >> PAGE_SHIFT, prot, &pfn);
                if (npage <= 0) {
                        WARN_ON(!npage);
                        ret = (int)npage;
                        break;
                }

                /* Map it! */
                ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage, prot);
                if (ret) {
                        vfio_unpin_pages_remote(pfn, npage, prot, true);
                        break;
                }

                size -= npage << PAGE_SHIFT;
                dma->size += npage << PAGE_SHIFT;
        }

        if (ret)
                vfio_remove_dma(iommu, dma);

        mutex_unlock(&iommu->lock);
        return ret;
}

static int vfio_bus_type(struct device *dev, void *data)
{
        struct bus_type **bus = data;

        if (*bus && *bus != dev->bus)
                return -EINVAL;

        *bus = dev->bus;

        return 0;
}

static int vfio_iommu_replay(struct vfio_iommu *iommu,
                             struct vfio_domain *domain)
{
        struct vfio_domain *d;
        struct rb_node *n;
        int ret;

        /* Arbitrarily pick the first domain in the list for lookups */
        d = list_first_entry(&iommu->domain_list, struct vfio_domain, next);
        n = rb_first(&iommu->dma_list);

        /* If there's not a domain, there better not be any mappings */
        if (WARN_ON(n && !d))
                return -EINVAL;

        for (; n; n = rb_next(n)) {
                struct vfio_dma *dma;
                dma_addr_t iova;

                dma = rb_entry(n, struct vfio_dma, node);
                iova = dma->iova;

                while (iova < dma->iova + dma->size) {
                        phys_addr_t phys = iommu_iova_to_phys(d->domain, iova);
                        size_t size;

                        if (WARN_ON(!phys)) {
                                iova += PAGE_SIZE;
                                continue;
                        }

                        size = PAGE_SIZE;

                        while (iova + size < dma->iova + dma->size &&
                               phys + size == iommu_iova_to_phys(d->domain,
                                                                 iova + size))
                                size += PAGE_SIZE;

                        ret = iommu_map(domain->domain, iova, phys,
                                        size, dma->prot | domain->prot);
                        if (ret)
                                return ret;

                        iova += size;
                }
        }

        return 0;
}

/*
 * We change our unmap behavior slightly depending on whether the IOMMU
 * supports fine-grained superpages.  IOMMUs like AMD-Vi will use a superpage
 * for practically any contiguous power-of-two mapping we give it.  This means
 * we don't need to look for contiguous chunks ourselves to make unmapping
 * more efficient.  On IOMMUs with coarse-grained super pages, like Intel VT-d
 * with discrete 2M/1G/512G/1T superpages, identifying contiguous chunks
 * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
 * hugetlbfs is in use.
 */
static void vfio_test_domain_fgsp(struct vfio_domain *domain)
{
        struct page *pages;
        int ret, order = get_order(PAGE_SIZE * 2);

        pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!pages)
                return;

        ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
                        IOMMU_READ | IOMMU_WRITE | domain->prot);
        if (!ret) {
                size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);

                if (unmapped == PAGE_SIZE)
                        iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE);
                else
                        domain->fgsp = true;
        }

        __free_pages(pages, order);
}

static int vfio_iommu_type1_attach_group(void *iommu_data,
                                         struct iommu_group *iommu_group)
{
        struct vfio_iommu *iommu = iommu_data;
        struct vfio_group *group, *g;
        struct vfio_domain *domain, *d;
        struct bus_type *bus = NULL;
        int ret;

        mutex_lock(&iommu->lock);

        list_for_each_entry(d, &iommu->domain_list, next) {
                list_for_each_entry(g, &d->group_list, next) {
                        if (g->iommu_group != iommu_group)
                                continue;

                        mutex_unlock(&iommu->lock);
                        return -EINVAL;
                }
        }

        group = kzalloc(sizeof(*group), GFP_KERNEL);
        domain = kzalloc(sizeof(*domain), GFP_KERNEL);
        if (!group || !domain) {
                ret = -ENOMEM;
                goto out_free;
        }

        group->iommu_group = iommu_group;

        /* Determine bus_type in order to allocate a domain */
        ret = iommu_group_for_each_dev(iommu_group, &bus, vfio_bus_type);
        if (ret)
                goto out_free;

        domain->domain = iommu_domain_alloc(bus);
        if (!domain->domain) {
                ret = -EIO;
                goto out_free;
        }

        if (iommu->nesting) {
                int attr = 1;

                ret = iommu_domain_set_attr(domain->domain, DOMAIN_ATTR_NESTING,
                                            &attr);
                if (ret)
                        goto out_domain;
        }

        ret = iommu_attach_group(domain->domain, iommu_group);
        if (ret)
                goto out_domain;

        INIT_LIST_HEAD(&domain->group_list);
        list_add(&group->next, &domain->group_list);

        if (!allow_unsafe_interrupts &&
            !iommu_capable(bus, IOMMU_CAP_INTR_REMAP)) {
                pr_warn("%s: No interrupt remapping support.  Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
                        __func__);
                ret = -EPERM;
                goto out_detach;
        }

        if (iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
                domain->prot |= IOMMU_CACHE;

        /*
         * Try to match an existing compatible domain.  We don't want to
         * preclude an IOMMU driver supporting multiple bus_types and being
         * able to include different bus_types in the same IOMMU domain, so
         * we test whether the domains use the same iommu_ops rather than
         * testing if they're on the same bus_type.
         */
        list_for_each_entry(d, &iommu->domain_list, next) {
                if (d->domain->ops == domain->domain->ops &&
                    d->prot == domain->prot) {
                        iommu_detach_group(domain->domain, iommu_group);
                        if (!iommu_attach_group(d->domain, iommu_group)) {
                                list_add(&group->next, &d->group_list);
                                iommu_domain_free(domain->domain);
                                kfree(domain);
                                mutex_unlock(&iommu->lock);
                                return 0;
                        }

                        ret = iommu_attach_group(domain->domain, iommu_group);
                        if (ret)
                                goto out_domain;
                }
        }

        vfio_test_domain_fgsp(domain);

        /* replay mappings on new domains */
        ret = vfio_iommu_replay(iommu, domain);
        if (ret)
                goto out_detach;

        list_add(&domain->next, &iommu->domain_list);

        mutex_unlock(&iommu->lock);

        return 0;

out_detach:
        iommu_detach_group(domain->domain, iommu_group);
out_domain:
        iommu_domain_free(domain->domain);
out_free:
        kfree(domain);
        kfree(group);
        mutex_unlock(&iommu->lock);
        return ret;
}

static void vfio_iommu_unmap_unpin_all(struct vfio_iommu *iommu)
{
        struct rb_node *node;

        while ((node = rb_first(&iommu->dma_list)))
                vfio_remove_dma(iommu, rb_entry(node, struct vfio_dma, node));
}

static void vfio_iommu_type1_detach_group(void *iommu_data,
                                          struct iommu_group *iommu_group)
{
        struct vfio_iommu *iommu = iommu_data;
        struct vfio_domain *domain;
        struct vfio_group *group;

        mutex_lock(&iommu->lock);

        list_for_each_entry(domain, &iommu->domain_list, next) {
                list_for_each_entry(group, &domain->group_list, next) {
                        if (group->iommu_group != iommu_group)
                                continue;

                        iommu_detach_group(domain->domain, iommu_group);
                        list_del(&group->next);
                        kfree(group);
                        /*
                         * Group ownership provides privilege, if the group
                         * list is empty, the domain goes away.  If it's the
                         * last domain, then all the mappings go away too.
                         */
                        if (list_empty(&domain->group_list)) {
                                if (list_is_singular(&iommu->domain_list))
                                        vfio_iommu_unmap_unpin_all(iommu);
                                iommu_domain_free(domain->domain);
                                list_del(&domain->next);
                                kfree(domain);
                        }
                        goto done;
                }
        }

done:
        mutex_unlock(&iommu->lock);
}

static void *vfio_iommu_type1_open(unsigned long arg)
{
        struct vfio_iommu *iommu;

        iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return ERR_PTR(-ENOMEM);

        switch (arg) {
        case VFIO_TYPE1_IOMMU:
                break;
        case VFIO_TYPE1_NESTING_IOMMU:
                iommu->nesting = true;
                /* fall through: nesting implies v2 semantics */
        case VFIO_TYPE1v2_IOMMU:
                iommu->v2 = true;
                break;
        default:
                kfree(iommu);
                return ERR_PTR(-EINVAL);
        }

        INIT_LIST_HEAD(&iommu->domain_list);
        iommu->dma_list = RB_ROOT;
        mutex_init(&iommu->lock);

        return iommu;
}

static void vfio_iommu_type1_release(void *iommu_data)
{
        struct vfio_iommu *iommu = iommu_data;
        struct vfio_domain *domain, *domain_tmp;
        struct vfio_group *group, *group_tmp;

        vfio_iommu_unmap_unpin_all(iommu);

        list_for_each_entry_safe(domain, domain_tmp,
                                 &iommu->domain_list, next) {
                list_for_each_entry_safe(group, group_tmp,
                                         &domain->group_list, next) {
                        iommu_detach_group(domain->domain, group->iommu_group);
                        list_del(&group->next);
                        kfree(group);
                }
                iommu_domain_free(domain->domain);
                list_del(&domain->next);
                kfree(domain);
        }

        kfree(iommu);
}

static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu)
{
        struct vfio_domain *domain;
        int ret = 1;

        mutex_lock(&iommu->lock);
        list_for_each_entry(domain, &iommu->domain_list, next) {
                if (!(domain->prot & IOMMU_CACHE)) {
                        ret = 0;
                        break;
                }
        }
        mutex_unlock(&iommu->lock);

        return ret;
}

static long vfio_iommu_type1_ioctl(void *iommu_data,
                                   unsigned int cmd, unsigned long arg)
{
        struct vfio_iommu *iommu = iommu_data;
        unsigned long minsz;

        if (cmd == VFIO_CHECK_EXTENSION) {
                switch (arg) {
                case VFIO_TYPE1_IOMMU:
                case VFIO_TYPE1v2_IOMMU:
                case VFIO_TYPE1_NESTING_IOMMU:
                        return 1;
                case VFIO_DMA_CC_IOMMU:
                        if (!iommu)
                                return 0;
                        return vfio_domains_have_iommu_cache(iommu);
                default:
                        return 0;
                }
        } else if (cmd == VFIO_IOMMU_GET_INFO) {
                struct vfio_iommu_type1_info info;

                minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                info.flags = VFIO_IOMMU_INFO_PGSIZES;

                info.iova_pgsizes = vfio_pgsize_bitmap(iommu);

                return copy_to_user((void __user *)arg, &info, minsz) ?
                        -EFAULT : 0;

        } else if (cmd == VFIO_IOMMU_MAP_DMA) {
                struct vfio_iommu_type1_dma_map map;
                uint32_t mask = VFIO_DMA_MAP_FLAG_READ |
                                VFIO_DMA_MAP_FLAG_WRITE;

                minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

                if (copy_from_user(&map, (void __user *)arg, minsz))
                        return -EFAULT;

                if (map.argsz < minsz || map.flags & ~mask)
                        return -EINVAL;

                return vfio_dma_do_map(iommu, &map);

        } else if (cmd == VFIO_IOMMU_UNMAP_DMA) {
                struct vfio_iommu_type1_dma_unmap unmap;
                long ret;

                minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);

                if (copy_from_user(&unmap, (void __user *)arg, minsz))
                        return -EFAULT;

                if (unmap.argsz < minsz || unmap.flags)
                        return -EINVAL;

                ret = vfio_dma_do_unmap(iommu, &unmap);
                if (ret)
                        return ret;

                return copy_to_user((void __user *)arg, &unmap, minsz) ?
                        -EFAULT : 0;
        }

        return -ENOTTY;
}

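/*
 * Illustrative end-to-end sketch (not part of this driver): the usual
 * userspace sequence that reaches the ioctl handler above, following the
 * VFIO uAPI.  The group number "26" is hypothetical and error handling
 * is omitted.
 *
 *      int container = open("/dev/vfio/vfio", O_RDWR);
 *      int group = open("/dev/vfio/26", O_RDWR);
 *      struct vfio_iommu_type1_info info = { .argsz = sizeof(info) };
 *
 *      if (ioctl(container, VFIO_GET_API_VERSION) != VFIO_API_VERSION ||
 *          !ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU))
 *              return -1;              (no Type1 v2 support, bail out)
 *
 *      ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *      ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1v2_IOMMU);
 *      ioctl(container, VFIO_IOMMU_GET_INFO, &info);
 *      (info.iova_pgsizes now holds the bitmap from vfio_pgsize_bitmap())
 */
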
static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
        .name           = "vfio-iommu-type1",
        .owner          = THIS_MODULE,
        .open           = vfio_iommu_type1_open,
        .release        = vfio_iommu_type1_release,
        .ioctl          = vfio_iommu_type1_ioctl,
        .attach_group   = vfio_iommu_type1_attach_group,
        .detach_group   = vfio_iommu_type1_detach_group,
};

vfio_iommu_type1_init(void)
1057 return vfio_register_iommu_driver(&vfio_iommu_driver_ops_type1
);
1060 static void __exit
vfio_iommu_type1_cleanup(void)
1062 vfio_unregister_iommu_driver(&vfio_iommu_driver_ops_type1
);
1065 module_init(vfio_iommu_type1_init
);
1066 module_exit(vfio_iommu_type1_cleanup
);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);