1/*
2 * VFIO: IOMMU DMA mapping support for Type1 IOMMU
3 *
4 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
5 * Author: Alex Williamson <alex.williamson@redhat.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * Derived from original vfio:
12 * Copyright 2010 Cisco Systems, Inc. All rights reserved.
13 * Author: Tom Lyon, pugs@cisco.com
14 *
15 * We arbitrarily define a Type1 IOMMU as one matching the below code.
16 * It could be called the x86 IOMMU as it's designed for AMD-Vi & Intel
17 * VT-d, but that makes it harder to re-use as theoretically anyone
18 * implementing a similar IOMMU could make use of this. We expect the
19 * IOMMU to support the IOMMU API and have few to no restrictions around
20 * the IOVA range that can be mapped. The Type1 IOMMU is currently
21 * optimized for relatively static mappings of a userspace process with
22 * userspace pages pinned into memory. We also assume devices and IOMMU
23 * domains are PCI based as the IOMMU API is still centered around a
24 * device/bus interface rather than a group interface.
25 */
26
27#include <linux/compat.h>
28#include <linux/device.h>
29#include <linux/fs.h>
30#include <linux/iommu.h>
31#include <linux/module.h>
32#include <linux/mm.h>
cd9b2268 33#include <linux/rbtree.h>
3f07c014 34#include <linux/sched/signal.h>
6e84f315 35#include <linux/sched/mm.h>
36#include <linux/slab.h>
37#include <linux/uaccess.h>
38#include <linux/vfio.h>
39#include <linux/workqueue.h>
a54eb550 40#include <linux/mdev.h>
c086de81 41#include <linux/notifier.h>
5d704992 42#include <linux/dma-iommu.h>
9d72f87b 43#include <linux/irqdomain.h>
44
45#define DRIVER_VERSION "0.2"
46#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
47#define DRIVER_DESC "Type1 IOMMU driver for VFIO"
48
49static bool allow_unsafe_interrupts;
50module_param_named(allow_unsafe_interrupts,
51 allow_unsafe_interrupts, bool, S_IRUGO | S_IWUSR);
52MODULE_PARM_DESC(allow_unsafe_interrupts,
53 "Enable VFIO IOMMU support for on platforms without interrupt remapping support.");
54
55static bool disable_hugepages;
56module_param_named(disable_hugepages,
57 disable_hugepages, bool, S_IRUGO | S_IWUSR);
58MODULE_PARM_DESC(disable_hugepages,
59 "Disable VFIO IOMMU support for IOMMU hugepages.");
60
61static unsigned int dma_entry_limit __read_mostly = U16_MAX;
62module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644);
63MODULE_PARM_DESC(dma_entry_limit,
64 "Maximum number of user DMA mappings per container (65535).");
65
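/*
 * Illustrative note, not part of the original file: because the parameters
 * above are registered with S_IRUGO | S_IWUSR / 0644 permissions, they can be
 * set at module load time or changed at runtime through sysfs. A minimal
 * sketch, assuming the module is built as vfio_iommu_type1:
 *
 *	modprobe vfio_iommu_type1 dma_entry_limit=131072
 *	echo 131072 > /sys/module/vfio_iommu_type1/parameters/dma_entry_limit
 */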
73fa0d10 66struct vfio_iommu {
1ef3e2bc 67 struct list_head domain_list;
a54eb550 68 struct vfio_domain *external_domain; /* domain for external user */
73fa0d10 69 struct mutex lock;
cd9b2268 70 struct rb_root dma_list;
c086de81 71 struct blocking_notifier_head notifier;
49285593 72 unsigned int dma_avail;
73 bool v2;
74 bool nesting;
75};
76
77struct vfio_domain {
78 struct iommu_domain *domain;
79 struct list_head next;
73fa0d10 80 struct list_head group_list;
1ef3e2bc 81 int prot; /* IOMMU_CACHE */
6fe1010d 82 bool fgsp; /* Fine-grained super pages */
83};
84
85struct vfio_dma {
cd9b2268 86 struct rb_node node;
87 dma_addr_t iova; /* Device address */
88 unsigned long vaddr; /* Process virtual addr */
166fd7d9 89 size_t size; /* Map size (bytes) */
73fa0d10 90 int prot; /* IOMMU_READ/WRITE */
a54eb550 91 bool iommu_mapped;
48d8476b 92 bool lock_cap; /* capable(CAP_IPC_LOCK) */
8f0d5bb9 93 struct task_struct *task;
a54eb550 94 struct rb_root pfn_list; /* Ex-user pinned pfn list */
95};
96
97struct vfio_group {
98 struct iommu_group *iommu_group;
99 struct list_head next;
100};
101
102/*
103 * Guest RAM pinning working set or DMA target
104 */
105struct vfio_pfn {
106 struct rb_node node;
107 dma_addr_t iova; /* Device address */
108 unsigned long pfn; /* Host pfn */
109 atomic_t ref_count;
110};
111
112struct vfio_regions {
113 struct list_head list;
114 dma_addr_t iova;
115 phys_addr_t phys;
116 size_t len;
117};
118
119#define IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu) \
120 (!list_empty(&iommu->domain_list))
121
122static int put_pfn(unsigned long pfn, int prot);
123
124/*
125 * This code handles mapping and unmapping of user data buffers
126 * into DMA'ble space using the IOMMU
127 */
128
129static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
130 dma_addr_t start, size_t size)
131{
132 struct rb_node *node = iommu->dma_list.rb_node;
133
134 while (node) {
135 struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node);
136
137 if (start + size <= dma->iova)
138 node = node->rb_left;
166fd7d9 139 else if (start >= dma->iova + dma->size)
140 node = node->rb_right;
141 else
142 return dma;
143 }
144
145 return NULL;
146}
147
1ef3e2bc 148static void vfio_link_dma(struct vfio_iommu *iommu, struct vfio_dma *new)
149{
150 struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL;
151 struct vfio_dma *dma;
152
153 while (*link) {
154 parent = *link;
155 dma = rb_entry(parent, struct vfio_dma, node);
156
166fd7d9 157 if (new->iova + new->size <= dma->iova)
158 link = &(*link)->rb_left;
159 else
160 link = &(*link)->rb_right;
161 }
162
163 rb_link_node(&new->node, parent, link);
164 rb_insert_color(&new->node, &iommu->dma_list);
165}
166
1ef3e2bc 167static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
168{
169 rb_erase(&old->node, &iommu->dma_list);
170}
171
172/*
173 * Helper Functions for host iova-pfn list
174 */
175static struct vfio_pfn *vfio_find_vpfn(struct vfio_dma *dma, dma_addr_t iova)
176{
177 struct vfio_pfn *vpfn;
178 struct rb_node *node = dma->pfn_list.rb_node;
179
180 while (node) {
181 vpfn = rb_entry(node, struct vfio_pfn, node);
182
183 if (iova < vpfn->iova)
184 node = node->rb_left;
185 else if (iova > vpfn->iova)
186 node = node->rb_right;
187 else
188 return vpfn;
189 }
190 return NULL;
191}
192
193static void vfio_link_pfn(struct vfio_dma *dma,
194 struct vfio_pfn *new)
195{
196 struct rb_node **link, *parent = NULL;
197 struct vfio_pfn *vpfn;
198
199 link = &dma->pfn_list.rb_node;
200 while (*link) {
201 parent = *link;
202 vpfn = rb_entry(parent, struct vfio_pfn, node);
203
204 if (new->iova < vpfn->iova)
205 link = &(*link)->rb_left;
206 else
207 link = &(*link)->rb_right;
208 }
209
210 rb_link_node(&new->node, parent, link);
211 rb_insert_color(&new->node, &dma->pfn_list);
212}
213
214static void vfio_unlink_pfn(struct vfio_dma *dma, struct vfio_pfn *old)
215{
216 rb_erase(&old->node, &dma->pfn_list);
217}
218
219static int vfio_add_to_pfn_list(struct vfio_dma *dma, dma_addr_t iova,
220 unsigned long pfn)
221{
222 struct vfio_pfn *vpfn;
223
224 vpfn = kzalloc(sizeof(*vpfn), GFP_KERNEL);
225 if (!vpfn)
226 return -ENOMEM;
227
228 vpfn->iova = iova;
229 vpfn->pfn = pfn;
230 atomic_set(&vpfn->ref_count, 1);
231 vfio_link_pfn(dma, vpfn);
232 return 0;
233}
234
235static void vfio_remove_from_pfn_list(struct vfio_dma *dma,
236 struct vfio_pfn *vpfn)
237{
238 vfio_unlink_pfn(dma, vpfn);
239 kfree(vpfn);
240}
241
242static struct vfio_pfn *vfio_iova_get_vfio_pfn(struct vfio_dma *dma,
243 unsigned long iova)
244{
245 struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova);
246
247 if (vpfn)
248 atomic_inc(&vpfn->ref_count);
249 return vpfn;
250}
251
252static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn)
253{
254 int ret = 0;
255
256 if (atomic_dec_and_test(&vpfn->ref_count)) {
257 ret = put_pfn(vpfn->pfn, dma->prot);
258 vfio_remove_from_pfn_list(dma, vpfn);
259 }
260 return ret;
261}
262
48d8476b 263static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async)
73fa0d10 264{
73fa0d10 265 struct mm_struct *mm;
0cfef2b7 266 int ret;
73fa0d10 267
3624a248 268 if (!npage)
0cfef2b7 269 return 0;
3624a248 270
48d8476b 271 mm = async ? get_task_mm(dma->task) : dma->task->mm;
3624a248 272 if (!mm)
0cfef2b7 273 return -ESRCH; /* process exited */
73fa0d10 274
275 ret = down_write_killable(&mm->mmap_sem);
276 if (!ret) {
277 if (npage > 0) {
48d8476b 278 if (!dma->lock_cap) {
279 unsigned long limit;
280
48d8476b 281 limit = task_rlimit(dma->task,
282 RLIMIT_MEMLOCK) >> PAGE_SHIFT;
283
284 if (mm->locked_vm + npage > limit)
285 ret = -ENOMEM;
286 }
287 }
288
289 if (!ret)
290 mm->locked_vm += npage;
73fa0d10 291
0cfef2b7 292 up_write(&mm->mmap_sem);
293 }
294
48d8476b 295 if (async)
3624a248 296 mmput(mm);
297
298 return ret;
299}
300
301/*
302 * Some mappings aren't backed by a struct page, for example an mmap'd
303 * MMIO range for our own or another device. These use a different
304 * pfn conversion and shouldn't be tracked as locked pages.
305 */
306static bool is_invalid_reserved_pfn(unsigned long pfn)
307{
308 if (pfn_valid(pfn)) {
309 bool reserved;
310 struct page *tail = pfn_to_page(pfn);
668f9abb 311 struct page *head = compound_head(tail);
312 reserved = !!(PageReserved(head));
313 if (head != tail) {
314 /*
315 * "head" is not a dangling pointer
668f9abb 316 * (compound_head takes care of that)
317 * but the hugepage may have been split
318 * from under us (and we may not hold a
319 * reference count on the head page so it can
320 * be reused before we run PageReferenced), so
321 * we have to check PageTail before returning
322 * what we just read.
323 */
324 smp_rmb();
325 if (PageTail(tail))
326 return reserved;
327 }
328 return PageReserved(tail);
329 }
330
331 return true;
332}
333
334static int put_pfn(unsigned long pfn, int prot)
335{
336 if (!is_invalid_reserved_pfn(pfn)) {
337 struct page *page = pfn_to_page(pfn);
338 if (prot & IOMMU_WRITE)
339 SetPageDirty(page);
340 put_page(page);
341 return 1;
342 }
343 return 0;
344}
345
346static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
347 int prot, unsigned long *pfn)
348{
349 struct page *page[1];
350 struct vm_area_struct *vma;
94db151d 351 struct vm_area_struct *vmas[1];
bb94b55a 352 unsigned int flags = 0;
ea85cf35 353 int ret;
73fa0d10 354
355 if (prot & IOMMU_WRITE)
356 flags |= FOLL_WRITE;
357
358 down_read(&mm->mmap_sem);
ea85cf35 359 if (mm == current->mm) {
bb94b55a 360 ret = get_user_pages_longterm(vaddr, 1, flags, page, vmas);
ea85cf35 361 } else {
ea85cf35 362 ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
363 vmas, NULL);
364 /*
365 * The lifetime of a vaddr_get_pfn() page pin is
366 * userspace-controlled. In the fs-dax case this could
367 * lead to indefinite stalls in filesystem operations.
368 * Disallow attempts to pin fs-dax pages via this
369 * interface.
370 */
371 if (ret > 0 && vma_is_fsdax(vmas[0])) {
372 ret = -EOPNOTSUPP;
373 put_page(page[0]);
374 }
ea85cf35 375 }
bb94b55a 376 up_read(&mm->mmap_sem);
377
378 if (ret == 1) {
379 *pfn = page_to_pfn(page[0]);
380 return 0;
381 }
382
ea85cf35 383 down_read(&mm->mmap_sem);
73fa0d10 384
ea85cf35 385 vma = find_vma_intersection(mm, vaddr, vaddr + 1);
386
387 if (vma && vma->vm_flags & VM_PFNMAP) {
388 *pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
389 if (is_invalid_reserved_pfn(*pfn))
390 ret = 0;
391 }
392
ea85cf35 393 up_read(&mm->mmap_sem);
394 return ret;
395}
396
397/*
398 * Attempt to pin pages. We really don't want to track all the pfns and
399 * the iommu can only map chunks of consecutive pfns anyway, so get the
400 * first page and all consecutive pages with the same locking.
401 */
8f0d5bb9 402static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
7cb671e7 403 long npage, unsigned long *pfn_base,
48d8476b 404 unsigned long limit)
73fa0d10 405{
7cb671e7 406 unsigned long pfn = 0;
6c38c055 407 long ret, pinned = 0, lock_acct = 0;
89c29def 408 bool rsvd;
a54eb550 409 dma_addr_t iova = vaddr - dma->vaddr + dma->iova;
73fa0d10 410
411 /* This code path is only user initiated */
412 if (!current->mm)
166fd7d9 413 return -ENODEV;
73fa0d10 414
6c38c055 415 ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, pfn_base);
166fd7d9 416 if (ret)
6c38c055 417 return ret;
73fa0d10 418
6c38c055 419 pinned++;
89c29def 420 rsvd = is_invalid_reserved_pfn(*pfn_base);
73fa0d10 421
422 /*
423 * Reserved pages aren't counted against the user; externally pinned
424 * pages are already counted against the user.
425 */
89c29def 426 if (!rsvd && !vfio_find_vpfn(dma, iova)) {
48d8476b 427 if (!dma->lock_cap && current->mm->locked_vm + 1 > limit) {
428 put_pfn(*pfn_base, dma->prot);
429 pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
430 limit << PAGE_SHIFT);
6c38c055 431 return -ENOMEM;
432 }
433 lock_acct++;
434 }
435
436 if (unlikely(disable_hugepages))
437 goto out;
73fa0d10 438
439 /* Lock all the consecutive pages from pfn_base */
440 for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage;
441 pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) {
442 ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn);
443 if (ret)
444 break;
445
446 if (pfn != *pfn_base + pinned ||
447 rsvd != is_invalid_reserved_pfn(pfn)) {
448 put_pfn(pfn, dma->prot);
449 break;
450 }
166fd7d9 451
89c29def 452 if (!rsvd && !vfio_find_vpfn(dma, iova)) {
48d8476b 453 if (!dma->lock_cap &&
6c38c055 454 current->mm->locked_vm + lock_acct + 1 > limit) {
a54eb550 455 put_pfn(pfn, dma->prot);
456 pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
457 __func__, limit << PAGE_SHIFT);
458 ret = -ENOMEM;
459 goto unpin_out;
a54eb550 460 }
6c38c055 461 lock_acct++;
462 }
463 }
464
6c38c055 465out:
48d8476b 466 ret = vfio_lock_acct(dma, lock_acct, false);
467
468unpin_out:
469 if (ret) {
470 if (!rsvd) {
471 for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
472 put_pfn(pfn, dma->prot);
473 }
474
475 return ret;
476 }
166fd7d9 477
6c38c055 478 return pinned;
479}
480
481static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
482 unsigned long pfn, long npage,
483 bool do_accounting)
166fd7d9 484{
a54eb550 485 long unlocked = 0, locked = 0;
486 long i;
487
6c38c055 488 for (i = 0; i < npage; i++, iova += PAGE_SIZE) {
489 if (put_pfn(pfn++, dma->prot)) {
490 unlocked++;
6c38c055 491 if (vfio_find_vpfn(dma, iova))
492 locked++;
493 }
494 }
495
496 if (do_accounting)
48d8476b 497 vfio_lock_acct(dma, locked - unlocked, true);
498
499 return unlocked;
500}
501
502static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
503 unsigned long *pfn_base, bool do_accounting)
504{
505 struct mm_struct *mm;
506 int ret;
507
508 mm = get_task_mm(dma->task);
509 if (!mm)
510 return -ENODEV;
511
512 ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base);
80dbe1fb 513 if (!ret && do_accounting && !is_invalid_reserved_pfn(*pfn_base)) {
48d8476b 514 ret = vfio_lock_acct(dma, 1, true);
515 if (ret) {
516 put_pfn(*pfn_base, dma->prot);
517 if (ret == -ENOMEM)
518 pr_warn("%s: Task %s (%d) RLIMIT_MEMLOCK "
519 "(%ld) exceeded\n", __func__,
520 dma->task->comm, task_pid_nr(dma->task),
521 task_rlimit(dma->task, RLIMIT_MEMLOCK));
522 }
523 }
524
525 mmput(mm);
526 return ret;
527}
528
529static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova,
530 bool do_accounting)
531{
532 int unlocked;
533 struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova);
534
535 if (!vpfn)
536 return 0;
537
538 unlocked = vfio_iova_put_vfio_pfn(dma, vpfn);
539
540 if (do_accounting)
48d8476b 541 vfio_lock_acct(dma, -unlocked, true);
542
543 return unlocked;
544}
545
546static int vfio_iommu_type1_pin_pages(void *iommu_data,
547 unsigned long *user_pfn,
548 int npage, int prot,
549 unsigned long *phys_pfn)
550{
551 struct vfio_iommu *iommu = iommu_data;
552 int i, j, ret;
553 unsigned long remote_vaddr;
554 struct vfio_dma *dma;
555 bool do_accounting;
556
557 if (!iommu || !user_pfn || !phys_pfn)
558 return -EINVAL;
559
560 /* Supported for v2 version only */
561 if (!iommu->v2)
562 return -EACCES;
563
564 mutex_lock(&iommu->lock);
565
566 /* Fail if notifier list is empty */
567 if ((!iommu->external_domain) || (!iommu->notifier.head)) {
568 ret = -EINVAL;
569 goto pin_done;
570 }
571
572 /*
573 * If an iommu capable domain exists in the container then all pages are
574 * already pinned and accounted. Accounting should be done if there is no
575 * iommu capable domain in the container.
576 */
577 do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu);
578
579 for (i = 0; i < npage; i++) {
580 dma_addr_t iova;
581 struct vfio_pfn *vpfn;
582
583 iova = user_pfn[i] << PAGE_SHIFT;
2b8bb1d7 584 dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
585 if (!dma) {
586 ret = -EINVAL;
587 goto pin_unwind;
588 }
589
590 if ((dma->prot & prot) != prot) {
591 ret = -EPERM;
592 goto pin_unwind;
593 }
594
595 vpfn = vfio_iova_get_vfio_pfn(dma, iova);
596 if (vpfn) {
597 phys_pfn[i] = vpfn->pfn;
598 continue;
599 }
600
601 remote_vaddr = dma->vaddr + iova - dma->iova;
602 ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn[i],
603 do_accounting);
80dbe1fb 604 if (ret)
a54eb550 605 goto pin_unwind;
606
607 ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]);
608 if (ret) {
609 vfio_unpin_page_external(dma, iova, do_accounting);
610 goto pin_unwind;
611 }
612 }
613
614 ret = i;
615 goto pin_done;
616
617pin_unwind:
618 phys_pfn[i] = 0;
619 for (j = 0; j < i; j++) {
620 dma_addr_t iova;
621
622 iova = user_pfn[j] << PAGE_SHIFT;
2b8bb1d7 623 dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
624 vfio_unpin_page_external(dma, iova, do_accounting);
625 phys_pfn[j] = 0;
626 }
627pin_done:
628 mutex_unlock(&iommu->lock);
629 return ret;
630}
631
632static int vfio_iommu_type1_unpin_pages(void *iommu_data,
633 unsigned long *user_pfn,
634 int npage)
635{
636 struct vfio_iommu *iommu = iommu_data;
637 bool do_accounting;
638 int i;
639
640 if (!iommu || !user_pfn)
641 return -EINVAL;
642
643 /* Supported for v2 version only */
644 if (!iommu->v2)
645 return -EACCES;
646
647 mutex_lock(&iommu->lock);
648
649 if (!iommu->external_domain) {
650 mutex_unlock(&iommu->lock);
651 return -EINVAL;
652 }
653
654 do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu);
655 for (i = 0; i < npage; i++) {
656 struct vfio_dma *dma;
657 dma_addr_t iova;
658
659 iova = user_pfn[i] << PAGE_SHIFT;
2b8bb1d7 660 dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
661 if (!dma)
662 goto unpin_exit;
663 vfio_unpin_page_external(dma, iova, do_accounting);
664 }
665
666unpin_exit:
667 mutex_unlock(&iommu->lock);
668 return i > npage ? npage : (i > 0 ? i : -EINVAL);
669}
670
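/*
 * Illustrative sketch, not part of the original file: an mdev vendor driver
 * does not call vfio_iommu_type1_pin_pages()/unpin_pages() directly; it
 * reaches them through the vfio_pin_pages()/vfio_unpin_pages() wrappers that
 * dispatch to the .pin_pages/.unpin_pages callbacks registered at the bottom
 * of this file. Roughly (gpa and mdev are placeholder names):
 *
 *	unsigned long user_pfn = gpa >> PAGE_SHIFT, phys_pfn;
 *	int ret = vfio_pin_pages(mdev_dev(mdev), &user_pfn, 1,
 *				 IOMMU_READ | IOMMU_WRITE, &phys_pfn);
 *	...
 *	vfio_unpin_pages(mdev_dev(mdev), &user_pfn, 1);
 */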
671static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain,
672 struct list_head *regions)
673{
674 long unlocked = 0;
675 struct vfio_regions *entry, *next;
676
677 iommu_tlb_sync(domain->domain);
678
679 list_for_each_entry_safe(entry, next, regions, list) {
680 unlocked += vfio_unpin_pages_remote(dma,
681 entry->iova,
682 entry->phys >> PAGE_SHIFT,
683 entry->len >> PAGE_SHIFT,
684 false);
685 list_del(&entry->list);
686 kfree(entry);
687 }
688
689 cond_resched();
690
691 return unlocked;
692}
693
694/*
695 * Generally, VFIO needs to unpin remote pages after each IOTLB flush.
696 * Therefore, when using the IOTLB flush sync interface, VFIO needs to keep
697 * track of these regions (currently using a list).
698 *
699 * This value specifies the maximum number of regions for each IOTLB flush sync.
700 */
701#define VFIO_IOMMU_TLB_SYNC_MAX 512
702
703static size_t unmap_unpin_fast(struct vfio_domain *domain,
704 struct vfio_dma *dma, dma_addr_t *iova,
705 size_t len, phys_addr_t phys, long *unlocked,
706 struct list_head *unmapped_list,
707 int *unmapped_cnt)
708{
709 size_t unmapped = 0;
710 struct vfio_regions *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
711
712 if (entry) {
713 unmapped = iommu_unmap_fast(domain->domain, *iova, len);
714
715 if (!unmapped) {
716 kfree(entry);
717 } else {
718 iommu_tlb_range_add(domain->domain, *iova, unmapped);
719 entry->iova = *iova;
720 entry->phys = phys;
721 entry->len = unmapped;
722 list_add_tail(&entry->list, unmapped_list);
723
724 *iova += unmapped;
725 (*unmapped_cnt)++;
726 }
727 }
728
729 /*
730 * Sync if the number of fast-unmap regions hits the limit
731 * or in case of errors.
732 */
733 if (*unmapped_cnt >= VFIO_IOMMU_TLB_SYNC_MAX || !unmapped) {
734 *unlocked += vfio_sync_unpin(dma, domain,
735 unmapped_list);
736 *unmapped_cnt = 0;
737 }
738
739 return unmapped;
740}
741
742static size_t unmap_unpin_slow(struct vfio_domain *domain,
743 struct vfio_dma *dma, dma_addr_t *iova,
744 size_t len, phys_addr_t phys,
745 long *unlocked)
746{
747 size_t unmapped = iommu_unmap(domain->domain, *iova, len);
748
749 if (unmapped) {
750 *unlocked += vfio_unpin_pages_remote(dma, *iova,
751 phys >> PAGE_SHIFT,
752 unmapped >> PAGE_SHIFT,
753 false);
754 *iova += unmapped;
755 cond_resched();
756 }
757 return unmapped;
758}
759
760static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
761 bool do_accounting)
166fd7d9 762{
763 dma_addr_t iova = dma->iova, end = dma->iova + dma->size;
764 struct vfio_domain *domain, *d;
765 LIST_HEAD(unmapped_region_list);
766 int unmapped_region_cnt = 0;
767 long unlocked = 0;
768
1ef3e2bc 769 if (!dma->size)
770 return 0;
771
772 if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
773 return 0;
774
775 /*
776 * We use the IOMMU to track the physical addresses, otherwise we'd
777 * need a much more complicated tracking system. Unfortunately that
778 * means we need to use one of the iommu domains to figure out the
779 * pfns to unpin. The rest need to be unmapped in advance so we have
780 * no iommu translations remaining when the pages are unpinned.
781 */
782 domain = d = list_first_entry(&iommu->domain_list,
783 struct vfio_domain, next);
784
c5e66887 785 list_for_each_entry_continue(d, &iommu->domain_list, next) {
1ef3e2bc 786 iommu_unmap(d->domain, dma->iova, dma->size);
787 cond_resched();
788 }
1ef3e2bc 789
166fd7d9 790 while (iova < end) {
791 size_t unmapped, len;
792 phys_addr_t phys, next;
166fd7d9 793
1ef3e2bc 794 phys = iommu_iova_to_phys(domain->domain, iova);
795 if (WARN_ON(!phys)) {
796 iova += PAGE_SIZE;
797 continue;
73fa0d10 798 }
166fd7d9 799
800 /*
801 * To optimize for fewer iommu_unmap() calls, each of which
802 * may require hardware cache flushing, try to find the
803 * largest contiguous physical memory chunk to unmap.
804 */
805 for (len = PAGE_SIZE;
806 !domain->fgsp && iova + len < end; len += PAGE_SIZE) {
807 next = iommu_iova_to_phys(domain->domain, iova + len);
808 if (next != phys + len)
809 break;
810 }
811
812 /*
813 * First, try to use fast unmap/unpin. In case of failure,
814 * switch to slow unmap/unpin path.
815 */
816 unmapped = unmap_unpin_fast(domain, dma, &iova, len, phys,
817 &unlocked, &unmapped_region_list,
818 &unmapped_region_cnt);
819 if (!unmapped) {
820 unmapped = unmap_unpin_slow(domain, dma, &iova, len,
821 phys, &unlocked);
822 if (WARN_ON(!unmapped))
823 break;
824 }
73fa0d10 825 }
166fd7d9 826
a54eb550 827 dma->iommu_mapped = false;
828
829 if (unmapped_region_cnt)
830 unlocked += vfio_sync_unpin(dma, domain, &unmapped_region_list);
831
a54eb550 832 if (do_accounting) {
48d8476b 833 vfio_lock_acct(dma, -unlocked, true);
834 return 0;
835 }
836 return unlocked;
837}
838
1ef3e2bc 839static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
73fa0d10 840{
a54eb550 841 vfio_unmap_unpin(iommu, dma, true);
1ef3e2bc 842 vfio_unlink_dma(iommu, dma);
8f0d5bb9 843 put_task_struct(dma->task);
1ef3e2bc 844 kfree(dma);
49285593 845 iommu->dma_avail++;
1ef3e2bc 846}
73fa0d10 847
848static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
849{
850 struct vfio_domain *domain;
4644321f 851 unsigned long bitmap = ULONG_MAX;
166fd7d9 852
853 mutex_lock(&iommu->lock);
854 list_for_each_entry(domain, &iommu->domain_list, next)
d16e0faa 855 bitmap &= domain->domain->pgsize_bitmap;
1ef3e2bc 856 mutex_unlock(&iommu->lock);
73fa0d10 857
858 /*
859 * In case the IOMMU supports page sizes smaller than PAGE_SIZE
860 * we pretend PAGE_SIZE is supported and hide sub-PAGE_SIZE sizes.
861 * That way the user will be able to map/unmap buffers whose size/
862 * start address is aligned with PAGE_SIZE. Pinning code uses that
863 * granularity while iommu driver can use the sub-PAGE_SIZE size
864 * to map the buffer.
865 */
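	/*
	 * Worked example (illustrative): on a kernel built with 64K pages
	 * where the IOMMU driver reports 4K | 2M | 1G, the 4K bit falls
	 * below PAGE_SIZE and is masked off, and the PAGE_SIZE bit is set
	 * instead, so userspace sees 64K | 2M | 1G.
	 */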
866 if (bitmap & ~PAGE_MASK) {
867 bitmap &= PAGE_MASK;
868 bitmap |= PAGE_SIZE;
869 }
870
1ef3e2bc 871 return bitmap;
872}
873
874static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
875 struct vfio_iommu_type1_dma_unmap *unmap)
876{
73fa0d10 877 uint64_t mask;
c086de81 878 struct vfio_dma *dma, *dma_last = NULL;
1ef3e2bc 879 size_t unmapped = 0;
c086de81 880 int ret = 0, retries = 0;
73fa0d10 881
1ef3e2bc 882 mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;
883
884 if (unmap->iova & mask)
885 return -EINVAL;
f5bfdbf2 886 if (!unmap->size || unmap->size & mask)
73fa0d10 887 return -EINVAL;
58fec830 888 if (unmap->iova + unmap->size - 1 < unmap->iova ||
889 unmap->size > SIZE_MAX)
890 return -EINVAL;
73fa0d10 891
73fa0d10 892 WARN_ON(mask & PAGE_MASK);
c086de81 893again:
894 mutex_lock(&iommu->lock);
895
896 /*
897 * vfio-iommu-type1 (v1) - User mappings were coalesced together to
898 * avoid tracking individual mappings. This means that the granularity
899 * of the original mapping was lost and the user was allowed to attempt
900 * to unmap any range. Depending on the contiguousness of physical
901 * memory and page sizes supported by the IOMMU, arbitrary unmaps may
902 * or may not have worked. We only guaranteed unmap granularity
903 * matching the original mapping; even though it was untracked here,
904 * the original mappings are reflected in IOMMU mappings. This
905 * resulted in a couple unusual behaviors. First, if a range is not
906 * able to be unmapped, ex. a set of 4k pages that was mapped as a
907 * 2M hugepage into the IOMMU, the unmap ioctl returns success but with
908 * a zero sized unmap. Also, if an unmap request overlaps the first
909 * address of a hugepage, the IOMMU will unmap the entire hugepage.
910 * This also returns success and the returned unmap size reflects the
911 * actual size unmapped.
912 *
913 * We attempt to maintain compatibility with this "v1" interface, but
914 * we take control out of the hands of the IOMMU. Therefore, an unmap
915 * request offset from the beginning of the original mapping will
916 * return success with zero sized unmap. And an unmap request covering
917 * the first iova of mapping will unmap the entire range.
918 *
919 * The v2 version of this interface intends to be more deterministic.
920 * Unmap requests must fully cover previous mappings. Multiple
921 * mappings may still be unmapped by specifying large ranges, but there
922 * must not be any previous mappings bisected by the range. An error
923 * will be returned if these conditions are not met. The v2 interface
924 * will only return success and a size of zero if there were no
925 * mappings within the range.
926 */
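	/*
	 * Illustrative example of the v2 semantics, not part of the original
	 * comment: with a single 2M mapping at iova 0x100000, an unmap of
	 * iova 0x100000/size 0x100000 fails with -EINVAL because it would
	 * bisect the mapping, an unmap of iova 0x100000/size 0x200000
	 * removes it entirely, and an unmap over a range containing no
	 * mappings succeeds with a reported size of zero.
	 */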
927 if (iommu->v2) {
7c03f428 928 dma = vfio_find_dma(iommu, unmap->iova, 1);
929 if (dma && dma->iova != unmap->iova) {
930 ret = -EINVAL;
931 goto unlock;
932 }
933 dma = vfio_find_dma(iommu, unmap->iova + unmap->size - 1, 0);
934 if (dma && dma->iova + dma->size != unmap->iova + unmap->size) {
935 ret = -EINVAL;
936 goto unlock;
937 }
938 }
939
166fd7d9 940 while ((dma = vfio_find_dma(iommu, unmap->iova, unmap->size))) {
1ef3e2bc 941 if (!iommu->v2 && unmap->iova > dma->iova)
166fd7d9 942 break;
943 /*
944 * Only a task with the same address space as the one that mapped this
945 * iova range is allowed to unmap it.
946 */
947 if (dma->task->mm != current->mm)
948 break;
949
950 if (!RB_EMPTY_ROOT(&dma->pfn_list)) {
951 struct vfio_iommu_type1_dma_unmap nb_unmap;
952
953 if (dma_last == dma) {
954 BUG_ON(++retries > 10);
955 } else {
956 dma_last = dma;
957 retries = 0;
958 }
959
960 nb_unmap.iova = dma->iova;
961 nb_unmap.size = dma->size;
962
963 /*
964 * Notify anyone (mdev vendor drivers) to invalidate and
965 * unmap iovas within the range we're about to unmap.
966 * Vendor drivers MUST unpin pages in response to an
967 * invalidation.
968 */
969 mutex_unlock(&iommu->lock);
970 blocking_notifier_call_chain(&iommu->notifier,
971 VFIO_IOMMU_NOTIFY_DMA_UNMAP,
972 &nb_unmap);
973 goto again;
974 }
975 unmapped += dma->size;
976 vfio_remove_dma(iommu, dma);
166fd7d9 977 }
cd9b2268 978
1ef3e2bc 979unlock:
73fa0d10 980 mutex_unlock(&iommu->lock);
166fd7d9 981
1ef3e2bc 982 /* Report how much was unmapped */
983 unmap->size = unmapped;
984
985 return ret;
986}
987
988static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
989 unsigned long pfn, long npage, int prot)
990{
991 struct vfio_domain *d;
992 int ret;
993
994 list_for_each_entry(d, &iommu->domain_list, next) {
995 ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
996 npage << PAGE_SHIFT, prot | d->prot);
997 if (ret)
998 goto unwind;
999
1000 cond_resched();
1001 }
1002
1003 return 0;
1004
1005unwind:
1006 list_for_each_entry_continue_reverse(d, &iommu->domain_list, next)
1007 iommu_unmap(d->domain, iova, npage << PAGE_SHIFT);
166fd7d9 1008
cd9b2268 1009 return ret;
1010}
1011
1012static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma,
1013 size_t map_size)
1014{
1015 dma_addr_t iova = dma->iova;
1016 unsigned long vaddr = dma->vaddr;
1017 size_t size = map_size;
1018 long npage;
7cb671e7 1019 unsigned long pfn, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
1020 int ret = 0;
1021
1022 while (size) {
1023 /* Pin a contiguous chunk of memory */
1024 npage = vfio_pin_pages_remote(dma, vaddr + dma->size,
48d8476b 1025 size >> PAGE_SHIFT, &pfn, limit);
1026 if (npage <= 0) {
1027 WARN_ON(!npage);
1028 ret = (int)npage;
1029 break;
1030 }
1031
1032 /* Map it! */
1033 ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage,
1034 dma->prot);
1035 if (ret) {
1036 vfio_unpin_pages_remote(dma, iova + dma->size, pfn,
1037 npage, true);
1038 break;
1039 }
1040
1041 size -= npage << PAGE_SHIFT;
1042 dma->size += npage << PAGE_SHIFT;
1043 }
1044
1045 dma->iommu_mapped = true;
1046
1047 if (ret)
1048 vfio_remove_dma(iommu, dma);
1049
1050 return ret;
1051}
1052
1053static int vfio_dma_do_map(struct vfio_iommu *iommu,
1054 struct vfio_iommu_type1_dma_map *map)
1055{
c8dbca16 1056 dma_addr_t iova = map->iova;
166fd7d9 1057 unsigned long vaddr = map->vaddr;
1058 size_t size = map->size;
1059 int ret = 0, prot = 0;
1060 uint64_t mask;
1ef3e2bc 1061 struct vfio_dma *dma;
166fd7d9 1062
1063 /* Verify that none of our __u64 fields overflow */
1064 if (map->size != size || map->vaddr != vaddr || map->iova != iova)
1065 return -EINVAL;
73fa0d10 1066
1ef3e2bc 1067 mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;
73fa0d10 1068
1069 WARN_ON(mask & PAGE_MASK);
1070
1071 /* READ/WRITE from device perspective */
1072 if (map->flags & VFIO_DMA_MAP_FLAG_WRITE)
1073 prot |= IOMMU_WRITE;
1074 if (map->flags & VFIO_DMA_MAP_FLAG_READ)
1075 prot |= IOMMU_READ;
1076
c8dbca16 1077 if (!prot || !size || (size | iova | vaddr) & mask)
1078 return -EINVAL;
1079
1080 /* Don't allow IOVA or virtual address wrap */
1081 if (iova + size - 1 < iova || vaddr + size - 1 < vaddr)
1082 return -EINVAL;
1083
1084 mutex_lock(&iommu->lock);
1085
c8dbca16 1086 if (vfio_find_dma(iommu, iova, size)) {
1087 ret = -EEXIST;
1088 goto out_unlock;
1089 }
1090
1091 if (!iommu->dma_avail) {
1092 ret = -ENOSPC;
1093 goto out_unlock;
1094 }
1095
1096 dma = kzalloc(sizeof(*dma), GFP_KERNEL);
1097 if (!dma) {
1098 ret = -ENOMEM;
1099 goto out_unlock;
1100 }
1101
49285593 1102 iommu->dma_avail--;
1103 dma->iova = iova;
1104 dma->vaddr = vaddr;
1ef3e2bc 1105 dma->prot = prot;
1106
1107 /*
1108 * We need to be able to both add to a task's locked memory and test
1109 * against the locked memory limit and we need to be able to do both
1110 * outside of this call path as pinning can be asynchronous via the
1111 * external interfaces for mdev devices. RLIMIT_MEMLOCK requires a
1112 * task_struct and VM locked pages requires an mm_struct, however
1113 * holding an indefinite mm reference is not recommended, therefore we
1114 * only hold a reference to a task. We could hold a reference to
1115 * current, however QEMU uses this call path through vCPU threads,
1116 * which can be killed resulting in a NULL mm and failure in the unmap
1117 * path when called via a different thread. Avoid this problem by
1118 * using the group_leader as threads within the same group require
1119 * both CLONE_THREAD and CLONE_VM and will therefore use the same
1120 * mm_struct.
1121 *
1122 * Previously we also used the task for testing CAP_IPC_LOCK at the
1123 * time of pinning and accounting, however has_capability() makes use
1124 * of real_cred, a copy-on-write field, so we can't guarantee that it
1125 * matches group_leader, or in fact that it might not change by the
1126 * time it's evaluated. If a process were to call MAP_DMA with
1127 * CAP_IPC_LOCK but later drop it, it doesn't make sense that they
1128 * possibly see different results for an iommu_mapped vfio_dma vs
1129 * externally mapped. Therefore track CAP_IPC_LOCK in vfio_dma at the
1130 * time of calling MAP_DMA.
1131 */
1132 get_task_struct(current->group_leader);
1133 dma->task = current->group_leader;
1134 dma->lock_cap = capable(CAP_IPC_LOCK);
1135
a54eb550 1136 dma->pfn_list = RB_ROOT;
166fd7d9 1137
1138 /* Insert zero-sized and grow as we map chunks of it */
1139 vfio_link_dma(iommu, dma);
166fd7d9 1140
1141 /* Don't pin and map if container doesn't contain IOMMU capable domain */
1142 if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
1143 dma->size = size;
1144 else
1145 ret = vfio_pin_map_dma(iommu, dma, size);
1146
8f0d5bb9 1147out_unlock:
1148 mutex_unlock(&iommu->lock);
1149 return ret;
1150}
1151
1152static int vfio_bus_type(struct device *dev, void *data)
1153{
1154 struct bus_type **bus = data;
1155
1156 if (*bus && *bus != dev->bus)
1157 return -EINVAL;
1158
1159 *bus = dev->bus;
1160
1161 return 0;
1162}
1163
1164static int vfio_iommu_replay(struct vfio_iommu *iommu,
1165 struct vfio_domain *domain)
1166{
1167 struct vfio_domain *d;
1168 struct rb_node *n;
7cb671e7 1169 unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
1170 int ret;
1171
1172 /* Arbitrarily pick the first domain in the list for lookups */
1173 d = list_first_entry(&iommu->domain_list, struct vfio_domain, next);
1174 n = rb_first(&iommu->dma_list);
1175
1176 for (; n; n = rb_next(n)) {
1177 struct vfio_dma *dma;
1178 dma_addr_t iova;
1179
1180 dma = rb_entry(n, struct vfio_dma, node);
1181 iova = dma->iova;
1182
1183 while (iova < dma->iova + dma->size) {
a54eb550 1184 phys_addr_t phys;
1ef3e2bc 1185 size_t size;
73fa0d10 1186
1187 if (dma->iommu_mapped) {
1188 phys_addr_t p;
1189 dma_addr_t i;
1190
1191 phys = iommu_iova_to_phys(d->domain, iova);
1192
1193 if (WARN_ON(!phys)) {
1194 iova += PAGE_SIZE;
1195 continue;
1196 }
1197
1198 size = PAGE_SIZE;
1199 p = phys + size;
1200 i = iova + size;
1201 while (i < dma->iova + dma->size &&
1202 p == iommu_iova_to_phys(d->domain, i)) {
1203 size += PAGE_SIZE;
1204 p += PAGE_SIZE;
1205 i += PAGE_SIZE;
1206 }
1207 } else {
1208 unsigned long pfn;
1209 unsigned long vaddr = dma->vaddr +
1210 (iova - dma->iova);
1211 size_t n = dma->iova + dma->size - iova;
1212 long npage;
1213
1214 npage = vfio_pin_pages_remote(dma, vaddr,
1215 n >> PAGE_SHIFT,
48d8476b 1216 &pfn, limit);
1217 if (npage <= 0) {
1218 WARN_ON(!npage);
1219 ret = (int)npage;
1220 return ret;
1221 }
1222
1223 phys = pfn << PAGE_SHIFT;
1224 size = npage << PAGE_SHIFT;
1225 }
1226
1227 ret = iommu_map(domain->domain, iova, phys,
1228 size, dma->prot | domain->prot);
1229 if (ret)
1230 return ret;
d93b3ac0 1231
1232 iova += size;
1233 }
a54eb550 1234 dma->iommu_mapped = true;
166fd7d9 1235 }
1ef3e2bc 1236 return 0;
1237}
1238
1239/*
1240 * We change our unmap behavior slightly depending on whether the IOMMU
1241 * supports fine-grained superpages. IOMMUs like AMD-Vi will use a superpage
1242 * for practically any contiguous power-of-two mapping we give it. This means
1243 * we don't need to look for contiguous chunks ourselves to make unmapping
1244 * more efficient. On IOMMUs with coarse-grained super pages, like Intel VT-d
1245 * with discrete 2M/1G/512G/1T superpages, identifying contiguous chunks
1246 * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
1247 * hugetlbfs is in use.
1248 */
1249static void vfio_test_domain_fgsp(struct vfio_domain *domain)
1250{
1251 struct page *pages;
1252 int ret, order = get_order(PAGE_SIZE * 2);
1253
1254 pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
1255 if (!pages)
1256 return;
1257
1258 ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
1259 IOMMU_READ | IOMMU_WRITE | domain->prot);
1260 if (!ret) {
1261 size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);
1262
1263 if (unmapped == PAGE_SIZE)
1264 iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE);
1265 else
1266 domain->fgsp = true;
1267 }
1268
1269 __free_pages(pages, order);
1270}
1271
1272static struct vfio_group *find_iommu_group(struct vfio_domain *domain,
1273 struct iommu_group *iommu_group)
1274{
1275 struct vfio_group *g;
1276
1277 list_for_each_entry(g, &domain->group_list, next) {
1278 if (g->iommu_group == iommu_group)
1279 return g;
1280 }
1281
1282 return NULL;
1283}
1284
9d3a4de4 1285static bool vfio_iommu_has_sw_msi(struct iommu_group *group, phys_addr_t *base)
1286{
1287 struct list_head group_resv_regions;
1288 struct iommu_resv_region *region, *next;
1289 bool ret = false;
1290
1291 INIT_LIST_HEAD(&group_resv_regions);
1292 iommu_get_group_resv_regions(group, &group_resv_regions);
1293 list_for_each_entry(region, &group_resv_regions, list) {
1294 /*
1295 * The presence of any 'real' MSI regions should take
1296 * precedence over the software-managed one if the
1297 * IOMMU driver happens to advertise both types.
1298 */
1299 if (region->type == IOMMU_RESV_MSI) {
1300 ret = false;
1301 break;
1302 }
1303
9d3a4de4 1304 if (region->type == IOMMU_RESV_SW_MSI) {
1305 *base = region->start;
1306 ret = true;
1307 }
1308 }
1309 list_for_each_entry_safe(region, next, &group_resv_regions, list)
1310 kfree(region);
1311 return ret;
1312}
1313
1314static int vfio_iommu_type1_attach_group(void *iommu_data,
1315 struct iommu_group *iommu_group)
1316{
1317 struct vfio_iommu *iommu = iommu_data;
7896c998 1318 struct vfio_group *group;
1ef3e2bc 1319 struct vfio_domain *domain, *d;
a54eb550 1320 struct bus_type *bus = NULL, *mdev_bus;
73fa0d10 1321 int ret;
9d72f87b 1322 bool resv_msi, msi_remap;
5d704992 1323 phys_addr_t resv_msi_base;
73fa0d10 1324
1325 mutex_lock(&iommu->lock);
1326
1ef3e2bc 1327 list_for_each_entry(d, &iommu->domain_list, next) {
7896c998 1328 if (find_iommu_group(d, iommu_group)) {
73fa0d10 1329 mutex_unlock(&iommu->lock);
1330 return -EINVAL;
1331 }
1332 }
1333
1334 if (iommu->external_domain) {
1335 if (find_iommu_group(iommu->external_domain, iommu_group)) {
1336 mutex_unlock(&iommu->lock);
1337 return -EINVAL;
1338 }
1339 }
1340
1341 group = kzalloc(sizeof(*group), GFP_KERNEL);
1342 domain = kzalloc(sizeof(*domain), GFP_KERNEL);
1343 if (!group || !domain) {
1344 ret = -ENOMEM;
1345 goto out_free;
1346 }
1347
1348 group->iommu_group = iommu_group;
1349
1350 /* Determine bus_type in order to allocate a domain */
1351 ret = iommu_group_for_each_dev(iommu_group, &bus, vfio_bus_type);
1352 if (ret)
1353 goto out_free;
1354
1355 mdev_bus = symbol_get(mdev_bus_type);
1356
1357 if (mdev_bus) {
1358 if ((bus == mdev_bus) && !iommu_present(bus)) {
1359 symbol_put(mdev_bus_type);
1360 if (!iommu->external_domain) {
1361 INIT_LIST_HEAD(&domain->group_list);
1362 iommu->external_domain = domain;
1363 } else
1364 kfree(domain);
1365
1366 list_add(&group->next,
1367 &iommu->external_domain->group_list);
1368 mutex_unlock(&iommu->lock);
1369 return 0;
1370 }
1371 symbol_put(mdev_bus_type);
1372 }
1373
1374 domain->domain = iommu_domain_alloc(bus);
1375 if (!domain->domain) {
1376 ret = -EIO;
1377 goto out_free;
1378 }
1379
1380 if (iommu->nesting) {
1381 int attr = 1;
1382
1383 ret = iommu_domain_set_attr(domain->domain, DOMAIN_ATTR_NESTING,
1384 &attr);
1385 if (ret)
1386 goto out_domain;
1387 }
1388
1389 ret = iommu_attach_group(domain->domain, iommu_group);
1390 if (ret)
1391 goto out_domain;
1392
9d3a4de4 1393 resv_msi = vfio_iommu_has_sw_msi(iommu_group, &resv_msi_base);
5d704992 1394
1395 INIT_LIST_HEAD(&domain->group_list);
1396 list_add(&group->next, &domain->group_list);
1397
1398 msi_remap = irq_domain_check_msi_remap() ||
1399 iommu_capable(bus, IOMMU_CAP_INTR_REMAP);
1400
1401 if (!allow_unsafe_interrupts && !msi_remap) {
1402 pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
1403 __func__);
1404 ret = -EPERM;
1405 goto out_detach;
1406 }
1407
eb165f05 1408 if (iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
1409 domain->prot |= IOMMU_CACHE;
1410
73fa0d10 1411 /*
1412 * Try to match an existing compatible domain. We don't want to
1413 * preclude an IOMMU driver supporting multiple bus_types and being
1414 * able to include different bus_types in the same IOMMU domain, so
1415 * we test whether the domains use the same iommu_ops rather than
1416 * testing if they're on the same bus_type.
73fa0d10 1417 */
1418 list_for_each_entry(d, &iommu->domain_list, next) {
1419 if (d->domain->ops == domain->domain->ops &&
1420 d->prot == domain->prot) {
1421 iommu_detach_group(domain->domain, iommu_group);
1422 if (!iommu_attach_group(d->domain, iommu_group)) {
1423 list_add(&group->next, &d->group_list);
1424 iommu_domain_free(domain->domain);
1425 kfree(domain);
1426 mutex_unlock(&iommu->lock);
1427 return 0;
1428 }
1429
1430 ret = iommu_attach_group(domain->domain, iommu_group);
1431 if (ret)
1432 goto out_domain;
1433 }
1434 }
1435
1436 vfio_test_domain_fgsp(domain);
1437
1438 /* replay mappings on new domains */
1439 ret = vfio_iommu_replay(iommu, domain);
1440 if (ret)
1441 goto out_detach;
1442
1443 if (resv_msi) {
1444 ret = iommu_get_msi_cookie(domain->domain, resv_msi_base);
1445 if (ret)
1446 goto out_detach;
1447 }
5d704992 1448
1ef3e2bc 1449 list_add(&domain->next, &iommu->domain_list);
1450
1451 mutex_unlock(&iommu->lock);
1452
1453 return 0;
1454
1455out_detach:
1456 iommu_detach_group(domain->domain, iommu_group);
1457out_domain:
1458 iommu_domain_free(domain->domain);
1459out_free:
1460 kfree(domain);
1461 kfree(group);
1462 mutex_unlock(&iommu->lock);
1463 return ret;
1464}
1465
1466static void vfio_iommu_unmap_unpin_all(struct vfio_iommu *iommu)
1467{
1468 struct rb_node *node;
1469
1470 while ((node = rb_first(&iommu->dma_list)))
1471 vfio_remove_dma(iommu, rb_entry(node, struct vfio_dma, node));
1472}
1473
1474static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu)
1475{
1476 struct rb_node *n, *p;
1477
1478 n = rb_first(&iommu->dma_list);
1479 for (; n; n = rb_next(n)) {
1480 struct vfio_dma *dma;
1481 long locked = 0, unlocked = 0;
1482
1483 dma = rb_entry(n, struct vfio_dma, node);
1484 unlocked += vfio_unmap_unpin(iommu, dma, false);
1485 p = rb_first(&dma->pfn_list);
1486 for (; p; p = rb_next(p)) {
1487 struct vfio_pfn *vpfn = rb_entry(p, struct vfio_pfn,
1488 node);
1489
1490 if (!is_invalid_reserved_pfn(vpfn->pfn))
1491 locked++;
1492 }
48d8476b 1493 vfio_lock_acct(dma, locked - unlocked, true);
1494 }
1495}
1496
1497static void vfio_sanity_check_pfn_list(struct vfio_iommu *iommu)
1498{
1499 struct rb_node *n;
1500
1501 n = rb_first(&iommu->dma_list);
1502 for (; n; n = rb_next(n)) {
1503 struct vfio_dma *dma;
1504
1505 dma = rb_entry(n, struct vfio_dma, node);
1506
1507 if (WARN_ON(!RB_EMPTY_ROOT(&dma->pfn_list)))
1508 break;
1509 }
1510 /* mdev vendor driver must unregister notifier */
1511 WARN_ON(iommu->notifier.head);
1512}
1513
1514static void vfio_iommu_type1_detach_group(void *iommu_data,
1515 struct iommu_group *iommu_group)
1516{
1517 struct vfio_iommu *iommu = iommu_data;
1ef3e2bc 1518 struct vfio_domain *domain;
1519 struct vfio_group *group;
1520
1521 mutex_lock(&iommu->lock);
1522
1523 if (iommu->external_domain) {
1524 group = find_iommu_group(iommu->external_domain, iommu_group);
1525 if (group) {
1526 list_del(&group->next);
1527 kfree(group);
1528
1529 if (list_empty(&iommu->external_domain->group_list)) {
1530 vfio_sanity_check_pfn_list(iommu);
1531
1532 if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
1533 vfio_iommu_unmap_unpin_all(iommu);
1534
1535 kfree(iommu->external_domain);
1536 iommu->external_domain = NULL;
1537 }
1538 goto detach_group_done;
1539 }
1540 }
1541
1ef3e2bc 1542 list_for_each_entry(domain, &iommu->domain_list, next) {
1543 group = find_iommu_group(domain, iommu_group);
1544 if (!group)
1545 continue;
1ef3e2bc 1546
1547 iommu_detach_group(domain->domain, iommu_group);
1548 list_del(&group->next);
1549 kfree(group);
1550 /*
1551 * Group ownership provides privilege; if the group list is
1552 * empty, the domain goes away. If it's the last domain with
1553 * an iommu and no external domain exists, then all the
1554 * mappings go away too. If it's the last domain with an iommu
1555 * and an external domain exists, update accounting.
1556 */
1557 if (list_empty(&domain->group_list)) {
1558 if (list_is_singular(&iommu->domain_list)) {
1559 if (!iommu->external_domain)
1560 vfio_iommu_unmap_unpin_all(iommu);
1561 else
1562 vfio_iommu_unmap_unpin_reaccount(iommu);
1563 }
1564 iommu_domain_free(domain->domain);
1565 list_del(&domain->next);
1566 kfree(domain);
73fa0d10 1567 }
a54eb550 1568 break;
1569 }
1570
a54eb550 1571detach_group_done:
1572 mutex_unlock(&iommu->lock);
1573}
1574
1575static void *vfio_iommu_type1_open(unsigned long arg)
1576{
1577 struct vfio_iommu *iommu;
1578
1579 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
1580 if (!iommu)
1581 return ERR_PTR(-ENOMEM);
1582
1583 switch (arg) {
1584 case VFIO_TYPE1_IOMMU:
1585 break;
1586 case VFIO_TYPE1_NESTING_IOMMU:
1587 iommu->nesting = true;
544c05a6 1588 /* fall through */
1589 case VFIO_TYPE1v2_IOMMU:
1590 iommu->v2 = true;
1591 break;
1592 default:
1593 kfree(iommu);
1594 return ERR_PTR(-EINVAL);
1595 }
1596
1ef3e2bc 1597 INIT_LIST_HEAD(&iommu->domain_list);
cd9b2268 1598 iommu->dma_list = RB_ROOT;
49285593 1599 iommu->dma_avail = dma_entry_limit;
73fa0d10 1600 mutex_init(&iommu->lock);
c086de81 1601 BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);
1602
1603 return iommu;
1604}
1605
1606static void vfio_release_domain(struct vfio_domain *domain, bool external)
1607{
1608 struct vfio_group *group, *group_tmp;
1609
1610 list_for_each_entry_safe(group, group_tmp,
1611 &domain->group_list, next) {
1612 if (!external)
1613 iommu_detach_group(domain->domain, group->iommu_group);
1614 list_del(&group->next);
1615 kfree(group);
1616 }
1617
1618 if (!external)
1619 iommu_domain_free(domain->domain);
1620}
1621
1622static void vfio_iommu_type1_release(void *iommu_data)
1623{
1624 struct vfio_iommu *iommu = iommu_data;
1ef3e2bc 1625 struct vfio_domain *domain, *domain_tmp;
1626
1627 if (iommu->external_domain) {
1628 vfio_release_domain(iommu->external_domain, true);
1629 vfio_sanity_check_pfn_list(iommu);
1630 kfree(iommu->external_domain);
1631 }
73fa0d10 1632
1ef3e2bc 1633 vfio_iommu_unmap_unpin_all(iommu);
73fa0d10 1634
1635 list_for_each_entry_safe(domain, domain_tmp,
1636 &iommu->domain_list, next) {
a54eb550 1637 vfio_release_domain(domain, false);
1638 list_del(&domain->next);
1639 kfree(domain);
73fa0d10 1640 }
1641 kfree(iommu);
1642}
1643
1644static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu)
1645{
1646 struct vfio_domain *domain;
1647 int ret = 1;
1648
1649 mutex_lock(&iommu->lock);
1650 list_for_each_entry(domain, &iommu->domain_list, next) {
1651 if (!(domain->prot & IOMMU_CACHE)) {
1652 ret = 0;
f5bfdbf2 1653 break;
aa429318 1654 }
73fa0d10 1655 }
aa429318 1656 mutex_unlock(&iommu->lock);
73fa0d10 1657
aa429318 1658 return ret;
1659}
1660
1661static long vfio_iommu_type1_ioctl(void *iommu_data,
1662 unsigned int cmd, unsigned long arg)
1663{
1664 struct vfio_iommu *iommu = iommu_data;
1665 unsigned long minsz;
1666
1667 if (cmd == VFIO_CHECK_EXTENSION) {
1668 switch (arg) {
1669 case VFIO_TYPE1_IOMMU:
1ef3e2bc 1670 case VFIO_TYPE1v2_IOMMU:
f5c9eceb 1671 case VFIO_TYPE1_NESTING_IOMMU:
73fa0d10 1672 return 1;
1673 case VFIO_DMA_CC_IOMMU:
1674 if (!iommu)
1675 return 0;
1676 return vfio_domains_have_iommu_cache(iommu);
1677 default:
1678 return 0;
1679 }
1680 } else if (cmd == VFIO_IOMMU_GET_INFO) {
1681 struct vfio_iommu_type1_info info;
1682
1683 minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes);
1684
1685 if (copy_from_user(&info, (void __user *)arg, minsz))
1686 return -EFAULT;
1687
1688 if (info.argsz < minsz)
1689 return -EINVAL;
1690
d4f50ee2 1691 info.flags = VFIO_IOMMU_INFO_PGSIZES;
73fa0d10 1692
1ef3e2bc 1693 info.iova_pgsizes = vfio_pgsize_bitmap(iommu);
73fa0d10 1694
1695 return copy_to_user((void __user *)arg, &info, minsz) ?
1696 -EFAULT : 0;
1697
1698 } else if (cmd == VFIO_IOMMU_MAP_DMA) {
1699 struct vfio_iommu_type1_dma_map map;
1700 uint32_t mask = VFIO_DMA_MAP_FLAG_READ |
1701 VFIO_DMA_MAP_FLAG_WRITE;
1702
1703 minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);
1704
1705 if (copy_from_user(&map, (void __user *)arg, minsz))
1706 return -EFAULT;
1707
1708 if (map.argsz < minsz || map.flags & ~mask)
1709 return -EINVAL;
1710
1711 return vfio_dma_do_map(iommu, &map);
1712
1713 } else if (cmd == VFIO_IOMMU_UNMAP_DMA) {
1714 struct vfio_iommu_type1_dma_unmap unmap;
166fd7d9 1715 long ret;
1716
1717 minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);
1718
1719 if (copy_from_user(&unmap, (void __user *)arg, minsz))
1720 return -EFAULT;
1721
1722 if (unmap.argsz < minsz || unmap.flags)
1723 return -EINVAL;
1724
1725 ret = vfio_dma_do_unmap(iommu, &unmap);
1726 if (ret)
1727 return ret;
1728
1729 return copy_to_user((void __user *)arg, &unmap, minsz) ?
1730 -EFAULT : 0;
1731 }
1732
1733 return -ENOTTY;
1734}
1735
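/*
 * Illustrative userspace sketch, not part of the original file: assuming a
 * container fd on which VFIO_SET_IOMMU(VFIO_TYPE1v2_IOMMU) has already been
 * applied, and a page-aligned buffer buf of page-aligned length len:
 *
 *	struct vfio_iommu_type1_dma_map map = {
 *		.argsz = sizeof(map),
 *		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *		.vaddr = (__u64)(uintptr_t)buf,
 *		.iova  = 0x100000,
 *		.size  = len,
 *	};
 *	ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
 *
 *	struct vfio_iommu_type1_dma_unmap unmap = {
 *		.argsz = sizeof(unmap),
 *		.iova  = 0x100000,
 *		.size  = len,
 *	};
 *	ioctl(container, VFIO_IOMMU_UNMAP_DMA, &unmap);
 */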
c086de81 1736static int vfio_iommu_type1_register_notifier(void *iommu_data,
22195cbd 1737 unsigned long *events,
1738 struct notifier_block *nb)
1739{
1740 struct vfio_iommu *iommu = iommu_data;
1741
1742 /* clear known events */
1743 *events &= ~VFIO_IOMMU_NOTIFY_DMA_UNMAP;
1744
1745 /* refuse to register if any unknown events remain */
1746 if (*events)
1747 return -EINVAL;
1748
1749 return blocking_notifier_chain_register(&iommu->notifier, nb);
1750}
1751
1752static int vfio_iommu_type1_unregister_notifier(void *iommu_data,
1753 struct notifier_block *nb)
1754{
1755 struct vfio_iommu *iommu = iommu_data;
1756
1757 return blocking_notifier_chain_unregister(&iommu->notifier, nb);
1758}
1759
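/*
 * Illustrative sketch, not part of the original file: an mdev vendor driver
 * registers for unmap notifications through the vfio_register_notifier()
 * wrapper, which ends up in the callbacks above, roughly:
 *
 *	unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
 *
 *	vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events, &nb);
 *	...
 *	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &nb);
 *
 * where nb is the driver's struct notifier_block; its callback must unpin
 * any pages in the range described by the VFIO_IOMMU_NOTIFY_DMA_UNMAP event.
 */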
73fa0d10 1760static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
1761 .name = "vfio-iommu-type1",
1762 .owner = THIS_MODULE,
1763 .open = vfio_iommu_type1_open,
1764 .release = vfio_iommu_type1_release,
1765 .ioctl = vfio_iommu_type1_ioctl,
1766 .attach_group = vfio_iommu_type1_attach_group,
1767 .detach_group = vfio_iommu_type1_detach_group,
1768 .pin_pages = vfio_iommu_type1_pin_pages,
1769 .unpin_pages = vfio_iommu_type1_unpin_pages,
1770 .register_notifier = vfio_iommu_type1_register_notifier,
1771 .unregister_notifier = vfio_iommu_type1_unregister_notifier,
1772};
1773
1774static int __init vfio_iommu_type1_init(void)
1775{
1776 return vfio_register_iommu_driver(&vfio_iommu_driver_ops_type1);
1777}
1778
1779static void __exit vfio_iommu_type1_cleanup(void)
1780{
1781 vfio_unregister_iommu_driver(&vfio_iommu_driver_ops_type1);
1782}
1783
1784module_init(vfio_iommu_type1_init);
1785module_exit(vfio_iommu_type1_cleanup);
1786
1787MODULE_VERSION(DRIVER_VERSION);
1788MODULE_LICENSE("GPL v2");
1789MODULE_AUTHOR(DRIVER_AUTHOR);
1790MODULE_DESCRIPTION(DRIVER_DESC);