[stable queue commit 556f9dcf, queued by Greg Kroah-Hartman]
From 32f6daad4651a748a58a3ab6da0611862175722f Mon Sep 17 00:00:00 2001
From: Alex Williamson <alex.williamson@redhat.com>
Date: Wed, 11 Apr 2012 09:51:49 -0600
Subject: KVM: unmap pages from the iommu when slots are removed

From: Alex Williamson <alex.williamson@redhat.com>

commit 32f6daad4651a748a58a3ab6da0611862175722f upstream.

We've been adding new mappings, but not destroying old mappings.
This can lead to a page leak as pages are pinned using
get_user_pages, but only unpinned with put_page if they still
exist in the memslots list on vm shutdown.  A memslot that is
destroyed while an iommu domain is enabled for the guest will
therefore result in an elevated page reference count that is
never cleared.

Additionally, without this fix, the iommu is only programmed
with the first translation for a gpa.  This can result in
peer-to-peer errors if a mapping is destroyed and replaced by a
new mapping at the same gpa as the iommu will still be pointing
to the original, pinned memory address.

Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 include/linux/kvm_host.h |    6 ++++++
 virt/kvm/iommu.c         |    7 ++++++-
 virt/kvm/kvm_main.c      |    5 +++--
 3 files changed, 15 insertions(+), 3 deletions(-)
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -593,6 +593,7 @@ void kvm_free_irq_source_id(struct kvm *
 
 #ifdef CONFIG_IOMMU_API
 int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
+void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
 int kvm_iommu_map_guest(struct kvm *kvm);
 int kvm_iommu_unmap_guest(struct kvm *kvm);
 int kvm_assign_device(struct kvm *kvm,
@@ -606,6 +607,11 @@ static inline int kvm_iommu_map_pages(st
 	return 0;
 }
 
+static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
+					 struct kvm_memory_slot *slot)
+{
+}
+
 static inline int kvm_iommu_map_guest(struct kvm *kvm)
 {
 	return -ENODEV;
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -310,6 +310,11 @@ static void kvm_iommu_put_pages(struct k
 	}
 }
 
+void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
+{
+	kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages);
+}
+
 static int kvm_iommu_unmap_memslots(struct kvm *kvm)
 {
 	int idx;
@@ -320,7 +325,7 @@ static int kvm_iommu_unmap_memslots(stru
 	slots = kvm_memslots(kvm);
 
 	kvm_for_each_memslot(memslot, slots)
-		kvm_iommu_put_pages(kvm, memslot->base_gfn, memslot->npages);
+		kvm_iommu_unmap_pages(kvm, memslot);
 
 	srcu_read_unlock(&kvm->srcu, idx);
 
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -873,12 +873,13 @@ skip_lpage:
 	if (r)
 		goto out_free;
 
-	/* map the pages in iommu page table */
+	/* map/unmap the pages in iommu page table */
 	if (npages) {
 		r = kvm_iommu_map_pages(kvm, &new);
 		if (r)
 			goto out_free;
-	}
+	} else
+		kvm_iommu_unmap_pages(kvm, &old);
 
 	r = -ENOMEM;
 	slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),