From cbb577cc37fca4093444fcd778fe9de880b30ff2 Mon Sep 17 00:00:00 2001
From: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Date: Mon, 12 Apr 2010 19:35:35 +0900
Subject: KVM: fix the handling of dirty bitmaps to avoid overflows

From: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>

(Cherry-picked from commit 87bf6e7de1134f48681fd2ce4b7c1ec45458cb6d)

Int is not long enough to store the size of a dirty bitmap.

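For illustration only (this block is not part of the patch): a minimal
userspace sketch, assuming a 64-bit build and a deliberately huge,
hypothetical slot size, of how the byte count computed by
ALIGN(npages, BITS_PER_LONG) / 8 is truncated once it is stored in an int.

#include <stdio.h>

/* Simplified stand-ins for the kernel macros, for this sketch only. */
#define BITS_PER_LONG	64
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long npages = 1UL << 36;	/* hypothetical huge memslot */
	unsigned long bytes = ALIGN(npages, BITS_PER_LONG) / 8;
	int truncated = ALIGN(npages, BITS_PER_LONG) / 8;

	printf("unsigned long: %lu bytes\n", bytes);	/* 8589934592 */
	printf("int:           %d bytes\n", truncated);	/* typically 0: high bits lost */
	return 0;
}
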
This patch fixes this problem with the introduction of a wrapper
function to calculate the sizes of dirty bitmaps.

Note: in mark_page_dirty(), we have to consider the fact that
__set_bit() takes the offset as int, not long.

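Again for illustration only (not part of the patch), with a hypothetical
rel_gfn: computing the word index in unsigned long and passing only the
in-word remainder to the bit helpers keeps the value they receive below
BITS_PER_LONG, which is the approach the mark_page_dirty() hunk below takes.

#include <stdio.h>

#define BITS_PER_LONG	64

int main(void)
{
	unsigned long rel_gfn = (1UL << 33) + 5;	/* hypothetical: above INT_MAX */
	unsigned long word = rel_gfn / BITS_PER_LONG;	/* index into the bitmap words */
	int offset = rel_gfn % BITS_PER_LONG;		/* always 0..BITS_PER_LONG-1 */

	printf("rel_gfn as int: %d\n", (int)rel_gfn);	/* typically 5: high bits lost */
	printf("word %lu, offset %d\n", word, offset);	/* 134217728, 5 */
	return 0;
}
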
Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>

---
 arch/ia64/kvm/kvm-ia64.c |    9 +++++----
 arch/x86/kvm/x86.c       |    4 ++--
 include/linux/kvm_host.h |    5 +++++
 virt/kvm/kvm_main.c      |   13 ++++++++-----
 4 files changed, 20 insertions(+), 11 deletions(-)

--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1797,7 +1797,8 @@ static int kvm_ia64_sync_dirty_log(struc
 {
 	struct kvm_memory_slot *memslot;
 	int r, i;
-	long n, base;
+	long base;
+	unsigned long n;
 	unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
 			offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
 
@@ -1810,7 +1811,7 @@ static int kvm_ia64_sync_dirty_log(struc
 	if (!memslot->dirty_bitmap)
 		goto out;
 
-	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+	n = kvm_dirty_bitmap_bytes(memslot);
 	base = memslot->base_gfn / BITS_PER_LONG;
 
 	for (i = 0; i < n/sizeof(long); ++i) {
@@ -1826,7 +1827,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kv
 		struct kvm_dirty_log *log)
 {
 	int r;
-	int n;
+	unsigned long n;
 	struct kvm_memory_slot *memslot;
 	int is_dirty = 0;
 
@@ -1844,7 +1845,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kv
 	if (is_dirty) {
 		kvm_flush_remote_tlbs(kvm);
 		memslot = &kvm->memslots[log->slot];
-		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+		n = kvm_dirty_bitmap_bytes(memslot);
 		memset(memslot->dirty_bitmap, 0, n);
 	}
 	r = 0;
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2133,7 +2133,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kv
 		struct kvm_dirty_log *log)
 {
 	int r;
-	int n;
+	unsigned long n;
 	struct kvm_memory_slot *memslot;
 	int is_dirty = 0;
 
@@ -2149,7 +2149,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kv
 		kvm_mmu_slot_remove_write_access(kvm, log->slot);
 		spin_unlock(&kvm->mmu_lock);
 		memslot = &kvm->memslots[log->slot];
-		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+		n = kvm_dirty_bitmap_bytes(memslot);
 		memset(memslot->dirty_bitmap, 0, n);
 	}
 	r = 0;
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -116,6 +116,11 @@ struct kvm_memory_slot {
 	int user_alloc;
 };
 
+static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
+{
+	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+}
+
 struct kvm_kernel_irq_routing_entry {
 	u32 gsi;
 	u32 type;
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1226,7 +1226,7 @@ skip_lpage:
 
 	/* Allocate page dirty bitmap if needed */
 	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
-		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
+		unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new);
 
 		new.dirty_bitmap = vmalloc(dirty_bytes);
 		if (!new.dirty_bitmap)
@@ -1309,7 +1309,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
 {
 	struct kvm_memory_slot *memslot;
 	int r, i;
-	int n;
+	unsigned long n;
 	unsigned long any = 0;
 
 	r = -EINVAL;
@@ -1321,7 +1321,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
 	if (!memslot->dirty_bitmap)
 		goto out;
 
-	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+	n = kvm_dirty_bitmap_bytes(memslot);
 
 	for (i = 0; !any && i < n/sizeof(long); ++i)
 		any = memslot->dirty_bitmap[i];
@@ -1663,10 +1663,13 @@ void mark_page_dirty(struct kvm *kvm, gf
 	memslot = gfn_to_memslot_unaliased(kvm, gfn);
 	if (memslot && memslot->dirty_bitmap) {
 		unsigned long rel_gfn = gfn - memslot->base_gfn;
+		unsigned long *p = memslot->dirty_bitmap +
+					rel_gfn / BITS_PER_LONG;
+		int offset = rel_gfn % BITS_PER_LONG;
 
 		/* avoid RMW */
-		if (!test_bit(rel_gfn, memslot->dirty_bitmap))
-			set_bit(rel_gfn, memslot->dirty_bitmap);
+		if (!test_bit(offset, p))
+			set_bit(offset, p);
 	}
 }
 