From stefan.bader@canonical.com Wed Apr 7 14:42:11 2010
From: Gleb Natapov <gleb@redhat.com>
Date: Fri, 19 Mar 2010 15:47:31 +0100
Subject: KVM: x86 emulator: fix memory access during x86 emulation
To: stable@kernel.org
Cc: Marcelo Tosatti <mtosatti@redhat.com>, Avi Kivity <avi@redhat.com>, Gleb Natapov <gleb@redhat.com>
Message-ID: <1269010059-25309-4-git-send-email-stefan.bader@canonical.com>


From: Gleb Natapov <gleb@redhat.com>

commit 1871c6020d7308afb99127bba51f04548e7ca84e upstream

Currently, when the x86 emulator needs to access memory, the page walk is
done with the broadest permissions possible, so if the emulated instruction
was executed by a userspace process it can still access kernel memory. Fix
that by providing the correct memory access permissions to the page walker
during emulation.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
---
 arch/x86/include/asm/kvm_emulate.h |   14 +++
 arch/x86/include/asm/kvm_host.h    |    7 +
 arch/x86/kvm/emulate.c             |    6 -
 arch/x86/kvm/mmu.c                 |   17 +---
 arch/x86/kvm/mmu.h                 |    6 +
 arch/x86/kvm/paging_tmpl.h         |   11 ++-
 arch/x86/kvm/x86.c                 |  131 ++++++++++++++++++++++++++++---------
 7 files changed, 142 insertions(+), 50 deletions(-)

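[Note for reviewers: the sketch below is illustrative userspace C, not part
of the patch. It condenses the access-mask convention the patch introduces:
the PFERR_* values are the ones moved into arch/x86/kvm/mmu.h, and
access_mask() is a hypothetical stand-in for the CPL logic repeated in the
new kvm_mmu_gva_to_gpa_{read,fetch,write,system} helpers.]

#include <stdint.h>
#include <stdio.h>

#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK   (1U << 1)
#define PFERR_USER_MASK    (1U << 2)
#define PFERR_RSVD_MASK    (1U << 3)
#define PFERR_FETCH_MASK   (1U << 4)

/* Build the page-walker access mask the way the new helpers do: a CPL-3
 * vcpu gets the user bit, and writes/fetches each add their own bit. */
static uint32_t access_mask(int cpl, int is_write, int is_fetch)
{
        uint32_t access = (cpl == 3) ? PFERR_USER_MASK : 0;

        if (is_write)
                access |= PFERR_WRITE_MASK;
        if (is_fetch)
                access |= PFERR_FETCH_MASK;
        return access;
}

int main(void)
{
        printf("CPL-3 write: %#x\n", access_mask(3, 1, 0)); /* 0x6  */
        printf("CPL-0 fetch: %#x\n", access_mask(0, 0, 1)); /* 0x10 */
        printf("system read: %#x\n", access_mask(0, 0, 0)); /* 0x0  */
        return 0;
}

[Descriptor reads keep using read_std, which now maps to
kvm_read_guest_virt_system() and deliberately passes access == 0, matching
the "without checking CPL" comment on kvm_mmu_gva_to_gpa_system() below.]
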
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -54,13 +54,23 @@ struct x86_emulate_ctxt;
 struct x86_emulate_ops {
        /*
         * read_std: Read bytes of standard (non-emulated/special) memory.
-        *           Used for instruction fetch, stack operations, and others.
+        *           Used for descriptor reading.
         *  @addr:  [IN ] Linear address from which to read.
         *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
         *  @bytes: [IN ] Number of bytes to read from memory.
         */
        int (*read_std)(unsigned long addr, void *val,
-                       unsigned int bytes, struct kvm_vcpu *vcpu);
+                       unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
+
+       /*
+        * fetch: Read bytes of standard (non-emulated/special) memory.
+        *        Used for instruction fetch.
+        *  @addr:  [IN ] Linear address from which to read.
+        *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
+        *  @bytes: [IN ] Number of bytes to read from memory.
+        */
+       int (*fetch)(unsigned long addr, void *val,
+                    unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
 
        /*
         * read_emulated: Read bytes from emulated/special memory area.
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -256,7 +256,8 @@ struct kvm_mmu {
        void (*new_cr3)(struct kvm_vcpu *vcpu);
        int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
        void (*free)(struct kvm_vcpu *vcpu);
-       gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
+       gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
+                           u32 *error);
        void (*prefetch_page)(struct kvm_vcpu *vcpu,
                              struct kvm_mmu_page *page);
        int (*sync_page)(struct kvm_vcpu *vcpu,
@@ -645,6 +646,10 @@ void __kvm_mmu_free_some_pages(struct kv
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
+gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
+gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
+gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
+gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -612,7 +612,7 @@ static int do_fetch_insn_byte(struct x86
 
        if (linear < fc->start || linear >= fc->end) {
                size = min(15UL, PAGE_SIZE - offset_in_page(linear));
-               rc = ops->read_std(linear, fc->data, size, ctxt->vcpu);
+               rc = ops->fetch(linear, fc->data, size, ctxt->vcpu, NULL);
                if (rc)
                        return rc;
                fc->start = linear;
@@ -667,11 +667,11 @@ static int read_descriptor(struct x86_em
                op_bytes = 3;
        *address = 0;
        rc = ops->read_std((unsigned long)ptr, (unsigned long *)size, 2,
-                          ctxt->vcpu);
+                          ctxt->vcpu, NULL);
        if (rc)
                return rc;
        rc = ops->read_std((unsigned long)ptr + 2, address, op_bytes,
-                          ctxt->vcpu);
+                          ctxt->vcpu, NULL);
        return rc;
 }
 
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -136,12 +136,6 @@ module_param(oos_shadow, bool, 0644);
 #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
                        | PT64_NX_MASK)
 
-#define PFERR_PRESENT_MASK (1U << 0)
-#define PFERR_WRITE_MASK (1U << 1)
-#define PFERR_USER_MASK (1U << 2)
-#define PFERR_RSVD_MASK (1U << 3)
-#define PFERR_FETCH_MASK (1U << 4)
-
 #define PT_PDPE_LEVEL 3
 #define PT_DIRECTORY_LEVEL 2
 #define PT_PAGE_TABLE_LEVEL 1
@@ -1639,7 +1633,7 @@ struct page *gva_to_page(struct kvm_vcpu
 {
        struct page *page;
 
-       gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
+       gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
 
        if (gpa == UNMAPPED_GVA)
                return NULL;
@@ -2162,8 +2156,11 @@ void kvm_mmu_sync_roots(struct kvm_vcpu
        spin_unlock(&vcpu->kvm->mmu_lock);
 }
 
-static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
+static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
+                                 u32 access, u32 *error)
 {
+       if (error)
+               *error = 0;
        return vaddr;
 }
 
@@ -2747,7 +2744,7 @@ int kvm_mmu_unprotect_page_virt(struct k
        if (tdp_enabled)
                return 0;
 
-       gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
+       gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
 
        spin_lock(&vcpu->kvm->mmu_lock);
        r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
@@ -3245,7 +3242,7 @@ static void audit_mappings_page(struct k
                if (is_shadow_present_pte(ent) && !is_last_spte(ent, level))
                        audit_mappings_page(vcpu, ent, va, level - 1);
                else {
-                       gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
+                       gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, va, NULL);
                        gfn_t gfn = gpa >> PAGE_SHIFT;
                        pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);
                        hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -37,6 +37,12 @@
 #define PT32_ROOT_LEVEL 2
 #define PT32E_ROOT_LEVEL 3
 
+#define PFERR_PRESENT_MASK (1U << 0)
+#define PFERR_WRITE_MASK (1U << 1)
+#define PFERR_USER_MASK (1U << 2)
+#define PFERR_RSVD_MASK (1U << 3)
+#define PFERR_FETCH_MASK (1U << 4)
+
 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
 
 static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -491,18 +491,23 @@ static void FNAME(invlpg)(struct kvm_vcp
        spin_unlock(&vcpu->kvm->mmu_lock);
 }
 
-static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
+static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
+                              u32 *error)
 {
        struct guest_walker walker;
        gpa_t gpa = UNMAPPED_GVA;
        int r;
 
-       r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);
+       r = FNAME(walk_addr)(&walker, vcpu, vaddr,
+                            !!(access & PFERR_WRITE_MASK),
+                            !!(access & PFERR_USER_MASK),
+                            !!(access & PFERR_FETCH_MASK));
 
        if (r) {
                gpa = gfn_to_gpa(walker.gfn);
                gpa |= vaddr & ~PAGE_MASK;
-       }
+       } else if (error)
+               *error = walker.error_code;
 
        return gpa;
 }
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2505,14 +2505,41 @@ static int vcpu_mmio_read(struct kvm_vcp
        return kvm_io_bus_read(&vcpu->kvm->mmio_bus, addr, len, v);
 }
 
-static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
-                              struct kvm_vcpu *vcpu)
+gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+{
+       u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+       return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
+}
+
+ gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+{
+       u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+       access |= PFERR_FETCH_MASK;
+       return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
+}
+
+gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+{
+       u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+       access |= PFERR_WRITE_MASK;
+       return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
+}
+
+/* uses this to access any guest's mapped memory without checking CPL */
+gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+{
+       return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, 0, error);
+}
+
+static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
+                                     struct kvm_vcpu *vcpu, u32 access,
+                                     u32 *error)
 {
        void *data = val;
        int r = X86EMUL_CONTINUE;
 
        while (bytes) {
-               gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+               gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr, access, error);
                unsigned offset = addr & (PAGE_SIZE-1);
                unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
                int ret;
@@ -2535,14 +2562,37 @@ out:
        return r;
 }
 
+/* used for instruction fetching */
+static int kvm_fetch_guest_virt(gva_t addr, void *val, unsigned int bytes,
+                               struct kvm_vcpu *vcpu, u32 *error)
+{
+       u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+       return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
+                                         access | PFERR_FETCH_MASK, error);
+}
+
+static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
+                              struct kvm_vcpu *vcpu, u32 *error)
+{
+       u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+       return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
+                                         error);
+}
+
+static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes,
+                                     struct kvm_vcpu *vcpu, u32 *error)
+{
+       return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, error);
+}
+
 static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
-                               struct kvm_vcpu *vcpu)
+                               struct kvm_vcpu *vcpu, u32 *error)
 {
        void *data = val;
        int r = X86EMUL_CONTINUE;
 
        while (bytes) {
-               gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+               gpa_t gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, error);
                unsigned offset = addr & (PAGE_SIZE-1);
                unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
                int ret;
@@ -2572,6 +2622,7 @@ static int emulator_read_emulated(unsign
                                  struct kvm_vcpu *vcpu)
 {
        gpa_t gpa;
+       u32 error_code;
 
        if (vcpu->mmio_read_completed) {
                memcpy(val, vcpu->mmio_data, bytes);
@@ -2581,17 +2632,20 @@ static int emulator_read_emulated(unsign
                return X86EMUL_CONTINUE;
        }
 
-       gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+       gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, &error_code);
+
+       if (gpa == UNMAPPED_GVA) {
+               kvm_inject_page_fault(vcpu, addr, error_code);
+               return X86EMUL_PROPAGATE_FAULT;
+       }
 
        /* For APIC access vmexit */
        if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
                goto mmio;
 
-       if (kvm_read_guest_virt(addr, val, bytes, vcpu)
+       if (kvm_read_guest_virt(addr, val, bytes, vcpu, NULL)
                        == X86EMUL_CONTINUE)
                return X86EMUL_CONTINUE;
-       if (gpa == UNMAPPED_GVA)
-               return X86EMUL_PROPAGATE_FAULT;
 
 mmio:
        /*
@@ -2630,11 +2684,12 @@ static int emulator_write_emulated_onepa
                                           struct kvm_vcpu *vcpu)
 {
        gpa_t gpa;
+       u32 error_code;
 
-       gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+       gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, &error_code);
 
        if (gpa == UNMAPPED_GVA) {
-               kvm_inject_page_fault(vcpu, addr, 2);
+               kvm_inject_page_fault(vcpu, addr, error_code);
                return X86EMUL_PROPAGATE_FAULT;
        }
 
@@ -2698,7 +2753,7 @@ static int emulator_cmpxchg_emulated(uns
                char *kaddr;
                u64 val;
 
-               gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+               gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
 
                if (gpa == UNMAPPED_GVA ||
                    (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
@@ -2777,7 +2832,7 @@ void kvm_report_emulation_failure(struct
 
        rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
 
-       kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu);
+       kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu, NULL);
 
        printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
               context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
@@ -2785,7 +2840,8 @@ void kvm_report_emulation_failure(struct
 EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
 
 static struct x86_emulate_ops emulate_ops = {
-       .read_std            = kvm_read_guest_virt,
+       .read_std            = kvm_read_guest_virt_system,
+       .fetch               = kvm_fetch_guest_virt,
        .read_emulated       = emulator_read_emulated,
        .write_emulated      = emulator_write_emulated,
        .cmpxchg_emulated    = emulator_cmpxchg_emulated,
@@ -2922,12 +2978,17 @@ static int pio_copy_data(struct kvm_vcpu
        gva_t q = vcpu->arch.pio.guest_gva;
        unsigned bytes;
        int ret;
+       u32 error_code;
 
        bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
        if (vcpu->arch.pio.in)
-               ret = kvm_write_guest_virt(q, p, bytes, vcpu);
+               ret = kvm_write_guest_virt(q, p, bytes, vcpu, &error_code);
        else
-               ret = kvm_read_guest_virt(q, p, bytes, vcpu);
+               ret = kvm_read_guest_virt(q, p, bytes, vcpu, &error_code);
+
+       if (ret == X86EMUL_PROPAGATE_FAULT)
+               kvm_inject_page_fault(vcpu, q, error_code);
+
        return ret;
 }
 
@@ -2948,7 +3009,7 @@ int complete_pio(struct kvm_vcpu *vcpu)
        if (io->in) {
                r = pio_copy_data(vcpu);
                if (r)
-                       return r;
+                       goto out;
        }
 
        delta = 1;
@@ -2975,7 +3036,7 @@ int complete_pio(struct kvm_vcpu *vcpu)
                        kvm_register_write(vcpu, VCPU_REGS_RSI, val);
                }
        }
-
+out:
        io->count -= io->cur_count;
        io->cur_count = 0;
 
@@ -3095,10 +3156,8 @@ int kvm_emulate_pio_string(struct kvm_vc
        if (!vcpu->arch.pio.in) {
                /* string PIO write */
                ret = pio_copy_data(vcpu);
-               if (ret == X86EMUL_PROPAGATE_FAULT) {
-                       kvm_inject_gp(vcpu, 0);
+               if (ret == X86EMUL_PROPAGATE_FAULT)
                        return 1;
-               }
                if (ret == 0 && !pio_string_write(vcpu)) {
                        complete_pio(vcpu);
                        if (vcpu->arch.pio.count == 0)
@@ -4078,7 +4137,9 @@ static int load_guest_segment_descriptor
                kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
                return 1;
        }
-       return kvm_read_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
+       return kvm_read_guest_virt_system(dtable.base + index*8,
+                                         seg_desc, sizeof(*seg_desc),
+                                         vcpu, NULL);
 }
 
 /* allowed just for 8 bytes segments */
@@ -4092,15 +4153,23 @@ static int save_guest_segment_descriptor
 
        if (dtable.limit < index * 8 + 7)
                return 1;
-       return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
+       return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu, NULL);
+}
+
+static gpa_t get_tss_base_addr_write(struct kvm_vcpu *vcpu,
+                                    struct desc_struct *seg_desc)
+{
+       u32 base_addr = get_desc_base(seg_desc);
+
+       return kvm_mmu_gva_to_gpa_write(vcpu, base_addr, NULL);
 }
 
-static gpa_t get_tss_base_addr(struct kvm_vcpu *vcpu,
+static gpa_t get_tss_base_addr_read(struct kvm_vcpu *vcpu,
                               struct desc_struct *seg_desc)
 {
        u32 base_addr = get_desc_base(seg_desc);
 
-       return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
+       return kvm_mmu_gva_to_gpa_read(vcpu, base_addr, NULL);
 }
 
 static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
@@ -4303,7 +4372,7 @@ static int kvm_task_switch_16(struct kvm
                           sizeof tss_segment_16))
                goto out;
 
-       if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
+       if (kvm_read_guest(vcpu->kvm, get_tss_base_addr_read(vcpu, nseg_desc),
                           &tss_segment_16, sizeof tss_segment_16))
                goto out;
 
@@ -4311,7 +4380,7 @@ static int kvm_task_switch_16(struct kvm
                tss_segment_16.prev_task_link = old_tss_sel;
 
                if (kvm_write_guest(vcpu->kvm,
-                                   get_tss_base_addr(vcpu, nseg_desc),
+                                   get_tss_base_addr_write(vcpu, nseg_desc),
                                    &tss_segment_16.prev_task_link,
                                    sizeof tss_segment_16.prev_task_link))
                        goto out;
@@ -4342,7 +4411,7 @@ static int kvm_task_switch_32(struct kvm
                           sizeof tss_segment_32))
                goto out;
 
-       if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
+       if (kvm_read_guest(vcpu->kvm, get_tss_base_addr_read(vcpu, nseg_desc),
                           &tss_segment_32, sizeof tss_segment_32))
                goto out;
 
@@ -4350,7 +4419,7 @@ static int kvm_task_switch_32(struct kvm
                tss_segment_32.prev_task_link = old_tss_sel;
 
                if (kvm_write_guest(vcpu->kvm,
-                                   get_tss_base_addr(vcpu, nseg_desc),
+                                   get_tss_base_addr_write(vcpu, nseg_desc),
                                    &tss_segment_32.prev_task_link,
                                    sizeof tss_segment_32.prev_task_link))
                        goto out;
@@ -4373,7 +4442,7 @@ int kvm_task_switch(struct kvm_vcpu *vcp
        u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
        u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
 
-       old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);
+       old_tss_base = kvm_mmu_gva_to_gpa_write(vcpu, old_tss_base, NULL);
 
        /* FIXME: Handle errors. Failure to read either TSS or their
         * descriptors should generate a pagefault.
@@ -4582,7 +4651,7 @@ int kvm_arch_vcpu_ioctl_translate(struct
 
        vcpu_load(vcpu);
        down_read(&vcpu->kvm->slots_lock);
-       gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
+       gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
        up_read(&vcpu->kvm->slots_lock);
        tr->physical_address = gpa;
        tr->valid = gpa != UNMAPPED_GVA;
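
[Illustrative follow-up, not part of the patch: a toy model of why passing
the real access bits to the walker closes the hole. check_access() is a
simplified, hypothetical stand-in for the permission test walk_addr
performs; the real walker lives in arch/x86/kvm/paging_tmpl.h.]

#include <stdint.h>
#include <stdio.h>

#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK   (1U << 1)
#define PFERR_USER_MASK    (1U << 2)

/* Simplified guest PTE: present/writable/user permission bits. */
struct pte { int present, writable, user; };

/* Returns 0 if the access is allowed, else a PFERR_* error code in the
 * spirit of walker.error_code. */
static uint32_t check_access(struct pte p, uint32_t access)
{
        uint32_t err = access;

        if (!p.present)
                return err;                     /* not-present fault */
        err |= PFERR_PRESENT_MASK;              /* protection fault */
        if ((access & PFERR_WRITE_MASK) && !p.writable)
                return err;
        if ((access & PFERR_USER_MASK) && !p.user)
                return err;                     /* the case this patch fixes */
        return 0;
}

int main(void)
{
        /* A kernel-only page: present, writable, but supervisor-only. */
        struct pte kernel_page = { .present = 1, .writable = 1, .user = 0 };

        /* Old behaviour: the emulator walked with access == 0, so an
         * emulated CPL-3 read of kernel memory "succeeded" (prints 0). */
        printf("old walk: %#x\n", check_access(kernel_page, 0));

        /* New behaviour: PFERR_USER_MASK is passed through, the walk
         * fails (prints 0x5), and the emulator call sites above inject
         * that error code as a guest page fault. */
        printf("new walk: %#x\n", check_access(kernel_page, PFERR_USER_MASK));
        return 0;
}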