/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled multiple times, once per guest
 * pte format (64-bit, 32-bit and EPT).
 */
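/*
 * mmu.c instantiates this template by defining PTTYPE and re-including the
 * file, roughly:
 *
 *	#define PTTYPE_EPT 18
 *	#define PTTYPE PTTYPE_EPT
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 * (sketch of the include pattern; see mmu.c for the exact order)
 */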
#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
	#define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
	#define PT_HAVE_ACCESSED_DIRTY(mmu) true
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#define CMPXCHG cmpxchg
	#else
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
	#define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
	#define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
	#define PT_HAVE_ACCESSED_DIRTY(mmu) true
	#define CMPXCHG cmpxchg
#elif PTTYPE == PTTYPE_EPT
	#define pt_element_t u64
	#define guest_walker guest_walkerEPT
	#define FNAME(name) ept_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#define PT_GUEST_DIRTY_SHIFT 9
	#define PT_GUEST_ACCESSED_SHIFT 8
	#define PT_HAVE_ACCESSED_DIRTY(mmu) ((mmu)->ept_ad)
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS 4
#else
	#error Invalid PTTYPE value
#endif
#define PT_GUEST_DIRTY_MASK    (1 << PT_GUEST_DIRTY_SHIFT)
#define PT_GUEST_ACCESSED_MASK (1 << PT_GUEST_ACCESSED_SHIFT)
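/*
 * For 32-bit and 64-bit guest paging the shifts above select the
 * architectural accessed/dirty bits (bits 5 and 6); for EPT they select
 * bits 8 and 9, and are only honoured when the nested EPT controls enable
 * A/D tracking (PT_HAVE_ACCESSED_DIRTY checks mmu->ept_ad).
 */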
#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PT_PAGE_TABLE_LEVEL)
/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;
	unsigned max_level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t ptes[PT_MAX_FULL_LEVELS];
	pt_element_t prefetch_ptes[PTE_PREFETCH_NUM];
	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
	pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS];
	bool pte_writable[PT_MAX_FULL_LEVELS];
	unsigned pt_access;
	unsigned pte_access;
	gfn_t gfn;
	struct x86_exception fault;
};
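/*
 * The per-level arrays above are indexed by (level - 1): entry
 * walker->max_level - 1 describes the root table and entry 0 the last-level
 * table.  table_gfn, pte_gpa and ptep_user record where each visited guest
 * pte lives, so the walk can later be re-checked and its accessed/dirty
 * bits updated in place.
 */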
static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
{
	return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}
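/*
 * protect_clean_gpte: drop write permission from *access while the gpte's
 * dirty bit is clear, so the first guest write still faults into KVM and
 * the dirty bit can be set before the mapping becomes writable.
 */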
static inline void FNAME(protect_clean_gpte)(struct kvm_mmu *mmu, unsigned *access,
					     unsigned gpte)
{
	unsigned mask;

	/* dirty bit is not supported, so no need to track it */
	if (!PT_HAVE_ACCESSED_DIRTY(mmu))
		return;

	BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);

	mask = (unsigned)~ACC_WRITE_MASK;
	/* Allow write access to dirty gptes */
	mask |= (gpte >> (PT_GUEST_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) &
		PT_WRITABLE_MASK;
	*access &= mask;
}
static inline int FNAME(is_present_gpte)(unsigned long pte)
{
#if PTTYPE != PTTYPE_EPT
	return pte & PT_PRESENT_MASK;
#else
	return pte & 7;
#endif
}
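/*
 * cmpxchg_gpte updates a guest pte in place (to set its accessed/dirty
 * bits) through the userspace mapping of the guest page table.  Using
 * cmpxchg means a concurrent guest update is detected rather than
 * overwritten; a non-zero return tells the caller the pte changed under us.
 */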
static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			       pt_element_t __user *ptep_user, unsigned index,
			       pt_element_t orig_pte, pt_element_t new_pte)
{
	int npages;
	pt_element_t ret;
	pt_element_t *table;
	struct page *page;

	npages = get_user_pages_fast((unsigned long)ptep_user, 1, FOLL_WRITE, &page);
	if (likely(npages == 1)) {
		table = kmap_atomic(page);
		ret = CMPXCHG(&table[index], orig_pte, new_pte);
		kunmap_atomic(table);

		kvm_release_page_dirty(page);
	} else {
		struct vm_area_struct *vma;
		unsigned long vaddr = (unsigned long)ptep_user & PAGE_MASK;
		unsigned long pfn;
		unsigned long paddr;

		down_read(&current->mm->mmap_sem);
		vma = find_vma_intersection(current->mm, vaddr, vaddr + PAGE_SIZE);
		if (!vma || !(vma->vm_flags & VM_PFNMAP)) {
			up_read(&current->mm->mmap_sem);
			return -EFAULT;
		}
		pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		paddr = pfn << PAGE_SHIFT;
		table = memremap(paddr, PAGE_SIZE, MEMREMAP_WB);
		if (!table) {
			up_read(&current->mm->mmap_sem);
			return -EFAULT;
		}
		ret = CMPXCHG(&table[index], orig_pte, new_pte);
		memunmap(table);
		up_read(&current->mm->mmap_sem);
	}

	return (ret != orig_pte);
}
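/*
 * Returns true (and drops the spte) when a gpte should not be prefetched:
 * reserved bits set, not present, or not yet accessed while A/D bits are
 * in use.
 */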
static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
					 struct kvm_mmu_page *sp, u64 *spte,
					 u64 gpte)
{
	if (is_rsvd_bits_set(vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
		goto no_present;

	if (!FNAME(is_present_gpte)(gpte))
		goto no_present;

	/* if the accessed bit is not supported, prefetch non-accessed gptes too */
	if (PT_HAVE_ACCESSED_DIRTY(vcpu->arch.mmu) &&
	    !(gpte & PT_GUEST_ACCESSED_MASK))
		goto no_present;

	return false;

no_present:
	drop_spte(vcpu->kvm, spte);
	return true;
}
/*
 * For PTTYPE_EPT, a page table can be executable but not readable
 * on supported processors. Therefore, set_spte does not automatically
 * set bit 0 if execute only is supported. Here, we repurpose ACC_USER_MASK
 * to signify readability since it isn't used in the EPT case.
 */
static inline unsigned FNAME(gpte_access)(u64 gpte)
{
	unsigned access;
#if PTTYPE == PTTYPE_EPT
	access = ((gpte & VMX_EPT_WRITABLE_MASK) ? ACC_WRITE_MASK : 0) |
		((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK : 0) |
		((gpte & VMX_EPT_READABLE_MASK) ? ACC_USER_MASK : 0);
#else
	BUILD_BUG_ON(ACC_EXEC_MASK != PT_PRESENT_MASK);
	BUILD_BUG_ON(ACC_EXEC_MASK != 1);
	access = gpte & (PT_WRITABLE_MASK | PT_USER_MASK | PT_PRESENT_MASK);
	/* Combine NX with P (which is set here) to get ACC_EXEC_MASK. */
	access ^= (gpte >> PT64_NX_SHIFT);
#endif

	return access;
}
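/*
 * Walk back over the cached guest ptes and set their accessed bits (and the
 * dirty bit of the final pte on a write fault).  A positive return from
 * cmpxchg_gpte means the guest changed a pte under us and the caller must
 * retry the walk; a negative return is an error.
 */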
static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
					     struct kvm_mmu *mmu,
					     struct guest_walker *walker,
					     int write_fault)
{
	unsigned level, index;
	pt_element_t pte, orig_pte;
	pt_element_t __user *ptep_user;
	gfn_t table_gfn;
	int ret;

	/* dirty/accessed bits are not supported, so no need to update them */
	if (!PT_HAVE_ACCESSED_DIRTY(mmu))
		return 0;

	for (level = walker->max_level; level >= walker->level; --level) {
		pte = orig_pte = walker->ptes[level - 1];
		table_gfn = walker->table_gfn[level - 1];
		ptep_user = walker->ptep_user[level - 1];
		index = offset_in_page(ptep_user) / sizeof(pt_element_t);
		if (!(pte & PT_GUEST_ACCESSED_MASK)) {
			trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(pte));
			pte |= PT_GUEST_ACCESSED_MASK;
		}
		if (level == walker->level && write_fault &&
		    !(pte & PT_GUEST_DIRTY_MASK)) {
			trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
#if PTTYPE == PTTYPE_EPT
			if (kvm_arch_write_log_dirty(vcpu))
				return -EINVAL;
#endif
			pte |= PT_GUEST_DIRTY_MASK;
		}
		if (pte == orig_pte)
			continue;

		/*
		 * If the slot is read-only, simply do not process the accessed
		 * and dirty bits.  This is the correct thing to do if the slot
		 * is ROM, and page tables in read-as-ROM/write-as-MMIO slots
		 * are only supported if the accessed and dirty bits are already
		 * set in the ROM (so that MMIO writes are never needed).
		 *
		 * Note that NPT does not allow this at all and faults, since
		 * it always wants nested page table entries for the guest
		 * page tables to be writable.  And EPT works but will simply
		 * overwrite the read-only memory to set the accessed and dirty
		 * bits.
		 */
		if (unlikely(!walker->pte_writable[level - 1]))
			continue;

		ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte);
		if (ret)
			return ret;

		kvm_vcpu_mark_page_dirty(vcpu, table_gfn);
		walker->ptes[level - 1] = pte;
	}
	return 0;
}
static inline unsigned FNAME(gpte_pkeys)(struct kvm_vcpu *vcpu, u64 gpte)
{
	unsigned pkeys = 0;
#if PTTYPE == 64
	pte_t pte = {.pte = gpte};

	pkeys = pte_flags_pkey(pte_flags(pte));
#endif
	return pkeys;
}
/*
 * Fetch a guest pte for a guest virtual address
 */
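/*
 * Returns 1 if the walk succeeded (walker->gfn and the access bits are
 * valid) and 0 if it did not, in which case walker->fault describes the
 * page fault to inject into the guest.
 */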
static int FNAME(walk_addr_generic)(struct guest_walker *walker,
				    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				    gva_t addr, u32 access)
{
	int ret;
	pt_element_t pte;
	pt_element_t __user *uninitialized_var(ptep_user);
	gfn_t table_gfn;
	u64 pt_access, pte_access;
	unsigned index, accessed_dirty, pte_pkey;
	unsigned nested_access;
	gpa_t pte_gpa;
	bool have_ad;
	int offset;
	u64 walk_nx_mask = 0;
	const int write_fault = access & PFERR_WRITE_MASK;
	const int user_fault  = access & PFERR_USER_MASK;
	const int fetch_fault = access & PFERR_FETCH_MASK;
	u16 errcode = 0;
	gpa_t real_gpa;
	gfn_t gfn;

	trace_kvm_mmu_pagetable_walk(addr, access);
retry_walk:
	walker->level = mmu->root_level;
	pte           = mmu->get_cr3(vcpu);
	have_ad       = PT_HAVE_ACCESSED_DIRTY(mmu);
#if PTTYPE == 64
	walk_nx_mask = 1ULL << PT64_NX_SHIFT;
	if (walker->level == PT32E_ROOT_LEVEL) {
		pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
		trace_kvm_mmu_paging_element(pte, walker->level);
		if (!FNAME(is_present_gpte)(pte))
			goto error;
		--walker->level;
	}
#endif
	walker->max_level = walker->level;
	ASSERT(!(is_long_mode(vcpu) && !is_pae(vcpu)));

	/*
	 * FIXME: on Intel processors, loads of the PDPTE registers for PAE paging
	 * by the MOV to CR instruction are treated as reads and do not cause the
	 * processor to set the dirty flag in any EPT paging-structure entry.
	 */
	nested_access = (have_ad ? PFERR_WRITE_MASK : 0) | PFERR_USER_MASK;
	pte_access = ~0;
	++walker->level;

	do {
		gfn_t real_gfn;
		unsigned long host_addr;

		pt_access = pte_access;
		--walker->level;

		index = PT_INDEX(addr, walker->level);
		table_gfn = gpte_to_gfn(pte);
		offset    = index * sizeof(pt_element_t);
		pte_gpa   = gfn_to_gpa(table_gfn) + offset;

		BUG_ON(walker->level < 1);
		walker->table_gfn[walker->level - 1] = table_gfn;
		walker->pte_gpa[walker->level - 1] = pte_gpa;

		real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
					      nested_access,
					      &walker->fault);

		/*
		 * FIXME: This can happen if emulation (for an INS/OUTS
		 * instruction) triggers a nested page fault.  The exit
		 * qualification / exit info field will incorrectly have
		 * "guest page access" as the nested page fault's cause,
		 * instead of "guest page structure access".  To fix this,
		 * the x86_exception struct should be augmented with enough
		 * information to fix the exit_qualification or exit_info_1
		 * fields.
		 */
		if (unlikely(real_gfn == UNMAPPED_GVA))
			return 0;

		real_gfn = gpa_to_gfn(real_gfn);

		host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, real_gfn,
					    &walker->pte_writable[walker->level - 1]);
		if (unlikely(kvm_is_error_hva(host_addr)))
			goto error;

		ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
		if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
			goto error;
		walker->ptep_user[walker->level - 1] = ptep_user;

		trace_kvm_mmu_paging_element(pte, walker->level);

		/*
		 * Inverting the NX bit lets us AND it like the other
		 * permission bits.
		 */
		pte_access = pt_access & (pte ^ walk_nx_mask);

		if (unlikely(!FNAME(is_present_gpte)(pte)))
			goto error;

		if (unlikely(is_rsvd_bits_set(mmu, pte, walker->level))) {
			errcode = PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
			goto error;
		}

		walker->ptes[walker->level - 1] = pte;
	} while (!is_last_gpte(mmu, walker->level, pte));
	pte_pkey = FNAME(gpte_pkeys)(vcpu, pte);
	accessed_dirty = have_ad ? pte_access & PT_GUEST_ACCESSED_MASK : 0;

	/* Convert to ACC_*_MASK flags for struct guest_walker. */
	walker->pt_access = FNAME(gpte_access)(pt_access ^ walk_nx_mask);
	walker->pte_access = FNAME(gpte_access)(pte_access ^ walk_nx_mask);
	errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
	if (unlikely(errcode))
		goto error;

	gfn = gpte_to_gfn_lvl(pte, walker->level);
	gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT;

	if (PTTYPE == 32 && walker->level == PT_DIRECTORY_LEVEL && is_cpuid_PSE36())
		gfn += pse36_gfn_delta(pte);

	real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault);
	if (real_gpa == UNMAPPED_GVA)
		return 0;

	walker->gfn = real_gpa >> PAGE_SHIFT;

	if (!write_fault)
		FNAME(protect_clean_gpte)(mmu, &walker->pte_access, pte);
	else
		/*
		 * On a write fault, fold the dirty bit into accessed_dirty.
		 * For modes without A/D bits support accessed_dirty will be
		 * always clear.
		 */
		accessed_dirty &= pte >>
			(PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT);

	if (unlikely(!accessed_dirty)) {
		ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault);
		if (unlikely(ret < 0))
			goto error;
		else if (ret)
			goto retry_walk;
	}

	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
		 __func__, (u64)pte, walker->pte_access, walker->pt_access);
	return 1;
error:
	errcode |= write_fault | user_fault;
	if (fetch_fault && (mmu->nx ||
			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
		errcode |= PFERR_FETCH_MASK;

	walker->fault.vector = PF_VECTOR;
	walker->fault.error_code_valid = true;
	walker->fault.error_code = errcode;

#if PTTYPE == PTTYPE_EPT
	/*
	 * Use PFERR_RSVD_MASK in error_code to tell if an EPT
	 * misconfiguration needs to be injected. The detection is
	 * done by is_rsvd_bits_set() above.
	 *
	 * We set up the value of exit_qualification to inject:
	 * [2:0] - Derive from the access bits. The exit_qualification might be
	 *         out of date if it is serving an EPT misconfiguration.
	 * [5:3] - Calculated by the page walk of the guest EPT page tables
	 * [7:8] - Derived from [7:8] of the real exit_qualification
	 *
	 * The other bits are set to 0.
	 */
	if (!(errcode & PFERR_RSVD_MASK)) {
		vcpu->arch.exit_qualification &= 0x180;
		if (write_fault)
			vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_WRITE;
		if (user_fault)
			vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_READ;
		if (fetch_fault)
			vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_INSTR;
		vcpu->arch.exit_qualification |= (pte_access & 0x7) << 3;
	}
#endif
	walker->fault.address = addr;
	walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;

	trace_kvm_mmu_walker_error(walker->fault.error_code);
	return 0;
}
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr, u32 access)
{
	return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr,
					access);
}
#if PTTYPE != PTTYPE_EPT
static int FNAME(walk_addr_nested)(struct guest_walker *walker,
				   struct kvm_vcpu *vcpu, gva_t addr,
				   u32 access)
{
	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
					addr, access);
}
#endif
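/*
 * Translate a single guest pte into a last-level spte and install it;
 * used by pte prefetch and by FNAME(update_pte) when one guest pte needs
 * to be refreshed.  Returns false if the gpte cannot be safely mapped.
 */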
static bool
FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
		     u64 *spte, pt_element_t gpte, bool no_dirty_log)
{
	unsigned pte_access;
	gfn_t gfn;
	kvm_pfn_t pfn;

	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
		return false;

	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);

	gfn = gpte_to_gfn(gpte);
	pte_access = sp->role.access & FNAME(gpte_access)(gpte);
	FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
	pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
			no_dirty_log && (pte_access & ACC_WRITE_MASK));
	if (is_error_pfn(pfn))
		return false;

	/*
	 * we call mmu_set_spte() with host_writable = true because
	 * pte_prefetch_gfn_to_pfn always gets a writable pfn.
	 */
	mmu_set_spte(vcpu, spte, pte_access, 0, PT_PAGE_TABLE_LEVEL, gfn, pfn,
		     true, true);

	return true;
}
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			      u64 *spte, const void *pte)
{
	pt_element_t gpte = *(const pt_element_t *)pte;

	FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false);
}
static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
				struct guest_walker *gw, int level)
{
	pt_element_t curr_pte;
	gpa_t base_gpa, pte_gpa = gw->pte_gpa[level - 1];
	u64 mask;
	int r, index;

	if (level == PT_PAGE_TABLE_LEVEL) {
		mask = PTE_PREFETCH_NUM * sizeof(pt_element_t) - 1;
		base_gpa = pte_gpa & ~mask;
		index = (pte_gpa - base_gpa) / sizeof(pt_element_t);

		r = kvm_vcpu_read_guest_atomic(vcpu, base_gpa,
				gw->prefetch_ptes, sizeof(gw->prefetch_ptes));
		curr_pte = gw->prefetch_ptes[index];
	} else
		r = kvm_vcpu_read_guest_atomic(vcpu, pte_gpa,
				  &curr_pte, sizeof(curr_pte));

	return r || curr_pte != gw->ptes[level - 1];
}
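/*
 * Opportunistically install sptes for the guest ptes adjacent to the
 * faulting one (gw->prefetch_ptes, filled by gpte_changed() at the last
 * level), skipping any that are unsafe to map.
 */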
static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
				u64 *sptep)
{
	struct kvm_mmu_page *sp;
	pt_element_t *gptep = gw->prefetch_ptes;
	u64 *spte;
	int i;

	sp = page_header(__pa(sptep));

	if (sp->role.level > PT_PAGE_TABLE_LEVEL)
		return;

	if (sp->role.direct)
		return __direct_pte_prefetch(vcpu, sp, sptep);

	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
	spte = sp->spt + i;

	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
		if (spte == sptep)
			continue;

		if (is_shadow_present_pte(*spte))
			continue;

		if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true))
			break;
	}
}
/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 * If the guest tries to write a write-protected page, we need to
 * emulate this operation, return 1 to indicate this case.
 */
static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			struct guest_walker *gw,
			int write_fault, int hlevel,
			kvm_pfn_t pfn, bool map_writable, bool prefault)
{
	struct kvm_mmu_page *sp = NULL;
	struct kvm_shadow_walk_iterator it;
	unsigned direct_access, access = gw->pt_access;
	int top_level, ret;

	direct_access = gw->pte_access;

	top_level = vcpu->arch.mmu->root_level;
	if (top_level == PT32E_ROOT_LEVEL)
		top_level = PT32_ROOT_LEVEL;

	/*
	 * Verify that the top-level gpte is still there.  Since the page
	 * is a root page, it is either write protected (and cannot be
	 * changed from now on) or it is invalid (in which case, we don't
	 * really care if it changes underneath us after this point).
	 */
	if (FNAME(gpte_changed)(vcpu, gw, top_level))
		goto out_gpte_changed;

	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
		goto out_gpte_changed;

	for (shadow_walk_init(&it, vcpu, addr);
	     shadow_walk_okay(&it) && it.level > gw->level;
	     shadow_walk_next(&it)) {
		gfn_t table_gfn;

		clear_sp_write_flooding_count(it.sptep);
		drop_large_spte(vcpu, it.sptep);

		sp = NULL;
		if (!is_shadow_present_pte(*it.sptep)) {
			table_gfn = gw->table_gfn[it.level - 2];
			sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
					      false, access);
		}

		/*
		 * Verify that the gpte in the page we've just write
		 * protected is still there.
		 */
		if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
			goto out_gpte_changed;

		if (sp)
			link_shadow_page(vcpu, it.sptep, sp);
	}

	for (;
	     shadow_walk_okay(&it) && it.level > hlevel;
	     shadow_walk_next(&it)) {
		gfn_t direct_gfn;

		clear_sp_write_flooding_count(it.sptep);
		validate_direct_spte(vcpu, it.sptep, direct_access);

		drop_large_spte(vcpu, it.sptep);

		if (is_shadow_present_pte(*it.sptep))
			continue;

		direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);

		sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
				      true, direct_access);
		link_shadow_page(vcpu, it.sptep, sp);
	}

	clear_sp_write_flooding_count(it.sptep);
	ret = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault,
			   it.level, gw->gfn, pfn, prefault, map_writable);
	FNAME(pte_prefetch)(vcpu, gw, it.sptep);

	return ret;

out_gpte_changed:
	kvm_release_pfn_clean(pfn);
	return RET_PF_RETRY;
}
/*
 * Check whether the mapped gfn can write its own page table in the current
 * mapping.
 *
 * This is a helper for FNAME(page_fault).  When the guest uses a large page
 * to map a writable gfn that is itself in use as a guest page table, KVM
 * must be forced to map it with a small page: a new shadow page will be
 * created when KVM shadows that page table, which would stop the large page
 * from being used anyway.  Doing this early avoids unnecessary #PFs and
 * emulation.
 *
 * @write_fault_to_shadow_pgtable will be set to true if the faulting gfn is
 * currently used as a guest page table.
 *
 * Note: the PDPT page is not checked for PAE 32-bit guests.  That is fine
 * because the PDPT is always shadowed, so a large page can never be used to
 * map the gfn holding the PDPT.
 */
static bool
FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
			      struct guest_walker *walker, int user_fault,
			      bool *write_fault_to_shadow_pgtable)
{
	int level;
	gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1);
	bool self_changed = false;

	if (!(walker->pte_access & ACC_WRITE_MASK ||
	      (!is_write_protection(vcpu) && !user_fault)))
		return false;

	for (level = walker->level; level <= walker->max_level; level++) {
		gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1];

		self_changed |= !(gfn & mask);
		*write_fault_to_shadow_pgtable |= !gfn;
	}

	return self_changed;
}
/*
 * Page fault handler.  There are several causes for a page fault:
 * - there is no shadow pte for the guest pte
 * - write access through a shadow pte marked read only so that we can set
 *   the dirty bit
 * - write access to a shadow pte marked read only so we can update the page
 *   dirty bitmap, when userspace requests it
 * - mmio access; in this case we will never install a present shadow pte
 * - normal guest page fault due to the guest pte marked not present, not
 *   writable, or not executable
 *
 * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *          a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
			     bool prefault)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	struct guest_walker walker;
	int r;
	kvm_pfn_t pfn;
	int level = PT_PAGE_TABLE_LEVEL;
	bool force_pt_level = false;
	unsigned long mmu_seq;
	bool map_writable, is_self_change_mapping;

	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	/*
	 * If PFEC.RSVD is set, this is a shadow page fault.
	 * The bit needs to be cleared before walking guest page tables.
	 */
	error_code &= ~PFERR_RSVD_MASK;

	/*
	 * Look up the guest pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, error_code);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __func__);
		if (!prefault)
			inject_page_fault(vcpu, &walker.fault);

		return RET_PF_RETRY;
	}

	if (page_fault_handle_page_track(vcpu, error_code, walker.gfn)) {
		shadow_page_table_clear_flood(vcpu, addr);
		return RET_PF_EMULATE;
	}

	vcpu->arch.write_fault_to_shadow_pgtable = false;

	is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
	      &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable);

	if (walker.level >= PT_DIRECTORY_LEVEL && !is_self_change_mapping) {
		level = mapping_level(vcpu, walker.gfn, &force_pt_level);
		if (likely(!force_pt_level)) {
			level = min(walker.level, level);
			walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
		}
	} else
		force_pt_level = true;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();

	if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
			 &map_writable))
		return RET_PF_RETRY;

	if (handle_abnormal_pfn(vcpu, addr, walker.gfn, pfn, walker.pte_access, &r))
		return r;

	/*
	 * Do not change pte_access if the pfn is a mmio page, otherwise
	 * we will cache the incorrect access into mmio spte.
	 */
	if (write_fault && !(walker.pte_access & ACC_WRITE_MASK) &&
	    !is_write_protection(vcpu) && !user_fault &&
	    !is_noslot_pfn(pfn)) {
		walker.pte_access |= ACC_WRITE_MASK;
		walker.pte_access &= ~ACC_USER_MASK;

		/*
		 * If we converted a user page to a kernel page so that the
		 * kernel can write to it when cr0.wp=0, then we should
		 * prevent the kernel from executing it if SMEP is enabled.
		 */
		if (kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
			walker.pte_access &= ~ACC_EXEC_MASK;
	}

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
		goto out_unlock;

	kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
	if (make_mmu_pages_available(vcpu) < 0)
		goto out_unlock;
	if (!force_pt_level)
		transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
	r = FNAME(fetch)(vcpu, addr, &walker, write_fault,
			 level, pfn, map_writable, prefault);
	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
	spin_unlock(&vcpu->kvm->mmu_lock);

	return r;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return RET_PF_RETRY;
}
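/*
 * Return the gpa of the guest pte array that this last-level shadow page
 * shadows, taking the 32-bit quadrant into account.
 */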
static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
{
	int offset = 0;

	WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL);

	if (PTTYPE == 32)
		offset = sp->role.quadrant << PT64_LEVEL_BITS;

	return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
}
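/*
 * Emulate INVLPG for a shadowed guest: zap the last-level spte that maps
 * @gva under @root_hpa and, if possible, immediately refill it from the
 * current guest pte.
 */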
static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
{
	struct kvm_shadow_walk_iterator iterator;
	struct kvm_mmu_page *sp;
	int level;
	u64 *sptep;

	vcpu_clear_mmio_info(vcpu, gva);

	/*
	 * No need to check return value here, rmap_can_add() can
	 * help us to skip pte prefetch later.
	 */
	mmu_topup_memory_caches(vcpu);

	if (!VALID_PAGE(root_hpa)) {
		WARN_ON(1);
		return;
	}

	spin_lock(&vcpu->kvm->mmu_lock);
	for_each_shadow_entry_using_root(vcpu, root_hpa, gva, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;

		sp = page_header(__pa(sptep));
		if (is_last_spte(*sptep, level)) {
			pt_element_t gpte;
			gpa_t pte_gpa;

			if (!sp->unsync)
				break;

			pte_gpa = FNAME(get_level1_sp_gpa)(sp);
			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);

			if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
				kvm_flush_remote_tlbs_with_address(vcpu->kvm,
					sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));

			if (!rmap_can_add(vcpu))
				break;

			if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
						       sizeof(pt_element_t)))
				break;

			FNAME(update_pte)(vcpu, sp, sptep, &gpte);
		}

		if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
			break;
	}
	spin_unlock(&vcpu->kvm->mmu_lock);
}
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
			       struct x86_exception *exception)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr, access);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	} else if (exception)
		*exception = walker.fault;

	return gpa;
}
#if PTTYPE != PTTYPE_EPT
static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
				      u32 access,
				      struct x86_exception *exception)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	} else if (exception)
		*exception = walker.fault;

	return gpa;
}
#endif
/*
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 *
 * Note: we should flush all TLBs if a spte is dropped even though the guest
 * is responsible for it.  If we don't, kvm_mmu_notifier_invalidate_page and
 * kvm_mmu_notifier_invalidate_range_start only see that the mapped page is
 * no longer used by the guest and so do not flush the TLBs, which would let
 * the guest keep accessing the freed page.  We increase kvm->tlbs_dirty to
 * delay the TLB flush in this case.
 */
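/*
 * sync_page re-validates every spte in an unsync shadow page against the
 * current guest page table: sptes whose gptes changed or now point at a
 * different gfn are dropped, and the access bits of the surviving ones are
 * refreshed.
 */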
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	int i, nr_present = 0;
	bool host_writable;
	gpa_t first_pte_gpa;
	int set_spte_ret = 0;

	/* direct kvm_mmu_page can not be unsync. */
	BUG_ON(sp->role.direct);

	first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		unsigned pte_access;
		pt_element_t gpte;
		gpa_t pte_gpa;
		gfn_t gfn;

		if (!sp->spt[i])
			continue;

		pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);

		if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
					       sizeof(pt_element_t)))
			return 0;

		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
			/*
			 * Update the spte before increasing tlbs_dirty to make
			 * sure no tlb flush is lost after the spte is zapped;
			 * see the comments in kvm_flush_remote_tlbs().
			 */
			smp_wmb();
			vcpu->kvm->tlbs_dirty++;
			continue;
		}

		gfn = gpte_to_gfn(gpte);
		pte_access = sp->role.access;
		pte_access &= FNAME(gpte_access)(gpte);
		FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);

		if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access,
		      &nr_present))
			continue;

		if (gfn != sp->gfns[i]) {
			drop_spte(vcpu->kvm, &sp->spt[i]);
			/*
			 * The same as above where we are doing
			 * prefetch_invalid_gpte().
			 */
			smp_wmb();
			vcpu->kvm->tlbs_dirty++;
			continue;
		}

		nr_present++;

		host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;

		set_spte_ret |= set_spte(vcpu, &sp->spt[i],
					 pte_access, PT_PAGE_TABLE_LEVEL,
					 gfn, spte_to_pfn(sp->spt[i]),
					 true, false, host_writable);
	}

	if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH)
		kvm_flush_remote_tlbs(vcpu->kvm);

	return nr_present;
}
#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LVL_ADDR_MASK
#undef PT_LVL_OFFSET_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_lvl
#undef CMPXCHG
#undef PT_GUEST_ACCESSED_MASK
#undef PT_GUEST_DIRTY_MASK
#undef PT_GUEST_DIRTY_SHIFT
#undef PT_GUEST_ACCESSED_SHIFT
#undef PT_HAVE_ACCESSED_DIRTY