/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1
#define PT_USER_SHIFT 2

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << PT_USER_SHIFT)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_SHIFT 6
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
#define PT_PAGE_SIZE_SHIFT 7
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
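
/*
 * Illustrative note (not in the original header): with SIZE = 4 and
 * SHIFT = 13, PT32_DIR_PSE36_MASK evaluates to 0xf << 13 = 0x1e000,
 * i.e. bits 13..16 of a PSE-36 page-directory entry, which carry
 * physical-address bits 32..35 of the 4 MB page frame.
 */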

#define PT64_ROOT_5LEVEL 5
#define PT64_ROOT_4LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_PDPE_LEVEL 3
#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1
#define PT_MAX_HUGEPAGE_LEVEL (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES - 1)

static inline u64 rsvd_bits(int s, int e)
{
	if (e < s)
		return 0;

	return ((1ULL << (e - s + 1)) - 1) << s;
}
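
/*
 * Worked example (illustrative, not in the original header):
 * rsvd_bits(3, 4) == ((1ULL << 2) - 1) << 3 == 0x18, i.e. a mask with
 * bits 3 and 4 set. A typical caller would build the mask of reserved
 * physical-address bits in a PTE with, e.g., rsvd_bits(maxphyaddr, 51).
 */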

void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value);

void
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);

void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
			     bool accessed_dirty);
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
			  u64 fault_address, char *insn, int insn_len,
			  bool need_unprotect);

static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
{
	if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
		return kvm->arch.n_max_mmu_pages -
			kvm->arch.n_used_mmu_pages;

	return 0;
}

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}

/*
 * Currently, we have two sorts of write-protection: a) the first one
 * write-protects guest pages to synchronize guest modifications, b) the
 * second one is used to synchronize the dirty bitmap for
 * KVM_GET_DIRTY_LOG. The differences between the two are:
 * 1) the first case clears the SPTE_MMU_WRITEABLE bit.
 * 2) the first case requires flushing the TLB immediately to avoid
 *    corrupting the shadow page table between vcpus, so it must run
 *    under the protection of mmu-lock. The second case does not need to
 *    flush the TLB until the dirty bitmap is returned to userspace,
 *    since it only write-protects pages logged in the bitmap; no page in
 *    the dirty bitmap is missed, so it can flush the TLB outside of
 *    mmu-lock.
 *
 * This creates a problem: the first case can encounter a stale writable
 * TLB entry left by the second case, which write-protects pages without
 * flushing the TLB immediately. To make the first case aware of this, we
 * let it flush the TLB whenever it write-protects an spte whose
 * SPTE_MMU_WRITEABLE bit is set; this works because the second case
 * never touches the SPTE_MMU_WRITEABLE bit.
 *
 * In any case, whenever an spte is updated (only permission and status
 * bits are changed) we need to check whether an spte with
 * SPTE_MMU_WRITEABLE has become read-only; if so, we need to flush the
 * TLB. Fortunately, mmu_spte_update() already handles this.
 *
 * The rules for using SPTE_MMU_WRITEABLE and PT_WRITABLE_MASK:
 * - to check whether there may be a writable TLB entry, or whether the
 *   spte can be made writable on the mmu mapping, check
 *   SPTE_MMU_WRITEABLE; this is the common case. Otherwise,
 * - when fixing a page fault on the spte, or when write-protecting for
 *   dirty logging, check PT_WRITABLE_MASK.
 *
 * TODO: introduce APIs to split these two cases.
 */
static inline int is_writable_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}
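
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * following the rules above, a dirty-logging write-protector tests the
 * hardware-writable bit rather than SPTE_MMU_WRITEABLE, e.g.:
 *
 *	if (is_writable_pte(spte))
 *		spte &= ~PT_WRITABLE_MASK;
 *
 * whereas code that must know whether a writable TLB entry may still
 * exist checks SPTE_MMU_WRITEABLE instead.
 */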

static inline bool is_write_protection(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
}

/*
 * Check if a given access (described through the I/D, W/R and U/S bits of a
 * page fault error code pfec) causes a permission fault with the given PTE
 * access rights (in ACC_* format).
 *
 * Return zero if the access does not fault; return the page fault error code
 * if the access faults.
 */
static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				  unsigned pte_access, unsigned pte_pkey,
				  unsigned pfec)
{
	int cpl = kvm_x86_ops->get_cpl(vcpu);
	unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);

	/*
	 * If CPL < 3, SMAP protection is disabled if EFLAGS.AC = 1.
	 *
	 * If CPL = 3, SMAP applies to all supervisor-mode data accesses
	 * (these are implicit supervisor accesses) regardless of the value
	 * of EFLAGS.AC.
	 *
	 * This computes (cpl < 3) && (rflags & X86_EFLAGS_AC), leaving
	 * the result in X86_EFLAGS_AC. We then insert it in place of
	 * the PFERR_RSVD_MASK bit; this bit will always be zero in pfec,
	 * but it will be one in index if SMAP checks are being overridden.
	 * It is important to keep this branchless.
	 */
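	/*
	 * Worked example (illustrative, not in the original source): with
	 * cpl = 0 and EFLAGS.AC = 1, (cpl - 3) is -3, whose bit 18 is set,
	 * so smap = X86_EFLAGS_AC = 0x40000. Shifting right by
	 * (18 - 3 + 1) = 16 gives 0x4, exactly where PFERR_RSVD_MASK
	 * (bit 3) lands after pfec >> 1. With cpl = 3, (cpl - 3) is 0 and
	 * the SMAP override bit stays clear.
	 */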
	unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
	int index = (pfec >> 1) +
		    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
	bool fault = (mmu->permissions[index] >> pte_access) & 1;
	u32 errcode = PFERR_PRESENT_MASK;

	WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
	if (unlikely(mmu->pkru_mask)) {
		u32 pkru_bits, offset;

		/*
		 * PKRU defines 32 bits: there are 16 domains and 2
		 * attribute bits per domain in pkru. pte_pkey is the
		 * index of the protection domain, so pte_pkey * 2 is
		 * the index of the first bit for the domain.
		 */
		pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;

		/* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
		offset = (pfec & ~1) +
			((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));
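
		/*
		 * Worked example (illustrative, not in the original
		 * source): for pte_pkey = 1, pkru_bits picks bits 2..3 of
		 * PKRU (the AD and WD bits of domain 1). In offset,
		 * PFERR_RSVD_BIT (3) - PT_USER_SHIFT (2) = 1, so a set
		 * PT_USER_MASK bit (value 4) becomes value 8, taking the
		 * place of the always-zero PFEC.RSVD bit.
		 */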

		pkru_bits &= mmu->pkru_mask >> offset;
		errcode |= -pkru_bits & PFERR_PK_MASK;
		fault |= (pkru_bits != 0);
	}

	return -(u32)fault & errcode;
}
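
/*
 * Illustrative use (hypothetical caller, not from this header): a guest
 * page-table walker would combine the accumulated pte_access with the
 * fault's error code, e.g.:
 *
 *	errcode = permission_fault(vcpu, mmu, pte_access, pte_pkey, pfec);
 *	if (errcode)
 *		return errcode;  (caller injects #PF with this error code)
 *
 * A zero return means the access is permitted.
 */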

void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);

void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn);
int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
#endif