/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2016 - ARM Ltd
 *
 * stage2 page table helpers
 */

#ifndef __ARM64_S2_PGTABLE_H_
#define __ARM64_S2_PGTABLE_H_

#include <linux/hugetlb.h>
#include <asm/pgtable.h>
/*
 * PGDIR_SHIFT determines the size a top-level page table entry can map
 * and depends on the number of levels in the page table. Compute the
 * PGDIR_SHIFT for a given number of levels.
 */
#define pt_levels_pgdir_shift(lvls)	ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - (lvls))
/*
 * The hardware supports concatenation of up to 16 tables at stage2 entry
 * level and we use the feature whenever possible, which means we resolve 4
 * additional bits of address at the entry level.
 *
 * This implies, the total number of page table levels required for
 * IPA_SHIFT at stage2 expected by the hardware can be calculated using
 * the same logic used for the (non-collapsible) stage1 page tables but for
 * (IPA_SHIFT - 4).
 */
#define stage2_pgtable_levels(ipa)	ARM64_HW_PGTABLE_LEVELS((ipa) - 4)
/* Number of stage2 levels for this VM, as encoded in its VTCR_EL2 value */
#define kvm_stage2_levels(kvm)		VTCR_EL2_LVLS(kvm->arch.vtcr)
c0ef6326 | 33 | |
61fa5a86 SP |
/* stage2_pgdir_shift() is the size mapped by top-level stage2 entry for the VM */
#define stage2_pgdir_shift(kvm)		pt_levels_pgdir_shift(kvm_stage2_levels(kvm))
#define stage2_pgdir_size(kvm)		(1ULL << stage2_pgdir_shift(kvm))
/* Fully parenthesized so the expansion composes safely in any expression */
#define stage2_pgdir_mask(kvm)		(~(stage2_pgdir_size(kvm) - 1))
da04fa04 SP |
38 | |
/*
 * The number of PTRS across all concatenated stage2 tables given by the
 * number of bits resolved at the initial level.
 * If we force more levels than necessary, we may have (stage2_pgdir_shift > IPA),
 * in which case, stage2_pgd_ptrs will have one entry.
 */
#define pgd_ptrs_shift(ipa, pgdir_shift)	\
	((ipa) > (pgdir_shift) ? ((ipa) - (pgdir_shift)) : 0)
#define __s2_pgd_ptrs(ipa, lvls)		\
	(1 << (pgd_ptrs_shift((ipa), pt_levels_pgdir_shift(lvls))))
#define __s2_pgd_size(ipa, lvls)	(__s2_pgd_ptrs((ipa), (lvls)) * sizeof(pgd_t))

/* Entry-level table geometry for a given VM's IPA size and level count */
#define stage2_pgd_ptrs(kvm)		__s2_pgd_ptrs(kvm_phys_shift(kvm), kvm_stage2_levels(kvm))
#define stage2_pgd_size(kvm)		__s2_pgd_size(kvm_phys_shift(kvm), kvm_stage2_levels(kvm))
da04fa04 SP |
53 | |
/*
 * kvm_mmu_cache_min_pages() is the number of pages required to install
 * a stage-2 translation. We pre-allocate the entry level page table at
 * the VM creation, so we only need pages for the remaining levels.
 */
#define kvm_mmu_cache_min_pages(kvm)	(kvm_stage2_levels(kvm) - 1)
da04fa04 | 60 | |
865b30cd | 61 | /* Stage2 PUD definitions when the level is present */ |
61fa5a86 SP |
62 | static inline bool kvm_stage2_has_pud(struct kvm *kvm) |
63 | { | |
64 | return (CONFIG_PGTABLE_LEVELS > 3) && (kvm_stage2_levels(kvm) > 3); | |
65 | } | |
66 | ||
/* Geometry of a stage2 PUD entry (level 1 of a 4-level walk) */
#define S2_PUD_SHIFT			ARM64_HW_PGTABLE_LEVEL_SHIFT(1)
#define S2_PUD_SIZE			(1UL << S2_PUD_SHIFT)
#define S2_PUD_MASK			(~(S2_PUD_SIZE - 1))
70 | ||
865b30cd SP |
71 | static inline bool stage2_pgd_none(struct kvm *kvm, pgd_t pgd) |
72 | { | |
61fa5a86 | 73 | if (kvm_stage2_has_pud(kvm)) |
865b30cd SP |
74 | return pgd_none(pgd); |
75 | else | |
76 | return 0; | |
77 | } | |
c0ef6326 | 78 | |
865b30cd SP |
79 | static inline void stage2_pgd_clear(struct kvm *kvm, pgd_t *pgdp) |
80 | { | |
61fa5a86 | 81 | if (kvm_stage2_has_pud(kvm)) |
865b30cd SP |
82 | pgd_clear(pgdp); |
83 | } | |
da04fa04 | 84 | |
865b30cd | 85 | static inline bool stage2_pgd_present(struct kvm *kvm, pgd_t pgd) |
da04fa04 | 86 | { |
61fa5a86 | 87 | if (kvm_stage2_has_pud(kvm)) |
865b30cd SP |
88 | return pgd_present(pgd); |
89 | else | |
90 | return 1; | |
91 | } | |
da04fa04 | 92 | |
865b30cd SP |
93 | static inline void stage2_pgd_populate(struct kvm *kvm, pgd_t *pgd, pud_t *pud) |
94 | { | |
61fa5a86 | 95 | if (kvm_stage2_has_pud(kvm)) |
865b30cd | 96 | pgd_populate(NULL, pgd, pud); |
da04fa04 SP |
97 | } |
98 | ||
865b30cd SP |
99 | static inline pud_t *stage2_pud_offset(struct kvm *kvm, |
100 | pgd_t *pgd, unsigned long address) | |
101 | { | |
61fa5a86 | 102 | if (kvm_stage2_has_pud(kvm)) |
865b30cd SP |
103 | return pud_offset(pgd, address); |
104 | else | |
105 | return (pud_t *)pgd; | |
106 | } | |
da04fa04 | 107 | |
865b30cd SP |
108 | static inline void stage2_pud_free(struct kvm *kvm, pud_t *pud) |
109 | { | |
61fa5a86 | 110 | if (kvm_stage2_has_pud(kvm)) |
14b94d07 | 111 | free_page((unsigned long)pud); |
865b30cd SP |
112 | } |
113 | ||
114 | static inline bool stage2_pud_table_empty(struct kvm *kvm, pud_t *pudp) | |
115 | { | |
61fa5a86 | 116 | if (kvm_stage2_has_pud(kvm)) |
865b30cd SP |
117 | return kvm_page_empty(pudp); |
118 | else | |
119 | return false; | |
120 | } | |
da04fa04 | 121 | |
865b30cd SP |
122 | static inline phys_addr_t |
123 | stage2_pud_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) | |
124 | { | |
61fa5a86 | 125 | if (kvm_stage2_has_pud(kvm)) { |
865b30cd | 126 | phys_addr_t boundary = (addr + S2_PUD_SIZE) & S2_PUD_MASK; |
da04fa04 | 127 | |
865b30cd SP |
128 | return (boundary - 1 < end - 1) ? boundary : end; |
129 | } else { | |
130 | return end; | |
131 | } | |
132 | } | |
133 | ||
134 | /* Stage2 PMD definitions when the level is present */ | |
61fa5a86 SP |
135 | static inline bool kvm_stage2_has_pmd(struct kvm *kvm) |
136 | { | |
137 | return (CONFIG_PGTABLE_LEVELS > 2) && (kvm_stage2_levels(kvm) > 2); | |
138 | } | |
139 | ||
/* Geometry of a stage2 PMD entry (level 2 of a 4-level walk) */
#define S2_PMD_SHIFT			ARM64_HW_PGTABLE_LEVEL_SHIFT(2)
#define S2_PMD_SIZE			(1UL << S2_PMD_SHIFT)
#define S2_PMD_MASK			(~(S2_PMD_SIZE - 1))
143 | ||
865b30cd SP |
144 | static inline bool stage2_pud_none(struct kvm *kvm, pud_t pud) |
145 | { | |
61fa5a86 | 146 | if (kvm_stage2_has_pmd(kvm)) |
865b30cd SP |
147 | return pud_none(pud); |
148 | else | |
149 | return 0; | |
150 | } | |
c0ef6326 | 151 | |
865b30cd SP |
152 | static inline void stage2_pud_clear(struct kvm *kvm, pud_t *pud) |
153 | { | |
61fa5a86 | 154 | if (kvm_stage2_has_pmd(kvm)) |
865b30cd SP |
155 | pud_clear(pud); |
156 | } | |
da04fa04 | 157 | |
865b30cd | 158 | static inline bool stage2_pud_present(struct kvm *kvm, pud_t pud) |
da04fa04 | 159 | { |
61fa5a86 | 160 | if (kvm_stage2_has_pmd(kvm)) |
865b30cd SP |
161 | return pud_present(pud); |
162 | else | |
163 | return 1; | |
164 | } | |
c0ef6326 | 165 | |
865b30cd SP |
166 | static inline void stage2_pud_populate(struct kvm *kvm, pud_t *pud, pmd_t *pmd) |
167 | { | |
61fa5a86 | 168 | if (kvm_stage2_has_pmd(kvm)) |
865b30cd | 169 | pud_populate(NULL, pud, pmd); |
da04fa04 SP |
170 | } |
171 | ||
865b30cd SP |
172 | static inline pmd_t *stage2_pmd_offset(struct kvm *kvm, |
173 | pud_t *pud, unsigned long address) | |
174 | { | |
61fa5a86 | 175 | if (kvm_stage2_has_pmd(kvm)) |
865b30cd SP |
176 | return pmd_offset(pud, address); |
177 | else | |
178 | return (pmd_t *)pud; | |
179 | } | |
c0ef6326 | 180 | |
865b30cd SP |
181 | static inline void stage2_pmd_free(struct kvm *kvm, pmd_t *pmd) |
182 | { | |
61fa5a86 | 183 | if (kvm_stage2_has_pmd(kvm)) |
14b94d07 | 184 | free_page((unsigned long)pmd); |
865b30cd | 185 | } |
c0ef6326 | 186 | |
865b30cd SP |
187 | static inline bool stage2_pud_huge(struct kvm *kvm, pud_t pud) |
188 | { | |
61fa5a86 | 189 | if (kvm_stage2_has_pmd(kvm)) |
865b30cd SP |
190 | return pud_huge(pud); |
191 | else | |
192 | return 0; | |
193 | } | |
194 | ||
195 | static inline bool stage2_pmd_table_empty(struct kvm *kvm, pmd_t *pmdp) | |
196 | { | |
61fa5a86 | 197 | if (kvm_stage2_has_pmd(kvm)) |
865b30cd SP |
198 | return kvm_page_empty(pmdp); |
199 | else | |
200 | return 0; | |
201 | } | |
202 | ||
203 | static inline phys_addr_t | |
204 | stage2_pmd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) | |
205 | { | |
61fa5a86 | 206 | if (kvm_stage2_has_pmd(kvm)) { |
865b30cd SP |
207 | phys_addr_t boundary = (addr + S2_PMD_SIZE) & S2_PMD_MASK; |
208 | ||
209 | return (boundary - 1 < end - 1) ? boundary : end; | |
210 | } else { | |
211 | return end; | |
212 | } | |
213 | } | |
214 | ||
/* The PTE level always exists; report whether the table holds no entries */
static inline bool stage2_pte_table_empty(struct kvm *kvm, pte_t *ptep)
{
	return kvm_page_empty(ptep);
}
c0ef6326 | 219 | |
865b30cd SP |
220 | static inline unsigned long stage2_pgd_index(struct kvm *kvm, phys_addr_t addr) |
221 | { | |
61fa5a86 | 222 | return (((addr) >> stage2_pgdir_shift(kvm)) & (stage2_pgd_ptrs(kvm) - 1)); |
865b30cd | 223 | } |
da04fa04 | 224 | |
e55cac5b SP |
225 | static inline phys_addr_t |
226 | stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) | |
da04fa04 | 227 | { |
61fa5a86 | 228 | phys_addr_t boundary = (addr + stage2_pgdir_size(kvm)) & stage2_pgdir_mask(kvm); |
da04fa04 SP |
229 | |
230 | return (boundary - 1 < end - 1) ? boundary : end; | |
231 | } | |
#endif	/* __ARM64_S2_PGTABLE_H_ */