// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>
#include <asm/spectre.h>

#include <nvhe/early_alloc.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/spinlock.h>

struct kvm_pgtable pkvm_pgtable;
hyp_spinlock_t pkvm_pgd_lock;

struct memblock_region hyp_memory[HYP_MEMBLOCK_REGIONS];
unsigned int hyp_memblock_nr;

static u64 __io_map_base;

struct hyp_fixmap_slot {
        u64 addr;
        kvm_pte_t *ptep;
};
static DEFINE_PER_CPU(struct hyp_fixmap_slot, fixmap_slots);

static int __pkvm_create_mappings(unsigned long start, unsigned long size,
                                  unsigned long phys, enum kvm_pgtable_prot prot)
{
        int err;

        hyp_spin_lock(&pkvm_pgd_lock);
        err = kvm_pgtable_hyp_map(&pkvm_pgtable, start, size, phys, prot);
        hyp_spin_unlock(&pkvm_pgd_lock);

        return err;
}

static int __pkvm_alloc_private_va_range(unsigned long start, size_t size)
{
        unsigned long cur;

        hyp_assert_lock_held(&pkvm_pgd_lock);

        if (!start || start < __io_map_base)
                return -EINVAL;

        /* The allocated size is always a multiple of PAGE_SIZE */
        cur = start + PAGE_ALIGN(size);

        /* Are we overflowing on the vmemmap? */
        if (cur > __hyp_vmemmap)
                return -ENOMEM;

        __io_map_base = cur;

        return 0;
}

/**
 * pkvm_alloc_private_va_range - Allocates a private VA range.
 * @size:	The size of the VA range to reserve.
 * @haddr:	The hypervisor virtual start address of the allocation.
 *
 * The private virtual address (VA) range is allocated above __io_map_base
 * and aligned based on the order of @size.
 *
 * Return: 0 on success or negative error code on failure.
 */
int pkvm_alloc_private_va_range(size_t size, unsigned long *haddr)
{
        unsigned long addr;
        int ret;

        hyp_spin_lock(&pkvm_pgd_lock);
        addr = __io_map_base;
        ret = __pkvm_alloc_private_va_range(addr, size);
        hyp_spin_unlock(&pkvm_pgd_lock);

        *haddr = addr;

        return ret;
}
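
/*
 * Illustrative usage (a sketch, not part of the original file; variable
 * names are assumptions): reserve a page of private VA, then map a physical
 * page into it, which is essentially what the helper below does:
 *
 *	unsigned long va;
 *
 *	if (!pkvm_alloc_private_va_range(PAGE_SIZE, &va))
 *		err = __pkvm_create_mappings(va, PAGE_SIZE, phys, PAGE_HYP);
 */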

int __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
                                  enum kvm_pgtable_prot prot,
                                  unsigned long *haddr)
{
        unsigned long addr;
        int err;

        size = PAGE_ALIGN(size + offset_in_page(phys));
        err = pkvm_alloc_private_va_range(size, &addr);
        if (err)
                return err;

        err = __pkvm_create_mappings(addr, size, phys, prot);
        if (err)
                return err;

        *haddr = addr + offset_in_page(phys);
        return err;
}
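
/*
 * Worked example (illustrative, assuming 4KiB pages): mapping phys ==
 * 0x80001234 with size == 0x100 rounds the mapping up to one full page
 * (PAGE_ALIGN(0x100 + 0x234) == 0x1000) and returns *haddr with the same
 * sub-page offset, i.e. <allocated VA base> + 0x234.
 */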

int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot)
{
        unsigned long start = (unsigned long)from;
        unsigned long end = (unsigned long)to;
        unsigned long virt_addr;
        phys_addr_t phys;

        hyp_assert_lock_held(&pkvm_pgd_lock);

        start = start & PAGE_MASK;
        end = PAGE_ALIGN(end);

        for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
                int err;

                phys = hyp_virt_to_phys((void *)virt_addr);
                err = kvm_pgtable_hyp_map(&pkvm_pgtable, virt_addr, PAGE_SIZE,
                                          phys, prot);
                if (err)
                        return err;
        }

        return 0;
}

int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
{
        int ret;

        hyp_spin_lock(&pkvm_pgd_lock);
        ret = pkvm_create_mappings_locked(from, to, prot);
        hyp_spin_unlock(&pkvm_pgd_lock);

        return ret;
}

int hyp_back_vmemmap(phys_addr_t back)
{
        unsigned long i, start, size, end = 0;
        int ret;

        for (i = 0; i < hyp_memblock_nr; i++) {
                start = hyp_memory[i].base;
                start = ALIGN_DOWN((u64)hyp_phys_to_page(start), PAGE_SIZE);
                /*
                 * The beginning of the hyp_vmemmap region for the current
                 * memblock may already be backed by the page backing the end
                 * of the previous region, so avoid mapping it twice.
                 */
                start = max(start, end);

                end = hyp_memory[i].base + hyp_memory[i].size;
                end = PAGE_ALIGN((u64)hyp_phys_to_page(end));
                if (start >= end)
                        continue;

                size = end - start;
                ret = __pkvm_create_mappings(start, size, back, PAGE_HYP);
                if (ret)
                        return ret;

                memset(hyp_phys_to_virt(back), 0, size);
                back += size;
        }

        return 0;
}
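
/*
 * Back-of-the-envelope sizing (illustrative; assumes 4KiB pages and a
 * 4-byte struct hyp_page, which may differ across kernel versions): 1GiB
 * of memblock memory covers 262144 pages, so its vmemmap slice needs
 * 262144 * 4 bytes == 1MiB of backing pages supplied via @back.
 */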

static void *__hyp_bp_vect_base;

int pkvm_cpu_set_vector(enum arm64_hyp_spectre_vector slot)
{
        void *vector;

        switch (slot) {
        case HYP_VECTOR_DIRECT: {
                vector = __kvm_hyp_vector;
                break;
        }
        case HYP_VECTOR_SPECTRE_DIRECT: {
                vector = __bp_harden_hyp_vecs;
                break;
        }
        case HYP_VECTOR_INDIRECT:
        case HYP_VECTOR_SPECTRE_INDIRECT: {
                vector = (void *)__hyp_bp_vect_base;
                break;
        }
        default:
                return -EINVAL;
        }

        vector = __kvm_vector_slot2addr(vector, slot);
        *this_cpu_ptr(&kvm_hyp_vector) = (unsigned long)vector;

        return 0;
}

int hyp_map_vectors(void)
{
        phys_addr_t phys;
        unsigned long bp_base;
        int ret;

        if (!kvm_system_needs_idmapped_vectors()) {
                __hyp_bp_vect_base = __bp_harden_hyp_vecs;
                return 0;
        }

        phys = __hyp_pa(__bp_harden_hyp_vecs);
        ret = __pkvm_create_private_mapping(phys, __BP_HARDEN_HYP_VECS_SZ,
                                            PAGE_HYP_EXEC, &bp_base);
        if (ret)
                return ret;

        __hyp_bp_vect_base = (void *)bp_base;

        return 0;
}

void *hyp_fixmap_map(phys_addr_t phys)
{
        struct hyp_fixmap_slot *slot = this_cpu_ptr(&fixmap_slots);
        kvm_pte_t pte, *ptep = slot->ptep;

        pte = *ptep;
        pte &= ~kvm_phys_to_pte(KVM_PHYS_INVALID);
        pte |= kvm_phys_to_pte(phys) | KVM_PTE_VALID;
        WRITE_ONCE(*ptep, pte);
        dsb(ishst);

        return (void *)slot->addr;
}
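
/*
 * Illustrative usage (a sketch, not from the original file): the fixmap
 * gives each CPU one temporary per-CPU mapping window, e.g. to scrub a
 * physical page without allocating page-table memory:
 *
 *	void *va = hyp_fixmap_map(phys);
 *
 *	memset(va, 0, PAGE_SIZE);
 *	hyp_fixmap_unmap();
 */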

static void fixmap_clear_slot(struct hyp_fixmap_slot *slot)
{
        kvm_pte_t *ptep = slot->ptep;
        u64 addr = slot->addr;

        WRITE_ONCE(*ptep, *ptep & ~KVM_PTE_VALID);

        /*
         * Irritatingly, the architecture requires that we use inner-shareable
         * broadcast TLB invalidation here in case another CPU speculates
         * through our fixmap and decides to create an "amalgamation of the
         * values held in the TLB" due to the apparent lack of a
         * break-before-make sequence.
         *
         * https://lore.kernel.org/kvm/20221017115209.2099-1-will@kernel.org/T/#mf10dfbaf1eaef9274c581b81c53758918c1d0f03
         */
        dsb(ishst);
        __tlbi_level(vale2is, __TLBI_VADDR(addr, 0), (KVM_PGTABLE_MAX_LEVELS - 1));
        dsb(ish);
        isb();
}

void hyp_fixmap_unmap(void)
{
        fixmap_clear_slot(this_cpu_ptr(&fixmap_slots));
}

static int __create_fixmap_slot_cb(const struct kvm_pgtable_visit_ctx *ctx,
                                   enum kvm_pgtable_walk_flags visit)
{
        struct hyp_fixmap_slot *slot = per_cpu_ptr(&fixmap_slots, (u64)ctx->arg);

        if (!kvm_pte_valid(ctx->old) || ctx->level != KVM_PGTABLE_MAX_LEVELS - 1)
                return -EINVAL;

        slot->addr = ctx->addr;
        slot->ptep = ctx->ptep;

        /*
         * Clear the PTE, but keep the page-table page refcount elevated to
         * prevent it from ever being freed. This lets us manipulate the PTEs
         * by hand safely without ever needing to allocate memory.
         */
        fixmap_clear_slot(slot);

        return 0;
}

static int create_fixmap_slot(u64 addr, u64 cpu)
{
        struct kvm_pgtable_walker walker = {
                .cb	= __create_fixmap_slot_cb,
                .flags	= KVM_PGTABLE_WALK_LEAF,
                .arg	= (void *)cpu,
        };

        return kvm_pgtable_walk(&pkvm_pgtable, addr, PAGE_SIZE, &walker);
}

int hyp_create_pcpu_fixmap(void)
{
        unsigned long addr, i;
        int ret;

        for (i = 0; i < hyp_nr_cpus; i++) {
                ret = pkvm_alloc_private_va_range(PAGE_SIZE, &addr);
                if (ret)
                        return ret;

                /* Map a dummy page so the walk below finds a leaf PTE. */
                ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, PAGE_SIZE,
                                          __hyp_pa(__hyp_bss_start), PAGE_HYP);
                if (ret)
                        return ret;

                ret = create_fixmap_slot(addr, i);
                if (ret)
                        return ret;
        }

        return 0;
}

int hyp_create_idmap(u32 hyp_va_bits)
{
        unsigned long start, end;

        start = hyp_virt_to_phys((void *)__hyp_idmap_text_start);
        start = ALIGN_DOWN(start, PAGE_SIZE);

        end = hyp_virt_to_phys((void *)__hyp_idmap_text_end);
        end = ALIGN(end, PAGE_SIZE);

        /*
         * One half of the VA space is reserved to linearly map portions of
         * memory -- see va_layout.c for more details. The other half of the VA
         * space contains the trampoline page, and needs some care. Split that
         * second half in two and find the quarter of VA space not conflicting
         * with the idmap to place the IOs and the vmemmap. IOs use the lower
         * half of the quarter and the vmemmap the upper half.
         */
        __io_map_base = start & BIT(hyp_va_bits - 2);
        __io_map_base ^= BIT(hyp_va_bits - 2);
        __hyp_vmemmap = __io_map_base | BIT(hyp_va_bits - 3);

        return __pkvm_create_mappings(start, end - start, start, PAGE_HYP_EXEC);
}
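
/*
 * Worked example (illustrative): with hyp_va_bits == 48, BIT(46) selects a
 * quarter of the VA space. If the idmap'd text has bit 46 clear, the XOR
 * above places __io_map_base at 0x4000_0000_0000 (the quarter the idmap
 * does not occupy) and __hyp_vmemmap at 0x6000_0000_0000, the upper half of
 * that quarter; if bit 46 is set, both land in the low quarter instead.
 */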

int pkvm_create_stack(phys_addr_t phys, unsigned long *haddr)
{
        unsigned long addr, prev_base;
        size_t size;
        int ret;

        hyp_spin_lock(&pkvm_pgd_lock);

        prev_base = __io_map_base;
        /*
         * Efficient stack verification using the PAGE_SHIFT bit implies
         * an alignment of our allocation on the order of the size.
         */
        size = PAGE_SIZE * 2;
        addr = ALIGN(__io_map_base, size);

        ret = __pkvm_alloc_private_va_range(addr, size);
        if (!ret) {
                /*
                 * Since the stack grows downwards, map the stack to the page
                 * at the higher address and leave the lower guard page
                 * unbacked.
                 *
                 * Any valid stack address now has the PAGE_SHIFT bit as 1
                 * and addresses corresponding to the guard page have the
                 * PAGE_SHIFT bit as 0 - this is used for overflow detection.
                 */
                ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr + PAGE_SIZE,
                                          PAGE_SIZE, phys, PAGE_HYP);
                if (ret)
                        __io_map_base = prev_base;
        }
        hyp_spin_unlock(&pkvm_pgd_lock);

        *haddr = addr + size;

        return ret;
}
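
/*
 * Illustrative check (a sketch, not from this file): because the stack page
 * sits above an unbacked guard page and the pair is 2 * PAGE_SIZE aligned,
 * the PAGE_SHIFT bit alone classifies a faulting address:
 *
 *	bool in_guard_page = !(sp & PAGE_SIZE);	// true on stack overflow
 */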

static void *admit_host_page(void *arg)
{
        struct kvm_hyp_memcache *host_mc = arg;

        if (!host_mc->nr_pages)
                return NULL;

        /*
         * The host still owns the pages in its memcache, so we need to go
         * through a full host-to-hyp donation cycle to change it. Fortunately,
         * __pkvm_host_donate_hyp() takes care of races for us, so if it
         * succeeds we're good to go.
         */
        if (__pkvm_host_donate_hyp(hyp_phys_to_pfn(host_mc->head), 1))
                return NULL;

        return pop_hyp_memcache(host_mc, hyp_phys_to_virt);
}

/* Refill our local memcache by popping pages from the one provided by the host. */
int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
                    struct kvm_hyp_memcache *host_mc)
{
        struct kvm_hyp_memcache tmp = *host_mc;
        int ret;

        ret = __topup_hyp_memcache(mc, min_pages, admit_host_page,
                                   hyp_virt_to_phys, &tmp);
        *host_mc = tmp;

        return ret;
}
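
/*
 * Illustrative call site (a sketch; the names are assumptions, not from this
 * file): a hypercall path topping up a hyp-owned memcache from pages the
 * host donated alongside the request:
 *
 *	ret = refill_memcache(&hyp_vm_mc, nr_pages_needed, &host_mc);
 *	if (ret)
 *		return ret;
 */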