void kvm_update_va_mask(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_layout(void);
+u32 kvm_hyp_va_bits(void);
void kvm_apply_hyp_relocations(void);
#define __hyp_pa(x) (((phys_addr_t)(x)) + hyp_physvirt_offset)
phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
-int __init kvm_mmu_init(u32 *hyp_va_bits);
+int __init kvm_mmu_init(u32 hyp_va_bits);
static inline void *__kvm_vector_slot2addr(void *base,
enum arm64_hyp_spectre_vector slot)
/* Inits Hyp-mode on all online CPUs */
static int __init init_hyp_mode(void)
{
- u32 hyp_va_bits;
+ u32 hyp_va_bits = kvm_hyp_va_bits();
int cpu;
int err = -ENOMEM;
/*
* Allocate Hyp PGD and setup Hyp identity mapping
*/
- err = kvm_mmu_init(&hyp_va_bits);
+ err = kvm_mmu_init(hyp_va_bits);
if (err)
goto out_err;
.virt_to_phys = kvm_host_pa,
};
-int __init kvm_mmu_init(u32 *hyp_va_bits)
+int __init kvm_mmu_init(u32 hyp_va_bits)
{
int err;
- u32 idmap_bits;
- u32 kernel_bits;
hyp_idmap_start = __pa_symbol(__hyp_idmap_text_start);
hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE);
*/
BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
- /*
- * The ID map is always configured for 48 bits of translation, which
- * may be fewer than the number of VA bits used by the regular kernel
- * stage 1, when VA_BITS=52.
- *
- * At EL2, there is only one TTBR register, and we can't switch between
- * translation tables *and* update TCR_EL2.T0SZ at the same time. Bottom
- * line: we need to use the extended range with *both* our translation
- * tables.
- *
- * So use the maximum of the idmap VA bits and the regular kernel stage
- * 1 VA bits to assure that the hypervisor can both ID map its code page
- * and map any kernel memory.
- */
- idmap_bits = IDMAP_VA_BITS;
- kernel_bits = vabits_actual;
- *hyp_va_bits = max(idmap_bits, kernel_bits);
-
- kvm_debug("Using %u-bit virtual addresses at EL2\n", *hyp_va_bits);
+ kvm_debug("Using %u-bit virtual addresses at EL2\n", hyp_va_bits);
kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
kvm_debug("HYP VA range: %lx:%lx\n",
kern_hyp_va(PAGE_OFFSET),
goto out;
}
- err = kvm_pgtable_hyp_init(hyp_pgtable, *hyp_va_bits, &kvm_hyp_mm_ops);
+ err = kvm_pgtable_hyp_init(hyp_pgtable, hyp_va_bits, &kvm_hyp_mm_ops);
if (err)
goto out_free_pgtable;
goto out_destroy_pgtable;
io_map_base = hyp_idmap_start;
- __hyp_va_bits = *hyp_va_bits;
+ __hyp_va_bits = hyp_va_bits;
return 0;
out_destroy_pgtable:
hyp_physvirt_offset = (s64)__pa(kern_va) - (s64)hyp_va;
}
+/*
+ * kvm_hyp_va_bits() - Calculate the actual VA size (in bits) used by the
+ * hypervisor.
+ *
+ * Returns the number of VA bits the EL2 translation regime must cover.
+ */
+__init u32 kvm_hyp_va_bits(void)
+{
+	/* vabits_actual is u64; use u32 locals so max() sees matching types. */
+	u32 idmap_bits = IDMAP_VA_BITS;
+	u32 kernel_bits = vabits_actual;
+
+	/*
+	 * The ID map is always configured for 48 bits of translation, which may
+	 * be different from the number of VA bits used by the regular kernel
+	 * stage 1.
+	 *
+	 * At EL2, there is only one TTBR register, and we can't switch between
+	 * translation tables *and* update TCR_EL2.T0SZ at the same time. Bottom
+	 * line: we need to use the extended range with *both* our translation
+	 * tables.
+	 *
+	 * So use the maximum of the idmap VA bits and the regular kernel stage
+	 * 1 VA bits as the hypervisor VA size to assure that the hypervisor can
+	 * both ID map its code page and map any kernel memory.
+	 */
+	return max(idmap_bits, kernel_bits);
+}
+
/*
* We want to generate a hyp VA with the following format (with V ==
- * vabits_actual):
+ * hypervisor VA bits):
*
* 63 ... V | V-1 | V-2 .. tag_lsb | tag_lsb - 1 .. 0
* ---------------------------------------------------------
{
phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
u64 hyp_va_msb;
+ u32 hyp_va_bits = kvm_hyp_va_bits();
/* Where is my RAM region? */
- hyp_va_msb = idmap_addr & BIT(vabits_actual - 1);
- hyp_va_msb ^= BIT(vabits_actual - 1);
+ hyp_va_msb = idmap_addr & BIT(hyp_va_bits - 1);
+ hyp_va_msb ^= BIT(hyp_va_bits - 1);
tag_lsb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
(u64)(high_memory - 1));
va_mask = GENMASK_ULL(tag_lsb - 1, 0);
tag_val = hyp_va_msb;
- if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && tag_lsb != (vabits_actual - 1)) {
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && tag_lsb != (hyp_va_bits - 1)) {
/* We have some free bits to insert a random tag. */
- tag_val |= get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
+ tag_val |= get_random_long() & GENMASK_ULL(hyp_va_bits - 2, tag_lsb);
}
tag_val >>= tag_lsb;