KVM: arm64: Stage-2 huge mappings for np-guests
author	Vincent Donnefort <vdonnefort@google.com>
	Wed, 21 May 2025 12:48:33 +0000 (13:48 +0100)
committer	Marc Zyngier <maz@kernel.org>
	Wed, 21 May 2025 13:33:51 +0000 (14:33 +0100)
Now that np-guest hypercalls with ranges are supported, we can let the
hypervisor install block mappings whenever Stage-1 allows it, that is,
when the memory is backed by either Hugetlbfs or THPs. The size of those
block mappings is limited to PMD_SIZE.

Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
Link: https://lore.kernel.org/r/20250521124834.1070650-10-vdonnefort@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
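
As an illustration of the rule enforced by the new __guest_check_transition_size()
below, here is a standalone sketch (illustrative names, assuming 4K pages and a
2 MiB PMD block, not the hypervisor code itself): a share request is accepted
either as a single page or as exactly one PMD-sized block, with both the physical
address and the IPA aligned to that block size.

/*
 * Illustrative sketch only: acceptance rule for a host->guest share,
 * assuming 4K pages and a 2 MiB PMD block size.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SHIFT	12
#define EX_PAGE_SIZE	(1ULL << EX_PAGE_SHIFT)
#define EX_PMD_SIZE	(1ULL << 21)		/* 2 MiB with 4K granules */

static bool share_size_ok(uint64_t phys, uint64_t ipa, uint64_t nr_pages,
			  uint64_t *size)
{
	if (nr_pages == 1) {
		*size = EX_PAGE_SIZE;		/* single-page share, always fine */
		return true;
	}

	/* Only the second-to-last level (PMD-sized) block is accepted. */
	if (nr_pages != EX_PMD_SIZE >> EX_PAGE_SHIFT)
		return false;

	/* Both sides of the mapping must be aligned to the block size. */
	if ((phys | ipa) & (EX_PMD_SIZE - 1))
		return false;

	*size = EX_PMD_SIZE;
	return true;
}

int main(void)
{
	uint64_t size;

	/* 512 pages, both addresses 2M-aligned: accepted as one block. */
	printf("%d\n", share_size_ok(0x40200000ULL, 0x80200000ULL, 512, &size));
	/* Unaligned physical address: rejected. */
	printf("%d\n", share_size_ok(0x40201000ULL, 0x80200000ULL, 512, &size));
	return 0;
}

Compiled and run, this prints 1 then 0: the 2 MiB-aligned request is accepted as
one block, while the request with an unaligned physical address falls back to
being rejected, mirroring the -EINVAL path in the diff.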
arch/arm64/kvm/hyp/nvhe/mem_protect.c
arch/arm64/kvm/mmu.c
arch/arm64/kvm/pkvm.c

arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 5a7a38c5d67c77c8462901e55d1bdf99838a2146..1490820b9ebe36645e4e6ac7b7d53531cfe90103 100644
@@ -166,12 +166,6 @@ int kvm_host_prepare_stage2(void *pgt_pool_base)
        return 0;
 }
 
-static bool guest_stage2_force_pte_cb(u64 addr, u64 end,
-                                     enum kvm_pgtable_prot prot)
-{
-       return true;
-}
-
 static void *guest_s2_zalloc_pages_exact(size_t size)
 {
        void *addr = hyp_alloc_pages(&current_vm->pool, get_order(size));
@@ -278,8 +272,7 @@ int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd)
        };
 
        guest_lock_component(vm);
-       ret = __kvm_pgtable_stage2_init(mmu->pgt, mmu, &vm->mm_ops, 0,
-                                       guest_stage2_force_pte_cb);
+       ret = __kvm_pgtable_stage2_init(mmu->pgt, mmu, &vm->mm_ops, 0, NULL);
        guest_unlock_component(vm);
        if (ret)
                return ret;
@@ -908,12 +901,24 @@ int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages)
 
 static int __guest_check_transition_size(u64 phys, u64 ipa, u64 nr_pages, u64 *size)
 {
+       size_t block_size;
+
        if (nr_pages == 1) {
                *size = PAGE_SIZE;
                return 0;
        }
 
-       return -EINVAL;
+       /* We solely support second to last level huge mapping */
+       block_size = kvm_granule_size(KVM_PGTABLE_LAST_LEVEL - 1);
+
+       if (nr_pages != block_size >> PAGE_SHIFT)
+               return -EINVAL;
+
+       if (!IS_ALIGNED(phys | ipa, block_size))
+               return -EINVAL;
+
+       *size = block_size;
+       return 0;
 }
 
 int __pkvm_host_share_guest(u64 pfn, u64 gfn, u64 nr_pages, struct pkvm_hyp_vcpu *vcpu,
arch/arm64/kvm/mmu.c
index 754f2fe0cc67382338fd05628b14d3d1fd785831..e445db2cb4a4371608fc4dde7259efc924e51e72 100644
@@ -1304,6 +1304,10 @@ static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
        if (map_size == PAGE_SIZE)
                return true;
 
+       /* pKVM only supports PMD_SIZE huge-mappings */
+       if (is_protected_kvm_enabled() && map_size != PMD_SIZE)
+               return false;
+
        size = memslot->npages * PAGE_SIZE;
 
        gpa_start = memslot->base_gfn << PAGE_SHIFT;
@@ -1537,7 +1541,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         * logging_active is guaranteed to never be true for VM_PFNMAP
         * memslots.
         */
-       if (logging_active || is_protected_kvm_enabled()) {
+       if (logging_active) {
                force_pte = true;
                vma_shift = PAGE_SHIFT;
        } else {
arch/arm64/kvm/pkvm.c
index e65932f9a107d18071413f2e4312af9e263a61c0..793089ba23c5196fcd28fe993526281c3bc2714b 100644
@@ -351,7 +351,7 @@ int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
        u64 pfn = phys >> PAGE_SHIFT;
        int ret;
 
-       if (size != PAGE_SIZE)
+       if (size != PAGE_SIZE && size != PMD_SIZE)
                return -EINVAL;
 
        lockdep_assert_held_write(&kvm->mmu_lock);