KVM: arm64: Add a range to pkvm_mappings
author     Quentin Perret <qperret@google.com>
           Wed, 21 May 2025 12:48:32 +0000 (13:48 +0100)
committer  Marc Zyngier <maz@kernel.org>
           Wed, 21 May 2025 13:33:51 +0000 (14:33 +0100)
In preparation for supporting stage-2 huge mappings for np-guests, add
an nr_pages member to struct pkvm_mapping so that EL1 can track the
size of the stage-2 mapping.

Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
Link: https://lore.kernel.org/r/20250521124834.1070650-9-vdonnefort@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
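
The sketch below is not part of the patch (names prefixed "example_" are invented for illustration); it shows what tracking nr_pages means for the stage-2 interval tree: a single pkvm_mapping node now covers [gfn, gfn + nr_pages) instead of exactly one page.

    /* Illustrative sketch only, assuming the struct change below. */
    static u64 example_mapping_start(struct pkvm_mapping *m)
    {
            return m->gfn * PAGE_SIZE;
    }

    static u64 example_mapping_end(struct pkvm_mapping *m)
    {
            /* Inclusive end: last byte of the last page of the mapping. */
            return (m->gfn + m->nr_pages) * PAGE_SIZE - 1;
    }

    /*
     * E.g. gfn = 0x100 with nr_pages = 512 (a 2MiB block of 4KiB pages)
     * covers the IPA range [0x100000, 0x2fffff].
     */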
arch/arm64/include/asm/kvm_pkvm.h
arch/arm64/kvm/pkvm.c

diff --git a/arch/arm64/include/asm/kvm_pkvm.h b/arch/arm64/include/asm/kvm_pkvm.h
index da75d41c948c1a86d962a09ca857ef7bf0dd0017..ea58282f59bb4fe8841064bfafffe9af3c17b667 100644
--- a/arch/arm64/include/asm/kvm_pkvm.h
+++ b/arch/arm64/include/asm/kvm_pkvm.h
@@ -173,6 +173,7 @@ struct pkvm_mapping {
        struct rb_node node;
        u64 gfn;
        u64 pfn;
+       u64 nr_pages;
        u64 __subtree_last;     /* Internal member for interval tree */
 };
 
diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c
index 0562da0249c331842a062e205de0fd063e103bb9..e65932f9a107d18071413f2e4312af9e263a61c0 100644
--- a/arch/arm64/kvm/pkvm.c
+++ b/arch/arm64/kvm/pkvm.c
@@ -283,7 +283,7 @@ static u64 __pkvm_mapping_start(struct pkvm_mapping *m)
 
 static u64 __pkvm_mapping_end(struct pkvm_mapping *m)
 {
-       return (m->gfn + 1) * PAGE_SIZE - 1;
+       return (m->gfn + m->nr_pages) * PAGE_SIZE - 1;
 }
 
 INTERVAL_TREE_DEFINE(struct pkvm_mapping, node, u64, __subtree_last,
@@ -324,7 +324,8 @@ static int __pkvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 start, u64 e
                return 0;
 
        for_each_mapping_in_range_safe(pgt, start, end, mapping) {
-               ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_guest, handle, mapping->gfn, 1);
+               ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_guest, handle, mapping->gfn,
+                                       mapping->nr_pages);
                if (WARN_ON(ret))
                        return ret;
                pkvm_mapping_remove(mapping, &pgt->pkvm_mappings);
@@ -354,16 +355,32 @@ int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
                return -EINVAL;
 
        lockdep_assert_held_write(&kvm->mmu_lock);
-       ret = kvm_call_hyp_nvhe(__pkvm_host_share_guest, pfn, gfn, 1, prot);
-       if (ret) {
-               /* Is the gfn already mapped due to a racing vCPU? */
-               if (ret == -EPERM)
+
+       /*
+        * Calling stage2_map() on top of existing mappings is either happening because of a race
+        * with another vCPU, or because we're changing between page and block mappings. As per
+        * user_mem_abort(), same-size permission faults are handled in the relax_perms() path.
+        */
+       mapping = pkvm_mapping_iter_first(&pgt->pkvm_mappings, addr, addr + size - 1);
+       if (mapping) {
+               if (size == (mapping->nr_pages * PAGE_SIZE))
                        return -EAGAIN;
+
+               /* Remove _any_ pkvm_mapping overlapping with the range, bigger or smaller. */
+               ret = __pkvm_pgtable_stage2_unmap(pgt, addr, addr + size);
+               if (ret)
+                       return ret;
+               mapping = NULL;
        }
 
+       ret = kvm_call_hyp_nvhe(__pkvm_host_share_guest, pfn, gfn, size / PAGE_SIZE, prot);
+       if (WARN_ON(ret))
+               return ret;
+
        swap(mapping, cache->mapping);
        mapping->gfn = gfn;
        mapping->pfn = pfn;
+       mapping->nr_pages = size / PAGE_SIZE;
        pkvm_mapping_insert(mapping, &pgt->pkvm_mappings);
 
        return ret;
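
To make the overlap handling in the hunk above concrete, here is a hedged walk-through; the helper name and the values are invented for illustration and are not taken from the patch.

    /* Illustrative only: the same-size test applied in the hunk above. */
    static bool example_same_size_mapping(struct pkvm_mapping *m, u64 size)
    {
            /* Same-size fault: leave permissions to the relax_perms() path. */
            return size == m->nr_pages * PAGE_SIZE;
    }

    /*
     * Existing node: gfn = 0x200, nr_pages = 1 (one 4KiB page).
     * New request:   a 2MiB block covering that gfn.
     * The sizes differ, so the old node is torn down via
     * __pkvm_pgtable_stage2_unmap() and a single node with
     * nr_pages = size / PAGE_SIZE = 512 is shared and inserted.
     * Had the sizes matched, -EAGAIN would be returned instead.
     */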
@@ -385,7 +402,8 @@ int pkvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size)
 
        lockdep_assert_held(&kvm->mmu_lock);
        for_each_mapping_in_range_safe(pgt, addr, addr + size, mapping) {
-               ret = kvm_call_hyp_nvhe(__pkvm_host_wrprotect_guest, handle, mapping->gfn, 1);
+               ret = kvm_call_hyp_nvhe(__pkvm_host_wrprotect_guest, handle, mapping->gfn,
+                                       mapping->nr_pages);
                if (WARN_ON(ret))
                        break;
        }
@@ -400,7 +418,8 @@ int pkvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
 
        lockdep_assert_held(&kvm->mmu_lock);
        for_each_mapping_in_range_safe(pgt, addr, addr + size, mapping)
-               __clean_dcache_guest_page(pfn_to_kaddr(mapping->pfn), PAGE_SIZE);
+               __clean_dcache_guest_page(pfn_to_kaddr(mapping->pfn),
+                                         PAGE_SIZE * mapping->nr_pages);
 
        return 0;
 }
@@ -415,7 +434,7 @@ bool pkvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr, u64
        lockdep_assert_held(&kvm->mmu_lock);
        for_each_mapping_in_range_safe(pgt, addr, addr + size, mapping)
                young |= kvm_call_hyp_nvhe(__pkvm_host_test_clear_young_guest, handle, mapping->gfn,
-                                          1, mkold);
+                                          mapping->nr_pages, mkold);
 
        return young;
 }
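
As a closing illustration (not part of the patch), the per-mapping walkers above now operate on whole mappings: flushing a 2MiB block mapping, for example, issues one dcache clean of 512 * PAGE_SIZE bytes over the mapping's kernel alias rather than a page at a time.

    /* Illustrative only: clean one mapping's pages through its kernel alias. */
    static void example_flush_one_mapping(struct pkvm_mapping *m)
    {
            __clean_dcache_guest_page(pfn_to_kaddr(m->pfn),
                                      m->nr_pages * PAGE_SIZE);
    }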