git.ipfire.org Git - thirdparty/linux.git/commitdiff
KVM: arm64: Add a range to __pkvm_host_share_guest()
author: Vincent Donnefort <vdonnefort@google.com>
Wed, 21 May 2025 12:48:27 +0000 (13:48 +0100)
committer: Marc Zyngier <maz@kernel.org>
Wed, 21 May 2025 13:33:51 +0000 (14:33 +0100)
In preparation for supporting stage-2 huge mappings for np-guests, add a
nr_pages argument to the __pkvm_host_share_guest hypercall. This argument
supports only two values: 1 or PMD_SIZE / PAGE_SIZE (that is, 512 on a
system with 4K pages).

Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
Link: https://lore.kernel.org/r/20250521124834.1070650-4-vdonnefort@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
arch/arm64/kvm/hyp/nvhe/hyp-main.c
arch/arm64/kvm/hyp/nvhe/mem_protect.c
arch/arm64/kvm/pkvm.c

index 26016eb9323fa49e3b386897f6f9874949b4514e..47aa7b01114ff7ed8ac6ec67383ea82f5fb79c6f 100644 (file)
@@ -39,7 +39,7 @@ int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages);
 int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages);
 int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages);
 int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages);
-int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu,
+int __pkvm_host_share_guest(u64 pfn, u64 gfn, u64 nr_pages, struct pkvm_hyp_vcpu *vcpu,
                            enum kvm_pgtable_prot prot);
 int __pkvm_host_unshare_guest(u64 gfn, struct pkvm_hyp_vm *hyp_vm);
 int __pkvm_host_relax_perms_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu, enum kvm_pgtable_prot prot);
index 59db9606e6e15f30a35c694495477afda908a49b..4d3d215955c3252ce2d343ae1f7877b6f7c9fdad 100644 (file)
@@ -245,7 +245,8 @@ static void handle___pkvm_host_share_guest(struct kvm_cpu_context *host_ctxt)
 {
        DECLARE_REG(u64, pfn, host_ctxt, 1);
        DECLARE_REG(u64, gfn, host_ctxt, 2);
-       DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 3);
+       DECLARE_REG(u64, nr_pages, host_ctxt, 3);
+       DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 4);
        struct pkvm_hyp_vcpu *hyp_vcpu;
        int ret = -EINVAL;
 
@@ -260,7 +261,7 @@ static void handle___pkvm_host_share_guest(struct kvm_cpu_context *host_ctxt)
        if (ret)
                goto out;
 
-       ret = __pkvm_host_share_guest(pfn, gfn, hyp_vcpu, prot);
+       ret = __pkvm_host_share_guest(pfn, gfn, nr_pages, hyp_vcpu, prot);
 out:
        cpu_reg(host_ctxt, 1) =  ret;
 }
index 1018a6f66359171c6179f64c5becef88fa3388aa..8e0847aa090d26612759495f12f8b528f859fef9 100644 (file)
@@ -695,10 +695,9 @@ static enum pkvm_page_state guest_get_page_state(kvm_pte_t pte, u64 addr)
        return pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte));
 }
 
-static int __guest_check_page_state_range(struct pkvm_hyp_vcpu *vcpu, u64 addr,
+static int __guest_check_page_state_range(struct pkvm_hyp_vm *vm, u64 addr,
                                          u64 size, enum pkvm_page_state state)
 {
-       struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
        struct check_walk_data d = {
                .desired        = state,
                .get_page_state = guest_get_page_state,
@@ -907,48 +906,72 @@ int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages)
        return ret;
 }
 
-int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu,
+static int __guest_check_transition_size(u64 phys, u64 ipa, u64 nr_pages, u64 *size)
+{
+       if (nr_pages == 1) {
+               *size = PAGE_SIZE;
+               return 0;
+       }
+
+       return -EINVAL;
+}
+
+int __pkvm_host_share_guest(u64 pfn, u64 gfn, u64 nr_pages, struct pkvm_hyp_vcpu *vcpu,
                            enum kvm_pgtable_prot prot)
 {
        struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
        u64 phys = hyp_pfn_to_phys(pfn);
        u64 ipa = hyp_pfn_to_phys(gfn);
-       struct hyp_page *page;
+       u64 size;
        int ret;
 
        if (prot & ~KVM_PGTABLE_PROT_RWX)
                return -EINVAL;
 
-       ret = check_range_allowed_memory(phys, phys + PAGE_SIZE);
+       ret = __guest_check_transition_size(phys, ipa, nr_pages, &size);
+       if (ret)
+               return ret;
+
+       ret = check_range_allowed_memory(phys, phys + size);
        if (ret)
                return ret;
 
        host_lock_component();
        guest_lock_component(vm);
 
-       ret = __guest_check_page_state_range(vcpu, ipa, PAGE_SIZE, PKVM_NOPAGE);
+       ret = __guest_check_page_state_range(vm, ipa, size, PKVM_NOPAGE);
        if (ret)
                goto unlock;
 
-       page = hyp_phys_to_page(phys);
-       switch (get_host_state(page)) {
-       case PKVM_PAGE_OWNED:
-               WARN_ON(__host_set_page_state_range(phys, PAGE_SIZE, PKVM_PAGE_SHARED_OWNED));
-               break;
-       case PKVM_PAGE_SHARED_OWNED:
-               if (page->host_share_guest_count)
-                       break;
-               /* Only host to np-guest multi-sharing is tolerated */
-               fallthrough;
-       default:
-               ret = -EPERM;
-               goto unlock;
+       for_each_hyp_page(page, phys, size) {
+               switch (get_host_state(page)) {
+               case PKVM_PAGE_OWNED:
+                       continue;
+               case PKVM_PAGE_SHARED_OWNED:
+                       if (page->host_share_guest_count == U32_MAX) {
+                               ret = -EBUSY;
+                               goto unlock;
+                       }
+
+                       /* Only host to np-guest multi-sharing is tolerated */
+                       if (page->host_share_guest_count)
+                               continue;
+
+                       fallthrough;
+               default:
+                       ret = -EPERM;
+                       goto unlock;
+               }
        }
 
-       WARN_ON(kvm_pgtable_stage2_map(&vm->pgt, ipa, PAGE_SIZE, phys,
+       for_each_hyp_page(page, phys, size) {
+               set_host_state(page, PKVM_PAGE_SHARED_OWNED);
+               page->host_share_guest_count++;
+       }
+
+       WARN_ON(kvm_pgtable_stage2_map(&vm->pgt, ipa, size, phys,
                                       pkvm_mkstate(prot, PKVM_PAGE_SHARED_BORROWED),
                                       &vcpu->vcpu.arch.pkvm_memcache, 0));
-       page->host_share_guest_count++;
 
 unlock:
        guest_unlock_component(vm);
@@ -1169,6 +1192,9 @@ static void assert_page_state(void)
        struct pkvm_hyp_vcpu *vcpu = &selftest_vcpu;
        u64 phys = hyp_virt_to_phys(virt);
        u64 ipa[2] = { selftest_ipa(), selftest_ipa() + PAGE_SIZE };
+       struct pkvm_hyp_vm *vm;
+
+       vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
 
        host_lock_component();
        WARN_ON(__host_check_page_state_range(phys, size, selftest_state.host));
@@ -1179,8 +1205,8 @@ static void assert_page_state(void)
        hyp_unlock_component();
 
        guest_lock_component(&selftest_vm);
-       WARN_ON(__guest_check_page_state_range(vcpu, ipa[0], size, selftest_state.guest[0]));
-       WARN_ON(__guest_check_page_state_range(vcpu, ipa[1], size, selftest_state.guest[1]));
+       WARN_ON(__guest_check_page_state_range(vm, ipa[0], size, selftest_state.guest[0]));
+       WARN_ON(__guest_check_page_state_range(vm, ipa[1], size, selftest_state.guest[1]));
        guest_unlock_component(&selftest_vm);
 }
 
@@ -1218,7 +1244,7 @@ void pkvm_ownership_selftest(void *base)
        assert_transition_res(-EPERM,   __pkvm_host_share_ffa, pfn, 1);
        assert_transition_res(-EPERM,   __pkvm_host_unshare_ffa, pfn, 1);
        assert_transition_res(-EPERM,   hyp_pin_shared_mem, virt, virt + size);
-       assert_transition_res(-EPERM,   __pkvm_host_share_guest, pfn, gfn, vcpu, prot);
+       assert_transition_res(-EPERM,   __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
        assert_transition_res(-ENOENT,  __pkvm_host_unshare_guest, gfn, vm);
 
        selftest_state.host = PKVM_PAGE_OWNED;
@@ -1237,7 +1263,7 @@ void pkvm_ownership_selftest(void *base)
        assert_transition_res(-EPERM,   __pkvm_host_donate_hyp, pfn, 1);
        assert_transition_res(-EPERM,   __pkvm_host_share_ffa, pfn, 1);
        assert_transition_res(-EPERM,   __pkvm_hyp_donate_host, pfn, 1);
-       assert_transition_res(-EPERM,   __pkvm_host_share_guest, pfn, gfn, vcpu, prot);
+       assert_transition_res(-EPERM,   __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
        assert_transition_res(-ENOENT,  __pkvm_host_unshare_guest, gfn, vm);
 
        assert_transition_res(0,        hyp_pin_shared_mem, virt, virt + size);
@@ -1249,7 +1275,7 @@ void pkvm_ownership_selftest(void *base)
        assert_transition_res(-EPERM,   __pkvm_host_donate_hyp, pfn, 1);
        assert_transition_res(-EPERM,   __pkvm_host_share_ffa, pfn, 1);
        assert_transition_res(-EPERM,   __pkvm_hyp_donate_host, pfn, 1);
-       assert_transition_res(-EPERM,   __pkvm_host_share_guest, pfn, gfn, vcpu, prot);
+       assert_transition_res(-EPERM,   __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
        assert_transition_res(-ENOENT,  __pkvm_host_unshare_guest, gfn, vm);
 
        hyp_unpin_shared_mem(virt, virt + size);
@@ -1268,7 +1294,7 @@ void pkvm_ownership_selftest(void *base)
        assert_transition_res(-EPERM,   __pkvm_host_share_hyp, pfn);
        assert_transition_res(-EPERM,   __pkvm_host_unshare_hyp, pfn);
        assert_transition_res(-EPERM,   __pkvm_hyp_donate_host, pfn, 1);
-       assert_transition_res(-EPERM,   __pkvm_host_share_guest, pfn, gfn, vcpu, prot);
+       assert_transition_res(-EPERM,   __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
        assert_transition_res(-ENOENT,  __pkvm_host_unshare_guest, gfn, vm);
        assert_transition_res(-EPERM,   hyp_pin_shared_mem, virt, virt + size);
 
@@ -1279,8 +1305,8 @@ void pkvm_ownership_selftest(void *base)
 
        selftest_state.host = PKVM_PAGE_SHARED_OWNED;
        selftest_state.guest[0] = PKVM_PAGE_SHARED_BORROWED;
-       assert_transition_res(0,        __pkvm_host_share_guest, pfn, gfn, vcpu, prot);
-       assert_transition_res(-EPERM,   __pkvm_host_share_guest, pfn, gfn, vcpu, prot);
+       assert_transition_res(0,        __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
+       assert_transition_res(-EPERM,   __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
        assert_transition_res(-EPERM,   __pkvm_host_share_ffa, pfn, 1);
        assert_transition_res(-EPERM,   __pkvm_host_donate_hyp, pfn, 1);
        assert_transition_res(-EPERM,   __pkvm_host_share_hyp, pfn);
@@ -1289,7 +1315,7 @@ void pkvm_ownership_selftest(void *base)
        assert_transition_res(-EPERM,   hyp_pin_shared_mem, virt, virt + size);
 
        selftest_state.guest[1] = PKVM_PAGE_SHARED_BORROWED;
-       assert_transition_res(0,        __pkvm_host_share_guest, pfn, gfn + 1, vcpu, prot);
+       assert_transition_res(0,        __pkvm_host_share_guest, pfn, gfn + 1, 1, vcpu, prot);
        WARN_ON(hyp_virt_to_page(virt)->host_share_guest_count != 2);
 
        selftest_state.guest[0] = PKVM_NOPAGE;
index f4761a47928037d3e35d3c91b7d752162db3dbcd..987bc5fb18f9fe8ca6cf4600937e40f7232d6da5 100644 (file)
@@ -366,7 +366,7 @@ int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
                return -EINVAL;
 
        lockdep_assert_held_write(&kvm->mmu_lock);
-       ret = kvm_call_hyp_nvhe(__pkvm_host_share_guest, pfn, gfn, prot);
+       ret = kvm_call_hyp_nvhe(__pkvm_host_share_guest, pfn, gfn, 1, prot);
        if (ret) {
                /* Is the gfn already mapped due to a racing vCPU? */
                if (ret == -EPERM)