git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
KVM: arm64: Annotate guest donations with handle and gfn in host stage-2
author: Will Deacon <will@kernel.org>
Mon, 30 Mar 2026 14:48:25 +0000 (15:48 +0100)
committer: Marc Zyngier <maz@kernel.org>
Mon, 30 Mar 2026 15:58:08 +0000 (16:58 +0100)
Handling host kernel faults arising from accesses to donated guest
memory will require an rmap-like mechanism to identify the guest mapping
of the faulting page.

Extend the page donation logic to encode the guest handle and gfn
alongside the owner information in the host stage-2 pte.

Reviewed-by: Fuad Tabba <tabba@google.com>
Tested-by: Fuad Tabba <tabba@google.com>
Tested-by: Mostafa Saleh <smostafa@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
Link: https://patch.msgid.link/20260330144841.26181-25-will@kernel.org
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/hyp/nvhe/mem_protect.c

index 90003cbf5603bc152a8c0bd152ee1fc242d606d8..51cb5c89fd207fbdae3bbc17b03039bff54e8293 100644 (file)
@@ -593,7 +593,6 @@ int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id)
                if (!ret)
                        __host_update_page_state(addr, size, PKVM_PAGE_OWNED);
                break;
-       case PKVM_ID_GUEST:
        case PKVM_ID_HYP:
                ret = host_stage2_set_owner_metadata_locked(addr, size,
                                                            owner_id, 0);
@@ -603,6 +602,20 @@ int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id)
        return ret;
 }
 
+#define KVM_HOST_PTE_OWNER_GUEST_HANDLE_MASK   GENMASK(15, 0)
+/* We need 40 bits for the GFN to cover a 52-bit IPA with 4k pages and LPA2 */
+#define KVM_HOST_PTE_OWNER_GUEST_GFN_MASK      GENMASK(55, 16)
+static u64 host_stage2_encode_gfn_meta(struct pkvm_hyp_vm *vm, u64 gfn)
+{
+       pkvm_handle_t handle = vm->kvm.arch.pkvm.handle;
+
+       BUILD_BUG_ON((pkvm_handle_t)-1 > KVM_HOST_PTE_OWNER_GUEST_HANDLE_MASK);
+       WARN_ON(!FIELD_FIT(KVM_HOST_PTE_OWNER_GUEST_GFN_MASK, gfn));
+
+       return FIELD_PREP(KVM_HOST_PTE_OWNER_GUEST_HANDLE_MASK, handle) |
+              FIELD_PREP(KVM_HOST_PTE_OWNER_GUEST_GFN_MASK, gfn);
+}
+
 static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot)
 {
        /*
@@ -1125,6 +1138,7 @@ int __pkvm_host_donate_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu)
        struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
        u64 phys = hyp_pfn_to_phys(pfn);
        u64 ipa = hyp_pfn_to_phys(gfn);
+       u64 meta;
        int ret;
 
        host_lock_component();
@@ -1138,7 +1152,9 @@ int __pkvm_host_donate_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu)
        if (ret)
                goto unlock;
 
-       WARN_ON(host_stage2_set_owner_locked(phys, PAGE_SIZE, PKVM_ID_GUEST));
+       meta = host_stage2_encode_gfn_meta(vm, gfn);
+       WARN_ON(host_stage2_set_owner_metadata_locked(phys, PAGE_SIZE,
+                                                     PKVM_ID_GUEST, meta));
        WARN_ON(kvm_pgtable_stage2_map(&vm->pgt, ipa, PAGE_SIZE, phys,
                                       pkvm_mkstate(KVM_PGTABLE_PROT_RWX, PKVM_PAGE_OWNED),
                                       &vcpu->vcpu.arch.pkvm_memcache, 0));