git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
KVM: arm64: Implement the MEM_UNSHARE hypercall for protected VMs
authorWill Deacon <will@kernel.org>
Mon, 30 Mar 2026 14:48:31 +0000 (15:48 +0100)
committerMarc Zyngier <maz@kernel.org>
Mon, 30 Mar 2026 15:58:09 +0000 (16:58 +0100)
Implement the ARM_SMCCC_KVM_FUNC_MEM_UNSHARE hypercall to allow
protected VMs to unshare memory that was previously shared with the host
using the ARM_SMCCC_KVM_FUNC_MEM_SHARE hypercall.

Reviewed-by: Vincent Donnefort <vdonnefort@google.com>
Tested-by: Fuad Tabba <tabba@google.com>
Tested-by: Mostafa Saleh <smostafa@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
Link: https://patch.msgid.link/20260330144841.26181-31-will@kernel.org
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
arch/arm64/kvm/hyp/nvhe/mem_protect.c
arch/arm64/kvm/hyp/nvhe/pkvm.c

index fea8aecae5efaa4488f0dc585403168ff11b511d..99d8398afe20b8d290bbc619e07b375baf24d472 100644 (file)
@@ -35,6 +35,7 @@ extern unsigned long hyp_nr_cpus;
 int __pkvm_prot_finalize(void);
 int __pkvm_host_share_hyp(u64 pfn);
 int __pkvm_guest_share_host(struct pkvm_hyp_vcpu *vcpu, u64 gfn);
+int __pkvm_guest_unshare_host(struct pkvm_hyp_vcpu *vcpu, u64 gfn);
 int __pkvm_host_unshare_hyp(u64 pfn);
 int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages);
 int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages);
index 593eca37f8633866c640727ca4238c40975f9e29..db94323b430c2c33ee4256ae5bb257f5357036c6 100644 (file)
@@ -991,6 +991,40 @@ unlock:
        return ret;
 }
 
+/*
+ * Tear down a guest->host memory share for a single page of a protected VM.
+ *
+ * @vcpu:	The hyp vCPU issuing the unshare (used to locate the VM and
+ *		its page-table memcache).
+ * @gfn:	Guest frame number of the page to unshare.
+ *
+ * Returns 0 on success, a negative error code from the guest PTE walk, or
+ * -EPERM if the page is not currently in the shared state on both sides
+ * (guest: SHARED_OWNED, host: SHARED_BORROWED).
+ */
+int __pkvm_guest_unshare_host(struct pkvm_hyp_vcpu *vcpu, u64 gfn)
+{
+       struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
+       u64 meta, phys, ipa = hyp_pfn_to_phys(gfn);
+       kvm_pte_t pte;
+       int ret;
+
+       /* Lock ordering: host component first, then the guest's stage-2. */
+       host_lock_component();
+       guest_lock_component(vm);
+
+       /* Resolve the guest IPA to a valid PTE and its backing PA. */
+       ret = get_valid_guest_pte(vm, ipa, &pte, &phys);
+       if (ret)
+               goto unlock;
+
+       /*
+        * The unshare is only legal if the page was previously shared via
+        * MEM_SHARE: the guest must see it as SHARED_OWNED and the host as
+        * SHARED_BORROWED. Reject any other combination.
+        */
+       ret = -EPERM;
+       if (pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte)) != PKVM_PAGE_SHARED_OWNED)
+               goto unlock;
+       if (__host_check_page_state_range(phys, PAGE_SIZE, PKVM_PAGE_SHARED_BORROWED))
+               goto unlock;
+
+       /*
+        * Both checks passed under the locks, so the state transitions below
+        * are not expected to fail; WARN (rather than unwind) if they do.
+        * First hand ownership of the PA back to the guest in the host's
+        * stage-2 metadata, then remap the page in the guest's stage-2 as
+        * exclusively owned with full RWX permissions.
+        */
+       ret = 0;
+       meta = host_stage2_encode_gfn_meta(vm, gfn);
+       WARN_ON(host_stage2_set_owner_metadata_locked(phys, PAGE_SIZE,
+                                                     PKVM_ID_GUEST, meta));
+       WARN_ON(kvm_pgtable_stage2_map(&vm->pgt, ipa, PAGE_SIZE, phys,
+                                      pkvm_mkstate(KVM_PGTABLE_PROT_RWX, PKVM_PAGE_OWNED),
+                                      &vcpu->vcpu.arch.pkvm_memcache, 0));
+unlock:
+       guest_unlock_component(vm);
+       host_unlock_component();
+
+       return ret;
+}
+
 int __pkvm_host_unshare_hyp(u64 pfn)
 {
        u64 phys = hyp_pfn_to_phys(pfn);
index 40830760386368dd3bcce079452eb72c7c50f81b..6f3b94a37fe39d5097fd7c8c12540a8ff68df40c 100644 (file)
@@ -1025,6 +1025,19 @@ out_host:
        return false;
 }
 
+/*
+ * HVC front-end for ARM_SMCCC_KVM_FUNC_MEM_UNSHARE.
+ *
+ * Arg1 carries the page-aligned IPA to unshare. On success, ret[0] is set
+ * to SMCCC_RET_SUCCESS; otherwise ret[0] is left at whatever the caller
+ * preloaded (presumably an SMCCC error value — confirm in the dispatcher).
+ */
+static void pkvm_memunshare_call(u64 *ret, struct kvm_vcpu *vcpu)
+{
+       struct pkvm_hyp_vcpu *hyp_vcpu;
+       u64 ipa = smccc_get_arg1(vcpu);
+
+       /* Only whole pages can be unshared; reject unaligned IPAs. */
+       if (!PAGE_ALIGNED(ipa))
+               return;
+
+       hyp_vcpu = container_of(vcpu, struct pkvm_hyp_vcpu, vcpu);
+       /* hyp_phys_to_pfn() here just shifts the IPA down to a gfn. */
+       if (!__pkvm_guest_unshare_host(hyp_vcpu, hyp_phys_to_pfn(ipa)))
+               ret[0] = SMCCC_RET_SUCCESS;
+}
+
 /*
  * Handler for protected VM HVC calls.
  *
@@ -1042,6 +1055,7 @@ bool kvm_handle_pvm_hvc64(struct kvm_vcpu *vcpu, u64 *exit_code)
                val[0] = BIT(ARM_SMCCC_KVM_FUNC_FEATURES);
                val[0] |= BIT(ARM_SMCCC_KVM_FUNC_HYP_MEMINFO);
                val[0] |= BIT(ARM_SMCCC_KVM_FUNC_MEM_SHARE);
+               val[0] |= BIT(ARM_SMCCC_KVM_FUNC_MEM_UNSHARE);
                break;
        case ARM_SMCCC_VENDOR_HYP_KVM_HYP_MEMINFO_FUNC_ID:
                if (smccc_get_arg1(vcpu) ||
@@ -1060,6 +1074,14 @@ bool kvm_handle_pvm_hvc64(struct kvm_vcpu *vcpu, u64 *exit_code)
 
                handled = pkvm_memshare_call(val, vcpu, exit_code);
                break;
+       case ARM_SMCCC_VENDOR_HYP_KVM_MEM_UNSHARE_FUNC_ID:
+               if (smccc_get_arg2(vcpu) ||
+                   smccc_get_arg3(vcpu)) {
+                       break;
+               }
+
+               pkvm_memunshare_call(val, vcpu);
+               break;
        default:
                /* Punt everything else back to the host, for now. */
                handled = false;