git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
KVM: arm64: Add hvc handler at EL2 for hypercalls from protected VMs
authorWill Deacon <will@kernel.org>
Mon, 30 Mar 2026 14:48:29 +0000 (15:48 +0100)
committerMarc Zyngier <maz@kernel.org>
Mon, 30 Mar 2026 15:58:09 +0000 (16:58 +0100)
Add a hypercall handler at EL2 for hypercalls originating from protected
VMs. For now, this implements only the FEATURES and MEMINFO calls, but
subsequent patches will implement the SHARE and UNSHARE functions
necessary for virtio.

Unhandled hypercalls (including PSCI) are passed back to the host.

Reviewed-by: Vincent Donnefort <vdonnefort@google.com>
Tested-by: Fuad Tabba <tabba@google.com>
Tested-by: Mostafa Saleh <smostafa@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
Link: https://patch.msgid.link/20260330144841.26181-29-will@kernel.org
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/hyp/include/nvhe/pkvm.h
arch/arm64/kvm/hyp/nvhe/pkvm.c
arch/arm64/kvm/hyp/nvhe/switch.c

index a5a7bb453f3e4e01e2340e519ae4dfdf1de53904..c904647d2f7606e2477dff5de918bfd22380a1ba 100644 (file)
@@ -88,6 +88,7 @@ struct pkvm_hyp_vm *get_pkvm_hyp_vm(pkvm_handle_t handle);
 struct pkvm_hyp_vm *get_np_pkvm_hyp_vm(pkvm_handle_t handle);
 void put_pkvm_hyp_vm(struct pkvm_hyp_vm *hyp_vm);
 
+bool kvm_handle_pvm_hvc64(struct kvm_vcpu *vcpu, u64 *exit_code);
 bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code);
 bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code);
 void kvm_init_pvm_id_regs(struct kvm_vcpu *vcpu);
index cdeefe3d74ff7af26424a36501a08c069df6e1a2..1f184c4994faf54c373a204f4f83b7cb52ff982d 100644 (file)
@@ -4,6 +4,8 @@
  * Author: Fuad Tabba <tabba@google.com>
  */
 
+#include <kvm/arm_hypercalls.h>
+
 #include <linux/kvm_host.h>
 #include <linux/mm.h>
 
@@ -971,3 +973,38 @@ err_unlock:
        hyp_spin_unlock(&vm_table_lock);
        return err;
 }
+/*
+ * Handler for protected VM HVC calls.
+ *
+ * Returns true if the hypervisor has handled the exit (and control
+ * should return to the guest) or false if it hasn't (and the handling
+ * should be performed by the host).
+ */
+bool kvm_handle_pvm_hvc64(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+       /*
+        * Default retval: any handled case that bails out early (e.g. a
+        * malformed argument) reports SMCCC_RET_INVALID_PARAMETER back
+        * to the guest.
+        */
+       u64 val[4] = { SMCCC_RET_INVALID_PARAMETER };
+       bool handled = true;
+
+       switch (smccc_get_function(vcpu)) {
+       case ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID:
+               /* Advertise only FEATURES and HYP_MEMINFO for now. */
+               val[0] = BIT(ARM_SMCCC_KVM_FUNC_FEATURES);
+               val[0] |= BIT(ARM_SMCCC_KVM_FUNC_HYP_MEMINFO);
+               break;
+       case ARM_SMCCC_VENDOR_HYP_KVM_HYP_MEMINFO_FUNC_ID:
+               /*
+                * MEMINFO takes no arguments; reject the call (leaving
+                * the INVALID_PARAMETER retval) if any are non-zero.
+                */
+               if (smccc_get_arg1(vcpu) ||
+                   smccc_get_arg2(vcpu) ||
+                   smccc_get_arg3(vcpu)) {
+                       break;
+               }
+
+               /* Report the hypervisor's page size to the guest. */
+               val[0] = PAGE_SIZE;
+               break;
+       default:
+               /* Punt everything else back to the host, for now. */
+               handled = false;
+       }
+
+       if (handled)
+               smccc_set_retval(vcpu, val[0], val[1], val[2], val[3]);
+       return handled;
+}
index 779089e42681e8c4add0e404e55a2f9b912ba38a..51bd88dc6012f82e99ad0f49de430d611984e2d2 100644 (file)
@@ -190,6 +190,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
 
 static const exit_handler_fn pvm_exit_handlers[] = {
        [0 ... ESR_ELx_EC_MAX]          = NULL,
+       [ESR_ELx_EC_HVC64]              = kvm_handle_pvm_hvc64,
        [ESR_ELx_EC_SYS64]              = kvm_handle_pvm_sys64,
        [ESR_ELx_EC_SVE]                = kvm_handle_pvm_restricted,
        [ESR_ELx_EC_FP_ASIMD]           = kvm_hyp_handle_fpsimd,