git.ipfire.org Git - thirdparty/linux.git/commitdiff
KVM: arm64: Allow userspace to limit NV support to nVHE
author: Marc Zyngier <maz@kernel.org>
Thu, 20 Feb 2025 13:49:03 +0000 (13:49 +0000)
committer: Oliver Upton <oliver.upton@linux.dev>
Mon, 24 Feb 2025 19:30:17 +0000 (11:30 -0800)
NV is hard. No kidding.

In order to make things simpler, we have established that NV would
support two mutually exclusive configurations:

- VHE-only, and supporting recursive virtualisation

- nVHE-only, and not supporting recursive virtualisation

For that purpose, introduce a new vcpu feature flag that denotes
the second configuration. We use this flag to limit the idregs
further.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Link: https://lore.kernel.org/r/20250220134907.554085-11-maz@kernel.org
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
arch/arm64/include/uapi/asm/kvm.h
arch/arm64/kvm/nested.c

index 568bf858f3198e2a6f651eb8ae793b54fd49e67a..3bcab2a106c983a7ad72bda09f7c5f0e941ef84d 100644 (file)
@@ -105,6 +105,7 @@ struct kvm_regs {
 #define KVM_ARM_VCPU_PTRAUTH_ADDRESS   5 /* VCPU uses address authentication */
 #define KVM_ARM_VCPU_PTRAUTH_GENERIC   6 /* VCPU uses generic authentication */
 #define KVM_ARM_VCPU_HAS_EL2           7 /* Support nested virtualization */
+#define KVM_ARM_VCPU_HAS_EL2_E2H0      8 /* Limit NV support to E2H RES0 */
 
 struct kvm_vcpu_init {
        __u32 target;
index 409e5e67ae1e1a24537488cb06132e9da2b0d816..933dc3acac5fb71bd3ceff78084241677ada1135 100644 (file)
@@ -51,6 +51,10 @@ int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
        struct kvm_s2_mmu *tmp;
        int num_mmus, ret = 0;
 
+       if (test_bit(KVM_ARM_VCPU_HAS_EL2_E2H0, kvm->arch.vcpu_features) &&
+           !cpus_have_final_cap(ARM64_HAS_HCR_NV1))
+               return -EINVAL;
+
        /*
         * Let's treat memory allocation failures as benign: If we fail to
         * allocate anything, return an error and keep the allocated array
@@ -894,6 +898,9 @@ u64 limit_nv_id_reg(struct kvm *kvm, u32 reg, u64 val)
                        ID_AA64MMFR1_EL1_HPDS   |
                        ID_AA64MMFR1_EL1_VH     |
                        ID_AA64MMFR1_EL1_VMIDBits);
+               /* FEAT_E2H0 implies no VHE */
+               if (test_bit(KVM_ARM_VCPU_HAS_EL2_E2H0, kvm->arch.vcpu_features))
+                       val &= ~ID_AA64MMFR1_EL1_VH;
                break;
 
        case SYS_ID_AA64MMFR2_EL1:
@@ -909,8 +916,25 @@ u64 limit_nv_id_reg(struct kvm *kvm, u32 reg, u64 val)
                break;
 
        case SYS_ID_AA64MMFR4_EL1:
-               val = SYS_FIELD_PREP_ENUM(ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY);
-               val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR4_EL1, E2H0, NI_NV1);
+               /*
+                * You get EITHER
+                *
+                * - FEAT_VHE without FEAT_E2H0
+                * - FEAT_NV limited to FEAT_NV2
+                * - HCR_EL2.NV1 being RES0
+                *
+                * OR
+                *
+                * - FEAT_E2H0 without FEAT_VHE nor FEAT_NV
+                *
+                * Life is too short for anything else.
+                */
+               if (test_bit(KVM_ARM_VCPU_HAS_EL2_E2H0, kvm->arch.vcpu_features)) {
+                       val = 0;
+               } else {
+                       val = SYS_FIELD_PREP_ENUM(ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY);
+                       val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR4_EL1, E2H0, NI_NV1);
+               }
                break;
 
        case SYS_ID_AA64DFR0_EL1: