git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
KVM: arm64: Use kvm_has_feat() to check if FEAT_SSBS is advertised to the guest
Author:    Shaoqin Huang <shahuang@redhat.com>
           Tue, 23 Jul 2024 07:20:01 +0000 (03:20 -0400)
Committer: Marc Zyngier <maz@kernel.org>
           Sun, 25 Aug 2024 16:48:44 +0000 (17:48 +0100)
Currently KVM uses cpus_have_final_cap() to check if FEAT_SSBS is
advertised to the guest. But because the feature is writable, it may
be hidden from the guest even when the host has it, making this
check wrong.

Update it to use kvm_has_feat() to check if FEAT_SSBS is advertised
to the guest, so that KVM can do the right thing when FEAT_SSBS isn't
advertised to the guest.

Signed-off-by: Shaoqin Huang <shahuang@redhat.com>
Link: https://lore.kernel.org/r/20240723072004.1470688-3-shahuang@redhat.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/hypercalls.c

index 5763d979d8cae00ac206a6f80f10c2d36f86bdac..ee6573befb8134eb6528d3eba4180b193c1c155f 100644 (file)
@@ -317,7 +317,7 @@ int kvm_smccc_call_handler(struct kvm_vcpu *vcpu)
                                 * to the guest, and hide SSBS so that the
                                 * guest stays protected.
                                 */
-                               if (cpus_have_final_cap(ARM64_SSBS))
+                               if (kvm_has_feat(vcpu->kvm, ID_AA64PFR1_EL1, SSBS, IMP))
                                        break;
                                fallthrough;
                        case SPECTRE_UNAFFECTED:
@@ -428,7 +428,7 @@ int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
  * Convert the workaround level into an easy-to-compare number, where higher
  * values mean better protection.
  */
-static int get_kernel_wa_level(u64 regid)
+static int get_kernel_wa_level(struct kvm_vcpu *vcpu, u64 regid)
 {
        switch (regid) {
        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
@@ -449,7 +449,7 @@ static int get_kernel_wa_level(u64 regid)
                         * don't have any FW mitigation if SSBS is there at
                         * all times.
                         */
-                       if (cpus_have_final_cap(ARM64_SSBS))
+                       if (kvm_has_feat(vcpu->kvm, ID_AA64PFR1_EL1, SSBS, IMP))
                                return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
                        fallthrough;
                case SPECTRE_UNAFFECTED:
@@ -486,7 +486,7 @@ int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
-               val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
+               val = get_kernel_wa_level(vcpu, reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
                break;
        case KVM_REG_ARM_STD_BMAP:
                val = READ_ONCE(smccc_feat->std_bmap);
@@ -588,7 +588,7 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
                if (val & ~KVM_REG_FEATURE_LEVEL_MASK)
                        return -EINVAL;
 
-               if (get_kernel_wa_level(reg->id) < val)
+               if (get_kernel_wa_level(vcpu, reg->id) < val)
                        return -EINVAL;
 
                return 0;
@@ -624,7 +624,7 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
                 * We can deal with NOT_AVAIL on NOT_REQUIRED, but not the
                 * other way around.
                 */
-               if (get_kernel_wa_level(reg->id) < wa_level)
+               if (get_kernel_wa_level(vcpu, reg->id) < wa_level)
                        return -EINVAL;
 
                return 0;