git.ipfire.org Git - thirdparty/linux.git/commitdiff
KVM: SVM: Drop "always" flag from list of possible passthrough MSRs
authorSean Christopherson <seanjc@google.com>
Tue, 10 Jun 2025 22:57:21 +0000 (15:57 -0700)
committerSean Christopherson <seanjc@google.com>
Fri, 20 Jun 2025 20:07:27 +0000 (13:07 -0700)
Drop the "always" flag from the array of possible passthrough MSRs, and
instead manually initialize the permissions for the handful of MSRs that
KVM passes through by default.  In addition to cutting down on boilerplate
copy+paste code and eliminating a misleading flag (the MSRs aren't always
passed through, e.g. thanks to MSR filters), this will allow for removing
the direct_access_msrs array entirely.

Link: https://lore.kernel.org/r/20250610225737.156318-17-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/svm/svm.c

index 7cdb6503b88a1e1978244b804f178c443b5de8c5..6e7040f86e9e1735405f09eee2dc0ec619ab15f8 100644 (file)
@@ -84,51 +84,48 @@ static DEFINE_PER_CPU(u64, current_tsc_ratio);
 
 #define X2APIC_MSR(x)  (APIC_BASE_MSR + (x >> 4))
 
-static const struct svm_direct_access_msrs {
-       u32 index;   /* Index of the MSR */
-       bool always; /* True if intercept is initially cleared */
-} direct_access_msrs[] = {
-       { .index = MSR_STAR,                            .always = true  },
-       { .index = MSR_IA32_SYSENTER_CS,                .always = true  },
-       { .index = MSR_IA32_SYSENTER_EIP,               .always = false },
-       { .index = MSR_IA32_SYSENTER_ESP,               .always = false },
+static const u32 direct_access_msrs[] = {
+       MSR_STAR,
+       MSR_IA32_SYSENTER_CS,
+       MSR_IA32_SYSENTER_EIP,
+       MSR_IA32_SYSENTER_ESP,
 #ifdef CONFIG_X86_64
-       { .index = MSR_GS_BASE,                         .always = true  },
-       { .index = MSR_FS_BASE,                         .always = true  },
-       { .index = MSR_KERNEL_GS_BASE,                  .always = true  },
-       { .index = MSR_LSTAR,                           .always = true  },
-       { .index = MSR_CSTAR,                           .always = true  },
-       { .index = MSR_SYSCALL_MASK,                    .always = true  },
+       MSR_GS_BASE,
+       MSR_FS_BASE,
+       MSR_KERNEL_GS_BASE,
+       MSR_LSTAR,
+       MSR_CSTAR,
+       MSR_SYSCALL_MASK,
 #endif
-       { .index = MSR_IA32_SPEC_CTRL,                  .always = false },
-       { .index = MSR_IA32_PRED_CMD,                   .always = false },
-       { .index = MSR_IA32_FLUSH_CMD,                  .always = false },
-       { .index = MSR_IA32_DEBUGCTLMSR,                .always = false },
-       { .index = MSR_IA32_LASTBRANCHFROMIP,           .always = false },
-       { .index = MSR_IA32_LASTBRANCHTOIP,             .always = false },
-       { .index = MSR_IA32_LASTINTFROMIP,              .always = false },
-       { .index = MSR_IA32_LASTINTTOIP,                .always = false },
-       { .index = MSR_IA32_XSS,                        .always = false },
-       { .index = MSR_EFER,                            .always = false },
-       { .index = MSR_IA32_CR_PAT,                     .always = false },
-       { .index = MSR_AMD64_SEV_ES_GHCB,               .always = false },
-       { .index = MSR_TSC_AUX,                         .always = false },
-       { .index = X2APIC_MSR(APIC_ID),                 .always = false },
-       { .index = X2APIC_MSR(APIC_LVR),                .always = false },
-       { .index = X2APIC_MSR(APIC_TASKPRI),            .always = false },
-       { .index = X2APIC_MSR(APIC_ARBPRI),             .always = false },
-       { .index = X2APIC_MSR(APIC_PROCPRI),            .always = false },
-       { .index = X2APIC_MSR(APIC_EOI),                .always = false },
-       { .index = X2APIC_MSR(APIC_RRR),                .always = false },
-       { .index = X2APIC_MSR(APIC_LDR),                .always = false },
-       { .index = X2APIC_MSR(APIC_DFR),                .always = false },
-       { .index = X2APIC_MSR(APIC_SPIV),               .always = false },
-       { .index = X2APIC_MSR(APIC_ISR),                .always = false },
-       { .index = X2APIC_MSR(APIC_TMR),                .always = false },
-       { .index = X2APIC_MSR(APIC_IRR),                .always = false },
-       { .index = X2APIC_MSR(APIC_ESR),                .always = false },
-       { .index = X2APIC_MSR(APIC_ICR),                .always = false },
-       { .index = X2APIC_MSR(APIC_ICR2),               .always = false },
+       MSR_IA32_SPEC_CTRL,
+       MSR_IA32_PRED_CMD,
+       MSR_IA32_FLUSH_CMD,
+       MSR_IA32_DEBUGCTLMSR,
+       MSR_IA32_LASTBRANCHFROMIP,
+       MSR_IA32_LASTBRANCHTOIP,
+       MSR_IA32_LASTINTFROMIP,
+       MSR_IA32_LASTINTTOIP,
+       MSR_IA32_XSS,
+       MSR_EFER,
+       MSR_IA32_CR_PAT,
+       MSR_AMD64_SEV_ES_GHCB,
+       MSR_TSC_AUX,
+       X2APIC_MSR(APIC_ID),
+       X2APIC_MSR(APIC_LVR),
+       X2APIC_MSR(APIC_TASKPRI),
+       X2APIC_MSR(APIC_ARBPRI),
+       X2APIC_MSR(APIC_PROCPRI),
+       X2APIC_MSR(APIC_EOI),
+       X2APIC_MSR(APIC_RRR),
+       X2APIC_MSR(APIC_LDR),
+       X2APIC_MSR(APIC_DFR),
+       X2APIC_MSR(APIC_SPIV),
+       X2APIC_MSR(APIC_ISR),
+       X2APIC_MSR(APIC_TMR),
+       X2APIC_MSR(APIC_IRR),
+       X2APIC_MSR(APIC_ESR),
+       X2APIC_MSR(APIC_ICR),
+       X2APIC_MSR(APIC_ICR2),
 
        /*
         * Note:
@@ -137,14 +134,14 @@ static const struct svm_direct_access_msrs {
         * the AVIC hardware would generate GP fault. Therefore, always
         * intercept the MSR 0x832, and do not setup direct_access_msr.
         */
-       { .index = X2APIC_MSR(APIC_LVTTHMR),            .always = false },
-       { .index = X2APIC_MSR(APIC_LVTPC),              .always = false },
-       { .index = X2APIC_MSR(APIC_LVT0),               .always = false },
-       { .index = X2APIC_MSR(APIC_LVT1),               .always = false },
-       { .index = X2APIC_MSR(APIC_LVTERR),             .always = false },
-       { .index = X2APIC_MSR(APIC_TMICT),              .always = false },
-       { .index = X2APIC_MSR(APIC_TMCCT),              .always = false },
-       { .index = X2APIC_MSR(APIC_TDCR),               .always = false },
+       X2APIC_MSR(APIC_LVTTHMR),
+       X2APIC_MSR(APIC_LVTPC),
+       X2APIC_MSR(APIC_LVT0),
+       X2APIC_MSR(APIC_LVT1),
+       X2APIC_MSR(APIC_LVTERR),
+       X2APIC_MSR(APIC_TMICT),
+       X2APIC_MSR(APIC_TMCCT),
+       X2APIC_MSR(APIC_TDCR),
 };
 
 static_assert(ARRAY_SIZE(direct_access_msrs) ==
@@ -761,7 +758,7 @@ static int direct_access_msr_slot(u32 msr)
        u32 i;
 
        for (i = 0; i < ARRAY_SIZE(direct_access_msrs); i++) {
-               if (direct_access_msrs[i].index == msr)
+               if (direct_access_msrs[i] == msr)
                        return i;
        }
 
@@ -925,14 +922,17 @@ u32 *svm_vcpu_alloc_msrpm(void)
 
 static void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu)
 {
-       int i;
+       svm_disable_intercept_for_msr(vcpu, MSR_STAR, MSR_TYPE_RW);
+       svm_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
 
-       for (i = 0; i < ARRAY_SIZE(direct_access_msrs); i++) {
-               if (!direct_access_msrs[i].always)
-                       continue;
-               svm_disable_intercept_for_msr(vcpu, direct_access_msrs[i].index,
-                                             MSR_TYPE_RW);
-       }
+#ifdef CONFIG_X86_64
+       svm_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW);
+       svm_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW);
+       svm_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
+       svm_disable_intercept_for_msr(vcpu, MSR_LSTAR, MSR_TYPE_RW);
+       svm_disable_intercept_for_msr(vcpu, MSR_CSTAR, MSR_TYPE_RW);
+       svm_disable_intercept_for_msr(vcpu, MSR_SYSCALL_MASK, MSR_TYPE_RW);
+#endif
 }
 
 void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool intercept)
@@ -946,7 +946,7 @@ void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool intercept)
                return;
 
        for (i = 0; i < ARRAY_SIZE(direct_access_msrs); i++) {
-               int index = direct_access_msrs[i].index;
+               int index = direct_access_msrs[i];
 
                if ((index < APIC_BASE_MSR) ||
                    (index > APIC_BASE_MSR + 0xff))
@@ -974,7 +974,7 @@ static void svm_msr_filter_changed(struct kvm_vcpu *vcpu)
         * back in sync after this.
         */
        for (i = 0; i < ARRAY_SIZE(direct_access_msrs); i++) {
-               u32 msr = direct_access_msrs[i].index;
+               u32 msr = direct_access_msrs[i];
                u32 read = test_bit(i, svm->shadow_msr_intercept.read);
                u32 write = test_bit(i, svm->shadow_msr_intercept.write);
 
@@ -1014,7 +1014,7 @@ static __init int init_msrpm_offsets(void)
        for (i = 0; i < ARRAY_SIZE(direct_access_msrs); i++) {
                u32 offset;
 
-               offset = svm_msrpm_offset(direct_access_msrs[i].index);
+               offset = svm_msrpm_offset(direct_access_msrs[i]);
                if (WARN_ON(offset == MSR_INVALID))
                        return -EIO;