KVM: SVM: Move x2AVIC MSR interception helper to avic.c
author Sean Christopherson <seanjc@google.com>
Fri, 19 Sep 2025 21:59:29 +0000 (14:59 -0700)
committer Sean Christopherson <seanjc@google.com>
Tue, 23 Sep 2025 15:56:46 +0000 (08:56 -0700)
Move svm_set_x2apic_msr_interception() to avic.c as it's only relevant
when x2AVIC is enabled/supported and only called by AVIC code.  In
addition to scoping AVIC code to avic.c, this will allow burying the
global x2avic_enabled variable in avic.c.

Opportunistically rename the helper to explicitly scope it to "avic".

No functional change intended.

Reviewed-by: Naveen N Rao (AMD) <naveen@kernel.org>
Tested-by: Naveen N Rao (AMD) <naveen@kernel.org>
Link: https://lore.kernel.org/r/20250919215934.1590410-3-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
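
The commit message above notes that this move paves the way for burying the global x2avic_enabled flag in avic.c. A minimal sketch of what that follow-up could look like, assuming every remaining reader of the flag also ends up in avic.c after the series; the static qualifier and the dropped extern below are illustrative and not part of this patch:

    /* arch/x86/kvm/svm/avic.c -- hypothetical follow-up, not part of this patch */
    static bool x2avic_enabled;    /* file-local instead of a kernel-wide global */

    /*
     * arch/x86/kvm/svm/svm.h -- the matching "extern bool x2avic_enabled;"
     * declaration would then be removed, so non-AVIC code can no longer
     * reference the flag directly.
     */

With the declaration gone, any caller outside avic.c would fail to compile, enforcing the scoping described in the commit message.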
arch/x86/kvm/svm/avic.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h

index a34c5c3b164e2491f09c0f8d3b22f7f47a27c33f..478a18208a76678e07c3f5ef8593b3b0e9e1b540 100644 (file)
@@ -79,6 +79,57 @@ static bool next_vm_id_wrapped = 0;
 static DEFINE_SPINLOCK(svm_vm_data_hash_lock);
 bool x2avic_enabled;
 
+
+static void avic_set_x2apic_msr_interception(struct vcpu_svm *svm,
+                                            bool intercept)
+{
+       static const u32 x2avic_passthrough_msrs[] = {
+               X2APIC_MSR(APIC_ID),
+               X2APIC_MSR(APIC_LVR),
+               X2APIC_MSR(APIC_TASKPRI),
+               X2APIC_MSR(APIC_ARBPRI),
+               X2APIC_MSR(APIC_PROCPRI),
+               X2APIC_MSR(APIC_EOI),
+               X2APIC_MSR(APIC_RRR),
+               X2APIC_MSR(APIC_LDR),
+               X2APIC_MSR(APIC_DFR),
+               X2APIC_MSR(APIC_SPIV),
+               X2APIC_MSR(APIC_ISR),
+               X2APIC_MSR(APIC_TMR),
+               X2APIC_MSR(APIC_IRR),
+               X2APIC_MSR(APIC_ESR),
+               X2APIC_MSR(APIC_ICR),
+               X2APIC_MSR(APIC_ICR2),
+
+               /*
+                * Note!  Always intercept LVTT, as TSC-deadline timer mode
+                * isn't virtualized by hardware, and the CPU will generate a
+                * #GP instead of a #VMEXIT.
+                */
+               X2APIC_MSR(APIC_LVTTHMR),
+               X2APIC_MSR(APIC_LVTPC),
+               X2APIC_MSR(APIC_LVT0),
+               X2APIC_MSR(APIC_LVT1),
+               X2APIC_MSR(APIC_LVTERR),
+               X2APIC_MSR(APIC_TMICT),
+               X2APIC_MSR(APIC_TMCCT),
+               X2APIC_MSR(APIC_TDCR),
+       };
+       int i;
+
+       if (intercept == svm->x2avic_msrs_intercepted)
+               return;
+
+       if (!x2avic_enabled)
+               return;
+
+       for (i = 0; i < ARRAY_SIZE(x2avic_passthrough_msrs); i++)
+               svm_set_intercept_for_msr(&svm->vcpu, x2avic_passthrough_msrs[i],
+                                         MSR_TYPE_RW, intercept);
+
+       svm->x2avic_msrs_intercepted = intercept;
+}
+
 static void avic_activate_vmcb(struct vcpu_svm *svm)
 {
        struct vmcb *vmcb = svm->vmcb01.ptr;
@@ -99,7 +150,7 @@ static void avic_activate_vmcb(struct vcpu_svm *svm)
                vmcb->control.int_ctl |= X2APIC_MODE_MASK;
                vmcb->control.avic_physical_id |= X2AVIC_MAX_PHYSICAL_ID;
                /* Disabling MSR intercept for x2APIC registers */
-               svm_set_x2apic_msr_interception(svm, false);
+               avic_set_x2apic_msr_interception(svm, false);
        } else {
                /*
                 * Flush the TLB, the guest may have inserted a non-APIC
@@ -110,7 +161,7 @@ static void avic_activate_vmcb(struct vcpu_svm *svm)
                /* For xAVIC and hybrid-xAVIC modes */
                vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID;
                /* Enabling MSR intercept for x2APIC registers */
-               svm_set_x2apic_msr_interception(svm, true);
+               avic_set_x2apic_msr_interception(svm, true);
        }
 }
 
@@ -130,7 +181,7 @@ static void avic_deactivate_vmcb(struct vcpu_svm *svm)
                return;
 
        /* Enabling MSR intercept for x2APIC registers */
-       svm_set_x2apic_msr_interception(svm, true);
+       avic_set_x2apic_msr_interception(svm, true);
 }
 
 /* Note:
index bfbd34818412633d352d23731a94b1889bb71067..44032c78aab06ee74af2f1443f5d15a25ec22b7e 100644 (file)
@@ -724,55 +724,6 @@ static void svm_recalc_lbr_msr_intercepts(struct kvm_vcpu *vcpu)
                svm_set_intercept_for_msr(vcpu, MSR_IA32_DEBUGCTLMSR, MSR_TYPE_RW, intercept);
 }
 
-void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool intercept)
-{
-       static const u32 x2avic_passthrough_msrs[] = {
-               X2APIC_MSR(APIC_ID),
-               X2APIC_MSR(APIC_LVR),
-               X2APIC_MSR(APIC_TASKPRI),
-               X2APIC_MSR(APIC_ARBPRI),
-               X2APIC_MSR(APIC_PROCPRI),
-               X2APIC_MSR(APIC_EOI),
-               X2APIC_MSR(APIC_RRR),
-               X2APIC_MSR(APIC_LDR),
-               X2APIC_MSR(APIC_DFR),
-               X2APIC_MSR(APIC_SPIV),
-               X2APIC_MSR(APIC_ISR),
-               X2APIC_MSR(APIC_TMR),
-               X2APIC_MSR(APIC_IRR),
-               X2APIC_MSR(APIC_ESR),
-               X2APIC_MSR(APIC_ICR),
-               X2APIC_MSR(APIC_ICR2),
-
-               /*
-                * Note!  Always intercept LVTT, as TSC-deadline timer mode
-                * isn't virtualized by hardware, and the CPU will generate a
-                * #GP instead of a #VMEXIT.
-                */
-               X2APIC_MSR(APIC_LVTTHMR),
-               X2APIC_MSR(APIC_LVTPC),
-               X2APIC_MSR(APIC_LVT0),
-               X2APIC_MSR(APIC_LVT1),
-               X2APIC_MSR(APIC_LVTERR),
-               X2APIC_MSR(APIC_TMICT),
-               X2APIC_MSR(APIC_TMCCT),
-               X2APIC_MSR(APIC_TDCR),
-       };
-       int i;
-
-       if (intercept == svm->x2avic_msrs_intercepted)
-               return;
-
-       if (!x2avic_enabled)
-               return;
-
-       for (i = 0; i < ARRAY_SIZE(x2avic_passthrough_msrs); i++)
-               svm_set_intercept_for_msr(&svm->vcpu, x2avic_passthrough_msrs[i],
-                                         MSR_TYPE_RW, intercept);
-
-       svm->x2avic_msrs_intercepted = intercept;
-}
-
 void svm_vcpu_free_msrpm(void *msrpm)
 {
        __free_pages(virt_to_page(msrpm), get_order(MSRPM_SIZE));
index bc46a353948736363b352fad099ea13b59dd76b4..cb1d26cb5113ce6857116d3ded01091b2a1836af 100644 (file)
@@ -703,7 +703,6 @@ void svm_set_gif(struct vcpu_svm *svm, bool value);
 int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
 void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
                          int read, int write);
-void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool disable);
 void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
                                     int trig_mode, int vec);