KVM: SVM: Make svm_x86_ops globally visible, clean up on-HyperV usage
author     Sean Christopherson <seanjc@google.com>
           Fri, 19 Sep 2025 21:59:28 +0000 (14:59 -0700)
committer  Sean Christopherson <seanjc@google.com>
           Tue, 23 Sep 2025 15:56:44 +0000 (08:56 -0700)
Make svm_x86_ops globally visible in anticipation of modifying the struct
in avic.c, and clean up the KVM-on-HyperV usage, as declaring _and using_
a local variable in a header that's only defined in one specific .c file
is all kinds of ugly.

Opportunistically make svm_hv_enable_l2_tlb_flush() local to
svm_onhyperv.c, as the only reason it was visible was due to the
aforementioned shenanigans in svm_onhyperv.h.

Alternatively, svm_x86_ops could be explicitly passed to
svm_hv_hardware_setup() as a parameter.  While that approach is slightly
safer, e.g. avoids "hidden" updates, for better or worse, the Intel side
of KVM has already chosen to expose vt_x86_ops (and vt_init_ops).  Given
that svm_x86_ops is only truly consumed by kvm_ops_update(), the odds of a
"hidden" update causing problems are extremely low.  So, absent a strong
reason to rework the VMX/TDX code, make svm_x86_ops visible, as having all
updates use exactly "svm_x86_ops." is advantageous in its own right.

No functional change intended.

Link: https://lore.kernel.org/r/20250919215934.1590410-2-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
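
For context, a rough sketch of the alternative the message describes but does not take (passing svm_x86_ops into svm_hv_hardware_setup() explicitly) might look like the following. The parameterized signature and the call site shown here are illustrative only and are not part of this patch:

    /*
     * Hypothetical alternative, not applied: svm.c hands its ops table to the
     * Hyper-V setup helper instead of the helper writing svm_x86_ops directly.
     */
    __init void svm_hv_hardware_setup(struct kvm_x86_ops *ops)
    {
            if (npt_enabled &&
                ms_hyperv.nested_features & HV_X64_NESTED_ENLIGHTENED_TLB) {
                    pr_info(KBUILD_MODNAME ": Hyper-V enlightened NPT TLB flush enabled\n");
                    ops->flush_remote_tlbs = hv_flush_remote_tlbs;
                    ops->flush_remote_tlbs_range = hv_flush_remote_tlbs_range;
            }

            /* ... Direct TLB Flush setup as in the svm_onhyperv.c hunk below ... */
    }

    /* The caller in svm.c's hardware setup path would then pass the struct: */
    svm_hv_hardware_setup(&svm_x86_ops);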
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h
arch/x86/kvm/svm/svm_onhyperv.c
arch/x86/kvm/svm/svm_onhyperv.h

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 6f486fb821444d71e941eee12d1e23663ed54d09..bfbd34818412633d352d23731a94b1889bb71067 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -5012,7 +5012,7 @@ static void *svm_alloc_apic_backing_page(struct kvm_vcpu *vcpu)
        return page_address(page);
 }
 
-static struct kvm_x86_ops svm_x86_ops __initdata = {
+struct kvm_x86_ops svm_x86_ops __initdata = {
        .name = KBUILD_MODNAME,
 
        .check_processor_compatibility = svm_check_processor_compat,
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index ec3fb318ca8344919c2283c69c28b37439310bfa..bc46a353948736363b352fad099ea13b59dd76b4 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -54,6 +54,8 @@ extern int lbrv;
 
 extern int tsc_aux_uret_slot __ro_after_init;
 
+extern struct kvm_x86_ops svm_x86_ops __initdata;
+
 /*
  * Clean bits in VMCB.
  * VMCB_ALL_CLEAN_MASK might also need to
diff --git a/arch/x86/kvm/svm/svm_onhyperv.c b/arch/x86/kvm/svm/svm_onhyperv.c
index 3971b3ea5d04b31daa2079156fcf8873b6cc487a..a8e78c0e59562a2f516610342dd7361b3bbfe877 100644
--- a/arch/x86/kvm/svm/svm_onhyperv.c
+++ b/arch/x86/kvm/svm/svm_onhyperv.c
@@ -15,7 +15,7 @@
 #include "kvm_onhyperv.h"
 #include "svm_onhyperv.h"
 
-int svm_hv_enable_l2_tlb_flush(struct kvm_vcpu *vcpu)
+static int svm_hv_enable_l2_tlb_flush(struct kvm_vcpu *vcpu)
 {
        struct hv_vmcb_enlightenments *hve;
        hpa_t partition_assist_page = hv_get_partition_assist_page(vcpu);
@@ -35,3 +35,29 @@ int svm_hv_enable_l2_tlb_flush(struct kvm_vcpu *vcpu)
        return 0;
 }
 
+__init void svm_hv_hardware_setup(void)
+{
+       if (npt_enabled &&
+           ms_hyperv.nested_features & HV_X64_NESTED_ENLIGHTENED_TLB) {
+               pr_info(KBUILD_MODNAME ": Hyper-V enlightened NPT TLB flush enabled\n");
+               svm_x86_ops.flush_remote_tlbs = hv_flush_remote_tlbs;
+               svm_x86_ops.flush_remote_tlbs_range = hv_flush_remote_tlbs_range;
+       }
+
+       if (ms_hyperv.nested_features & HV_X64_NESTED_DIRECT_FLUSH) {
+               int cpu;
+
+               pr_info(KBUILD_MODNAME ": Hyper-V Direct TLB Flush enabled\n");
+               for_each_online_cpu(cpu) {
+                       struct hv_vp_assist_page *vp_ap =
+                               hv_get_vp_assist_page(cpu);
+
+                       if (!vp_ap)
+                               continue;
+
+                       vp_ap->nested_control.features.directhypercall = 1;
+               }
+               svm_x86_ops.enable_l2_tlb_flush =
+                               svm_hv_enable_l2_tlb_flush;
+       }
+}
diff --git a/arch/x86/kvm/svm/svm_onhyperv.h b/arch/x86/kvm/svm/svm_onhyperv.h
index f85bc617ffe48ffee7d8f7ed17e64d576034354d..08f14e6f195ce8e6024b41287e0a0fae5d008750 100644
--- a/arch/x86/kvm/svm/svm_onhyperv.h
+++ b/arch/x86/kvm/svm/svm_onhyperv.h
@@ -13,9 +13,7 @@
 #include "kvm_onhyperv.h"
 #include "svm/hyperv.h"
 
-static struct kvm_x86_ops svm_x86_ops;
-
-int svm_hv_enable_l2_tlb_flush(struct kvm_vcpu *vcpu);
+__init void svm_hv_hardware_setup(void);
 
 static inline bool svm_hv_is_enlightened_tlb_enabled(struct kvm_vcpu *vcpu)
 {
@@ -40,33 +38,6 @@ static inline void svm_hv_init_vmcb(struct vmcb *vmcb)
                hve->hv_enlightenments_control.msr_bitmap = 1;
 }
 
-static inline __init void svm_hv_hardware_setup(void)
-{
-       if (npt_enabled &&
-           ms_hyperv.nested_features & HV_X64_NESTED_ENLIGHTENED_TLB) {
-               pr_info(KBUILD_MODNAME ": Hyper-V enlightened NPT TLB flush enabled\n");
-               svm_x86_ops.flush_remote_tlbs = hv_flush_remote_tlbs;
-               svm_x86_ops.flush_remote_tlbs_range = hv_flush_remote_tlbs_range;
-       }
-
-       if (ms_hyperv.nested_features & HV_X64_NESTED_DIRECT_FLUSH) {
-               int cpu;
-
-               pr_info(KBUILD_MODNAME ": Hyper-V Direct TLB Flush enabled\n");
-               for_each_online_cpu(cpu) {
-                       struct hv_vp_assist_page *vp_ap =
-                               hv_get_vp_assist_page(cpu);
-
-                       if (!vp_ap)
-                               continue;
-
-                       vp_ap->nested_control.features.directhypercall = 1;
-               }
-               svm_x86_ops.enable_l2_tlb_flush =
-                               svm_hv_enable_l2_tlb_flush;
-       }
-}
-
 static inline void svm_hv_vmcb_dirty_nested_enlightenments(
                struct kvm_vcpu *vcpu)
 {