KVM: x86: Add a helper to dedup loading guest/host XCR0 and XSS
author    Binbin Wu <binbin.wu@linux.intel.com>
Mon, 10 Nov 2025 05:05:39 +0000 (13:05 +0800)
committer Sean Christopherson <seanjc@google.com>
Wed, 19 Nov 2025 13:41:12 +0000 (05:41 -0800)
Add and use a helper, kvm_load_xfeatures(), to dedup the code that loads
guest/host xfeatures.

Opportunistically return early if X86_CR4_OSXSAVE is not set to reduce
indentation.

No functional change intended.

Suggested-by: Chao Gao <chao.gao@intel.com>
Reviewed-by: Chao Gao <chao.gao@intel.com>
Signed-off-by: Binbin Wu <binbin.wu@linux.intel.com>
Reviewed-by: Xiaoyao Li <xiaoyao.li@intel.com>
Link: https://patch.msgid.link/20251110050539.3398759-1-binbin.wu@linux.intel.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
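
For reference, this is how the unified helper reads once the patch is applied
(reconstructed from the hunk below; the explanatory comments are editorial and
not part of the patch):

static void kvm_load_xfeatures(struct kvm_vcpu *vcpu, bool load_guest)
{
	/* Guest state is protected (not managed directly by KVM); nothing to switch. */
	if (vcpu->arch.guest_state_protected)
		return;

	/* XCR0/XSS are only switched when the guest has enabled CR4.OSXSAVE. */
	if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE))
		return;

	if (vcpu->arch.xcr0 != kvm_host.xcr0)
		xsetbv(XCR_XFEATURE_ENABLED_MASK,
		       load_guest ? vcpu->arch.xcr0 : kvm_host.xcr0);

	if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) &&
	    vcpu->arch.ia32_xss != kvm_host.xss)
		wrmsrq(MSR_IA32_XSS, load_guest ? vcpu->arch.ia32_xss : kvm_host.xss);
}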
arch/x86/kvm/x86.c

index 1ef77a1be9b23ba5b7cec45c687eaa798a2f652e..aff32603a043d29bb71836ff6bee22ff33fd4161 100644
@@ -1205,34 +1205,21 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 }
 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_lmsw);
 
-static void kvm_load_guest_xfeatures(struct kvm_vcpu *vcpu)
+static void kvm_load_xfeatures(struct kvm_vcpu *vcpu, bool load_guest)
 {
        if (vcpu->arch.guest_state_protected)
                return;
 
-       if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
-               if (vcpu->arch.xcr0 != kvm_host.xcr0)
-                       xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
-
-               if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) &&
-                   vcpu->arch.ia32_xss != kvm_host.xss)
-                       wrmsrq(MSR_IA32_XSS, vcpu->arch.ia32_xss);
-       }
-}
-
-static void kvm_load_host_xfeatures(struct kvm_vcpu *vcpu)
-{
-       if (vcpu->arch.guest_state_protected)
+       if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE))
                return;
 
-       if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
-               if (vcpu->arch.xcr0 != kvm_host.xcr0)
-                       xsetbv(XCR_XFEATURE_ENABLED_MASK, kvm_host.xcr0);
+       if (vcpu->arch.xcr0 != kvm_host.xcr0)
+               xsetbv(XCR_XFEATURE_ENABLED_MASK,
+                      load_guest ? vcpu->arch.xcr0 : kvm_host.xcr0);
 
-               if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) &&
-                   vcpu->arch.ia32_xss != kvm_host.xss)
-                       wrmsrq(MSR_IA32_XSS, kvm_host.xss);
-       }
+       if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) &&
+           vcpu->arch.ia32_xss != kvm_host.xss)
+               wrmsrq(MSR_IA32_XSS, load_guest ? vcpu->arch.ia32_xss : kvm_host.xss);
 }
 
 static void kvm_load_guest_pkru(struct kvm_vcpu *vcpu)
@@ -11271,7 +11258,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        if (vcpu->arch.guest_fpu.xfd_err)
                wrmsrq(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);
 
-       kvm_load_guest_xfeatures(vcpu);
+       kvm_load_xfeatures(vcpu, true);
 
        if (unlikely(vcpu->arch.switch_db_regs &&
                     !(vcpu->arch.switch_db_regs & KVM_DEBUGREG_AUTO_SWITCH))) {
@@ -11367,7 +11354,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        vcpu->mode = OUTSIDE_GUEST_MODE;
        smp_wmb();
 
-       kvm_load_host_xfeatures(vcpu);
+       kvm_load_xfeatures(vcpu, false);
 
        /*
         * Sync xfd before calling handle_exit_irqoff() which may