KVM: VMX: Ensure guest's SPEC_CTRL[63:32] is loaded on VM-Enter
author Uros Bizjak <ubizjak@gmail.com>
Wed, 20 Aug 2025 09:59:54 +0000 (11:59 +0200)
committer Sean Christopherson <seanjc@google.com>
Thu, 6 Nov 2025 14:23:52 +0000 (06:23 -0800)
SPEC_CTRL is an MSR, i.e. a 64-bit value, but the assembly code that loads
the guest's value assumes bits 63:32 are always zero.  The bug is
_currently_ benign because neither KVM nor the kernel supports setting any
of bits 63:32, but it's still a bug that needs to be fixed.

Note, the host's value is restored in C code and is unaffected.
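As a rough illustration (plain C, not kernel code, and the helper name is
invented): WRMSR consumes the 64-bit MSR value as the EDX:EAX register pair,
so zeroing EDX before the write silently drops bits 63:32 of whatever the
guest wrote.

  #include <stdint.h>
  #include <stdio.h>

  /* Hypothetical helper, illustration only: WRMSR takes the 64-bit MSR
   * value split across the EDX:EAX register pair. */
  static void split_for_wrmsr(uint64_t val, uint32_t *eax, uint32_t *edx)
  {
          *eax = (uint32_t)val;          /* low  32 bits -> EAX */
          *edx = (uint32_t)(val >> 32);  /* high 32 bits -> EDX */
  }

  int main(void)
  {
          uint64_t spec_ctrl = (1ULL << 36) | 1; /* hypothetical high bit + IBRS */
          uint32_t eax, edx;

          split_for_wrmsr(spec_ctrl, &eax, &edx);
          /* The old code's "xor %edx, %edx" forced edx to 0, losing bit 36. */
          printf("eax=%#x edx=%#x\n", eax, edx);
          return 0;
  }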

Fixes: 07853adc29a0 ("KVM: VMX: Prevent RSB underflow before vmenter")
Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Josh Poimboeuf <jpoimboe@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Link: https://patch.msgid.link/20250820100007.356761-1-ubizjak@gmail.com
[sean: call out that only the guest's value is affected]
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/vmx/vmenter.S

index bc255d709d8a16ae22b5bc401965d209a89a8692..574159a84ee9b6969ac4d99951515c174ad93680 100644
@@ -118,13 +118,23 @@ SYM_FUNC_START(__vmx_vcpu_run)
         * and vmentry.
         */
        mov 2*WORD_SIZE(%_ASM_SP), %_ASM_DI
-       movl VMX_spec_ctrl(%_ASM_DI), %edi
-       movl PER_CPU_VAR(x86_spec_ctrl_current), %esi
-       cmp %edi, %esi
+#ifdef CONFIG_X86_64
+       mov VMX_spec_ctrl(%rdi), %rdx
+       cmp PER_CPU_VAR(x86_spec_ctrl_current), %rdx
+       je .Lspec_ctrl_done
+       movl %edx, %eax
+       shr $32, %rdx
+#else
+       mov VMX_spec_ctrl(%edi), %eax
+       mov PER_CPU_VAR(x86_spec_ctrl_current), %ecx
+       xor %eax, %ecx
+       mov VMX_spec_ctrl + 4(%edi), %edx
+       mov PER_CPU_VAR(x86_spec_ctrl_current + 4), %edi
+       xor %edx, %edi
+       or %edi, %ecx
        je .Lspec_ctrl_done
+#endif
        mov $MSR_IA32_SPEC_CTRL, %ecx
-       xor %edx, %edx
-       mov %edi, %eax
        wrmsr
 
 .Lspec_ctrl_done:
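
A rough C sketch of the new 32-bit (#else) comparison path, for illustration
only (the function name is invented): without 64-bit registers, the low and
high halves are XORed against the host value separately and the results ORed
together, so the WRMSR is skipped only when the full 64-bit values match.

  #include <stdint.h>
  #include <stdio.h>

  /* Illustration only: mirrors the xor/xor/or/je sequence in the #else
   * branch above.  Returns nonzero when guest and host SPEC_CTRL differ
   * in either half, i.e. when the WRMSR must be issued. */
  static int spec_ctrl_needs_wrmsr(uint32_t guest_lo, uint32_t guest_hi,
                                   uint32_t host_lo, uint32_t host_hi)
  {
          return ((guest_lo ^ host_lo) | (guest_hi ^ host_hi)) != 0;
  }

  int main(void)
  {
          printf("%d\n", spec_ctrl_needs_wrmsr(0x1, 0x0, 0x1, 0x0)); /* 0: skip  */
          printf("%d\n", spec_ctrl_needs_wrmsr(0x1, 0x1, 0x1, 0x0)); /* 1: write */
          return 0;
  }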