SPEC_CTRL is an MSR, i.e. a 64-bit value, but the assembly code that loads
the guest's value assumes bits 63:32 are always zero. The bug is
_currently_ benign because neither KVM nor the kernel supports setting any
of bits 63:32, but it's still a bug that needs to be fixed.
Note, the host's value is restored in C code and is unaffected.
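For background, WRMSR takes the MSR index in ECX and the 64-bit value split
across EDX:EAX, so the high half has to be loaded explicitly rather than
assumed to be zero. A minimal C sketch of that split (the wrmsr64() helper
name is illustrative, not the kernel's actual API):

  static inline void wrmsr64(unsigned int msr, unsigned long long val)
  {
          unsigned int lo = (unsigned int)val;           /* bits 31:0  -> EAX */
          unsigned int hi = (unsigned int)(val >> 32);   /* bits 63:32 -> EDX */

          /* WRMSR consumes ECX = MSR index, EDX:EAX = 64-bit value. */
          asm volatile("wrmsr" : : "c" (msr), "a" (lo), "d" (hi) : "memory");
  }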
Fixes: 07853adc29a0 ("KVM: VMX: Prevent RSB underflow before vmenter")
Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Josh Poimboeuf <jpoimboe@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Link: https://patch.msgid.link/20250820100007.356761-1-ubizjak@gmail.com
[sean: call out that only the guest's value is affected]
Signed-off-by: Sean Christopherson <seanjc@google.com>
 	 * and vmentry.
 	 */
 	mov 2*WORD_SIZE(%_ASM_SP), %_ASM_DI
-	movl VMX_spec_ctrl(%_ASM_DI), %edi
-	movl PER_CPU_VAR(x86_spec_ctrl_current), %esi
-	cmp %edi, %esi
+#ifdef CONFIG_X86_64
+	mov VMX_spec_ctrl(%rdi), %rdx
+	cmp PER_CPU_VAR(x86_spec_ctrl_current), %rdx
+	je .Lspec_ctrl_done
+	movl %edx, %eax
+	shr $32, %rdx
+#else
+	mov VMX_spec_ctrl(%edi), %eax
+	mov PER_CPU_VAR(x86_spec_ctrl_current), %ecx
+	xor %eax, %ecx
+	mov VMX_spec_ctrl + 4(%edi), %edx
+	mov PER_CPU_VAR(x86_spec_ctrl_current + 4), %edi
+	xor %edx, %edi
+	or %edi, %ecx
 	je .Lspec_ctrl_done
+#endif
 	mov $MSR_IA32_SPEC_CTRL, %ecx
-	xor %edx, %edx
-	mov %edi, %eax
 	wrmsr
 .Lspec_ctrl_done:
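For reference, a rough C restatement of the comparison the new code performs
(illustration only, not kernel code; spec_ctrl_differs() and its arguments
are made-up names):

  #include <stdbool.h>
  #include <stdint.h>

  static bool spec_ctrl_differs(uint64_t guest, uint64_t host)
  {
  #ifdef __x86_64__
          /* 64-bit: compare the full value in a single register. */
          return guest != host;
  #else
          /*
           * 32-bit: XOR each 32-bit half against the host's half and OR the
           * results; a nonzero result means the values differ and the MSR
           * must be written with EDX:EAX = hi:lo of the guest's value.
           */
          uint32_t lo = (uint32_t)guest ^ (uint32_t)host;
          uint32_t hi = (uint32_t)(guest >> 32) ^ (uint32_t)(host >> 32);

          return (lo | hi) != 0;
  #endif
  }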