KVM: VMX: Zero out *all* general purpose registers after VM-Exit
author		Sean Christopherson <sean.j.christopherson@intel.com>
		Fri, 25 Jan 2019 15:40:50 +0000 (07:40 -0800)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
		Wed, 29 Apr 2020 14:31:17 +0000 (16:31 +0200)
commit 0e0ab73c9a0243736bcd779b30b717e23ba9a56d upstream.

...except RSP, which is restored by hardware as part of VM-Exit.

Paolo theorized that restoring registers from the stack after a VM-Exit
in lieu of zeroing them could lead to speculative execution with the
guest's values, e.g. if the stack accesses miss the L1 cache[1].
Zeroing XORs are dirt cheap, so just be ultra-paranoid.
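
As an aside on why the 32-bit XOR forms suffice and why they are so cheap:
on x86-64, writing a 32-bit register implicitly zeroes the upper 32 bits,
and "xor reg, reg" is a recognized dependency-breaking idiom. The following
is a minimal user-space sketch (not kernel code; the file name and variable
are illustrative), assuming an x86-64 toolchain with GNU inline asm:

	/*
	 * xor_zero.c -- standalone demonstration that a 32-bit XOR clears
	 * the full 64-bit register, the same idiom the patch emits after
	 * VM-Exit.  Build with: gcc -O2 xor_zero.c
	 */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t val = 0xdeadbeefcafef00dULL;

		asm volatile("mov  %1, %%rcx   \n\t"  /* pretend RCX holds a guest value */
			     "xor  %%ecx, %%ecx\n\t"  /* 32-bit XOR zeroes all of RCX    */
			     "mov  %%rcx, %0   \n\t"
			     : "=r" (val)
			     : "r" (val)
			     : "rcx");

		printf("RCX after 32-bit xor: %#llx\n", (unsigned long long)val); /* prints 0 */
		return 0;
	}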

Note that the scratch register (currently RCX) used to save/restore the
guest state is also zeroed as its host-defined value is loaded via the
stack, just with a MOV instead of a POP.
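
In other words, zeroing the scratch register is safe because its host value
is reloaded from memory afterwards. A hedged sketch of that "scrub, then
MOV-reload" idea (names are hypothetical, not taken from vmx_vcpu_run()):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t host_val = 0x1122334455667788ULL;  /* stand-in for the stack slot */
		uint64_t out;

		asm volatile("xor  %%ecx, %%ecx \n\t"  /* scrub the guest's value first   */
			     "mov  %1, %%rcx    \n\t"  /* reload host value with a MOV,   */
			     "mov  %%rcx, %0    \n\t"  /* not a POP                       */
			     : "=r" (out)
			     : "m" (host_val)
			     : "rcx");

		printf("scratch register restored to %#llx\n", (unsigned long long)out);
		return 0;
	}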

[1] https://patchwork.kernel.org/patch/10771539/#22441255

Fixes: 0cb5b30698fd ("kvm: vmx: Scrub hardware GPRs at VM-exit")
Cc: Jim Mattson <jmattson@google.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
[bwh: Backported to 4.19: adjust filename, context]
Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d37b48173e9cf485f666531b38ef3b9c848e3cef..e4d0ad06790e1f67e8b069742edd62686936fc83 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -10841,6 +10841,15 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
                "mov %%r13, %c[r13](%0) \n\t"
                "mov %%r14, %c[r14](%0) \n\t"
                "mov %%r15, %c[r15](%0) \n\t"
+
+               /*
+                * Clear all general purpose registers (except RSP, which is loaded by
+                * the CPU during VM-Exit) to prevent speculative use of the guest's
+                * values, even those that are saved/loaded via the stack.  In theory,
+                * an L1 cache miss when restoring registers could lead to speculative
+                * execution with the guest's values.  Zeroing XORs are dirt cheap,
+                * i.e. the extra paranoia is essentially free.
+                */
                "xor %%r8d,  %%r8d \n\t"
                "xor %%r9d,  %%r9d \n\t"
                "xor %%r10d, %%r10d \n\t"
@@ -10855,8 +10864,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 
                "xor %%eax, %%eax \n\t"
                "xor %%ebx, %%ebx \n\t"
+               "xor %%ecx, %%ecx \n\t"
+               "xor %%edx, %%edx \n\t"
                "xor %%esi, %%esi \n\t"
                "xor %%edi, %%edi \n\t"
+               "xor %%ebp, %%ebp \n\t"
                "pop  %%" _ASM_BP "; pop  %%" _ASM_DX " \n\t"
                ".pushsection .rodata \n\t"
                ".global vmx_return \n\t"