From 1811d979c71621aafc7b879477202d286f7e863b Mon Sep 17 00:00:00 2001
From: WANG Chao <chao.wang@ucloud.cn>
Date: Fri, 12 Apr 2019 15:55:39 +0800
Subject: x86/kvm: move kvm_load/put_guest_xcr0 into atomic context

From: WANG Chao <chao.wang@ucloud.cn>

commit 1811d979c71621aafc7b879477202d286f7e863b upstream.

Guest xcr0 can leak into the host when an MCE happens in guest mode,
because do_machine_check() can schedule out at a few places.

For example:

kvm_load_guest_xcr0
...
kvm_x86_ops->run(vcpu) {
  vmx_vcpu_run
   vmx_complete_atomic_exit
    kvm_machine_check
     do_machine_check
      do_memory_failure
       memory_failure
        lock_page

In this case, host_xcr0 is 0x2ff and the guest vcpu's xcr0 is 0xff. After
scheduling out, the host CPU has the guest xcr0 (0xff) loaded.
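
Note that host_xcr0 and the guest xcr0 differ only in bit 9
(0x2ff ^ 0xff = 0x200), the PKRU state component; that is the bit that
ends up triggering the fault described below.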

In __switch_to {
  switch_fpu_finish
   copy_kernel_to_fpregs
    XRSTORS

If XSTATE_BV[i] == 1 for any bit i while xcr0[i] == 0, XRSTORS generates
a #GP (bit 9 in this case). Then ex_handler_fprestore kicks in and tries
to reinitialize the FPU by restoring the init FPU state. Same story as
the last #GP, except this time we get a DOUBLE FAULT.
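
A minimal C sketch of that faulting condition (xrstors_would_fault() is
an illustrative helper for this commit message, not a kernel function):

        /*
         * Illustration only: XRSTORS raises #GP when the state image
         * asks to restore a component that the currently loaded XCR0
         * does not enable.
         */
        static bool xrstors_would_fault(u64 xstate_bv, u64 xcr0)
        {
                return (xstate_bv & ~xcr0) != 0;   /* here: bit 9 */
        }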

Cc: stable@vger.kernel.org
Signed-off-by: WANG Chao <chao.wang@ucloud.cn>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/x86/kvm/svm.c     |    2 ++
 arch/x86/kvm/vmx/vmx.c |    4 ++++
 arch/x86/kvm/x86.c     |   10 ++++------
 arch/x86/kvm/x86.h     |    2 ++
 4 files changed, 12 insertions(+), 6 deletions(-)
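
The fix moves the load/put calls into sections where the vcpu can no
longer be scheduled out, e.g. on SVM between clgi() and stgi(). A sketch
of the resulting ordering, condensed from the hunks below:

        clgi();                    /* interrupts held off: no schedule */
        kvm_load_guest_xcr0(vcpu);
        /* ... guest runs ... */
        kvm_put_guest_xcr0(vcpu);
        stgi();                    /* interrupts allowed again */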

--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5634,6 +5634,7 @@ static void svm_vcpu_run(struct kvm_vcpu
 	svm->vmcb->save.cr2 = vcpu->arch.cr2;
 
 	clgi();
+	kvm_load_guest_xcr0(vcpu);
 
 	/*
 	 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
@@ -5779,6 +5780,7 @@ static void svm_vcpu_run(struct kvm_vcpu
 	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
 		kvm_before_interrupt(&svm->vcpu);
 
+	kvm_put_guest_xcr0(vcpu);
 	stgi();
 
 	/* Any pending NMI will happen here */
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6548,6 +6548,8 @@ static void vmx_vcpu_run(struct kvm_vcpu
 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
 		vmx_set_interrupt_shadow(vcpu, 0);
 
+	kvm_load_guest_xcr0(vcpu);
+
 	if (static_cpu_has(X86_FEATURE_PKU) &&
 	    kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
 	    vcpu->arch.pkru != vmx->host_pkru)
@@ -6635,6 +6637,8 @@ static void vmx_vcpu_run(struct kvm_vcpu
 		__write_pkru(vmx->host_pkru);
 	}
 
+	kvm_put_guest_xcr0(vcpu);
+
 	vmx->nested.nested_run_pending = 0;
 	vmx->idt_vectoring_info = 0;
 
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -800,7 +800,7 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, uns
 }
 EXPORT_SYMBOL_GPL(kvm_lmsw);
 
-static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
+void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
 {
 	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
 			!vcpu->guest_xcr0_loaded) {
@@ -810,8 +810,9 @@ static void kvm_load_guest_xcr0(struct k
 		vcpu->guest_xcr0_loaded = 1;
 	}
 }
+EXPORT_SYMBOL_GPL(kvm_load_guest_xcr0);
 
-static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
+void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
 {
 	if (vcpu->guest_xcr0_loaded) {
 		if (vcpu->arch.xcr0 != host_xcr0)
@@ -819,6 +820,7 @@ static void kvm_put_guest_xcr0(struct kv
 		vcpu->guest_xcr0_loaded = 0;
 	}
 }
+EXPORT_SYMBOL_GPL(kvm_put_guest_xcr0);
 
 static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 {
@@ -7856,8 +7858,6 @@ static int vcpu_enter_guest(struct kvm_v
 		goto cancel_injection;
 	}
 
-	kvm_load_guest_xcr0(vcpu);
-
 	if (req_immediate_exit) {
 		kvm_make_request(KVM_REQ_EVENT, vcpu);
 		kvm_x86_ops->request_immediate_exit(vcpu);
@@ -7910,8 +7910,6 @@ static int vcpu_enter_guest(struct kvm_v
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 	smp_wmb();
 
-	kvm_put_guest_xcr0(vcpu);
-
 	kvm_before_interrupt(vcpu);
 	kvm_x86_ops->handle_external_intr(vcpu);
 	kvm_after_interrupt(vcpu);
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -347,4 +347,6 @@ static inline void kvm_after_interrupt(s
 	__this_cpu_write(current_vcpu, NULL);
 }
 
+void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu);
+void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu);
 #endif