1 From acff78477b9b4f26ecdf65733a4ed77fe837e9dc Mon Sep 17 00:00:00 2001
2 From: Marc Orr <marcorr@google.com>
3 Date: Mon, 1 Apr 2019 23:55:59 -0700
4 Subject: KVM: x86: nVMX: close leak of L0's x2APIC MSRs (CVE-2019-3887)
5
6 From: Marc Orr <marcorr@google.com>
7
8 commit acff78477b9b4f26ecdf65733a4ed77fe837e9dc upstream.
9
10 The nested_vmx_prepare_msr_bitmap() function doesn't directly guard the
11 x2APIC MSR intercepts with the "virtualize x2APIC mode" MSR. As a
12 result, we discovered the potential for a buggy or malicious L1 to get
13 access to L0's x2APIC MSRs, via an L2, as follows.
14
15 1. L1 executes WRMSR(IA32_SPEC_CTRL, 1). This causes the spec_ctrl
16 variable, in nested_vmx_prepare_msr_bitmap() to become true.
17 2. L1 disables "virtualize x2APIC mode" in VMCS12.
18 3. L1 enables "APIC-register virtualization" in VMCS12.
19
20 Now, KVM will set VMCS02's x2APIC MSR intercepts from VMCS12, and then
21 set "virtualize x2APIC mode" to 0 in VMCS02. Oops.
22
23 This patch closes the leak by explicitly guarding VMCS02's x2APIC MSR
24 intercepts with VMCS12's "virtualize x2APIC mode" control.
25
26 The scenario outlined above and fix prescribed here, were verified with
27 a related patch in kvm-unit-tests titled "Add leak scenario to
28 virt_x2apic_mode_test".
29
30 Note, it looks like this issue may have been introduced inadvertently
31 during a merge---see 15303ba5d1cd.
32
33 Signed-off-by: Marc Orr <marcorr@google.com>
34 Reviewed-by: Jim Mattson <jmattson@google.com>
35 Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
36 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
37
38 ---
39 arch/x86/kvm/vmx.c | 72 ++++++++++++++++++++++++++++++++---------------------
40 1 file changed, 44 insertions(+), 28 deletions(-)
41
42 --- a/arch/x86/kvm/vmx.c
43 +++ b/arch/x86/kvm/vmx.c
44 @@ -11582,6 +11582,17 @@ static int nested_vmx_check_tpr_shadow_c
45 return 0;
46 }
47
48 +static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap) {
49 + int msr;
50 +
51 + for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
52 + unsigned word = msr / BITS_PER_LONG;
53 +
54 + msr_bitmap[word] = ~0;
55 + msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
56 + }
57 +}
58 +
59 /*
60 * Merge L0's and L1's MSR bitmap, return false to indicate that
61 * we do not use the hardware.
62 @@ -11623,39 +11634,44 @@ static inline bool nested_vmx_prepare_ms
63 return false;
64
65 msr_bitmap_l1 = (unsigned long *)kmap(page);
66 - if (nested_cpu_has_apic_reg_virt(vmcs12)) {
67 - /*
68 - * L0 need not intercept reads for MSRs between 0x800 and 0x8ff, it
69 - * just lets the processor take the value from the virtual-APIC page;
70 - * take those 256 bits directly from the L1 bitmap.
71 - */
72 - for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
73 - unsigned word = msr / BITS_PER_LONG;
74 - msr_bitmap_l0[word] = msr_bitmap_l1[word];
75 - msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
76 - }
77 - } else {
78 - for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
79 - unsigned word = msr / BITS_PER_LONG;
80 - msr_bitmap_l0[word] = ~0;
81 - msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
82 - }
83 - }
84
85 - nested_vmx_disable_intercept_for_msr(
86 - msr_bitmap_l1, msr_bitmap_l0,
87 - X2APIC_MSR(APIC_TASKPRI),
88 - MSR_TYPE_W);
89 + /*
90 + * To keep the control flow simple, pay eight 8-byte writes (sixteen
91 + * 4-byte writes on 32-bit systems) up front to enable intercepts for
92 + * the x2APIC MSR range and selectively disable them below.
93 + */
94 + enable_x2apic_msr_intercepts(msr_bitmap_l0);
95 +
96 + if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
97 + if (nested_cpu_has_apic_reg_virt(vmcs12)) {
98 + /*
99 + * L0 need not intercept reads for MSRs between 0x800
100 + * and 0x8ff, it just lets the processor take the value
101 + * from the virtual-APIC page; take those 256 bits
102 + * directly from the L1 bitmap.
103 + */
104 + for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
105 + unsigned word = msr / BITS_PER_LONG;
106 +
107 + msr_bitmap_l0[word] = msr_bitmap_l1[word];
108 + }
109 + }
110
111 - if (nested_cpu_has_vid(vmcs12)) {
112 - nested_vmx_disable_intercept_for_msr(
113 - msr_bitmap_l1, msr_bitmap_l0,
114 - X2APIC_MSR(APIC_EOI),
115 - MSR_TYPE_W);
116 nested_vmx_disable_intercept_for_msr(
117 msr_bitmap_l1, msr_bitmap_l0,
118 - X2APIC_MSR(APIC_SELF_IPI),
119 + X2APIC_MSR(APIC_TASKPRI),
120 MSR_TYPE_W);
121 +
122 + if (nested_cpu_has_vid(vmcs12)) {
123 + nested_vmx_disable_intercept_for_msr(
124 + msr_bitmap_l1, msr_bitmap_l0,
125 + X2APIC_MSR(APIC_EOI),
126 + MSR_TYPE_W);
127 + nested_vmx_disable_intercept_for_msr(
128 + msr_bitmap_l1, msr_bitmap_l0,
129 + X2APIC_MSR(APIC_SELF_IPI),
130 + MSR_TYPE_W);
131 + }
132 }
133
134 if (spec_ctrl)