From 11988499e62b310f3bf6f6d0a807a06d3f9ccc96 Mon Sep 17 00:00:00 2001
From: Sean Christopherson <sean.j.christopherson@intel.com>
Date: Tue, 2 Apr 2019 08:19:15 -0700
Subject: KVM: x86: Skip EFER vs. guest CPUID checks for host-initiated writes

From: Sean Christopherson <sean.j.christopherson@intel.com>

commit 11988499e62b310f3bf6f6d0a807a06d3f9ccc96 upstream.

KVM allows userspace to violate consistency checks related to the
guest's CPUID model to some degree.  Generally speaking, userspace has
carte blanche when it comes to guest state so long as jamming invalid
state won't negatively affect the host.

Currently this is seems to be a non-issue as most of the interesting
EFER checks are missing, e.g. NX and LME, but those will be added
shortly.  Proactively exempt userspace from the CPUID checks so as not
to break userspace.

Note, the efer_reserved_bits check still applies to userspace writes as
that mask reflects the host's capabilities, e.g. KVM shouldn't allow a
guest to run with NX=1 if it has been disabled in the host.

Fixes: d80174745ba39 ("KVM: SVM: Only allow setting of EFER_SVME when CPUID SVM is set")
Cc: stable@vger.kernel.org
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/x86/kvm/x86.c |   33 ++++++++++++++++++++++-----------
 1 file changed, 22 insertions(+), 11 deletions(-)

--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -990,11 +990,8 @@ static u32 emulated_msrs[] = {
 
 static unsigned num_emulated_msrs;
 
-bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
+static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
-	if (efer & efer_reserved_bits)
-		return false;
-
 	if (efer & EFER_FFXSR) {
 		struct kvm_cpuid_entry2 *feat;
 
@@ -1012,19 +1009,33 @@ bool kvm_valid_efer(struct kvm_vcp
 	}
 
 	return true;
+
+}
+bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
+{
+	if (efer & efer_reserved_bits)
+		return false;
+
+	return __kvm_valid_efer(vcpu, efer);
 }
 EXPORT_SYMBOL_GPL(kvm_valid_efer);
 
-static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
+static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	u64 old_efer = vcpu->arch.efer;
+	u64 efer = msr_info->data;
 
-	if (!kvm_valid_efer(vcpu, efer))
-		return 1;
+	if (efer & efer_reserved_bits)
+		return false;
 
-	if (is_paging(vcpu)
-	    && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
-		return 1;
+	if (!msr_info->host_initiated) {
+		if (!__kvm_valid_efer(vcpu, efer))
+			return 1;
+
+		if (is_paging(vcpu) &&
+		    (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
+			return 1;
+	}
 
 	efer &= ~EFER_LMA;
 	efer |= vcpu->arch.efer & EFER_LMA;
@@ -2055,7 +2066,7 @@ int kvm_set_msr_common(struct kvm_vcpu *
 		break;
 
 	case MSR_EFER:
-		return set_efer(vcpu, data);
+		return set_efer(vcpu, msr_info);
 	case MSR_K7_HWCR:
 		data &= ~(u64)0x40;	/* ignore flush filter disable */
 		data &= ~(u64)0x100;	/* ignore ignne emulation enable */