]>
Commit | Line | Data |
---|---|---|
ebe9a58d GKH |
1 | From 11988499e62b310f3bf6f6d0a807a06d3f9ccc96 Mon Sep 17 00:00:00 2001 |
2 | From: Sean Christopherson <sean.j.christopherson@intel.com> | |
3 | Date: Tue, 2 Apr 2019 08:19:15 -0700 | |
4 | Subject: KVM: x86: Skip EFER vs. guest CPUID checks for host-initiated writes | |
5 | ||
6 | From: Sean Christopherson <sean.j.christopherson@intel.com> | |
7 | ||
8 | commit 11988499e62b310f3bf6f6d0a807a06d3f9ccc96 upstream. | |
9 | ||
10 | KVM allows userspace to violate consistency checks related to the | |
11 | guest's CPUID model to some degree. Generally speaking, userspace has | |
12 | carte blanche when it comes to guest state so long as jamming invalid | |
13 | state won't negatively affect the host. | |
14 | ||
15 | Currently this is seemingly a non-issue as most of the interesting | |
16 | EFER checks are missing, e.g. NX and LME, but those will be added | |
17 | shortly. Proactively exempt userspace from the CPUID checks so as not | |
18 | to break userspace. | |
19 | ||
20 | Note, the efer_reserved_bits check still applies to userspace writes as | |
21 | that mask reflects the host's capabilities, e.g. KVM shouldn't allow a | |
22 | guest to run with NX=1 if it has been disabled in the host. | |
23 | ||
24 | Fixes: d80174745ba39 ("KVM: SVM: Only allow setting of EFER_SVME when CPUID SVM is set") | |
25 | Cc: stable@vger.kernel.org | |
26 | Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com> | |
27 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | |
28 | Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> | |
29 | ||
30 | --- | |
31 | arch/x86/kvm/x86.c | 37 ++++++++++++++++++++++++------------- | |
32 | 1 file changed, 24 insertions(+), 13 deletions(-) | |
33 | ||
34 | --- a/arch/x86/kvm/x86.c | |
35 | +++ b/arch/x86/kvm/x86.c | |
36 | @@ -1162,31 +1162,42 @@ static int do_get_msr_feature(struct kvm | |
37 | return 0; | |
38 | } | |
39 | ||
40 | -bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) | |
41 | +static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) | |
42 | { | |
43 | - if (efer & efer_reserved_bits) | |
44 | - return false; | |
45 | - | |
46 | if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT)) | |
47 | - return false; | |
48 | + return false; | |
49 | ||
50 | if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM)) | |
51 | - return false; | |
52 | + return false; | |
53 | ||
54 | return true; | |
55 | + | |
56 | +} | |
57 | +bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) | |
58 | +{ | |
59 | + if (efer & efer_reserved_bits) | |
60 | + return false; | |
61 | + | |
62 | + return __kvm_valid_efer(vcpu, efer); | |
63 | } | |
64 | EXPORT_SYMBOL_GPL(kvm_valid_efer); | |
65 | ||
66 | -static int set_efer(struct kvm_vcpu *vcpu, u64 efer) | |
67 | +static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |
68 | { | |
69 | u64 old_efer = vcpu->arch.efer; | |
70 | + u64 efer = msr_info->data; | |
71 | ||
72 | - if (!kvm_valid_efer(vcpu, efer)) | |
73 | - return 1; | |
74 | + if (efer & efer_reserved_bits) | |
75 | + return false; | |
76 | ||
77 | - if (is_paging(vcpu) | |
78 | - && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) | |
79 | - return 1; | |
80 | + if (!msr_info->host_initiated) { | |
81 | + if (!__kvm_valid_efer(vcpu, efer)) | |
82 | + return 1; | |
83 | + | |
84 | + if (is_paging(vcpu) && | |
85 | + (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) | |
86 | + return 1; | |
87 | + } | |
88 | ||
89 | efer &= ~EFER_LMA; | |
90 | efer |= vcpu->arch.efer & EFER_LMA; | |
91 | @@ -2356,7 +2367,7 @@ int kvm_set_msr_common(struct kvm_vcpu * | |
92 | vcpu->arch.arch_capabilities = data; | |
93 | break; | |
94 | case MSR_EFER: | |
95 | - return set_efer(vcpu, data); | |
96 | + return set_efer(vcpu, msr_info); | |
97 | case MSR_K7_HWCR: | |
98 | data &= ~(u64)0x40; /* ignore flush filter disable */ | |
99 | data &= ~(u64)0x100; /* ignore ignne emulation enable */ |