From 946fbbc13dce68902f64515b610eeb2a6c3d7a64 Mon Sep 17 00:00:00 2001
From: Paolo Bonzini <pbonzini@redhat.com>
Date: Thu, 22 Feb 2018 16:43:18 +0100
Subject: KVM/VMX: Optimize vmx_vcpu_run() and svm_vcpu_run() by marking the RDMSR path as unlikely()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

From: Paolo Bonzini <pbonzini@redhat.com>

commit 946fbbc13dce68902f64515b610eeb2a6c3d7a64 upstream.

vmx_vcpu_run() and svm_vcpu_run() are large functions, and giving
branch hints to the compiler can actually make a substantial cycle
difference by keeping the fast path contiguous in memory.

With this optimization, the retpoline-guest/retpoline-host case is
about 50 cycles faster.
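
As a minimal standalone sketch (not kernel code): unlikely() is built on
GCC's __builtin_expect(), per its definition in include/linux/compiler.h,
which tells the compiler to lay out the hinted branch off the straight-line
path. The msr_write_intercepted() stand-in below is hypothetical, kept only
to mirror the shape of the patched code:

    #include <stdio.h>

    /* Sketch of the kernel's hint macro; the real definition lives in
     * include/linux/compiler.h. */
    #define unlikely(x) __builtin_expect(!!(x), 0)

    /* Hypothetical stand-in: pretend interception is the common case. */
    static int msr_write_intercepted(int vcpu)
    {
        return vcpu != 0;
    }

    int main(void)
    {
        int vcpu = 1;
        /* The hint marks the RDMSR path as cold, so the compiler keeps
         * the fall-through (fast) path contiguous in the emitted code.
         * With vcpu = 1 the MSR is intercepted, so this takes the fast
         * path and prints nothing. */
        if (unlikely(!msr_write_intercepted(vcpu)))
            printf("slow path: would RDMSR here\n");
        return 0;
    }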

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
Cc: David Woodhouse <dwmw@amazon.co.uk>
Cc: KarimAllah Ahmed <karahmed@amazon.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: kvm@vger.kernel.org
Cc: stable@vger.kernel.org
Link: http://lkml.kernel.org/r/20180222154318.20361-3-pbonzini@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/x86/kvm/svm.c | 2 +-
 arch/x86/kvm/vmx.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5029,7 +5029,7 @@ static void svm_vcpu_run(struct kvm_vcpu
 	 * If the L02 MSR bitmap does not intercept the MSR, then we need to
 	 * save it.
 	 */
-	if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
+	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
 		svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
 	if (svm->spec_ctrl)
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -9042,7 +9042,7 @@ static void __noclone vmx_vcpu_run(struc
 	 * If the L02 MSR bitmap does not intercept the MSR, then we need to
 	 * save it.
 	 */
-	if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
+	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
 		vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
 	if (vmx->spec_ctrl)