6.0-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 14 Nov 2022 11:01:42 +0000 (12:01 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 14 Nov 2022 11:01:42 +0000 (12:01 +0100)
added patches:
kvm-svm-adjust-register-allocation-for-__svm_vcpu_run.patch
kvm-svm-move-guest-vmsave-vmload-back-to-assembly.patch
kvm-svm-only-dump-vmsa-to-klog-at-kern_debug-level.patch
kvm-svm-replace-regs-argument-of-__svm_vcpu_run-with-vcpu_svm.patch
kvm-svm-retrieve-vmcb-from-assembly.patch
kvm-x86-mmu-block-all-page-faults-during-kvm_zap_gfn_range.patch
kvm-x86-pmu-do-not-speculatively-query-intel-gp-pmcs-that-don-t-exist-yet.patch
kvm-x86-use-a-separate-asm-offsets.c-file.patch

queue-6.0/kvm-svm-adjust-register-allocation-for-__svm_vcpu_run.patch [new file with mode: 0644]
queue-6.0/kvm-svm-move-guest-vmsave-vmload-back-to-assembly.patch [new file with mode: 0644]
queue-6.0/kvm-svm-only-dump-vmsa-to-klog-at-kern_debug-level.patch [new file with mode: 0644]
queue-6.0/kvm-svm-replace-regs-argument-of-__svm_vcpu_run-with-vcpu_svm.patch [new file with mode: 0644]
queue-6.0/kvm-svm-retrieve-vmcb-from-assembly.patch [new file with mode: 0644]
queue-6.0/kvm-x86-mmu-block-all-page-faults-during-kvm_zap_gfn_range.patch [new file with mode: 0644]
queue-6.0/kvm-x86-pmu-do-not-speculatively-query-intel-gp-pmcs-that-don-t-exist-yet.patch [new file with mode: 0644]
queue-6.0/kvm-x86-use-a-separate-asm-offsets.c-file.patch [new file with mode: 0644]
queue-6.0/series

diff --git a/queue-6.0/kvm-svm-adjust-register-allocation-for-__svm_vcpu_run.patch b/queue-6.0/kvm-svm-adjust-register-allocation-for-__svm_vcpu_run.patch
new file mode 100644 (file)
index 0000000..6416435
--- /dev/null
@@ -0,0 +1,80 @@
+From f7ef280132f9bf6f82acf5aa5c3c837206eef501 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Fri, 28 Oct 2022 17:30:07 -0400
+Subject: KVM: SVM: adjust register allocation for __svm_vcpu_run()
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit f7ef280132f9bf6f82acf5aa5c3c837206eef501 upstream.
+
+32-bit ABI uses RAX/RCX/RDX as its argument registers, so they are in
+the way of instructions that hardcode their operands such as RDMSR/WRMSR
+or VMLOAD/VMRUN/VMSAVE.
+
+In preparation for moving vmload/vmsave to __svm_vcpu_run(), keep
+the pointer to the struct vcpu_svm in %rdi.  In particular, it is now
+possible to load svm->vmcb01.pa in %rax without clobbering the struct
+vcpu_svm pointer.
+
+No functional change intended.
+
+Cc: stable@vger.kernel.org
+Fixes: a149180fbcf3 ("x86: Add magic AMD return-thunk")
+Reviewed-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/vmenter.S |   38 +++++++++++++++++++-------------------
+ 1 file changed, 19 insertions(+), 19 deletions(-)
+
+--- a/arch/x86/kvm/svm/vmenter.S
++++ b/arch/x86/kvm/svm/vmenter.S
+@@ -54,29 +54,29 @@ SYM_FUNC_START(__svm_vcpu_run)
+       /* Save @vmcb. */
+       push %_ASM_ARG1
+-      /* Move @svm to RAX. */
+-      mov %_ASM_ARG2, %_ASM_AX
++      /* Move @svm to RDI. */
++      mov %_ASM_ARG2, %_ASM_DI
++
++      /* "POP" @vmcb to RAX. */
++      pop %_ASM_AX
+       /* Load guest registers. */
+-      mov VCPU_RCX(%_ASM_AX), %_ASM_CX
+-      mov VCPU_RDX(%_ASM_AX), %_ASM_DX
+-      mov VCPU_RBX(%_ASM_AX), %_ASM_BX
+-      mov VCPU_RBP(%_ASM_AX), %_ASM_BP
+-      mov VCPU_RSI(%_ASM_AX), %_ASM_SI
+-      mov VCPU_RDI(%_ASM_AX), %_ASM_DI
++      mov VCPU_RCX(%_ASM_DI), %_ASM_CX
++      mov VCPU_RDX(%_ASM_DI), %_ASM_DX
++      mov VCPU_RBX(%_ASM_DI), %_ASM_BX
++      mov VCPU_RBP(%_ASM_DI), %_ASM_BP
++      mov VCPU_RSI(%_ASM_DI), %_ASM_SI
+ #ifdef CONFIG_X86_64
+-      mov VCPU_R8 (%_ASM_AX),  %r8
+-      mov VCPU_R9 (%_ASM_AX),  %r9
+-      mov VCPU_R10(%_ASM_AX), %r10
+-      mov VCPU_R11(%_ASM_AX), %r11
+-      mov VCPU_R12(%_ASM_AX), %r12
+-      mov VCPU_R13(%_ASM_AX), %r13
+-      mov VCPU_R14(%_ASM_AX), %r14
+-      mov VCPU_R15(%_ASM_AX), %r15
++      mov VCPU_R8 (%_ASM_DI),  %r8
++      mov VCPU_R9 (%_ASM_DI),  %r9
++      mov VCPU_R10(%_ASM_DI), %r10
++      mov VCPU_R11(%_ASM_DI), %r11
++      mov VCPU_R12(%_ASM_DI), %r12
++      mov VCPU_R13(%_ASM_DI), %r13
++      mov VCPU_R14(%_ASM_DI), %r14
++      mov VCPU_R15(%_ASM_DI), %r15
+ #endif
+-
+-      /* "POP" @vmcb to RAX. */
+-      pop %_ASM_AX
++      mov VCPU_RDI(%_ASM_DI), %_ASM_DI
+       /* Enter guest mode */
+       sti
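
A minimal sketch (C with inline assembly, not KVM code) of the constraint the commit message above describes: WRMSR pins its operands to fixed registers, so a long-lived pointer kept in RAX/RCX/RDX has to be spilled around such instructions, while one parked in RDI is left alone. The helper name below is made up for illustration.

    #include <stdint.h>

    /*
     * Illustrative only: WRMSR takes the MSR index in ECX and the value in
     * EDX:EAX, so the "c", "a" and "d" constraints below are forced by the
     * instruction itself.  (WRMSR is privileged; this compiles but would
     * fault if executed in user mode.)
     */
    static inline void wrmsr_sketch(uint32_t msr, uint64_t val)
    {
            uint32_t lo = (uint32_t)val, hi = (uint32_t)(val >> 32);

            asm volatile("wrmsr" : : "c"(msr), "a"(lo), "d"(hi) : "memory");
    }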
diff --git a/queue-6.0/kvm-svm-move-guest-vmsave-vmload-back-to-assembly.patch b/queue-6.0/kvm-svm-move-guest-vmsave-vmload-back-to-assembly.patch
new file mode 100644 (file)
index 0000000..64270d9
--- /dev/null
@@ -0,0 +1,156 @@
+From e61ab42de874c5af8c5d98b327c77a374d9e7da1 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Mon, 7 Nov 2022 05:14:27 -0500
+Subject: KVM: SVM: move guest vmsave/vmload back to assembly
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit e61ab42de874c5af8c5d98b327c77a374d9e7da1 upstream.
+
+It is error-prone that code after vmexit cannot access percpu data
+because GSBASE has not been restored yet.  It forces MSR_IA32_SPEC_CTRL
+save/restore to happen very late, after the predictor untraining
+sequence, and it gets in the way of return stack depth tracking
+(a retbleed mitigation that is in linux-next as of 2022-11-09).
+
+As a first step towards fixing that, move the VMCB VMSAVE/VMLOAD to
+assembly, essentially undoing commit fb0c4a4fee5a ("KVM: SVM: move
+VMLOAD/VMSAVE to C code", 2021-03-15).  The reason for that commit was
+that it made it simpler to use a different VMCB for VMLOAD/VMSAVE versus
+VMRUN; but that is not a big hassle anymore thanks to the kvm-asm-offsets
+machinery and other related cleanups.
+
+The idea on how to number the exception tables is stolen from
+a prototype patch by Peter Zijlstra.
+
+Cc: stable@vger.kernel.org
+Fixes: a149180fbcf3 ("x86: Add magic AMD return-thunk")
+Link: <https://lore.kernel.org/all/f571e404-e625-bae1-10e9-449b2eb4cbd8@citrix.com/>
+Reviewed-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/kvm-asm-offsets.c |    1 
+ arch/x86/kvm/svm/svm.c         |    9 -------
+ arch/x86/kvm/svm/vmenter.S     |   49 +++++++++++++++++++++++++++++++----------
+ 3 files changed, 39 insertions(+), 20 deletions(-)
+
+--- a/arch/x86/kvm/kvm-asm-offsets.c
++++ b/arch/x86/kvm/kvm-asm-offsets.c
+@@ -16,6 +16,7 @@ static void __used common(void)
+               BLANK();
+               OFFSET(SVM_vcpu_arch_regs, vcpu_svm, vcpu.arch.regs);
+               OFFSET(SVM_current_vmcb, vcpu_svm, current_vmcb);
++              OFFSET(SVM_vmcb01, vcpu_svm, vmcb01);
+               OFFSET(KVM_VMCB_pa, kvm_vmcb_info, pa);
+       }
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -3923,16 +3923,7 @@ static noinstr void svm_vcpu_enter_exit(
+       } else {
+               struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
+-              /*
+-               * Use a single vmcb (vmcb01 because it's always valid) for
+-               * context switching guest state via VMLOAD/VMSAVE, that way
+-               * the state doesn't need to be copied between vmcb01 and
+-               * vmcb02 when switching vmcbs for nested virtualization.
+-               */
+-              vmload(svm->vmcb01.pa);
+               __svm_vcpu_run(svm);
+-              vmsave(svm->vmcb01.pa);
+-
+               vmload(__sme_page_pa(sd->save_area));
+       }
+--- a/arch/x86/kvm/svm/vmenter.S
++++ b/arch/x86/kvm/svm/vmenter.S
+@@ -28,6 +28,8 @@
+ #define VCPU_R15      (SVM_vcpu_arch_regs + __VCPU_REGS_R15 * WORD_SIZE)
+ #endif
++#define SVM_vmcb01_pa (SVM_vmcb01 + KVM_VMCB_pa)
++
+ .section .noinstr.text, "ax"
+ /**
+@@ -55,6 +57,16 @@ SYM_FUNC_START(__svm_vcpu_run)
+       mov %_ASM_ARG1, %_ASM_DI
+ .endif
++      /*
++       * Use a single vmcb (vmcb01 because it's always valid) for
++       * context switching guest state via VMLOAD/VMSAVE, that way
++       * the state doesn't need to be copied between vmcb01 and
++       * vmcb02 when switching vmcbs for nested virtualization.
++       */
++      mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
++1:    vmload %_ASM_AX
++2:
++
+       /* Get svm->current_vmcb->pa into RAX. */
+       mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
+       mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX
+@@ -80,16 +92,11 @@ SYM_FUNC_START(__svm_vcpu_run)
+       /* Enter guest mode */
+       sti
+-1:    vmrun %_ASM_AX
+-
+-2:    cli
+-
+-#ifdef CONFIG_RETPOLINE
+-      /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
+-      FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
+-#endif
++3:    vmrun %_ASM_AX
++4:
++      cli
+-      /* "POP" @svm to RAX. */
++      /* Pop @svm to RAX while it's the only available register. */
+       pop %_ASM_AX
+       /* Save all guest registers.  */
+@@ -110,6 +117,18 @@ SYM_FUNC_START(__svm_vcpu_run)
+       mov %r15, VCPU_R15(%_ASM_AX)
+ #endif
++      /* @svm can stay in RDI from now on.  */
++      mov %_ASM_AX, %_ASM_DI
++
++      mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
++5:    vmsave %_ASM_AX
++6:
++
++#ifdef CONFIG_RETPOLINE
++      /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
++      FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
++#endif
++
+       /*
+        * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
+        * untrained as soon as we exit the VM and are back to the
+@@ -159,11 +178,19 @@ SYM_FUNC_START(__svm_vcpu_run)
+       pop %_ASM_BP
+       RET
+-3:    cmpb $0, kvm_rebooting
++10:   cmpb $0, kvm_rebooting
+       jne 2b
+       ud2
++30:   cmpb $0, kvm_rebooting
++      jne 4b
++      ud2
++50:   cmpb $0, kvm_rebooting
++      jne 6b
++      ud2
+-      _ASM_EXTABLE(1b, 3b)
++      _ASM_EXTABLE(1b, 10b)
++      _ASM_EXTABLE(3b, 30b)
++      _ASM_EXTABLE(5b, 50b)
+ SYM_FUNC_END(__svm_vcpu_run)
diff --git a/queue-6.0/kvm-svm-only-dump-vmsa-to-klog-at-kern_debug-level.patch b/queue-6.0/kvm-svm-only-dump-vmsa-to-klog-at-kern_debug-level.patch
new file mode 100644 (file)
index 0000000..73faf3e
--- /dev/null
@@ -0,0 +1,56 @@
+From 0bd8bd2f7a789fe1dcb21ad148199d2f62d79873 Mon Sep 17 00:00:00 2001
+From: Peter Gonda <pgonda@google.com>
+Date: Fri, 4 Nov 2022 07:22:20 -0700
+Subject: KVM: SVM: Only dump VMSA to klog at KERN_DEBUG level
+
+From: Peter Gonda <pgonda@google.com>
+
+commit 0bd8bd2f7a789fe1dcb21ad148199d2f62d79873 upstream.
+
+Explicitly print the VMSA dump at KERN_DEBUG log level. KERN_CONT uses
+KERNEL_DEFAULT if the previous log line ends with a newline, i.e. if there's
+nothing to continue, and as a result the VMSA gets dumped when it
+shouldn't be.
+
+The KERN_CONT documentation says it defaults back to KERNEL_DEFAULT if the
+previous log line ends with a newline. So switch from KERN_CONT to
+print_hex_dump_debug().
+
+Jarkko pointed this out in reference to the original patch. See:
+https://lore.kernel.org/all/YuPMeWX4uuR1Tz3M@kernel.org/
+print_hex_dump(KERN_DEBUG, ...) was pointed out there, but
+print_hex_dump_debug() should behave similarly.
+
+Fixes: 6fac42f127b8 ("KVM: SVM: Dump Virtual Machine Save Area (VMSA) to klog")
+Signed-off-by: Peter Gonda <pgonda@google.com>
+Reviewed-by: Sean Christopherson <seanjc@google.com>
+Cc: Jarkko Sakkinen <jarkko@kernel.org>
+Cc: Harald Hoyer <harald@profian.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: x86@kernel.org
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Cc: stable@vger.kernel.org
+Message-Id: <20221104142220.469452-1-pgonda@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/sev.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -605,7 +605,7 @@ static int sev_es_sync_vmsa(struct vcpu_
+       save->dr6  = svm->vcpu.arch.dr6;
+       pr_debug("Virtual Machine Save Area (VMSA):\n");
+-      print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1, save, sizeof(*save), false);
++      print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1, save, sizeof(*save), false);
+       return 0;
+ }
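
For context, a hedged side-by-side of the two printk helpers involved (illustrative only; dump_vmsa_sketch() is a made-up name, not the KVM function): print_hex_dump() takes an explicit level, and KERN_CONT only continues an unterminated line, falling back to the default level otherwise, whereas print_hex_dump_debug() always emits a debug-level dump (or goes through dynamic debug when enabled).

    #include <linux/printk.h>
    #include <linux/types.h>

    static void dump_vmsa_sketch(const void *save, size_t len)
    {
            /* Old pattern: effective level depends on the previous log line. */
            print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1, save, len, false);

            /* New pattern: always a debug-level dump. */
            print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1, save, len, false);
    }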
diff --git a/queue-6.0/kvm-svm-replace-regs-argument-of-__svm_vcpu_run-with-vcpu_svm.patch b/queue-6.0/kvm-svm-replace-regs-argument-of-__svm_vcpu_run-with-vcpu_svm.patch
new file mode 100644 (file)
index 0000000..0a19688
--- /dev/null
@@ -0,0 +1,158 @@
+From 16fdc1de169ee0a4e59a8c02244414ec7acd55c3 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Fri, 30 Sep 2022 14:14:44 -0400
+Subject: KVM: SVM: replace regs argument of __svm_vcpu_run() with vcpu_svm
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 16fdc1de169ee0a4e59a8c02244414ec7acd55c3 upstream.
+
+Since registers are reachable through vcpu_svm, and we will
+need to access more fields of that struct, pass it instead
+of the regs[] array.
+
+No functional change intended.
+
+Cc: stable@vger.kernel.org
+Fixes: a149180fbcf3 ("x86: Add magic AMD return-thunk")
+Reviewed-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/Makefile          |    3 +++
+ arch/x86/kvm/kvm-asm-offsets.c |    6 ++++++
+ arch/x86/kvm/svm/svm.c         |    2 +-
+ arch/x86/kvm/svm/svm.h         |    2 +-
+ arch/x86/kvm/svm/vmenter.S     |   37 +++++++++++++++++++------------------
+ 5 files changed, 30 insertions(+), 20 deletions(-)
+
+--- a/arch/x86/kvm/Makefile
++++ b/arch/x86/kvm/Makefile
+@@ -35,6 +35,9 @@ obj-$(CONFIG_KVM)    += kvm.o
+ obj-$(CONFIG_KVM_INTEL)       += kvm-intel.o
+ obj-$(CONFIG_KVM_AMD) += kvm-amd.o
++AFLAGS_svm/vmenter.o    := -iquote $(obj)
++$(obj)/svm/vmenter.o: $(obj)/kvm-asm-offsets.h
++
+ AFLAGS_vmx/vmenter.o    := -iquote $(obj)
+ $(obj)/vmx/vmenter.o: $(obj)/kvm-asm-offsets.h
+--- a/arch/x86/kvm/kvm-asm-offsets.c
++++ b/arch/x86/kvm/kvm-asm-offsets.c
+@@ -8,9 +8,15 @@
+ #include <linux/kbuild.h>
+ #include "vmx/vmx.h"
++#include "svm/svm.h"
+ static void __used common(void)
+ {
++      if (IS_ENABLED(CONFIG_KVM_AMD)) {
++              BLANK();
++              OFFSET(SVM_vcpu_arch_regs, vcpu_svm, vcpu.arch.regs);
++      }
++
+       if (IS_ENABLED(CONFIG_KVM_INTEL)) {
+               BLANK();
+               OFFSET(VMX_spec_ctrl, vcpu_vmx, spec_ctrl);
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -3931,7 +3931,7 @@ static noinstr void svm_vcpu_enter_exit(
+                * vmcb02 when switching vmcbs for nested virtualization.
+                */
+               vmload(svm->vmcb01.pa);
+-              __svm_vcpu_run(vmcb_pa, (unsigned long *)&vcpu->arch.regs);
++              __svm_vcpu_run(vmcb_pa, svm);
+               vmsave(svm->vmcb01.pa);
+               vmload(__sme_page_pa(sd->save_area));
+--- a/arch/x86/kvm/svm/svm.h
++++ b/arch/x86/kvm/svm/svm.h
+@@ -684,6 +684,6 @@ void sev_es_unmap_ghcb(struct vcpu_svm *
+ /* vmenter.S */
+ void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
+-void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
++void __svm_vcpu_run(unsigned long vmcb_pa, struct vcpu_svm *svm);
+ #endif
+--- a/arch/x86/kvm/svm/vmenter.S
++++ b/arch/x86/kvm/svm/vmenter.S
+@@ -4,27 +4,28 @@
+ #include <asm/bitsperlong.h>
+ #include <asm/kvm_vcpu_regs.h>
+ #include <asm/nospec-branch.h>
++#include "kvm-asm-offsets.h"
+ #define WORD_SIZE (BITS_PER_LONG / 8)
+ /* Intentionally omit RAX as it's context switched by hardware */
+-#define VCPU_RCX      __VCPU_REGS_RCX * WORD_SIZE
+-#define VCPU_RDX      __VCPU_REGS_RDX * WORD_SIZE
+-#define VCPU_RBX      __VCPU_REGS_RBX * WORD_SIZE
++#define VCPU_RCX      (SVM_vcpu_arch_regs + __VCPU_REGS_RCX * WORD_SIZE)
++#define VCPU_RDX      (SVM_vcpu_arch_regs + __VCPU_REGS_RDX * WORD_SIZE)
++#define VCPU_RBX      (SVM_vcpu_arch_regs + __VCPU_REGS_RBX * WORD_SIZE)
+ /* Intentionally omit RSP as it's context switched by hardware */
+-#define VCPU_RBP      __VCPU_REGS_RBP * WORD_SIZE
+-#define VCPU_RSI      __VCPU_REGS_RSI * WORD_SIZE
+-#define VCPU_RDI      __VCPU_REGS_RDI * WORD_SIZE
++#define VCPU_RBP      (SVM_vcpu_arch_regs + __VCPU_REGS_RBP * WORD_SIZE)
++#define VCPU_RSI      (SVM_vcpu_arch_regs + __VCPU_REGS_RSI * WORD_SIZE)
++#define VCPU_RDI      (SVM_vcpu_arch_regs + __VCPU_REGS_RDI * WORD_SIZE)
+ #ifdef CONFIG_X86_64
+-#define VCPU_R8               __VCPU_REGS_R8  * WORD_SIZE
+-#define VCPU_R9               __VCPU_REGS_R9  * WORD_SIZE
+-#define VCPU_R10      __VCPU_REGS_R10 * WORD_SIZE
+-#define VCPU_R11      __VCPU_REGS_R11 * WORD_SIZE
+-#define VCPU_R12      __VCPU_REGS_R12 * WORD_SIZE
+-#define VCPU_R13      __VCPU_REGS_R13 * WORD_SIZE
+-#define VCPU_R14      __VCPU_REGS_R14 * WORD_SIZE
+-#define VCPU_R15      __VCPU_REGS_R15 * WORD_SIZE
++#define VCPU_R8               (SVM_vcpu_arch_regs + __VCPU_REGS_R8  * WORD_SIZE)
++#define VCPU_R9               (SVM_vcpu_arch_regs + __VCPU_REGS_R9  * WORD_SIZE)
++#define VCPU_R10      (SVM_vcpu_arch_regs + __VCPU_REGS_R10 * WORD_SIZE)
++#define VCPU_R11      (SVM_vcpu_arch_regs + __VCPU_REGS_R11 * WORD_SIZE)
++#define VCPU_R12      (SVM_vcpu_arch_regs + __VCPU_REGS_R12 * WORD_SIZE)
++#define VCPU_R13      (SVM_vcpu_arch_regs + __VCPU_REGS_R13 * WORD_SIZE)
++#define VCPU_R14      (SVM_vcpu_arch_regs + __VCPU_REGS_R14 * WORD_SIZE)
++#define VCPU_R15      (SVM_vcpu_arch_regs + __VCPU_REGS_R15 * WORD_SIZE)
+ #endif
+ .section .noinstr.text, "ax"
+@@ -32,7 +33,7 @@
+ /**
+  * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
+  * @vmcb_pa:  unsigned long
+- * @regs:     unsigned long * (to guest registers)
++ * @svm:      struct vcpu_svm *
+  */
+ SYM_FUNC_START(__svm_vcpu_run)
+       push %_ASM_BP
+@@ -47,13 +48,13 @@ SYM_FUNC_START(__svm_vcpu_run)
+ #endif
+       push %_ASM_BX
+-      /* Save @regs. */
++      /* Save @svm. */
+       push %_ASM_ARG2
+       /* Save @vmcb. */
+       push %_ASM_ARG1
+-      /* Move @regs to RAX. */
++      /* Move @svm to RAX. */
+       mov %_ASM_ARG2, %_ASM_AX
+       /* Load guest registers. */
+@@ -89,7 +90,7 @@ SYM_FUNC_START(__svm_vcpu_run)
+       FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
+ #endif
+-      /* "POP" @regs to RAX. */
++      /* "POP" @svm to RAX. */
+       pop %_ASM_AX
+       /* Save all guest registers.  */
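
The VCPU_* rewrite above leans on the kvm-asm-offsets machinery. Here is a standalone sketch of that trick, assuming nothing about the real structures (struct demo_vcpu below is a hypothetical stand-in for struct vcpu_svm, and the macros are modelled on <linux/kbuild.h>): compiling such a file with -S emits ".ascii" markers that a sed script turns into "#define DEMO_vcpu_arch_regs <offset>" in a generated header, which assembly like vmenter.S can then include.

    #include <stddef.h>

    #define DEFINE(sym, val) \
            asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
    #define OFFSET(sym, str, mem) DEFINE(sym, offsetof(struct str, mem))

    struct demo_regs { unsigned long regs[16]; };

    struct demo_vcpu {              /* hypothetical layout, for illustration */
            void *vmcb;
            struct demo_regs arch;
    };

    static void __attribute__((used)) common(void)
    {
            /* Emits a marker for the byte offset of arch.regs within demo_vcpu. */
            OFFSET(DEMO_vcpu_arch_regs, demo_vcpu, arch.regs);
    }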
diff --git a/queue-6.0/kvm-svm-retrieve-vmcb-from-assembly.patch b/queue-6.0/kvm-svm-retrieve-vmcb-from-assembly.patch
new file mode 100644 (file)
index 0000000..238893e
--- /dev/null
@@ -0,0 +1,135 @@
+From f6d58266d731fd7e63163790aad21e0dbb1d5264 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Mon, 7 Nov 2022 04:17:29 -0500
+Subject: KVM: SVM: retrieve VMCB from assembly
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit f6d58266d731fd7e63163790aad21e0dbb1d5264 upstream.
+
+Continue moving accesses to struct vcpu_svm to vmenter.S.  Reducing the
+number of arguments limits the chance of mistakes due to different
+registers used for argument passing in 32- and 64-bit ABIs; pushing the
+VMCB argument and almost immediately popping it into a different
+register looks pretty weird.
+
+32-bit ABI is not a concern for __svm_sev_es_vcpu_run() which is 64-bit
+only; however, it will soon need @svm to save/restore SPEC_CTRL so stay
+consistent with __svm_vcpu_run() and let them share the same prototype.
+
+No functional change intended.
+
+Cc: stable@vger.kernel.org
+Fixes: a149180fbcf3 ("x86: Add magic AMD return-thunk")
+Reviewed-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/kvm-asm-offsets.c |    2 ++
+ arch/x86/kvm/svm/svm.c         |    5 ++---
+ arch/x86/kvm/svm/svm.h         |    4 ++--
+ arch/x86/kvm/svm/vmenter.S     |   20 ++++++++++----------
+ 4 files changed, 16 insertions(+), 15 deletions(-)
+
+--- a/arch/x86/kvm/kvm-asm-offsets.c
++++ b/arch/x86/kvm/kvm-asm-offsets.c
+@@ -15,6 +15,8 @@ static void __used common(void)
+       if (IS_ENABLED(CONFIG_KVM_AMD)) {
+               BLANK();
+               OFFSET(SVM_vcpu_arch_regs, vcpu_svm, vcpu.arch.regs);
++              OFFSET(SVM_current_vmcb, vcpu_svm, current_vmcb);
++              OFFSET(KVM_VMCB_pa, kvm_vmcb_info, pa);
+       }
+       if (IS_ENABLED(CONFIG_KVM_INTEL)) {
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -3915,12 +3915,11 @@ static fastpath_t svm_exit_handlers_fast
+ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
+ {
+       struct vcpu_svm *svm = to_svm(vcpu);
+-      unsigned long vmcb_pa = svm->current_vmcb->pa;
+       guest_state_enter_irqoff();
+       if (sev_es_guest(vcpu->kvm)) {
+-              __svm_sev_es_vcpu_run(vmcb_pa);
++              __svm_sev_es_vcpu_run(svm);
+       } else {
+               struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
+@@ -3931,7 +3930,7 @@ static noinstr void svm_vcpu_enter_exit(
+                * vmcb02 when switching vmcbs for nested virtualization.
+                */
+               vmload(svm->vmcb01.pa);
+-              __svm_vcpu_run(vmcb_pa, svm);
++              __svm_vcpu_run(svm);
+               vmsave(svm->vmcb01.pa);
+               vmload(__sme_page_pa(sd->save_area));
+--- a/arch/x86/kvm/svm/svm.h
++++ b/arch/x86/kvm/svm/svm.h
+@@ -683,7 +683,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *
+ /* vmenter.S */
+-void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
+-void __svm_vcpu_run(unsigned long vmcb_pa, struct vcpu_svm *svm);
++void __svm_sev_es_vcpu_run(struct vcpu_svm *svm);
++void __svm_vcpu_run(struct vcpu_svm *svm);
+ #endif
+--- a/arch/x86/kvm/svm/vmenter.S
++++ b/arch/x86/kvm/svm/vmenter.S
+@@ -32,7 +32,6 @@
+ /**
+  * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
+- * @vmcb_pa:  unsigned long
+  * @svm:      struct vcpu_svm *
+  */
+ SYM_FUNC_START(__svm_vcpu_run)
+@@ -49,16 +48,16 @@ SYM_FUNC_START(__svm_vcpu_run)
+       push %_ASM_BX
+       /* Save @svm. */
+-      push %_ASM_ARG2
+-
+-      /* Save @vmcb. */
+       push %_ASM_ARG1
++.ifnc _ASM_ARG1, _ASM_DI
+       /* Move @svm to RDI. */
+-      mov %_ASM_ARG2, %_ASM_DI
++      mov %_ASM_ARG1, %_ASM_DI
++.endif
+-      /* "POP" @vmcb to RAX. */
+-      pop %_ASM_AX
++      /* Get svm->current_vmcb->pa into RAX. */
++      mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
++      mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX
+       /* Load guest registers. */
+       mov VCPU_RCX(%_ASM_DI), %_ASM_CX
+@@ -170,7 +169,7 @@ SYM_FUNC_END(__svm_vcpu_run)
+ /**
+  * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
+- * @vmcb_pa:  unsigned long
++ * @svm:      struct vcpu_svm *
+  */
+ SYM_FUNC_START(__svm_sev_es_vcpu_run)
+       push %_ASM_BP
+@@ -185,8 +184,9 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
+ #endif
+       push %_ASM_BX
+-      /* Move @vmcb to RAX. */
+-      mov %_ASM_ARG1, %_ASM_AX
++      /* Get svm->current_vmcb->pa into RAX. */
++      mov SVM_current_vmcb(%_ASM_ARG1), %_ASM_AX
++      mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX
+       /* Enter guest mode */
+       sti
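
The two back-to-back "mov" instructions added above are just a pointer chase; written as C for reference (the structs below only mirror the shape of the real struct vcpu_svm and struct kvm_vmcb_info, they are not the real definitions):

    struct demo_vmcb_info {
            void *ptr;
            unsigned long pa;
    };

    struct demo_vcpu_svm {
            struct demo_vmcb_info *current_vmcb;
            /* ... */
    };

    static unsigned long demo_current_vmcb_pa(struct demo_vcpu_svm *svm)
    {
            /* mov SVM_current_vmcb(%rdi), %rax ; mov KVM_VMCB_pa(%rax), %rax */
            return svm->current_vmcb->pa;
    }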
diff --git a/queue-6.0/kvm-x86-mmu-block-all-page-faults-during-kvm_zap_gfn_range.patch b/queue-6.0/kvm-x86-mmu-block-all-page-faults-during-kvm_zap_gfn_range.patch
new file mode 100644 (file)
index 0000000..34bb8a1
--- /dev/null
@@ -0,0 +1,53 @@
+From 6d3085e4d89ad7e6c7f1c6cf929d903393565861 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Fri, 11 Nov 2022 00:18:41 +0000
+Subject: KVM: x86/mmu: Block all page faults during kvm_zap_gfn_range()
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit 6d3085e4d89ad7e6c7f1c6cf929d903393565861 upstream.
+
+When zapping a GFN range, pass 0 => ALL_ONES for the to-be-invalidated
+range to effectively block all page faults while the zap is in-progress.
+The invalidation helpers take a host virtual address, whereas zapping a
+GFN range obviously provides a guest physical address, and in the wrong
+unit of measurement at that (frames vs. bytes).
+
+Alternatively, KVM could walk all memslots to get the associated HVAs,
+but thanks to SMM, that would require multiple lookups.  And practically
+speaking, kvm_zap_gfn_range() usage is quite rare and not a hot path,
+e.g. MTRR and CR0.CD are almost guaranteed to be done only on vCPU0
+during boot, and APICv inhibits are similarly infrequent operations.
+
+Fixes: edb298c663fc ("KVM: x86/mmu: bump mmu notifier count in kvm_zap_gfn_range")
+Reported-by: Chao Peng <chao.p.peng@linux.intel.com>
+Cc: stable@vger.kernel.org
+Cc: Maxim Levitsky <mlevitsk@redhat.com>
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20221111001841.2412598-1-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/mmu/mmu.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -6044,7 +6044,7 @@ void kvm_zap_gfn_range(struct kvm *kvm,
+       write_lock(&kvm->mmu_lock);
+-      kvm_mmu_invalidate_begin(kvm, gfn_start, gfn_end);
++      kvm_mmu_invalidate_begin(kvm, 0, -1ul);
+       flush = kvm_rmap_zap_gfn_range(kvm, gfn_start, gfn_end);
+@@ -6058,7 +6058,7 @@ void kvm_zap_gfn_range(struct kvm *kvm,
+               kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
+                                                  gfn_end - gfn_start);
+-      kvm_mmu_invalidate_end(kvm, gfn_start, gfn_end);
++      kvm_mmu_invalidate_end(kvm, 0, -1ul);
+       write_unlock(&kvm->mmu_lock);
+ }
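
A tiny, self-contained illustration of the "frames vs. bytes" point above (hypothetical numbers; a 4 KiB page size is assumed): shifting a GFN range into bytes still yields a guest-physical range, not the host-virtual range the invalidation helpers expect, which is why the fix simply widens the invalidation window to everything (0 .. -1ul).

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const unsigned int page_shift = 12;           /* 4 KiB pages */
            uint64_t gfn_start = 0x100, gfn_end = 0x200;  /* frames, made up */

            printf("gfn [%#llx, %#llx) -> guest-physical bytes [%#llx, %#llx)\n",
                   (unsigned long long)gfn_start, (unsigned long long)gfn_end,
                   (unsigned long long)(gfn_start << page_shift),
                   (unsigned long long)(gfn_end << page_shift));
            return 0;
    }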
diff --git a/queue-6.0/kvm-x86-pmu-do-not-speculatively-query-intel-gp-pmcs-that-don-t-exist-yet.patch b/queue-6.0/kvm-x86-pmu-do-not-speculatively-query-intel-gp-pmcs-that-don-t-exist-yet.patch
new file mode 100644 (file)
index 0000000..7520e09
--- /dev/null
@@ -0,0 +1,76 @@
+From 8631ef59b62290c7d88e7209e35dfb47f33f4902 Mon Sep 17 00:00:00 2001
+From: Like Xu <likexu@tencent.com>
+Date: Mon, 19 Sep 2022 17:10:06 +0800
+Subject: KVM: x86/pmu: Do not speculatively query Intel GP PMCs that don't exist yet
+
+From: Like Xu <likexu@tencent.com>
+
+commit 8631ef59b62290c7d88e7209e35dfb47f33f4902 upstream.
+
+The SDM lists an architectural MSR IA32_CORE_CAPABILITIES (0xCF)
+that limits the number of Intel GP PMC MSRs allocated starting at
+0xC1 to at most 14; likewise the Intel April 2022 SDM adds
+IA32_OVERCLOCKING_STATUS at 0x195, which limits the number of event
+selection MSRs to 15 (0x186-0x194).
+
+Limiting the maximum number of counters to 14 or 18 based on the currently
+allocated MSRs is clearly fragile, and it seems likely that Intel will
+even place PMCs 8-15 at a completely different range of MSR indices.
+So stop at the maximum number of GP PMCs supported today on Intel
+processors.
+
+There are some machines, like Intel P4 with non Architectural PMU, that
+may indeed have 18 counters, but those counters are in a completely
+different MSR address range and are not supported by KVM.
+
+Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
+Cc: stable@vger.kernel.org
+Fixes: cf05a67b68b8 ("KVM: x86: omit "impossible" pmu MSRs from MSR list")
+Suggested-by: Jim Mattson <jmattson@google.com>
+Signed-off-by: Like Xu <likexu@tencent.com>
+Reviewed-by: Jim Mattson <jmattson@google.com>
+Message-Id: <20220919091008.60695-1-likexu@tencent.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/x86.c |   14 ++------------
+ 1 file changed, 2 insertions(+), 12 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1431,20 +1431,10 @@ static const u32 msrs_to_save_all[] = {
+       MSR_ARCH_PERFMON_PERFCTR0 + 2, MSR_ARCH_PERFMON_PERFCTR0 + 3,
+       MSR_ARCH_PERFMON_PERFCTR0 + 4, MSR_ARCH_PERFMON_PERFCTR0 + 5,
+       MSR_ARCH_PERFMON_PERFCTR0 + 6, MSR_ARCH_PERFMON_PERFCTR0 + 7,
+-      MSR_ARCH_PERFMON_PERFCTR0 + 8, MSR_ARCH_PERFMON_PERFCTR0 + 9,
+-      MSR_ARCH_PERFMON_PERFCTR0 + 10, MSR_ARCH_PERFMON_PERFCTR0 + 11,
+-      MSR_ARCH_PERFMON_PERFCTR0 + 12, MSR_ARCH_PERFMON_PERFCTR0 + 13,
+-      MSR_ARCH_PERFMON_PERFCTR0 + 14, MSR_ARCH_PERFMON_PERFCTR0 + 15,
+-      MSR_ARCH_PERFMON_PERFCTR0 + 16, MSR_ARCH_PERFMON_PERFCTR0 + 17,
+       MSR_ARCH_PERFMON_EVENTSEL0, MSR_ARCH_PERFMON_EVENTSEL1,
+       MSR_ARCH_PERFMON_EVENTSEL0 + 2, MSR_ARCH_PERFMON_EVENTSEL0 + 3,
+       MSR_ARCH_PERFMON_EVENTSEL0 + 4, MSR_ARCH_PERFMON_EVENTSEL0 + 5,
+       MSR_ARCH_PERFMON_EVENTSEL0 + 6, MSR_ARCH_PERFMON_EVENTSEL0 + 7,
+-      MSR_ARCH_PERFMON_EVENTSEL0 + 8, MSR_ARCH_PERFMON_EVENTSEL0 + 9,
+-      MSR_ARCH_PERFMON_EVENTSEL0 + 10, MSR_ARCH_PERFMON_EVENTSEL0 + 11,
+-      MSR_ARCH_PERFMON_EVENTSEL0 + 12, MSR_ARCH_PERFMON_EVENTSEL0 + 13,
+-      MSR_ARCH_PERFMON_EVENTSEL0 + 14, MSR_ARCH_PERFMON_EVENTSEL0 + 15,
+-      MSR_ARCH_PERFMON_EVENTSEL0 + 16, MSR_ARCH_PERFMON_EVENTSEL0 + 17,
+       MSR_IA32_PEBS_ENABLE, MSR_IA32_DS_AREA, MSR_PEBS_DATA_CFG,
+       MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3,
+@@ -7005,12 +6995,12 @@ static void kvm_init_msr_list(void)
+                               intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2)
+                               continue;
+                       break;
+-              case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR0 + 17:
++              case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR0 + 7:
+                       if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_PERFCTR0 >=
+                           min(INTEL_PMC_MAX_GENERIC, kvm_pmu_cap.num_counters_gp))
+                               continue;
+                       break;
+-              case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL0 + 17:
++              case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL0 + 7:
+                       if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_EVENTSEL0 >=
+                           min(INTEL_PMC_MAX_GENERIC, kvm_pmu_cap.num_counters_gp))
+                               continue;
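
The index arithmetic behind the commit message, as a small self-contained check (the two base values are the architectural MSR indices the message refers to):

    #include <stdio.h>

    #define MSR_ARCH_PERFMON_PERFCTR0   0x0c1
    #define MSR_ARCH_PERFMON_EVENTSEL0  0x186

    int main(void)
    {
            /* 0xc1 + 14 = 0xcf (IA32_CORE_CAPABILITIES) */
            printf("PERFCTR0  + 14 = %#x\n", MSR_ARCH_PERFMON_PERFCTR0 + 14);
            /* 0x186 + 15 = 0x195 (IA32_OVERCLOCKING_STATUS) */
            printf("EVENTSEL0 + 15 = %#x\n", MSR_ARCH_PERFMON_EVENTSEL0 + 15);
            return 0;
    }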
diff --git a/queue-6.0/kvm-x86-use-a-separate-asm-offsets.c-file.patch b/queue-6.0/kvm-x86-use-a-separate-asm-offsets.c-file.patch
new file mode 100644 (file)
index 0000000..2fd65bd
--- /dev/null
@@ -0,0 +1,111 @@
+From debc5a1ec0d195ffea70d11efeffb713de9fdbc7 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Tue, 8 Nov 2022 09:44:53 +0100
+Subject: KVM: x86: use a separate asm-offsets.c file
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit debc5a1ec0d195ffea70d11efeffb713de9fdbc7 upstream.
+
+This already removes an ugly #include "" from asm-offsets.c, but more
+importantly it avoids a future error when trying to define asm-offsets
+for KVM's svm/svm.h header.
+
+This would not work for kernel/asm-offsets.c, because svm/svm.h
+includes kvm_cache_regs.h which is not in the include path when
+compiling asm-offsets.c.  The problem is not there if the .c file is
+in arch/x86/kvm.
+
+Suggested-by: Sean Christopherson <seanjc@google.com>
+Cc: stable@vger.kernel.org
+Fixes: a149180fbcf3 ("x86: Add magic AMD return-thunk")
+Reviewed-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/asm-offsets.c  |    6 ------
+ arch/x86/kvm/.gitignore        |    2 ++
+ arch/x86/kvm/Makefile          |    9 +++++++++
+ arch/x86/kvm/kvm-asm-offsets.c |   18 ++++++++++++++++++
+ arch/x86/kvm/vmx/vmenter.S     |    2 +-
+ 5 files changed, 30 insertions(+), 7 deletions(-)
+ create mode 100644 arch/x86/kvm/.gitignore
+ create mode 100644 arch/x86/kvm/kvm-asm-offsets.c
+
+--- a/arch/x86/kernel/asm-offsets.c
++++ b/arch/x86/kernel/asm-offsets.c
+@@ -19,7 +19,6 @@
+ #include <asm/suspend.h>
+ #include <asm/tlbflush.h>
+ #include <asm/tdx.h>
+-#include "../kvm/vmx/vmx.h"
+ #ifdef CONFIG_XEN
+ #include <xen/interface/xen.h>
+@@ -108,9 +107,4 @@ static void __used common(void)
+       OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
+       OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
+       OFFSET(TSS_sp2, tss_struct, x86_tss.sp2);
+-
+-      if (IS_ENABLED(CONFIG_KVM_INTEL)) {
+-              BLANK();
+-              OFFSET(VMX_spec_ctrl, vcpu_vmx, spec_ctrl);
+-      }
+ }
+--- /dev/null
++++ b/arch/x86/kvm/.gitignore
+@@ -0,0 +1,2 @@
++/kvm-asm-offsets.s
++/kvm-asm-offsets.h
+--- a/arch/x86/kvm/Makefile
++++ b/arch/x86/kvm/Makefile
+@@ -34,3 +34,12 @@ endif
+ obj-$(CONFIG_KVM)     += kvm.o
+ obj-$(CONFIG_KVM_INTEL)       += kvm-intel.o
+ obj-$(CONFIG_KVM_AMD) += kvm-amd.o
++
++AFLAGS_vmx/vmenter.o    := -iquote $(obj)
++$(obj)/vmx/vmenter.o: $(obj)/kvm-asm-offsets.h
++
++$(obj)/kvm-asm-offsets.h: $(obj)/kvm-asm-offsets.s FORCE
++      $(call filechk,offsets,__KVM_ASM_OFFSETS_H__)
++
++targets += kvm-asm-offsets.s
++clean-files += kvm-asm-offsets.h
+--- /dev/null
++++ b/arch/x86/kvm/kvm-asm-offsets.c
+@@ -0,0 +1,18 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Generate definitions needed by assembly language modules.
++ * This code generates raw asm output which is post-processed to extract
++ * and format the required data.
++ */
++#define COMPILE_OFFSETS
++
++#include <linux/kbuild.h>
++#include "vmx/vmx.h"
++
++static void __used common(void)
++{
++      if (IS_ENABLED(CONFIG_KVM_INTEL)) {
++              BLANK();
++              OFFSET(VMX_spec_ctrl, vcpu_vmx, spec_ctrl);
++      }
++}
+--- a/arch/x86/kvm/vmx/vmenter.S
++++ b/arch/x86/kvm/vmx/vmenter.S
+@@ -1,12 +1,12 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+ #include <linux/linkage.h>
+ #include <asm/asm.h>
+-#include <asm/asm-offsets.h>
+ #include <asm/bitsperlong.h>
+ #include <asm/kvm_vcpu_regs.h>
+ #include <asm/nospec-branch.h>
+ #include <asm/percpu.h>
+ #include <asm/segment.h>
++#include "kvm-asm-offsets.h"
+ #include "run_flags.h"
+ #define WORD_SIZE (BITS_PER_LONG / 8)
diff --git a/queue-6.0/series b/queue-6.0/series
index 06cddd39ec74da81766317193f01b9752266c370..11f0e36ae64019170255d34c13f92e3b8c6cef9a 100644 (file)
@@ -162,3 +162,11 @@ mm-shmem-use-page_mapping-to-detect-page-cache-for-uffd-continue.patch
 can-j1939-j1939_send_one-fix-missing-can-header-initialization.patch
 can-isotp-fix-tx-state-handling-for-echo-tx-processing.patch
 can-rcar_canfd-add-missing-ecc-error-checks-for-channels-2-7.patch
+kvm-x86-mmu-block-all-page-faults-during-kvm_zap_gfn_range.patch
+kvm-x86-pmu-do-not-speculatively-query-intel-gp-pmcs-that-don-t-exist-yet.patch
+kvm-x86-use-a-separate-asm-offsets.c-file.patch
+kvm-svm-replace-regs-argument-of-__svm_vcpu_run-with-vcpu_svm.patch
+kvm-svm-adjust-register-allocation-for-__svm_vcpu_run.patch
+kvm-svm-only-dump-vmsa-to-klog-at-kern_debug-level.patch
+kvm-svm-retrieve-vmcb-from-assembly.patch
+kvm-svm-move-guest-vmsave-vmload-back-to-assembly.patch