5.19-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 12 Aug 2022 15:40:52 +0000 (17:40 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 12 Aug 2022 15:40:52 +0000 (17:40 +0200)
added patches:
kvm-svm-disable-sev-es-support-if-mmio-caching-is-disable.patch
kvm-x86-do-not-report-preemption-if-the-steal-time-cache-is-stale.patch
kvm-x86-mmu-fully-re-evaluate-mmio-caching-when-spte-masks-change.patch
kvm-x86-revalidate-steal-time-cache-if-msr-value-changes.patch
kvm-x86-tag-kvm_mmu_x86_module_init-with-__init.patch
kvm-x86-xen-initialize-xen-timer-only-once.patch
kvm-x86-xen-stop-xen-timer-before-changing-irq.patch

queue-5.19/kvm-svm-disable-sev-es-support-if-mmio-caching-is-disable.patch [new file with mode: 0644]
queue-5.19/kvm-x86-do-not-report-preemption-if-the-steal-time-cache-is-stale.patch [new file with mode: 0644]
queue-5.19/kvm-x86-mmu-fully-re-evaluate-mmio-caching-when-spte-masks-change.patch [new file with mode: 0644]
queue-5.19/kvm-x86-revalidate-steal-time-cache-if-msr-value-changes.patch [new file with mode: 0644]
queue-5.19/kvm-x86-tag-kvm_mmu_x86_module_init-with-__init.patch [new file with mode: 0644]
queue-5.19/kvm-x86-xen-initialize-xen-timer-only-once.patch [new file with mode: 0644]
queue-5.19/kvm-x86-xen-stop-xen-timer-before-changing-irq.patch [new file with mode: 0644]
queue-5.19/series

diff --git a/queue-5.19/kvm-svm-disable-sev-es-support-if-mmio-caching-is-disable.patch b/queue-5.19/kvm-svm-disable-sev-es-support-if-mmio-caching-is-disable.patch
new file mode 100644
index 0000000..d1f97a4
--- /dev/null
@@ -0,0 +1,118 @@
+From 0c29397ac1fdd64ae59941a477511a05e61a4754 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Wed, 3 Aug 2022 22:49:57 +0000
+Subject: KVM: SVM: Disable SEV-ES support if MMIO caching is disabled
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit 0c29397ac1fdd64ae59941a477511a05e61a4754 upstream.
+
+Disable SEV-ES if MMIO caching is disabled as SEV-ES relies on MMIO SPTEs
+generating #NPF(RSVD), which are reflected by the CPU into the guest as
+a #VC.  With SEV-ES, the untrusted host, a.k.a. KVM, doesn't have access
+to the guest instruction stream or register state and so can't directly
+emulate in response to a #NPF on an emulated MMIO GPA.  Disabling MMIO
+caching means guest accesses to emulated MMIO ranges cause #NPF(!PRESENT),
+and those flavors of #NPF cause automatic VM-Exits, not #VC.
+
+Adjust KVM's MMIO masks to account for the C-bit location prior to doing
+SEV(-ES) setup, and document that dependency between adjusting the MMIO
+SPTE mask and SEV(-ES) setup.
+
+Fixes: b09763da4dd8 ("KVM: x86/mmu: Add module param to disable MMIO caching (for testing)")
+Reported-by: Michael Roth <michael.roth@amd.com>
+Tested-by: Michael Roth <michael.roth@amd.com>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20220803224957.1285926-4-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/mmu.h      |    2 ++
+ arch/x86/kvm/mmu/spte.c |    1 +
+ arch/x86/kvm/mmu/spte.h |    2 --
+ arch/x86/kvm/svm/sev.c  |   10 ++++++++++
+ arch/x86/kvm/svm/svm.c  |    9 ++++++---
+ 5 files changed, 19 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kvm/mmu.h
++++ b/arch/x86/kvm/mmu.h
+@@ -11,6 +11,8 @@
+ #define PT32_PT_BITS 10
+ #define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)
++extern bool __read_mostly enable_mmio_caching;
++
+ #define PT_WRITABLE_SHIFT 1
+ #define PT_USER_SHIFT 2
+--- a/arch/x86/kvm/mmu/spte.c
++++ b/arch/x86/kvm/mmu/spte.c
+@@ -21,6 +21,7 @@
+ bool __read_mostly enable_mmio_caching = true;
+ module_param_named(mmio_caching, enable_mmio_caching, bool, 0444);
++EXPORT_SYMBOL_GPL(enable_mmio_caching);
+ u64 __read_mostly shadow_host_writable_mask;
+ u64 __read_mostly shadow_mmu_writable_mask;
+--- a/arch/x86/kvm/mmu/spte.h
++++ b/arch/x86/kvm/mmu/spte.h
+@@ -5,8 +5,6 @@
+ #include "mmu_internal.h"
+-extern bool __read_mostly enable_mmio_caching;
+-
+ /*
+  * A MMU present SPTE is backed by actual memory and may or may not be present
+  * in hardware.  E.g. MMIO SPTEs are not considered present.  Use bit 11, as it
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -22,6 +22,7 @@
+ #include <asm/trapnr.h>
+ #include <asm/fpu/xcr.h>
++#include "mmu.h"
+ #include "x86.h"
+ #include "svm.h"
+ #include "svm_ops.h"
+@@ -2221,6 +2222,15 @@ void __init sev_hardware_setup(void)
+       if (!sev_es_enabled)
+               goto out;
++      /*
++       * SEV-ES requires MMIO caching as KVM doesn't have access to the guest
++       * instruction stream, i.e. can't emulate in response to a #NPF and
++       * instead relies on #NPF(RSVD) being reflected into the guest as #VC
++       * (the guest can then do a #VMGEXIT to request MMIO emulation).
++       */
++      if (!enable_mmio_caching)
++              goto out;
++
+       /* Does the CPU support SEV-ES? */
+       if (!boot_cpu_has(X86_FEATURE_SEV_ES))
+               goto out;
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -4897,13 +4897,16 @@ static __init int svm_hardware_setup(voi
+       /* Setup shadow_me_value and shadow_me_mask */
+       kvm_mmu_set_me_spte_mask(sme_me_mask, sme_me_mask);
+-      /* Note, SEV setup consumes npt_enabled. */
++      svm_adjust_mmio_mask();
++
++      /*
++       * Note, SEV setup consumes npt_enabled and enable_mmio_caching (which
++       * may be modified by svm_adjust_mmio_mask()).
++       */
+       sev_hardware_setup();
+       svm_hv_hardware_setup();
+-      svm_adjust_mmio_mask();
+-
+       for_each_possible_cpu(cpu) {
+               r = svm_cpu_init(cpu);
+               if (r)
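
The ordering this hunk establishes is the whole point of the fix: svm_adjust_mmio_mask() may end up clearing enable_mmio_caching (per the added comment, SEV setup consumes a flag that the mask adjustment can modify), so sev_hardware_setup() has to run afterwards to observe the final value. A minimal, self-contained userspace C sketch of that adjust-then-consume pattern; the names and the no_rsvd_bits_left trigger are illustrative, not KVM's actual logic:

  #include <stdbool.h>
  #include <stdio.h>

  static bool enable_mmio_caching = true; /* module-param analogue */
  static bool sev_es_enabled = true;

  /* Stand-in for svm_adjust_mmio_mask(): the hypothetical trigger models a
   * C-bit layout that leaves no reserved bits for an MMIO mask. */
  static void adjust_mmio_mask(bool no_rsvd_bits_left)
  {
          if (no_rsvd_bits_left)
                  enable_mmio_caching = false;
  }

  /* Stand-in for sev_hardware_setup(): SEV-ES needs #NPF(RSVD), i.e. MMIO
   * caching, so it must see the flag's final value. */
  static void sev_setup(void)
  {
          if (!enable_mmio_caching)
                  sev_es_enabled = false;
  }

  int main(void)
  {
          adjust_mmio_mask(true); /* resolve the flag first ... */
          sev_setup();            /* ... then let SEV setup consume it */
          printf("SEV-ES enabled: %d\n", sev_es_enabled);
          return 0;
  }
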
diff --git a/queue-5.19/kvm-x86-do-not-report-preemption-if-the-steal-time-cache-is-stale.patch b/queue-5.19/kvm-x86-do-not-report-preemption-if-the-steal-time-cache-is-stale.patch
new file mode 100644
index 0000000..ccad583
--- /dev/null
@@ -0,0 +1,45 @@
+From c3c28d24d910a746b02f496d190e0e8c6560224b Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Thu, 4 Aug 2022 15:28:32 +0200
+Subject: KVM: x86: do not report preemption if the steal time cache is stale
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit c3c28d24d910a746b02f496d190e0e8c6560224b upstream.
+
+Commit 7e2175ebd695 ("KVM: x86: Fix recording of guest steal time
+/ preempted status", 2021-11-11) open coded the previous call to
+kvm_map_gfn, but in doing so it dropped the comparison between the cached
+guest physical address and the one in the MSR.  This causes an incorrect
+cache hit if the guest modifies the steal time address while the memslots
+remain the same.  This can happen with kexec, in which case the preempted
+bit is written at the address used by the old kernel instead of
+the new one.
+
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: stable@vger.kernel.org
+Fixes: 7e2175ebd695 ("KVM: x86: Fix recording of guest steal time / preempted status")
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/x86.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -4635,6 +4635,7 @@ static void kvm_steal_time_set_preempted
+       struct kvm_steal_time __user *st;
+       struct kvm_memslots *slots;
+       static const u8 preempted = KVM_VCPU_PREEMPTED;
++      gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS;
+       /*
+        * The vCPU can be marked preempted if and only if the VM-Exit was on
+@@ -4662,6 +4663,7 @@ static void kvm_steal_time_set_preempted
+       slots = kvm_memslots(vcpu->kvm);
+       if (unlikely(slots->generation != ghc->generation ||
++                   gpa != ghc->gpa ||
+                    kvm_is_error_hva(ghc->hva) || !ghc->memslot))
+               return;
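
The restored rule is easier to see in isolation: a cached translation may be reused only if the memslot generation is unchanged and the cached guest physical address still matches what the guest last wrote to the MSR. A self-contained C sketch of such a hit test, with illustrative names (KVM's real cache is a struct gfn_to_hva_cache):

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  struct gpa_cache {
          uint64_t gpa;        /* guest physical address the cache maps */
          uint64_t generation; /* memslot generation when it was mapped */
          void *hva;           /* cached host-side mapping */
  };

  static bool cache_hit(const struct gpa_cache *c, uint64_t cur_generation,
                        uint64_t msr_gpa)
  {
          /* The gpa comparison is the one the original commit dropped. */
          return c->hva && c->gpa == msr_gpa &&
                 c->generation == cur_generation;
  }

  int main(void)
  {
          struct gpa_cache c = { .gpa = 0x1000, .generation = 7, .hva = &c };

          /* A kexec'd guest points the MSR at a new address: must miss. */
          printf("same gpa:  %d\n", cache_hit(&c, 7, 0x1000)); /* 1 */
          printf("moved gpa: %d\n", cache_hit(&c, 7, 0x2000)); /* 0 */
          return 0;
  }
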
diff --git a/queue-5.19/kvm-x86-mmu-fully-re-evaluate-mmio-caching-when-spte-masks-change.patch b/queue-5.19/kvm-x86-mmu-fully-re-evaluate-mmio-caching-when-spte-masks-change.patch
new file mode 100644 (file)
index 0000000..deb5441
--- /dev/null
@@ -0,0 +1,105 @@
+From c3e0c8c2e8b17bae30d5978bc2decdd4098f0f99 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Wed, 3 Aug 2022 22:49:56 +0000
+Subject: KVM: x86/mmu: Fully re-evaluate MMIO caching when SPTE masks change
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit c3e0c8c2e8b17bae30d5978bc2decdd4098f0f99 upstream.
+
+Fully re-evaluate whether or not MMIO caching can be enabled when SPTE
+masks change; simply clearing enable_mmio_caching when a configuration
+isn't compatible with caching fails to handle the scenario where the
+masks are updated, e.g. by VMX for EPT or by SVM to account for the C-bit
+location, and toggle compatibility from false=>true.
+
+Snapshot the original module param so that re-evaluating MMIO caching
+preserves userspace's desire to allow caching.  Use a snapshot approach
+so that enable_mmio_caching still reflects KVM's actual behavior.
+
+Fixes: 8b9e74bfbf8c ("KVM: x86/mmu: Use enable_mmio_caching to track if MMIO caching is enabled")
+Reported-by: Michael Roth <michael.roth@amd.com>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: stable@vger.kernel.org
+Tested-by: Michael Roth <michael.roth@amd.com>
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Reviewed-by: Kai Huang <kai.huang@intel.com>
+Message-Id: <20220803224957.1285926-3-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/mmu/mmu.c  |    4 ++++
+ arch/x86/kvm/mmu/spte.c |   19 +++++++++++++++++++
+ arch/x86/kvm/mmu/spte.h |    1 +
+ 3 files changed, 24 insertions(+)
+
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -6274,11 +6274,15 @@ static int set_nx_huge_pages(const char
+ /*
+  * nx_huge_pages needs to be resolved to true/false when kvm.ko is loaded, as
+  * its default value of -1 is technically undefined behavior for a boolean.
++ * Forward the module init call to SPTE code so that it too can handle module
++ * params that need to be resolved/snapshot.
+  */
+ void __init kvm_mmu_x86_module_init(void)
+ {
+       if (nx_huge_pages == -1)
+               __set_nx_huge_pages(get_nx_auto_mode());
++
++      kvm_mmu_spte_module_init();
+ }
+ /*
+--- a/arch/x86/kvm/mmu/spte.c
++++ b/arch/x86/kvm/mmu/spte.c
+@@ -20,6 +20,7 @@
+ #include <asm/vmx.h>
+ bool __read_mostly enable_mmio_caching = true;
++static bool __ro_after_init allow_mmio_caching;
+ module_param_named(mmio_caching, enable_mmio_caching, bool, 0444);
+ EXPORT_SYMBOL_GPL(enable_mmio_caching);
+@@ -43,6 +44,18 @@ u64 __read_mostly shadow_nonpresent_or_r
+ u8 __read_mostly shadow_phys_bits;
++void __init kvm_mmu_spte_module_init(void)
++{
++      /*
++       * Snapshot userspace's desire to allow MMIO caching.  Whether or not
++       * KVM can actually enable MMIO caching depends on vendor-specific
++       * hardware capabilities and other module params that can't be resolved
++       * until the vendor module is loaded, i.e. enable_mmio_caching can and
++       * will change when the vendor module is (re)loaded.
++       */
++      allow_mmio_caching = enable_mmio_caching;
++}
++
+ static u64 generation_mmio_spte_mask(u64 gen)
+ {
+       u64 mask;
+@@ -338,6 +351,12 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio
+       BUG_ON((u64)(unsigned)access_mask != access_mask);
+       WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);
++      /*
++       * Reset to the original module param value to honor userspace's desire
++       * to (dis)allow MMIO caching.  Update the param itself so that
++       * userspace can see whether or not KVM is actually using MMIO caching.
++       */
++      enable_mmio_caching = allow_mmio_caching;
+       if (!enable_mmio_caching)
+               mmio_value = 0;
+--- a/arch/x86/kvm/mmu/spte.h
++++ b/arch/x86/kvm/mmu/spte.h
+@@ -444,6 +444,7 @@ static inline u64 restore_acc_track_spte
+ u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn);
++void __init kvm_mmu_spte_module_init(void);
+ void kvm_mmu_reset_all_pte_masks(void);
+ #endif
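
The snapshot pattern generalizes: keep the user's requested value immutable, and recompute the effective value from that snapshot on every re-evaluation, so a false result from one vendor-module load cannot become sticky. A minimal userspace C sketch of the pattern, with illustrative names rather than KVM's:

  #include <stdbool.h>
  #include <stdio.h>

  static bool enable_mmio_caching = true; /* the visible module param */
  static bool allow_mmio_caching;         /* immutable snapshot of intent */

  static void module_init_snapshot(void)
  {
          allow_mmio_caching = enable_mmio_caching;
  }

  /* Called whenever a vendor module reprograms the SPTE masks. */
  static void reevaluate_mmio_caching(bool masks_support_caching)
  {
          /* Start from the user's wish, not the last computed value, so a
           * false result can later flip back to true. */
          enable_mmio_caching = allow_mmio_caching && masks_support_caching;
  }

  int main(void)
  {
          module_init_snapshot();
          reevaluate_mmio_caching(false); /* incompatible masks: off */
          reevaluate_mmio_caching(true);  /* compatible reload: on again */
          printf("caching: %d\n", enable_mmio_caching); /* prints 1 */
          return 0;
  }
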
diff --git a/queue-5.19/kvm-x86-revalidate-steal-time-cache-if-msr-value-changes.patch b/queue-5.19/kvm-x86-revalidate-steal-time-cache-if-msr-value-changes.patch
new file mode 100644 (file)
index 0000000..7510202
--- /dev/null
@@ -0,0 +1,59 @@
+From 901d3765fa804ce42812f1d5b1f3de2dfbb26723 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Thu, 4 Aug 2022 15:28:32 +0200
+Subject: KVM: x86: revalidate steal time cache if MSR value changes
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 901d3765fa804ce42812f1d5b1f3de2dfbb26723 upstream.
+
+Commit 7e2175ebd695 ("KVM: x86: Fix recording of guest steal time
+/ preempted status", 2021-11-11) open coded the previous call to
+kvm_map_gfn, but in doing so it dropped the comparison between the cached
+guest physical address and the one in the MSR.  This causes an incorrect
+cache hit if the guest modifies the steal time address while the memslots
+remain the same.  This can happen with kexec, in which case the steal
+time data is written at the address used by the old kernel instead of
+the new one.
+
+While at it, rename the variable from gfn to gpa since it is a plain
+physical address and not a right-shifted one.
+
+Reported-by: Dave Young <ruyang@redhat.com>
+Reported-by: Xiaoying Yan  <yiyan@redhat.com>
+Analyzed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: stable@vger.kernel.org
+Fixes: 7e2175ebd695 ("KVM: x86: Fix recording of guest steal time / preempted status")
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/x86.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3386,6 +3386,7 @@ static void record_steal_time(struct kvm
+       struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache;
+       struct kvm_steal_time __user *st;
+       struct kvm_memslots *slots;
++      gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS;
+       u64 steal;
+       u32 version;
+@@ -3403,13 +3404,12 @@ static void record_steal_time(struct kvm
+       slots = kvm_memslots(vcpu->kvm);
+       if (unlikely(slots->generation != ghc->generation ||
++                   gpa != ghc->gpa ||
+                    kvm_is_error_hva(ghc->hva) || !ghc->memslot)) {
+-              gfn_t gfn = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS;
+-
+               /* We rely on the fact that it fits in a single page. */
+               BUILD_BUG_ON((sizeof(*st) - 1) & KVM_STEAL_VALID_BITS);
+-              if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gfn, sizeof(*st)) ||
++              if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st)) ||
+                   kvm_is_error_hva(ghc->hva) || !ghc->memslot)
+                       return;
+       }
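
On the rename: in KVM a gfn is a page-frame number (a guest physical address shifted right by PAGE_SHIFT), while the MSR value masked with KVM_STEAL_VALID_BITS is a byte-granular physical address, so calling it gfn was misleading. A small sketch of the two units, assuming the usual 4 KiB pages:

  #include <inttypes.h>
  #include <stdint.h>
  #include <stdio.h>

  #define PAGE_SHIFT 12   /* 4 KiB pages */
  typedef uint64_t gpa_t; /* byte-granular guest physical address */
  typedef uint64_t gfn_t; /* page-granular guest frame number */

  static gfn_t gpa_to_gfn(gpa_t gpa) { return gpa >> PAGE_SHIFT; }
  static gpa_t gfn_to_gpa(gfn_t gfn) { return gfn << PAGE_SHIFT; }

  int main(void)
  {
          gpa_t gpa = 0x1234abc;

          /* Round-tripping through a gfn drops the in-page offset. */
          printf("gpa %#" PRIx64 " -> gfn %#" PRIx64 " -> gpa %#" PRIx64 "\n",
                 gpa, gpa_to_gfn(gpa), gfn_to_gpa(gpa_to_gfn(gpa)));
          return 0;
  }
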
diff --git a/queue-5.19/kvm-x86-tag-kvm_mmu_x86_module_init-with-__init.patch b/queue-5.19/kvm-x86-tag-kvm_mmu_x86_module_init-with-__init.patch
new file mode 100644 (file)
index 0000000..7234f2c
--- /dev/null
@@ -0,0 +1,48 @@
+From 982bae43f11c37b51d2f1961bb25ef7cac3746fa Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Wed, 3 Aug 2022 22:49:55 +0000
+Subject: KVM: x86: Tag kvm_mmu_x86_module_init() with __init
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit 982bae43f11c37b51d2f1961bb25ef7cac3746fa upstream.
+
+Mark kvm_mmu_x86_module_init() with __init, the entire reason it exists
+is to initialize variables when kvm.ko is loaded, i.e. it must never be
+called after module initialization.
+
+Fixes: 1d0e84806047 ("KVM: x86/mmu: Resolve nx_huge_pages when kvm.ko is loaded")
+Cc: stable@vger.kernel.org
+Reviewed-by: Kai Huang <kai.huang@intel.com>
+Tested-by: Michael Roth <michael.roth@amd.com>
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20220803224957.1285926-2-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/kvm_host.h |    2 +-
+ arch/x86/kvm/mmu/mmu.c          |    2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1654,7 +1654,7 @@ static inline int kvm_arch_flush_remote_
+ #define kvm_arch_pmi_in_guest(vcpu) \
+       ((vcpu) && (vcpu)->arch.handling_intr_from_guest)
+-void kvm_mmu_x86_module_init(void);
++void __init kvm_mmu_x86_module_init(void);
+ int kvm_mmu_vendor_module_init(void);
+ void kvm_mmu_vendor_module_exit(void);
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -6275,7 +6275,7 @@ static int set_nx_huge_pages(const char
+  * nx_huge_pages needs to be resolved to true/false when kvm.ko is loaded, as
+  * its default value of -1 is technically undefined behavior for a boolean.
+  */
+-void kvm_mmu_x86_module_init(void)
++void __init kvm_mmu_x86_module_init(void)
+ {
+       if (nx_huge_pages == -1)
+               __set_nx_huge_pages(get_nx_auto_mode());
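
For readers outside the kernel: __init places a function in a section that is discarded once initialization completes, so a post-init call would jump into freed memory; tagging both the prototype and the definition keeps that contract visible to callers. A rough userspace approximation of the placement (the attribute only mimics the sectioning; the actual freeing is the kernel's doing):

  #include <stdio.h>

  /* Userspace stand-in for the kernel's __init section annotation. */
  #define __init __attribute__((__section__(".init.text")))

  static void __init module_init_once(void)
  {
          printf("resolving module params exactly once\n");
  }

  int main(void)
  {
          module_init_once(); /* valid only during (simulated) init */
          return 0;
  }
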
diff --git a/queue-5.19/kvm-x86-xen-initialize-xen-timer-only-once.patch b/queue-5.19/kvm-x86-xen-initialize-xen-timer-only-once.patch
new file mode 100644 (file)
index 0000000..3119551
--- /dev/null
@@ -0,0 +1,55 @@
+From af735db31285fa699384c649be72a9f32ecbb665 Mon Sep 17 00:00:00 2001
+From: Coleman Dietsch <dietschc@csp.edu>
+Date: Mon, 8 Aug 2022 14:06:06 -0500
+Subject: KVM: x86/xen: Initialize Xen timer only once
+
+From: Coleman Dietsch <dietschc@csp.edu>
+
+commit af735db31285fa699384c649be72a9f32ecbb665 upstream.
+
+Add a check for existing xen timers before initializing a new one.
+
+Currently kvm_xen_init_timer() is called on every
+KVM_XEN_VCPU_ATTR_TYPE_TIMER, which is causing the following ODEBUG
+crash when vcpu->arch.xen.timer is already set.
+
+ODEBUG: init active (active state 0)
+object type: hrtimer hint: xen_timer_callbac0
+RIP: 0010:debug_print_object+0x16e/0x250 lib/debugobjects.c:502
+Call Trace:
+__debug_object_init
+debug_hrtimer_init
+debug_init
+hrtimer_init
+kvm_xen_init_timer
+kvm_xen_vcpu_set_attr
+kvm_arch_vcpu_ioctl
+kvm_vcpu_ioctl
+vfs_ioctl
+
+Fixes: 536395260582 ("KVM: x86/xen: handle PV timers oneshot mode")
+Cc: stable@vger.kernel.org
+Link: https://syzkaller.appspot.com/bug?id=8234a9dfd3aafbf092cc5a7cd9842e3ebc45fc42
+Reported-by: syzbot+e54f930ed78eb0f85281@syzkaller.appspotmail.com
+Signed-off-by: Coleman Dietsch <dietschc@csp.edu>
+Reviewed-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20220808190607.323899-2-dietschc@csp.edu>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/xen.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/xen.c
++++ b/arch/x86/kvm/xen.c
+@@ -713,7 +713,9 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcp
+                               break;
+                       }
+                       vcpu->arch.xen.timer_virq = data->u.timer.port;
+-                      kvm_xen_init_timer(vcpu);
++
++                      if (!vcpu->arch.xen.timer.function)
++                              kvm_xen_init_timer(vcpu);
+                       /* Restart the timer if it's set */
+                       if (data->u.timer.expires_ns)
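
The guard keys off hrtimer's function pointer: it stays NULL until kvm_xen_init_timer() runs, so it doubles as an "already initialized" flag. A self-contained C sketch of that init-once idiom, with illustrative names:

  #include <stdio.h>

  struct sketch_timer {
          void (*function)(void); /* NULL until initialized, as with hrtimer */
          int virq;
  };

  static void timer_callback(void) { }

  static void init_timer(struct sketch_timer *t)
  {
          /* Re-running this on a live timer is what ODEBUG flagged. */
          t->function = timer_callback;
  }

  static void set_timer_attr(struct sketch_timer *t, int virq)
  {
          t->virq = virq;
          if (!t->function) /* the added guard: initialize only once */
                  init_timer(t);
  }

  int main(void)
  {
          struct sketch_timer t = { 0 };

          set_timer_attr(&t, 31); /* first call initializes */
          set_timer_attr(&t, 32); /* later calls only update the vector */
          printf("virq=%d\n", t.virq);
          return 0;
  }
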
diff --git a/queue-5.19/kvm-x86-xen-stop-xen-timer-before-changing-irq.patch b/queue-5.19/kvm-x86-xen-stop-xen-timer-before-changing-irq.patch
new file mode 100644 (file)
index 0000000..fa3d433
--- /dev/null
@@ -0,0 +1,73 @@
+From c036899136355758dcd88878145036ab4d9c1f26 Mon Sep 17 00:00:00 2001
+From: Coleman Dietsch <dietschc@csp.edu>
+Date: Mon, 8 Aug 2022 14:06:07 -0500
+Subject: KVM: x86/xen: Stop Xen timer before changing IRQ
+
+From: Coleman Dietsch <dietschc@csp.edu>
+
+commit c036899136355758dcd88878145036ab4d9c1f26 upstream.
+
+Stop Xen timer (if it's running) prior to changing the IRQ vector and
+potentially (re)starting the timer. Changing the IRQ vector while the
+timer is still running can result in KVM injecting a garbage event, e.g.
+kvm_xen_inject_timer_irqs() could see a non-zero xen.timer_pending from
+a previous timer but inject the new xen.timer_virq.
+
+Fixes: 536395260582 ("KVM: x86/xen: handle PV timers oneshot mode")
+Cc: stable@vger.kernel.org
+Link: https://syzkaller.appspot.com/bug?id=8234a9dfd3aafbf092cc5a7cd9842e3ebc45fc42
+Reported-by: syzbot+e54f930ed78eb0f85281@syzkaller.appspotmail.com
+Signed-off-by: Coleman Dietsch <dietschc@csp.edu>
+Reviewed-by: Sean Christopherson <seanjc@google.com>
+Acked-by: David Woodhouse <dwmw@amazon.co.uk>
+Message-Id: <20220808190607.323899-3-dietschc@csp.edu>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/xen.c |   33 ++++++++++++++++-----------------
+ 1 file changed, 16 insertions(+), 17 deletions(-)
+
+--- a/arch/x86/kvm/xen.c
++++ b/arch/x86/kvm/xen.c
+@@ -707,25 +707,24 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcp
+               break;
+       case KVM_XEN_VCPU_ATTR_TYPE_TIMER:
+-              if (data->u.timer.port) {
+-                      if (data->u.timer.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL) {
+-                              r = -EINVAL;
+-                              break;
+-                      }
+-                      vcpu->arch.xen.timer_virq = data->u.timer.port;
++              if (data->u.timer.port &&
++                  data->u.timer.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL) {
++                      r = -EINVAL;
++                      break;
++              }
+-                      if (!vcpu->arch.xen.timer.function)
+-                              kvm_xen_init_timer(vcpu);
++              if (!vcpu->arch.xen.timer.function)
++                      kvm_xen_init_timer(vcpu);
+-                      /* Restart the timer if it's set */
+-                      if (data->u.timer.expires_ns)
+-                              kvm_xen_start_timer(vcpu, data->u.timer.expires_ns,
+-                                                  data->u.timer.expires_ns -
+-                                                  get_kvmclock_ns(vcpu->kvm));
+-              } else if (kvm_xen_timer_enabled(vcpu)) {
+-                      kvm_xen_stop_timer(vcpu);
+-                      vcpu->arch.xen.timer_virq = 0;
+-              }
++              /* Stop the timer (if it's running) before changing the vector */
++              kvm_xen_stop_timer(vcpu);
++              vcpu->arch.xen.timer_virq = data->u.timer.port;
++
++              /* Start the timer if the new value has a valid vector+expiry. */
++              if (data->u.timer.port && data->u.timer.expires_ns)
++                      kvm_xen_start_timer(vcpu, data->u.timer.expires_ns,
++                                          data->u.timer.expires_ns -
++                                          get_kvmclock_ns(vcpu->kvm));
+               r = 0;
+               break;
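
The restructured handler reduces to one invariant: the timer is always quiesced before its vector changes, and only a non-zero port with a non-zero expiry re-arms it. A self-contained C sketch of that stop-reprogram-restart shape, with illustrative names:

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  struct sketch_timer {
          bool running;
          int virq;
  };

  static void stop_timer(struct sketch_timer *t)
  {
          t->running = false; /* harmless if it was not running */
  }

  static void start_timer(struct sketch_timer *t, uint64_t expires_ns)
  {
          t->running = expires_ns != 0;
  }

  static void set_timer_attr(struct sketch_timer *t, int port,
                             uint64_t expires_ns)
  {
          stop_timer(t);  /* quiesce before touching the vector */
          t->virq = port; /* safe: nothing can fire the old vector now */
          if (port && expires_ns)
                  start_timer(t, expires_ns);
  }

  int main(void)
  {
          struct sketch_timer t = { 0 };

          set_timer_attr(&t, 31, 1000); /* arm */
          set_timer_attr(&t, 42, 2000); /* re-vector with no stale event */
          printf("virq=%d running=%d\n", t.virq, t.running);
          return 0;
  }
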
diff --git a/queue-5.19/series b/queue-5.19/series
index c3e36ac8b7ee12e68d8ec94e29de0da3ca83629f..90682226bb746fbad70bb349a7e0ebcd7cb7fa55 100644
@@ -36,3 +36,10 @@ kvm-nvmx-inject-ud-if-vmxon-is-attempted-with-incompatible-cr0-cr4.patch
 kvm-x86-mark-tss-busy-during-ltr-emulation-_after_-all-fault-checks.patch
 kvm-x86-set-error-code-to-segment-selector-on-lldt-ltr-non-canonical-gp.patch
 kvm-x86-mmu-treat-nx-as-a-valid-spte-bit-for-npt.patch
+kvm-svm-disable-sev-es-support-if-mmio-caching-is-disable.patch
+kvm-x86-tag-kvm_mmu_x86_module_init-with-__init.patch
+kvm-x86-mmu-fully-re-evaluate-mmio-caching-when-spte-masks-change.patch
+kvm-x86-do-not-report-preemption-if-the-steal-time-cache-is-stale.patch
+kvm-x86-revalidate-steal-time-cache-if-msr-value-changes.patch
+kvm-x86-xen-initialize-xen-timer-only-once.patch
+kvm-x86-xen-stop-xen-timer-before-changing-irq.patch