5.15-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 23 Jan 2022 13:38:32 +0000 (14:38 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 23 Jan 2022 13:38:32 +0000 (14:38 +0100)
added patches:
kvm-vmx-switch-blocked_vcpu_on_cpu_lock-to-raw-spinlock.patch
kvm-x86-mmu-fix-write-protection-of-pts-mapped-by-the-tdp-mmu.patch

queue-5.15/kvm-vmx-switch-blocked_vcpu_on_cpu_lock-to-raw-spinlock.patch [new file with mode: 0644]
queue-5.15/kvm-x86-mmu-fix-write-protection-of-pts-mapped-by-the-tdp-mmu.patch [new file with mode: 0644]
queue-5.15/series [new file with mode: 0644]

diff --git a/queue-5.15/kvm-vmx-switch-blocked_vcpu_on_cpu_lock-to-raw-spinlock.patch b/queue-5.15/kvm-vmx-switch-blocked_vcpu_on_cpu_lock-to-raw-spinlock.patch
new file mode 100644 (file)
index 0000000..bf01c7a
--- /dev/null
@@ -0,0 +1,105 @@
+From 5f02ef741a785678930f3ff0a8b6b2b0ef1bb402 Mon Sep 17 00:00:00 2001
+From: Marcelo Tosatti <mtosatti@redhat.com>
+Date: Tue, 18 Jan 2022 04:34:43 -0500
+Subject: KVM: VMX: switch blocked_vcpu_on_cpu_lock to raw spinlock
+
+From: Marcelo Tosatti <mtosatti@redhat.com>
+
+commit 5f02ef741a785678930f3ff0a8b6b2b0ef1bb402 upstream.
+
+blocked_vcpu_on_cpu_lock is taken from hard interrupt context
+(pi_wakeup_handler), therefore it cannot sleep.
+
+Switch it to a raw spinlock.
+
+Fixes:
+
+[41297.066254] BUG: scheduling while atomic: CPU 0/KVM/635218/0x00010001
+[41297.066323] Preemption disabled at:
+[41297.066324] [<ffffffff902ee47f>] irq_enter_rcu+0xf/0x60
+[41297.066339] Call Trace:
+[41297.066342]  <IRQ>
+[41297.066346]  dump_stack_lvl+0x34/0x44
+[41297.066353]  ? irq_enter_rcu+0xf/0x60
+[41297.066356]  __schedule_bug.cold+0x7d/0x8b
+[41297.066361]  __schedule+0x439/0x5b0
+[41297.066365]  ? task_blocks_on_rt_mutex.constprop.0.isra.0+0x1b0/0x440
+[41297.066369]  schedule_rtlock+0x1e/0x40
+[41297.066371]  rtlock_slowlock_locked+0xf1/0x260
+[41297.066374]  rt_spin_lock+0x3b/0x60
+[41297.066378]  pi_wakeup_handler+0x31/0x90 [kvm_intel]
+[41297.066388]  sysvec_kvm_posted_intr_wakeup_ipi+0x9d/0xd0
+[41297.066392]  </IRQ>
+[41297.066392]  asm_sysvec_kvm_posted_intr_wakeup_ipi+0x12/0x20
+...
+
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx/posted_intr.c |   16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/kvm/vmx/posted_intr.c
++++ b/arch/x86/kvm/vmx/posted_intr.c
+@@ -15,7 +15,7 @@
+  * can find which vCPU should be waken up.
+  */
+ static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
+-static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);
++static DEFINE_PER_CPU(raw_spinlock_t, blocked_vcpu_on_cpu_lock);
+ static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
+ {
+@@ -121,9 +121,9 @@ static void __pi_post_block(struct kvm_v
+                          new.control) != old.control);
+       if (!WARN_ON_ONCE(vcpu->pre_pcpu == -1)) {
+-              spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
++              raw_spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
+               list_del(&vcpu->blocked_vcpu_list);
+-              spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
++              raw_spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
+               vcpu->pre_pcpu = -1;
+       }
+ }
+@@ -154,11 +154,11 @@ int pi_pre_block(struct kvm_vcpu *vcpu)
+       local_irq_disable();
+       if (!WARN_ON_ONCE(vcpu->pre_pcpu != -1)) {
+               vcpu->pre_pcpu = vcpu->cpu;
+-              spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
++              raw_spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
+               list_add_tail(&vcpu->blocked_vcpu_list,
+                             &per_cpu(blocked_vcpu_on_cpu,
+                                      vcpu->pre_pcpu));
+-              spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
++              raw_spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
+       }
+       do {
+@@ -215,7 +215,7 @@ void pi_wakeup_handler(void)
+       struct kvm_vcpu *vcpu;
+       int cpu = smp_processor_id();
+-      spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
++      raw_spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
+       list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu),
+                       blocked_vcpu_list) {
+               struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
+@@ -223,13 +223,13 @@ void pi_wakeup_handler(void)
+               if (pi_test_on(pi_desc) == 1)
+                       kvm_vcpu_kick(vcpu);
+       }
+-      spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
++      raw_spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
+ }
+ void __init pi_init_cpu(int cpu)
+ {
+       INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
+-      spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
++      raw_spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
+ }
+ bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu)
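
For context on the lock change above: on a PREEMPT_RT kernel a plain spinlock_t
is a sleeping lock, so taking it from hard interrupt context, as
pi_wakeup_handler() does, produces exactly the "scheduling while atomic" splat
quoted in the changelog, whereas a raw_spinlock_t always busy-waits and is safe
there. The kernel-context sketch below mirrors the per-CPU list plus raw
spinlock pattern from posted_intr.c; the demo_* names are hypothetical and the
snippet is only illustrative, not part of the patch.

/* Illustrative sketch of the pattern used in arch/x86/kvm/vmx/posted_intr.c.
 * The demo_* names are made up for this example. */
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/spinlock.h>

static DEFINE_PER_CPU(struct list_head, demo_list);
/* raw_spinlock_t keeps spinning even on PREEMPT_RT, so it may be taken
 * from hard interrupt context; spinlock_t can sleep there and must not. */
static DEFINE_PER_CPU(raw_spinlock_t, demo_lock);

/* Runs in hard IRQ context, like pi_wakeup_handler(). */
static void demo_irq_handler(void)
{
        struct list_head *entry;
        int cpu = smp_processor_id();

        raw_spin_lock(&per_cpu(demo_lock, cpu));        /* must not sleep */
        list_for_each(entry, &per_cpu(demo_list, cpu)) {
                /* wake up / process each queued element */
        }
        raw_spin_unlock(&per_cpu(demo_lock, cpu));
}

static void demo_init_cpu(int cpu)
{
        INIT_LIST_HEAD(&per_cpu(demo_list, cpu));
        raw_spin_lock_init(&per_cpu(demo_lock, cpu));
}
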
diff --git a/queue-5.15/kvm-x86-mmu-fix-write-protection-of-pts-mapped-by-the-tdp-mmu.patch b/queue-5.15/kvm-x86-mmu-fix-write-protection-of-pts-mapped-by-the-tdp-mmu.patch
new file mode 100644 (file)
index 0000000..4efe1b4
--- /dev/null
@@ -0,0 +1,56 @@
+From 7c8a4742c4abe205ec9daf416c9d42fd6b406e8e Mon Sep 17 00:00:00 2001
+From: David Matlack <dmatlack@google.com>
+Date: Thu, 13 Jan 2022 23:30:17 +0000
+Subject: KVM: x86/mmu: Fix write-protection of PTs mapped by the TDP MMU
+
+From: David Matlack <dmatlack@google.com>
+
+commit 7c8a4742c4abe205ec9daf416c9d42fd6b406e8e upstream.
+
+When the TDP MMU is write-protecting GFNs for page table protection (as
+opposed to for dirty logging, or due to the HVA not being writable), it
+checks if the SPTE is already write-protected and if so skips modifying
+the SPTE and the TLB flush.
+
+This behavior is incorrect because it fails to check if the SPTE
+is write-protected for page table protection, i.e. fails to check
+that MMU-writable is '0'.  If the SPTE was write-protected for dirty
+logging but not page table protection, the SPTE could locklessly be made
+writable, and vCPUs could still be running with writable mappings cached
+in their TLB.
+
+Fix this by only skipping setting the SPTE if the SPTE is already
+write-protected *and* MMU-writable is already clear.  Technically,
+checking only MMU-writable would suffice; a SPTE cannot be writable
+without MMU-writable being set.  But check both to be paranoid and
+because it arguably yields more readable code.
+
+Fixes: 46044f72c382 ("kvm: x86/mmu: Support write protection for nesting in tdp MMU")
+Cc: stable@vger.kernel.org
+Signed-off-by: David Matlack <dmatlack@google.com>
+Message-Id: <20220113233020.3986005-2-dmatlack@google.com>
+Reviewed-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/mmu/tdp_mmu.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kvm/mmu/tdp_mmu.c
++++ b/arch/x86/kvm/mmu/tdp_mmu.c
+@@ -1493,12 +1493,12 @@ static bool write_protect_gfn(struct kvm
+                   !is_last_spte(iter.old_spte, iter.level))
+                       continue;
+-              if (!is_writable_pte(iter.old_spte))
+-                      break;
+-
+               new_spte = iter.old_spte &
+                       ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
++              if (new_spte == iter.old_spte)
++                      break;
++
+               tdp_mmu_set_spte(kvm, &iter, new_spte);
+               spte_set = true;
+       }
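
To make the new skip condition above concrete: instead of bailing out as soon
as the host-writable bit is clear, write_protect_gfn() now computes the SPTE
value with both PT_WRITABLE_MASK and shadow_mmu_writable_mask removed and skips
only when that leaves the SPTE unchanged. The stand-alone sketch below
contrasts the two checks; the DEMO_* masks and bit positions are hypothetical
stand-ins, not the kernel's real definitions.

/* Simplified illustration of the write_protect_gfn() change.
 * Bit positions are hypothetical stand-ins for the real masks. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_WRITABLE_MASK      (1ull << 1)  /* stands in for PT_WRITABLE_MASK */
#define DEMO_MMU_WRITABLE_MASK  (1ull << 2)  /* stands in for shadow_mmu_writable_mask */

/* Old behavior: skip whenever the SPTE is not host-writable. */
static bool old_skip(uint64_t spte)
{
        return !(spte & DEMO_WRITABLE_MASK);
}

/* New behavior: skip only if clearing both bits changes nothing, i.e. the
 * SPTE is already fully write-protected for page-table protection. */
static bool new_skip(uint64_t spte)
{
        uint64_t new_spte = spte & ~(DEMO_WRITABLE_MASK | DEMO_MMU_WRITABLE_MASK);

        return new_spte == spte;
}

int main(void)
{
        /* SPTE write-protected only for dirty logging: writable bit clear,
         * but MMU-writable still set, so it could be made writable again. */
        uint64_t dirty_log_wp = DEMO_MMU_WRITABLE_MASK;

        printf("old skips: %d, new skips: %d\n",
               old_skip(dirty_log_wp), new_skip(dirty_log_wp));
        /* old skips: 1 (the bug: MMU-writable stays set), new skips: 0 (fixed) */
        return 0;
}
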
diff --git a/queue-5.15/series b/queue-5.15/series
new file mode 100644 (file)
index 0000000..bf4f8cf
--- /dev/null
@@ -0,0 +1,2 @@
+kvm-x86-mmu-fix-write-protection-of-pts-mapped-by-the-tdp-mmu.patch
+kvm-vmx-switch-blocked_vcpu_on_cpu_lock-to-raw-spinlock.patch