--- /dev/null
+From d143f939a95696d38ff800ada14402fa50ebbd6c Mon Sep 17 00:00:00 2001
+From: Vinod Koul <vkoul@kernel.org>
+Date: Thu, 10 Mar 2022 10:13:20 +0530
+Subject: dmaengine: Revert "dmaengine: shdma: Fix runtime PM imbalance on error"
+
+From: Vinod Koul <vkoul@kernel.org>
+
+commit d143f939a95696d38ff800ada14402fa50ebbd6c upstream.
+
+This reverts commit 455896c53d5b ("dmaengine: shdma: Fix runtime PM
+imbalance on error"), as that patch wrongly reduced the runtime PM usage
+count on the error path without bailing out, itself leaving the count
+unbalanced. Revert it to keep the reference and restore the original
+behaviour.
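+
+As the revert restores, the driver keeps the reference it took and only
+logs the failure. For comparison, a minimal hedged sketch (not shdma
+code; example_claim_device() is a hypothetical name) of what an error
+path that does want to back out usually looks like: drop the reference
+and bail, rather than dropping it and carrying on as the reverted patch
+did:
+
+  #include <linux/device.h>
+  #include <linux/pm_runtime.h>
+
+  /* Hypothetical helper, for illustration only. */
+  static int example_claim_device(struct device *dev)
+  {
+          int ret;
+
+          /*
+           * pm_runtime_resume_and_get() drops the usage count for us
+           * when the resume fails, so the error path stays balanced.
+           */
+          ret = pm_runtime_resume_and_get(dev);
+          if (ret < 0) {
+                  dev_err(dev, "runtime resume failed: %d\n", ret);
+                  return ret;     /* bail out; count already balanced */
+          }
+
+          /* ... use the device here ... */
+
+          pm_runtime_put(dev);    /* pairs with the successful get */
+          return 0;
+  }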
+
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/dma/sh/shdma-base.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/drivers/dma/sh/shdma-base.c
++++ b/drivers/dma/sh/shdma-base.c
+@@ -115,10 +115,8 @@ static dma_cookie_t shdma_tx_submit(stru
+ ret = pm_runtime_get(schan->dev);
+
+ spin_unlock_irq(&schan->chan_lock);
+- if (ret < 0) {
++ if (ret < 0)
+ dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);
+- pm_runtime_put(schan->dev);
+- }
+
+ pm_runtime_barrier(schan->dev);
+
--- /dev/null
+From 5593473a1e6c743764b08e3b6071cb43b5cfa6c4 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Wed, 6 Apr 2022 13:13:42 -0400
+Subject: KVM: avoid NULL pointer dereference in kvm_dirty_ring_push
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 5593473a1e6c743764b08e3b6071cb43b5cfa6c4 upstream.
+
+kvm_vcpu_release() will call kvm_dirty_ring_free(), freeing
+ring->dirty_gfns and setting it to NULL. Afterwards, it calls
+kvm_arch_vcpu_destroy().
+
+However, if closing the file descriptor races with KVM_RUN in such a way
+that vcpu->arch.st.preempted == 0, the following call stack leads to a
+NULL pointer dereference in kvm_dirty_ring_push():
+
+ mark_page_dirty_in_slot+0x192/0x270 arch/x86/kvm/../../../virt/kvm/kvm_main.c:3171
+ kvm_steal_time_set_preempted arch/x86/kvm/x86.c:4600 [inline]
+ kvm_arch_vcpu_put+0x34e/0x5b0 arch/x86/kvm/x86.c:4618
+ vcpu_put+0x1b/0x70 arch/x86/kvm/../../../virt/kvm/kvm_main.c:211
+ vmx_free_vcpu+0xcb/0x130 arch/x86/kvm/vmx/vmx.c:6985
+ kvm_arch_vcpu_destroy+0x76/0x290 arch/x86/kvm/x86.c:11219
+ kvm_vcpu_destroy arch/x86/kvm/../../../virt/kvm/kvm_main.c:441 [inline]
+
+The fix is to release the dirty page ring after kvm_arch_vcpu_destroy
+has run.
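+
+As a stand-alone illustration of the ordering hazard (toy_* names are
+made up; this is not KVM code, just the same destroy-before-free shape
+the patch moves to):
+
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  struct toy_ring {
+          /* NULL once freed, like dirty_gfns after kvm_dirty_ring_free() */
+          unsigned long *dirty_gfns;
+  };
+
+  static void toy_ring_free(struct toy_ring *ring)
+  {
+          free(ring->dirty_gfns);
+          ring->dirty_gfns = NULL;
+  }
+
+  /* Mimics the destroy path still marking a page dirty on its way out. */
+  static void toy_arch_destroy(struct toy_ring *ring)
+  {
+          if (!ring->dirty_gfns) {
+                  puts("ring already freed: this is where KVM hit NULL");
+                  return;
+          }
+          ring->dirty_gfns[0] = 1;        /* push an entry */
+  }
+
+  int main(void)
+  {
+          struct toy_ring ring = {
+                  .dirty_gfns = calloc(16, sizeof(unsigned long)),
+          };
+
+          /* Old (buggy) order: free the ring, then run the destroy path. */
+          toy_ring_free(&ring);
+          toy_arch_destroy(&ring);
+
+          /* Fixed order, as in this patch: destroy first, free last. */
+          ring.dirty_gfns = calloc(16, sizeof(unsigned long));
+          toy_arch_destroy(&ring);
+          toy_ring_free(&ring);
+          return 0;
+  }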
+
+Reported-by: Qiuhao Li <qiuhao@sysec.org>
+Reported-by: Gaoning Pan <pgn@zju.edu.cn>
+Reported-by: Yongkang Jia <kangel@zju.edu.cn>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ virt/kvm/kvm_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -439,8 +439,8 @@ static void kvm_vcpu_init(struct kvm_vcp
+
+ static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
+ {
+- kvm_dirty_ring_free(&vcpu->dirty_ring);
+ kvm_arch_vcpu_destroy(vcpu);
++ kvm_dirty_ring_free(&vcpu->dirty_ring);
+
+ /*
+ * No need for rcu_read_lock as VCPU_RUN is the only place that changes