--- /dev/null
+From 5d16467ae56343b9205caedf85e3a131e0914ad8 Mon Sep 17 00:00:00 2001
+From: Zhan Xusheng <zhanxusheng1024@gmail.com>
+Date: Mon, 23 Mar 2026 14:11:30 +0800
+Subject: alarmtimer: Fix argument order in alarm_timer_forward()
+
+From: Zhan Xusheng <zhanxusheng1024@gmail.com>
+
+commit 5d16467ae56343b9205caedf85e3a131e0914ad8 upstream.
+
+alarm_timer_forward() passes arguments to alarm_forward() in the wrong
+order:
+
+ alarm_forward(alarm, timr->it_interval, now);
+
+However, alarm_forward() is defined as:
+
+ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval);
+
+and uses the second argument as the current time:
+
+ delta = ktime_sub(now, alarm->node.expires);
+
+Passing the interval as "now" results in incorrect delta computation,
+which can lead to missed expirations or incorrect overrun accounting.
+
+This issue has been present since the introduction of
+alarm_timer_forward().
+
+Fix this by swapping the arguments.
+
+Fixes: e7561f1633ac ("alarmtimer: Implement forward callback")
+Signed-off-by: Zhan Xusheng <zhanxusheng@xiaomi.com>
+Signed-off-by: Thomas Gleixner <tglx@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/20260323061130.29991-1-zhanxusheng@xiaomi.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/time/alarmtimer.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/time/alarmtimer.c
++++ b/kernel/time/alarmtimer.c
+@@ -608,7 +608,7 @@ static s64 alarm_timer_forward(struct k_
+ {
+ struct alarm *alarm = &timr->it.alarm.alarmtimer;
+
+- return alarm_forward(alarm, timr->it_interval, now);
++ return alarm_forward(alarm, now, timr->it_interval);
+ }
+
+ /**
--- /dev/null
+From 89a8567d84bde88cb7cdbbac2ab2299c4f991490 Mon Sep 17 00:00:00 2001
+From: Claudiu Beznea <claudiu.beznea@tuxon.dev>
+Date: Mon, 16 Mar 2026 15:32:46 +0200
+Subject: dmaengine: sh: rz-dmac: Move CHCTRL updates under spinlock
+
+From: Claudiu Beznea <claudiu.beznea@tuxon.dev>
+
+commit 89a8567d84bde88cb7cdbbac2ab2299c4f991490 upstream.
+
+Both rz_dmac_disable_hw() and rz_dmac_irq_handle_channel() update the
+CHCTRL register. To avoid concurrency issues when configuring
+functionalities exposed by this register, take the virtual channel lock.
+All other CHCTRL updates were already protected by the same lock.
+
+Previously, rz_dmac_disable_hw() disabled and re-enabled local IRQs, before
+accessing CHCTRL registers but this does not ensure race-free access.
+Remove the local IRQ disable/enable code as well.
+
+Fixes: 5000d37042a6 ("dmaengine: sh: Add DMAC driver for RZ/G2L SoC")
+Cc: stable@vger.kernel.org
+Reviewed-by: Biju Das <biju.das.jz@bp.renesas.com>
+Reviewed-by: Frank Li <Frank.Li@nxp.com>
+Signed-off-by: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
+Link: https://patch.msgid.link/20260316133252.240348-3-claudiu.beznea.uj@bp.renesas.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/dma/sh/rz-dmac.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+--- a/drivers/dma/sh/rz-dmac.c
++++ b/drivers/dma/sh/rz-dmac.c
+@@ -286,13 +286,10 @@ static void rz_dmac_disable_hw(struct rz
+ {
+ struct dma_chan *chan = &channel->vc.chan;
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+- unsigned long flags;
+
+ dev_dbg(dmac->dev, "%s channel %d\n", __func__, channel->index);
+
+- local_irq_save(flags);
+ rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
+- local_irq_restore(flags);
+ }
+
+ static void rz_dmac_set_dmars_register(struct rz_dmac *dmac, int nr, u32 dmars)
+@@ -545,8 +542,8 @@ static int rz_dmac_terminate_all(struct
+ unsigned int i;
+ LIST_HEAD(head);
+
+- rz_dmac_disable_hw(channel);
+ spin_lock_irqsave(&channel->vc.lock, flags);
++ rz_dmac_disable_hw(channel);
+ for (i = 0; i < DMAC_NR_LMDESC; i++)
+ lmdesc[i].header = 0;
+
+@@ -674,7 +671,9 @@ static void rz_dmac_irq_handle_channel(s
+ if (chstat & CHSTAT_ER) {
+ dev_err(dmac->dev, "DMAC err CHSTAT_%d = %08X\n",
+ channel->index, chstat);
+- rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
++
++ scoped_guard(spinlock_irqsave, &channel->vc.lock)
++ rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
+ goto done;
+ }
+
--- /dev/null
+From abb863e6213dc41a58ef8bb3289b7e77460dabf3 Mon Sep 17 00:00:00 2001
+From: Claudiu Beznea <claudiu.beznea@tuxon.dev>
+Date: Mon, 16 Mar 2026 15:32:45 +0200
+Subject: dmaengine: sh: rz-dmac: Protect the driver specific lists
+
+From: Claudiu Beznea <claudiu.beznea@tuxon.dev>
+
+commit abb863e6213dc41a58ef8bb3289b7e77460dabf3 upstream.
+
+The driver lists (ld_free, ld_queue) are used in
+rz_dmac_free_chan_resources(), rz_dmac_terminate_all(),
+rz_dmac_issue_pending(), and rz_dmac_irq_handler_thread(), all under
+the virtual channel lock. Take the same lock in rz_dmac_prep_slave_sg()
+and rz_dmac_prep_dma_memcpy() as well to avoid concurrency issues, since
+these functions also check whether the lists are empty and update or
+remove list entries.
+
+Fixes: 5000d37042a6 ("dmaengine: sh: Add DMAC driver for RZ/G2L SoC")
+Cc: stable@vger.kernel.org
+Reviewed-by: Frank Li <Frank.Li@nxp.com>
+Signed-off-by: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
+Link: https://patch.msgid.link/20260316133252.240348-2-claudiu.beznea.uj@bp.renesas.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/dma/sh/rz-dmac.c | 63 ++++++++++++++++++++++++++---------------------
+ 1 file changed, 35 insertions(+), 28 deletions(-)
+
+--- a/drivers/dma/sh/rz-dmac.c
++++ b/drivers/dma/sh/rz-dmac.c
+@@ -10,6 +10,7 @@
+ */
+
+ #include <linux/bitfield.h>
++#include <linux/cleanup.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/dmaengine.h>
+ #include <linux/interrupt.h>
+@@ -424,6 +425,7 @@ static int rz_dmac_alloc_chan_resources(
+ if (!desc)
+ break;
+
++ /* No need to lock. This is called only for the 1st client. */
+ list_add_tail(&desc->node, &channel->ld_free);
+ channel->descs_allocated++;
+ }
+@@ -479,18 +481,21 @@ rz_dmac_prep_dma_memcpy(struct dma_chan
+ dev_dbg(dmac->dev, "%s channel: %d src=0x%pad dst=0x%pad len=%zu\n",
+ __func__, channel->index, &src, &dest, len);
+
+- if (list_empty(&channel->ld_free))
+- return NULL;
++ scoped_guard(spinlock_irqsave, &channel->vc.lock) {
++ if (list_empty(&channel->ld_free))
++ return NULL;
++
++ desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);
++
++ desc->type = RZ_DMAC_DESC_MEMCPY;
++ desc->src = src;
++ desc->dest = dest;
++ desc->len = len;
++ desc->direction = DMA_MEM_TO_MEM;
+
+- desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);
+-
+- desc->type = RZ_DMAC_DESC_MEMCPY;
+- desc->src = src;
+- desc->dest = dest;
+- desc->len = len;
+- desc->direction = DMA_MEM_TO_MEM;
++ list_move_tail(channel->ld_free.next, &channel->ld_queue);
++ }
+
+- list_move_tail(channel->ld_free.next, &channel->ld_queue);
+ return vchan_tx_prep(&channel->vc, &desc->vd, flags);
+ }
+
+@@ -506,27 +511,29 @@ rz_dmac_prep_slave_sg(struct dma_chan *c
+ int dma_length = 0;
+ int i = 0;
+
+- if (list_empty(&channel->ld_free))
+- return NULL;
+-
+- desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);
++ scoped_guard(spinlock_irqsave, &channel->vc.lock) {
++ if (list_empty(&channel->ld_free))
++ return NULL;
++
++ desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);
++
++ for_each_sg(sgl, sg, sg_len, i)
++ dma_length += sg_dma_len(sg);
++
++ desc->type = RZ_DMAC_DESC_SLAVE_SG;
++ desc->sg = sgl;
++ desc->sgcount = sg_len;
++ desc->len = dma_length;
++ desc->direction = direction;
++
++ if (direction == DMA_DEV_TO_MEM)
++ desc->src = channel->src_per_address;
++ else
++ desc->dest = channel->dst_per_address;
+
+- for_each_sg(sgl, sg, sg_len, i) {
+- dma_length += sg_dma_len(sg);
++ list_move_tail(channel->ld_free.next, &channel->ld_queue);
+ }
+
+- desc->type = RZ_DMAC_DESC_SLAVE_SG;
+- desc->sg = sgl;
+- desc->sgcount = sg_len;
+- desc->len = dma_length;
+- desc->direction = direction;
+-
+- if (direction == DMA_DEV_TO_MEM)
+- desc->src = channel->src_per_address;
+- else
+- desc->dest = channel->dst_per_address;
+-
+- list_move_tail(channel->ld_free.next, &channel->ld_queue);
+ return vchan_tx_prep(&channel->vc, &desc->vd, flags);
+ }
+
--- /dev/null
+From cfe02147e86307a17057ee4e3604f5f5919571d2 Mon Sep 17 00:00:00 2001
+From: Jassi Brar <jassisinghbrar@gmail.com>
+Date: Sun, 22 Mar 2026 12:15:33 -0500
+Subject: irqchip/qcom-mpm: Add missing mailbox TX done acknowledgment
+
+From: Jassi Brar <jassisinghbrar@gmail.com>
+
+commit cfe02147e86307a17057ee4e3604f5f5919571d2 upstream.
+
+The mbox_client for qcom-mpm sends NULL doorbell messages via
+mbox_send_message() but never signals TX completion.
+
+Set knows_txdone=true and call mbox_client_txdone() after a successful
+send, matching the pattern used by other Qualcomm mailbox clients (smp2p,
+smsm, qcom_aoss etc).
+
+Fixes: a6199bb514d8a6 ("irqchip: Add Qualcomm MPM controller driver")
+Signed-off-by: Jassi Brar <jassisinghbrar@gmail.com>
+Signed-off-by: Thomas Gleixner <tglx@kernel.org>
+Reviewed-by: Douglas Anderson <dianders@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/20260322171533.608436-1-jassisinghbrar@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/irqchip/irq-qcom-mpm.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/irqchip/irq-qcom-mpm.c
++++ b/drivers/irqchip/irq-qcom-mpm.c
+@@ -305,6 +305,8 @@ static int mpm_pd_power_off(struct gener
+ if (ret < 0)
+ return ret;
+
++ mbox_client_txdone(priv->mbox_chan, 0);
++
+ return 0;
+ }
+
+@@ -414,6 +416,7 @@ static int qcom_mpm_init(struct device_n
+ }
+
+ priv->mbox_client.dev = dev;
++ priv->mbox_client.knows_txdone = true;
+ priv->mbox_chan = mbox_request_channel(&priv->mbox_client, 0);
+ if (IS_ERR(priv->mbox_chan)) {
+ ret = PTR_ERR(priv->mbox_chan);
--- /dev/null
+From bac3190a8e79beff6ed221975e0c9b1b5f2a21da Mon Sep 17 00:00:00 2001
+From: Milos Nikic <nikic.milos@gmail.com>
+Date: Tue, 10 Mar 2026 21:15:48 -0700
+Subject: jbd2: gracefully abort on checkpointing state corruptions
+
+From: Milos Nikic <nikic.milos@gmail.com>
+
+commit bac3190a8e79beff6ed221975e0c9b1b5f2a21da upstream.
+
+This patch targets two internal state machine invariants in checkpoint.c
+residing inside functions that natively return integer error codes.
+
+- In jbd2_cleanup_journal_tail(): A blocknr of 0 indicates a severely
+corrupted journal superblock. Replaced the J_ASSERT with a WARN_ON_ONCE
+and a graceful journal abort, returning -EFSCORRUPTED.
+
+- In jbd2_log_do_checkpoint(): Replaced the J_ASSERT_BH checking for
+an unexpected buffer_jwrite state. If the warning triggers, we
+explicitly drop the just-taken get_bh() reference and call __flush_batch()
+to safely clean up any previously queued buffers in the j_chkpt_bhs array,
+preventing a memory leak before returning -EFSCORRUPTED.
+
+Signed-off-by: Milos Nikic <nikic.milos@gmail.com>
+Reviewed-by: Andreas Dilger <adilger@dilger.ca>
+Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Baokun Li <libaokun@linux.alibaba.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://patch.msgid.link/20260311041548.159424-1-nikic.milos@gmail.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Cc: stable@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/jbd2/checkpoint.c | 15 +++++++++++++--
+ 1 file changed, 13 insertions(+), 2 deletions(-)
+
+--- a/fs/jbd2/checkpoint.c
++++ b/fs/jbd2/checkpoint.c
+@@ -267,7 +267,15 @@ restart:
+ */
+ BUFFER_TRACE(bh, "queue");
+ get_bh(bh);
+- J_ASSERT_BH(bh, !buffer_jwrite(bh));
++ if (WARN_ON_ONCE(buffer_jwrite(bh))) {
++ put_bh(bh); /* drop the ref we just took */
++ spin_unlock(&journal->j_list_lock);
++ /* Clean up any previously batched buffers */
++ if (batch_count)
++ __flush_batch(journal, &batch_count);
++ jbd2_journal_abort(journal, -EFSCORRUPTED);
++ return -EFSCORRUPTED;
++ }
+ journal->j_chkpt_bhs[batch_count++] = bh;
+ transaction->t_chp_stats.cs_written++;
+ transaction->t_checkpoint_list = jh->b_cpnext;
+@@ -325,7 +333,10 @@ int jbd2_cleanup_journal_tail(journal_t
+
+ if (!jbd2_journal_get_log_tail(journal, &first_tid, &blocknr))
+ return 1;
+- J_ASSERT(blocknr != 0);
++ if (WARN_ON_ONCE(blocknr == 0)) {
++ jbd2_journal_abort(journal, -EFSCORRUPTED);
++ return -EFSCORRUPTED;
++ }
+
+ /*
+ * We need to make sure that any blocks that were recently written out
--- /dev/null
+From aad885e774966e97b675dfe928da164214a71605 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Thu, 5 Mar 2026 17:28:04 -0800
+Subject: KVM: x86/mmu: Drop/zap existing present SPTE even when creating an MMIO SPTE
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit aad885e774966e97b675dfe928da164214a71605 upstream.
+
+When installing an emulated MMIO SPTE, do so *after* dropping/zapping the
+existing SPTE (if it's shadow-present). While commit a54aa15c6bda3 was
+right about it being impossible to convert a shadow-present SPTE to an
+MMIO SPTE due to a _guest_ write, it failed to account for writes to guest
+memory that are outside the scope of KVM.
+
+E.g. if host userspace modifies a shadowed gPTE to switch from a memslot
+to emulated MMIO and then the guest hits a relevant page fault, KVM will
+install the MMIO SPTE without first zapping the shadow-present SPTE.
+
+ ------------[ cut here ]------------
+ is_shadow_present_pte(*sptep)
+ WARNING: arch/x86/kvm/mmu/mmu.c:484 at mark_mmio_spte+0xb2/0xc0 [kvm], CPU#0: vmx_ept_stale_r/4292
+ Modules linked in: kvm_intel kvm irqbypass
+ CPU: 0 UID: 1000 PID: 4292 Comm: vmx_ept_stale_r Not tainted 7.0.0-rc2-eafebd2d2ab0-sink-vm #319 PREEMPT
+ Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015
+ RIP: 0010:mark_mmio_spte+0xb2/0xc0 [kvm]
+ Call Trace:
+ <TASK>
+ mmu_set_spte+0x237/0x440 [kvm]
+ ept_page_fault+0x535/0x7f0 [kvm]
+ kvm_mmu_do_page_fault+0xee/0x1f0 [kvm]
+ kvm_mmu_page_fault+0x8d/0x620 [kvm]
+ vmx_handle_exit+0x18c/0x5a0 [kvm_intel]
+ kvm_arch_vcpu_ioctl_run+0xc55/0x1c20 [kvm]
+ kvm_vcpu_ioctl+0x2d5/0x980 [kvm]
+ __x64_sys_ioctl+0x8a/0xd0
+ do_syscall_64+0xb5/0x730
+ entry_SYSCALL_64_after_hwframe+0x4b/0x53
+ RIP: 0033:0x47fa3f
+ </TASK>
+ ---[ end trace 0000000000000000 ]---
+
+Reported-by: Alexander Bulekov <bkov@amazon.com>
+Debugged-by: Alexander Bulekov <bkov@amazon.com>
+Suggested-by: Fred Griffoul <fgriffo@amazon.co.uk>
+Fixes: a54aa15c6bda3 ("KVM: x86/mmu: Handle MMIO SPTEs directly in mmu_set_spte()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/mmu/mmu.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -2914,12 +2914,6 @@ static int mmu_set_spte(struct kvm_vcpu
+ bool prefetch = !fault || fault->prefetch;
+ bool write_fault = fault && fault->write;
+
+- if (unlikely(is_noslot_pfn(pfn))) {
+- vcpu->stat.pf_mmio_spte_created++;
+- mark_mmio_spte(vcpu, sptep, gfn, pte_access);
+- return RET_PF_EMULATE;
+- }
+-
+ if (is_shadow_present_pte(*sptep)) {
+ /*
+ * If we overwrite a PTE page pointer with a 2MB PMD, unlink
+@@ -2939,6 +2933,14 @@ static int mmu_set_spte(struct kvm_vcpu
+ was_rmapped = 1;
+ }
+
++ if (unlikely(is_noslot_pfn(pfn))) {
++ vcpu->stat.pf_mmio_spte_created++;
++ mark_mmio_spte(vcpu, sptep, gfn, pte_access);
++ if (flush)
++ kvm_flush_remote_tlbs_gfn(vcpu->kvm, gfn, level);
++ return RET_PF_EMULATE;
++ }
++
+ wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep, prefetch,
+ true, host_writable, &spte);
+
--- /dev/null
+From 95db0c9f526d583634cddb2e5914718570fbac87 Mon Sep 17 00:00:00 2001
+From: Huacai Chen <chenhuacai@loongson.cn>
+Date: Thu, 26 Mar 2026 14:29:09 +0800
+Subject: LoongArch: Workaround LS2K/LS7A GPU DMA hang bug
+
+From: Huacai Chen <chenhuacai@loongson.cn>
+
+commit 95db0c9f526d583634cddb2e5914718570fbac87 upstream.
+
+1. Hardware limitation: GPU, DC and VPU are typically PCI device 06.0,
+06.1 and 06.2. They share some hardware resources, so when configure the
+PCI 06.0 device BAR1, DMA memory access cannot be performed through this
+BAR, otherwise it will cause hardware abnormalities.
+
+2. In typical scenarios of reboot or S3/S4, DC access to memory through
+BAR is not prohibited, resulting in GPU DMA hangs.
+
+3. Workaround method: When configuring the 06.0 device BAR1, turn off
+the memory access of DC, GPU and VPU (via DC's CRTC registers).
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Qianhai Wu <wuqianhai@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/pci/pci.c | 80 +++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 80 insertions(+)
+
+--- a/arch/loongarch/pci/pci.c
++++ b/arch/loongarch/pci/pci.c
+@@ -6,9 +6,11 @@
+ #include <linux/export.h>
+ #include <linux/init.h>
+ #include <linux/acpi.h>
++#include <linux/delay.h>
+ #include <linux/types.h>
+ #include <linux/pci.h>
+ #include <linux/vgaarb.h>
++#include <linux/io-64-nonatomic-lo-hi.h>
+ #include <asm/cacheflush.h>
+ #include <asm/loongson.h>
+
+@@ -16,6 +18,9 @@
+ #define PCI_DEVICE_ID_LOONGSON_DC1 0x7a06
+ #define PCI_DEVICE_ID_LOONGSON_DC2 0x7a36
+ #define PCI_DEVICE_ID_LOONGSON_DC3 0x7a46
++#define PCI_DEVICE_ID_LOONGSON_GPU1 0x7a15
++#define PCI_DEVICE_ID_LOONGSON_GPU2 0x7a25
++#define PCI_DEVICE_ID_LOONGSON_GPU3 0x7a35
+
+ int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
+ int reg, int len, u32 *val)
+@@ -100,3 +105,78 @@ static void pci_fixup_vgadev(struct pci_
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_DC1, pci_fixup_vgadev);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_DC2, pci_fixup_vgadev);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_DC3, pci_fixup_vgadev);
++
++#define CRTC_NUM_MAX 2
++#define CRTC_OUTPUT_ENABLE 0x100
++
++static void loongson_gpu_fixup_dma_hang(struct pci_dev *pdev, bool on)
++{
++ u32 i, val, count, crtc_offset, device;
++ void __iomem *crtc_reg, *base, *regbase;
++ static u32 crtc_status[CRTC_NUM_MAX] = { 0 };
++
++ base = pdev->bus->ops->map_bus(pdev->bus, pdev->devfn + 1, 0);
++ device = readw(base + PCI_DEVICE_ID);
++
++ regbase = ioremap(readq(base + PCI_BASE_ADDRESS_0) & ~0xffull, SZ_64K);
++ if (!regbase) {
++ pci_err(pdev, "Failed to ioremap()\n");
++ return;
++ }
++
++ switch (device) {
++ case PCI_DEVICE_ID_LOONGSON_DC2:
++ crtc_reg = regbase + 0x1240;
++ crtc_offset = 0x10;
++ break;
++ case PCI_DEVICE_ID_LOONGSON_DC3:
++ crtc_reg = regbase;
++ crtc_offset = 0x400;
++ break;
++ }
++
++ for (i = 0; i < CRTC_NUM_MAX; i++, crtc_reg += crtc_offset) {
++ val = readl(crtc_reg);
++
++ if (!on)
++ crtc_status[i] = val;
++
++ /* No need to fixup if the status is off at startup. */
++ if (!(crtc_status[i] & CRTC_OUTPUT_ENABLE))
++ continue;
++
++ if (on)
++ val |= CRTC_OUTPUT_ENABLE;
++ else
++ val &= ~CRTC_OUTPUT_ENABLE;
++
++ mb();
++ writel(val, crtc_reg);
++
++ for (count = 0; count < 40; count++) {
++ val = readl(crtc_reg) & CRTC_OUTPUT_ENABLE;
++ if ((on && val) || (!on && !val))
++ break;
++ udelay(1000);
++ }
++
++ pci_info(pdev, "DMA hang fixup at reg[0x%lx]: 0x%x\n",
++ (unsigned long)crtc_reg & 0xffff, readl(crtc_reg));
++ }
++
++ iounmap(regbase);
++}
++
++static void pci_fixup_dma_hang_early(struct pci_dev *pdev)
++{
++ loongson_gpu_fixup_dma_hang(pdev, false);
++}
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_GPU2, pci_fixup_dma_hang_early);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_GPU3, pci_fixup_dma_hang_early);
++
++static void pci_fixup_dma_hang_final(struct pci_dev *pdev)
++{
++ loongson_gpu_fixup_dma_hang(pdev, true);
++}
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_GPU2, pci_fixup_dma_hang_final);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_GPU3, pci_fixup_dma_hang_final);
--- /dev/null
+From 647b8a2fe474474704110db6bd07f7a139e621eb Mon Sep 17 00:00:00 2001
+From: Kevin Hao <haokexin@gmail.com>
+Date: Sat, 21 Mar 2026 22:04:41 +0800
+Subject: net: macb: Use dev_consume_skb_any() to free TX SKBs
+
+From: Kevin Hao <haokexin@gmail.com>
+
+commit 647b8a2fe474474704110db6bd07f7a139e621eb upstream.
+
+The napi_consume_skb() function is not intended to be called in an IRQ
+disabled context. However, after commit 6bc8a5098bf4 ("net: macb: Fix
+tx_ptr_lock locking"), the freeing of TX SKBs is performed with IRQs
+disabled. To resolve the following call trace, use dev_consume_skb_any()
+for freeing TX SKBs:
+ WARNING: kernel/softirq.c:430 at __local_bh_enable_ip+0x174/0x188, CPU#0: ksoftirqd/0/15
+ Modules linked in:
+ CPU: 0 UID: 0 PID: 15 Comm: ksoftirqd/0 Not tainted 7.0.0-rc4-next-20260319-yocto-standard-dirty #37 PREEMPT
+ Hardware name: ZynqMP ZCU102 Rev1.1 (DT)
+ pstate: 200000c5 (nzCv daIF -PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+ pc : __local_bh_enable_ip+0x174/0x188
+ lr : local_bh_enable+0x24/0x38
+ sp : ffff800082b3bb10
+ x29: ffff800082b3bb10 x28: ffff0008031f3c00 x27: 000000000011ede0
+ x26: ffff000800a7ff00 x25: ffff800083937ce8 x24: 0000000000017a80
+ x23: ffff000803243a78 x22: 0000000000000040 x21: 0000000000000000
+ x20: ffff000800394c80 x19: 0000000000000200 x18: 0000000000000001
+ x17: 0000000000000001 x16: ffff000803240000 x15: 0000000000000000
+ x14: ffffffffffffffff x13: 0000000000000028 x12: ffff000800395650
+ x11: ffff8000821d1528 x10: ffff800081c2bc08 x9 : ffff800081c1e258
+ x8 : 0000000100000301 x7 : ffff8000810426ec x6 : 0000000000000000
+ x5 : 0000000000000001 x4 : 0000000000000001 x3 : 0000000000000000
+ x2 : 0000000000000008 x1 : 0000000000000200 x0 : ffff8000810428dc
+ Call trace:
+ __local_bh_enable_ip+0x174/0x188 (P)
+ local_bh_enable+0x24/0x38
+ skb_attempt_defer_free+0x190/0x1d8
+ napi_consume_skb+0x58/0x108
+ macb_tx_poll+0x1a4/0x558
+ __napi_poll+0x50/0x198
+ net_rx_action+0x1f4/0x3d8
+ handle_softirqs+0x16c/0x560
+ run_ksoftirqd+0x44/0x80
+ smpboot_thread_fn+0x1d8/0x338
+ kthread+0x120/0x150
+ ret_from_fork+0x10/0x20
+ irq event stamp: 29751
+ hardirqs last enabled at (29750): [<ffff8000813be184>] _raw_spin_unlock_irqrestore+0x44/0x88
+ hardirqs last disabled at (29751): [<ffff8000813bdf60>] _raw_spin_lock_irqsave+0x38/0x98
+ softirqs last enabled at (29150): [<ffff8000800f1aec>] handle_softirqs+0x504/0x560
+ softirqs last disabled at (29153): [<ffff8000800f2fec>] run_ksoftirqd+0x44/0x80
+
+Fixes: 6bc8a5098bf4 ("net: macb: Fix tx_ptr_lock locking")
+Signed-off-by: Kevin Hao <haokexin@gmail.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20260321-macb-tx-v1-1-b383a58dd4e6@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/cadence/macb_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -1129,7 +1129,7 @@ static void macb_tx_unmap(struct macb *b
+ }
+
+ if (tx_skb->skb) {
+- napi_consume_skb(tx_skb->skb, budget);
++ dev_consume_skb_any(tx_skb->skb);
+ tx_skb->skb = NULL;
+ }
+ }
--- /dev/null
+From 61d099ac4a7a8fb11ebdb6e2ec8d77f38e77362f Mon Sep 17 00:00:00 2001
+From: Tyllis Xu <livelycarpet87@gmail.com>
+Date: Sat, 14 Mar 2026 12:01:50 -0500
+Subject: scsi: ibmvfc: Fix OOB access in ibmvfc_discover_targets_done()
+
+From: Tyllis Xu <livelycarpet87@gmail.com>
+
+commit 61d099ac4a7a8fb11ebdb6e2ec8d77f38e77362f upstream.
+
+A malicious or compromised VIO server can return a num_written value in the
+discover targets MAD response that exceeds max_targets. This value is
+stored directly in vhost->num_targets without validation, and is then used
+as the loop bound in ibmvfc_alloc_targets() to index into disc_buf[], which
+is only allocated for max_targets entries. Indices at or beyond max_targets
+access kernel memory outside the DMA-coherent allocation. The
+out-of-bounds data is subsequently embedded in Implicit Logout and PLOGI
+MADs that are sent back to the VIO server, leaking kernel memory.
+
+Fix by clamping num_written to max_targets before storing it.
+
+Fixes: 072b91f9c651 ("[SCSI] ibmvfc: IBM Power Virtual Fibre Channel Adapter Client Driver")
+Reported-by: Yuhao Jiang <danisjiang@gmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Tyllis Xu <LivelyCarpet87@gmail.com>
+Reviewed-by: Dave Marquardt <davemarq@linux.ibm.com>
+Acked-by: Tyrel Datwyler <tyreld@linux.ibm.com>
+Link: https://patch.msgid.link/20260314170151.548614-1-LivelyCarpet87@gmail.com
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/ibmvscsi/ibmvfc.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/scsi/ibmvscsi/ibmvfc.c
++++ b/drivers/scsi/ibmvscsi/ibmvfc.c
+@@ -4928,7 +4928,8 @@ static void ibmvfc_discover_targets_done
+ switch (mad_status) {
+ case IBMVFC_MAD_SUCCESS:
+ ibmvfc_dbg(vhost, "Discover Targets succeeded\n");
+- vhost->num_targets = be32_to_cpu(rsp->num_written);
++ vhost->num_targets = min_t(u32, be32_to_cpu(rsp->num_written),
++ max_targets);
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
+ break;
+ case IBMVFC_MAD_FAILED:
--- /dev/null
+From 7a9f448d44127217fabc4065c5ba070d4e0b5d37 Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Mon, 23 Feb 2026 16:44:59 +0100
+Subject: scsi: ses: Handle positive SCSI error from ses_recv_diag()
+
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+commit 7a9f448d44127217fabc4065c5ba070d4e0b5d37 upstream.
+
+ses_recv_diag() can return a positive value, which also means that an
+error happened, so do not only test for negative values.
+
+Cc: James E.J. Bottomley <James.Bottomley@HansenPartnership.com>
+Cc: Martin K. Petersen <martin.petersen@oracle.com>
+Cc: stable <stable@kernel.org>
+Assisted-by: gkh_clanker_2000
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+Link: https://patch.msgid.link/2026022301-bony-overstock-a07f@gregkh
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/ses.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/scsi/ses.c
++++ b/drivers/scsi/ses.c
+@@ -190,7 +190,7 @@ static unsigned char *ses_get_page2_desc
+ unsigned char *type_ptr = ses_dev->page1_types;
+ unsigned char *desc_ptr = ses_dev->page2 + 8;
+
+- if (ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len) < 0)
++ if (ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len))
+ return NULL;
+
+ for (i = 0; i < ses_dev->page1_num_types; i++, type_ptr += 4) {
virtio_net-fix-uaf-on-dst_ops-when-iff_xmit_dst_release-is-cleared-and-napi_tx-is-false.patch
s390-entry-scrub-r12-register-on-kernel-entry.patch
erofs-add-gfp_noio-in-the-bio-completion-if-needed.patch
+alarmtimer-fix-argument-order-in-alarm_timer_forward.patch
+scsi-ibmvfc-fix-oob-access-in-ibmvfc_discover_targets_done.patch
+scsi-ses-handle-positive-scsi-error-from-ses_recv_diag.patch
+net-macb-use-dev_consume_skb_any-to-free-tx-skbs.patch
+kvm-x86-mmu-drop-zap-existing-present-spte-even-when-creating-an-mmio-spte.patch
+jbd2-gracefully-abort-on-checkpointing-state-corruptions.patch
+irqchip-qcom-mpm-add-missing-mailbox-tx-done-acknowledgment.patch
+dmaengine-sh-rz-dmac-protect-the-driver-specific-lists.patch
+dmaengine-sh-rz-dmac-move-chctrl-updates-under-spinlock.patch
+loongarch-workaround-ls2k-ls7a-gpu-dma-hang-bug.patch
+xfs-stop-reclaim-before-pushing-ail-during-unmount.patch
+xfs-fix-ri_total-validation-in-xlog_recover_attri_commit_pass2.patch
--- /dev/null
+From d72f2084e30966097c8eae762e31986a33c3c0ae Mon Sep 17 00:00:00 2001
+From: Long Li <leo.lilong@huawei.com>
+Date: Fri, 20 Mar 2026 10:11:29 +0800
+Subject: xfs: fix ri_total validation in xlog_recover_attri_commit_pass2
+
+From: Long Li <leo.lilong@huawei.com>
+
+commit d72f2084e30966097c8eae762e31986a33c3c0ae upstream.
+
+The ri_total checks for SET/REPLACE operations are hardcoded to 3,
+but xfs_attri_item_size() only emits a value iovec when value_len > 0,
+so ri_total is 2 when value_len == 0.
+
+For PPTR_SET/PPTR_REMOVE/PPTR_REPLACE, value_len is validated by
+xfs_attri_validate() to be exactly sizeof(struct xfs_parent_rec) and
+is never zero, so their hardcoded checks remain correct.
+
+This problem may cause log recovery failures. The following script can be
+used to reproduce the problem:
+
+ #!/bin/bash
+ mkfs.xfs -f /dev/sda
+ mount /dev/sda /mnt/test/
+ touch /mnt/test/file
+ for i in {1..200}; do
+ attr -s "user.attr_$i" -V "value_$i" /mnt/test/file > /dev/null
+ done
+ echo 1 > /sys/fs/xfs/debug/larp
+ echo 1 > /sys/fs/xfs/sda/errortag/larp
+ attr -s "user.zero" -V "" /mnt/test/file
+ echo 0 > /sys/fs/xfs/sda/errortag/larp
+ umount /mnt/test
+ mount /dev/sda /mnt/test/ # mount failed
+
+Fix this by deriving the expected count dynamically as "2 + !!value_len"
+for SET/REPLACE operations.
+
+Cc: stable@vger.kernel.org # v6.9
+Fixes: ad206ae50eca ("xfs: check opcode and iovec count match in xlog_recover_attri_commit_pass2")
+Reviewed-by: Darrick J. Wong <djwong@kernel.org>
+Signed-off-by: Long Li <leo.lilong@huawei.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Carlos Maiolino <cem@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/xfs_attr_item.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/fs/xfs/xfs_attr_item.c
++++ b/fs/xfs/xfs_attr_item.c
+@@ -746,8 +746,8 @@ xlog_recover_attri_commit_pass2(
+ switch (op) {
+ case XFS_ATTRI_OP_FLAGS_SET:
+ case XFS_ATTRI_OP_FLAGS_REPLACE:
+- /* Log item, attr name, attr value */
+- if (item->ri_total != 3) {
++ /* Log item, attr name, optional attr value */
++ if (item->ri_total != 2 + !!attri_formatp->alfi_value_len) {
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
+ attri_formatp, len);
+ return -EFSCORRUPTED;
--- /dev/null
+From 4f24a767e3d64a5f58c595b5c29b6063a201f1e3 Mon Sep 17 00:00:00 2001
+From: Yuto Ohnuki <ytohnuki@amazon.com>
+Date: Tue, 10 Mar 2026 18:38:37 +0000
+Subject: xfs: stop reclaim before pushing AIL during unmount
+
+From: Yuto Ohnuki <ytohnuki@amazon.com>
+
+commit 4f24a767e3d64a5f58c595b5c29b6063a201f1e3 upstream.
+
+The unmount sequence in xfs_unmount_flush_inodes() pushed the AIL while
+background reclaim and inodegc are still running. This is broken
+independently of any use-after-free issues - background reclaim and
+inodegc should not be running while the AIL is being pushed during
+unmount, as inodegc can dirty and insert inodes into the AIL during the
+flush, and background reclaim can race to abort and free dirty inodes.
+
+Reorder xfs_unmount_flush_inodes() to stop inodegc and cancel background
+reclaim before pushing the AIL. Stop inodegc before cancelling
+m_reclaim_work because the inodegc worker can re-queue m_reclaim_work
+via xfs_inodegc_set_reclaimable.
+
+Reported-by: syzbot+652af2b3c5569c4ab63c@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=652af2b3c5569c4ab63c
+Fixes: 90c60e164012 ("xfs: xfs_iflush() is no longer necessary")
+Cc: stable@vger.kernel.org # v5.9
+Signed-off-by: Yuto Ohnuki <ytohnuki@amazon.com>
+Reviewed-by: Darrick J. Wong <djwong@kernel.org>
+Signed-off-by: Carlos Maiolino <cem@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/xfs_mount.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/fs/xfs/xfs_mount.c
++++ b/fs/xfs/xfs_mount.c
+@@ -576,8 +576,9 @@ xfs_unmount_check(
+ * have been retrying in the background. This will prevent never-ending
+ * retries in AIL pushing from hanging the unmount.
+ *
+- * Finally, we can push the AIL to clean all the remaining dirty objects, then
+- * reclaim the remaining inodes that are still in memory at this point in time.
++ * Stop inodegc and background reclaim before pushing the AIL so that they
++ * are not running while the AIL is being flushed. Then push the AIL to
++ * clean all the remaining dirty objects and reclaim the remaining inodes.
+ */
+ static void
+ xfs_unmount_flush_inodes(
+@@ -589,9 +590,9 @@ xfs_unmount_flush_inodes(
+
+ set_bit(XFS_OPSTATE_UNMOUNTING, &mp->m_opstate);
+
+- xfs_ail_push_all_sync(mp->m_ail);
+ xfs_inodegc_stop(mp);
+ cancel_delayed_work_sync(&mp->m_reclaim_work);
++ xfs_ail_push_all_sync(mp->m_ail);
+ xfs_reclaim_inodes(mp);
+ xfs_health_unmount(mp);
+ }