--- /dev/null
+From stable+bounces-219694-greg=kroah.com@vger.kernel.org Wed Feb 25 20:40:23 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 14:40:10 -0500
+Subject: ata: libata: remove pointless VPRINTK() calls
+To: stable@vger.kernel.org
+Cc: Hannes Reinecke <hare@suse.de>, Damien Le Moal <damien.lemoal@opensource.wdc.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260225194011.1015550-1-sashal@kernel.org>
+
+From: Hannes Reinecke <hare@suse.de>
+
+[ Upstream commit e1553351d747cbcd62db01d579dff916edcc782c ]
+
+Most of the information is already covered by tracepoints
+(if not downright pointless), so remove the VPRINTK() calls.
+And while we're at it, remove ata_scsi_dump_cdb(), too,
+as this information can be retrieved from scsi tracing.
+
+Signed-off-by: Hannes Reinecke <hare@suse.de>
+Signed-off-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+Stable-dep-of: bb3a8154b1a1 ("ata: libata-scsi: refactor ata_scsi_translate()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/ata/libata-core.c | 3 ---
+ drivers/ata/libata-sata.c | 2 --
+ drivers/ata/libata-scsi.c | 42 ------------------------------------------
+ drivers/ata/libata-sff.c | 4 ----
+ drivers/ata/libata.h | 1 -
+ 5 files changed, 52 deletions(-)
+
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4461,8 +4461,6 @@ static void ata_sg_clean(struct ata_queu
+
+ WARN_ON_ONCE(sg == NULL);
+
+- VPRINTK("unmapping %u sg elements\n", qc->n_elem);
+-
+ if (qc->n_elem)
+ dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
+
+@@ -4494,7 +4492,6 @@ static int ata_sg_setup(struct ata_queue
+ if (n_elem < 1)
+ return -1;
+
+- VPRINTK("%d sg elements mapped\n", n_elem);
+ qc->orig_n_elem = qc->n_elem;
+ qc->n_elem = n_elem;
+ qc->flags |= ATA_QCFLAG_DMAMAP;
+--- a/drivers/ata/libata-sata.c
++++ b/drivers/ata/libata-sata.c
+@@ -1258,8 +1258,6 @@ int ata_sas_queuecmd(struct scsi_cmnd *c
+ {
+ int rc = 0;
+
+- ata_scsi_dump_cdb(ap, cmd);
+-
+ if (likely(ata_dev_enabled(ap->link.device)))
+ rc = __ata_scsi_queuecmd(cmd, ap->link.device);
+ else {
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -1296,8 +1296,6 @@ static void scsi_6_lba_len(const u8 *cdb
+ u64 lba = 0;
+ u32 len;
+
+- VPRINTK("six-byte command\n");
+-
+ lba |= ((u64)(cdb[1] & 0x1f)) << 16;
+ lba |= ((u64)cdb[2]) << 8;
+ lba |= ((u64)cdb[3]);
+@@ -1323,8 +1321,6 @@ static void scsi_10_lba_len(const u8 *cd
+ u64 lba = 0;
+ u32 len = 0;
+
+- VPRINTK("ten-byte command\n");
+-
+ lba |= ((u64)cdb[2]) << 24;
+ lba |= ((u64)cdb[3]) << 16;
+ lba |= ((u64)cdb[4]) << 8;
+@@ -1352,8 +1348,6 @@ static void scsi_16_lba_len(const u8 *cd
+ u64 lba = 0;
+ u32 len = 0;
+
+- VPRINTK("sixteen-byte command\n");
+-
+ lba |= ((u64)cdb[2]) << 56;
+ lba |= ((u64)cdb[3]) << 48;
+ lba |= ((u64)cdb[4]) << 40;
+@@ -1707,8 +1701,6 @@ static int ata_scsi_translate(struct ata
+ struct ata_queued_cmd *qc;
+ int rc;
+
+- VPRINTK("ENTER\n");
+-
+ qc = ata_scsi_qc_new(dev, cmd);
+ if (!qc)
+ goto err_mem;
+@@ -1739,7 +1731,6 @@ static int ata_scsi_translate(struct ata
+ /* select device, send command to hardware */
+ ata_qc_issue(qc);
+
+- VPRINTK("EXIT\n");
+ return 0;
+
+ early_finish:
+@@ -1897,8 +1888,6 @@ static unsigned int ata_scsiop_inq_std(s
+ 2
+ };
+
+- VPRINTK("ENTER\n");
+-
+ /* set scsi removable (RMB) bit per ata bit, or if the
+ * AHCI port says it's external (Hotplug-capable, eSATA).
+ */
+@@ -2309,8 +2298,6 @@ static unsigned int ata_scsiop_mode_sens
+ u8 dpofua, bp = 0xff;
+ u16 fp;
+
+- VPRINTK("ENTER\n");
+-
+ six_byte = (scsicmd[0] == MODE_SENSE);
+ ebd = !(scsicmd[1] & 0x8); /* dbd bit inverted == edb */
+ /*
+@@ -2428,8 +2415,6 @@ static unsigned int ata_scsiop_read_cap(
+ log2_per_phys = ata_id_log2_per_physical_sector(dev->id);
+ lowest_aligned = ata_id_logical_sector_offset(dev->id, log2_per_phys);
+
+- VPRINTK("ENTER\n");
+-
+ if (args->cmd->cmnd[0] == READ_CAPACITY) {
+ if (last_lba >= 0xffffffffULL)
+ last_lba = 0xffffffff;
+@@ -2496,7 +2481,6 @@ static unsigned int ata_scsiop_read_cap(
+ */
+ static unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf)
+ {
+- VPRINTK("ENTER\n");
+ rbuf[3] = 8; /* just one lun, LUN 0, size 8 bytes */
+
+ return 0;
+@@ -2596,8 +2580,6 @@ static void atapi_qc_complete(struct ata
+ struct scsi_cmnd *cmd = qc->scsicmd;
+ unsigned int err_mask = qc->err_mask;
+
+- VPRINTK("ENTER, err_mask 0x%X\n", err_mask);
+-
+ /* handle completion from new EH */
+ if (unlikely(qc->ap->ops->error_handler &&
+ (err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID))) {
+@@ -3732,8 +3714,6 @@ static unsigned int ata_scsi_mode_select
+ u8 buffer[64];
+ const u8 *p = buffer;
+
+- VPRINTK("ENTER\n");
+-
+ six_byte = (cdb[0] == MODE_SELECT);
+ if (six_byte) {
+ if (scmd->cmd_len < 5) {
+@@ -4032,26 +4012,6 @@ static inline ata_xlat_func_t ata_get_xl
+ return NULL;
+ }
+
+-/**
+- * ata_scsi_dump_cdb - dump SCSI command contents to dmesg
+- * @ap: ATA port to which the command was being sent
+- * @cmd: SCSI command to dump
+- *
+- * Prints the contents of a SCSI command via printk().
+- */
+-
+-void ata_scsi_dump_cdb(struct ata_port *ap, struct scsi_cmnd *cmd)
+-{
+-#ifdef ATA_VERBOSE_DEBUG
+- struct scsi_device *scsidev = cmd->device;
+-
+- VPRINTK("CDB (%u:%d,%d,%lld) %9ph\n",
+- ap->print_id,
+- scsidev->channel, scsidev->id, scsidev->lun,
+- cmd->cmnd);
+-#endif
+-}
+-
+ int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev)
+ {
+ struct ata_port *ap = dev->link->ap;
+@@ -4139,8 +4099,6 @@ int ata_scsi_queuecmd(struct Scsi_Host *
+
+ spin_lock_irqsave(ap->lock, irq_flags);
+
+- ata_scsi_dump_cdb(ap, cmd);
+-
+ dev = ata_scsi_find_dev(ap, scsidev);
+ if (likely(dev))
+ rc = __ata_scsi_queuecmd(cmd, dev);
+--- a/drivers/ata/libata-sff.c
++++ b/drivers/ata/libata-sff.c
+@@ -888,8 +888,6 @@ static void atapi_pio_bytes(struct ata_q
+ if (unlikely(!bytes))
+ goto atapi_check;
+
+- VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
+-
+ if (unlikely(__atapi_pio_bytes(qc, bytes)))
+ goto err_out;
+ ata_sff_sync(ap); /* flush */
+@@ -2614,7 +2612,6 @@ static void ata_bmdma_fill_sg(struct ata
+
+ prd[pi].addr = cpu_to_le32(addr);
+ prd[pi].flags_len = cpu_to_le32(len & 0xffff);
+- VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
+
+ pi++;
+ sg_len -= len;
+@@ -2674,7 +2671,6 @@ static void ata_bmdma_fill_sg_dumb(struc
+ prd[++pi].addr = cpu_to_le32(addr + 0x8000);
+ }
+ prd[pi].flags_len = cpu_to_le32(blen);
+- VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
+
+ pi++;
+ sg_len -= len;
+--- a/drivers/ata/libata.h
++++ b/drivers/ata/libata.h
+@@ -150,7 +150,6 @@ extern int ata_scsi_user_scan(struct Scs
+ unsigned int id, u64 lun);
+ void ata_scsi_sdev_config(struct scsi_device *sdev);
+ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev);
+-void ata_scsi_dump_cdb(struct ata_port *ap, struct scsi_cmnd *cmd);
+ int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev);
+
+ /* libata-eh.c */
--- /dev/null
+From stable+bounces-219695-greg=kroah.com@vger.kernel.org Wed Feb 25 20:40:26 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 14:40:11 -0500
+Subject: ata: libata-scsi: refactor ata_scsi_translate()
+To: stable@vger.kernel.org
+Cc: Damien Le Moal <dlemoal@kernel.org>, Niklas Cassel <cassel@kernel.org>, "Martin K. Petersen" <martin.petersen@oracle.com>, John Garry <john.g.garry@oracle.com>, Igor Pylypiv <ipylypiv@google.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260225194011.1015550-2-sashal@kernel.org>
+
+From: Damien Le Moal <dlemoal@kernel.org>
+
+[ Upstream commit bb3a8154b1a1dc2c86d037482c0a2cf9186829ed ]
+
+Factor out of ata_scsi_translate() the code handling queued command
+deferral using the port qc_defer callback and issuing the queued
+command with ata_qc_issue() into the new function ata_scsi_qc_issue(),
+and simplify the goto used in ata_scsi_translate().
+While at it, also add a lockdep annotation to check that the port lock
+is held when ata_scsi_translate() is called.
+
+No functional changes.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
+Reviewed-by: Niklas Cassel <cassel@kernel.org>
+Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
+Reviewed-by: John Garry <john.g.garry@oracle.com>
+Reviewed-by: Igor Pylypiv <ipylypiv@google.com>
+[ scsi_done(cmd) => cmd->scsi_done(cmd) + DPRINTK ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/ata/libata-scsi.c | 83 +++++++++++++++++++++++++++-------------------
+ 1 file changed, 50 insertions(+), 33 deletions(-)
+
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -1668,6 +1668,42 @@ static void ata_scsi_qc_complete(struct
+ ata_qc_done(qc);
+ }
+
++static int ata_scsi_qc_issue(struct ata_port *ap, struct ata_queued_cmd *qc)
++{
++ int ret;
++
++ if (!ap->ops->qc_defer)
++ goto issue;
++
++ /* Check if the command needs to be deferred. */
++ ret = ap->ops->qc_defer(qc);
++ switch (ret) {
++ case 0:
++ break;
++ case ATA_DEFER_LINK:
++ ret = SCSI_MLQUEUE_DEVICE_BUSY;
++ break;
++ case ATA_DEFER_PORT:
++ ret = SCSI_MLQUEUE_HOST_BUSY;
++ break;
++ default:
++ WARN_ON_ONCE(1);
++ ret = SCSI_MLQUEUE_HOST_BUSY;
++ break;
++ }
++
++ if (ret) {
++ /* Force a requeue of the command to defer its execution. */
++ ata_qc_free(qc);
++ return ret;
++ }
++
++issue:
++ ata_qc_issue(qc);
++
++ return 0;
++}
++
+ /**
+ * ata_scsi_translate - Translate then issue SCSI command to ATA device
+ * @dev: ATA device to which the command is addressed
+@@ -1691,69 +1727,50 @@ static void ata_scsi_qc_complete(struct
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+- * 0 on success, SCSI_ML_QUEUE_DEVICE_BUSY if the command
+- * needs to be deferred.
++ * 0 on success, SCSI_ML_QUEUE_DEVICE_BUSY or SCSI_MLQUEUE_HOST_BUSY if the
++ * command needs to be deferred.
+ */
+ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
+ ata_xlat_func_t xlat_func)
+ {
+ struct ata_port *ap = dev->link->ap;
+ struct ata_queued_cmd *qc;
+- int rc;
+
++ lockdep_assert_held(ap->lock);
++
++ /*
++ * ata_scsi_qc_new() calls scsi_done(cmd) in case of failure. So we
++ * have nothing further to do when allocating a qc fails.
++ */
+ qc = ata_scsi_qc_new(dev, cmd);
+ if (!qc)
+- goto err_mem;
++ return 0;
+
+ /* data is present; dma-map it */
+ if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
+ cmd->sc_data_direction == DMA_TO_DEVICE) {
+ if (unlikely(scsi_bufflen(cmd) < 1)) {
+ ata_dev_warn(dev, "WARNING: zero len r/w req\n");
+- goto err_did;
++ cmd->result = (DID_ERROR << 16);
++ goto done;
+ }
+
+ ata_sg_init(qc, scsi_sglist(cmd), scsi_sg_count(cmd));
+-
+ qc->dma_dir = cmd->sc_data_direction;
+ }
+
+ qc->complete_fn = ata_scsi_qc_complete;
+
+ if (xlat_func(qc))
+- goto early_finish;
+-
+- if (ap->ops->qc_defer) {
+- if ((rc = ap->ops->qc_defer(qc)))
+- goto defer;
+- }
++ goto done;
+
+- /* select device, send command to hardware */
+- ata_qc_issue(qc);
+-
+- return 0;
++ return ata_scsi_qc_issue(ap, qc);
+
+-early_finish:
++done:
+ ata_qc_free(qc);
+ cmd->scsi_done(cmd);
+ DPRINTK("EXIT - early finish (good or error)\n");
+ return 0;
+-
+-err_did:
+- ata_qc_free(qc);
+- cmd->result = (DID_ERROR << 16);
+- cmd->scsi_done(cmd);
+-err_mem:
+- DPRINTK("EXIT - internal\n");
+- return 0;
+-
+-defer:
+- ata_qc_free(qc);
+- DPRINTK("EXIT - defer\n");
+- if (rc == ATA_DEFER_LINK)
+- return SCSI_MLQUEUE_DEVICE_BUSY;
+- else
+- return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ struct ata_scsi_args {
--- /dev/null
+From stable+bounces-223678-greg=kroah.com@vger.kernel.org Mon Mar 9 15:56:00 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Mar 2026 10:53:21 -0400
+Subject: drm/amd/display: Use GFP_ATOMIC in dc_create_stream_for_sink
+To: stable@vger.kernel.org
+Cc: Natalie Vock <natalie.vock@gmx.de>, Alex Deucher <alexander.deucher@amd.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260309145321.1224813-1-sashal@kernel.org>
+
+From: Natalie Vock <natalie.vock@gmx.de>
+
+[ Upstream commit 28dfe4317541e57fe52f9a290394cd29c348228b ]
+
+This can be called while preemption is disabled, for example by
+dcn32_internal_validate_bw which is called with the FPU active.
+
+Fixes "BUG: scheduling while atomic" messages I encounter on my Navi31
+machine.
+
+Signed-off-by: Natalie Vock <natalie.vock@gmx.de>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit b42dae2ebc5c84a68de63ec4ffdfec49362d53f1)
+Cc: stable@vger.kernel.org
+[ Context ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+@@ -167,7 +167,7 @@ struct dc_stream_state *dc_create_stream
+ if (sink == NULL)
+ return NULL;
+
+- stream = kzalloc(sizeof(struct dc_stream_state), GFP_KERNEL);
++ stream = kzalloc(sizeof(struct dc_stream_state), GFP_ATOMIC);
+ if (stream == NULL)
+ goto alloc_fail;
+
--- /dev/null
+From stable+bounces-219168-greg=kroah.com@vger.kernel.org Wed Feb 25 04:13:16 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 22:13:10 -0500
+Subject: ext4: don't set EXT4_GET_BLOCKS_CONVERT when splitting before submitting I/O
+To: stable@vger.kernel.org
+Cc: Zhang Yi <yi.zhang@huawei.com>, Ojaswin Mujoo <ojaswin@linux.ibm.com>, Baokun Li <libaokun1@huawei.com>, stable@kernel.org, Theodore Ts'o <tytso@mit.edu>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260225031310.3856508-1-sashal@kernel.org>
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit feaf2a80e78f89ee8a3464126077ba8683b62791 ]
+
+When allocating blocks during within-EOF DIO and writeback with
+dioread_nolock enabled, EXT4_GET_BLOCKS_PRE_IO was set to split an
+existing large unwritten extent. However, EXT4_GET_BLOCKS_CONVERT was
+set when calling ext4_split_convert_extents(), which may potentially
+result in stale data issues.
+
+Assume we have an unwritten extent, and then DIO writes the second half.
+
+ [UUUUUUUUUUUUUUUU] on-disk extent U: unwritten extent
+ [UUUUUUUUUUUUUUUU] extent status tree
+ |<- ->| ----> dio write this range
+
+First, ext4_iomap_alloc() calls ext4_map_blocks() with
+EXT4_GET_BLOCKS_PRE_IO, EXT4_GET_BLOCKS_UNWRIT_EXT and
+EXT4_GET_BLOCKS_CREATE flags set. ext4_map_blocks() finds this extent
+and calls ext4_split_convert_extents() with EXT4_GET_BLOCKS_CONVERT and
+the above flags set.
+
+Then, ext4_split_convert_extents() calls ext4_split_extent() with
+EXT4_EXT_MAY_ZEROOUT, EXT4_EXT_MARK_UNWRIT2 and EXT4_EXT_DATA_VALID2
+flags set, and it calls ext4_split_extent_at() to split the second half
+with EXT4_EXT_DATA_VALID2, EXT4_EXT_MARK_UNWRIT1, EXT4_EXT_MAY_ZEROOUT
+and EXT4_EXT_MARK_UNWRIT2 flags set. However, ext4_split_extent_at()
+fails to insert the extent due to a temporary lack of space (-ENOSPC).
+It zeroes out the first half but converts the entire on-disk extent to
+written since the EXT4_EXT_DATA_VALID2 flag is set, leaving the second
+half as unwritten in the extent status tree.
+
+ [0000000000SSSSSS] data S: stale data, 0: zeroed
+ [WWWWWWWWWWWWWWWW] on-disk extent W: written extent
+ [WWWWWWWWWWUUUUUU] extent status tree
+
+Finally, if the DIO failed to write data to the disk, the stale data in
+the second half will be exposed once the cached extent entry is gone.
+
+Fix this issue by not passing EXT4_GET_BLOCKS_CONVERT when splitting
+an unwritten extent before submitting I/O, and make
+ext4_split_convert_extents() zero out the entire extent range
+for this case, and also mark the extent in the extent status
+tree for consistency.
+
+Fixes: b8a8684502a0 ("ext4: Introduce FALLOC_FL_ZERO_RANGE flag for fallocate")
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Reviewed-by: Baokun Li <libaokun1@huawei.com>
+Cc: stable@kernel.org
+Message-ID: <20251129103247.686136-4-yi.zhang@huaweicloud.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+[ different function signatures ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/extents.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3705,11 +3705,15 @@ static int ext4_split_convert_extents(ha
+ /* Convert to unwritten */
+ if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) {
+ split_flag |= EXT4_EXT_DATA_VALID1;
+- /* Convert to initialized */
+- } else if (flags & EXT4_GET_BLOCKS_CONVERT) {
++ /* Split the existing unwritten extent */
++ } else if (flags & (EXT4_GET_BLOCKS_UNWRIT_EXT |
++ EXT4_GET_BLOCKS_CONVERT)) {
+ split_flag |= ee_block + ee_len <= eof_block ?
+ EXT4_EXT_MAY_ZEROOUT : 0;
+- split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
++ split_flag |= EXT4_EXT_MARK_UNWRIT2;
++ /* Convert to initialized */
++ if (flags & EXT4_GET_BLOCKS_CONVERT)
++ split_flag |= EXT4_EXT_DATA_VALID2;
+ }
+ flags |= EXT4_GET_BLOCKS_PRE_IO;
+ return ext4_split_extent(handle, inode, ppath, map, split_flag, flags);
+@@ -3874,7 +3878,7 @@ ext4_ext_handle_unwritten_extents(handle
+ /* get_block() before submitting IO, split the extent */
+ if (flags & EXT4_GET_BLOCKS_PRE_IO) {
+ ret = ext4_split_convert_extents(handle, inode, map, ppath,
+- flags | EXT4_GET_BLOCKS_CONVERT);
++ flags);
+ if (ret < 0) {
+ err = ret;
+ goto out2;
--- /dev/null
+From stable+bounces-219634-greg=kroah.com@vger.kernel.org Wed Feb 25 15:38:00 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 09:33:08 -0500
+Subject: ext4: drop extent cache when splitting extent fails
+To: stable@vger.kernel.org
+Cc: Zhang Yi <yi.zhang@huawei.com>, Baokun Li <libaokun1@huawei.com>, stable@kernel.org, Ojaswin Mujoo <ojaswin@linux.ibm.com>, Theodore Ts'o <tytso@mit.edu>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260225143308.469233-1-sashal@kernel.org>
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit 79b592e8f1b435796cbc2722190368e3e8ffd7a1 ]
+
+When the split extent fails, we might leave some extents still being
+processed and return an error directly, which will result in stale
+extent entries remaining in the extent status tree. So drop all of the
+remaining potentially stale extents if the splitting fails.
+
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Baokun Li <libaokun1@huawei.com>
+Cc: stable@kernel.org
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Message-ID: <20251129103247.686136-8-yi.zhang@huaweicloud.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+[ bring error handling pattern closer to upstream ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/extents.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3231,7 +3231,9 @@ static int ext4_split_extent_at(handle_t
+ ext4_ext_mark_unwritten(ex2);
+
+ err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
+- if (err != -ENOSPC && err != -EDQUOT && err != -ENOMEM)
++ if (err && err != -ENOSPC && err != -EDQUOT && err != -ENOMEM)
++ goto out_err;
++ if (!err)
+ goto out;
+
+ /*
+@@ -3247,7 +3249,8 @@ static int ext4_split_extent_at(handle_t
+ if (IS_ERR(path)) {
+ EXT4_ERROR_INODE(inode, "Failed split extent on %u, err %ld",
+ split, PTR_ERR(path));
+- return PTR_ERR(path);
++ err = PTR_ERR(path);
++ goto out_err;
+ }
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+@@ -3303,6 +3306,9 @@ fix_extent_len:
+ */
+ ext4_ext_dirty(handle, inode, path + path->p_depth);
+ return err;
++out_err:
++ /* Remove all remaining potentially stale extents. */
++ ext4_es_remove_extent(inode, ee_block, ee_len);
+ out:
+ ext4_ext_show_leaf(inode, *ppath);
+ return err;
--- /dev/null
+From stable+bounces-219648-greg=kroah.com@vger.kernel.org Wed Feb 25 16:19:27 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 10:10:28 -0500
+Subject: ext4: fix dirtyclusters double decrement on fs shutdown
+To: stable@vger.kernel.org
+Cc: Brian Foster <bfoster@redhat.com>, Baokun Li <libaokun1@huawei.com>, Theodore Ts'o <tytso@mit.edu>, stable@kernel.org, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260225151028.565227-1-sashal@kernel.org>
+
+From: Brian Foster <bfoster@redhat.com>
+
+[ Upstream commit 94a8cea54cd935c54fa2fba70354757c0fc245e3 ]
+
+fstests test generic/388 occasionally reproduces a warning in
+ext4_put_super() associated with the dirty clusters count:
+
+ WARNING: CPU: 7 PID: 76064 at fs/ext4/super.c:1324 ext4_put_super+0x48c/0x590 [ext4]
+
+Tracing the failure shows that the warning fires due to an
+s_dirtyclusters_counter value of -1. IOW, this appears to be a
+spurious decrement as opposed to some sort of leak. Further tracing
+of the dirty cluster count deltas and an LLM scan of the resulting
+output identified the cause as a double decrement in the error path
+between ext4_mb_mark_diskspace_used() and the caller
+ext4_mb_new_blocks().
+
+First, note that generic/388 is a shutdown vs. fsstress test and so
+produces a random set of operations and shutdown injections. In the
+problematic case, the shutdown triggers an error return from the
+ext4_handle_dirty_metadata() call(s) made from
+ext4_mb_mark_context(). The changed value is non-zero at this point,
+so ext4_mb_mark_diskspace_used() does not exit after the error
+bubbles up from ext4_mb_mark_context(). Instead, the former
+decrements both cluster counters and returns the error up to
+ext4_mb_new_blocks(). The latter falls into the !ar->len out path
+which decrements the dirty clusters counter a second time, creating
+the inconsistency.
+
+To avoid this problem and simplify ownership of the cluster
+reservation in this codepath, lift the counter reduction to a single
+place in the caller. This makes it more clear that
+ext4_mb_new_blocks() is responsible for acquiring cluster
+reservation (via ext4_claim_free_clusters()) in the !delalloc case
+as well as releasing it, regardless of whether it ends up consumed
+or returned due to failure.
+
+Fixes: 0087d9fb3f29 ("ext4: Fix s_dirty_blocks_counter if block allocation failed with nodelalloc")
+Signed-off-by: Brian Foster <bfoster@redhat.com>
+Reviewed-by: Baokun Li <libaokun1@huawei.com>
+Link: https://patch.msgid.link/20260113171905.118284-1-bfoster@redhat.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Cc: stable@kernel.org
+[ Drop mballoc-test changes ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/mballoc.c | 21 +++++----------------
+ 1 file changed, 5 insertions(+), 16 deletions(-)
+
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -3309,8 +3309,7 @@ void ext4_exit_mballoc(void)
+ * Returns 0 if success or error code
+ */
+ static noinline_for_stack int
+-ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
+- handle_t *handle, unsigned int reserv_clstrs)
++ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, handle_t *handle)
+ {
+ struct buffer_head *bitmap_bh = NULL;
+ struct ext4_group_desc *gdp;
+@@ -3397,13 +3396,6 @@ ext4_mb_mark_diskspace_used(struct ext4_
+
+ ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
+ percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
+- /*
+- * Now reduce the dirty block count also. Should not go negative
+- */
+- if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
+- /* release all the reserved blocks if non delalloc */
+- percpu_counter_sub(&sbi->s_dirtyclusters_counter,
+- reserv_clstrs);
+
+ if (sbi->s_log_groups_per_flex) {
+ ext4_group_t flex_group = ext4_flex_group(sbi,
+@@ -5272,7 +5264,7 @@ repeat:
+ ext4_mb_pa_free(ac);
+ }
+ if (likely(ac->ac_status == AC_STATUS_FOUND)) {
+- *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
++ *errp = ext4_mb_mark_diskspace_used(ac, handle);
+ if (*errp) {
+ ext4_discard_allocated_blocks(ac);
+ goto errout;
+@@ -5304,12 +5296,9 @@ out:
+ kmem_cache_free(ext4_ac_cachep, ac);
+ if (inquota && ar->len < inquota)
+ dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
+- if (!ar->len) {
+- if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0)
+- /* release all the reserved blocks if non delalloc */
+- percpu_counter_sub(&sbi->s_dirtyclusters_counter,
+- reserv_clstrs);
+- }
++ /* release any reserved blocks */
++ if (reserv_clstrs)
++ percpu_counter_sub(&sbi->s_dirtyclusters_counter, reserv_clstrs);
+
+ trace_ext4_allocate_blocks(ar, (unsigned long long)block);
+
--- /dev/null
+From stable+bounces-219642-greg=kroah.com@vger.kernel.org Wed Feb 25 15:53:58 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 09:52:28 -0500
+Subject: ext4: fix e4b bitmap inconsistency reports
+To: stable@vger.kernel.org
+Cc: Yongjian Sun <sunyongjian1@huawei.com>, Zhang Yi <yi.zhang@huawei.com>, Baokun Li <libaokun1@huawei.com>, Jan Kara <jack@suse.cz>, Theodore Ts'o <tytso@mit.edu>, stable@kernel.org, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260225145229.546147-1-sashal@kernel.org>
+
+From: Yongjian Sun <sunyongjian1@huawei.com>
+
+[ Upstream commit bdc56a9c46b2a99c12313122b9352b619a2e719e ]
+
+A bitmap inconsistency issue was observed during stress tests under
+mixed huge-page workloads. Ext4 reported multiple e4b bitmap check
+failures like:
+
+ext4_mb_complex_scan_group:2508: group 350, 8179 free clusters as
+per group info. But got 8192 blocks
+
+Analysis and experimentation confirmed that the issue is caused by a
+race condition between page migration and bitmap modification. Although
+this timing window is extremely narrow, it is still hit in practice:
+
+folio_lock ext4_mb_load_buddy
+__migrate_folio
+ check ref count
+ folio_mc_copy __filemap_get_folio
+ folio_try_get(folio)
+ ......
+ mb_mark_used
+ ext4_mb_unload_buddy
+ __folio_migrate_mapping
+ folio_ref_freeze
+folio_unlock
+
+The root cause of this issue is that the fast path of load_buddy only
+increments the folio's reference count, which is insufficient to prevent
+concurrent folio migration. We observed that the folio migration process
+acquires the folio lock. Therefore, we can determine whether to take the
+fast path in load_buddy by checking the lock status. If the folio is
+locked, we opt for the slow path (which acquires the lock) to close this
+concurrency window.
+
+Additionally, this change addresses the following issues:
+
+When the DOUBLE_CHECK macro is enabled to inspect bitmap-related
+issues, the following error may be triggered:
+
+corruption in group 324 at byte 784(6272): f in copy != ff on
+disk/prealloc
+
+Analysis reveals that this is a false positive. There is a specific race
+window where the bitmap and the group descriptor become momentarily
+inconsistent, leading to this error report:
+
+ext4_mb_load_buddy ext4_mb_load_buddy
+ __filemap_get_folio(create|lock)
+ folio_lock
+ ext4_mb_init_cache
+ folio_mark_uptodate
+ __filemap_get_folio(no lock)
+ ......
+ mb_mark_used
+ mb_mark_used_double
+ mb_cmp_bitmaps
+ mb_set_bits(e4b->bd_bitmap)
+ folio_unlock
+
+The original logic assumed that since mb_cmp_bitmaps is called when the
+bitmap is newly loaded from disk, the folio lock would be sufficient to
+prevent concurrent access. However, this overlooks a specific race
+condition: if another process attempts to load buddy and finds the folio
+is already in an uptodate state, it will immediately begin using it without
+holding folio lock.
+
+Signed-off-by: Yongjian Sun <sunyongjian1@huawei.com>
+Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://patch.msgid.link/20260106090820.836242-1-sunyongjian@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Cc: stable@kernel.org
+[ folio -> page ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/mballoc.c | 21 +++++++++++----------
+ 1 file changed, 11 insertions(+), 10 deletions(-)
+
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1230,16 +1230,17 @@ ext4_mb_load_buddy_gfp(struct super_bloc
+ /* we could use find_or_create_page(), but it locks page
+ * what we'd like to avoid in fast path ... */
+ page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
+- if (page == NULL || !PageUptodate(page)) {
++ if (page == NULL || !PageUptodate(page) || PageLocked(page)) {
++ /*
++ * PageLocked is employed to detect ongoing page
++ * migrations, since concurrent migrations can lead to
++ * bitmap inconsistency. And if we are not uptodate that
++ * implies somebody just created the page but is yet to
++ * initialize it. We can drop the page reference and
++ * try to get the page with lock in both cases to avoid
++ * concurrency.
++ */
+ if (page)
+- /*
+- * drop the page reference and try
+- * to get the page with lock. If we
+- * are not uptodate that implies
+- * somebody just created the page but
+- * is yet to initialize the same. So
+- * wait for it to initialize.
+- */
+ put_page(page);
+ page = find_or_create_page(inode->i_mapping, pnum, gfp);
+ if (page) {
+@@ -1274,7 +1275,7 @@ ext4_mb_load_buddy_gfp(struct super_bloc
+ poff = block % blocks_per_page;
+
+ page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
+- if (page == NULL || !PageUptodate(page)) {
++ if (page == NULL || !PageUptodate(page) || PageLocked(page)) {
+ if (page)
+ put_page(page);
+ page = find_or_create_page(inode->i_mapping, pnum, gfp);
--- /dev/null
+From stable+bounces-223667-greg=kroah.com@vger.kernel.org Mon Mar 9 15:08:34 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Mar 2026 10:08:27 -0400
+Subject: net: phy: register phy led_triggers during probe to avoid AB-BA deadlock
+To: stable@vger.kernel.org
+Cc: Andrew Lunn <andrew@lunn.ch>, Shiji Yang <yangshiji66@outlook.com>, Paolo Abeni <pabeni@redhat.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260309140827.1095159-1-sashal@kernel.org>
+
+From: Andrew Lunn <andrew@lunn.ch>
+
+[ Upstream commit c8dbdc6e380e7e96a51706db3e4b7870d8a9402d ]
+
+There is an AB-BA deadlock when both LEDS_TRIGGER_NETDEV and
+LED_TRIGGER_PHY are enabled:
+
+[ 1362.049207] [<8054e4b8>] led_trigger_register+0x5c/0x1fc <-- Trying to get lock "triggers_list_lock" via down_write(&triggers_list_lock);
+[ 1362.054536] [<80662830>] phy_led_triggers_register+0xd0/0x234
+[ 1362.060329] [<8065e200>] phy_attach_direct+0x33c/0x40c
+[ 1362.065489] [<80651fc4>] phylink_fwnode_phy_connect+0x15c/0x23c
+[ 1362.071480] [<8066ee18>] mtk_open+0x7c/0xba0
+[ 1362.075849] [<806d714c>] __dev_open+0x280/0x2b0
+[ 1362.080384] [<806d7668>] __dev_change_flags+0x244/0x24c
+[ 1362.085598] [<806d7698>] dev_change_flags+0x28/0x78
+[ 1362.090528] [<807150e4>] dev_ioctl+0x4c0/0x654 <-- Hold lock "rtnl_mutex" by calling rtnl_lock();
+[ 1362.094985] [<80694360>] sock_ioctl+0x2f4/0x4e0
+[ 1362.099567] [<802e9c4c>] sys_ioctl+0x32c/0xd8c
+[ 1362.104022] [<80014504>] syscall_common+0x34/0x58
+
+Here LED_TRIGGER_PHY is registering LED triggers during phy_attach
+while holding RTNL and then taking triggers_list_lock.
+
+[ 1362.191101] [<806c2640>] register_netdevice_notifier+0x60/0x168 <-- Trying to get lock "rtnl_mutex" via rtnl_lock();
+[ 1362.197073] [<805504ac>] netdev_trig_activate+0x194/0x1e4
+[ 1362.202490] [<8054e28c>] led_trigger_set+0x1d4/0x360 <-- Hold lock "triggers_list_lock" by down_read(&triggers_list_lock);
+[ 1362.207511] [<8054eb38>] led_trigger_write+0xd8/0x14c
+[ 1362.212566] [<80381d98>] sysfs_kf_bin_write+0x80/0xbc
+[ 1362.217688] [<8037fcd8>] kernfs_fop_write_iter+0x17c/0x28c
+[ 1362.223174] [<802cbd70>] vfs_write+0x21c/0x3c4
+[ 1362.227712] [<802cc0c4>] ksys_write+0x78/0x12c
+[ 1362.232164] [<80014504>] syscall_common+0x34/0x58
+
+Here LEDS_TRIGGER_NETDEV is being enabled on an LED. It first takes
+triggers_list_lock and then RTNL. A classical AB-BA deadlock.
+
+phy_led_triggers_register() does not require the RTNL, it does not
+make any calls into the network stack which require protection. There
+is also no requirement the PHY has been attached to a MAC, the
+triggers only make use of phydev state. This allows the call to
+phy_led_triggers_register() to be placed elsewhere. PHY probe() and
+remove() don't hold RTNL, so solving the AB-BA deadlock.
+
+Reported-by: Shiji Yang <yangshiji66@outlook.com>
+Closes: https://lore.kernel.org/all/OS7PR01MB13602B128BA1AD3FA38B6D1FFBC69A@OS7PR01MB13602.jpnprd01.prod.outlook.com/
+Fixes: 06f502f57d0d ("leds: trigger: Introduce a NETDEV trigger")
+Cc: stable@vger.kernel.org
+Signed-off-by: Andrew Lunn <andrew@lunn.ch>
+Tested-by: Shiji Yang <yangshiji66@outlook.com>
+Link: https://patch.msgid.link/20260222152601.1978655-1-andrew@lunn.ch
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+[ dropped `is_on_sfp_module` guards and `CONFIG_PHYLIB_LEDS`/`of_phy_leds` logic ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/phy/phy_device.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -1412,7 +1412,6 @@ int phy_attach_direct(struct net_device
+ return err;
+
+ phy_resume(phydev);
+- phy_led_triggers_register(phydev);
+
+ return err;
+
+@@ -1669,8 +1668,6 @@ void phy_detach(struct phy_device *phyde
+ }
+ phydev->phylink = NULL;
+
+- phy_led_triggers_unregister(phydev);
+-
+ if (phydev->mdio.dev.driver)
+ module_put(phydev->mdio.dev.driver->owner);
+
+@@ -2900,10 +2897,14 @@ static int phy_probe(struct device *dev)
+ /* Set the state to READY by default */
+ phydev->state = PHY_READY;
+
++ /* Register the PHY LED triggers */
++ phy_led_triggers_register(phydev);
++
++ return 0;
++
+ out:
+ /* Re-assert the reset signal on error */
+- if (err)
+- phy_device_reset(phydev, 1);
++ phy_device_reset(phydev, 1);
+
+ return err;
+ }
+@@ -2914,6 +2915,8 @@ static int phy_remove(struct device *dev
+
+ cancel_delayed_work_sync(&phydev->state_queue);
+
++ phy_led_triggers_unregister(phydev);
++
+ phydev->state = PHY_DOWN;
+
+ sfp_bus_del_upstream(phydev->sfp_bus);
--- /dev/null
+From stable+bounces-225306-greg=kroah.com@vger.kernel.org Fri Mar 13 14:18:38 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Mar 2026 09:18:29 -0400
+Subject: net/sched: act_gate: snapshot parameters with RCU on replace
+To: stable@vger.kernel.org
+Cc: Paul Moses <p@1g4.org>, Vladimir Oltean <vladimir.oltean@nxp.com>, Jamal Hadi Salim <jhs@mojatatu.com>, Victor Nogueira <victor@mojatatu.com>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260313131829.2431779-1-sashal@kernel.org>
+
+From: Paul Moses <p@1g4.org>
+
+[ Upstream commit 62413a9c3cb183afb9bb6e94dd68caf4e4145f4c ]
+
+The gate action can be replaced while the hrtimer callback or dump path is
+walking the schedule list.
+
+Convert the parameters to an RCU-protected snapshot and swap updates under
+tcf_lock, freeing the previous snapshot via call_rcu(). When REPLACE omits
+the entry list, preserve the existing schedule so the effective state is
+unchanged.
+
+Fixes: a51c328df310 ("net: qos: introduce a gate control flow action")
+Cc: stable@vger.kernel.org
+Signed-off-by: Paul Moses <p@1g4.org>
+Tested-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Reviewed-by: Victor Nogueira <victor@mojatatu.com>
+Link: https://patch.msgid.link/20260223150512.2251594-2-p@1g4.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ Different function signatures, hrtimer changes, context ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/tc_act/tc_gate.h | 33 ++++-
+ net/sched/act_gate.c | 266 ++++++++++++++++++++++++++++++-------------
+ 2 files changed, 212 insertions(+), 87 deletions(-)
+
+--- a/include/net/tc_act/tc_gate.h
++++ b/include/net/tc_act/tc_gate.h
+@@ -32,6 +32,7 @@ struct tcf_gate_params {
+ s32 tcfg_clockid;
+ size_t num_entries;
+ struct list_head entries;
++ struct rcu_head rcu;
+ };
+
+ #define GATE_ACT_GATE_OPEN BIT(0)
+@@ -39,7 +40,7 @@ struct tcf_gate_params {
+
+ struct tcf_gate {
+ struct tc_action common;
+- struct tcf_gate_params param;
++ struct tcf_gate_params __rcu *param;
+ u8 current_gate_status;
+ ktime_t current_close_time;
+ u32 current_entry_octets;
+@@ -65,47 +66,65 @@ static inline u32 tcf_gate_index(const s
+ return a->tcfa_index;
+ }
+
++static inline struct tcf_gate_params *tcf_gate_params_locked(const struct tc_action *a)
++{
++ struct tcf_gate *gact = to_gate(a);
++
++ return rcu_dereference_protected(gact->param,
++ lockdep_is_held(&gact->tcf_lock));
++}
++
+ static inline s32 tcf_gate_prio(const struct tc_action *a)
+ {
++ struct tcf_gate_params *p;
+ s32 tcfg_prio;
+
+- tcfg_prio = to_gate(a)->param.tcfg_priority;
++ p = tcf_gate_params_locked(a);
++ tcfg_prio = p->tcfg_priority;
+
+ return tcfg_prio;
+ }
+
+ static inline u64 tcf_gate_basetime(const struct tc_action *a)
+ {
++ struct tcf_gate_params *p;
+ u64 tcfg_basetime;
+
+- tcfg_basetime = to_gate(a)->param.tcfg_basetime;
++ p = tcf_gate_params_locked(a);
++ tcfg_basetime = p->tcfg_basetime;
+
+ return tcfg_basetime;
+ }
+
+ static inline u64 tcf_gate_cycletime(const struct tc_action *a)
+ {
++ struct tcf_gate_params *p;
+ u64 tcfg_cycletime;
+
+- tcfg_cycletime = to_gate(a)->param.tcfg_cycletime;
++ p = tcf_gate_params_locked(a);
++ tcfg_cycletime = p->tcfg_cycletime;
+
+ return tcfg_cycletime;
+ }
+
+ static inline u64 tcf_gate_cycletimeext(const struct tc_action *a)
+ {
++ struct tcf_gate_params *p;
+ u64 tcfg_cycletimeext;
+
+- tcfg_cycletimeext = to_gate(a)->param.tcfg_cycletime_ext;
++ p = tcf_gate_params_locked(a);
++ tcfg_cycletimeext = p->tcfg_cycletime_ext;
+
+ return tcfg_cycletimeext;
+ }
+
+ static inline u32 tcf_gate_num_entries(const struct tc_action *a)
+ {
++ struct tcf_gate_params *p;
+ u32 num_entries;
+
+- num_entries = to_gate(a)->param.num_entries;
++ p = tcf_gate_params_locked(a);
++ num_entries = p->num_entries;
+
+ return num_entries;
+ }
+@@ -119,7 +138,7 @@ static inline struct action_gate_entry
+ u32 num_entries;
+ int i = 0;
+
+- p = &to_gate(a)->param;
++ p = tcf_gate_params_locked(a);
+ num_entries = p->num_entries;
+
+ list_for_each_entry(entry, &p->entries, list)
+--- a/net/sched/act_gate.c
++++ b/net/sched/act_gate.c
+@@ -32,9 +32,12 @@ static ktime_t gate_get_time(struct tcf_
+ return KTIME_MAX;
+ }
+
+-static void gate_get_start_time(struct tcf_gate *gact, ktime_t *start)
++static void tcf_gate_params_free_rcu(struct rcu_head *head);
++
++static void gate_get_start_time(struct tcf_gate *gact,
++ const struct tcf_gate_params *param,
++ ktime_t *start)
+ {
+- struct tcf_gate_params *param = &gact->param;
+ ktime_t now, base, cycle;
+ u64 n;
+
+@@ -69,12 +72,14 @@ static enum hrtimer_restart gate_timer_f
+ {
+ struct tcf_gate *gact = container_of(timer, struct tcf_gate,
+ hitimer);
+- struct tcf_gate_params *p = &gact->param;
+ struct tcfg_gate_entry *next;
++ struct tcf_gate_params *p;
+ ktime_t close_time, now;
+
+ spin_lock(&gact->tcf_lock);
+
++ p = rcu_dereference_protected(gact->param,
++ lockdep_is_held(&gact->tcf_lock));
+ next = gact->next_entry;
+
+ /* cycle start, clear pending bit, clear total octets */
+@@ -227,6 +232,35 @@ static void release_entry_list(struct li
+ }
+ }
+
++static int tcf_gate_copy_entries(struct tcf_gate_params *dst,
++ const struct tcf_gate_params *src,
++ struct netlink_ext_ack *extack)
++{
++ struct tcfg_gate_entry *entry;
++ int i = 0;
++
++ list_for_each_entry(entry, &src->entries, list) {
++ struct tcfg_gate_entry *new;
++
++ new = kzalloc(sizeof(*new), GFP_ATOMIC);
++ if (!new) {
++ NL_SET_ERR_MSG(extack, "Not enough memory for entry");
++ return -ENOMEM;
++ }
++
++ new->index = entry->index;
++ new->gate_state = entry->gate_state;
++ new->interval = entry->interval;
++ new->ipv = entry->ipv;
++ new->maxoctets = entry->maxoctets;
++ list_add_tail(&new->list, &dst->entries);
++ i++;
++ }
++
++ dst->num_entries = i;
++ return 0;
++}
++
+ static int parse_gate_list(struct nlattr *list_attr,
+ struct tcf_gate_params *sched,
+ struct netlink_ext_ack *extack)
+@@ -272,23 +306,42 @@ release_list:
+ return err;
+ }
+
+-static void gate_setup_timer(struct tcf_gate *gact, u64 basetime,
+- enum tk_offsets tko, s32 clockid,
+- bool do_init)
+-{
+- if (!do_init) {
+- if (basetime == gact->param.tcfg_basetime &&
+- tko == gact->tk_offset &&
+- clockid == gact->param.tcfg_clockid)
+- return;
+-
+- spin_unlock_bh(&gact->tcf_lock);
+- hrtimer_cancel(&gact->hitimer);
+- spin_lock_bh(&gact->tcf_lock);
++static bool gate_timer_needs_cancel(u64 basetime, u64 old_basetime,
++ enum tk_offsets tko,
++ enum tk_offsets old_tko,
++ s32 clockid, s32 old_clockid)
++{
++ return basetime != old_basetime ||
++ clockid != old_clockid ||
++ tko != old_tko;
++}
++
++static int gate_clock_resolve(s32 clockid, enum tk_offsets *tko,
++ struct netlink_ext_ack *extack)
++{
++ switch (clockid) {
++ case CLOCK_REALTIME:
++ *tko = TK_OFFS_REAL;
++ return 0;
++ case CLOCK_MONOTONIC:
++ *tko = TK_OFFS_MAX;
++ return 0;
++ case CLOCK_BOOTTIME:
++ *tko = TK_OFFS_BOOT;
++ return 0;
++ case CLOCK_TAI:
++ *tko = TK_OFFS_TAI;
++ return 0;
++ default:
++ NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
++ return -EINVAL;
+ }
+- gact->param.tcfg_basetime = basetime;
+- gact->param.tcfg_clockid = clockid;
+- gact->tk_offset = tko;
++}
++
++static void gate_setup_timer(struct tcf_gate *gact, s32 clockid,
++ enum tk_offsets tko)
++{
++ WRITE_ONCE(gact->tk_offset, tko);
+ hrtimer_init(&gact->hitimer, clockid, HRTIMER_MODE_ABS_SOFT);
+ gact->hitimer.function = gate_timer_func;
+ }
+@@ -300,14 +353,21 @@ static int tcf_gate_init(struct net *net
+ struct netlink_ext_ack *extack)
+ {
+ struct tc_action_net *tn = net_generic(net, gate_net_id);
+- enum tk_offsets tk_offset = TK_OFFS_TAI;
++ u64 cycletime = 0, basetime = 0, cycletime_ext = 0;
++ struct tcf_gate_params *p = NULL, *old_p = NULL;
++ enum tk_offsets old_tk_offset = TK_OFFS_TAI;
++ const struct tcf_gate_params *cur_p = NULL;
+ struct nlattr *tb[TCA_GATE_MAX + 1];
++ enum tk_offsets tko = TK_OFFS_TAI;
+ struct tcf_chain *goto_ch = NULL;
+- u64 cycletime = 0, basetime = 0;
+- struct tcf_gate_params *p;
++ s32 timer_clockid = CLOCK_TAI;
++ bool use_old_entries = false;
++ s32 old_clockid = CLOCK_TAI;
++ bool need_cancel = false;
+ s32 clockid = CLOCK_TAI;
+ struct tcf_gate *gact;
+ struct tc_gate *parm;
++ u64 old_basetime = 0;
+ int ret = 0, err;
+ u32 gflags = 0;
+ s32 prio = -1;
+@@ -324,26 +384,8 @@ static int tcf_gate_init(struct net *net
+ if (!tb[TCA_GATE_PARMS])
+ return -EINVAL;
+
+- if (tb[TCA_GATE_CLOCKID]) {
++ if (tb[TCA_GATE_CLOCKID])
+ clockid = nla_get_s32(tb[TCA_GATE_CLOCKID]);
+- switch (clockid) {
+- case CLOCK_REALTIME:
+- tk_offset = TK_OFFS_REAL;
+- break;
+- case CLOCK_MONOTONIC:
+- tk_offset = TK_OFFS_MAX;
+- break;
+- case CLOCK_BOOTTIME:
+- tk_offset = TK_OFFS_BOOT;
+- break;
+- case CLOCK_TAI:
+- tk_offset = TK_OFFS_TAI;
+- break;
+- default:
+- NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
+- return -EINVAL;
+- }
+- }
+
+ parm = nla_data(tb[TCA_GATE_PARMS]);
+ index = parm->index;
+@@ -369,6 +411,60 @@ static int tcf_gate_init(struct net *net
+ return -EEXIST;
+ }
+
++ gact = to_gate(*a);
++
++ err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
++ if (err < 0)
++ goto release_idr;
++
++ p = kzalloc(sizeof(*p), GFP_KERNEL);
++ if (!p) {
++ err = -ENOMEM;
++ goto chain_put;
++ }
++ INIT_LIST_HEAD(&p->entries);
++
++ use_old_entries = !tb[TCA_GATE_ENTRY_LIST];
++ if (!use_old_entries) {
++ err = parse_gate_list(tb[TCA_GATE_ENTRY_LIST], p, extack);
++ if (err < 0)
++ goto err_free;
++ use_old_entries = !err;
++ }
++
++ if (ret == ACT_P_CREATED && use_old_entries) {
++ NL_SET_ERR_MSG(extack, "The entry list is empty");
++ err = -EINVAL;
++ goto err_free;
++ }
++
++ if (ret != ACT_P_CREATED) {
++ rcu_read_lock();
++ cur_p = rcu_dereference(gact->param);
++
++ old_basetime = cur_p->tcfg_basetime;
++ old_clockid = cur_p->tcfg_clockid;
++ old_tk_offset = READ_ONCE(gact->tk_offset);
++
++ basetime = old_basetime;
++ cycletime_ext = cur_p->tcfg_cycletime_ext;
++ prio = cur_p->tcfg_priority;
++ gflags = cur_p->tcfg_flags;
++
++ if (!tb[TCA_GATE_CLOCKID])
++ clockid = old_clockid;
++
++ err = 0;
++ if (use_old_entries) {
++ err = tcf_gate_copy_entries(p, cur_p, extack);
++ if (!err && !tb[TCA_GATE_CYCLE_TIME])
++ cycletime = cur_p->tcfg_cycletime;
++ }
++ rcu_read_unlock();
++ if (err)
++ goto err_free;
++ }
++
+ if (tb[TCA_GATE_PRIORITY])
+ prio = nla_get_s32(tb[TCA_GATE_PRIORITY]);
+
+@@ -378,25 +474,26 @@ static int tcf_gate_init(struct net *net
+ if (tb[TCA_GATE_FLAGS])
+ gflags = nla_get_u32(tb[TCA_GATE_FLAGS]);
+
+- gact = to_gate(*a);
+- if (ret == ACT_P_CREATED)
+- INIT_LIST_HEAD(&gact->param.entries);
++ if (tb[TCA_GATE_CYCLE_TIME])
++ cycletime = nla_get_u64(tb[TCA_GATE_CYCLE_TIME]);
+
+- err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+- if (err < 0)
+- goto release_idr;
++ if (tb[TCA_GATE_CYCLE_TIME_EXT])
++ cycletime_ext = nla_get_u64(tb[TCA_GATE_CYCLE_TIME_EXT]);
+
+- spin_lock_bh(&gact->tcf_lock);
+- p = &gact->param;
++ err = gate_clock_resolve(clockid, &tko, extack);
++ if (err)
++ goto err_free;
++ timer_clockid = clockid;
++
++ need_cancel = ret != ACT_P_CREATED &&
++ gate_timer_needs_cancel(basetime, old_basetime,
++ tko, old_tk_offset,
++ timer_clockid, old_clockid);
+
+- if (tb[TCA_GATE_CYCLE_TIME])
+- cycletime = nla_get_u64(tb[TCA_GATE_CYCLE_TIME]);
++ if (need_cancel)
++ hrtimer_cancel(&gact->hitimer);
+
+- if (tb[TCA_GATE_ENTRY_LIST]) {
+- err = parse_gate_list(tb[TCA_GATE_ENTRY_LIST], p, extack);
+- if (err < 0)
+- goto chain_put;
+- }
++ spin_lock_bh(&gact->tcf_lock);
+
+ if (!cycletime) {
+ struct tcfg_gate_entry *entry;
+@@ -405,22 +502,20 @@ static int tcf_gate_init(struct net *net
+ list_for_each_entry(entry, &p->entries, list)
+ cycle = ktime_add_ns(cycle, entry->interval);
+ cycletime = cycle;
+- if (!cycletime) {
+- err = -EINVAL;
+- goto chain_put;
+- }
+ }
+ p->tcfg_cycletime = cycletime;
++ p->tcfg_cycletime_ext = cycletime_ext;
+
+- if (tb[TCA_GATE_CYCLE_TIME_EXT])
+- p->tcfg_cycletime_ext =
+- nla_get_u64(tb[TCA_GATE_CYCLE_TIME_EXT]);
+-
+- gate_setup_timer(gact, basetime, tk_offset, clockid,
+- ret == ACT_P_CREATED);
++ if (need_cancel || ret == ACT_P_CREATED)
++ gate_setup_timer(gact, timer_clockid, tko);
+ p->tcfg_priority = prio;
+ p->tcfg_flags = gflags;
+- gate_get_start_time(gact, &start);
++ p->tcfg_basetime = basetime;
++ p->tcfg_clockid = timer_clockid;
++ gate_get_start_time(gact, p, &start);
++
++ old_p = rcu_replace_pointer(gact->param, p,
++ lockdep_is_held(&gact->tcf_lock));
+
+ gact->current_close_time = start;
+ gact->current_gate_status = GATE_ACT_GATE_OPEN | GATE_ACT_PENDING;
+@@ -437,11 +532,15 @@ static int tcf_gate_init(struct net *net
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+
++ if (old_p)
++ call_rcu(&old_p->rcu, tcf_gate_params_free_rcu);
++
+ return ret;
+
++err_free:
++ release_entry_list(&p->entries);
++ kfree(p);
+ chain_put:
+- spin_unlock_bh(&gact->tcf_lock);
+-
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+ release_idr:
+@@ -449,21 +548,29 @@ release_idr:
+ * without taking tcf_lock.
+ */
+ if (ret == ACT_P_CREATED)
+- gate_setup_timer(gact, gact->param.tcfg_basetime,
+- gact->tk_offset, gact->param.tcfg_clockid,
+- true);
++ gate_setup_timer(gact, timer_clockid, tko);
++
+ tcf_idr_release(*a, bind);
+ return err;
+ }
+
++static void tcf_gate_params_free_rcu(struct rcu_head *head)
++{
++ struct tcf_gate_params *p = container_of(head, struct tcf_gate_params, rcu);
++
++ release_entry_list(&p->entries);
++ kfree(p);
++}
++
+ static void tcf_gate_cleanup(struct tc_action *a)
+ {
+ struct tcf_gate *gact = to_gate(a);
+ struct tcf_gate_params *p;
+
+- p = &gact->param;
+ hrtimer_cancel(&gact->hitimer);
+- release_entry_list(&p->entries);
++ p = rcu_dereference_protected(gact->param, 1);
++ if (p)
++ call_rcu(&p->rcu, tcf_gate_params_free_rcu);
+ }
+
+ static int dumping_entry(struct sk_buff *skb,
+@@ -512,10 +619,9 @@ static int tcf_gate_dump(struct sk_buff
+ struct nlattr *entry_list;
+ struct tcf_t t;
+
+- spin_lock_bh(&gact->tcf_lock);
+- opt.action = gact->tcf_action;
+-
+- p = &gact->param;
++ rcu_read_lock();
++ opt.action = READ_ONCE(gact->tcf_action);
++ p = rcu_dereference(gact->param);
+
+ if (nla_put(skb, TCA_GATE_PARMS, sizeof(opt), &opt))
+ goto nla_put_failure;
+@@ -555,12 +661,12 @@ static int tcf_gate_dump(struct sk_buff
+ tcf_tm_dump(&t, &gact->tcf_tm);
+ if (nla_put_64bit(skb, TCA_GATE_TM, sizeof(t), &t, TCA_GATE_PAD))
+ goto nla_put_failure;
+- spin_unlock_bh(&gact->tcf_lock);
++ rcu_read_unlock();
+
+ return skb->len;
+
+ nla_put_failure:
+- spin_unlock_bh(&gact->tcf_lock);
++ rcu_read_unlock();
+ nlmsg_trim(skb, b);
+ return -1;
+ }
drm-exynos-vidi-use-priv-vidi_dev-for-ctx-lookup-in-vidi_connection_ioctl.patch
drm-exynos-vidi-fix-to-avoid-directly-dereferencing-user-pointer.patch
drm-exynos-vidi-use-ctx-lock-to-protect-struct-vidi_context-member-variables-related-to-memory-alloc-free.patch
+ext4-don-t-set-ext4_get_blocks_convert-when-splitting-before-submitting-i-o.patch
+ext4-drop-extent-cache-when-splitting-extent-fails.patch
+ext4-fix-e4b-bitmap-inconsistency-reports.patch
+ext4-fix-dirtyclusters-double-decrement-on-fs-shutdown.patch
+ata-libata-remove-pointless-vprintk-calls.patch
+ata-libata-scsi-refactor-ata_scsi_translate.patch
+wifi-libertas-fix-use-after-free-in-lbs_free_adapter.patch
+wifi-mac80211-fix-null-pointer-dereference-in-mesh_rx_csa_frame.patch
+wifi-cfg80211-cancel-rfkill_block-work-in-wiphy_unregister.patch
+smb-client-don-t-log-plaintext-credentials-in-cifs_set_cifscreds.patch
+net-phy-register-phy-led_triggers-during-probe-to-avoid-ab-ba-deadlock.patch
+drm-amd-display-use-gfp_atomic-in-dc_create_stream_for_sink.patch
+net-sched-act_gate-snapshot-parameters-with-rcu-on-replace.patch
--- /dev/null
+From stable+bounces-223666-greg=kroah.com@vger.kernel.org Mon Mar 9 15:08:21 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Mar 2026 10:08:11 -0400
+Subject: smb: client: Don't log plaintext credentials in cifs_set_cifscreds
+To: stable@vger.kernel.org
+Cc: Thorsten Blum <thorsten.blum@linux.dev>, "Paulo Alcantara (Red Hat)" <pc@manguebit.org>, Steve French <stfrench@microsoft.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260309140811.1094239-1-sashal@kernel.org>
+
+From: Thorsten Blum <thorsten.blum@linux.dev>
+
+[ Upstream commit 2f37dc436d4e61ff7ae0b0353cf91b8c10396e4d ]
+
+When debug logging is enabled, cifs_set_cifscreds() logs the key
+payload and exposes the plaintext username and password. Remove the
+debug log to avoid exposing credentials.
+
+Fixes: 8a8798a5ff90 ("cifs: fetch credentials out of keyring for non-krb5 auth multiuser mounts")
+Cc: stable@vger.kernel.org
+Acked-by: Paulo Alcantara (Red Hat) <pc@manguebit.org>
+Signed-off-by: Thorsten Blum <thorsten.blum@linux.dev>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/cifs/connect.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -2951,7 +2951,6 @@ cifs_set_cifscreds(struct smb_vol *vol,
+ /* find first : in payload */
+ payload = upayload->data;
+ delim = strnchr(payload, upayload->datalen, ':');
+- cifs_dbg(FYI, "payload=%s\n", payload);
+ if (!delim) {
+ cifs_dbg(FYI, "Unable to find ':' in payload (datalen=%d)\n",
+ upayload->datalen);
--- /dev/null
+From stable+bounces-223624-greg=kroah.com@vger.kernel.org Mon Mar 9 13:07:10 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Mar 2026 08:01:44 -0400
+Subject: wifi: cfg80211: cancel rfkill_block work in wiphy_unregister()
+To: stable@vger.kernel.org
+Cc: Daniil Dulov <d.dulov@aladdin.ru>, Johannes Berg <johannes.berg@intel.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260309120144.846204-1-sashal@kernel.org>
+
+From: Daniil Dulov <d.dulov@aladdin.ru>
+
+[ Upstream commit 767d23ade706d5fa51c36168e92a9c5533c351a1 ]
+
+There is a use-after-free error in cfg80211_shutdown_all_interfaces found
+by syzkaller:
+
+BUG: KASAN: use-after-free in cfg80211_shutdown_all_interfaces+0x213/0x220
+Read of size 8 at addr ffff888112a78d98 by task kworker/0:5/5326
+CPU: 0 UID: 0 PID: 5326 Comm: kworker/0:5 Not tainted 6.19.0-rc2 #2 PREEMPT(voluntary)
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.15.0-1 04/01/2014
+Workqueue: events cfg80211_rfkill_block_work
+Call Trace:
+ <TASK>
+ dump_stack_lvl+0x116/0x1f0
+ print_report+0xcd/0x630
+ kasan_report+0xe0/0x110
+ cfg80211_shutdown_all_interfaces+0x213/0x220
+ cfg80211_rfkill_block_work+0x1e/0x30
+ process_one_work+0x9cf/0x1b70
+ worker_thread+0x6c8/0xf10
+ kthread+0x3c5/0x780
+ ret_from_fork+0x56d/0x700
+ ret_from_fork_asm+0x1a/0x30
+ </TASK>
+
+The problem arises because the rfkill_block work is not cancelled when
+the wiphy is being unregistered. In order to fix the issue, cancel the
+corresponding work in wiphy_unregister().
+
+Found by Linux Verification Center (linuxtesting.org) with Syzkaller.
+
+Fixes: 1f87f7d3a3b4 ("cfg80211: add rfkill support")
+Cc: stable@vger.kernel.org
+Signed-off-by: Daniil Dulov <d.dulov@aladdin.ru>
+Link: https://patch.msgid.link/20260211082024.1967588-1-d.dulov@aladdin.ru
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+[ Context ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/wireless/core.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -1046,6 +1046,7 @@ void wiphy_unregister(struct wiphy *wiph
+ rtnl_unlock();
+
+ flush_work(&rdev->scan_done_wk);
++ cancel_work_sync(&rdev->rfkill_block);
+ cancel_work_sync(&rdev->conn_work);
+ flush_work(&rdev->event_work);
+ cancel_delayed_work_sync(&rdev->dfs_update_channels_wk);
--- /dev/null
+From stable+bounces-223613-greg=kroah.com@vger.kernel.org Mon Mar 9 12:29:35 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Mar 2026 07:29:28 -0400
+Subject: wifi: libertas: fix use-after-free in lbs_free_adapter()
+To: stable@vger.kernel.org
+Cc: Daniel Hodges <git@danielhodges.dev>, Johannes Berg <johannes.berg@intel.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260309112928.819155-1-sashal@kernel.org>
+
+From: Daniel Hodges <git@danielhodges.dev>
+
+[ Upstream commit 03cc8f90d0537fcd4985c3319b4fafbf2e3fb1f0 ]
+
+The lbs_free_adapter() function uses timer_delete() (non-synchronous)
+for both command_timer and tx_lockup_timer before the structure is
+freed. This is incorrect because timer_delete() does not wait for
+any running timer callback to complete.
+
+If a timer callback is executing when lbs_free_adapter() is called,
+the callback will access freed memory since lbs_cfg_free() frees the
+containing structure immediately after lbs_free_adapter() returns.
+
+Both timer callbacks (lbs_cmd_timeout_handler and lbs_tx_lockup_handler)
+access priv->driver_lock, priv->cur_cmd, priv->dev, and other fields,
+which would all be use-after-free violations.
+
+Use timer_delete_sync() instead to ensure any running timer callback
+has completed before returning.
+
+This bug was introduced in commit 8f641d93c38a ("libertas: detect TX
+lockups and reset hardware") where del_timer() was used instead of
+del_timer_sync() in the cleanup path. The command_timer has had the
+same issue since the driver was first written.
+
+Fixes: 8f641d93c38a ("libertas: detect TX lockups and reset hardware")
+Fixes: 954ee164f4f4 ("[PATCH] libertas: reorganize and simplify init sequence")
+Cc: stable@vger.kernel.org
+Signed-off-by: Daniel Hodges <git@danielhodges.dev>
+Link: https://patch.msgid.link/20260206195356.15647-1-git@danielhodges.dev
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+[ del_timer() => timer_delete_sync() ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/marvell/libertas/main.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/wireless/marvell/libertas/main.c
++++ b/drivers/net/wireless/marvell/libertas/main.c
+@@ -882,8 +882,8 @@ static void lbs_free_adapter(struct lbs_
+ {
+ lbs_free_cmd_buffer(priv);
+ kfifo_free(&priv->event_fifo);
+- del_timer(&priv->command_timer);
+- del_timer(&priv->tx_lockup_timer);
++ timer_delete_sync(&priv->command_timer);
++ timer_delete_sync(&priv->tx_lockup_timer);
+ del_timer(&priv->auto_deepsleep_timer);
+ }
+
--- /dev/null
+From stable+bounces-223622-greg=kroah.com@vger.kernel.org Mon Mar 9 12:55:32 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Mar 2026 07:53:59 -0400
+Subject: wifi: mac80211: fix NULL pointer dereference in mesh_rx_csa_frame()
+To: stable@vger.kernel.org
+Cc: Vahagn Vardanian <vahagn@redrays.io>, Johannes Berg <johannes.berg@intel.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260309115359.840072-1-sashal@kernel.org>
+
+From: Vahagn Vardanian <vahagn@redrays.io>
+
+[ Upstream commit 017c1792525064a723971f0216e6ef86a8c7af11 ]
+
+In mesh_rx_csa_frame(), elems->mesh_chansw_params_ie is dereferenced
+at lines 1638 and 1642 without a prior NULL check:
+
+ ifmsh->chsw_ttl = elems->mesh_chansw_params_ie->mesh_ttl;
+ ...
+ pre_value = le16_to_cpu(elems->mesh_chansw_params_ie->mesh_pre_value);
+
+The mesh_matches_local() check above only validates the Mesh ID,
+Mesh Configuration, and Supported Rates IEs. It does not verify the
+presence of the Mesh Channel Switch Parameters IE (element ID 118).
+When a received CSA action frame omits that IE, ieee802_11_parse_elems()
+leaves elems->mesh_chansw_params_ie as NULL, and the unconditional
+dereference causes a kernel NULL pointer dereference.
+
+A remote mesh peer with an established peer link (PLINK_ESTAB) can
+trigger this by sending a crafted SPECTRUM_MGMT/CHL_SWITCH action frame
+that includes a matching Mesh ID and Mesh Configuration IE but omits the
+Mesh Channel Switch Parameters IE. No authentication beyond the default
+open mesh peering is required.
+
+Crash confirmed on kernel 6.17.0-5-generic via mac80211_hwsim:
+
+ BUG: kernel NULL pointer dereference, address: 0000000000000000
+ Oops: Oops: 0000 [#1] SMP NOPTI
+ RIP: 0010:ieee80211_mesh_rx_queued_mgmt+0x143/0x2a0 [mac80211]
+ CR2: 0000000000000000
+
+Fix by adding a NULL check for mesh_chansw_params_ie after
+mesh_matches_local() returns, consistent with how other optional IEs
+are guarded throughout the mesh code.
+
+The bug has been present since v3.13 (released 2014-01-19).
+
+Fixes: 8f2535b92d68 ("mac80211: process the CSA frame for mesh accordingly")
+Cc: stable@vger.kernel.org
+Signed-off-by: Vahagn Vardanian <vahagn@redrays.io>
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+[ adapted pointer access elems-> to stack struct elems, and replaced goto free with return ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mac80211/mesh.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/mac80211/mesh.c
++++ b/net/mac80211/mesh.c
+@@ -1435,6 +1435,9 @@ static void mesh_rx_csa_frame(struct iee
+ if (!mesh_matches_local(sdata, &elems))
+ return;
+
++ if (!elems.mesh_chansw_params_ie)
++ return;
++
+ ifmsh->chsw_ttl = elems.mesh_chansw_params_ie->mesh_ttl;
+ if (!--ifmsh->chsw_ttl)
+ fwd_csa = false;