--- /dev/null
+From stable+bounces-227168-greg=kroah.com@vger.kernel.org Wed Mar 18 22:16:03 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Mar 2026 17:15:57 -0400
+Subject: cifs: open files should not hold ref on superblock
+To: stable@vger.kernel.org
+Cc: Shyam Prasad N <sprasad@microsoft.com>, Steve French <stfrench@microsoft.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260318211557.1344366-1-sashal@kernel.org>
+
+From: Shyam Prasad N <sprasad@microsoft.com>
+
+[ Upstream commit 340cea84f691c5206561bb2e0147158fe02070be ]
+
+Today whenever we deal with a file, in addition to holding
+a reference on the dentry, we also get a reference on the
+superblock. This happens in two cases:
+1. when a new cinode is allocated
+2. when an oplock break is being processed
+
+The reasoning for holding the superblock ref was to make sure
+that when umount happens, if there are users of inodes and
+dentries, it does not try to clean them up and wait for the
+last ref to superblock to be dropped by last of such users.
+
+But the side effect of doing that is that umount silently drops
+a ref on the superblock and we could have deferred closes and
+lease breaks still holding these refs.
+
+Ideally, we should ensure that all of these users of inodes and
+dentries are cleaned up at the time of umount, which is what this
+code is doing.
+
+This code change allows these code paths to use a ref on the
+dentry (and hence the inode). That way, umount is
+ensured to clean up SMB client resources when it's the last
+ref on the superblock (For ex: when same objects are shared).
+
+The code change also moves the call to close all the files in
+deferred close list to the umount code path. It also waits for
+oplock_break workers to be flushed before calling
+kill_anon_super (which eventually frees up those objects).
+
+Fixes: 24261fc23db9 ("cifs: delay super block destruction until all cifsFileInfo objects are gone")
+Fixes: 705c79101ccf ("smb: client: fix use-after-free in cifs_oplock_break")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Shyam Prasad N <sprasad@microsoft.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+[ replaced kmalloc_obj() with kmalloc(sizeof(...)) ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/cifsfs.c | 7 +++++--
+ fs/smb/client/cifsproto.h | 1 +
+ fs/smb/client/file.c | 11 -----------
+ fs/smb/client/misc.c | 42 ++++++++++++++++++++++++++++++++++++++++++
+ fs/smb/client/trace.h | 2 ++
+ 5 files changed, 50 insertions(+), 13 deletions(-)
+
+--- a/fs/smb/client/cifsfs.c
++++ b/fs/smb/client/cifsfs.c
+@@ -330,10 +330,14 @@ static void cifs_kill_sb(struct super_bl
+
+ /*
+ * We need to release all dentries for the cached directories
+- * before we kill the sb.
++ * and close all deferred file handles before we kill the sb.
+ */
+ if (cifs_sb->root) {
+ close_all_cached_dirs(cifs_sb);
++ cifs_close_all_deferred_files_sb(cifs_sb);
++
++ /* Wait for all pending oplock breaks to complete */
++ flush_workqueue(cifsoplockd_wq);
+
+ /* finally release root dentry */
+ dput(cifs_sb->root);
+@@ -864,7 +868,6 @@ static void cifs_umount_begin(struct sup
+ spin_unlock(&tcon->tc_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
+
+- cifs_close_all_deferred_files(tcon);
+ /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
+ /* cancel_notify_requests(tcon); */
+ if (tcon->ses && tcon->ses->server) {
+--- a/fs/smb/client/cifsproto.h
++++ b/fs/smb/client/cifsproto.h
+@@ -313,6 +313,7 @@ extern void cifs_close_deferred_file(str
+
+ extern void cifs_close_all_deferred_files(struct cifs_tcon *cifs_tcon);
+
++void cifs_close_all_deferred_files_sb(struct cifs_sb_info *cifs_sb);
+ void cifs_close_deferred_file_under_dentry(struct cifs_tcon *cifs_tcon,
+ struct dentry *dentry);
+
+--- a/fs/smb/client/file.c
++++ b/fs/smb/client/file.c
+@@ -704,8 +704,6 @@ struct cifsFileInfo *cifs_new_fileinfo(s
+ mutex_init(&cfile->fh_mutex);
+ spin_lock_init(&cfile->file_info_lock);
+
+- cifs_sb_active(inode->i_sb);
+-
+ /*
+ * If the server returned a read oplock and we have mandatory brlocks,
+ * set oplock level to None.
+@@ -760,7 +758,6 @@ static void cifsFileInfo_put_final(struc
+ struct inode *inode = d_inode(cifs_file->dentry);
+ struct cifsInodeInfo *cifsi = CIFS_I(inode);
+ struct cifsLockInfo *li, *tmp;
+- struct super_block *sb = inode->i_sb;
+
+ /*
+ * Delete any outstanding lock records. We'll lose them when the file
+@@ -778,7 +775,6 @@ static void cifsFileInfo_put_final(struc
+
+ cifs_put_tlink(cifs_file->tlink);
+ dput(cifs_file->dentry);
+- cifs_sb_deactive(sb);
+ kfree(cifs_file->symlink_target);
+ kfree(cifs_file);
+ }
+@@ -3150,12 +3146,6 @@ void cifs_oplock_break(struct work_struc
+ __u64 persistent_fid, volatile_fid;
+ __u16 net_fid;
+
+- /*
+- * Hold a reference to the superblock to prevent it and its inodes from
+- * being freed while we are accessing cinode. Otherwise, _cifsFileInfo_put()
+- * may release the last reference to the sb and trigger inode eviction.
+- */
+- cifs_sb_active(sb);
+ wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
+ TASK_UNINTERRUPTIBLE);
+
+@@ -3228,7 +3218,6 @@ oplock_break_ack:
+ cifs_put_tlink(tlink);
+ out:
+ cifs_done_oplock_break(cinode);
+- cifs_sb_deactive(sb);
+ }
+
+ static int cifs_swap_activate(struct swap_info_struct *sis,
+--- a/fs/smb/client/misc.c
++++ b/fs/smb/client/misc.c
+@@ -27,6 +27,11 @@
+ #include "fs_context.h"
+ #include "cached_dir.h"
+
++struct tcon_list {
++ struct list_head entry;
++ struct cifs_tcon *tcon;
++};
++
+ /* The xid serves as a useful identifier for each incoming vfs request,
+ in a similar way to the mid which is useful to track each sent smb,
+ and CurrentXid can also provide a running counter (although it
+@@ -832,6 +837,43 @@ cifs_close_all_deferred_files(struct cif
+ kfree(tmp_list);
+ }
+ }
++
++void cifs_close_all_deferred_files_sb(struct cifs_sb_info *cifs_sb)
++{
++ struct rb_root *root = &cifs_sb->tlink_tree;
++ struct rb_node *node;
++ struct cifs_tcon *tcon;
++ struct tcon_link *tlink;
++ struct tcon_list *tmp_list, *q;
++ LIST_HEAD(tcon_head);
++
++ spin_lock(&cifs_sb->tlink_tree_lock);
++ for (node = rb_first(root); node; node = rb_next(node)) {
++ tlink = rb_entry(node, struct tcon_link, tl_rbnode);
++ tcon = tlink_tcon(tlink);
++ if (IS_ERR(tcon))
++ continue;
++ tmp_list = kmalloc(sizeof(struct tcon_list), GFP_ATOMIC);
++ if (tmp_list == NULL)
++ break;
++ tmp_list->tcon = tcon;
++ /* Take a reference on tcon to prevent it from being freed */
++ spin_lock(&tcon->tc_lock);
++ ++tcon->tc_count;
++ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
++ netfs_trace_tcon_ref_get_close_defer_files);
++ spin_unlock(&tcon->tc_lock);
++ list_add_tail(&tmp_list->entry, &tcon_head);
++ }
++ spin_unlock(&cifs_sb->tlink_tree_lock);
++
++ list_for_each_entry_safe(tmp_list, q, &tcon_head, entry) {
++ cifs_close_all_deferred_files(tmp_list->tcon);
++ list_del(&tmp_list->entry);
++ cifs_put_tcon(tmp_list->tcon, netfs_trace_tcon_ref_put_close_defer_files);
++ kfree(tmp_list);
++ }
++}
+
+ void cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon,
+ struct dentry *dentry)
+--- a/fs/smb/client/trace.h
++++ b/fs/smb/client/trace.h
+@@ -47,6 +47,7 @@
+ EM(netfs_trace_tcon_ref_get_cached_laundromat, "GET Ch-Lau") \
+ EM(netfs_trace_tcon_ref_get_cached_lease_break, "GET Ch-Lea") \
+ EM(netfs_trace_tcon_ref_get_cancelled_close, "GET Cn-Cls") \
++ EM(netfs_trace_tcon_ref_get_close_defer_files, "GET Cl-Def") \
+ EM(netfs_trace_tcon_ref_get_dfs_refer, "GET DfsRef") \
+ EM(netfs_trace_tcon_ref_get_find, "GET Find ") \
+ EM(netfs_trace_tcon_ref_get_find_sess_tcon, "GET FndSes") \
+@@ -58,6 +59,7 @@
+ EM(netfs_trace_tcon_ref_put_cancelled_close, "PUT Cn-Cls") \
+ EM(netfs_trace_tcon_ref_put_cancelled_close_fid, "PUT Cn-Fid") \
+ EM(netfs_trace_tcon_ref_put_cancelled_mid, "PUT Cn-Mid") \
++ EM(netfs_trace_tcon_ref_put_close_defer_files, "PUT Cl-Def") \
+ EM(netfs_trace_tcon_ref_put_mnt_ctx, "PUT MntCtx") \
+ EM(netfs_trace_tcon_ref_put_dfs_refer, "PUT DfsRfr") \
+ EM(netfs_trace_tcon_ref_put_reconnect_server, "PUT Reconn") \
--- /dev/null
+From stable+bounces-227188-greg=kroah.com@vger.kernel.org Thu Mar 19 01:41:29 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Mar 2026 20:41:19 -0400
+Subject: crypto: atmel-sha204a - Fix OOM ->tfm_count leak
+To: stable@vger.kernel.org
+Cc: Thorsten Blum <thorsten.blum@linux.dev>, Herbert Xu <herbert@gondor.apana.org.au>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260319004119.1852584-1-sashal@kernel.org>
+
+From: Thorsten Blum <thorsten.blum@linux.dev>
+
+[ Upstream commit d240b079a37e90af03fd7dfec94930eb6c83936e ]
+
+If memory allocation fails, decrement ->tfm_count to avoid blocking
+future reads.
+
+Cc: stable@vger.kernel.org
+Fixes: da001fb651b0 ("crypto: atmel-i2c - add support for SHA204A random number generator")
+Signed-off-by: Thorsten Blum <thorsten.blum@linux.dev>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+[ adapted kmalloc_obj() macro to kmalloc(sizeof()) ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/crypto/atmel-sha204a.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/crypto/atmel-sha204a.c
++++ b/drivers/crypto/atmel-sha204a.c
+@@ -52,9 +52,10 @@ static int atmel_sha204a_rng_read_nonblo
+ rng->priv = 0;
+ } else {
+ work_data = kmalloc(sizeof(*work_data), GFP_ATOMIC);
+- if (!work_data)
++ if (!work_data) {
++ atomic_dec(&i2c_priv->tfm_count);
+ return -ENOMEM;
+-
++ }
+ work_data->ctx = i2c_priv;
+ work_data->client = i2c_priv->client;
+
--- /dev/null
+From bb5f1cd10101c2567bff4d0e760b74aee7c42f44 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Jouni=20H=C3=B6gander?= <jouni.hogander@intel.com>
+Date: Wed, 4 Mar 2026 13:30:10 +0200
+Subject: drm/i915/dsc: Add helper for writing DSC Selective Update ET parameters
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jouni Högander <jouni.hogander@intel.com>
+
+commit bb5f1cd10101c2567bff4d0e760b74aee7c42f44 upstream.
+
+There are slice row per frame and pic height configuration in DSC Selective
+Update Parameter Set 1 register. Add helper for configuring these.
+
+v2:
+ - Add WARN_ON_ONCE if vdsc instances per pipe > 2
+ - instead of checking vdsc instances per pipe being > 1 check == 2
+
+Bspec: 71709
+Signed-off-by: Jouni Högander <jouni.hogander@intel.com>
+Reviewed-by: Ankit Nautiyal <ankit.k.nautiyal@intel.com>
+Link: https://patch.msgid.link/20260304113011.626542-4-jouni.hogander@intel.com
+(cherry picked from commit c8698d61aeb3f70fe33761ee9d3d0e131b5bc2eb)
+Signed-off-by: Tvrtko Ursulin <tursulin@ursulin.net>
+[tursulin: fixup forward declaration conflict]
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/display/intel_vdsc.c | 23 +++++++++++++++++++++++
+ drivers/gpu/drm/i915/display/intel_vdsc.h | 3 +++
+ 2 files changed, 26 insertions(+)
+
+--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
++++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
+@@ -751,6 +751,29 @@ void intel_dsc_dp_pps_write(struct intel
+ sizeof(dp_dsc_pps_sdp));
+ }
+
++void intel_dsc_su_et_parameters_configure(struct intel_dsb *dsb, struct intel_encoder *encoder,
++ const struct intel_crtc_state *crtc_state, int su_lines)
++{
++ struct intel_display *display = to_intel_display(crtc_state);
++ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
++ const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
++ enum pipe pipe = crtc->pipe;
++ int vdsc_instances_per_pipe = intel_dsc_get_vdsc_per_pipe(crtc_state);
++ int slice_row_per_frame = su_lines / vdsc_cfg->slice_height;
++ u32 val;
++
++ drm_WARN_ON_ONCE(display->drm, su_lines % vdsc_cfg->slice_height);
++ drm_WARN_ON_ONCE(display->drm, vdsc_instances_per_pipe > 2);
++
++ val = DSC_SUPS0_SU_SLICE_ROW_PER_FRAME(slice_row_per_frame);
++ val |= DSC_SUPS0_SU_PIC_HEIGHT(su_lines);
++
++ intel_de_write_dsb(display, dsb, LNL_DSC0_SU_PARAMETER_SET_0(pipe), val);
++
++ if (vdsc_instances_per_pipe == 2)
++ intel_de_write_dsb(display, dsb, LNL_DSC1_SU_PARAMETER_SET_0(pipe), val);
++}
++
+ static i915_reg_t dss_ctl1_reg(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
+ {
+ return is_pipe_dsc(crtc, cpu_transcoder) ?
+--- a/drivers/gpu/drm/i915/display/intel_vdsc.h
++++ b/drivers/gpu/drm/i915/display/intel_vdsc.h
+@@ -13,6 +13,7 @@ struct drm_printer;
+ enum transcoder;
+ struct intel_crtc;
+ struct intel_crtc_state;
++struct intel_dsb;
+ struct intel_encoder;
+
+ bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state);
+@@ -29,6 +30,8 @@ void intel_dsc_dsi_pps_write(struct inte
+ const struct intel_crtc_state *crtc_state);
+ void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
++void intel_dsc_su_et_parameters_configure(struct intel_dsb *dsb, struct intel_encoder *encoder,
++ const struct intel_crtc_state *crtc_state, int su_lines);
+ void intel_vdsc_state_dump(struct drm_printer *p, int indent,
+ const struct intel_crtc_state *crtc_state);
+ int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state);
--- /dev/null
+From c2c79c6d5b939ae8a42ddb884f576bddae685672 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Jouni=20H=C3=B6gander?= <jouni.hogander@intel.com>
+Date: Wed, 4 Mar 2026 13:30:09 +0200
+Subject: drm/i915/dsc: Add Selective Update register definitions
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jouni Högander <jouni.hogander@intel.com>
+
+commit c2c79c6d5b939ae8a42ddb884f576bddae685672 upstream.
+
+Add definitions for DSC_SU_PARAMETER_SET_0_DSC0 and
+DSC_SU_PARAMETER_SET_0_DSC1 registers. These are for Selective Update Early
+Transport configuration.
+
+Bspec: 71709
+Signed-off-by: Jouni Högander <jouni.hogander@intel.com>
+Reviewed-by: Ankit Nautiyal <ankit.k.nautiyal@intel.com>
+Link: https://patch.msgid.link/20260304113011.626542-3-jouni.hogander@intel.com
+(cherry picked from commit 24f96d903daf3dcf8fafe84d3d22b80ef47ba493)
+Signed-off-by: Tvrtko Ursulin <tursulin@ursulin.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/display/intel_vdsc_regs.h | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
++++ b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
+@@ -196,6 +196,18 @@
+ #define DSC_PPS18_NSL_BPG_OFFSET(offset) REG_FIELD_PREP(DSC_PPS18_NSL_BPG_OFFSET_MASK, offset)
+ #define DSC_PPS18_SL_OFFSET_ADJ(offset) REG_FIELD_PREP(DSC_PPS18_SL_OFFSET_ADJ_MASK, offset)
+
++#define _LNL_DSC0_SU_PARAMETER_SET_0_PA 0x78064
++#define _LNL_DSC1_SU_PARAMETER_SET_0_PA 0x78164
++#define _LNL_DSC0_SU_PARAMETER_SET_0_PB 0x78264
++#define _LNL_DSC1_SU_PARAMETER_SET_0_PB 0x78364
++#define LNL_DSC0_SU_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe), _LNL_DSC0_SU_PARAMETER_SET_0_PA, _LNL_DSC0_SU_PARAMETER_SET_0_PB)
++#define LNL_DSC1_SU_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe), _LNL_DSC1_SU_PARAMETER_SET_0_PA, _LNL_DSC1_SU_PARAMETER_SET_0_PB)
++
++#define DSC_SUPS0_SU_SLICE_ROW_PER_FRAME_MASK REG_GENMASK(31, 20)
++#define DSC_SUPS0_SU_SLICE_ROW_PER_FRAME(rows) REG_FIELD_PREP(DSC_SUPS0_SU_SLICE_ROW_PER_FRAME_MASK, (rows))
++#define DSC_SUPS0_SU_PIC_HEIGHT_MASK REG_GENMASK(15, 0)
++#define DSC_SUPS0_SU_PIC_HEIGHT(h) REG_FIELD_PREP(DSC_SUPS0_SU_PIC_HEIGHT_MASK, (h))
++
+ /* Icelake Rate Control Buffer Threshold Registers */
+ #define DSCA_RC_BUF_THRESH_0 _MMIO(0x6B230)
+ #define DSCA_RC_BUF_THRESH_0_UDW _MMIO(0x6B230 + 4)
--- /dev/null
+From 5923a6e0459fdd3edac4ad5abccb24d777d8f1b6 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Jouni=20H=C3=B6gander?= <jouni.hogander@intel.com>
+Date: Wed, 4 Mar 2026 13:30:11 +0200
+Subject: drm/i915/psr: Write DSC parameters on Selective Update in ET mode
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jouni Högander <jouni.hogander@intel.com>
+
+commit 5923a6e0459fdd3edac4ad5abccb24d777d8f1b6 upstream.
+
+There are slice row per frame and pic height parameters in DSC that needs
+to be configured on every Selective Update in Early Transport mode. Use
+helper provided by DSC code to configure these on Selective Update when in
+Early Transport mode. Also fill crtc_state->psr2_su_area with full frame
+area on full frame update for DSC calculation.
+
+v2: move psr2_su_area under skip_sel_fetch_set_loop label
+
+Bspec: 68927, 71709
+Fixes: 467e4e061c44 ("drm/i915/psr: Enable psr2 early transport as possible")
+Cc: <stable@vger.kernel.org> # v6.9+
+Signed-off-by: Jouni Högander <jouni.hogander@intel.com>
+Reviewed-by: Ankit Nautiyal <ankit.k.nautiyal@intel.com>
+Link: https://patch.msgid.link/20260304113011.626542-5-jouni.hogander@intel.com
+(cherry picked from commit 3140af2fab505a4cd47d516284529bf1585628be)
+Signed-off-by: Tvrtko Ursulin <tursulin@ursulin.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/display/intel_psr.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+--- a/drivers/gpu/drm/i915/display/intel_psr.c
++++ b/drivers/gpu/drm/i915/display/intel_psr.c
+@@ -50,6 +50,7 @@
+ #include "intel_snps_phy.h"
+ #include "intel_step.h"
+ #include "intel_vblank.h"
++#include "intel_vdsc.h"
+ #include "intel_vrr.h"
+ #include "skl_universal_plane.h"
+
+@@ -2489,6 +2490,12 @@ void intel_psr2_program_trans_man_trk_ct
+
+ intel_de_write_dsb(display, dsb, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
+ crtc_state->pipe_srcsz_early_tpt);
++
++ if (!crtc_state->dsc.compression_enable)
++ return;
++
++ intel_dsc_su_et_parameters_configure(dsb, encoder, crtc_state,
++ drm_rect_height(&crtc_state->psr2_su_area));
+ }
+
+ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
+@@ -2909,6 +2916,10 @@ int intel_psr2_sel_fetch_update(struct i
+ }
+
+ skip_sel_fetch_set_loop:
++ if (full_update)
++ clip_area_update(&crtc_state->psr2_su_area, &crtc_state->pipe_src,
++ &crtc_state->pipe_src);
++
+ psr2_man_trk_ctl_calc(crtc_state, full_update);
+ crtc_state->pipe_srcsz_early_tpt =
+ psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update);
--- /dev/null
+From stable+bounces-227119-greg=kroah.com@vger.kernel.org Wed Mar 18 18:21:44 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Mar 2026 12:14:17 -0400
+Subject: drm/i915/vrr: Configure VRR timings after enabling TRANS_DDI_FUNC_CTL
+To: stable@vger.kernel.org
+Cc: "Ville Syrjälä" <ville.syrjala@linux.intel.com>, "Ankit Nautiyal" <ankit.k.nautiyal@intel.com>, "Benjamin Tissoires" <bentiss@kernel.org>, "Tvrtko Ursulin" <tursulin@ursulin.net>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20260318161417.911716-2-sashal@kernel.org>
+
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+
+[ Upstream commit 237aab549676288d9255bb8dcc284738e56eaa31 ]
+
+Apparently ICL may hang with an MCE if we write TRANS_VRR_VMAX/FLIPLINE
+before enabling TRANS_DDI_FUNC_CTL.
+
+Personally I was only able to reproduce a hang (on an Dell XPS 7390
+2-in-1) with an external display connected via a dock using a dodgy
+type-C cable that made the link training fail. After the failed
+link training the machine would hang. TGL seemed immune to the
+problem for whatever reason.
+
+BSpec does tell us to configure VRR after enabling TRANS_DDI_FUNC_CTL
+as well. The DMC firmware also does the VRR restore in two stages:
+- first stage seems to be unconditional and includes TRANS_VRR_CTL
+ and a few other VRR registers, among other things
+- second stage is conditional on the DDI being enabled,
+ and includes TRANS_DDI_FUNC_CTL and TRANS_VRR_VMAX/VMIN/FLIPLINE,
+ among other things
+
+So let's reorder the steps to match to avoid the hang, and
+toss in an extra WARN to make sure we don't screw this up later.
+
+BSpec: 22243
+Cc: stable@vger.kernel.org
+Cc: Ankit Nautiyal <ankit.k.nautiyal@intel.com>
+Reported-by: Benjamin Tissoires <bentiss@kernel.org>
+Closes: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/15777
+Tested-by: Benjamin Tissoires <bentiss@kernel.org>
+Fixes: dda7dcd9da73 ("drm/i915/vrr: Use fixed timings for platforms that support VRR")
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patch.msgid.link/20260303095414.4331-1-ville.syrjala@linux.intel.com
+Reviewed-by: Ankit Nautiyal <ankit.k.nautiyal@intel.com>
+(cherry picked from commit 93f3a267c3dd4d811b224bb9e179a10d81456a74)
+Signed-off-by: Tvrtko Ursulin <tursulin@ursulin.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/display/intel_display.c | 1 -
+ drivers/gpu/drm/i915/display/intel_vrr.c | 14 ++++++++++++++
+ 2 files changed, 14 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -1642,7 +1642,6 @@ static void hsw_configure_cpu_transcoder
+ }
+
+ intel_set_transcoder_timings(crtc_state);
+- intel_vrr_set_transcoder_timings(crtc_state);
+
+ if (cpu_transcoder != TRANSCODER_EDP)
+ intel_de_write(display, TRANS_MULT(display, cpu_transcoder),
+--- a/drivers/gpu/drm/i915/display/intel_vrr.c
++++ b/drivers/gpu/drm/i915/display/intel_vrr.c
+@@ -465,6 +465,18 @@ void intel_vrr_set_transcoder_timings(co
+ return;
+
+ /*
++ * Bspec says:
++ * "(note: VRR needs to be programmed after
++ * TRANS_DDI_FUNC_CTL and before TRANS_CONF)."
++ *
++ * In practice it turns out that ICL can hang if
++ * TRANS_VRR_VMAX/FLIPLINE are written before
++ * enabling TRANS_DDI_FUNC_CTL.
++ */
++ drm_WARN_ON(display->drm,
++ !(intel_de_read(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE));
++
++ /*
+ * This bit seems to have two meanings depending on the platform:
+ * TGL: generate VRR "safe window" for DSB vblank waits
+ * ADL/DG2: make TRANS_SET_CONTEXT_LATENCY effective with VRR
+@@ -658,6 +670,8 @@ void intel_vrr_transcoder_enable(const s
+ if (!HAS_VRR(display))
+ return;
+
++ intel_vrr_set_transcoder_timings(crtc_state);
++
+ if (!intel_vrr_possible(crtc_state))
+ return;
+
--- /dev/null
+From stable+bounces-227118-greg=kroah.com@vger.kernel.org Wed Mar 18 17:39:06 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Mar 2026 12:14:16 -0400
+Subject: drm/i915/vrr: Move HAS_VRR() check into intel_vrr_set_transcoder_timings()
+To: stable@vger.kernel.org
+Cc: "Ville Syrjälä" <ville.syrjala@linux.intel.com>, "Ankit Nautiyal" <ankit.k.nautiyal@intel.com>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20260318161417.911716-1-sashal@kernel.org>
+
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+
+[ Upstream commit 78ea8eb5b6235b3ef68fa0fb8ffe0b3b490baf38 ]
+
+Reduce the clutter in hsw_configure_cpu_transcoder() a bit by moving
+the HAS_VRR() check into intel_vrr_set_transcoder_timings().
+
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20251020185038.4272-18-ville.syrjala@linux.intel.com
+Reviewed-by: Ankit Nautiyal <ankit.k.nautiyal@intel.com>
+Stable-dep-of: 237aab549676 ("drm/i915/vrr: Configure VRR timings after enabling TRANS_DDI_FUNC_CTL")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/display/intel_display.c | 3 +--
+ drivers/gpu/drm/i915/display/intel_vrr.c | 3 +++
+ 2 files changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -1642,8 +1642,7 @@ static void hsw_configure_cpu_transcoder
+ }
+
+ intel_set_transcoder_timings(crtc_state);
+- if (HAS_VRR(display))
+- intel_vrr_set_transcoder_timings(crtc_state);
++ intel_vrr_set_transcoder_timings(crtc_state);
+
+ if (cpu_transcoder != TRANSCODER_EDP)
+ intel_de_write(display, TRANS_MULT(display, cpu_transcoder),
+--- a/drivers/gpu/drm/i915/display/intel_vrr.c
++++ b/drivers/gpu/drm/i915/display/intel_vrr.c
+@@ -461,6 +461,9 @@ void intel_vrr_set_transcoder_timings(co
+ struct intel_display *display = to_intel_display(crtc_state);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
++ if (!HAS_VRR(display))
++ return;
++
+ /*
+ * This bit seems to have two meanings depending on the platform:
+ * TGL: generate VRR "safe window" for DSB vblank waits
--- /dev/null
+From stable+bounces-227158-greg=kroah.com@vger.kernel.org Wed Mar 18 21:49:08 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Mar 2026 16:48:15 -0400
+Subject: drm/xe: Fix memory leak in xe_vm_madvise_ioctl
+To: stable@vger.kernel.org
+Cc: Varun Gupta <varun.gupta@intel.com>, Shuicheng Lin <shuicheng.lin@intel.com>, Matthew Brost <matthew.brost@intel.com>, Tejas Upadhyay <tejas.upadhyay@intel.com>, Rodrigo Vivi <rodrigo.vivi@intel.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260318204815.1144821-1-sashal@kernel.org>
+
+From: Varun Gupta <varun.gupta@intel.com>
+
+[ Upstream commit 0cfe9c4838f1147713f6b5c02094cd4dc0c598fa ]
+
+When check_bo_args_are_sane() validation fails, jump to the new
+free_vmas cleanup label to properly free the allocated resources.
+This ensures proper cleanup in this error path.
+
+Fixes: 293032eec4ba ("drm/xe/bo: Update atomic_access attribute on madvise")
+Cc: stable@vger.kernel.org # v6.18+
+Reviewed-by: Shuicheng Lin <shuicheng.lin@intel.com>
+Signed-off-by: Varun Gupta <varun.gupta@intel.com>
+Reviewed-by: Matthew Brost <matthew.brost@intel.com>
+Link: https://patch.msgid.link/20260223175145.1532801-1-varun.gupta@intel.com
+Signed-off-by: Tejas Upadhyay <tejas.upadhyay@intel.com>
+(cherry picked from commit 29bd06faf727a4b76663e4be0f7d770e2d2a7965)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+[ changed old goto target from `madv_fini` to `unlock_vm` ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_vm_madvise.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/xe/xe_vm_madvise.c
++++ b/drivers/gpu/drm/xe/xe_vm_madvise.c
+@@ -390,7 +390,7 @@ int xe_vm_madvise_ioctl(struct drm_devic
+ madvise_range.num_vmas,
+ args->atomic.val)) {
+ err = -EINVAL;
+- goto unlock_vm;
++ goto free_vmas;
+ }
+ }
+
+@@ -426,6 +426,7 @@ int xe_vm_madvise_ioctl(struct drm_devic
+ err_fini:
+ if (madvise_range.has_bo_vmas)
+ drm_exec_fini(&exec);
++free_vmas:
+ kfree(madvise_range.vmas);
+ madvise_range.vmas = NULL;
+ unlock_vm:
--- /dev/null
+From stable+bounces-226937-greg=kroah.com@vger.kernel.org Wed Mar 18 01:46:23 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Mar 2026 20:46:14 -0400
+Subject: drm/xe/sync: Fix user fence leak on alloc failure
+To: stable@vger.kernel.org
+Cc: Shuicheng Lin <shuicheng.lin@intel.com>, Matthew Brost <matthew.brost@intel.com>, Rodrigo Vivi <rodrigo.vivi@intel.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260318004614.407161-2-sashal@kernel.org>
+
+From: Shuicheng Lin <shuicheng.lin@intel.com>
+
+[ Upstream commit 0879c3f04f67e2a1677c25dcc24669ce21eb6a6c ]
+
+When dma_fence_chain_alloc() fails, properly release the user fence
+reference to prevent a memory leak.
+
+Fixes: 0995c2fc39b0 ("drm/xe: Enforce correct user fence signaling order using")
+Cc: Matthew Brost <matthew.brost@intel.com>
+Signed-off-by: Shuicheng Lin <shuicheng.lin@intel.com>
+Reviewed-by: Matthew Brost <matthew.brost@intel.com>
+Signed-off-by: Matthew Brost <matthew.brost@intel.com>
+Link: https://patch.msgid.link/20260219233516.2938172-6-shuicheng.lin@intel.com
+(cherry picked from commit a5d5634cde48a9fcd68c8504aa07f89f175074a0)
+Cc: stable@vger.kernel.org
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_sync.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/xe/xe_sync.c
++++ b/drivers/gpu/drm/xe/xe_sync.c
+@@ -206,8 +206,10 @@ int xe_sync_entry_parse(struct xe_device
+ if (XE_IOCTL_DBG(xe, IS_ERR(sync->ufence)))
+ return PTR_ERR(sync->ufence);
+ sync->ufence_chain_fence = dma_fence_chain_alloc();
+- if (!sync->ufence_chain_fence)
+- return -ENOMEM;
++ if (!sync->ufence_chain_fence) {
++ err = -ENOMEM;
++ goto free_sync;
++ }
+ sync->ufence_syncobj = ufence_syncobj;
+ }
+
--- /dev/null
+From stable+bounces-227065-greg=kroah.com@vger.kernel.org Wed Mar 18 14:16:47 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Mar 2026 09:13:53 -0400
+Subject: fgraph: Fix thresh_return nosleeptime double-adjust
+To: stable@vger.kernel.org
+Cc: Shengming Hu <hu.shengming@zte.com.cn>, "Masami Hiramatsu (Google)" <mhiramat@kernel.org>, "Steven Rostedt (Google)" <rostedt@goodmis.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260318131353.723405-1-sashal@kernel.org>
+
+From: Shengming Hu <hu.shengming@zte.com.cn>
+
+[ Upstream commit b96d0c59cdbb2a22b2545f6f3d5c6276b05761dd ]
+
+trace_graph_thresh_return() called handle_nosleeptime() and then delegated
+to trace_graph_return(), which calls handle_nosleeptime() again. When
+sleep-time accounting is disabled this double-adjusts calltime and can
+produce bogus durations (including underflow).
+
+Fix this by computing rettime once, applying handle_nosleeptime() only
+once, using the adjusted calltime for threshold comparison, and writing
+the return event directly via __trace_graph_return() when the threshold is
+met.
+
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/20260221113314048jE4VRwIyZEALiYByGK0My@zte.com.cn
+Fixes: 3c9880f3ab52b ("ftrace: Use a running sleeptime instead of saving on shadow stack")
+Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Signed-off-by: Shengming Hu <hu.shengming@zte.com.cn>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace_functions_graph.c | 15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+--- a/kernel/trace/trace_functions_graph.c
++++ b/kernel/trace/trace_functions_graph.c
+@@ -378,9 +378,14 @@ static void trace_graph_thresh_return(st
+ struct fgraph_ops *gops,
+ struct ftrace_regs *fregs)
+ {
++ struct trace_array *tr = gops->private;
+ struct fgraph_times *ftimes;
++ unsigned int trace_ctx;
++ u64 calltime, rettime;
+ int size;
+
++ rettime = trace_clock_local();
++
+ ftrace_graph_addr_finish(gops, trace);
+
+ if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
+@@ -394,11 +399,13 @@ static void trace_graph_thresh_return(st
+
+ handle_nosleeptime(trace, ftimes, size);
+
+- if (tracing_thresh &&
+- (trace_clock_local() - ftimes->calltime < tracing_thresh))
++ calltime = ftimes->calltime;
++
++ if (tracing_thresh && (rettime - calltime < tracing_thresh))
+ return;
+- else
+- trace_graph_return(trace, gops, fregs);
++
++ trace_ctx = tracing_gen_ctx();
++ __trace_graph_return(tr, trace, trace_ctx, calltime, rettime);
+ }
+
+ static struct fgraph_ops funcgraph_ops = {
--- /dev/null
+From stable+bounces-226523-greg=kroah.com@vger.kernel.org Tue Mar 17 18:30:45 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Mar 2026 13:04:31 -0400
+Subject: ipmi: Consolidate the run to completion checking for xmit msgs lock
+To: stable@vger.kernel.org
+Cc: Corey Minyard <corey@minyard.net>, Breno Leitao <leitao@debian.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260317170432.230720-1-sashal@kernel.org>
+
+From: Corey Minyard <corey@minyard.net>
+
+[ Upstream commit 1d90e6c1a56f6ab83e5c9d30ded19e7ac8155713 ]
+
+It made things hard to read, move the check to a function.
+
+Signed-off-by: Corey Minyard <corey@minyard.net>
+Reviewed-by: Breno Leitao <leitao@debian.org>
+Stable-dep-of: 62cd145453d5 ("ipmi:msghandler: Handle error returns from the SMI sender")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/char/ipmi/ipmi_msghandler.c | 42 ++++++++++++++++++++----------------
+ 1 file changed, 24 insertions(+), 18 deletions(-)
+
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -602,6 +602,22 @@ static int __ipmi_bmc_register(struct ip
+ static int __scan_channels(struct ipmi_smi *intf,
+ struct ipmi_device_id *id, bool rescan);
+
++static void ipmi_lock_xmit_msgs(struct ipmi_smi *intf, int run_to_completion,
++ unsigned long *flags)
++{
++ if (run_to_completion)
++ return;
++ spin_lock_irqsave(&intf->xmit_msgs_lock, *flags);
++}
++
++static void ipmi_unlock_xmit_msgs(struct ipmi_smi *intf, int run_to_completion,
++ unsigned long *flags)
++{
++ if (run_to_completion)
++ return;
++ spin_unlock_irqrestore(&intf->xmit_msgs_lock, *flags);
++}
++
+ static void free_ipmi_user(struct kref *ref)
+ {
+ struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
+@@ -1878,11 +1894,9 @@ static void smi_send(struct ipmi_smi *in
+ int run_to_completion = READ_ONCE(intf->run_to_completion);
+ unsigned long flags = 0;
+
+- if (!run_to_completion)
+- spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
++ ipmi_lock_xmit_msgs(intf, run_to_completion, &flags);
+ smi_msg = smi_add_send_msg(intf, smi_msg, priority);
+- if (!run_to_completion)
+- spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
++ ipmi_unlock_xmit_msgs(intf, run_to_completion, &flags);
+
+ if (smi_msg)
+ handlers->sender(intf->send_info, smi_msg);
+@@ -4822,8 +4836,7 @@ static void smi_work(struct work_struct
+ * message delivery.
+ */
+ restart:
+- if (!run_to_completion)
+- spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
++ ipmi_lock_xmit_msgs(intf, run_to_completion, &flags);
+ if (intf->curr_msg == NULL && !intf->in_shutdown) {
+ struct list_head *entry = NULL;
+
+@@ -4839,8 +4852,7 @@ restart:
+ intf->curr_msg = newmsg;
+ }
+ }
+- if (!run_to_completion)
+- spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
++ ipmi_unlock_xmit_msgs(intf, run_to_completion, &flags);
+
+ if (newmsg) {
+ cc = intf->handlers->sender(intf->send_info, newmsg);
+@@ -4848,13 +4860,9 @@ restart:
+ if (newmsg->recv_msg)
+ deliver_err_response(intf,
+ newmsg->recv_msg, cc);
+- if (!run_to_completion)
+- spin_lock_irqsave(&intf->xmit_msgs_lock,
+- flags);
++ ipmi_lock_xmit_msgs(intf, run_to_completion, &flags);
+ intf->curr_msg = NULL;
+- if (!run_to_completion)
+- spin_unlock_irqrestore(&intf->xmit_msgs_lock,
+- flags);
++ ipmi_unlock_xmit_msgs(intf, run_to_completion, &flags);
+ ipmi_free_smi_msg(newmsg);
+ newmsg = NULL;
+ goto restart;
+@@ -4924,16 +4932,14 @@ void ipmi_smi_msg_received(struct ipmi_s
+ spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
+ flags);
+
+- if (!run_to_completion)
+- spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
++ ipmi_lock_xmit_msgs(intf, run_to_completion, &flags);
+ /*
+ * We can get an asynchronous event or receive message in addition
+ * to commands we send.
+ */
+ if (msg == intf->curr_msg)
+ intf->curr_msg = NULL;
+- if (!run_to_completion)
+- spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
++ ipmi_unlock_xmit_msgs(intf, run_to_completion, &flags);
+
+ if (run_to_completion)
+ smi_work(&intf->smi_work);
--- /dev/null
+From stable+bounces-226524-greg=kroah.com@vger.kernel.org Tue Mar 17 18:06:14 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Mar 2026 13:04:32 -0400
+Subject: ipmi:msghandler: Handle error returns from the SMI sender
+To: stable@vger.kernel.org
+Cc: Corey Minyard <corey@minyard.net>, "Rafael J. Wysocki" <rafael@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260317170432.230720-2-sashal@kernel.org>
+
+From: Corey Minyard <corey@minyard.net>
+
+[ Upstream commit 62cd145453d577113f993efd025f258dd86aa183 ]
+
+It used to be, until recently, that the sender operation on the low
+level interfaces would not fail. That's not the case any more with
+recent changes.
+
+So check the return value from the sender operation, and propagate it
+back up from there and handle the errors in all places.
+
+Reported-by: Rafael J. Wysocki <rafael@kernel.org>
+Fixes: bc3a9d217755 ("ipmi:si: Gracefully handle if the BMC is non-functional")
+Cc: stable@vger.kernel.org # 4.18
+Signed-off-by: Corey Minyard <corey@minyard.net>
+Reviewed-by: Rafael J. Wysocki (Intel) <rafael@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/char/ipmi/ipmi_msghandler.c | 100 ++++++++++++++++++++++++------------
+ 1 file changed, 68 insertions(+), 32 deletions(-)
+
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -1887,19 +1887,32 @@ static struct ipmi_smi_msg *smi_add_send
+ return smi_msg;
+ }
+
+-static void smi_send(struct ipmi_smi *intf,
++static int smi_send(struct ipmi_smi *intf,
+ const struct ipmi_smi_handlers *handlers,
+ struct ipmi_smi_msg *smi_msg, int priority)
+ {
+ int run_to_completion = READ_ONCE(intf->run_to_completion);
+ unsigned long flags = 0;
++ int rv = 0;
+
+ ipmi_lock_xmit_msgs(intf, run_to_completion, &flags);
+ smi_msg = smi_add_send_msg(intf, smi_msg, priority);
+ ipmi_unlock_xmit_msgs(intf, run_to_completion, &flags);
+
+- if (smi_msg)
+- handlers->sender(intf->send_info, smi_msg);
++ if (smi_msg) {
++ rv = handlers->sender(intf->send_info, smi_msg);
++ if (rv) {
++ ipmi_lock_xmit_msgs(intf, run_to_completion, &flags);
++ intf->curr_msg = NULL;
++ ipmi_unlock_xmit_msgs(intf, run_to_completion, &flags);
++ /*
++ * Something may have been added to the transmit
++ * queue, so schedule a check for that.
++ */
++ queue_work(system_wq, &intf->smi_work);
++ }
++ }
++ return rv;
+ }
+
+ static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
+@@ -2312,6 +2325,7 @@ static int i_ipmi_request(struct ipmi_us
+ struct ipmi_recv_msg *recv_msg;
+ int run_to_completion = READ_ONCE(intf->run_to_completion);
+ int rv = 0;
++ bool in_seq_table = false;
+
+ if (supplied_recv) {
+ recv_msg = supplied_recv;
+@@ -2365,33 +2379,50 @@ static int i_ipmi_request(struct ipmi_us
+ rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg,
+ source_address, source_lun,
+ retries, retry_time_ms);
++ in_seq_table = true;
+ } else if (is_ipmb_direct_addr(addr)) {
+ rv = i_ipmi_req_ipmb_direct(intf, addr, msgid, msg, smi_msg,
+ recv_msg, source_lun);
+ } else if (is_lan_addr(addr)) {
+ rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg,
+ source_lun, retries, retry_time_ms);
++ in_seq_table = true;
+ } else {
+- /* Unknown address type. */
++ /* Unknown address type. */
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ rv = -EINVAL;
+ }
+
+- if (rv) {
+-out_err:
+- if (!supplied_smi)
+- ipmi_free_smi_msg(smi_msg);
+- if (!supplied_recv)
+- ipmi_free_recv_msg(recv_msg);
+- } else {
++ if (!rv) {
+ dev_dbg(intf->si_dev, "Send: %*ph\n",
+ smi_msg->data_size, smi_msg->data);
+
+- smi_send(intf, intf->handlers, smi_msg, priority);
++ rv = smi_send(intf, intf->handlers, smi_msg, priority);
++ if (rv != IPMI_CC_NO_ERROR)
++ /* smi_send() returns an IPMI err, return a Linux one. */
++ rv = -EIO;
++ if (rv && in_seq_table) {
++ /*
++ * If it's in the sequence table, it will be
++ * retried later, so ignore errors.
++ */
++ rv = 0;
++ /* But we need to fix the timeout. */
++ intf_start_seq_timer(intf, smi_msg->msgid);
++ ipmi_free_smi_msg(smi_msg);
++ smi_msg = NULL;
++ }
+ }
++out_err:
+ if (!run_to_completion)
+ mutex_unlock(&intf->users_mutex);
+
++ if (rv) {
++ if (!supplied_smi)
++ ipmi_free_smi_msg(smi_msg);
++ if (!supplied_recv)
++ ipmi_free_recv_msg(recv_msg);
++ }
+ return rv;
+ }
+
+@@ -3961,12 +3992,12 @@ static int handle_ipmb_get_msg_cmd(struc
+ dev_dbg(intf->si_dev, "Invalid command: %*ph\n",
+ msg->data_size, msg->data);
+
+- smi_send(intf, intf->handlers, msg, 0);
+- /*
+- * We used the message, so return the value that
+- * causes it to not be freed or queued.
+- */
+- rv = -1;
++ if (smi_send(intf, intf->handlers, msg, 0) == IPMI_CC_NO_ERROR)
++ /*
++ * We used the message, so return the value that
++ * causes it to not be freed or queued.
++ */
++ rv = -1;
+ } else if (!IS_ERR(recv_msg)) {
+ /* Extract the source address from the data. */
+ ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
+@@ -4040,12 +4071,12 @@ static int handle_ipmb_direct_rcv_cmd(st
+ msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE;
+ msg->data_size = 5;
+
+- smi_send(intf, intf->handlers, msg, 0);
+- /*
+- * We used the message, so return the value that
+- * causes it to not be freed or queued.
+- */
+- rv = -1;
++ if (smi_send(intf, intf->handlers, msg, 0) == IPMI_CC_NO_ERROR)
++ /*
++ * We used the message, so return the value that
++ * causes it to not be freed or queued.
++ */
++ rv = -1;
+ } else if (!IS_ERR(recv_msg)) {
+ /* Extract the source address from the data. */
+ daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
+@@ -4185,7 +4216,7 @@ static int handle_lan_get_msg_cmd(struct
+ struct ipmi_smi_msg *msg)
+ {
+ struct cmd_rcvr *rcvr;
+- int rv = 0;
++ int rv = 0; /* Free by default */
+ unsigned char netfn;
+ unsigned char cmd;
+ unsigned char chan;
+@@ -4238,12 +4269,12 @@ static int handle_lan_get_msg_cmd(struct
+ dev_dbg(intf->si_dev, "Invalid command: %*ph\n",
+ msg->data_size, msg->data);
+
+- smi_send(intf, intf->handlers, msg, 0);
+- /*
+- * We used the message, so return the value that
+- * causes it to not be freed or queued.
+- */
+- rv = -1;
++ if (smi_send(intf, intf->handlers, msg, 0) == IPMI_CC_NO_ERROR)
++ /*
++ * We used the message, so return the value that
++ * causes it to not be freed or queued.
++ */
++ rv = -1;
+ } else if (!IS_ERR(recv_msg)) {
+ /* Extract the source address from the data. */
+ lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
+@@ -5052,7 +5083,12 @@ static void check_msg_timeout(struct ipm
+ ipmi_inc_stat(intf,
+ retransmitted_ipmb_commands);
+
+- smi_send(intf, intf->handlers, smi_msg, 0);
++ /* If this fails we'll retry later or timeout. */
++ if (smi_send(intf, intf->handlers, smi_msg, 0) != IPMI_CC_NO_ERROR) {
++ /* But fix the timeout. */
++ intf_start_seq_timer(intf, smi_msg->msgid);
++ ipmi_free_smi_msg(smi_msg);
++ }
+ } else
+ ipmi_free_smi_msg(smi_msg);
+
--- /dev/null
+From stable+bounces-227067-greg=kroah.com@vger.kernel.org Wed Mar 18 14:17:32 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Mar 2026 09:14:44 -0400
+Subject: ksmbd: Don't log keys in SMB3 signing and encryption key generation
+To: stable@vger.kernel.org
+Cc: Thorsten Blum <thorsten.blum@linux.dev>, Namjae Jeon <linkinjeon@kernel.org>, Steve French <stfrench@microsoft.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260318131444.723940-1-sashal@kernel.org>
+
+From: Thorsten Blum <thorsten.blum@linux.dev>
+
+[ Upstream commit 441336115df26b966575de56daf7107ed474faed ]
+
+When KSMBD_DEBUG_AUTH logging is enabled, generate_smb3signingkey() and
+generate_smb3encryptionkey() log the session, signing, encryption, and
+decryption key bytes. Remove the logs to avoid exposing credentials.
+
+Fixes: e2f34481b24d ("cifsd: add server-side procedures for SMB3")
+Cc: stable@vger.kernel.org
+Signed-off-by: Thorsten Blum <thorsten.blum@linux.dev>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+[ Context ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/auth.c | 22 ++--------------------
+ 1 file changed, 2 insertions(+), 20 deletions(-)
+
+--- a/fs/smb/server/auth.c
++++ b/fs/smb/server/auth.c
+@@ -802,12 +802,8 @@ static int generate_smb3signingkey(struc
+ if (!(conn->dialect >= SMB30_PROT_ID && signing->binding))
+ memcpy(chann->smb3signingkey, key, SMB3_SIGN_KEY_SIZE);
+
+- ksmbd_debug(AUTH, "dumping generated AES signing keys\n");
++ ksmbd_debug(AUTH, "generated SMB3 signing key\n");
+ ksmbd_debug(AUTH, "Session Id %llu\n", sess->id);
+- ksmbd_debug(AUTH, "Session Key %*ph\n",
+- SMB2_NTLMV2_SESSKEY_SIZE, sess->sess_key);
+- ksmbd_debug(AUTH, "Signing Key %*ph\n",
+- SMB3_SIGN_KEY_SIZE, key);
+ return 0;
+ }
+
+@@ -871,23 +867,9 @@ static int generate_smb3encryptionkey(st
+ if (rc)
+ return rc;
+
+- ksmbd_debug(AUTH, "dumping generated AES encryption keys\n");
++ ksmbd_debug(AUTH, "generated SMB3 encryption/decryption keys\n");
+ ksmbd_debug(AUTH, "Cipher type %d\n", conn->cipher_type);
+ ksmbd_debug(AUTH, "Session Id %llu\n", sess->id);
+- ksmbd_debug(AUTH, "Session Key %*ph\n",
+- SMB2_NTLMV2_SESSKEY_SIZE, sess->sess_key);
+- if (conn->cipher_type == SMB2_ENCRYPTION_AES256_CCM ||
+- conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM) {
+- ksmbd_debug(AUTH, "ServerIn Key %*ph\n",
+- SMB3_GCM256_CRYPTKEY_SIZE, sess->smb3encryptionkey);
+- ksmbd_debug(AUTH, "ServerOut Key %*ph\n",
+- SMB3_GCM256_CRYPTKEY_SIZE, sess->smb3decryptionkey);
+- } else {
+- ksmbd_debug(AUTH, "ServerIn Key %*ph\n",
+- SMB3_GCM128_CRYPTKEY_SIZE, sess->smb3encryptionkey);
+- ksmbd_debug(AUTH, "ServerOut Key %*ph\n",
+- SMB3_GCM128_CRYPTKEY_SIZE, sess->smb3decryptionkey);
+- }
+ return 0;
+ }
+
--- /dev/null
+From 282343cf8a4a5a3603b1cb0e17a7083e4a593b03 Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Fri, 13 Mar 2026 10:00:58 +0900
+Subject: ksmbd: unset conn->binding on failed binding request
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit 282343cf8a4a5a3603b1cb0e17a7083e4a593b03 upstream.
+
+When a multichannel SMB2_SESSION_SETUP request with
+SMB2_SESSION_REQ_FLAG_BINDING fails ksmbd sets conn->binding = true
+but never clears it on the error path. This leaves the connection in
+a binding state where all subsequent ksmbd_session_lookup_all() calls
+fall back to the global sessions table. Fix this by clearing
+conn->binding = false in the error path.
+
+Cc: stable@vger.kernel.org
+Reported-by: Hyunwoo Kim <imv4bel@gmail.com>
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/smb2pdu.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -1957,6 +1957,7 @@ out_err:
+ }
+ }
+ smb2_set_err_rsp(work);
++ conn->binding = false;
+ } else {
+ unsigned int iov_len;
+
--- /dev/null
+From 3a64125730cabc34fccfbc230c2667c2e14f7308 Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Fri, 13 Mar 2026 10:01:29 +0900
+Subject: ksmbd: use volume UUID in FS_OBJECT_ID_INFORMATION
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit 3a64125730cabc34fccfbc230c2667c2e14f7308 upstream.
+
+Use sb->s_uuid for a proper volume identifier as the primary choice.
+For filesystems that do not provide a UUID, fall back to stfs.f_fsid
+obtained from vfs_statfs().
+
+Cc: stable@vger.kernel.org
+Reported-by: Hyunwoo Kim <imv4bel@gmail.com>
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/smb2pdu.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -5462,7 +5462,6 @@ static int smb2_get_info_filesystem(stru
+ struct smb2_query_info_req *req,
+ struct smb2_query_info_rsp *rsp)
+ {
+- struct ksmbd_session *sess = work->sess;
+ struct ksmbd_conn *conn = work->conn;
+ struct ksmbd_share_config *share = work->tcon->share_conf;
+ int fsinfoclass = 0;
+@@ -5592,10 +5591,11 @@ static int smb2_get_info_filesystem(stru
+
+ info = (struct object_id_info *)(rsp->Buffer);
+
+- if (!user_guest(sess->user))
+- memcpy(info->objid, user_passkey(sess->user), 16);
++ if (path.mnt->mnt_sb->s_uuid_len == 16)
++ memcpy(info->objid, path.mnt->mnt_sb->s_uuid.b,
++ path.mnt->mnt_sb->s_uuid_len);
+ else
+- memset(info->objid, 0, 16);
++ memcpy(info->objid, &stfs.f_fsid, sizeof(stfs.f_fsid));
+
+ info->extended_info.magic = cpu_to_le32(EXTENDED_INFO_MAGIC);
+ info->extended_info.version = cpu_to_le32(1);
--- /dev/null
+From stable+bounces-227545-greg=kroah.com@vger.kernel.org Fri Mar 20 14:53:06 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Mar 2026 09:52:59 -0400
+Subject: LoongArch: Check return values for set_memory_{rw,rox}
+To: stable@vger.kernel.org
+Cc: Tiezhu Yang <yangtiezhu@loongson.cn>, Huacai Chen <chenhuacai@loongson.cn>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260320135259.4165408-1-sashal@kernel.org>
+
+From: Tiezhu Yang <yangtiezhu@loongson.cn>
+
+[ Upstream commit 431ce839dad66d0d56fb604785452c6a57409f35 ]
+
+set_memory_rw() and set_memory_rox() may fail, so we should check the
+return values and return immediately in larch_insn_text_copy().
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+[ kept `stop_machine()` instead of `stop_machine_cpuslocked()` ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/kernel/inst.c | 15 +++++++++++++--
+ 1 file changed, 13 insertions(+), 2 deletions(-)
+
+--- a/arch/loongarch/kernel/inst.c
++++ b/arch/loongarch/kernel/inst.c
+@@ -260,6 +260,7 @@ static int text_copy_cb(void *data)
+ int larch_insn_text_copy(void *dst, void *src, size_t len)
+ {
+ int ret = 0;
++ int err = 0;
+ size_t start, end;
+ struct insn_copy copy = {
+ .dst = dst,
+@@ -271,9 +272,19 @@ int larch_insn_text_copy(void *dst, void
+ start = round_down((size_t)dst, PAGE_SIZE);
+ end = round_up((size_t)dst + len, PAGE_SIZE);
+
+- set_memory_rw(start, (end - start) / PAGE_SIZE);
++ err = set_memory_rw(start, (end - start) / PAGE_SIZE);
++ if (err) {
++ pr_info("%s: set_memory_rw() failed\n", __func__);
++ return err;
++ }
++
+ ret = stop_machine(text_copy_cb, &copy, cpu_online_mask);
+- set_memory_rox(start, (end - start) / PAGE_SIZE);
++
++ err = set_memory_rox(start, (end - start) / PAGE_SIZE);
++ if (err) {
++ pr_info("%s: set_memory_rox() failed\n", __func__);
++ return err;
++ }
+
+ return ret;
+ }
--- /dev/null
+From stable+bounces-227090-greg=kroah.com@vger.kernel.org Wed Mar 18 15:59:18 2026
+From: Zi Yan <ziy@nvidia.com>
+Date: Wed, 18 Mar 2026 10:55:25 -0400
+Subject: mm/huge_memory: fix a folio_split() race condition with folio_try_get()
+To: stable@vger.kernel.org
+Cc: Zi Yan <ziy@nvidia.com>, Bas van Dijk <bas@dfinity.org>, Lance Yang <lance.yang@linux.dev>, Lorenzo Stoakes <lorenzo.stoakes@oracle.com>, Wei Yang <richard.weiyang@gmail.com>, Baolin Wang <baolin.wang@linux.alibaba.com>, Barry Song <baohua@kernel.org>, David Hildenbrand <david@kernel.org>, Dev Jain <dev.jain@arm.com>, Hugh Dickins <hughd@google.com>, Liam Howlett <liam.howlett@oracle.com>, "Matthew Wilcox (Oracle)" <willy@infradead.org>, Nico Pache <npache@redhat.com>, Ryan Roberts <ryan.roberts@arm.com>, Andrew Morton <akpm@linux-foundation.org>
+Message-ID: <20260318145525.261086-1-ziy@nvidia.com>
+
+From: Zi Yan <ziy@nvidia.com>
+
+During a pagecache folio split, the values in the related xarray should
+not be changed from the original folio at xarray split time until all
+after-split folios are well formed and stored in the xarray. Current use
+of xas_try_split() in __split_unmapped_folio() lets some after-split
+folios show up at wrong indices in the xarray. When these misplaced
+after-split folios are unfrozen, before correct folios are stored via
+__xa_store(), and grabbed by folio_try_get(), they are returned to
+userspace at wrong file indices, causing data corruption. More detailed
+explanation is at the bottom.
+
+The reproducer is at: https://github.com/dfinity/thp-madv-remove-test
+It
+1. creates a memfd,
+2. forks,
+3. in the child process, maps the file with large folios (via shmem code
+ path) and reads the mapped file continuously with 16 threads,
+4. in the parent process, uses madvise(MADV_REMOVE) to punch holes in the
+ large folio.
+
+Data corruption can be observed without the fix. Basically, data from a
+wrong page->index is returned.
+
+Fix it by using the original folio in xas_try_split() calls, so that
+folio_try_get() can get the right after-split folios after the original
+folio is unfrozen.
+
+Uniform split, split_huge_page*(), is not affected, since it uses
+xas_split_alloc() and xas_split() only once and stores the original folio
+in the xarray. Change xas_split() used in uniform split branch to use the
+original folio to avoid confusion.
+
+Fixes below points to the commit introduces the code, but folio_split() is
+used in a later commit 7460b470a131f ("mm/truncate: use folio_split() in
+truncate operation").
+
+More details:
+
+For example, a folio f is split non-uniformly into f, f2, f3, f4 like
+below:
++----------------+---------+----+----+
+| f | f2 | f3 | f4 |
++----------------+---------+----+----+
+but the xarray would look like below after __split_unmapped_folio() is
+done:
++----------------+---------+----+----+
+| f | f2 | f3 | f3 |
++----------------+---------+----+----+
+
+After __split_unmapped_folio(), the code changes the xarray and unfreezes
+after-split folios:
+
+1. unfreezes f2, __xa_store(f2)
+2. unfreezes f3, __xa_store(f3)
+3. unfreezes f4, __xa_store(f4), which overwrites the second f3 to f4.
+4. unfreezes f.
+
+Meanwhile, a parallel filemap_get_entry() can read the second f3 from the
+xarray and use folio_try_get() on it at step 2 when f3 is unfrozen. Then,
+f3 is wrongly returned to user.
+
+After the fix, the xarray looks like below after __split_unmapped_folio():
++----------------+---------+----+----+
+| f | f | f | f |
++----------------+---------+----+----+
+so that the race window no longer exists.
+
+[ziy@nvidia.com: move comment, per David]
+ Link: https://lkml.kernel.org/r/5C9FA053-A4C6-4615-BE05-74E47A6462B3@nvidia.com
+Link: https://lkml.kernel.org/r/20260302203159.3208341-1-ziy@nvidia.com
+Fixes: 00527733d0dc ("mm/huge_memory: add two new (not yet used) functions for folio_split()")
+Signed-off-by: Zi Yan <ziy@nvidia.com>
+Reported-by: Bas van Dijk <bas@dfinity.org>
+Closes: https://lore.kernel.org/all/CAKNNEtw5_kZomhkugedKMPOG-sxs5Q5OLumWJdiWXv+C9Yct0w@mail.gmail.com/
+Tested-by: Lance Yang <lance.yang@linux.dev>
+Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
+Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
+Cc: Barry Song <baohua@kernel.org>
+Cc: David Hildenbrand <david@kernel.org>
+Cc: Dev Jain <dev.jain@arm.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Liam Howlett <liam.howlett@oracle.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Nico Pache <npache@redhat.com>
+Cc: Ryan Roberts <ryan.roberts@arm.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+(cherry picked from commit 577a1f495fd78d8fb61b67ac3d3b595b01f6fcb0)
+Signed-off-by: Zi Yan <ziy@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/huge_memory.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -3438,6 +3438,7 @@ static int __split_unmapped_folio(struct
+ {
+ int order = folio_order(folio);
+ int start_order = uniform_split ? new_order : order - 1;
++ struct folio *old_folio = folio;
+ bool stop_split = false;
+ struct folio *next;
+ int split_order;
+@@ -3468,12 +3469,16 @@ static int __split_unmapped_folio(struct
+ * uniform split has xas_split_alloc() called before
+ * irq is disabled to allocate enough memory, whereas
+ * non-uniform split can handle ENOMEM.
++ * Use the to-be-split folio, so that a parallel
++ * folio_try_get() waits on it until xarray is updated
++ * with after-split folios and the original one is
++ * unfrozen.
+ */
+ if (uniform_split)
+- xas_split(xas, folio, old_order);
++ xas_split(xas, old_folio, old_order);
+ else {
+ xas_set_order(xas, folio->index, split_order);
+- xas_try_split(xas, folio, old_order);
++ xas_try_split(xas, old_folio, old_order);
+ if (xas_error(xas)) {
+ ret = xas_error(xas);
+ stop_split = true;
--- /dev/null
+From stable+bounces-227546-greg=kroah.com@vger.kernel.org Fri Mar 20 14:53:38 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Mar 2026 09:53:01 -0400
+Subject: net: macb: Introduce gem_init_rx_ring()
+To: stable@vger.kernel.org
+Cc: Kevin Hao <haokexin@gmail.com>, Simon Horman <horms@kernel.org>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260320135302.4165461-1-sashal@kernel.org>
+
+From: Kevin Hao <haokexin@gmail.com>
+
+[ Upstream commit 1a7124ecd655bcaf1845197fe416aa25cff4c3ea ]
+
+Extract the initialization code for the GEM RX ring into a new function.
+This change will be utilized in a subsequent patch. No functional changes
+are introduced.
+
+Signed-off-by: Kevin Hao <haokexin@gmail.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20260312-macb-versal-v1-1-467647173fa4@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 718d0766ce4c ("net: macb: Reinitialize tx/rx queue pointer registers and rx ring during resume")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/cadence/macb_main.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -2614,6 +2614,14 @@ static void macb_init_tieoff(struct macb
+ desc->ctrl = 0;
+ }
+
++static void gem_init_rx_ring(struct macb_queue *queue)
++{
++ queue->rx_tail = 0;
++ queue->rx_prepared_head = 0;
++
++ gem_rx_refill(queue);
++}
++
+ static void gem_init_rings(struct macb *bp)
+ {
+ struct macb_queue *queue;
+@@ -2631,10 +2639,7 @@ static void gem_init_rings(struct macb *
+ queue->tx_head = 0;
+ queue->tx_tail = 0;
+
+- queue->rx_tail = 0;
+- queue->rx_prepared_head = 0;
+-
+- gem_rx_refill(queue);
++ gem_init_rx_ring(queue);
+ }
+
+ macb_init_tieoff(bp);
--- /dev/null
+From stable+bounces-227547-greg=kroah.com@vger.kernel.org Fri Mar 20 14:53:09 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Mar 2026 09:53:02 -0400
+Subject: net: macb: Reinitialize tx/rx queue pointer registers and rx ring during resume
+To: stable@vger.kernel.org
+Cc: Kevin Hao <haokexin@gmail.com>, Quanyang Wang <quanyang.wang@windriver.com>, Simon Horman <horms@kernel.org>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260320135302.4165461-2-sashal@kernel.org>
+
+From: Kevin Hao <haokexin@gmail.com>
+
+[ Upstream commit 718d0766ce4c7634ce62fa78b526ea7263487edd ]
+
+On certain platforms, such as AMD Versal boards, the tx/rx queue pointer
+registers are cleared after suspend, and the rx queue pointer register
+is also disabled during suspend if WOL is enabled. Previously, we assumed
+that these registers would be restored by macb_mac_link_up(). However,
+in commit bf9cf80cab81, macb_init_buffers() was moved from
+macb_mac_link_up() to macb_open(). Therefore, we should call
+macb_init_buffers() to reinitialize the tx/rx queue pointer registers
+during resume.
+
+Due to the reset of these two registers, we also need to adjust the
+tx/rx rings accordingly. The tx ring will be handled by
+gem_shuffle_tx_rings() in macb_mac_link_up(), so we only need to
+initialize the rx ring here.
+
+Fixes: bf9cf80cab81 ("net: macb: Fix tx/rx malfunction after phy link down and up")
+Reported-by: Quanyang Wang <quanyang.wang@windriver.com>
+Signed-off-by: Kevin Hao <haokexin@gmail.com>
+Tested-by: Quanyang Wang <quanyang.wang@windriver.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20260312-macb-versal-v1-2-467647173fa4@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/cadence/macb_main.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -5856,8 +5856,18 @@ static int __maybe_unused macb_resume(st
+ rtnl_unlock();
+ }
+
++ if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
++ macb_init_buffers(bp);
++
+ for (q = 0, queue = bp->queues; q < bp->num_queues;
+ ++q, ++queue) {
++ if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) {
++ if (macb_is_gem(bp))
++ gem_init_rx_ring(queue);
++ else
++ macb_init_rx_ring(queue);
++ }
++
+ napi_enable(&queue->napi_rx);
+ napi_enable(&queue->napi_tx);
+ }
--- /dev/null
+From stable+bounces-227093-greg=kroah.com@vger.kernel.org Wed Mar 18 16:38:44 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Mar 2026 10:59:56 -0400
+Subject: net: macb: Shuffle the tx ring before enabling tx
+To: stable@vger.kernel.org
+Cc: Kevin Hao <haokexin@gmail.com>, Quanyang Wang <quanyang.wang@windriver.com>, Simon Horman <horms@kernel.org>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260318145956.858944-2-sashal@kernel.org>
+
+From: Kevin Hao <haokexin@gmail.com>
+
+[ Upstream commit 881a0263d502e1a93ebc13a78254e9ad19520232 ]
+
+Quanyang observed that when using an NFS rootfs on an AMD ZynqMp board,
+the rootfs may take an extended time to recover after a suspend.
+Upon investigation, it was determined that the issue originates from a
+problem in the macb driver.
+
+According to the Zynq UltraScale TRM [1], when transmit is disabled,
+the transmit buffer queue pointer resets to point to the address
+specified by the transmit buffer queue base address register.
+
+In the current implementation, the code merely resets `queue->tx_head`
+and `queue->tx_tail` to '0'. This approach presents several issues:
+
+- Packets already queued in the tx ring are silently lost,
+ leading to memory leaks since the associated skbs cannot be released.
+
+- Concurrent write access to `queue->tx_head` and `queue->tx_tail` may
+ occur from `macb_tx_poll()` or `macb_start_xmit()` when these values
+ are reset to '0'.
+
+- The transmission may become stuck on a packet that has already been sent
+ out, with its 'TX_USED' bit set, but has not yet been processed. However,
+ due to the manipulation of 'queue->tx_head' and 'queue->tx_tail',
+ `macb_tx_poll()` incorrectly assumes there are no packets to handle
+ because `queue->tx_head == queue->tx_tail`. This issue is only resolved
+ when a new packet is placed at this position. This is the root cause of
+ the prolonged recovery time observed for the NFS root filesystem.
+
+To resolve this issue, shuffle the tx ring and tx skb array so that
+the first unsent packet is positioned at the start of the tx ring.
+Additionally, ensure that updates to `queue->tx_head` and
+`queue->tx_tail` are properly protected with the appropriate lock.
+
+[1] https://docs.amd.com/v/u/en-US/ug1085-zynq-ultrascale-trm
+
+Fixes: bf9cf80cab81 ("net: macb: Fix tx/rx malfunction after phy link down and up")
+Reported-by: Quanyang Wang <quanyang.wang@windriver.com>
+Signed-off-by: Kevin Hao <haokexin@gmail.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20260307-zynqmp-v2-1-6ef98a70e1d0@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/cadence/macb_main.c | 98 ++++++++++++++++++++++++++++++-
+ 1 file changed, 95 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -37,6 +37,7 @@
+ #include <linux/tcp.h>
+ #include <linux/types.h>
+ #include <linux/udp.h>
++#include <linux/gcd.h>
+ #include <net/pkt_sched.h>
+ #include "macb.h"
+
+@@ -705,6 +706,97 @@ static void macb_mac_link_down(struct ph
+ netif_tx_stop_all_queues(ndev);
+ }
+
++/* Use juggling algorithm to left rotate tx ring and tx skb array */
++static void gem_shuffle_tx_one_ring(struct macb_queue *queue)
++{
++ unsigned int head, tail, count, ring_size, desc_size;
++ struct macb_tx_skb tx_skb, *skb_curr, *skb_next;
++ struct macb_dma_desc *desc_curr, *desc_next;
++ unsigned int i, cycles, shift, curr, next;
++ struct macb *bp = queue->bp;
++ unsigned char desc[24];
++ unsigned long flags;
++
++ desc_size = macb_dma_desc_get_size(bp);
++
++ if (WARN_ON_ONCE(desc_size > ARRAY_SIZE(desc)))
++ return;
++
++ spin_lock_irqsave(&queue->tx_ptr_lock, flags);
++ head = queue->tx_head;
++ tail = queue->tx_tail;
++ ring_size = bp->tx_ring_size;
++ count = CIRC_CNT(head, tail, ring_size);
++
++ if (!(tail % ring_size))
++ goto unlock;
++
++ if (!count) {
++ queue->tx_head = 0;
++ queue->tx_tail = 0;
++ goto unlock;
++ }
++
++ shift = tail % ring_size;
++ cycles = gcd(ring_size, shift);
++
++ for (i = 0; i < cycles; i++) {
++ memcpy(&desc, macb_tx_desc(queue, i), desc_size);
++ memcpy(&tx_skb, macb_tx_skb(queue, i),
++ sizeof(struct macb_tx_skb));
++
++ curr = i;
++ next = (curr + shift) % ring_size;
++
++ while (next != i) {
++ desc_curr = macb_tx_desc(queue, curr);
++ desc_next = macb_tx_desc(queue, next);
++
++ memcpy(desc_curr, desc_next, desc_size);
++
++ if (next == ring_size - 1)
++ desc_curr->ctrl &= ~MACB_BIT(TX_WRAP);
++ if (curr == ring_size - 1)
++ desc_curr->ctrl |= MACB_BIT(TX_WRAP);
++
++ skb_curr = macb_tx_skb(queue, curr);
++ skb_next = macb_tx_skb(queue, next);
++ memcpy(skb_curr, skb_next, sizeof(struct macb_tx_skb));
++
++ curr = next;
++ next = (curr + shift) % ring_size;
++ }
++
++ desc_curr = macb_tx_desc(queue, curr);
++ memcpy(desc_curr, &desc, desc_size);
++ if (i == ring_size - 1)
++ desc_curr->ctrl &= ~MACB_BIT(TX_WRAP);
++ if (curr == ring_size - 1)
++ desc_curr->ctrl |= MACB_BIT(TX_WRAP);
++ memcpy(macb_tx_skb(queue, curr), &tx_skb,
++ sizeof(struct macb_tx_skb));
++ }
++
++ queue->tx_head = count;
++ queue->tx_tail = 0;
++
++ /* Make descriptor updates visible to hardware */
++ wmb();
++
++unlock:
++ spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
++}
++
++/* Rotate the queue so that the tail is at index 0 */
++static void gem_shuffle_tx_rings(struct macb *bp)
++{
++ struct macb_queue *queue;
++ int q;
++
++ for (q = 0, queue = bp->queues; q < bp->num_queues; q++, queue++)
++ gem_shuffle_tx_one_ring(queue);
++}
++
+ static void macb_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+@@ -743,8 +835,6 @@ static void macb_mac_link_up(struct phyl
+ ctrl |= MACB_BIT(PAE);
+
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+- queue->tx_head = 0;
+- queue->tx_tail = 0;
+ queue_writel(queue, IER,
+ bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
+ }
+@@ -758,8 +848,10 @@ static void macb_mac_link_up(struct phyl
+
+ spin_unlock_irqrestore(&bp->lock, flags);
+
+- if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
++ if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) {
+ macb_set_tx_clk(bp, speed);
++ gem_shuffle_tx_rings(bp);
++ }
+
+ /* Enable Rx and Tx; Enable PTP unicast */
+ ctrl = macb_readl(bp, NCR);
--- /dev/null
+From stable+bounces-227092-greg=kroah.com@vger.kernel.org Wed Mar 18 16:06:45 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Mar 2026 10:59:55 -0400
+Subject: net: macb: sort #includes
+To: stable@vger.kernel.org
+Cc: "Théo Lebrun" <theo.lebrun@bootlin.com>, "Andrew Lunn" <andrew@lunn.ch>, "Sean Anderson" <sean.anderson@linux.dev>, "Jakub Kicinski" <kuba@kernel.org>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20260318145956.858944-1-sashal@kernel.org>
+
+From: Théo Lebrun <theo.lebrun@bootlin.com>
+
+[ Upstream commit 8ebeef3d01c8b9e5807afdf1d38547f4625d0e4e ]
+
+Sort #include preprocessor directives.
+
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Reviewed-by: Sean Anderson <sean.anderson@linux.dev>
+Signed-off-by: Théo Lebrun <theo.lebrun@bootlin.com>
+Link: https://patch.msgid.link/20251014-macb-cleanup-v1-15-31cd266e22cd@bootlin.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 881a0263d502 ("net: macb: Shuffle the tx ring before enabling tx")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/cadence/macb_main.c | 37 +++++++++++++++----------------
+ 1 file changed, 19 insertions(+), 18 deletions(-)
+
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -6,36 +6,37 @@
+ */
+
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+-#include <linux/clk.h>
++#include <linux/circ_buf.h>
+ #include <linux/clk-provider.h>
++#include <linux/clk.h>
+ #include <linux/crc32.h>
+-#include <linux/module.h>
+-#include <linux/moduleparam.h>
+-#include <linux/kernel.h>
+-#include <linux/types.h>
+-#include <linux/circ_buf.h>
+-#include <linux/slab.h>
++#include <linux/dma-mapping.h>
++#include <linux/etherdevice.h>
++#include <linux/firmware/xlnx-zynqmp.h>
++#include <linux/inetdevice.h>
++#include <linux/inetdevice.h>
+ #include <linux/init.h>
+-#include <linux/io.h>
+ #include <linux/interrupt.h>
++#include <linux/io.h>
++#include <linux/iopoll.h>
++#include <linux/ip.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/moduleparam.h>
+ #include <linux/netdevice.h>
+-#include <linux/etherdevice.h>
+-#include <linux/dma-mapping.h>
+-#include <linux/platform_device.h>
+-#include <linux/phylink.h>
+ #include <linux/of.h>
+ #include <linux/of_mdio.h>
+ #include <linux/of_net.h>
+-#include <linux/ip.h>
+-#include <linux/udp.h>
+-#include <linux/tcp.h>
+-#include <linux/iopoll.h>
+ #include <linux/phy/phy.h>
++#include <linux/phylink.h>
++#include <linux/platform_device.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/ptp_classify.h>
+ #include <linux/reset.h>
+-#include <linux/firmware/xlnx-zynqmp.h>
+-#include <linux/inetdevice.h>
++#include <linux/slab.h>
++#include <linux/tcp.h>
++#include <linux/types.h>
++#include <linux/udp.h>
+ #include <net/pkt_sched.h>
+ #include "macb.h"
+
--- /dev/null
+From stable+bounces-227195-greg=kroah.com@vger.kernel.org Thu Mar 19 02:00:16 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Mar 2026 21:00:09 -0400
+Subject: netconsole: fix sysdata_release_enabled_show checking wrong flag
+To: stable@vger.kernel.org
+Cc: Breno Leitao <leitao@debian.org>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260319010010.1861315-1-sashal@kernel.org>
+
+From: Breno Leitao <leitao@debian.org>
+
+[ Upstream commit 5af6e8b54927f7a8d3c7fd02b1bdc09e93d5c079 ]
+
+sysdata_release_enabled_show() checks SYSDATA_TASKNAME instead of
+SYSDATA_RELEASE, causing the configfs release_enabled attribute to
+reflect the taskname feature state rather than the release feature
+state. This is a copy-paste error from the adjacent
+sysdata_taskname_enabled_show() function.
+
+The corresponding _store function already uses the correct
+SYSDATA_RELEASE flag.
+
+Fixes: 343f90227070 ("netconsole: implement configfs for release_enabled")
+Signed-off-by: Breno Leitao <leitao@debian.org>
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/20260302-sysdata_release_fix-v1-1-e5090f677c7c@debian.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/netconsole.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/netconsole.c
++++ b/drivers/net/netconsole.c
+@@ -503,7 +503,7 @@ static ssize_t sysdata_release_enabled_s
+ bool release_enabled;
+
+ mutex_lock(&dynamic_netconsole_mutex);
+- release_enabled = !!(nt->sysdata_fields & SYSDATA_TASKNAME);
++ release_enabled = !!(nt->sysdata_fields & SYSDATA_RELEASE);
+ mutex_unlock(&dynamic_netconsole_mutex);
+
+ return sysfs_emit(buf, "%d\n", release_enabled);
--- /dev/null
+From stable+bounces-226896-greg=kroah.com@vger.kernel.org Tue Mar 17 21:07:25 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Mar 2026 16:02:24 -0400
+Subject: nsfs: tighten permission checks for ns iteration ioctls
+To: stable@vger.kernel.org
+Cc: Christian Brauner <brauner@kernel.org>, Jeff Layton <jlayton@kernel.org>, stable@kernel.org, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260317200224.272095-1-sashal@kernel.org>
+
+From: Christian Brauner <brauner@kernel.org>
+
+[ Upstream commit e6b899f08066e744f89df16ceb782e06868bd148 ]
+
+Even privileged services should not necessarily be able to see other
+privileged service's namespaces so they can't leak information to each
+other. Use may_see_all_namespaces() helper that centralizes this policy
+until the nstree adapts.
+
+Link: https://patch.msgid.link/20260226-work-visibility-fixes-v1-1-d2c2853313bd@kernel.org
+Fixes: a1d220d9dafa ("nsfs: iterate through mount namespaces")
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Cc: stable@kernel.org # v6.12+
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+[ context ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nsfs.c | 13 +++++++++++++
+ include/linux/ns_common.h | 2 ++
+ kernel/nscommon.c | 6 ++++++
+ 3 files changed, 21 insertions(+)
+
+--- a/fs/nsfs.c
++++ b/fs/nsfs.c
+@@ -194,6 +194,17 @@ static bool nsfs_ioctl_valid(unsigned in
+ return false;
+ }
+
++static bool may_use_nsfs_ioctl(unsigned int cmd)
++{
++ switch (_IOC_NR(cmd)) {
++ case _IOC_NR(NS_MNT_GET_NEXT):
++ fallthrough;
++ case _IOC_NR(NS_MNT_GET_PREV):
++ return may_see_all_namespaces();
++ }
++ return true;
++}
++
+ static long ns_ioctl(struct file *filp, unsigned int ioctl,
+ unsigned long arg)
+ {
+@@ -209,6 +220,8 @@ static long ns_ioctl(struct file *filp,
+
+ if (!nsfs_ioctl_valid(ioctl))
+ return -ENOIOCTLCMD;
++ if (!may_use_nsfs_ioctl(ioctl))
++ return -EPERM;
+
+ ns = get_proc_ns(file_inode(filp));
+ switch (ioctl) {
+--- a/include/linux/ns_common.h
++++ b/include/linux/ns_common.h
+@@ -144,6 +144,8 @@ void __ns_common_free(struct ns_common *
+
+ #define ns_common_free(__ns) __ns_common_free(to_ns_common((__ns)))
+
++bool may_see_all_namespaces(void);
++
+ static __always_inline __must_check bool __ns_ref_put(struct ns_common *ns)
+ {
+ return refcount_dec_and_test(&ns->__ns_ref);
+--- a/kernel/nscommon.c
++++ b/kernel/nscommon.c
+@@ -75,3 +75,9 @@ void __ns_common_free(struct ns_common *
+ {
+ proc_free_inum(ns->inum);
+ }
++
++bool may_see_all_namespaces(void)
++{
++ return (task_active_pid_ns(current) == &init_pid_ns) &&
++ ns_capable_noaudit(init_pid_ns.user_ns, CAP_SYS_ADMIN);
++}
--- /dev/null
+From stable+bounces-226767-greg=kroah.com@vger.kernel.org Tue Mar 17 18:46:15 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Mar 2026 13:21:52 -0400
+Subject: sched_ext: Disable preemption between scx_claim_exit() and kicking helper work
+To: stable@vger.kernel.org
+Cc: Tejun Heo <tj@kernel.org>, Andrea Righi <arighi@nvidia.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260317172152.239864-2-sashal@kernel.org>
+
+From: Tejun Heo <tj@kernel.org>
+
+[ Upstream commit 83236b2e43dba00bee5b82eb5758816b1a674f6a ]
+
+scx_claim_exit() atomically sets exit_kind, which prevents scx_error() from
+triggering further error handling. After claiming exit, the caller must kick
+the helper kthread work which initiates bypass mode and teardown.
+
+If the calling task gets preempted between claiming exit and kicking the
+helper work, and the BPF scheduler fails to schedule it back (since error
+handling is now disabled), the helper work is never queued, bypass mode
+never activates, tasks stop being dispatched, and the system wedges.
+
+Disable preemption across scx_claim_exit() and the subsequent work kicking
+in all callers - scx_disable() and scx_vexit(). Add
+lockdep_assert_preemption_disabled() to scx_claim_exit() to enforce the
+requirement.
+
+Fixes: f0e1a0643a59 ("sched_ext: Implement BPF extensible scheduler class")
+Cc: stable@vger.kernel.org # v6.12+
+Reviewed-by: Andrea Righi <arighi@nvidia.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/ext.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -4066,10 +4066,19 @@ done:
+ scx_bypass(false);
+ }
+
++/*
++ * Claim the exit on @sch. The caller must ensure that the helper kthread work
++ * is kicked before the current task can be preempted. Once exit_kind is
++ * claimed, scx_error() can no longer trigger, so if the current task gets
++ * preempted and the BPF scheduler fails to schedule it back, the helper work
++ * will never be kicked and the whole system can wedge.
++ */
+ static bool scx_claim_exit(struct scx_sched *sch, enum scx_exit_kind kind)
+ {
+ int none = SCX_EXIT_NONE;
+
++ lockdep_assert_preemption_disabled();
++
+ if (!atomic_try_cmpxchg(&sch->exit_kind, &none, kind))
+ return false;
+
+@@ -4092,6 +4101,7 @@ static void scx_disable(enum scx_exit_ki
+ rcu_read_lock();
+ sch = rcu_dereference(scx_root);
+ if (sch) {
++ guard(preempt)();
+ scx_claim_exit(sch, kind);
+ kthread_queue_work(sch->helper, &sch->disable_work);
+ }
+@@ -4414,6 +4424,8 @@ static void scx_vexit(struct scx_sched *
+ {
+ struct scx_exit_info *ei = sch->exit_info;
+
++ guard(preempt)();
++
+ if (!scx_claim_exit(sch, kind))
+ return;
+
--- /dev/null
+From stable+bounces-226877-greg=kroah.com@vger.kernel.org Tue Mar 17 19:00:15 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Mar 2026 13:29:35 -0400
+Subject: sched_ext: Fix starvation of scx_enable() under fair-class saturation
+To: stable@vger.kernel.org
+Cc: Tejun Heo <tj@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260317172935.242870-1-sashal@kernel.org>
+
+From: Tejun Heo <tj@kernel.org>
+
+[ Upstream commit b06ccbabe2506fd70b9167a644978b049150224a ]
+
+During scx_enable(), the READY -> ENABLED task switching loop changes the
+calling thread's sched_class from fair to ext. Since fair has higher
+priority than ext, saturating fair-class workloads can indefinitely starve
+the enable thread, hanging the system. This was introduced when the enable
+path switched from preempt_disable() to scx_bypass() which doesn't protect
+against fair-class starvation. Note that the original preempt_disable()
+protection wasn't complete either - in partial switch modes, the calling
+thread could still be starved after preempt_enable() as it may have been
+switched to ext class.
+
+Fix it by offloading the enable body to a dedicated system-wide RT
+(SCHED_FIFO) kthread which cannot be starved by either fair or ext class
+tasks. scx_enable() lazily creates the kthread on first use and passes the
+ops pointer through a struct scx_enable_cmd containing the kthread_work,
+then synchronously waits for completion.
+
+The workfn runs on a different kthread from sch->helper (which runs
+disable_work), so it can safely flush disable_work on the error path
+without deadlock.
+
+Fixes: 8c2090c504e9 ("sched_ext: Initialize in bypass mode")
+Cc: stable@vger.kernel.org # v6.12+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/ext.c | 66 ++++++++++++++++++++++++++++++++++++++++++++---------
+ 1 file changed, 56 insertions(+), 10 deletions(-)
+
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -4598,20 +4598,30 @@ static int validate_ops(struct scx_sched
+ return 0;
+ }
+
+-static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
++/*
++ * scx_enable() is offloaded to a dedicated system-wide RT kthread to avoid
++ * starvation. During the READY -> ENABLED task switching loop, the calling
++ * thread's sched_class gets switched from fair to ext. As fair has higher
++ * priority than ext, the calling thread can be indefinitely starved under
++ * fair-class saturation, leading to a system hang.
++ */
++struct scx_enable_cmd {
++ struct kthread_work work;
++ struct sched_ext_ops *ops;
++ int ret;
++};
++
++static void scx_enable_workfn(struct kthread_work *work)
+ {
++ struct scx_enable_cmd *cmd =
++ container_of(work, struct scx_enable_cmd, work);
++ struct sched_ext_ops *ops = cmd->ops;
+ struct scx_sched *sch;
+ struct scx_task_iter sti;
+ struct task_struct *p;
+ unsigned long timeout;
+ int i, cpu, ret;
+
+- if (!cpumask_equal(housekeeping_cpumask(HK_TYPE_DOMAIN),
+- cpu_possible_mask)) {
+- pr_err("sched_ext: Not compatible with \"isolcpus=\" domain isolation\n");
+- return -EINVAL;
+- }
+-
+ mutex_lock(&scx_enable_mutex);
+
+ if (scx_enable_state() != SCX_DISABLED) {
+@@ -4828,13 +4838,15 @@ static int scx_enable(struct sched_ext_o
+
+ atomic_long_inc(&scx_enable_seq);
+
+- return 0;
++ cmd->ret = 0;
++ return;
+
+ err_free_pseqs:
+ free_kick_pseqs();
+ err_unlock:
+ mutex_unlock(&scx_enable_mutex);
+- return ret;
++ cmd->ret = ret;
++ return;
+
+ err_disable_unlock_all:
+ scx_cgroup_unlock();
+@@ -4853,7 +4865,41 @@ err_disable:
+ */
+ scx_error(sch, "scx_enable() failed (%d)", ret);
+ kthread_flush_work(&sch->disable_work);
+- return 0;
++ cmd->ret = 0;
++}
++
++static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
++{
++ static struct kthread_worker *helper;
++ static DEFINE_MUTEX(helper_mutex);
++ struct scx_enable_cmd cmd;
++
++ if (!cpumask_equal(housekeeping_cpumask(HK_TYPE_DOMAIN),
++ cpu_possible_mask)) {
++ pr_err("sched_ext: Not compatible with \"isolcpus=\" domain isolation\n");
++ return -EINVAL;
++ }
++
++ if (!READ_ONCE(helper)) {
++ mutex_lock(&helper_mutex);
++ if (!helper) {
++ helper = kthread_run_worker(0, "scx_enable_helper");
++ if (IS_ERR_OR_NULL(helper)) {
++ helper = NULL;
++ mutex_unlock(&helper_mutex);
++ return -ENOMEM;
++ }
++ sched_set_fifo(helper->task);
++ }
++ mutex_unlock(&helper_mutex);
++ }
++
++ kthread_init_work(&cmd.work, scx_enable_workfn);
++ cmd.ops = ops;
++
++ kthread_queue_work(READ_ONCE(helper), &cmd.work);
++ kthread_flush_work(&cmd.work);
++ return cmd.ret;
+ }
+
+
--- /dev/null
+From stable+bounces-226766-greg=kroah.com@vger.kernel.org Tue Mar 17 18:52:52 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Mar 2026 13:21:51 -0400
+Subject: sched_ext: Simplify breather mechanism with scx_aborting flag
+To: stable@vger.kernel.org
+Cc: Tejun Heo <tj@kernel.org>, Dan Schatzberg <schatzberg.dan@gmail.com>, Emil Tsalapatis <emil@etsalapatis.com>, Andrea Righi <arighi@nvidia.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260317172152.239864-1-sashal@kernel.org>
+
+From: Tejun Heo <tj@kernel.org>
+
+[ Upstream commit a69040ed57f50156e5452474d25c79b9e62075d0 ]
+
+The breather mechanism was introduced in 62dcbab8b0ef ("sched_ext: Avoid
+live-locking bypass mode switching") and e32c260195e6 ("sched_ext: Enable the
+ops breather and eject BPF scheduler on softlockup") to prevent live-locks by
+injecting delays when CPUs are trapped in dispatch paths.
+
+Currently, it uses scx_breather_depth (atomic_t) and scx_in_softlockup
+(unsigned long) with separate increment/decrement and cleanup operations. The
+breather is only activated when aborting, so tie it directly to the exit
+mechanism. Replace both variables with scx_aborting flag set when exit is
+claimed and cleared after bypass is enabled. Introduce scx_claim_exit() to
+consolidate exit_kind claiming and breather enablement. This eliminates
+scx_clear_softlockup() and simplifies scx_softlockup() and scx_bypass().
+
+The breather mechanism will be replaced by a different abort mechanism in a
+future patch. This simplification prepares for that change.
+
+Reviewed-by: Dan Schatzberg <schatzberg.dan@gmail.com>
+Reviewed-by: Emil Tsalapatis <emil@etsalapatis.com>
+Acked-by: Andrea Righi <arighi@nvidia.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Stable-dep-of: 83236b2e43db ("sched_ext: Disable preemption between scx_claim_exit() and kicking helper work")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/ext.c | 54 ++++++++++++++++++++++++-----------------------------
+ 1 file changed, 25 insertions(+), 29 deletions(-)
+
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -33,9 +33,8 @@ static DEFINE_MUTEX(scx_enable_mutex);
+ DEFINE_STATIC_KEY_FALSE(__scx_enabled);
+ DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem);
+ static atomic_t scx_enable_state_var = ATOMIC_INIT(SCX_DISABLED);
+-static unsigned long scx_in_softlockup;
+-static atomic_t scx_breather_depth = ATOMIC_INIT(0);
+ static int scx_bypass_depth;
++static bool scx_aborting;
+ static bool scx_init_task_enabled;
+ static bool scx_switching_all;
+ DEFINE_STATIC_KEY_FALSE(__scx_switched_all);
+@@ -1791,7 +1790,7 @@ static void scx_breather(struct rq *rq)
+
+ lockdep_assert_rq_held(rq);
+
+- if (likely(!atomic_read(&scx_breather_depth)))
++ if (likely(!READ_ONCE(scx_aborting)))
+ return;
+
+ raw_spin_rq_unlock(rq);
+@@ -1800,9 +1799,9 @@ static void scx_breather(struct rq *rq)
+
+ do {
+ int cnt = 1024;
+- while (atomic_read(&scx_breather_depth) && --cnt)
++ while (READ_ONCE(scx_aborting) && --cnt)
+ cpu_relax();
+- } while (atomic_read(&scx_breather_depth) &&
++ } while (READ_ONCE(scx_aborting) &&
+ time_before64(ktime_get_ns(), until));
+
+ raw_spin_rq_lock(rq);
+@@ -3718,30 +3717,14 @@ void scx_softlockup(u32 dur_s)
+ goto out_unlock;
+ }
+
+- /* allow only one instance, cleared at the end of scx_bypass() */
+- if (test_and_set_bit(0, &scx_in_softlockup))
+- goto out_unlock;
+-
+ printk_deferred(KERN_ERR "sched_ext: Soft lockup - CPU%d stuck for %us, disabling \"%s\"\n",
+ smp_processor_id(), dur_s, scx_root->ops.name);
+
+- /*
+- * Some CPUs may be trapped in the dispatch paths. Enable breather
+- * immediately; otherwise, we might even be able to get to scx_bypass().
+- */
+- atomic_inc(&scx_breather_depth);
+-
+ scx_error(sch, "soft lockup - CPU#%d stuck for %us", smp_processor_id(), dur_s);
+ out_unlock:
+ rcu_read_unlock();
+ }
+
+-static void scx_clear_softlockup(void)
+-{
+- if (test_and_clear_bit(0, &scx_in_softlockup))
+- atomic_dec(&scx_breather_depth);
+-}
+-
+ /**
+ * scx_bypass - [Un]bypass scx_ops and guarantee forward progress
+ * @bypass: true for bypass, false for unbypass
+@@ -3802,8 +3785,6 @@ static void scx_bypass(bool bypass)
+ ktime_get_ns() - bypass_timestamp);
+ }
+
+- atomic_inc(&scx_breather_depth);
+-
+ /*
+ * No task property is changing. We just need to make sure all currently
+ * queued tasks are re-queued according to the new scx_rq_bypassing()
+@@ -3860,10 +3841,8 @@ static void scx_bypass(bool bypass)
+ raw_spin_rq_unlock(rq);
+ }
+
+- atomic_dec(&scx_breather_depth);
+ unlock:
+ raw_spin_unlock_irqrestore(&bypass_lock, flags);
+- scx_clear_softlockup();
+ }
+
+ static void free_exit_info(struct scx_exit_info *ei)
+@@ -3958,6 +3937,7 @@ static void scx_disable_workfn(struct kt
+
+ /* guarantee forward progress by bypassing scx_ops */
+ scx_bypass(true);
++ WRITE_ONCE(scx_aborting, false);
+
+ switch (scx_set_enable_state(SCX_DISABLING)) {
+ case SCX_DISABLING:
+@@ -4086,9 +4066,24 @@ done:
+ scx_bypass(false);
+ }
+
+-static void scx_disable(enum scx_exit_kind kind)
++static bool scx_claim_exit(struct scx_sched *sch, enum scx_exit_kind kind)
+ {
+ int none = SCX_EXIT_NONE;
++
++ if (!atomic_try_cmpxchg(&sch->exit_kind, &none, kind))
++ return false;
++
++ /*
++ * Some CPUs may be trapped in the dispatch paths. Enable breather
++ * immediately; otherwise, we might not even be able to get to
++ * scx_bypass().
++ */
++ WRITE_ONCE(scx_aborting, true);
++ return true;
++}
++
++static void scx_disable(enum scx_exit_kind kind)
++{
+ struct scx_sched *sch;
+
+ if (WARN_ON_ONCE(kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE))
+@@ -4097,7 +4092,7 @@ static void scx_disable(enum scx_exit_ki
+ rcu_read_lock();
+ sch = rcu_dereference(scx_root);
+ if (sch) {
+- atomic_try_cmpxchg(&sch->exit_kind, &none, kind);
++ scx_claim_exit(sch, kind);
+ kthread_queue_work(sch->helper, &sch->disable_work);
+ }
+ rcu_read_unlock();
+@@ -4418,9 +4413,8 @@ static void scx_vexit(struct scx_sched *
+ const char *fmt, va_list args)
+ {
+ struct scx_exit_info *ei = sch->exit_info;
+- int none = SCX_EXIT_NONE;
+
+- if (!atomic_try_cmpxchg(&sch->exit_kind, &none, kind))
++ if (!scx_claim_exit(sch, kind))
+ return;
+
+ ei->exit_code = exit_code;
+@@ -4645,6 +4639,8 @@ static void scx_enable_workfn(struct kth
+ */
+ WARN_ON_ONCE(scx_set_enable_state(SCX_ENABLING) != SCX_DISABLED);
+ WARN_ON_ONCE(scx_root);
++ if (WARN_ON_ONCE(READ_ONCE(scx_aborting)))
++ WRITE_ONCE(scx_aborting, false);
+
+ atomic_long_set(&scx_nr_rejected, 0);
+
crypto-padlock-sha-disable-for-zhaoxin-processor.patch
bluetooth-l2cap-fix-type-confusion-in-l2cap_ecred_reconf_rsp.patch
bluetooth-l2cap-validate-l2cap_info_rsp-payload-length-before-access.patch
+smb-client-fix-krb5-mount-with-username-option.patch
+ksmbd-unset-conn-binding-on-failed-binding-request.patch
+ksmbd-use-volume-uuid-in-fs_object_id_information.patch
+drm-i915-dsc-add-selective-update-register-definitions.patch
+drm-i915-dsc-add-helper-for-writing-dsc-selective-update-et-parameters.patch
+drm-i915-psr-write-dsc-parameters-on-selective-update-in-et-mode.patch
+net-macb-introduce-gem_init_rx_ring.patch
+net-macb-reinitialize-tx-rx-queue-pointer-registers-and-rx-ring-during-resume.patch
+loongarch-check-return-values-for-set_memory_-rw-rox.patch
+ublk-fix-null-pointer-dereference-in-ublk_ctrl_set_size.patch
+netconsole-fix-sysdata_release_enabled_show-checking-wrong-flag.patch
+crypto-atmel-sha204a-fix-oom-tfm_count-leak.patch
+cifs-open-files-should-not-hold-ref-on-superblock.patch
+drm-xe-fix-memory-leak-in-xe_vm_madvise_ioctl.patch
+drm-i915-vrr-move-has_vrr-check-into-intel_vrr_set_transcoder_timings.patch
+drm-i915-vrr-configure-vrr-timings-after-enabling-trans_ddi_func_ctl.patch
+net-macb-sort-includes.patch
+net-macb-shuffle-the-tx-ring-before-enabling-tx.patch
+ksmbd-don-t-log-keys-in-smb3-signing-and-encryption-key-generation.patch
+fgraph-fix-thresh_return-nosleeptime-double-adjust.patch
+drm-xe-sync-fix-user-fence-leak-on-alloc-failure.patch
+nsfs-tighten-permission-checks-for-ns-iteration-ioctls.patch
+sched_ext-fix-starvation-of-scx_enable-under-fair-class-saturation.patch
+sched_ext-simplify-breather-mechanism-with-scx_aborting-flag.patch
+sched_ext-disable-preemption-between-scx_claim_exit-and-kicking-helper-work.patch
+ipmi-consolidate-the-run-to-completion-checking-for-xmit-msgs-lock.patch
+ipmi-msghandler-handle-error-returns-from-the-smi-sender.patch
+mm-huge_memory-fix-a-folio_split-race-condition-with-folio_try_get.patch
--- /dev/null
+From 12b4c5d98cd7ca46d5035a57bcd995df614c14e1 Mon Sep 17 00:00:00 2001
+From: Paulo Alcantara <pc@manguebit.org>
+Date: Fri, 13 Mar 2026 00:03:38 -0300
+Subject: smb: client: fix krb5 mount with username option
+
+From: Paulo Alcantara <pc@manguebit.org>
+
+commit 12b4c5d98cd7ca46d5035a57bcd995df614c14e1 upstream.
+
+Customer reported that some of their krb5 mounts were failing against
+a single server as the client was trying to mount the shares with
+wrong credentials. It turned out the client was reusing SMB session
+from first mount to try mounting the other shares, even though a
+different username= option had been specified to the other mounts.
+
+By using username mount option along with sec=krb5 to search for
+principals from keytab is supported by cifs.upcall(8) since
+cifs-utils-4.8. So fix this by matching username mount option in
+match_session() even with Kerberos.
+
+For example, the second mount below should fail with -ENOKEY as there
+is no 'foobar' principal in keytab (/etc/krb5.keytab). The client
+ends up reusing SMB session from first mount to perform the second
+one, which is wrong.
+
+```
+$ ktutil
+ktutil: add_entry -password -p testuser -k 1 -e aes256-cts
+Password for testuser@ZELDA.TEST:
+ktutil: write_kt /etc/krb5.keytab
+ktutil: quit
+$ klist -ke
+Keytab name: FILE:/etc/krb5.keytab
+KVNO Principal
+ ---- ----------------------------------------------------------------
+ 1 testuser@ZELDA.TEST (aes256-cts-hmac-sha1-96)
+$ mount.cifs //w22-root2/scratch /mnt/1 -o sec=krb5,username=testuser
+$ mount.cifs //w22-root2/scratch /mnt/2 -o sec=krb5,username=foobar
+$ mount -t cifs | grep -Po 'username=\K\w+'
+testuser
+testuser
+```
+
+Reported-by: Oscar Santos <ossantos@redhat.com>
+Signed-off-by: Paulo Alcantara (Red Hat) <pc@manguebit.org>
+Cc: David Howells <dhowells@redhat.com>
+Cc: linux-cifs@vger.kernel.org
+Cc: stable@vger.kernel.org
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/connect.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -1954,6 +1954,10 @@ static int match_session(struct cifs_ses
+ case Kerberos:
+ if (!uid_eq(ctx->cred_uid, ses->cred_uid))
+ return 0;
++ if (strncmp(ses->user_name ?: "",
++ ctx->username ?: "",
++ CIFS_MAX_USERNAME_LEN))
++ return 0;
+ break;
+ case NTLMv2:
+ case RawNTLMSSP:
--- /dev/null
+From stable+bounces-227286-greg=kroah.com@vger.kernel.org Thu Mar 19 13:58:41 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 19 Mar 2026 08:53:03 -0400
+Subject: ublk: fix NULL pointer dereference in ublk_ctrl_set_size()
+To: stable@vger.kernel.org
+Cc: Mehul Rao <mehulrao@gmail.com>, Ming Lei <ming.lei@redhat.com>, Jens Axboe <axboe@kernel.dk>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260319125303.2390790-1-sashal@kernel.org>
+
+From: Mehul Rao <mehulrao@gmail.com>
+
+[ Upstream commit 25966fc097691e5c925ad080f64a2f19c5fd940a ]
+
+ublk_ctrl_set_size() unconditionally dereferences ub->ub_disk via
+set_capacity_and_notify() without checking if it is NULL.
+
+ub->ub_disk is NULL before UBLK_CMD_START_DEV completes (it is only
+assigned in ublk_ctrl_start_dev()) and after UBLK_CMD_STOP_DEV runs
+(ublk_detach_disk() sets it to NULL). Since the UBLK_CMD_UPDATE_SIZE
+handler performs no state validation, a user can trigger a NULL pointer
+dereference by sending UPDATE_SIZE to a device that has been added but
+not yet started, or one that has been stopped.
+
+Fix this by checking ub->ub_disk under ub->mutex before dereferencing
+it, and returning -ENODEV if the disk is not available.
+
+Fixes: 98b995660bff ("ublk: Add UBLK_U_CMD_UPDATE_SIZE")
+Cc: stable@vger.kernel.org
+Signed-off-by: Mehul Rao <mehulrao@gmail.com>
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+[ adapted `&header` to `header` ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/block/ublk_drv.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -3604,15 +3604,22 @@ static int ublk_ctrl_get_features(const
+ return 0;
+ }
+
+-static void ublk_ctrl_set_size(struct ublk_device *ub, const struct ublksrv_ctrl_cmd *header)
++static int ublk_ctrl_set_size(struct ublk_device *ub, const struct ublksrv_ctrl_cmd *header)
+ {
+ struct ublk_param_basic *p = &ub->params.basic;
+ u64 new_size = header->data[0];
++ int ret = 0;
+
+ mutex_lock(&ub->mutex);
++ if (!ub->ub_disk) {
++ ret = -ENODEV;
++ goto out;
++ }
+ p->dev_sectors = new_size;
+ set_capacity_and_notify(ub->ub_disk, p->dev_sectors);
++out:
+ mutex_unlock(&ub->mutex);
++ return ret;
+ }
+
+ struct count_busy {
+@@ -3902,8 +3909,7 @@ static int ublk_ctrl_uring_cmd(struct io
+ ret = ublk_ctrl_end_recovery(ub, header);
+ break;
+ case UBLK_CMD_UPDATE_SIZE:
+- ublk_ctrl_set_size(ub, header);
+- ret = 0;
++ ret = ublk_ctrl_set_size(ub, header);
+ break;
+ case UBLK_CMD_QUIESCE_DEV:
+ ret = ublk_ctrl_quiesce_dev(ub, header);