--- /dev/null
+From stable+bounces-211719-greg=kroah.com@vger.kernel.org Tue Jan 27 05:03:22 2026
+From: alvalan9@foxmail.com
+Date: Tue, 27 Jan 2026 04:02:42 +0000
+Subject: blk-cgroup: Reinit blkg_iostat_set after clearing in blkcg_reset_stats()
+To: stable@vger.kernel.org
+Cc: Waiman Long <longman@redhat.com>, Ming Lei <ming.lei@redhat.com>, Tejun Heo <tj@kernel.org>, Jens Axboe <axboe@kernel.dk>, Alva Lan <alvalan9@foxmail.com>
+Message-ID: <tencent_6A48B46F35791FDA92E8D6A1384130E60D08@qq.com>
+
+From: Waiman Long <longman@redhat.com>
+
+[ Upstream commit 3d2af77e31ade05ff7ccc3658c3635ec1bea0979 ]
+
+When blkg_alloc() is called to allocate a blkcg_gq structure
+with the associated blkg_iostat_set's, there are 2 fields within
+blkg_iostat_set that require proper initialization - blkg & sync.
+The former field was introduced by commit 3b8cc6298724 ("blk-cgroup:
+Optimize blkcg_rstat_flush()") while the latter one was introduced by
+commit f73316482977 ("blk-cgroup: reimplement basic IO stats using
+cgroup rstat").
+
+Unfortunately those fields in the blkg_iostat_set's are not properly
+re-initialized when they are cleared in v1's blkcg_reset_stats(). This
+can lead to a kernel panic due to NULL pointer access of the blkg
+pointer. The missing initialization of sync is less problematic and
+can be a problem in a debug kernel due to missing lockdep initialization.
+
+Fix these problems by re-initializing them after memory clearing.
+
+Fixes: 3b8cc6298724 ("blk-cgroup: Optimize blkcg_rstat_flush()")
+Fixes: f73316482977 ("blk-cgroup: reimplement basic IO stats using cgroup rstat")
+Signed-off-by: Waiman Long <longman@redhat.com>
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Acked-by: Tejun Heo <tj@kernel.org>
+Link: https://lore.kernel.org/r/20230606180724.2455066-1-longman@redhat.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+[ Remove this line: bis -> blkg = blkg for blkg was introduced by commit
+ 3b8cc6298724 ("blk-cgroup: Optimize blkcg_rstat_flush()") since v6.2. ]
+Signed-off-by: Alva Lan <alvalan9@foxmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/blk-cgroup.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -531,8 +531,12 @@ static int blkcg_reset_stats(struct cgro
+ struct blkg_iostat_set *bis =
+ per_cpu_ptr(blkg->iostat_cpu, cpu);
+ memset(bis, 0, sizeof(*bis));
++
++ /* Re-initialize the cleared blkg_iostat_set */
++ u64_stats_init(&bis->sync);
+ }
+ memset(&blkg->iostat, 0, sizeof(blkg->iostat));
++ u64_stats_init(&blkg->iostat.sync);
+
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ struct blkcg_policy *pol = blkcg_policy[i];
--- /dev/null
+From 5a4391bdc6c8357242f62f22069c865b792406b3 Mon Sep 17 00:00:00 2001
+From: Marc Kleine-Budde <mkl@pengutronix.de>
+Date: Sat, 10 Jan 2026 12:52:27 +0100
+Subject: can: esd_usb: esd_usb_read_bulk_callback(): fix URB memory leak
+
+From: Marc Kleine-Budde <mkl@pengutronix.de>
+
+commit 5a4391bdc6c8357242f62f22069c865b792406b3 upstream.
+
+Fix similar memory leak as in commit 7352e1d5932a ("can: gs_usb:
+gs_usb_receive_bulk_callback(): fix URB memory leak").
+
+In esd_usb_open(), the URBs for USB-in transfers are allocated, added to
+the dev->rx_submitted anchor and submitted. In the complete callback
+esd_usb_read_bulk_callback(), the URBs are processed and resubmitted. In
+esd_usb_close() the URBs are freed by calling
+usb_kill_anchored_urbs(&dev->rx_submitted).
+
+However, this does not take into account that the USB framework unanchors
+the URB before the complete function is called. This means that once an
+in-URB has been completed, it is no longer anchored and is ultimately not
+released in esd_usb_close().
+
+Fix the memory leak by anchoring the URB in the
+esd_usb_read_bulk_callback() to the dev->rx_submitted anchor.
+
+Fixes: 96d8e90382dc ("can: Add driver for esd CAN-USB/2 device")
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/20260116-can_usb-fix-memory-leak-v2-2-4b8cb2915571@pengutronix.de
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/can/usb/esd_usb.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/can/usb/esd_usb.c
++++ b/drivers/net/can/usb/esd_usb.c
+@@ -447,13 +447,20 @@ resubmit_urb:
+ urb->transfer_buffer, RX_BUFFER_SIZE,
+ esd_usb_read_bulk_callback, dev);
+
++ usb_anchor_urb(urb, &dev->rx_submitted);
++
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
++ if (!retval)
++ return;
++
++ usb_unanchor_urb(urb);
++
+ if (retval == -ENODEV) {
+ for (i = 0; i < dev->net_count; i++) {
+ if (dev->nets[i])
+ netif_device_detach(dev->nets[i]->netdev);
+ }
+- } else if (retval) {
++ } else {
+ dev_err(dev->udev->dev.parent,
+ "failed resubmitting read bulk urb: %d\n", retval);
+ }
--- /dev/null
+From stable+bounces-211328-greg=kroah.com@vger.kernel.org Fri Jan 23 04:26:02 2026
+From: jetlan9@163.com
+Date: Fri, 23 Jan 2026 03:24:22 +0000
+Subject: crypto: qat - flush misc workqueue during device shutdown
+To: stable@vger.kernel.org
+Cc: Giovanni Cabiddu <giovanni.cabiddu@intel.com>, Ahsan Atta <ahsan.atta@intel.com>, Herbert Xu <herbert@gondor.apana.org.au>, Wenshan Lan <jetlan9@163.com>
+Message-ID: <20260123032422.4202-1-jetlan9@163.com>
+
+From: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+
+[ Upstream commit 3d4df408ba9bad2b205c7fb8afc1836a6a4ca88a ]
+
+Repeated loading and unloading of a device specific QAT driver, for
+example qat_4xxx, in a tight loop can lead to a crash due to a
+use-after-free scenario. This occurs when a power management (PM)
+interrupt triggers just before the device-specific driver (e.g.,
+qat_4xxx.ko) is unloaded, while the core driver (intel_qat.ko) remains
+loaded.
+
+Since the driver uses a shared workqueue (`qat_misc_wq`) across all
+devices that is owned by intel_qat.ko, a deferred routine from the
+device-specific driver may still be pending in the queue. If this
+routine executes after the driver is unloaded, it can dereference freed
+memory, resulting in a page fault and kernel crash like the following:
+
+ BUG: unable to handle page fault for address: ffa000002e50a01c
+ #PF: supervisor read access in kernel mode
+ RIP: 0010:pm_bh_handler+0x1d2/0x250 [intel_qat]
+ Call Trace:
+ pm_bh_handler+0x1d2/0x250 [intel_qat]
+ process_one_work+0x171/0x340
+ worker_thread+0x277/0x3a0
+ kthread+0xf0/0x120
+ ret_from_fork+0x2d/0x50
+
+To prevent this, flush the misc workqueue during device shutdown to
+ensure that all pending work items are completed before the driver is
+unloaded.
+
+Note: This approach may slightly increase shutdown latency if the
+workqueue contains jobs from other devices, but it ensures correctness
+and stability.
+
+Fixes: e5745f34113b ("crypto: qat - enable power management for QAT GEN4")
+Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Ahsan Atta <ahsan.atta@intel.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+[ Intel crypto drivers were moved by
+ a4b16dad4657 ("crypto: qat - Move driver to drivers/crypto/intel/qat")
+ so apply the patch to files under drivers/crypto/qat/qat_common in
+ 6.1.y. ]
+Signed-off-by: Wenshan Lan <jetlan9@163.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/crypto/qat/qat_common/adf_common_drv.h | 1 +
+ drivers/crypto/qat/qat_common/adf_init.c | 1 +
+ drivers/crypto/qat/qat_common/adf_isr.c | 5 +++++
+ 3 files changed, 7 insertions(+)
+
+--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
++++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
+@@ -194,6 +194,7 @@ int qat_uclo_set_cfg_ae_mask(struct icp_
+ int adf_init_misc_wq(void);
+ void adf_exit_misc_wq(void);
+ bool adf_misc_wq_queue_work(struct work_struct *work);
++void adf_misc_wq_flush(void);
+ #if defined(CONFIG_PCI_IOV)
+ int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
+ void adf_disable_sriov(struct adf_accel_dev *accel_dev);
+--- a/drivers/crypto/qat/qat_common/adf_init.c
++++ b/drivers/crypto/qat/qat_common/adf_init.c
+@@ -337,6 +337,7 @@ void adf_dev_shutdown(struct adf_accel_d
+ hw_data->exit_admin_comms(accel_dev);
+
+ adf_cleanup_etr_data(accel_dev);
++ adf_misc_wq_flush();
+ adf_dev_restore(accel_dev);
+ }
+ EXPORT_SYMBOL_GPL(adf_dev_shutdown);
+--- a/drivers/crypto/qat/qat_common/adf_isr.c
++++ b/drivers/crypto/qat/qat_common/adf_isr.c
+@@ -380,3 +380,8 @@ bool adf_misc_wq_queue_work(struct work_
+ {
+ return queue_work(adf_misc_wq, work);
+ }
++
++void adf_misc_wq_flush(void)
++{
++ flush_workqueue(adf_misc_wq);
++}
--- /dev/null
+From stable+bounces-210683-greg=kroah.com@vger.kernel.org Wed Jan 21 05:22:43 2026
+From: Rahul Sharma <black.hawk@163.com>
+Date: Wed, 21 Jan 2026 12:21:58 +0800
+Subject: drm/amd/display: Check dce_hwseq before dereferencing it
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org, Alex Hung <alex.hung@amd.com>, Mario Limonciello <mario.limonciello@amd.com>, Alex Deucher <alexander.deucher@amd.com>, Aurabindo Pillai <aurabindo.pillai@amd.com>, Rahul Sharma <black.hawk@163.com>
+Message-ID: <20260121042158.3553167-1-black.hawk@163.com>
+
+From: Alex Hung <alex.hung@amd.com>
+
+[ Upstream commit b669507b637eb6b1aaecf347f193efccc65d756e ]
+
+[WHAT]
+
+hws was checked for null earlier in dce110_blank_stream, indicating hws
+can be null, and should be checked whenever it is used.
+
+Cc: Mario Limonciello <mario.limonciello@amd.com>
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Aurabindo Pillai <aurabindo.pillai@amd.com>
+Signed-off-by: Alex Hung <alex.hung@amd.com>
+Signed-off-by: Aurabindo Pillai <aurabindo.pillai@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 79db43611ff61280b6de58ce1305e0b2ecf675ad)
+Cc: stable@vger.kernel.org
+[ The context change is due to the commit 8e7b3f5435b3
+("drm/amd/display: Add control flag to dc_stream_state to skip eDP BL off/link off")
+and the commit a8728dbb4ba2 ("drm/amd/display: Refactor edp power
+control") and the proper adoption is done. ]
+Signed-off-by: Rahul Sharma <black.hawk@163.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -1233,7 +1233,8 @@ void dce110_blank_stream(struct pipe_ctx
+ struct dce_hwseq *hws = link->dc->hwseq;
+
+ if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
+- hws->funcs.edp_backlight_control(link, false);
++ if (hws)
++ hws->funcs.edp_backlight_control(link, false);
+ link->dc->hwss.set_abm_immediate_disable(pipe_ctx);
+ }
+
--- /dev/null
+From stable+bounces-212755-greg=kroah.com@vger.kernel.org Thu Jan 29 10:27:16 2026
+From: Li hongliang <1468888505@139.com>
+Date: Thu, 29 Jan 2026 17:27:02 +0800
+Subject: drm/amdgpu: Replace Mutex with Spinlock for RLCG register access to avoid Priority Inversion in SRIOV
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org, srinivasan.shanmugam@amd.com
+Cc: patches@lists.linux.dev, linux-kernel@vger.kernel.org, alexander.deucher@amd.com, christian.koenig@amd.com, Xinhui.Pan@amd.com, airlied@gmail.com, daniel@ffwll.ch, sashal@kernel.org, mario.limonciello@amd.com, superm1@kernel.org, Jun.Ma2@amd.com, Zhigang.Luo@amd.com, Hawking.Zhang@amd.com, Jesse.Zhang@amd.com, victor.skvortsov@amd.com, amd-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org, lin.cao@amd.com, Jingwen.Chen2@amd.com
+Message-ID: <20260129092702.3671189-1-1468888505@139.com>
+
+From: Srinivasan Shanmugam <srinivasan.shanmugam@amd.com>
+
+[ Upstream commit dc0297f3198bd60108ccbd167ee5d9fa4af31ed0 ]
+
+RLCG Register Access is a way for virtual functions to safely access GPU
+registers in a virtualized environment, including TLB flushes and
+register reads. When multiple threads or VFs try to access the same
+registers simultaneously, it can lead to race conditions. By using the
+RLCG interface, the driver can serialize access to the registers. This
+means that only one thread can access the registers at a time,
+preventing conflicts and ensuring that operations are performed
+correctly. Additionally, when a low-priority task holds a mutex that a
+high-priority task needs, i.e., if a thread holding a spinlock tries to
+acquire a mutex, it can lead to priority inversion. register access in
+amdgpu_virt_rlcg_reg_rw especially in a fast code path is critical.
+
+The call stack shows that the function amdgpu_virt_rlcg_reg_rw is being
+called, which attempts to acquire the mutex. This function is invoked
+from amdgpu_sriov_wreg, which in turn is called from
+gmc_v11_0_flush_gpu_tlb.
+
+The [ BUG: Invalid wait context ] indicates that a thread is trying to
+acquire a mutex while it is in a context that does not allow it to sleep
+(like holding a spinlock).
+
+Fixes the below:
+
+[ 253.013423] =============================
+[ 253.013434] [ BUG: Invalid wait context ]
+[ 253.013446] 6.12.0-amdstaging-drm-next-lol-050225 #14 Tainted: G U OE
+[ 253.013464] -----------------------------
+[ 253.013475] kworker/0:1/10 is trying to lock:
+[ 253.013487] ffff9f30542e3cf8 (&adev->virt.rlcg_reg_lock){+.+.}-{3:3}, at: amdgpu_virt_rlcg_reg_rw+0xf6/0x330 [amdgpu]
+[ 253.013815] other info that might help us debug this:
+[ 253.013827] context-{4:4}
+[ 253.013835] 3 locks held by kworker/0:1/10:
+[ 253.013847] #0: ffff9f3040050f58 ((wq_completion)events){+.+.}-{0:0}, at: process_one_work+0x3f5/0x680
+[ 253.013877] #1: ffffb789c008be40 ((work_completion)(&wfc.work)){+.+.}-{0:0}, at: process_one_work+0x1d6/0x680
+[ 253.013905] #2: ffff9f3054281838 (&adev->gmc.invalidate_lock){+.+.}-{2:2}, at: gmc_v11_0_flush_gpu_tlb+0x198/0x4f0 [amdgpu]
+[ 253.014154] stack backtrace:
+[ 253.014164] CPU: 0 UID: 0 PID: 10 Comm: kworker/0:1 Tainted: G U OE 6.12.0-amdstaging-drm-next-lol-050225 #14
+[ 253.014189] Tainted: [U]=USER, [O]=OOT_MODULE, [E]=UNSIGNED_MODULE
+[ 253.014203] Hardware name: Microsoft Corporation Virtual Machine/Virtual Machine, BIOS Hyper-V UEFI Release v4.1 11/18/2024
+[ 253.014224] Workqueue: events work_for_cpu_fn
+[ 253.014241] Call Trace:
+[ 253.014250] <TASK>
+[ 253.014260] dump_stack_lvl+0x9b/0xf0
+[ 253.014275] dump_stack+0x10/0x20
+[ 253.014287] __lock_acquire+0xa47/0x2810
+[ 253.014303] ? srso_alias_return_thunk+0x5/0xfbef5
+[ 253.014321] lock_acquire+0xd1/0x300
+[ 253.014333] ? amdgpu_virt_rlcg_reg_rw+0xf6/0x330 [amdgpu]
+[ 253.014562] ? __lock_acquire+0xa6b/0x2810
+[ 253.014578] __mutex_lock+0x85/0xe20
+[ 253.014591] ? amdgpu_virt_rlcg_reg_rw+0xf6/0x330 [amdgpu]
+[ 253.014782] ? sched_clock_noinstr+0x9/0x10
+[ 253.014795] ? srso_alias_return_thunk+0x5/0xfbef5
+[ 253.014808] ? local_clock_noinstr+0xe/0xc0
+[ 253.014822] ? amdgpu_virt_rlcg_reg_rw+0xf6/0x330 [amdgpu]
+[ 253.015012] ? srso_alias_return_thunk+0x5/0xfbef5
+[ 253.015029] mutex_lock_nested+0x1b/0x30
+[ 253.015044] ? mutex_lock_nested+0x1b/0x30
+[ 253.015057] amdgpu_virt_rlcg_reg_rw+0xf6/0x330 [amdgpu]
+[ 253.015249] amdgpu_sriov_wreg+0xc5/0xd0 [amdgpu]
+[ 253.015435] gmc_v11_0_flush_gpu_tlb+0x44b/0x4f0 [amdgpu]
+[ 253.015667] gfx_v11_0_hw_init+0x499/0x29c0 [amdgpu]
+[ 253.015901] ? __pfx_smu_v13_0_update_pcie_parameters+0x10/0x10 [amdgpu]
+[ 253.016159] ? srso_alias_return_thunk+0x5/0xfbef5
+[ 253.016173] ? smu_hw_init+0x18d/0x300 [amdgpu]
+[ 253.016403] amdgpu_device_init+0x29ad/0x36a0 [amdgpu]
+[ 253.016614] amdgpu_driver_load_kms+0x1a/0xc0 [amdgpu]
+[ 253.017057] amdgpu_pci_probe+0x1c2/0x660 [amdgpu]
+[ 253.017493] local_pci_probe+0x4b/0xb0
+[ 253.017746] work_for_cpu_fn+0x1a/0x30
+[ 253.017995] process_one_work+0x21e/0x680
+[ 253.018248] worker_thread+0x190/0x330
+[ 253.018500] ? __pfx_worker_thread+0x10/0x10
+[ 253.018746] kthread+0xe7/0x120
+[ 253.018988] ? __pfx_kthread+0x10/0x10
+[ 253.019231] ret_from_fork+0x3c/0x60
+[ 253.019468] ? __pfx_kthread+0x10/0x10
+[ 253.019701] ret_from_fork_asm+0x1a/0x30
+[ 253.019939] </TASK>
+
+v2: s/spin_trylock/spin_lock_irqsave to be safe (Christian).
+
+Fixes: e864180ee49b ("drm/amdgpu: Add lock around VF RLCG interface")
+Cc: lin cao <lin.cao@amd.com>
+Cc: Jingwen Chen <Jingwen.Chen2@amd.com>
+Cc: Victor Skvortsov <victor.skvortsov@amd.com>
+Cc: Zhigang Luo <zhigang.luo@amd.com>
+Cc: Christian König <christian.koenig@amd.com>
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Srinivasan Shanmugam <srinivasan.shanmugam@amd.com>
+Suggested-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+[ Minor conflict resolved. ]
+Signed-off-by: Li hongliang <1468888505@139.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 5 +++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 3 ++-
+ 3 files changed, 6 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3652,7 +3652,6 @@ int amdgpu_device_init(struct amdgpu_dev
+ mutex_init(&adev->grbm_idx_mutex);
+ mutex_init(&adev->mn_lock);
+ mutex_init(&adev->virt.vf_errors.lock);
+- mutex_init(&adev->virt.rlcg_reg_lock);
+ hash_init(adev->mn_hash);
+ mutex_init(&adev->psp.mutex);
+ mutex_init(&adev->notifier_lock);
+@@ -3674,6 +3673,7 @@ int amdgpu_device_init(struct amdgpu_dev
+ spin_lock_init(&adev->se_cac_idx_lock);
+ spin_lock_init(&adev->audio_endpt_idx_lock);
+ spin_lock_init(&adev->mm_stats.lock);
++ spin_lock_init(&adev->virt.rlcg_reg_lock);
+
+ INIT_LIST_HEAD(&adev->shadow_list);
+ mutex_init(&adev->shadow_list_lock);
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+@@ -965,6 +965,7 @@ static u32 amdgpu_virt_rlcg_reg_rw(struc
+ void *scratch_reg2;
+ void *scratch_reg3;
+ void *spare_int;
++ unsigned long flags;
+
+ if (!adev->gfx.rlc.rlcg_reg_access_supported) {
+ dev_err(adev->dev,
+@@ -978,7 +979,7 @@ static u32 amdgpu_virt_rlcg_reg_rw(struc
+ scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2;
+ scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3;
+
+- mutex_lock(&adev->virt.rlcg_reg_lock);
++ spin_lock_irqsave(&adev->virt.rlcg_reg_lock, flags);
+
+ if (reg_access_ctrl->spare_int)
+ spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int;
+@@ -1034,7 +1035,7 @@ static u32 amdgpu_virt_rlcg_reg_rw(struc
+
+ ret = readl(scratch_reg0);
+
+- mutex_unlock(&adev->virt.rlcg_reg_lock);
++ spin_unlock_irqrestore(&adev->virt.rlcg_reg_lock, flags);
+
+ return ret;
+ }
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+@@ -264,7 +264,8 @@ struct amdgpu_virt {
+ /* the ucode id to signal the autoload */
+ uint32_t autoload_ucode_id;
+
+- struct mutex rlcg_reg_lock;
++ /* Spinlock to protect access to the RLCG register interface */
++ spinlock_t rlcg_reg_lock;
+ };
+
+ struct amdgpu_video_codec_info;
--- /dev/null
+From 80614c509810fc051312d1a7ccac8d0012d6b8d0 Mon Sep 17 00:00:00 2001
+From: Haoxiang Li <lihaoxiang@isrc.iscas.ac.cn>
+Date: Thu, 8 Jan 2026 15:18:22 +0800
+Subject: drm/amdkfd: fix a memory leak in device_queue_manager_init()
+
+From: Haoxiang Li <lihaoxiang@isrc.iscas.ac.cn>
+
+commit 80614c509810fc051312d1a7ccac8d0012d6b8d0 upstream.
+
+If dqm->ops.initialize() fails, add deallocate_hiq_sdma_mqd()
+to release the memory allocated by allocate_hiq_sdma_mqd().
+Move deallocate_hiq_sdma_mqd() up to ensure proper function
+visibility at the point of use.
+
+Fixes: 11614c36bc8f ("drm/amdkfd: Allocate MQD trunk for HIQ and SDMA")
+Signed-off-by: Haoxiang Li <lihaoxiang@isrc.iscas.ac.cn>
+Signed-off-by: Felix Kuehling <felix.kuehling@amd.com>
+Reviewed-by: Oak Zeng <Oak.Zeng@amd.com>
+Reviewed-by: Felix Kuehling <felix.kuehling@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit b7cccc8286bb9919a0952c812872da1dcfe9d390)
+Cc: stable@vger.kernel.org
+Signed-off-by: Felix Kuehling <felix.kuehling@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 18 ++++++++++--------
+ 1 file changed, 10 insertions(+), 8 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -2257,6 +2257,14 @@ static int allocate_hiq_sdma_mqd(struct
+ return retval;
+ }
+
++static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev,
++ struct kfd_mem_obj *mqd)
++{
++ WARN(!mqd, "No hiq sdma mqd trunk to free");
++
++ amdgpu_amdkfd_free_gtt_mem(dev->adev, &mqd->gtt_mem);
++}
++
+ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
+ {
+ struct device_queue_manager *dqm;
+@@ -2382,19 +2390,13 @@ struct device_queue_manager *device_queu
+ if (!dqm->ops.initialize(dqm))
+ return dqm;
+
++ deallocate_hiq_sdma_mqd(dev, &dqm->hiq_sdma_mqd);
++
+ out_free:
+ kfree(dqm);
+ return NULL;
+ }
+
+-static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev,
+- struct kfd_mem_obj *mqd)
+-{
+- WARN(!mqd, "No hiq sdma mqd trunk to free");
+-
+- amdgpu_amdkfd_free_gtt_mem(dev->adev, &mqd->gtt_mem);
+-}
+-
+ void device_queue_manager_uninit(struct device_queue_manager *dqm)
+ {
+ dqm->ops.uninitialize(dqm);
--- /dev/null
+From stable+bounces-213031-greg=kroah.com@vger.kernel.org Mon Feb 2 08:59:14 2026
+From: Li hongliang <1468888505@139.com>
+Date: Mon, 2 Feb 2026 15:58:55 +0800
+Subject: drm/radeon: delete radeon_fence_process in is_signaled, no deadlock
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org, rbmccav@gmail.com
+Cc: patches@lists.linux.dev, linux-kernel@vger.kernel.org, alexander.deucher@amd.com, christian.koenig@amd.com, Xinhui.Pan@amd.com, airlied@gmail.com, daniel@ffwll.ch, amd-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org
+Message-ID: <20260202075855.947632-1-1468888505@139.com>
+
+From: Robert McClinton <rbmccav@gmail.com>
+
+[ Upstream commit 9eb00b5f5697bd56baa3222c7a1426fa15bacfb5 ]
+
+Delete the attempt to progress the queue when checking if fence is
+signaled. This avoids deadlock.
+
+dma-fence_ops::signaled can be called with the fence lock in unknown
+state. For radeon, the fence lock is also the wait queue lock. This can
+cause a self deadlock when signaled() tries to make forward progress on
+the wait queue. But advancing the queue is unneeded because incorrectly
+returning false from signaled() is perfectly acceptable.
+
+Link: https://github.com/brave/brave-browser/issues/49182
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/4641
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Robert McClinton <rbmccav@gmail.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 527ba26e50ec2ca2be9c7c82f3ad42998a75d0db)
+Cc: stable@vger.kernel.org
+[ Minor conflict resolved. ]
+Signed-off-by: Li hongliang <1468888505@139.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/radeon/radeon_fence.c | 8 --------
+ 1 file changed, 8 deletions(-)
+
+--- a/drivers/gpu/drm/radeon/radeon_fence.c
++++ b/drivers/gpu/drm/radeon/radeon_fence.c
+@@ -362,14 +362,6 @@ static bool radeon_fence_is_signaled(str
+ return true;
+ }
+
+- if (down_read_trylock(&rdev->exclusive_lock)) {
+- radeon_fence_process(rdev, ring);
+- up_read(&rdev->exclusive_lock);
+-
+- if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
+- return true;
+- }
+- }
+ return false;
+ }
+
--- /dev/null
+From stable+bounces-211512-greg=kroah.com@vger.kernel.org Mon Jan 26 07:21:17 2026
+From: Li hongliang <1468888505@139.com>
+Date: Mon, 26 Jan 2026 14:19:33 +0800
+Subject: fs/ntfs3: Initialize allocated memory before use
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org, kubik.bartlomiej@gmail.com
+Cc: patches@lists.linux.dev, linux-kernel@vger.kernel.org, almaz.alexandrovich@paragon-software.com, ntfs3@lists.linux.dev, khalid@kernel.org
+Message-ID: <20260126061933.1206836-1-1468888505@139.com>
+
+From: Bartlomiej Kubik <kubik.bartlomiej@gmail.com>
+
+[ Upstream commit a8a3ca23bbd9d849308a7921a049330dc6c91398 ]
+
+KMSAN reports: Multiple uninitialized values detected:
+
+- KMSAN: uninit-value in ntfs_read_hdr (3)
+- KMSAN: uninit-value in bcmp (3)
+
+Memory is allocated by __getname(), which is a wrapper for
+kmem_cache_alloc(). This memory is used before being properly
+cleared. Change kmem_cache_alloc() to kmem_cache_zalloc() to
+properly allocate and clear memory before use.
+
+Fixes: 82cae269cfa9 ("fs/ntfs3: Add initialization of super block")
+Fixes: 78ab59fee07f ("fs/ntfs3: Rework file operations")
+Tested-by: syzbot+332bd4e9d148f11a87dc@syzkaller.appspotmail.com
+Reported-by: syzbot+332bd4e9d148f11a87dc@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=332bd4e9d148f11a87dc
+
+Fixes: 82cae269cfa9 ("fs/ntfs3: Add initialization of super block")
+Fixes: 78ab59fee07f ("fs/ntfs3: Rework file operations")
+Tested-by: syzbot+0399100e525dd9696764@syzkaller.appspotmail.com
+Reported-by: syzbot+0399100e525dd9696764@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=0399100e525dd9696764
+
+Reviewed-by: Khalid Aziz <khalid@kernel.org>
+Signed-off-by: Bartlomiej Kubik <kubik.bartlomiej@gmail.com>
+Signed-off-by: Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
+Signed-off-by: Li hongliang <1468888505@139.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ntfs3/inode.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/fs/ntfs3/inode.c
++++ b/fs/ntfs3/inode.c
+@@ -1294,7 +1294,7 @@ struct inode *ntfs_create_inode(struct u
+ fa |= FILE_ATTRIBUTE_READONLY;
+
+ /* Allocate PATH_MAX bytes. */
+- new_de = __getname();
++ new_de = kmem_cache_zalloc(names_cachep, GFP_KERNEL);
+ if (!new_de) {
+ err = -ENOMEM;
+ goto out1;
+@@ -1698,10 +1698,9 @@ int ntfs_link_inode(struct inode *inode,
+ struct NTFS_DE *de;
+
+ /* Allocate PATH_MAX bytes. */
+- de = __getname();
++ de = kmem_cache_zalloc(names_cachep, GFP_KERNEL);
+ if (!de)
+ return -ENOMEM;
+- memset(de, 0, PATH_MAX);
+
+ /* Mark rw ntfs as dirty. It will be cleared at umount. */
+ ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
+@@ -1737,7 +1736,7 @@ int ntfs_unlink_inode(struct inode *dir,
+ return -EINVAL;
+
+ /* Allocate PATH_MAX bytes. */
+- de = __getname();
++ de = kmem_cache_zalloc(names_cachep, GFP_KERNEL);
+ if (!de)
+ return -ENOMEM;
+
--- /dev/null
+From stable+bounces-211917-greg=kroah.com@vger.kernel.org Wed Jan 28 04:18:27 2026
+From: Rahul Sharma <black.hawk@163.com>
+Date: Wed, 28 Jan 2026 11:17:52 +0800
+Subject: gfs2: Fix NULL pointer dereference in gfs2_log_flush
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org
+Cc: Andreas Gruenbacher <agruenba@redhat.com>, Rahul Sharma <black.hawk@163.com>
+Message-ID: <20260128031752.222018-1-black.hawk@163.com>
+
+From: Andreas Gruenbacher <agruenba@redhat.com>
+
+[ Upstream commit 35264909e9d1973ab9aaa2a1b07cda70f12bb828 ]
+
+In gfs2_jindex_free(), set sdp->sd_jdesc to NULL under the log flush
+lock to provide exclusion against gfs2_log_flush().
+
+In gfs2_log_flush(), check if sdp->sd_jdesc is non-NULL before
+dereferencing it. Otherwise, we could run into a NULL pointer
+dereference when outstanding glock work races with an unmount
+(glock_work_func -> run_queue -> do_xmote -> inode_go_sync ->
+gfs2_log_flush).
+
+Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
+[ The context change is due to the commit 4d927b03a688
+("gfs2: Rename gfs2_withdrawn to gfs2_withdrawing_or_withdrawn") in v6.8
+which is irrelevant to the logic of this patch. ]
+Signed-off-by: Rahul Sharma <black.hawk@163.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/gfs2/log.c | 3 ++-
+ fs/gfs2/super.c | 4 ++++
+ 2 files changed, 6 insertions(+), 1 deletion(-)
+
+--- a/fs/gfs2/log.c
++++ b/fs/gfs2/log.c
+@@ -1102,7 +1102,8 @@ repeat:
+ lops_before_commit(sdp, tr);
+ if (gfs2_withdrawn(sdp))
+ goto out_withdraw;
+- gfs2_log_submit_bio(&sdp->sd_jdesc->jd_log_bio, REQ_OP_WRITE);
++ if (sdp->sd_jdesc)
++ gfs2_log_submit_bio(&sdp->sd_jdesc->jd_log_bio, REQ_OP_WRITE);
+ if (gfs2_withdrawn(sdp))
+ goto out_withdraw;
+
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -67,9 +67,13 @@ void gfs2_jindex_free(struct gfs2_sbd *s
+ sdp->sd_journals = 0;
+ spin_unlock(&sdp->sd_jindex_spin);
+
++ down_write(&sdp->sd_log_flush_lock);
+ sdp->sd_jdesc = NULL;
++ up_write(&sdp->sd_log_flush_lock);
++
+ while (!list_empty(&list)) {
+ jd = list_first_entry(&list, struct gfs2_jdesc, jd_list);
++ BUG_ON(jd->jd_log_bio);
+ gfs2_free_journal_extents(jd);
+ list_del(&jd->jd_list);
+ iput(jd->jd_inode);
--- /dev/null
+From 681739313@139.com Fri Jan 23 07:33:06 2026
+From: Rajani Kantha <681739313@139.com>
+Date: Fri, 23 Jan 2026 14:33:00 +0800
+Subject: iomap: Fix possible overflow condition in iomap_write_delalloc_scan
+To: gregkh@linuxfoundation.org, ritesh.list@gmail.com, stable@vger.kernel.org
+Cc: djwong@kernel.org, hch@infradead.org, linux-fsdevel@vger.kernel.org, linux-kernel@vger.kernel.org, linux-xfs@vger.kernel.org, patches@lists.linux.dev, willy@infradead.org
+Message-ID: <20260123063300.3820420-1-681739313@139.com>
+
+From: "Ritesh Harjani (IBM)" <ritesh.list@gmail.com>
+
+[ Upstream commit eee2d2e6ea5550118170dbd5bb1316ceb38455fb ]
+
+folio_next_index() returns an unsigned long value which left shifted
+by PAGE_SHIFT could possibly cause an overflow on 32-bit system. Instead
+use folio_pos(folio) + folio_size(folio), which does this correctly.
+
+Suggested-by: Matthew Wilcox <willy@infradead.org>
+Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
+Reviewed-by: Darrick J. Wong <djwong@kernel.org>
+Signed-off-by: Rajani Kantha <681739313@139.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/iomap/buffered-io.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/iomap/buffered-io.c
++++ b/fs/iomap/buffered-io.c
+@@ -903,7 +903,7 @@ static int iomap_write_delalloc_scan(str
+ * the end of this data range, not the end of the folio.
+ */
+ *punch_start_byte = min_t(loff_t, end_byte,
+- folio_next_index(folio) << PAGE_SHIFT);
++ folio_pos(folio) + folio_size(folio));
+ }
+
+ /* move offset to start of next folio in range */
--- /dev/null
+From stable+bounces-213018-greg=kroah.com@vger.kernel.org Mon Feb 2 04:17:53 2026
+From: Li hongliang <1468888505@139.com>
+Date: Mon, 2 Feb 2026 11:17:11 +0800
+Subject: ksmbd: Fix race condition in RPC handle list access
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org, ysk@kzalloc.com
+Cc: patches@lists.linux.dev, linux-kernel@vger.kernel.org, linkinjeon@kernel.org, sfrench@samba.org, senozhatsky@chromium.org, tom@talpey.com, akendo@akendo.eu, set_pte_at@outlook.com, linux-cifs@vger.kernel.org, stfrench@microsoft.com
+Message-ID: <20260202031711.515125-1-1468888505@139.com>
+
+From: Yunseong Kim <ysk@kzalloc.com>
+
+[ Upstream commit 305853cce379407090a73b38c5de5ba748893aee ]
+
+The 'sess->rpc_handle_list' XArray manages RPC handles within a ksmbd
+session. Access to this list is intended to be protected by
+'sess->rpc_lock' (an rw_semaphore). However, the locking implementation was
+flawed, leading to potential race conditions.
+
+In ksmbd_session_rpc_open(), the code incorrectly acquired only a read lock
+before calling xa_store() and xa_erase(). Since these operations modify
+the XArray structure, a write lock is required to ensure exclusive access
+and prevent data corruption from concurrent modifications.
+
+Furthermore, ksmbd_session_rpc_method() accessed the list using xa_load()
+without holding any lock at all. This could lead to reading inconsistent
+data or a potential use-after-free if an entry is concurrently removed and
+the pointer is dereferenced.
+
+Fix these issues by:
+1. Using down_write() and up_write() in ksmbd_session_rpc_open()
+ to ensure exclusive access during XArray modification, and ensuring
+ the lock is correctly released on error paths.
+2. Adding down_read() and up_read() in ksmbd_session_rpc_method()
+ to safely protect the lookup.
+
+Fixes: a1f46c99d9ea ("ksmbd: fix use-after-free in ksmbd_session_rpc_open")
+Fixes: b685757c7b08 ("ksmbd: Implements sess->rpc_handle_list as xarray")
+Cc: stable@vger.kernel.org
+Signed-off-by: Yunseong Kim <ysk@kzalloc.com>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+[ Minor conflict resolved. ]
+Signed-off-by: Li hongliang <1468888505@139.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/mgmt/user_session.c | 26 +++++++++++++++++---------
+ 1 file changed, 17 insertions(+), 9 deletions(-)
+
+--- a/fs/smb/server/mgmt/user_session.c
++++ b/fs/smb/server/mgmt/user_session.c
+@@ -104,29 +104,32 @@ int ksmbd_session_rpc_open(struct ksmbd_
+ if (!entry)
+ return -ENOMEM;
+
+- down_read(&sess->rpc_lock);
+ entry->method = method;
+ entry->id = id = ksmbd_ipc_id_alloc();
+ if (id < 0)
+ goto free_entry;
++
++ down_write(&sess->rpc_lock);
+ old = xa_store(&sess->rpc_handle_list, id, entry, GFP_KERNEL);
+- if (xa_is_err(old))
++ if (xa_is_err(old)) {
++ up_write(&sess->rpc_lock);
+ goto free_id;
++ }
+
+ resp = ksmbd_rpc_open(sess, id);
+- if (!resp)
+- goto erase_xa;
++ if (!resp) {
++ xa_erase(&sess->rpc_handle_list, entry->id);
++ up_write(&sess->rpc_lock);
++ goto free_id;
++ }
+
+- up_read(&sess->rpc_lock);
++ up_write(&sess->rpc_lock);
+ kvfree(resp);
+ return id;
+-erase_xa:
+- xa_erase(&sess->rpc_handle_list, entry->id);
+ free_id:
+ ksmbd_rpc_id_free(entry->id);
+ free_entry:
+ kfree(entry);
+- up_read(&sess->rpc_lock);
+ return -EINVAL;
+ }
+
+@@ -144,9 +147,14 @@ void ksmbd_session_rpc_close(struct ksmb
+ int ksmbd_session_rpc_method(struct ksmbd_session *sess, int id)
+ {
+ struct ksmbd_session_rpc *entry;
++ int method;
+
++ down_read(&sess->rpc_lock);
+ entry = xa_load(&sess->rpc_handle_list, id);
+- return entry ? entry->method : 0;
++ method = entry ? entry->method : 0;
++ up_read(&sess->rpc_lock);
++
++ return method;
+ }
+
+ void ksmbd_session_destroy(struct ksmbd_session *sess)
--- /dev/null
+From 1468888505@139.com Mon Feb 2 04:15:55 2026
+From: Li hongliang <1468888505@139.com>
+Date: Mon, 2 Feb 2026 11:15:50 +0800
+Subject: ksmbd: fix use-after-free in ksmbd_session_rpc_open
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org, linkinjeon@kernel.org
+Cc: patches@lists.linux.dev, linux-kernel@vger.kernel.org, norbert@doyensec.com, ysk@kzalloc.com, sfrench@samba.org, senozhatsky@chromium.org, tom@talpey.com, akendo@akendo.eu, set_pte_at@outlook.com, linux-cifs@vger.kernel.org, stfrench@microsoft.com
+Message-ID: <20260202031550.514894-1-1468888505@139.com>
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+[ Upstream commit a1f46c99d9ea411f9bf30025b912d881d36fc709 ]
+
+A UAF issue can occur due to a race condition between
+ksmbd_session_rpc_open() and __session_rpc_close().
+Add rpc_lock to the session to protect it.
+
+Cc: stable@vger.kernel.org
+Reported-by: Norbert Szetei <norbert@doyensec.com>
+Tested-by: Norbert Szetei <norbert@doyensec.com>
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+[ KSMBD_DEFAULT_GFP is introduced by commit 0066f623bce8 ("ksmbd: use __GFP_RETRY_MAYFAIL")
+ after linux-6.13. Here we still use GFP_KERNEL. ]
+Signed-off-by: Li hongliang <1468888505@139.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/mgmt/user_session.c | 20 ++++++++++++++------
+ fs/smb/server/mgmt/user_session.h | 1 +
+ 2 files changed, 15 insertions(+), 6 deletions(-)
+
+--- a/fs/smb/server/mgmt/user_session.c
++++ b/fs/smb/server/mgmt/user_session.c
+@@ -59,10 +59,12 @@ static void ksmbd_session_rpc_clear_list
+ struct ksmbd_session_rpc *entry;
+ long index;
+
++ down_write(&sess->rpc_lock);
+ xa_for_each(&sess->rpc_handle_list, index, entry) {
+ xa_erase(&sess->rpc_handle_list, index);
+ __session_rpc_close(sess, entry);
+ }
++ up_write(&sess->rpc_lock);
+
+ xa_destroy(&sess->rpc_handle_list);
+ }
+@@ -92,7 +94,7 @@ int ksmbd_session_rpc_open(struct ksmbd_
+ {
+ struct ksmbd_session_rpc *entry, *old;
+ struct ksmbd_rpc_command *resp;
+- int method;
++ int method, id;
+
+ method = __rpc_method(rpc_name);
+ if (!method)
+@@ -102,26 +104,29 @@ int ksmbd_session_rpc_open(struct ksmbd_
+ if (!entry)
+ return -ENOMEM;
+
++ down_read(&sess->rpc_lock);
+ entry->method = method;
+- entry->id = ksmbd_ipc_id_alloc();
+- if (entry->id < 0)
++ entry->id = id = ksmbd_ipc_id_alloc();
++ if (id < 0)
+ goto free_entry;
+- old = xa_store(&sess->rpc_handle_list, entry->id, entry, GFP_KERNEL);
++ old = xa_store(&sess->rpc_handle_list, id, entry, GFP_KERNEL);
+ if (xa_is_err(old))
+ goto free_id;
+
+- resp = ksmbd_rpc_open(sess, entry->id);
++ resp = ksmbd_rpc_open(sess, id);
+ if (!resp)
+ goto erase_xa;
+
++ up_read(&sess->rpc_lock);
+ kvfree(resp);
+- return entry->id;
++ return id;
+ erase_xa:
+ xa_erase(&sess->rpc_handle_list, entry->id);
+ free_id:
+ ksmbd_rpc_id_free(entry->id);
+ free_entry:
+ kfree(entry);
++ up_read(&sess->rpc_lock);
+ return -EINVAL;
+ }
+
+@@ -129,9 +134,11 @@ void ksmbd_session_rpc_close(struct ksmb
+ {
+ struct ksmbd_session_rpc *entry;
+
++ down_write(&sess->rpc_lock);
+ entry = xa_erase(&sess->rpc_handle_list, id);
+ if (entry)
+ __session_rpc_close(sess, entry);
++ up_write(&sess->rpc_lock);
+ }
+
+ int ksmbd_session_rpc_method(struct ksmbd_session *sess, int id)
+@@ -404,6 +411,7 @@ static struct ksmbd_session *__session_c
+ sess->sequence_number = 1;
+ rwlock_init(&sess->tree_conns_lock);
+ atomic_set(&sess->refcnt, 2);
++ init_rwsem(&sess->rpc_lock);
+
+ ret = __init_smb2_session(sess);
+ if (ret)
+--- a/fs/smb/server/mgmt/user_session.h
++++ b/fs/smb/server/mgmt/user_session.h
+@@ -63,6 +63,7 @@ struct ksmbd_session {
+ rwlock_t tree_conns_lock;
+
+ atomic_t refcnt;
++ struct rw_semaphore rpc_lock;
+ };
+
+ static inline int test_session_flag(struct ksmbd_session *sess, int bit)
--- /dev/null
+From stable+bounces-212841-greg=kroah.com@vger.kernel.org Fri Jan 30 07:02:45 2026
+From: Rahul Sharma <black.hawk@163.com>
+Date: Fri, 30 Jan 2026 14:01:31 +0800
+Subject: net: stmmac: make sure that ptp_rate is not 0 before configuring EST
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org, "Alexis Lothoré" <alexis.lothore@bootlin.com>, "Maxime Chevallier" <maxime.chevallier@bootlin.com>, "Jakub Kicinski" <kuba@kernel.org>, "Rahul Sharma" <black.hawk@163.com>
+Message-ID: <20260130060131.3650575-1-black.hawk@163.com>
+
+From: Alexis Lothoré <alexis.lothore@bootlin.com>
+
+[ Upstream commit cbefe2ffa7784525ec5d008ba87c7add19ec631a ]
+
+If the ptp_rate recorded earlier in the driver happens to be 0, this
+bogus value will propagate up to EST configuration, where it will
+trigger a division by 0.
+
+Prevent this division by 0 by adding the corresponding check and error
+code.
+
+Suggested-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
+Signed-off-by: Alexis Lothoré <alexis.lothore@bootlin.com>
+Fixes: 8572aec3d0dc ("net: stmmac: Add basic EST support for XGMAC")
+Link: https://patch.msgid.link/20250529-stmmac_tstamp_div-v4-2-d73340a794d5@bootlin.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ The context change is due to the commit c3f3b97238f6
+("net: stmmac: Refactor EST implementation")
+and the proper adoption is done. ]
+Signed-off-by: Rahul Sharma <black.hawk@163.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/stmicro/stmmac/dwmac5.c | 5 +++++
+ drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c | 5 +++++
+ 2 files changed, 10 insertions(+)
+
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+@@ -597,6 +597,11 @@ int dwmac5_est_configure(void __iomem *i
+ int i, ret = 0x0;
+ u32 ctrl;
+
++ if (!ptp_rate) {
++ pr_warn("Dwmac5: Invalid PTP rate");
++ return -EINVAL;
++ }
++
+ ret |= dwmac5_est_write(ioaddr, BTR_LOW, cfg->btr[0], false);
+ ret |= dwmac5_est_write(ioaddr, BTR_HIGH, cfg->btr[1], false);
+ ret |= dwmac5_est_write(ioaddr, TER, cfg->ter, false);
+--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+@@ -1497,6 +1497,11 @@ static int dwxgmac3_est_configure(void _
+ int i, ret = 0x0;
+ u32 ctrl;
+
++ if (!ptp_rate) {
++ pr_warn("Dwxgmac2: Invalid PTP rate");
++ return -EINVAL;
++ }
++
+ ret |= dwxgmac3_est_write(ioaddr, XGMAC_BTR_LOW, cfg->btr[0], false);
+ ret |= dwxgmac3_est_write(ioaddr, XGMAC_BTR_HIGH, cfg->btr[1], false);
+ ret |= dwxgmac3_est_write(ioaddr, XGMAC_TER, cfg->ter, false);
--- /dev/null
+From stable+bounces-211937-greg=kroah.com@vger.kernel.org Wed Jan 28 09:34:31 2026
+From: Rahul Sharma <black.hawk@163.com>
+Date: Wed, 28 Jan 2026 16:33:34 +0800
+Subject: NFSD: fix race between nfsd registration and exports_proc
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org, Maninder Singh <maninder1.s@samsung.com>, Shubham Rana <s9.rana@samsung.com>, Jeff Layton <jlayton@kernel.org>, Chuck Lever <chuck.lever@oracle.com>, Rahul Sharma <black.hawk@163.com>
+Message-ID: <20260128083334.2450566-1-black.hawk@163.com>
+
+From: Maninder Singh <maninder1.s@samsung.com>
+
+[ Upstream commit f7fb730cac9aafda8b9813b55d04e28a9664d17c ]
+
+As of now nfsd calls create_proc_exports_entry() at start of init_nfsd
+and cleanup by remove_proc_entry() at last of exit_nfsd.
+
+Which causes kernel OOPs if there is race between below 2 operations:
+(i) exportfs -r
+(ii) mount -t nfsd none /proc/fs/nfsd
+
+for 5.4 kernel ARM64:
+
+CPU 1:
+el1_irq+0xbc/0x180
+arch_counter_get_cntvct+0x14/0x18
+running_clock+0xc/0x18
+preempt_count_add+0x88/0x110
+prep_new_page+0xb0/0x220
+get_page_from_freelist+0x2d8/0x1778
+__alloc_pages_nodemask+0x15c/0xef0
+__vmalloc_node_range+0x28c/0x478
+__vmalloc_node_flags_caller+0x8c/0xb0
+kvmalloc_node+0x88/0xe0
+nfsd_init_net+0x6c/0x108 [nfsd]
+ops_init+0x44/0x170
+register_pernet_operations+0x114/0x270
+register_pernet_subsys+0x34/0x50
+init_nfsd+0xa8/0x718 [nfsd]
+do_one_initcall+0x54/0x2e0
+
+CPU 2 :
+Unable to handle kernel NULL pointer dereference at virtual address 0000000000000010
+
+PC is at : exports_net_open+0x50/0x68 [nfsd]
+
+Call trace:
+exports_net_open+0x50/0x68 [nfsd]
+exports_proc_open+0x2c/0x38 [nfsd]
+proc_reg_open+0xb8/0x198
+do_dentry_open+0x1c4/0x418
+vfs_open+0x38/0x48
+path_openat+0x28c/0xf18
+do_filp_open+0x70/0xe8
+do_sys_open+0x154/0x248
+
+Sometimes it crashes at exports_net_open() and sometimes cache_seq_next_rcu().
+
+and same is happening on latest 6.14 kernel as well:
+
+[ 0.000000] Linux version 6.14.0-rc5-next-20250304-dirty
+...
+[ 285.455918] Unable to handle kernel paging request at virtual address 00001f4800001f48
+...
+[ 285.464902] pc : cache_seq_next_rcu+0x78/0xa4
+...
+[ 285.469695] Call trace:
+[ 285.470083] cache_seq_next_rcu+0x78/0xa4 (P)
+[ 285.470488] seq_read+0xe0/0x11c
+[ 285.470675] proc_reg_read+0x9c/0xf0
+[ 285.470874] vfs_read+0xc4/0x2fc
+[ 285.471057] ksys_read+0x6c/0xf4
+[ 285.471231] __arm64_sys_read+0x1c/0x28
+[ 285.471428] invoke_syscall+0x44/0x100
+[ 285.471633] el0_svc_common.constprop.0+0x40/0xe0
+[ 285.471870] do_el0_svc_compat+0x1c/0x34
+[ 285.472073] el0_svc_compat+0x2c/0x80
+[ 285.472265] el0t_32_sync_handler+0x90/0x140
+[ 285.472473] el0t_32_sync+0x19c/0x1a0
+[ 285.472887] Code: f9400885 93407c23 937d7c27 11000421 (f86378a3)
+[ 285.473422] ---[ end trace 0000000000000000 ]---
+
+It reproduced simply with below script:
+while [ 1 ]
+do
+/exportfs -r
+done &
+
+while [ 1 ]
+do
+insmod /nfsd.ko
+mount -t nfsd none /proc/fs/nfsd
+umount /proc/fs/nfsd
+rmmod nfsd
+done &
+
+So exporting interfaces to user space shall be done at last and
+cleanup at first place.
+
+With change there is no Kernel OOPs.
+
+Co-developed-by: Shubham Rana <s9.rana@samsung.com>
+Signed-off-by: Shubham Rana <s9.rana@samsung.com>
+Signed-off-by: Maninder Singh <maninder1.s@samsung.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+[ The context change is due to the commit bd9d6a3efa97
+("NFSD: add rpc_status netlink support") in v6.7
+and the proper adoption is done. ]
+Signed-off-by: Rahul Sharma <black.hawk@163.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/nfsctl.c | 17 ++++++++---------
+ 1 file changed, 8 insertions(+), 9 deletions(-)
+
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -1511,12 +1511,9 @@ static int __init init_nfsd(void)
+ if (retval)
+ goto out_free_pnfs;
+ nfsd_lockd_init(); /* lockd->nfsd callbacks */
+- retval = create_proc_exports_entry();
+- if (retval)
+- goto out_free_lockd;
+ retval = register_pernet_subsys(&nfsd_net_ops);
+ if (retval < 0)
+- goto out_free_exports;
++ goto out_free_lockd;
+ retval = register_cld_notifier();
+ if (retval)
+ goto out_free_subsys;
+@@ -1525,17 +1522,19 @@ static int __init init_nfsd(void)
+ goto out_free_cld;
+ retval = register_filesystem(&nfsd_fs_type);
+ if (retval)
++ goto out_free_nfsd4;
++ retval = create_proc_exports_entry();
++ if (retval)
+ goto out_free_all;
+ return 0;
+ out_free_all:
++ unregister_filesystem(&nfsd_fs_type);
++out_free_nfsd4:
+ nfsd4_destroy_laundry_wq();
+ out_free_cld:
+ unregister_cld_notifier();
+ out_free_subsys:
+ unregister_pernet_subsys(&nfsd_net_ops);
+-out_free_exports:
+- remove_proc_entry("fs/nfs/exports", NULL);
+- remove_proc_entry("fs/nfs", NULL);
+ out_free_lockd:
+ nfsd_lockd_shutdown();
+ nfsd_drc_slab_free();
+@@ -1548,13 +1547,13 @@ out_free_slabs:
+
+ static void __exit exit_nfsd(void)
+ {
++ remove_proc_entry("fs/nfs/exports", NULL);
++ remove_proc_entry("fs/nfs", NULL);
+ unregister_filesystem(&nfsd_fs_type);
+ nfsd4_destroy_laundry_wq();
+ unregister_cld_notifier();
+ unregister_pernet_subsys(&nfsd_net_ops);
+ nfsd_drc_slab_free();
+- remove_proc_entry("fs/nfs/exports", NULL);
+- remove_proc_entry("fs/nfs", NULL);
+ nfsd_lockd_shutdown();
+ nfsd4_free_slabs();
+ nfsd4_exit_pnfs();
--- /dev/null
+From stable+bounces-209984-greg=kroah.com@vger.kernel.org Fri Jan 16 04:40:33 2026
+From: Harry Yoo <harry.yoo@oracle.com>
+Date: Fri, 16 Jan 2026 12:38:38 +0900
+Subject: Revert "mm/mprotect: delete pmd_none_or_clear_bad_unless_trans_huge()"
+To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>, stable@vger.kernel.org
+Cc: Liam.Howlett@oracle.com, akpm@linux-foundation.org, david@kernel.org, hughd@google.com, jannh@google.com, linux-mm@kvack.org, lorenzo.stoakes@oracle.com, pfalcato@suse.de, vbabka@suse.cz, Harry Yoo <harry.yoo@oracle.com>
+Message-ID: <20260116033838.20253-1-harry.yoo@oracle.com>
+
+From: Harry Yoo <harry.yoo@oracle.com>
+
+This reverts commit 91750c8a4be42d73b6810a1c35d73c8a3cd0b481 which is
+commit 670ddd8cdcbd1d07a4571266ae3517f821728c3a upstream.
+
+While the commit fixes a race condition between NUMA balancing and THP
+migration, it causes a NULL-pointer-deref when the pmd temporarily
+transitions from pmd_trans_huge() to pmd_none(). Verifying whether the
+pmd value has changed under page table lock does not prevent the crash,
+as it occurs when acquiring the lock.
+
+Since the original issue addressed by the commit is quite rare and
+non-fatal, revert the commit. A better backport solution that more
+closely matches the upstream semantics will be provided as a follow-up.
+
+Signed-off-by: Harry Yoo <harry.yoo@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/mprotect.c | 101 +++++++++++++++++++++++++++++++++-------------------------
+ 1 file changed, 58 insertions(+), 43 deletions(-)
+
+--- a/mm/mprotect.c
++++ b/mm/mprotect.c
+@@ -73,12 +73,10 @@ static inline bool can_change_pte_writab
+ }
+
+ static long change_pte_range(struct mmu_gather *tlb,
+- struct vm_area_struct *vma, pmd_t *pmd, pmd_t pmd_old,
+- unsigned long addr, unsigned long end, pgprot_t newprot,
+- unsigned long cp_flags)
++ struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
++ unsigned long end, pgprot_t newprot, unsigned long cp_flags)
+ {
+ pte_t *pte, oldpte;
+- pmd_t _pmd;
+ spinlock_t *ptl;
+ long pages = 0;
+ int target_node = NUMA_NO_NODE;
+@@ -88,15 +86,21 @@ static long change_pte_range(struct mmu_
+
+ tlb_change_page_size(tlb, PAGE_SIZE);
+
++ /*
++ * Can be called with only the mmap_lock for reading by
++ * prot_numa so we must check the pmd isn't constantly
++ * changing from under us from pmd_none to pmd_trans_huge
++ * and/or the other way around.
++ */
++ if (pmd_trans_unstable(pmd))
++ return 0;
++
++ /*
++ * The pmd points to a regular pte so the pmd can't change
++ * from under us even if the mmap_lock is only hold for
++ * reading.
++ */
+ pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+- /* Make sure pmd didn't change after acquiring ptl */
+- _pmd = pmd_read_atomic(pmd);
+- /* See pmd_none_or_trans_huge_or_clear_bad for info on barrier */
+- barrier();
+- if (!pmd_same(pmd_old, _pmd)) {
+- pte_unmap_unlock(pte, ptl);
+- return -EAGAIN;
+- }
+
+ /* Get target node for single threaded private VMAs */
+ if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
+@@ -284,6 +288,31 @@ static long change_pte_range(struct mmu_
+ return pages;
+ }
+
++/*
++ * Used when setting automatic NUMA hinting protection where it is
++ * critical that a numa hinting PMD is not confused with a bad PMD.
++ */
++static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd)
++{
++ pmd_t pmdval = pmd_read_atomic(pmd);
++
++ /* See pmd_none_or_trans_huge_or_clear_bad for info on barrier */
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
++ barrier();
++#endif
++
++ if (pmd_none(pmdval))
++ return 1;
++ if (pmd_trans_huge(pmdval))
++ return 0;
++ if (unlikely(pmd_bad(pmdval))) {
++ pmd_clear_bad(pmd);
++ return 1;
++ }
++
++ return 0;
++}
++
+ /* Return true if we're uffd wr-protecting file-backed memory, or false */
+ static inline bool
+ uffd_wp_protect_file(struct vm_area_struct *vma, unsigned long cp_flags)
+@@ -331,34 +360,22 @@ static inline long change_pmd_range(stru
+
+ pmd = pmd_offset(pud, addr);
+ do {
+- long ret;
+- pmd_t _pmd;
+-again:
++ long this_pages;
++
+ next = pmd_addr_end(addr, end);
+- _pmd = pmd_read_atomic(pmd);
+- /* See pmd_none_or_trans_huge_or_clear_bad for info on barrier */
+-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+- barrier();
+-#endif
+
+ change_pmd_prepare(vma, pmd, cp_flags);
+ /*
+ * Automatic NUMA balancing walks the tables with mmap_lock
+ * held for read. It's possible a parallel update to occur
+- * between pmd_trans_huge(), is_swap_pmd(), and
+- * a pmd_none_or_clear_bad() check leading to a false positive
+- * and clearing. Hence, it's necessary to atomically read
+- * the PMD value for all the checks.
++ * between pmd_trans_huge() and a pmd_none_or_clear_bad()
++ * check leading to a false positive and clearing.
++ * Hence, it's necessary to atomically read the PMD value
++ * for all the checks.
+ */
+- if (!is_swap_pmd(_pmd) && !pmd_devmap(_pmd) && !pmd_trans_huge(_pmd)) {
+- if (pmd_none(_pmd))
+- goto next;
+-
+- if (pmd_bad(_pmd)) {
+- pmd_clear_bad(pmd);
+- goto next;
+- }
+- }
++ if (!is_swap_pmd(*pmd) && !pmd_devmap(*pmd) &&
++ pmd_none_or_clear_bad_unless_trans_huge(pmd))
++ goto next;
+
+ /* invoke the mmu notifier if the pmd is populated */
+ if (!range.start) {
+@@ -368,7 +385,7 @@ again:
+ mmu_notifier_invalidate_range_start(&range);
+ }
+
+- if (is_swap_pmd(_pmd) || pmd_trans_huge(_pmd) || pmd_devmap(_pmd)) {
++ if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
+ if ((next - addr != HPAGE_PMD_SIZE) ||
+ uffd_wp_protect_file(vma, cp_flags)) {
+ __split_huge_pmd(vma, pmd, addr, false, NULL);
+@@ -383,11 +400,11 @@ again:
+ * change_huge_pmd() does not defer TLB flushes,
+ * so no need to propagate the tlb argument.
+ */
+- ret = change_huge_pmd(tlb, vma, pmd,
+- addr, newprot, cp_flags);
++ int nr_ptes = change_huge_pmd(tlb, vma, pmd,
++ addr, newprot, cp_flags);
+
+- if (ret) {
+- if (ret == HPAGE_PMD_NR) {
++ if (nr_ptes) {
++ if (nr_ptes == HPAGE_PMD_NR) {
+ pages += HPAGE_PMD_NR;
+ nr_huge_updates++;
+ }
+@@ -398,11 +415,9 @@ again:
+ }
+ /* fall through, the trans huge pmd just split */
+ }
+- ret = change_pte_range(tlb, vma, pmd, _pmd, addr, next,
+- newprot, cp_flags);
+- if (ret < 0)
+- goto again;
+- pages += ret;
++ this_pages = change_pte_range(tlb, vma, pmd, addr, next,
++ newprot, cp_flags);
++ pages += this_pages;
+ next:
+ cond_resched();
+ } while (pmd++, addr = next, addr != end);
--- /dev/null
+From black.hawk@163.com Tue Jan 27 06:17:16 2026
+From: Rahul Sharma <black.hawk@163.com>
+Date: Tue, 27 Jan 2026 13:17:07 +0800
+Subject: Revert "net/mlx5: Block entering switchdev mode with ns inconsistency"
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org, Gavin Li <gavinl@nvidia.com>, Jiri Pirko <jiri@nvidia.com>, Saeed Mahameed <saeedm@nvidia.com>, Rahul Sharma <black.hawk@163.com>
+Message-ID: <20260127051707.2439076-1-black.hawk@163.com>
+
+From: Gavin Li <gavinl@nvidia.com>
+
+[ Upstream commit 8deeefb24786ea7950b37bde4516b286c877db00 ]
+
+This reverts commit 662404b24a4c4d839839ed25e3097571f5938b9b.
+The revert is required due to the suspicion it is not good for anything
+and cause crash.
+
+Fixes: 662404b24a4c ("net/mlx5e: Block entering switchdev mode with ns inconsistency")
+Signed-off-by: Gavin Li <gavinl@nvidia.com>
+Reviewed-by: Jiri Pirko <jiri@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+[ The context change is due to the commit e25373416678
+("net/mlx5e: Rewrite IPsec vs. TC block interface") in v6.6
+which is irrelevant to the logic of this patch. ]
+Signed-off-by: Rahul Sharma <black.hawk@163.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 19 -------------
+ 1 file changed, 19 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -3493,18 +3493,6 @@ static int esw_inline_mode_to_devlink(u8
+ return 0;
+ }
+
+-static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink)
+-{
+- struct net *devl_net, *netdev_net;
+- struct mlx5_eswitch *esw;
+-
+- esw = mlx5_devlink_eswitch_get(devlink);
+- netdev_net = dev_net(esw->dev->mlx5e_res.uplink_netdev);
+- devl_net = devlink_net(devlink);
+-
+- return net_eq(devl_net, netdev_net);
+-}
+-
+ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
+ {
+@@ -3519,13 +3507,6 @@ int mlx5_devlink_eswitch_mode_set(struct
+ if (esw_mode_from_devlink(mode, &mlx5_mode))
+ return -EINVAL;
+
+- if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV &&
+- !esw_offloads_devlink_ns_eq_netdev_ns(devlink)) {
+- NL_SET_ERR_MSG_MOD(extack,
+- "Can't change E-Switch mode to switchdev when netdev net namespace has diverged from the devlink's.");
+- return -EPERM;
+- }
+-
+ mlx5_lag_disable_change(esw->dev);
+ err = mlx5_esw_try_lock(esw);
+ if (err < 0) {
--- /dev/null
+From stable+bounces-212845-greg=kroah.com@vger.kernel.org Fri Jan 30 08:11:03 2026
+From: Li hongliang <1468888505@139.com>
+Date: Fri, 30 Jan 2026 15:10:38 +0800
+Subject: sctp: linearize cloned gso packets in sctp_rcv
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org, lucien.xin@gmail.com
+Cc: patches@lists.linux.dev, linux-kernel@vger.kernel.org, vyasevich@gmail.com, nhorman@tuxdriver.com, marcelo.leitner@gmail.com, davem@davemloft.net, edumazet@google.com, kuba@kernel.org, pabeni@redhat.com, linux-sctp@vger.kernel.org, netdev@vger.kernel.org
+Message-ID: <20260130071038.3931297-1-1468888505@139.com>
+
+From: Xin Long <lucien.xin@gmail.com>
+
+[ Upstream commit fd60d8a086191fe33c2d719732d2482052fa6805 ]
+
+A cloned head skb still shares these frag skbs in fraglist with the
+original head skb. It's not safe to access these frag skbs.
+
+syzbot reported two use-of-uninitialized-memory bugs caused by this:
+
+ BUG: KMSAN: uninit-value in sctp_inq_pop+0x15b7/0x1920 net/sctp/inqueue.c:211
+ sctp_inq_pop+0x15b7/0x1920 net/sctp/inqueue.c:211
+ sctp_assoc_bh_rcv+0x1a7/0xc50 net/sctp/associola.c:998
+ sctp_inq_push+0x2ef/0x380 net/sctp/inqueue.c:88
+ sctp_backlog_rcv+0x397/0xdb0 net/sctp/input.c:331
+ sk_backlog_rcv+0x13b/0x420 include/net/sock.h:1122
+ __release_sock+0x1da/0x330 net/core/sock.c:3106
+ release_sock+0x6b/0x250 net/core/sock.c:3660
+ sctp_wait_for_connect+0x487/0x820 net/sctp/socket.c:9360
+ sctp_sendmsg_to_asoc+0x1ec1/0x1f00 net/sctp/socket.c:1885
+ sctp_sendmsg+0x32b9/0x4a80 net/sctp/socket.c:2031
+ inet_sendmsg+0x25a/0x280 net/ipv4/af_inet.c:851
+ sock_sendmsg_nosec net/socket.c:718 [inline]
+
+and
+
+ BUG: KMSAN: uninit-value in sctp_assoc_bh_rcv+0x34e/0xbc0 net/sctp/associola.c:987
+ sctp_assoc_bh_rcv+0x34e/0xbc0 net/sctp/associola.c:987
+ sctp_inq_push+0x2a3/0x350 net/sctp/inqueue.c:88
+ sctp_backlog_rcv+0x3c7/0xda0 net/sctp/input.c:331
+ sk_backlog_rcv+0x142/0x420 include/net/sock.h:1148
+ __release_sock+0x1d3/0x330 net/core/sock.c:3213
+ release_sock+0x6b/0x270 net/core/sock.c:3767
+ sctp_wait_for_connect+0x458/0x820 net/sctp/socket.c:9367
+ sctp_sendmsg_to_asoc+0x223a/0x2260 net/sctp/socket.c:1886
+ sctp_sendmsg+0x3910/0x49f0 net/sctp/socket.c:2032
+ inet_sendmsg+0x269/0x2a0 net/ipv4/af_inet.c:851
+ sock_sendmsg_nosec net/socket.c:712 [inline]
+
+This patch fixes it by linearizing cloned gso packets in sctp_rcv().
+
+Fixes: 90017accff61 ("sctp: Add GSO support")
+Reported-by: syzbot+773e51afe420baaf0e2b@syzkaller.appspotmail.com
+Reported-by: syzbot+70a42f45e76bede082be@syzkaller.appspotmail.com
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Reviewed-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Link: https://patch.msgid.link/dd7dc337b99876d4132d0961f776913719f7d225.1754595611.git.lucien.xin@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Li hongliang <1468888505@139.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+This patch is lost/missing, as it has already been added
+into stable branches less than and greater than 6.1. Previous patch in
+https://lore.kernel.org/stable/20251022075549.195012-1-kovalev@altlinux.org/ is still not added.
+So I resent it again.
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sctp/input.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/sctp/input.c
++++ b/net/sctp/input.c
+@@ -114,7 +114,7 @@ int sctp_rcv(struct sk_buff *skb)
+ * it's better to just linearize it otherwise crc computing
+ * takes longer.
+ */
+- if ((!is_gso && skb_linearize(skb)) ||
++ if (((!is_gso || skb_cloned(skb)) && skb_linearize(skb)) ||
+ !pskb_may_pull(skb, sizeof(struct sctphdr)))
+ goto discard_it;
+
wifi-mac80211-use-wiphy-work-for-sdata-work.patch
wifi-mac80211-move-tdls-work-to-wiphy-work.patch
genirq-irq_sim-initialize-work-context-pointers-properly.patch
+drm-amdkfd-fix-a-memory-leak-in-device_queue_manager_init.patch
+can-esd_usb-esd_usb_read_bulk_callback-fix-urb-memory-leak.patch
+revert-mm-mprotect-delete-pmd_none_or_clear_bad_unless_trans_huge.patch
+drm-amd-display-check-dce_hwseq-before-dereferencing-it.patch
+crypto-qat-flush-misc-workqueue-during-device-shutdown.patch
+iomap-fix-possible-overflow-condition-in-iomap_write_delalloc_scan.patch
+fs-ntfs3-initialize-allocated-memory-before-use.patch
+blk-cgroup-reinit-blkg_iostat_set-after-clearing-in-blkcg_reset_stats.patch
+revert-net-mlx5-block-entering-switchdev-mode-with-ns-inconsistency.patch
+gfs2-fix-null-pointer-dereference-in-gfs2_log_flush.patch
+nfsd-fix-race-between-nfsd-registration-and-exports_proc.patch
+usbnet-fix-using-smp_processor_id-in-preemptible-code-warnings.patch
+drm-amdgpu-replace-mutex-with-spinlock-for-rlcg-register-access-to-avoid-priority-inversion-in-sriov.patch
+net-stmmac-make-sure-that-ptp_rate-is-not-0-before-configuring-est.patch
+sctp-linearize-cloned-gso-packets-in-sctp_rcv.patch
+ksmbd-fix-use-after-free-in-ksmbd_session_rpc_open.patch
+ksmbd-fix-race-condition-in-rpc-handle-list-access.patch
+vhost-scsi-fix-handling-of-multiple-calls-to-vhost_scsi_set_endpoint.patch
+drm-radeon-delete-radeon_fence_process-in-is_signaled-no-deadlock.patch
--- /dev/null
+From stable+bounces-212747-greg=kroah.com@vger.kernel.org Thu Jan 29 09:00:19 2026
+From: Rahul Sharma <black.hawk@163.com>
+Date: Thu, 29 Jan 2026 15:58:54 +0800
+Subject: usbnet: Fix using smp_processor_id() in preemptible code warnings
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org, Zqiang <qiang.zhang@linux.dev>, Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>, Rahul Sharma <black.hawk@163.com>
+Message-ID: <20260129075854.2945271-1-black.hawk@163.com>
+
+From: Zqiang <qiang.zhang@linux.dev>
+
+[ Upstream commit 327cd4b68b4398b6c24f10eb2b2533ffbfc10185 ]
+
+Syzbot reported the following warning:
+
+BUG: using smp_processor_id() in preemptible [00000000] code: dhcpcd/2879
+caller is usbnet_skb_return+0x74/0x490 drivers/net/usb/usbnet.c:331
+CPU: 1 UID: 0 PID: 2879 Comm: dhcpcd Not tainted 6.15.0-rc4-syzkaller-00098-g615dca38c2ea #0 PREEMPT(voluntary)
+Call Trace:
+ <TASK>
+ __dump_stack lib/dump_stack.c:94 [inline]
+ dump_stack_lvl+0x16c/0x1f0 lib/dump_stack.c:120
+ check_preemption_disabled+0xd0/0xe0 lib/smp_processor_id.c:49
+ usbnet_skb_return+0x74/0x490 drivers/net/usb/usbnet.c:331
+ usbnet_resume_rx+0x4b/0x170 drivers/net/usb/usbnet.c:708
+ usbnet_change_mtu+0x1be/0x220 drivers/net/usb/usbnet.c:417
+ __dev_set_mtu net/core/dev.c:9443 [inline]
+ netif_set_mtu_ext+0x369/0x5c0 net/core/dev.c:9496
+ netif_set_mtu+0xb0/0x160 net/core/dev.c:9520
+ dev_set_mtu+0xae/0x170 net/core/dev_api.c:247
+ dev_ifsioc+0xa31/0x18d0 net/core/dev_ioctl.c:572
+ dev_ioctl+0x223/0x10e0 net/core/dev_ioctl.c:821
+ sock_do_ioctl+0x19d/0x280 net/socket.c:1204
+ sock_ioctl+0x42f/0x6a0 net/socket.c:1311
+ vfs_ioctl fs/ioctl.c:51 [inline]
+ __do_sys_ioctl fs/ioctl.c:906 [inline]
+ __se_sys_ioctl fs/ioctl.c:892 [inline]
+ __x64_sys_ioctl+0x190/0x200 fs/ioctl.c:892
+ do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
+ do_syscall_64+0xcd/0x260 arch/x86/entry/syscall_64.c:94
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+
+For historical and portability reasons, the netif_rx() is usually
+run in the softirq or interrupt context, this commit therefore add
+local_bh_disable/enable() protection in the usbnet_resume_rx().
+
+Fixes: 43daa96b166c ("usbnet: Stop RX Q on MTU change")
+Link: https://syzkaller.appspot.com/bug?id=81f55dfa587ee544baaaa5a359a060512228c1e1
+Suggested-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Zqiang <qiang.zhang@linux.dev>
+Link: https://patch.msgid.link/20251011070518.7095-1-qiang.zhang@linux.dev
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+[ The context change is due to the commit 2c04d279e857
+("net: usb: Convert tasklet API to new bottom half workqueue mechanism")
+in v6.17 which is irrelevant to the logic of this patch.]
+Signed-off-by: Rahul Sharma <black.hawk@163.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/usbnet.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -704,6 +704,7 @@ void usbnet_resume_rx(struct usbnet *dev
+ struct sk_buff *skb;
+ int num = 0;
+
++ local_bh_disable();
+ clear_bit(EVENT_RX_PAUSED, &dev->flags);
+
+ while ((skb = skb_dequeue(&dev->rxq_pause)) != NULL) {
+@@ -712,6 +713,7 @@ void usbnet_resume_rx(struct usbnet *dev
+ }
+
+ tasklet_schedule(&dev->bh);
++ local_bh_enable();
+
+ netif_dbg(dev, rx_status, dev->net,
+ "paused rx queue disabled, %d skbs requeued\n", num);
--- /dev/null
+From stable+bounces-213026-greg=kroah.com@vger.kernel.org Mon Feb 2 07:47:41 2026
+From: Li hongliang <1468888505@139.com>
+Date: Mon, 2 Feb 2026 14:47:19 +0800
+Subject: vhost-scsi: Fix handling of multiple calls to vhost_scsi_set_endpoint
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org, michael.christie@oracle.com
+Cc: patches@lists.linux.dev, linux-kernel@vger.kernel.org, mst@redhat.com, jasowang@redhat.com, pbonzini@redhat.com, stefanha@redhat.com, mlombard@redhat.com, asias@redhat.com, nab@linux-iscsi.org, virtualization@lists.linux-foundation.org, kvm@vger.kernel.org, netdev@vger.kernel.org, wh1sper@zju.edu.cn, sgarzare@redhat.com
+Message-ID: <20260202064719.642351-1-1468888505@139.com>
+
+From: Mike Christie <michael.christie@oracle.com>
+
+[ Upstream commit 5dd639a1646ef5fe8f4bf270fad47c5c3755b9b6 ]
+
+If vhost_scsi_set_endpoint is called multiple times without a
+vhost_scsi_clear_endpoint between them, we can hit multiple bugs
+found by Haoran Zhang:
+
+1. Use-after-free when no tpgs are found:
+
+This fixes a use after free that occurs when vhost_scsi_set_endpoint is
+called more than once and calls after the first call do not find any
+tpgs to add to the vs_tpg. When vhost_scsi_set_endpoint first finds
+tpgs to add to the vs_tpg array match=true, so we will do:
+
+vhost_vq_set_backend(vq, vs_tpg);
+...
+
+kfree(vs->vs_tpg);
+vs->vs_tpg = vs_tpg;
+
+If vhost_scsi_set_endpoint is called again and no tpgs are found
+match=false so we skip the vhost_vq_set_backend call leaving the
+pointer to the vs_tpg we then free via:
+
+kfree(vs->vs_tpg);
+vs->vs_tpg = vs_tpg;
+
+If a scsi request is then sent we do:
+
+vhost_scsi_handle_vq -> vhost_scsi_get_req -> vhost_vq_get_backend
+
+which sees the vs_tpg we just did a kfree on.
+
+2. Tpg dir removal hang:
+
+This patch fixes an issue where we cannot remove a LIO/target layer
+tpg (and structs above it like the target) dir due to the refcount
+dropping to -1.
+
+The problem is that if vhost_scsi_set_endpoint detects a tpg is already
+in the vs->vs_tpg array or if the tpg has been removed so
+target_depend_item fails, the undepend goto handler will do
+target_undepend_item on all tpgs in the vs_tpg array dropping their
+refcount to 0. At this time vs_tpg contains both the tpgs we have added
+in the current vhost_scsi_set_endpoint call as well as tpgs we added in
+previous calls which are also in vs->vs_tpg.
+
+Later, when vhost_scsi_clear_endpoint runs it will do
+target_undepend_item on all the tpgs in the vs->vs_tpg which will drop
+their refcount to -1. Userspace will then not be able to remove the tpg
+and will hang when it tries to do rmdir on the tpg dir.
+
+3. Tpg leak:
+
+This fixes a bug where we can leak tpgs and cause them to be
+un-removable because the target name is overwritten when
+vhost_scsi_set_endpoint is called multiple times but with different
+target names.
+
+The bug occurs if a user has called VHOST_SCSI_SET_ENDPOINT and setup
+a vhost-scsi device to target/tpg mapping, then calls
+VHOST_SCSI_SET_ENDPOINT again with a new target name that has tpgs we
+haven't seen before (target1 has tpg1 but target2 has tpg2). When this
+happens we don't teardown the old target tpg mapping and just overwrite
+the target name and the vs->vs_tpg array. Later when we do
+vhost_scsi_clear_endpoint, we are passed in either target1 or target2's
+name and we will only match that target's tpgs when we loop over the
+vs->vs_tpg. We will then return from the function without doing
+target_undepend_item on the tpgs.
+
+Because of all these bugs, it looks like being able to call
+vhost_scsi_set_endpoint multiple times was never supported. The major
+user, QEMU, already has checks to prevent this use case. So to fix the
+issues, this patch prevents vhost_scsi_set_endpoint from being called
+if it's already successfully added tpgs. To add, remove or change the
+tpg config or target name, you must do a vhost_scsi_clear_endpoint
+first.
+
+Fixes: 25b98b64e284 ("vhost scsi: alloc cmds per vq instead of session")
+Fixes: 4f7f46d32c98 ("tcm_vhost: Use vq->private_data to indicate if the endpoint is setup")
+Reported-by: Haoran Zhang <wh1sper@zju.edu.cn>
+Closes: https://lore.kernel.org/virtualization/e418a5ee-45ca-4d18-9b5d-6f8b6b1add8e@oracle.com/T/#me6c0041ce376677419b9b2563494172a01487ecb
+Signed-off-by: Mike Christie <michael.christie@oracle.com>
+Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
+Message-Id: <20250129210922.121533-1-michael.christie@oracle.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Acked-by: Stefano Garzarella <sgarzare@redhat.com>
+[ Minor conflict resolved. ]
+Signed-off-by: Li hongliang <1468888505@139.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/vhost/scsi.c | 24 +++++++++++++-----------
+ 1 file changed, 13 insertions(+), 11 deletions(-)
+
+--- a/drivers/vhost/scsi.c
++++ b/drivers/vhost/scsi.c
+@@ -1572,14 +1572,19 @@ vhost_scsi_set_endpoint(struct vhost_scs
+ }
+ }
+
++ if (vs->vs_tpg) {
++ pr_err("vhost-scsi endpoint already set for %s.\n",
++ vs->vs_vhost_wwpn);
++ ret = -EEXIST;
++ goto out;
++ }
++
+ len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
+ vs_tpg = kzalloc(len, GFP_KERNEL);
+ if (!vs_tpg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+- if (vs->vs_tpg)
+- memcpy(vs_tpg, vs->vs_tpg, len);
+
+ list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
+ mutex_lock(&tpg->tv_tpg_mutex);
+@@ -1594,11 +1599,6 @@ vhost_scsi_set_endpoint(struct vhost_scs
+ tv_tport = tpg->tport;
+
+ if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
+- if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
+- mutex_unlock(&tpg->tv_tpg_mutex);
+- ret = -EEXIST;
+- goto undepend;
+- }
+ /*
+ * In order to ensure individual vhost-scsi configfs
+ * groups cannot be removed while in use by vhost ioctl,
+@@ -1643,15 +1643,15 @@ vhost_scsi_set_endpoint(struct vhost_scs
+ }
+ ret = 0;
+ } else {
+- ret = -EEXIST;
++ ret = -ENODEV;
++ goto free_tpg;
+ }
+
+ /*
+- * Act as synchronize_rcu to make sure access to
+- * old vs->vs_tpg is finished.
++ * Act as synchronize_rcu to make sure requests after this point
++ * see a fully setup device.
+ */
+ vhost_scsi_flush(vs);
+- kfree(vs->vs_tpg);
+ vs->vs_tpg = vs_tpg;
+ goto out;
+
+@@ -1668,6 +1668,7 @@ undepend:
+ target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
+ }
+ }
++free_tpg:
+ kfree(vs_tpg);
+ out:
+ mutex_unlock(&vs->dev.mutex);
+@@ -1757,6 +1758,7 @@ vhost_scsi_clear_endpoint(struct vhost_s
+ vhost_scsi_flush(vs);
+ kfree(vs->vs_tpg);
+ vs->vs_tpg = NULL;
++ memset(vs->vs_vhost_wwpn, 0, sizeof(vs->vs_vhost_wwpn));
+ WARN_ON(vs->vs_events_nr);
+ mutex_unlock(&vs->dev.mutex);
+ mutex_unlock(&vhost_scsi_mutex);