--- /dev/null
+From stable+bounces-212673-greg=kroah.com@vger.kernel.org Wed Jan 28 21:34:00 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 Jan 2026 15:33:49 -0500
+Subject: arm64/fpsimd: signal: Consistently read FPSIMD context
+To: stable@vger.kernel.org
+Cc: Mark Rutland <mark.rutland@arm.com>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Brown <broonie@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260128203350.2720303-2-sashal@kernel.org>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit be625d803c3bbfa9652697eb57589fe6f2f24b89 ]
+
+For historical reasons, restore_sve_fpsimd_context() has an open-coded
+copy of the logic from read_fpsimd_context(), which is used to either
+restore an FPSIMD-only context, or to merge FPSIMD state into an
+SVE state when restoring an SVE+FPSIMD context. The logic is *almost*
+identical.
+
+Refactor the logic to avoid duplication and make this clearer.
+
+This comes with two functional changes that I do not believe will be
+problematic in practice:
+
+* The user_fpsimd_state::size field will be checked in all restore paths
+ that consume user_fpsimd_state. The kernel always populates this
+ field when delivering a signal, and so this should contain the
+ expected value unless it has been corrupted.
+
+* If a read of user_fpsimd_state fails, we will return early without
+ modifying TIF_SVE, the saved SVCR, or the saved fp_type. This will
+ leave the task in a consistent state, without potentially resurrecting
+ stale FPSIMD state. A read of user_fpsimd_state should never fail
+ unless the structure has been corrupted or the stack has been
+ unmapped.
+
+Suggested-by: Will Deacon <will@kernel.org>
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Marc Zyngier <maz@kernel.org>
+Cc: Mark Brown <broonie@kernel.org>
+Cc: Will Deacon <will@kernel.org>
+Link: https://lore.kernel.org/r/20250508132644.1395904-5-mark.rutland@arm.com
+[will: Ensure read_fpsimd_context() returns negative error code or zero]
+Signed-off-by: Will Deacon <will@kernel.org>
+Stable-dep-of: d2907cbe9ea0 ("arm64/fpsimd: signal: Fix restoration of SVE context")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/signal.c | 57 ++++++++++++++++++++++-----------------------
+ 1 file changed, 29 insertions(+), 28 deletions(-)
+
+--- a/arch/arm64/kernel/signal.c
++++ b/arch/arm64/kernel/signal.c
+@@ -202,29 +202,39 @@ static int preserve_fpsimd_context(struc
+ return err ? -EFAULT : 0;
+ }
+
+-static int restore_fpsimd_context(struct user_ctxs *user)
++static int read_fpsimd_context(struct user_fpsimd_state *fpsimd,
++ struct user_ctxs *user)
+ {
+- struct user_fpsimd_state fpsimd;
+- int err = 0;
++ int err;
+
+ /* check the size information */
+ if (user->fpsimd_size != sizeof(struct fpsimd_context))
+ return -EINVAL;
+
+ /* copy the FP and status/control registers */
+- err = __copy_from_user(fpsimd.vregs, &(user->fpsimd->vregs),
+- sizeof(fpsimd.vregs));
+- __get_user_error(fpsimd.fpsr, &(user->fpsimd->fpsr), err);
+- __get_user_error(fpsimd.fpcr, &(user->fpsimd->fpcr), err);
++ err = __copy_from_user(fpsimd->vregs, &(user->fpsimd->vregs),
++ sizeof(fpsimd->vregs));
++ __get_user_error(fpsimd->fpsr, &(user->fpsimd->fpsr), err);
++ __get_user_error(fpsimd->fpcr, &(user->fpsimd->fpcr), err);
++
++ return err ? -EFAULT : 0;
++}
++
++static int restore_fpsimd_context(struct user_ctxs *user)
++{
++ struct user_fpsimd_state fpsimd;
++ int err;
++
++ err = read_fpsimd_context(&fpsimd, user);
++ if (err)
++ return err;
+
+ clear_thread_flag(TIF_SVE);
+ current->thread.fp_type = FP_STATE_FPSIMD;
+
+ /* load the hardware registers from the fpsimd_state structure */
+- if (!err)
+- fpsimd_update_current_state(&fpsimd);
+-
+- return err ? -EFAULT : 0;
++ fpsimd_update_current_state(&fpsimd);
++ return 0;
+ }
+
+
+@@ -316,12 +326,8 @@ static int restore_sve_fpsimd_context(st
+ * consistency and robustness, reject restoring streaming SVE state
+ * without an SVE payload.
+ */
+- if (!sm && user->sve_size == sizeof(*user->sve)) {
+- clear_thread_flag(TIF_SVE);
+- current->thread.svcr &= ~SVCR_SM_MASK;
+- current->thread.fp_type = FP_STATE_FPSIMD;
+- goto fpsimd_only;
+- }
++ if (!sm && user->sve_size == sizeof(*user->sve))
++ return restore_fpsimd_context(user);
+
+ vq = sve_vq_from_vl(vl);
+
+@@ -357,19 +363,14 @@ static int restore_sve_fpsimd_context(st
+ set_thread_flag(TIF_SVE);
+ current->thread.fp_type = FP_STATE_SVE;
+
+-fpsimd_only:
+- /* copy the FP and status/control registers */
+- /* restore_sigframe() already checked that user->fpsimd != NULL. */
+- err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs,
+- sizeof(fpsimd.vregs));
+- __get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err);
+- __get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err);
++ err = read_fpsimd_context(&fpsimd, user);
++ if (err)
++ return err;
+
+- /* load the hardware registers from the fpsimd_state structure */
+- if (!err)
+- fpsimd_update_current_state(&fpsimd);
++ /* Merge the FPSIMD registers into the SVE state */
++ fpsimd_update_current_state(&fpsimd);
+
+- return err ? -EFAULT : 0;
++ return 0;
+ }
+
+ #else /* ! CONFIG_ARM64_SVE */
--- /dev/null
+From stable+bounces-212674-greg=kroah.com@vger.kernel.org Wed Jan 28 21:35:11 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 Jan 2026 15:33:50 -0500
+Subject: arm64/fpsimd: signal: Fix restoration of SVE context
+To: stable@vger.kernel.org
+Cc: Mark Rutland <mark.rutland@arm.com>, Mark Brown <broonie@kernel.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260128203350.2720303-3-sashal@kernel.org>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit d2907cbe9ea0a54cbe078076f9d089240ee1e2d9 ]
+
+When SME is supported, restoring SVE signal context can go wrong in a
+few ways, including placing the task into an invalid state where the
+kernel may read from out-of-bounds memory (and may potentially take a
+fatal fault) and/or may kill the task with a SIGKILL.
+
+(1) Restoring a context with SVE_SIG_FLAG_SM set can place the task into
+ an invalid state where SVCR.SM is set (and sve_state is non-NULL)
+ but TIF_SME is clear, consequently resulting in out-of-bounds memory
+ reads and/or killing the task with SIGKILL.
+
+ This can only occur in unusual (but legitimate) cases where the SVE
+ signal context has either been modified by userspace or was saved in
+ the context of another task (e.g. as with CRIU), as otherwise the
+ presence of an SVE signal context with SVE_SIG_FLAG_SM implies that
+ TIF_SME is already set.
+
+ While in this state, task_fpsimd_load() will NOT configure SMCR_ELx
+ (leaving some arbitrary value configured in hardware) before
+ restoring SVCR and attempting to restore the streaming mode SVE
+ registers from memory via sve_load_state(). As the value of
+ SMCR_ELx.LEN may be larger than the task's streaming SVE vector
+ length, this may read memory outside of the task's allocated
+ sve_state, reading unrelated data and/or triggering a fault.
+
+ While this can result in secrets being loaded into streaming SVE
+ registers, these values are never exposed. As TIF_SME is clear,
+ fpsimd_bind_task_to_cpu() will configure CPACR_ELx.SMEN to trap EL0
+ accesses to streaming mode SVE registers, so these cannot be
+ accessed directly at EL0. As fpsimd_save_user_state() verifies the
+ live vector length before saving (S)SVE state to memory, no secret
+ values can be saved back to memory (and hence cannot be observed via
+ ptrace, signals, etc).
+
+ When the live vector length doesn't match the expected vector length
+ for the task, fpsimd_save_user_state() will send a fatal SIGKILL
+ signal to the task. Hence the task may be killed after executing
+ userspace for some period of time.
+
+(2) Restoring a context with SVE_SIG_FLAG_SM clear does not clear the
+ task's SVCR.SM. If SVCR.SM was set prior to restoring the context,
+ then the task will be left in streaming mode unexpectedly, and some
+ register state will be combined inconsistently, though the task will
+ be left in legitimate state from the kernel's PoV.
+
+ This can only occur in unusual (but legitimate) cases where ptrace
+ has been used to set SVCR.SM after entry to the sigreturn syscall,
+ as syscall entry clears SVCR.SM.
+
+ In these cases, the provided SVE register data will be loaded
+ into the task's sve_state using the non-streaming SVE vector length
+ and the FPSIMD registers will be merged into this using the
+ streaming SVE vector length.
+
+Fix (1) by setting TIF_SME when setting SVCR.SM. This also requires
+ensuring that the task's sme_state has been allocated, but as this could
+contain live ZA state, it should not be zeroed. Fix (2) by clearing
+SVCR.SM when restoring a SVE signal context with SVE_SIG_FLAG_SM clear.
+
+For consistency, I've pulled the manipulation of SVCR, TIF_SVE, TIF_SME,
+and fp_type earlier, immediately after the allocation of
+sve_state/sme_state, before the restore of the actual register state.
+This makes it easier to ensure that these are always modified
+consistently, even if a fault is taken while reading the register data
+from the signal context. I do not expect any software to depend on the
+exact state restored when a fault is taken while reading the context.
+
+Fixes: 85ed24dad290 ("arm64/sme: Implement streaming SVE signal handling")
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: <stable@vger.kernel.org>
+Cc: Mark Brown <broonie@kernel.org>
+Cc: Will Deacon <will@kernel.org>
+Reviewed-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+[ preserved fpsimd_flush_task_state() call before new SME allocation logic ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/signal.c | 22 ++++++++++++++++------
+ 1 file changed, 16 insertions(+), 6 deletions(-)
+
+--- a/arch/arm64/kernel/signal.c
++++ b/arch/arm64/kernel/signal.c
+@@ -344,12 +344,28 @@ static int restore_sve_fpsimd_context(st
+ fpsimd_flush_task_state(current);
+ /* From now, fpsimd_thread_switch() won't touch thread.sve_state */
+
++ if (sm) {
++ sme_alloc(current, false);
++ if (!current->thread.sme_state)
++ return -ENOMEM;
++ }
++
+ sve_alloc(current, true);
+ if (!current->thread.sve_state) {
+ clear_thread_flag(TIF_SVE);
+ return -ENOMEM;
+ }
+
++ if (sm) {
++ current->thread.svcr |= SVCR_SM_MASK;
++ set_thread_flag(TIF_SME);
++ } else {
++ current->thread.svcr &= ~SVCR_SM_MASK;
++ set_thread_flag(TIF_SVE);
++ }
++
++ current->thread.fp_type = FP_STATE_SVE;
++
+ err = __copy_from_user(current->thread.sve_state,
+ (char __user const *)user->sve +
+ SVE_SIG_REGS_OFFSET,
+@@ -357,12 +373,6 @@ static int restore_sve_fpsimd_context(st
+ if (err)
+ return -EFAULT;
+
+- if (flags & SVE_SIG_FLAG_SM)
+- current->thread.svcr |= SVCR_SM_MASK;
+- else
+- set_thread_flag(TIF_SVE);
+- current->thread.fp_type = FP_STATE_SVE;
+-
+ err = read_fpsimd_context(&fpsimd, user);
+ if (err)
+ return err;
--- /dev/null
+From stable+bounces-212672-greg=kroah.com@vger.kernel.org Wed Jan 28 21:35:07 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 Jan 2026 15:33:48 -0500
+Subject: arm64/fpsimd: signal: Mandate SVE payload for streaming-mode state
+To: stable@vger.kernel.org
+Cc: Mark Rutland <mark.rutland@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Brown <broonie@kernel.org>, Will Deacon <will@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260128203350.2720303-1-sashal@kernel.org>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit b465ace42620970e840c7aeb2c44a6e3b1002fec ]
+
+Non-streaming SVE state may be preserved without an SVE payload, in
+which case the SVE context only has a header with VL==0, and all state
+can be restored from the FPSIMD context. Streaming SVE state is always
+preserved with an SVE payload, where the SVE context header has VL!=0,
+and the SVE_SIG_FLAG_SM flag is set.
+
+The kernel never preserves an SVE context where SVE_SIG_FLAG_SM is set
+without an SVE payload. However, restore_sve_fpsimd_context() doesn't
+forbid restoring such a context, and will handle this case by clearing
+PSTATE.SM and restoring the FPSIMD context into non-streaming mode,
+which isn't consistent with the SVE_SIG_FLAG_SM flag.
+
+Forbid this case, and mandate an SVE payload when the SVE_SIG_FLAG_SM
+flag is set. This avoids an awkward ABI quirk and reduces the risk that
+later rework to this code permits configuring a task with PSTATE.SM==1
+and fp_type==FP_STATE_FPSIMD.
+
+I've marked this as a fix given that we never intended to support this
+case, and we don't want anyone to start relying upon the old behaviour
+once we re-enable SME.
+
+Fixes: 85ed24dad290 ("arm64/sme: Implement streaming SVE signal handling")
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Marc Zyngier <maz@kernel.org>
+Cc: Mark Brown <broonie@kernel.org>
+Cc: Will Deacon <will@kernel.org>
+Reviewed-by: Mark Brown <broonie@kernel.org>
+Link: https://lore.kernel.org/r/20250508132644.1395904-4-mark.rutland@arm.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Stable-dep-of: d2907cbe9ea0 ("arm64/fpsimd: signal: Fix restoration of SVE context")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/signal.c | 15 +++++++++++++--
+ 1 file changed, 13 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/kernel/signal.c
++++ b/arch/arm64/kernel/signal.c
+@@ -276,6 +276,7 @@ static int restore_sve_fpsimd_context(st
+ unsigned int vl, vq;
+ struct user_fpsimd_state fpsimd;
+ u16 user_vl, flags;
++ bool sm;
+
+ if (user->sve_size < sizeof(*user->sve))
+ return -EINVAL;
+@@ -285,7 +286,8 @@ static int restore_sve_fpsimd_context(st
+ if (err)
+ return err;
+
+- if (flags & SVE_SIG_FLAG_SM) {
++ sm = flags & SVE_SIG_FLAG_SM;
++ if (sm) {
+ if (!system_supports_sme())
+ return -EINVAL;
+
+@@ -305,7 +307,16 @@ static int restore_sve_fpsimd_context(st
+ if (user_vl != vl)
+ return -EINVAL;
+
+- if (user->sve_size == sizeof(*user->sve)) {
++ /*
++ * Non-streaming SVE state may be preserved without an SVE payload, in
++ * which case the SVE context only has a header with VL==0, and all
++ * state can be restored from the FPSIMD context.
++ *
++ * Streaming SVE state is always preserved with an SVE payload. For
++ * consistency and robustness, reject restoring streaming SVE state
++ * without an SVE payload.
++ */
++ if (!sm && user->sve_size == sizeof(*user->sve)) {
+ clear_thread_flag(TIF_SVE);
+ current->thread.svcr &= ~SVCR_SM_MASK;
+ current->thread.fp_type = FP_STATE_FPSIMD;
--- /dev/null
+From 1468888505@139.com Thu Jan 29 10:13:30 2026
+From: Li hongliang <1468888505@139.com>
+Date: Thu, 29 Jan 2026 17:13:25 +0800
+Subject: drm/amdgpu: Replace Mutex with Spinlock for RLCG register access to avoid Priority Inversion in SRIOV
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org, srinivasan.shanmugam@amd.com
+Cc: patches@lists.linux.dev, linux-kernel@vger.kernel.org, alexander.deucher@amd.com, christian.koenig@amd.com, Xinhui.Pan@amd.com, airlied@gmail.com, daniel@ffwll.ch, sashal@kernel.org, mario.limonciello@amd.com, superm1@kernel.org, Jun.Ma2@amd.com, Zhigang.Luo@amd.com, Hawking.Zhang@amd.com, Jesse.Zhang@amd.com, victor.skvortsov@amd.com, amd-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org, lin.cao@amd.com, Jingwen.Chen2@amd.com
+Message-ID: <20260129091325.3637010-1-1468888505@139.com>
+
+From: Srinivasan Shanmugam <srinivasan.shanmugam@amd.com>
+
+[ Upstream commit dc0297f3198bd60108ccbd167ee5d9fa4af31ed0 ]
+
+RLCG Register Access is a way for virtual functions to safely access GPU
+registers in a virtualized environment., including TLB flushes and
+register reads. When multiple threads or VFs try to access the same
+registers simultaneously, it can lead to race conditions. By using the
+RLCG interface, the driver can serialize access to the registers. This
+means that only one thread can access the registers at a time,
+preventing conflicts and ensuring that operations are performed
+correctly. Additionally, when a low-priority task holds a mutex that a
+high-priority task needs, i.e., if a thread holding a spinlock tries to
+acquire a mutex, it can lead to priority inversion. Register access in
+amdgpu_virt_rlcg_reg_rw, especially in a fast code path, is critical.
+
+The call stack shows that the function amdgpu_virt_rlcg_reg_rw is being
+called, which attempts to acquire the mutex. This function is invoked
+from amdgpu_sriov_wreg, which in turn is called from
+gmc_v11_0_flush_gpu_tlb.
+
+The [ BUG: Invalid wait context ] indicates that a thread is trying to
+acquire a mutex while it is in a context that does not allow it to sleep
+(like holding a spinlock).
+
+Fixes the below:
+
+[ 253.013423] =============================
+[ 253.013434] [ BUG: Invalid wait context ]
+[ 253.013446] 6.12.0-amdstaging-drm-next-lol-050225 #14 Tainted: G U OE
+[ 253.013464] -----------------------------
+[ 253.013475] kworker/0:1/10 is trying to lock:
+[ 253.013487] ffff9f30542e3cf8 (&adev->virt.rlcg_reg_lock){+.+.}-{3:3}, at: amdgpu_virt_rlcg_reg_rw+0xf6/0x330 [amdgpu]
+[ 253.013815] other info that might help us debug this:
+[ 253.013827] context-{4:4}
+[ 253.013835] 3 locks held by kworker/0:1/10:
+[ 253.013847] #0: ffff9f3040050f58 ((wq_completion)events){+.+.}-{0:0}, at: process_one_work+0x3f5/0x680
+[ 253.013877] #1: ffffb789c008be40 ((work_completion)(&wfc.work)){+.+.}-{0:0}, at: process_one_work+0x1d6/0x680
+[ 253.013905] #2: ffff9f3054281838 (&adev->gmc.invalidate_lock){+.+.}-{2:2}, at: gmc_v11_0_flush_gpu_tlb+0x198/0x4f0 [amdgpu]
+[ 253.014154] stack backtrace:
+[ 253.014164] CPU: 0 UID: 0 PID: 10 Comm: kworker/0:1 Tainted: G U OE 6.12.0-amdstaging-drm-next-lol-050225 #14
+[ 253.014189] Tainted: [U]=USER, [O]=OOT_MODULE, [E]=UNSIGNED_MODULE
+[ 253.014203] Hardware name: Microsoft Corporation Virtual Machine/Virtual Machine, BIOS Hyper-V UEFI Release v4.1 11/18/2024
+[ 253.014224] Workqueue: events work_for_cpu_fn
+[ 253.014241] Call Trace:
+[ 253.014250] <TASK>
+[ 253.014260] dump_stack_lvl+0x9b/0xf0
+[ 253.014275] dump_stack+0x10/0x20
+[ 253.014287] __lock_acquire+0xa47/0x2810
+[ 253.014303] ? srso_alias_return_thunk+0x5/0xfbef5
+[ 253.014321] lock_acquire+0xd1/0x300
+[ 253.014333] ? amdgpu_virt_rlcg_reg_rw+0xf6/0x330 [amdgpu]
+[ 253.014562] ? __lock_acquire+0xa6b/0x2810
+[ 253.014578] __mutex_lock+0x85/0xe20
+[ 253.014591] ? amdgpu_virt_rlcg_reg_rw+0xf6/0x330 [amdgpu]
+[ 253.014782] ? sched_clock_noinstr+0x9/0x10
+[ 253.014795] ? srso_alias_return_thunk+0x5/0xfbef5
+[ 253.014808] ? local_clock_noinstr+0xe/0xc0
+[ 253.014822] ? amdgpu_virt_rlcg_reg_rw+0xf6/0x330 [amdgpu]
+[ 253.015012] ? srso_alias_return_thunk+0x5/0xfbef5
+[ 253.015029] mutex_lock_nested+0x1b/0x30
+[ 253.015044] ? mutex_lock_nested+0x1b/0x30
+[ 253.015057] amdgpu_virt_rlcg_reg_rw+0xf6/0x330 [amdgpu]
+[ 253.015249] amdgpu_sriov_wreg+0xc5/0xd0 [amdgpu]
+[ 253.015435] gmc_v11_0_flush_gpu_tlb+0x44b/0x4f0 [amdgpu]
+[ 253.015667] gfx_v11_0_hw_init+0x499/0x29c0 [amdgpu]
+[ 253.015901] ? __pfx_smu_v13_0_update_pcie_parameters+0x10/0x10 [amdgpu]
+[ 253.016159] ? srso_alias_return_thunk+0x5/0xfbef5
+[ 253.016173] ? smu_hw_init+0x18d/0x300 [amdgpu]
+[ 253.016403] amdgpu_device_init+0x29ad/0x36a0 [amdgpu]
+[ 253.016614] amdgpu_driver_load_kms+0x1a/0xc0 [amdgpu]
+[ 253.017057] amdgpu_pci_probe+0x1c2/0x660 [amdgpu]
+[ 253.017493] local_pci_probe+0x4b/0xb0
+[ 253.017746] work_for_cpu_fn+0x1a/0x30
+[ 253.017995] process_one_work+0x21e/0x680
+[ 253.018248] worker_thread+0x190/0x330
+[ 253.018500] ? __pfx_worker_thread+0x10/0x10
+[ 253.018746] kthread+0xe7/0x120
+[ 253.018988] ? __pfx_kthread+0x10/0x10
+[ 253.019231] ret_from_fork+0x3c/0x60
+[ 253.019468] ? __pfx_kthread+0x10/0x10
+[ 253.019701] ret_from_fork_asm+0x1a/0x30
+[ 253.019939] </TASK>
+
+v2: s/spin_trylock/spin_lock_irqsave to be safe (Christian).
+
+Fixes: e864180ee49b ("drm/amdgpu: Add lock around VF RLCG interface")
+Cc: lin cao <lin.cao@amd.com>
+Cc: Jingwen Chen <Jingwen.Chen2@amd.com>
+Cc: Victor Skvortsov <victor.skvortsov@amd.com>
+Cc: Zhigang Luo <zhigang.luo@amd.com>
+Cc: Christian König <christian.koenig@amd.com>
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Srinivasan Shanmugam <srinivasan.shanmugam@amd.com>
+Suggested-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+[ Minor conflict resolved. ]
+Signed-off-by: Li hongliang <1468888505@139.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 5 +++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 3 ++-
+ 3 files changed, 6 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3582,7 +3582,6 @@ int amdgpu_device_init(struct amdgpu_dev
+ mutex_init(&adev->grbm_idx_mutex);
+ mutex_init(&adev->mn_lock);
+ mutex_init(&adev->virt.vf_errors.lock);
+- mutex_init(&adev->virt.rlcg_reg_lock);
+ hash_init(adev->mn_hash);
+ mutex_init(&adev->psp.mutex);
+ mutex_init(&adev->notifier_lock);
+@@ -3604,6 +3603,7 @@ int amdgpu_device_init(struct amdgpu_dev
+ spin_lock_init(&adev->se_cac_idx_lock);
+ spin_lock_init(&adev->audio_endpt_idx_lock);
+ spin_lock_init(&adev->mm_stats.lock);
++ spin_lock_init(&adev->virt.rlcg_reg_lock);
+
+ INIT_LIST_HEAD(&adev->shadow_list);
+ mutex_init(&adev->shadow_list_lock);
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+@@ -1007,6 +1007,7 @@ static u32 amdgpu_virt_rlcg_reg_rw(struc
+ void *scratch_reg2;
+ void *scratch_reg3;
+ void *spare_int;
++ unsigned long flags;
+
+ if (!adev->gfx.rlc.rlcg_reg_access_supported) {
+ dev_err(adev->dev,
+@@ -1028,7 +1029,7 @@ static u32 amdgpu_virt_rlcg_reg_rw(struc
+ scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2;
+ scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3;
+
+- mutex_lock(&adev->virt.rlcg_reg_lock);
++ spin_lock_irqsave(&adev->virt.rlcg_reg_lock, flags);
+
+ if (reg_access_ctrl->spare_int)
+ spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int;
+@@ -1086,7 +1087,7 @@ static u32 amdgpu_virt_rlcg_reg_rw(struc
+
+ ret = readl(scratch_reg0);
+
+- mutex_unlock(&adev->virt.rlcg_reg_lock);
++ spin_unlock_irqrestore(&adev->virt.rlcg_reg_lock, flags);
+
+ return ret;
+ }
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+@@ -267,7 +267,8 @@ struct amdgpu_virt {
+ /* the ucode id to signal the autoload */
+ uint32_t autoload_ucode_id;
+
+- struct mutex rlcg_reg_lock;
++ /* Spinlock to protect access to the RLCG register interface */
++ spinlock_t rlcg_reg_lock;
+ };
+
+ struct amdgpu_video_codec_info;
--- /dev/null
+From 1468888505@139.com Mon Feb 2 08:58:36 2026
+From: Li hongliang <1468888505@139.com>
+Date: Mon, 2 Feb 2026 15:58:31 +0800
+Subject: drm/radeon: delete radeon_fence_process in is_signaled, no deadlock
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org, rbmccav@gmail.com
+Cc: patches@lists.linux.dev, linux-kernel@vger.kernel.org, alexander.deucher@amd.com, christian.koenig@amd.com, Xinhui.Pan@amd.com, airlied@gmail.com, daniel@ffwll.ch, amd-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org
+Message-ID: <20260202075831.947537-1-1468888505@139.com>
+
+From: Robert McClinton <rbmccav@gmail.com>
+
+[ Upstream commit 9eb00b5f5697bd56baa3222c7a1426fa15bacfb5 ]
+
+Delete the attempt to progress the queue when checking if fence is
+signaled. This avoids deadlock.
+
+dma-fence_ops::signaled can be called with the fence lock in unknown
+state. For radeon, the fence lock is also the wait queue lock. This can
+cause a self deadlock when signaled() tries to make forward progress on
+the wait queue. But advancing the queue is unneeded because incorrectly
+returning false from signaled() is perfectly acceptable.
+
+Link: https://github.com/brave/brave-browser/issues/49182
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/4641
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Robert McClinton <rbmccav@gmail.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 527ba26e50ec2ca2be9c7c82f3ad42998a75d0db)
+Cc: stable@vger.kernel.org
+[ Minor conflict resolved. ]
+Signed-off-by: Li hongliang <1468888505@139.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/radeon/radeon_fence.c | 8 --------
+ 1 file changed, 8 deletions(-)
+
+--- a/drivers/gpu/drm/radeon/radeon_fence.c
++++ b/drivers/gpu/drm/radeon/radeon_fence.c
+@@ -362,14 +362,6 @@ static bool radeon_fence_is_signaled(str
+ return true;
+ }
+
+- if (down_read_trylock(&rdev->exclusive_lock)) {
+- radeon_fence_process(rdev, ring);
+- up_read(&rdev->exclusive_lock);
+-
+- if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
+- return true;
+- }
+- }
+ return false;
+ }
+
--- /dev/null
+From 7ca497be00163610afb663867db24ac408752f13 Mon Sep 17 00:00:00 2001
+From: Robin Murphy <robin.murphy@arm.com>
+Date: Mon, 26 Jan 2026 12:12:26 +0000
+Subject: gpio: rockchip: Stop calling pinctrl for set_direction
+
+From: Robin Murphy <robin.murphy@arm.com>
+
+commit 7ca497be00163610afb663867db24ac408752f13 upstream.
+
+Marking the whole controller as sleeping due to the pinctrl calls in the
+.direction_{input,output} callbacks has the unfortunate side effect that
+legitimate invocations of .get and .set, which cannot themselves sleep,
+in atomic context now spew WARN()s from gpiolib.
+
+However, as Heiko points out, the driver doing this is a bit silly to
+begin with, as the pinctrl .gpio_set_direction hook doesn't even care
+about the direction, the hook is only used to claim the mux. And sure
+enough, the .gpio_request_enable hook exists to serve this very purpose,
+so switch to that and remove the problematic business entirely.
+
+Cc: stable@vger.kernel.org
+Fixes: 20cf2aed89ac ("gpio: rockchip: mark the GPIO controller as sleeping")
+Suggested-by: Heiko Stuebner <heiko@sntech.de>
+Signed-off-by: Robin Murphy <robin.murphy@arm.com>
+Reviewed-by: Heiko Stuebner <heiko@sntech.de>
+Link: https://lore.kernel.org/r/bddc0469f25843ca5ae0cf578ab3671435ae98a7.1769429546.git.robin.murphy@arm.com
+Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@oss.qualcomm.com>
+[ Backport past pinctrl API change for the deleted calls ]
+Signed-off-by: Robin Murphy <robin.murphy@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpio/gpio-rockchip.c | 8 --------
+ drivers/pinctrl/pinctrl-rockchip.c | 9 ++++-----
+ 2 files changed, 4 insertions(+), 13 deletions(-)
+
+--- a/drivers/gpio/gpio-rockchip.c
++++ b/drivers/gpio/gpio-rockchip.c
+@@ -18,7 +18,6 @@
+ #include <linux/of.h>
+ #include <linux/of_address.h>
+ #include <linux/of_irq.h>
+-#include <linux/pinctrl/consumer.h>
+ #include <linux/pinctrl/pinconf-generic.h>
+ #include <linux/platform_device.h>
+ #include <linux/regmap.h>
+@@ -157,12 +156,6 @@ static int rockchip_gpio_set_direction(s
+ unsigned long flags;
+ u32 data = input ? 0 : 1;
+
+-
+- if (input)
+- pinctrl_gpio_direction_input(bank->pin_base + offset);
+- else
+- pinctrl_gpio_direction_output(bank->pin_base + offset);
+-
+ raw_spin_lock_irqsave(&bank->slock, flags);
+ rockchip_gpio_writel_bit(bank, offset, data, bank->gpio_regs->port_ddr);
+ raw_spin_unlock_irqrestore(&bank->slock, flags);
+@@ -584,7 +577,6 @@ static int rockchip_gpiolib_register(str
+ gc->ngpio = bank->nr_pins;
+ gc->label = bank->name;
+ gc->parent = bank->dev;
+- gc->can_sleep = true;
+
+ ret = gpiochip_add_data(gc, bank);
+ if (ret) {
+--- a/drivers/pinctrl/pinctrl-rockchip.c
++++ b/drivers/pinctrl/pinctrl-rockchip.c
+@@ -2749,10 +2749,9 @@ static int rockchip_pmx_set(struct pinct
+ return 0;
+ }
+
+-static int rockchip_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
+- struct pinctrl_gpio_range *range,
+- unsigned offset,
+- bool input)
++static int rockchip_pmx_gpio_request_enable(struct pinctrl_dev *pctldev,
++ struct pinctrl_gpio_range *range,
++ unsigned int offset)
+ {
+ struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+ struct rockchip_pin_bank *bank;
+@@ -2766,7 +2765,7 @@ static const struct pinmux_ops rockchip_
+ .get_function_name = rockchip_pmx_get_func_name,
+ .get_function_groups = rockchip_pmx_get_groups,
+ .set_mux = rockchip_pmx_set,
+- .gpio_set_direction = rockchip_pmx_gpio_set_direction,
++ .gpio_request_enable = rockchip_pmx_gpio_request_enable,
+ };
+
+ /*
--- /dev/null
+From 1468888505@139.com Mon Feb 2 04:17:45 2026
+From: Li hongliang <1468888505@139.com>
+Date: Mon, 2 Feb 2026 11:17:39 +0800
+Subject: ksmbd: Fix race condition in RPC handle list access
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org, ysk@kzalloc.com
+Cc: patches@lists.linux.dev, linux-kernel@vger.kernel.org, linkinjeon@kernel.org, sfrench@samba.org, senozhatsky@chromium.org, tom@talpey.com, akendo@akendo.eu, set_pte_at@outlook.com, linux-cifs@vger.kernel.org, stfrench@microsoft.com
+Message-ID: <20260202031739.515222-1-1468888505@139.com>
+
+From: Yunseong Kim <ysk@kzalloc.com>
+
+[ Upstream commit 305853cce379407090a73b38c5de5ba748893aee ]
+
+The 'sess->rpc_handle_list' XArray manages RPC handles within a ksmbd
+session. Access to this list is intended to be protected by
+'sess->rpc_lock' (an rw_semaphore). However, the locking implementation was
+flawed, leading to potential race conditions.
+
+In ksmbd_session_rpc_open(), the code incorrectly acquired only a read lock
+before calling xa_store() and xa_erase(). Since these operations modify
+the XArray structure, a write lock is required to ensure exclusive access
+and prevent data corruption from concurrent modifications.
+
+Furthermore, ksmbd_session_rpc_method() accessed the list using xa_load()
+without holding any lock at all. This could lead to reading inconsistent
+data or a potential use-after-free if an entry is concurrently removed and
+the pointer is dereferenced.
+
+Fix these issues by:
+1. Using down_write() and up_write() in ksmbd_session_rpc_open()
+ to ensure exclusive access during XArray modification, and ensuring
+ the lock is correctly released on error paths.
+2. Adding down_read() and up_read() in ksmbd_session_rpc_method()
+ to safely protect the lookup.
+
+Fixes: a1f46c99d9ea ("ksmbd: fix use-after-free in ksmbd_session_rpc_open")
+Fixes: b685757c7b08 ("ksmbd: Implements sess->rpc_handle_list as xarray")
+Cc: stable@vger.kernel.org
+Signed-off-by: Yunseong Kim <ysk@kzalloc.com>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+[ Minor conflict resolved. ]
+Signed-off-by: Li hongliang <1468888505@139.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/mgmt/user_session.c | 26 +++++++++++++++++---------
+ 1 file changed, 17 insertions(+), 9 deletions(-)
+
+--- a/fs/smb/server/mgmt/user_session.c
++++ b/fs/smb/server/mgmt/user_session.c
+@@ -104,29 +104,32 @@ int ksmbd_session_rpc_open(struct ksmbd_
+ if (!entry)
+ return -ENOMEM;
+
+- down_read(&sess->rpc_lock);
+ entry->method = method;
+ entry->id = id = ksmbd_ipc_id_alloc();
+ if (id < 0)
+ goto free_entry;
++
++ down_write(&sess->rpc_lock);
+ old = xa_store(&sess->rpc_handle_list, id, entry, GFP_KERNEL);
+- if (xa_is_err(old))
++ if (xa_is_err(old)) {
++ up_write(&sess->rpc_lock);
+ goto free_id;
++ }
+
+ resp = ksmbd_rpc_open(sess, id);
+- if (!resp)
+- goto erase_xa;
++ if (!resp) {
++ xa_erase(&sess->rpc_handle_list, entry->id);
++ up_write(&sess->rpc_lock);
++ goto free_id;
++ }
+
+- up_read(&sess->rpc_lock);
++ up_write(&sess->rpc_lock);
+ kvfree(resp);
+ return id;
+-erase_xa:
+- xa_erase(&sess->rpc_handle_list, entry->id);
+ free_id:
+ ksmbd_rpc_id_free(entry->id);
+ free_entry:
+ kfree(entry);
+- up_read(&sess->rpc_lock);
+ return -EINVAL;
+ }
+
+@@ -144,9 +147,14 @@ void ksmbd_session_rpc_close(struct ksmb
+ int ksmbd_session_rpc_method(struct ksmbd_session *sess, int id)
+ {
+ struct ksmbd_session_rpc *entry;
++ int method;
+
++ down_read(&sess->rpc_lock);
+ entry = xa_load(&sess->rpc_handle_list, id);
+- return entry ? entry->method : 0;
++ method = entry ? entry->method : 0;
++ up_read(&sess->rpc_lock);
++
++ return method;
+ }
+
+ void ksmbd_session_destroy(struct ksmbd_session *sess)
--- /dev/null
+From stable+bounces-212679-greg=kroah.com@vger.kernel.org Wed Jan 28 22:36:19 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 Jan 2026 16:36:14 -0500
+Subject: ksmbd: smbd: fix dma_unmap_sg() nents
+To: stable@vger.kernel.org
+Cc: Thomas Fourier <fourier.thomas@gmail.com>, Namjae Jeon <linkinjeon@kernel.org>, Steve French <stfrench@microsoft.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260128213614.2762269-1-sashal@kernel.org>
+
+From: Thomas Fourier <fourier.thomas@gmail.com>
+
+[ Upstream commit 98e3e2b561bc88f4dd218d1c05890672874692f6 ]
+
+The dma_unmap_sg() functions should be called with the same nents as the
+dma_map_sg(), not the value the map function returned.
+
+Fixes: 0626e6641f6b ("cifsd: add server handler for central processing and tranport layers")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Thomas Fourier <fourier.thomas@gmail.com>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+[ Context ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/transport_rdma.c | 15 +++++++--------
+ 1 file changed, 7 insertions(+), 8 deletions(-)
+
+--- a/fs/smb/server/transport_rdma.c
++++ b/fs/smb/server/transport_rdma.c
+@@ -1108,14 +1108,12 @@ static int get_sg_list(void *buf, int si
+
+ static int get_mapped_sg_list(struct ib_device *device, void *buf, int size,
+ struct scatterlist *sg_list, int nentries,
+- enum dma_data_direction dir)
++ enum dma_data_direction dir, int *npages)
+ {
+- int npages;
+-
+- npages = get_sg_list(buf, size, sg_list, nentries);
+- if (npages < 0)
++ *npages = get_sg_list(buf, size, sg_list, nentries);
++ if (*npages < 0)
+ return -EINVAL;
+- return ib_dma_map_sg(device, sg_list, npages, dir);
++ return ib_dma_map_sg(device, sg_list, *npages, dir);
+ }
+
+ static int post_sendmsg(struct smb_direct_transport *t,
+@@ -1184,12 +1182,13 @@ static int smb_direct_post_send_data(str
+ for (i = 0; i < niov; i++) {
+ struct ib_sge *sge;
+ int sg_cnt;
++ int npages;
+
+ sg_init_table(sg, SMB_DIRECT_MAX_SEND_SGES - 1);
+ sg_cnt = get_mapped_sg_list(t->cm_id->device,
+ iov[i].iov_base, iov[i].iov_len,
+ sg, SMB_DIRECT_MAX_SEND_SGES - 1,
+- DMA_TO_DEVICE);
++ DMA_TO_DEVICE, &npages);
+ if (sg_cnt <= 0) {
+ pr_err("failed to map buffer\n");
+ ret = -ENOMEM;
+@@ -1197,7 +1196,7 @@ static int smb_direct_post_send_data(str
+ } else if (sg_cnt + msg->num_sge > SMB_DIRECT_MAX_SEND_SGES) {
+ pr_err("buffer not fitted into sges\n");
+ ret = -E2BIG;
+- ib_dma_unmap_sg(t->cm_id->device, sg, sg_cnt,
++ ib_dma_unmap_sg(t->cm_id->device, sg, npages,
+ DMA_TO_DEVICE);
+ goto err;
+ }
--- /dev/null
+From stable+bounces-212678-greg=kroah.com@vger.kernel.org Wed Jan 28 22:36:17 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 Jan 2026 16:36:08 -0500
+Subject: mei: trace: treat reg parameter as string
+To: stable@vger.kernel.org
+Cc: Alexander Usyskin <alexander.usyskin@intel.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260128213608.2762086-1-sashal@kernel.org>
+
+From: Alexander Usyskin <alexander.usyskin@intel.com>
+
+[ Upstream commit 06d5a7afe1d0b47102936d8fba568572c2b4b941 ]
+
+The commit
+afd2627f727b ("tracing: Check "%s" dereference via the field and not the TP_printk format")
+forbids emitting an event with a plain char* without a wrapper.
+
+The reg parameter always passed as static string and wrapper
+is not strictly required, contrary to dev parameter.
+Use the string wrapper anyway to check sanity of the reg parameters,
+store it value independently and prevent internal kernel data leaks.
+
+Since some code refactoring has taken place, explicit backporting may
+be needed for kernels older than 6.10.
+
+Cc: stable@vger.kernel.org # v6.11+
+Fixes: a0a927d06d79 ("mei: me: add io register tracing")
+Signed-off-by: Alexander Usyskin <alexander.usyskin@intel.com>
+Link: https://patch.msgid.link/20260111145125.1754912-1-alexander.usyskin@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+[ adapted __assign_str() calls to use two arguments ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/misc/mei/mei-trace.h | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+--- a/drivers/misc/mei/mei-trace.h
++++ b/drivers/misc/mei/mei-trace.h
+@@ -21,18 +21,18 @@ TRACE_EVENT(mei_reg_read,
+ TP_ARGS(dev, reg, offs, val),
+ TP_STRUCT__entry(
+ __string(dev, dev_name(dev))
+- __field(const char *, reg)
++ __string(reg, reg)
+ __field(u32, offs)
+ __field(u32, val)
+ ),
+ TP_fast_assign(
+ __assign_str(dev, dev_name(dev));
+- __entry->reg = reg;
++ __assign_str(reg, reg);
+ __entry->offs = offs;
+ __entry->val = val;
+ ),
+ TP_printk("[%s] read %s:[%#x] = %#x",
+- __get_str(dev), __entry->reg, __entry->offs, __entry->val)
++ __get_str(dev), __get_str(reg), __entry->offs, __entry->val)
+ );
+
+ TRACE_EVENT(mei_reg_write,
+@@ -40,18 +40,18 @@ TRACE_EVENT(mei_reg_write,
+ TP_ARGS(dev, reg, offs, val),
+ TP_STRUCT__entry(
+ __string(dev, dev_name(dev))
+- __field(const char *, reg)
++ __string(reg, reg)
+ __field(u32, offs)
+ __field(u32, val)
+ ),
+ TP_fast_assign(
+ __assign_str(dev, dev_name(dev));
+- __entry->reg = reg;
++ __assign_str(reg, reg);
+ __entry->offs = offs;
+ __entry->val = val;
+ ),
+ TP_printk("[%s] write %s[%#x] = %#x",
+- __get_str(dev), __entry->reg, __entry->offs, __entry->val)
++ __get_str(dev), __get_str(reg), __entry->offs, __entry->val)
+ );
+
+ TRACE_EVENT(mei_pci_cfg_read,
+@@ -59,18 +59,18 @@ TRACE_EVENT(mei_pci_cfg_read,
+ TP_ARGS(dev, reg, offs, val),
+ TP_STRUCT__entry(
+ __string(dev, dev_name(dev))
+- __field(const char *, reg)
++ __string(reg, reg)
+ __field(u32, offs)
+ __field(u32, val)
+ ),
+ TP_fast_assign(
+ __assign_str(dev, dev_name(dev));
+- __entry->reg = reg;
++ __assign_str(reg, reg);
+ __entry->offs = offs;
+ __entry->val = val;
+ ),
+ TP_printk("[%s] pci cfg read %s:[%#x] = %#x",
+- __get_str(dev), __entry->reg, __entry->offs, __entry->val)
++ __get_str(dev), __get_str(reg), __entry->offs, __entry->val)
+ );
+
+ #endif /* _MEI_TRACE_H_ */
--- /dev/null
+From 870ff19251bf3910dda7a7245da826924045fedd Mon Sep 17 00:00:00 2001
+From: Pimyn Girgis <pimyn@google.com>
+Date: Tue, 20 Jan 2026 17:15:10 +0100
+Subject: mm/kfence: randomize the freelist on initialization
+
+From: Pimyn Girgis <pimyn@google.com>
+
+commit 870ff19251bf3910dda7a7245da826924045fedd upstream.
+
+Randomize the KFENCE freelist during pool initialization to make
+allocation patterns less predictable. This is achieved by shuffling the
+order in which metadata objects are added to the freelist using
+get_random_u32_below().
+
+Additionally, ensure the error path correctly calculates the address range
+to be reset if initialization fails, as the address increment logic has
+been moved to a separate loop.
+
+Link: https://lkml.kernel.org/r/20260120161510.3289089-1-pimyn@google.com
+Fixes: 0ce20dd84089 ("mm: add Kernel Electric-Fence infrastructure")
+Signed-off-by: Pimyn Girgis <pimyn@google.com>
+Reviewed-by: Alexander Potapenko <glider@google.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Marco Elver <elver@google.com>
+Cc: Ernesto Martínez García <ernesto.martinezgarcia@tugraz.at>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Kees Cook <kees@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Pimyn Girgis <pimyn@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/kfence/core.c | 23 +++++++++++++++++++----
+ 1 file changed, 19 insertions(+), 4 deletions(-)
+
+--- a/mm/kfence/core.c
++++ b/mm/kfence/core.c
+@@ -577,7 +577,7 @@ static unsigned long kfence_init_pool(vo
+ {
+ unsigned long addr;
+ struct page *pages;
+- int i;
++ int i, rand;
+
+ if (!arch_kfence_init_pool())
+ return (unsigned long)__kfence_pool;
+@@ -626,13 +626,27 @@ static unsigned long kfence_init_pool(vo
+ INIT_LIST_HEAD(&meta->list);
+ raw_spin_lock_init(&meta->lock);
+ meta->state = KFENCE_OBJECT_UNUSED;
+- meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */
+- list_add_tail(&meta->list, &kfence_freelist);
++ /* Use addr to randomize the freelist. */
++ meta->addr = i;
+
+ /* Protect the right redzone. */
+- if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
++ if (unlikely(!kfence_protect(addr + 2 * i * PAGE_SIZE + PAGE_SIZE)))
+ goto reset_slab;
++ }
++
++ for (i = CONFIG_KFENCE_NUM_OBJECTS; i > 0; i--) {
++ rand = get_random_u32_below(i);
++ swap(kfence_metadata_init[i - 1].addr, kfence_metadata_init[rand].addr);
++ }
+
++ for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
++ struct kfence_metadata *meta_1 = &kfence_metadata_init[i];
++ struct kfence_metadata *meta_2 = &kfence_metadata_init[meta_1->addr];
++
++ list_add_tail(&meta_2->list, &kfence_freelist);
++ }
++ for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
++ kfence_metadata_init[i].addr = addr;
+ addr += 2 * PAGE_SIZE;
+ }
+
+@@ -645,6 +659,7 @@ static unsigned long kfence_init_pool(vo
+ return 0;
+
+ reset_slab:
++ addr += 2 * i * PAGE_SIZE;
+ for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
+ struct slab *slab = page_slab(nth_page(pages, i));
+
drm-amdgpu-soc21-fix-xclk-for-apus.patch
drm-amdgpu-gfx10-fix-wptr-reset-in-kgq-init.patch
drm-amdgpu-gfx11-fix-wptr-reset-in-kgq-init.patch
+gpio-rockchip-stop-calling-pinctrl-for-set_direction.patch
+mm-kfence-randomize-the-freelist-on-initialization.patch
+arm64-fpsimd-signal-mandate-sve-payload-for-streaming-mode-state.patch
+arm64-fpsimd-signal-consistently-read-fpsimd-context.patch
+arm64-fpsimd-signal-fix-restoration-of-sve-context.patch
+mei-trace-treat-reg-parameter-as-string.patch
+ksmbd-smbd-fix-dma_unmap_sg-nents.patch
+drm-amdgpu-replace-mutex-with-spinlock-for-rlcg-register-access-to-avoid-priority-inversion-in-sriov.patch
+ksmbd-fix-race-condition-in-rpc-handle-list-access.patch
+wifi-mac80211-move-tdls-work-to-wiphy-work.patch
+wifi-ath11k-add-srng-lock-for-ath11k_hal_srng_-in-monitor-mode.patch
+team-move-team-device-type-change-at-the-end-of-team_port_add.patch
+drm-radeon-delete-radeon_fence_process-in-is_signaled-no-deadlock.patch
--- /dev/null
+From stable+bounces-213166-greg=kroah.com@vger.kernel.org Tue Feb 3 05:47:11 2026
+From: Rahul Sharma <black.hawk@163.com>
+Date: Tue, 3 Feb 2026 12:45:57 +0800
+Subject: team: Move team device type change at the end of team_port_add
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org, "Nikola Z. Ivanov" <zlatistiv@gmail.com>, syzbot+a2a3b519de727b0f7903@syzkaller.appspotmail.com, Jiri Pirko <jiri@nvidia.com>, Jakub Kicinski <kuba@kernel.org>, Rahul Sharma <black.hawk@163.com>
+Message-ID: <20260203044557.440244-1-black.hawk@163.com>
+
+From: "Nikola Z. Ivanov" <zlatistiv@gmail.com>
+
+[ Upstream commit 0ae9cfc454ea5ead5f3ddbdfe2e70270d8e2c8ef ]
+
+Attempting to add a port device that is already up will expectedly fail,
+but not before modifying the team device header_ops.
+
+In the case of the syzbot reproducer the gre0 device is
+already in state UP when it attempts to add it as a
+port device of team0, this fails but before that
+header_ops->create of team0 is changed from eth_header to ipgre_header
+in the call to team_dev_type_check_change.
+
+Later when we end up in ipgre_header() struct ip_tunnel* points to nonsense
+as the private data of the device still holds a struct team.
+
+Example sequence of iproute2 commands to reproduce the hang/BUG():
+ip link add dev team0 type team
+ip link add dev gre0 type gre
+ip link set dev gre0 up
+ip link set dev gre0 master team0
+ip link set dev team0 up
+ping -I team0 1.1.1.1
+
+Move team_dev_type_check_change down where all other checks have passed
+as it changes the dev type with no way to restore it in case
+one of the checks that follow it fail.
+
+Also make sure to preserve the original mtu assignment:
+ - If port_dev is not the same type as dev, dev takes mtu from port_dev
+ - If port_dev is the same type as dev, port_dev takes mtu from dev
+
+This is done by adding a conditional before the call to dev_set_mtu
+to prevent it from assigning port_dev->mtu = dev->mtu and instead
+letting team_dev_type_check_change assign dev->mtu = port_dev->mtu.
+The conditional is needed because the patch moves the call to
+team_dev_type_check_change past dev_set_mtu.
+
+Testing:
+ - team device driver in-tree selftests
+ - Add/remove various devices as slaves of team device
+ - syzbot
+
+Reported-by: syzbot+a2a3b519de727b0f7903@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=a2a3b519de727b0f7903
+Fixes: 1d76efe1577b ("team: add support for non-ethernet devices")
+Signed-off-by: Nikola Z. Ivanov <zlatistiv@gmail.com>
+Reviewed-by: Jiri Pirko <jiri@nvidia.com>
+Link: https://patch.msgid.link/20251122002027.695151-1-zlatistiv@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Rahul Sharma <black.hawk@163.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/team/team.c | 23 +++++++++++++++--------
+ 1 file changed, 15 insertions(+), 8 deletions(-)
+
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -1184,10 +1184,6 @@ static int team_port_add(struct team *te
+ return -EPERM;
+ }
+
+- err = team_dev_type_check_change(dev, port_dev);
+- if (err)
+- return err;
+-
+ if (port_dev->flags & IFF_UP) {
+ NL_SET_ERR_MSG(extack, "Device is up. Set it down before adding it as a team port");
+ netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
+@@ -1205,10 +1201,16 @@ static int team_port_add(struct team *te
+ INIT_LIST_HEAD(&port->qom_list);
+
+ port->orig.mtu = port_dev->mtu;
+- err = dev_set_mtu(port_dev, dev->mtu);
+- if (err) {
+- netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
+- goto err_set_mtu;
++ /*
++ * MTU assignment will be handled in team_dev_type_check_change
++ * if dev and port_dev are of different types
++ */
++ if (dev->type == port_dev->type) {
++ err = dev_set_mtu(port_dev, dev->mtu);
++ if (err) {
++ netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
++ goto err_set_mtu;
++ }
+ }
+
+ memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);
+@@ -1283,6 +1285,10 @@ static int team_port_add(struct team *te
+ }
+ }
+
++ err = team_dev_type_check_change(dev, port_dev);
++ if (err)
++ goto err_set_dev_type;
++
+ if (dev->flags & IFF_UP) {
+ netif_addr_lock_bh(dev);
+ dev_uc_sync_multiple(port_dev, dev);
+@@ -1301,6 +1307,7 @@ static int team_port_add(struct team *te
+
+ return 0;
+
++err_set_dev_type:
+ err_set_slave_promisc:
+ __team_option_inst_del_port(team, port);
+
--- /dev/null
+From stable+bounces-213159-greg=kroah.com@vger.kernel.org Tue Feb 3 04:14:07 2026
+From: Li hongliang <1468888505@139.com>
+Date: Tue, 3 Feb 2026 11:13:45 +0800
+Subject: wifi: ath11k: add srng->lock for ath11k_hal_srng_* in monitor mode
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org, quic_kangyang@quicinc.com
+Cc: patches@lists.linux.dev, linux-kernel@vger.kernel.org, kvalo@kernel.org, jjohnson@kernel.org, quic_vthiagar@quicinc.com, quic_vnaralas@quicinc.com, quic_msinada@quicinc.com, gseset@codeaurora.org, linux-wireless@vger.kernel.org, ath11k@lists.infradead.org, quic_jjohnson@quicinc.com, jeff.johnson@oss.qualcomm.com
+Message-ID: <20260203031345.1357063-1-1468888505@139.com>
+
+From: Kang Yang <quic_kangyang@quicinc.com>
+
+[ Upstream commit 63b7af49496d0e32f7a748b6af3361ec138b1bd3 ]
+
+ath11k_hal_srng_* should be used with srng->lock to protect srng data.
+
+For ath11k_dp_rx_mon_dest_process() and ath11k_dp_full_mon_process_rx(),
+they use ath11k_hal_srng_* for many times but never call srng->lock.
+
+So when running (full) monitor mode, warning will occur:
+RIP: 0010:ath11k_hal_srng_dst_peek+0x18/0x30 [ath11k]
+Call Trace:
+ ? ath11k_hal_srng_dst_peek+0x18/0x30 [ath11k]
+ ath11k_dp_rx_process_mon_status+0xc45/0x1190 [ath11k]
+ ? idr_alloc_u32+0x97/0xd0
+ ath11k_dp_rx_process_mon_rings+0x32a/0x550 [ath11k]
+ ath11k_dp_service_srng+0x289/0x5a0 [ath11k]
+ ath11k_pcic_ext_grp_napi_poll+0x30/0xd0 [ath11k]
+ __napi_poll+0x30/0x1f0
+ net_rx_action+0x198/0x320
+ __do_softirq+0xdd/0x319
+
+So add srng->lock for them to avoid such warnings.
+
+Inorder to fetch the srng->lock, should change srng's definition from
+'void' to 'struct hal_srng'. And initialize them elsewhere to prevent
+one line of code from being too long. This is consistent with other ring
+process functions, such as ath11k_dp_process_rx().
+
+Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-03125-QCAHSPSWPL_V1_V2_SILICONZ_LITE-3.6510.30
+Tested-on: QCN9074 hw1.0 PCI WLAN.HK.2.7.0.1-01744-QCAHKSWPL_SILICONZ-1
+
+Fixes: d5c65159f289 ("ath11k: driver for Qualcomm IEEE 802.11ax devices")
+Signed-off-by: Kang Yang <quic_kangyang@quicinc.com>
+Acked-by: Jeff Johnson <quic_jjohnson@quicinc.com>
+Link: https://patch.msgid.link/20241219110531.2096-3-quic_kangyang@quicinc.com
+Signed-off-by: Jeff Johnson <jeff.johnson@oss.qualcomm.com>
+Signed-off-by: Li hongliang <1468888505@139.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/ath/ath11k/dp_rx.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
++++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
+@@ -5093,7 +5093,7 @@ static void ath11k_dp_rx_mon_dest_proces
+ struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
+ const struct ath11k_hw_hal_params *hal_params;
+ void *ring_entry;
+- void *mon_dst_srng;
++ struct hal_srng *mon_dst_srng;
+ u32 ppdu_id;
+ u32 rx_bufs_used;
+ u32 ring_id;
+@@ -5117,6 +5117,7 @@ static void ath11k_dp_rx_mon_dest_proces
+
+ spin_lock_bh(&pmon->mon_lock);
+
++ spin_lock_bh(&mon_dst_srng->lock);
+ ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
+
+ ppdu_id = pmon->mon_ppdu_info.ppdu_id;
+@@ -5175,6 +5176,7 @@ static void ath11k_dp_rx_mon_dest_proces
+ mon_dst_srng);
+ }
+ ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
++ spin_unlock_bh(&mon_dst_srng->lock);
+
+ spin_unlock_bh(&pmon->mon_lock);
+
+@@ -5564,7 +5566,7 @@ static int ath11k_dp_full_mon_process_rx
+ struct hal_sw_mon_ring_entries *sw_mon_entries;
+ struct ath11k_pdev_mon_stats *rx_mon_stats;
+ struct sk_buff *head_msdu, *tail_msdu;
+- void *mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
++ struct hal_srng *mon_dst_srng;
+ void *ring_entry;
+ u32 rx_bufs_used = 0, mpdu_rx_bufs_used;
+ int quota = 0, ret;
+@@ -5580,6 +5582,9 @@ static int ath11k_dp_full_mon_process_rx
+ goto reap_status_ring;
+ }
+
++ mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
++ spin_lock_bh(&mon_dst_srng->lock);
++
+ ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
+ while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
+ head_msdu = NULL;
+@@ -5623,6 +5628,7 @@ next_entry:
+ }
+
+ ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
++ spin_unlock_bh(&mon_dst_srng->lock);
+ spin_unlock_bh(&pmon->mon_lock);
+
+ if (rx_bufs_used) {
--- /dev/null
+From stable+bounces-213085-greg=kroah.com@vger.kernel.org Mon Feb 2 17:51:30 2026
+From: "Hanne-Lotta Mäenpää" <hannelotta@gmail.com>
+Date: Mon, 2 Feb 2026 18:47:45 +0200
+Subject: wifi: mac80211: move TDLS work to wiphy work
+To: stable@vger.kernel.org
+Cc: johannes@sipsolutions.net, linux-wireless@vger.kernel.org, linux-kernel@vger.kernel.org, "Johannes Berg" <johannes.berg@intel.com>, "Emmanuel Grumbach" <emmanuel.grumbach@intel.com>, "Hanne-Lotta Mäenpää" <hannelotta@gmail.com>
+Message-ID: <20260202164745.215560-1-hannelotta@gmail.com>
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+[ Upstream commit 777b26002b73127e81643d9286fadf3d41e0e477 ]
+
+Again, to have the wiphy locked for it.
+
+Reviewed-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+(cherry picked from commit 777b26002b73127e81643d9286fadf3d41e0e477)
+Signed-off-by: Hanne-Lotta Mäenpää <hannelotta@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mac80211/ieee80211_i.h | 4 ++--
+ net/mac80211/mlme.c | 7 ++++---
+ net/mac80211/tdls.c | 11 ++++++-----
+ 3 files changed, 12 insertions(+), 10 deletions(-)
+
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -530,7 +530,7 @@ struct ieee80211_if_managed {
+
+ /* TDLS support */
+ u8 tdls_peer[ETH_ALEN] __aligned(2);
+- struct delayed_work tdls_peer_del_work;
++ struct wiphy_delayed_work tdls_peer_del_work;
+ struct sk_buff *orig_teardown_skb; /* The original teardown skb */
+ struct sk_buff *teardown_skb; /* A copy to send through the AP */
+ spinlock_t teardown_lock; /* To lock changing teardown_skb */
+@@ -2599,7 +2599,7 @@ int ieee80211_tdls_mgmt(struct wiphy *wi
+ const u8 *extra_ies, size_t extra_ies_len);
+ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *peer, enum nl80211_tdls_operation oper);
+-void ieee80211_tdls_peer_del_work(struct work_struct *wk);
++void ieee80211_tdls_peer_del_work(struct wiphy *wiphy, struct wiphy_work *wk);
+ int ieee80211_tdls_channel_switch(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *addr, u8 oper_class,
+ struct cfg80211_chan_def *chandef);
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -6866,8 +6866,8 @@ void ieee80211_sta_setup_sdata(struct ie
+ ieee80211_beacon_connection_loss_work);
+ wiphy_work_init(&ifmgd->csa_connection_drop_work,
+ ieee80211_csa_connection_drop_work);
+- INIT_DELAYED_WORK(&ifmgd->tdls_peer_del_work,
+- ieee80211_tdls_peer_del_work);
++ wiphy_delayed_work_init(&ifmgd->tdls_peer_del_work,
++ ieee80211_tdls_peer_del_work);
+ wiphy_delayed_work_init(&ifmgd->ml_reconf_work,
+ ieee80211_ml_reconf_work);
+ timer_setup(&ifmgd->timer, ieee80211_sta_timer, 0);
+@@ -7881,7 +7881,8 @@ void ieee80211_mgd_stop(struct ieee80211
+ &ifmgd->beacon_connection_loss_work);
+ wiphy_work_cancel(sdata->local->hw.wiphy,
+ &ifmgd->csa_connection_drop_work);
+- cancel_delayed_work_sync(&ifmgd->tdls_peer_del_work);
++ wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
++ &ifmgd->tdls_peer_del_work);
+ wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
+ &ifmgd->ml_reconf_work);
+
+--- a/net/mac80211/tdls.c
++++ b/net/mac80211/tdls.c
+@@ -21,7 +21,7 @@
+ /* give usermode some time for retries in setting up the TDLS session */
+ #define TDLS_PEER_SETUP_TIMEOUT (15 * HZ)
+
+-void ieee80211_tdls_peer_del_work(struct work_struct *wk)
++void ieee80211_tdls_peer_del_work(struct wiphy *wiphy, struct wiphy_work *wk)
+ {
+ struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_local *local;
+@@ -1224,9 +1224,9 @@ ieee80211_tdls_mgmt_setup(struct wiphy *
+ return ret;
+ }
+
+- ieee80211_queue_delayed_work(&sdata->local->hw,
+- &sdata->u.mgd.tdls_peer_del_work,
+- TDLS_PEER_SETUP_TIMEOUT);
++ wiphy_delayed_work_queue(sdata->local->hw.wiphy,
++ &sdata->u.mgd.tdls_peer_del_work,
++ TDLS_PEER_SETUP_TIMEOUT);
+ return 0;
+
+ out_unlock:
+@@ -1526,7 +1526,8 @@ int ieee80211_tdls_oper(struct wiphy *wi
+ }
+
+ if (ret == 0 && ether_addr_equal(sdata->u.mgd.tdls_peer, peer)) {
+- cancel_delayed_work(&sdata->u.mgd.tdls_peer_del_work);
++ wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
++ &sdata->u.mgd.tdls_peer_del_work);
+ eth_zero_addr(sdata->u.mgd.tdls_peer);
+ }
+