--- /dev/null
+From stable+bounces-231263-greg=kroah.com@vger.kernel.org Mon Mar 30 20:38:08 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 30 Mar 2026 14:37:58 -0400
+Subject: drm/xe: always keep track of remap prev/next
+To: stable@vger.kernel.org
+Cc: Matthew Auld <matthew.auld@intel.com>, Matthew Brost <matthew.brost@intel.com>, Rodrigo Vivi <rodrigo.vivi@intel.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260330183758.941967-1-sashal@kernel.org>
+
+From: Matthew Auld <matthew.auld@intel.com>
+
+[ Upstream commit bfe9e314d7574d1c5c851972e7aee342733819d2 ]
+
+During 3D workload, user is reporting hitting:
+
+[ 413.361679] WARNING: drivers/gpu/drm/xe/xe_vm.c:1217 at vm_bind_ioctl_ops_unwind+0x1e2/0x2e0 [xe], CPU#7: vkd3d_queue/9925
+[ 413.361944] CPU: 7 UID: 1000 PID: 9925 Comm: vkd3d_queue Kdump: loaded Not tainted 7.0.0-070000rc3-generic #202603090038 PREEMPT(lazy)
+[ 413.361949] RIP: 0010:vm_bind_ioctl_ops_unwind+0x1e2/0x2e0 [xe]
+[ 413.362074] RSP: 0018:ffffd4c25c3df930 EFLAGS: 00010282
+[ 413.362077] RAX: 0000000000000000 RBX: ffff8f3ee817ed10 RCX: 0000000000000000
+[ 413.362078] RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000
+[ 413.362079] RBP: ffffd4c25c3df980 R08: 0000000000000000 R09: 0000000000000000
+[ 413.362081] R10: 0000000000000000 R11: 0000000000000000 R12: ffff8f41fbf99380
+[ 413.362082] R13: ffff8f3ee817e968 R14: 00000000ffffffef R15: ffff8f43d00bd380
+[ 413.362083] FS: 00000001040ff6c0(0000) GS:ffff8f4696d89000(0000) knlGS:00000000330b0000
+[ 413.362085] CS: 0010 DS: 002b ES: 002b CR0: 0000000080050033
+[ 413.362086] CR2: 00007ddfc4747000 CR3: 00000002e6262005 CR4: 0000000000f72ef0
+[ 413.362088] PKRU: 55555554
+[ 413.362089] Call Trace:
+[ 413.362092] <TASK>
+[ 413.362096] xe_vm_bind_ioctl+0xa9a/0xc60 [xe]
+
+Which seems to hint that the vma we are re-inserting for the ops unwind
+is either invalid or overlapping with something already inserted in the
+vm. It shouldn't be invalid since this is a re-insertion, so must have
+worked before. Leaving the likely culprit as something already placed
+where we want to insert the vma.
+
+Following from that, for the case where we do something like a rebind in
+the middle of a vma, and one or both mapped ends are already compatible,
+we skip doing the rebind of those vma and set next/prev to NULL. As well
+as then adjust the original unmap va range, to avoid unmapping the ends.
+However, if we trigger the unwind path, we end up with three va, with
+the two ends never being removed and the original va range in the middle
+still being the shrunken size.
+
+If this occurs, one failure mode is when another unwind op needs to
+interact with that range, which can happen with a vector of binds. For
+example, if we need to re-insert something in place of the original va.
+In this case the va is still the shrunken version, so when removing it
+and then doing a re-insert it can overlap with the ends, which were
+never removed, triggering a warning like above, plus leaving the vm in a
+bad state.
+
+With that, we need two things here:
+
+ 1) Stop nuking the prev/next tracking for the skip cases. Instead
+ relying on checking for skip prev/next, where needed. That way on the
+ unwind path, we now correctly remove both ends.
+
+ 2) Undo the unmap va shrinkage, on the unwind path. With the two ends
+ now removed the unmap va should expand back to the original size again,
+ before re-insertion.
+
+v2:
+ - Update the explanation in the commit message, based on an actual IGT of
+ triggering this issue, rather than conjecture.
+ - Also undo the unmap shrinkage, for the skip case. With the two ends
+ now removed, the original unmap va range should expand back to the
+ original range.
+v3:
+ - Track the old start/range separately. vma_size/start() uses the va
+ info directly.
+
+Link: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/7602
+Fixes: 8f33b4f054fc ("drm/xe: Avoid doing rebinds")
+Signed-off-by: Matthew Auld <matthew.auld@intel.com>
+Cc: Matthew Brost <matthew.brost@intel.com>
+Cc: <stable@vger.kernel.org> # v6.8+
+Reviewed-by: Matthew Brost <matthew.brost@intel.com>
+Link: https://patch.msgid.link/20260318100208.78097-2-matthew.auld@intel.com
+(cherry picked from commit aec6969f75afbf4e01fd5fb5850ed3e9c27043ac)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+[ adapted function signatures ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_pt.c | 12 ++++++------
+ drivers/gpu/drm/xe/xe_vm.c | 22 ++++++++++++++++++----
+ drivers/gpu/drm/xe/xe_vm_types.h | 4 ++++
+ 3 files changed, 28 insertions(+), 10 deletions(-)
+
+--- a/drivers/gpu/drm/xe/xe_pt.c
++++ b/drivers/gpu/drm/xe/xe_pt.c
+@@ -1281,9 +1281,9 @@ static int op_check_userptr(struct xe_vm
+ err = vma_check_userptr(vm, op->map.vma, pt_update);
+ break;
+ case DRM_GPUVA_OP_REMAP:
+- if (op->remap.prev)
++ if (op->remap.prev && !op->remap.skip_prev)
+ err = vma_check_userptr(vm, op->remap.prev, pt_update);
+- if (!err && op->remap.next)
++ if (!err && op->remap.next && !op->remap.skip_next)
+ err = vma_check_userptr(vm, op->remap.next, pt_update);
+ break;
+ case DRM_GPUVA_OP_UNMAP:
+@@ -1784,12 +1784,12 @@ static int op_prepare(struct xe_vm *vm,
+ err = unbind_op_prepare(tile, pt_update_ops,
+ gpuva_to_vma(op->base.remap.unmap->va));
+
+- if (!err && op->remap.prev) {
++ if (!err && op->remap.prev && !op->remap.skip_prev) {
+ err = bind_op_prepare(vm, tile, pt_update_ops,
+ op->remap.prev);
+ pt_update_ops->wait_vm_bookkeep = true;
+ }
+- if (!err && op->remap.next) {
++ if (!err && op->remap.next && !op->remap.skip_next) {
+ err = bind_op_prepare(vm, tile, pt_update_ops,
+ op->remap.next);
+ pt_update_ops->wait_vm_bookkeep = true;
+@@ -1950,10 +1950,10 @@ static void op_commit(struct xe_vm *vm,
+ gpuva_to_vma(op->base.remap.unmap->va), fence,
+ fence2);
+
+- if (op->remap.prev)
++ if (op->remap.prev && !op->remap.skip_prev)
+ bind_op_commit(vm, tile, pt_update_ops, op->remap.prev,
+ fence, fence2);
+- if (op->remap.next)
++ if (op->remap.next && !op->remap.skip_next)
+ bind_op_commit(vm, tile, pt_update_ops, op->remap.next,
+ fence, fence2);
+ break;
+--- a/drivers/gpu/drm/xe/xe_vm.c
++++ b/drivers/gpu/drm/xe/xe_vm.c
+@@ -2187,7 +2187,6 @@ static int xe_vma_op_commit(struct xe_vm
+ if (!err && op->remap.skip_prev) {
+ op->remap.prev->tile_present =
+ tile_present;
+- op->remap.prev = NULL;
+ }
+ }
+ if (op->remap.next) {
+@@ -2197,11 +2196,13 @@ static int xe_vma_op_commit(struct xe_vm
+ if (!err && op->remap.skip_next) {
+ op->remap.next->tile_present =
+ tile_present;
+- op->remap.next = NULL;
+ }
+ }
+
+- /* Adjust for partial unbind after removin VMA from VM */
++ /*
++ * Adjust for partial unbind after removing VMA from VM. In case
++ * of unwind we might need to undo this later.
++ */
+ if (!err) {
+ op->base.remap.unmap->va->va.addr = op->remap.start;
+ op->base.remap.unmap->va->va.range = op->remap.range;
+@@ -2273,6 +2274,8 @@ static int vm_bind_ioctl_ops_parse(struc
+
+ op->remap.start = xe_vma_start(old);
+ op->remap.range = xe_vma_size(old);
++ op->remap.old_start = op->remap.start;
++ op->remap.old_range = op->remap.range;
+
+ if (op->base.remap.prev) {
+ flags |= op->base.remap.unmap->va->flags &
+@@ -2421,8 +2424,19 @@ static void xe_vma_op_unwind(struct xe_v
+ down_read(&vm->userptr.notifier_lock);
+ vma->gpuva.flags &= ~XE_VMA_DESTROYED;
+ up_read(&vm->userptr.notifier_lock);
+- if (post_commit)
++ if (post_commit) {
++ /*
++ * Restore the old va range, in case of the
++ * prev/next skip optimisation. Otherwise what
++ * we re-insert here could be smaller than the
++ * original range.
++ */
++ op->base.remap.unmap->va->va.addr =
++ op->remap.old_start;
++ op->base.remap.unmap->va->va.range =
++ op->remap.old_range;
+ xe_vm_insert_vma(vm, vma);
++ }
+ }
+ break;
+ }
+--- a/drivers/gpu/drm/xe/xe_vm_types.h
++++ b/drivers/gpu/drm/xe/xe_vm_types.h
+@@ -314,6 +314,10 @@ struct xe_vma_op_remap {
+ u64 start;
+ /** @range: range of the VMA unmap */
+ u64 range;
++ /** @old_start: Original start of the VMA we unmap */
++ u64 old_start;
++ /** @old_range: Original range of the VMA we unmap */
++ u64 old_range;
+ /** @skip_prev: skip prev rebind */
+ bool skip_prev;
+ /** @skip_next: skip next rebind */
--- /dev/null
+From stable+bounces-231298-greg=kroah.com@vger.kernel.org Tue Mar 31 01:46:48 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 30 Mar 2026 19:46:42 -0400
+Subject: ksmbd: fix use-after-free and NULL deref in smb_grant_oplock()
+To: stable@vger.kernel.org
+Cc: Werner Kasselman <werner@verivus.com>, ChenXiaoSong <chenxiaosong@kylinos.cn>, Namjae Jeon <linkinjeon@kernel.org>, Steve French <stfrench@microsoft.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260330234642.1399542-1-sashal@kernel.org>
+
+From: Werner Kasselman <werner@verivus.com>
+
+[ Upstream commit 48623ec358c1c600fa1e38368746f933e0f1a617 ]
+
+smb_grant_oplock() has two issues in the oplock publication sequence:
+
+1) opinfo is linked into ci->m_op_list (via opinfo_add) before
+ add_lease_global_list() is called. If add_lease_global_list()
+ fails (kmalloc returns NULL), the error path frees the opinfo
+ via __free_opinfo() while it is still linked in ci->m_op_list.
+ Concurrent m_op_list readers (opinfo_get_list, or direct iteration
+ in smb_break_all_levII_oplock) dereference the freed node.
+
+2) opinfo->o_fp is assigned after add_lease_global_list() publishes
+ the opinfo on the global lease list. A concurrent
+ find_same_lease_key() can walk the lease list and dereference
+ opinfo->o_fp->f_ci while o_fp is still NULL.
+
+Fix by restructuring the publication sequence to eliminate post-publish
+failure:
+
+- Set opinfo->o_fp before any list publication (fixes NULL deref).
+- Preallocate lease_table via alloc_lease_table() before opinfo_add()
+ so add_lease_global_list() becomes infallible after publication.
+- Keep the original m_op_list publication order (opinfo_add before
+ lease list) so concurrent opens via same_client_has_lease() and
+ opinfo_get_list() still see the in-flight grant.
+- Use opinfo_put() instead of __free_opinfo() on err_out so that
+ the RCU-deferred free path is used.
+
+This also requires splitting add_lease_global_list() to take a
+preallocated lease_table and changing its return type from int to void,
+since it can no longer fail.
+
+Fixes: 1dfd062caa16 ("ksmbd: fix use-after-free by using call_rcu() for oplock_info")
+Cc: stable@vger.kernel.org
+Signed-off-by: Werner Kasselman <werner@verivus.com>
+Reviewed-by: ChenXiaoSong <chenxiaosong@kylinos.cn>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+[ adapted kmalloc_obj() macro to kmalloc(sizeof()) ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/oplock.c | 72 ++++++++++++++++++++++++++++++-------------------
+ 1 file changed, 45 insertions(+), 27 deletions(-)
+
+--- a/fs/smb/server/oplock.c
++++ b/fs/smb/server/oplock.c
+@@ -82,11 +82,19 @@ static void lease_del_list(struct oplock
+ spin_unlock(&lb->lb_lock);
+ }
+
+-static void lb_add(struct lease_table *lb)
++static struct lease_table *alloc_lease_table(struct oplock_info *opinfo)
+ {
+- write_lock(&lease_list_lock);
+- list_add(&lb->l_entry, &lease_table_list);
+- write_unlock(&lease_list_lock);
++ struct lease_table *lb;
++
++ lb = kmalloc(sizeof(struct lease_table), KSMBD_DEFAULT_GFP);
++ if (!lb)
++ return NULL;
++
++ memcpy(lb->client_guid, opinfo->conn->ClientGUID,
++ SMB2_CLIENT_GUID_SIZE);
++ INIT_LIST_HEAD(&lb->lease_list);
++ spin_lock_init(&lb->lb_lock);
++ return lb;
+ }
+
+ static int alloc_lease(struct oplock_info *opinfo, struct lease_ctx_info *lctx)
+@@ -1042,34 +1050,27 @@ static void copy_lease(struct oplock_inf
+ lease2->version = lease1->version;
+ }
+
+-static int add_lease_global_list(struct oplock_info *opinfo)
++static void add_lease_global_list(struct oplock_info *opinfo,
++ struct lease_table *new_lb)
+ {
+ struct lease_table *lb;
+
+- read_lock(&lease_list_lock);
++ write_lock(&lease_list_lock);
+ list_for_each_entry(lb, &lease_table_list, l_entry) {
+ if (!memcmp(lb->client_guid, opinfo->conn->ClientGUID,
+ SMB2_CLIENT_GUID_SIZE)) {
+ opinfo->o_lease->l_lb = lb;
+ lease_add_list(opinfo);
+- read_unlock(&lease_list_lock);
+- return 0;
++ write_unlock(&lease_list_lock);
++ kfree(new_lb);
++ return;
+ }
+ }
+- read_unlock(&lease_list_lock);
+
+- lb = kmalloc(sizeof(struct lease_table), KSMBD_DEFAULT_GFP);
+- if (!lb)
+- return -ENOMEM;
+-
+- memcpy(lb->client_guid, opinfo->conn->ClientGUID,
+- SMB2_CLIENT_GUID_SIZE);
+- INIT_LIST_HEAD(&lb->lease_list);
+- spin_lock_init(&lb->lb_lock);
+- opinfo->o_lease->l_lb = lb;
++ opinfo->o_lease->l_lb = new_lb;
+ lease_add_list(opinfo);
+- lb_add(lb);
+- return 0;
++ list_add(&new_lb->l_entry, &lease_table_list);
++ write_unlock(&lease_list_lock);
+ }
+
+ static void set_oplock_level(struct oplock_info *opinfo, int level,
+@@ -1189,6 +1190,7 @@ int smb_grant_oplock(struct ksmbd_work *
+ int err = 0;
+ struct oplock_info *opinfo = NULL, *prev_opinfo = NULL;
+ struct ksmbd_inode *ci = fp->f_ci;
++ struct lease_table *new_lb = NULL;
+ bool prev_op_has_lease;
+ __le32 prev_op_state = 0;
+
+@@ -1291,21 +1293,37 @@ set_lev:
+ set_oplock_level(opinfo, req_op_level, lctx);
+
+ out:
+- opinfo_count_inc(fp);
+- opinfo_add(opinfo, fp);
+-
++ /*
++ * Set o_fp before any publication so that concurrent readers
++ * (e.g. find_same_lease_key() on the lease list) that
++ * dereference opinfo->o_fp don't hit a NULL pointer.
++ *
++ * Keep the original publication order so concurrent opens can
++ * still observe the in-flight grant via ci->m_op_list, but make
++ * everything after opinfo_add() no-fail by preallocating any new
++ * lease_table first.
++ */
++ opinfo->o_fp = fp;
+ if (opinfo->is_lease) {
+- err = add_lease_global_list(opinfo);
+- if (err)
++ new_lb = alloc_lease_table(opinfo);
++ if (!new_lb) {
++ err = -ENOMEM;
+ goto err_out;
++ }
+ }
+
++ opinfo_count_inc(fp);
++ opinfo_add(opinfo, fp);
++
++ if (opinfo->is_lease)
++ add_lease_global_list(opinfo, new_lb);
++
+ rcu_assign_pointer(fp->f_opinfo, opinfo);
+- opinfo->o_fp = fp;
+
+ return 0;
+ err_out:
+- __free_opinfo(opinfo);
++ kfree(new_lb);
++ opinfo_put(opinfo);
+ return err;
+ }
+
--- /dev/null
+From stable+bounces-231141-greg=kroah.com@vger.kernel.org Mon Mar 30 12:03:48 2026
+From: Huacai Chen <chenhuacai@loongson.cn>
+Date: Mon, 30 Mar 2026 18:01:07 +0800
+Subject: LoongArch: vDSO: Emit GNU_EH_FRAME correctly
+To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Sasha Levin <sashal@kernel.org>, Huacai Chen <chenhuacai@kernel.org>
+Cc: Xuerui Wang <kernel@xen0n.name>, stable@vger.kernel.org, linux-kernel@vger.kernel.org, loongarch@lists.linux.dev, Xi Ruoyao <xry111@xry111.site>, Huacai Chen <chenhuacai@loongson.cn>
+Message-ID: <20260330100107.3955340-1-chenhuacai@loongson.cn>
+
+From: Xi Ruoyao <xry111@xry111.site>
+
+commit e4878c37f6679fdea91b27a0f4e60a871f0b7bad upstream.
+
+With -fno-asynchronous-unwind-tables and --no-eh-frame-hdr (the default
+of the linker), the GNU_EH_FRAME segment (specified by vdso.lds.S) is
+empty. This is not valid, as the current DWARF specification mandates
+the first byte of the EH frame to be the version number 1. It causes
+some unwinders to complain, for example the ClickHouse query profiler
+spams the log with messages:
+
+ clickhouse-server[365854]: libunwind: unsupported .eh_frame_hdr
+ version: 127 at 7ffffffb0000
+
+Here "127" is just the byte located at the p_vaddr (0, i.e. the
+beginning of the vDSO) of the empty GNU_EH_FRAME segment. Cross-
+checking with /proc/365854/maps has also proven 7ffffffb0000 is the
+start of vDSO in the process VM image.
+
+In LoongArch the -fno-asynchronous-unwind-tables option seems just a
+MIPS legacy, and MIPS only uses this option to satisfy the MIPS-specific
+"genvdso" program, per the commit cfd75c2db17e ("MIPS: VDSO: Explicitly
+use -fno-asynchronous-unwind-tables"). IIRC it indicates some inherent
+limitation of the MIPS ELF ABI and has nothing to do with LoongArch. So
+we can simply flip it over to -fasynchronous-unwind-tables and pass
+--eh-frame-hdr for linking the vDSO, allowing the profilers to unwind the
+stack for statistics even if the sample point is taken when the PC is in
+the vDSO.
+
+However simply adjusting the options above would exploit an issue: when
+the libgcc unwinder saw the invalid GNU_EH_FRAME segment, it silently
+falled back to a machine-specific routine to match the code pattern of
+rt_sigreturn() and extract the registers saved in the sigframe if the
+code pattern is matched. As unwinding from signal handlers is vital for
+libgcc to support pthread cancellation etc., the fall-back routine had
+been silently keeping the LoongArch Linux systems functioning since
+Linux 5.19. But when we start to emit GNU_EH_FRAME with the correct
+format, fall-back routine will no longer be used and libgcc will fail
+to unwind the sigframe, and unwinding from signal handlers will no
+longer work, causing dozens of glibc test failures. To make it possible
+to unwind from signal handlers again, it's necessary to code the unwind
+info in __vdso_rt_sigreturn via .cfi_* directives.
+
+The offsets in the .cfi_* directives depend on the layout of struct
+sigframe, notably the offset of sigcontext in the sigframe. To use the
+offset in the assembly file, factor out struct sigframe into a header to
+allow asm-offsets.c to output the offset for assembly.
+
+To work around a long-term issue in the libgcc unwinder (the pc is
+unconditionally subtracted by 1: doing so is technically incorrect for
+a signal frame), a nop instruction is included with the two real
+instructions in __vdso_rt_sigreturn in the same FDE PC range. The same
+hack has been used on x86 for a long time.
+
+Cc: stable@vger.kernel.org
+Fixes: c6b99bed6b8f ("LoongArch: Add VDSO and VSYSCALL support")
+Signed-off-by: Xi Ruoyao <xry111@xry111.site>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/include/asm/linkage.h | 36 ++++++++++++++++++++++++++++++++++
+ arch/loongarch/include/asm/sigframe.h | 9 ++++++++
+ arch/loongarch/kernel/asm-offsets.c | 2 +
+ arch/loongarch/kernel/signal.c | 6 -----
+ arch/loongarch/vdso/Makefile | 4 +--
+ arch/loongarch/vdso/sigreturn.S | 6 ++---
+ 6 files changed, 53 insertions(+), 10 deletions(-)
+ create mode 100644 arch/loongarch/include/asm/sigframe.h
+
+--- a/arch/loongarch/include/asm/linkage.h
++++ b/arch/loongarch/include/asm/linkage.h
+@@ -41,4 +41,40 @@
+ .cfi_endproc; \
+ SYM_END(name, SYM_T_NONE)
+
++/*
++ * This is for the signal handler trampoline, which is used as the return
++ * address of the signal handlers in userspace instead of called normally.
++ * The long standing libgcc bug https://gcc.gnu.org/PR124050 requires a
++ * nop between .cfi_startproc and the actual address of the trampoline, so
++ * we cannot simply use SYM_FUNC_START.
++ *
++ * This wrapper also contains all the .cfi_* directives for recovering
++ * the content of the GPRs and the "return address" (where the rt_sigreturn
++ * syscall will jump to), assuming there is a struct rt_sigframe (where
++ * a struct sigcontext containing those information we need to recover) at
++ * $sp. The "DWARF for the LoongArch(TM) Architecture" manual states
++ * column 0 is for $zero, but it does not make too much sense to
++ * save/restore the hardware zero register. Repurpose this column here
++ * for the return address (here it's not the content of $ra we cannot use
++ * the default column 3).
++ */
++#define SYM_SIGFUNC_START(name) \
++ .cfi_startproc; \
++ .cfi_signal_frame; \
++ .cfi_def_cfa 3, RT_SIGFRAME_SC; \
++ .cfi_return_column 0; \
++ .cfi_offset 0, SC_PC; \
++ \
++ .irp num, 1, 2, 3, 4, 5, 6, 7, 8, \
++ 9, 10, 11, 12, 13, 14, 15, 16, \
++ 17, 18, 19, 20, 21, 22, 23, 24, \
++ 25, 26, 27, 28, 29, 30, 31; \
++ .cfi_offset \num, SC_REGS + \num * SZREG; \
++ .endr; \
++ \
++ nop; \
++ SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
++
++#define SYM_SIGFUNC_END(name) SYM_FUNC_END(name)
++
+ #endif
+--- /dev/null
++++ b/arch/loongarch/include/asm/sigframe.h
+@@ -0,0 +1,9 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++
++#include <asm/siginfo.h>
++#include <asm/ucontext.h>
++
++struct rt_sigframe {
++ struct siginfo rs_info;
++ struct ucontext rs_uctx;
++};
+--- a/arch/loongarch/kernel/asm-offsets.c
++++ b/arch/loongarch/kernel/asm-offsets.c
+@@ -16,6 +16,7 @@
+ #include <asm/ptrace.h>
+ #include <asm/processor.h>
+ #include <asm/ftrace.h>
++#include <asm/sigframe.h>
+
+ static void __used output_ptreg_defines(void)
+ {
+@@ -219,6 +220,7 @@ static void __used output_sc_defines(voi
+ COMMENT("Linux sigcontext offsets.");
+ OFFSET(SC_REGS, sigcontext, sc_regs);
+ OFFSET(SC_PC, sigcontext, sc_pc);
++ OFFSET(RT_SIGFRAME_SC, rt_sigframe, rs_uctx.uc_mcontext);
+ BLANK();
+ }
+
+--- a/arch/loongarch/kernel/signal.c
++++ b/arch/loongarch/kernel/signal.c
+@@ -35,6 +35,7 @@
+ #include <asm/cpu-features.h>
+ #include <asm/fpu.h>
+ #include <asm/lbt.h>
++#include <asm/sigframe.h>
+ #include <asm/ucontext.h>
+ #include <asm/vdso.h>
+
+@@ -51,11 +52,6 @@
+ #define lock_lbt_owner() ({ preempt_disable(); pagefault_disable(); })
+ #define unlock_lbt_owner() ({ pagefault_enable(); preempt_enable(); })
+
+-struct rt_sigframe {
+- struct siginfo rs_info;
+- struct ucontext rs_uctx;
+-};
+-
+ struct _ctx_layout {
+ struct sctx_info *addr;
+ unsigned int size;
+--- a/arch/loongarch/vdso/Makefile
++++ b/arch/loongarch/vdso/Makefile
+@@ -21,7 +21,7 @@ cflags-vdso := $(ccflags-vdso) \
+ $(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \
+ -std=gnu11 -O2 -g -fno-strict-aliasing -fno-common -fno-builtin \
+ -fno-stack-protector -fno-jump-tables -DDISABLE_BRANCH_PROFILING \
+- $(call cc-option, -fno-asynchronous-unwind-tables) \
++ $(call cc-option, -fasynchronous-unwind-tables) \
+ $(call cc-option, -fno-stack-protector)
+ aflags-vdso := $(ccflags-vdso) \
+ -D__ASSEMBLY__ -Wa,-gdwarf-2
+@@ -36,7 +36,7 @@ endif
+
+ # VDSO linker flags.
+ ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \
+- $(filter -E%,$(KBUILD_CFLAGS)) -shared --build-id -T
++ $(filter -E%,$(KBUILD_CFLAGS)) -shared --build-id --eh-frame-hdr -T
+
+ #
+ # Shared build commands.
+--- a/arch/loongarch/vdso/sigreturn.S
++++ b/arch/loongarch/vdso/sigreturn.S
+@@ -12,13 +12,13 @@
+
+ #include <asm/regdef.h>
+ #include <asm/asm.h>
++#include <asm/asm-offsets.h>
+
+ .section .text
+- .cfi_sections .debug_frame
+
+-SYM_FUNC_START(__vdso_rt_sigreturn)
++SYM_SIGFUNC_START(__vdso_rt_sigreturn)
+
+ li.w a7, __NR_rt_sigreturn
+ syscall 0
+
+-SYM_FUNC_END(__vdso_rt_sigreturn)
++SYM_SIGFUNC_END(__vdso_rt_sigreturn)
--- /dev/null
+From stable+bounces-230063-greg=kroah.com@vger.kernel.org Tue Mar 24 08:08:16 2026
+From: Robert Garcia <rob_garcia@163.com>
+Date: Tue, 24 Mar 2026 15:05:07 +0800
+Subject: media: nxp: imx8-isi: Fix streaming cleanup on release
+To: stable@vger.kernel.org, Richard Leitner <richard.leitner@linux.dev>
+Cc: Hans Verkuil <hverkuil+cisco@kernel.org>, Laurent Pinchart <laurent.pinchart@ideasonboard.com>, Mauro Carvalho Chehab <mchehab@kernel.org>, Shawn Guo <shawnguo@kernel.org>, Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix Kernel Team <kernel@pengutronix.de>, Fabio Estevam <festevam@gmail.com>, linux-media@vger.kernel.org, Robert Garcia <rob_garcia@163.com>, imx@lists.linux.dev, linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org
+Message-ID: <20260324070507.2554507-1-rob_garcia@163.com>
+
+From: Richard Leitner <richard.leitner@linux.dev>
+
+[ Upstream commit 47773031a148ad7973b809cc7723cba77eda2b42 ]
+
+The current implementation unconditionally calls
+mxc_isi_video_cleanup_streaming() in mxc_isi_video_release(). This can
+lead to situations where any release call (like from a simple
+"v4l2-ctl -l") may release a currently streaming queue when called on
+such a device.
+
+This is reproducible on an i.MX8MP board by streaming from an ISI
+capture device using gstreamer:
+
+ gst-launch-1.0 -v v4l2src device=/dev/videoX ! \
+ video/x-raw,format=GRAY8,width=1280,height=800,framerate=1/120 ! \
+ fakesink
+
+While this stream is running, querying the caps of the same device
+provokes the error state:
+
+ v4l2-ctl -l -d /dev/videoX
+
+This results in the following trace:
+
+[ 155.452152] ------------[ cut here ]------------
+[ 155.452163] WARNING: CPU: 0 PID: 1708 at drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c:713 mxc_isi_pipe_irq_handler+0x19c/0x1b0 [imx8_isi]
+[ 157.004248] Modules linked in: cfg80211 rpmsg_ctrl rpmsg_char rpmsg_tty virtio_rpmsg_bus rpmsg_ns rpmsg_core rfkill nft_ct nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 nf_tables mcp251x6
+[ 157.053499] CPU: 0 UID: 0 PID: 1708 Comm: python3 Not tainted 6.15.4-00114-g1f61ca5cad76 #1 PREEMPT
+[ 157.064369] Hardware name: imx8mp_board_01 (DT)
+[ 157.068205] pstate: 400000c5 (nZcv daIF -PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+[ 157.075169] pc : mxc_isi_pipe_irq_handler+0x19c/0x1b0 [imx8_isi]
+[ 157.081195] lr : mxc_isi_pipe_irq_handler+0x38/0x1b0 [imx8_isi]
+[ 157.087126] sp : ffff800080003ee0
+[ 157.090438] x29: ffff800080003ee0 x28: ffff0000c3688000 x27: 0000000000000000
+[ 157.097580] x26: 0000000000000000 x25: ffff0000c1e7ac00 x24: ffff800081b5ad50
+[ 157.104723] x23: 00000000000000d1 x22: 0000000000000000 x21: ffff0000c25e4000
+[ 157.111866] x20: 0000000060000200 x19: ffff80007a0608d0 x18: 0000000000000000
+[ 157.119008] x17: ffff80006a4e3000 x16: ffff800080000000 x15: 0000000000000000
+[ 157.126146] x14: 0000000000000000 x13: 0000000000000000 x12: 0000000000000000
+[ 157.133287] x11: 0000000000000040 x10: ffff0000c01445f0 x9 : ffff80007a053a38
+[ 157.140425] x8 : ffff0000c04004b8 x7 : 0000000000000000 x6 : 0000000000000000
+[ 157.147567] x5 : ffff0000c0400490 x4 : ffff80006a4e3000 x3 : ffff0000c25e4000
+[ 157.154706] x2 : 0000000000000000 x1 : ffff8000825c0014 x0 : 0000000060000200
+[ 157.161850] Call trace:
+[ 157.164296] mxc_isi_pipe_irq_handler+0x19c/0x1b0 [imx8_isi] (P)
+[ 157.170319] __handle_irq_event_percpu+0x58/0x218
+[ 157.175029] handle_irq_event+0x54/0xb8
+[ 157.178867] handle_fasteoi_irq+0xac/0x248
+[ 157.182968] handle_irq_desc+0x48/0x68
+[ 157.186723] generic_handle_domain_irq+0x24/0x38
+[ 157.191346] gic_handle_irq+0x54/0x120
+[ 157.195098] call_on_irq_stack+0x24/0x30
+[ 157.199027] do_interrupt_handler+0x88/0x98
+[ 157.203212] el0_interrupt+0x44/0xc0
+[ 157.206792] __el0_irq_handler_common+0x18/0x28
+[ 157.211328] el0t_64_irq_handler+0x10/0x20
+[ 157.215429] el0t_64_irq+0x198/0x1a0
+[ 157.219009] ---[ end trace 0000000000000000 ]---
+
+Address this issue by moving the streaming preparation and cleanup to
+the vb2 .prepare_streaming() and .unprepare_streaming() operations. This
+also simplifies the driver by allowing direct usage of the
+vb2_ioctl_streamon() and vb2_ioctl_streamoff() helpers, and removal of
+the manual cleanup from mxc_isi_video_release().
+
+Link: https://lore.kernel.org/r/20250813212451.22140-2-laurent.pinchart@ideasonboard.com
+Signed-off-by: Richard Leitner <richard.leitner@linux.dev>
+Co-developed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Tested-by: Richard Leitner <richard.leitner@linux.dev> # i.MX8MP
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+[ Minor context change fixed. ]
+Signed-off-by: Robert Garcia <rob_garcia@163.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c | 156 +++++++------------
+ 1 file changed, 58 insertions(+), 98 deletions(-)
+
+--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
++++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
+@@ -937,6 +937,49 @@ static void mxc_isi_video_init_channel(s
+ mxc_isi_channel_set_output_format(pipe, video->fmtinfo, &video->pix);
+ }
+
++static int mxc_isi_vb2_prepare_streaming(struct vb2_queue *q)
++{
++ struct mxc_isi_video *video = vb2_get_drv_priv(q);
++ struct media_device *mdev = &video->pipe->isi->media_dev;
++ struct media_pipeline *pipe;
++ int ret;
++
++ /* Get a pipeline for the video node and start it. */
++ scoped_guard(mutex, &mdev->graph_mutex) {
++ ret = mxc_isi_pipe_acquire(video->pipe,
++ &mxc_isi_video_frame_write_done);
++ if (ret)
++ return ret;
++
++ pipe = media_entity_pipeline(&video->vdev.entity)
++ ? : &video->pipe->pipe;
++
++ ret = __video_device_pipeline_start(&video->vdev, pipe);
++ if (ret)
++ goto err_release;
++ }
++
++ /* Verify that the video format matches the output of the subdev. */
++ ret = mxc_isi_video_validate_format(video);
++ if (ret)
++ goto err_stop;
++
++ /* Allocate buffers for discard operation. */
++ ret = mxc_isi_video_alloc_discard_buffers(video);
++ if (ret)
++ goto err_stop;
++
++ video->is_streaming = true;
++
++ return 0;
++
++err_stop:
++ video_device_pipeline_stop(&video->vdev);
++err_release:
++ mxc_isi_pipe_release(video->pipe);
++ return ret;
++}
++
+ static int mxc_isi_vb2_start_streaming(struct vb2_queue *q, unsigned int count)
+ {
+ struct mxc_isi_video *video = vb2_get_drv_priv(q);
+@@ -985,6 +1028,17 @@ static void mxc_isi_vb2_stop_streaming(s
+ mxc_isi_video_return_buffers(video, VB2_BUF_STATE_ERROR);
+ }
+
++static void mxc_isi_vb2_unprepare_streaming(struct vb2_queue *q)
++{
++ struct mxc_isi_video *video = vb2_get_drv_priv(q);
++
++ mxc_isi_video_free_discard_buffers(video);
++ video_device_pipeline_stop(&video->vdev);
++ mxc_isi_pipe_release(video->pipe);
++
++ video->is_streaming = false;
++}
++
+ static const struct vb2_ops mxc_isi_vb2_qops = {
+ .queue_setup = mxc_isi_vb2_queue_setup,
+ .buf_init = mxc_isi_vb2_buffer_init,
+@@ -992,8 +1046,10 @@ static const struct vb2_ops mxc_isi_vb2_
+ .buf_queue = mxc_isi_vb2_buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
++ .prepare_streaming = mxc_isi_vb2_prepare_streaming,
+ .start_streaming = mxc_isi_vb2_start_streaming,
+ .stop_streaming = mxc_isi_vb2_stop_streaming,
++ .unprepare_streaming = mxc_isi_vb2_unprepare_streaming,
+ };
+
+ /* -----------------------------------------------------------------------------
+@@ -1147,97 +1203,6 @@ static int mxc_isi_video_s_fmt(struct fi
+ return 0;
+ }
+
+-static int mxc_isi_video_streamon(struct file *file, void *priv,
+- enum v4l2_buf_type type)
+-{
+- struct mxc_isi_video *video = video_drvdata(file);
+- struct media_device *mdev = &video->pipe->isi->media_dev;
+- struct media_pipeline *pipe;
+- int ret;
+-
+- if (vb2_queue_is_busy(&video->vb2_q, file))
+- return -EBUSY;
+-
+- /*
+- * Get a pipeline for the video node and start it. This must be done
+- * here and not in the queue .start_streaming() handler, so that
+- * pipeline start errors can be reported from VIDIOC_STREAMON and not
+- * delayed until subsequent VIDIOC_QBUF calls.
+- */
+- mutex_lock(&mdev->graph_mutex);
+-
+- ret = mxc_isi_pipe_acquire(video->pipe, &mxc_isi_video_frame_write_done);
+- if (ret) {
+- mutex_unlock(&mdev->graph_mutex);
+- return ret;
+- }
+-
+- pipe = media_entity_pipeline(&video->vdev.entity) ? : &video->pipe->pipe;
+-
+- ret = __video_device_pipeline_start(&video->vdev, pipe);
+- if (ret) {
+- mutex_unlock(&mdev->graph_mutex);
+- goto err_release;
+- }
+-
+- mutex_unlock(&mdev->graph_mutex);
+-
+- /* Verify that the video format matches the output of the subdev. */
+- ret = mxc_isi_video_validate_format(video);
+- if (ret)
+- goto err_stop;
+-
+- /* Allocate buffers for discard operation. */
+- ret = mxc_isi_video_alloc_discard_buffers(video);
+- if (ret)
+- goto err_stop;
+-
+- ret = vb2_streamon(&video->vb2_q, type);
+- if (ret)
+- goto err_free;
+-
+- video->is_streaming = true;
+-
+- return 0;
+-
+-err_free:
+- mxc_isi_video_free_discard_buffers(video);
+-err_stop:
+- video_device_pipeline_stop(&video->vdev);
+-err_release:
+- mxc_isi_pipe_release(video->pipe);
+- return ret;
+-}
+-
+-static void mxc_isi_video_cleanup_streaming(struct mxc_isi_video *video)
+-{
+- lockdep_assert_held(&video->lock);
+-
+- if (!video->is_streaming)
+- return;
+-
+- mxc_isi_video_free_discard_buffers(video);
+- video_device_pipeline_stop(&video->vdev);
+- mxc_isi_pipe_release(video->pipe);
+-
+- video->is_streaming = false;
+-}
+-
+-static int mxc_isi_video_streamoff(struct file *file, void *priv,
+- enum v4l2_buf_type type)
+-{
+- struct mxc_isi_video *video = video_drvdata(file);
+- int ret;
+-
+- ret = vb2_ioctl_streamoff(file, priv, type);
+- if (ret)
+- return ret;
+-
+- mxc_isi_video_cleanup_streaming(video);
+-
+- return 0;
+-}
+-
+ static int mxc_isi_video_enum_framesizes(struct file *file, void *priv,
+ struct v4l2_frmsizeenum *fsize)
+ {
+@@ -1293,9 +1258,8 @@ static const struct v4l2_ioctl_ops mxc_i
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+-
+- .vidioc_streamon = mxc_isi_video_streamon,
+- .vidioc_streamoff = mxc_isi_video_streamoff,
++ .vidioc_streamon = vb2_ioctl_streamon,
++ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_enum_framesizes = mxc_isi_video_enum_framesizes,
+
+@@ -1334,10 +1298,6 @@ static int mxc_isi_video_release(struct
+ if (ret)
+ dev_err(video->pipe->isi->dev, "%s fail\n", __func__);
+
+- mutex_lock(&video->lock);
+- mxc_isi_video_cleanup_streaming(video);
+- mutex_unlock(&video->lock);
+-
+ pm_runtime_put(video->pipe->isi->dev);
+ return ret;
+ }
--- /dev/null
+From 521bd39d9d28ce54cbfec7f9b89c94ad4fdb8350 Mon Sep 17 00:00:00 2001
+From: Hari Bathini <hbathini@linux.ibm.com>
+Date: Tue, 3 Mar 2026 23:40:25 +0530
+Subject: powerpc64/bpf: do not increment tailcall count when prog is NULL
+
+From: Hari Bathini <hbathini@linux.ibm.com>
+
+commit 521bd39d9d28ce54cbfec7f9b89c94ad4fdb8350 upstream.
+
+Do not increment the tailcall count if the tailcall did not succeed due
+to a missing BPF program.
+
+Fixes: ce0761419fae ("powerpc/bpf: Implement support for tail calls")
+Cc: stable@vger.kernel.org
+Tested-by: Venkat Rao Bagalkote <venkat88@linux.ibm.com>
+Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
+Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com>
+Link: https://patch.msgid.link/20260303181031.390073-2-hbathini@linux.ibm.com
+[ Conflict due to missing feature commit 2ed2d8f6fb38 ("powerpc64/bpf:
+ Support tailcalls with subprogs") resolved accordingly. ]
+Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/net/bpf_jit_comp64.c | 23 ++++++++++++++---------
+ 1 file changed, 14 insertions(+), 9 deletions(-)
+
+--- a/arch/powerpc/net/bpf_jit_comp64.c
++++ b/arch/powerpc/net/bpf_jit_comp64.c
+@@ -404,27 +404,32 @@ static int bpf_jit_emit_tail_call(u32 *i
+
+ /*
+ * tail_call_cnt++;
++ * Writeback this updated value only if tailcall succeeds.
+ */
+ EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 1));
+- EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
+
+ /* prog = array->ptrs[index]; */
+- EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_1), b2p_index, 8));
+- EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), b2p_bpf_array));
+- EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_array, ptrs)));
++ EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_2), b2p_index, 8));
++ EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_2), b2p_bpf_array));
++ EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_2),
++ offsetof(struct bpf_array, ptrs)));
+
+ /*
+ * if (prog == NULL)
+ * goto out;
+ */
+- EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_1), 0));
++ EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_2), 0));
+ PPC_BCC_SHORT(COND_EQ, out);
+
+ /* goto *(prog->bpf_func + prologue_size); */
+- EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_prog, bpf_func)));
+- EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1),
+- FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
+- EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));
++ EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_2),
++ offsetof(struct bpf_prog, bpf_func)));
++ EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_2),
++ FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
++ EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_2)));
++
++ /* Writeback updated tailcall count */
++ EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
+
+ /* tear down stack, restore NVRs, ... */
+ bpf_jit_emit_common_epilogue(image, ctx);
ext4-fix-iloc.bh-leak-in-ext4_fc_replay_inode-error-paths.patch
ext4-always-drain-queued-discard-work-in-ext4_mb_release.patch
arm64-dts-imx8mn-tqma8mqnl-fix-ldo5-power-off.patch
+powerpc64-bpf-do-not-increment-tailcall-count-when-prog-is-null.patch
+ksmbd-fix-use-after-free-and-null-deref-in-smb_grant_oplock.patch
+tracing-switch-trace_osnoise.c-code-over-to-use-guard-and-__free.patch
+tracing-fix-potential-deadlock-in-cpu-hotplug-with-osnoise.patch
+drm-xe-always-keep-track-of-remap-prev-next.patch
+loongarch-vdso-emit-gnu_eh_frame-correctly.patch
+spi-tegra210-quad-protect-curr_xfer-check-in-irq-handler.patch
+media-nxp-imx8-isi-fix-streaming-cleanup-on-release.patch
--- /dev/null
+From jianqkang@sina.cn Tue Mar 24 07:08:53 2026
+From: Jianqiang kang <jianqkang@sina.cn>
+Date: Tue, 24 Mar 2026 14:08:32 +0800
+Subject: spi: tegra210-quad: Protect curr_xfer check in IRQ handler
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org, leitao@debian.org
+Cc: patches@lists.linux.dev, linux-kernel@vger.kernel.org, thierry.reding@gmail.com, jonathanh@nvidia.com, skomatineni@nvidia.com, ldewangan@nvidia.com, treding@nvidia.com, broonie@kernel.org, va@nvidia.com, linux-tegra@vger.kernel.org, linux-spi@vger.kernel.org
+Message-ID: <20260324060832.724228-1-jianqkang@sina.cn>
+
+From: Breno Leitao <leitao@debian.org>
+
+[ Upstream commit edf9088b6e1d6d88982db7eb5e736a0e4fbcc09e ]
+
+Now that all other accesses to curr_xfer are done under the lock,
+protect the curr_xfer NULL check in tegra_qspi_isr_thread() with the
+spinlock. Without this protection, the following race can occur:
+
+ CPU0 (ISR thread) CPU1 (timeout path)
+ ---------------- -------------------
+ if (!tqspi->curr_xfer)
+ // sees non-NULL
+ spin_lock()
+ tqspi->curr_xfer = NULL
+ spin_unlock()
+ handle_*_xfer()
+ spin_lock()
+ t = tqspi->curr_xfer // NULL!
+ ... t->len ... // NULL dereference!
+
+With this patch, all curr_xfer accesses are now properly synchronized.
+
+Although all accesses to curr_xfer are done under the lock, in
+tegra_qspi_isr_thread() it checks for NULL, releases the lock and
+reacquires it later in handle_cpu_based_xfer()/handle_dma_based_xfer().
+There is a potential for an update in between, which could cause a NULL
+pointer dereference.
+
+To handle this, add a NULL check inside the handlers after acquiring
+the lock. This ensures that if the timeout path has already cleared
+curr_xfer, the handler will safely return without dereferencing the
+NULL pointer.
+
+Fixes: b4e002d8a7ce ("spi: tegra210-quad: Fix timeout handling")
+Signed-off-by: Breno Leitao <leitao@debian.org>
+Tested-by: Jon Hunter <jonathanh@nvidia.com>
+Acked-by: Jon Hunter <jonathanh@nvidia.com>
+Acked-by: Thierry Reding <treding@nvidia.com>
+Link: https://patch.msgid.link/20260126-tegra_xfer-v2-6-6d2115e4f387@debian.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+[ Minor conflict resolved. ]
+Signed-off-by: Jianqiang kang <jianqkang@sina.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/spi/spi-tegra210-quad.c | 20 ++++++++++++++++++++
+ 1 file changed, 20 insertions(+)
+
+--- a/drivers/spi/spi-tegra210-quad.c
++++ b/drivers/spi/spi-tegra210-quad.c
+@@ -1351,6 +1351,11 @@ static irqreturn_t handle_cpu_based_xfer
+ spin_lock_irqsave(&tqspi->lock, flags);
+ t = tqspi->curr_xfer;
+
++ if (!t) {
++ spin_unlock_irqrestore(&tqspi->lock, flags);
++ return IRQ_HANDLED;
++ }
++
+ if (tqspi->tx_status || tqspi->rx_status) {
+ tegra_qspi_handle_error(tqspi);
+ complete(&tqspi->xfer_completion);
+@@ -1419,6 +1424,11 @@ static irqreturn_t handle_dma_based_xfer
+ spin_lock_irqsave(&tqspi->lock, flags);
+ t = tqspi->curr_xfer;
+
++ if (!t) {
++ spin_unlock_irqrestore(&tqspi->lock, flags);
++ return IRQ_HANDLED;
++ }
++
+ if (err) {
+ tegra_qspi_dma_unmap_xfer(tqspi, t);
+ tegra_qspi_handle_error(tqspi);
+@@ -1457,6 +1467,7 @@ exit:
+ static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
+ {
+ struct tegra_qspi *tqspi = context_data;
++ unsigned long flags;
+ u32 status;
+
+ /*
+@@ -1474,7 +1485,9 @@ static irqreturn_t tegra_qspi_isr_thread
+ * If no transfer is in progress, check if this was a real interrupt
+ * that the timeout handler already processed, or a spurious one.
+ */
++ spin_lock_irqsave(&tqspi->lock, flags);
+ if (!tqspi->curr_xfer) {
++ spin_unlock_irqrestore(&tqspi->lock, flags);
+ /* Spurious interrupt - transfer not ready */
+ if (!(status & QSPI_RDY))
+ return IRQ_NONE;
+@@ -1491,7 +1504,14 @@ static irqreturn_t tegra_qspi_isr_thread
+ tqspi->rx_status = tqspi->status_reg & (QSPI_RX_FIFO_OVF | QSPI_RX_FIFO_UNF);
+
+ tegra_qspi_mask_clear_irq(tqspi);
++ spin_unlock_irqrestore(&tqspi->lock, flags);
+
++ /*
++ * Lock is released here but handlers safely re-check curr_xfer under
++ * lock before dereferencing.
++ * DMA handler also needs to sleep in wait_for_completion_*(), which
++ * cannot be done while holding spinlock.
++ */
+ if (!tqspi->is_curr_dma_xfer)
+ return handle_cpu_based_xfer(tqspi);
+
--- /dev/null
+From stable+bounces-231231-greg=kroah.com@vger.kernel.org Mon Mar 30 16:25:58 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 30 Mar 2026 10:18:06 -0400
+Subject: tracing: Fix potential deadlock in cpu hotplug with osnoise
+To: stable@vger.kernel.org
+Cc: Luo Haiyang <luo.haiyang@zte.com.cn>, mathieu.desnoyers@efficios.com, zhang.run@zte.com.cn, yang.tao172@zte.com.cn, ran.xiaokai@zte.com.cn, "Masami Hiramatsu (Google)" <mhiramat@kernel.org>, "Steven Rostedt (Google)" <rostedt@goodmis.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260330141806.817242-2-sashal@kernel.org>
+
+From: Luo Haiyang <luo.haiyang@zte.com.cn>
+
+[ Upstream commit 1f9885732248d22f788e4992c739a98c88ab8a55 ]
+
+The following sequence may lead to a deadlock in cpu hotplug:
+
+ task1 task2 task3
+ ----- ----- -----
+
+ mutex_lock(&interface_lock)
+
+ [CPU GOING OFFLINE]
+
+ cpus_write_lock();
+ osnoise_cpu_die();
+ kthread_stop(task3);
+ wait_for_completion();
+
+ osnoise_sleep();
+ mutex_lock(&interface_lock);
+
+ cpus_read_lock();
+
+ [DEAD LOCK]
+
+Fix by swapping the order of cpus_read_lock() and mutex_lock(&interface_lock).
+
+Cc: stable@vger.kernel.org
+Cc: <mathieu.desnoyers@efficios.com>
+Cc: <zhang.run@zte.com.cn>
+Cc: <yang.tao172@zte.com.cn>
+Cc: <ran.xiaokai@zte.com.cn>
+Fixes: bce29ac9ce0bb ("trace: Add osnoise tracer")
+Link: https://patch.msgid.link/20260326141953414bVSj33dAYktqp9Oiyizq8@zte.com.cn
+Reviewed-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Signed-off-by: Luo Haiyang <luo.haiyang@zte.com.cn>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace_osnoise.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/kernel/trace/trace_osnoise.c
++++ b/kernel/trace/trace_osnoise.c
+@@ -2104,8 +2104,8 @@ static void osnoise_hotplug_workfn(struc
+ if (!osnoise_has_registered_instances())
+ return;
+
+- guard(mutex)(&interface_lock);
+ guard(cpus_read_lock)();
++ guard(mutex)(&interface_lock);
+
+ if (!cpu_online(cpu))
+ return;
+@@ -2268,11 +2268,11 @@ static ssize_t osnoise_options_write(str
+ if (running)
+ stop_per_cpu_kthreads();
+
+- mutex_lock(&interface_lock);
+ /*
+ * avoid CPU hotplug operations that might read options.
+ */
+ cpus_read_lock();
++ mutex_lock(&interface_lock);
+
+ retval = cnt;
+
+@@ -2288,8 +2288,8 @@ static ssize_t osnoise_options_write(str
+ clear_bit(option, &osnoise_options);
+ }
+
+- cpus_read_unlock();
+ mutex_unlock(&interface_lock);
++ cpus_read_unlock();
+
+ if (running)
+ start_per_cpu_kthreads();
+@@ -2375,16 +2375,16 @@ osnoise_cpus_write(struct file *filp, co
+ if (running)
+ stop_per_cpu_kthreads();
+
+- mutex_lock(&interface_lock);
+ /*
+ * osnoise_cpumask is read by CPU hotplug operations.
+ */
+ cpus_read_lock();
++ mutex_lock(&interface_lock);
+
+ cpumask_copy(&osnoise_cpumask, osnoise_cpumask_new);
+
+- cpus_read_unlock();
+ mutex_unlock(&interface_lock);
++ cpus_read_unlock();
+
+ if (running)
+ start_per_cpu_kthreads();
--- /dev/null
+From stable+bounces-231230-greg=kroah.com@vger.kernel.org Mon Mar 30 16:25:58 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 30 Mar 2026 10:18:05 -0400
+Subject: tracing: Switch trace_osnoise.c code over to use guard() and __free()
+To: stable@vger.kernel.org
+Cc: Steven Rostedt <rostedt@goodmis.org>, Masami Hiramatsu <mhiramat@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Mathieu Desnoyers <mathieu.desnoyers@efficios.com>, Andrew Morton <akpm@linux-foundation.org>, Peter Zijlstra <peterz@infradead.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260330141806.817242-1-sashal@kernel.org>
+
+From: Steven Rostedt <rostedt@goodmis.org>
+
+[ Upstream commit 930d2b32c0af6895ba4c6ca6404e7f7b6dc214ed ]
+
+The osnoise_hotplug_workfn() grabs two mutexes and cpu_read_lock(). It has
+various gotos to handle unlocking them. Switch them over to guard() and
+let the compiler worry about it.
+
+The osnoise_cpus_read() has a temporary mask_str allocated and there's
+some gotos to make sure it gets freed on error paths. Switch that over to
+__free() to let the compiler worry about it.
+
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: https://lore.kernel.org/20241225222931.517329690@goodmis.org
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Stable-dep-of: 1f9885732248 ("tracing: Fix potential deadlock in cpu hotplug with osnoise")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace_osnoise.c | 40 +++++++++++++---------------------------
+ 1 file changed, 13 insertions(+), 27 deletions(-)
+
+--- a/kernel/trace/trace_osnoise.c
++++ b/kernel/trace/trace_osnoise.c
+@@ -2099,26 +2099,21 @@ static void osnoise_hotplug_workfn(struc
+ {
+ unsigned int cpu = smp_processor_id();
+
+- mutex_lock(&trace_types_lock);
++ guard(mutex)(&trace_types_lock);
+
+ if (!osnoise_has_registered_instances())
+- goto out_unlock_trace;
++ return;
+
+- mutex_lock(&interface_lock);
+- cpus_read_lock();
++ guard(mutex)(&interface_lock);
++ guard(cpus_read_lock)();
+
+ if (!cpu_online(cpu))
+- goto out_unlock;
++ return;
++
+ if (!cpumask_test_cpu(cpu, &osnoise_cpumask))
+- goto out_unlock;
++ return;
+
+ start_kthread(cpu);
+-
+-out_unlock:
+- cpus_read_unlock();
+- mutex_unlock(&interface_lock);
+-out_unlock_trace:
+- mutex_unlock(&trace_types_lock);
+ }
+
+ static DECLARE_WORK(osnoise_hotplug_work, osnoise_hotplug_workfn);
+@@ -2316,31 +2311,22 @@ static ssize_t
+ osnoise_cpus_read(struct file *filp, char __user *ubuf, size_t count,
+ loff_t *ppos)
+ {
+- char *mask_str;
++ char *mask_str __free(kfree) = NULL;
+ int len;
+
+- mutex_lock(&interface_lock);
++ guard(mutex)(&interface_lock);
+
+ len = snprintf(NULL, 0, "%*pbl\n", cpumask_pr_args(&osnoise_cpumask)) + 1;
+ mask_str = kmalloc(len, GFP_KERNEL);
+- if (!mask_str) {
+- count = -ENOMEM;
+- goto out_unlock;
+- }
++ if (!mask_str)
++ return -ENOMEM;
+
+ len = snprintf(mask_str, len, "%*pbl\n", cpumask_pr_args(&osnoise_cpumask));
+- if (len >= count) {
+- count = -EINVAL;
+- goto out_free;
+- }
++ if (len >= count)
++ return -EINVAL;
+
+ count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
+
+-out_free:
+- kfree(mask_str);
+-out_unlock:
+- mutex_unlock(&interface_lock);
+-
+ return count;
+ }
+