--- /dev/null
+From stable+bounces-231278-greg=kroah.com@vger.kernel.org Mon Mar 30 23:08:21 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 30 Mar 2026 17:07:59 -0400
+Subject: ksmbd: fix use-after-free and NULL deref in smb_grant_oplock()
+To: stable@vger.kernel.org
+Cc: Werner Kasselman <werner@verivus.com>, ChenXiaoSong <chenxiaosong@kylinos.cn>, Namjae Jeon <linkinjeon@kernel.org>, Steve French <stfrench@microsoft.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260330210759.1213972-1-sashal@kernel.org>
+
+From: Werner Kasselman <werner@verivus.com>
+
+[ Upstream commit 48623ec358c1c600fa1e38368746f933e0f1a617 ]
+
+smb_grant_oplock() has two issues in the oplock publication sequence:
+
+1) opinfo is linked into ci->m_op_list (via opinfo_add) before
+ add_lease_global_list() is called. If add_lease_global_list()
+ fails (kmalloc returns NULL), the error path frees the opinfo
+ via __free_opinfo() while it is still linked in ci->m_op_list.
+ Concurrent m_op_list readers (opinfo_get_list, or direct iteration
+ in smb_break_all_levII_oplock) dereference the freed node.
+
+2) opinfo->o_fp is assigned after add_lease_global_list() publishes
+ the opinfo on the global lease list. A concurrent
+ find_same_lease_key() can walk the lease list and dereference
+ opinfo->o_fp->f_ci while o_fp is still NULL.
+
+Fix by restructuring the publication sequence to eliminate post-publish
+failure:
+
+- Set opinfo->o_fp before any list publication (fixes NULL deref).
+- Preallocate lease_table via alloc_lease_table() before opinfo_add()
+ so add_lease_global_list() becomes infallible after publication.
+- Keep the original m_op_list publication order (opinfo_add before
+ lease list) so concurrent opens via same_client_has_lease() and
+ opinfo_get_list() still see the in-flight grant.
+- Use opinfo_put() instead of __free_opinfo() on err_out so that
+ the RCU-deferred free path is used.
+
+This also requires splitting add_lease_global_list() to take a
+preallocated lease_table and changing its return type from int to void,
+since it can no longer fail.
+
+Fixes: 1dfd062caa16 ("ksmbd: fix use-after-free by using call_rcu() for oplock_info")
+Cc: stable@vger.kernel.org
+Signed-off-by: Werner Kasselman <werner@verivus.com>
+Reviewed-by: ChenXiaoSong <chenxiaosong@kylinos.cn>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+[ adapted kmalloc_obj() macro to kmalloc(sizeof()) ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/oplock.c | 72 ++++++++++++++++++++++++++++++-------------------
+ 1 file changed, 45 insertions(+), 27 deletions(-)
+
+--- a/fs/smb/server/oplock.c
++++ b/fs/smb/server/oplock.c
+@@ -82,11 +82,19 @@ static void lease_del_list(struct oplock
+ spin_unlock(&lb->lb_lock);
+ }
+
+-static void lb_add(struct lease_table *lb)
++static struct lease_table *alloc_lease_table(struct oplock_info *opinfo)
+ {
+- write_lock(&lease_list_lock);
+- list_add(&lb->l_entry, &lease_table_list);
+- write_unlock(&lease_list_lock);
++ struct lease_table *lb;
++
++ lb = kmalloc(sizeof(struct lease_table), KSMBD_DEFAULT_GFP);
++ if (!lb)
++ return NULL;
++
++ memcpy(lb->client_guid, opinfo->conn->ClientGUID,
++ SMB2_CLIENT_GUID_SIZE);
++ INIT_LIST_HEAD(&lb->lease_list);
++ spin_lock_init(&lb->lb_lock);
++ return lb;
+ }
+
+ static int alloc_lease(struct oplock_info *opinfo, struct lease_ctx_info *lctx)
+@@ -1042,34 +1050,27 @@ static void copy_lease(struct oplock_inf
+ lease2->version = lease1->version;
+ }
+
+-static int add_lease_global_list(struct oplock_info *opinfo)
++static void add_lease_global_list(struct oplock_info *opinfo,
++ struct lease_table *new_lb)
+ {
+ struct lease_table *lb;
+
+- read_lock(&lease_list_lock);
++ write_lock(&lease_list_lock);
+ list_for_each_entry(lb, &lease_table_list, l_entry) {
+ if (!memcmp(lb->client_guid, opinfo->conn->ClientGUID,
+ SMB2_CLIENT_GUID_SIZE)) {
+ opinfo->o_lease->l_lb = lb;
+ lease_add_list(opinfo);
+- read_unlock(&lease_list_lock);
+- return 0;
++ write_unlock(&lease_list_lock);
++ kfree(new_lb);
++ return;
+ }
+ }
+- read_unlock(&lease_list_lock);
+
+- lb = kmalloc(sizeof(struct lease_table), KSMBD_DEFAULT_GFP);
+- if (!lb)
+- return -ENOMEM;
+-
+- memcpy(lb->client_guid, opinfo->conn->ClientGUID,
+- SMB2_CLIENT_GUID_SIZE);
+- INIT_LIST_HEAD(&lb->lease_list);
+- spin_lock_init(&lb->lb_lock);
+- opinfo->o_lease->l_lb = lb;
++ opinfo->o_lease->l_lb = new_lb;
+ lease_add_list(opinfo);
+- lb_add(lb);
+- return 0;
++ list_add(&new_lb->l_entry, &lease_table_list);
++ write_unlock(&lease_list_lock);
+ }
+
+ static void set_oplock_level(struct oplock_info *opinfo, int level,
+@@ -1189,6 +1190,7 @@ int smb_grant_oplock(struct ksmbd_work *
+ int err = 0;
+ struct oplock_info *opinfo = NULL, *prev_opinfo = NULL;
+ struct ksmbd_inode *ci = fp->f_ci;
++ struct lease_table *new_lb = NULL;
+ bool prev_op_has_lease;
+ __le32 prev_op_state = 0;
+
+@@ -1291,21 +1293,37 @@ set_lev:
+ set_oplock_level(opinfo, req_op_level, lctx);
+
+ out:
+- opinfo_count_inc(fp);
+- opinfo_add(opinfo, fp);
+-
++ /*
++ * Set o_fp before any publication so that concurrent readers
++ * (e.g. find_same_lease_key() on the lease list) that
++ * dereference opinfo->o_fp don't hit a NULL pointer.
++ *
++ * Keep the original publication order so concurrent opens can
++ * still observe the in-flight grant via ci->m_op_list, but make
++ * everything after opinfo_add() no-fail by preallocating any new
++ * lease_table first.
++ */
++ opinfo->o_fp = fp;
+ if (opinfo->is_lease) {
+- err = add_lease_global_list(opinfo);
+- if (err)
++ new_lb = alloc_lease_table(opinfo);
++ if (!new_lb) {
++ err = -ENOMEM;
+ goto err_out;
++ }
+ }
+
++ opinfo_count_inc(fp);
++ opinfo_add(opinfo, fp);
++
++ if (opinfo->is_lease)
++ add_lease_global_list(opinfo, new_lb);
++
+ rcu_assign_pointer(fp->f_opinfo, opinfo);
+- opinfo->o_fp = fp;
+
+ return 0;
+ err_out:
+- __free_opinfo(opinfo);
++ kfree(new_lb);
++ opinfo_put(opinfo);
+ return err;
+ }
+
--- /dev/null
+From 26f775a054c3cda86ad465a64141894a90a9e145 Mon Sep 17 00:00:00 2001
+From: SeongJae Park <sj@kernel.org>
+Date: Thu, 19 Mar 2026 07:52:17 -0700
+Subject: mm/damon/core: avoid use of half-online-committed context
+
+From: SeongJae Park <sj@kernel.org>
+
+commit 26f775a054c3cda86ad465a64141894a90a9e145 upstream.
+
+One major usage of damon_call() is online DAMON parameters update. It is
+done by calling damon_commit_ctx() inside the damon_call() callback
+function. damon_commit_ctx() can fail for two reasons: 1) invalid
+parameters and 2) internal memory allocation failures. In case of
+failures, the damon_ctx that attempted to be updated (commit destination)
+can be partially updated (or, corrupted from a perspective), and therefore
+shouldn't be used anymore. The function only ensures the damon_ctx object
+can be safely deallocated using damon_destroy_ctx().
+
+The API callers are, however, calling damon_commit_ctx() only after
+asserting the parameters are valid, to avoid damon_commit_ctx() fails due
+to invalid input parameters. But it can still theoretically fail if the
+internal memory allocation fails. In the case, DAMON may run with the
+partially updated damon_ctx. This can result in unexpected behaviors
+including even NULL pointer dereference in case of damos_commit_dests()
+failure [1]. Such allocation failure is arguably too small to fail, so
+the real world impact would be rare. But, given the bad consequence, this
+needs to be fixed.
+
+Avoid such partially-committed (maybe-corrupted) damon_ctx use by saving
+the damon_commit_ctx() failure on the damon_ctx object. For this,
+introduce damon_ctx->maybe_corrupted field. damon_commit_ctx() sets it
+when it is failed. kdamond_call() checks if the field is set after each
+damon_call_control->fn() is executed. If it is set, ignore remaining
+callback requests and return. All kdamond_call() callers including
+kdamond_fn() also check the maybe_corrupted field right after
+kdamond_call() invocations. If the field is set, break the kdamond_fn()
+main loop so that DAMON still doesn't use the context that might be
+corrupted.
+
+[sj@kernel.org: let kdamond_call() with cancel regardless of maybe_corrupted]
+ Link: https://lkml.kernel.org/r/20260320031553.2479-1-sj@kernel.org
+ Link: https://lore.kernel.org/20260319145218.86197-1-sj@kernel.org
+Link: https://lkml.kernel.org/r/20260319145218.86197-1-sj@kernel.org
+Link: https://lore.kernel.org/20260319043309.97966-1-sj@kernel.org [1]
+Fixes: 3301f1861d34 ("mm/damon/sysfs: handle commit command using damon_call()")
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Cc: <stable@vger.kernel.org> [6.15+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/damon.h | 7 +++++++
+ mm/damon/core.c | 9 ++++++++-
+ 2 files changed, 15 insertions(+), 1 deletion(-)
+
+--- a/include/linux/damon.h
++++ b/include/linux/damon.h
+@@ -806,7 +806,14 @@ struct damon_ctx {
+ struct damos_walk_control *walk_control;
+ struct mutex walk_control_lock;
+
++ /*
++	 * indicate if this may be corrupted. Currently this is set only for
++ * damon_commit_ctx() failure.
++ */
++ bool maybe_corrupted;
++
+ /* public: */
++ /* Working thread of the given DAMON context */
+ struct task_struct *kdamond;
+ struct mutex kdamond_lock;
+
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -1241,6 +1241,7 @@ int damon_commit_ctx(struct damon_ctx *d
+ {
+ int err;
+
++ dst->maybe_corrupted = true;
+ if (!is_power_of_2(src->min_region_sz))
+ return -EINVAL;
+
+@@ -1266,6 +1267,7 @@ int damon_commit_ctx(struct damon_ctx *d
+ dst->addr_unit = src->addr_unit;
+ dst->min_region_sz = src->min_region_sz;
+
++ dst->maybe_corrupted = false;
+ return 0;
+ }
+
+@@ -2610,10 +2612,11 @@ static void kdamond_call(struct damon_ct
+ complete(&control->completion);
+ } else if (control->canceled && control->dealloc_on_cancel) {
+ kfree(control);
+- continue;
+ } else {
+ list_add(&control->list, &repeat_controls);
+ }
++ if (!cancel && ctx->maybe_corrupted)
++ break;
+ }
+ control = list_first_entry_or_null(&repeat_controls,
+ struct damon_call_control, list);
+@@ -2646,6 +2649,8 @@ static int kdamond_wait_activation(struc
+ kdamond_usleep(min_wait_time);
+
+ kdamond_call(ctx, false);
++ if (ctx->maybe_corrupted)
++ return -EINVAL;
+ damos_walk_cancel(ctx);
+ }
+ return -EBUSY;
+@@ -2731,6 +2736,8 @@ static int kdamond_fn(void *data)
+ * kdamond_merge_regions() if possible, to reduce overhead
+ */
+ kdamond_call(ctx, false);
++ if (ctx->maybe_corrupted)
++ break;
+ if (!list_empty(&ctx->schemes))
+ kdamond_apply_schemes(ctx);
+ else
--- /dev/null
+From 521bd39d9d28ce54cbfec7f9b89c94ad4fdb8350 Mon Sep 17 00:00:00 2001
+From: Hari Bathini <hbathini@linux.ibm.com>
+Date: Tue, 3 Mar 2026 23:40:25 +0530
+Subject: powerpc64/bpf: do not increment tailcall count when prog is NULL
+
+From: Hari Bathini <hbathini@linux.ibm.com>
+
+commit 521bd39d9d28ce54cbfec7f9b89c94ad4fdb8350 upstream.
+
+Do not increment tailcall count, if tailcall did not succeed due to
+missing BPF program.
+
+Fixes: ce0761419fae ("powerpc/bpf: Implement support for tail calls")
+Cc: stable@vger.kernel.org
+Tested-by: Venkat Rao Bagalkote <venkat88@linux.ibm.com>
+Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
+Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com>
+Link: https://patch.msgid.link/20260303181031.390073-2-hbathini@linux.ibm.com
+[ Conflict due to missing feature commit 2ed2d8f6fb38 ("powerpc64/bpf:
+ Support tailcalls with subprogs") resolved accordingly. ]
+Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/net/bpf_jit_comp64.c | 23 ++++++++++++++---------
+ 1 file changed, 14 insertions(+), 9 deletions(-)
+
+--- a/arch/powerpc/net/bpf_jit_comp64.c
++++ b/arch/powerpc/net/bpf_jit_comp64.c
+@@ -430,27 +430,32 @@ static int bpf_jit_emit_tail_call(u32 *i
+
+ /*
+ * tail_call_cnt++;
++ * Writeback this updated value only if tailcall succeeds.
+ */
+ EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 1));
+- EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
+
+ /* prog = array->ptrs[index]; */
+- EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_1), b2p_index, 8));
+- EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), b2p_bpf_array));
+- EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_array, ptrs)));
++ EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_2), b2p_index, 8));
++ EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_2), b2p_bpf_array));
++ EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_2),
++ offsetof(struct bpf_array, ptrs)));
+
+ /*
+ * if (prog == NULL)
+ * goto out;
+ */
+- EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_1), 0));
++ EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_2), 0));
+ PPC_BCC_SHORT(COND_EQ, out);
+
+ /* goto *(prog->bpf_func + prologue_size); */
+- EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_prog, bpf_func)));
+- EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1),
+- FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
+- EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));
++ EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_2),
++ offsetof(struct bpf_prog, bpf_func)));
++ EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_2),
++ FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
++ EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_2)));
++
++ /* Writeback updated tailcall count */
++ EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
+
+ /* tear down stack, restore NVRs, ... */
+ bpf_jit_emit_common_epilogue(image, ctx);
--- /dev/null
+From stable+bounces-230330-greg=kroah.com@vger.kernel.org Wed Mar 25 14:08:18 2026
+From: Benno Lossin <lossin@kernel.org>
+Date: Wed, 25 Mar 2026 13:57:31 +0100
+Subject: rust: pin-init: internal: init: document load-bearing fact of field accessors
+To: "Benno Lossin" <lossin@kernel.org>, "Miguel Ojeda" <ojeda@kernel.org>, "Boqun Feng" <boqun.feng@gmail.com>, "Gary Guo" <gary@garyguo.net>, "Björn Roy Baron" <bjorn3_gh@protonmail.com>, "Andreas Hindborg" <a.hindborg@kernel.org>, "Alice Ryhl" <aliceryhl@google.com>, "Trevor Gross" <tmgross@umich.edu>, "Danilo Krummrich" <dakr@kernel.org>, "Wedson Almeida Filho" <wedsonaf@gmail.com>
+Cc: stable@vger.kernel.org, rust-for-linux@vger.kernel.org, linux-kernel@vger.kernel.org
+Message-ID: <20260325125734.944247-1-lossin@kernel.org>
+
+From: Benno Lossin <lossin@kernel.org>
+
+[ Upstream commit 580cc37b1de4fcd9997c48d7080e744533f09f36 ]
+
+The functions `[Pin]Init::__[pinned_]init` and `ptr::write` called from
+the `init!` macro require the passed pointer to be aligned. This fact is
+ensured by the creation of field accessors to previously initialized
+fields.
+
+Since we missed this very important fact from the beginning [1],
+document it in the code.
+
+Link: https://rust-for-linux.zulipchat.com/#narrow/channel/561532-pin-init/topic/initialized.20field.20accessor.20detection/with/576210658 [1]
+Fixes: 90e53c5e70a6 ("rust: add pin-init API core")
+Cc: <stable@vger.kernel.org> # 6.6.y, 6.12.y: 42415d163e5d: rust: pin-init: add references to previously initialized fields
+Cc: <stable@vger.kernel.org> # 6.6.y, 6.12.y, 6.18.y, 6.19.y
+Signed-off-by: Benno Lossin <lossin@kernel.org>
+Reviewed-by: Gary Guo <gary@garyguo.net>
+Link: https://patch.msgid.link/20260302140424.4097655-2-lossin@kernel.org
+[ Updated Cc: stable@ tags as discussed. - Miguel ]
+Signed-off-by: Miguel Ojeda <ojeda@kernel.org>
+[ Moved changes to the declarative macro, because 6.19.y and earlier do not
+ have `syn`. Also duplicated the comment for all field accessor creations.
+ - Benno ]
+Signed-off-by: Benno Lossin <lossin@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ rust/pin-init/src/macros.rs | 16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+--- a/rust/pin-init/src/macros.rs
++++ b/rust/pin-init/src/macros.rs
+@@ -1312,6 +1312,10 @@ macro_rules! __init_internal {
+ // return when an error/panic occurs.
+ // We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
+ unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? };
++ // NOTE: the field accessor ensures that the initialized field is properly aligned.
++ // Unaligned fields will cause the compiler to emit E0793. We do not support
++ // unaligned fields since `Init::__init` requires an aligned pointer; the call to
++ // `ptr::write` below has the same requirement.
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+@@ -1351,6 +1355,10 @@ macro_rules! __init_internal {
+ // return when an error/panic occurs.
+ unsafe { $crate::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
+
++ // NOTE: the field accessor ensures that the initialized field is properly aligned.
++ // Unaligned fields will cause the compiler to emit E0793. We do not support
++ // unaligned fields since `Init::__init` requires an aligned pointer; the call to
++ // `ptr::write` below has the same requirement.
+ // SAFETY:
+ // - the field is not structurally pinned, since the line above must compile,
+ // - the field has been initialized,
+@@ -1391,6 +1399,10 @@ macro_rules! __init_internal {
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
+
++ // NOTE: the field accessor ensures that the initialized field is properly aligned.
++ // Unaligned fields will cause the compiler to emit E0793. We do not support
++ // unaligned fields since `Init::__init` requires an aligned pointer; the call to
++ // `ptr::write` below has the same requirement.
+ #[allow(unused_variables)]
+ // SAFETY:
+ // - the field is not structurally pinned, since no `use_data` was required to create this
+@@ -1431,6 +1443,10 @@ macro_rules! __init_internal {
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
++ // NOTE: the field accessor ensures that the initialized field is properly aligned.
++ // Unaligned fields will cause the compiler to emit E0793. We do not support
++ // unaligned fields since `Init::__init` requires an aligned pointer; the call to
++ // `ptr::write` below has the same requirement.
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
ext4-fix-iloc.bh-leak-in-ext4_fc_replay_inode-error-paths.patch
ext4-always-drain-queued-discard-work-in-ext4_mb_release.patch
arm64-dts-imx8mn-tqma8mqnl-fix-ldo5-power-off.patch
+powerpc64-bpf-do-not-increment-tailcall-count-when-prog-is-null.patch
+mm-damon-core-avoid-use-of-half-online-committed-context.patch
+rust-pin-init-internal-init-document-load-bearing-fact-of-field-accessors.patch
+ksmbd-fix-use-after-free-and-null-deref-in-smb_grant_oplock.patch