--- /dev/null
+From stable+bounces-230434-greg=kroah.com@vger.kernel.org Thu Mar 26 09:05:41 2026
+From: Robert Garcia <rob_garcia@163.com>
+Date: Thu, 26 Mar 2026 16:00:23 +0800
+Subject: gfs2: Fix unlikely race in gdlm_put_lock
+To: stable@vger.kernel.org, Andreas Gruenbacher <agruenba@redhat.com>
+Cc: Andrew Price <anprice@redhat.com>, Robert Garcia <rob_garcia@163.com>, Bob Peterson <rpeterso@redhat.com>, cluster-devel@redhat.com, linux-kernel@vger.kernel.org
+Message-ID: <20260326080023.1708804-1-rob_garcia@163.com>
+
+From: Andreas Gruenbacher <agruenba@redhat.com>
+
+[ Upstream commit 28c4d9bc0708956c1a736a9e49fee71b65deee81 ]
+
+In gdlm_put_lock(), there is a small window of time in which the
+DFL_UNMOUNT flag has been set but the lockspace hasn't been released,
+yet. In that window, dlm may still call gdlm_ast() and gdlm_bast().
+To prevent it from dereferencing freed glock objects, only free the
+glock if the lockspace has actually been released.
+
+Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
+Reviewed-by: Andrew Price <anprice@redhat.com>
+[ Minor context change fixed. ]
+Signed-off-by: Robert Garcia <rob_garcia@163.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/gfs2/lock_dlm.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/fs/gfs2/lock_dlm.c
++++ b/fs/gfs2/lock_dlm.c
+@@ -311,11 +311,6 @@ static void gdlm_put_lock(struct gfs2_gl
+ gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
+ gfs2_update_request_times(gl);
+
+- /* don't want to call dlm if we've unmounted the lock protocol */
+- if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
+- gfs2_glock_free(gl);
+- return;
+- }
+ /* don't want to skip dlm_unlock writing the lvb when lock has one */
+
+ if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
+@@ -332,6 +327,11 @@ again:
+ goto again;
+ }
+
++ if (error == -ENODEV) {
++ gfs2_glock_free(gl);
++ return;
++ }
++
+ if (error) {
+ fs_err(sdp, "gdlm_unlock %x,%llx err=%d\n",
+ gl->gl_name.ln_type,
--- /dev/null
+From stable+bounces-231266-greg=kroah.com@vger.kernel.org Mon Mar 30 21:12:59 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 30 Mar 2026 15:11:09 -0400
+Subject: ksmbd: fix memory leaks and NULL deref in smb2_lock()
+To: stable@vger.kernel.org
+Cc: Werner Kasselman <werner@verivus.com>, ChenXiaoSong <chenxiaosong@kylinos.cn>, Namjae Jeon <linkinjeon@kernel.org>, Steve French <stfrench@microsoft.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260330191109.1038495-1-sashal@kernel.org>
+
+From: Werner Kasselman <werner@verivus.com>
+
+[ Upstream commit 309b44ed684496ed3f9c5715d10b899338623512 ]
+
+smb2_lock() has three error handling issues after list_del() detaches
+smb_lock from lock_list at no_check_cl:
+
+1) If vfs_lock_file() returns an unexpected error in the non-UNLOCK
+ path, goto out leaks smb_lock and its flock because the out:
+ handler only iterates lock_list and rollback_list, neither of
+ which contains the detached smb_lock.
+
+2) If vfs_lock_file() returns -ENOENT in the UNLOCK path, goto out
+ leaks smb_lock and flock for the same reason. The error code
+ returned to the dispatcher is also stale.
+
+3) In the rollback path, smb_flock_init() can return NULL on
+ allocation failure. The result is dereferenced unconditionally,
+ causing a kernel NULL pointer dereference. Add a NULL check to
+ prevent the crash and clean up the bookkeeping; the VFS lock
+ itself cannot be rolled back without the allocation and will be
+ released at file or connection teardown.
+
+Fix cases 1 and 2 by hoisting the locks_free_lock()/kfree() to before
+the if(!rc) check in the UNLOCK branch so all exit paths share one
+free site, and by freeing smb_lock and flock before goto out in the
+non-UNLOCK branch. Propagate the correct error code in both cases.
+Fix case 3 by wrapping the VFS unlock in an if(rlock) guard and adding
+a NULL check for locks_free_lock(rlock) in the shared cleanup.
+
+Found via call-graph analysis using sqry.
+
+Fixes: e2f34481b24d ("cifsd: add server-side procedures for SMB3")
+Cc: stable@vger.kernel.org
+Suggested-by: ChenXiaoSong <chenxiaosong@kylinos.cn>
+Signed-off-by: Werner Kasselman <werner@verivus.com>
+Reviewed-by: ChenXiaoSong <chenxiaosong@kylinos.cn>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+[ adapted rlock->c.flc_type to rlock->fl_type ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/smb2pdu.c | 29 +++++++++++++++++++----------
+ 1 file changed, 19 insertions(+), 10 deletions(-)
+
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -7536,14 +7536,15 @@ retry:
+ rc = vfs_lock_file(filp, smb_lock->cmd, flock, NULL);
+ skip:
+ if (smb_lock->flags & SMB2_LOCKFLAG_UNLOCK) {
++ locks_free_lock(flock);
++ kfree(smb_lock);
+ if (!rc) {
+ ksmbd_debug(SMB, "File unlocked\n");
+ } else if (rc == -ENOENT) {
+ rsp->hdr.Status = STATUS_NOT_LOCKED;
++ err = rc;
+ goto out;
+ }
+- locks_free_lock(flock);
+- kfree(smb_lock);
+ } else {
+ if (rc == FILE_LOCK_DEFERRED) {
+ void **argv;
+@@ -7612,6 +7613,9 @@ skip:
+ spin_unlock(&work->conn->llist_lock);
+ ksmbd_debug(SMB, "successful in taking lock\n");
+ } else {
++ locks_free_lock(flock);
++ kfree(smb_lock);
++ err = rc;
+ goto out;
+ }
+ }
+@@ -7642,13 +7646,17 @@ out:
+ struct file_lock *rlock = NULL;
+
+ rlock = smb_flock_init(filp);
+- rlock->fl_type = F_UNLCK;
+- rlock->fl_start = smb_lock->start;
+- rlock->fl_end = smb_lock->end;
+-
+- rc = vfs_lock_file(filp, F_SETLK, rlock, NULL);
+- if (rc)
+- pr_err("rollback unlock fail : %d\n", rc);
++ if (rlock) {
++ rlock->fl_type = F_UNLCK;
++ rlock->fl_start = smb_lock->start;
++ rlock->fl_end = smb_lock->end;
++
++ rc = vfs_lock_file(filp, F_SETLK, rlock, NULL);
++ if (rc)
++ pr_err("rollback unlock fail : %d\n", rc);
++ } else {
++ pr_err("rollback unlock alloc failed\n");
++ }
+
+ list_del(&smb_lock->llist);
+ spin_lock(&work->conn->llist_lock);
+@@ -7658,7 +7666,8 @@ out:
+ spin_unlock(&work->conn->llist_lock);
+
+ locks_free_lock(smb_lock->fl);
+- locks_free_lock(rlock);
++ if (rlock)
++ locks_free_lock(rlock);
+ kfree(smb_lock);
+ }
+ out2:
--- /dev/null
+From stable+bounces-231300-greg=kroah.com@vger.kernel.org Tue Mar 31 02:18:29 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 30 Mar 2026 20:18:17 -0400
+Subject: ksmbd: fix use-after-free and NULL deref in smb_grant_oplock()
+To: stable@vger.kernel.org
+Cc: Werner Kasselman <werner@verivus.com>, ChenXiaoSong <chenxiaosong@kylinos.cn>, Namjae Jeon <linkinjeon@kernel.org>, Steve French <stfrench@microsoft.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260331001817.1575834-1-sashal@kernel.org>
+
+From: Werner Kasselman <werner@verivus.com>
+
+[ Upstream commit 48623ec358c1c600fa1e38368746f933e0f1a617 ]
+
+smb_grant_oplock() has two issues in the oplock publication sequence:
+
+1) opinfo is linked into ci->m_op_list (via opinfo_add) before
+ add_lease_global_list() is called. If add_lease_global_list()
+ fails (kmalloc returns NULL), the error path frees the opinfo
+ via __free_opinfo() while it is still linked in ci->m_op_list.
+ Concurrent m_op_list readers (opinfo_get_list, or direct iteration
+ in smb_break_all_levII_oplock) dereference the freed node.
+
+2) opinfo->o_fp is assigned after add_lease_global_list() publishes
+ the opinfo on the global lease list. A concurrent
+ find_same_lease_key() can walk the lease list and dereference
+ opinfo->o_fp->f_ci while o_fp is still NULL.
+
+Fix by restructuring the publication sequence to eliminate post-publish
+failure:
+
+- Set opinfo->o_fp before any list publication (fixes NULL deref).
+- Preallocate lease_table via alloc_lease_table() before opinfo_add()
+ so add_lease_global_list() becomes infallible after publication.
+- Keep the original m_op_list publication order (opinfo_add before
+ lease list) so concurrent opens via same_client_has_lease() and
+ opinfo_get_list() still see the in-flight grant.
+- Use opinfo_put() instead of __free_opinfo() on err_out so that
+ the RCU-deferred free path is used.
+
+This also requires splitting add_lease_global_list() to take a
+preallocated lease_table and changing its return type from int to void,
+since it can no longer fail.
+
+Fixes: 1dfd062caa16 ("ksmbd: fix use-after-free by using call_rcu() for oplock_info")
+Cc: stable@vger.kernel.org
+Signed-off-by: Werner Kasselman <werner@verivus.com>
+Reviewed-by: ChenXiaoSong <chenxiaosong@kylinos.cn>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+[ replaced kmalloc_obj() and KSMBD_DEFAULT_GFP with kmalloc(sizeof(), GFP_KERNEL) ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/oplock.c | 72 ++++++++++++++++++++++++++++++-------------------
+ 1 file changed, 45 insertions(+), 27 deletions(-)
+
+--- a/fs/smb/server/oplock.c
++++ b/fs/smb/server/oplock.c
+@@ -82,11 +82,19 @@ static void lease_del_list(struct oplock
+ spin_unlock(&lb->lb_lock);
+ }
+
+-static void lb_add(struct lease_table *lb)
++static struct lease_table *alloc_lease_table(struct oplock_info *opinfo)
+ {
+- write_lock(&lease_list_lock);
+- list_add(&lb->l_entry, &lease_table_list);
+- write_unlock(&lease_list_lock);
++ struct lease_table *lb;
++
++ lb = kmalloc(sizeof(struct lease_table), GFP_KERNEL);
++ if (!lb)
++ return NULL;
++
++ memcpy(lb->client_guid, opinfo->conn->ClientGUID,
++ SMB2_CLIENT_GUID_SIZE);
++ INIT_LIST_HEAD(&lb->lease_list);
++ spin_lock_init(&lb->lb_lock);
++ return lb;
+ }
+
+ static int alloc_lease(struct oplock_info *opinfo, struct lease_ctx_info *lctx)
+@@ -1042,34 +1050,27 @@ static void copy_lease(struct oplock_inf
+ lease2->version = lease1->version;
+ }
+
+-static int add_lease_global_list(struct oplock_info *opinfo)
++static void add_lease_global_list(struct oplock_info *opinfo,
++ struct lease_table *new_lb)
+ {
+ struct lease_table *lb;
+
+- read_lock(&lease_list_lock);
++ write_lock(&lease_list_lock);
+ list_for_each_entry(lb, &lease_table_list, l_entry) {
+ if (!memcmp(lb->client_guid, opinfo->conn->ClientGUID,
+ SMB2_CLIENT_GUID_SIZE)) {
+ opinfo->o_lease->l_lb = lb;
+ lease_add_list(opinfo);
+- read_unlock(&lease_list_lock);
+- return 0;
++ write_unlock(&lease_list_lock);
++ kfree(new_lb);
++ return;
+ }
+ }
+- read_unlock(&lease_list_lock);
+
+- lb = kmalloc(sizeof(struct lease_table), GFP_KERNEL);
+- if (!lb)
+- return -ENOMEM;
+-
+- memcpy(lb->client_guid, opinfo->conn->ClientGUID,
+- SMB2_CLIENT_GUID_SIZE);
+- INIT_LIST_HEAD(&lb->lease_list);
+- spin_lock_init(&lb->lb_lock);
+- opinfo->o_lease->l_lb = lb;
++ opinfo->o_lease->l_lb = new_lb;
+ lease_add_list(opinfo);
+- lb_add(lb);
+- return 0;
++ list_add(&new_lb->l_entry, &lease_table_list);
++ write_unlock(&lease_list_lock);
+ }
+
+ static void set_oplock_level(struct oplock_info *opinfo, int level,
+@@ -1189,6 +1190,7 @@ int smb_grant_oplock(struct ksmbd_work *
+ int err = 0;
+ struct oplock_info *opinfo = NULL, *prev_opinfo = NULL;
+ struct ksmbd_inode *ci = fp->f_ci;
++ struct lease_table *new_lb = NULL;
+ bool prev_op_has_lease;
+ __le32 prev_op_state = 0;
+
+@@ -1291,21 +1293,37 @@ set_lev:
+ set_oplock_level(opinfo, req_op_level, lctx);
+
+ out:
+- opinfo_count_inc(fp);
+- opinfo_add(opinfo, fp);
+-
++ /*
++ * Set o_fp before any publication so that concurrent readers
++ * (e.g. find_same_lease_key() on the lease list) that
++ * dereference opinfo->o_fp don't hit a NULL pointer.
++ *
++ * Keep the original publication order so concurrent opens can
++ * still observe the in-flight grant via ci->m_op_list, but make
++ * everything after opinfo_add() no-fail by preallocating any new
++ * lease_table first.
++ */
++ opinfo->o_fp = fp;
+ if (opinfo->is_lease) {
+- err = add_lease_global_list(opinfo);
+- if (err)
++ new_lb = alloc_lease_table(opinfo);
++ if (!new_lb) {
++ err = -ENOMEM;
+ goto err_out;
++ }
+ }
+
++ opinfo_count_inc(fp);
++ opinfo_add(opinfo, fp);
++
++ if (opinfo->is_lease)
++ add_lease_global_list(opinfo, new_lb);
++
+ rcu_assign_pointer(fp->f_opinfo, opinfo);
+- opinfo->o_fp = fp;
+
+ return 0;
+ err_out:
+- __free_opinfo(opinfo);
++ kfree(new_lb);
++ opinfo_put(opinfo);
+ return err;
+ }
+
--- /dev/null
+From kernel@schlaraffenlan.de Mon Mar 23 19:41:51 2026
+From: Jonas Rebmann <kernel@schlaraffenlan.de>
+Date: Mon, 23 Mar 2026 19:41:42 +0100
+Subject: libbpf: Fix -Wdiscarded-qualifiers under C23
+To: stable@vger.kernel.org
+Cc: Shung-Hsi Yu <shung-hsi.yu@suse.com>, Mikhail Gavrilov <mikhail.v.gavrilov@gmail.com>, Florian Weimer <fweimer@redhat.com>, Andrii Nakryiko <andrii@kernel.org>, Alexei Starovoitov <ast@kernel.org>, Shung-Hsi Yu <shung-hsi.yu@suse.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Jonas Rebmann <kernel@schlaraffenlan.de>
+Message-ID: <20260323-linux-6-6-y-c23-v1-1-a62654ec6cff@schlaraffenlan.de>
+
+From: Mikhail Gavrilov <mikhail.v.gavrilov@gmail.com>
+
+commit d70f79fef65810faf64dbae1f3a1b5623cdb2345 upstream.
+
+glibc ≥ 2.42 (GCC 15) defaults to -std=gnu23, which promotes
+-Wdiscarded-qualifiers to an error.
+
+In C23, strstr() and strchr() return "const char *".
+
+Change variable types to const char * where the pointers are never
+modified (res, sym_sfx, next_path).
+
+[ shung-hsi.yu: needed to fix kernel build failure due to libbpf since glibc
+ 2.43+ (which adds 'const' qualifier to strstr) ]
+[ Jonas Rebmann: down to one declaration on 6.6 to resolve build error
+ with glibc 2.43 ]
+
+Suggested-by: Florian Weimer <fweimer@redhat.com>
+Suggested-by: Andrii Nakryiko <andrii@kernel.org>
+Signed-off-by: Mikhail Gavrilov <mikhail.v.gavrilov@gmail.com>
+Link: https://lore.kernel.org/r/20251206092825.1471385-1-mikhail.v.gavrilov@gmail.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Shung-Hsi Yu <shung-hsi.yu@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Jonas Rebmann <kernel@schlaraffenlan.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/lib/bpf/libbpf.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -11247,7 +11247,7 @@ static int resolve_full_path(const char
+ if (!search_paths[i])
+ continue;
+ for (s = search_paths[i]; s != NULL; s = strchr(s, ':')) {
+- char *next_path;
++ const char *next_path;
+ int seg_len;
+
+ if (s[0] == ':')
--- /dev/null
+From stable+bounces-230287-greg=kroah.com@vger.kernel.org Wed Mar 25 10:22:40 2026
+From: liyin.zhang.cn@windriver.com
+Date: Wed, 25 Mar 2026 17:17:39 +0800
+Subject: mtd: spi-nor: core: avoid odd length/address reads on 8D-8D-8D mode
+To: stable@vger.kernel.org
+Message-ID: <20260325091740.941742-2-liyin.zhang.cn@windriver.com>
+
+From: Pratyush Yadav <p.yadav@ti.com>
+
+[ Upstream commit f156b23df6a84efb2f6686156be94d4988568954 ]
+
+On Octal DTR capable flashes like Micron Xcella reads cannot start or
+end at an odd address in Octal DTR mode. Extra bytes need to be read at
+the start or end to make sure both the start address and length remain
+even.
+
+To avoid allocating too much extra memory, thereby putting unnecessary
+memory pressure on the system, the temporary buffer containing the extra
+padding bytes is capped at PAGE_SIZE bytes. The rest of the 2-byte
+aligned part should be read directly in the main buffer.
+
+Signed-off-by: Pratyush Yadav <p.yadav@ti.com>
+Reviewed-by: Michael Walle <michael@walle.cc>
+Signed-off-by: Luke Wang <ziniu.wang_1@nxp.com>
+Signed-off-by: Pratyush Yadav <pratyush@kernel.org>
+Link: https://lore.kernel.org/r/20250708091646.292-1-ziniu.wang_1@nxp.com
+[ Resolve conflict in drivers/mtd/spi-nor/core.c.
+ In spi_nor_read(), 6.6.y contains a spi_nor_convert_addr() call
+ before spi_nor_read_data(), introduced by 364995962803 ("mtd:
+ spi-nor: Add a ->convert_addr() method"), which does not exist in
+ mainline. This call is specific to Xilinx S3AN flashes, which use a
+ non-standard address format. In mainline, S3AN flash support was
+ removed entirely, and the corresponding spi_nor_convert_addr() call
+ was dropped by 9539d12d9f52 ("mtd: spi-nor: get rid of non-power-of-2
+ page size handling"). Keep the existing spi_nor_convert_addr() call
+ and insert the new spi_nor_octal_dtr_read() branch after it. ]
+Signed-off-by: Liyin Zhang <liyin.zhang.cn@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mtd/spi-nor/core.c | 76 ++++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 75 insertions(+), 1 deletion(-)
+
+--- a/drivers/mtd/spi-nor/core.c
++++ b/drivers/mtd/spi-nor/core.c
+@@ -2082,6 +2082,76 @@ static const struct flash_info *spi_nor_
+ return info;
+ }
+
++/*
++ * On Octal DTR capable flashes, reads cannot start or end at an odd
++ * address in Octal DTR mode. Extra bytes need to be read at the start
++ * or end to make sure both the start address and length remain even.
++ */
++static int spi_nor_octal_dtr_read(struct spi_nor *nor, loff_t from, size_t len,
++ u_char *buf)
++{
++ u_char *tmp_buf;
++ size_t tmp_len;
++ loff_t start, end;
++ int ret, bytes_read;
++
++ if (IS_ALIGNED(from, 2) && IS_ALIGNED(len, 2))
++ return spi_nor_read_data(nor, from, len, buf);
++ else if (IS_ALIGNED(from, 2) && len > PAGE_SIZE)
++ return spi_nor_read_data(nor, from, round_down(len, PAGE_SIZE),
++ buf);
++
++ tmp_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
++ if (!tmp_buf)
++ return -ENOMEM;
++
++ start = round_down(from, 2);
++ end = round_up(from + len, 2);
++
++ /*
++ * Avoid allocating too much memory. The requested read length might be
++ * quite large. Allocating a buffer just as large (slightly bigger, in
++ * fact) would put unnecessary memory pressure on the system.
++ *
++ * For example if the read is from 3 to 1M, then this will read from 2
++ * to 4098. The reads from 4098 to 1M will then not need a temporary
++ * buffer so they can proceed as normal.
++ */
++ tmp_len = min_t(size_t, end - start, PAGE_SIZE);
++
++ ret = spi_nor_read_data(nor, start, tmp_len, tmp_buf);
++ if (ret == 0) {
++ ret = -EIO;
++ goto out;
++ }
++ if (ret < 0)
++ goto out;
++
++ /*
++ * More bytes are read than actually requested, but that number can't be
++ * reported to the calling function or it will confuse its calculations.
++ * Calculate how many of the _requested_ bytes were read.
++ */
++ bytes_read = ret;
++
++ if (from != start)
++ ret -= from - start;
++
++ /*
++ * Only account for extra bytes at the end if they were actually read.
++ * For example, if the total length was truncated because of temporary
++ * buffer size limit then the adjustment for the extra bytes at the end
++ * is not needed.
++ */
++ if (start + bytes_read == end)
++ ret -= end - (from + len);
++
++ memcpy(buf, tmp_buf + (from - start), ret);
++out:
++ kfree(tmp_buf);
++ return ret;
++}
++
+ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+ {
+@@ -2101,7 +2171,11 @@ static int spi_nor_read(struct mtd_info
+
+ addr = spi_nor_convert_addr(nor, addr);
+
+- ret = spi_nor_read_data(nor, addr, len, buf);
++ if (nor->read_proto == SNOR_PROTO_8_8_8_DTR)
++ ret = spi_nor_octal_dtr_read(nor, addr, len, buf);
++ else
++ ret = spi_nor_read_data(nor, addr, len, buf);
++
+ if (ret == 0) {
+ /* We shouldn't see 0-length reads */
+ ret = -EIO;
--- /dev/null
+From stable+bounces-230288-greg=kroah.com@vger.kernel.org Wed Mar 25 10:22:48 2026
+From: liyin.zhang.cn@windriver.com
+Date: Wed, 25 Mar 2026 17:17:40 +0800
+Subject: mtd: spi-nor: core: avoid odd length/address writes in 8D-8D-8D mode
+To: stable@vger.kernel.org
+Message-ID: <20260325091740.941742-3-liyin.zhang.cn@windriver.com>
+
+From: Pratyush Yadav <p.yadav@ti.com>
+
+[ Upstream commit 17926cd770ec837ed27d9856cf07f2da8dda4131 ]
+
+On Octal DTR capable flashes like Micron Xcella the writes cannot start
+or end at an odd address in Octal DTR mode. Extra 0xff bytes need to be
+appended or prepended to make sure the start address and end address are
+even. 0xff is used because on NOR flashes a program operation can only
+flip bits from 1 to 0, not the other way round. 0 to 1 flip needs to
+happen via erases.
+
+Signed-off-by: Pratyush Yadav <p.yadav@ti.com>
+Reviewed-by: Michael Walle <michael@walle.cc>
+Signed-off-by: Luke Wang <ziniu.wang_1@nxp.com>
+Signed-off-by: Pratyush Yadav <pratyush@kernel.org>
+Link: https://lore.kernel.org/r/20250708091646.292-2-ziniu.wang_1@nxp.com
+Signed-off-by: Liyin Zhang <liyin.zhang.cn@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mtd/spi-nor/core.c | 69 ++++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 68 insertions(+), 1 deletion(-)
+
+--- a/drivers/mtd/spi-nor/core.c
++++ b/drivers/mtd/spi-nor/core.c
+@@ -2199,6 +2199,68 @@ read_err:
+ }
+
+ /*
++ * On Octal DTR capable flashes, writes cannot start or end at an odd address
++ * in Octal DTR mode. Extra 0xff bytes need to be appended or prepended to
++ * make sure the start address and end address are even. 0xff is used because
++ * on NOR flashes a program operation can only flip bits from 1 to 0, not the
++ * other way round. 0 to 1 flip needs to happen via erases.
++ */
++static int spi_nor_octal_dtr_write(struct spi_nor *nor, loff_t to, size_t len,
++ const u8 *buf)
++{
++ u8 *tmp_buf;
++ size_t bytes_written;
++ loff_t start, end;
++ int ret;
++
++ if (IS_ALIGNED(to, 2) && IS_ALIGNED(len, 2))
++ return spi_nor_write_data(nor, to, len, buf);
++
++ tmp_buf = kmalloc(nor->params->page_size, GFP_KERNEL);
++ if (!tmp_buf)
++ return -ENOMEM;
++
++ memset(tmp_buf, 0xff, nor->params->page_size);
++
++ start = round_down(to, 2);
++ end = round_up(to + len, 2);
++
++ memcpy(tmp_buf + (to - start), buf, len);
++
++ ret = spi_nor_write_data(nor, start, end - start, tmp_buf);
++ if (ret == 0) {
++ ret = -EIO;
++ goto out;
++ }
++ if (ret < 0)
++ goto out;
++
++ /*
++ * More bytes are written than actually requested, but that number can't
++ * be reported to the calling function or it will confuse its
++ * calculations. Calculate how many of the _requested_ bytes were
++ * written.
++ */
++ bytes_written = ret;
++
++ if (to != start)
++ ret -= to - start;
++
++ /*
++ * Only account for extra bytes at the end if they were actually
++ * written. For example, if for some reason the controller could only
++ * complete a partial write then the adjustment for the extra bytes at
++ * the end is not needed.
++ */
++ if (start + bytes_written == end)
++ ret -= end - (to + len);
++
++out:
++ kfree(tmp_buf);
++ return ret;
++}
++
++/*
+ * Write an address range to the nor chip. Data must be written in
+ * FLASH_PAGESIZE chunks. The address range may be any size provided
+ * it is within the physical boundaries.
+@@ -2248,7 +2310,12 @@ static int spi_nor_write(struct mtd_info
+ goto write_err;
+ }
+
+- ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
++ if (nor->write_proto == SNOR_PROTO_8_8_8_DTR)
++ ret = spi_nor_octal_dtr_write(nor, addr, page_remain,
++ buf + i);
++ else
++ ret = spi_nor_write_data(nor, addr, page_remain,
++ buf + i);
+ spi_nor_unlock_device(nor);
+ if (ret < 0)
+ goto write_err;
--- /dev/null
+From stable+bounces-231348-greg=kroah.com@vger.kernel.org Tue Mar 31 09:44:05 2026
+From: Li hongliang <1468888505@139.com>
+Date: Tue, 31 Mar 2026 15:36:59 +0800
+Subject: nvme: fix admin queue leak on controller reset
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org, ming.lei@redhat.com
+Cc: patches@lists.linux.dev, linux-kernel@vger.kernel.org, yi.zhang@redhat.com, kbusch@kernel.org, axboe@fb.com, hch@lst.de, sagi@grimberg.me, hare@suse.de, kch@nvidia.com, linux-nvme@lists.infradead.org
+Message-ID: <20260331073659.3136206-1-1468888505@139.com>
+
+From: Ming Lei <ming.lei@redhat.com>
+
+[ Upstream commit b84bb7bd913d8ca2f976ee6faf4a174f91c02b8d ]
+
+When nvme_alloc_admin_tag_set() is called during a controller reset,
+a previous admin queue may still exist. Release it properly before
+allocating a new one to avoid orphaning the old queue.
+
+This fixes a regression introduced by commit 03b3bcd319b3 ("nvme: fix
+admin request_queue lifetime").
+
+Cc: Keith Busch <kbusch@kernel.org>
+Fixes: 03b3bcd319b3 ("nvme: fix admin request_queue lifetime")
+Reported-and-tested-by: Yi Zhang <yi.zhang@redhat.com>
+Closes: https://lore.kernel.org/linux-block/CAHj4cs9wv3SdPo+N01Fw2SHBYDs9tj2M_e1-GdQOkRy=DsBB1w@mail.gmail.com/
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Signed-off-by: Li hongliang <1468888505@139.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/nvme/host/core.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -4287,6 +4287,13 @@ int nvme_alloc_admin_tag_set(struct nvme
+ if (ret)
+ return ret;
+
++ /*
++ * If a previous admin queue exists (e.g., from before a reset),
++ * put it now before allocating a new one to avoid orphaning it.
++ */
++ if (ctrl->admin_q)
++ blk_put_queue(ctrl->admin_q);
++
+ ctrl->admin_q = blk_mq_init_queue(set);
+ if (IS_ERR(ctrl->admin_q)) {
+ ret = PTR_ERR(ctrl->admin_q);
--- /dev/null
+From 521bd39d9d28ce54cbfec7f9b89c94ad4fdb8350 Mon Sep 17 00:00:00 2001
+From: Hari Bathini <hbathini@linux.ibm.com>
+Date: Tue, 3 Mar 2026 23:40:25 +0530
+Subject: powerpc64/bpf: do not increment tailcall count when prog is NULL
+
+From: Hari Bathini <hbathini@linux.ibm.com>
+
+commit 521bd39d9d28ce54cbfec7f9b89c94ad4fdb8350 upstream.
+
+Do not increment tailcall count, if tailcall did not succeed due to
+missing BPF program.
+
+Fixes: ce0761419fae ("powerpc/bpf: Implement support for tail calls")
+Cc: stable@vger.kernel.org
+Tested-by: Venkat Rao Bagalkote <venkat88@linux.ibm.com>
+Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
+Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com>
+Link: https://patch.msgid.link/20260303181031.390073-2-hbathini@linux.ibm.com
+[ Conflict due to missing feature commit 2ed2d8f6fb38 ("powerpc64/bpf:
+ Support tailcalls with subprogs") resolved accordingly. ]
+Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/net/bpf_jit_comp64.c | 23 ++++++++++++++---------
+ 1 file changed, 14 insertions(+), 9 deletions(-)
+
+--- a/arch/powerpc/net/bpf_jit_comp64.c
++++ b/arch/powerpc/net/bpf_jit_comp64.c
+@@ -307,27 +307,32 @@ static int bpf_jit_emit_tail_call(u32 *i
+
+ /*
+ * tail_call_cnt++;
++ * Writeback this updated value only if tailcall succeeds.
+ */
+ EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 1));
+- EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
+
+ /* prog = array->ptrs[index]; */
+- EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_1), b2p_index, 8));
+- EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), b2p_bpf_array));
+- EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_array, ptrs)));
++ EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_2), b2p_index, 8));
++ EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_2), b2p_bpf_array));
++ EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_2),
++ offsetof(struct bpf_array, ptrs)));
+
+ /*
+ * if (prog == NULL)
+ * goto out;
+ */
+- EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_1), 0));
++ EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_2), 0));
+ PPC_BCC_SHORT(COND_EQ, out);
+
+ /* goto *(prog->bpf_func + prologue_size); */
+- EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_prog, bpf_func)));
+- EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1),
+- FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
+- EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));
++ EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_2),
++ offsetof(struct bpf_prog, bpf_func)));
++ EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_2),
++ FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
++ EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_2)));
++
++ /* Writeback updated tailcall count */
++ EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
+
+ /* tear down stack, restore NVRs, ... */
+ bpf_jit_emit_common_epilogue(image, ctx);
--- /dev/null
+From rust-for-linux+bounces-38643-greg=kroah.com@vger.kernel.org Wed Mar 25 14:30:31 2026
+From: Benno Lossin <lossin@kernel.org>
+Date: Wed, 25 Mar 2026 13:59:41 +0100
+Subject: rust: pin-init: add references to previously initialized fields
+To: "Miguel Ojeda" <ojeda@kernel.org>, "Alex Gaynor" <alex.gaynor@gmail.com>, "Wedson Almeida Filho" <wedsonaf@gmail.com>, "Boqun Feng" <boqun.feng@gmail.com>, "Gary Guo" <gary@garyguo.net>, "Björn Roy Baron" <bjorn3_gh@protonmail.com>, "Benno Lossin" <benno.lossin@proton.me>, "Andreas Hindborg" <a.hindborg@samsung.com>, "Alice Ryhl" <aliceryhl@google.com>, "Danilo Krummrich" <dakr@kernel.org>
+Cc: Benno Lossin <lossin@kernel.org>, stable@vger.kernel.org, rust-for-linux@vger.kernel.org, linux-kernel@vger.kernel.org
+Message-ID: <20260325125944.947263-1-lossin@kernel.org>
+
+From: Benno Lossin <lossin@kernel.org>
+
+[ Upstream commit 42415d163e5df6db799c7de6262d707e402c2c7e ]
+
+After initializing a field in an initializer macro, create a variable
+holding a reference that points at that field. The type is either
+`Pin<&mut T>` or `&mut T` depending on the field's structural pinning
+kind.
+
+[ Applied fixes to devres and rust_driver_pci sample - Benno ]
+Reviewed-by: Danilo Krummrich <dakr@kernel.org>
+Signed-off-by: Benno Lossin <lossin@kernel.org>
+[ Removed the devres changes, because devres is not present in 6.12.y and
+ earlier. Also adjusted paths in the macro to account for the fact that
+ pin-init is part of the kernel crate in 6.12.y and earlier. - Benno ]
+Signed-off-by: Benno Lossin <lossin@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ rust/kernel/init/macros.rs | 144 +++++++++++++++++++++++++++++++++++----------
+ 1 file changed, 113 insertions(+), 31 deletions(-)
+
+--- a/rust/kernel/init/macros.rs
++++ b/rust/kernel/init/macros.rs
+@@ -964,35 +964,54 @@ macro_rules! __pin_data {
+ @pinned($($(#[$($p_attr:tt)*])* $pvis:vis $p_field:ident : $p_type:ty),* $(,)?),
+ @not_pinned($($(#[$($attr:tt)*])* $fvis:vis $field:ident : $type:ty),* $(,)?),
+ ) => {
+- // For every field, we create a projection function according to its projection type. If a
+- // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
+- // structurally pinned, then it can be initialized via `Init`.
+- //
+- // The functions are `unsafe` to prevent accidentally calling them.
+- #[allow(dead_code)]
+- impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
+- where $($whr)*
+- {
+- $(
+- $(#[$($p_attr)*])*
+- $pvis unsafe fn $p_field<E>(
+- self,
+- slot: *mut $p_type,
+- init: impl $crate::init::PinInit<$p_type, E>,
+- ) -> ::core::result::Result<(), E> {
+- unsafe { $crate::init::PinInit::__pinned_init(init, slot) }
+- }
+- )*
+- $(
+- $(#[$($attr)*])*
+- $fvis unsafe fn $field<E>(
+- self,
+- slot: *mut $type,
+- init: impl $crate::init::Init<$type, E>,
+- ) -> ::core::result::Result<(), E> {
+- unsafe { $crate::init::Init::__init(init, slot) }
+- }
+- )*
++ $crate::macros::paste! {
++ // For every field, we create a projection function according to its projection type. If a
++ // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
++ // structurally pinned, then it can be initialized via `Init`.
++ //
++ // The functions are `unsafe` to prevent accidentally calling them.
++ #[allow(dead_code, non_snake_case)]
++ #[expect(clippy::missing_safety_doc)]
++ impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
++ where $($whr)*
++ {
++ $(
++ $(#[$($p_attr)*])*
++ $pvis unsafe fn $p_field<E>(
++ self,
++ slot: *mut $p_type,
++ init: impl $crate::init::PinInit<$p_type, E>,
++ ) -> ::core::result::Result<(), E> {
++ unsafe { $crate::init::PinInit::__pinned_init(init, slot) }
++ }
++
++ $(#[$($p_attr)*])*
++ $pvis unsafe fn [<__project_ $p_field>]<'__slot>(
++ self,
++ slot: &'__slot mut $p_type,
++ ) -> ::core::pin::Pin<&'__slot mut $p_type> {
++ unsafe { ::core::pin::Pin::new_unchecked(slot) }
++ }
++ )*
++ $(
++ $(#[$($attr)*])*
++ $fvis unsafe fn $field<E>(
++ self,
++ slot: *mut $type,
++ init: impl $crate::init::Init<$type, E>,
++ ) -> ::core::result::Result<(), E> {
++ unsafe { $crate::init::Init::__init(init, slot) }
++ }
++
++ $(#[$($attr)*])*
++ $fvis unsafe fn [<__project_ $field>]<'__slot>(
++ self,
++ slot: &'__slot mut $type,
++ ) -> &'__slot mut $type {
++ slot
++ }
++ )*
++ }
+ }
+ };
+ }
+@@ -1186,6 +1205,13 @@ macro_rules! __init_internal {
+ // return when an error/panic occurs.
+ // We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
+ unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? };
++ // SAFETY:
++ // - the project function does the correct field projection,
++ // - the field has been initialized,
++ // - the reference is only valid until the end of the initializer.
++ #[allow(unused_variables, unused_assignments)]
++ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
++
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+@@ -1217,6 +1243,14 @@ macro_rules! __init_internal {
+ // SAFETY: `slot` is valid, because we are inside of an initializer closure, we
+ // return when an error/panic occurs.
+ unsafe { $crate::init::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
++
++ // SAFETY:
++ // - the field is not structurally pinned, since the line above must compile,
++ // - the field has been initialized,
++ // - the reference is only valid until the end of the initializer.
++ #[allow(unused_variables, unused_assignments)]
++ let $field = unsafe { &mut (*$slot).$field };
++
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+@@ -1235,7 +1269,7 @@ macro_rules! __init_internal {
+ );
+ }
+ };
+- (init_slot($($use_data:ident)?):
++ (init_slot(): // No `use_data`, so all fields are not structurally pinned
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+@@ -1249,6 +1283,15 @@ macro_rules! __init_internal {
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
++
++ #[allow(unused_variables, unused_assignments)]
++ // SAFETY:
++ // - the field is not structurally pinned, since no `use_data` was required to create this
++ // initializer,
++ // - the field has been initialized,
++ // - the reference is only valid until the end of the initializer.
++ let $field = unsafe { &mut (*$slot).$field };
++
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+@@ -1259,7 +1302,46 @@ macro_rules! __init_internal {
+ $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+- $crate::__init_internal!(init_slot($($use_data)?):
++ $crate::__init_internal!(init_slot():
++ @data($data),
++ @slot($slot),
++ @guards([< __ $field _guard >], $($guards,)*),
++ @munch_fields($($rest)*),
++ );
++ }
++ };
++ (init_slot($use_data:ident):
++ @data($data:ident),
++ @slot($slot:ident),
++ @guards($($guards:ident,)*),
++ // Init by-value.
++ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
++ ) => {
++ {
++ $(let $field = $val;)?
++ // Initialize the field.
++ //
++ // SAFETY: The memory at `slot` is uninitialized.
++ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
++ }
++ // SAFETY:
++ // - the project function does the correct field projection,
++ // - the field has been initialized,
++ // - the reference is only valid until the end of the initializer.
++ #[allow(unused_variables, unused_assignments)]
++ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
++
++ // Create the drop guard:
++ //
++ // We rely on macro hygiene to make it impossible for users to access this local variable.
++ // We use `paste!` to create new hygiene for `$field`.
++ $crate::macros::paste! {
++ // SAFETY: We forget the guard later when initialization has succeeded.
++ let [< __ $field _guard >] = unsafe {
++ $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
++ };
++
++ $crate::__init_internal!(init_slot($use_data):
+ @data($data),
+ @slot($slot),
+ @guards([<$field>], $($guards,)*),
--- /dev/null
+From rust-for-linux+bounces-38644-greg=kroah.com@vger.kernel.org Wed Mar 25 14:14:34 2026
+From: Benno Lossin <lossin@kernel.org>
+Date: Wed, 25 Mar 2026 13:59:42 +0100
+Subject: rust: pin-init: internal: init: document load-bearing fact of field accessors
+To: "Miguel Ojeda" <ojeda@kernel.org>, "Alex Gaynor" <alex.gaynor@gmail.com>, "Wedson Almeida Filho" <wedsonaf@gmail.com>, "Boqun Feng" <boqun.feng@gmail.com>, "Gary Guo" <gary@garyguo.net>, "Björn Roy Baron" <bjorn3_gh@protonmail.com>, "Benno Lossin" <benno.lossin@proton.me>, "Andreas Hindborg" <a.hindborg@samsung.com>, "Alice Ryhl" <aliceryhl@google.com>, "Danilo Krummrich" <dakr@kernel.org>
+Cc: Benno Lossin <lossin@kernel.org>, stable@vger.kernel.org, rust-for-linux@vger.kernel.org, linux-kernel@vger.kernel.org
+Message-ID: <20260325125944.947263-2-lossin@kernel.org>
+
+From: Benno Lossin <lossin@kernel.org>
+
+[ Upstream commit 580cc37b1de4fcd9997c48d7080e744533f09f36 ]
+
+The functions `[Pin]Init::__[pinned_]init` and `ptr::write` called from
+the `init!` macro require the passed pointer to be aligned. This fact is
+ensured by the creation of field accessors to previously initialized
+fields.
+
+Since we missed this very important fact from the beginning [1],
+document it in the code.
+
+Link: https://rust-for-linux.zulipchat.com/#narrow/channel/561532-pin-init/topic/initialized.20field.20accessor.20detection/with/576210658 [1]
+Fixes: 90e53c5e70a6 ("rust: add pin-init API core")
+Cc: <stable@vger.kernel.org> # 6.6.y, 6.12.y: 42415d163e5d: rust: pin-init: add references to previously initialized fields
+Cc: <stable@vger.kernel.org> # 6.6.y, 6.12.y, 6.18.y, 6.19.y
+Signed-off-by: Benno Lossin <lossin@kernel.org>
+Reviewed-by: Gary Guo <gary@garyguo.net>
+Link: https://patch.msgid.link/20260302140424.4097655-2-lossin@kernel.org
+[ Updated Cc: stable@ tags as discussed. - Miguel ]
+Signed-off-by: Miguel Ojeda <ojeda@kernel.org>
+[ Moved changes to the declarative macro, because 6.19.y and earlier do not
+ have `syn`. Also duplicated the comment for all field accessor creations.
+ - Benno ]
+Signed-off-by: Benno Lossin <lossin@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ rust/kernel/init/macros.rs | 16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+--- a/rust/kernel/init/macros.rs
++++ b/rust/kernel/init/macros.rs
+@@ -1205,6 +1205,10 @@ macro_rules! __init_internal {
+ // return when an error/panic occurs.
+ // We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
+ unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? };
++ // NOTE: the field accessor ensures that the initialized field is properly aligned.
++ // Unaligned fields will cause the compiler to emit E0793. We do not support
++ // unaligned fields since `Init::__init` requires an aligned pointer; the call to
++ // `ptr::write` below has the same requirement.
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+@@ -1244,6 +1248,10 @@ macro_rules! __init_internal {
+ // return when an error/panic occurs.
+ unsafe { $crate::init::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
+
++ // NOTE: the field accessor ensures that the initialized field is properly aligned.
++ // Unaligned fields will cause the compiler to emit E0793. We do not support
++ // unaligned fields since `Init::__init` requires an aligned pointer; the call to
++ // `ptr::write` below has the same requirement.
+ // SAFETY:
+ // - the field is not structurally pinned, since the line above must compile,
+ // - the field has been initialized,
+@@ -1284,6 +1292,10 @@ macro_rules! __init_internal {
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
+
++ // NOTE: the field accessor ensures that the initialized field is properly aligned.
++ // Unaligned fields will cause the compiler to emit E0793. We do not support
++ // unaligned fields since `Init::__init` requires an aligned pointer; the call to
++ // `ptr::write` below has the same requirement.
+ #[allow(unused_variables, unused_assignments)]
+ // SAFETY:
+ // - the field is not structurally pinned, since no `use_data` was required to create this
+@@ -1324,6 +1336,10 @@ macro_rules! __init_internal {
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
++ // NOTE: the field accessor ensures that the initialized field is properly aligned.
++ // Unaligned fields will cause the compiler to emit E0793. We do not support
++ // unaligned fields since `Init::__init` requires an aligned pointer; the call to
++ // `ptr::write` below has the same requirement.
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
ext4-fix-iloc.bh-leak-in-ext4_fc_replay_inode-error-paths.patch
ext4-always-drain-queued-discard-work-in-ext4_mb_release.patch
arm64-dts-imx8mn-tqma8mqnl-fix-ldo5-power-off.patch
+powerpc64-bpf-do-not-increment-tailcall-count-when-prog-is-null.patch
+ksmbd-fix-use-after-free-and-null-deref-in-smb_grant_oplock.patch
+ksmbd-fix-memory-leaks-and-null-deref-in-smb2_lock.patch
+tracing-switch-trace_osnoise.c-code-over-to-use-guard-and-__free.patch
+tracing-fix-potential-deadlock-in-cpu-hotplug-with-osnoise.patch
+rust-pin-init-add-references-to-previously-initialized-fields.patch
+rust-pin-init-internal-init-document-load-bearing-fact-of-field-accessors.patch
+mtd-spi-nor-core-avoid-odd-length-address-reads-on-8d-8d-8d-mode.patch
+mtd-spi-nor-core-avoid-odd-length-address-writes-in-8d-8d-8d-mode.patch
+gfs2-fix-unlikely-race-in-gdlm_put_lock.patch
+libbpf-fix-wdiscarded-qualifiers-under-c23.patch
+xattr-switch-to-class-fd.patch
+nvme-fix-admin-queue-leak-on-controller-reset.patch
--- /dev/null
+From stable+bounces-231237-greg=kroah.com@vger.kernel.org Mon Mar 30 16:33:38 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 30 Mar 2026 10:25:47 -0400
+Subject: tracing: Fix potential deadlock in cpu hotplug with osnoise
+To: stable@vger.kernel.org
+Cc: Luo Haiyang <luo.haiyang@zte.com.cn>, mathieu.desnoyers@efficios.com, zhang.run@zte.com.cn, yang.tao172@zte.com.cn, ran.xiaokai@zte.com.cn, "Masami Hiramatsu (Google)" <mhiramat@kernel.org>, "Steven Rostedt (Google)" <rostedt@goodmis.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260330142547.819699-2-sashal@kernel.org>
+
+From: Luo Haiyang <luo.haiyang@zte.com.cn>
+
+[ Upstream commit 1f9885732248d22f788e4992c739a98c88ab8a55 ]
+
+The following sequence may lead to a deadlock in cpu hotplug:
+
+ task1 task2 task3
+ ----- ----- -----
+
+ mutex_lock(&interface_lock)
+
+ [CPU GOING OFFLINE]
+
+ cpus_write_lock();
+ osnoise_cpu_die();
+ kthread_stop(task3);
+ wait_for_completion();
+
+ osnoise_sleep();
+ mutex_lock(&interface_lock);
+
+ cpus_read_lock();
+
+ [DEAD LOCK]
+
+Fix by swapping the order of cpus_read_lock() and mutex_lock(&interface_lock).
+
+Cc: stable@vger.kernel.org
+Cc: <mathieu.desnoyers@efficios.com>
+Cc: <zhang.run@zte.com.cn>
+Cc: <yang.tao172@zte.com.cn>
+Cc: <ran.xiaokai@zte.com.cn>
+Fixes: bce29ac9ce0bb ("trace: Add osnoise tracer")
+Link: https://patch.msgid.link/20260326141953414bVSj33dAYktqp9Oiyizq8@zte.com.cn
+Reviewed-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Signed-off-by: Luo Haiyang <luo.haiyang@zte.com.cn>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace_osnoise.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/kernel/trace/trace_osnoise.c
++++ b/kernel/trace/trace_osnoise.c
+@@ -2104,8 +2104,8 @@ static void osnoise_hotplug_workfn(struc
+ if (!osnoise_has_registered_instances())
+ return;
+
+- guard(mutex)(&interface_lock);
+ guard(cpus_read_lock)();
++ guard(mutex)(&interface_lock);
+
+ if (!cpu_online(cpu))
+ return;
+@@ -2268,11 +2268,11 @@ static ssize_t osnoise_options_write(str
+ if (running)
+ stop_per_cpu_kthreads();
+
+- mutex_lock(&interface_lock);
+ /*
+ * avoid CPU hotplug operations that might read options.
+ */
+ cpus_read_lock();
++ mutex_lock(&interface_lock);
+
+ retval = cnt;
+
+@@ -2288,8 +2288,8 @@ static ssize_t osnoise_options_write(str
+ clear_bit(option, &osnoise_options);
+ }
+
+- cpus_read_unlock();
+ mutex_unlock(&interface_lock);
++ cpus_read_unlock();
+
+ if (running)
+ start_per_cpu_kthreads();
+@@ -2375,16 +2375,16 @@ osnoise_cpus_write(struct file *filp, co
+ if (running)
+ stop_per_cpu_kthreads();
+
+- mutex_lock(&interface_lock);
+ /*
+ * osnoise_cpumask is read by CPU hotplug operations.
+ */
+ cpus_read_lock();
++ mutex_lock(&interface_lock);
+
+ cpumask_copy(&osnoise_cpumask, osnoise_cpumask_new);
+
+- cpus_read_unlock();
+ mutex_unlock(&interface_lock);
++ cpus_read_unlock();
+
+ if (running)
+ start_per_cpu_kthreads();
--- /dev/null
+From stable+bounces-231236-greg=kroah.com@vger.kernel.org Mon Mar 30 16:38:17 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 30 Mar 2026 10:25:46 -0400
+Subject: tracing: Switch trace_osnoise.c code over to use guard() and __free()
+To: stable@vger.kernel.org
+Cc: Steven Rostedt <rostedt@goodmis.org>, Masami Hiramatsu <mhiramat@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Mathieu Desnoyers <mathieu.desnoyers@efficios.com>, Andrew Morton <akpm@linux-foundation.org>, Peter Zijlstra <peterz@infradead.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260330142547.819699-1-sashal@kernel.org>
+
+From: Steven Rostedt <rostedt@goodmis.org>
+
+[ Upstream commit 930d2b32c0af6895ba4c6ca6404e7f7b6dc214ed ]
+
+The osnoise_hotplug_workfn() grabs two mutexes and cpu_read_lock(). It has
+various gotos to handle unlocking them. Switch them over to guard() and
+let the compiler worry about it.
+
+The osnoise_cpus_read() has a temporary mask_str allocated and there's
+some gotos to make sure it gets freed on error paths. Switch that over to
+__free() to let the compiler worry about it.
+
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: https://lore.kernel.org/20241225222931.517329690@goodmis.org
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Stable-dep-of: 1f9885732248 ("tracing: Fix potential deadlock in cpu hotplug with osnoise")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace_osnoise.c | 40 +++++++++++++---------------------------
+ 1 file changed, 13 insertions(+), 27 deletions(-)
+
+--- a/kernel/trace/trace_osnoise.c
++++ b/kernel/trace/trace_osnoise.c
+@@ -2099,26 +2099,21 @@ static void osnoise_hotplug_workfn(struc
+ {
+ unsigned int cpu = smp_processor_id();
+
+- mutex_lock(&trace_types_lock);
++ guard(mutex)(&trace_types_lock);
+
+ if (!osnoise_has_registered_instances())
+- goto out_unlock_trace;
++ return;
+
+- mutex_lock(&interface_lock);
+- cpus_read_lock();
++ guard(mutex)(&interface_lock);
++ guard(cpus_read_lock)();
+
+ if (!cpu_online(cpu))
+- goto out_unlock;
++ return;
++
+ if (!cpumask_test_cpu(cpu, &osnoise_cpumask))
+- goto out_unlock;
++ return;
+
+ start_kthread(cpu);
+-
+-out_unlock:
+- cpus_read_unlock();
+- mutex_unlock(&interface_lock);
+-out_unlock_trace:
+- mutex_unlock(&trace_types_lock);
+ }
+
+ static DECLARE_WORK(osnoise_hotplug_work, osnoise_hotplug_workfn);
+@@ -2316,31 +2311,22 @@ static ssize_t
+ osnoise_cpus_read(struct file *filp, char __user *ubuf, size_t count,
+ loff_t *ppos)
+ {
+- char *mask_str;
++ char *mask_str __free(kfree) = NULL;
+ int len;
+
+- mutex_lock(&interface_lock);
++ guard(mutex)(&interface_lock);
+
+ len = snprintf(NULL, 0, "%*pbl\n", cpumask_pr_args(&osnoise_cpumask)) + 1;
+ mask_str = kmalloc(len, GFP_KERNEL);
+- if (!mask_str) {
+- count = -ENOMEM;
+- goto out_unlock;
+- }
++ if (!mask_str)
++ return -ENOMEM;
+
+ len = snprintf(mask_str, len, "%*pbl\n", cpumask_pr_args(&osnoise_cpumask));
+- if (len >= count) {
+- count = -EINVAL;
+- goto out_free;
+- }
++ if (len >= count)
++ return -EINVAL;
+
+ count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
+
+-out_free:
+- kfree(mask_str);
+-out_unlock:
+- mutex_unlock(&interface_lock);
+-
+ return count;
+ }
+
--- /dev/null
+From alvalan9@foxmail.com Mon Mar 30 14:34:20 2026
+From: Alva Lan <alvalan9@foxmail.com>
+Date: Mon, 30 Mar 2026 20:33:52 +0800
+Subject: xattr: switch to CLASS(fd)
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org, viro@zeniv.linux.org.uk
+Cc: linux-fsdevel@vger.kernel.org, Christian Brauner <brauner@kernel.org>, Alva Lan <alvalan9@foxmail.com>
+Message-ID: <tencent_72B5370E2D4C4AC319ED4F0DCB479CA4B406@qq.com>
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+[ Upstream commit a71874379ec8c6e788a61d71b3ad014a8d9a5c08 ]
+
+Reviewed-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+[ Only switch to CLASS(fd) in v6.6.y for fd_empty() was introduced in commit
+88a2f6468d01 ("struct fd: representation change") in 6.12. ]
+Signed-off-by: Alva Lan <alvalan9@foxmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xattr.c | 27 ++++++---------------------
+ 1 file changed, 6 insertions(+), 21 deletions(-)
+
+--- a/fs/xattr.c
++++ b/fs/xattr.c
+@@ -698,8 +698,6 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
+ int error;
+
+ CLASS(fd, f)(fd);
+- if (!f.file)
+- return -EBADF;
+
+ audit_file(f.file);
+ error = setxattr_copy(name, &ctx);
+@@ -810,16 +808,11 @@ SYSCALL_DEFINE4(lgetxattr, const char __
+ SYSCALL_DEFINE4(fgetxattr, int, fd, const char __user *, name,
+ void __user *, value, size_t, size)
+ {
+- struct fd f = fdget(fd);
+- ssize_t error = -EBADF;
++ CLASS(fd, f)(fd);
+
+- if (!f.file)
+- return error;
+ audit_file(f.file);
+- error = getxattr(file_mnt_idmap(f.file), f.file->f_path.dentry,
++ return getxattr(file_mnt_idmap(f.file), f.file->f_path.dentry,
+ name, value, size);
+- fdput(f);
+- return error;
+ }
+
+ /*
+@@ -886,15 +879,10 @@ SYSCALL_DEFINE3(llistxattr, const char _
+
+ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
+ {
+- struct fd f = fdget(fd);
+- ssize_t error = -EBADF;
++ CLASS(fd, f)(fd);
+
+- if (!f.file)
+- return error;
+ audit_file(f.file);
+- error = listxattr(f.file->f_path.dentry, list, size);
+- fdput(f);
+- return error;
++ return listxattr(f.file->f_path.dentry, list, size);
+ }
+
+ /*
+@@ -951,12 +939,10 @@ SYSCALL_DEFINE2(lremovexattr, const char
+
+ SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
+ {
+- struct fd f = fdget(fd);
++ CLASS(fd, f)(fd);
+ char kname[XATTR_NAME_MAX + 1];
+- int error = -EBADF;
++ int error;
+
+- if (!f.file)
+- return error;
+ audit_file(f.file);
+
+ error = strncpy_from_user(kname, name, sizeof(kname));
+@@ -971,7 +957,6 @@ SYSCALL_DEFINE2(fremovexattr, int, fd, c
+ f.file->f_path.dentry, kname);
+ mnt_drop_write_file(f.file);
+ }
+- fdput(f);
+ return error;
+ }
+