--- /dev/null
+From 9291f1a693d0d03bf18680fbaca6c32868bc95c5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 29 Mar 2025 17:46:35 +1030
+Subject: btrfs: avoid page_lockend underflow in btrfs_punch_hole_lock_range()
+
+From: Qu Wenruo <wqu@suse.com>
+
+[ Upstream commit bc2dbc4983afedd198490cca043798f57c93e9bf ]
+
+[BUG]
+When running btrfs/004 with a 4K fs block size and 64K page size, the
+fsstress workload can sometimes take 100% CPU for a while, but not long
+enough to trigger a 120s hang warning.
+
+[CAUSE]
+When such 100% CPU usage happens, btrfs_punch_hole_lock_range() is
+always in the call trace.
+
+In one example where this problem happens, the function
+btrfs_punch_hole_lock_range() got the following parameters:
+
+ lock_start = 4096, lockend = 20469
+
+Then we calculate @page_lockstart by rounding up lock_start to the page
+boundary, which gives 64K (the page size is 64K).
+
+For @page_lockend, we round the value down towards the page boundary,
+which results in 0. Then, since we need to pass an inclusive end to
+filemap_range_has_page(), we subtract 1 from the rounded-down value,
+resulting in (u64)-1.
+
+In the above case, the range is inside the same page, and we do not even
+need to call filemap_range_has_page(), let alone call it with (u64)-1
+as the end.
+
+This behavior causes btrfs_punch_hole_lock_range() to busy loop,
+waiting for an irrelevant range to have its pages dropped.
+
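+Working through the reported values above (page size 64K, all values
+in bytes), the old calculation gives:
+
+  page_lockstart = round_up(4096, 65536)            = 65536
+  page_lockend   = round_down(20469 + 1, 65536) - 1 = 0 - 1 = (u64)-1
+
+so filemap_range_has_page() is asked about [65536, (u64)-1] instead of
+nothing at all.
+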
+[FIX]
+Calculate @page_lockend by just rounding down @lockend, without
+decreasing the value by one, so @page_lockend will no longer underflow.
+
+Then exit early if @page_lockend is no larger than @page_lockstart, as
+that means the range is either inside the same page or the two pages
+are already adjacent.
+
+Finally only decrease @page_lockend when calling filemap_range_has_page().
+
+Fixes: 0528476b6ac7 ("btrfs: fix the filemap_range_has_page() call in btrfs_punch_hole_lock_range()")
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/file.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 68092b64e29ea..e794606e7c780 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -2225,15 +2225,20 @@ static void btrfs_punch_hole_lock_range(struct inode *inode,
+ * will always return true.
+ * So here we need to do extra page alignment for
+ * filemap_range_has_page().
++ *
++ * And do not decrease page_lockend right now, as it can be 0.
+ */
+ const u64 page_lockstart = round_up(lockstart, PAGE_SIZE);
+- const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE) - 1;
++ const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE);
+
+ while (1) {
+ truncate_pagecache_range(inode, lockstart, lockend);
+
+ lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+ cached_state);
++ /* The same page or adjacent pages. */
++ if (page_lockend <= page_lockstart)
++ break;
+ /*
+ * We can't have ordered extents in the range, nor dirty/writeback
+ * pages, because we have locked the inode's VFS lock in exclusive
+@@ -2245,7 +2250,7 @@ static void btrfs_punch_hole_lock_range(struct inode *inode,
+ * we do, unlock the range and retry.
+ */
+ if (!filemap_range_has_page(inode->i_mapping, page_lockstart,
+- page_lockend))
++ page_lockend - 1))
+ break;
+
+ unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+--
+2.39.5
+
--- /dev/null
+From 2e4d2b5f3d50c6a23ffdedbdd408d8c0e093270b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 12 Mar 2025 10:47:11 +0000
+Subject: ceph: Fix incorrect flush end position calculation
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit f452a2204614fc10e2c3b85904c4bd300c2789dc ]
+
+In ceph, in fill_fscrypt_truncate(), the end flush position is calculated
+by:
+
+ loff_t lend = orig_pos + CEPH_FSCRYPT_BLOCK_SHIFT - 1;
+
+but that's using the block shift, not the block size.
+
+Fix this to use the block size instead.
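+
+For illustration (assuming the usual CEPH_FSCRYPT_BLOCK_SHIFT of 12,
+i.e. a 4096-byte fscrypt block), the old code asked to flush only
+
+  [orig_pos, orig_pos + 12 - 1]
+
+instead of the intended
+
+  [orig_pos, orig_pos + 4096 - 1]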
+
+Fixes: 5c64737d2536 ("ceph: add truncate size handling support for fscrypt")
+Signed-off-by: David Howells <dhowells@redhat.com>
+Reviewed-by: Viacheslav Dubeyko <Slava.Dubeyko@ibm.com>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ceph/inode.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
+index db6977c15c282..f0befbeb6cb83 100644
+--- a/fs/ceph/inode.c
++++ b/fs/ceph/inode.c
+@@ -2319,7 +2319,7 @@ static int fill_fscrypt_truncate(struct inode *inode,
+
+ /* Try to writeback the dirty pagecaches */
+ if (issued & (CEPH_CAP_FILE_BUFFER)) {
+- loff_t lend = orig_pos + CEPH_FSCRYPT_BLOCK_SHIFT - 1;
++ loff_t lend = orig_pos + CEPH_FSCRYPT_BLOCK_SIZE - 1;
+
+ ret = filemap_write_and_wait_range(inode->i_mapping,
+ orig_pos, lend);
+--
+2.39.5
+
--- /dev/null
+From bc4600466e224f079cd5f90051554973af23a67e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 9 Apr 2025 20:48:13 +0800
+Subject: cpufreq: apple-soc: Fix null-ptr-deref in
+ apple_soc_cpufreq_get_rate()
+
+From: Henry Martin <bsdhenrymartin@gmail.com>
+
+[ Upstream commit 9992649f6786921873a9b89dafa5e04d8c5fef2b ]
+
+cpufreq_cpu_get_raw() can return NULL when the target CPU is not present
+in the policy->cpus mask. apple_soc_cpufreq_get_rate() does not check
+for this case, which results in a NULL pointer dereference.
+
+Fixes: 6286bbb40576 ("cpufreq: apple-soc: Add new driver to control Apple SoC CPU P-states")
+Signed-off-by: Henry Martin <bsdhenrymartin@gmail.com>
+Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/cpufreq/apple-soc-cpufreq.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/cpufreq/apple-soc-cpufreq.c b/drivers/cpufreq/apple-soc-cpufreq.c
+index 021f423705e1b..9ba6b09775f61 100644
+--- a/drivers/cpufreq/apple-soc-cpufreq.c
++++ b/drivers/cpufreq/apple-soc-cpufreq.c
+@@ -103,11 +103,17 @@ static const struct of_device_id apple_soc_cpufreq_of_match[] = {
+
+ static unsigned int apple_soc_cpufreq_get_rate(unsigned int cpu)
+ {
+- struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
+- struct apple_cpu_priv *priv = policy->driver_data;
++ struct cpufreq_policy *policy;
++ struct apple_cpu_priv *priv;
+ struct cpufreq_frequency_table *p;
+ unsigned int pstate;
+
++ policy = cpufreq_cpu_get_raw(cpu);
++ if (unlikely(!policy))
++ return 0;
++
++ priv = policy->driver_data;
++
+ if (priv->info->cur_pstate_mask) {
+ u64 reg = readq_relaxed(priv->reg_base + APPLE_DVFS_STATUS);
+
+--
+2.39.5
+
--- /dev/null
+From 34942286d6674bca4704f75c72704b254d092bd0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 13 Apr 2025 11:11:42 +0100
+Subject: cpufreq: cppc: Fix invalid return value in .get() callback
+
+From: Marc Zyngier <maz@kernel.org>
+
+[ Upstream commit 2b8e6b58889c672e1ae3601d9b2b070be4dc2fbc ]
+
+Returning a negative error code in a function with an unsigned
+return type is a pretty bad idea. It is probably worse when the
+justification for the change is "our static analysis tool found it".
+
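+A sketch of why this matters: cppc_cpufreq_get_rate() returns the rate
+in kHz as an unsigned int, so a negative errno is silently turned into
+a huge bogus frequency instead of an error:
+
+  (unsigned int)-ENODEV == 4294967277   /* ~4.29 THz, not an error */
+
+Returning 0 instead matches the other cpufreq .get() fixes in this
+series.
+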
+Fixes: cf7de25878a1 ("cppc_cpufreq: Fix possible null pointer dereference")
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Cc: "Rafael J. Wysocki" <rafael@kernel.org>
+Cc: Viresh Kumar <viresh.kumar@linaro.org>
+Reviewed-by: Lifeng Zheng <zhenglifeng1@huawei.com>
+Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/cpufreq/cppc_cpufreq.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
+index c8447ecad797e..aa34af940cb53 100644
+--- a/drivers/cpufreq/cppc_cpufreq.c
++++ b/drivers/cpufreq/cppc_cpufreq.c
+@@ -773,7 +773,7 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
+ int ret;
+
+ if (!policy)
+- return -ENODEV;
++ return 0;
+
+ cpu_data = policy->driver_data;
+
+--
+2.39.5
+
--- /dev/null
+From ca8530ccd7632b012e2d008fca097d2f99e3a46b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 8 Apr 2025 23:03:53 +0800
+Subject: cpufreq: scmi: Fix null-ptr-deref in scmi_cpufreq_get_rate()
+
+From: Henry Martin <bsdhenrymartin@gmail.com>
+
+[ Upstream commit 484d3f15cc6cbaa52541d6259778e715b2c83c54 ]
+
+cpufreq_cpu_get_raw() can return NULL when the target CPU is not present
+in the policy->cpus mask. scmi_cpufreq_get_rate() does not check for
+this case, which results in a NULL pointer dereference.
+
+Add NULL check after cpufreq_cpu_get_raw() to prevent this issue.
+
+Fixes: 99d6bdf33877 ("cpufreq: add support for CPU DVFS based on SCMI message protocol")
+Signed-off-by: Henry Martin <bsdhenrymartin@gmail.com>
+Acked-by: Sudeep Holla <sudeep.holla@arm.com>
+Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/cpufreq/scmi-cpufreq.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
+index 079940c69ee0b..e4989764efe2a 100644
+--- a/drivers/cpufreq/scmi-cpufreq.c
++++ b/drivers/cpufreq/scmi-cpufreq.c
+@@ -33,11 +33,17 @@ static const struct scmi_perf_proto_ops *perf_ops;
+
+ static unsigned int scmi_cpufreq_get_rate(unsigned int cpu)
+ {
+- struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
+- struct scmi_data *priv = policy->driver_data;
++ struct cpufreq_policy *policy;
++ struct scmi_data *priv;
+ unsigned long rate;
+ int ret;
+
++ policy = cpufreq_cpu_get_raw(cpu);
++ if (unlikely(!policy))
++ return 0;
++
++ priv = policy->driver_data;
++
+ ret = perf_ops->freq_get(ph, priv->domain_id, &rate, false);
+ if (ret)
+ return 0;
+--
+2.39.5
+
--- /dev/null
+From 286daeb32dd7b7973d446a593a19616f8735e3bb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 8 Apr 2025 23:03:54 +0800
+Subject: cpufreq: scpi: Fix null-ptr-deref in scpi_cpufreq_get_rate()
+
+From: Henry Martin <bsdhenrymartin@gmail.com>
+
+[ Upstream commit 73b24dc731731edf762f9454552cb3a5b7224949 ]
+
+cpufreq_cpu_get_raw() can return NULL when the target CPU is not present
+in the policy->cpus mask. scpi_cpufreq_get_rate() does not check for
+this case, which results in a NULL pointer dereference.
+
+Fixes: 343a8d17fa8d ("cpufreq: scpi: remove arm_big_little dependency")
+Signed-off-by: Henry Martin <bsdhenrymartin@gmail.com>
+Acked-by: Sudeep Holla <sudeep.holla@arm.com>
+Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/cpufreq/scpi-cpufreq.c | 13 ++++++++++---
+ 1 file changed, 10 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
+index bfc2e65e1e502..2aef39bff7d6f 100644
+--- a/drivers/cpufreq/scpi-cpufreq.c
++++ b/drivers/cpufreq/scpi-cpufreq.c
+@@ -29,9 +29,16 @@ static struct scpi_ops *scpi_ops;
+
+ static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)
+ {
+- struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
+- struct scpi_data *priv = policy->driver_data;
+- unsigned long rate = clk_get_rate(priv->clk);
++ struct cpufreq_policy *policy;
++ struct scpi_data *priv;
++ unsigned long rate;
++
++ policy = cpufreq_cpu_get_raw(cpu);
++ if (unlikely(!policy))
++ return 0;
++
++ priv = policy->driver_data;
++ rate = clk_get_rate(priv->clk);
+
+ return rate / 1000;
+ }
+--
+2.39.5
+
--- /dev/null
+From 18f14c4cc6c11510ee60062d972006d54996b520 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 9 Apr 2025 17:15:42 +0200
+Subject: dma/contiguous: avoid warning about unused size_bytes
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+[ Upstream commit d7b98ae5221007d3f202746903d4c21c7caf7ea9 ]
+
+When building with W=1, this variable is unused for configs with
+CONFIG_CMA_SIZE_SEL_PERCENTAGE=y:
+
+kernel/dma/contiguous.c:67:26: error: 'size_bytes' defined but not used [-Werror=unused-const-variable=]
+
+Change this to a macro to avoid the warning.
+
+Fixes: c64be2bb1c6e ("drivers: add Contiguous Memory Allocator")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Link: https://lore.kernel.org/r/20250409151557.3890443-1-arnd@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/dma/contiguous.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
+index f005c66f378c3..a600819799637 100644
+--- a/kernel/dma/contiguous.c
++++ b/kernel/dma/contiguous.c
+@@ -70,8 +70,7 @@ struct cma *dma_contiguous_default_area;
+ * Users, who want to set the size of global CMA area for their system
+ * should use cma= kernel parameter.
+ */
+-static const phys_addr_t size_bytes __initconst =
+- (phys_addr_t)CMA_SIZE_MBYTES * SZ_1M;
++#define size_bytes ((phys_addr_t)CMA_SIZE_MBYTES * SZ_1M)
+ static phys_addr_t size_cmdline __initdata = -1;
+ static phys_addr_t base_cmdline __initdata;
+ static phys_addr_t limit_cmdline __initdata;
+--
+2.39.5
+
--- /dev/null
+From 4d9ec80ed1d1c731a99d60291834bb71a55a3f5f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Apr 2025 02:30:34 +0100
+Subject: fix a couple of races in MNT_TREE_BENEATH handling by do_move_mount()
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+[ Upstream commit 0d039eac6e5950f9d1ecc9e410c2fd1feaeab3b6 ]
+
+Normally do_lock_mount(path, _) is locking a mountpoint pinned by
+*path and at the time when matching unlock_mount() unlocks that
+location it is still pinned by the same thing.
+
+Unfortunately, for the 'beneath' case it's no longer that simple -
+the object being locked is not the one *path points to. It's the
+mountpoint of path->mnt. The thing is, without sufficient locking
+->mnt_parent may change under us and none of the locks are held
+at that point. The rules are
+ * mount_lock stabilizes m->mnt_parent for any mount m.
+ * namespace_sem stabilizes m->mnt_parent, provided that
+m is mounted.
+ * if either of the above holds and refcount of m is positive,
+we are guaranteed the same for refcount of m->mnt_parent.
+
+namespace_sem nests inside inode_lock(), so do_lock_mount() has
+to take inode_lock() before grabbing namespace_sem. It does
+recheck that path->mnt is still mounted in the same place after
+getting namespace_sem, and it does take care to pin the dentry.
+It is needed, since otherwise we might end up with racing mount --move
+(or umount) happening while we were getting locks; in that case
+dentry would no longer be a mountpoint and could've been evicted
+on memory pressure along with its inode - not something you want
+when grabbing lock on that inode.
+
+However, pinning a dentry is not enough - the matching mount is
+also pinned only by the fact that path->mnt is mounted on top of it,
+and at that point we are not holding any locks whatsoever, so
+the same kind of races could end up with all references to
+that mount gone just as we are about to enter inode_lock().
+If that happens, we are left with filesystem being shut down while
+we are holding a dentry reference on it; results are not pretty.
+
+What we need to do is grab both dentry and mount at the same time;
+that makes inode_lock() safe *and* avoids the problem with fs getting
+shut down under us. After taking namespace_sem we verify that
+path->mnt is still mounted (which stabilizes its ->mnt_parent) and
+check that it's still mounted at the same place. From that point
+on to the matching namespace_unlock() we are guaranteed that
+mount/dentry pair we'd grabbed are also pinned by being the mountpoint
+of path->mnt, so we can quietly drop both the dentry reference (as
+the current code does) and mnt one - it's OK to do under namespace_sem,
+since we are not dropping the final refs.
+
+That solves the problem on do_lock_mount() side; unlock_mount()
+also has one, since dentry is guaranteed to stay pinned only until
+the namespace_unlock(). That's easy to fix - just have inode_unlock()
+done earlier, while it's still pinned by mp->m_dentry.
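+
+Roughly, the do_lock_mount() loop for the 'beneath' case now looks like
+this (see the hunks below):
+
+  grab {m->mnt_parent->mnt, m->mnt_mountpoint} under mount_lock
+  inode_lock(dentry->d_inode); namespace_lock();
+  recheck that path->mnt is still mounted in the same place and that
+    nothing got mounted on top; if anything changed, unlock and retry
+  otherwise get_mountpoint(dentry) and return with both locks held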
+
+Fixes: 6ac392815628 ("fs: allow to mount beneath top mount") # v6.5+
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/namespace.c | 69 ++++++++++++++++++++++++++------------------------
+ 1 file changed, 36 insertions(+), 33 deletions(-)
+
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 671e266b8fc5d..5a885d35efe93 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -2439,56 +2439,62 @@ static struct mountpoint *do_lock_mount(struct path *path, bool beneath)
+ struct vfsmount *mnt = path->mnt;
+ struct dentry *dentry;
+ struct mountpoint *mp = ERR_PTR(-ENOENT);
++ struct path under = {};
+
+ for (;;) {
+- struct mount *m;
++ struct mount *m = real_mount(mnt);
+
+ if (beneath) {
+- m = real_mount(mnt);
++ path_put(&under);
+ read_seqlock_excl(&mount_lock);
+- dentry = dget(m->mnt_mountpoint);
++ under.mnt = mntget(&m->mnt_parent->mnt);
++ under.dentry = dget(m->mnt_mountpoint);
+ read_sequnlock_excl(&mount_lock);
++ dentry = under.dentry;
+ } else {
+ dentry = path->dentry;
+ }
+
+ inode_lock(dentry->d_inode);
+- if (unlikely(cant_mount(dentry))) {
+- inode_unlock(dentry->d_inode);
+- goto out;
+- }
+-
+ namespace_lock();
+
+- if (beneath && (!is_mounted(mnt) || m->mnt_mountpoint != dentry)) {
++ if (unlikely(cant_mount(dentry) || !is_mounted(mnt)))
++ break; // not to be mounted on
++
++ if (beneath && unlikely(m->mnt_mountpoint != dentry ||
++ &m->mnt_parent->mnt != under.mnt)) {
+ namespace_unlock();
+ inode_unlock(dentry->d_inode);
+- goto out;
++ continue; // got moved
+ }
+
+ mnt = lookup_mnt(path);
+- if (likely(!mnt))
++ if (unlikely(mnt)) {
++ namespace_unlock();
++ inode_unlock(dentry->d_inode);
++ path_put(path);
++ path->mnt = mnt;
++ path->dentry = dget(mnt->mnt_root);
++ continue; // got overmounted
++ }
++ mp = get_mountpoint(dentry);
++ if (IS_ERR(mp))
+ break;
+-
+- namespace_unlock();
+- inode_unlock(dentry->d_inode);
+- if (beneath)
+- dput(dentry);
+- path_put(path);
+- path->mnt = mnt;
+- path->dentry = dget(mnt->mnt_root);
+- }
+-
+- mp = get_mountpoint(dentry);
+- if (IS_ERR(mp)) {
+- namespace_unlock();
+- inode_unlock(dentry->d_inode);
++ if (beneath) {
++ /*
++ * @under duplicates the references that will stay
++ * at least until namespace_unlock(), so the path_put()
++ * below is safe (and OK to do under namespace_lock -
++ * we are not dropping the final references here).
++ */
++ path_put(&under);
++ }
++ return mp;
+ }
+-
+-out:
++ namespace_unlock();
++ inode_unlock(dentry->d_inode);
+ if (beneath)
+- dput(dentry);
+-
++ path_put(&under);
+ return mp;
+ }
+
+@@ -2499,14 +2505,11 @@ static inline struct mountpoint *lock_mount(struct path *path)
+
+ static void unlock_mount(struct mountpoint *where)
+ {
+- struct dentry *dentry = where->m_dentry;
+-
++ inode_unlock(where->m_dentry->d_inode);
+ read_seqlock_excl(&mount_lock);
+ put_mountpoint(where);
+ read_sequnlock_excl(&mount_lock);
+-
+ namespace_unlock();
+- inode_unlock(dentry->d_inode);
+ }
+
+ static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
+--
+2.39.5
+
--- /dev/null
+From a8f42f7539d869f9ee56931d1620310e8593014f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 4 Apr 2025 12:38:20 -0700
+Subject: iommu/amd: Return an error if vCPU affinity is set for non-vCPU IRTE
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit 07172206a26dcf3f0bf7c3ecaadd4242b008ea54 ]
+
+Return -EINVAL instead of success if amd_ir_set_vcpu_affinity() is
+invoked without use_vapic; lying to KVM about whether or not the IRTE was
+configured to post IRQs is all kinds of bad.
+
+Fixes: d98de49a53e4 ("iommu/amd: Enable vAPIC interrupt remapping mode by default")
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-ID: <20250404193923.1413163-6-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/amd/iommu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
+index 95bd7c25ba6f3..83c5d786686d0 100644
+--- a/drivers/iommu/amd/iommu.c
++++ b/drivers/iommu/amd/iommu.c
+@@ -3619,7 +3619,7 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
+ * we should not modify the IRTE
+ */
+ if (!dev_data || !dev_data->use_vapic)
+- return 0;
++ return -EINVAL;
+
+ ir_data->cfg = irqd_cfg(data);
+ pi_data->ir_data = ir_data;
+--
+2.39.5
+
--- /dev/null
+From b3b6c6b863f868056bdf0f4561a546fd5aff4f5e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Apr 2025 20:15:41 +0800
+Subject: LoongArch: Make do_xyz() exception handlers more robust
+
+From: Tiezhu Yang <yangtiezhu@loongson.cn>
+
+[ Upstream commit cc73cc6bcdb5f959670e3ff9abdc62461452ddff ]
+
+Currently, interrupts need to be disabled before single-step mode is
+set; this requires that CSR_PRMD_PIE be cleared in save_local_irqflag(),
+which is called by setup_singlestep(), and that is reasonable.
+
+But in the first kprobe breakpoint exception, if irqs are enabled at
+the beginning of do_bp(), they will not be disabled at the end of
+do_bp() because CSR_PRMD_PIE has been cleared in save_local_irqflag().
+In that case the exception context may be corrupted when the exception
+is restored after do_bp() in handle_bp(), which is not reasonable.
+
+In order to restore the exception safely in handle_bp(), we need to
+ensure that irqs are disabled at the end of do_bp(), so just add a local
+variable to record the original interrupt status of the parent context,
+then use it as the condition for enabling and disabling irqs in do_bp().
+
+While at it, do the same thing for the other do_xyz() exception
+handlers to make them more robust.
+
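+The resulting pattern in each affected handler (as in the hunks below)
+is roughly:
+
+  bool pie = regs_irqs_disabled(regs);  /* irq state of the parent context */
+
+  if (!pie)
+          local_irq_enable();
+  ...
+  if (!pie)
+          local_irq_disable();
+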
+Fixes: 6d4cc40fb5f5 ("LoongArch: Add kprobes support")
+Suggested-by: Jinyang He <hejinyang@loongson.cn>
+Suggested-by: Huacai Chen <chenhuacai@loongson.cn>
+Co-developed-by: Tianyang Zhang <zhangtianyang@loongson.cn>
+Signed-off-by: Tianyang Zhang <zhangtianyang@loongson.cn>
+Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/loongarch/kernel/traps.c | 20 ++++++++++++--------
+ 1 file changed, 12 insertions(+), 8 deletions(-)
+
+diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c
+index d59052c03d9b7..2b4b99b4e6c94 100644
+--- a/arch/loongarch/kernel/traps.c
++++ b/arch/loongarch/kernel/traps.c
+@@ -527,9 +527,10 @@ asmlinkage void noinstr do_ale(struct pt_regs *regs)
+ die_if_kernel("Kernel ale access", regs);
+ force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
+ #else
++ bool pie = regs_irqs_disabled(regs);
+ unsigned int *pc;
+
+- if (regs->csr_prmd & CSR_PRMD_PIE)
++ if (!pie)
+ local_irq_enable();
+
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr);
+@@ -556,7 +557,7 @@ asmlinkage void noinstr do_ale(struct pt_regs *regs)
+ die_if_kernel("Kernel ale access", regs);
+ force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
+ out:
+- if (regs->csr_prmd & CSR_PRMD_PIE)
++ if (!pie)
+ local_irq_disable();
+ #endif
+ irqentry_exit(regs, state);
+@@ -588,12 +589,13 @@ static void bug_handler(struct pt_regs *regs)
+ asmlinkage void noinstr do_bce(struct pt_regs *regs)
+ {
+ bool user = user_mode(regs);
++ bool pie = regs_irqs_disabled(regs);
+ unsigned long era = exception_era(regs);
+ u64 badv = 0, lower = 0, upper = ULONG_MAX;
+ union loongarch_instruction insn;
+ irqentry_state_t state = irqentry_enter(regs);
+
+- if (regs->csr_prmd & CSR_PRMD_PIE)
++ if (!pie)
+ local_irq_enable();
+
+ current->thread.trap_nr = read_csr_excode();
+@@ -659,7 +661,7 @@ asmlinkage void noinstr do_bce(struct pt_regs *regs)
+ force_sig_bnderr((void __user *)badv, (void __user *)lower, (void __user *)upper);
+
+ out:
+- if (regs->csr_prmd & CSR_PRMD_PIE)
++ if (!pie)
+ local_irq_disable();
+
+ irqentry_exit(regs, state);
+@@ -677,11 +679,12 @@ asmlinkage void noinstr do_bce(struct pt_regs *regs)
+ asmlinkage void noinstr do_bp(struct pt_regs *regs)
+ {
+ bool user = user_mode(regs);
++ bool pie = regs_irqs_disabled(regs);
+ unsigned int opcode, bcode;
+ unsigned long era = exception_era(regs);
+ irqentry_state_t state = irqentry_enter(regs);
+
+- if (regs->csr_prmd & CSR_PRMD_PIE)
++ if (!pie)
+ local_irq_enable();
+
+ if (__get_inst(&opcode, (u32 *)era, user))
+@@ -747,7 +750,7 @@ asmlinkage void noinstr do_bp(struct pt_regs *regs)
+ }
+
+ out:
+- if (regs->csr_prmd & CSR_PRMD_PIE)
++ if (!pie)
+ local_irq_disable();
+
+ irqentry_exit(regs, state);
+@@ -982,6 +985,7 @@ static void init_restore_lbt(void)
+
+ asmlinkage void noinstr do_lbt(struct pt_regs *regs)
+ {
++ bool pie = regs_irqs_disabled(regs);
+ irqentry_state_t state = irqentry_enter(regs);
+
+ /*
+@@ -991,7 +995,7 @@ asmlinkage void noinstr do_lbt(struct pt_regs *regs)
+ * (including the user using 'MOVGR2GCSR' to turn on TM, which
+ * will not trigger the BTE), we need to check PRMD first.
+ */
+- if (regs->csr_prmd & CSR_PRMD_PIE)
++ if (!pie)
+ local_irq_enable();
+
+ if (!cpu_has_lbt) {
+@@ -1005,7 +1009,7 @@ asmlinkage void noinstr do_lbt(struct pt_regs *regs)
+ preempt_enable();
+
+ out:
+- if (regs->csr_prmd & CSR_PRMD_PIE)
++ if (!pie)
+ local_irq_disable();
+
+ irqentry_exit(regs, state);
+--
+2.39.5
+
--- /dev/null
+From 141423f4ac625fcde183238163a18c7ae6e70bbf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Apr 2025 20:15:41 +0800
+Subject: LoongArch: Make regs_irqs_disabled() more clear
+
+From: Tiezhu Yang <yangtiezhu@loongson.cn>
+
+[ Upstream commit bb0511d59db9b3e40c8d51f0d151ccd0fd44071d ]
+
+In the current code, the definition of regs_irqs_disabled() is actually
+"!(regs->csr_prmd & CSR_CRMD_IE)" because arch_irqs_disabled_flags() is
+defined as "!(flags & CSR_CRMD_IE)", which looks a little strange.
+
+Define regs_irqs_disabled() as !(regs->csr_prmd & CSR_PRMD_PIE) directly
+to make it more clear, no functional change.
+
+While at it, the return value of regs_irqs_disabled() is true or false,
+so change its type to reflect that and also make it always inline.
+
+Fixes: 803b0fc5c3f2 ("LoongArch: Add process management")
+Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/loongarch/include/asm/ptrace.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/loongarch/include/asm/ptrace.h b/arch/loongarch/include/asm/ptrace.h
+index f3ddaed9ef7f0..a5b63c84f8541 100644
+--- a/arch/loongarch/include/asm/ptrace.h
++++ b/arch/loongarch/include/asm/ptrace.h
+@@ -33,9 +33,9 @@ struct pt_regs {
+ unsigned long __last[];
+ } __aligned(8);
+
+-static inline int regs_irqs_disabled(struct pt_regs *regs)
++static __always_inline bool regs_irqs_disabled(struct pt_regs *regs)
+ {
+- return arch_irqs_disabled_flags(regs->csr_prmd);
++ return !(regs->csr_prmd & CSR_PRMD_PIE);
+ }
+
+ static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+--
+2.39.5
+
--- /dev/null
+From 37a71c786f66e0275791b985deac800120211238 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Apr 2025 20:15:22 +0800
+Subject: LoongArch: Select ARCH_USE_MEMTEST
+
+From: Yuli Wang <wangyuli@uniontech.com>
+
+[ Upstream commit fb8e9f59d6f292c3d9fea6c155c22ea5fc3053ab ]
+
+As of commit dce44566192e ("mm/memtest: add ARCH_USE_MEMTEST"),
+architectures must select ARCH_USE_MEMTEST to enable CONFIG_MEMTEST.
+
+Commit 628c3bb40e9a ("LoongArch: Add boot and setup routines") added
+support for early_memtest but did not select ARCH_USE_MEMTEST.
+
+Fixes: 628c3bb40e9a ("LoongArch: Add boot and setup routines")
+Tested-by: Erpeng Xu <xuerpeng@uniontech.com>
+Tested-by: Yuli Wang <wangyuli@uniontech.com>
+Signed-off-by: Yuli Wang <wangyuli@uniontech.com>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/loongarch/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
+index 623cf80639dec..25aa993abebce 100644
+--- a/arch/loongarch/Kconfig
++++ b/arch/loongarch/Kconfig
+@@ -59,6 +59,7 @@ config LOONGARCH
+ select ARCH_SUPPORTS_NUMA_BALANCING
+ select ARCH_USE_BUILTIN_BSWAP
+ select ARCH_USE_CMPXCHG_LOCKREF
++ select ARCH_USE_MEMTEST
+ select ARCH_USE_QUEUED_RWLOCKS
+ select ARCH_USE_QUEUED_SPINLOCKS
+ select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
+--
+2.39.5
+
--- /dev/null
+From 55e5913cc3c8cbe98951842216958f78a9cd926b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Apr 2025 04:10:20 +0100
+Subject: net: dsa: mt7530: sync driver-specific behavior of MT7531 variants
+
+From: Daniel Golle <daniel@makrotopia.org>
+
+[ Upstream commit 497041d763016c2e8314d2f6a329a9b77c3797ca ]
+
+MT7531 standalone and MMIO variants found in MT7988 and EN7581 share
+most basic properties. Despite that, assisted_learning_on_cpu_port and
+mtu_enforcement_ingress were only applied for MT7531 but not for MT7988
+or EN7581, causing the expected issues on MMIO devices.
+
+Apply both settings equally also for MT7988 and EN7581 by moving both
+assignments from mt7531_setup() to mt7531_setup_common().
+
+This fixes unwanted flooding of packets due to unknown unicast
+during DA lookup, as well as issues with heterogeneous MTU settings.
+
+Fixes: 7f54cc9772ce ("net: dsa: mt7530: split-off common parts from mt7531_setup")
+Signed-off-by: Daniel Golle <daniel@makrotopia.org>
+Reviewed-by: Chester A. Unal <chester.a.unal@arinc9.com>
+Link: https://patch.msgid.link/89ed7ec6d4fa0395ac53ad2809742bb1ce61ed12.1745290867.git.daniel@makrotopia.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/mt7530.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
+index 90ab2f1058ce0..2d18a03d92742 100644
+--- a/drivers/net/dsa/mt7530.c
++++ b/drivers/net/dsa/mt7530.c
+@@ -2596,6 +2596,9 @@ mt7531_setup_common(struct dsa_switch *ds)
+ struct mt7530_priv *priv = ds->priv;
+ int ret, i;
+
++ ds->assisted_learning_on_cpu_port = true;
++ ds->mtu_enforcement_ingress = true;
++
+ mt753x_trap_frames(priv);
+
+ /* Enable and reset MIB counters */
+@@ -2735,9 +2738,6 @@ mt7531_setup(struct dsa_switch *ds)
+
+ mt7531_setup_common(ds);
+
+- ds->assisted_learning_on_cpu_port = true;
+- ds->mtu_enforcement_ingress = true;
+-
+ return 0;
+ }
+
+--
+2.39.5
+
--- /dev/null
+From 2cadb5613d8c09ede9183f6606a5a5c0dd77a797 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Apr 2025 17:41:07 +0100
+Subject: net: ethernet: mtk_eth_soc: net: revise NETSYSv3 hardware
+ configuration
+
+From: Bo-Cun Chen <bc-bocun.chen@mediatek.com>
+
+[ Upstream commit 491ef1117c56476f199b481f8c68820fe4c3a7c2 ]
+
+Change hardware configuration for the NETSYSv3.
+ - Enable PSE dummy page mechanism for the GDM1/2/3
+ - Enable PSE drop mechanism when the WDMA Rx ring full
+ - Enable PSE no-drop mechanism for packets from the WDMA Tx
+ - Correct PSE free drop threshold
+ - Correct PSE CDMA high threshold
+
+Fixes: 1953f134a1a8b ("net: ethernet: mtk_eth_soc: add NETSYS_V3 version support")
+Signed-off-by: Bo-Cun Chen <bc-bocun.chen@mediatek.com>
+Signed-off-by: Daniel Golle <daniel@makrotopia.org>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/b71f8fd9d4bb69c646c4d558f9331dd965068606.1744907886.git.daniel@makrotopia.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 24 +++++++++++++++++----
+ drivers/net/ethernet/mediatek/mtk_eth_soc.h | 10 ++++++++-
+ 2 files changed, 29 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index c201ea20e4047..dc89dbc13b251 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -3949,11 +3949,27 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
+ mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
+
+ if (mtk_is_netsys_v3_or_greater(eth)) {
+- /* PSE should not drop port1, port8 and port9 packets */
+- mtk_w32(eth, 0x00000302, PSE_DROP_CFG);
++ /* PSE dummy page mechanism */
++ mtk_w32(eth, PSE_DUMMY_WORK_GDM(1) | PSE_DUMMY_WORK_GDM(2) |
++ PSE_DUMMY_WORK_GDM(3) | DUMMY_PAGE_THR, PSE_DUMY_REQ);
++
++ /* PSE free buffer drop threshold */
++ mtk_w32(eth, 0x00600009, PSE_IQ_REV(8));
++
++ /* PSE should not drop port8, port9 and port13 packets from
++ * WDMA Tx
++ */
++ mtk_w32(eth, 0x00002300, PSE_DROP_CFG);
++
++ /* PSE should drop packets to port8, port9 and port13 on WDMA Rx
++ * ring full
++ */
++ mtk_w32(eth, 0x00002300, PSE_PPE_DROP(0));
++ mtk_w32(eth, 0x00002300, PSE_PPE_DROP(1));
++ mtk_w32(eth, 0x00002300, PSE_PPE_DROP(2));
+
+ /* GDM and CDM Threshold */
+- mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES);
++ mtk_w32(eth, 0x08000707, MTK_CDMW0_THRES);
+ mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);
+
+ /* Disable GDM1 RX CRC stripping */
+@@ -3970,7 +3986,7 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
+ mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
+
+ /* PSE should drop packets to port 8/9 on WDMA Rx ring full */
+- mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
++ mtk_w32(eth, 0x00000300, PSE_PPE_DROP(0));
+
+ /* PSE Free Queue Flow Control */
+ mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+index 403219d987eff..d1c7b5f1ee4a9 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -149,7 +149,15 @@
+ #define PSE_FQFC_CFG1 0x100
+ #define PSE_FQFC_CFG2 0x104
+ #define PSE_DROP_CFG 0x108
+-#define PSE_PPE0_DROP 0x110
++#define PSE_PPE_DROP(x) (0x110 + ((x) * 0x4))
++
++/* PSE Last FreeQ Page Request Control */
++#define PSE_DUMY_REQ 0x10C
++/* PSE_DUMY_REQ is not a typo but actually called like that also in
++ * MediaTek's datasheet
++ */
++#define PSE_DUMMY_WORK_GDM(x) BIT(16 + (x))
++#define DUMMY_PAGE_THR 0x1
+
+ /* PSE Input Queue Reservation Register*/
+ #define PSE_IQ_REV(x) (0x140 + (((x) - 1) << 2))
+--
+2.39.5
+
--- /dev/null
+From 8dab15dc95a39da4b0bcc5c308d04bc3b190a039 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 16 Apr 2025 18:07:16 +0200
+Subject: net: lwtunnel: disable BHs when required
+
+From: Justin Iurman <justin.iurman@uliege.be>
+
+[ Upstream commit c03a49f3093a4903c8a93c8b5c9a297b5343b169 ]
+
+In lwtunnel_{output|xmit}(), dev_xmit_recursion() may be called in
+preemptible scope for PREEMPT kernels. This patch disables BHs before
+calling dev_xmit_recursion(). BHs are re-enabled only at the end, since
+we must ensure the same CPU is used for both dev_xmit_recursion_inc()
+and dev_xmit_recursion_dec() (and any other recursion levels in some
+cases) in order to maintain valid per-cpu counters.
+
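+The resulting shape of lwtunnel_output() and lwtunnel_xmit() (see the
+hunks below) is roughly:
+
+  local_bh_disable();
+  if (dev_xmit_recursion())
+          goto drop;
+  ...
+out:
+  local_bh_enable();
+  return ret;
+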
+Reported-by: Alexei Starovoitov <alexei.starovoitov@gmail.com>
+Closes: https://lore.kernel.org/netdev/CAADnVQJFWn3dBFJtY+ci6oN1pDFL=TzCmNbRgey7MdYxt_AP2g@mail.gmail.com/
+Reported-by: Eduard Zingerman <eddyz87@gmail.com>
+Closes: https://lore.kernel.org/netdev/m2h62qwf34.fsf@gmail.com/
+Fixes: 986ffb3a57c5 ("net: lwtunnel: fix recursion loops")
+Signed-off-by: Justin Iurman <justin.iurman@uliege.be>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20250416160716.8823-1-justin.iurman@uliege.be
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/lwtunnel.c | 26 ++++++++++++++++++++------
+ 1 file changed, 20 insertions(+), 6 deletions(-)
+
+diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
+index 4417a18b3e951..f63586c9ce021 100644
+--- a/net/core/lwtunnel.c
++++ b/net/core/lwtunnel.c
+@@ -332,6 +332,8 @@ int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ struct dst_entry *dst;
+ int ret;
+
++ local_bh_disable();
++
+ if (dev_xmit_recursion()) {
+ net_crit_ratelimited("%s(): recursion limit reached on datapath\n",
+ __func__);
+@@ -347,8 +349,10 @@ int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ lwtstate = dst->lwtstate;
+
+ if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
+- lwtstate->type > LWTUNNEL_ENCAP_MAX)
+- return 0;
++ lwtstate->type > LWTUNNEL_ENCAP_MAX) {
++ ret = 0;
++ goto out;
++ }
+
+ ret = -EOPNOTSUPP;
+ rcu_read_lock();
+@@ -363,11 +367,13 @@ int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ if (ret == -EOPNOTSUPP)
+ goto drop;
+
+- return ret;
++ goto out;
+
+ drop:
+ kfree_skb(skb);
+
++out:
++ local_bh_enable();
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(lwtunnel_output);
+@@ -379,6 +385,8 @@ int lwtunnel_xmit(struct sk_buff *skb)
+ struct dst_entry *dst;
+ int ret;
+
++ local_bh_disable();
++
+ if (dev_xmit_recursion()) {
+ net_crit_ratelimited("%s(): recursion limit reached on datapath\n",
+ __func__);
+@@ -395,8 +403,10 @@ int lwtunnel_xmit(struct sk_buff *skb)
+ lwtstate = dst->lwtstate;
+
+ if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
+- lwtstate->type > LWTUNNEL_ENCAP_MAX)
+- return 0;
++ lwtstate->type > LWTUNNEL_ENCAP_MAX) {
++ ret = 0;
++ goto out;
++ }
+
+ ret = -EOPNOTSUPP;
+ rcu_read_lock();
+@@ -411,11 +421,13 @@ int lwtunnel_xmit(struct sk_buff *skb)
+ if (ret == -EOPNOTSUPP)
+ goto drop;
+
+- return ret;
++ goto out;
+
+ drop:
+ kfree_skb(skb);
+
++out:
++ local_bh_enable();
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(lwtunnel_xmit);
+@@ -427,6 +439,8 @@ int lwtunnel_input(struct sk_buff *skb)
+ struct dst_entry *dst;
+ int ret;
+
++ DEBUG_NET_WARN_ON_ONCE(!in_softirq());
++
+ if (dev_xmit_recursion()) {
+ net_crit_ratelimited("%s(): recursion limit reached on datapath\n",
+ __func__);
+--
+2.39.5
+
--- /dev/null
+From 1e17eeb45793a5be8b64394b2926f45d78ecd646 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Apr 2025 11:25:56 +0800
+Subject: net: phy: leds: fix memory leak
+
+From: Qingfang Deng <qingfang.deng@siflower.com.cn>
+
+[ Upstream commit b7f0ee992adf601aa00c252418266177eb7ac2bc ]
+
+A network restart test on a router led to an out-of-memory condition,
+which was traced to a memory leak in the PHY LED trigger code.
+
+The root cause is misuse of the devm API. The registration function
+(phy_led_triggers_register) is called from phy_attach_direct, not
+phy_probe, and the unregister function (phy_led_triggers_unregister)
+is called from phy_detach, not phy_remove. This means the register and
+unregister functions can be called multiple times for the same PHY
+device, but devm-allocated memory is not freed until the driver is
+unbound.
+
+This also prevents kmemleak from detecting the leak, as the devm API
+internally stores the allocated pointer.
+
+Fix this by replacing devm_kzalloc/devm_kcalloc with standard
+kzalloc/kcalloc, and add the corresponding kfree calls in the unregister
+path.
+
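+A rough sketch of the lifecycle mismatch being fixed:
+
+  phy_attach_direct() -> phy_led_triggers_register()   -> devm alloc
+  phy_detach()        -> phy_led_triggers_unregister() -> nothing freed
+  phy_attach_direct() -> phy_led_triggers_register()   -> devm alloc again
+  ...                                                     (leak grows)
+  driver unbind                                        -> devm frees all
+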
+Fixes: 3928ee6485a3 ("net: phy: leds: Add support for "link" trigger")
+Fixes: 2e0bc452f472 ("net: phy: leds: add support for led triggers on phy link state change")
+Signed-off-by: Hao Guan <hao.guan@siflower.com.cn>
+Signed-off-by: Qingfang Deng <qingfang.deng@siflower.com.cn>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Link: https://patch.msgid.link/20250417032557.2929427-1-dqfext@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/phy/phy_led_triggers.c | 23 +++++++++++++----------
+ 1 file changed, 13 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c
+index f550576eb9dae..6f9d8da76c4df 100644
+--- a/drivers/net/phy/phy_led_triggers.c
++++ b/drivers/net/phy/phy_led_triggers.c
+@@ -91,9 +91,8 @@ int phy_led_triggers_register(struct phy_device *phy)
+ if (!phy->phy_num_led_triggers)
+ return 0;
+
+- phy->led_link_trigger = devm_kzalloc(&phy->mdio.dev,
+- sizeof(*phy->led_link_trigger),
+- GFP_KERNEL);
++ phy->led_link_trigger = kzalloc(sizeof(*phy->led_link_trigger),
++ GFP_KERNEL);
+ if (!phy->led_link_trigger) {
+ err = -ENOMEM;
+ goto out_clear;
+@@ -103,10 +102,9 @@ int phy_led_triggers_register(struct phy_device *phy)
+ if (err)
+ goto out_free_link;
+
+- phy->phy_led_triggers = devm_kcalloc(&phy->mdio.dev,
+- phy->phy_num_led_triggers,
+- sizeof(struct phy_led_trigger),
+- GFP_KERNEL);
++ phy->phy_led_triggers = kcalloc(phy->phy_num_led_triggers,
++ sizeof(struct phy_led_trigger),
++ GFP_KERNEL);
+ if (!phy->phy_led_triggers) {
+ err = -ENOMEM;
+ goto out_unreg_link;
+@@ -127,11 +125,11 @@ int phy_led_triggers_register(struct phy_device *phy)
+ out_unreg:
+ while (i--)
+ phy_led_trigger_unregister(&phy->phy_led_triggers[i]);
+- devm_kfree(&phy->mdio.dev, phy->phy_led_triggers);
++ kfree(phy->phy_led_triggers);
+ out_unreg_link:
+ phy_led_trigger_unregister(phy->led_link_trigger);
+ out_free_link:
+- devm_kfree(&phy->mdio.dev, phy->led_link_trigger);
++ kfree(phy->led_link_trigger);
+ phy->led_link_trigger = NULL;
+ out_clear:
+ phy->phy_num_led_triggers = 0;
+@@ -145,8 +143,13 @@ void phy_led_triggers_unregister(struct phy_device *phy)
+
+ for (i = 0; i < phy->phy_num_led_triggers; i++)
+ phy_led_trigger_unregister(&phy->phy_led_triggers[i]);
++ kfree(phy->phy_led_triggers);
++ phy->phy_led_triggers = NULL;
+
+- if (phy->led_link_trigger)
++ if (phy->led_link_trigger) {
+ phy_led_trigger_unregister(phy->led_link_trigger);
++ kfree(phy->led_link_trigger);
++ phy->led_link_trigger = NULL;
++ }
+ }
+ EXPORT_SYMBOL_GPL(phy_led_triggers_unregister);
+--
+2.39.5
+
--- /dev/null
+From 3e751714b4a2d472a1e3c2f7a621f4d6ec1e12b3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Apr 2025 11:47:31 -0700
+Subject: net_sched: hfsc: Fix a potential UAF in hfsc_dequeue() too
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit 6ccbda44e2cc3d26fd22af54c650d6d5d801addf ]
+
+Similarly to the previous patch, we need to safeguard hfsc_dequeue()
+too. But for this one, we don't have a reliable reproducer.
+
+Fixes: 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 ("Linux-2.6.12-rc2")
+Reported-by: Gerrard Tai <gerrard.tai@starlabs.sg>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Reviewed-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Link: https://patch.msgid.link/20250417184732.943057-3-xiyou.wangcong@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_hfsc.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
+index 90801b6fe2b08..371255e624332 100644
+--- a/net/sched/sch_hfsc.c
++++ b/net/sched/sch_hfsc.c
+@@ -1638,10 +1638,16 @@ hfsc_dequeue(struct Qdisc *sch)
+ if (cl->qdisc->q.qlen != 0) {
+ /* update ed */
+ next_len = qdisc_peek_len(cl->qdisc);
+- if (realtime)
+- update_ed(cl, next_len);
+- else
+- update_d(cl, next_len);
++ /* Check queue length again since some qdisc implementations
++ * (e.g., netem/codel) might empty the queue during the peek
++ * operation.
++ */
++ if (cl->qdisc->q.qlen != 0) {
++ if (realtime)
++ update_ed(cl, next_len);
++ else
++ update_d(cl, next_len);
++ }
+ } else {
+ /* the class becomes passive */
+ eltree_remove(cl);
+--
+2.39.5
+
--- /dev/null
+From 77f18dd1a1e966ca011c0c5395307d9cae8c677e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Apr 2025 11:47:30 -0700
+Subject: net_sched: hfsc: Fix a UAF vulnerability in class handling
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit 3df275ef0a6ae181e8428a6589ef5d5231e58b5c ]
+
+This patch fixes a Use-After-Free vulnerability in the HFSC qdisc class
+handling. The issue occurs due to a time-of-check/time-of-use condition
+in hfsc_change_class() when working with certain child qdiscs like netem
+or codel.
+
+The vulnerability works as follows:
+1. hfsc_change_class() checks if a class has packets (q.qlen != 0)
+2. It then calls qdisc_peek_len(), which for certain qdiscs (e.g.,
+ codel, netem) might drop packets and empty the queue
+3. The code continues assuming the queue is still non-empty, adding
+ the class to vttree
+4. This breaks HFSC scheduler assumptions that only non-empty classes
+ are in vttree
+5. Later, when the class is destroyed, this can lead to a Use-After-Free
+
+The fix adds a second queue length check after qdisc_peek_len() to verify
+the queue wasn't emptied.
+
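+The fix boils down to (see the hunk below):
+
+  if (cl->qdisc->q.qlen != 0)
+          len = qdisc_peek_len(cl->qdisc); /* may drop packets and empty the queue */
+  if (cl->qdisc->q.qlen != 0) {            /* re-check before activating the class */
+          ...
+  }
+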
+Fixes: 21f4d5cc25ec ("net_sched/hfsc: fix curve activation in hfsc_change_class()")
+Reported-by: Gerrard Tai <gerrard.tai@starlabs.sg>
+Reviewed-by: Konstantin Khlebnikov <koct9i@gmail.com>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Reviewed-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Link: https://patch.msgid.link/20250417184732.943057-2-xiyou.wangcong@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_hfsc.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
+index 880c5f16b29cc..90801b6fe2b08 100644
+--- a/net/sched/sch_hfsc.c
++++ b/net/sched/sch_hfsc.c
+@@ -958,6 +958,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+
+ if (cl != NULL) {
+ int old_flags;
++ int len = 0;
+
+ if (parentid) {
+ if (cl->cl_parent &&
+@@ -988,9 +989,13 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+ if (usc != NULL)
+ hfsc_change_usc(cl, usc, cur_time);
+
++ if (cl->qdisc->q.qlen != 0)
++ len = qdisc_peek_len(cl->qdisc);
++ /* Check queue length again since some qdisc implementations
++ * (e.g., netem/codel) might empty the queue during the peek
++ * operation.
++ */
+ if (cl->qdisc->q.qlen != 0) {
+- int len = qdisc_peek_len(cl->qdisc);
+-
+ if (cl->cl_flags & HFSC_RSC) {
+ if (old_flags & HFSC_RSC)
+ update_ed(cl, len);
+--
+2.39.5
+
--- /dev/null
+From 42bb048fcea8afdf903f5205be94173b1818edc4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 21 Apr 2025 10:46:04 -0700
+Subject: pds_core: handle unsupported PDS_CORE_CMD_FW_CONTROL result
+
+From: Brett Creeley <brett.creeley@amd.com>
+
+[ Upstream commit 2567daad69cd1107fc0ec29b1615f110d7cf7385 ]
+
+If the FW doesn't support the PDS_CORE_CMD_FW_CONTROL command,
+the driver might at best print garbage and at worst crash when
+the user runs the "devlink dev info" devlink command.
+
+This happens because the stack variable fw_list is not
+zero-initialized, which results in fw_list.num_fw_slots being a
+garbage value from the stack. The driver then tries to access
+fw_list.fw_names[i] with i >= ARRAY_SIZE and runs off the end
+of the array.
+
+Fix this by zero-initializing fw_list and by not failing
+completely if the devcmd fails, since other useful information
+is still printed via devlink dev info.
+
+Fixes: 45d76f492938 ("pds_core: set up device and adminq")
+Signed-off-by: Brett Creeley <brett.creeley@amd.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: Shannon Nelson <shannon.nelson@amd.com>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Link: https://patch.msgid.link/20250421174606.3892-3-shannon.nelson@amd.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amd/pds_core/devlink.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c
+index 971d4278280d6..0032e8e351811 100644
+--- a/drivers/net/ethernet/amd/pds_core/devlink.c
++++ b/drivers/net/ethernet/amd/pds_core/devlink.c
+@@ -101,7 +101,7 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
+ .fw_control.opcode = PDS_CORE_CMD_FW_CONTROL,
+ .fw_control.oper = PDS_CORE_FW_GET_LIST,
+ };
+- struct pds_core_fw_list_info fw_list;
++ struct pds_core_fw_list_info fw_list = {};
+ struct pdsc *pdsc = devlink_priv(dl);
+ union pds_core_dev_comp comp;
+ char buf[32];
+@@ -114,8 +114,6 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
+ if (!err)
+ memcpy_fromio(&fw_list, pdsc->cmd_regs->data, sizeof(fw_list));
+ mutex_unlock(&pdsc->devcmd_lock);
+- if (err && err != -EIO)
+- return err;
+
+ listlen = min(fw_list.num_fw_slots, ARRAY_SIZE(fw_list.fw_names));
+ for (i = 0; i < listlen; i++) {
+--
+2.39.5
+
--- /dev/null
+From 0ac36af549ac6217ba2e6d2d0298216975e9e621 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 21 Apr 2025 10:46:06 -0700
+Subject: pds_core: make wait_context part of q_info
+
+From: Shannon Nelson <shannon.nelson@amd.com>
+
+[ Upstream commit 3f77c3dfffc7063428b100c4945ca2a7a8680380 ]
+
+Make the wait_context a full part of the q_info struct rather
+than a stack variable that goes away after pdsc_adminq_post()
+is done so that the context is still available after the wait
+loop has given up.
+
+There was a case where a slow development firmware caused
+the adminq request to time out, but then later the FW finally
+finished the request and sent the interrupt. The handler tried
+to complete_all() the completion context that had been created
+on the stack in pdsc_adminq_post() but no longer existed.
+This caused bad pointer usage, kernel crashes, and much wailing
+and gnashing of teeth.
+
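+A rough sketch of the failure sequence being fixed:
+
+  pdsc_adminq_post()
+    puts wc.wait_completion on its own stack
+    waits, times out, returns                    /* stack frame is gone */
+  ... much later, FW finishes and fires the interrupt ...
+  pdsc_process_adminq()
+    complete_all(&q_info->wc->wait_completion)   /* stale pointer */
+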
+Fixes: 01ba61b55b20 ("pds_core: Add adminq processing and commands")
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: Shannon Nelson <shannon.nelson@amd.com>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Link: https://patch.msgid.link/20250421174606.3892-5-shannon.nelson@amd.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amd/pds_core/adminq.c | 36 +++++++++-------------
+ drivers/net/ethernet/amd/pds_core/core.c | 4 ++-
+ drivers/net/ethernet/amd/pds_core/core.h | 2 +-
+ 3 files changed, 18 insertions(+), 24 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/pds_core/adminq.c b/drivers/net/ethernet/amd/pds_core/adminq.c
+index ea773cfa0af67..733f133d69e75 100644
+--- a/drivers/net/ethernet/amd/pds_core/adminq.c
++++ b/drivers/net/ethernet/amd/pds_core/adminq.c
+@@ -5,11 +5,6 @@
+
+ #include "core.h"
+
+-struct pdsc_wait_context {
+- struct pdsc_qcq *qcq;
+- struct completion wait_completion;
+-};
+-
+ static int pdsc_process_notifyq(struct pdsc_qcq *qcq)
+ {
+ union pds_core_notifyq_comp *comp;
+@@ -110,10 +105,10 @@ void pdsc_process_adminq(struct pdsc_qcq *qcq)
+ q_info = &q->info[q->tail_idx];
+ q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
+
+- /* Copy out the completion data */
+- memcpy(q_info->dest, comp, sizeof(*comp));
+-
+- complete_all(&q_info->wc->wait_completion);
++ if (!completion_done(&q_info->completion)) {
++ memcpy(q_info->dest, comp, sizeof(*comp));
++ complete(&q_info->completion);
++ }
+
+ if (cq->tail_idx == cq->num_descs - 1)
+ cq->done_color = !cq->done_color;
+@@ -166,8 +161,7 @@ irqreturn_t pdsc_adminq_isr(int irq, void *data)
+ static int __pdsc_adminq_post(struct pdsc *pdsc,
+ struct pdsc_qcq *qcq,
+ union pds_core_adminq_cmd *cmd,
+- union pds_core_adminq_comp *comp,
+- struct pdsc_wait_context *wc)
++ union pds_core_adminq_comp *comp)
+ {
+ struct pdsc_queue *q = &qcq->q;
+ struct pdsc_q_info *q_info;
+@@ -209,9 +203,9 @@ static int __pdsc_adminq_post(struct pdsc *pdsc,
+ /* Post the request */
+ index = q->head_idx;
+ q_info = &q->info[index];
+- q_info->wc = wc;
+ q_info->dest = comp;
+ memcpy(q_info->desc, cmd, sizeof(*cmd));
++ reinit_completion(&q_info->completion);
+
+ dev_dbg(pdsc->dev, "head_idx %d tail_idx %d\n",
+ q->head_idx, q->tail_idx);
+@@ -235,16 +229,13 @@ int pdsc_adminq_post(struct pdsc *pdsc,
+ union pds_core_adminq_comp *comp,
+ bool fast_poll)
+ {
+- struct pdsc_wait_context wc = {
+- .wait_completion =
+- COMPLETION_INITIALIZER_ONSTACK(wc.wait_completion),
+- };
+ unsigned long poll_interval = 1;
+ unsigned long poll_jiffies;
+ unsigned long time_limit;
+ unsigned long time_start;
+ unsigned long time_done;
+ unsigned long remaining;
++ struct completion *wc;
+ int err = 0;
+ int index;
+
+@@ -254,20 +245,19 @@ int pdsc_adminq_post(struct pdsc *pdsc,
+ return -ENXIO;
+ }
+
+- wc.qcq = &pdsc->adminqcq;
+- index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp, &wc);
++ index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp);
+ if (index < 0) {
+ err = index;
+ goto err_out;
+ }
+
++ wc = &pdsc->adminqcq.q.info[index].completion;
+ time_start = jiffies;
+ time_limit = time_start + HZ * pdsc->devcmd_timeout;
+ do {
+ /* Timeslice the actual wait to catch IO errors etc early */
+ poll_jiffies = msecs_to_jiffies(poll_interval);
+- remaining = wait_for_completion_timeout(&wc.wait_completion,
+- poll_jiffies);
++ remaining = wait_for_completion_timeout(wc, poll_jiffies);
+ if (remaining)
+ break;
+
+@@ -296,9 +286,11 @@ int pdsc_adminq_post(struct pdsc *pdsc,
+ dev_dbg(pdsc->dev, "%s: elapsed %d msecs\n",
+ __func__, jiffies_to_msecs(time_done - time_start));
+
+- /* Check the results */
+- if (time_after_eq(time_done, time_limit))
++ /* Check the results and clear an un-completed timeout */
++ if (time_after_eq(time_done, time_limit) && !completion_done(wc)) {
+ err = -ETIMEDOUT;
++ complete(wc);
++ }
+
+ dev_dbg(pdsc->dev, "read admin queue completion idx %d:\n", index);
+ dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1,
+diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c
+index eb73c921dc1ed..b3fa867c8ccd9 100644
+--- a/drivers/net/ethernet/amd/pds_core/core.c
++++ b/drivers/net/ethernet/amd/pds_core/core.c
+@@ -169,8 +169,10 @@ static void pdsc_q_map(struct pdsc_queue *q, void *base, dma_addr_t base_pa)
+ q->base = base;
+ q->base_pa = base_pa;
+
+- for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
++ for (i = 0, cur = q->info; i < q->num_descs; i++, cur++) {
+ cur->desc = base + (i * q->desc_size);
++ init_completion(&cur->completion);
++ }
+ }
+
+ static void pdsc_cq_map(struct pdsc_cq *cq, void *base, dma_addr_t base_pa)
+diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
+index f410f7d132056..858bebf797762 100644
+--- a/drivers/net/ethernet/amd/pds_core/core.h
++++ b/drivers/net/ethernet/amd/pds_core/core.h
+@@ -96,7 +96,7 @@ struct pdsc_q_info {
+ unsigned int bytes;
+ unsigned int nbufs;
+ struct pdsc_buf_info bufs[PDS_CORE_MAX_FRAGS];
+- struct pdsc_wait_context *wc;
++ struct completion completion;
+ void *dest;
+ };
+
+--
+2.39.5
+
--- /dev/null
+From 4b30904a52751013063298fced72295c2e4bc1eb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 21 Apr 2025 10:46:05 -0700
+Subject: pds_core: Remove unnecessary check in pds_client_adminq_cmd()
+
+From: Brett Creeley <brett.creeley@amd.com>
+
+[ Upstream commit f9559d818205a4a0b9cd87181ef46e101ea11157 ]
+
+When the pds_core driver was first created there were some race
+conditions around using the adminq, especially for client drivers.
+To reduce the possibility of a race condition there's a check
+against pf->state in pds_client_adminq_cmd(). This is problematic
+for a couple of reasons:
+
+1. The PDSC_S_INITING_DRIVER bit is set during probe, but not
+ cleared until after everything in probe is complete, which
+ includes creating the auxiliary devices. For pds_fwctl this
+ means it can't make any adminq commands until after pds_core's
+ probe is complete even though the adminq is fully up by the
+ time pds_fwctl's auxiliary device is created.
+
+2. The race conditions around using the adminq have been fixed
+ and this path is already protected against client drivers
+ calling pds_client_adminq_cmd() if the adminq isn't ready,
+ i.e. see pdsc_adminq_post() -> pdsc_adminq_inc_if_up().
+
+Fix this by removing the pf->state check in pds_client_adminq_cmd()
+because invalid accesses to pds_core's adminq is already handled by
+pdsc_adminq_post()->pdsc_adminq_inc_if_up().
+
+Fixes: 10659034c622 ("pds_core: add the aux client API")
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: Brett Creeley <brett.creeley@amd.com>
+Signed-off-by: Shannon Nelson <shannon.nelson@amd.com>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Link: https://patch.msgid.link/20250421174606.3892-4-shannon.nelson@amd.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amd/pds_core/auxbus.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/pds_core/auxbus.c b/drivers/net/ethernet/amd/pds_core/auxbus.c
+index fd1a5149c0031..fb7a5403e630d 100644
+--- a/drivers/net/ethernet/amd/pds_core/auxbus.c
++++ b/drivers/net/ethernet/amd/pds_core/auxbus.c
+@@ -107,9 +107,6 @@ int pds_client_adminq_cmd(struct pds_auxiliary_dev *padev,
+ dev_dbg(pf->dev, "%s: %s opcode %d\n",
+ __func__, dev_name(&padev->aux_dev.dev), req->opcode);
+
+- if (pf->state)
+- return -ENXIO;
+-
+ /* Wrap the client's request */
+ cmd.client_request.opcode = PDS_AQ_CMD_CLIENT_CMD;
+ cmd.client_request.client_id = cpu_to_le16(padev->client_id);
+--
+2.39.5
+
--- /dev/null
+From bcadd5a4a3998b68edf811c2f1a0a38ec278bf1e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Apr 2025 06:47:24 +0000
+Subject: perf/x86: Fix non-sampling (counting) events on certain x86 platforms
+
+From: Luo Gengkun <luogengkun@huaweicloud.com>
+
+[ Upstream commit 1a97fea9db9e9b9c4839d4232dde9f505ff5b4cc ]
+
+perf stat does not work for hardware events on certain x86 platforms:
+
+ $perf stat -- sleep 1
+ Performance counter stats for 'sleep 1':
+ 16.44 msec task-clock # 0.016 CPUs utilized
+ 2 context-switches # 121.691 /sec
+ 0 cpu-migrations # 0.000 /sec
+ 54 page-faults # 3.286 K/sec
+ <not supported> cycles
+ <not supported> instructions
+ <not supported> branches
+ <not supported> branch-misses
+
+The reason is that the check in x86_pmu_hw_config() for sampling events is
+unexpectedly applied to counting events as well.
+
+It should only impact x86 platforms with limit_period used for non-PEBS
+events. For Intel platforms, it should only impact some older platforms,
+e.g., HSW, BDW and NHM.
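+
+For reference, is_sampling_event() in include/linux/perf_event.h simply
+tests attr.sample_period, so counting events as used by perf stat
+(sample_period == 0) now skip the limit_period clamp entirely. A small
+sketch of the fixed condition, using a simplified attr struct rather
+than the real perf_event_attr:
+
+  #include <stdbool.h>
+  #include <stdint.h>
+
+  struct sketch_attr {
+          uint64_t sample_period; /* 0 for pure counting events */
+          bool     freq;          /* true when a frequency was requested */
+  };
+
+  static bool sketch_is_sampling_event(const struct sketch_attr *attr)
+  {
+          return attr->sample_period != 0;
+  }
+
+  static bool sketch_apply_limit_period(const struct sketch_attr *attr,
+                                        bool pmu_has_limit_period)
+  {
+          /* sampling event, fixed period, and the PMU defines a limit */
+          return sketch_is_sampling_event(attr) && !attr->freq &&
+                 pmu_has_limit_period;
+  }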
+
+Fixes: 88ec7eedbbd2 ("perf/x86: Fix low freqency setting issue")
+Signed-off-by: Luo Gengkun <luogengkun@huaweicloud.com>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Ravi Bangoria <ravi.bangoria@amd.com>
+Link: https://lore.kernel.org/r/20250423064724.3716211-1-luogengkun@huaweicloud.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/events/core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index 1458ccaa6a057..ad63bd408cd90 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -623,7 +623,7 @@ int x86_pmu_hw_config(struct perf_event *event)
+ if (event->attr.type == event->pmu->type)
+ event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
+
+- if (!event->attr.freq && x86_pmu.limit_period) {
++ if (is_sampling_event(event) && !event->attr.freq && x86_pmu.limit_period) {
+ s64 left = event->attr.sample_period;
+ x86_pmu.limit_period(event, &left);
+ if (left > event->attr.sample_period)
+--
+2.39.5
+
--- /dev/null
+From a46de9ef93f43ebd6576e743e546ac36dee3e3cb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 19 Apr 2025 13:14:00 +0200
+Subject: riscv: uprobes: Add missing fence.i after building the XOL buffer
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Björn Töpel <bjorn@rivosinc.com>
+
+[ Upstream commit 7d1d19a11cfbfd8bae1d89cc010b2cc397cd0c48 ]
+
+The XOL (execute out-of-line) buffer is used to single-step the
+replaced instruction(s) for uprobes. The RISC-V port was missing a
+proper fence.i (i$ flushing) after constructing the XOL buffer, which
+can result in incorrect execution of stale/broken instructions.
+
+This was found running the BPF selftests "test_progs:
+uprobe_autoattach, attach_probe" on the Spacemit K1/X60, where the
+uprobes tests randomly blew up.
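+
+Conceptually, the ordering the fix enforces is "store the instructions,
+then synchronize the instruction fetch stream, and only then allow the
+slot to be executed". A minimal sketch of that ordering (illustrative
+only; the kernel's flush_icache_range() additionally handles other
+harts, which a bare local fence.i does not):
+
+  #include <string.h>
+
+  static inline void sketch_local_fence_i(void)
+  {
+  #if defined(__riscv)
+          __asm__ __volatile__("fence.i" ::: "memory");
+  #endif
+  }
+
+  static void sketch_fill_xol_slot(void *slot, const void *insns,
+                                   size_t len)
+  {
+          memcpy(slot, insns, len);  /* 1. publish the instructions    */
+          sketch_local_fence_i();    /* 2. order stores before i-fetch */
+          /* 3. only now may this hart single-step from the slot */
+  }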
+
+Reviewed-by: Guo Ren <guoren@kernel.org>
+Fixes: 74784081aac8 ("riscv: Add uprobes supported")
+Signed-off-by: Björn Töpel <bjorn@rivosinc.com>
+Link: https://lore.kernel.org/r/20250419111402.1660267-2-bjorn@kernel.org
+Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/kernel/probes/uprobes.c | 10 ++--------
+ 1 file changed, 2 insertions(+), 8 deletions(-)
+
+diff --git a/arch/riscv/kernel/probes/uprobes.c b/arch/riscv/kernel/probes/uprobes.c
+index 4b3dc8beaf77d..cc15f7ca6cc17 100644
+--- a/arch/riscv/kernel/probes/uprobes.c
++++ b/arch/riscv/kernel/probes/uprobes.c
+@@ -167,6 +167,7 @@ void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
+ /* Initialize the slot */
+ void *kaddr = kmap_atomic(page);
+ void *dst = kaddr + (vaddr & ~PAGE_MASK);
++ unsigned long start = (unsigned long)dst;
+
+ memcpy(dst, src, len);
+
+@@ -176,13 +177,6 @@ void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
+ *(uprobe_opcode_t *)dst = __BUG_INSN_32;
+ }
+
++ flush_icache_range(start, start + len);
+ kunmap_atomic(kaddr);
+-
+- /*
+- * We probably need flush_icache_user_page() but it needs vma.
+- * This should work on most of architectures by default. If
+- * architecture needs to do something different it can define
+- * its own version of the function.
+- */
+- flush_dcache_page(page);
+ }
+--
+2.39.5
+
--- /dev/null
+From 4c8844e8a66b442863764b543f97be7c0ac2a88d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Mar 2025 11:49:33 +0300
+Subject: scsi: core: Clear flags for scsi_cmnd that did not complete
+
+From: Anastasia Kovaleva <a.kovaleva@yadro.com>
+
+[ Upstream commit 54bebe46871d4e56e05fcf55c1a37e7efa24e0a8 ]
+
+Commands that have not been completed with scsi_done() do not clear the
+SCMD_INITIALIZED flag and therefore will not be properly reinitialized.
+Thus, the next time the scsi_cmnd structure is used, the command may
+fail in scsi_cmd_runtime_exceeded() due to the old jiffies_at_alloc
+value:
+
+ kernel: sd 16:0:1:84: [sdts] tag#405 timing out command, waited 720s
+ kernel: sd 16:0:1:84: [sdts] tag#405 FAILED Result: hostbyte=DID_OK driverbyte=DRIVER_OK cmd_age=66636s
+
+Clear flags for commands that have not been completed by SCSI.
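+
+The mechanism, sketched with illustrative types rather than the real
+midlayer structures: the re-initialization that refreshes
+jiffies_at_alloc is gated on the "initialized" flag, so a command whose
+flags were never cleared keeps its old timestamp and the next timeout
+calculation sees an absurd cmd_age:
+
+  #include <stdint.h>
+
+  #define SKETCH_INITIALIZED (1u << 0)
+
+  struct sketch_cmnd {
+          unsigned int flags;
+          uint64_t jiffies_at_alloc;
+  };
+
+  uint64_t sketch_now(void);      /* stand-in for jiffies */
+
+  static void sketch_prepare_cmd(struct sketch_cmnd *cmd)
+  {
+          if (!(cmd->flags & SKETCH_INITIALIZED)) {
+                  cmd->flags |= SKETCH_INITIALIZED;
+                  cmd->jiffies_at_alloc = sketch_now(); /* fresh stamp */
+          }
+          /*
+           * If a previous, never-completed use left the flag set, the
+           * branch above is skipped and jiffies_at_alloc stays stale.
+           */
+  }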
+
+Fixes: 4abafdc4360d ("block: remove the initialize_rq_fn blk_mq_ops method")
+Signed-off-by: Anastasia Kovaleva <a.kovaleva@yadro.com>
+Link: https://lore.kernel.org/r/20250324084933.15932-2-a.kovaleva@yadro.com
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/scsi_lib.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index e6dc2c556fde9..bd75e3ebc14da 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -1152,8 +1152,12 @@ EXPORT_SYMBOL_GPL(scsi_alloc_request);
+ */
+ static void scsi_cleanup_rq(struct request *rq)
+ {
++ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
++
++ cmd->flags = 0;
++
+ if (rq->rq_flags & RQF_DONTPREP) {
+- scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq));
++ scsi_mq_uninit_cmd(cmd);
+ rq->rq_flags &= ~RQF_DONTPREP;
+ }
+ }
+--
+2.39.5
+
--- /dev/null
+From 7a56f9cc706faff777a8d20ddfbb764c7b4d5a10 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 9 Apr 2025 19:13:20 -0500
+Subject: scsi: ufs: mcq: Add NULL check in ufshcd_mcq_abort()
+
+From: Chenyuan Yang <chenyuan0y@gmail.com>
+
+[ Upstream commit 4c324085062919d4e21c69e5e78456dcec0052fe ]
+
+A race can occur between the MCQ completion path and the abort handler:
+once a request completes, __blk_mq_free_request() sets rq->mq_hctx to
+NULL, meaning the subsequent ufshcd_mcq_req_to_hwq() call in
+ufshcd_mcq_abort() can return a NULL pointer. If this NULL pointer is
+dereferenced, the kernel will crash.
+
+Add a NULL check for the returned hwq pointer. If hwq is NULL, log an
+error and return FAILED, preventing a potential NULL-pointer
+dereference. As suggested by Bart, the ufshcd_cmd_inflight() check is
+removed.
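+
+A schematic view of the race and the defensive check, with illustrative
+types standing in for the block layer and UFS structures:
+
+  struct sketch_hwq;
+
+  struct sketch_rq {
+          /* cleared to NULL by __blk_mq_free_request() */
+          struct sketch_hwq *mq_hctx;
+  };
+
+  static int sketch_mcq_abort(struct sketch_rq *rq)
+  {
+          /* The completion path may have freed the request already. */
+          struct sketch_hwq *hwq = rq->mq_hctx;
+
+          if (!hwq)
+                  return -1;  /* report FAILED, nothing left to abort */
+
+          /* ... search the hardware queue for the tag and abort ... */
+          return 0;
+  }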
+
+This is similar to the fix in commit 74736103fb41 ("scsi: ufs: core: Fix
+ufshcd_abort_one racing issue").
+
+This was found by our static analysis tool, KNighter.
+
+Signed-off-by: Chenyuan Yang <chenyuan0y@gmail.com>
+Link: https://lore.kernel.org/r/20250410001320.2219341-1-chenyuan0y@gmail.com
+Fixes: f1304d442077 ("scsi: ufs: mcq: Added ufshcd_mcq_abort()")
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Reviewed-by: Peter Wang <peter.wang@mediatek.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ufs/core/ufs-mcq.c | 12 +++++-------
+ 1 file changed, 5 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
+index da8c1734d3335..411109a5ebbff 100644
+--- a/drivers/ufs/core/ufs-mcq.c
++++ b/drivers/ufs/core/ufs-mcq.c
+@@ -632,13 +632,6 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
+ unsigned long flags;
+ int err;
+
+- if (!ufshcd_cmd_inflight(lrbp->cmd)) {
+- dev_err(hba->dev,
+- "%s: skip abort. cmd at tag %d already completed.\n",
+- __func__, tag);
+- return FAILED;
+- }
+-
+ /* Skip task abort in case previous aborts failed and report failure */
+ if (lrbp->req_abort_skip) {
+ dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n",
+@@ -647,6 +640,11 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
+ }
+
+ hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
++ if (!hwq) {
++ dev_err(hba->dev, "%s: skip abort. cmd at tag %d already completed.\n",
++ __func__, tag);
++ return FAILED;
++ }
+
+ if (ufshcd_mcq_sqe_search(hba, hwq, tag)) {
+ /*
+--
+2.39.5
+
sched-topology-consolidate-and-clean-up-access-to-a-.patch
sched-cpufreq-rework-schedutil-governor-performance-.patch
cpufreq-sched-explicitly-synchronize-limits_changed-.patch
+ceph-fix-incorrect-flush-end-position-calculation.patch
+dma-contiguous-avoid-warning-about-unused-size_bytes.patch
+cpufreq-apple-soc-fix-null-ptr-deref-in-apple_soc_cp.patch
+cpufreq-scmi-fix-null-ptr-deref-in-scmi_cpufreq_get_.patch
+cpufreq-scpi-fix-null-ptr-deref-in-scpi_cpufreq_get_.patch
+scsi-ufs-mcq-add-null-check-in-ufshcd_mcq_abort.patch
+cpufreq-cppc-fix-invalid-return-value-in-.get-callba.patch
+btrfs-avoid-page_lockend-underflow-in-btrfs_punch_ho.patch
+scsi-core-clear-flags-for-scsi_cmnd-that-did-not-com.patch
+net-lwtunnel-disable-bhs-when-required.patch
+net-phy-leds-fix-memory-leak.patch
+tipc-fix-null-pointer-dereference-in-tipc_mon_reinit.patch
+net-ethernet-mtk_eth_soc-net-revise-netsysv3-hardwar.patch
+fix-a-couple-of-races-in-mnt_tree_beneath-handling-b.patch
+net_sched-hfsc-fix-a-uaf-vulnerability-in-class-hand.patch
+net_sched-hfsc-fix-a-potential-uaf-in-hfsc_dequeue-t.patch
+net-dsa-mt7530-sync-driver-specific-behavior-of-mt75.patch
+pds_core-handle-unsupported-pds_core_cmd_fw_control-.patch
+pds_core-remove-unnecessary-check-in-pds_client_admi.patch
+pds_core-make-wait_context-part-of-q_info.patch
+iommu-amd-return-an-error-if-vcpu-affinity-is-set-fo.patch
+riscv-uprobes-add-missing-fence.i-after-building-the.patch
+splice-remove-duplicate-noinline-from-pipe_clear_now.patch
+perf-x86-fix-non-sampling-counting-events-on-certain.patch
+loongarch-select-arch_use_memtest.patch
+loongarch-make-regs_irqs_disabled-more-clear.patch
+loongarch-make-do_xyz-exception-handlers-more-robust.patch
--- /dev/null
+From fb79fed0011d576818e6d31d6ff457d051a86c9f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Apr 2025 18:00:23 +0000
+Subject: splice: remove duplicate noinline from pipe_clear_nowait
+
+From: T.J. Mercier <tjmercier@google.com>
+
+[ Upstream commit e6f141b332ddd9007756751b6afd24f799488fd8 ]
+
+pipe_clear_nowait has two noinline macros, but we only need one.
+
+I checked the whole tree, and this is the only occurrence:
+
+$ grep -r "noinline .* noinline"
+fs/splice.c:static noinline void noinline pipe_clear_nowait(struct file *file)
+$
+
+Fixes: 0f99fc513ddd ("splice: clear FMODE_NOWAIT on file if splice/vmsplice is used")
+Signed-off-by: "T.J. Mercier" <tjmercier@google.com>
+Link: https://lore.kernel.org/20250423180025.2627670-1-tjmercier@google.com
+Reviewed-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/splice.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/splice.c b/fs/splice.c
+index d983d375ff113..6f9b06bbb860a 100644
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -45,7 +45,7 @@
+ * here if set to avoid blocking other users of this pipe if splice is
+ * being done on it.
+ */
+-static noinline void noinline pipe_clear_nowait(struct file *file)
++static noinline void pipe_clear_nowait(struct file *file)
+ {
+ fmode_t fmode = READ_ONCE(file->f_mode);
+
+--
+2.39.5
+
--- /dev/null
+From 488144efb7a928eee4e28b12327c0f2166c87f8c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Apr 2025 14:47:15 +0700
+Subject: tipc: fix NULL pointer dereference in tipc_mon_reinit_self()
+
+From: Tung Nguyen <tung.quang.nguyen@est.tech>
+
+[ Upstream commit d63527e109e811ef11abb1c2985048fdb528b4cb ]
+
+syzbot reported:
+
+tipc: Node number set to 1055423674
+Oops: general protection fault, probably for non-canonical address 0xdffffc0000000000: 0000 [#1] SMP KASAN NOPTI
+KASAN: null-ptr-deref in range [0x0000000000000000-0x0000000000000007]
+CPU: 3 UID: 0 PID: 6017 Comm: kworker/3:5 Not tainted 6.15.0-rc1-syzkaller-00246-g900241a5cc15 #0 PREEMPT(full)
+Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.3-debian-1.16.3-2~bpo12+1 04/01/2014
+Workqueue: events tipc_net_finalize_work
+RIP: 0010:tipc_mon_reinit_self+0x11c/0x210 net/tipc/monitor.c:719
+...
+RSP: 0018:ffffc9000356fb68 EFLAGS: 00010246
+RAX: 0000000000000000 RBX: 0000000000000000 RCX: 000000003ee87cba
+RDX: 0000000000000000 RSI: ffffffff8dbc56a7 RDI: ffff88804c2cc010
+RBP: dffffc0000000000 R08: 0000000000000001 R09: 0000000000000000
+R10: 0000000000000001 R11: 0000000000000000 R12: 0000000000000007
+R13: fffffbfff2111097 R14: ffff88804ead8000 R15: ffff88804ead9010
+FS: 0000000000000000(0000) GS:ffff888097ab9000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00000000f720eb00 CR3: 000000000e182000 CR4: 0000000000352ef0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+ <TASK>
+ tipc_net_finalize+0x10b/0x180 net/tipc/net.c:140
+ process_one_work+0x9cc/0x1b70 kernel/workqueue.c:3238
+ process_scheduled_works kernel/workqueue.c:3319 [inline]
+ worker_thread+0x6c8/0xf10 kernel/workqueue.c:3400
+ kthread+0x3c2/0x780 kernel/kthread.c:464
+ ret_from_fork+0x45/0x80 arch/x86/kernel/process.c:153
+ ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:245
+ </TASK>
+...
+RIP: 0010:tipc_mon_reinit_self+0x11c/0x210 net/tipc/monitor.c:719
+...
+RSP: 0018:ffffc9000356fb68 EFLAGS: 00010246
+RAX: 0000000000000000 RBX: 0000000000000000 RCX: 000000003ee87cba
+RDX: 0000000000000000 RSI: ffffffff8dbc56a7 RDI: ffff88804c2cc010
+RBP: dffffc0000000000 R08: 0000000000000001 R09: 0000000000000000
+R10: 0000000000000001 R11: 0000000000000000 R12: 0000000000000007
+R13: fffffbfff2111097 R14: ffff88804ead8000 R15: ffff88804ead9010
+FS: 0000000000000000(0000) GS:ffff888097ab9000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00000000f720eb00 CR3: 000000000e182000 CR4: 0000000000352ef0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+
+There is a race condition between the workqueue created when enabling
+a bearer and another thread created when disabling the bearer right
+after that, as follows:
+
+enabling_bearer | disabling_bearer
+--------------- | ----------------
+tipc_disc_timeout() |
+{ | bearer_disable()
+ ... | {
+ schedule_work(&tn->work); | tipc_mon_delete()
+ ... | {
+} | ...
+ | write_lock_bh(&mon->lock);
+ | mon->self = NULL;
+ | write_unlock_bh(&mon->lock);
+ | ...
+ | }
+tipc_net_finalize_work() | }
+{ |
+ ... |
+ tipc_net_finalize() |
+ { |
+ ... |
+ tipc_mon_reinit_self() |
+ { |
+ ... |
+ write_lock_bh(&mon->lock); |
+ mon->self->addr = tipc_own_addr(net); |
+ write_unlock_bh(&mon->lock); |
+ ... |
+ } |
+ ... |
+ } |
+ ... |
+} |
+
+'mon->self' is set to NULL in the disabling_bearer thread and
+dereferenced later in the enabling_bearer thread.
+
+This commit fixes the issue by validating 'mon->self' before assigning
+the node address to it.
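+
+The pattern the fix applies is the usual "re-check the pointer after
+taking the lock that the teardown path also holds". A minimal
+userspace-flavoured sketch, with a pthread rwlock standing in for the
+kernel rwlock:
+
+  #include <pthread.h>
+
+  struct sketch_peer { unsigned int addr; };
+
+  struct sketch_monitor {
+          pthread_rwlock_t lock;
+          struct sketch_peer *self; /* NULLed by the disable path */
+  };
+
+  static void sketch_reinit_self(struct sketch_monitor *mon,
+                                 unsigned int addr)
+  {
+          pthread_rwlock_wrlock(&mon->lock);
+          if (mon->self)          /* may have been torn down meanwhile */
+                  mon->self->addr = addr;
+          pthread_rwlock_unlock(&mon->lock);
+  }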
+
+Reported-by: syzbot+ed60da8d686dc709164c@syzkaller.appspotmail.com
+Fixes: 46cb01eeeb86 ("tipc: update mon's self addr when node addr generated")
+Signed-off-by: Tung Nguyen <tung.quang.nguyen@est.tech>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20250417074826.578115-1-tung.quang.nguyen@est.tech
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/tipc/monitor.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
+index 77a3d016cadec..ddc3e4e5e18d7 100644
+--- a/net/tipc/monitor.c
++++ b/net/tipc/monitor.c
+@@ -716,7 +716,8 @@ void tipc_mon_reinit_self(struct net *net)
+ if (!mon)
+ continue;
+ write_lock_bh(&mon->lock);
+- mon->self->addr = tipc_own_addr(net);
++ if (mon->self)
++ mon->self->addr = tipc_own_addr(net);
+ write_unlock_bh(&mon->lock);
+ }
+ }
+--
+2.39.5
+