git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.6-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 21 Jul 2025 14:09:19 +0000 (16:09 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 21 Jul 2025 14:09:19 +0000 (16:09 +0200)
added patches:
arm64-filter-out-sme-hwcaps-when-feat_sme-isn-t-implemented.patch
clone_private_mnt-make-sure-that-caller-has-cap_sys_admin-in-the-right-userns.patch
ipv6-make-addrconf_wq-single-threaded.patch
sched-change-nr_uninterruptible-type-to-unsigned-long.patch

queue-6.6/arm64-filter-out-sme-hwcaps-when-feat_sme-isn-t-implemented.patch [new file with mode: 0644]
queue-6.6/clone_private_mnt-make-sure-that-caller-has-cap_sys_admin-in-the-right-userns.patch [new file with mode: 0644]
queue-6.6/ipv6-make-addrconf_wq-single-threaded.patch [new file with mode: 0644]
queue-6.6/sched-change-nr_uninterruptible-type-to-unsigned-long.patch [new file with mode: 0644]
queue-6.6/series

diff --git a/queue-6.6/arm64-filter-out-sme-hwcaps-when-feat_sme-isn-t-implemented.patch b/queue-6.6/arm64-filter-out-sme-hwcaps-when-feat_sme-isn-t-implemented.patch
new file mode 100644 (file)
index 0000000..b86a3dd
--- /dev/null
@@ -0,0 +1,85 @@
+From a75ad2fc76a2ab70817c7eed3163b66ea84ca6ac Mon Sep 17 00:00:00 2001
+From: Mark Brown <broonie@kernel.org>
+Date: Fri, 20 Jun 2025 12:28:48 +0100
+Subject: arm64: Filter out SME hwcaps when FEAT_SME isn't implemented
+
+From: Mark Brown <broonie@kernel.org>
+
+commit a75ad2fc76a2ab70817c7eed3163b66ea84ca6ac upstream.
+
+We have a number of hwcaps for various SME subfeatures enumerated via
+ID_AA64SMFR0_EL1. Currently we advertise these without cross-checking
+against the main SME feature, advertised in ID_AA64PFR1_EL1.SME, which
+means that if the two are out of sync userspace can see a confusing
+situation where SME subfeatures are advertised without the base SME
+hwcap. This can be readily triggered by using the arm64.nosme override,
+which only masks out ID_AA64PFR1_EL1.SME, and there have also been
+reports of VMMs which do the same thing.
+
+Fix this as we did previously for SVE in 064737920bdb ("arm64: Filter
+out SVE hwcaps when FEAT_SVE isn't implemented") by filtering out the
+SME subfeature hwcaps when FEAT_SME is not present.
+
+Fixes: 5e64b862c482 ("arm64/sme: Basic enumeration support")
+Reported-by: Yury Khrustalev <yury.khrustalev@arm.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20250620-arm64-sme-filter-hwcaps-v1-1-02b9d3c2d8ef@kernel.org
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpufeature.c |   35 +++++++++++++++++++++--------------
+ 1 file changed, 21 insertions(+), 14 deletions(-)
+
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -2804,6 +2804,13 @@ static bool has_sve_feature(const struct
+ }
+ #endif
++#ifdef CONFIG_ARM64_SME
++static bool has_sme_feature(const struct arm64_cpu_capabilities *cap, int scope)
++{
++      return system_supports_sme() && has_user_cpuid_feature(cap, scope);
++}
++#endif
++
+ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
+       HWCAP_CAP(ID_AA64ISAR0_EL1, AES, PMULL, CAP_HWCAP, KERNEL_HWCAP_PMULL),
+       HWCAP_CAP(ID_AA64ISAR0_EL1, AES, AES, CAP_HWCAP, KERNEL_HWCAP_AES),
+@@ -2875,20 +2882,20 @@ static const struct arm64_cpu_capabiliti
+       HWCAP_CAP(ID_AA64ISAR2_EL1, MOPS, IMP, CAP_HWCAP, KERNEL_HWCAP_MOPS),
+       HWCAP_CAP(ID_AA64ISAR2_EL1, BC, IMP, CAP_HWCAP, KERNEL_HWCAP_HBC),
+ #ifdef CONFIG_ARM64_SME
+-      HWCAP_CAP(ID_AA64PFR1_EL1, SME, IMP, CAP_HWCAP, KERNEL_HWCAP_SME),
+-      HWCAP_CAP(ID_AA64SMFR0_EL1, FA64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64),
+-      HWCAP_CAP(ID_AA64SMFR0_EL1, SMEver, SME2p1, CAP_HWCAP, KERNEL_HWCAP_SME2P1),
+-      HWCAP_CAP(ID_AA64SMFR0_EL1, SMEver, SME2, CAP_HWCAP, KERNEL_HWCAP_SME2),
+-      HWCAP_CAP(ID_AA64SMFR0_EL1, I16I64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64),
+-      HWCAP_CAP(ID_AA64SMFR0_EL1, F64F64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64),
+-      HWCAP_CAP(ID_AA64SMFR0_EL1, I16I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I32),
+-      HWCAP_CAP(ID_AA64SMFR0_EL1, B16B16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16B16),
+-      HWCAP_CAP(ID_AA64SMFR0_EL1, F16F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F16),
+-      HWCAP_CAP(ID_AA64SMFR0_EL1, I8I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I8I32),
+-      HWCAP_CAP(ID_AA64SMFR0_EL1, F16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F32),
+-      HWCAP_CAP(ID_AA64SMFR0_EL1, B16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16F32),
+-      HWCAP_CAP(ID_AA64SMFR0_EL1, BI32I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_BI32I32),
+-      HWCAP_CAP(ID_AA64SMFR0_EL1, F32F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F32F32),
++      HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64PFR1_EL1, SME, IMP, CAP_HWCAP, KERNEL_HWCAP_SME),
++      HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, FA64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64),
++      HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SMEver, SME2p1, CAP_HWCAP, KERNEL_HWCAP_SME2P1),
++      HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SMEver, SME2, CAP_HWCAP, KERNEL_HWCAP_SME2),
++      HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, I16I64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64),
++      HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F64F64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64),
++      HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, I16I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I32),
++      HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, B16B16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16B16),
++      HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F16F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F16),
++      HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, I8I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I8I32),
++      HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F32),
++      HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, B16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16F32),
++      HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, BI32I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_BI32I32),
++      HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F32F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F32F32),
+ #endif /* CONFIG_ARM64_SME */
+       {},
+ };
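
Note: the user-visible symptom of the bug fixed above is that SME subfeature
bits could appear in AT_HWCAP2 without the base SME bit. Below is a minimal
userspace sketch of the check applications are expected to make; it is an
illustration only (not part of the patch) and assumes the HWCAP2_* constants
provided by <asm/hwcap.h>.

#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>		/* HWCAP2_* bit definitions */

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

#ifdef HWCAP2_SME
	if (!(hwcap2 & HWCAP2_SME)) {
		/* with the fix, no SME subfeature bits are set either */
		puts("SME not advertised");
		return 0;
	}
	puts("SME advertised");
#ifdef HWCAP2_SME_F64F64
	if (hwcap2 & HWCAP2_SME_F64F64)
		puts("SME F64F64 advertised");
#endif
#endif
	return 0;
}
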
diff --git a/queue-6.6/clone_private_mnt-make-sure-that-caller-has-cap_sys_admin-in-the-right-userns.patch b/queue-6.6/clone_private_mnt-make-sure-that-caller-has-cap_sys_admin-in-the-right-userns.patch
new file mode 100644 (file)
index 0000000..7cf51b0
--- /dev/null
@@ -0,0 +1,48 @@
+From c28f922c9dcee0e4876a2c095939d77fe7e15116 Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Sun, 1 Jun 2025 20:11:06 -0400
+Subject: clone_private_mnt(): make sure that caller has CAP_SYS_ADMIN in the right userns
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+commit c28f922c9dcee0e4876a2c095939d77fe7e15116 upstream.
+
+What we want to verify here is that the clone won't expose something
+hidden by a mount we wouldn't be able to undo.  "Wouldn't be able to undo"
+may be a result of MNT_LOCKED on a child, but it may also come from
+lacking admin rights in the userns of the namespace the mount belongs to.
+
+clone_private_mnt() checks the former, but not the latter.
+
+There are a number of rather confusing CAP_SYS_ADMIN checks in various
+userns during mounting, especially with the new mount API; they serve
+different purposes and, in the case of clone_private_mnt(), they usually,
+but not always, end up covering the missing check mentioned above.
+
+Reviewed-by: Christian Brauner <brauner@kernel.org>
+Reported-by: "Orlando, Noah" <Noah.Orlando@deshaw.com>
+Fixes: 427215d85e8d ("ovl: prevent private clone if bind mount is not allowed")
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+[ merge conflict resolution: clone_private_mount() was reworked in
+  db04662e2f4f ("fs: allow detached mounts in clone_private_mount()").
+  Tweak the relevant ns_capable check so that it works on older kernels ]
+Signed-off-by: Noah Orlando <Noah.Orlando@deshaw.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/namespace.c |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -2110,6 +2110,11 @@ struct vfsmount *clone_private_mount(con
+       if (!check_mnt(old_mnt))
+               goto invalid;
++      if (!ns_capable(old_mnt->mnt_ns->user_ns, CAP_SYS_ADMIN)) {
++              up_read(&namespace_sem);
++              return ERR_PTR(-EPERM);
++      }
++
+       if (has_locked_children(old_mnt, path->dentry))
+               goto invalid;
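
Note: the added ns_capable() check requires CAP_SYS_ADMIN in the userns that
owns the mount namespace, not merely in some userns. That distinction is
observable from userspace. The rough sketch below (an illustration, not part
of the patch, assuming unprivileged user namespaces are enabled) shows that
gaining CAP_SYS_ADMIN over a fresh userns does not confer admin rights over
filesystems owned by the parent's userns.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Gain a full capability set, including CAP_SYS_ADMIN, over a
	 * freshly created user namespace (plus a new mount namespace). */
	if (unshare(CLONE_NEWUSER | CLONE_NEWNS) != 0) {
		perror("unshare");
		return 1;
	}

	/* The filesystems behind the copied mounts are still owned by the
	 * parent's userns, so admin-only operations on them are refused:
	 * having CAP_SYS_ADMIN "somewhere" is not having it in the right
	 * userns. */
	if (mount(NULL, "/", NULL, MS_REMOUNT | MS_RDONLY, NULL) != 0)
		perror("remount / read-only (expected EPERM)");

	return 0;
}
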
diff --git a/queue-6.6/ipv6-make-addrconf_wq-single-threaded.patch b/queue-6.6/ipv6-make-addrconf_wq-single-threaded.patch
new file mode 100644 (file)
index 0000000..9934ed4
--- /dev/null
@@ -0,0 +1,34 @@
+From dfd2ee086a63c730022cb095576a8b3a5a752109 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 1 Feb 2024 17:30:31 +0000
+Subject: ipv6: make addrconf_wq single threaded
+
+From: Eric Dumazet <edumazet@google.com>
+
+commit dfd2ee086a63c730022cb095576a8b3a5a752109 upstream.
+
+Both addrconf_verify_work() and addrconf_dad_work() acquire rtnl, so
+there is no point in trying to have one thread per CPU.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Link: https://lore.kernel.org/r/20240201173031.3654257-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Brett A C Sheffield <bacs@librecast.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/addrconf.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -7383,7 +7383,8 @@ int __init addrconf_init(void)
+       if (err < 0)
+               goto out_addrlabel;
+-      addrconf_wq = create_workqueue("ipv6_addrconf");
++      /* All works using addrconf_wq need to lock rtnl. */
++      addrconf_wq = create_singlethread_workqueue("ipv6_addrconf");
+       if (!addrconf_wq) {
+               err = -ENOMEM;
+               goto out_nowq;
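
Note: the rationale above is generic: work items that all take the same lock
cannot usefully run in parallel, so a per-CPU workqueue buys nothing over a
single-threaded one. A standalone pthread analogue (an illustration only, not
kernel code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;	/* stands in for rtnl */
static long progress;

static void *work_item(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		pthread_mutex_lock(&big_lock);	/* every item serializes here */
		progress++;
		pthread_mutex_unlock(&big_lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t workers[4];

	/* The shared lock fully serializes the loop bodies across all four
	 * workers, just as rtnl serializes the addrconf work items: extra
	 * threads add contention, not throughput. */
	for (int i = 0; i < 4; i++)
		pthread_create(&workers[i], NULL, work_item, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(workers[i], NULL);

	printf("progress = %ld\n", progress);
	return 0;
}
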
diff --git a/queue-6.6/sched-change-nr_uninterruptible-type-to-unsigned-long.patch b/queue-6.6/sched-change-nr_uninterruptible-type-to-unsigned-long.patch
new file mode 100644 (file)
index 0000000..9c3de94
--- /dev/null
@@ -0,0 +1,54 @@
+From 36569780b0d64de283f9d6c2195fd1a43e221ee8 Mon Sep 17 00:00:00 2001
+From: Aruna Ramakrishna <aruna.ramakrishna@oracle.com>
+Date: Wed, 9 Jul 2025 17:33:28 +0000
+Subject: sched: Change nr_uninterruptible type to unsigned long
+
+From: Aruna Ramakrishna <aruna.ramakrishna@oracle.com>
+
+commit 36569780b0d64de283f9d6c2195fd1a43e221ee8 upstream.
+
+Commit e6fe3f422be1 ("sched: Make multiple runqueue task counters
+32-bit") changed nr_uninterruptible to an unsigned int. But the
+nr_uninterruptible values for each of the CPU runqueues can grow to
+large numbers, sometimes exceeding INT_MAX. This is valid if, over
+time, a large number of tasks are migrated off of one CPU after going
+into an uninterruptible state. Only the sum of all nr_uninterruptible
+values across all CPUs yields the correct result, as explained in a
+comment in kernel/sched/loadavg.c.
+
+Change the type of nr_uninterruptible back to unsigned long to prevent
+overflows, and thus the miscalculation of load average.
+
+Fixes: e6fe3f422be1 ("sched: Make multiple runqueue task counters 32-bit")
+
+Signed-off-by: Aruna Ramakrishna <aruna.ramakrishna@oracle.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20250709173328.606794-1-aruna.ramakrishna@oracle.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/loadavg.c |    2 +-
+ kernel/sched/sched.h   |    2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/kernel/sched/loadavg.c
++++ b/kernel/sched/loadavg.c
+@@ -80,7 +80,7 @@ long calc_load_fold_active(struct rq *th
+       long nr_active, delta = 0;
+       nr_active = this_rq->nr_running - adjust;
+-      nr_active += (int)this_rq->nr_uninterruptible;
++      nr_active += (long)this_rq->nr_uninterruptible;
+       if (nr_active != this_rq->calc_load_active) {
+               delta = nr_active - this_rq->calc_load_active;
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1011,7 +1011,7 @@ struct rq {
+        * one CPU and if it got migrated afterwards it may decrease
+        * it on another CPU. Always updated under the runqueue lock:
+        */
+-      unsigned int            nr_uninterruptible;
++      unsigned long           nr_uninterruptible;
+       struct task_struct __rcu        *curr;
+       struct task_struct      *idle;
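
Note: why the (int) cast corrupts the load average can be shown with a small
standalone sum (an illustration with made-up numbers; it assumes LP64, i.e. a
64-bit long, and the usual two's-complement truncation on the int cast). The
per-CPU counters may legitimately drift far past INT_MAX as long as the
cross-CPU sum stays small; truncating each one to 32 bits before summing into
a 64-bit total drops multiples of 2^32. The patch widens both the field and
the cast.

#include <stdio.h>

int main(void)
{
	/* Made-up per-CPU nr_uninterruptible values: CPU 0 accounted ~3e9
	 * tasks going to sleep, the wakeups were accounted on the other
	 * CPUs, so their counters wrapped "negative".  Net sleepers: 5. */
	unsigned long rq[4] = {
		3000000000UL,			/* legitimately > INT_MAX */
		(unsigned long)-1000000000L,
		(unsigned long)-1000000000L,
		(unsigned long)-999999995L,
	};
	long sum_old = 0, sum_new = 0;

	for (int i = 0; i < 4; i++) {
		sum_old += (int)rq[i];		/* old code: 32-bit view, sign-extended */
		sum_new += (long)rq[i];		/* patched code: full width */
	}

	printf("(int)  cast sum: %ld\n", sum_old);	/* 5 - 2^32: bogus */
	printf("(long) cast sum: %ld\n", sum_new);	/* 5: correct */
	return 0;
}
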
diff --git a/queue-6.6/series b/queue-6.6/series
index a02cc796e2079aae11dcde680c2796a6724dc4ba..36370219d7a9ad5f0fe8ebb798b8887a397dd4b9 100644 (file)
@@ -91,3 +91,6 @@ rxrpc-fix-recv-recv-race-of-completed-call.patch
 rxrpc-fix-transmission-of-an-abort-in-response-to-an.patch
 revert-cgroup_freezer-cgroup_freezing-check-if-not-f.patch
 sched-change-nr_uninterruptible-type-to-unsigned-long.patch
+ipv6-make-addrconf_wq-single-threaded.patch
+clone_private_mnt-make-sure-that-caller-has-cap_sys_admin-in-the-right-userns.patch
+arm64-filter-out-sme-hwcaps-when-feat_sme-isn-t-implemented.patch