git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.15-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 21 Jul 2025 14:09:36 +0000 (16:09 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 21 Jul 2025 14:09:36 +0000 (16:09 +0200)
added patches:
sched-change-nr_uninterruptible-type-to-unsigned-long.patch

queue-6.15/sched-change-nr_uninterruptible-type-to-unsigned-long.patch [new file with mode: 0644]

diff --git a/queue-6.15/sched-change-nr_uninterruptible-type-to-unsigned-long.patch b/queue-6.15/sched-change-nr_uninterruptible-type-to-unsigned-long.patch
new file mode 100644 (file)
index 0000000..350cccd
--- /dev/null
@@ -0,0 +1,54 @@
+From 36569780b0d64de283f9d6c2195fd1a43e221ee8 Mon Sep 17 00:00:00 2001
+From: Aruna Ramakrishna <aruna.ramakrishna@oracle.com>
+Date: Wed, 9 Jul 2025 17:33:28 +0000
+Subject: sched: Change nr_uninterruptible type to unsigned long
+
+From: Aruna Ramakrishna <aruna.ramakrishna@oracle.com>
+
+commit 36569780b0d64de283f9d6c2195fd1a43e221ee8 upstream.
+
+The commit e6fe3f422be1 ("sched: Make multiple runqueue task counters
+32-bit") changed nr_uninterruptible to an unsigned int. But the
+nr_uninterruptible values for each of the CPU runqueues can grow to
+large numbers, sometimes exceeding INT_MAX. This is valid if, over
+time, a large number of tasks are migrated off of one CPU after going
+into an uninterruptible state. Only the sum of all nr_uninterruptible
+values across all CPUs yields the correct result, as explained in a
+comment in kernel/sched/loadavg.c.
+
+Change the type of nr_uninterruptible back to unsigned long to prevent
+overflows, and thus the miscalculation of the load average.
+
+Fixes: e6fe3f422be1 ("sched: Make multiple runqueue task counters 32-bit")
+
+Signed-off-by: Aruna Ramakrishna <aruna.ramakrishna@oracle.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20250709173328.606794-1-aruna.ramakrishna@oracle.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/loadavg.c |    2 +-
+ kernel/sched/sched.h   |    2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/kernel/sched/loadavg.c
++++ b/kernel/sched/loadavg.c
+@@ -80,7 +80,7 @@ long calc_load_fold_active(struct rq *th
+       long nr_active, delta = 0;
+
+       nr_active = this_rq->nr_running - adjust;
+-      nr_active += (int)this_rq->nr_uninterruptible;
++      nr_active += (long)this_rq->nr_uninterruptible;
+
+       if (nr_active != this_rq->calc_load_active) {
+               delta = nr_active - this_rq->calc_load_active;
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1147,7 +1147,7 @@ struct rq {
+        * one CPU and if it got migrated afterwards it may decrease
+        * it on another CPU. Always updated under the runqueue lock:
+        */
+-      unsigned int            nr_uninterruptible;
++      unsigned long           nr_uninterruptible;
+
+       union {
+               struct task_struct __rcu *donor; /* Scheduler context */
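
For reference, the arithmetic behind the fix can be reproduced outside the kernel. The stand-alone user-space sketch below is not part of the patch: it assumes an LP64 target (64-bit long) and GCC's modular behaviour for out-of-range unsigned-to-int conversion, and the per-CPU values are invented for illustration. It folds three hypothetical per-CPU nr_uninterruptible counters the old way (unsigned int storage, (int) cast) and the new way (unsigned long storage, (long) cast). The true total is zero, but once one CPU's counter has drifted past INT_MAX, the 32-bit variant comes out off by a multiple of 2^32, which is the kind of load-average miscalculation the type change prevents.

/* demo.c - illustrative only; not kernel code */
#include <stdio.h>

int main(void)
{
	/* Hypothetical true per-CPU balances: +2.5e9, -1.0e9, -1.5e9 -> total 0.
	 * Counters drift like this because a task can increment the counter on
	 * one CPU and decrement it on another after being migrated. */
	long long truth[3] = { 2500000000LL, -1000000000LL, -1500000000LL };

	unsigned int  u32[3];   /* old storage type: wraps modulo 2^32 */
	unsigned long u64[3];   /* new storage type: wraps modulo 2^64 */
	long sum32 = 0, sum64 = 0;
	int i;

	for (i = 0; i < 3; i++) {
		u32[i] = (unsigned int)truth[i];
		u64[i] = (unsigned long)truth[i];

		/* old code path: nr_active += (int)this_rq->nr_uninterruptible;  */
		sum32 += (int)u32[i];
		/* new code path: nr_active += (long)this_rq->nr_uninterruptible; */
		sum64 += (long)u64[i];
	}

	printf("unsigned int  storage, (int) cast:  %ld\n", sum32);
	printf("unsigned long storage, (long) cast: %ld\n", sum64);
	return 0;
}

On a typical x86-64 Linux toolchain the first line prints -4294967296 and the second prints 0, matching the commit message's point that only the sum across all CPUs is meaningful and that it stays exact only when the per-CPU counters are wide enough.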