From 978d5f9509ba147ad79966db0d1c2e010b3eb189 Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Date: Mon, 21 Jul 2025 16:09:36 +0200
Subject: [PATCH] 6.15-stable patches

added patches:
	sched-change-nr_uninterruptible-type-to-unsigned-long.patch
---
 ...ninterruptible-type-to-unsigned-long.patch | 54 +++++++++++++++++++
 1 file changed, 54 insertions(+)
 create mode 100644 queue-6.15/sched-change-nr_uninterruptible-type-to-unsigned-long.patch

diff --git a/queue-6.15/sched-change-nr_uninterruptible-type-to-unsigned-long.patch b/queue-6.15/sched-change-nr_uninterruptible-type-to-unsigned-long.patch
new file mode 100644
index 0000000000..350cccd465
--- /dev/null
+++ b/queue-6.15/sched-change-nr_uninterruptible-type-to-unsigned-long.patch
@@ -0,0 +1,54 @@
+From 36569780b0d64de283f9d6c2195fd1a43e221ee8 Mon Sep 17 00:00:00 2001
+From: Aruna Ramakrishna <aruna.ramakrishna@oracle.com>
+Date: Wed, 9 Jul 2025 17:33:28 +0000
+Subject: sched: Change nr_uninterruptible type to unsigned long
+
+From: Aruna Ramakrishna <aruna.ramakrishna@oracle.com>
+
+commit 36569780b0d64de283f9d6c2195fd1a43e221ee8 upstream.
+
+The commit e6fe3f422be1 ("sched: Make multiple runqueue task counters
+32-bit") changed nr_uninterruptible to an unsigned int. But the
+nr_uninterruptible values for each of the CPU runqueues can grow to
+large numbers, sometimes exceeding INT_MAX. This is valid, if, over
+time, a large number of tasks are migrated off of one CPU after going
+into an uninterruptible state. Only the sum of all nr_interruptible
+values across all CPUs yields the correct result, as explained in a
+comment in kernel/sched/loadavg.c.
+
+Change the type of nr_uninterruptible back to unsigned long to prevent
+overflows, and thus the miscalculation of load average.
+
+Fixes: e6fe3f422be1 ("sched: Make multiple runqueue task counters 32-bit")
+
+Signed-off-by: Aruna Ramakrishna <aruna.ramakrishna@oracle.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20250709173328.606794-1-aruna.ramakrishna@oracle.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/loadavg.c | 2 +-
+ kernel/sched/sched.h | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/kernel/sched/loadavg.c
++++ b/kernel/sched/loadavg.c
+@@ -80,7 +80,7 @@ long calc_load_fold_active(struct rq *th
+ 	long nr_active, delta = 0;
+
+ 	nr_active = this_rq->nr_running - adjust;
+-	nr_active += (int)this_rq->nr_uninterruptible;
++	nr_active += (long)this_rq->nr_uninterruptible;
+
+ 	if (nr_active != this_rq->calc_load_active) {
+ 		delta = nr_active - this_rq->calc_load_active;
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1147,7 +1147,7 @@ struct rq {
+ 	 * one CPU and if it got migrated afterwards it may decrease
+ 	 * it on another CPU. Always updated under the runqueue lock:
+ 	 */
+-	unsigned int		nr_uninterruptible;
++	unsigned long		nr_uninterruptible;
+
+ 	union {
+ 		struct task_struct __rcu *donor;	/* Scheduler context */
-- 
2.47.2
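
A minimal userspace sketch of the failure mode the patch addresses; this is not
kernel code, the per-CPU counter value is hypothetical, and an LP64 system with
the usual wrapping unsigned-to-int conversion of GCC/Clang is assumed. It mirrors
the two casts touched above to show why the (int) cast corrupts nr_active once a
per-CPU nr_uninterruptible passes INT_MAX, while the (long) cast on an unsigned
long counter preserves the real magnitude:

/* overflow_sketch.c - illustrative only, not kernel code */
#include <limits.h>
#include <stdio.h>

int main(void)
{
	/* Many tasks went uninterruptible on this CPU but were woken (and
	 * decremented) on other CPUs, so this counter legitimately grew
	 * past INT_MAX; only the sum across all CPUs is meaningful. */
	unsigned long long true_count = (unsigned long long)INT_MAX + 3;
	unsigned int nr_running = 1;

	/* Before the patch: 32-bit counter, (int) cast sign-extends. */
	unsigned int nr_unint_32 = (unsigned int)true_count;
	long nr_active_old = nr_running;
	nr_active_old += (int)nr_unint_32;	/* wraps to a large negative value */

	/* After the patch: unsigned long counter, (long) cast. */
	unsigned long nr_unint_64 = (unsigned long)true_count;
	long nr_active_new = nr_running;
	nr_active_new += (long)nr_unint_64;	/* keeps the real magnitude */

	printf("old nr_active: %ld\n", nr_active_old);	/* -2147483645: off by 2^32 */
	printf("new nr_active: %ld\n", nr_active_new);	/* 2147483651: as intended */
	return 0;
}

Since calc_load_fold_active() folds each CPU's delta into calc_load_tasks at
different times, a per-CPU nr_active that is wrong by 2^32 feeds directly into
the load average, which is the miscalculation the type change prevents.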