git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
drop sched-deadline-check-bandwidth-overflow-earlier-for-.patch
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Fri, 14 Feb 2025 13:33:00 +0000 (14:33 +0100)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Fri, 14 Feb 2025 13:33:00 +0000 (14:33 +0100)
queue-6.12/sched-deadline-check-bandwidth-overflow-earlier-for-.patch [deleted file]
queue-6.12/series
queue-6.13/sched-deadline-check-bandwidth-overflow-earlier-for-.patch [deleted file]
queue-6.13/series

diff --git a/queue-6.12/sched-deadline-check-bandwidth-overflow-earlier-for-.patch b/queue-6.12/sched-deadline-check-bandwidth-overflow-earlier-for-.patch
deleted file mode 100644
index 1ab8a09..0000000
--- a/queue-6.12/sched-deadline-check-bandwidth-overflow-earlier-for-.patch
+++ /dev/null
@@ -1,126 +0,0 @@
-From aee0c37052bd5c9a3ecc8a188015b0c2ff999de8 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Fri, 15 Nov 2024 11:48:29 +0000
-Subject: sched/deadline: Check bandwidth overflow earlier for hotplug
-
-From: Juri Lelli <juri.lelli@redhat.com>
-
-[ Upstream commit 53916d5fd3c0b658de3463439dd2b7ce765072cb ]
-
-Currently we check for bandwidth overflow potentially caused by hotplug
-operations at the end of sched_cpu_deactivate(), after the cpu going
-offline has already been removed from scheduling, active_mask, etc.
-This can create issues for DEADLINE tasks, as there is a substantial
-race window between the start of sched_cpu_deactivate() and the moment
-we possibly decide to roll back the operation if dl_bw_deactivate()
-returns failure in cpuset_cpu_inactive(). An example is a throttled
-task whose replenishment timer fires while the cpu it was previously
-running on is already considered offline, but before dl_bw_deactivate()
-has had a chance to say no and the roll-back has happened.
-
-Fix this by calling dl_bw_deactivate() first thing in
-sched_cpu_deactivate() and doing the required calculation in that
-function, treating the cpu passed as an argument as already offline.
-
-By doing so we also simplify sched_cpu_deactivate(), as there is no
-longer any need for a roll-back if we fail early.
-
-Signed-off-by: Juri Lelli <juri.lelli@redhat.com>
-Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Reviewed-by: Phil Auld <pauld@redhat.com>
-Tested-by: Waiman Long <longman@redhat.com>
-Link: https://lore.kernel.org/r/Zzc1DfPhbvqDDIJR@jlelli-thinkpadt14gen4.remote.csb
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- kernel/sched/core.c     | 22 +++++++---------------
- kernel/sched/deadline.c | 12 ++++++++++--
- 2 files changed, 17 insertions(+), 17 deletions(-)
-
-diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index e2756138609c1..4b455e491ea48 100644
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -8091,19 +8091,14 @@ static void cpuset_cpu_active(void)
-       cpuset_update_active_cpus();
- }
--static int cpuset_cpu_inactive(unsigned int cpu)
-+static void cpuset_cpu_inactive(unsigned int cpu)
- {
-       if (!cpuhp_tasks_frozen) {
--              int ret = dl_bw_deactivate(cpu);
--
--              if (ret)
--                      return ret;
-               cpuset_update_active_cpus();
-       } else {
-               num_cpus_frozen++;
-               partition_sched_domains(1, NULL, NULL);
-       }
--      return 0;
- }
- static inline void sched_smt_present_inc(int cpu)
-@@ -8165,6 +8160,11 @@ int sched_cpu_deactivate(unsigned int cpu)
-       struct rq *rq = cpu_rq(cpu);
-       int ret;
-+      ret = dl_bw_deactivate(cpu);
-+
-+      if (ret)
-+              return ret;
-+
-       /*
-        * Remove CPU from nohz.idle_cpus_mask to prevent participating in
-        * load balancing when not active
-@@ -8210,15 +8210,7 @@ int sched_cpu_deactivate(unsigned int cpu)
-               return 0;
-       sched_update_numa(cpu, false);
--      ret = cpuset_cpu_inactive(cpu);
--      if (ret) {
--              sched_smt_present_inc(cpu);
--              sched_set_rq_online(rq, cpu);
--              balance_push_set(cpu, false);
--              set_cpu_active(cpu, true);
--              sched_update_numa(cpu, true);
--              return ret;
--      }
-+      cpuset_cpu_inactive(cpu);
-       sched_domains_numa_masks_clear(cpu);
-       return 0;
- }
-diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
-index 36fd501ad9958..ec2b66a7aae0e 100644
---- a/kernel/sched/deadline.c
-+++ b/kernel/sched/deadline.c
-@@ -3499,6 +3499,13 @@ static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
-               }
-               break;
-       case dl_bw_req_deactivate:
-+              /*
-+               * cpu is not off yet, but we need to do the math by
-+               * considering it off already (i.e., what would happen if we
-+               * turn cpu off?).
-+               */
-+              cap -= arch_scale_cpu_capacity(cpu);
-+
-               /*
-                * cpu is going offline and NORMAL tasks will be moved away
-                * from it. We can thus discount dl_server bandwidth
-@@ -3516,9 +3523,10 @@ static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
-               if (dl_b->total_bw - fair_server_bw > 0) {
-                       /*
-                        * Leaving at least one CPU for DEADLINE tasks seems a
--                       * wise thing to do.
-+                       * wise thing to do. As said above, cpu is not offline
-+                       * yet, so account for that.
-                        */
--                      if (dl_bw_cpus(cpu))
-+                      if (dl_bw_cpus(cpu) - 1)
-                               overflow = __dl_overflow(dl_b, cap, fair_server_bw, 0);
-                       else
-                               overflow = 1;
--- 
-2.39.5
-
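The overflow test touched by the dropped patch is simple fixed-point arithmetic: the DEADLINE bandwidth already allocated must fit within the allowed fraction of whatever CPU capacity remains once the outgoing CPU is discounted, which is exactly why the patch subtracts arch_scale_cpu_capacity(cpu) from cap before checking. The stand-alone C sketch below illustrates that check. The names mirror the kernel's BW_SHIFT, cap_scale() and __dl_overflow(), but the types are simplified, the 95% limit is assumed to be the usual default rather than a queried value, and the fair_server bandwidth discount from the patch is omitted, so treat this as an illustration only.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BW_SHIFT              20      /* bandwidth fixed point: 1.0 == 1 << 20 */
    #define SCHED_CAPACITY_SCALE  1024UL  /* capacity of one CPU at full speed */

    /* Scale the per-CPU bandwidth limit by the available capacity. */
    static uint64_t cap_scale(uint64_t bw, unsigned long cap)
    {
            return (bw * cap) >> 10;      /* SCHED_CAPACITY_SHIFT == 10 */
    }

    /* Simplified __dl_overflow(): can 'cap' still serve 'total_bw'? */
    static bool dl_overflow(uint64_t max_bw, unsigned long cap, uint64_t total_bw)
    {
            return cap_scale(max_bw, cap) < total_bw;
    }

    int main(void)
    {
            unsigned long cap = 4 * SCHED_CAPACITY_SCALE;  /* 4 CPUs online */
            uint64_t max_bw = (95ULL << BW_SHIFT) / 100;   /* 95% per CPU (default) */
            uint64_t total_bw = 3ULL << BW_SHIFT;          /* 3.0 CPUs of DL bandwidth */

            /* The patched code discounts the outgoing CPU before checking: */
            cap -= SCHED_CAPACITY_SCALE;  /* do the math as if it were off already */

            /* 0.95 * 3 = 2.85 CPUs of room < 3.0 allocated -> refuse the hotplug */
            printf("overflow: %s\n", dl_overflow(max_bw, cap, total_bw) ? "yes" : "no");
            return 0;
    }

With four CPUs and 3.0 CPUs' worth of allocated deadline bandwidth, removing one CPU leaves only 2.85 CPUs of admissible bandwidth, so the early dl_bw_deactivate() call can refuse the operation before any scheduler state has been torn down.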
diff --git a/queue-6.12/series b/queue-6.12/series
index 2fff838c89b66e5ccbca1354a1ddaff5cf0486d6..0827df6746641acd3e823d8e163845bd1841ca1c 100644
--- a/queue-6.12/series
+++ b/queue-6.12/series
@@ -9,7 +9,6 @@ btrfs-fix-data-race-when-accessing-the-inode-s-disk_.patch
 btrfs-convert-bug_on-in-btrfs_reloc_cow_block-to-pro.patch
 sched-don-t-try-to-catch-up-excess-steal-time.patch
 sched-deadline-correctly-account-for-allocated-bandw.patch
-sched-deadline-check-bandwidth-overflow-earlier-for-.patch
 x86-convert-unreachable-to-bug.patch
 locking-ww_mutex-test-use-swap-macro.patch
 lockdep-fix-upper-limit-for-lockdep_-_bits-configs.patch
diff --git a/queue-6.13/sched-deadline-check-bandwidth-overflow-earlier-for-.patch b/queue-6.13/sched-deadline-check-bandwidth-overflow-earlier-for-.patch
deleted file mode 100644
index 336a0d5..0000000
--- a/queue-6.13/sched-deadline-check-bandwidth-overflow-earlier-for-.patch
+++ /dev/null
@@ -1,126 +0,0 @@
-From d902de6614ea6cc8b786caf2d2a2d0ae7955449d Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Fri, 15 Nov 2024 11:48:29 +0000
-Subject: sched/deadline: Check bandwidth overflow earlier for hotplug
-
-From: Juri Lelli <juri.lelli@redhat.com>
-
-[ Upstream commit 53916d5fd3c0b658de3463439dd2b7ce765072cb ]
-
-Currently we check for bandwidth overflow potentially caused by hotplug
-operations at the end of sched_cpu_deactivate(), after the cpu going
-offline has already been removed from scheduling, active_mask, etc.
-This can create issues for DEADLINE tasks, as there is a substantial
-race window between the start of sched_cpu_deactivate() and the moment
-we possibly decide to roll back the operation if dl_bw_deactivate()
-returns failure in cpuset_cpu_inactive(). An example is a throttled
-task whose replenishment timer fires while the cpu it was previously
-running on is already considered offline, but before dl_bw_deactivate()
-has had a chance to say no and the roll-back has happened.
-
-Fix this by calling dl_bw_deactivate() first thing in
-sched_cpu_deactivate() and doing the required calculation in that
-function, treating the cpu passed as an argument as already offline.
-
-By doing so we also simplify sched_cpu_deactivate(), as there is no
-longer any need for a roll-back if we fail early.
-
-Signed-off-by: Juri Lelli <juri.lelli@redhat.com>
-Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Reviewed-by: Phil Auld <pauld@redhat.com>
-Tested-by: Waiman Long <longman@redhat.com>
-Link: https://lore.kernel.org/r/Zzc1DfPhbvqDDIJR@jlelli-thinkpadt14gen4.remote.csb
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- kernel/sched/core.c     | 22 +++++++---------------
- kernel/sched/deadline.c | 12 ++++++++++--
- 2 files changed, 17 insertions(+), 17 deletions(-)
-
-diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index 141bbe97d7e5f..a4fc6d357e08a 100644
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -8182,19 +8182,14 @@ static void cpuset_cpu_active(void)
-       cpuset_update_active_cpus();
- }
--static int cpuset_cpu_inactive(unsigned int cpu)
-+static void cpuset_cpu_inactive(unsigned int cpu)
- {
-       if (!cpuhp_tasks_frozen) {
--              int ret = dl_bw_deactivate(cpu);
--
--              if (ret)
--                      return ret;
-               cpuset_update_active_cpus();
-       } else {
-               num_cpus_frozen++;
-               partition_sched_domains(1, NULL, NULL);
-       }
--      return 0;
- }
- static inline void sched_smt_present_inc(int cpu)
-@@ -8256,6 +8251,11 @@ int sched_cpu_deactivate(unsigned int cpu)
-       struct rq *rq = cpu_rq(cpu);
-       int ret;
-+      ret = dl_bw_deactivate(cpu);
-+
-+      if (ret)
-+              return ret;
-+
-       /*
-        * Remove CPU from nohz.idle_cpus_mask to prevent participating in
-        * load balancing when not active
-@@ -8301,15 +8301,7 @@ int sched_cpu_deactivate(unsigned int cpu)
-               return 0;
-       sched_update_numa(cpu, false);
--      ret = cpuset_cpu_inactive(cpu);
--      if (ret) {
--              sched_smt_present_inc(cpu);
--              sched_set_rq_online(rq, cpu);
--              balance_push_set(cpu, false);
--              set_cpu_active(cpu, true);
--              sched_update_numa(cpu, true);
--              return ret;
--      }
-+      cpuset_cpu_inactive(cpu);
-       sched_domains_numa_masks_clear(cpu);
-       return 0;
- }
-diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
-index b078014273d9e..b6781ddea7650 100644
---- a/kernel/sched/deadline.c
-+++ b/kernel/sched/deadline.c
-@@ -3488,6 +3488,13 @@ static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
-               }
-               break;
-       case dl_bw_req_deactivate:
-+              /*
-+               * cpu is not off yet, but we need to do the math by
-+               * considering it off already (i.e., what would happen if we
-+               * turn cpu off?).
-+               */
-+              cap -= arch_scale_cpu_capacity(cpu);
-+
-               /*
-                * cpu is going offline and NORMAL tasks will be moved away
-                * from it. We can thus discount dl_server bandwidth
-@@ -3505,9 +3512,10 @@ static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
-               if (dl_b->total_bw - fair_server_bw > 0) {
-                       /*
-                        * Leaving at least one CPU for DEADLINE tasks seems a
--                       * wise thing to do.
-+                       * wise thing to do. As said above, cpu is not offline
-+                       * yet, so account for that.
-                        */
--                      if (dl_bw_cpus(cpu))
-+                      if (dl_bw_cpus(cpu) - 1)
-                               overflow = __dl_overflow(dl_b, cap, fair_server_bw, 0);
-                       else
-                               overflow = 1;
--- 
-2.39.5
-
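One detail of the second deadline.c hunk deserves spelling out: dl_bw_cpus(cpu) still counts the CPU being deactivated, because at this point it has not actually gone offline. The patch therefore tests dl_bw_cpus(cpu) - 1, refusing outright when no CPU would be left for DEADLINE tasks. A minimal sketch of just that guard; deactivate_allowed() and its parameters are illustrative names, not kernel API:

    #include <stdbool.h>

    /*
     * Illustration of the dl_bw_req_deactivate guard. 'online_cpus' plays
     * the role of dl_bw_cpus(cpu), which still includes the CPU that is
     * about to go offline; 'would_overflow' stands in for __dl_overflow().
     */
    static bool deactivate_allowed(int online_cpus, bool would_overflow)
    {
            /* No CPU left for DEADLINE tasks after this one goes away: refuse. */
            if (online_cpus - 1 == 0)
                    return false;

            /* Otherwise allow only if the remaining capacity still fits. */
            return !would_overflow;
    }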
diff --git a/queue-6.13/series b/queue-6.13/series
index 071f9554c9cf8ae6353f7d017b3e4d82e1b6b771..93a6bd3bb774e753e606c60b33dccf0a52d19aa7 100644
--- a/queue-6.13/series
+++ b/queue-6.13/series
@@ -11,7 +11,6 @@ btrfs-convert-bug_on-in-btrfs_reloc_cow_block-to-pro.patch
 btrfs-don-t-use-btrfs_set_item_key_safe-on-raid-stri.patch
 sched-don-t-try-to-catch-up-excess-steal-time.patch
 sched-deadline-correctly-account-for-allocated-bandw.patch
-sched-deadline-check-bandwidth-overflow-earlier-for-.patch
 x86-convert-unreachable-to-bug.patch
 locking-ww_mutex-test-use-swap-macro.patch
 lockdep-fix-upper-limit-for-lockdep_-_bits-configs.patch