git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
drop sched-deadline-correctly-account-for-allocated-bandw.patch
Author:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
AuthorDate: Sat, 15 Feb 2025 07:49:42 +0000 (08:49 +0100)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Sat, 15 Feb 2025 07:49:42 +0000 (08:49 +0100)
queue-6.12/sched-deadline-correctly-account-for-allocated-bandw.patch [deleted file]
queue-6.12/series
queue-6.13/sched-deadline-correctly-account-for-allocated-bandw.patch [deleted file]
queue-6.13/series

diff --git a/queue-6.12/sched-deadline-correctly-account-for-allocated-bandw.patch b/queue-6.12/sched-deadline-correctly-account-for-allocated-bandw.patch
deleted file mode 100644
index 0aef6f3..0000000
--- a/queue-6.12/sched-deadline-correctly-account-for-allocated-bandw.patch
+++ /dev/null
@@ -1,151 +0,0 @@
-From 2c3b5ab504c9f0c83f2685cbd94e25b48aca0563 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Thu, 14 Nov 2024 14:28:10 +0000
-Subject: sched/deadline: Correctly account for allocated bandwidth during
- hotplug
-
-From: Juri Lelli <juri.lelli@redhat.com>
-
-[ Upstream commit d4742f6ed7ea6df56e381f82ba4532245fa1e561 ]
-
-For hotplug operations, DEADLINE needs to check that there is still enough
-bandwidth left after removing the CPU that is going offline. However, we
-currently fail to do so.
-
-Restore the correct behavior by restructuring dl_bw_manage() a bit, so
-that overflow conditions (not enough bandwidth left) are properly
-checked. Also account for dl_server bandwidth, i.e. discount such
-bandwidth in the calculation, since NORMAL tasks will be moved away
-from the CPU anyway as a result of the hotplug operation.
-
-Signed-off-by: Juri Lelli <juri.lelli@redhat.com>
-Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Reviewed-by: Phil Auld <pauld@redhat.com>
-Tested-by: Waiman Long <longman@redhat.com>
-Link: https://lore.kernel.org/r/20241114142810.794657-3-juri.lelli@redhat.com
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- kernel/sched/core.c     |  2 +-
- kernel/sched/deadline.c | 48 +++++++++++++++++++++++++++++++++--------
- kernel/sched/sched.h    |  2 +-
- 3 files changed, 41 insertions(+), 11 deletions(-)
-
-diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index 5d67f41d05d40..e2756138609c1 100644
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -8094,7 +8094,7 @@ static void cpuset_cpu_active(void)
- static int cpuset_cpu_inactive(unsigned int cpu)
- {
-       if (!cpuhp_tasks_frozen) {
--              int ret = dl_bw_check_overflow(cpu);
-+              int ret = dl_bw_deactivate(cpu);
-               if (ret)
-                       return ret;
-diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
-index a17c23b53049c..36fd501ad9958 100644
---- a/kernel/sched/deadline.c
-+++ b/kernel/sched/deadline.c
-@@ -3464,29 +3464,31 @@ int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
- }
- enum dl_bw_request {
--      dl_bw_req_check_overflow = 0,
-+      dl_bw_req_deactivate = 0,
-       dl_bw_req_alloc,
-       dl_bw_req_free
- };
- static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
- {
--      unsigned long flags;
-+      unsigned long flags, cap;
-       struct dl_bw *dl_b;
-       bool overflow = 0;
-+      u64 fair_server_bw = 0;
-       rcu_read_lock_sched();
-       dl_b = dl_bw_of(cpu);
-       raw_spin_lock_irqsave(&dl_b->lock, flags);
--      if (req == dl_bw_req_free) {
-+      cap = dl_bw_capacity(cpu);
-+      switch (req) {
-+      case dl_bw_req_free:
-               __dl_sub(dl_b, dl_bw, dl_bw_cpus(cpu));
--      } else {
--              unsigned long cap = dl_bw_capacity(cpu);
--
-+              break;
-+      case dl_bw_req_alloc:
-               overflow = __dl_overflow(dl_b, cap, 0, dl_bw);
--              if (req == dl_bw_req_alloc && !overflow) {
-+              if (!overflow) {
-                       /*
-                        * We reserve space in the destination
-                        * root_domain, as we can't fail after this point.
-@@ -3495,6 +3497,34 @@ static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
-                        */
-                       __dl_add(dl_b, dl_bw, dl_bw_cpus(cpu));
-               }
-+              break;
-+      case dl_bw_req_deactivate:
-+              /*
-+               * cpu is going offline and NORMAL tasks will be moved away
-+               * from it. We can thus discount dl_server bandwidth
-+               * contribution as it won't need to be servicing tasks after
-+               * the cpu is off.
-+               */
-+              if (cpu_rq(cpu)->fair_server.dl_server)
-+                      fair_server_bw = cpu_rq(cpu)->fair_server.dl_bw;
-+
-+              /*
-+               * Not much to check if no DEADLINE bandwidth is present.
-+               * dl_servers we can discount, as tasks will be moved out of
-+               * the offlined CPUs anyway.
-+               */
-+              if (dl_b->total_bw - fair_server_bw > 0) {
-+                      /*
-+                       * Leaving at least one CPU for DEADLINE tasks seems a
-+                       * wise thing to do.
-+                       */
-+                      if (dl_bw_cpus(cpu))
-+                              overflow = __dl_overflow(dl_b, cap, fair_server_bw, 0);
-+                      else
-+                              overflow = 1;
-+              }
-+
-+              break;
-       }
-       raw_spin_unlock_irqrestore(&dl_b->lock, flags);
-@@ -3503,9 +3533,9 @@ static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
-       return overflow ? -EBUSY : 0;
- }
--int dl_bw_check_overflow(int cpu)
-+int dl_bw_deactivate(int cpu)
- {
--      return dl_bw_manage(dl_bw_req_check_overflow, cpu, 0);
-+      return dl_bw_manage(dl_bw_req_deactivate, cpu, 0);
- }
- int dl_bw_alloc(int cpu, u64 dl_bw)
-diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index 5426969cf478a..43154dd6934be 100644
---- a/kernel/sched/sched.h
-+++ b/kernel/sched/sched.h
-@@ -362,7 +362,7 @@ extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
- extern bool __checkparam_dl(const struct sched_attr *attr);
- extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
- extern int  dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
--extern int  dl_bw_check_overflow(int cpu);
-+extern int  dl_bw_deactivate(int cpu);
- extern s64 dl_scaled_delta_exec(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec);
- /*
-  * SCHED_DEADLINE supports servers (nested scheduling) with the following
--- 
-2.39.5
-
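
For context, the check the dropped patch introduced works as follows: when a
CPU is about to go offline, the dl_server (fair server) bandwidth hosted on
that CPU is discounted, and the remaining DEADLINE bandwidth must still fit
the capacity that survives the hotplug. Below is a minimal standalone sketch
of that arithmetic; dl_bw_sketch, dl_overflow() and remaining_cpus are
illustrative names rather than the kernel's API, and the fixed-point scaling
is simplified relative to the kernel's cap_scale().

    /*
     * Standalone sketch of the deactivate-time admission test added to
     * dl_bw_manage(). Simplified: one CPU of capacity is 1 << BW_SHIFT
     * here, whereas the kernel scales capacity via SCHED_CAPACITY_SHIFT.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BW_SHIFT 20  /* bandwidth = utilization << BW_SHIFT, as in the kernel */

    struct dl_bw_sketch {
            int64_t bw;        /* per-unit-capacity limit, -1 means unlimited */
            int64_t total_bw;  /* DEADLINE bandwidth currently allocated      */
    };

    /* Simplified analogue of __dl_overflow(): would the allocated bandwidth,
     * minus old_bw plus new_bw, exceed the limit scaled by the capacity? */
    static bool dl_overflow(const struct dl_bw_sketch *dl_b, int64_t cap,
                            int64_t old_bw, int64_t new_bw)
    {
            return dl_b->bw != -1 &&
                   ((dl_b->bw * cap) >> BW_SHIFT) < dl_b->total_bw - old_bw + new_bw;
    }

    int main(void)
    {
            int64_t cap_per_cpu = 1 << BW_SHIFT;          /* one full CPU */
            int64_t pct = cap_per_cpu / 100;              /* 1% of a CPU  */
            struct dl_bw_sketch dl_b = {
                    .bw       = 95 * pct,                 /* 95% per-CPU limit  */
                    .total_bw = 2 * 50 * pct + 5 * pct,   /* two 50% DEADLINE   */
            };                                            /* tasks + 5% server  */
            int64_t fair_server_bw = 5 * pct;  /* dl_server on the outgoing CPU */
            int remaining_cpus = 3;            /* CPUs left after the offline   */

            /* The dl_bw_req_deactivate case, boiled down: discount the fair
             * server's bandwidth, require at least one surviving CPU, and
             * check the remaining bandwidth against the remaining capacity. */
            bool overflow = false;
            if (dl_b.total_bw - fair_server_bw > 0) {
                    if (remaining_cpus)
                            overflow = dl_overflow(&dl_b,
                                                   remaining_cpus * cap_per_cpu,
                                                   fair_server_bw, 0);
                    else
                            overflow = true;
            }

            printf("CPU offline %s\n", overflow ? "rejected (-EBUSY)" : "allowed");
            return 0;
    }

In this example, 100% of utilization remains after discounting the 5% fair
server, against a 285% limit across the three surviving CPUs, so the offline
is admitted; with a single surviving CPU the same check overflows, and
cpuset_cpu_inactive() would propagate -EBUSY.
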
diff --git a/queue-6.12/series b/queue-6.12/series
index c5f403a14b1bcbeaa5319845e39617b18bf2a1d8..82a42811da85a90b823c91fe54774f028320a4e8 100644
--- a/queue-6.12/series
+++ b/queue-6.12/series
@@ -8,7 +8,6 @@ s390-stackleak-use-exrl-instead-of-ex-in-__stackleak.patch
 btrfs-fix-data-race-when-accessing-the-inode-s-disk_.patch
 btrfs-convert-bug_on-in-btrfs_reloc_cow_block-to-pro.patch
 sched-don-t-try-to-catch-up-excess-steal-time.patch
-sched-deadline-correctly-account-for-allocated-bandw.patch
 x86-convert-unreachable-to-bug.patch
 locking-ww_mutex-test-use-swap-macro.patch
 lockdep-fix-upper-limit-for-lockdep_-_bits-configs.patch
diff --git a/queue-6.13/sched-deadline-correctly-account-for-allocated-bandw.patch b/queue-6.13/sched-deadline-correctly-account-for-allocated-bandw.patch
deleted file mode 100644
index c408bdd..0000000
--- a/queue-6.13/sched-deadline-correctly-account-for-allocated-bandw.patch
+++ /dev/null
@@ -1,151 +0,0 @@
-From 4a75049f87443321e24f1cfc4f168e1d8512898c Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Thu, 14 Nov 2024 14:28:10 +0000
-Subject: sched/deadline: Correctly account for allocated bandwidth during
- hotplug
-
-From: Juri Lelli <juri.lelli@redhat.com>
-
-[ Upstream commit d4742f6ed7ea6df56e381f82ba4532245fa1e561 ]
-
-For hotplug operations, DEADLINE needs to check that there is still enough
-bandwidth left after removing the CPU that is going offline. However, we
-currently fail to do so.
-
-Restore the correct behavior by restructuring dl_bw_manage() a bit, so
-that overflow conditions (not enough bandwidth left) are properly
-checked. Also account for dl_server bandwidth, i.e. discount such
-bandwidth in the calculation, since NORMAL tasks will be moved away
-from the CPU anyway as a result of the hotplug operation.
-
-Signed-off-by: Juri Lelli <juri.lelli@redhat.com>
-Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Reviewed-by: Phil Auld <pauld@redhat.com>
-Tested-by: Waiman Long <longman@redhat.com>
-Link: https://lore.kernel.org/r/20241114142810.794657-3-juri.lelli@redhat.com
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- kernel/sched/core.c     |  2 +-
- kernel/sched/deadline.c | 48 +++++++++++++++++++++++++++++++++--------
- kernel/sched/sched.h    |  2 +-
- 3 files changed, 41 insertions(+), 11 deletions(-)
-
-diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index ffceb5ff4c5c3..141bbe97d7e5f 100644
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -8185,7 +8185,7 @@ static void cpuset_cpu_active(void)
- static int cpuset_cpu_inactive(unsigned int cpu)
- {
-       if (!cpuhp_tasks_frozen) {
--              int ret = dl_bw_check_overflow(cpu);
-+              int ret = dl_bw_deactivate(cpu);
-               if (ret)
-                       return ret;
-diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
-index d94f2ed6d1f46..b078014273d9e 100644
---- a/kernel/sched/deadline.c
-+++ b/kernel/sched/deadline.c
-@@ -3453,29 +3453,31 @@ int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
- }
- enum dl_bw_request {
--      dl_bw_req_check_overflow = 0,
-+      dl_bw_req_deactivate = 0,
-       dl_bw_req_alloc,
-       dl_bw_req_free
- };
- static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
- {
--      unsigned long flags;
-+      unsigned long flags, cap;
-       struct dl_bw *dl_b;
-       bool overflow = 0;
-+      u64 fair_server_bw = 0;
-       rcu_read_lock_sched();
-       dl_b = dl_bw_of(cpu);
-       raw_spin_lock_irqsave(&dl_b->lock, flags);
--      if (req == dl_bw_req_free) {
-+      cap = dl_bw_capacity(cpu);
-+      switch (req) {
-+      case dl_bw_req_free:
-               __dl_sub(dl_b, dl_bw, dl_bw_cpus(cpu));
--      } else {
--              unsigned long cap = dl_bw_capacity(cpu);
--
-+              break;
-+      case dl_bw_req_alloc:
-               overflow = __dl_overflow(dl_b, cap, 0, dl_bw);
--              if (req == dl_bw_req_alloc && !overflow) {
-+              if (!overflow) {
-                       /*
-                        * We reserve space in the destination
-                        * root_domain, as we can't fail after this point.
-@@ -3484,6 +3486,34 @@ static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
-                        */
-                       __dl_add(dl_b, dl_bw, dl_bw_cpus(cpu));
-               }
-+              break;
-+      case dl_bw_req_deactivate:
-+              /*
-+               * cpu is going offline and NORMAL tasks will be moved away
-+               * from it. We can thus discount dl_server bandwidth
-+               * contribution as it won't need to be servicing tasks after
-+               * the cpu is off.
-+               */
-+              if (cpu_rq(cpu)->fair_server.dl_server)
-+                      fair_server_bw = cpu_rq(cpu)->fair_server.dl_bw;
-+
-+              /*
-+               * Not much to check if no DEADLINE bandwidth is present.
-+               * dl_servers we can discount, as tasks will be moved out of
-+               * the offlined CPUs anyway.
-+               */
-+              if (dl_b->total_bw - fair_server_bw > 0) {
-+                      /*
-+                       * Leaving at least one CPU for DEADLINE tasks seems a
-+                       * wise thing to do.
-+                       */
-+                      if (dl_bw_cpus(cpu))
-+                              overflow = __dl_overflow(dl_b, cap, fair_server_bw, 0);
-+                      else
-+                              overflow = 1;
-+              }
-+
-+              break;
-       }
-       raw_spin_unlock_irqrestore(&dl_b->lock, flags);
-@@ -3492,9 +3522,9 @@ static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
-       return overflow ? -EBUSY : 0;
- }
--int dl_bw_check_overflow(int cpu)
-+int dl_bw_deactivate(int cpu)
- {
--      return dl_bw_manage(dl_bw_req_check_overflow, cpu, 0);
-+      return dl_bw_manage(dl_bw_req_deactivate, cpu, 0);
- }
- int dl_bw_alloc(int cpu, u64 dl_bw)
-diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index c5d67a43fe524..96d9bbba94acc 100644
---- a/kernel/sched/sched.h
-+++ b/kernel/sched/sched.h
-@@ -362,7 +362,7 @@ extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
- extern bool __checkparam_dl(const struct sched_attr *attr);
- extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
- extern int  dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
--extern int  dl_bw_check_overflow(int cpu);
-+extern int  dl_bw_deactivate(int cpu);
- extern s64 dl_scaled_delta_exec(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec);
- /*
-  * SCHED_DEADLINE supports servers (nested scheduling) with the following
--- 
-2.39.5
-
diff --git a/queue-6.13/series b/queue-6.13/series
index 93a6bd3bb774e753e606c60b33dccf0a52d19aa7..16683b6f424e35d8eac6bd679ad7266fb619ece7 100644
--- a/queue-6.13/series
+++ b/queue-6.13/series
@@ -10,7 +10,6 @@ btrfs-fix-data-race-when-accessing-the-inode-s-disk_.patch
 btrfs-convert-bug_on-in-btrfs_reloc_cow_block-to-pro.patch
 btrfs-don-t-use-btrfs_set_item_key_safe-on-raid-stri.patch
 sched-don-t-try-to-catch-up-excess-steal-time.patch
-sched-deadline-correctly-account-for-allocated-bandw.patch
 x86-convert-unreachable-to-bug.patch
 locking-ww_mutex-test-use-swap-macro.patch
 lockdep-fix-upper-limit-for-lockdep_-_bits-configs.patch