sched/fair: Fix warning in bandwidth distribution
author    Josh Don <joshdon@google.com>
          Fri, 22 Sep 2023 23:05:35 +0000 (16:05 -0700)
committer Ingo Molnar <mingo@kernel.org>
          Sun, 24 Sep 2023 10:08:29 +0000 (12:08 +0200)
We've observed the following warning being hit in
distribute_cfs_runtime():

SCHED_WARN_ON(cfs_rq->runtime_remaining > 0)

We have the following race:

 - CPU 0: running bandwidth distribution (distribute_cfs_runtime).
   Inspects the local cfs_rq and makes its runtime_remaining positive.
   However, we defer unthrottling the local cfs_rq until after
   considering all remote cfs_rq's.

 - CPU 1: starts running bandwidth distribution from the slack timer. When
   it finds the cfs_rq for CPU 0 on the throttled list, it observes that
   the cfs_rq is throttled, yet is not on the CSD list, and has a
   positive runtime_remaining, thus triggering the warning in
   distribute_cfs_runtime.

To fix this, we can rework the local unthrottling logic to put the local
cfs_rq on a local list, so that any future bandwidth distributions will
realize that the cfs_rq is about to be unthrottled.

Signed-off-by: Josh Don <joshdon@google.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20230922230535.296350-2-joshdon@google.com
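
For readers less familiar with the pattern the fix relies on, below is a
minimal userspace sketch (not the kernel code; the list helpers are
simplified analogues of <linux/list.h>, and the struct and variable names
mirror throttled_csd_list / local_unthrottle purely for illustration). The
point is that deferring the local entry by queueing it on a list head,
rather than caching it in a bare pointer, lets any other path detect the
pending unthrottle simply by testing whether the entry's list node is
non-empty; the deferred work is then drained in a second phase.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Tiny doubly linked list in the style of the kernel's <linux/list.h>. */
    struct list_head { struct list_head *prev, *next; };

    #define LIST_HEAD(name) struct list_head name = { &(name), &(name) }
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }
    static bool list_empty(const struct list_head *h) { return h->next == h; }

    static void list_add_tail(struct list_head *new, struct list_head *head)
    {
            new->prev = head->prev;
            new->next = head;
            head->prev->next = new;
            head->prev = new;
    }

    static void list_del_init(struct list_head *entry)
    {
            entry->prev->next = entry->next;
            entry->next->prev = entry->prev;
            INIT_LIST_HEAD(entry);
    }

    /* Stand-in for a cfs_rq; only the list hook matters for the pattern. */
    struct fake_cfs_rq {
            int cpu;
            struct list_head throttled_csd_list;
    };

    int main(void)
    {
            LIST_HEAD(local_unthrottle);    /* work collected in phase 1 */
            struct fake_cfs_rq rq0 = { .cpu = 0 };

            INIT_LIST_HEAD(&rq0.throttled_csd_list);

            /* Phase 1: defer the local entry by queueing it, not by caching a pointer. */
            list_add_tail(&rq0.throttled_csd_list, &local_unthrottle);

            /* Another observer can now tell the entry is already queued for unthrottle. */
            printf("queued: %s\n", list_empty(&rq0.throttled_csd_list) ? "no" : "yes");

            /* Phase 2: drain the local list and process each deferred entry. */
            while (!list_empty(&local_unthrottle)) {
                    struct fake_cfs_rq *p = container_of(local_unthrottle.next,
                                                         struct fake_cfs_rq,
                                                         throttled_csd_list);
                    list_del_init(&p->throttled_csd_list);
                    printf("unthrottling cfs_rq on cpu %d\n", p->cpu);
            }
            return 0;
    }

In the actual patch below, the same two phases happen under the relevant rq
locks, and list emptiness of cfs_rq->throttled_csd_list is what keeps a
concurrent distribute_cfs_runtime() from tripping the warning.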
kernel/sched/fair.c

index 41c960eca7920ebbd028ba59b9eafda77397686a..2973173ad85091dc012f3cb868c3134f9444871b 100644
@@ -5741,13 +5741,13 @@ static void unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq)
 
 static bool distribute_cfs_runtime(struct cfs_bandwidth *cfs_b)
 {
-       struct cfs_rq *local_unthrottle = NULL;
        int this_cpu = smp_processor_id();
        u64 runtime, remaining = 1;
        bool throttled = false;
-       struct cfs_rq *cfs_rq;
+       struct cfs_rq *cfs_rq, *tmp;
        struct rq_flags rf;
        struct rq *rq;
+       LIST_HEAD(local_unthrottle);
 
        rcu_read_lock();
        list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
@@ -5782,11 +5782,17 @@ static bool distribute_cfs_runtime(struct cfs_bandwidth *cfs_b)
 
                /* we check whether we're throttled above */
                if (cfs_rq->runtime_remaining > 0) {
-                       if (cpu_of(rq) != this_cpu ||
-                           SCHED_WARN_ON(local_unthrottle))
+                       if (cpu_of(rq) != this_cpu) {
                                unthrottle_cfs_rq_async(cfs_rq);
-                       else
-                               local_unthrottle = cfs_rq;
+                       } else {
+                               /*
+                                * We currently only expect to be unthrottling
+                                * a single cfs_rq locally.
+                                */
+                               SCHED_WARN_ON(!list_empty(&local_unthrottle));
+                               list_add_tail(&cfs_rq->throttled_csd_list,
+                                             &local_unthrottle);
+                       }
                } else {
                        throttled = true;
                }
@@ -5794,15 +5800,23 @@ static bool distribute_cfs_runtime(struct cfs_bandwidth *cfs_b)
 next:
                rq_unlock_irqrestore(rq, &rf);
        }
-       rcu_read_unlock();
 
-       if (local_unthrottle) {
-               rq = cpu_rq(this_cpu);
+       list_for_each_entry_safe(cfs_rq, tmp, &local_unthrottle,
+                                throttled_csd_list) {
+               struct rq *rq = rq_of(cfs_rq);
+
                rq_lock_irqsave(rq, &rf);
-               if (cfs_rq_throttled(local_unthrottle))
-                       unthrottle_cfs_rq(local_unthrottle);
+
+               list_del_init(&cfs_rq->throttled_csd_list);
+
+               if (cfs_rq_throttled(cfs_rq))
+                       unthrottle_cfs_rq(cfs_rq);
+
                rq_unlock_irqrestore(rq, &rf);
        }
+       SCHED_WARN_ON(!list_empty(&local_unthrottle));
+
+       rcu_read_unlock();
 
        return throttled;
 }