sched/eevdf: Propagate min_slice up the cgroup hierarchy
author     Peter Zijlstra <peterz@infradead.org>
           Thu, 20 Jun 2024 11:16:49 +0000 (13:16 +0200)
committer  Peter Zijlstra <peterz@infradead.org>
           Sat, 17 Aug 2024 09:06:46 +0000 (11:06 +0200)
In the absence of an explicit cgroup slice configuration, make mixed
slice lengths work with cgroups by propagating the min_slice up the
hierarchy.

This ensures the cgroup entity gets timely service, so that it can in
turn service those of its entities that have this timing constraint set
on them.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Valentin Schneider <vschneid@redhat.com>
Link: https://lkml.kernel.org/r/20240727105030.948188417@infradead.org
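
For context, the timing constraint being propagated originates from a
task's own slice request. A minimal userspace sketch, assuming the
sched_attr::sched_runtime slice interface added elsewhere in this
series; the struct layout follows the sched_setattr(2) man page, and
the raw syscall is used since glibc provides no wrapper:

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	/*
	 * The remaining fields are for SCHED_DEADLINE; for a fair-class
	 * task, sched_runtime carries the slice request in nanoseconds.
	 */
	uint64_t sched_runtime;
	uint64_t sched_deadline;
	uint64_t sched_period;
};

int main(void)
{
	struct sched_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size          = sizeof(attr);
	attr.sched_policy  = 0;			/* SCHED_OTHER */
	attr.sched_runtime = 100 * 1000;	/* request a 100us slice */

	if (syscall(SYS_sched_setattr, 0, &attr, 0))
		perror("sched_setattr");
	return 0;
}

With this patch, when such a task sits inside a cgroup, every group
entity on its path to the root has its slice clamped to the min_slice
of its child cfs_rq, so the group itself is scheduled often enough to
honour the request.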
include/linux/sched.h
kernel/sched/fair.c

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 89a3d8d94e965e025871130dc3c9fca6cf485d5b..3709dedbab59209110afc76896f35354c3f15bbb 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -542,6 +542,7 @@ struct sched_entity {
        struct rb_node                  run_node;
        u64                             deadline;
        u64                             min_vruntime;
+       u64                             min_slice;
 
        struct list_head                group_node;
        unsigned char                   on_rq;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3284d3cb71470b873649105a67dc344778e3f05c..fea057b311f69b15f92a1a09a84949e60cd0806a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -782,6 +782,21 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
        cfs_rq->min_vruntime = __update_min_vruntime(cfs_rq, vruntime);
 }
 
+static inline u64 cfs_rq_min_slice(struct cfs_rq *cfs_rq)
+{
+       struct sched_entity *root = __pick_root_entity(cfs_rq);
+       struct sched_entity *curr = cfs_rq->curr;
+       u64 min_slice = ~0ULL;
+
+       if (curr && curr->on_rq)
+               min_slice = curr->slice;
+
+       if (root)
+               min_slice = min(min_slice, root->min_slice);
+
+       return min_slice;
+}
+
 static inline bool __entity_less(struct rb_node *a, const struct rb_node *b)
 {
        return entity_before(__node_2_se(a), __node_2_se(b));
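
Note that cfs_rq->curr has to be folded in separately: the running
entity is taken off the rbtree by set_next_entity(), so the tree
root's min_slice only covers the queued entities. With both accounted
for, the minimum over everything runnable is an O(1) read; an empty
cfs_rq yields ~0ULL, which drops out of any subsequent min().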
@@ -798,19 +813,34 @@ static inline void __min_vruntime_update(struct sched_entity *se, struct rb_node
        }
 }
 
+static inline void __min_slice_update(struct sched_entity *se, struct rb_node *node)
+{
+       if (node) {
+               struct sched_entity *rse = __node_2_se(node);
+               if (rse->min_slice < se->min_slice)
+                       se->min_slice = rse->min_slice;
+       }
+}
+
 /*
  * se->min_vruntime = min(se->vruntime, {left,right}->min_vruntime)
  */
 static inline bool min_vruntime_update(struct sched_entity *se, bool exit)
 {
        u64 old_min_vruntime = se->min_vruntime;
+       u64 old_min_slice = se->min_slice;
        struct rb_node *node = &se->run_node;
 
        se->min_vruntime = se->vruntime;
        __min_vruntime_update(se, node->rb_right);
        __min_vruntime_update(se, node->rb_left);
 
-       return se->min_vruntime == old_min_vruntime;
+       se->min_slice = se->slice;
+       __min_slice_update(se, node->rb_right);
+       __min_slice_update(se, node->rb_left);
+
+       return se->min_vruntime == old_min_vruntime &&
+              se->min_slice == old_min_slice;
 }
 
 RB_DECLARE_CALLBACKS(static, min_vruntime_cb, struct sched_entity,
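
This is the standard augmented-rbtree scheme already used for
min_vruntime: each node caches the minimum of its subtree, the cached
value is recomputed bottom-up on insert, erase and rotation, and
propagation stops early once a node's cached value did not change,
which is why min_vruntime_update() now reports whether both cached
fields are unchanged. A toy, kernel-independent sketch of the
invariant, with hypothetical names:

#include <stdint.h>

struct toy_node {
	struct toy_node	*left, *right;
	uint64_t	slice;		/* this entity's own request */
	uint64_t	min_slice;	/* min over this entire subtree */
};

/*
 * Re-establish the invariant for one node; returns non-zero when the
 * cached value is unchanged, i.e. upward propagation may stop early.
 */
static int toy_min_slice_update(struct toy_node *n)
{
	uint64_t old = n->min_slice, min = n->slice;

	if (n->left && n->left->min_slice < min)
		min = n->left->min_slice;
	if (n->right && n->right->min_slice < min)
		min = n->right->min_slice;

	n->min_slice = min;
	return min == old;
}

Because the root's min_slice subsumes the whole tree, cfs_rq_min_slice()
obtains the minimum over all queued entities without walking anything.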
@@ -823,6 +853,7 @@ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
        avg_vruntime_add(cfs_rq, se);
        se->min_vruntime = se->vruntime;
+       se->min_slice = se->slice;
        rb_add_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
                                __entity_less, &min_vruntime_cb);
 }
@@ -6911,6 +6942,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
        int idle_h_nr_running = task_has_idle_policy(p);
        int task_new = !(flags & ENQUEUE_WAKEUP);
        int rq_h_nr_running = rq->cfs.h_nr_running;
+       u64 slice = 0;
 
        if (flags & ENQUEUE_DELAYED) {
                requeue_delayed_entity(se);
@@ -6940,7 +6972,18 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                        break;
                }
                cfs_rq = cfs_rq_of(se);
+
+               /*
+                * Basically set the slice of group entries to the min_slice of
+                * their respective cfs_rq. This ensures the group can service
+                * its entities in the desired time-frame.
+                */
+               if (slice) {
+                       se->slice = slice;
+                       se->custom_slice = 1;
+               }
                enqueue_entity(cfs_rq, se, flags);
+               slice = cfs_rq_min_slice(cfs_rq);
 
                cfs_rq->h_nr_running++;
                cfs_rq->idle_h_nr_running += idle_h_nr_running;
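
The ordering here is subtle: slice is 0 on the first iteration, so the
task-level entity keeps its own (possibly custom) slice, and
cfs_rq_min_slice() is sampled only after enqueue_entity() so the freshly
added entity is already included. A hypothetical walk of a task T with a
100us custom slice in nested cgroups A/B, assuming nothing else on those
queues requests a shorter slice:

	iteration 0 (se = T):  slice == 0, T keeps its 100us slice;
	                       enqueue_entity(cfs_rq(B), T);
	                       slice = cfs_rq_min_slice(cfs_rq(B)) = 100us
	iteration 1 (se = B):  B->slice = 100us, custom_slice = 1;
	                       enqueue_entity(cfs_rq(A), B);
	                       slice = cfs_rq_min_slice(cfs_rq(A)) <= 100us
	iteration 2 (se = A):  likewise, A inherits the minimum of its queue

The second loop below applies the same update to ancestors that were
already enqueued when the first loop terminated.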
@@ -6962,6 +7005,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                se_update_runnable(se);
                update_cfs_group(se);
 
+               se->slice = slice;
+               slice = cfs_rq_min_slice(cfs_rq);
+
                cfs_rq->h_nr_running++;
                cfs_rq->idle_h_nr_running += idle_h_nr_running;
 
@@ -7027,11 +7073,15 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
        int idle_h_nr_running = 0;
        int h_nr_running = 0;
        struct cfs_rq *cfs_rq;
+       u64 slice = 0;
 
        if (entity_is_task(se)) {
                p = task_of(se);
                h_nr_running = 1;
                idle_h_nr_running = task_has_idle_policy(p);
+       } else {
+               cfs_rq = group_cfs_rq(se);
+               slice = cfs_rq_min_slice(cfs_rq);
        }
 
        for_each_sched_entity(se) {
@@ -7056,6 +7106,8 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 
                /* Don't dequeue parent if it has other entities besides us */
                if (cfs_rq->load.weight) {
+                       slice = cfs_rq_min_slice(cfs_rq);
+
                        /* Avoid re-evaluating load for this entity: */
                        se = parent_entity(se);
                        /*
@@ -7077,6 +7129,9 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
                se_update_runnable(se);
                update_cfs_group(se);
 
+               se->slice = slice;
+               slice = cfs_rq_min_slice(cfs_rq);
+
                cfs_rq->h_nr_running -= h_nr_running;
                cfs_rq->idle_h_nr_running -= idle_h_nr_running;
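
The dequeue side mirrors this: when a parent stays queued because it
still has other entities (cfs_rq->load.weight non-zero), slice is
refreshed from cfs_rq_min_slice() before walking up, and the remaining
ancestors get the same se->slice update. The net effect is that a
departing short-slice task stops constraining its ancestors' slices as
soon as it leaves, rather than pinning them until the next enqueue.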