sched/eevdf: Use sched_attr::sched_runtime to set request/slice suggestion
author     Peter Zijlstra <peterz@infradead.org>   Mon, 22 May 2023 11:46:30 +0000 (13:46 +0200)
committer  Peter Zijlstra <peterz@infradead.org>   Sat, 17 Aug 2024 09:06:45 +0000 (11:06 +0200)
Allow applications to directly set a suggested request/slice length using
sched_attr::sched_runtime.

The implementation clamps the value to: 0.1[ms] <= slice <= 100[ms],
i.e. 1/10 of the tick period at HZ=1000 and 10 tick periods at HZ=100.

Applications should use a high-confidence (95%+) estimate of their
periodic runtime as the target slice. Using a smaller slice will
introduce undue preemptions, while using a larger value will increase
latency.
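
For example, an application with a ~2ms periodic runtime could request a
matching slice with sched_setattr(). A minimal userspace sketch -- assuming
the raw syscall, since glibc provides no wrapper, and the abbreviated v0
sched_attr layout:

  #define _GNU_SOURCE
  #include <stdint.h>
  #include <stdio.h>
  #include <unistd.h>
  #include <sys/syscall.h>

  /* Abbreviated (v0) copy of the uapi struct; the kernel accepts shorter
   * layouts because ->size is passed in. */
  struct sched_attr {
          uint32_t size;
          uint32_t sched_policy;
          uint64_t sched_flags;
          int32_t  sched_nice;
          uint32_t sched_priority;
          uint64_t sched_runtime;   /* doubles as the fair-class slice hint */
          uint64_t sched_deadline;
          uint64_t sched_period;
  };

  int main(void)
  {
          struct sched_attr attr = {
                  .size          = sizeof(attr),
                  .sched_policy  = 0,                /* SCHED_OTHER */
                  .sched_runtime = 2 * 1000 * 1000,  /* 2ms slice hint */
          };

          if (syscall(SYS_sched_setattr, 0 /* self */, &attr, 0 /* flags */))
                  perror("sched_setattr");
          return 0;
  }

Note that out-of-range values are not rejected but silently clamped, so
requesting 0.05[ms] yields a 0.1[ms] slice.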

For all the following examples assume a scheduling quantum of 8, and for
consistency all examples have W=4:

  {A,B,C,D}(w=1,r=8):

  ABCD...
  +---+---+---+---

  t=0, V=1.5
  A  |------<
  B   |------<
  C    |------<
  D     |------<
  ---+*------+-------+---

  t=1, V=3.5
  A          |------<
  B   |------<
  C    |------<
  D     |------<
  ---+--*----+-------+---

  t=2, V=5.5
  A          |------<
  B           |------<
  C    |------<
  D     |------<
  ---+----*--+-------+---

  t=3, V=7.5
  A          |------<
  B           |------<
  C            |------<
  D     |------<
  ---+------*+-------+---

Note: 4 identical tasks in FIFO order
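      Worked through the EEVDF deadline rule vd_i = ve_i + r_i/w_i: with
      the 0,1,2,3 eligibility spread and r/w = 8 for all four tasks, the
      virtual deadlines are 8, 9, 10 and 11, which is exactly the FIFO
      pick order above.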

~~~

  {A,B}(w=1,r=16) C(w=2,r=16)

  AACCBBCC...
  +---+---+---+---

  t=0, V=1.25
  A  |--------------<
  B   |--------------<
  C    |------<
  ---+*------+-------+---

  t=2, V=5.25
  A                  |--------------<
  B   |--------------<
  C    |------<
  ---+----*--+-------+---

  t=4, V=8.25
  A                  |--------------<
  B   |--------------<
  C            |------<
  ---+-------*-------+---

  t=6, V=12.25
  A                  |--------------<
  B                   |--------------<
  C            |------<
  ---+-------+---*---+---

Note: 1 heavy task -- because q=8, double r such that the deadline of the w=2
      task doesn't go below q.

Note: observe the full schedule becomes: W*max(r_i/w_i) = 4*2q = 8q in length.

Note: the period of the heavy task is half the full period at:
      W*(r_i/w_i) = 4*(2q/2) = 4q

~~~

  {A,C,D}(w=1,r=16) B(w=1,r=8):

  BAACCBDD...
  +---+---+---+---

  t=0, V=1.5
  A  |--------------<
  B   |------<
  C    |--------------<
  D     |--------------<
  ---+*------+-------+---

  t=1, V=3.5
  A  |---------------<
  B           |------<
  C    |--------------<
  D     |--------------<
  ---+--*----+-------+---

  t=3, V=7.5
  A                  |---------------<
  B           |------<
  C    |--------------<
  D     |--------------<
  ---+------*+-------+---

  t=5, V=11.5
  A                  |---------------<
  B           |------<
  C                    |--------------<
  D     |--------------<
  ---+-------+--*----+---

  t=6, V=13.5
  A                  |---------------<
  B                   |------<
  C                    |--------------<
  D     |--------------<
  ---+-------+----*--+---

Note: 1 short task -- again double r so that the deadline of the short task
      won't be below q. Made B short because it's not the leftmost task, but
      it is eligible with the 0,1,2,3 spread.

Note: like with the heavy task, the period of the short task observes:
      W*(r_i/w_i) = 4*(1q/1) = 4q

~~~

  A(w=1,r=16) B(w=1,r=8) C(w=2,r=16)

  BCCAABCC...
  +---+---+---+---

  t=0, V=1.25
  A  |--------------<
  B   |------<
  C    |------<
  ---+*------+-------+---

  t=1, V=3.25
  A  |--------------<
  B           |------<
  C    |------<
  ---+--*----+-------+---

  t=3, V=7.25
  A  |--------------<
  B           |------<
  C            |------<
  ---+------*+-------+---

  t=5, V=11.25
  A                  |--------------<
  B           |------<
  C            |------<
  ---+-------+--*----+---

  t=6, V=13.25
  A                  |--------------<
  B                   |------<
  C            |------<
  ---+-------+----*--+---

Note: 1 heavy and 1 short task -- combine them all.

Note: both the short and heavy task end up with a period of 4q

~~~

  A(w=1,r=16) B(w=2,r=16) C(w=1,r=8)

  BBCAABBC...
  +---+---+---+---

  t=0, V=1
  A  |--------------<
  B   |------<
  C    |------<
  ---+*------+-------+---

  t=2, V=5
  A  |--------------<
  B           |------<
  C    |------<
  ---+----*--+-------+---

  t=3, V=7
  A  |--------------<
  B           |------<
  C            |------<
  ---+------*+-------+---

  t=5, V=11
  A                  |--------------<
  B           |------<
  C            |------<
  ---+-------+--*----+---

  t=7, V=15
  A                  |--------------<
  B                   |------<
  C            |------<
  ---+-------+------*+---

Note: as before but permuted

~~~

From all this it can be deduced that, for the steady state:

 - the total period (P) of a schedule is: W*max(r_i/w_i)
 - the average period of a task is: W*(r_i/w_i)
 - each task obtains the fair share: w_i/W of each full period P
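
As a quick check of these steady-state rules, a small standalone sketch
(not part of the patch) evaluating them for the combined example
A(w=1,r=16) B(w=1,r=8) C(w=2,r=16) above:

  #include <stdio.h>

  int main(void)
  {
          /* Combined example: A(w=1,r=16) B(w=1,r=8) C(w=2,r=16), q = 8 */
          int w[] = { 1, 1, 2 }, r[] = { 16, 8, 16 };
          int n = 3, W = 0, max_rw = 0;

          for (int i = 0; i < n; i++) {
                  W += w[i];
                  if (r[i] / w[i] > max_rw)
                          max_rw = r[i] / w[i];
          }

          /* P = W*max(r_i/w_i) = 4*16 = 64 = 8q */
          printf("P = %d\n", W * max_rw);

          /* A: 64 (8q); B: 32 (4q); C: 32 (4q) -- matching the notes */
          for (int i = 0; i < n; i++)
                  printf("%c: period %d, share %d/%d of P\n",
                         'A' + i, W * (r[i] / w[i]), w[i], W);
          return 0;
  }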

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Valentin Schneider <vschneid@redhat.com>
Link: https://lkml.kernel.org/r/20240727105030.842834421@infradead.org
include/linux/sched.h
kernel/sched/core.c
kernel/sched/debug.c
kernel/sched/fair.c
kernel/sched/syscalls.c

diff --git a/include/linux/sched.h b/include/linux/sched.h
index d25e1cfd576673692eb8d4245c885b6607e96f5b..89a3d8d94e965e025871130dc3c9fca6cf485d5b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -547,6 +547,7 @@ struct sched_entity {
        unsigned char                   on_rq;
        unsigned char                   sched_delayed;
        unsigned char                   rel_deadline;
+       unsigned char                   custom_slice;
                                        /* hole */
 
        u64                             exec_start;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 868b71b9f2e4907ac91364bb557dba15b67f9342..016581168cb8f30db73cd74832da86d4899baf63 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4390,7 +4390,6 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
        p->se.nr_migrations             = 0;
        p->se.vruntime                  = 0;
        p->se.vlag                      = 0;
-       p->se.slice                     = sysctl_sched_base_slice;
        INIT_LIST_HEAD(&p->se.group_node);
 
        /* A delayed task cannot be in clone(). */
@@ -4643,6 +4642,8 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 
                p->prio = p->normal_prio = p->static_prio;
                set_load_weight(p, false);
+               p->se.custom_slice = 0;
+               p->se.slice = sysctl_sched_base_slice;
 
                /*
                 * We don't need the reset flag anymore after the fork. It has
@@ -8412,6 +8413,7 @@ void __init sched_init(void)
        }
 
        set_load_weight(&init_task, false);
+       init_task.se.slice = sysctl_sched_base_slice;
 
        /*
         * The boot idle thread does lazy MMU switching as well:
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 831a77ab8466145d2c5394dddc4375d41ea4c6b8..01ce9a76164cb38086a4bd1e797f1f30a1adfa40 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -739,11 +739,12 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
        else
                SEQ_printf(m, " %c", task_state_to_char(p));
 
-       SEQ_printf(m, "%15s %5d %9Ld.%06ld %c %9Ld.%06ld %9Ld.%06ld %9Ld.%06ld %9Ld %5d ",
+       SEQ_printf(m, "%15s %5d %9Ld.%06ld %c %9Ld.%06ld %c %9Ld.%06ld %9Ld.%06ld %9Ld %5d ",
                p->comm, task_pid_nr(p),
                SPLIT_NS(p->se.vruntime),
                entity_eligible(cfs_rq_of(&p->se), &p->se) ? 'E' : 'N',
                SPLIT_NS(p->se.deadline),
+               p->se.custom_slice ? 'S' : ' ',
                SPLIT_NS(p->se.slice),
                SPLIT_NS(p->se.sum_exec_runtime),
                (long long)(p->nvcsw + p->nivcsw),
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index cc30ea3a84e29e8871d511a5e166a831ae7639f2..3284d3cb71470b873649105a67dc344778e3f05c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -983,7 +983,8 @@ static bool update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
         * nice) while the request time r_i is determined by
         * sysctl_sched_base_slice.
         */
-       se->slice = sysctl_sched_base_slice;
+       if (!se->custom_slice)
+               se->slice = sysctl_sched_base_slice;
 
        /*
         * EEVDF: vd_i = ve_i + r_i / w_i
@@ -5227,7 +5228,8 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
        u64 vslice, vruntime = avg_vruntime(cfs_rq);
        s64 lag = 0;
 
-       se->slice = sysctl_sched_base_slice;
+       if (!se->custom_slice)
+               se->slice = sysctl_sched_base_slice;
        vslice = calc_delta_fair(se->slice, se);
 
        /*
diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c
index 60e70c889d91e849a46323bb3cda38aac75c7733..4fae3cf25a3a2dac0a5b906fe1911f0bbca7dc8d 100644
--- a/kernel/sched/syscalls.c
+++ b/kernel/sched/syscalls.c
@@ -401,10 +401,20 @@ static void __setscheduler_params(struct task_struct *p,
 
        p->policy = policy;
 
-       if (dl_policy(policy))
+       if (dl_policy(policy)) {
                __setparam_dl(p, attr);
-       else if (fair_policy(policy))
+       } else if (fair_policy(policy)) {
                p->static_prio = NICE_TO_PRIO(attr->sched_nice);
+               if (attr->sched_runtime) {
+                       p->se.custom_slice = 1;
+                       p->se.slice = clamp_t(u64, attr->sched_runtime,
+                                             NSEC_PER_MSEC/10,   /* HZ=1000 * 10 */
+                                             NSEC_PER_MSEC*100); /* HZ=100  / 10 */
+               } else {
+                       p->se.custom_slice = 0;
+                       p->se.slice = sysctl_sched_base_slice;
+               }
+       }
 
        /*
         * __sched_setscheduler() ensures attr->sched_priority == 0 when
@@ -700,7 +710,9 @@ recheck:
         * but store a possible modification of reset_on_fork.
         */
        if (unlikely(policy == p->policy)) {
-               if (fair_policy(policy) && attr->sched_nice != task_nice(p))
+               if (fair_policy(policy) &&
+                   (attr->sched_nice != task_nice(p) ||
+                    (attr->sched_runtime != p->se.slice)))
                        goto change;
                if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
                        goto change;
@@ -846,6 +858,9 @@ static int _sched_setscheduler(struct task_struct *p, int policy,
                .sched_nice     = PRIO_TO_NICE(p->static_prio),
        };
 
+       if (p->se.custom_slice)
+               attr.sched_runtime = p->se.slice;
+
        /* Fixup the legacy SCHED_RESET_ON_FORK hack. */
        if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
                attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
@@ -1012,12 +1027,14 @@ err_size:
 
 static void get_params(struct task_struct *p, struct sched_attr *attr)
 {
-       if (task_has_dl_policy(p))
+       if (task_has_dl_policy(p)) {
                __getparam_dl(p, attr);
-       else if (task_has_rt_policy(p))
+       } else if (task_has_rt_policy(p)) {
                attr->sched_priority = p->rt_priority;
-       else
+       } else {
                attr->sched_nice = task_nice(p);
+               attr->sched_runtime = p->se.slice;
+       }
 }
 
 /**
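
Since get_params() now reports the effective slice through
sched_attr::sched_runtime, a matching read-back sketch -- with the same
raw-syscall and struct-layout assumptions as the setter sketch above --
could look like:

  #define _GNU_SOURCE
  #include <stdint.h>
  #include <stdio.h>
  #include <unistd.h>
  #include <sys/syscall.h>

  /* Same abbreviated (v0) uapi layout as in the setter sketch. */
  struct sched_attr {
          uint32_t size;
          uint32_t sched_policy;
          uint64_t sched_flags;
          int32_t  sched_nice;
          uint32_t sched_priority;
          uint64_t sched_runtime;
          uint64_t sched_deadline;
          uint64_t sched_period;
  };

  int main(void)
  {
          struct sched_attr attr;

          /* Kernel fills attr; sched_runtime holds the (clamped) slice. */
          if (syscall(SYS_sched_getattr, 0 /* self */, &attr, sizeof(attr), 0))
                  perror("sched_getattr");
          else
                  printf("slice: %llu ns\n",
                         (unsigned long long)attr.sched_runtime);
          return 0;
  }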