sched_ext: Account for idle policy when setting p->scx.weight in scx_ops_enable_task()
author    Tejun Heo <tj@kernel.org>
Mon, 8 Jul 2024 19:19:14 +0000 (09:19 -1000)
committer Tejun Heo <tj@kernel.org>
Mon, 8 Jul 2024 19:25:35 +0000 (09:25 -1000)
When initializing p->scx.weight, scx_ops_enable_task() wasn't considering
whether the task is SCHED_IDLE. Update it to use WEIGHT_IDLEPRIO as the
source weight for SCHED_IDLE tasks. This leaves reweight_task_scx() the sole
user of set_task_scx_weight(). Open code it. @weight is going to be provided
by sched core in the future anyway.
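
For illustration only, a stand-alone C sketch of the weight selection described
above, assuming the in-tree constants (WEIGHT_IDLEPRIO == 3, nice-0 weight ==
1024, cgroup weight range 1..10000). weight_to_cgroup() below is a hypothetical
stand-in that mirrors sched_weight_to_cgroup(), not the kernel implementation:

/* Stand-alone sketch, not kernel code. */
#include <stdio.h>

#define WEIGHT_IDLEPRIO   3     /* weight CFS gives SCHED_IDLE tasks */
#define NICE_0_WEIGHT     1024  /* sched_prio_to_weight[] entry for nice 0 */
#define CGROUP_WEIGHT_DFL 100
#define CGROUP_WEIGHT_MIN 1
#define CGROUP_WEIGHT_MAX 10000

/* Hypothetical stand-in for sched_weight_to_cgroup(): map a task weight
 * onto the cgroup weight scale where nice 0 corresponds to 100. */
static unsigned int weight_to_cgroup(unsigned long weight)
{
	unsigned long w = (weight * CGROUP_WEIGHT_DFL + NICE_0_WEIGHT / 2) /
			  NICE_0_WEIGHT;

	if (w < CGROUP_WEIGHT_MIN)
		w = CGROUP_WEIGHT_MIN;
	if (w > CGROUP_WEIGHT_MAX)
		w = CGROUP_WEIGHT_MAX;
	return (unsigned int)w;
}

int main(void)
{
	/*
	 * A SCHED_IDLE task whose static_prio corresponds to nice 0:
	 * the old table lookup yields 100, the idle weight yields 1.
	 */
	printf("table weight -> %u\n", weight_to_cgroup(NICE_0_WEIGHT));    /* 100 */
	printf("idle weight  -> %u\n", weight_to_cgroup(WEIGHT_IDLEPRIO));  /* 1 */
	return 0;
}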

v2: Use the newly available @lw->weight to set @p->scx.weight in
    reweight_task_scx().
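
As background for the v2 change, a simplified stand-alone sketch of the
scale_load_down() relationship the new reweight_task_scx() line relies on:
on 64-bit kernels, load weights carry an extra SCHED_FIXEDPOINT_SHIFT (10)
bits of resolution on top of the sched_prio_to_weight[] scale, so @lw->weight
must be scaled back down before the cgroup-weight conversion (the real kernel
helper additionally clamps tiny non-zero values):

/* Simplified stand-alone sketch, not kernel code. */
#include <stdio.h>

#define SCHED_FIXEDPOINT_SHIFT 10

/* Simplified versions of the kernel's 64-bit scale_load()/scale_load_down(). */
static unsigned long scale_load(unsigned long w)
{
	return w << SCHED_FIXEDPOINT_SHIFT;
}

static unsigned long scale_load_down(unsigned long w)
{
	return w >> SCHED_FIXEDPOINT_SHIFT;
}

int main(void)
{
	/* What lw->weight would look like for a nice-0 task on 64-bit. */
	unsigned long lw_weight = scale_load(1024);

	printf("lw->weight = %lu, table-scale weight = %lu\n",
	       lw_weight, scale_load_down(lw_weight));  /* 1048576, 1024 */
	return 0;
}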

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: David Vernet <void@manifault.com>
Cc: Peter Zijlstra <peterz@infradead.org>
kernel/sched/ext.c

index 525102f3ff5be396d780c4fc292f9389c9fbaa7e..3eb7169e3973dc9ce9ac7b1c4e322524cf11dbf5 100644
@@ -3237,22 +3237,23 @@ static int scx_ops_init_task(struct task_struct *p, struct task_group *tg, bool
        return 0;
 }
 
-static void set_task_scx_weight(struct task_struct *p)
-{
-       u32 weight = sched_prio_to_weight[p->static_prio - MAX_RT_PRIO];
-
-       p->scx.weight = sched_weight_to_cgroup(weight);
-}
-
 static void scx_ops_enable_task(struct task_struct *p)
 {
+       u32 weight;
+
        lockdep_assert_rq_held(task_rq(p));
 
        /*
         * Set the weight before calling ops.enable() so that the scheduler
         * doesn't see a stale value if they inspect the task struct.
         */
-       set_task_scx_weight(p);
+       if (task_has_idle_policy(p))
+               weight = WEIGHT_IDLEPRIO;
+       else
+               weight = sched_prio_to_weight[p->static_prio - MAX_RT_PRIO];
+
+       p->scx.weight = sched_weight_to_cgroup(weight);
+
        if (SCX_HAS_OP(enable))
                SCX_CALL_OP_TASK(SCX_KF_REST, enable, p);
        scx_set_task_state(p, SCX_TASK_ENABLED);
@@ -3408,7 +3409,7 @@ static void reweight_task_scx(struct rq *rq, struct task_struct *p,
 {
        lockdep_assert_rq_held(task_rq(p));
 
-       set_task_scx_weight(p);
+       p->scx.weight = sched_weight_to_cgroup(scale_load_down(lw->weight));
        if (SCX_HAS_OP(set_weight))
                SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, p, p->scx.weight);
 }