]> git.ipfire.org Git - thirdparty/linux.git/commitdiff
sched: Move sched_class::prio_changed() into the change pattern
author Peter Zijlstra <peterz@infradead.org>
Fri, 1 Nov 2024 13:16:10 +0000 (14:16 +0100)
committer Peter Zijlstra <peterz@infradead.org>
Thu, 16 Oct 2025 09:13:52 +0000 (11:13 +0200)
Move sched_class::prio_changed() into the change pattern.

And while there, extend it with sched_class::get_prio() in order to
fix the deadline situation.

Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Juri Lelli <juri.lelli@redhat.com>
Acked-by: Tejun Heo <tj@kernel.org>
Acked-by: Vincent Guittot <vincent.guittot@linaro.org>
kernel/sched/core.c
kernel/sched/deadline.c
kernel/sched/ext.c
kernel/sched/fair.c
kernel/sched/idle.c
kernel/sched/rt.c
kernel/sched/sched.h
kernel/sched/stop_task.c
kernel/sched/syscalls.c

index bd2c551de6d7e0baacfc8ac8c44c178f5b0cc7b6..4a4dbce77bd758b5f1679b726258cb50e6830e6f 100644 (file)
@@ -2169,12 +2169,6 @@ inline int task_curr(const struct task_struct *p)
        return cpu_curr(task_cpu(p)) == p;
 }
 
-void check_prio_changed(struct rq *rq, struct task_struct *p, int oldprio)
-{
-       if (oldprio != p->prio || dl_task(p))
-               p->sched_class->prio_changed(rq, p, oldprio);
-}
-
 void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags)
 {
        struct task_struct *donor = rq->donor;
@@ -7400,9 +7394,6 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
                p->sched_class = next_class;
                p->prio = prio;
        }
-
-       if (!(queue_flag & DEQUEUE_CLASS))
-               check_prio_changed(rq, p, oldprio);
 out_unlock:
        /* Avoid rq from going away on us: */
        preempt_disable();
@@ -10855,6 +10846,13 @@ struct sched_change_ctx *sched_change_begin(struct task_struct *p, unsigned int
                .running = task_current_donor(rq, p),
        };
 
+       if (!(flags & DEQUEUE_CLASS)) {
+               if (p->sched_class->get_prio)
+                       ctx->prio = p->sched_class->get_prio(rq, p);
+               else
+                       ctx->prio = p->prio;
+       }
+
        if (ctx->queued)
                dequeue_task(rq, p, flags);
        if (ctx->running)
@@ -10881,6 +10879,10 @@ void sched_change_end(struct sched_change_ctx *ctx)
        if (ctx->running)
                set_next_task(rq, p);
 
-       if ((ctx->flags & ENQUEUE_CLASS) && p->sched_class->switched_to)
-               p->sched_class->switched_to(rq, p);
+       if (ctx->flags & ENQUEUE_CLASS) {
+               if (p->sched_class->switched_to)
+                       p->sched_class->switched_to(rq, p);
+       } else {
+               p->sched_class->prio_changed(rq, p, ctx->prio);
+       }
 }
index fd147a78f7ed2f0907fbdaa6fdba4727e9a13851..1f949949840381f7385021408ed9d3073ee6d443 100644 (file)
@@ -3042,23 +3042,24 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
        }
 }
 
+static u64 get_prio_dl(struct rq *rq, struct task_struct *p)
+{
+       return p->dl.deadline;
+}
+
 /*
  * If the scheduling parameters of a -deadline task changed,
  * a push or pull operation might be needed.
  */
-static void prio_changed_dl(struct rq *rq, struct task_struct *p,
-                           int oldprio)
+static void prio_changed_dl(struct rq *rq, struct task_struct *p, u64 old_deadline)
 {
        if (!task_on_rq_queued(p))
                return;
 
-       /*
-        * This might be too much, but unfortunately
-        * we don't have the old deadline value, and
-        * we can't argue if the task is increasing
-        * or lowering its prio, so...
-        */
-       if (!rq->dl.overloaded)
+       if (p->dl.deadline == old_deadline)
+               return;
+
+       if (dl_time_before(old_deadline, p->dl.deadline))
                deadline_queue_pull_task(rq);
 
        if (task_current_donor(rq, p)) {
@@ -3113,6 +3114,7 @@ DEFINE_SCHED_CLASS(dl) = {
        .task_tick              = task_tick_dl,
        .task_fork              = task_fork_dl,
 
+       .get_prio               = get_prio_dl,
        .prio_changed           = prio_changed_dl,
        .switched_from          = switched_from_dl,
        .switched_to            = switched_to_dl,
index b0a1e2a4c4a343de90be84fc521ec5ed1f5d46b2..ad371b612a16f4f22c4cabb6793a10da01d7adcd 100644 (file)
@@ -2961,7 +2961,7 @@ static void reweight_task_scx(struct rq *rq, struct task_struct *p,
                                 p, p->scx.weight);
 }
 
-static void prio_changed_scx(struct rq *rq, struct task_struct *p, int oldprio)
+static void prio_changed_scx(struct rq *rq, struct task_struct *p, u64 oldprio)
 {
 }
 
@@ -3926,9 +3926,6 @@ static void scx_disable_workfn(struct kthread_work *work)
                        p->sched_class = new_class;
                }
 
-               if (!(queue_flags & DEQUEUE_CLASS))
-                       check_prio_changed(task_rq(p), p, p->prio);
-
                scx_exit_task(p);
        }
        scx_task_iter_stop(&sti);
@@ -4675,9 +4672,6 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
                        p->sched_class = new_class;
                }
 
-               if (!(queue_flags & DEQUEUE_CLASS))
-                       check_prio_changed(task_rq(p), p, p->prio);
-
                put_task_struct(p);
        }
        scx_task_iter_stop(&sti);
index 6c462e4b3db9b96490746ee80792e5ea15a57495..77a713ecde9d10eb00ff214350bd66dbf3bacadb 100644 (file)
@@ -13150,11 +13150,14 @@ static void task_fork_fair(struct task_struct *p)
  * the current task.
  */
 static void
-prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
+prio_changed_fair(struct rq *rq, struct task_struct *p, u64 oldprio)
 {
        if (!task_on_rq_queued(p))
                return;
 
+       if (p->prio == oldprio)
+               return;
+
        if (rq->cfs.nr_queued == 1)
                return;
 
@@ -13166,8 +13169,9 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
        if (task_current_donor(rq, p)) {
                if (p->prio > oldprio)
                        resched_curr(rq);
-       } else
+       } else {
                wakeup_preempt(rq, p, 0);
+       }
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
index f02dceda039a3f26a93b9fbafcb44446d5ecb631..dee6e019dcf81fbdab568d952c00e4a111b07a73 100644 (file)
@@ -504,8 +504,11 @@ static void switching_to_idle(struct rq *rq, struct task_struct *p)
 }
 
 static void
-prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
+prio_changed_idle(struct rq *rq, struct task_struct *p, u64 oldprio)
 {
+       if (p->prio == oldprio)
+               return;
+
        BUG();
 }
 
index 6b2e8112c57064e2f7fa184d54dfd327b4f3478d..c2347e485dc651b964379257f8f3991629407e34 100644 (file)
@@ -2437,11 +2437,14 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
  * us to initiate a push or pull.
  */
 static void
-prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
+prio_changed_rt(struct rq *rq, struct task_struct *p, u64 oldprio)
 {
        if (!task_on_rq_queued(p))
                return;
 
+       if (p->prio == oldprio)
+               return;
+
        if (task_current_donor(rq, p)) {
                /*
                 * If our priority decreases while running, we
index e3f4215e84f76a3370b335ee4286473f55689d8e..bcde43deb8e9ff8196a841d4113a9479de3b2b0d 100644 (file)
@@ -2451,8 +2451,10 @@ struct sched_class {
 
        void (*reweight_task)(struct rq *this_rq, struct task_struct *task,
                              const struct load_weight *lw);
+
+       u64  (*get_prio)     (struct rq *this_rq, struct task_struct *task);
        void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
-                             int oldprio);
+                             u64 oldprio);
 
        unsigned int (*get_rr_interval)(struct rq *rq,
                                        struct task_struct *task);
@@ -3877,8 +3879,6 @@ extern void set_load_weight(struct task_struct *p, bool update_load);
 extern void enqueue_task(struct rq *rq, struct task_struct *p, int flags);
 extern bool dequeue_task(struct rq *rq, struct task_struct *p, int flags);
 
-extern void check_prio_changed(struct rq *rq, struct task_struct *p, int oldprio);
-
 extern struct balance_callback *splice_balance_callbacks(struct rq *rq);
 extern void balance_callbacks(struct rq *rq, struct balance_callback *head);
 
@@ -3899,6 +3899,7 @@ extern void balance_callbacks(struct rq *rq, struct balance_callback *head);
  * the task's queueing state is idempotent across the operation.
  */
 struct sched_change_ctx {
+       u64                     prio;
        struct task_struct      *p;
        int                     flags;
        bool                    queued;
index fcc4c54245ec906519b6453abafb48c4a5550007..73aa8de190675ca6fae67f6c7ee7bc84cc47f0c4 100644 (file)
@@ -81,8 +81,11 @@ static void switching_to_stop(struct rq *rq, struct task_struct *p)
 }
 
 static void
-prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
+prio_changed_stop(struct rq *rq, struct task_struct *p, u64 oldprio)
 {
+       if (p->prio == oldprio)
+               return;
+
        BUG(); /* how!?, what priority? */
 }
 
index 6583faf66f6545ce30a75aa43ecb81168bad9194..20af5640b0ab49480fe58914b705b0f620343011 100644 (file)
@@ -95,12 +95,6 @@ void set_user_nice(struct task_struct *p, long nice)
                old_prio = p->prio;
                p->prio = effective_prio(p);
        }
-
-       /*
-        * If the task increased its priority or is running and
-        * lowered its priority, then reschedule its CPU:
-        */
-       p->sched_class->prio_changed(rq, p, old_prio);
 }
 EXPORT_SYMBOL(set_user_nice);
 
@@ -706,9 +700,6 @@ change:
                }
        }
 
-       if (!(queue_flags & DEQUEUE_CLASS))
-               check_prio_changed(rq, p, oldprio);
-
        /* Avoid rq from going away on us: */
        preempt_disable();
        head = splice_balance_callbacks(rq);