sched: Pass correct scheduling policy to __setscheduler_class
author    Aboorva Devarajan <aboorvad@linux.ibm.com>
Fri, 25 Oct 2024 18:50:20 +0000 (00:20 +0530)
committer Peter Zijlstra <peterz@infradead.org>
Tue, 29 Oct 2024 12:57:51 +0000 (13:57 +0100)
Commit 98442f0ccd82 ("sched: Fix delayed_dequeue vs
switched_from_fair()") overlooked that __setscheduler_prio(), now
__setscheduler_class(), relies on p->policy for task_should_scx(), and
moved the call before __setscheduler_params() updates it, causing it
to use the stale p->policy value.
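
For illustration, an abridged sketch of the broken ordering inside
__sched_setscheduler() (from kernel/sched/syscalls.c; locking, error
handling and intermediate steps omitted):

        prev_class = p->sched_class;
        /* Reads p->policy, which still holds the OLD policy here. */
        next_class = __setscheduler_class(p, newprio);
        ...
        /* Only now does p->policy become attr->sched_policy. */
        __setscheduler_params(p, attr);
        p->sched_class = next_class;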

Resolve this by changing task_should_scx() to take the policy itself
instead of a task pointer, such that __sched_setscheduler() can pass
in the updated policy.
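
Abridged sketch of the fixed call site: __sched_setscheduler() already
keeps the requested policy in a local variable, so it can feed that to
the class lookup instead of the not-yet-updated task field:

        int policy = attr->sched_policy;        /* the NEW policy */
        ...
        /* Pass the new policy directly; no dependency on p->policy. */
        next_class = __setscheduler_class(policy, newprio);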

Fixes: 98442f0ccd82 ("sched: Fix delayed_dequeue vs switched_from_fair()")
Signed-off-by: Aboorva Devarajan <aboorvad@linux.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Tejun Heo <tj@kernel.org>
kernel/sched/core.c
kernel/sched/ext.c
kernel/sched/ext.h
kernel/sched/sched.h
kernel/sched/syscalls.c

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index dbfb5717d6afbe2691242b0e28efac0aa61dcc67..719e0ed1e97618cea8eefc9de08604c1b1fc76fd 100644
@@ -4711,7 +4711,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
        if (rt_prio(p->prio)) {
                p->sched_class = &rt_sched_class;
 #ifdef CONFIG_SCHED_CLASS_EXT
-       } else if (task_should_scx(p)) {
+       } else if (task_should_scx(p->policy)) {
                p->sched_class = &ext_sched_class;
 #endif
        } else {
@@ -7025,7 +7025,7 @@ int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flag
 }
 EXPORT_SYMBOL(default_wake_function);
 
-const struct sched_class *__setscheduler_class(struct task_struct *p, int prio)
+const struct sched_class *__setscheduler_class(int policy, int prio)
 {
        if (dl_prio(prio))
                return &dl_sched_class;
@@ -7034,7 +7034,7 @@ const struct sched_class *__setscheduler_class(struct task_struct *p, int prio)
                return &rt_sched_class;
 
 #ifdef CONFIG_SCHED_CLASS_EXT
-       if (task_should_scx(p))
+       if (task_should_scx(policy))
                return &ext_sched_class;
 #endif
 
@@ -7142,7 +7142,7 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
                queue_flag &= ~DEQUEUE_MOVE;
 
        prev_class = p->sched_class;
-       next_class = __setscheduler_class(p, prio);
+       next_class = __setscheduler_class(p->policy, prio);
 
        if (prev_class != next_class && p->se.sched_delayed)
                dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED | DEQUEUE_NOCLOCK);
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 5900b06fd0364c76bcca9a37e9ad9e76082b8560..40bdfe84e4f0033ad495c3861cf2ce48363634be 100644
@@ -4256,14 +4256,14 @@ static const struct kset_uevent_ops scx_uevent_ops = {
  * Used by sched_fork() and __setscheduler_prio() to pick the matching
  * sched_class. dl/rt are already handled.
  */
-bool task_should_scx(struct task_struct *p)
+bool task_should_scx(int policy)
 {
        if (!scx_enabled() ||
            unlikely(scx_ops_enable_state() == SCX_OPS_DISABLING))
                return false;
        if (READ_ONCE(scx_switching_all))
                return true;
-       return p->policy == SCHED_EXT;
+       return policy == SCHED_EXT;
 }
 
 /**
@@ -4493,7 +4493,7 @@ static void scx_ops_disable_workfn(struct kthread_work *work)
 
                sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
 
-               p->sched_class = __setscheduler_class(p, p->prio);
+               p->sched_class = __setscheduler_class(p->policy, p->prio);
                check_class_changing(task_rq(p), p, old_class);
 
                sched_enq_and_set_task(&ctx);
@@ -5204,7 +5204,7 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
                sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
 
                p->scx.slice = SCX_SLICE_DFL;
-               p->sched_class = __setscheduler_class(p, p->prio);
+               p->sched_class = __setscheduler_class(p->policy, p->prio);
                check_class_changing(task_rq(p), p, old_class);
 
                sched_enq_and_set_task(&ctx);
diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h
index 246019519231cf03ccd6bf68376b54e5f3119e6c..b1675bb59fc4610f59b8868dcc029a3b58008d18 100644
@@ -18,7 +18,7 @@ bool scx_can_stop_tick(struct rq *rq);
 void scx_rq_activate(struct rq *rq);
 void scx_rq_deactivate(struct rq *rq);
 int scx_check_setscheduler(struct task_struct *p, int policy);
-bool task_should_scx(struct task_struct *p);
+bool task_should_scx(int policy);
 void init_sched_ext_class(void);
 
 static inline u32 scx_cpuperf_target(s32 cpu)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 9f9d1cc390b124954f5a2e294fa8f78e25125d54..6c54a57275ccfdc685db48ef89a5dc01515696bb 100644
@@ -3830,7 +3830,7 @@ static inline int rt_effective_prio(struct task_struct *p, int prio)
 
 extern int __sched_setscheduler(struct task_struct *p, const struct sched_attr *attr, bool user, bool pi);
 extern int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx);
-extern const struct sched_class *__setscheduler_class(struct task_struct *p, int prio);
+extern const struct sched_class *__setscheduler_class(int policy, int prio);
 extern void set_load_weight(struct task_struct *p, bool update_load);
 extern void enqueue_task(struct rq *rq, struct task_struct *p, int flags);
 extern bool dequeue_task(struct rq *rq, struct task_struct *p, int flags);
diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c
index 0470bcc3d2048c01372ea98b0d9d64ac3dba9fd7..24f9f90b6574e57481a2d4e5dfbeb831b8a0f367 100644
@@ -707,7 +707,7 @@ change:
        }
 
        prev_class = p->sched_class;
-       next_class = __setscheduler_class(p, newprio);
+       next_class = __setscheduler_class(policy, newprio);
 
        if (prev_class != next_class && p->se.sched_delayed)
                dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED | DEQUEUE_NOCLOCK);