git.ipfire.org Git - thirdparty/linux.git/commitdiff
sched: Skip non-root task_groups with disabled RT_GROUP_SCHED
author Michal Koutný <mkoutny@suse.com>
Mon, 10 Mar 2025 17:04:37 +0000 (18:04 +0100)
committer Peter Zijlstra <peterz@infradead.org>
Tue, 8 Apr 2025 18:55:53 +0000 (20:55 +0200)
First, we want to prevent placement of RT tasks on non-root rt_rqs; we
achieve this in the task migration code, which falls back to
root_task_group's rt_rq.

Second, we want to work with only root_task_group's rt_rq when iterating
all "real" rt_rqs while RT_GROUP is disabled. To achieve this we keep
root_task_group as the first entry on the task_groups list and break out
of the iteration quickly.

Signed-off-by: Michal Koutný <mkoutny@suse.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20250310170442.504716-6-mkoutny@suse.com
kernel/sched/core.c
kernel/sched/rt.c
kernel/sched/sched.h

index 58d093a8c1af7b795b9a03099ff64ac4e71516f9..32fb4c1100cb985e3d2f4f3a480e04b735b059f2 100644 (file)
@@ -9020,7 +9020,7 @@ void sched_online_group(struct task_group *tg, struct task_group *parent)
        unsigned long flags;
 
        spin_lock_irqsave(&task_group_lock, flags);
-       list_add_rcu(&tg->list, &task_groups);
+       list_add_tail_rcu(&tg->list, &task_groups);
 
        /* Root should already exist: */
        WARN_ON(!parent);
index 1af3996ec0fbe690596a0acf2cc888c86e8cea2f..efa22bad31e1afcae43cb22ce7412ab7401e59ab 100644 (file)
@@ -495,6 +495,9 @@ typedef struct task_group *rt_rq_iter_t;
 
 static inline struct task_group *next_task_group(struct task_group *tg)
 {
+       if (!rt_group_sched_enabled())
+               return NULL;
+
        do {
                tg = list_entry_rcu(tg->list.next,
                        typeof(struct task_group), list);
@@ -507,9 +510,9 @@ static inline struct task_group *next_task_group(struct task_group *tg)
 }
 
 #define for_each_rt_rq(rt_rq, iter, rq)                                        \
-       for (iter = container_of(&task_groups, typeof(*iter), list);    \
-               (iter = next_task_group(iter)) &&                       \
-               (rt_rq = iter->rt_rq[cpu_of(rq)]);)
+       for (iter = &root_task_group;                                   \
+               iter && (rt_rq = iter->rt_rq[cpu_of(rq)]);              \
+               iter = next_task_group(iter))
 
 #define for_each_sched_rt_entity(rt_se) \
        for (; rt_se; rt_se = rt_se->parent)
index d1e591f91cf8a4b3abf1a0f82079909010d0c836..898aab7417bd0aa44569196b408196c26e31b95d 100644 (file)
@@ -2165,6 +2165,13 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 #endif
 
 #ifdef CONFIG_RT_GROUP_SCHED
+       /*
+        * p->rt.rt_rq is NULL initially and it is easier to assign
+        * root_task_group's rt_rq than switching in rt_rq_of_se()
+        * Clobbers tg(!)
+        */
+       if (!rt_group_sched_enabled())
+               tg = &root_task_group;
        p->rt.rt_rq  = tg->rt_rq[cpu];
        p->rt.parent = tg->rt_se[cpu];
 #endif