From: Michal Koutný
Date: Mon, 10 Mar 2025 17:04:35 +0000 (+0100)
Subject: sched: Always initialize rt_rq's task_group
X-Git-Tag: v6.16-rc1~197^2~18
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=a5a25b32c08a31c03258ec4960bec26caaf76e9a;p=thirdparty%2Flinux.git

sched: Always initialize rt_rq's task_group

rt_rq->tg may be NULL, which denotes the root task_group. Store the
pointer to root_task_group directly instead, so that callers may use
rt_rq->tg homogeneously.

root_task_group always exists with CONFIG_CGROUP_SCHED, and
CONFIG_RT_GROUP_SCHED depends on it.

This changes the root-level rt_rq's default limit from infinity to the
value of the (originally global) RT throttling.

Signed-off-by: Michal Koutný
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lkml.kernel.org/r/20250310170442.504716-4-mkoutny@suse.com
---

diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 61ec29b11ef45..1af3996ec0fbe 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -89,6 +89,7 @@ void init_rt_rq(struct rt_rq *rt_rq)
 	rt_rq->rt_throttled = 0;
 	rt_rq->rt_runtime = 0;
 	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
+	rt_rq->tg = &root_task_group;
 #endif
 }
 
@@ -482,9 +483,6 @@ static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
 
 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
 {
-	if (!rt_rq->tg)
-		return RUNTIME_INF;
-
 	return rt_rq->rt_runtime;
 }
 
@@ -1154,8 +1152,7 @@ inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 	if (rt_se_boosted(rt_se))
 		rt_rq->rt_nr_boosted++;
 
-	if (rt_rq->tg)
-		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
+	start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
 }
 
 static void
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 47972f34ea701..c006348102d9b 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -821,6 +821,8 @@ struct rt_rq {
 	unsigned int		rt_nr_boosted;
 
 	struct rq		*rq;
+#endif
+#ifdef CONFIG_CGROUP_SCHED
 	struct task_group	*tg;
 #endif
 };
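
Illustration (not part of the patch): a minimal user-space C sketch of
the invariant this change establishes. rt_rq->tg now always points at a
valid task_group, with root_task_group standing in for "no group", so
callers such as sched_rt_runtime() need no NULL check. The types and
values below are simplified stand-ins, not the kernel's.

#include <stdio.h>

#define RUNTIME_INF (~0ULL)

/* Simplified stand-ins for the kernel structures. */
struct task_group {
	unsigned long long rt_runtime;
};

static struct task_group root_task_group;

struct rt_rq {
	struct task_group *tg;		/* never NULL after init_rt_rq() */
	unsigned long long rt_runtime;
};

/* Before: a NULL tg denoted the root group and forced a special case. */
static unsigned long long sched_rt_runtime_old(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return RUNTIME_INF;
	return rt_rq->rt_runtime;
}

/* After: init_rt_rq() sets tg = &root_task_group, so the check is gone
 * and the root rt_rq carries a real runtime limit like any other. */
static unsigned long long sched_rt_runtime_new(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

int main(void)
{
	struct rt_rq rq = { .tg = &root_task_group, .rt_runtime = 950000ULL };

	printf("old: %llu new: %llu\n",
	       sched_rt_runtime_old(&rq), sched_rt_runtime_new(&rq));
	return 0;
}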