From: Michal Koutný
Date: Mon, 10 Mar 2025 17:04:39 +0000 (+0100)
Subject: sched: Do not construct nor expose RT_GROUP_SCHED structures if disabled
X-Git-Tag: v6.16-rc1~197^2~14
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=d6809c2f606c14f9e95be87d75a576901d2fa050;p=thirdparty%2Fkernel%2Flinux.git

Thanks to the kernel cmdline being available early, before any cgroup
hierarchy exists, we can achieve the RT_GROUP_SCHED boot-time disabling
goal by simply skipping any creation (and destruction) of RT_GROUP data
and its exposure via RT attributes. We can do this thanks to the
previously placed runtime guards that redirect all operations to
root_task_group's data when RT_GROUP_SCHED is disabled.

Signed-off-by: Michal Koutný
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lkml.kernel.org/r/20250310170442.504716-8-mkoutny@suse.com
---

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6900ce5b90398..79692f85643fe 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -9867,18 +9867,6 @@ static struct cftype cpu_legacy_files[] = {
 		.seq_show = cpu_cfs_local_stat_show,
 	},
 #endif
-#ifdef CONFIG_RT_GROUP_SCHED
-	{
-		.name = "rt_runtime_us",
-		.read_s64 = cpu_rt_runtime_read,
-		.write_s64 = cpu_rt_runtime_write,
-	},
-	{
-		.name = "rt_period_us",
-		.read_u64 = cpu_rt_period_read_uint,
-		.write_u64 = cpu_rt_period_write_uint,
-	},
-#endif
 #ifdef CONFIG_UCLAMP_TASK_GROUP
 	{
 		.name = "uclamp.min",
@@ -9897,6 +9885,20 @@ static struct cftype cpu_legacy_files[] = {
 };
 
 #ifdef CONFIG_RT_GROUP_SCHED
+static struct cftype rt_group_files[] = {
+	{
+		.name = "rt_runtime_us",
+		.read_s64 = cpu_rt_runtime_read,
+		.write_s64 = cpu_rt_runtime_write,
+	},
+	{
+		.name = "rt_period_us",
+		.read_u64 = cpu_rt_period_read_uint,
+		.write_u64 = cpu_rt_period_write_uint,
+	},
+	{ } /* Terminate */
+};
+
 # ifdef CONFIG_RT_GROUP_SCHED_DEFAULT_DISABLED
 DEFINE_STATIC_KEY_FALSE(rt_group_sched);
 # else
@@ -9919,6 +9921,16 @@ static int __init setup_rt_group_sched(char *str)
 	return 1;
 }
 __setup("rt_group_sched=", setup_rt_group_sched);
+
+static int __init cpu_rt_group_init(void)
+{
+	if (!rt_group_sched_enabled())
+		return 0;
+
+	WARN_ON(cgroup_add_legacy_cftypes(&cpu_cgrp_subsys, rt_group_files));
+	return 0;
+}
+subsys_initcall(cpu_rt_group_init);
 #endif /* CONFIG_RT_GROUP_SCHED */
 
 static int cpu_extra_stat_show(struct seq_file *sf,
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 5e82bfe56fdf9..b6119341f0e24 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -193,6 +193,9 @@ static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
 
 void unregister_rt_sched_group(struct task_group *tg)
 {
+	if (!rt_group_sched_enabled())
+		return;
+
 	if (tg->rt_se)
 		destroy_rt_bandwidth(&tg->rt_bandwidth);
 }
@@ -201,6 +204,9 @@ void free_rt_sched_group(struct task_group *tg)
 {
 	int i;
 
+	if (!rt_group_sched_enabled())
+		return;
+
 	for_each_possible_cpu(i) {
 		if (tg->rt_rq)
 			kfree(tg->rt_rq[i]);
@@ -245,6 +251,9 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 	struct sched_rt_entity *rt_se;
 	int i;
 
+	if (!rt_group_sched_enabled())
+		return 1;
+
 	tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
 	if (!tg->rt_rq)
 		goto err;
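
The guards above depend on rt_group_sched_enabled(), which this patch
does not define; it comes from earlier patches in the same series that
introduced the rt_group_sched static key. A minimal sketch of what such
a helper could look like, reconstructed from the key declarations in the
diff above (the exact upstream placement and form may differ):

/*
 * Hypothetical sketch of the guard helper assumed by this patch,
 * based on the static keys declared in kernel/sched/core.c above;
 * not necessarily the exact upstream definition.
 */
#ifdef CONFIG_RT_GROUP_SCHED
# ifdef CONFIG_RT_GROUP_SCHED_DEFAULT_DISABLED
DECLARE_STATIC_KEY_FALSE(rt_group_sched);
static inline bool rt_group_sched_enabled(void)
{
	/* Default-disabled build: key starts false, cmdline can flip it. */
	return static_branch_unlikely(&rt_group_sched);
}
# else
DECLARE_STATIC_KEY_TRUE(rt_group_sched);
static inline bool rt_group_sched_enabled(void)
{
	/* Default-enabled build: key starts true, cmdline can flip it. */
	return static_branch_likely(&rt_group_sched);
}
# endif
#else /* !CONFIG_RT_GROUP_SCHED */
# define rt_group_sched_enabled()	false
#endif

With the key off (e.g. booting with rt_group_sched=0, or leaving a
CONFIG_RT_GROUP_SCHED_DEFAULT_DISABLED build at its default),
alloc_rt_sched_group() now returns 1 (success) before allocating
anything, the free/unregister paths become no-ops, and
cpu_rt_group_init() skips registering rt_runtime_us/rt_period_us, so
those files never appear in the cgroup hierarchy.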