git.ipfire.org Git - thirdparty/linux.git/commitdiff
sched: Do not construct nor expose RT_GROUP_SCHED structures if disabled
author: Michal Koutný <mkoutny@suse.com>
Mon, 10 Mar 2025 17:04:39 +0000 (18:04 +0100)
committer: Peter Zijlstra <peterz@infradead.org>
Tue, 8 Apr 2025 18:55:54 +0000 (20:55 +0200)
Thanks to kernel cmdline being available early, before any
cgroup hierarchy exists, we can achieve the RT_GROUP_SCHED boottime
disabling goal by simply skipping any creation (and destruction) of
RT_GROUP data and its exposure via RT attributes.

We can do this thanks to previously placed runtime guards that would
redirect all operations to root_task_group's data when RT_GROUP_SCHED
is disabled.

Signed-off-by: Michal Koutný <mkoutny@suse.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20250310170442.504716-8-mkoutny@suse.com
kernel/sched/core.c
kernel/sched/rt.c

index 6900ce5b90398c4072c8fb4fc0b08b72a3181817..79692f85643fee05ac1f1ed5d6ef08e807a29c55 100644 (file)
@@ -9867,18 +9867,6 @@ static struct cftype cpu_legacy_files[] = {
                .seq_show = cpu_cfs_local_stat_show,
        },
 #endif
-#ifdef CONFIG_RT_GROUP_SCHED
-       {
-               .name = "rt_runtime_us",
-               .read_s64 = cpu_rt_runtime_read,
-               .write_s64 = cpu_rt_runtime_write,
-       },
-       {
-               .name = "rt_period_us",
-               .read_u64 = cpu_rt_period_read_uint,
-               .write_u64 = cpu_rt_period_write_uint,
-       },
-#endif
 #ifdef CONFIG_UCLAMP_TASK_GROUP
        {
                .name = "uclamp.min",
@@ -9897,6 +9885,20 @@ static struct cftype cpu_legacy_files[] = {
 };
 
 #ifdef CONFIG_RT_GROUP_SCHED
+static struct cftype rt_group_files[] = {
+       {
+               .name = "rt_runtime_us",
+               .read_s64 = cpu_rt_runtime_read,
+               .write_s64 = cpu_rt_runtime_write,
+       },
+       {
+               .name = "rt_period_us",
+               .read_u64 = cpu_rt_period_read_uint,
+               .write_u64 = cpu_rt_period_write_uint,
+       },
+       { }     /* Terminate */
+};
+
 # ifdef CONFIG_RT_GROUP_SCHED_DEFAULT_DISABLED
 DEFINE_STATIC_KEY_FALSE(rt_group_sched);
 # else
@@ -9919,6 +9921,16 @@ static int __init setup_rt_group_sched(char *str)
        return 1;
 }
 __setup("rt_group_sched=", setup_rt_group_sched);
+
+static int __init cpu_rt_group_init(void)
+{
+       if (!rt_group_sched_enabled())
+               return 0;
+
+       WARN_ON(cgroup_add_legacy_cftypes(&cpu_cgrp_subsys, rt_group_files));
+       return 0;
+}
+subsys_initcall(cpu_rt_group_init);
 #endif /* CONFIG_RT_GROUP_SCHED */
 
 static int cpu_extra_stat_show(struct seq_file *sf,
index 5e82bfe56fdf9fa803d2bca16561f511e120493a..b6119341f0e24716b7befcf2b3124bf2e2de7974 100644 (file)
@@ -193,6 +193,9 @@ static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
 
 void unregister_rt_sched_group(struct task_group *tg)
 {
+       if (!rt_group_sched_enabled())
+               return;
+
        if (tg->rt_se)
                destroy_rt_bandwidth(&tg->rt_bandwidth);
 }
@@ -201,6 +204,9 @@ void free_rt_sched_group(struct task_group *tg)
 {
        int i;
 
+       if (!rt_group_sched_enabled())
+               return;
+
        for_each_possible_cpu(i) {
                if (tg->rt_rq)
                        kfree(tg->rt_rq[i]);
@@ -245,6 +251,9 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
        struct sched_rt_entity *rt_se;
        int i;
 
+       if (!rt_group_sched_enabled())
+               return 1;
+
        tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
        if (!tg->rt_rq)
                goto err;