sched/uclamp: Optimize sched_uclamp_used static key enabling
Author:     Xuewen Yan <xuewen.yan@unisoc.com>
AuthorDate: Wed, 19 Feb 2025 09:37:47 +0000 (17:37 +0800)
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Sat, 15 Mar 2025 09:28:50 +0000 (10:28 +0100)
Repeated calls to static_branch_enable() on an already enabled
static key introduce overhead, because every call takes cpus_read_lock().
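
For reference, static_branch_enable() maps to static_key_enable(), which
takes cpus_read_lock() before the "already enabled?" state is examined
(abbreviated sketch of the kernel/jump_label.c path, trimmed to the
locking that matters here):

	/*
	 * Abbreviated from kernel/jump_label.c: the read lock is
	 * taken unconditionally, so a redundant enable of an
	 * already-on key still pays for cpus_read_lock().
	 */
	void static_key_enable(struct static_key *key)
	{
		cpus_read_lock();
		static_key_enable_cpuslocked(key);
		cpus_read_unlock();
	}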

Users may set the uclamp values of tasks frequently, triggering
repeated enabling of the sched_uclamp_used static key.
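
One such path is the sched_setattr() system call with the util-clamp
flags; a minimal user-space sketch (my illustration, not part of this
commit) of a caller that repeatedly updates a task's uclamp.min, with
each call reaching uclamp_validate() in kernel/sched/syscalls.c:

	/* Illustration only: before this commit, every such call went
	 * through static_branch_enable(&sched_uclamp_used).
	 */
	#include <linux/sched.h>        /* SCHED_FLAG_* */
	#include <linux/sched/types.h>  /* struct sched_attr */
	#include <stdint.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int set_uclamp_min(pid_t pid, uint32_t util_min)
	{
		struct sched_attr attr = {
			.size           = sizeof(attr),
			/* keep policy/params, change only the clamp */
			.sched_flags    = SCHED_FLAG_KEEP_ALL |
					  SCHED_FLAG_UTIL_CLAMP_MIN,
			.sched_util_min = util_min,
		};

		return syscall(SYS_sched_setattr, pid, &attr, 0);
	}

	int main(void)
	{
		/* e.g. a governor-like loop re-clamping this task */
		for (uint32_t min = 0; min <= 512; min += 128)
			if (set_uclamp_min(0, min))
				return 1;
		return 0;
	}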

Optimize this and avoid the repeated calls by checking whether the
key is already enabled before calling static_branch_enable().
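
The resulting guard is a plain test-before-enable on the static key; an
annotated version of the new helper follows (the bare version appears in
the kernel/sched/sched.h hunk below; the concurrency notes are mine):

	static inline void sched_uclamp_enable(void)
	{
		/*
		 * The key is never disabled once it has been enabled,
		 * so a positive uclamp_is_used() result cannot go
		 * stale. If two tasks race here, both may call
		 * static_branch_enable(); that is harmless, since
		 * redundant enables return early under the jump-label
		 * locking. The check merely avoids cpus_read_lock()
		 * in the common, already-enabled case.
		 */
		if (!uclamp_is_used())
			static_branch_enable(&sched_uclamp_used);
	}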

[ mingo: Rewrote the changelog for legibility ]

Signed-off-by: Xuewen Yan <xuewen.yan@unisoc.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Christian Loehle <christian.loehle@arm.com>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lore.kernel.org/r/20250219093747.2612-2-xuewen.yan@unisoc.com
kernel/sched/core.c
kernel/sched/sched.h
kernel/sched/syscalls.c

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 45daa41ab0b8049044476a903ea843084cc437a6..03d7b63dc3e5491a3b2d62284085117c4eeb751c 100644
@@ -1941,12 +1941,12 @@ static int sysctl_sched_uclamp_handler(const struct ctl_table *table, int write,
        }
 
        if (update_root_tg) {
-               static_branch_enable(&sched_uclamp_used);
+               sched_uclamp_enable();
                uclamp_update_root_tg();
        }
 
        if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) {
-               static_branch_enable(&sched_uclamp_used);
+               sched_uclamp_enable();
                uclamp_sync_util_min_rt_default();
        }
 
@@ -9294,7 +9294,7 @@ static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
        if (req.ret)
                return req.ret;
 
-       static_branch_enable(&sched_uclamp_used);
+       sched_uclamp_enable();
 
        guard(mutex)(&uclamp_mutex);
        guard(rcu)();
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 8d42d3cd1cb85c6ee5662606ed00d0fd37f3910b..0212a0c5534af668ffa55ed7851616163eb8cc8d 100644
@@ -3407,6 +3407,18 @@ static inline bool uclamp_is_used(void)
        return static_branch_likely(&sched_uclamp_used);
 }
 
+/*
+ * Enabling a static branch takes cpus_read_lock(). Since the
+ * sched_uclamp_used key is never disabled once it has been
+ * enabled, check uclamp_is_used() first and only enable the key
+ * when needed, to avoid taking cpus_read_lock() on every call.
+ */
+static inline void sched_uclamp_enable(void)
+{
+       if (!uclamp_is_used())
+               static_branch_enable(&sched_uclamp_used);
+}
+
 static inline unsigned long uclamp_rq_get(struct rq *rq,
                                          enum uclamp_id clamp_id)
 {
@@ -3486,6 +3498,8 @@ static inline bool uclamp_is_used(void)
        return false;
 }
 
+static inline void sched_uclamp_enable(void) {}
+
 static inline unsigned long
 uclamp_rq_get(struct rq *rq, enum uclamp_id clamp_id)
 {
diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c
index 9f40348f1dc7a1f3a70c994b1d27cd3557e2069a..c326de1344fb72dbd0820f448f33b9b95d13d7a6 100644
@@ -368,7 +368,7 @@ static int uclamp_validate(struct task_struct *p,
         * blocking operation which obviously cannot be done while holding
         * scheduler locks.
         */
-       static_branch_enable(&sched_uclamp_used);
+       sched_uclamp_enable();
 
        return 0;
 }