sched/uclamp: Use the uclamp_is_used() helper instead of open-coding it
author     Xuewen Yan <xuewen.yan@unisoc.com>
           Wed, 19 Feb 2025 09:37:46 +0000 (17:37 +0800)
committer  Ingo Molnar <mingo@kernel.org>
           Sat, 15 Mar 2025 09:26:37 +0000 (10:26 +0100)
Don't open-code static_branch_unlikely(&sched_uclamp_used); we have
the uclamp_is_used() wrapper around it.
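
For reference, the helper is nothing more than a read of the sched_uclamp_used
static key, and the call sites now test the wrapper instead of the key itself.
A condensed sketch of the resulting pattern (taken from the hunks below, with
the per-clamp-id aggregation elided):

    static inline bool uclamp_is_used(void)
    {
            return static_branch_likely(&sched_uclamp_used);
    }

    static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
    {
            /* Still compiles down to a NOP while sched_uclamp_used is disabled. */
            if (!uclamp_is_used())
                    return;

            /* ... per-clamp-id rq aggregation elided ... */
    }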

[ mingo: Clean up the changelog ]

Signed-off-by: Xuewen Yan <xuewen.yan@unisoc.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Hongyan Xia <hongyan.xia2@arm.com>
Reviewed-by: Christian Loehle <christian.loehle@arm.com>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lore.kernel.org/r/20250219093747.2612-1-xuewen.yan@unisoc.com
kernel/sched/core.c
kernel/sched/sched.h

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 621cfc731c5be9e3e650bc0b08e9189e622f449a..45daa41ab0b8049044476a903ea843084cc437a6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1756,7 +1756,7 @@ static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
         * The condition is constructed such that a NOP is generated when
         * sched_uclamp_used is disabled.
         */
-       if (!static_branch_unlikely(&sched_uclamp_used))
+       if (!uclamp_is_used())
                return;
 
        if (unlikely(!p->sched_class->uclamp_enabled))
@@ -1783,7 +1783,7 @@ static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
         * The condition is constructed such that a NOP is generated when
         * sched_uclamp_used is disabled.
         */
-       if (!static_branch_unlikely(&sched_uclamp_used))
+       if (!uclamp_is_used())
                return;
 
        if (unlikely(!p->sched_class->uclamp_enabled))
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 023b844159c9415f4854e359a3a98f3b24e296bc..8d42d3cd1cb85c6ee5662606ed00d0fd37f3910b 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3394,6 +3394,19 @@ static inline bool update_other_load_avgs(struct rq *rq) { return false; }
 
 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
 
+/*
+ * When uclamp is compiled in, the aggregation at rq level is 'turned off'
+ * by default in the fast path and only gets turned on once userspace performs
+ * an operation that requires it.
+ *
+ * Returns true if userspace opted-in to use uclamp and aggregation at rq level
+ * hence is active.
+ */
+static inline bool uclamp_is_used(void)
+{
+       return static_branch_likely(&sched_uclamp_used);
+}
+
 static inline unsigned long uclamp_rq_get(struct rq *rq,
                                          enum uclamp_id clamp_id)
 {
@@ -3417,7 +3430,7 @@ static inline bool uclamp_rq_is_capped(struct rq *rq)
        unsigned long rq_util;
        unsigned long max_util;
 
-       if (!static_branch_likely(&sched_uclamp_used))
+       if (!uclamp_is_used())
                return false;
 
        rq_util = cpu_util_cfs(cpu_of(rq)) + cpu_util_rt(rq);
@@ -3426,19 +3439,6 @@ static inline bool uclamp_rq_is_capped(struct rq *rq)
        return max_util != SCHED_CAPACITY_SCALE && rq_util >= max_util;
 }
 
-/*
- * When uclamp is compiled in, the aggregation at rq level is 'turned off'
- * by default in the fast path and only gets turned on once userspace performs
- * an operation that requires it.
- *
- * Returns true if userspace opted-in to use uclamp and aggregation at rq level
- * hence is active.
- */
-static inline bool uclamp_is_used(void)
-{
-       return static_branch_likely(&sched_uclamp_used);
-}
-
 #define for_each_clamp_id(clamp_id) \
        for ((clamp_id) = 0; (clamp_id) < UCLAMP_CNT; (clamp_id)++)
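
The comment block moved above uclamp_rq_get() notes that rq-level aggregation
stays off until "userspace performs an operation that requires it". That opt-in
is not part of this patch; one such operation is setting per-task clamps through
the sched_setattr() system call (others are the cgroup v2 cpu.uclamp.* files and
the sysctl knobs). A rough userspace sketch, assuming a kernel built with
CONFIG_UCLAMP_TASK and uapi headers that provide the SCHED_FLAG_UTIL_CLAMP_*
flags:

    #define _GNU_SOURCE
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/sched.h>        /* SCHED_FLAG_UTIL_CLAMP_{MIN,MAX} */

    /* glibc has no sched_setattr() wrapper; uapi layout of struct sched_attr. */
    struct sched_attr {
            uint32_t size;
            uint32_t sched_policy;
            uint64_t sched_flags;
            int32_t  sched_nice;
            uint32_t sched_priority;
            uint64_t sched_runtime;
            uint64_t sched_deadline;
            uint64_t sched_period;
            uint32_t sched_util_min;
            uint32_t sched_util_max;
    };

    int main(void)
    {
            struct sched_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            /* Keep the current policy/params; only set the utilization clamps. */
            attr.sched_flags = SCHED_FLAG_KEEP_ALL |
                               SCHED_FLAG_UTIL_CLAMP_MIN |
                               SCHED_FLAG_UTIL_CLAMP_MAX;
            attr.sched_util_min = 0;
            attr.sched_util_max = 512;  /* cap the calling task at half capacity */

            if (syscall(SYS_sched_setattr, 0 /* current task */, &attr, 0))
                    perror("sched_setattr");
            return 0;
    }

Once any task or cgroup carries a non-default clamp, the scheduler enables the
sched_uclamp_used static key and uclamp_is_used() starts returning true in the
paths patched above.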