sched: Clean up and standardize #if/#else/#endif markers in sched/pelt.[ch]
author     Ingo Molnar <mingo@kernel.org>	Wed, 28 May 2025 08:08:53 +0000 (10:08 +0200)
committer  Ingo Molnar <mingo@kernel.org>	Fri, 13 Jun 2025 06:47:17 +0000 (08:47 +0200)
 - Use the standard #ifdef marker format for larger blocks,
   where appropriate:

        #ifdef CONFIG_FOO
        ...
        #else /* !CONFIG_FOO: */
        ...
        #endif /* !CONFIG_FOO */

 - Fix whitespace noise and other inconsistencies.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Shrikanth Hegde <sshegde@linux.ibm.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Valentin Schneider <vschneid@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lore.kernel.org/r/20250528080924.2273858-13-mingo@kernel.org
kernel/sched/pelt.c
kernel/sched/pelt.h
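
For orientation before the hunks: in pelt.h the CONFIG_CFS_BANDWIDTH block sits inside a larger CONFIG_SMP block, which is where the annotated #else/#endif markers pay off. A condensed sketch of the resulting structure (function bodies elided):

        #ifdef CONFIG_SMP
        ...
        #ifdef CONFIG_CFS_BANDWIDTH
        ...
        #else /* !CONFIG_CFS_BANDWIDTH: */
        ...
        #endif /* !CONFIG_CFS_BANDWIDTH */
        ...
        #else /* !CONFIG_SMP: */
        ...
        #endif /* !CONFIG_SMP */

Without the comments, a reader at a bare #endif near the bottom of the file has to scan upward to find which condition it closes.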

diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index 09be6a83e45a9e74fc3299b0c88eefe62ad3c722..fa83bbaf4f3e83c8f969982d8d81cc176baa2de7 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -414,7 +414,7 @@ int update_hw_load_avg(u64 now, struct rq *rq, u64 capacity)
 
        return 0;
 }
-#endif
+#endif /* CONFIG_SCHED_HW_PRESSURE */
 
 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 /*
@@ -467,7 +467,7 @@ int update_irq_load_avg(struct rq *rq, u64 running)
 
        return ret;
 }
-#endif
+#endif /* CONFIG_HAVE_SCHED_AVG_IRQ */
 
 /*
  * Load avg and utilization metrics need to be updated periodically and before
diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
index 19592077452e8431ef468327a927bab3e211155b..a5d4933e6b704074116a8989fb8723e897407c0a 100644
--- a/kernel/sched/pelt.h
+++ b/kernel/sched/pelt.h
@@ -20,7 +20,7 @@ static inline u64 hw_load_avg(struct rq *rq)
 {
        return READ_ONCE(rq->avg_hw.load_avg);
 }
-#else
+#else /* !CONFIG_SCHED_HW_PRESSURE: */
 static inline int
 update_hw_load_avg(u64 now, struct rq *rq, u64 capacity)
 {
@@ -31,7 +31,7 @@ static inline u64 hw_load_avg(struct rq *rq)
 {
        return 0;
 }
-#endif
+#endif /* !CONFIG_SCHED_HW_PRESSURE */
 
 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 int update_irq_load_avg(struct rq *rq, u64 running);
@@ -179,15 +179,15 @@ static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
 
        return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_pelt_time;
 }
-#else
+#else /* !CONFIG_CFS_BANDWIDTH: */
 static inline void update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) { }
 static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
 {
        return rq_clock_pelt(rq_of(cfs_rq));
 }
-#endif
+#endif /* !CONFIG_CFS_BANDWIDTH */
 
-#else
+#else /* !CONFIG_SMP: */
 
 static inline int
 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
@@ -236,6 +236,6 @@ static inline void
 update_idle_rq_clock_pelt(struct rq *rq) { }
 
 static inline void update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) { }
-#endif
+#endif /* !CONFIG_SMP */
 
 #endif /* _KERNEL_SCHED_PELT_H */