From: Ingo Molnar
Date: Wed, 28 May 2025 08:08:53 +0000 (+0200)
Subject: sched: Clean up and standardize #if/#else/#endif markers in sched/pelt.[ch]
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=311bb3f7b78e944e831ffb07cb58455b47bf2269;p=thirdparty%2Fkernel%2Flinux.git

sched: Clean up and standardize #if/#else/#endif markers in sched/pelt.[ch]

- Use the standard #ifdef marker format for larger blocks, where appropriate:

    #if CONFIG_FOO
    ...
    #else /* !CONFIG_FOO: */
    ...
    #endif /* !CONFIG_FOO */

- Fix whitespace noise and other inconsistencies.

Signed-off-by: Ingo Molnar
Acked-by: Peter Zijlstra
Cc: Dietmar Eggemann
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Sebastian Andrzej Siewior
Cc: Shrikanth Hegde
Cc: Steven Rostedt
Cc: Valentin Schneider
Cc: Vincent Guittot
Link: https://lore.kernel.org/r/20250528080924.2273858-13-mingo@kernel.org
---

diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index 09be6a83e45a9..fa83bbaf4f3e8 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -414,7 +414,7 @@ int update_hw_load_avg(u64 now, struct rq *rq, u64 capacity)
 
 	return 0;
 }
-#endif
+#endif /* CONFIG_SCHED_HW_PRESSURE */
 
 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 /*
@@ -467,7 +467,7 @@ int update_irq_load_avg(struct rq *rq, u64 running)
 
 	return ret;
 }
-#endif
+#endif /* CONFIG_HAVE_SCHED_AVG_IRQ */
 
 /*
  * Load avg and utiliztion metrics need to be updated periodically and before
diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
index 19592077452e8..a5d4933e6b704 100644
--- a/kernel/sched/pelt.h
+++ b/kernel/sched/pelt.h
@@ -20,7 +20,7 @@ static inline u64 hw_load_avg(struct rq *rq)
 {
 	return READ_ONCE(rq->avg_hw.load_avg);
 }
-#else
+#else /* !CONFIG_SCHED_HW_PRESSURE: */
 
 static inline int
 update_hw_load_avg(u64 now, struct rq *rq, u64 capacity)
@@ -31,7 +31,7 @@ static inline u64 hw_load_avg(struct rq *rq)
 {
 	return 0;
 }
-#endif
+#endif /* !CONFIG_SCHED_HW_PRESSURE */
 
 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 int update_irq_load_avg(struct rq *rq, u64 running);
@@ -179,15 +179,15 @@ static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
 
 	return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_pelt_time;
 }
-#else
+#else /* !CONFIG_CFS_BANDWIDTH: */
 static inline void update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) { }
 static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
 {
 	return rq_clock_pelt(rq_of(cfs_rq));
 }
-#endif
+#endif /* !CONFIG_CFS_BANDWIDTH */
 
-#else
+#else /* !CONFIG_SMP: */
 
 static inline int
 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
@@ -236,6 +236,6 @@ static inline void update_idle_rq_clock_pelt(struct rq *rq) { }
 
 static inline void update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) { }
 
-#endif
+#endif /* !CONFIG_SMP */
 
 #endif /* _KERNEL_SCHED_PELT_H */