]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
sched/fair: Get this cpu once in find_new_ilb()
authorShrikanth Hegde <sshegde@linux.ibm.com>
Mon, 23 Mar 2026 19:36:27 +0000 (01:06 +0530)
committerPeter Zijlstra <peterz@infradead.org>
Tue, 24 Mar 2026 09:07:04 +0000 (10:07 +0100)
Calling smp_processor_id() behaves as follows:
 - With CONFIG_DEBUG_PREEMPT=y, if preemption/irqs are disabled, it does
   not print any warning.
 - With CONFIG_DEBUG_PREEMPT=n, it does nothing apart from fetching
   __smp_processor_id.

So with both CONFIG_DEBUG_PREEMPT=y/n, in a preemption-disabled section
it is better to cache the value. It could save a few cycles. Though the
saving is tiny per call, repeated calls in a loop could add up.

find_new_ilb() is called in interrupt context, so preemption is disabled.
Therefore, hoist the smp_processor_id() call out of the loop by caching
it in this_cpu.

Signed-off-by: Shrikanth Hegde <sshegde@linux.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Mukesh Kumar Chaurasiya (IBM) <mkchauras@gmail.com>
Reviewed-by: K Prateek Nayak <kprateek.nayak@amd.com>
Link: https://patch.msgid.link/20260323193630.640311-2-sshegde@linux.ibm.com
kernel/sched/fair.c

index 0a35a82e47920b6863f1c4c65c7b2244a3e51723..226509231e673c75c4560e22a594c1970c4ec933 100644 (file)
@@ -12614,14 +12614,14 @@ static inline int on_null_domain(struct rq *rq)
  */
 static inline int find_new_ilb(void)
 {
+       int this_cpu = smp_processor_id();
        const struct cpumask *hk_mask;
        int ilb_cpu;
 
        hk_mask = housekeeping_cpumask(HK_TYPE_KERNEL_NOISE);
 
        for_each_cpu_and(ilb_cpu, nohz.idle_cpus_mask, hk_mask) {
-
-               if (ilb_cpu == smp_processor_id())
+               if (ilb_cpu == this_cpu)
                        continue;
 
                if (idle_cpu(ilb_cpu))