timers: Split out "get next timer interrupt" functionality
author     Anna-Maria Behnsen <anna-maria@linutronix.de>
           Wed, 21 Feb 2024 09:05:40 +0000 (10:05 +0100)
committer  Thomas Gleixner <tglx@linutronix.de>
           Thu, 22 Feb 2024 16:52:31 +0000 (17:52 +0100)
The functionality for getting the next timer interrupt in
get_next_timer_interrupt() is split out into a separate function,
fetch_next_timer_interrupt(), so that other call sites can use it as
well.

This is preparatory work for the conversion of the NOHZ timer
placement to a pull at expiry time model. No functional change.

Signed-off-by: Anna-Maria Behnsen <anna-maria@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Link: https://lore.kernel.org/r/20240221090548.36600-13-anna-maria@linutronix.de
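
For illustration only (not part of the patch), a minimal sketch of the
contract the new helper establishes, pieced together from the hunks
below: the caller holds both timer base locks and passes in a struct
timer_events that the helper fills. The wrapper function name here is
hypothetical:

    /*
     * Hypothetical call site (not in this patch): demonstrates the
     * calling convention of fetch_next_timer_interrupt(). The caller
     * must hold base_local->lock and base_global->lock.
     */
    static u64 example_next_event(unsigned long basej, u64 basem)
    {
    	struct timer_events tevt = { .local = KTIME_MAX, .global = KTIME_MAX };
    	struct timer_base *base_local, *base_global;

    	base_local = this_cpu_ptr(&timer_bases[BASE_LOCAL]);
    	base_global = this_cpu_ptr(&timer_bases[BASE_GLOBAL]);

    	raw_spin_lock(&base_local->lock);
    	raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING);

    	/*
    	 * Fills tevt with the next local/global expiry in nanoseconds;
    	 * returns the next event in jiffies, ignored in this sketch.
    	 */
    	fetch_next_timer_interrupt(basej, basem, base_local, base_global,
    				   &tevt);

    	raw_spin_unlock(&base_global->lock);
    	raw_spin_unlock(&base_local->lock);

    	/* Earlier of the local and global expiry times. */
    	return min(tevt.local, tevt.global);
    }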
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 38becd2faceeac01d7dc9d995e581d911347677a..b10e97c995a773271f49109285ed074e50fec47b 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -2033,30 +2033,13 @@ static unsigned long next_timer_interrupt(struct timer_base *base,
        return base->next_expiry;
 }
 
-static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
-                                            bool *idle)
+static unsigned long fetch_next_timer_interrupt(unsigned long basej, u64 basem,
+                                               struct timer_base *base_local,
+                                               struct timer_base *base_global,
+                                               struct timer_events *tevt)
 {
-       struct timer_events tevt = { .local = KTIME_MAX, .global = KTIME_MAX };
        unsigned long nextevt, nextevt_local, nextevt_global;
-       struct timer_base *base_local, *base_global;
        bool local_first;
-       u64 expires;
-
-       /*
-        * Pretend that there is no timer pending if the cpu is offline.
-        * Possible pending timers will be migrated later to an active cpu.
-        */
-       if (cpu_is_offline(smp_processor_id())) {
-               if (idle)
-                       *idle = true;
-               return tevt.local;
-       }
-
-       base_local = this_cpu_ptr(&timer_bases[BASE_LOCAL]);
-       base_global = this_cpu_ptr(&timer_bases[BASE_GLOBAL]);
-
-       raw_spin_lock(&base_local->lock);
-       raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING);
 
        nextevt_local = next_timer_interrupt(base_local, basej);
        nextevt_global = next_timer_interrupt(base_global, basej);
@@ -2074,8 +2057,8 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
                /* If we missed a tick already, force 0 delta */
                if (time_before(nextevt, basej))
                        nextevt = basej;
-               tevt.local = basem + (u64)(nextevt - basej) * TICK_NSEC;
-               goto forward;
+               tevt->local = basem + (u64)(nextevt - basej) * TICK_NSEC;
+               return nextevt;
        }
 
        /*
@@ -2085,12 +2068,41 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
         * ignored. If the global queue is empty, nothing to do either.
         */
        if (!local_first && base_global->timers_pending)
-               tevt.global = basem + (u64)(nextevt_global - basej) * TICK_NSEC;
+               tevt->global = basem + (u64)(nextevt_global - basej) * TICK_NSEC;
 
        if (base_local->timers_pending)
-               tevt.local = basem + (u64)(nextevt_local - basej) * TICK_NSEC;
+               tevt->local = basem + (u64)(nextevt_local - basej) * TICK_NSEC;
+
+       return nextevt;
+}
+
+static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
+                                            bool *idle)
+{
+       struct timer_events tevt = { .local = KTIME_MAX, .global = KTIME_MAX };
+       struct timer_base *base_local, *base_global;
+       unsigned long nextevt;
+       u64 expires;
+
+       /*
+        * Pretend that there is no timer pending if the cpu is offline.
+        * Possible pending timers will be migrated later to an active cpu.
+        */
+       if (cpu_is_offline(smp_processor_id())) {
+               if (idle)
+                       *idle = true;
+               return tevt.local;
+       }
+
+       base_local = this_cpu_ptr(&timer_bases[BASE_LOCAL]);
+       base_global = this_cpu_ptr(&timer_bases[BASE_GLOBAL]);
+
+       raw_spin_lock(&base_local->lock);
+       raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING);
+
+       nextevt = fetch_next_timer_interrupt(basej, basem, base_local,
+                                            base_global, &tevt);
 
-forward:
        /*
         * We have a fresh next event. Check whether we can forward the
         * base.
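
For reference, the struct timer_events aggregate filled by the new
helper is, judging by the initializers above, a simple pair of expiry
times in nanoseconds. A sketch inferred from this patch; the
authoritative definition lives elsewhere in kernel/time/timer.c:

    /*
     * Inferred from the { .local = KTIME_MAX, .global = KTIME_MAX }
     * initializers above; see kernel/time/timer.c for the real thing.
     */
    struct timer_events {
    	u64	local;	/* next local timer expiry, in ns */
    	u64	global;	/* next global timer expiry, in ns */
    };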