git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
hrtimer: Rename hrtimer_cpu_base::in_hrtirq to deferred_rearm
author: Thomas Gleixner <tglx@kernel.org>
Tue, 24 Feb 2026 16:37:53 +0000 (17:37 +0100)
committer: Peter Zijlstra <peterz@infradead.org>
Fri, 27 Feb 2026 15:40:12 +0000 (16:40 +0100)
The upcoming deferred rearming scheme has the same effect as the existing
deferral of reprogramming that happens while the hrtimer interrupt is
executing. So it can reuse the in_hrtirq flag, but once the rearming can be
deferred beyond the hrtimer interrupt path, that name no longer makes sense.

Rename it to deferred_rearm upfront to keep the actual functional change
separate from the mechanical rename churn.

Signed-off-by: Thomas Gleixner <tglx@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20260224163430.935623347@kernel.org
include/linux/hrtimer_defs.h
kernel/time/hrtimer.c

index f9fbf9a48f59085369df33f1991dc0c8e51c4cd5..2c3bdbd562d22cd19d520a7432b50677afad9c05 100644 (file)
@@ -53,7 +53,7 @@ enum  hrtimer_base_type {
  * @active_bases:      Bitfield to mark bases with active timers
  * @clock_was_set_seq: Sequence counter of clock was set events
  * @hres_active:       State of high resolution mode
- * @in_hrtirq:         hrtimer_interrupt() is currently executing
+ * @deferred_rearm:    A deferred rearm is pending
  * @hang_detected:     The last hrtimer interrupt detected a hang
  * @softirq_activated: displays, if the softirq is raised - update of softirq
  *                     related settings is not required then.
@@ -84,7 +84,7 @@ struct hrtimer_cpu_base {
        unsigned int                    active_bases;
        unsigned int                    clock_was_set_seq;
        bool                            hres_active;
-       bool                            in_hrtirq;
+       bool                            deferred_rearm;
        bool                            hang_detected;
        bool                            softirq_activated;
        bool                            online;
index 2e05a1885d247e3283fc7b0002d1133e26c7fdd3..6f05d2569286ab5661f62d7e0d56fb1f7fcfb9b9 100644 (file)
@@ -883,11 +883,8 @@ static void hrtimer_reprogram(struct hrtimer *timer, bool reprogram)
        if (expires >= cpu_base->expires_next)
                return;
 
-       /*
-        * If the hrtimer interrupt is running, then it will reevaluate the
-        * clock bases and reprogram the clock event device.
-        */
-       if (cpu_base->in_hrtirq)
+       /* If a deferred rearm is pending skip reprogramming the device */
+       if (cpu_base->deferred_rearm)
                return;
 
        cpu_base->next_timer = timer;
@@ -921,12 +918,8 @@ static bool update_needs_ipi(struct hrtimer_cpu_base *cpu_base, unsigned int act
        if (seq == cpu_base->clock_was_set_seq)
                return false;
 
-       /*
-        * If the remote CPU is currently handling an hrtimer interrupt, it
-        * will reevaluate the first expiring timer of all clock bases
-        * before reprogramming. Nothing to do here.
-        */
-       if (cpu_base->in_hrtirq)
+       /* If a deferred rearm is pending the remote CPU will take care of it */
+       if (cpu_base->deferred_rearm)
                return false;
 
        /*
@@ -1334,11 +1327,8 @@ static bool __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, u64 del
                first = enqueue_hrtimer(timer, base, mode, was_armed);
        }
 
-       /*
-        * If the hrtimer interrupt is running, then it will reevaluate the
-        * clock bases and reprogram the clock event device.
-        */
-       if (cpu_base->in_hrtirq)
+       /* If a deferred rearm is pending skip reprogramming the device */
+       if (cpu_base->deferred_rearm)
                return false;
 
        if (!was_first || cpu_base != this_cpu_base) {
@@ -1947,14 +1937,14 @@ static __latent_entropy void hrtimer_run_softirq(void)
 
 /*
  * Very similar to hrtimer_force_reprogram(), except it deals with
- * in_hrtirq and hang_detected.
+ * deferred_rearm and hang_detected.
  */
 static void hrtimer_rearm(struct hrtimer_cpu_base *cpu_base, ktime_t now)
 {
        ktime_t expires_next = hrtimer_update_next_event(cpu_base);
 
        cpu_base->expires_next = expires_next;
-       cpu_base->in_hrtirq = false;
+       cpu_base->deferred_rearm = false;
 
        if (unlikely(cpu_base->hang_detected)) {
                /*
@@ -1985,7 +1975,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
        raw_spin_lock_irqsave(&cpu_base->lock, flags);
        entry_time = now = hrtimer_update_base(cpu_base);
 retry:
-       cpu_base->in_hrtirq = true;
+       cpu_base->deferred_rearm = true;
        /*
         * Set expires_next to KTIME_MAX, which prevents that remote CPUs queue
         * timers while __hrtimer_run_queues() is expiring the clock bases.