hrtimers: Handle CPU state correctly on hotplug
author     Koichiro Den <koichiro.den@canonical.com>
           Fri, 20 Dec 2024 13:44:21 +0000 (22:44 +0900)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Sat, 1 Feb 2025 17:22:29 +0000 (18:22 +0100)
commit 2f8dea1692eef2b7ba6a256246ed82c365fdc686 upstream.

Consider a scenario where a CPU starts a hotunplug from CPUHP_ONLINE, gets
partway down to CPUHP_HRTIMERS_PREPARE, and is then brought back up to
CPUHP_ONLINE:

Since hrtimers_prepare_cpu() does not run, cpu_base.hres_active remains set
to 1 throughout. However, during the unplug operation the tick and the
clockevents are shut down at CPUHP_AP_TICK_DYING. On return to the online
state, CFS for instance incorrectly assumes that the hrtick is already
active, and the clockevent device also loses any chance of transitioning to
oneshot mode on that CPU, unless the CPU is later taken down again below
CPUHP_HRTIMERS_PREPARE.
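
For illustration, the scheduler's hrtick path trusts this per-CPU flag via
hrtimer_is_hres_active(), which reads cpu_base->hres_active. A simplified
sketch of that dependency (not the exact mainline code) looks like:

	/* Simplified sketch of the hrtick/hres_active dependency. */
	static inline int hrtick_enabled(struct rq *rq)
	{
		if (!sched_feat(HRTICK))
			return 0;
		if (!cpu_active(cpu_of(rq)))
			return 0;
		/*
		 * With cpu_base.hres_active left stale at 1 after the
		 * aborted unplug, this still returns true even though the
		 * tick and the clockevents were already shut down at
		 * CPUHP_AP_TICK_DYING.
		 */
		return hrtimer_is_hres_active(&rq->hrtick_timer);
	}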

This round-trip reveals another issue: cpu_base.online is not set back to 1
after the transition, which shows up as a WARN_ON_ONCE in enqueue_hrtimer().
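
The check that triggers is the online sanity check added by the commit named
in the Fixes: tag below; abridged, the enqueue path looks roughly like this
(a sketch, not the complete function):

	static int enqueue_hrtimer(struct hrtimer *timer,
				   struct hrtimer_clock_base *base,
				   enum hrtimer_mode mode)
	{
		debug_activate(timer, mode);

		/* Trips when a timer is enqueued on a CPU whose base was
		 * never marked online again after the aborted unplug. */
		WARN_ON_ONCE(!base->cpu_base->online);
		...
	}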

Aside from that, the bulk of the per-CPU state is not reset either, which
means there are dangling pointers in the worst case.

Address this by adding a corresponding startup() callback, which resets the
stale per-CPU state and sets the online flag.

[ tglx: Make the new callback unconditionally available, remove the online
   modification in the prepare() callback and clear the remaining
   state in the starting callback instead of the prepare callback ]
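
For background, every CPU hotplug state can carry a startup and a teardown
callback: startup runs while a CPU passes the state on the way up, teardown
on the way down. CPUHP_AP_HRTIMERS_DYING previously had only a teardown
callback, so nothing restored the hrtimer state on the way back up. The fix
wires hrtimers_cpu_starting() into the static cpuhp_hp_states[] table (see
the diff below); a hedged sketch of the same pairing convention, using the
dynamic registration API and made-up foo_* names, would be:

	#include <linux/cpuhotplug.h>
	#include <linux/init.h>

	static int foo_cpu_online(unsigned int cpu)
	{
		/* Runs on the way up: (re)initialize per-CPU state. */
		return 0;
	}

	static int foo_cpu_offline(unsigned int cpu)
	{
		/* Runs on the way down: migrate or tear down per-CPU state. */
		return 0;
	}

	static int __init foo_init(void)
	{
		int ret;

		/* Registering both callbacks keeps the per-CPU state
		 * balanced across a partial unplug and the subsequent
		 * replug. */
		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo:online",
					foo_cpu_online, foo_cpu_offline);
		return ret < 0 ? ret : 0;
	}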

Fixes: 5c0930ccaad5 ("hrtimers: Push pending hrtimers away from outgoing CPU earlier")
Signed-off-by: Koichiro Den <koichiro.den@canonical.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/all/20241220134421.3809834-1-koichiro.den@canonical.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
include/linux/hrtimer.h
kernel/cpu.c
kernel/time/hrtimer.c

diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 54a3ad7bff581d6a329faecca86bb87685185786..b86bd4fe22e0af7de50b355cfc8072cc50d0c0bf 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -527,6 +527,7 @@ extern void __init hrtimers_init(void);
 extern void sysrq_timer_list_show(void);
 
 int hrtimers_prepare_cpu(unsigned int cpu);
+int hrtimers_cpu_starting(unsigned int cpu);
 #ifdef CONFIG_HOTPLUG_CPU
 int hrtimers_cpu_dying(unsigned int cpu);
 #else
diff --git a/kernel/cpu.c b/kernel/cpu.c
index d84ba5a13d1711ec5f4c97dd5ecb6d84aecf7668..7eab6e3e771f7033df71cbc044f9fb7dc856eed9 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -1664,7 +1664,7 @@ static struct cpuhp_step cpuhp_hp_states[] = {
        },
        [CPUHP_AP_HRTIMERS_DYING] = {
                .name                   = "hrtimers:dying",
-               .startup.single         = NULL,
+               .startup.single         = hrtimers_cpu_starting,
                .teardown.single        = hrtimers_cpu_dying,
        },
 
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 16f1e747c5673a504bce8e13463fe301ce20ab54..7f9f2fc183fe31bea2a97158fe5f07940662ef30 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -2074,6 +2074,15 @@ int hrtimers_prepare_cpu(unsigned int cpu)
        }
 
        cpu_base->cpu = cpu;
+       hrtimer_cpu_base_init_expiry_lock(cpu_base);
+       return 0;
+}
+
+int hrtimers_cpu_starting(unsigned int cpu)
+{
+       struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
+
+       /* Clear out any left over state from a CPU down operation */
        cpu_base->active_bases = 0;
        cpu_base->hres_active = 0;
        cpu_base->hang_detected = 0;
@@ -2082,7 +2091,6 @@ int hrtimers_prepare_cpu(unsigned int cpu)
        cpu_base->expires_next = KTIME_MAX;
        cpu_base->softirq_expires_next = KTIME_MAX;
        cpu_base->online = 1;
-       hrtimer_cpu_base_init_expiry_lock(cpu_base);
        return 0;
 }
 
@@ -2160,6 +2168,7 @@ int hrtimers_cpu_dying(unsigned int dying_cpu)
 void __init hrtimers_init(void)
 {
        hrtimers_prepare_cpu(smp_processor_id());
+       hrtimers_cpu_starting(smp_processor_id());
        open_softirq(HRTIMER_SOFTIRQ, hrtimer_run_softirq);
 }