git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
timers/migration: Rename 'online' bit to 'available'
author: Gabriele Monaco <gmonaco@redhat.com>
Thu, 20 Nov 2025 14:56:47 +0000 (15:56 +0100)
committer: Thomas Gleixner <tglx@linutronix.de>
Thu, 20 Nov 2025 19:17:31 +0000 (20:17 +0100)
The timer migration hierarchy excludes offline CPUs via the
tmigr_is_not_available function, which is essentially checking the
online bit for the CPU.

Rename the online bit to available, and rename all references to it in
function names and tracepoints, to generalise the concept of available CPUs.

Signed-off-by: Gabriele Monaco <gmonaco@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://patch.msgid.link/20251120145653.296659-2-gmonaco@redhat.com
include/trace/events/timer_migration.h
kernel/time/timer_migration.c
kernel/time/timer_migration.h

index 47db5eaf2f9ab75669abb54162d4e6ea55f0ee0d..61171b13c687c2ffc77ff20f00e09fff7a17c8d1 100644 (file)
@@ -173,14 +173,14 @@ DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_active,
        TP_ARGS(tmc)
 );
 
-DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_online,
+DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_available,
 
        TP_PROTO(struct tmigr_cpu *tmc),
 
        TP_ARGS(tmc)
 );
 
-DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_offline,
+DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_unavailable,
 
        TP_PROTO(struct tmigr_cpu *tmc),
 
index 57e38674e56e5fefe9385c5b32928e734a026289..2cfebed35e225eb6ea861422982fceab739b9ed9 100644 (file)
@@ -429,7 +429,7 @@ static DEFINE_PER_CPU(struct tmigr_cpu, tmigr_cpu);
 
 static inline bool tmigr_is_not_available(struct tmigr_cpu *tmc)
 {
-       return !(tmc->tmgroup && tmc->online);
+       return !(tmc->tmgroup && tmc->available);
 }
 
 /*
@@ -926,7 +926,7 @@ static void tmigr_handle_remote_cpu(unsigned int cpu, u64 now,
         * updated the event takes care when hierarchy is completely
         * idle. Otherwise the migrator does it as the event is enqueued.
         */
-       if (!tmc->online || tmc->remote || tmc->cpuevt.ignore ||
+       if (!tmc->available || tmc->remote || tmc->cpuevt.ignore ||
            now < tmc->cpuevt.nextevt.expires) {
                raw_spin_unlock_irq(&tmc->lock);
                return;
@@ -973,7 +973,7 @@ static void tmigr_handle_remote_cpu(unsigned int cpu, u64 now,
         * (See also section "Required event and timerqueue update after a
         * remote expiry" in the documentation at the top)
         */
-       if (!tmc->online || !tmc->idle) {
+       if (!tmc->available || !tmc->idle) {
                timer_unlock_remote_bases(cpu);
                goto unlock;
        }
@@ -1422,19 +1422,19 @@ static long tmigr_trigger_active(void *unused)
 {
        struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
 
-       WARN_ON_ONCE(!tmc->online || tmc->idle);
+       WARN_ON_ONCE(!tmc->available || tmc->idle);
 
        return 0;
 }
 
-static int tmigr_cpu_offline(unsigned int cpu)
+static int tmigr_clear_cpu_available(unsigned int cpu)
 {
        struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
        int migrator;
        u64 firstexp;
 
        raw_spin_lock_irq(&tmc->lock);
-       tmc->online = false;
+       tmc->available = false;
        WRITE_ONCE(tmc->wakeup, KTIME_MAX);
 
        /*
@@ -1442,7 +1442,7 @@ static int tmigr_cpu_offline(unsigned int cpu)
         * offline; Therefore nextevt value is set to KTIME_MAX
         */
        firstexp = __tmigr_cpu_deactivate(tmc, KTIME_MAX);
-       trace_tmigr_cpu_offline(tmc);
+       trace_tmigr_cpu_unavailable(tmc);
        raw_spin_unlock_irq(&tmc->lock);
 
        if (firstexp != KTIME_MAX) {
@@ -1453,7 +1453,7 @@ static int tmigr_cpu_offline(unsigned int cpu)
        return 0;
 }
 
-static int tmigr_cpu_online(unsigned int cpu)
+static int tmigr_set_cpu_available(unsigned int cpu)
 {
        struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
 
@@ -1462,11 +1462,11 @@ static int tmigr_cpu_online(unsigned int cpu)
                return -EINVAL;
 
        raw_spin_lock_irq(&tmc->lock);
-       trace_tmigr_cpu_online(tmc);
+       trace_tmigr_cpu_available(tmc);
        tmc->idle = timer_base_is_idle();
        if (!tmc->idle)
                __tmigr_cpu_activate(tmc);
-       tmc->online = true;
+       tmc->available = true;
        raw_spin_unlock_irq(&tmc->lock);
        return 0;
 }
@@ -1758,7 +1758,7 @@ static int tmigr_add_cpu(unsigned int cpu)
                 * The (likely) current CPU is expected to be online in the hierarchy,
                 * otherwise the old root may not be active as expected.
                 */
-               WARN_ON_ONCE(!per_cpu_ptr(&tmigr_cpu, raw_smp_processor_id())->online);
+               WARN_ON_ONCE(!per_cpu_ptr(&tmigr_cpu, raw_smp_processor_id())->available);
                ret = tmigr_setup_groups(-1, old_root->numa_node, old_root, true);
        }
 
@@ -1854,7 +1854,7 @@ static int __init tmigr_init(void)
                goto err;
 
        ret = cpuhp_setup_state(CPUHP_AP_TMIGR_ONLINE, "tmigr:online",
-                               tmigr_cpu_online, tmigr_cpu_offline);
+                               tmigr_set_cpu_available, tmigr_clear_cpu_available);
        if (ret)
                goto err;
 
index ae19f70f8170fd6834b542f8998c4798fd477bce..70879cde6fdd01db51ad92cca64a9d3599e63cb3 100644 (file)
@@ -97,7 +97,7 @@ struct tmigr_group {
  */
 struct tmigr_cpu {
        raw_spinlock_t          lock;
-       bool                    online;
+       bool                    available;
        bool                    idle;
        bool                    remote;
        struct tmigr_group      *tmgroup;