context_tracking, rcu: Rename RCU_DYNTICKS_IDX into CT_RCU_WATCHING
author		Valentin Schneider <vschneid@redhat.com>
		Tue, 25 Jul 2023 11:19:01 +0000 (12:19 +0100)
committer	Neeraj Upadhyay <neeraj.upadhyay@kernel.org>
		Mon, 29 Jul 2024 02:03:10 +0000 (07:33 +0530)
The symbols relating to the CT_STATE part of context_tracking.state are now
all prefixed with CT_STATE.

The RCU dynticks counter part of that atomic variable still involves
symbols with different prefixes; align them all to be prefixed with
CT_RCU_WATCHING.
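
For reference, a minimal standalone sketch of how these symbols carve up
the context_tracking.state word (plain C, not kernel code; the values
mirror include/linux/context_tracking_state.h as changed below):

  #include <stdio.h>

  #define CT_STATE_MAX         4
  #define CT_RCU_WATCHING      CT_STATE_MAX        /* LSB of the RCU-watching counter */
  #define CT_STATE_MASK        (CT_STATE_MAX - 1)  /* low bits: the CT_STATE_* value */
  #define CT_RCU_WATCHING_MASK (~CT_STATE_MASK)    /* high bits: the watching counter */

  int main(void)
  {
          /* Boot-time value, as in ATOMIC_INIT(CT_RCU_WATCHING): counter odd, watching. */
          int state = CT_RCU_WATCHING;

          /* ct_state_inc(CT_RCU_WATCHING) on EQS entry: counter even, not watching. */
          state += CT_RCU_WATCHING;

          printf("ctx state = %d, RCU watching = %s\n",
                 state & CT_STATE_MASK,
                 (state & CT_RCU_WATCHING) ? "yes" : "no");  /* prints "no" */
          return 0;
  }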

Suggested-by: "Paul E. McKenney" <paulmck@kernel.org>
Signed-off-by: Valentin Schneider <vschneid@redhat.com>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Neeraj Upadhyay <neeraj.upadhyay@kernel.org>
include/linux/context_tracking.h
include/linux/context_tracking_state.h
kernel/context_tracking.c
kernel/rcu/tree.c

diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index 28fcfa184903281fb1b86f195994829d09e8c3cf..a6c36780cc3bd2af1bf7a03699e45170e8a86da6 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -119,7 +119,7 @@ extern void ct_idle_exit(void);
  */
 static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
 {
-       return !(raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & RCU_DYNTICKS_IDX);
+       return !(raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_RCU_WATCHING);
 }
 
 /*
@@ -142,7 +142,7 @@ static __always_inline bool warn_rcu_enter(void)
        preempt_disable_notrace();
        if (rcu_dynticks_curr_cpu_in_eqs()) {
                ret = true;
-               ct_state_inc(RCU_DYNTICKS_IDX);
+               ct_state_inc(CT_RCU_WATCHING);
        }
 
        return ret;
@@ -151,7 +151,7 @@ static __always_inline bool warn_rcu_enter(void)
 static __always_inline void warn_rcu_exit(bool rcu)
 {
        if (rcu)
-               ct_state_inc(RCU_DYNTICKS_IDX);
+               ct_state_inc(CT_RCU_WATCHING);
        preempt_enable_notrace();
 }
 
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index f1c53125edee25e3af17fb5254a6310c09609929..94d6a935af3beb2df637741d3f44bf889cd8d9aa 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -18,11 +18,11 @@ enum ctx_state {
        CT_STATE_MAX            = 4,
 };
 
-/* Even value for idle, else odd. */
-#define RCU_DYNTICKS_IDX CT_STATE_MAX
+/* Odd value for watching, else even. */
+#define CT_RCU_WATCHING CT_STATE_MAX
 
 #define CT_STATE_MASK (CT_STATE_MAX - 1)
-#define CT_DYNTICKS_MASK (~CT_STATE_MASK)
+#define CT_RCU_WATCHING_MASK (~CT_STATE_MASK)
 
 struct context_tracking {
 #ifdef CONFIG_CONTEXT_TRACKING_USER
@@ -58,21 +58,21 @@ static __always_inline int __ct_state(void)
 #ifdef CONFIG_CONTEXT_TRACKING_IDLE
 static __always_inline int ct_dynticks(void)
 {
-       return atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_DYNTICKS_MASK;
+       return atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_RCU_WATCHING_MASK;
 }
 
 static __always_inline int ct_dynticks_cpu(int cpu)
 {
        struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
 
-       return atomic_read(&ct->state) & CT_DYNTICKS_MASK;
+       return atomic_read(&ct->state) & CT_RCU_WATCHING_MASK;
 }
 
 static __always_inline int ct_dynticks_cpu_acquire(int cpu)
 {
        struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
 
-       return atomic_read_acquire(&ct->state) & CT_DYNTICKS_MASK;
+       return atomic_read_acquire(&ct->state) & CT_RCU_WATCHING_MASK;
 }
 
 static __always_inline long ct_dynticks_nesting(void)
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 4bb5751af994f65e2a0bc41afb094eb5e518caaf..b2589bc59e1869c4658d2cfa86804234a1a353f9 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -31,7 +31,7 @@ DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
        .dynticks_nesting = 1,
        .dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
 #endif
-       .state = ATOMIC_INIT(RCU_DYNTICKS_IDX),
+       .state = ATOMIC_INIT(CT_RCU_WATCHING),
 };
 EXPORT_SYMBOL_GPL(context_tracking);
 
@@ -90,7 +90,7 @@ static noinstr void ct_kernel_exit_state(int offset)
        rcu_dynticks_task_trace_enter();  // Before ->dynticks update!
        seq = ct_state_inc(offset);
        // RCU is no longer watching.  Better be in extended quiescent state!
-       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && (seq & RCU_DYNTICKS_IDX));
+       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && (seq & CT_RCU_WATCHING));
 }
 
 /*
@@ -110,7 +110,7 @@ static noinstr void ct_kernel_enter_state(int offset)
        seq = ct_state_inc(offset);
        // RCU is now watching.  Better not be in an extended quiescent state!
        rcu_dynticks_task_trace_exit();  // After ->dynticks update!
-       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !(seq & RCU_DYNTICKS_IDX));
+       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !(seq & CT_RCU_WATCHING));
 }
 
 /*
@@ -236,7 +236,7 @@ void noinstr ct_nmi_exit(void)
        instrumentation_end();
 
        // RCU is watching here ...
-       ct_kernel_exit_state(RCU_DYNTICKS_IDX);
+       ct_kernel_exit_state(CT_RCU_WATCHING);
        // ... but is no longer watching here.
 
        if (!in_nmi())
@@ -277,7 +277,7 @@ void noinstr ct_nmi_enter(void)
                        rcu_dynticks_task_exit();
 
                // RCU is not watching here ...
-               ct_kernel_enter_state(RCU_DYNTICKS_IDX);
+               ct_kernel_enter_state(CT_RCU_WATCHING);
                // ... but is watching here.
 
                instrumentation_begin();
@@ -317,7 +317,7 @@ void noinstr ct_nmi_enter(void)
 void noinstr ct_idle_enter(void)
 {
        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !raw_irqs_disabled());
-       ct_kernel_exit(false, RCU_DYNTICKS_IDX + CT_STATE_IDLE);
+       ct_kernel_exit(false, CT_RCU_WATCHING + CT_STATE_IDLE);
 }
 EXPORT_SYMBOL_GPL(ct_idle_enter);
 
@@ -335,7 +335,7 @@ void noinstr ct_idle_exit(void)
        unsigned long flags;
 
        raw_local_irq_save(flags);
-       ct_kernel_enter(false, RCU_DYNTICKS_IDX - CT_STATE_IDLE);
+       ct_kernel_enter(false, CT_RCU_WATCHING - CT_STATE_IDLE);
        raw_local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(ct_idle_exit);
@@ -504,7 +504,7 @@ void noinstr __ct_user_enter(enum ctx_state state)
                         * CPU doesn't need to maintain the tick for RCU maintenance purposes
                         * when the CPU runs in userspace.
                         */
-                       ct_kernel_exit(true, RCU_DYNTICKS_IDX + state);
+                       ct_kernel_exit(true, CT_RCU_WATCHING + state);
 
                        /*
                         * Special case if we only track user <-> kernel transitions for tickless
@@ -534,7 +534,7 @@ void noinstr __ct_user_enter(enum ctx_state state)
                                /*
                                 * Tracking for vtime and RCU EQS. Make sure we don't race
                                 * with NMIs. OTOH we don't care about ordering here since
-                                * RCU only requires RCU_DYNTICKS_IDX increments to be fully
+                                * RCU only requires CT_RCU_WATCHING increments to be fully
                                 * ordered.
                                 */
                                raw_atomic_add(state, &ct->state);
@@ -620,7 +620,7 @@ void noinstr __ct_user_exit(enum ctx_state state)
                         * Exit RCU idle mode while entering the kernel because it can
                         * run a RCU read side critical section anytime.
                         */
-                       ct_kernel_enter(true, RCU_DYNTICKS_IDX - state);
+                       ct_kernel_enter(true, CT_RCU_WATCHING - state);
                        if (state == CT_STATE_USER) {
                                instrumentation_begin();
                                vtime_user_exit(current);
@@ -644,7 +644,7 @@ void noinstr __ct_user_exit(enum ctx_state state)
                                /*
                                 * Tracking for vtime and RCU EQS. Make sure we don't race
                                 * with NMIs. OTOH we don't care about ordering here since
-                                * RCU only requires RCU_DYNTICKS_IDX increments to be fully
+                                * RCU only requires CT_RCU_WATCHING increments to be fully
                                 * ordered.
                                 */
                                raw_atomic_sub(state, &ct->state);
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index e641cc681901a5e5c7a7e1791a836ba0db89d7d3..04f87c44385c7f0fc2d07f6a4b3cbcda48dc3f58 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -294,9 +294,9 @@ void rcu_softirq_qs(void)
  */
 static void rcu_dynticks_eqs_online(void)
 {
-       if (ct_dynticks() & RCU_DYNTICKS_IDX)
+       if (ct_dynticks() & CT_RCU_WATCHING)
                return;
-       ct_state_inc(RCU_DYNTICKS_IDX);
+       ct_state_inc(CT_RCU_WATCHING);
 }
 
 /*
@@ -305,7 +305,7 @@ static void rcu_dynticks_eqs_online(void)
  */
 static bool rcu_dynticks_in_eqs(int snap)
 {
-       return !(snap & RCU_DYNTICKS_IDX);
+       return !(snap & CT_RCU_WATCHING);
 }
 
 /*
@@ -335,7 +335,7 @@ bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
        int snap;
 
        // If not quiescent, force back to earlier extended quiescent state.
-       snap = ct_dynticks_cpu(cpu) & ~RCU_DYNTICKS_IDX;
+       snap = ct_dynticks_cpu(cpu) & ~CT_RCU_WATCHING;
        smp_rmb(); // Order ->dynticks and *vp reads.
        if (READ_ONCE(*vp))
                return false;  // Non-zero, so report failure;
@@ -361,9 +361,9 @@ notrace void rcu_momentary_dyntick_idle(void)
        int seq;
 
        raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
-       seq = ct_state_inc(2 * RCU_DYNTICKS_IDX);
+       seq = ct_state_inc(2 * CT_RCU_WATCHING);
        /* It is illegal to call this from idle state. */
-       WARN_ON_ONCE(!(seq & RCU_DYNTICKS_IDX));
+       WARN_ON_ONCE(!(seq & CT_RCU_WATCHING));
        rcu_preempt_deferred_qs(current);
 }
 EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);
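
One detail worth spelling out from the rcu_momentary_dyntick_idle() hunk
above: bumping the counter by 2 * CT_RCU_WATCHING forces any CPU comparing
snapshots of ->state to observe a change, while leaving the CT_RCU_WATCHING
parity bit -- and hence the "watching" state -- intact, which is what the
WARN_ON_ONCE() there asserts. A minimal standalone sketch of that arithmetic
(plain C, not kernel code):

  #include <assert.h>

  #define CT_STATE_MAX    4
  #define CT_RCU_WATCHING CT_STATE_MAX

  int main(void)
  {
          int state = CT_RCU_WATCHING;       /* watching: parity bit set */
          int snap  = state;

          state += 2 * CT_RCU_WATCHING;      /* report a momentary quiescent state */

          assert(state & CT_RCU_WATCHING);   /* still watching afterwards... */
          assert(state != snap);             /* ...but the counter has moved on */
          return 0;
  }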