treewide: context_tracking: Rename CONTEXT_* into CT_STATE_*
Author:     Valentin Schneider <vschneid@redhat.com>
AuthorDate: Tue, 25 Jul 2023 11:08:50 +0000 (12:08 +0100)
Commit:     Neeraj Upadhyay <neeraj.upadhyay@kernel.org>
CommitDate: Mon, 29 Jul 2024 02:03:10 +0000 (07:33 +0530)
Context tracking state related symbols currently use a mix of the
CONTEXT_ (e.g. CONTEXT_KERNEL) and CT_STATE_ (e.g. CT_STATE_MASK) prefixes.

Clean up the naming and make the ctx_state enum use the CT_STATE_ prefix.
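
The complete mapping, as applied by the hunks below:

    /* Before                 After
     * CONTEXT_DISABLED  ->   CT_STATE_DISABLED
     * CONTEXT_KERNEL    ->   CT_STATE_KERNEL
     * CONTEXT_IDLE      ->   CT_STATE_IDLE
     * CONTEXT_USER      ->   CT_STATE_USER
     * CONTEXT_GUEST     ->   CT_STATE_GUEST
     * CONTEXT_MAX       ->   CT_STATE_MAX
     */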

Suggested-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Valentin Schneider <vschneid@redhat.com>
Acked-by: Frederic Weisbecker <frederic@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Neeraj Upadhyay <neeraj.upadhyay@kernel.org>
12 files changed:
arch/Kconfig
arch/arm64/kernel/entry-common.c
arch/powerpc/include/asm/interrupt.h
arch/powerpc/kernel/interrupt.c
arch/powerpc/kernel/syscall.c
arch/x86/entry/common.c
include/linux/context_tracking.h
include/linux/context_tracking_state.h
include/linux/entry-common.h
kernel/context_tracking.c
kernel/entry/common.c
kernel/sched/core.c

diff --git a/arch/Kconfig b/arch/Kconfig
index 975dd22a2dbd22dc1fa7ef727149d99e5ebcbe0a..4e2eaba9e30528ed81ff8c4bcd1226040bf447cd 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -862,7 +862,7 @@ config HAVE_CONTEXT_TRACKING_USER_OFFSTACK
          Architecture neither relies on exception_enter()/exception_exit()
          nor on schedule_user(). Also preempt_schedule_notrace() and
          preempt_schedule_irq() can't be called in a preemptible section
-         while context tracking is CONTEXT_USER. This feature reflects a sane
+         while context tracking is CT_STATE_USER. This feature reflects a sane
          entry implementation where the following requirements are met on
          critical entry code, ie: before user_exit() or after user_enter():
 
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index b77a15955f28bad450976ca36640c506eb9aeeda..3fcd9d080bf2a9086ebe6a596e73dee63fd2cb7e 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -103,7 +103,7 @@ static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
 static __always_inline void __enter_from_user_mode(void)
 {
        lockdep_hardirqs_off(CALLER_ADDR0);
-       CT_WARN_ON(ct_state() != CONTEXT_USER);
+       CT_WARN_ON(ct_state() != CT_STATE_USER);
        user_exit_irqoff();
        trace_hardirqs_off_finish();
        mte_disable_tco_entry(current);
diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index 2d6c886b40f4479eb75a9dc32166996e137bc5ad..23638d4e73ac034c29dc929e1b9feb2c3cb736a3 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -177,7 +177,7 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs)
 
        if (user_mode(regs)) {
                kuap_lock();
-               CT_WARN_ON(ct_state() != CONTEXT_USER);
+               CT_WARN_ON(ct_state() != CT_STATE_USER);
                user_exit_irqoff();
 
                account_cpu_user_entry();
@@ -189,8 +189,8 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs)
                 * so avoid recursion.
                 */
                if (TRAP(regs) != INTERRUPT_PROGRAM)
-                       CT_WARN_ON(ct_state() != CONTEXT_KERNEL &&
-                                  ct_state() != CONTEXT_IDLE);
+                       CT_WARN_ON(ct_state() != CT_STATE_KERNEL &&
+                                  ct_state() != CT_STATE_IDLE);
                INT_SOFT_MASK_BUG_ON(regs, is_implicit_soft_masked(regs));
                INT_SOFT_MASK_BUG_ON(regs, arch_irq_disabled_regs(regs) &&
                                           search_kernel_restart_table(regs->nip));
diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
index eca293794a1e831a7c657a75696fa9b53d95c990..af62ec974b9702cc0b310290bc262e6924222267 100644
--- a/arch/powerpc/kernel/interrupt.c
+++ b/arch/powerpc/kernel/interrupt.c
@@ -266,7 +266,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
        unsigned long ret = 0;
        bool is_not_scv = !IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !scv;
 
-       CT_WARN_ON(ct_state() == CONTEXT_USER);
+       CT_WARN_ON(ct_state() == CT_STATE_USER);
 
        kuap_assert_locked();
 
@@ -344,7 +344,7 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs)
 
        BUG_ON(regs_is_unrecoverable(regs));
        BUG_ON(arch_irq_disabled_regs(regs));
-       CT_WARN_ON(ct_state() == CONTEXT_USER);
+       CT_WARN_ON(ct_state() == CT_STATE_USER);
 
        /*
         * We don't need to restore AMR on the way back to userspace for KUAP.
@@ -386,7 +386,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
        if (!IS_ENABLED(CONFIG_PPC_BOOK3E_64) &&
            TRAP(regs) != INTERRUPT_PROGRAM &&
            TRAP(regs) != INTERRUPT_PERFMON)
-               CT_WARN_ON(ct_state() == CONTEXT_USER);
+               CT_WARN_ON(ct_state() == CT_STATE_USER);
 
        kuap = kuap_get_and_assert_locked();
 
diff --git a/arch/powerpc/kernel/syscall.c b/arch/powerpc/kernel/syscall.c
index f6f868e817e636f7c1327feb13935ecff49d57d5..be159ad4b77bd387f50299c5981d80984a605ce6 100644
--- a/arch/powerpc/kernel/syscall.c
+++ b/arch/powerpc/kernel/syscall.c
@@ -27,7 +27,7 @@ notrace long system_call_exception(struct pt_regs *regs, unsigned long r0)
 
        trace_hardirqs_off(); /* finish reconciling */
 
-       CT_WARN_ON(ct_state() == CONTEXT_KERNEL);
+       CT_WARN_ON(ct_state() == CT_STATE_KERNEL);
        user_exit_irqoff();
 
        BUG_ON(regs_is_unrecoverable(regs));
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 51cc9c7cb9bdc0e1181d08e33c05b5cc1f177830..94941c5a10ac107f48871c6728921c4a08c1a81f 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -150,7 +150,7 @@ early_param("ia32_emulation", ia32_emulation_override_cmdline);
 #endif
 
 /*
- * Invoke a 32-bit syscall.  Called with IRQs on in CONTEXT_KERNEL.
+ * Invoke a 32-bit syscall.  Called with IRQs on in CT_STATE_KERNEL.
  */
 static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs, int nr)
 {
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index 6e76b9dba00e77d9e28e6044f6684acf6ff28ee1..28fcfa184903281fb1b86f195994829d09e8c3cf 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -26,26 +26,26 @@ extern void user_exit_callable(void);
 static inline void user_enter(void)
 {
        if (context_tracking_enabled())
-               ct_user_enter(CONTEXT_USER);
+               ct_user_enter(CT_STATE_USER);
 
 }
 static inline void user_exit(void)
 {
        if (context_tracking_enabled())
-               ct_user_exit(CONTEXT_USER);
+               ct_user_exit(CT_STATE_USER);
 }
 
 /* Called with interrupts disabled.  */
 static __always_inline void user_enter_irqoff(void)
 {
        if (context_tracking_enabled())
-               __ct_user_enter(CONTEXT_USER);
+               __ct_user_enter(CT_STATE_USER);
 
 }
 static __always_inline void user_exit_irqoff(void)
 {
        if (context_tracking_enabled())
-               __ct_user_exit(CONTEXT_USER);
+               __ct_user_exit(CT_STATE_USER);
 }
 
 static inline enum ctx_state exception_enter(void)
@@ -57,7 +57,7 @@ static inline enum ctx_state exception_enter(void)
                return 0;
 
        prev_ctx = __ct_state();
-       if (prev_ctx != CONTEXT_KERNEL)
+       if (prev_ctx != CT_STATE_KERNEL)
                ct_user_exit(prev_ctx);
 
        return prev_ctx;
@@ -67,7 +67,7 @@ static inline void exception_exit(enum ctx_state prev_ctx)
 {
        if (!IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) &&
            context_tracking_enabled()) {
-               if (prev_ctx != CONTEXT_KERNEL)
+               if (prev_ctx != CT_STATE_KERNEL)
                        ct_user_enter(prev_ctx);
        }
 }
@@ -75,7 +75,7 @@ static inline void exception_exit(enum ctx_state prev_ctx)
 static __always_inline bool context_tracking_guest_enter(void)
 {
        if (context_tracking_enabled())
-               __ct_user_enter(CONTEXT_GUEST);
+               __ct_user_enter(CT_STATE_GUEST);
 
        return context_tracking_enabled_this_cpu();
 }
@@ -83,7 +83,7 @@ static __always_inline bool context_tracking_guest_enter(void)
 static __always_inline void context_tracking_guest_exit(void)
 {
        if (context_tracking_enabled())
-               __ct_user_exit(CONTEXT_GUEST);
+               __ct_user_exit(CT_STATE_GUEST);
 }
 
 #define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))
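
An aside on the CT_WARN_ON() definition above: with context tracking
disabled, ct_state() returns CT_STATE_DISABLED (-1), so a bare
WARN_ON(ct_state() != CT_STATE_USER) would fire on every call. The
context_tracking_enabled() && guard short-circuits the check away, which is
what makes these assertions safe to sprinkle through entry code. A minimal
userspace illustration of the guard (stand-in stubs, not kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    #define CT_STATE_DISABLED (-1)
    #define CT_STATE_USER     2

    /* Stand-ins: pretend context tracking was disabled at runtime. */
    static bool context_tracking_enabled(void) { return false; }
    static int ct_state(void) { return CT_STATE_DISABLED; }

    /* Userspace stand-in for the kernel's WARN_ON(). */
    #define WARN_ON(cond)    do { if (cond) printf("WARNING: %s\n", #cond); } while (0)
    #define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))

    int main(void)
    {
            WARN_ON(ct_state() != CT_STATE_USER);    /* would warn: -1 != 2 */
            CT_WARN_ON(ct_state() != CT_STATE_USER); /* silent: guard short-circuits */
            return 0;
    }
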
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index bbff5f7f8803062fa3e38ee69b35fbc9de3efe79..f1c53125edee25e3af17fb5254a6310c09609929 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
 #define DYNTICK_IRQ_NONIDLE    ((LONG_MAX / 2) + 1)
 
 enum ctx_state {
-       CONTEXT_DISABLED        = -1,   /* returned by ct_state() if unknown */
-       CONTEXT_KERNEL          = 0,
-       CONTEXT_IDLE            = 1,
-       CONTEXT_USER            = 2,
-       CONTEXT_GUEST           = 3,
-       CONTEXT_MAX             = 4,
+       CT_STATE_DISABLED       = -1,   /* returned by ct_state() if unknown */
+       CT_STATE_KERNEL         = 0,
+       CT_STATE_IDLE           = 1,
+       CT_STATE_USER           = 2,
+       CT_STATE_GUEST          = 3,
+       CT_STATE_MAX            = 4,
 };
 
 /* Even value for idle, else odd. */
-#define RCU_DYNTICKS_IDX CONTEXT_MAX
+#define RCU_DYNTICKS_IDX CT_STATE_MAX
 
-#define CT_STATE_MASK (CONTEXT_MAX - 1)
+#define CT_STATE_MASK (CT_STATE_MAX - 1)
 #define CT_DYNTICKS_MASK (~CT_STATE_MASK)
 
 struct context_tracking {
@@ -123,14 +123,14 @@ static inline bool context_tracking_enabled_this_cpu(void)
  *
  * Returns the current cpu's context tracking state if context tracking
  * is enabled.  If context tracking is disabled, returns
- * CONTEXT_DISABLED.  This should be used primarily for debugging.
+ * CT_STATE_DISABLED.  This should be used primarily for debugging.
  */
 static __always_inline int ct_state(void)
 {
        int ret;
 
        if (!context_tracking_enabled())
-               return CONTEXT_DISABLED;
+               return CT_STATE_DISABLED;
 
        preempt_disable();
        ret = __ct_state();
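
The two masks above are what make the packed-word arithmetic in
kernel/context_tracking.c (below) work: because CT_STATE_MAX is a power of
two, the low bits of ct->state hold the context state while the remaining
bits count RCU EQS transitions in units of RCU_DYNTICKS_IDX. A standalone
sketch of that split, assuming only the definitions from the hunk above:

    #include <stdio.h>

    enum ctx_state {
            CT_STATE_DISABLED       = -1,   /* returned by ct_state() if unknown */
            CT_STATE_KERNEL         = 0,
            CT_STATE_IDLE           = 1,
            CT_STATE_USER           = 2,
            CT_STATE_GUEST          = 3,
            CT_STATE_MAX            = 4,
    };

    #define RCU_DYNTICKS_IDX CT_STATE_MAX          /* one EQS step = 4 */
    #define CT_STATE_MASK    (CT_STATE_MAX - 1)    /* 0b11: state bits */
    #define CT_DYNTICKS_MASK (~CT_STATE_MASK)      /* everything above them */

    int main(void)
    {
            /* e.g. three EQS transitions so far, currently in user context */
            int state = 3 * RCU_DYNTICKS_IDX + CT_STATE_USER;

            printf("ctx state: %d\n", state & CT_STATE_MASK);  /* 2 (CT_STATE_USER) */
            printf("eqs steps: %d\n", (state & CT_DYNTICKS_MASK) / RCU_DYNTICKS_IDX); /* 3 */
            return 0;
    }
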
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index b0fb775a600d9844aa6dbd7c6db70b1c7261829d..1e50cdb83ae501467ecc30ee52f1379d409f962e 100644
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -108,7 +108,7 @@ static __always_inline void enter_from_user_mode(struct pt_regs *regs)
        arch_enter_from_user_mode(regs);
        lockdep_hardirqs_off(CALLER_ADDR0);
 
-       CT_WARN_ON(__ct_state() != CONTEXT_USER);
+       CT_WARN_ON(__ct_state() != CT_STATE_USER);
        user_exit_irqoff();
 
        instrumentation_begin();
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 24b1e11432608e17d20c7844377d1148ac186d6d..4bb5751af994f65e2a0bc41afb094eb5e518caaf 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -317,7 +317,7 @@ void noinstr ct_nmi_enter(void)
 void noinstr ct_idle_enter(void)
 {
        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !raw_irqs_disabled());
-       ct_kernel_exit(false, RCU_DYNTICKS_IDX + CONTEXT_IDLE);
+       ct_kernel_exit(false, RCU_DYNTICKS_IDX + CT_STATE_IDLE);
 }
 EXPORT_SYMBOL_GPL(ct_idle_enter);
 
@@ -335,7 +335,7 @@ void noinstr ct_idle_exit(void)
        unsigned long flags;
 
        raw_local_irq_save(flags);
-       ct_kernel_enter(false, RCU_DYNTICKS_IDX - CONTEXT_IDLE);
+       ct_kernel_enter(false, RCU_DYNTICKS_IDX - CT_STATE_IDLE);
        raw_local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(ct_idle_exit);
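
Given the packing described in context_tracking_state.h above, the single
offset RCU_DYNTICKS_IDX + CT_STATE_IDLE (and its mirror image on exit) lets
one atomic add update both fields at once: it flips the state bits between
CT_STATE_KERNEL and CT_STATE_IDLE and bumps the EQS counter by one step,
assuming (as the calls above suggest) that the offset is applied to the
packed ct->state word. A standalone sketch of the arithmetic:

    #include <assert.h>

    #define CT_STATE_KERNEL  0
    #define CT_STATE_IDLE    1
    #define RCU_DYNTICKS_IDX 4   /* CT_STATE_MAX */
    #define CT_STATE_MASK    3   /* CT_STATE_MAX - 1 */

    int main(void)
    {
            /* In kernel context, with some number of prior EQS steps. */
            int state = 5 * RCU_DYNTICKS_IDX + CT_STATE_KERNEL;

            state += RCU_DYNTICKS_IDX + CT_STATE_IDLE;   /* as in ct_idle_enter() */
            assert((state & CT_STATE_MASK) == CT_STATE_IDLE);

            state += RCU_DYNTICKS_IDX - CT_STATE_IDLE;   /* as in ct_idle_exit() */
            assert((state & CT_STATE_MASK) == CT_STATE_KERNEL);
            return 0;
    }
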
@@ -485,7 +485,7 @@ void noinstr __ct_user_enter(enum ctx_state state)
                         * user_exit() or ct_irq_enter(). Let's remove RCU's dependency
                         * on the tick.
                         */
-                       if (state == CONTEXT_USER) {
+                       if (state == CT_STATE_USER) {
                                instrumentation_begin();
                                trace_user_enter(0);
                                vtime_user_enter(current);
@@ -621,7 +621,7 @@ void noinstr __ct_user_exit(enum ctx_state state)
                         * run an RCU read side critical section anytime.
                         */
                        ct_kernel_enter(true, RCU_DYNTICKS_IDX - state);
-                       if (state == CONTEXT_USER) {
+                       if (state == CT_STATE_USER) {
                                instrumentation_begin();
                                vtime_user_exit(current);
                                trace_user_exit(0);
@@ -634,12 +634,12 @@ void noinstr __ct_user_exit(enum ctx_state state)
                         * In this case we don't care about any concurrency/ordering.
                         */
                        if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE))
-                               raw_atomic_set(&ct->state, CONTEXT_KERNEL);
+                               raw_atomic_set(&ct->state, CT_STATE_KERNEL);
 
                } else {
                        if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE)) {
                                /* Tracking for vtime only, no concurrent RCU EQS accounting */
-                               raw_atomic_set(&ct->state, CONTEXT_KERNEL);
+                               raw_atomic_set(&ct->state, CT_STATE_KERNEL);
                        } else {
                                /*
                                 * Tracking for vtime and RCU EQS. Make sure we don't race
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index 90843cc38588065ee5c52f8549d6a32c69bdf102..5b6934e23c21d36a3238dc03e391eb9e3beb4cfb 100644
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -182,7 +182,7 @@ static void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
        unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
        unsigned long nr = syscall_get_nr(current, regs);
 
-       CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
+       CT_WARN_ON(ct_state() != CT_STATE_KERNEL);
 
        if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
                if (WARN(irqs_disabled(), "syscall %lu left IRQs disabled", nr))
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a9f655025607b977a790adbc7f33b1265fce6636..93a845fe844982a6b5b28b660eefb54966aa38b0 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5762,7 +5762,7 @@ static inline void schedule_debug(struct task_struct *prev, bool preempt)
                preempt_count_set(PREEMPT_DISABLED);
        }
        rcu_sleep_check();
-       SCHED_WARN_ON(ct_state() == CONTEXT_USER);
+       SCHED_WARN_ON(ct_state() == CT_STATE_USER);
 
        profile_hit(SCHED_PROFILING, __builtin_return_address(0));
 
@@ -6658,7 +6658,7 @@ asmlinkage __visible void __sched schedule_user(void)
         * we find a better solution.
         *
         * NB: There are buggy callers of this function.  Ideally we
-        * should warn if prev_state != CONTEXT_USER, but that will trigger
+        * should warn if prev_state != CT_STATE_USER, but that will trigger
         * too frequently to make sense yet.
         */
        enum ctx_state prev_state = exception_enter();