rcu: Clean up after the SRCU-fastification of RCU Tasks Trace
Author:     Paul E. McKenney <paulmck@kernel.org>
AuthorDate: Mon, 29 Dec 2025 19:10:58 +0000 (11:10 -0800)
Commit:     Boqun Feng <boqun.feng@gmail.com>
CommitDate: Thu, 1 Jan 2026 08:39:46 +0000 (16:39 +0800)
Now that RCU Tasks Trace has been re-implemented in terms of SRCU-fast,
the ->trc_ipi_to_cpu, ->trc_blkd_cpu, ->trc_blkd_node, ->trc_holdout_list,
and ->trc_reader_special task_struct fields are no longer used.
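
For context, the read side now needs only the two surviving fields,
->trc_reader_nesting and ->trc_reader_scp.  A minimal sketch of the
idea follows; the srcu_struct name rcu_tasks_trace_srcu_struct and the
exact nesting handling are illustrative assumptions, not code taken
from this commit:

	#include <linux/sched.h>
	#include <linux/srcu.h>

	/* Sketch only: the real srcu_struct lives in kernel/rcu/tasks.h. */
	DEFINE_STATIC_SRCU(rcu_tasks_trace_srcu_struct);

	static inline void rcu_read_lock_trace_sketch(void)
	{
		struct task_struct *t = current;

		/* Only the outermost reader takes an SRCU-fast slot... */
		if (!t->trc_reader_nesting++)
			t->trc_reader_scp =
				srcu_read_lock_fast(&rcu_tasks_trace_srcu_struct);
	}

	static inline void rcu_read_unlock_trace_sketch(void)
	{
		struct task_struct *t = current;

		/* ...and the outermost unlock releases it. */
		if (!--t->trc_reader_nesting)
			srcu_read_unlock_fast(&rcu_tasks_trace_srcu_struct,
					      t->trc_reader_scp);
	}

With per-task state reduced to a nesting count and an SRCU counter
pointer, grace periods no longer need to IPI CPUs or track holdout and
blocked-reader lists, which is why the fields above can go.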

In addition, the rcu_tasks_trace_qs(), rcu_tasks_trace_qs_blkd(),
exit_tasks_rcu_finish_trace(), rcu_spawn_tasks_trace_kthread(),
show_rcu_tasks_trace_gp_kthread(), rcu_tasks_trace_get_gp_data(),
rcu_tasks_trace_torture_stats_print(), and get_rcu_tasks_trace_gp_kthread()
functions, along with all of the other functions that they invoke, are
no longer used.
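
Correspondingly, the update side no longer needs its own grace-period
kthread.  A sketch of the idea, assuming the grace period and callback
queuing simply fall through to plain SRCU (the function and srcu_struct
names are illustrative, not this commit's actual entry points):

	/* Sketch: waiting for readers becomes an SRCU grace period. */
	static void synchronize_rcu_tasks_trace_sketch(void)
	{
		synchronize_srcu(&rcu_tasks_trace_srcu_struct);
	}

	/* Sketch: queued callbacks ride SRCU's callback machinery. */
	static void call_rcu_tasks_trace_sketch(struct rcu_head *rhp,
						rcu_callback_t func)
	{
		call_srcu(&rcu_tasks_trace_srcu_struct, rhp, func);
	}

Because SRCU supplies the grace-period machinery, the dedicated kthread
spawner and its stats/debug hooks listed above have nothing left to do.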

Also, the TRC_NEED_QS and TRC_NEED_QS_CHECKED CPP macros are no longer
used, nor are the rcu_tasks_trace_lazy_ms and rcu_task_ipi_delay rcupdate
module parameters or the TASKS_TRACE_RCU_READ_MB Kconfig option.

This commit therefore removes all of them.

[ paulmck: Apply Alexei Starovoitov feedback. ]

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: bpf@vger.kernel.org
Reviewed-by: Joel Fernandes <joelagnelf@nvidia.com>
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
13 files changed:
Documentation/admin-guide/kernel-parameters.txt
include/linux/rcupdate.h
include/linux/rcupdate_trace.h
include/linux/sched.h
init/init_task.c
kernel/fork.c
kernel/rcu/Kconfig
kernel/rcu/rcu.h
kernel/rcu/rcuscale.c
kernel/rcu/rcutorture.c
kernel/rcu/tasks.h
tools/testing/selftests/rcutorture/configs/rcu/TRACE01
tools/testing/selftests/rcutorture/configs/rcu/TRACE02

index a8d0afde7f85a506b827ae31d48fc5d9dbabc095..1b8e5cadbecbc2120c0ede367666f06543b645da 100644 (file)
@@ -6249,13 +6249,6 @@ Kernel parameters
                        dynamically) adjusted.  This parameter is intended
                        for use in testing.
 
-       rcupdate.rcu_task_ipi_delay= [KNL]
-                       Set time in jiffies during which RCU tasks will
-                       avoid sending IPIs, starting with the beginning
-                       of a given grace period.  Setting a large
-                       number avoids disturbing real-time workloads,
-                       but lengthens grace periods.
-
        rcupdate.rcu_task_lazy_lim= [KNL]
                        Number of callbacks on a given CPU that will
                        cancel laziness on that CPU.  Use -1 to disable
@@ -6299,14 +6292,6 @@ Kernel parameters
                        of zero will disable batching.  Batching is
                        always disabled for synchronize_rcu_tasks().
 
-       rcupdate.rcu_tasks_trace_lazy_ms= [KNL]
-                       Set timeout in milliseconds RCU Tasks
-                       Trace asynchronous callback batching for
-                       call_rcu_tasks_trace().  A negative value
-                       will take the default.  A value of zero will
-                       disable batching.  Batching is always disabled
-                       for synchronize_rcu_tasks_trace().
-
        rcupdate.rcu_self_test= [KNL]
                        Run the RCU early boot self tests
 
index c5b30054cd018bb96d227e23ee911e26d98e27b3..bd5a420cf09a0a5ae093228004d67a2e2bec2d4b 100644 (file)
@@ -175,36 +175,7 @@ void rcu_tasks_torture_stats_print(char *tt, char *tf);
 # define synchronize_rcu_tasks synchronize_rcu
 # endif
 
-# ifdef CONFIG_TASKS_TRACE_RCU
-// Bits for ->trc_reader_special.b.need_qs field.
-#define TRC_NEED_QS            0x1  // Task needs a quiescent state.
-#define TRC_NEED_QS_CHECKED    0x2  // Task has been checked for needing quiescent state.
-
-u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new);
-void rcu_tasks_trace_qs_blkd(struct task_struct *t);
-
-# define rcu_tasks_trace_qs(t)                                                 \
-       do {                                                                    \
-               int ___rttq_nesting = READ_ONCE((t)->trc_reader_nesting);       \
-                                                                               \
-               if (unlikely(READ_ONCE((t)->trc_reader_special.b.need_qs) == TRC_NEED_QS) &&    \
-                   likely(!___rttq_nesting)) {                                 \
-                       rcu_trc_cmpxchg_need_qs((t), TRC_NEED_QS, TRC_NEED_QS_CHECKED); \
-               } else if (___rttq_nesting && ___rttq_nesting != INT_MIN &&     \
-                          !READ_ONCE((t)->trc_reader_special.b.blocked)) {     \
-                       rcu_tasks_trace_qs_blkd(t);                             \
-               }                                                               \
-       } while (0)
-void rcu_tasks_trace_torture_stats_print(char *tt, char *tf);
-# else
-# define rcu_tasks_trace_qs(t) do { } while (0)
-# endif
-
-#define rcu_tasks_qs(t, preempt)                                       \
-do {                                                                   \
-       rcu_tasks_classic_qs((t), (preempt));                           \
-       rcu_tasks_trace_qs(t);                                          \
-} while (0)
+#define rcu_tasks_qs(t, preempt) rcu_tasks_classic_qs((t), (preempt))
 
 # ifdef CONFIG_TASKS_RUDE_RCU
 void synchronize_rcu_tasks_rude(void);
index 3f46cbe6700038b0b7880c2eb8a441368d3f2a30..0bd47f12ecd17b954ba3efe68713bb0df16ea3d7 100644 (file)
@@ -136,9 +136,7 @@ static inline void rcu_barrier_tasks_trace(void)
 }
 
 // Placeholders to enable stepwise transition.
-void rcu_tasks_trace_get_gp_data(int *flags, unsigned long *gp_seq);
 void __init rcu_tasks_trace_suppress_unused(void);
-struct task_struct *get_rcu_tasks_trace_gp_kthread(void);
 
 #else
 /*
index fe39d422b37d77df35b9f5b34039289ce210cee9..56156643ccac8c8d52f2abfe62adf4bf766d9cb0 100644 (file)
@@ -946,11 +946,6 @@ struct task_struct {
 #ifdef CONFIG_TASKS_TRACE_RCU
        int                             trc_reader_nesting;
        struct srcu_ctr __percpu        *trc_reader_scp;
-       int                             trc_ipi_to_cpu;
-       union rcu_special               trc_reader_special;
-       struct list_head                trc_holdout_list;
-       struct list_head                trc_blkd_node;
-       int                             trc_blkd_cpu;
 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
 
        struct sched_info               sched_info;
index 49b13d7c3985d4be1bafce6fc9fb476e85a27541..db92c404d59a8db6feff7ef496ee1958ed95665b 100644 (file)
@@ -195,9 +195,6 @@ struct task_struct init_task __aligned(L1_CACHE_BYTES) = {
 #endif
 #ifdef CONFIG_TASKS_TRACE_RCU
        .trc_reader_nesting = 0,
-       .trc_reader_special.s = 0,
-       .trc_holdout_list = LIST_HEAD_INIT(init_task.trc_holdout_list),
-       .trc_blkd_node = LIST_HEAD_INIT(init_task.trc_blkd_node),
 #endif
 #ifdef CONFIG_CPUSETS
        .mems_allowed_seq = SEQCNT_SPINLOCK_ZERO(init_task.mems_allowed_seq,
index b1f3915d5f8ec887b96a59b799797c34be91c231..d7ed107cbb47d730012ab3dc6f4a7d3e99e3b44b 100644 (file)
@@ -1828,9 +1828,6 @@ static inline void rcu_copy_process(struct task_struct *p)
 #endif /* #ifdef CONFIG_TASKS_RCU */
 #ifdef CONFIG_TASKS_TRACE_RCU
        p->trc_reader_nesting = 0;
-       p->trc_reader_special.s = 0;
-       INIT_LIST_HEAD(&p->trc_holdout_list);
-       INIT_LIST_HEAD(&p->trc_blkd_node);
 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
 }
 
index 4d9b21f69eaae483631cb3027a1b16f59e5ba80a..8d5a1ecb7d56cfa38574bf849ce1d3f467f01d68 100644 (file)
@@ -313,24 +313,6 @@ config RCU_NOCB_CPU_CB_BOOST
          Say Y here if you want to set RT priority for offloading kthreads.
          Say N here if you are building a !PREEMPT_RT kernel and are unsure.
 
-config TASKS_TRACE_RCU_READ_MB
-       bool "Tasks Trace RCU readers use memory barriers in user and idle"
-       depends on RCU_EXPERT && TASKS_TRACE_RCU
-       default PREEMPT_RT || NR_CPUS < 8
-       help
-         Use this option to further reduce the number of IPIs sent
-         to CPUs executing in userspace or idle during tasks trace
-         RCU grace periods.  Given that a reasonable setting of
-         the rcupdate.rcu_task_ipi_delay kernel boot parameter
-         eliminates such IPIs for many workloads, proper setting
-         of this Kconfig option is important mostly for aggressive
-         real-time installations and for battery-powered devices,
-         hence the default chosen above.
-
-         Say Y here if you hate IPIs.
-         Say N here if you hate read-side memory barriers.
-         Take the default if you are unsure.
-
 config RCU_LAZY
        bool "RCU callback lazy invocation functionality"
        depends on RCU_NOCB_CPU
index 9cf01832a6c3d17bc49a7e8a0d0a55e97c0ba6fd..dc5d614b372c1e1bafbbe11dbcdbe85ef0dbc590 100644 (file)
@@ -544,10 +544,6 @@ struct task_struct *get_rcu_tasks_rude_gp_kthread(void);
 void rcu_tasks_rude_get_gp_data(int *flags, unsigned long *gp_seq);
 #endif // # ifdef CONFIG_TASKS_RUDE_RCU
 
-#ifdef CONFIG_TASKS_TRACE_RCU
-void rcu_tasks_trace_get_gp_data(int *flags, unsigned long *gp_seq);
-#endif
-
 #ifdef CONFIG_TASKS_RCU_GENERIC
 void tasks_cblist_init_generic(void);
 #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
@@ -673,11 +669,6 @@ void show_rcu_tasks_rude_gp_kthread(void);
 #else
 static inline void show_rcu_tasks_rude_gp_kthread(void) {}
 #endif
-#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_TRACE_RCU)
-void show_rcu_tasks_trace_gp_kthread(void);
-#else
-static inline void show_rcu_tasks_trace_gp_kthread(void) {}
-#endif
 
 #ifdef CONFIG_TINY_RCU
 static inline bool rcu_cpu_beenfullyonline(int cpu) { return true; }
index 7484d8ad5767b5fb5e7869320980554c33a90450..1c50f89fbd6f72b28d603b9e4c8b857efda56c67 100644 (file)
@@ -400,11 +400,6 @@ static void tasks_trace_scale_read_unlock(int idx)
        rcu_read_unlock_trace();
 }
 
-static void rcu_tasks_trace_scale_stats(void)
-{
-       rcu_tasks_trace_torture_stats_print(scale_type, SCALE_FLAG);
-}
-
 static struct rcu_scale_ops tasks_tracing_ops = {
        .ptype          = RCU_TASKS_FLAVOR,
        .init           = rcu_sync_scale_init,
@@ -416,8 +411,6 @@ static struct rcu_scale_ops tasks_tracing_ops = {
        .gp_barrier     = rcu_barrier_tasks_trace,
        .sync           = synchronize_rcu_tasks_trace,
        .exp_sync       = synchronize_rcu_tasks_trace,
-       .rso_gp_kthread = get_rcu_tasks_trace_gp_kthread,
-       .stats          = IS_ENABLED(CONFIG_TINY_RCU) ? NULL : rcu_tasks_trace_scale_stats,
        .name           = "tasks-tracing"
 };
 
index 07e51974b06bc692f47bb4752dd026953030732e..78a6ebe77d35d39596eb82a40dfc3cdb21cb3af8 100644 (file)
@@ -1180,8 +1180,6 @@ static struct rcu_torture_ops tasks_tracing_ops = {
        .exp_sync       = synchronize_rcu_tasks_trace,
        .call           = call_rcu_tasks_trace,
        .cb_barrier     = rcu_barrier_tasks_trace,
-       .gp_kthread_dbg = show_rcu_tasks_trace_gp_kthread,
-       .get_gp_data    = rcu_tasks_trace_get_gp_data,
        .cbflood_max    = 50000,
        .irq_capable    = 1,
        .slow_gps       = 1,
index 1fe789c99f361dda3aedfa5eca17fdd3605142be..1249b47f0a8daa0ba16ff1cd14e10350b58cd40d 100644 (file)
@@ -161,11 +161,6 @@ static void tasks_rcu_exit_srcu_stall(struct timer_list *unused);
 static DEFINE_TIMER(tasks_rcu_exit_srcu_stall_timer, tasks_rcu_exit_srcu_stall);
 #endif
 
-/* Avoid IPIing CPUs early in the grace period. */
-#define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
-static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
-module_param(rcu_task_ipi_delay, int, 0644);
-
 /* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
 #define RCU_TASK_BOOT_STALL_TIMEOUT (HZ * 30)
 #define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
@@ -800,8 +795,6 @@ static void rcu_tasks_torture_stats_print_generic(struct rcu_tasks *rtp, char *t
 
 #endif // #ifndef CONFIG_TINY_RCU
 
-static void exit_tasks_rcu_finish_trace(struct task_struct *t);
-
 #if defined(CONFIG_TASKS_RCU)
 
 ////////////////////////////////////////////////////////////////////////
@@ -1321,13 +1314,11 @@ void exit_tasks_rcu_finish(void)
        raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
        list_del_init(&t->rcu_tasks_exit_list);
        raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
-
-       exit_tasks_rcu_finish_trace(t);
 }
 
 #else /* #ifdef CONFIG_TASKS_RCU */
 void exit_tasks_rcu_start(void) { }
-void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
+void exit_tasks_rcu_finish(void) { }
 #endif /* #else #ifdef CONFIG_TASKS_RCU */
 
 #ifdef CONFIG_TASKS_RUDE_RCU
@@ -1475,69 +1466,6 @@ void __init rcu_tasks_trace_suppress_unused(void)
 #endif // #ifndef CONFIG_TINY_RCU
 }
 
-/*
- * Do a cmpxchg() on ->trc_reader_special.b.need_qs, allowing for
- * the four-byte operand-size restriction of some platforms.
- *
- * Returns the old value, which is often ignored.
- */
-u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new)
-{
-       return cmpxchg(&t->trc_reader_special.b.need_qs, old, new);
-}
-EXPORT_SYMBOL_GPL(rcu_trc_cmpxchg_need_qs);
-
-/* Add a newly blocked reader task to its CPU's list. */
-void rcu_tasks_trace_qs_blkd(struct task_struct *t)
-{
-}
-EXPORT_SYMBOL_GPL(rcu_tasks_trace_qs_blkd);
-
-/* Communicate task state back to the RCU tasks trace stall warning request. */
-struct trc_stall_chk_rdr {
-       int nesting;
-       int ipi_to_cpu;
-       u8 needqs;
-};
-
-/* Report any needed quiescent state for this exiting task. */
-static void exit_tasks_rcu_finish_trace(struct task_struct *t)
-{
-}
-
-int rcu_tasks_trace_lazy_ms = -1;
-module_param(rcu_tasks_trace_lazy_ms, int, 0444);
-
-static int __init rcu_spawn_tasks_trace_kthread(void)
-{
-       return 0;
-}
-
-#if !defined(CONFIG_TINY_RCU)
-void show_rcu_tasks_trace_gp_kthread(void)
-{
-}
-EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);
-
-void rcu_tasks_trace_torture_stats_print(char *tt, char *tf)
-{
-}
-EXPORT_SYMBOL_GPL(rcu_tasks_trace_torture_stats_print);
-#endif // !defined(CONFIG_TINY_RCU)
-
-struct task_struct *get_rcu_tasks_trace_gp_kthread(void)
-{
-       return NULL;
-}
-EXPORT_SYMBOL_GPL(get_rcu_tasks_trace_gp_kthread);
-
-void rcu_tasks_trace_get_gp_data(int *flags, unsigned long *gp_seq)
-{
-}
-EXPORT_SYMBOL_GPL(rcu_tasks_trace_get_gp_data);
-
-#else /* #ifdef CONFIG_TASKS_TRACE_RCU */
-static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
 #endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */
 
 #ifndef CONFIG_TINY_RCU
@@ -1545,7 +1473,6 @@ void show_rcu_tasks_gp_kthreads(void)
 {
        show_rcu_tasks_classic_gp_kthread();
        show_rcu_tasks_rude_gp_kthread();
-       show_rcu_tasks_trace_gp_kthread();
 }
 #endif /* #ifndef CONFIG_TINY_RCU */
 
@@ -1684,10 +1611,6 @@ static int __init rcu_init_tasks_generic(void)
        rcu_spawn_tasks_rude_kthread();
 #endif
 
-#ifdef CONFIG_TASKS_TRACE_RCU
-       rcu_spawn_tasks_trace_kthread();
-#endif
-
        // Run the self-tests.
        rcu_tasks_initiate_self_tests();
 
index 85b407467454a2e98072ff234d9396e450ec01d1..18efab346381a4d89369b31d7948ddbf7481d1a0 100644 (file)
@@ -10,5 +10,4 @@ CONFIG_PROVE_LOCKING=n
 #CHECK#CONFIG_PROVE_RCU=n
 CONFIG_FORCE_TASKS_TRACE_RCU=y
 #CHECK#CONFIG_TASKS_TRACE_RCU=y
-CONFIG_TASKS_TRACE_RCU_READ_MB=y
 CONFIG_RCU_EXPERT=y
index 9003c56cd76484b35e9a0cbc11ac4f5cedf8713c..8da390e8282977aadf767d38c771f37b73bbc1b4 100644 (file)
@@ -9,6 +9,5 @@ CONFIG_PROVE_LOCKING=y
 #CHECK#CONFIG_PROVE_RCU=y
 CONFIG_FORCE_TASKS_TRACE_RCU=y
 #CHECK#CONFIG_TASKS_TRACE_RCU=y
-CONFIG_TASKS_TRACE_RCU_READ_MB=n
 CONFIG_RCU_EXPERT=y
 CONFIG_DEBUG_OBJECTS=y