// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 *
 * Copyright IBM Corporation, 2008
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *	    Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/prefetch.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/trace_events.h>
#include <linux/suspend.h>
#include <linux/ftrace.h>
#include <linux/tick.h>
#include <linux/sysrq.h>
#include <linux/kprobes.h>
#include <linux/gfp.h>
#include <linux/oom.h>
#include <linux/smpboot.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched/isolation.h>
#include <linux/sched/clock.h>
#include "../time/tick-internal.h"

#include "tree.h"
#include "rcu.h"
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcutree."

#ifndef data_race
#define data_race(expr)						\
	({							\
		expr;						\
	})
#endif
#ifndef ASSERT_EXCLUSIVE_WRITER
#define ASSERT_EXCLUSIVE_WRITER(var) do { } while (0)
#endif
#ifndef ASSERT_EXCLUSIVE_ACCESS
#define ASSERT_EXCLUSIVE_ACCESS(var) do { } while (0)
#endif
/* Data structures. */

/*
 * Steal a bit from the bottom of ->dynticks for idle entry/exit
 * control.  Initially this is for TLB flushing.
 */
#define RCU_DYNTICK_CTRL_MASK 0x1
#define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)

static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
	.dynticks_nesting = 1,
	.dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
	.dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
};
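/*
 * Note on the ->dynticks encoding used above: the counter is always
 * advanced in units of RCU_DYNTICK_CTRL_CTR (2), so bit 0 stays free
 * for special-action requests (RCU_DYNTICK_CTRL_MASK).  A CPU is in an
 * extended quiescent state whenever the RCU_DYNTICK_CTRL_CTR bit is
 * clear, and each rcu_dynticks_eqs_enter()/rcu_dynticks_eqs_exit()
 * toggles that bit by adding RCU_DYNTICK_CTRL_CTR.
 */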
static struct rcu_state rcu_state = {
	.level = { &rcu_state.node[0] },
	.gp_state = RCU_GP_IDLE,
	.gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
	.barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
	.name = RCU_NAME,
	.abbr = RCU_ABBR,
	.exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
	.exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
	.ofl_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock),
};

/* Dump rcu_node combining tree at boot to verify correct setup. */
static bool dump_tree;
module_param(dump_tree, bool, 0444);
/* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
static bool use_softirq = true;
module_param(use_softirq, bool, 0444);
/* Control rcu_node-tree auto-balancing at boot time. */
static bool rcu_fanout_exact;
module_param(rcu_fanout_exact, bool, 0444);
/* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
module_param(rcu_fanout_leaf, int, 0444);
int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
/* Number of rcu_nodes at specified level. */
int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
/*
 * The rcu_scheduler_active variable is initialized to the value
 * RCU_SCHEDULER_INACTIVE and transitions RCU_SCHEDULER_INIT just before the
 * first task is spawned.  So when this variable is RCU_SCHEDULER_INACTIVE,
 * RCU can assume that there is but one task, allowing RCU to (for example)
 * optimize synchronize_rcu() to a simple barrier().  When this variable
 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
 * to detect real grace periods.  This variable is also used to suppress
 * boot-time false positives from lockdep-RCU error checking.  Finally, it
 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
 * is fully initialized, including all of its kthreads having been spawned.
 */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);

/*
 * The rcu_scheduler_fully_active variable transitions from zero to one
 * during the early_initcall() processing, which is after the scheduler
 * is capable of creating new tasks.  So RCU processing (for example,
 * creating tasks for RCU priority boosting) must be delayed until after
 * rcu_scheduler_fully_active transitions from zero to one.  We also
 * currently delay invocation of any RCU callbacks until after this point.
 *
 * It might later prove better for people registering RCU callbacks during
 * early boot to take responsibility for these callbacks, but one step at
 * a time.
 */
static int rcu_scheduler_fully_active __read_mostly;

static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
			      unsigned long gps, unsigned long flags);
static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static void rcu_report_exp_rdp(struct rcu_data *rdp);
static void sync_sched_exp_online_cleanup(int cpu);
static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);

/* rcuc/rcub kthread realtime priority */
static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
module_param(kthread_prio, int, 0444);
/* Delay in jiffies for grace-period initialization delays, debug only. */

static int gp_preinit_delay;
module_param(gp_preinit_delay, int, 0444);
static int gp_init_delay;
module_param(gp_init_delay, int, 0444);
static int gp_cleanup_delay;
module_param(gp_cleanup_delay, int, 0444);

/* Retrieve RCU kthreads priority for rcutorture */
int rcu_get_gp_kthreads_prio(void)
{
	return kthread_prio;
}
EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);

/*
 * Number of grace periods between delays, normalized by the duration of
 * the delay.  The longer the delay, the more the grace periods between
 * each delay.  The reason for this normalization is that it means that,
 * for non-zero delays, the overall slowdown of grace periods is constant
 * regardless of the duration of the delay.  This arrangement balances
 * the need for long delays to increase some race probabilities with the
 * need for fast grace periods to increase other race probabilities.
 */
#define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays. */
/*
 * Compute the mask of online CPUs for the specified rcu_node structure.
 * This will not be stable unless the rcu_node structure's ->lock is
 * held, but the bit corresponding to the current CPU will be stable
 * in most contexts.
 */
static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
{
	return READ_ONCE(rnp->qsmaskinitnext);
}

/*
 * Return true if an RCU grace period is in progress.  The READ_ONCE()s
 * permit this function to be invoked without holding the root rcu_node
 * structure's ->lock, but of course results can be subject to change.
 */
static int rcu_gp_in_progress(void)
{
	return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
}

/*
 * Return the number of callbacks queued on the specified CPU.
 * Handles both the nocbs and normal cases.
 */
static long rcu_get_n_cbs_cpu(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

	if (rcu_segcblist_is_enabled(&rdp->cblist))
		return rcu_segcblist_n_cbs(&rdp->cblist);
	return 0;
}

void rcu_softirq_qs(void)
{
	rcu_qs();
	rcu_preempt_deferred_qs(current);
}
/*
 * Record entry into an extended quiescent state.  This is only to be
 * called when not already in an extended quiescent state, that is,
 * RCU is watching prior to the call to this function and is no longer
 * watching upon return.
 */
static noinstr void rcu_dynticks_eqs_enter(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	int seq;

	/*
	 * CPUs seeing atomic_add_return() must see prior RCU read-side
	 * critical sections, and we also must force ordering with the
	 * next idle sojourn.
	 */
	rcu_dynticks_task_trace_enter();  // Before ->dynticks update!
	seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
	// RCU is no longer watching.  Better be in extended quiescent state!
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     (seq & RCU_DYNTICK_CTRL_CTR));
	/* Better not have special action (TLB flush) pending! */
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     (seq & RCU_DYNTICK_CTRL_MASK));
}

/*
 * Record exit from an extended quiescent state.  This is only to be
 * called from an extended quiescent state, that is, RCU is not watching
 * prior to the call to this function and is watching upon return.
 */
static noinstr void rcu_dynticks_eqs_exit(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	int seq;

	/*
	 * CPUs seeing atomic_add_return() must see prior idle sojourns,
	 * and we also must force ordering with the next RCU read-side
	 * critical section.
	 */
	seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
	// RCU is now watching.  Better not be in an extended quiescent state!
	rcu_dynticks_task_trace_exit();  // After ->dynticks update!
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     !(seq & RCU_DYNTICK_CTRL_CTR));
	if (seq & RCU_DYNTICK_CTRL_MASK) {
		atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks);
		smp_mb__after_atomic(); /* _exit after clearing mask. */
	}
}
/*
 * Reset the current CPU's ->dynticks counter to indicate that the
 * newly onlined CPU is no longer in an extended quiescent state.
 * This will either leave the counter unchanged, or increment it
 * to the next non-quiescent value.
 *
 * The non-atomic test/increment sequence works because the upper bits
 * of the ->dynticks counter are manipulated only by the corresponding CPU,
 * or when the corresponding CPU is offline.
 */
static void rcu_dynticks_eqs_online(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	if (atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR)
		return;
	atomic_add(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
}

/*
 * Is the current CPU in an extended quiescent state?
 *
 * No ordering, as we are sampling CPU-local information.
 */
static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	return !(atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR);
}
/*
 * Snapshot the ->dynticks counter with full ordering so as to allow
 * stable comparison of this counter with past and future snapshots.
 */
static int rcu_dynticks_snap(struct rcu_data *rdp)
{
	int snap = atomic_add_return(0, &rdp->dynticks);

	return snap & ~RCU_DYNTICK_CTRL_MASK;
}

/*
 * Return true if the snapshot returned from rcu_dynticks_snap()
 * indicates that RCU is in an extended quiescent state.
 */
static bool rcu_dynticks_in_eqs(int snap)
{
	return !(snap & RCU_DYNTICK_CTRL_CTR);
}

/*
 * Return true if the CPU corresponding to the specified rcu_data
 * structure has spent some time in an extended quiescent state since
 * rcu_dynticks_snap() returned the specified snapshot.
 */
static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
{
	return snap != rcu_dynticks_snap(rdp);
}
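/*
 * The inequality test above suffices because ->dynticks only moves
 * forward and rcu_dynticks_snap() masks out RCU_DYNTICK_CTRL_MASK:
 * any change in the masked value since the snapshot means that the
 * CPU has entered and/or exited at least one extended quiescent state
 * in the meantime.
 */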
/*
 * Return true if the referenced integer is zero while the specified
 * CPU remains within a single extended quiescent state.
 */
bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	int snap;

	// If not quiescent, force back to earlier extended quiescent state.
	snap = atomic_read(&rdp->dynticks) & ~(RCU_DYNTICK_CTRL_MASK |
					       RCU_DYNTICK_CTRL_CTR);

	smp_rmb(); // Order ->dynticks and *vp reads.
	if (READ_ONCE(*vp))
		return false;  // Non-zero, so report failure;
	smp_rmb(); // Order *vp read and ->dynticks re-read.

	// If still in the same extended quiescent state, we are good!
	return snap == (atomic_read(&rdp->dynticks) & ~RCU_DYNTICK_CTRL_MASK);
}
/*
 * Set the special (bottom) bit of the specified CPU so that it
 * will take special action (such as flushing its TLB) on the
 * next exit from an extended quiescent state.  Returns true if
 * the bit was successfully set, or false if the CPU was not in
 * an extended quiescent state.
 */
bool rcu_eqs_special_set(int cpu)
{
	int old;
	int new;
	int new_old;
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);

	new_old = atomic_read(&rdp->dynticks);
	do {
		old = new_old;
		if (old & RCU_DYNTICK_CTRL_CTR)
			return false;
		new = old | RCU_DYNTICK_CTRL_MASK;
		new_old = atomic_cmpxchg(&rdp->dynticks, old, new);
	} while (new_old != old);
	return true;
}
/*
 * Let the RCU core know that this CPU has gone through the scheduler,
 * which is a quiescent state.  This is called when the need for a
 * quiescent state is urgent, so we burn an atomic operation and full
 * memory barriers to let the RCU core know about it, regardless of what
 * this CPU might (or might not) do in the near future.
 *
 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
 *
 * The caller must have disabled interrupts and must not be idle.
 */
void rcu_momentary_dyntick_idle(void)
{
	int special;

	raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
	special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
				    &this_cpu_ptr(&rcu_data)->dynticks);
	/* It is illegal to call this from idle state. */
	WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
	rcu_preempt_deferred_qs(current);
}
EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);
/**
 * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
 *
 * If the current CPU is idle and running at a first-level (not nested)
 * interrupt, or directly, from idle, return true.
 *
 * The caller must have at least disabled IRQs.
 */
static int rcu_is_cpu_rrupt_from_idle(void)
{
	long nesting;

	/*
	 * Usually called from the tick; but also used from smp_function_call()
	 * for expedited grace periods. This latter can result in running from
	 * the idle task, instead of an actual IPI.
	 */
	lockdep_assert_irqs_disabled();

	/* Check for counter underflows */
	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) < 0,
			 "RCU dynticks_nesting counter underflow!");
	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) <= 0,
			 "RCU dynticks_nmi_nesting counter underflow/zero!");

	/* Are we at first interrupt nesting level? */
	nesting = __this_cpu_read(rcu_data.dynticks_nmi_nesting);
	if (nesting > 1)
		return false;

	/*
	 * If we're not in an interrupt, we must be in the idle task!
	 */
	WARN_ON_ONCE(!nesting && !is_idle_task(current));

	/* Does CPU appear to be idle from an RCU standpoint? */
	return __this_cpu_read(rcu_data.dynticks_nesting) == 0;
}
#define DEFAULT_RCU_BLIMIT	10     /* Maximum callbacks per rcu_do_batch ... */
#define DEFAULT_MAX_RCU_BLIMIT	10000  /* ... even during callback flood. */
static long blimit = DEFAULT_RCU_BLIMIT;
#define DEFAULT_RCU_QHIMARK	10000  /* If this many pending, ignore blimit. */
static long qhimark = DEFAULT_RCU_QHIMARK;
#define DEFAULT_RCU_QLOMARK	100    /* Once only this many pending, use blimit. */
static long qlowmark = DEFAULT_RCU_QLOMARK;
#define DEFAULT_RCU_QOVLD_MULT	2
#define DEFAULT_RCU_QOVLD	(DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK)
static long qovld = DEFAULT_RCU_QOVLD;	/* If this many pending, hammer QS. */
static long qovld_calc = -1;		/* No pre-initialization lock acquisitions! */

module_param(blimit, long, 0444);
module_param(qhimark, long, 0444);
module_param(qlowmark, long, 0444);
module_param(qovld, long, 0444);
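/*
 * Rough relationship among the knobs above: rcu_do_batch() normally
 * invokes at most blimit callbacks at a time; once a CPU has more than
 * qhimark callbacks pending, blimit is ignored until the backlog drops
 * back below qlowmark; and a backlog beyond qovld (by default
 * 2 * qhimark) additionally triggers aggressive quiescent-state forcing.
 */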
static ulong jiffies_till_first_fqs = ULONG_MAX;
static ulong jiffies_till_next_fqs = ULONG_MAX;
static bool rcu_kick_kthreads;
static int rcu_divisor = 7;
module_param(rcu_divisor, int, 0644);

/* Force an exit from rcu_do_batch() after 3 milliseconds. */
static long rcu_resched_ns = 3 * NSEC_PER_MSEC;
module_param(rcu_resched_ns, long, 0644);
/*
 * How long the grace period must be before we start recruiting
 * quiescent-state help from rcu_note_context_switch().
 */
static ulong jiffies_till_sched_qs = ULONG_MAX;
module_param(jiffies_till_sched_qs, ulong, 0444);
static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */
/*
 * Make sure that we give the grace-period kthread time to detect any
 * idle CPUs before taking active measures to force quiescent states.
 * However, don't go below 100 milliseconds, adjusted upwards for really
 * large systems.
 */
static void adjust_jiffies_till_sched_qs(void)
{
	unsigned long j;

	/* If jiffies_till_sched_qs was specified, respect the request. */
	if (jiffies_till_sched_qs != ULONG_MAX) {
		WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
		return;
	}
	/* Otherwise, set to third fqs scan, but bound below on large system. */
	j = READ_ONCE(jiffies_till_first_fqs) +
		      2 * READ_ONCE(jiffies_till_next_fqs);
	if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
		j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
	pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
	WRITE_ONCE(jiffies_to_sched_qs, j);
}
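/*
 * For example, with jiffies_till_first_fqs and jiffies_till_next_fqs
 * both at one jiffy, the computed value j above starts at 3 and is then
 * raised to the HZ/10-based floor (at least 100ms worth of jiffies,
 * plus a small adjustment on systems with many CPUs).
 */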
static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
{
	ulong j;
	int ret = kstrtoul(val, 0, &j);

	if (!ret) {
		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
		adjust_jiffies_till_sched_qs();
	}
	return ret;
}

static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
{
	ulong j;
	int ret = kstrtoul(val, 0, &j);

	if (!ret) {
		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
		adjust_jiffies_till_sched_qs();
	}
	return ret;
}

static struct kernel_param_ops first_fqs_jiffies_ops = {
	.set = param_set_first_fqs_jiffies,
	.get = param_get_ulong,
};

static struct kernel_param_ops next_fqs_jiffies_ops = {
	.set = param_set_next_fqs_jiffies,
	.get = param_get_ulong,
};

module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
module_param(rcu_kick_kthreads, bool, 0644);
static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
static int rcu_pending(int user);

/*
 * Return the number of RCU GPs completed thus far for debug & stats.
 */
unsigned long rcu_get_gp_seq(void)
{
	return READ_ONCE(rcu_state.gp_seq);
}
EXPORT_SYMBOL_GPL(rcu_get_gp_seq);

/*
 * Return the number of RCU expedited batches completed thus far for
 * debug & stats.  Odd numbers mean that a batch is in progress, even
 * numbers mean idle.  The value returned will thus be roughly double
 * the cumulative batches since boot.
 */
unsigned long rcu_exp_batches_completed(void)
{
	return rcu_state.expedited_sequence;
}
EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);

/*
 * Return the root node of the rcu_state structure.
 */
static struct rcu_node *rcu_get_root(void)
{
	return &rcu_state.node[0];
}
/*
 * Send along grace-period-related data for rcutorture diagnostics.
 */
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gp_seq)
{
	switch (test_type) {
	case RCU_FLAVOR:
		*flags = READ_ONCE(rcu_state.gp_flags);
		*gp_seq = rcu_seq_current(&rcu_state.gp_seq);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
/*
 * Enter an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 *
 * We crowbar the ->dynticks_nmi_nesting field to zero to allow for
 * the possibility of usermode upcalls having messed up our count
 * of interrupt nesting level during the prior busy period.
 */
static noinstr void rcu_eqs_enter(bool user)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE);
	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0);
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     rdp->dynticks_nesting == 0);
	if (rdp->dynticks_nesting != 1) {
		// RCU will still be watching, so just do accounting and leave.
		rdp->dynticks_nesting--;
		return;
	}

	lockdep_assert_irqs_disabled();
	instrumentation_begin();
	trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
	rdp = this_cpu_ptr(&rcu_data);
	do_nocb_deferred_wakeup(rdp);
	rcu_prepare_for_idle();
	rcu_preempt_deferred_qs(current);
	instrumentation_end();
	WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
	// RCU is watching here ...
	rcu_dynticks_eqs_enter();
	// ... but is no longer watching here.
	rcu_dynticks_task_enter();
}

/**
 * rcu_idle_enter - inform RCU that current CPU is entering idle
 *
 * Enter idle mode, in other words, -leave- the mode in which RCU
 * read-side critical sections can occur.  (Though RCU read-side
 * critical sections can occur in irq handlers in idle, a possibility
 * handled by irq_enter() and irq_exit().)
 *
 * If you add or remove a call to rcu_idle_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_idle_enter(void)
{
	lockdep_assert_irqs_disabled();
	rcu_eqs_enter(false);
}
#ifdef CONFIG_NO_HZ_FULL
/**
 * rcu_user_enter - inform RCU that we are resuming userspace.
 *
 * Enter RCU idle mode right before resuming userspace.  No use of RCU
 * is permitted between this call and rcu_user_exit(). This way the
 * CPU doesn't need to maintain the tick for RCU maintenance purposes
 * when the CPU runs in userspace.
 *
 * If you add or remove a call to rcu_user_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void rcu_user_enter(void)
{
	lockdep_assert_irqs_disabled();
	rcu_eqs_enter(true);
}
#endif /* CONFIG_NO_HZ_FULL */
/**
 * rcu_nmi_exit - inform RCU of exit from NMI context
 *
 * If we are returning from the outermost NMI handler that interrupted an
 * RCU-idle period, update rdp->dynticks and rdp->dynticks_nmi_nesting
 * to let the RCU grace-period handling know that the CPU is back to
 * being RCU-idle.
 *
 * If you add or remove a call to rcu_nmi_exit(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void rcu_nmi_exit(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	/*
	 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
	 * (We are exiting an NMI handler, so RCU better be paying attention
	 * to us!)
	 */
	WARN_ON_ONCE(rdp->dynticks_nmi_nesting <= 0);
	WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());

	/*
	 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
	 * leave it in non-RCU-idle state.
	 */
	if (rdp->dynticks_nmi_nesting != 1) {
		instrumentation_begin();
		trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2,
				  atomic_read(&rdp->dynticks));
		WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
			   rdp->dynticks_nmi_nesting - 2);
		instrumentation_end();
		return;
	}

	instrumentation_begin();
	/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
	trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */

	if (!in_nmi())
		rcu_prepare_for_idle();
	instrumentation_end();

	// RCU is watching here ...
	rcu_dynticks_eqs_enter();
	// ... but is no longer watching here.

	if (!in_nmi())
		rcu_dynticks_task_enter();
}

/**
 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
 *
 * Exit from an interrupt handler, which might possibly result in entering
 * idle mode, in other words, leaving the mode in which read-side critical
 * sections can occur.  The caller must have disabled interrupts.
 *
 * This code assumes that the idle loop never does anything that might
 * result in unbalanced calls to irq_enter() and irq_exit().  If your
 * architecture's idle loop violates this assumption, RCU will give you what
 * you deserve, good and hard.  But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 *
 * If you add or remove a call to rcu_irq_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void noinstr rcu_irq_exit(void)
{
	lockdep_assert_irqs_disabled();
	rcu_nmi_exit();
}
/**
 * rcu_irq_exit_preempt - Inform RCU that current CPU is exiting irq
 *			  towards in-kernel preemption
 *
 * Same as rcu_irq_exit() but has a sanity check that scheduling is safe
 * from RCU point of view. Invoked from return from interrupt before kernel
 * preemption.
 */
void rcu_irq_exit_preempt(void)
{
	lockdep_assert_irqs_disabled();
	rcu_nmi_exit();

	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
			 "RCU dynticks_nesting counter underflow/zero!");
	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) !=
			 DYNTICK_IRQ_NONIDLE,
			 "Bad RCU dynticks_nmi_nesting counter\n");
	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
			 "RCU in extended quiescent state!");
}

#ifdef CONFIG_PROVE_RCU
/**
 * rcu_irq_exit_check_preempt - Validate that scheduling is possible
 */
void rcu_irq_exit_check_preempt(void)
{
	lockdep_assert_irqs_disabled();

	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
			 "RCU dynticks_nesting counter underflow/zero!");
	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) !=
			 DYNTICK_IRQ_NONIDLE,
			 "Bad RCU dynticks_nmi_nesting counter\n");
	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
			 "RCU in extended quiescent state!");
}
#endif /* #ifdef CONFIG_PROVE_RCU */

/*
 * Wrapper for rcu_irq_exit() where interrupts are enabled.
 *
 * If you add or remove a call to rcu_irq_exit_irqson(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_irq_exit_irqson(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_irq_exit();
	local_irq_restore(flags);
}
/*
 * Exit an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 *
 * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to
 * allow for the possibility of usermode upcalls messing up our count of
 * interrupt nesting level during the busy period that is just now starting.
 */
static void noinstr rcu_eqs_exit(bool user)
{
	struct rcu_data *rdp;
	long oldval;

	lockdep_assert_irqs_disabled();
	rdp = this_cpu_ptr(&rcu_data);
	oldval = rdp->dynticks_nesting;
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
	if (oldval) {
		// RCU was already watching, so just do accounting and leave.
		rdp->dynticks_nesting++;
		return;
	}
	rcu_dynticks_task_exit();
	// RCU is not watching here ...
	rcu_dynticks_eqs_exit();
	// ... but is watching here.
	instrumentation_begin();
	rcu_cleanup_after_idle();
	trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks));
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
	WRITE_ONCE(rdp->dynticks_nesting, 1);
	WARN_ON_ONCE(rdp->dynticks_nmi_nesting);
	WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
	instrumentation_end();
}

/**
 * rcu_idle_exit - inform RCU that current CPU is leaving idle
 *
 * Exit idle mode, in other words, -enter- the mode in which RCU
 * read-side critical sections can occur.
 *
 * If you add or remove a call to rcu_idle_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_idle_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_eqs_exit(false);
	local_irq_restore(flags);
}
#ifdef CONFIG_NO_HZ_FULL
/**
 * rcu_user_exit - inform RCU that we are exiting userspace.
 *
 * Exit RCU idle mode while entering the kernel because it can
 * run a RCU read side critical section anytime.
 *
 * If you add or remove a call to rcu_user_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void noinstr rcu_user_exit(void)
{
	rcu_eqs_exit(1);
}

/**
 * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
 *
 * The scheduler tick is not normally enabled when CPUs enter the kernel
 * from nohz_full userspace execution.  After all, nohz_full userspace
 * execution is an RCU quiescent state and the time executing in the kernel
 * is quite short.  Except of course when it isn't.  And it is not hard to
 * cause a large system to spend tens of seconds or even minutes looping
 * in the kernel, which can cause a number of problems, include RCU CPU
 * stall warnings.
 *
 * Therefore, if a nohz_full CPU fails to report a quiescent state
 * in a timely manner, the RCU grace-period kthread sets that CPU's
 * ->rcu_urgent_qs flag with the expectation that the next interrupt or
 * exception will invoke this function, which will turn on the scheduler
 * tick, which will enable RCU to detect that CPU's quiescent states,
 * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
 * The tick will be disabled once a quiescent state is reported for
 * this CPU.
 *
 * Of course, in carefully tuned systems, there might never be an
 * interrupt or exception.  In that case, the RCU grace-period kthread
 * will eventually cause one to happen.  However, in less carefully
 * controlled environments, this function allows RCU to get what it
 * needs without creating otherwise useless interruptions.
 */
void __rcu_irq_enter_check_tick(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	// Enabling the tick is unsafe in NMI handlers.
	if (WARN_ON_ONCE(in_nmi()))
		return;

	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
			 "Illegal rcu_irq_enter_check_tick() from extended quiescent state");

	if (!tick_nohz_full_cpu(rdp->cpu) ||
	    !READ_ONCE(rdp->rcu_urgent_qs) ||
	    READ_ONCE(rdp->rcu_forced_tick)) {
		// RCU doesn't need nohz_full help from this CPU, or it is
		// already getting that help.
		return;
	}

	// We get here only when not in an extended quiescent state and
	// from interrupts (as opposed to NMIs).  Therefore, (1) RCU is
	// already watching and (2) The fact that we are in an interrupt
	// handler and that the rcu_node lock is an irq-disabled lock
	// prevents self-deadlock.  So we can safely recheck under the lock.
	// Note that the nohz_full state currently cannot change.
	raw_spin_lock_rcu_node(rdp->mynode);
	if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
		// A nohz_full CPU is in the kernel and RCU needs a
		// quiescent state.  Turn on the tick!
		WRITE_ONCE(rdp->rcu_forced_tick, true);
		tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
	}
	raw_spin_unlock_rcu_node(rdp->mynode);
}
#endif /* CONFIG_NO_HZ_FULL */
/**
 * rcu_nmi_enter - inform RCU of entry to NMI context
 *
 * If the CPU was idle from RCU's viewpoint, update rdp->dynticks and
 * rdp->dynticks_nmi_nesting to let the RCU grace-period handling know
 * that the CPU is active.  This implementation permits nested NMIs, as
 * long as the nesting level does not overflow an int.  (You will probably
 * run out of stack space first.)
 *
 * If you add or remove a call to rcu_nmi_enter(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void rcu_nmi_enter(void)
{
	long incby = 2;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	/* Complain about underflow. */
	WARN_ON_ONCE(rdp->dynticks_nmi_nesting < 0);

	/*
	 * If idle from RCU viewpoint, atomically increment ->dynticks
	 * to mark non-idle and increment ->dynticks_nmi_nesting by one.
	 * Otherwise, increment ->dynticks_nmi_nesting by two.  This means
	 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
	 * to be in the outermost NMI handler that interrupted an RCU-idle
	 * period (observation due to Andy Lutomirski).
	 */
	if (rcu_dynticks_curr_cpu_in_eqs()) {

		if (!in_nmi())
			rcu_dynticks_task_exit();

		// RCU is not watching here ...
		rcu_dynticks_eqs_exit();
		// ... but is watching here.

		if (!in_nmi())
			rcu_cleanup_after_idle();

		incby = 1;
	} else if (!in_nmi()) {
		instrumentation_begin();
		rcu_irq_enter_check_tick();
		instrumentation_end();
	}

	instrumentation_begin();
	trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
			  rdp->dynticks_nmi_nesting,
			  rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks));
	instrumentation_end();
	WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */
		   rdp->dynticks_nmi_nesting + incby);
	barrier();
}
/**
 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
 *
 * Enter an interrupt handler, which might possibly result in exiting
 * idle mode, in other words, entering the mode in which read-side critical
 * sections can occur.  The caller must have disabled interrupts.
 *
 * Note that the Linux kernel is fully capable of entering an interrupt
 * handler that it never exits, for example when doing upcalls to user mode!
 * This code assumes that the idle loop never does upcalls to user mode.
 * If your architecture's idle loop does do upcalls to user mode (or does
 * anything else that results in unbalanced calls to the irq_enter() and
 * irq_exit() functions), RCU will give you what you deserve, good and hard.
 * But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 *
 * If you add or remove a call to rcu_irq_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void rcu_irq_enter(void)
{
	lockdep_assert_irqs_disabled();
	rcu_nmi_enter();
}

/*
 * Wrapper for rcu_irq_enter() where interrupts are enabled.
 *
 * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_irq_enter_irqson(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_irq_enter();
	local_irq_restore(flags);
}
/*
 * If any sort of urgency was applied to the current CPU (for example,
 * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
 * to get to a quiescent state, disable it.
 */
static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
{
	raw_lockdep_assert_held_rcu_node(rdp->mynode);
	WRITE_ONCE(rdp->rcu_urgent_qs, false);
	WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
	if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
		tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
		WRITE_ONCE(rdp->rcu_forced_tick, false);
	}
}

noinstr bool __rcu_is_watching(void)
{
	return !rcu_dynticks_curr_cpu_in_eqs();
}
/**
 * rcu_is_watching - see if RCU thinks that the current CPU is not idle
 *
 * Return true if RCU is watching the running CPU, which means that this
 * CPU can safely enter RCU read-side critical sections.  In other words,
 * if the current CPU is not in its idle loop or is in an interrupt or
 * NMI handler, return true.
 */
bool rcu_is_watching(void)
{
	bool ret;

	preempt_disable_notrace();
	ret = !rcu_dynticks_curr_cpu_in_eqs();
	preempt_enable_notrace();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_is_watching);

/*
 * If a holdout task is actually running, request an urgent quiescent
 * state from its CPU.  This is unsynchronized, so migrations can cause
 * the request to go to the wrong CPU.  Which is OK, all that will happen
 * is that the CPU's next context switch will be a bit slower and next
 * time around this task will generate another request.
 */
void rcu_request_urgent_qs_task(struct task_struct *t)
{
	int cpu;

	barrier();
	cpu = task_cpu(t);
	if (!task_curr(t))
		return; /* This task is not running on that CPU. */
	smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
}
#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)

/*
 * Is the current CPU online as far as RCU is concerned?
 *
 * Disable preemption to avoid false positives that could otherwise
 * happen due to the current CPU number being sampled, this task being
 * preempted, its old CPU being taken offline, resuming on some other CPU,
 * then determining that its old CPU is now offline.
 *
 * Disable checking if in an NMI handler because we cannot safely
 * report errors from NMI handlers anyway.  In addition, it is OK to use
 * RCU on an offline processor during initial boot, hence the check for
 * rcu_scheduler_fully_active.
 */
bool rcu_lockdep_current_cpu_online(void)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	bool ret = false;

	if (in_nmi() || !rcu_scheduler_fully_active)
		return true;
	preempt_disable_notrace();
	rdp = this_cpu_ptr(&rcu_data);
	rnp = rdp->mynode;
	if (rdp->grpmask & rcu_rnp_online_cpus(rnp))
		ret = true;
	preempt_enable_notrace();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);

#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
/*
 * We are reporting a quiescent state on behalf of some other CPU, so
 * it is our responsibility to check for and handle potential overflow
 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
 * After all, the CPU might be in deep idle state, and thus executing no
 * code whatsoever.
 */
static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
{
	raw_lockdep_assert_held_rcu_node(rnp);
	if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
			 rnp->gp_seq))
		WRITE_ONCE(rdp->gpwrap, true);
	if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
		rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
}
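/*
 * The ULONG_MAX / 4 terms above act as a guard band: if a CPU's view of
 * ->gp_seq falls more than a quarter of the counter space behind the
 * rcu_node structure's value, the counters are assumed to have wrapped
 * with respect to each other and ->gpwrap is set so that the stale
 * per-CPU state is treated accordingly.
 */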
/*
 * Snapshot the specified CPU's dynticks counter so that we can later
 * credit them with an implicit quiescent state.  Return 1 if this CPU
 * is in dynticks idle mode, which is an extended quiescent state.
 */
static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
	rdp->dynticks_snap = rcu_dynticks_snap(rdp);
	if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
		rcu_gpnum_ovf(rdp->mynode, rdp);
		return 1;
	}
	return 0;
}
/*
 * Return true if the specified CPU has passed through a quiescent
 * state by virtue of being in or having passed through a dynticks
 * idle state since the last call to dyntick_save_progress_counter()
 * for this same CPU, or by virtue of having been offline.
 */
static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
{
	unsigned long jtsq;
	bool *rnhqp;
	bool *ruqp;
	struct rcu_node *rnp = rdp->mynode;

	/*
	 * If the CPU passed through or entered a dynticks idle phase with
	 * no active irq/NMI handlers, then we can safely pretend that the CPU
	 * already acknowledged the request to pass through a quiescent
	 * state.  Either way, that CPU cannot possibly be in an RCU
	 * read-side critical section that started before the beginning
	 * of the current RCU grace period.
	 */
	if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
		rcu_gpnum_ovf(rnp, rdp);
		return 1;
	}

	/* If waiting too long on an offline CPU, complain. */
	if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp)) &&
	    time_after(jiffies, rcu_state.gp_start + HZ)) {
		bool onl;
		struct rcu_node *rnp1;

		WARN_ON(1);  /* Offline CPUs are supposed to report QS! */
		pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
			__func__, rnp->grplo, rnp->grphi, rnp->level,
			(long)rnp->gp_seq, (long)rnp->completedqs);
		for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
			pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
				__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
		onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
		pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
			__func__, rdp->cpu, ".o"[onl],
			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
		return 1; /* Break things loose after complaining. */
	}

	/*
	 * A CPU running for an extended time within the kernel can
	 * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
	 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
	 * both .rcu_need_heavy_qs and .rcu_urgent_qs.  Note that the
	 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
	 * variable are safe because the assignments are repeated if this
	 * CPU failed to pass through a quiescent state.  This code
	 * also checks .jiffies_resched in case jiffies_to_sched_qs
	 * is set way high.
	 */
	jtsq = READ_ONCE(jiffies_to_sched_qs);
	ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu);
	rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu);
	if (!READ_ONCE(*rnhqp) &&
	    (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
	     time_after(jiffies, rcu_state.jiffies_resched) ||
	     rcu_state.cbovld)) {
		WRITE_ONCE(*rnhqp, true);
		/* Store rcu_need_heavy_qs before rcu_urgent_qs. */
		smp_store_release(ruqp, true);
	} else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
		WRITE_ONCE(*ruqp, true);
	}

	/*
	 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
	 * The above code handles this, but only for straight cond_resched().
	 * And some in-kernel loops check need_resched() before calling
	 * cond_resched(), which defeats the above code for CPUs that are
	 * running in-kernel with scheduling-clock interrupts disabled.
	 * So hit them over the head with the resched_cpu() hammer!
	 */
	if (tick_nohz_full_cpu(rdp->cpu) &&
	    (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
	     rcu_state.cbovld)) {
		WRITE_ONCE(*ruqp, true);
		resched_cpu(rdp->cpu);
		WRITE_ONCE(rdp->last_fqs_resched, jiffies);
	}

	/*
	 * If more than halfway to RCU CPU stall-warning time, invoke
	 * resched_cpu() more frequently to try to loosen things up a bit.
	 * Also check to see if the CPU is getting hammered with interrupts,
	 * but only once per grace period, just to keep the IPIs down to
	 * a dull roar.
	 */
	if (time_after(jiffies, rcu_state.jiffies_resched)) {
		if (time_after(jiffies,
			       READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
			resched_cpu(rdp->cpu);
			WRITE_ONCE(rdp->last_fqs_resched, jiffies);
		}
		if (IS_ENABLED(CONFIG_IRQ_WORK) &&
		    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
		    (rnp->ffmask & rdp->grpmask)) {
			init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
			atomic_set(&rdp->rcu_iw.flags, IRQ_WORK_HARD_IRQ);
			rdp->rcu_iw_pending = true;
			rdp->rcu_iw_gp_seq = rnp->gp_seq;
			irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
		}
	}

	return 0;
}
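/*
 * Summary of the escalation implemented above for a CPU that has not yet
 * reported a quiescent state: at age jiffies_to_sched_qs the CPU's
 * .rcu_urgent_qs flag is set, at twice that age .rcu_need_heavy_qs is set
 * as well, nohz_full CPUs are additionally hit with resched_cpu(), and
 * once past .jiffies_resched the CPU is rescheduled more frequently and,
 * at most once per grace period, poked with irq_work.
 */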
/* Trace-event wrapper function for trace_rcu_future_grace_period. */
static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
			      unsigned long gp_seq_req, const char *s)
{
	trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
				      gp_seq_req, rnp->level,
				      rnp->grplo, rnp->grphi, s);
}
/*
 * rcu_start_this_gp - Request the start of a particular grace period
 * @rnp_start: The leaf node of the CPU from which to start.
 * @rdp: The rcu_data corresponding to the CPU from which to start.
 * @gp_seq_req: The gp_seq of the grace period to start.
 *
 * Start the specified grace period, as needed to handle newly arrived
 * callbacks.  The required future grace periods are recorded in each
 * rcu_node structure's ->gp_seq_needed field.  Returns true if there
 * is reason to awaken the grace-period kthread.
 *
 * The caller must hold the specified rcu_node structure's ->lock, which
 * is why the caller is responsible for waking the grace-period kthread.
 *
 * Returns true if the GP thread needs to be awakened else false.
 */
static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
			      unsigned long gp_seq_req)
{
	bool ret = false;
	struct rcu_node *rnp;

	/*
	 * Use funnel locking to either acquire the root rcu_node
	 * structure's lock or bail out if the need for this grace period
	 * has already been recorded -- or if that grace period has in
	 * fact already started.  If there is already a grace period in
	 * progress in a non-leaf node, no recording is needed because the
	 * end of the grace period will scan the leaf rcu_node structures.
	 * Note that rnp_start->lock must not be released.
	 */
	raw_lockdep_assert_held_rcu_node(rnp_start);
	trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
	for (rnp = rnp_start; 1; rnp = rnp->parent) {
		if (rnp != rnp_start)
			raw_spin_lock_rcu_node(rnp);
		if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
		    rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
		    (rnp != rnp_start &&
		     rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
			trace_rcu_this_gp(rnp, rdp, gp_seq_req,
					  TPS("Prestarted"));
			goto unlock_out;
		}
		WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req);
		if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
			/*
			 * We just marked the leaf or internal node, and a
			 * grace period is in progress, which means that
			 * rcu_gp_cleanup() will see the marking.  Bail to
			 * reduce contention.
			 */
			trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
					  TPS("Startedleaf"));
			goto unlock_out;
		}
		if (rnp != rnp_start && rnp->parent != NULL)
			raw_spin_unlock_rcu_node(rnp);
		if (!rnp->parent)
			break;  /* At root, and perhaps also leaf. */
	}

	/* If GP already in progress, just leave, otherwise start one. */
	if (rcu_gp_in_progress()) {
		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
		goto unlock_out;
	}
	trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
	WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
	if (!READ_ONCE(rcu_state.gp_kthread)) {
		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
		goto unlock_out;
	}
	trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
	ret = true;  /* Caller must wake GP kthread. */
unlock_out:
	/* Push furthest requested GP to leaf node and rcu_data structure. */
	if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
		WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
	}
	if (rnp != rnp_start)
		raw_spin_unlock_rcu_node(rnp);
	return ret;
}
/*
 * Clean up any old requests for the just-ended grace period.  Also return
 * whether any additional grace periods have been requested.
 */
static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
{
	bool needmore;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
	if (!needmore)
		rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
	trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
			  needmore ? TPS("CleanupMore") : TPS("Cleanup"));
	return needmore;
}

/*
 * Awaken the grace-period kthread.  Don't do a self-awaken (unless in an
 * interrupt or softirq handler, in which case we just might immediately
 * sleep upon return, resulting in a grace-period hang), and don't bother
 * awakening when there is nothing for the grace-period kthread to do
 * (as in several CPUs raced to awaken, we lost), and finally don't try
 * to awaken a kthread that has not yet been created.  If all those checks
 * are passed, track some debug information and awaken.
 *
 * So why do the self-wakeup when in an interrupt or softirq handler
 * in the grace-period kthread's context?  Because the kthread might have
 * been interrupted just as it was going to sleep, and just after the final
 * pre-sleep check of the awaken condition.  In this case, a wakeup really
 * is required, and is therefore supplied.
 */
static void rcu_gp_kthread_wake(void)
{
	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);

	if ((current == t && !in_irq() && !in_serving_softirq()) ||
	    !READ_ONCE(rcu_state.gp_flags) || !t)
		return;
	WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
	WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
	swake_up_one(&rcu_state.gp_wq);
}
/*
 * If there is room, assign a ->gp_seq number to any callbacks on this
 * CPU that have not already been assigned.  Also accelerate any callbacks
 * that were previously assigned a ->gp_seq number that has since proven
 * to be too conservative, which can happen if callbacks get assigned a
 * ->gp_seq number while RCU is idle, but with reference to a non-root
 * rcu_node structure.  This function is idempotent, so it does not hurt
 * to call it repeatedly.  Returns a flag saying that we should awaken
 * the RCU grace-period kthread.
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */
static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
{
	unsigned long gp_seq_req;
	bool ret = false;

	rcu_lockdep_assert_cblist_protected(rdp);
	raw_lockdep_assert_held_rcu_node(rnp);

	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
		return false;

	/*
	 * Callbacks are often registered with incomplete grace-period
	 * information.  Something about the fact that getting exact
	 * information requires acquiring a global lock...  RCU therefore
	 * makes a conservative estimate of the grace period number at which
	 * a given callback will become ready to invoke.  The following
	 * code checks this estimate and improves it when possible, thus
	 * accelerating callback invocation to an earlier grace-period
	 * number.
	 */
	gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
	if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
		ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);

	/* Trace depending on how much we were able to accelerate. */
	if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
		trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("AccWaitCB"));
	else
		trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("AccReadyCB"));

	return ret;
}
/*
 * Similar to rcu_accelerate_cbs(), but does not require that the leaf
 * rcu_node structure's ->lock be held.  It consults the cached value
 * of ->gp_seq_needed in the rcu_data structure, and if that indicates
 * that a new grace-period request be made, invokes rcu_accelerate_cbs()
 * while holding the leaf rcu_node structure's ->lock.
 */
static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
					struct rcu_data *rdp)
{
	unsigned long c;
	bool needwake;

	rcu_lockdep_assert_cblist_protected(rdp);
	c = rcu_seq_snap(&rcu_state.gp_seq);
	if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
		/* Old request still live, so mark recent callbacks. */
		(void)rcu_segcblist_accelerate(&rdp->cblist, c);
		return;
	}
	raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
	needwake = rcu_accelerate_cbs(rnp, rdp);
	raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
	if (needwake)
		rcu_gp_kthread_wake();
}

/*
 * Move any callbacks whose grace period has completed to the
 * RCU_DONE_TAIL sublist, then compact the remaining sublists and
 * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
 * sublist.  This function is idempotent, so it does not hurt to
 * invoke it repeatedly.  As long as it is not invoked -too- often...
 * Returns true if the RCU grace-period kthread needs to be awakened.
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */
static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
{
	rcu_lockdep_assert_cblist_protected(rdp);
	raw_lockdep_assert_held_rcu_node(rnp);

	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
		return false;

	/*
	 * Find all callbacks whose ->gp_seq numbers indicate that they
	 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
	 */
	rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);

	/* Classify any remaining callbacks. */
	return rcu_accelerate_cbs(rnp, rdp);
}

/*
 * Move and classify callbacks, but only if doing so won't require
 * that the RCU grace-period kthread be awakened.
 */
static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
						  struct rcu_data *rdp)
{
	rcu_lockdep_assert_cblist_protected(rdp);
	if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) ||
	    !raw_spin_trylock_rcu_node(rnp))
		return;
	WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
	raw_spin_unlock_rcu_node(rnp);
}
/*
 * Update CPU-local rcu_data state to record the beginnings and ends of
 * grace periods.  The caller must hold the ->lock of the leaf rcu_node
 * structure corresponding to the current CPU, and must have irqs disabled.
 * Returns true if the grace-period kthread needs to be awakened.
 */
static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
{
	bool ret = false;
	bool need_qs;
	const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
			       rcu_segcblist_is_offloaded(&rdp->cblist);

	raw_lockdep_assert_held_rcu_node(rnp);

	if (rdp->gp_seq == rnp->gp_seq)
		return false; /* Nothing to do. */

	/* Handle the ends of any preceding grace periods first. */
	if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
	    unlikely(READ_ONCE(rdp->gpwrap))) {
		if (!offloaded)
			ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
		rdp->core_needs_qs = false;
		trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
	} else {
		if (!offloaded)
			ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */
		if (rdp->core_needs_qs)
			rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
	}

	/* Now handle the beginnings of any new-to-this-CPU grace periods. */
	if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
	    unlikely(READ_ONCE(rdp->gpwrap))) {
		/*
		 * If the current grace period is waiting for this CPU,
		 * set up to detect a quiescent state, otherwise don't
		 * go looking for one.
		 */
		trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
		need_qs = !!(rnp->qsmask & rdp->grpmask);
		rdp->cpu_no_qs.b.norm = need_qs;
		rdp->core_needs_qs = need_qs;
		zero_cpu_stall_ticks(rdp);
	}
	rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */
	if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
	WRITE_ONCE(rdp->gpwrap, false);
	rcu_gpnum_ovf(rnp, rdp);
	return ret;
}

static void note_gp_changes(struct rcu_data *rdp)
{
	unsigned long flags;
	bool needwake;
	struct rcu_node *rnp;

	local_irq_save(flags);
	rnp = rdp->mynode;
	if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
	     !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
	    !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
		local_irq_restore(flags);
		return;
	}
	needwake = __note_gp_changes(rnp, rdp);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	if (needwake)
		rcu_gp_kthread_wake();
}
static void rcu_gp_slow(int delay)
{
	if (delay > 0 &&
	    !(rcu_seq_ctr(rcu_state.gp_seq) %
	      (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
		schedule_timeout_uninterruptible(delay);
}

static unsigned long sleep_duration;

/* Allow rcutorture to stall the grace-period kthread. */
void rcu_gp_set_torture_wait(int duration)
{
	if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST) && duration > 0)
		WRITE_ONCE(sleep_duration, duration);
}
EXPORT_SYMBOL_GPL(rcu_gp_set_torture_wait);

/* Actually implement the aforementioned wait. */
static void rcu_gp_torture_wait(void)
{
	unsigned long duration;

	if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST))
		return;
	duration = xchg(&sleep_duration, 0UL);
	if (duration > 0) {
		pr_alert("%s: Waiting %lu jiffies\n", __func__, duration);
		schedule_timeout_uninterruptible(duration);
		pr_alert("%s: Wait complete\n", __func__);
	}
}
/*
 * Initialize a new grace period.  Return false if no grace period required.
 */
static bool rcu_gp_init(void)
{
	unsigned long flags;
	unsigned long oldmask;
	unsigned long mask;
	struct rcu_data *rdp;
	struct rcu_node *rnp = rcu_get_root();

	WRITE_ONCE(rcu_state.gp_activity, jiffies);
	raw_spin_lock_irq_rcu_node(rnp);
	if (!READ_ONCE(rcu_state.gp_flags)) {
		/* Spurious wakeup, tell caller to go back to sleep. */
		raw_spin_unlock_irq_rcu_node(rnp);
		return false;
	}
	WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */

	if (WARN_ON_ONCE(rcu_gp_in_progress())) {
		/*
		 * Grace period already in progress, don't start another.
		 * Not supposed to be able to happen.
		 */
		raw_spin_unlock_irq_rcu_node(rnp);
		return false;
	}

	/* Advance to a new grace period and initialize state. */
	record_gp_stall_check_time();
	/* Record GP times before starting GP, hence rcu_seq_start(). */
	rcu_seq_start(&rcu_state.gp_seq);
	ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
	raw_spin_unlock_irq_rcu_node(rnp);

	/*
	 * Apply per-leaf buffered online and offline operations to the
	 * rcu_node tree.  Note that this new grace period need not wait
	 * for subsequent online CPUs, and that quiescent-state forcing
	 * will handle subsequent offline CPUs.
	 */
	rcu_state.gp_state = RCU_GP_ONOFF;
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock(&rcu_state.ofl_lock);
		raw_spin_lock_irq_rcu_node(rnp);
		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
		    !rnp->wait_blkd_tasks) {
			/* Nothing to do on this leaf rcu_node structure. */
			raw_spin_unlock_irq_rcu_node(rnp);
			raw_spin_unlock(&rcu_state.ofl_lock);
			continue;
		}

		/* Record old state, apply changes to ->qsmaskinit field. */
		oldmask = rnp->qsmaskinit;
		rnp->qsmaskinit = rnp->qsmaskinitnext;

		/* If zero-ness of ->qsmaskinit changed, propagate up tree. */
		if (!oldmask != !rnp->qsmaskinit) {
			if (!oldmask) { /* First online CPU for rcu_node. */
				if (!rnp->wait_blkd_tasks) /* Ever offline? */
					rcu_init_new_rnp(rnp);
			} else if (rcu_preempt_has_tasks(rnp)) {
				rnp->wait_blkd_tasks = true; /* blocked tasks */
			} else { /* Last offline CPU and can propagate. */
				rcu_cleanup_dead_rnp(rnp);
			}
		}

		/*
		 * If all waited-on tasks from prior grace period are
		 * done, and if all this rcu_node structure's CPUs are
		 * still offline, propagate up the rcu_node tree and
		 * clear ->wait_blkd_tasks.  Otherwise, if one of this
		 * rcu_node structure's CPUs has since come back online,
		 * simply clear ->wait_blkd_tasks.
		 */
		if (rnp->wait_blkd_tasks &&
		    (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
			rnp->wait_blkd_tasks = false;
			if (!rnp->qsmaskinit)
				rcu_cleanup_dead_rnp(rnp);
		}

		raw_spin_unlock_irq_rcu_node(rnp);
		raw_spin_unlock(&rcu_state.ofl_lock);
	}
	rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */

	/*
	 * Set the quiescent-state-needed bits in all the rcu_node
	 * structures for all currently online CPUs in breadth-first
	 * order, starting from the root rcu_node structure, relying on the
	 * layout of the tree within the rcu_state.node[] array.  Note that
	 * other CPUs will access only the leaves of the hierarchy, thus
	 * seeing that no grace period is in progress, at least until the
	 * corresponding leaf node has been initialized.
	 *
	 * The grace period cannot complete until the initialization
	 * process finishes, because this kthread handles both.
	 */
	rcu_state.gp_state = RCU_GP_INIT;
	rcu_for_each_node_breadth_first(rnp) {
		rcu_gp_slow(gp_init_delay);
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		rdp = this_cpu_ptr(&rcu_data);
		rcu_preempt_check_blocked_tasks(rnp);
		rnp->qsmask = rnp->qsmaskinit;
		WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
		if (rnp == rdp->mynode)
			(void)__note_gp_changes(rnp, rdp);
		rcu_preempt_boost_start_gp(rnp);
		trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
					    rnp->level, rnp->grplo,
					    rnp->grphi, rnp->qsmask);
		/* Quiescent states for tasks on any now-offline CPUs. */
		mask = rnp->qsmask & ~rnp->qsmaskinitnext;
		rnp->rcu_gp_init_mask = mask;
		if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
		else
			raw_spin_unlock_irq_rcu_node(rnp);
		cond_resched_tasks_rcu_qs();
		WRITE_ONCE(rcu_state.gp_activity, jiffies);
	}

	return true;
}
/*
 * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
 * time.
 */
static bool rcu_gp_fqs_check_wake(int *gfp)
{
	struct rcu_node *rnp = rcu_get_root();

	// If under overload conditions, force an immediate FQS scan.
	if (*gfp & RCU_GP_FLAG_OVLD)
		return true;

	// Someone like call_rcu() requested a force-quiescent-state scan.
	*gfp = READ_ONCE(rcu_state.gp_flags);
	if (*gfp & RCU_GP_FLAG_FQS)
		return true;

	// The current grace period has completed.
	if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
		return true;

	return false;
}
/*
 * Do one round of quiescent-state forcing.
 */
static void rcu_gp_fqs(bool first_time)
{
	struct rcu_node *rnp = rcu_get_root();

	WRITE_ONCE(rcu_state.gp_activity, jiffies);
	rcu_state.n_force_qs++;
	if (first_time) {
		/* Collect dyntick-idle snapshots. */
		force_qs_rnp(dyntick_save_progress_counter);
	} else {
		/* Handle dyntick-idle and offline CPUs. */
		force_qs_rnp(rcu_implicit_dynticks_qs);
	}
	/* Clear flag to prevent immediate re-entry. */
	if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
		raw_spin_lock_irq_rcu_node(rnp);
		WRITE_ONCE(rcu_state.gp_flags,
			   READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS);
		raw_spin_unlock_irq_rcu_node(rnp);
	}
}
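/*
 * The first rcu_gp_fqs() invocation of a given grace period snapshots the
 * dynticks state of each holdout CPU (dyntick_save_progress_counter());
 * subsequent invocations recheck those snapshots (rcu_implicit_dynticks_qs())
 * so that quiescent states can be reported on behalf of CPUs that are or
 * were idle or offline, without disturbing them.
 */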
/*
 * Loop doing repeated quiescent-state forcing until the grace period ends.
 */
static void rcu_gp_fqs_loop(void)
{
	bool first_gp_fqs;
	int gf = 0;
	unsigned long j;
	int ret;
	struct rcu_node *rnp = rcu_get_root();

	first_gp_fqs = true;
	j = READ_ONCE(jiffies_till_first_fqs);
	if (rcu_state.cbovld)
		gf = RCU_GP_FLAG_OVLD;
	ret = 0;
	for (;;) {
		if (!ret) {
			rcu_state.jiffies_force_qs = jiffies + j;
			WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
				   jiffies + (j ? 3 * j : 2));
		}
		trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
				       TPS("fqswait"));
		rcu_state.gp_state = RCU_GP_WAIT_FQS;
		ret = swait_event_idle_timeout_exclusive(
				rcu_state.gp_wq, rcu_gp_fqs_check_wake(&gf), j);
		rcu_gp_torture_wait();
		rcu_state.gp_state = RCU_GP_DOING_FQS;
		/* Locking provides needed memory barriers. */
		/* If grace period done, leave loop. */
		if (!READ_ONCE(rnp->qsmask) &&
		    !rcu_preempt_blocked_readers_cgp(rnp))
			break;
		/* If time for quiescent-state forcing, do it. */
		if (!time_after(rcu_state.jiffies_force_qs, jiffies) ||
		    (gf & RCU_GP_FLAG_FQS)) {
			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
					       TPS("fqsstart"));
			rcu_gp_fqs(first_gp_fqs);
			gf = 0;
			if (first_gp_fqs) {
				first_gp_fqs = false;
				gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0;
			}
			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
					       TPS("fqsend"));
			cond_resched_tasks_rcu_qs();
			WRITE_ONCE(rcu_state.gp_activity, jiffies);
			ret = 0; /* Force full wait till next FQS. */
			j = READ_ONCE(jiffies_till_next_fqs);
		} else {
			/* Deal with stray signal. */
			cond_resched_tasks_rcu_qs();
			WRITE_ONCE(rcu_state.gp_activity, jiffies);
			WARN_ON(signal_pending(current));
			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
					       TPS("fqswaitsig"));
			ret = 1; /* Keep old FQS timing. */
			j = jiffies;
			if (time_after(jiffies, rcu_state.jiffies_force_qs))
				j = 1;
			else
				j = rcu_state.jiffies_force_qs - j;
			gf = 0;
		}
	}
}
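/*
 * Thus rcu_gp_fqs_loop() alternates between waiting (bounded by
 * jiffies_till_first_fqs and jiffies_till_next_fqs, or woken early via
 * rcu_gp_fqs_check_wake()) and forcing quiescent states, and it exits only
 * once the root rcu_node structure shows no holdout CPUs and no blocked
 * preempted readers.
 */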
/*
 * Clean up after the old grace period.
 */
static void rcu_gp_cleanup(void)
{
	int cpu;
	bool needgp = false;
	unsigned long gp_duration;
	unsigned long new_gp_seq;
	bool offloaded;
	struct rcu_data *rdp;
	struct rcu_node *rnp = rcu_get_root();
	struct swait_queue_head *sq;

	WRITE_ONCE(rcu_state.gp_activity, jiffies);
	raw_spin_lock_irq_rcu_node(rnp);
	rcu_state.gp_end = jiffies;
	gp_duration = rcu_state.gp_end - rcu_state.gp_start;
	if (gp_duration > rcu_state.gp_max)
		rcu_state.gp_max = gp_duration;

	/*
	 * We know the grace period is complete, but to everyone else
	 * it appears to still be ongoing.  But it is also the case
	 * that to everyone else it looks like there is nothing that
	 * they can do to advance the grace period.  It is therefore
	 * safe for us to drop the lock in order to mark the grace
	 * period as completed in all of the rcu_node structures.
	 */
	raw_spin_unlock_irq_rcu_node(rnp);

	/*
	 * Propagate new ->gp_seq value to rcu_node structures so that
	 * other CPUs don't have to wait until the start of the next grace
	 * period to process their callbacks.  This also avoids some nasty
	 * RCU grace-period initialization races by forcing the end of
	 * the current grace period to be completely recorded in all of
	 * the rcu_node structures before the beginning of the next grace
	 * period is recorded in any of the rcu_node structures.
	 */
	new_gp_seq = rcu_state.gp_seq;
	rcu_seq_end(&new_gp_seq);
	rcu_for_each_node_breadth_first(rnp) {
		raw_spin_lock_irq_rcu_node(rnp);
		if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
			dump_blkd_tasks(rnp, 10);
		WARN_ON_ONCE(rnp->qsmask);
		WRITE_ONCE(rnp->gp_seq, new_gp_seq);
		rdp = this_cpu_ptr(&rcu_data);
		if (rnp == rdp->mynode)
			needgp = __note_gp_changes(rnp, rdp) || needgp;
		/* smp_mb() provided by prior unlock-lock pair. */
		needgp = rcu_future_gp_cleanup(rnp) || needgp;
		// Reset overload indication for CPUs no longer overloaded
		if (rcu_is_leaf_node(rnp))
			for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) {
				rdp = per_cpu_ptr(&rcu_data, cpu);
				check_cb_ovld_locked(rdp, rnp);
			}
		sq = rcu_nocb_gp_get(rnp);
		raw_spin_unlock_irq_rcu_node(rnp);
		rcu_nocb_gp_cleanup(sq);
		cond_resched_tasks_rcu_qs();
		WRITE_ONCE(rcu_state.gp_activity, jiffies);
		rcu_gp_slow(gp_cleanup_delay);
	}
	rnp = rcu_get_root();
	raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */

	/* Declare grace period done, trace first to use old GP number. */
	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
	rcu_seq_end(&rcu_state.gp_seq);
	ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
	rcu_state.gp_state = RCU_GP_IDLE;
	/* Check for GP requests since above loop. */
	rdp = this_cpu_ptr(&rcu_data);
	if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
		trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
				  TPS("CleanupMore"));
		needgp = true;
	}
	/* Advance CBs to reduce false positives below. */
	offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
		    rcu_segcblist_is_offloaded(&rdp->cblist);
	if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
		WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
		WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
		trace_rcu_grace_period(rcu_state.name,
				       rcu_state.gp_seq,
				       TPS("newreq"));
	} else {
		WRITE_ONCE(rcu_state.gp_flags,
			   rcu_state.gp_flags & RCU_GP_FLAG_INIT);
	}
	raw_spin_unlock_irq_rcu_node(rnp);
}
/*
 * Body of kthread that handles grace periods.
 */
static int __noreturn rcu_gp_kthread(void *unused)
{
	rcu_bind_gp_kthread();
	for (;;) {

		/* Handle grace-period start. */
		for (;;) {
			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
					       TPS("reqwait"));
			rcu_state.gp_state = RCU_GP_WAIT_GPS;
			swait_event_idle_exclusive(rcu_state.gp_wq,
					 READ_ONCE(rcu_state.gp_flags) &
					 RCU_GP_FLAG_INIT);
			rcu_gp_torture_wait();
			rcu_state.gp_state = RCU_GP_DONE_GPS;
			/* Locking provides needed memory barrier. */
			if (rcu_gp_init())
				break;
			cond_resched_tasks_rcu_qs();
			WRITE_ONCE(rcu_state.gp_activity, jiffies);
			WARN_ON(signal_pending(current));
			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
					       TPS("reqwaitsig"));
		}

		/* Handle quiescent-state forcing. */
		rcu_gp_fqs_loop();

		/* Handle grace-period end. */
		rcu_state.gp_state = RCU_GP_CLEANUP;
		rcu_gp_cleanup();
		rcu_state.gp_state = RCU_GP_CLEANED;
	}
}
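/*
 * Taken together, the grace-period kthread cycles through RCU_GP_WAIT_GPS
 * and RCU_GP_DONE_GPS while waiting for a request, through RCU_GP_ONOFF and
 * RCU_GP_INIT inside rcu_gp_init(), through RCU_GP_WAIT_FQS and
 * RCU_GP_DOING_FQS inside rcu_gp_fqs_loop(), and through RCU_GP_CLEANUP and
 * RCU_GP_CLEANED around rcu_gp_cleanup(), which leaves gp_state at
 * RCU_GP_IDLE until the next grace period is requested.
 */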
/*
 * Report a full set of quiescent states to the rcu_state data structure.
 * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
 * another grace period is required.  Whether we wake the grace-period
 * kthread or it awakens itself for the next round of quiescent-state
 * forcing, that kthread will clean up after the just-completed grace
 * period.  Note that the caller must hold rnp->lock, which is released
 * before return.
 */
static void rcu_report_qs_rsp(unsigned long flags)
	__releases(rcu_get_root()->lock)
{
	raw_lockdep_assert_held_rcu_node(rcu_get_root());
	WARN_ON_ONCE(!rcu_gp_in_progress());
	WRITE_ONCE(rcu_state.gp_flags,
		   READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
	raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags);
	rcu_gp_kthread_wake();
}
/*
 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
 * Allows quiescent states for a group of CPUs to be reported at one go
 * to the specified rcu_node structure, though all the CPUs in the group
 * must be represented by the same rcu_node structure (which need not be a
 * leaf rcu_node structure, though it often will be).  The gps parameter
 * is the grace-period snapshot, which means that the quiescent states
 * are valid only if rnp->gp_seq is equal to gps.  That structure's lock
 * must be held upon entry, and it is released before return.
 *
 * As a special case, if mask is zero, the bit-already-cleared check is
 * disabled.  This allows propagating quiescent state due to resumed tasks
 * during grace-period initialization.
 */
static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
			      unsigned long gps, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long oldmask = 0;
	struct rcu_node *rnp_c;

	raw_lockdep_assert_held_rcu_node(rnp);

	/* Walk up the rcu_node hierarchy. */
	for (;;) {
		if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {

			/*
			 * Our bit has already been cleared, or the
			 * relevant grace period is already over, so done.
			 */
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			return;
		}
		WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
		WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
			     rcu_preempt_blocked_readers_cgp(rnp));
		WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask);
		trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
						 mask, rnp->qsmask, rnp->level,
						 rnp->grplo, rnp->grphi,
						 !!rnp->gp_tasks);
		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {

			/* Other bits still set at this level, so done. */
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			return;
		}
		rnp->completedqs = rnp->gp_seq;
		mask = rnp->grpmask;
		if (rnp->parent == NULL) {

			/* No more levels.  Exit loop holding root lock. */
			break;
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		rnp_c = rnp;
		rnp = rnp->parent;
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		oldmask = READ_ONCE(rnp_c->qsmask);
	}

	/*
	 * Get here if we are the last CPU to pass through a quiescent
	 * state for this grace period.  Invoke rcu_report_qs_rsp()
	 * to clean up and start the next grace period if one is needed.
	 */
	rcu_report_qs_rsp(flags); /* releases rnp->lock. */
}
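/*
 * In short, rcu_report_qs_rnp() clears this group's bit at the current
 * level and walks toward the root only when it has cleared the last bit at
 * that level; whoever clears the final bit at the root ends the grace
 * period via rcu_report_qs_rsp().
 */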
/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period.  The caller must hold the corresponding rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void __maybe_unused
rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long gps;
	unsigned long mask;
	struct rcu_node *rnp_p;

	raw_lockdep_assert_held_rcu_node(rnp);
	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RCU)) ||
	    WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
	    rnp->qsmask != 0) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;  /* Still need more quiescent states! */
	}

	rnp->completedqs = rnp->gp_seq;
	rnp_p = rnp->parent;
	if (rnp_p == NULL) {
		/*
		 * Only one rcu_node structure in the tree, so don't
		 * try to report up to its nonexistent parent!
		 */
		rcu_report_qs_rsp(flags);
		return;
	}

	/* Report up the rest of the hierarchy, tracking current ->gp_seq. */
	gps = rnp->gp_seq;
	mask = rnp->grpmask;
	raw_spin_unlock_rcu_node(rnp);	/* irqs remain disabled. */
	raw_spin_lock_rcu_node(rnp_p);	/* irqs already disabled. */
	rcu_report_qs_rnp(mask, rnp_p, gps, flags);
}
/*
 * Record a quiescent state for the specified CPU to that CPU's rcu_data
 * structure.  This must be called from the specified CPU.
 */
static void
rcu_report_qs_rdp(int cpu, struct rcu_data *rdp)
{
	unsigned long flags;
	unsigned long mask;
	bool needwake = false;
	const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
			       rcu_segcblist_is_offloaded(&rdp->cblist);
	struct rcu_node *rnp;

	rnp = rdp->mynode;
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
	    rdp->gpwrap) {

		/*
		 * The grace period in which this quiescent state was
		 * recorded has ended, so don't report it upwards.
		 * We will instead need a new quiescent state that lies
		 * within the current grace period.
		 */
		rdp->cpu_no_qs.b.norm = true;	/* need qs for new gp. */
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	mask = rdp->grpmask;
	if (rdp->cpu == smp_processor_id())
		rdp->core_needs_qs = false;
	if ((rnp->qsmask & mask) == 0) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	} else {
		/*
		 * This GP can't end until cpu checks in, so all of our
		 * callbacks can be processed during the next GP.
		 */
		if (!offloaded)
			needwake = rcu_accelerate_cbs(rnp, rdp);

		rcu_disable_urgency_upon_qs(rdp);
		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
		/* ^^^ Released rnp->lock */
		if (needwake)
			rcu_gp_kthread_wake();
	}
}
/*
 * Check to see if there is a new grace period of which this CPU
 * is not yet aware, and if so, set up local rcu_data state for it.
 * Otherwise, see if this CPU has just passed through its first
 * quiescent state for this grace period, and record that fact if so.
 */
static void
rcu_check_quiescent_state(struct rcu_data *rdp)
{
	/* Check for grace-period ends and beginnings. */
	note_gp_changes(rdp);

	/*
	 * Does this CPU still need to do its part for current grace period?
	 * If no, return and let the other CPUs do their part as well.
	 */
	if (!rdp->core_needs_qs)
		return;

	/*
	 * Was there a quiescent state since the beginning of the grace
	 * period? If no, then exit and wait for the next call.
	 */
	if (rdp->cpu_no_qs.b.norm)
		return;

	/*
	 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
	 * judge of that).
	 */
	rcu_report_qs_rdp(rdp->cpu, rdp);
}
/*
 * Near the end of the offline process.  Trace the fact that this CPU
 * is going offline.
 */
int rcutree_dying_cpu(unsigned int cpu)
{
	bool blkd;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;

	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
		return 0;

	blkd = !!(rnp->qsmask & rdp->grpmask);
	trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
			       blkd ? TPS("cpuofl") : TPS("cpuofl-bgp"));
	return 0;
}
/*
 * All CPUs for the specified rcu_node structure have gone offline,
 * and all tasks that were preempted within an RCU read-side critical
 * section while running on one of those CPUs have since exited their RCU
 * read-side critical section.  Some other CPU is reporting this fact with
 * the specified rcu_node structure's ->lock held and interrupts disabled.
 * This function therefore goes up the tree of rcu_node structures,
 * clearing the corresponding bits in the ->qsmaskinit fields.  Note that
 * the leaf rcu_node structure's ->qsmaskinit field has already been
 * updated.
 *
 * This function does check that the specified rcu_node structure has
 * all CPUs offline and no blocked tasks, so it is OK to invoke it
 * prematurely.  That said, invoking it after the fact will cost you
 * a needless lock acquisition.  So once it has done its work, don't
 * invoke it again.
 */
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
{
	long mask;
	struct rcu_node *rnp = rnp_leaf;

	raw_lockdep_assert_held_rcu_node(rnp_leaf);
	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
	    WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
	    WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
		return;
	for (;;) {
		mask = rnp->grpmask;
		rnp = rnp->parent;
		if (!rnp)
			break;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
		rnp->qsmaskinit &= ~mask;
		/* Between grace periods, so better already be zero! */
		WARN_ON_ONCE(rnp->qsmask);
		if (rnp->qsmaskinit) {
			raw_spin_unlock_rcu_node(rnp);
			/* irqs remain disabled. */
			return;
		}
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
	}
}
/*
 * The CPU has been completely removed, and some other CPU is reporting
 * this fact from process context.  Do the remainder of the cleanup.
 * There can only be one CPU hotplug operation at a time, so no need for
 * explicit locking.
 */
int rcutree_dead_cpu(unsigned int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */

	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
		return 0;

	/* Adjust any no-longer-needed kthreads. */
	rcu_boost_kthread_setaffinity(rnp, -1);
	/* Do any needed no-CB deferred wakeups from this CPU. */
	do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu));

	// Stop-machine done, so allow nohz_full to disable tick.
	tick_dep_clear(TICK_DEP_BIT_RCU);
	return 0;
}
/*
 * Invoke any RCU callbacks that have made it to the end of their grace
 * period.  Throttle as specified by rdp->blimit.
 */
static void rcu_do_batch(struct rcu_data *rdp)
{
	unsigned long flags;
	const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
			       rcu_segcblist_is_offloaded(&rdp->cblist);
	struct rcu_head *rhp;
	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
	long bl, count;
	long pending, tlimit = 0;

	/* If no callbacks are ready, just return. */
	if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
		trace_rcu_batch_start(rcu_state.name,
				      rcu_segcblist_n_cbs(&rdp->cblist), 0);
		trace_rcu_batch_end(rcu_state.name, 0,
				    !rcu_segcblist_empty(&rdp->cblist),
				    need_resched(), is_idle_task(current),
				    rcu_is_callbacks_kthread());
		return;
	}

	/*
	 * Extract the list of ready callbacks, disabling to prevent
	 * races with call_rcu() from interrupt handlers.  Leave the
	 * callback counts, as rcu_barrier() needs to be conservative.
	 */
	local_irq_save(flags);
	rcu_nocb_lock(rdp);
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
	pending = rcu_segcblist_n_cbs(&rdp->cblist);
	bl = max(rdp->blimit, pending >> rcu_divisor);
	if (unlikely(bl > 100))
		tlimit = local_clock() + rcu_resched_ns;
	trace_rcu_batch_start(rcu_state.name,
			      rcu_segcblist_n_cbs(&rdp->cblist), bl);
	rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
	if (offloaded)
		rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
	rcu_nocb_unlock_irqrestore(rdp, flags);

	/* Invoke callbacks. */
	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
	rhp = rcu_cblist_dequeue(&rcl);
	for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
		rcu_callback_t f;

		debug_rcu_head_unqueue(rhp);

		rcu_lock_acquire(&rcu_callback_map);
		trace_rcu_invoke_callback(rcu_state.name, rhp);

		f = rhp->func;
		WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
		f(rhp);

		rcu_lock_release(&rcu_callback_map);

		/*
		 * Stop only if limit reached and CPU has something to do.
		 * Note: The rcl structure counts down from zero.
		 */
		if (-rcl.len >= bl && !offloaded &&
		    (need_resched() ||
		     (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
			break;
		if (unlikely(tlimit)) {
			/* only call local_clock() every 32 callbacks */
			if (likely((-rcl.len & 31) || local_clock() < tlimit))
				continue;
			/* Exceeded the time limit, so leave. */
			break;
		}
		if (offloaded) {
			WARN_ON_ONCE(in_serving_softirq());
			local_bh_enable();
			lockdep_assert_irqs_enabled();
			cond_resched_tasks_rcu_qs();
			lockdep_assert_irqs_enabled();
			local_bh_disable();
		}
	}

	local_irq_save(flags);
	rcu_nocb_lock(rdp);
	count = -rcl.len;
	trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
			    is_idle_task(current), rcu_is_callbacks_kthread());

	/* Update counts and requeue any remaining callbacks. */
	rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
	smp_mb(); /* List handling before counting for rcu_barrier(). */
	rcu_segcblist_insert_count(&rdp->cblist, &rcl);

	/* Reinstate batch limit if we have worked down the excess. */
	count = rcu_segcblist_n_cbs(&rdp->cblist);
	if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark)
		rdp->blimit = blimit;

	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
	if (count == 0 && rdp->qlen_last_fqs_check != 0) {
		rdp->qlen_last_fqs_check = 0;
		rdp->n_force_qs_snap = rcu_state.n_force_qs;
	} else if (count < rdp->qlen_last_fqs_check - qhimark)
		rdp->qlen_last_fqs_check = count;

	/*
	 * The following usually indicates a double call_rcu().  To track
	 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
	 */
	WARN_ON_ONCE(count == 0 && !rcu_segcblist_empty(&rdp->cblist));
	WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
		     count != 0 && rcu_segcblist_empty(&rdp->cblist));

	rcu_nocb_unlock_irqrestore(rdp, flags);

	/* Re-invoke RCU core processing if there are callbacks remaining. */
	if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist))
		invoke_rcu_core();
	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
}
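/*
 * Callback invocation in rcu_do_batch() is throttled two ways: by count,
 * via rdp->blimit (raised toward DEFAULT_MAX_RCU_BLIMIT under load and
 * restored to the "blimit" module parameter once the backlog drops to
 * qlowmark), and, for batches larger than 100 callbacks, by time, via
 * rcu_resched_ns checked against local_clock() every 32 callbacks.
 */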
/*
 * This function is invoked from each scheduling-clock interrupt,
 * and checks to see if this CPU is in a non-context-switch quiescent
 * state, for example, user mode or idle loop.  It also schedules RCU
 * core processing.  If the current grace period has gone on too long,
 * it will ask the scheduler to manufacture a context switch for the sole
 * purpose of providing the needed quiescent state.
 */
void rcu_sched_clock_irq(int user)
{
	trace_rcu_utilization(TPS("Start scheduler-tick"));
	raw_cpu_inc(rcu_data.ticks_this_gp);
	/* The load-acquire pairs with the store-release setting to true. */
	if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
		/* Idle and userspace execution already are quiescent states. */
		if (!rcu_is_cpu_rrupt_from_idle() && !user) {
			set_tsk_need_resched(current);
			set_preempt_need_resched();
		}
		__this_cpu_write(rcu_data.rcu_urgent_qs, false);
	}
	rcu_flavor_sched_clock_irq(user);
	if (rcu_pending(user))
		invoke_rcu_core();

	trace_rcu_utilization(TPS("End scheduler-tick"));
}
2493 * Scan the leaf rcu_node structures. For each structure on which all
2494 * CPUs have reported a quiescent state and on which there are tasks
2495 * blocking the current grace period, initiate RCU priority boosting.
2496 * Otherwise, invoke the specified function to check dyntick state for
2497 * each CPU that has not yet reported a quiescent state.
2499 static void force_qs_rnp(int (*f
)(struct rcu_data
*rdp
))
2502 unsigned long flags
;
2504 struct rcu_data
*rdp
;
2505 struct rcu_node
*rnp
;
2507 rcu_state
.cbovld
= rcu_state
.cbovldnext
;
2508 rcu_state
.cbovldnext
= false;
2509 rcu_for_each_leaf_node(rnp
) {
2510 cond_resched_tasks_rcu_qs();
2512 raw_spin_lock_irqsave_rcu_node(rnp
, flags
);
2513 rcu_state
.cbovldnext
|= !!rnp
->cbovldmask
;
2514 if (rnp
->qsmask
== 0) {
2515 if (!IS_ENABLED(CONFIG_PREEMPT_RCU
) ||
2516 rcu_preempt_blocked_readers_cgp(rnp
)) {
2518 * No point in scanning bits because they
2519 * are all zero. But we might need to
2520 * priority-boost blocked readers.
2522 rcu_initiate_boost(rnp
, flags
);
2523 /* rcu_initiate_boost() releases rnp->lock */
2526 raw_spin_unlock_irqrestore_rcu_node(rnp
, flags
);
2529 for_each_leaf_node_cpu_mask(rnp
, cpu
, rnp
->qsmask
) {
2530 rdp
= per_cpu_ptr(&rcu_data
, cpu
);
2532 mask
|= rdp
->grpmask
;
2533 rcu_disable_urgency_upon_qs(rdp
);
2537 /* Idle/offline CPUs, report (releases rnp->lock). */
2538 rcu_report_qs_rnp(mask
, rnp
, rnp
->gp_seq
, flags
);
2540 /* Nothing to do here, so just drop the lock. */
2541 raw_spin_unlock_irqrestore_rcu_node(rnp
, flags
);
		}
	}
}

/*
 * Force quiescent states on reluctant CPUs, and also detect which
 * CPUs are in dyntick-idle mode.
 */
void rcu_force_quiescent_state(void)
{
	unsigned long flags;
	bool ret;
	struct rcu_node *rnp;
	struct rcu_node *rnp_old = NULL;

	/* Funnel through hierarchy to reduce memory contention. */
	rnp = __this_cpu_read(rcu_data.mynode);
	for (; rnp != NULL; rnp = rnp->parent) {
		ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
		      !raw_spin_trylock(&rnp->fqslock);
		if (rnp_old != NULL)
			raw_spin_unlock(&rnp_old->fqslock);
		if (ret)
			return;
		rnp_old = rnp;
	}
	/* rnp_old == rcu_get_root(), rnp == NULL. */

	/* Reached the root of the rcu_node tree, acquire lock. */
	raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
	raw_spin_unlock(&rnp_old->fqslock);
	if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
		raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
		return;  /* Someone beat us to it. */
	}
	WRITE_ONCE(rcu_state.gp_flags,
		   READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
	raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
	rcu_gp_kthread_wake();
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
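/*
 * The funnel locking above (one ->fqslock per level of the rcu_node tree)
 * ensures that when many CPUs call rcu_force_quiescent_state() at once, at
 * most one of them reaches the root and sets RCU_GP_FLAG_FQS; the others
 * drop out early, bounding contention on the root rcu_node structure.
 */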
2584 /* Perform RCU core processing work for the current CPU. */
2585 static __latent_entropy
void rcu_core(void)
2587 unsigned long flags
;
2588 struct rcu_data
*rdp
= raw_cpu_ptr(&rcu_data
);
2589 struct rcu_node
*rnp
= rdp
->mynode
;
2590 const bool offloaded
= IS_ENABLED(CONFIG_RCU_NOCB_CPU
) &&
2591 rcu_segcblist_is_offloaded(&rdp
->cblist
);
2593 if (cpu_is_offline(smp_processor_id()))
2595 trace_rcu_utilization(TPS("Start RCU core"));
2596 WARN_ON_ONCE(!rdp
->beenonline
);
2598 /* Report any deferred quiescent states if preemption enabled. */
2599 if (!(preempt_count() & PREEMPT_MASK
)) {
2600 rcu_preempt_deferred_qs(current
);
2601 } else if (rcu_preempt_need_deferred_qs(current
)) {
2602 set_tsk_need_resched(current
);
2603 set_preempt_need_resched();
2606 /* Update RCU state based on any recent quiescent states. */
2607 rcu_check_quiescent_state(rdp
);
2609 /* No grace period and unregistered callbacks? */
2610 if (!rcu_gp_in_progress() &&
2611 rcu_segcblist_is_enabled(&rdp
->cblist
) && !offloaded
) {
2612 local_irq_save(flags
);
2613 if (!rcu_segcblist_restempty(&rdp
->cblist
, RCU_NEXT_READY_TAIL
))
2614 rcu_accelerate_cbs_unlocked(rnp
, rdp
);
2615 local_irq_restore(flags
);
2618 rcu_check_gp_start_stall(rnp
, rdp
, rcu_jiffies_till_stall_check());
2620 /* If there are callbacks ready, invoke them. */
2621 if (!offloaded
&& rcu_segcblist_ready_cbs(&rdp
->cblist
) &&
2622 likely(READ_ONCE(rcu_scheduler_fully_active
)))
2625 /* Do any needed deferred wakeups of rcuo kthreads. */
2626 do_nocb_deferred_wakeup(rdp
);
2627 trace_rcu_utilization(TPS("End RCU core"));
2630 static void rcu_core_si(struct softirq_action
*h
)
2635 static void rcu_wake_cond(struct task_struct
*t
, int status
)
2638 * If the thread is yielding, only wake it when this
2639 * is invoked from idle
2641 if (t
&& (status
!= RCU_KTHREAD_YIELDING
|| is_idle_task(current
)))
2645 static void invoke_rcu_core_kthread(void)
2647 struct task_struct
*t
;
2648 unsigned long flags
;
2650 local_irq_save(flags
);
2651 __this_cpu_write(rcu_data
.rcu_cpu_has_work
, 1);
2652 t
= __this_cpu_read(rcu_data
.rcu_cpu_kthread_task
);
2653 if (t
!= NULL
&& t
!= current
)
2654 rcu_wake_cond(t
, __this_cpu_read(rcu_data
.rcu_cpu_kthread_status
));
2655 local_irq_restore(flags
);
2659 * Wake up this CPU's rcuc kthread to do RCU core processing.
2661 static void invoke_rcu_core(void)
2663 if (!cpu_online(smp_processor_id()))
2666 raise_softirq(RCU_SOFTIRQ
);
2668 invoke_rcu_core_kthread();
2671 static void rcu_cpu_kthread_park(unsigned int cpu
)
2673 per_cpu(rcu_data
.rcu_cpu_kthread_status
, cpu
) = RCU_KTHREAD_OFFCPU
;
2676 static int rcu_cpu_kthread_should_run(unsigned int cpu
)
2678 return __this_cpu_read(rcu_data
.rcu_cpu_has_work
);
2682 * Per-CPU kernel thread that invokes RCU callbacks. This replaces
2683 * the RCU softirq used in configurations of RCU that do not support RCU
2684 * priority boosting.
2686 static void rcu_cpu_kthread(unsigned int cpu
)
2688 unsigned int *statusp
= this_cpu_ptr(&rcu_data
.rcu_cpu_kthread_status
);
2689 char work
, *workp
= this_cpu_ptr(&rcu_data
.rcu_cpu_has_work
);
2692 trace_rcu_utilization(TPS("Start CPU kthread@rcu_run"));
2693 for (spincnt
= 0; spincnt
< 10; spincnt
++) {
2695 *statusp
= RCU_KTHREAD_RUNNING
;
2696 local_irq_disable();
2704 trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
2705 *statusp
= RCU_KTHREAD_WAITING
;
2709 *statusp
= RCU_KTHREAD_YIELDING
;
2710 trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
2711 schedule_timeout_interruptible(2);
2712 trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
2713 *statusp
= RCU_KTHREAD_WAITING
;
2716 static struct smp_hotplug_thread rcu_cpu_thread_spec
= {
2717 .store
= &rcu_data
.rcu_cpu_kthread_task
,
2718 .thread_should_run
= rcu_cpu_kthread_should_run
,
2719 .thread_fn
= rcu_cpu_kthread
,
2720 .thread_comm
= "rcuc/%u",
2721 .setup
= rcu_cpu_kthread_setup
,
2722 .park
= rcu_cpu_kthread_park
,
2726 * Spawn per-CPU RCU core processing kthreads.
2728 static int __init
rcu_spawn_core_kthreads(void)
2732 for_each_possible_cpu(cpu
)
2733 per_cpu(rcu_data
.rcu_cpu_has_work
, cpu
) = 0;
2734 if (!IS_ENABLED(CONFIG_RCU_BOOST
) && use_softirq
)
2736 WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec
),
2737 "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__
);
2740 early_initcall(rcu_spawn_core_kthreads
);
2743 * Handle any core-RCU processing required by a call_rcu() invocation.
2745 static void __call_rcu_core(struct rcu_data
*rdp
, struct rcu_head
*head
,
2746 unsigned long flags
)
2749 * If called from an extended quiescent state, invoke the RCU
2750 * core in order to force a re-evaluation of RCU's idleness.
2752 if (!rcu_is_watching())
2755 /* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2756 if (irqs_disabled_flags(flags
) || cpu_is_offline(smp_processor_id()))
2760 * Force the grace period if too many callbacks or too long waiting.
2761 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state()
2762 * if some other CPU has recently done so. Also, don't bother
2763 * invoking rcu_force_quiescent_state() if the newly enqueued callback
2764 * is the only one waiting for a grace period to complete.
2766 if (unlikely(rcu_segcblist_n_cbs(&rdp
->cblist
) >
2767 rdp
->qlen_last_fqs_check
+ qhimark
)) {
2769 /* Are we ignoring a completed grace period? */
2770 note_gp_changes(rdp
);
2772 /* Start a new grace period if one not already started. */
2773 if (!rcu_gp_in_progress()) {
2774 rcu_accelerate_cbs_unlocked(rdp
->mynode
, rdp
);
2776 /* Give the grace period a kick. */
2777 rdp
->blimit
= DEFAULT_MAX_RCU_BLIMIT
;
2778 if (rcu_state
.n_force_qs
== rdp
->n_force_qs_snap
&&
2779 rcu_segcblist_first_pend_cb(&rdp
->cblist
) != head
)
2780 rcu_force_quiescent_state();
2781 rdp
->n_force_qs_snap
= rcu_state
.n_force_qs
;
2782 rdp
->qlen_last_fqs_check
= rcu_segcblist_n_cbs(&rdp
->cblist
);
2788 * RCU callback function to leak a callback.
2790 static void rcu_leak_callback(struct rcu_head
*rhp
)
2795 * Check and if necessary update the leaf rcu_node structure's
2796 * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2797 * number of queued RCU callbacks. The caller must hold the leaf rcu_node
2798 * structure's ->lock.
2800 static void check_cb_ovld_locked(struct rcu_data
*rdp
, struct rcu_node
*rnp
)
2802 raw_lockdep_assert_held_rcu_node(rnp
);
2803 if (qovld_calc
<= 0)
2804 return; // Early boot and wildcard value set.
2805 if (rcu_segcblist_n_cbs(&rdp
->cblist
) >= qovld_calc
)
2806 WRITE_ONCE(rnp
->cbovldmask
, rnp
->cbovldmask
| rdp
->grpmask
);
2808 WRITE_ONCE(rnp
->cbovldmask
, rnp
->cbovldmask
& ~rdp
->grpmask
);
2812 * Check and if necessary update the leaf rcu_node structure's
2813 * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2814 * number of queued RCU callbacks. No locks need be held, but the
2815 * caller must have disabled interrupts.
2817 * Note that this function ignores the possibility that there are a lot
2818 * of callbacks all of which have already seen the end of their respective
2819 * grace periods. This omission is due to the need for no-CBs CPUs to
2820 * be holding ->nocb_lock to do this check, which is too heavy for a
2821 * common-case operation.
2823 static void check_cb_ovld(struct rcu_data
*rdp
)
2825 struct rcu_node
*const rnp
= rdp
->mynode
;
2827 if (qovld_calc
<= 0 ||
2828 ((rcu_segcblist_n_cbs(&rdp
->cblist
) >= qovld_calc
) ==
2829 !!(READ_ONCE(rnp
->cbovldmask
) & rdp
->grpmask
)))
2830 return; // Early boot wildcard value or already set correctly.
2831 raw_spin_lock_rcu_node(rnp
);
2832 check_cb_ovld_locked(rdp
, rnp
);
2833 raw_spin_unlock_rcu_node(rnp
);
2836 /* Helper function for call_rcu() and friends. */
2838 __call_rcu(struct rcu_head
*head
, rcu_callback_t func
)
2840 unsigned long flags
;
2841 struct rcu_data
*rdp
;
2844 /* Misaligned rcu_head! */
2845 WARN_ON_ONCE((unsigned long)head
& (sizeof(void *) - 1));
2847 if (debug_rcu_head_queue(head
)) {
2849 * Probable double call_rcu(), so leak the callback.
2850 * Use rcu:rcu_callback trace event to find the previous
2851 * time callback was passed to __call_rcu().
2853 WARN_ONCE(1, "__call_rcu(): Double-freed CB %p->%pS()!!!\n",
2855 WRITE_ONCE(head
->func
, rcu_leak_callback
);
2860 local_irq_save(flags
);
2861 rdp
= this_cpu_ptr(&rcu_data
);
2863 /* Add the callback to our list. */
2864 if (unlikely(!rcu_segcblist_is_enabled(&rdp
->cblist
))) {
2865 // This can trigger due to call_rcu() from offline CPU:
2866 WARN_ON_ONCE(rcu_scheduler_active
!= RCU_SCHEDULER_INACTIVE
);
2867 WARN_ON_ONCE(!rcu_is_watching());
2868 // Very early boot, before rcu_init(). Initialize if needed
2869 // and then drop through to queue the callback.
2870 if (rcu_segcblist_empty(&rdp
->cblist
))
2871 rcu_segcblist_init(&rdp
->cblist
);
2875 if (rcu_nocb_try_bypass(rdp
, head
, &was_alldone
, flags
))
2876 return; // Enqueued onto ->nocb_bypass, so just leave.
2877 // If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
2878 rcu_segcblist_enqueue(&rdp
->cblist
, head
);
2879 if (__is_kfree_rcu_offset((unsigned long)func
))
2880 trace_rcu_kfree_callback(rcu_state
.name
, head
,
2881 (unsigned long)func
,
2882 rcu_segcblist_n_cbs(&rdp
->cblist
));
2884 trace_rcu_callback(rcu_state
.name
, head
,
2885 rcu_segcblist_n_cbs(&rdp
->cblist
));
2887 /* Go handle any RCU core processing required. */
2888 if (IS_ENABLED(CONFIG_RCU_NOCB_CPU
) &&
2889 unlikely(rcu_segcblist_is_offloaded(&rdp
->cblist
))) {
2890 __call_rcu_nocb_wake(rdp
, was_alldone
, flags
); /* unlocks */
2892 __call_rcu_core(rdp
, head
, flags
);
2893 local_irq_restore(flags
);
	}
}

/**
 * call_rcu() - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all pre-existing RCU read-side
 * critical sections have completed.  However, the callback function
 * might well execute concurrently with RCU read-side critical sections
 * that started after call_rcu() was invoked.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(), and
 * may be nested.  In addition, regions of code across which interrupts,
 * preemption, or softirqs have been disabled also serve as RCU read-side
 * critical sections.  This includes hardware interrupt handlers, softirq
 * handlers, and NMI handlers.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing RCU read-side critical sections.  On systems with more
 * than one CPU, this means that when "func()" is invoked, each CPU is
 * guaranteed to have executed a full memory barrier since the end of its
 * last RCU read-side critical section whose beginning preceded the call
 * to call_rcu().  It also means that each CPU executing an RCU read-side
 * critical section that continues beyond the start of "func()" must have
 * executed a memory barrier after the call_rcu() but before the beginning
 * of that RCU read-side critical section.  Note that these guarantees
 * include CPUs that are offline, idle, or executing in user mode, as
 * well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
 * resulting RCU callback function "func()", then both CPU A and CPU B are
 * guaranteed to execute a full memory barrier during the time interval
 * between the call to call_rcu() and the invocation of "func()" -- even
 * if CPU A and CPU B are the same CPU (but again only if the system has
 * more than one CPU).
 */
void call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	__call_rcu(head, func);
}
EXPORT_SYMBOL_GPL(call_rcu);
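/*
 * Illustrative usage sketch (not part of this file): a call_rcu() user
 * typically embeds a struct rcu_head in its own structure and recovers
 * that structure with container_of() in the callback.  The "struct foo"
 * and foo_reclaim() names below are hypothetical.
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		struct foo *fp = container_of(rhp, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 *	// After removing old_fp from all RCU-visible paths:
 *	call_rcu(&old_fp->rcu, foo_reclaim);
 */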
2939 /* Maximum number of jiffies to wait before draining a batch. */
2940 #define KFREE_DRAIN_JIFFIES (HZ / 50)
2941 #define KFREE_N_BATCHES 2
2944 * This macro defines how many entries the "records" array
2945 * will contain. It is based on the fact that the size of
2946 * kfree_rcu_bulk_data structure becomes exactly one page.
2948 #define KFREE_BULK_MAX_ENTR ((PAGE_SIZE / sizeof(void *)) - 3)
2951 * struct kfree_rcu_bulk_data - single block to store kfree_rcu() pointers
2952 * @nr_records: Number of active pointers in the array
2953 * @records: Array of the kfree_rcu() pointers
2954 * @next: Next bulk object in the block chain
2955 * @head_free_debug: For debug, when CONFIG_DEBUG_OBJECTS_RCU_HEAD is set
2957 struct kfree_rcu_bulk_data
{
2958 unsigned long nr_records
;
2959 void *records
[KFREE_BULK_MAX_ENTR
];
2960 struct kfree_rcu_bulk_data
*next
;
2961 struct rcu_head
*head_free_debug
;
2965 * struct kfree_rcu_cpu_work - single batch of kfree_rcu() requests
2966 * @rcu_work: Let queue_rcu_work() invoke workqueue handler after grace period
2967 * @head_free: List of kfree_rcu() objects waiting for a grace period
2968 * @bhead_free: Bulk-List of kfree_rcu() objects waiting for a grace period
2969 * @krcp: Pointer to @kfree_rcu_cpu structure
2972 struct kfree_rcu_cpu_work
{
2973 struct rcu_work rcu_work
;
2974 struct rcu_head
*head_free
;
2975 struct kfree_rcu_bulk_data
*bhead_free
;
2976 struct kfree_rcu_cpu
*krcp
;
2980 * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period
2981 * @head: List of kfree_rcu() objects not yet waiting for a grace period
2982 * @bhead: Bulk-List of kfree_rcu() objects not yet waiting for a grace period
2983 * @bcached: Keeps at most one object for later reuse when build chain blocks
2984 * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
2985 * @lock: Synchronize access to this structure
2986 * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
2987 * @monitor_todo: Tracks whether a @monitor_work delayed work is pending
2988 * @initialized: The @lock and @rcu_work fields have been initialized
2990 * This is a per-CPU structure. The reason that it is not included in
2991 * the rcu_data structure is to permit this code to be extracted from
2992 * the RCU files. Such extraction could allow further optimization of
2993 * the interactions with the slab allocators.
2995 struct kfree_rcu_cpu
{
2996 struct rcu_head
*head
;
2997 struct kfree_rcu_bulk_data
*bhead
;
2998 struct kfree_rcu_bulk_data
*bcached
;
2999 struct kfree_rcu_cpu_work krw_arr
[KFREE_N_BATCHES
];
3001 struct delayed_work monitor_work
;
3004 // Number of objects for which GP not started
3008 static DEFINE_PER_CPU(struct kfree_rcu_cpu
, krc
);
3010 static __always_inline
void
3011 debug_rcu_head_unqueue_bulk(struct rcu_head
*head
)
3013 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
3014 for (; head
; head
= head
->next
)
3015 debug_rcu_head_unqueue(head
);
3020 * This function is invoked in workqueue context after a grace period.
3021 * It frees all the objects queued on ->bhead_free or ->head_free.
3023 static void kfree_rcu_work(struct work_struct
*work
)
3025 unsigned long flags
;
3026 struct rcu_head
*head
, *next
;
3027 struct kfree_rcu_bulk_data
*bhead
, *bnext
;
3028 struct kfree_rcu_cpu
*krcp
;
3029 struct kfree_rcu_cpu_work
*krwp
;
3031 krwp
= container_of(to_rcu_work(work
),
3032 struct kfree_rcu_cpu_work
, rcu_work
);
3034 spin_lock_irqsave(&krcp
->lock
, flags
);
3035 head
= krwp
->head_free
;
3036 krwp
->head_free
= NULL
;
3037 bhead
= krwp
->bhead_free
;
3038 krwp
->bhead_free
= NULL
;
3039 spin_unlock_irqrestore(&krcp
->lock
, flags
);
3041 /* "bhead" is now private, so traverse locklessly. */
3042 for (; bhead
; bhead
= bnext
) {
3043 bnext
= bhead
->next
;
3045 debug_rcu_head_unqueue_bulk(bhead
->head_free_debug
);
3047 rcu_lock_acquire(&rcu_callback_map
);
3048 trace_rcu_invoke_kfree_bulk_callback(rcu_state
.name
,
3049 bhead
->nr_records
, bhead
->records
);
3051 kfree_bulk(bhead
->nr_records
, bhead
->records
);
3052 rcu_lock_release(&rcu_callback_map
);
3054 if (cmpxchg(&krcp
->bcached
, NULL
, bhead
))
3055 free_page((unsigned long) bhead
);
3057 cond_resched_tasks_rcu_qs();
3061 * Emergency case only. It can happen under low memory
3062 * condition when an allocation gets failed, so the "bulk"
3063 * path can not be temporary maintained.
3065 for (; head
; head
= next
) {
3066 unsigned long offset
= (unsigned long)head
->func
;
3069 debug_rcu_head_unqueue(head
);
3070 rcu_lock_acquire(&rcu_callback_map
);
3071 trace_rcu_invoke_kfree_callback(rcu_state
.name
, head
, offset
);
3073 if (!WARN_ON_ONCE(!__is_kfree_rcu_offset(offset
)))
3074 kfree((void *)head
- offset
);
3076 rcu_lock_release(&rcu_callback_map
);
3077 cond_resched_tasks_rcu_qs();
3082 * Schedule the kfree batch RCU work to run in workqueue context after a GP.
3084 * This function is invoked by kfree_rcu_monitor() when the KFREE_DRAIN_JIFFIES
3085 * timeout has been reached.
3087 static inline bool queue_kfree_rcu_work(struct kfree_rcu_cpu
*krcp
)
3089 struct kfree_rcu_cpu_work
*krwp
;
3090 bool queued
= false;
3093 lockdep_assert_held(&krcp
->lock
);
3095 for (i
= 0; i
< KFREE_N_BATCHES
; i
++) {
3096 krwp
= &(krcp
->krw_arr
[i
]);
3099 * Try to detach bhead or head and attach it over any
3100 * available corresponding free channel. It can be that
3101 * a previous RCU batch is in progress, it means that
3102 * immediately to queue another one is not possible so
3103 * return false to tell caller to retry.
3105 if ((krcp
->bhead
&& !krwp
->bhead_free
) ||
3106 (krcp
->head
&& !krwp
->head_free
)) {
3108 if (!krwp
->bhead_free
) {
3109 krwp
->bhead_free
= krcp
->bhead
;
3114 if (!krwp
->head_free
) {
3115 krwp
->head_free
= krcp
->head
;
3119 WRITE_ONCE(krcp
->count
, 0);
3122 * One work is per one batch, so there are two "free channels",
3123 * "bhead_free" and "head_free" the batch can handle. It can be
3124 * that the work is in the pending state when two channels have
3125 * been detached following each other, one by one.
3127 queue_rcu_work(system_wq
, &krwp
->rcu_work
);
3135 static inline void kfree_rcu_drain_unlock(struct kfree_rcu_cpu
*krcp
,
3136 unsigned long flags
)
3138 // Attempt to start a new batch.
3139 krcp
->monitor_todo
= false;
3140 if (queue_kfree_rcu_work(krcp
)) {
3141 // Success! Our job is done here.
3142 spin_unlock_irqrestore(&krcp
->lock
, flags
);
3146 // Previous RCU batch still in progress, try again later.
3147 krcp
->monitor_todo
= true;
3148 schedule_delayed_work(&krcp
->monitor_work
, KFREE_DRAIN_JIFFIES
);
3149 spin_unlock_irqrestore(&krcp
->lock
, flags
);
3153 * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
3154 * It invokes kfree_rcu_drain_unlock() to attempt to start another batch.
3156 static void kfree_rcu_monitor(struct work_struct
*work
)
3158 unsigned long flags
;
3159 struct kfree_rcu_cpu
*krcp
= container_of(work
, struct kfree_rcu_cpu
,
3162 spin_lock_irqsave(&krcp
->lock
, flags
);
3163 if (krcp
->monitor_todo
)
3164 kfree_rcu_drain_unlock(krcp
, flags
);
3166 spin_unlock_irqrestore(&krcp
->lock
, flags
);
3170 kfree_call_rcu_add_ptr_to_bulk(struct kfree_rcu_cpu
*krcp
,
3171 struct rcu_head
*head
, rcu_callback_t func
)
3173 struct kfree_rcu_bulk_data
*bnode
;
3175 if (unlikely(!krcp
->initialized
))
3178 lockdep_assert_held(&krcp
->lock
);
3180 /* Check if a new block is required. */
3182 krcp
->bhead
->nr_records
== KFREE_BULK_MAX_ENTR
) {
3183 bnode
= xchg(&krcp
->bcached
, NULL
);
3185 WARN_ON_ONCE(sizeof(struct kfree_rcu_bulk_data
) > PAGE_SIZE
);
3187 bnode
= (struct kfree_rcu_bulk_data
*)
3188 __get_free_page(GFP_NOWAIT
| __GFP_NOWARN
);
3191 /* Switch to emergency path. */
3192 if (unlikely(!bnode
))
3195 /* Initialize the new block. */
3196 bnode
->nr_records
= 0;
3197 bnode
->next
= krcp
->bhead
;
3198 bnode
->head_free_debug
= NULL
;
3200 /* Attach it to the head. */
3201 krcp
->bhead
= bnode
;
3204 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
3206 head
->next
= krcp
->bhead
->head_free_debug
;
3207 krcp
->bhead
->head_free_debug
= head
;
3210 /* Finally insert. */
3211 krcp
->bhead
->records
[krcp
->bhead
->nr_records
++] =
3212 (void *) head
- (unsigned long) func
;
3218 * Queue a request for lazy invocation of kfree_bulk()/kfree() after a grace
3219 * period. Please note there are two paths are maintained, one is the main one
3220 * that uses kfree_bulk() interface and second one is emergency one, that is
3221 * used only when the main path can not be maintained temporary, due to memory
3224 * Each kfree_call_rcu() request is added to a batch. The batch will be drained
3225 * every KFREE_DRAIN_JIFFIES number of jiffies. All the objects in the batch will
3226 * be free'd in workqueue context. This allows us to: batch requests together to
3227 * reduce the number of grace periods during heavy kfree_rcu() load.
3229 void kfree_call_rcu(struct rcu_head
*head
, rcu_callback_t func
)
3231 unsigned long flags
;
3232 struct kfree_rcu_cpu
*krcp
;
3234 local_irq_save(flags
); // For safely calling this_cpu_ptr().
3235 krcp
= this_cpu_ptr(&krc
);
3236 if (krcp
->initialized
)
3237 spin_lock(&krcp
->lock
);
3239 // Queue the object but don't yet schedule the batch.
3240 if (debug_rcu_head_queue(head
)) {
3241 // Probable double kfree_rcu(), just leak.
3242 WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n",
3248 * Under high memory pressure GFP_NOWAIT can fail,
3249 * in that case the emergency path is maintained.
3251 if (unlikely(!kfree_call_rcu_add_ptr_to_bulk(krcp
, head
, func
))) {
3253 head
->next
= krcp
->head
;
3257 WRITE_ONCE(krcp
->count
, krcp
->count
+ 1);
3259 // Set timer to drain after KFREE_DRAIN_JIFFIES.
3260 if (rcu_scheduler_active
== RCU_SCHEDULER_RUNNING
&&
3261 !krcp
->monitor_todo
) {
3262 krcp
->monitor_todo
= true;
3263 schedule_delayed_work(&krcp
->monitor_work
, KFREE_DRAIN_JIFFIES
);
3267 if (krcp
->initialized
)
3268 spin_unlock(&krcp
->lock
);
3269 local_irq_restore(flags
);
3271 EXPORT_SYMBOL_GPL(kfree_call_rcu
);
3273 static unsigned long
3274 kfree_rcu_shrink_count(struct shrinker
*shrink
, struct shrink_control
*sc
)
3277 unsigned long count
= 0;
3279 /* Snapshot count of all CPUs */
3280 for_each_online_cpu(cpu
) {
3281 struct kfree_rcu_cpu
*krcp
= per_cpu_ptr(&krc
, cpu
);
3283 count
+= READ_ONCE(krcp
->count
);
3289 static unsigned long
3290 kfree_rcu_shrink_scan(struct shrinker
*shrink
, struct shrink_control
*sc
)
3293 unsigned long flags
;
3295 for_each_online_cpu(cpu
) {
3297 struct kfree_rcu_cpu
*krcp
= per_cpu_ptr(&krc
, cpu
);
3299 count
= krcp
->count
;
3300 spin_lock_irqsave(&krcp
->lock
, flags
);
3301 if (krcp
->monitor_todo
)
3302 kfree_rcu_drain_unlock(krcp
, flags
);
3304 spin_unlock_irqrestore(&krcp
->lock
, flags
);
3306 sc
->nr_to_scan
-= count
;
3309 if (sc
->nr_to_scan
<= 0)
3316 static struct shrinker kfree_rcu_shrinker
= {
3317 .count_objects
= kfree_rcu_shrink_count
,
3318 .scan_objects
= kfree_rcu_shrink_scan
,
3320 .seeks
= DEFAULT_SEEKS
,
3323 void __init
kfree_rcu_scheduler_running(void)
3326 unsigned long flags
;
3328 for_each_online_cpu(cpu
) {
3329 struct kfree_rcu_cpu
*krcp
= per_cpu_ptr(&krc
, cpu
);
3331 spin_lock_irqsave(&krcp
->lock
, flags
);
3332 if (!krcp
->head
|| krcp
->monitor_todo
) {
3333 spin_unlock_irqrestore(&krcp
->lock
, flags
);
3336 krcp
->monitor_todo
= true;
3337 schedule_delayed_work_on(cpu
, &krcp
->monitor_work
,
3338 KFREE_DRAIN_JIFFIES
);
3339 spin_unlock_irqrestore(&krcp
->lock
, flags
);
	}
}

/*
 * During early boot, any blocking grace-period wait automatically
 * implies a grace period.  Later on, this is never the case for PREEMPTION.
 *
 * However, because a context switch is a grace period for !PREEMPTION, any
 * blocking grace-period wait automatically implies a grace period if
 * there is only one CPU online at any point in time during execution of
 * either synchronize_rcu() or synchronize_rcu_expedited().  It is OK to
 * occasionally incorrectly indicate that there are multiple CPUs online
 * when there was in fact only one the whole time, as this just adds some
 * overhead: RCU still operates correctly.
 */
static int rcu_blocking_is_gp(void)
{
	int ret;

	if (IS_ENABLED(CONFIG_PREEMPTION))
		return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
	might_sleep();  /* Check for RCU read-side critical section. */
	preempt_disable();
	ret = num_online_cpus() <= 1;
	preempt_enable();
	return ret;
}

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting.  RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 * In addition, regions of code across which interrupts, preemption, or
 * softirqs have been disabled also serve as RCU read-side critical
 * sections.  This includes hardware interrupt handlers, softirq handlers,
 * and NMI handlers.
 *
 * Note that this guarantee implies further memory-ordering guarantees.
 * On systems with more than one CPU, when synchronize_rcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last RCU read-side critical section whose beginning
 * preceded the call to synchronize_rcu().  In addition, each CPU having
 * an RCU read-side critical section that extends beyond the return from
 * synchronize_rcu() is guaranteed to have executed a full memory barrier
 * after the beginning of synchronize_rcu() and before the beginning of
 * that RCU read-side critical section.  Note that these guarantees include
 * CPUs that are offline, idle, or executing in user mode, as well as CPUs
 * that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_rcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 */
void synchronize_rcu(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu() in RCU read-side critical section");
	if (rcu_blocking_is_gp())
		return;
	if (rcu_gp_is_expedited())
		synchronize_rcu_expedited();
	else
		wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
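/*
 * Illustrative usage sketch (not part of this file): a typical
 * synchronize_rcu() updater publishes a new version with
 * rcu_assign_pointer(), waits out all pre-existing readers, and only then
 * frees the old version.  The "gp" pointer, "gp_lock", and "struct foo"
 * below are hypothetical.
 *
 *	spin_lock(&gp_lock);
 *	old_fp = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	new_fp = kmemdup(old_fp, sizeof(*old_fp), GFP_KERNEL);
 *	new_fp->data = 42;
 *	rcu_assign_pointer(gp, new_fp);
 *	spin_unlock(&gp_lock);
 *	synchronize_rcu();	// wait for pre-existing readers of old_fp
 *	kfree(old_fp);
 */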
3417 * get_state_synchronize_rcu - Snapshot current RCU state
3419 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3420 * to determine whether or not a full grace period has elapsed in the
3423 unsigned long get_state_synchronize_rcu(void)
3426 * Any prior manipulation of RCU-protected data must happen
3427 * before the load from ->gp_seq.
3430 return rcu_seq_snap(&rcu_state
.gp_seq
);
3432 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu
);
/**
 * cond_synchronize_rcu - Conditionally wait for an RCU grace period
 *
 * @oldstate: return value from earlier call to get_state_synchronize_rcu()
 *
 * If a full RCU grace period has elapsed since the earlier call to
 * get_state_synchronize_rcu(), just return.  Otherwise, invoke
 * synchronize_rcu() to wait for a full grace period.
 *
 * Yes, this function does not take counter wrap into account.  But
 * counter wrap is harmless.  If the counter wraps, we have waited for
 * more than 2 billion grace periods (and way more on a 64-bit system!),
 * so waiting for one additional grace period should be just fine.
 */
void cond_synchronize_rcu(unsigned long oldstate)
{
	if (!rcu_seq_done(&rcu_state.gp_seq, oldstate))
		synchronize_rcu();
	else
		smp_mb(); /* Ensure GP ends before subsequent accesses. */
}
EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
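
/*
 * Illustrative sketch (not part of this file): the cookie pattern that the
 * two functions above support.  Expensive work is done between the snapshot
 * and the conditional wait, so the grace period often completes "for free".
 * The helpers do_expensive_update() and publish_update() are hypothetical.
 *
 *	unsigned long cookie = get_state_synchronize_rcu();
 *
 *	do_expensive_update();		// overlaps with the grace period
 *	cond_synchronize_rcu(cookie);	// blocks only if the GP has not yet ended
 *	publish_update();		// all pre-existing readers are now done
 */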
/*
 * Check to see if there is any immediate RCU-related work to be done by
 * the current CPU, returning 1 if so and zero otherwise.  The checks are
 * in order of increasing expense: checks that can be carried out against
 * CPU-local state are performed first.  However, we must check for CPU
 * stalls first, else we might not get a chance.
 */
static int rcu_pending(int user)
{
	bool gp_in_progress;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;

	/* Check for CPU stalls, if enabled. */
	check_cpu_stall(rdp);

	/* Does this CPU need a deferred NOCB wakeup? */
	if (rcu_nocb_need_deferred_wakeup(rdp))
		return 1;

	/* Is this a nohz_full CPU in userspace or idle?  (Ignore RCU if so.) */
	if ((user || rcu_is_cpu_rrupt_from_idle()) && rcu_nohz_full_cpu())
		return 0;

	/* Is the RCU core waiting for a quiescent state from this CPU? */
	gp_in_progress = rcu_gp_in_progress();
	if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress)
		return 1;

	/* Does this CPU have callbacks ready to invoke? */
	if (rcu_segcblist_ready_cbs(&rdp->cblist))
		return 1;

	/* Has RCU gone idle with this CPU needing another grace period? */
	if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) &&
	    (!IS_ENABLED(CONFIG_RCU_NOCB_CPU) ||
	     !rcu_segcblist_is_offloaded(&rdp->cblist)) &&
	    !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
		return 1;

	/* Has an RCU grace period completed or started? */
	if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
	    unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
		return 1;

	/* nothing to do */
	return 0;
}
/*
 * Helper function for rcu_barrier() tracing.  If tracing is disabled,
 * the compiler is expected to optimize this away.
 */
static void rcu_barrier_trace(const char *s, int cpu, unsigned long done)
{
	trace_rcu_barrier(rcu_state.name, s, cpu,
			  atomic_read(&rcu_state.barrier_cpu_count), done);
}
/*
 * RCU callback function for rcu_barrier().  If we are last, wake
 * up the task executing rcu_barrier().
 *
 * Note that the value of rcu_state.barrier_sequence must be captured
 * before the atomic_dec_and_test().  Otherwise, if this CPU is not last,
 * other CPUs might count the value down to zero before this CPU gets
 * around to invoking rcu_barrier_trace(), which might result in bogus
 * data from the next instance of rcu_barrier().
 */
static void rcu_barrier_callback(struct rcu_head *rhp)
{
	unsigned long __maybe_unused s = rcu_state.barrier_sequence;

	if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
		rcu_barrier_trace(TPS("LastCB"), -1, s);
		complete(&rcu_state.barrier_completion);
	} else {
		rcu_barrier_trace(TPS("CB"), -1, s);
	}
}
/*
 * Called with preemption disabled, and from cross-cpu IRQ context.
 */
static void rcu_barrier_func(void *cpu_in)
{
	uintptr_t cpu = (uintptr_t)cpu_in;
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

	rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
	rdp->barrier_head.func = rcu_barrier_callback;
	debug_rcu_head_queue(&rdp->barrier_head);
	rcu_nocb_lock(rdp);
	WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies));
	if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) {
		atomic_inc(&rcu_state.barrier_cpu_count);
	} else {
		debug_rcu_head_unqueue(&rdp->barrier_head);
		rcu_barrier_trace(TPS("IRQNQ"), -1,
				  rcu_state.barrier_sequence);
	}
	rcu_nocb_unlock(rdp);
}
/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 *
 * Note that this primitive does not necessarily wait for an RCU grace period
 * to complete.  For example, if there are no RCU callbacks queued anywhere
 * in the system, then rcu_barrier() is within its rights to return
 * immediately, without waiting for anything, much less an RCU grace period.
 */
void rcu_barrier(void)
{
	uintptr_t cpu;
	struct rcu_data *rdp;
	unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);

	rcu_barrier_trace(TPS("Begin"), -1, s);

	/* Take mutex to serialize concurrent rcu_barrier() requests. */
	mutex_lock(&rcu_state.barrier_mutex);

	/* Did someone else do our work for us? */
	if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
		rcu_barrier_trace(TPS("EarlyExit"), -1,
				  rcu_state.barrier_sequence);
		smp_mb(); /* caller's subsequent code after above check. */
		mutex_unlock(&rcu_state.barrier_mutex);
		return;
	}

	/* Mark the start of the barrier operation. */
	rcu_seq_start(&rcu_state.barrier_sequence);
	rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);

	/*
	 * Initialize the count to two rather than to zero in order
	 * to avoid a too-soon return to zero in case of an immediate
	 * invocation of the just-enqueued callback (or preemption of
	 * this task).  Exclude CPU-hotplug operations to ensure that no
	 * offline non-offloaded CPU has callbacks queued.
	 */
	init_completion(&rcu_state.barrier_completion);
	atomic_set(&rcu_state.barrier_cpu_count, 2);
	get_online_cpus();

	/*
	 * Force each CPU with callbacks to register a new callback.
	 * When that callback is invoked, we will know that all of the
	 * corresponding CPU's preceding callbacks have been invoked.
	 */
	for_each_possible_cpu(cpu) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		if (cpu_is_offline(cpu) &&
		    !rcu_segcblist_is_offloaded(&rdp->cblist))
			continue;
		if (rcu_segcblist_n_cbs(&rdp->cblist) && cpu_online(cpu)) {
			rcu_barrier_trace(TPS("OnlineQ"), cpu,
					  rcu_state.barrier_sequence);
			smp_call_function_single(cpu, rcu_barrier_func, (void *)cpu, 1);
		} else if (rcu_segcblist_n_cbs(&rdp->cblist) &&
			   cpu_is_offline(cpu)) {
			rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu,
					  rcu_state.barrier_sequence);
			local_irq_disable();
			rcu_barrier_func((void *)cpu);
			local_irq_enable();
		} else if (cpu_is_offline(cpu)) {
			rcu_barrier_trace(TPS("OfflineNoCBNoQ"), cpu,
					  rcu_state.barrier_sequence);
		} else {
			rcu_barrier_trace(TPS("OnlineNQ"), cpu,
					  rcu_state.barrier_sequence);
		}
	}
	put_online_cpus();

	/*
	 * Now that we have an rcu_barrier_callback() callback on each
	 * CPU, and thus each counted, remove the initial count.
	 */
	if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count))
		complete(&rcu_state.barrier_completion);

	/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
	wait_for_completion(&rcu_state.barrier_completion);

	/* Mark the end of the barrier operation. */
	rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
	rcu_seq_end(&rcu_state.barrier_sequence);

	/* Other rcu_barrier() invocations can now safely proceed. */
	mutex_unlock(&rcu_state.barrier_mutex);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
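
/*
 * Illustrative sketch (not part of this file): the classic reason to call
 * rcu_barrier() is module unload, so that no callback posted by the module
 * can run after its text is gone.  my_driver_exit(), remove_all_entries(),
 * and my_cache are hypothetical.
 *
 *	static void __exit my_driver_exit(void)
 *	{
 *		remove_all_entries();	// each removal queues a callback via call_rcu()
 *		rcu_barrier();		// wait for every queued callback to be invoked
 *		kmem_cache_destroy(my_cache);
 *	}
 */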
/*
 * Propagate ->qsmaskinit bits up the rcu_node tree to account for the
 * first CPU in a given leaf rcu_node structure coming online.  The caller
 * must hold the corresponding leaf rcu_node ->lock with interrupts
 * disabled.
 */
static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
{
	long mask;
	long oldmask;
	struct rcu_node *rnp = rnp_leaf;

	raw_lockdep_assert_held_rcu_node(rnp_leaf);
	WARN_ON_ONCE(rnp->wait_blkd_tasks);
	for (;;) {
		mask = rnp->grpmask;
		rnp = rnp->parent;
		if (rnp == NULL)
			return;
		raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
		oldmask = rnp->qsmaskinit;
		rnp->qsmaskinit |= mask;
		raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
		if (oldmask)
			return;
	}
}
/*
 * Do boot-time initialization of a CPU's per-CPU RCU data.
 */
static void __init
rcu_boot_init_percpu_data(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

	/* Set up local state, ensuring consistent view of global state. */
	rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
	WARN_ON_ONCE(rdp->dynticks_nesting != 1);
	WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp)));
	rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
	rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
	rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
	rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;

	rcu_boot_init_nocb_percpu_data(rdp);
}
/*
 * Invoked early in the CPU-online process, when pretty much all services
 * are available.  The incoming CPU is not present.
 *
 * Initializes a CPU's per-CPU RCU data.  Note that only one online or
 * offline event can be happening at a given time.  Note also that we can
 * accept some slop in the rcu_state.gp_seq access due to the fact that this
 * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet.
 * And any offloaded callbacks are being numbered elsewhere.
 */
int rcutree_prepare_cpu(unsigned int cpu)
{
	unsigned long flags;
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	struct rcu_node *rnp = rcu_get_root();

	/* Set up local state, ensuring consistent view of global state. */
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	rdp->qlen_last_fqs_check = 0;
	rdp->n_force_qs_snap = rcu_state.n_force_qs;
	rdp->blimit = blimit;
	if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */
	    !rcu_segcblist_is_offloaded(&rdp->cblist))
		rcu_segcblist_init(&rdp->cblist);  /* Re-enable callbacks. */
	rdp->dynticks_nesting = 1;	/* CPU not up, no tearing. */
	rcu_dynticks_eqs_online();
	raw_spin_unlock_rcu_node(rnp);		/* irqs remain disabled. */

	/*
	 * Add CPU to leaf rcu_node pending-online bitmask.  Any needed
	 * propagation up the rcu_node tree will happen at the beginning
	 * of the next grace period.
	 */
	rnp = rdp->mynode;
	raw_spin_lock_rcu_node(rnp);		/* irqs already disabled. */
	rdp->beenonline = true;	 /* We have now been online. */
	rdp->gp_seq = READ_ONCE(rnp->gp_seq);
	rdp->gp_seq_needed = rdp->gp_seq;
	rdp->cpu_no_qs.b.norm = true;
	rdp->core_needs_qs = false;
	rdp->rcu_iw_pending = false;
	rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
	trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	rcu_prepare_kthreads(cpu);
	rcu_spawn_cpu_nocb_kthread(cpu);

	return 0;
}
/*
 * Update RCU priority boost kthread affinity for CPU-hotplug changes.
 */
static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

	rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
}
/*
 * Near the end of the CPU-online process.  Pretty much all services
 * enabled, and the CPU is now very much alive.
 */
int rcutree_online_cpu(unsigned int cpu)
{
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = per_cpu_ptr(&rcu_data, cpu);
	rnp = rdp->mynode;
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	rnp->ffmask |= rdp->grpmask;
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return 0; /* Too early in boot for scheduler work. */
	sync_sched_exp_online_cleanup(cpu);
	rcutree_affinity_setting(cpu, -1);

	// Stop-machine done, so allow nohz_full to disable tick.
	tick_dep_clear(TICK_DEP_BIT_RCU);
	return 0;
}
/*
 * Near the beginning of the CPU-offline process.  The CPU is still very
 * much alive with pretty much all services enabled.
 */
int rcutree_offline_cpu(unsigned int cpu)
{
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = per_cpu_ptr(&rcu_data, cpu);
	rnp = rdp->mynode;
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	rnp->ffmask &= ~rdp->grpmask;
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	rcutree_affinity_setting(cpu, cpu);

	// nohz_full CPUs need the tick for stop-machine to work quickly
	tick_dep_set(TICK_DEP_BIT_RCU);
	return 0;
}
static DEFINE_PER_CPU(int, rcu_cpu_started);
/*
 * Mark the specified CPU as being online so that subsequent grace periods
 * (both expedited and normal) will wait on it.  Note that this means that
 * incoming CPUs are not allowed to use RCU read-side critical sections
 * until this function is called.  Failing to observe this restriction
 * will result in lockdep splats.
 *
 * Note that this function is special in that it is invoked directly
 * from the incoming CPU rather than from the cpuhp_step mechanism.
 * This is because this function must be invoked at a precise location.
 */
void rcu_cpu_starting(unsigned int cpu)
{
	unsigned long flags;
	unsigned long mask;
	int nbits;
	unsigned long oldmask;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	if (per_cpu(rcu_cpu_started, cpu))
		return;

	per_cpu(rcu_cpu_started, cpu) = 1;

	rdp = per_cpu_ptr(&rcu_data, cpu);
	rnp = rdp->mynode;
	mask = rdp->grpmask;
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
	oldmask = rnp->expmaskinitnext;
	rnp->expmaskinitnext |= mask;
	oldmask ^= rnp->expmaskinitnext;
	nbits = bitmap_weight(&oldmask, BITS_PER_LONG);
	/* Allow lockless access for expedited grace periods. */
	smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + nbits); /* ^^^ */
	ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus);
	rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
	rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
	rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags);
	if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
		rcu_disable_urgency_upon_qs(rdp);
		/* Report QS -after- changing ->qsmaskinitnext! */
		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
	} else {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
	smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * The outgoing CPU has no further need of RCU, so remove it from
 * the rcu_node tree's ->qsmaskinitnext bit masks.
 *
 * Note that this function is special in that it is invoked directly
 * from the outgoing CPU rather than from the cpuhp_step mechanism.
 * This is because this function must be invoked at a precise location.
 */
void rcu_report_dead(unsigned int cpu)
{
	unsigned long flags;
	unsigned long mask;
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */

	/* QS for any half-done expedited grace period. */
	preempt_disable();
	rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
	preempt_enable();
	rcu_preempt_deferred_qs(current);

	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
	mask = rdp->grpmask;
	raw_spin_lock(&rcu_state.ofl_lock);
	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
	rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
	rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
	if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
		/* Report quiescent state -before- changing ->qsmaskinitnext! */
		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
	}
	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	raw_spin_unlock(&rcu_state.ofl_lock);

	per_cpu(rcu_cpu_started, cpu) = 0;
}
/*
 * The outgoing CPU has just passed through the dying-idle state, and we
 * are being invoked from the CPU that was IPIed to continue the offline
 * operation.  Migrate the outgoing CPU's callbacks to the current CPU.
 */
void rcutree_migrate_callbacks(int cpu)
{
	unsigned long flags;
	struct rcu_data *my_rdp;
	struct rcu_node *my_rnp;
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	bool needwake;

	if (rcu_segcblist_is_offloaded(&rdp->cblist) ||
	    rcu_segcblist_empty(&rdp->cblist))
		return;  /* No callbacks to migrate. */

	local_irq_save(flags);
	my_rdp = this_cpu_ptr(&rcu_data);
	my_rnp = my_rdp->mynode;
	rcu_nocb_lock(my_rdp); /* irqs already disabled. */
	WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies));
	raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */
	/* Leverage recent GPs and set GP for new callbacks. */
	needwake = rcu_advance_cbs(my_rnp, rdp) ||
		   rcu_advance_cbs(my_rnp, my_rdp);
	rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
	needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp);
	rcu_segcblist_disable(&rdp->cblist);
	WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) !=
		     !rcu_segcblist_n_cbs(&my_rdp->cblist));
	if (rcu_segcblist_is_offloaded(&my_rdp->cblist)) {
		raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
		__call_rcu_nocb_wake(my_rdp, true, flags);
	} else {
		rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */
		raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags);
	}
	if (needwake)
		rcu_gp_kthread_wake();
	lockdep_assert_irqs_enabled();
	WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
		  !rcu_segcblist_empty(&rdp->cblist),
		  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
		  cpu, rcu_segcblist_n_cbs(&rdp->cblist),
		  rcu_segcblist_first_cb(&rdp->cblist));
}
#endif
/*
 * On non-huge systems, use expedited RCU grace periods to make suspend
 * and hibernation run faster.
 */
static int rcu_pm_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu)
{
	switch (action) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		rcu_expedite_gp();
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		rcu_unexpedite_gp();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
/*
 * Spawn the kthreads that handle RCU's grace periods.
 */
static int __init rcu_spawn_gp_kthread(void)
{
	unsigned long flags;
	int kthread_prio_in = kthread_prio;
	struct rcu_node *rnp;
	struct sched_param sp;
	struct task_struct *t;

	/* Force priority into range. */
	if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
	    && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
		kthread_prio = 2;
	else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
		kthread_prio = 1;
	else if (kthread_prio < 0)
		kthread_prio = 0;
	else if (kthread_prio > 99)
		kthread_prio = 99;

	if (kthread_prio != kthread_prio_in)
		pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
			 kthread_prio, kthread_prio_in);

	rcu_scheduler_fully_active = 1;
	t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
	if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
		return 0;
	if (kthread_prio) {
		sp.sched_priority = kthread_prio;
		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	}
	rnp = rcu_get_root();
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	WRITE_ONCE(rcu_state.gp_activity, jiffies);
	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
	// Reset .gp_activity and .gp_req_activity before setting .gp_kthread.
	smp_store_release(&rcu_state.gp_kthread, t);  /* ^^^ */
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	wake_up_process(t);
	rcu_spawn_nocb_kthreads();
	rcu_spawn_boost_kthreads();
	return 0;
}
early_initcall(rcu_spawn_gp_kthread);
/*
 * This function is invoked towards the end of the scheduler's
 * initialization process.  Before this is called, the idle task might
 * contain synchronous grace-period primitives (during which time, this idle
 * task is booting the system, and such primitives are no-ops).  After this
 * function is called, any synchronous grace-period primitives are run as
 * expedited, with the requesting task driving the grace period forward.
 * A later core_initcall() rcu_set_runtime_mode() will switch to full
 * runtime RCU functionality.
 */
void rcu_scheduler_starting(void)
{
	WARN_ON(num_online_cpus() != 1);
	WARN_ON(nr_context_switches() > 0);
	rcu_test_sync_prims();
	rcu_scheduler_active = RCU_SCHEDULER_INIT;
	rcu_test_sync_prims();
}
/*
 * Helper function for rcu_init() that initializes the rcu_state structure.
 */
static void __init rcu_init_one(void)
{
	static const char * const buf[] = RCU_NODE_NAME_INIT;
	static const char * const fqs[] = RCU_FQS_NAME_INIT;
	static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
	static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];

	int levelspread[RCU_NUM_LVLS];		/* kids/node in each level. */
	int cpustride = 1;
	int i;
	int j;
	struct rcu_node *rnp;

	BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */

	/* Silence gcc 4.8 false positive about array index out of range. */
	if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
		panic("rcu_init_one: rcu_num_lvls out of range");

	/* Initialize the level-tracking arrays. */

	for (i = 1; i < rcu_num_lvls; i++)
		rcu_state.level[i] =
			rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Initialize the elements themselves, starting from the leaves. */

	for (i = rcu_num_lvls - 1; i >= 0; i--) {
		cpustride *= levelspread[i];
		rnp = rcu_state.level[i];
		for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
			raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
			lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
						   &rcu_node_class[i], buf[i]);
			raw_spin_lock_init(&rnp->fqslock);
			lockdep_set_class_and_name(&rnp->fqslock,
						   &rcu_fqs_class[i], fqs[i]);
			rnp->gp_seq = rcu_state.gp_seq;
			rnp->gp_seq_needed = rcu_state.gp_seq;
			rnp->completedqs = rcu_state.gp_seq;
			rnp->qsmask = 0;
			rnp->qsmaskinit = 0;
			rnp->grplo = j * cpustride;
			rnp->grphi = (j + 1) * cpustride - 1;
			if (rnp->grphi >= nr_cpu_ids)
				rnp->grphi = nr_cpu_ids - 1;
			if (i == 0) {
				rnp->grpnum = 0;
				rnp->grpmask = 0;
				rnp->parent = NULL;
			} else {
				rnp->grpnum = j % levelspread[i - 1];
				rnp->grpmask = BIT(rnp->grpnum);
				rnp->parent = rcu_state.level[i - 1] +
					      j / levelspread[i - 1];
			}
			rnp->level = i;
			INIT_LIST_HEAD(&rnp->blkd_tasks);
			rcu_init_one_nocb(rnp);
			init_waitqueue_head(&rnp->exp_wq[0]);
			init_waitqueue_head(&rnp->exp_wq[1]);
			init_waitqueue_head(&rnp->exp_wq[2]);
			init_waitqueue_head(&rnp->exp_wq[3]);
			spin_lock_init(&rnp->exp_lock);
		}
	}

	init_swait_queue_head(&rcu_state.gp_wq);
	init_swait_queue_head(&rcu_state.expedited_wq);
	rnp = rcu_first_leaf_node();
	for_each_possible_cpu(i) {
		while (i > rnp->grphi)
			rnp++;
		per_cpu_ptr(&rcu_data, i)->mynode = rnp;
		rcu_boot_init_percpu_data(i);
	}
}
/*
 * Compute the rcu_node tree geometry from kernel parameters.  This cannot
 * replace the definitions in tree.h because those are needed to size
 * the ->node array in the rcu_state structure.
 */
static void __init rcu_init_geometry(void)
{
	ulong d;
	int i;
	int rcu_capacity[RCU_NUM_LVLS];

	/*
	 * Initialize any unspecified boot parameters.
	 * The default values of jiffies_till_first_fqs and
	 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
	 * value, which is a function of HZ, then adding one for each
	 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
	 */
	d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
	if (jiffies_till_first_fqs == ULONG_MAX)
		jiffies_till_first_fqs = d;
	if (jiffies_till_next_fqs == ULONG_MAX)
		jiffies_till_next_fqs = d;
	adjust_jiffies_till_sched_qs();

	/* If the compile-time values are accurate, just leave. */
	if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
	    nr_cpu_ids == NR_CPUS)
		return;
	pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
		rcu_fanout_leaf, nr_cpu_ids);

	/*
	 * The boot-time rcu_fanout_leaf parameter must be at least two
	 * and cannot exceed the number of bits in the rcu_node masks.
	 * Complain and fall back to the compile-time values if this
	 * limit is exceeded.
	 */
	if (rcu_fanout_leaf < 2 ||
	    rcu_fanout_leaf > sizeof(unsigned long) * 8) {
		rcu_fanout_leaf = RCU_FANOUT_LEAF;
		WARN_ON(1);
		return;
	}

	/*
	 * Compute number of nodes that can be handled by an rcu_node tree
	 * with the given number of levels.
	 */
	rcu_capacity[0] = rcu_fanout_leaf;
	for (i = 1; i < RCU_NUM_LVLS; i++)
		rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;

	/*
	 * The tree must be able to accommodate the configured number of CPUs.
	 * If this limit is exceeded, fall back to the compile-time values.
	 */
	if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
		rcu_fanout_leaf = RCU_FANOUT_LEAF;
		WARN_ON(1);
		return;
	}

	/* Calculate the number of levels in the tree. */
	for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
	}
	rcu_num_lvls = i + 1;

	/* Calculate the number of rcu_nodes at each level of the tree. */
	for (i = 0; i < rcu_num_lvls; i++) {
		int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
		num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
	}

	/* Calculate the total number of rcu_node structures. */
	rcu_num_nodes = 0;
	for (i = 0; i < rcu_num_lvls; i++)
		rcu_num_nodes += num_rcu_lvl[i];
}
/*
 * Dump out the structure of the rcu_node combining tree associated
 * with the rcu_state structure.
 */
static void __init rcu_dump_rcu_node_tree(void)
{
	int level = 0;
	struct rcu_node *rnp;

	pr_info("rcu_node tree layout dump\n");
	pr_info(" ");
	rcu_for_each_node_breadth_first(rnp) {
		if (rnp->level != level) {
			pr_cont("\n");
			pr_info(" ");
			level = rnp->level;
		}
		pr_cont("%d:%d ^%d  ", rnp->grplo, rnp->grphi, rnp->grpnum);
	}
	pr_cont("\n");
}
struct workqueue_struct *rcu_gp_wq;
struct workqueue_struct *rcu_par_gp_wq;
static void __init kfree_rcu_batch_init(void)
{
	int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);

		spin_lock_init(&krcp->lock);
		for (i = 0; i < KFREE_N_BATCHES; i++) {
			INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
			krcp->krw_arr[i].krcp = krcp;
		}

		INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
		krcp->initialized = true;
	}
	if (register_shrinker(&kfree_rcu_shrinker))
		pr_err("Failed to register kfree_rcu() shrinker!\n");
}
void __init rcu_init(void)
{
	int cpu;

	rcu_early_boot_tests();

	kfree_rcu_batch_init();
	rcu_bootup_announce();
	rcu_init_geometry();
	rcu_init_one();
	if (dump_tree)
		rcu_dump_rcu_node_tree();
	if (use_softirq)
		open_softirq(RCU_SOFTIRQ, rcu_core_si);

	/*
	 * We don't need protection against CPU-hotplug here because
	 * this is called early in boot, before either interrupts
	 * or the scheduler are operational.
	 */
	pm_notifier(rcu_pm_notify, 0);
	for_each_online_cpu(cpu) {
		rcutree_prepare_cpu(cpu);
		rcu_cpu_starting(cpu);
		rcutree_online_cpu(cpu);
	}

	/* Create workqueue for expedited GPs and for Tree SRCU. */
	rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
	WARN_ON(!rcu_gp_wq);
	rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
	WARN_ON(!rcu_par_gp_wq);
	srcu_init();

	/* Fill in default value for rcutree.qovld boot parameter. */
	/* -After- the rcu_node ->lock fields are initialized! */
	if (qovld < 0)
		qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark;
	else
		qovld_calc = qovld;
}
#include "tree_stall.h"
#include "tree_exp.h"
#include "tree_plugin.h"