// SPDX-License-Identifier: GPL-2.0+
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Authors: Paul McKenney <paulmck@linux.ibm.com>
 *          Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 *
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/srcu.h>

#include "rcu.h"
#include "rcu_segcblist.h"

#ifndef data_race
#define data_race(expr)							\
	({								\
		expr;							\
	})
#endif
#ifndef ASSERT_EXCLUSIVE_WRITER
#define ASSERT_EXCLUSIVE_WRITER(var) do { } while (0)
#endif
#ifndef ASSERT_EXCLUSIVE_ACCESS
#define ASSERT_EXCLUSIVE_ACCESS(var) do { } while (0)
#endif

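/*
 * The fallbacks above matter only on kernels lacking KCSAN's data_race()
 * and ASSERT_EXCLUSIVE_*() annotations.  As a hedged illustration (not
 * part of this file's logic), data_race() marks loads whose raciness is
 * intentional and harmless, for example in diagnostic prints:
 *
 *	pr_info("gp_seq: %lu\n", data_race(ssp->srcu_gp_seq));
 *
 * With the fallback definition the wrapper simply evaluates the
 * expression; under KCSAN it additionally suppresses race reports.
 */
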
/* Holdoff in nanoseconds for auto-expediting. */
#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
module_param(exp_holdoff, ulong, 0444);

/* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */
static ulong counter_wrap_check = (ULONG_MAX >> 2);
module_param(counter_wrap_check, ulong, 0444);

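/*
 * Both parameters are read-only at runtime (0444 permissions), so they
 * are normally set on the kernel command line.  A hedged sketch, with the
 * "srcutree." prefix assumed from this file's name as for other built-in
 * module parameters:
 *
 *	srcutree.exp_holdoff=0 srcutree.counter_wrap_check=255
 *
 * The first disables the auto-expedite holdoff entirely; the second makes
 * the counter-wrap check fire roughly every 256 grace periods instead of
 * almost never.
 */
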
/* Early-boot callback-management, so early that no lock is required! */
static LIST_HEAD(srcu_boot_list);
static bool __read_mostly srcu_init_done;

static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
static void process_srcu(struct work_struct *work);
static void srcu_delay_timer(struct timer_list *t);

/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
#define spin_lock_rcu_node(p)						\
do {									\
	spin_lock(&ACCESS_PRIVATE(p, lock));				\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irq_rcu_node(p)					\
do {									\
	spin_lock_irq(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_irq_rcu_node(p)					\
	spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irqsave_rcu_node(p, flags)				\
do {									\
	spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);		\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_irqrestore_rcu_node(p, flags)			\
	spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)

/*
 * Initialize SRCU combining tree.  Note that statically allocated
 * srcu_struct structures might already have srcu_read_lock() and
 * srcu_read_unlock() running against them.  So if the is_static parameter
 * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
 */
static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static)
{
	int cpu;
	int i;
	int level = 0;
	int levelspread[RCU_NUM_LVLS];
	struct srcu_data *sdp;
	struct srcu_node *snp;
	struct srcu_node *snp_first;

	/* Work out the overall tree geometry. */
	ssp->level[0] = &ssp->node[0];
	for (i = 1; i < rcu_num_lvls; i++)
		ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Each pass through this loop initializes one srcu_node structure. */
	srcu_for_each_node_breadth_first(ssp, snp) {
		spin_lock_init(&ACCESS_PRIVATE(snp, lock));
		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
			     ARRAY_SIZE(snp->srcu_data_have_cbs));
		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
			snp->srcu_have_cbs[i] = 0;
			snp->srcu_data_have_cbs[i] = 0;
		}
		snp->srcu_gp_seq_needed_exp = 0;
		snp->grplo = -1;
		snp->grphi = -1;
		if (snp == &ssp->node[0]) {
			/* Root node, special case. */
			snp->srcu_parent = NULL;
			continue;
		}

		/* Non-root node. */
		if (snp == ssp->level[level + 1])
			level++;
		snp->srcu_parent = ssp->level[level - 1] +
				   (snp - ssp->level[level]) /
				   levelspread[level - 1];
	}

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
	level = rcu_num_lvls - 1;
	snp_first = ssp->level[level];
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
		rcu_segcblist_init(&sdp->srcu_cblist);
		sdp->srcu_cblist_invoking = false;
		sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
		sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
		sdp->mynode = &snp_first[cpu / levelspread[level]];
		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
			if (snp->grplo < 0)
				snp->grplo = cpu;
			snp->grphi = cpu;
		}
		sdp->cpu = cpu;
		INIT_WORK(&sdp->work, srcu_invoke_callbacks);
		timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
		sdp->ssp = ssp;
		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
		if (is_static)
			continue;

		/* Dynamically allocated, better be no srcu_read_locks()! */
		for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) {
			sdp->srcu_lock_count[i] = 0;
			sdp->srcu_unlock_count[i] = 0;
		}
	}
}

/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static
 * parameter is passed through to init_srcu_struct_nodes(), and
 * also tells us that ->sda has already been wired up to srcu_data.
 */
static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
{
	mutex_init(&ssp->srcu_cb_mutex);
	mutex_init(&ssp->srcu_gp_mutex);
	ssp->srcu_idx = 0;
	ssp->srcu_gp_seq = 0;
	ssp->srcu_barrier_seq = 0;
	mutex_init(&ssp->srcu_barrier_mutex);
	atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
	INIT_DELAYED_WORK(&ssp->work, process_srcu);
	if (!is_static)
		ssp->sda = alloc_percpu(struct srcu_data);
	init_srcu_struct_nodes(ssp, is_static);
	ssp->srcu_gp_seq_needed_exp = 0;
	ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
	return ssp->sda ? 0 : -ENOMEM;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
	lockdep_init_map(&ssp->dep_map, name, key, 0);
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @ssp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *ssp)
{
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

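/*
 * A hedged usage sketch (my_dev and my_config are illustrative, not part
 * of this file).  A statically allocated domain can be declared with
 * DEFINE_SRCU() or DEFINE_STATIC_SRCU(), while a dynamically allocated
 * one must go through init_srcu_struct():
 *
 *	DEFINE_STATIC_SRCU(my_srcu);		// static domain, usable early
 *
 *	struct my_dev {
 *		struct my_config __rcu *config;
 *		struct srcu_struct srcu;
 *	};
 *
 *	static int my_dev_setup(struct my_dev *d)
 *	{
 *		return init_srcu_struct(&d->srcu);	// dynamic domain
 *	}
 *
 * A dynamically initialized domain must later be torn down with
 * cleanup_srcu_struct(), once all readers and callbacks have finished.
 */
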
/*
 * First-use initialization of statically allocated srcu_struct
 * structure.  Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive.  Use ssp->lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
 */
static void check_init_srcu_struct(struct srcu_struct *ssp)
{
	unsigned long flags;

	/* The smp_load_acquire() pairs with the smp_store_release(). */
	if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/
		return; /* Already initialized. */
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {
		spin_unlock_irqrestore_rcu_node(ssp, flags);
		return;
	}
	init_srcu_struct_fields(ssp, true);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
	}
	return sum;
}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
	}
	return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
{
	unsigned long unlocks;

	unlocks = srcu_readers_unlock_idx(ssp, idx);

	/*
	 * Make sure that a lock is always counted if the corresponding
	 * unlock is counted.  Needs to be a smp_mb() as the read side may
	 * contain a read from a variable that is written to before the
	 * synchronize_srcu() in the write side.  In this case smp_mb()s
	 * A and B act like the store buffering pattern.
	 *
	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
	 * after the synchronize_srcu() from being executed before the
	 * grace period ends.
	 */
	smp_mb(); /* A */

	/*
	 * If the locks are the same as the unlocks, then there must have
	 * been no readers on this index at some time in between.  This does
	 * not mean that there are no more readers, as one could have read
	 * the current index but not have incremented the lock counter yet.
	 *
	 * So suppose that the updater is preempted here for so long
	 * that more than ULONG_MAX non-nested readers come and go in
	 * the meantime.  It turns out that this cannot result in overflow
	 * because if a reader modifies its unlock count after we read it
	 * above, then that reader's next load of ->srcu_idx is guaranteed
	 * to get the new value, which will cause it to operate on the
	 * other bank of counters, where it cannot contribute to the
	 * overflow of these counters.  This means that there is a maximum
	 * of 2*NR_CPUS increments, which cannot overflow given current
	 * systems, especially not on 64-bit systems.
	 *
	 * OK, how about nesting?  This does impose a limit on nesting
	 * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
	 * especially on 64-bit systems.
	 */
	return srcu_readers_lock_idx(ssp, idx) == unlocks;
}

/**
 * srcu_readers_active - returns true if there are readers, and false
 * otherwise
 * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *ssp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
	}
	return sum;
}

#define SRCU_INTERVAL		1

/*
 * Return grace-period delay, zero if there are expedited grace
 * periods pending, SRCU_INTERVAL otherwise.
 */
static unsigned long srcu_get_delay(struct srcu_struct *ssp)
{
	if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq),
			 READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
		return 0;
	return SRCU_INTERVAL;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @ssp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *ssp)
{
	int cpu;

	if (WARN_ON(!srcu_get_delay(ssp)))
		return; /* Just leak it! */
	if (WARN_ON(srcu_readers_active(ssp)))
		return; /* Just leak it! */
	flush_delayed_work(&ssp->work);
	for_each_possible_cpu(cpu) {
		struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);

		del_timer_sync(&sdp->delay_work);
		flush_work(&sdp->work);
		if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
			return; /* Forgot srcu_barrier(), so just leak it! */
	}
	if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
	    WARN_ON(srcu_readers_active(ssp))) {
		pr_info("%s: Active srcu_struct %p state: %d\n",
			__func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)));
		return; /* Caller forgot to stop doing call_srcu()? */
	}
	free_percpu(ssp->sda);
	ssp->sda = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

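/*
 * A hedged teardown sketch (my_dev and my_dev_teardown are illustrative,
 * not part of this file).  Before cleanup_srcu_struct() runs, every reader
 * must have finished and every pending callback must have been invoked,
 * typically by draining them with srcu_barrier():
 *
 *	static void my_dev_teardown(struct my_dev *d)
 *	{
 *		// Stop anything that could still post call_srcu() callbacks.
 *		srcu_barrier(&d->srcu);		// wait for in-flight callbacks
 *		cleanup_srcu_struct(&d->srcu);	// now safe to deconstruct
 *	}
 *
 * Skipping srcu_barrier() trips the rcu_segcblist_n_cbs() warning above
 * and leaks the srcu_struct's per-CPU data.
 */
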
/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *ssp)
{
	int idx;

	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
	this_cpu_inc(ssp->sda->srcu_lock_count[idx]);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_inc(ssp->sda->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);

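/*
 * A hedged reader-side sketch (my_dev, my_config, and use_config() are
 * illustrative, not part of this file).  Callers normally use the
 * srcu_read_lock()/srcu_read_unlock() wrappers rather than the __-prefixed
 * functions above, and may sleep inside the critical section:
 *
 *	int idx;
 *	struct my_config *cfg;
 *
 *	idx = srcu_read_lock(&d->srcu);
 *	cfg = srcu_dereference(d->config, &d->srcu);	// protected pointer
 *	use_config(cfg);				// may block
 *	srcu_read_unlock(&d->srcu, idx);
 *
 * The returned index must be passed to the matching srcu_read_unlock(),
 * and read-side critical sections may be nested.
 */
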
/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after a few microseconds,
 * we repeatedly block for 1-millisecond time periods.
 */
#define SRCU_RETRY_CHECK_DELAY		5

/*
 * Start an SRCU grace period.
 */
static void srcu_gp_start(struct srcu_struct *ssp)
{
	struct srcu_data *sdp = this_cpu_ptr(ssp->sda);
	int state;

	lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
	WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
	spin_lock_rcu_node(sdp);  /* Interrupts already disabled. */
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&ssp->srcu_gp_seq));
	spin_unlock_rcu_node(sdp);  /* Interrupts remain disabled. */
	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
	rcu_seq_start(&ssp->srcu_gp_seq);
	state = rcu_seq_state(ssp->srcu_gp_seq);
	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}


static void srcu_delay_timer(struct timer_list *t)
{
	struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);

	queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
}

static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
				       unsigned long delay)
{
	if (!delay) {
		queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
		return;
	}

	timer_reduce(&sdp->delay_work, jiffies + delay);
}

/*
 * Schedule callback invocation for the specified srcu_data structure,
 * if possible, on the corresponding CPU.
 */
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
{
	srcu_queue_delayed_work_on(sdp, delay);
}

/*
 * Schedule callback invocation for all srcu_data structures associated
 * with the specified srcu_node structure that have callbacks for the
 * just-completed grace period, the one corresponding to idx.  If possible,
 * schedule this invocation on the corresponding CPUs.
 */
static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long mask, unsigned long delay)
{
	int cpu;

	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
		if (!(mask & (1 << (cpu - snp->grplo))))
			continue;
		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
	}
}

/*
 * Note the end of an SRCU grace period.  Initiates callback invocation
 * and starts a new grace period if needed.
 *
 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * instead prevents more than one grace period from starting while we
 * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
 */
static void srcu_gp_end(struct srcu_struct *ssp)
{
	unsigned long cbdelay;
	bool cbs;
	bool last_lvl;
	int cpu;
	unsigned long flags;
	unsigned long gpseq;
	int idx;
	unsigned long mask;
	struct srcu_data *sdp;
	struct srcu_node *snp;

	/* Prevent more than one additional grace period. */
	mutex_lock(&ssp->srcu_cb_mutex);

	/* End the current grace period. */
	spin_lock_irq_rcu_node(ssp);
	idx = rcu_seq_state(ssp->srcu_gp_seq);
	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
	cbdelay = srcu_get_delay(ssp);
	WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns());
	rcu_seq_end(&ssp->srcu_gp_seq);
	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, gpseq);
	spin_unlock_irq_rcu_node(ssp);
	mutex_unlock(&ssp->srcu_gp_mutex);
	/* A new grace period can start at this point.  But only one. */

	/* Initiate callback invocation as needed. */
	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
	srcu_for_each_node_breadth_first(ssp, snp) {
		spin_lock_irq_rcu_node(snp);
		cbs = false;
		last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
		if (last_lvl)
			cbs = snp->srcu_have_cbs[idx] == gpseq;
		snp->srcu_have_cbs[idx] = gpseq;
		rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
		if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
			WRITE_ONCE(snp->srcu_gp_seq_needed_exp, gpseq);
		mask = snp->srcu_data_have_cbs[idx];
		snp->srcu_data_have_cbs[idx] = 0;
		spin_unlock_irq_rcu_node(snp);
		if (cbs)
			srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);

		/* Occasionally prevent srcu_data counter wrap. */
		if (!(gpseq & counter_wrap_check) && last_lvl)
			for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
				sdp = per_cpu_ptr(ssp->sda, cpu);
				spin_lock_irqsave_rcu_node(sdp, flags);
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed + 100))
					sdp->srcu_gp_seq_needed = gpseq;
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed_exp + 100))
					sdp->srcu_gp_seq_needed_exp = gpseq;
				spin_unlock_irqrestore_rcu_node(sdp, flags);
			}
	}

	/* Callback initiation done, allow grace periods after next. */
	mutex_unlock(&ssp->srcu_cb_mutex);

	/* Start a new grace period if needed. */
	spin_lock_irq_rcu_node(ssp);
	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
	if (!rcu_seq_state(gpseq) &&
	    ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
		srcu_gp_start(ssp);
		spin_unlock_irq_rcu_node(ssp);
		srcu_reschedule(ssp, 0);
	} else {
		spin_unlock_irq_rcu_node(ssp);
	}
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests.  This function is invoked for the first known
 * expedited request for a grace period that has already been requested,
 * but without expediting.  To start a completely new grace period,
 * whether expedited or not, use srcu_funnel_gp_start() instead.
 */
static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long s)
{
	unsigned long flags;

	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
		    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
			return;
		spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
			spin_unlock_irqrestore_rcu_node(snp, flags);
			return;
		}
		WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		spin_unlock_irqrestore_rcu_node(snp, flags);
	}
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * requests.  The winner has to do the work of actually starting grace
 * period s.  Losers must either ensure that their desired grace-period
 * number is recorded on at least their leaf srcu_node structure, or they
 * must take steps to invoke their own callbacks.
 *
 * Note that this function also does the work of srcu_funnel_exp_start(),
 * in some cases by directly invoking it.
 */
static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
				 unsigned long s, bool do_norm)
{
	unsigned long flags;
	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
	struct srcu_node *snp = sdp->mynode;
	unsigned long snp_seq;

	/* Each pass through the loop does one level of the srcu_node tree. */
	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != sdp->mynode)
			return; /* GP already done and CBs recorded. */
		spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
			snp_seq = snp->srcu_have_cbs[idx];
			if (snp == sdp->mynode && snp_seq == s)
				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
			spin_unlock_irqrestore_rcu_node(snp, flags);
			if (snp == sdp->mynode && snp_seq != s) {
				srcu_schedule_cbs_sdp(sdp, do_norm
							   ? SRCU_INTERVAL
							   : 0);
				return;
			}
			if (!do_norm)
				srcu_funnel_exp_start(ssp, snp, s);
			return;
		}
		snp->srcu_have_cbs[idx] = s;
		if (snp == sdp->mynode)
			snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
		if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
			WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		spin_unlock_irqrestore_rcu_node(snp, flags);
	}

	/* Top of tree, must ensure the grace period will be started. */
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
		/*
		 * Record need for grace period s.  Pair with load
		 * acquire setting up for initialization.
		 */
		smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/
	}
	if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);

	/* If grace period not already done and none in progress, start it. */
	if (!rcu_seq_done(&ssp->srcu_gp_seq, s) &&
	    rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
		WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
		srcu_gp_start(ssp);
		if (likely(srcu_init_done))
			queue_delayed_work(rcu_gp_wq, &ssp->work,
					   srcu_get_delay(ssp));
		else if (list_empty(&ssp->work.work.entry))
			list_add(&ssp->work.work.entry, &srcu_boot_list);
	}
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
 */
static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
{
	for (;;) {
		if (srcu_readers_active_idx_check(ssp, idx))
			return true;
		if (--trycount + !srcu_get_delay(ssp) <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
}

/*
 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *ssp)
{
	/*
	 * Ensure that if this updater saw a given reader's increment
	 * from __srcu_read_lock(), that reader was using an old value
	 * of ->srcu_idx.  Also ensure that if a given reader sees the
	 * new value of ->srcu_idx, this updater's earlier scans cannot
	 * have seen that reader's increments (which is OK, because this
	 * grace period need not wait on that reader).
	 */
	smp_mb(); /* E */  /* Pairs with B and C. */

	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);

	/*
	 * Ensure that if the updater misses an __srcu_read_unlock()
	 * increment, that task's next __srcu_read_lock() will see the
	 * above counter update.  Note that both this memory barrier
	 * and the one in srcu_readers_active_idx_check() provide the
	 * guarantee for __srcu_read_lock().
	 */
	smp_mb(); /* D */  /* Pairs with C. */
}

/*
 * If SRCU is likely idle, return true, otherwise return false.
 *
 * Note that it is OK for several current from-idle requests for a new
 * grace period from idle to specify expediting because they will all end
 * up requesting the same grace period anyhow.  So no loss.
 *
 * Note also that if any CPU (including the current one) is still invoking
 * callbacks, this function will nevertheless say "idle".  This is not
 * ideal, but the overhead of checking all CPUs' callback lists is even
 * less ideal, especially on large systems.  Furthermore, the wakeup
 * can happen before the callback is fully removed, so we have no choice
 * but to accept this type of error.
 *
 * This function is also subject to counter-wrap errors, but let's face
 * it, if this function was preempted for enough time for the counters
 * to wrap, it really doesn't matter whether or not we expedite the grace
 * period.  The extra overhead of a needlessly expedited grace period is
 * negligible when amortized over that time period, and the extra latency
 * of a needlessly non-expedited grace period is similarly negligible.
 */
static bool srcu_might_be_idle(struct srcu_struct *ssp)
{
	unsigned long curseq;
	unsigned long flags;
	struct srcu_data *sdp;
	unsigned long t;
	unsigned long tlast;

	/* If the local srcu_data structure has callbacks, not idle.  */
	local_irq_save(flags);
	sdp = this_cpu_ptr(ssp->sda);
	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
		local_irq_restore(flags);
		return false; /* Callbacks already present, so not idle. */
	}
	local_irq_restore(flags);

	/*
	 * No local callbacks, so probabilistically probe global state.
	 * Exact information would require acquiring locks, which would
	 * kill scalability, hence the probabilistic nature of the probe.
	 */

	/* First, see if enough time has passed since the last GP. */
	t = ktime_get_mono_fast_ns();
	tlast = READ_ONCE(ssp->srcu_last_gp_end);
	if (exp_holdoff == 0 ||
	    time_in_range_open(t, tlast, tlast + exp_holdoff))
		return false; /* Too soon after last GP. */

	/* Next, check for probable idleness. */
	curseq = rcu_seq_current(&ssp->srcu_gp_seq);
	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
	if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed)))
		return false; /* Grace period in progress, so not idle. */
	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
	if (curseq != rcu_seq_current(&ssp->srcu_gp_seq))
		return false; /* GP # changed, so not idle. */
	return true; /* With reasonable probability, idle! */
}

/*
 * SRCU callback function to leak a callback.
 */
static void srcu_leak_callback(struct rcu_head *rhp)
{
}

/*
 * Enqueue an SRCU callback on the srcu_data structure associated with
 * the current CPU and the specified srcu_struct structure, initiating
 * grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu().  It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_srcu() and the invocation of "func()".
 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
			rcu_callback_t func, bool do_norm)
{
	unsigned long flags;
	int idx;
	bool needexp = false;
	bool needgp = false;
	unsigned long s;
	struct srcu_data *sdp;

	check_init_srcu_struct(ssp);
	if (debug_rcu_head_queue(rhp)) {
		/* Probable double call_srcu(), so leak the callback. */
		WRITE_ONCE(rhp->func, srcu_leak_callback);
		WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
		return;
	}
	rhp->func = func;
	idx = srcu_read_lock(ssp);
	local_irq_save(flags);
	sdp = this_cpu_ptr(ssp->sda);
	spin_lock_rcu_node(sdp);
	rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	s = rcu_seq_snap(&ssp->srcu_gp_seq);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
		sdp->srcu_gp_seq_needed = s;
		needgp = true;
	}
	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
		sdp->srcu_gp_seq_needed_exp = s;
		needexp = true;
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);
	if (needgp)
		srcu_funnel_gp_start(ssp, sdp, s, do_norm);
	else if (needexp)
		srcu_funnel_exp_start(ssp, sdp->mynode, s);
	srcu_read_unlock(ssp, idx);
}

/**
 * call_srcu() - Queue a callback for invocation after an SRCU grace period
 * @ssp: srcu_struct in which to queue the callback
 * @rhp: structure to be used for queueing the SRCU callback.
 * @func: function to be invoked after the SRCU grace period
 *
 * The callback function will be invoked some time after a full SRCU
 * grace period elapses, in other words after all pre-existing SRCU
 * read-side critical sections have completed.  However, the callback
 * function might well execute concurrently with other SRCU read-side
 * critical sections that started after call_srcu() was invoked.  SRCU
 * read-side critical sections are delimited by srcu_read_lock() and
 * srcu_read_unlock(), and may be nested.
 *
 * The callback will be invoked from process context, but must nevertheless
 * be fast and must not block.
 */
void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	__call_srcu(ssp, rhp, func, true);
}
EXPORT_SYMBOL_GPL(call_srcu);

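/*
 * A hedged usage sketch (my_srcu, my_node, my_node_free_cb, and
 * remove_my_node are illustrative, not part of this file).  The rcu_head
 * is embedded in the object being freed, and container_of() recovers the
 * object in the callback:
 *
 *	struct my_node {
 *		struct list_head list;
 *		struct rcu_head rh;
 *	};
 *
 *	static void my_node_free_cb(struct rcu_head *rhp)
 *	{
 *		struct my_node *p = container_of(rhp, struct my_node, rh);
 *
 *		kfree(p);
 *	}
 *
 *	static void remove_my_node(struct my_node *p)
 *	{
 *		list_del_rcu(&p->list);		// unpublish
 *		call_srcu(&my_srcu, &p->rh, my_node_free_cb);
 *	}
 *
 * The same rcu_head must not be passed to call_srcu() again until the
 * previous callback has been invoked.
 */
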
/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
{
	struct rcu_synchronize rcu;

	RCU_LOCKDEP_WARN(lock_is_held(&ssp->dep_map) ||
			 lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	might_sleep();
	check_init_srcu_struct(ssp);
	init_completion(&rcu.completion);
	init_rcu_head_on_stack(&rcu.head);
	__call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);

	/*
	 * Make sure that later code is ordered after the SRCU grace
	 * period.  This pairs with the spin_lock_irq_rcu_node()
	 * in srcu_invoke_callbacks().  Unlike Tree RCU, this is needed
	 * because the current CPU might have been totally uninvolved with
	 * (and thus unordered against) that grace period.
	 */
	smp_mb();
}

/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
	__synchronize_srcu(ssp, rcu_gp_is_normal());
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for the count to drain to zero of both indexes.  To avoid the
 * possible starvation of synchronize_srcu(), it waits for the count of
 * the index=((->srcu_idx & 1) ^ 1) to drain to zero at first,
 * and then flip the srcu_idx and wait for the count of the other index.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU read-side critical section
 * whose beginning preceded the call to synchronize_srcu().  In addition,
 * each CPU having an SRCU read-side critical section that extends beyond
 * the return from synchronize_srcu() is guaranteed to have executed a
 * full memory barrier after the beginning of synchronize_srcu() and before
 * the beginning of that SRCU read-side critical section.  Note that these
 * guarantees include CPUs that are offline, idle, or executing in user mode,
 * as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
 * are the same CPU, but again only if the system has more than one CPU.
 *
 * Of course, these memory-ordering guarantees apply only when
 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
 * passed the same srcu_struct structure.
 *
 * If SRCU is likely idle, expedite the first request.  This semantic
 * was provided by Classic SRCU, and is relied upon by its users, so TREE
 * SRCU must also provide it.  Note that detecting idleness is heuristic
 * and subject to both false positives and negatives.
 */
void synchronize_srcu(struct srcu_struct *ssp)
{
	if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
		synchronize_srcu_expedited(ssp);
	else
		__synchronize_srcu(ssp, true);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);

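/*
 * A hedged updater-side sketch (my_dev, my_config, update_config, and
 * d->mutex are illustrative, not part of this file).  The classic pattern
 * is "publish new, wait, reclaim old":
 *
 *	static void update_config(struct my_dev *d, struct my_config *new)
 *	{
 *		struct my_config *old;
 *
 *		old = rcu_dereference_protected(d->config,
 *						lockdep_is_held(&d->mutex));
 *		rcu_assign_pointer(d->config, new);	// publish new version
 *		synchronize_srcu(&d->srcu);		// wait for old readers
 *		kfree(old);				// no reader can still see it
 *	}
 *
 * Unlike call_srcu(), this blocks the caller, so it must not be invoked
 * from within a read-side critical section on the same srcu_struct.
 */
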
/*
 * Callback function for srcu_barrier() use.
 */
static void srcu_barrier_cb(struct rcu_head *rhp)
{
	struct srcu_data *sdp;
	struct srcu_struct *ssp;

	sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
	ssp = sdp->ssp;
	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
		complete(&ssp->srcu_barrier_completion);
}

/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @ssp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *ssp)
{
	int cpu;
	struct srcu_data *sdp;
	unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq);

	check_init_srcu_struct(ssp);
	mutex_lock(&ssp->srcu_barrier_mutex);
	if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) {
		smp_mb(); /* Force ordering following return. */
		mutex_unlock(&ssp->srcu_barrier_mutex);
		return; /* Someone else did our work for us. */
	}
	rcu_seq_start(&ssp->srcu_barrier_seq);
	init_completion(&ssp->srcu_barrier_completion);

	/* Initial count prevents reaching zero until all CBs are posted. */
	atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);

	/*
	 * Each pass through this loop enqueues a callback, but only
	 * on CPUs already having callbacks enqueued.  Note that if
	 * a CPU already has callbacks enqueued, it must have already
	 * registered the need for a future grace period, so all we
	 * need do is enqueue a callback that will use the same
	 * grace period as the last callback already in the queue.
	 */
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		spin_lock_irq_rcu_node(sdp);
		atomic_inc(&ssp->srcu_barrier_cpu_cnt);
		sdp->srcu_barrier_head.func = srcu_barrier_cb;
		debug_rcu_head_queue(&sdp->srcu_barrier_head);
		if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
					   &sdp->srcu_barrier_head)) {
			debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
			atomic_dec(&ssp->srcu_barrier_cpu_cnt);
		}
		spin_unlock_irq_rcu_node(sdp);
	}

	/* Remove the initial count, at which point reaching zero can happen. */
	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
		complete(&ssp->srcu_barrier_completion);
	wait_for_completion(&ssp->srcu_barrier_completion);

	rcu_seq_end(&ssp->srcu_barrier_seq);
	mutex_unlock(&ssp->srcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(srcu_barrier);

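/*
 * A hedged module-exit sketch (my_srcu and unregister_my_hooks are
 * illustrative, not part of this file).  srcu_barrier() only waits for
 * callbacks that have already been posted, so the sources of new
 * call_srcu() invocations must be shut down first:
 *
 *	static void __exit my_module_exit(void)
 *	{
 *		unregister_my_hooks();		// no new call_srcu() after this
 *		srcu_barrier(&my_srcu);		// wait for posted callbacks
 *		cleanup_srcu_struct(&my_srcu);	// needed only for a dynamically
 *						// initialized srcu_struct
 *	}
 *
 * For a statically defined domain in module code, the module notifier at
 * the bottom of this file performs the init/cleanup steps automatically.
 */
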
/**
 * srcu_batches_completed - return batches completed.
 * @ssp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
unsigned long srcu_batches_completed(struct srcu_struct *ssp)
{
	return READ_ONCE(ssp->srcu_idx);
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);

/*
 * Core SRCU state machine.  Push state bits of ->srcu_gp_seq
 * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
 * completed in that state.
 */
static void srcu_advance_state(struct srcu_struct *ssp)
{
	int idx;

	mutex_lock(&ssp->srcu_gp_mutex);

	/*
	 * Because readers might be delayed for an extended period after
	 * fetching ->srcu_idx for their index, at any point in time there
	 * might well be readers using both idx=0 and idx=1.  We therefore
	 * need to wait for readers to clear from both index values before
	 * invoking a callback.
	 *
	 * The load-acquire ensures that we see the accesses performed
	 * by the prior grace period.
	 */
	idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */
	if (idx == SRCU_STATE_IDLE) {
		spin_lock_irq_rcu_node(ssp);
		if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
			WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq));
			spin_unlock_irq_rcu_node(ssp);
			mutex_unlock(&ssp->srcu_gp_mutex);
			return;
		}
		idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
		if (idx == SRCU_STATE_IDLE)
			srcu_gp_start(ssp);
		spin_unlock_irq_rcu_node(ssp);
		if (idx != SRCU_STATE_IDLE) {
			mutex_unlock(&ssp->srcu_gp_mutex);
			return; /* Someone else started the grace period. */
		}
	}

	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
		idx = 1 ^ (ssp->srcu_idx & 1);
		if (!try_check_zero(ssp, idx, 1)) {
			mutex_unlock(&ssp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_flip(ssp);
		spin_lock_irq_rcu_node(ssp);
		rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2);
		spin_unlock_irq_rcu_node(ssp);
	}

	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {

		/*
		 * SRCU read-side critical sections are normally short,
		 * so check at least twice in quick succession after a flip.
		 */
		idx = 1 ^ (ssp->srcu_idx & 1);
		if (!try_check_zero(ssp, idx, 2)) {
			mutex_unlock(&ssp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_gp_end(ssp);  /* Releases ->srcu_gp_mutex. */
	}
}

/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.  Note that needed memory barriers have been executed
 * in this task's context by srcu_readers_active_idx_check().
 */
static void srcu_invoke_callbacks(struct work_struct *work)
{
	bool more;
	struct rcu_cblist ready_cbs;
	struct rcu_head *rhp;
	struct srcu_data *sdp;
	struct srcu_struct *ssp;

	sdp = container_of(work, struct srcu_data, work);

	ssp = sdp->ssp;
	rcu_cblist_init(&ready_cbs);
	spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	if (sdp->srcu_cblist_invoking ||
	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
		spin_unlock_irq_rcu_node(sdp);
		return;  /* Someone else on the job or nothing to do. */
	}

	/* We are on the job!  Extract and invoke ready callbacks. */
	sdp->srcu_cblist_invoking = true;
	rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
	spin_unlock_irq_rcu_node(sdp);
	rhp = rcu_cblist_dequeue(&ready_cbs);
	for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
		debug_rcu_head_unqueue(rhp);
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}

	/*
	 * Update counts, accelerate new callbacks, and if needed,
	 * schedule another round of callback invocation.
	 */
	spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&ssp->srcu_gp_seq));
	sdp->srcu_cblist_invoking = false;
	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
	spin_unlock_irq_rcu_node(sdp);
	if (more)
		srcu_schedule_cbs_sdp(sdp, 0);
}

/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
{
	bool pushgp = true;

	spin_lock_irq_rcu_node(ssp);
	if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
		if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) {
			/* All requests fulfilled, time to go idle. */
			pushgp = false;
		}
	} else if (!rcu_seq_state(ssp->srcu_gp_seq)) {
		/* Outstanding request and no GP.  Start one. */
		srcu_gp_start(ssp);
	}
	spin_unlock_irq_rcu_node(ssp);

	if (pushgp)
		queue_delayed_work(rcu_gp_wq, &ssp->work, delay);
}

/*
 * This is the work-queue function that handles SRCU grace periods.
 */
static void process_srcu(struct work_struct *work)
{
	struct srcu_struct *ssp;

	ssp = container_of(work, struct srcu_struct, work.work);

	srcu_advance_state(ssp);
	srcu_reschedule(ssp, srcu_get_delay(ssp));
}

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *ssp, int *flags,
			     unsigned long *gp_seq)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*gp_seq = rcu_seq_current(&ssp->srcu_gp_seq);
}
EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);

void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
{
	int cpu;
	int idx;
	unsigned long s0 = 0, s1 = 0;

	idx = ssp->srcu_idx & 0x1;
	pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):",
		 tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), idx);
	for_each_possible_cpu(cpu) {
		unsigned long l0, l1;
		unsigned long u0, u1;
		long c0, c1;
		struct srcu_data *sdp;

		sdp = per_cpu_ptr(ssp->sda, cpu);
		u0 = data_race(sdp->srcu_unlock_count[!idx]);
		u1 = data_race(sdp->srcu_unlock_count[idx]);

		/*
		 * Make sure that a lock is always counted if the corresponding
		 * unlock is counted.
		 */
		smp_rmb();

		l0 = data_race(sdp->srcu_lock_count[!idx]);
		l1 = data_race(sdp->srcu_lock_count[idx]);

		c0 = l0 - u0;
		c1 = l1 - u1;
		pr_cont(" %d(%ld,%ld %c)",
			cpu, c0, c1,
			"C."[rcu_segcblist_empty(&sdp->srcu_cblist)]);
		s0 += c0;
		s1 += c1;
	}
	pr_cont(" T(%ld,%ld)\n", s0, s1);
}
EXPORT_SYMBOL_GPL(srcu_torture_stats_print);

static int __init srcu_bootup_announce(void)
{
	pr_info("Hierarchical SRCU implementation.\n");
	if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
		pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
	return 0;
}
early_initcall(srcu_bootup_announce);

void __init srcu_init(void)
{
	struct srcu_struct *ssp;

	srcu_init_done = true;
	while (!list_empty(&srcu_boot_list)) {
		ssp = list_first_entry(&srcu_boot_list, struct srcu_struct,
				       work.work.entry);
		check_init_srcu_struct(ssp);
		list_del_init(&ssp->work.work.entry);
		queue_work(rcu_gp_wq, &ssp->work.work);
	}
}

#ifdef CONFIG_MODULES

/* Initialize any global-scope srcu_struct structures used by this module. */
static int srcu_module_coming(struct module *mod)
{
	int i;
	struct srcu_struct **sspp = mod->srcu_struct_ptrs;
	int ret;

	for (i = 0; i < mod->num_srcu_structs; i++) {
		ret = init_srcu_struct(*(sspp++));
		if (WARN_ON_ONCE(ret))
			return ret;
	}
	return 0;
}

/* Clean up any global-scope srcu_struct structures used by this module. */
static void srcu_module_going(struct module *mod)
{
	int i;
	struct srcu_struct **sspp = mod->srcu_struct_ptrs;

	for (i = 0; i < mod->num_srcu_structs; i++)
		cleanup_srcu_struct(*(sspp++));
}

/* Handle one module, either coming or going. */
static int srcu_module_notify(struct notifier_block *self,
			      unsigned long val, void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		ret = srcu_module_coming(mod);
		break;
	case MODULE_STATE_GOING:
		srcu_module_going(mod);
		break;
	default:
		break;
	}
	return ret;
}

static struct notifier_block srcu_module_nb = {
	.notifier_call = srcu_module_notify,
	.priority = 0,
};

static __init int init_srcu_module_notifier(void)
{
	int ret;

	ret = register_module_notifier(&srcu_module_nb);
	if (ret)
		pr_warn("Failed to register srcu module notifier\n");
	return ret;
}
late_initcall(init_srcu_module_notifier);

#endif /* #ifdef CONFIG_MODULES */