1 // SPDX-License-Identifier: GPL-2.0-only
2 #define pr_fmt(fmt) "%s: " fmt "\n", __func__
3
4 #include <linux/kernel.h>
5 #include <linux/sched.h>
6 #include <linux/wait.h>
7 #include <linux/percpu-refcount.h>
8
9 /*
10  * Initially, a percpu refcount is just a set of percpu counters. In this mode
11  * we don't try to detect the ref hitting 0 - which means that get/put can just
12  * increment or decrement the local counter. Note that the counter on a
13  * particular cpu can (and will) wrap - this is fine; when we go to shutdown,
14  * the percpu counters will all sum to the correct value.
15 *
16 * (More precisely: because modular arithmetic is commutative the sum of all the
17 * percpu_count vars will be equal to what it would have been if all the gets
18 * and puts were done to a single integer, even if some of the percpu integers
19 * overflow or underflow).
20 *
21 * The real trick to implementing percpu refcounts is shutdown. We can't detect
22 * the ref hitting 0 on every put - this would require global synchronization
23 * and defeat the whole purpose of using percpu refs.
24 *
25 * What we do is require the user to keep track of the initial refcount; we know
26 * the ref can't hit 0 before the user drops the initial ref, so as long as we
27  * convert to non-percpu mode before the initial ref is dropped, everything
28 * works.
29 *
30  * Converting to non-percpu mode is done with some RCUish stuff in
31 * percpu_ref_kill. Additionally, we need a bias value so that the
32 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
33 */
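
/*
 * Illustrative sketch (hypothetical helper, not used by anything below):
 * why it is fine for an individual percpu counter to wrap.  Using 8-bit
 * counters for brevity, 300 gets land on one cpu and 300 puts on another;
 * each counter wraps, yet their modular sum still equals the true net
 * reference delta (0).
 */
static inline bool percpu_ref_wrap_example(void)
{
	u8 gets = 0, puts = 0;
	int i;

	for (i = 0; i < 300; i++)
		gets++;				/* wraps past 255 to 44 */
	for (i = 0; i < 300; i++)
		puts--;				/* wraps below 0 to 212 */

	return (u8)(gets + puts) == 0;		/* 44 + 212 == 0 (mod 256) */
}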
34
35 #define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 1))
36
37 static DEFINE_SPINLOCK(percpu_ref_switch_lock);
38 static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);
39
40 static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
41 {
42 return (unsigned long __percpu *)
43 (ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
44 }
45
46 /**
47 * percpu_ref_init - initialize a percpu refcount
48 * @ref: percpu_ref to initialize
49 * @release: function which will be called when refcount hits 0
50 * @flags: PERCPU_REF_INIT_* flags
51 * @gfp: allocation mask to use
52 *
53 * Initializes @ref. @ref starts out in percpu mode with a refcount of 1 unless
54 * @flags contains PERCPU_REF_INIT_ATOMIC or PERCPU_REF_INIT_DEAD. These flags
55 * change the start state to atomic with the latter setting the initial refcount
56 * to 0. See the definitions of PERCPU_REF_INIT_* flags for flag behaviors.
57 *
58 * Note that @release must not sleep - it may potentially be called from RCU
59 * callback context by percpu_ref_kill().
60 */
61 int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
62 unsigned int flags, gfp_t gfp)
63 {
64 size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
65 __alignof__(unsigned long));
66 unsigned long start_count = 0;
67
68 ref->percpu_count_ptr = (unsigned long)
69 __alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
70 if (!ref->percpu_count_ptr)
71 return -ENOMEM;
72
73 ref->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
74 ref->allow_reinit = flags & PERCPU_REF_ALLOW_REINIT;
75
76 if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD)) {
77 ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
78 ref->allow_reinit = true;
79 } else {
80 start_count += PERCPU_COUNT_BIAS;
81 }
82
83 if (flags & PERCPU_REF_INIT_DEAD)
84 ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
85 else
86 start_count++;
87
88 atomic_long_set(&ref->count, start_count);
89
90 ref->release = release;
91 ref->confirm_switch = NULL;
92 return 0;
93 }
94 EXPORT_SYMBOL_GPL(percpu_ref_init);
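
/*
 * Usage sketch with a hypothetical embedding object "foo" (assumes
 * <linux/slab.h> for kzalloc()/kfree()).  The release callback must not
 * sleep; it tears down the ref and frees the embedding object.
 */
struct foo {
	struct percpu_ref ref;
	/* ... payload ... */
};

static void foo_release(struct percpu_ref *ref)
{
	struct foo *foo = container_of(ref, struct foo, ref);

	percpu_ref_exit(&foo->ref);	/* see percpu_ref_exit() below */
	kfree(foo);
}

static struct foo *foo_create(gfp_t gfp)
{
	struct foo *foo = kzalloc(sizeof(*foo), gfp);

	if (!foo)
		return NULL;

	/* starts in percpu mode, holding the initial reference */
	if (percpu_ref_init(&foo->ref, foo_release, 0, gfp)) {
		kfree(foo);
		return NULL;
	}
	return foo;
}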
95
96 /**
97 * percpu_ref_exit - undo percpu_ref_init()
98 * @ref: percpu_ref to exit
99 *
100 * This function exits @ref. The caller is responsible for ensuring that
101 * @ref is no longer in active use. The usual places to invoke this
102  * function are the @ref->release() callback or an init failure path
103 * where percpu_ref_init() succeeded but other parts of the initialization
104 * of the embedding object failed.
105 */
106 void percpu_ref_exit(struct percpu_ref *ref)
107 {
108 unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
109
110 if (percpu_count) {
111 /* non-NULL confirm_switch indicates switching in progress */
112 WARN_ON_ONCE(ref->confirm_switch);
113 free_percpu(percpu_count);
114 ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
115 }
116 }
117 EXPORT_SYMBOL_GPL(percpu_ref_exit);
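
/*
 * Usage sketch for the init-failure case: percpu_ref_init() on the
 * hypothetical "foo" object succeeded, but a later (equally hypothetical)
 * setup step failed, so the ref is torn down before it was ever used.
 */
static int foo_setup(struct foo *foo)
{
	int err;

	err = percpu_ref_init(&foo->ref, foo_release, 0, GFP_KERNEL);
	if (err)
		return err;

	err = foo_setup_payload(foo);		/* hypothetical later step */
	if (err) {
		percpu_ref_exit(&foo->ref);	/* undo percpu_ref_init() */
		return err;
	}

	return 0;
}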
118
119 static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
120 {
121 struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
122
123 ref->confirm_switch(ref);
124 ref->confirm_switch = NULL;
125 wake_up_all(&percpu_ref_switch_waitq);
126
127 if (!ref->allow_reinit)
128 percpu_ref_exit(ref);
129
130 /* drop ref from percpu_ref_switch_to_atomic() */
131 percpu_ref_put(ref);
132 }
133
134 static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
135 {
136 struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
137 unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
138 unsigned long count = 0;
139 int cpu;
140
141 for_each_possible_cpu(cpu)
142 count += *per_cpu_ptr(percpu_count, cpu);
143
144 pr_debug("global %ld percpu %ld",
145 atomic_long_read(&ref->count), (long)count);
146
147 /*
148 * It's crucial that we sum the percpu counters _before_ adding the sum
149 * to &ref->count; since gets could be happening on one cpu while puts
150 * happen on another, adding a single cpu's count could cause
151 * @ref->count to hit 0 before we've got a consistent value - but the
152 * sum of all the counts will be consistent and correct.
153 *
154 * Subtracting the bias value then has to happen _after_ adding count to
155 * &ref->count; we need the bias value to prevent &ref->count from
156 * reaching 0 before we add the percpu counts. But doing it at the same
157 * time is equivalent and saves us atomic operations:
158 */
159 atomic_long_add((long)count - PERCPU_COUNT_BIAS, &ref->count);
160
161 WARN_ONCE(atomic_long_read(&ref->count) <= 0,
162 "percpu ref (%ps) <= 0 (%ld) after switching to atomic",
163 ref->release, atomic_long_read(&ref->count));
164
165 /* @ref is viewed as dead on all CPUs, send out switch confirmation */
166 percpu_ref_call_confirm_rcu(rcu);
167 }
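
/*
 * Worked example for the combined add in percpu_ref_switch_to_atomic_rcu()
 * above (illustrative, small numbers): suppose the atomic counter holds
 * PERCPU_COUNT_BIAS + 1 (the bias plus the initial ref) and the percpu
 * counters sum to 3 (three more gets than puts landed on the percpu side).
 * A single atomic_long_add(3 - PERCPU_COUNT_BIAS) leaves
 * BIAS + 1 + 3 - BIAS = 4, the true number of live references, while the
 * bias keeps &ref->count from ever reading 0 before the percpu sum has
 * been folded in.
 */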
168
169 static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
170 {
171 }
172
173 static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
174 percpu_ref_func_t *confirm_switch)
175 {
176 if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) {
177 if (confirm_switch)
178 confirm_switch(ref);
179 return;
180 }
181
182 /* switching from percpu to atomic */
183 ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
184
185 /*
186 * Non-NULL ->confirm_switch is used to indicate that switching is
187  * in progress. Use the noop one if unspecified.
188 */
189 ref->confirm_switch = confirm_switch ?: percpu_ref_noop_confirm_switch;
190
191 percpu_ref_get(ref); /* put after confirmation */
192 call_rcu(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
193 }
194
195 static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
196 {
197 unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
198 int cpu;
199
200 BUG_ON(!percpu_count);
201
202 if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
203 return;
204
205 if (WARN_ON_ONCE(!ref->allow_reinit))
206 return;
207
208 atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);
209
210 /*
211 * Restore per-cpu operation. smp_store_release() is paired
212 * with READ_ONCE() in __ref_is_percpu() and guarantees that the
213 * zeroing is visible to all percpu accesses which can see the
214 * following __PERCPU_REF_ATOMIC clearing.
215 */
216 for_each_possible_cpu(cpu)
217 *per_cpu_ptr(percpu_count, cpu) = 0;
218
219 smp_store_release(&ref->percpu_count_ptr,
220 ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
221 }
222
223 static void __percpu_ref_switch_mode(struct percpu_ref *ref,
224 percpu_ref_func_t *confirm_switch)
225 {
226 lockdep_assert_held(&percpu_ref_switch_lock);
227
228 /*
229 * If the previous ATOMIC switching hasn't finished yet, wait for
230 * its completion. If the caller ensures that ATOMIC switching
231 * isn't in progress, this function can be called from any context.
232 */
233 wait_event_lock_irq(percpu_ref_switch_waitq, !ref->confirm_switch,
234 percpu_ref_switch_lock);
235
236 if (ref->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
237 __percpu_ref_switch_to_atomic(ref, confirm_switch);
238 else
239 __percpu_ref_switch_to_percpu(ref);
240 }
241
242 /**
243 * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
244 * @ref: percpu_ref to switch to atomic mode
245 * @confirm_switch: optional confirmation callback
246 *
247 * There's no reason to use this function for the usual reference counting.
248 * Use percpu_ref_kill[_and_confirm]().
249 *
250 * Schedule switching of @ref to atomic mode. All its percpu counts will
251 * be collected to the main atomic counter. On completion, when all CPUs
252  * are guaranteed to be in atomic mode, @confirm_switch, which may not
253 * block, is invoked. This function may be invoked concurrently with all
254 * the get/put operations and can safely be mixed with kill and reinit
255 * operations. Note that @ref will stay in atomic mode across kill/reinit
256 * cycles until percpu_ref_switch_to_percpu() is called.
257 *
258 * This function may block if @ref is in the process of switching to atomic
259 * mode. If the caller ensures that @ref is not in the process of
260 * switching to atomic mode, this function can be called from any context.
261 */
262 void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
263 percpu_ref_func_t *confirm_switch)
264 {
265 unsigned long flags;
266
267 spin_lock_irqsave(&percpu_ref_switch_lock, flags);
268
269 ref->force_atomic = true;
270 __percpu_ref_switch_mode(ref, confirm_switch);
271
272 spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
273 }
274 EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic);
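
/*
 * Usage sketch: switching the hypothetical "foo" object to atomic mode and
 * waiting for confirmation (assumes <linux/completion.h>).  @confirm_switch
 * may not block, so it only signals a completion and the caller sleeps;
 * this is essentially what percpu_ref_switch_to_atomic_sync() below does
 * via the internal switch waitqueue.
 */
static DECLARE_COMPLETION(foo_atomic_switched);

static void foo_confirm_atomic(struct percpu_ref *ref)
{
	complete(&foo_atomic_switched);		/* must not block */
}

static void foo_enter_atomic(struct foo *foo)
{
	percpu_ref_switch_to_atomic(&foo->ref, foo_confirm_atomic);
	wait_for_completion(&foo_atomic_switched);
}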
275
276 /**
277 * percpu_ref_switch_to_atomic_sync - switch a percpu_ref to atomic mode
278 * @ref: percpu_ref to switch to atomic mode
279 *
280 * Schedule switching the ref to atomic mode, and wait for the
281 * switch to complete. Caller must ensure that no other thread
282 * will switch back to percpu mode.
283 */
284 void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref)
285 {
286 percpu_ref_switch_to_atomic(ref, NULL);
287 wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
288 }
289 EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic_sync);
290
291 /**
292 * percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
293 * @ref: percpu_ref to switch to percpu mode
294 *
295 * There's no reason to use this function for the usual reference counting.
296 * To re-use an expired ref, use percpu_ref_reinit().
297 *
298 * Switch @ref to percpu mode. This function may be invoked concurrently
299 * with all the get/put operations and can safely be mixed with kill and
300 * reinit operations. This function reverses the sticky atomic state set
301 * by PERCPU_REF_INIT_ATOMIC or percpu_ref_switch_to_atomic(). If @ref is
302 * dying or dead, the actual switching takes place on the following
303 * percpu_ref_reinit().
304 *
305 * This function may block if @ref is in the process of switching to atomic
306 * mode. If the caller ensures that @ref is not in the process of
307 * switching to atomic mode, this function can be called from any context.
308 */
309 void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
310 {
311 unsigned long flags;
312
313 spin_lock_irqsave(&percpu_ref_switch_lock, flags);
314
315 ref->force_atomic = false;
316 __percpu_ref_switch_mode(ref, NULL);
317
318 spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
319 }
320 EXPORT_SYMBOL_GPL(percpu_ref_switch_to_percpu);
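
/*
 * Usage sketch: a temporary round trip through atomic mode on the
 * hypothetical "foo" object, e.g. so the exact reference count can be
 * observed, followed by a switch back to percpu mode.  May block; see the
 * context requirements documented above.
 */
static void foo_quiesce_and_resume(struct foo *foo)
{
	percpu_ref_switch_to_atomic_sync(&foo->ref);

	/* gets/puts now hit ->count directly; e.g. percpu_ref_is_zero() is exact */

	percpu_ref_switch_to_percpu(&foo->ref);
}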
321
322 /**
323 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
324 * @ref: percpu_ref to kill
325 * @confirm_kill: optional confirmation callback
326 *
327 * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
328 * @confirm_kill is not NULL. @confirm_kill, which may not block, will be
329 * called after @ref is seen as dead from all CPUs at which point all
330 * further invocations of percpu_ref_tryget_live() will fail. See
331 * percpu_ref_tryget_live() for details.
332 *
333 * This function normally doesn't block and can be called from any context
334 * but it may block if @confirm_kill is specified and @ref is in the
335 * process of switching to atomic mode by percpu_ref_switch_to_atomic().
336 *
337 * There are no implied RCU grace periods between kill and release.
338 */
339 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
340 percpu_ref_func_t *confirm_kill)
341 {
342 unsigned long flags;
343
344 spin_lock_irqsave(&percpu_ref_switch_lock, flags);
345
346 WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
347 "%s called more than once on %ps!", __func__, ref->release);
348
349 ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
350 __percpu_ref_switch_mode(ref, confirm_kill);
351 percpu_ref_put(ref);
352
353 spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
354 }
355 EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
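
/*
 * Usage sketch: shutting down the hypothetical "foo" object.
 * percpu_ref_kill() is percpu_ref_kill_and_confirm(ref, NULL); it marks
 * the ref dead and drops the initial reference taken by percpu_ref_init().
 */
static void foo_shutdown(struct foo *foo)
{
	percpu_ref_kill(&foo->ref);

	/*
	 * From here on percpu_ref_tryget_live() fails for new users;
	 * foo_release() runs once the remaining references are put.
	 */
}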
356
357 /**
358 * percpu_ref_reinit - re-initialize a percpu refcount
359  * @ref: percpu_ref to re-initialize
360 *
361 * Re-initialize @ref so that it's in the same state as when it finished
362  * percpu_ref_init(), ignoring %PERCPU_REF_INIT_DEAD. @ref must have been
363 * initialized successfully and reached 0 but not exited.
364 *
365 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
366 * this function is in progress.
367 */
368 void percpu_ref_reinit(struct percpu_ref *ref)
369 {
370 WARN_ON_ONCE(!percpu_ref_is_zero(ref));
371
372 percpu_ref_resurrect(ref);
373 }
374 EXPORT_SYMBOL_GPL(percpu_ref_reinit);
375
376 /**
377 * percpu_ref_resurrect - modify a percpu refcount from dead to live
378  * @ref: percpu_ref to resurrect
379 *
380 * Modify @ref so that it's in the same state as before percpu_ref_kill() was
381 * called. @ref must be dead but must not yet have exited.
382 *
383 * If @ref->release() frees @ref then the caller is responsible for
384 * guaranteeing that @ref->release() does not get called while this
385 * function is in progress.
386 *
387 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
388 * this function is in progress.
389 */
390 void percpu_ref_resurrect(struct percpu_ref *ref)
391 {
392 unsigned long __percpu *percpu_count;
393 unsigned long flags;
394
395 spin_lock_irqsave(&percpu_ref_switch_lock, flags);
396
397 WARN_ON_ONCE(!(ref->percpu_count_ptr & __PERCPU_REF_DEAD));
398 WARN_ON_ONCE(__ref_is_percpu(ref, &percpu_count));
399
400 ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
401 percpu_ref_get(ref);
402 __percpu_ref_switch_mode(ref, NULL);
403
404 spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
405 }
406 EXPORT_SYMBOL_GPL(percpu_ref_resurrect);
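
/*
 * Usage sketch: recycling the hypothetical "foo" object's ref after a kill.
 * This requires that the ref was set up with PERCPU_REF_ALLOW_REINIT (or
 * started atomic/dead) so the percpu counters still exist, unlike the plain
 * foo_create() sketch above, and that the count has already drained to zero.
 */
static void foo_restart(struct foo *foo)
{
	percpu_ref_reinit(&foo->ref);	/* alive again, refcount back to 1 */
}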