#define pr_fmt(fmt) "%s: " fmt "\n", __func__

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/percpu-refcount.h>

/*
 * Initially, a percpu refcount is just a set of percpu counters; at this
 * stage we don't try to detect the ref hitting 0 - which means that get/put
 * can just increment or decrement the local counter.  Note that the counter
 * on a particular cpu can (and will) wrap - this is fine; when we go to
 * shutdown, the percpu counters will all sum to the correct value.
 *
 * (More precisely: because modular arithmetic is commutative, the sum of all
 * the percpu_count vars will be equal to what it would have been if all the
 * gets and puts were done to a single integer, even if some of the percpu
 * integers overflow or underflow).
 *
 * The real trick to implementing percpu refcounts is shutdown.  We can't
 * detect the ref hitting 0 on every put - this would require global
 * synchronization and defeat the whole purpose of using percpu refs.
 *
 * What we do is require the user to keep track of the initial refcount; we
 * know the ref can't hit 0 before the user drops the initial ref, so as long
 * as we convert to non-percpu mode before the initial ref is dropped,
 * everything works.
 *
 * Converting to non-percpu mode is done with some RCUish stuff in
 * percpu_ref_kill.  Additionally, we need a bias value so that the
 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
 */

#define PERCPU_COUNT_BIAS	(1LU << (BITS_PER_LONG - 1))

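/*
 * A worked example of the bias (purely illustrative, assuming
 * BITS_PER_LONG == 64 so PERCPU_COUNT_BIAS == 1UL << 63):
 * percpu_ref_init() leaves the atomic count at BIAS + 1.  Suppose that by
 * shutdown time CPU0's percpu counter has wrapped to ULONG_MAX - 1 (two
 * more puts than gets) while CPU1's holds 5 (five more gets than puts).
 * Their sum modulo 2^64 is 3, the net number of refs taken beyond the
 * initial one.  atomic_long_add((long)3 - PERCPU_COUNT_BIAS, &ref->count)
 * then leaves the count at 4, and the bias keeps the atomic counter well
 * away from 0 until that single add has folded in every percpu counter.
 */
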
static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);

static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
{
        return (unsigned long __percpu *)
                (ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
}

/**
 * percpu_ref_init - initialize a percpu refcount
 * @ref: percpu_ref to initialize
 * @release: function which will be called when refcount hits 0
 * @gfp: allocation mask to use
 *
 * Initializes the refcount in single atomic counter mode with a refcount of 1;
 * analogous to atomic_long_set(ref, 1).
 *
 * Note that @release must not sleep - it may potentially be called from RCU
 * callback context by percpu_ref_kill().
 */
int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
                    gfp_t gfp)
{
        size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
                             __alignof__(unsigned long));

        atomic_long_set(&ref->count, 1 + PERCPU_COUNT_BIAS);

        ref->percpu_count_ptr = (unsigned long)
                __alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
        if (!ref->percpu_count_ptr)
                return -ENOMEM;

        ref->release = release;
        return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);

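/*
 * A minimal usage sketch of the API above; "struct my_obj" and its helpers
 * are hypothetical and only the percpu_ref_*() calls come from this file:
 *
 *	struct my_obj {
 *		struct percpu_ref ref;
 *	};
 *
 *	static void my_obj_release(struct percpu_ref *ref)
 *	{
 *		struct my_obj *obj = container_of(ref, struct my_obj, ref);
 *
 *		percpu_ref_exit(&obj->ref);
 *		kfree(obj);
 *	}
 *
 *	// setup - the object owns the initial ref
 *	if (percpu_ref_init(&obj->ref, my_obj_release, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	// fast path - gets/puts only touch the local percpu counter
 *	percpu_ref_get(&obj->ref);
 *	percpu_ref_put(&obj->ref);
 *
 *	// shutdown - drop the initial ref; my_obj_release() runs once all
 *	// other refs have been put
 *	percpu_ref_kill(&obj->ref);
 */
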
/**
 * percpu_ref_exit - undo percpu_ref_init()
 * @ref: percpu_ref to exit
 *
 * This function exits @ref.  The caller is responsible for ensuring that
 * @ref is no longer in active use.  The usual places to invoke this
 * function from are the @ref->release() callback or the init failure path
 * where percpu_ref_init() succeeded but other parts of the initialization
 * of the embedding object failed.
 */
void percpu_ref_exit(struct percpu_ref *ref)
{
        unsigned long __percpu *percpu_count = percpu_count_ptr(ref);

        if (percpu_count) {
                free_percpu(percpu_count);
                ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
        }
}
EXPORT_SYMBOL_GPL(percpu_ref_exit);

static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
{
        struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);

        ref->confirm_switch(ref);
        ref->confirm_switch = NULL;
        wake_up_all(&percpu_ref_switch_waitq);

        /* drop ref from percpu_ref_switch_to_atomic() */
        percpu_ref_put(ref);
}

static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
{
        struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
        unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
        unsigned long count = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                count += *per_cpu_ptr(percpu_count, cpu);

        pr_debug("global %ld percpu %ld",
                 atomic_long_read(&ref->count), (long)count);

        /*
         * It's crucial that we sum the percpu counters _before_ adding the sum
         * to &ref->count; since gets could be happening on one cpu while puts
         * happen on another, adding a single cpu's count could cause
         * @ref->count to hit 0 before we've got a consistent value - but the
         * sum of all the counts will be consistent and correct.
         *
         * Subtracting the bias value then has to happen _after_ adding count to
         * &ref->count; we need the bias value to prevent &ref->count from
         * reaching 0 before we add the percpu counts.  But doing it at the same
         * time is equivalent and saves us atomic operations:
         */
        atomic_long_add((long)count - PERCPU_COUNT_BIAS, &ref->count);

        WARN_ONCE(atomic_long_read(&ref->count) <= 0,
                  "percpu ref (%pf) <= 0 (%ld) after switching to atomic",
                  ref->release, atomic_long_read(&ref->count));

        /* @ref is viewed as dead on all CPUs, send out switch confirmation */
        percpu_ref_call_confirm_rcu(rcu);
}

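/*
 * To make the ordering argument above concrete (a purely illustrative
 * two-CPU case, assuming 64-bit longs): with one ref outstanding beyond the
 * initial one, CPU0's counter might hold ULONG_MAX (one more put than get)
 * and CPU1's might hold 2 (two more gets than puts).  Folding CPU0's value
 * into &ref->count on its own would effectively subtract 1 and, without the
 * bias, could drag the atomic counter to 0 before CPU1's gets are counted.
 * Summing first - ULONG_MAX + 2 == 1 modulo 2^64 - and only then doing the
 * single atomic_long_add() of (sum - bias) avoids that window.
 */
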
static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
{
}

static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
                                          percpu_ref_func_t *confirm_switch)
{
        if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC)) {
                /* switching from percpu to atomic */
                ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;

                /*
                 * Non-NULL ->confirm_switch is used to indicate that
                 * switching is in progress.  Use noop one if unspecified.
                 */
                WARN_ON_ONCE(ref->confirm_switch);
                ref->confirm_switch =
                        confirm_switch ?: percpu_ref_noop_confirm_switch;

                percpu_ref_get(ref);	/* put after confirmation */
                call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
        } else if (confirm_switch) {
                /*
                 * Somebody already set ATOMIC.  Switching may still be in
                 * progress.  @confirm_switch must be invoked after the
                 * switching is complete and a full sched RCU grace period
                 * has passed.  Wait synchronously for the previous
                 * switching and schedule @confirm_switch invocation.
                 */
                wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
                ref->confirm_switch = confirm_switch;

                percpu_ref_get(ref);	/* put after confirmation */
                call_rcu_sched(&ref->rcu, percpu_ref_call_confirm_rcu);
        }
}

/**
 * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
 * @ref: percpu_ref to switch to atomic mode
 * @confirm_switch: optional confirmation callback
 *
 * There's no reason to use this function for the usual reference counting.
 * Use percpu_ref_kill[_and_confirm]().
 *
 * Schedule switching of @ref to atomic mode.  All its percpu counts will
 * be collected to the main atomic counter.  On completion, when all CPUs
 * are guaranteed to be in atomic mode, @confirm_switch, which may not
 * block, is invoked.  This function may be invoked concurrently with all
 * the get/put operations and can safely be mixed with kill and reinit
 * operations.
 *
 * This function normally doesn't block and can be called from any context
 * but it may block if @confirm_switch is specified and @ref is already in
 * the process of switching to atomic mode.  In such cases, @confirm_switch
 * will be invoked after the switching is complete.
 *
 * Due to the way percpu_ref is implemented, @confirm_switch will be called
 * after at least one full sched RCU grace period has passed but this is an
 * implementation detail and must not be depended upon.
 */
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
                                 percpu_ref_func_t *confirm_switch)
{
        __percpu_ref_switch_to_atomic(ref, confirm_switch);
}

/**
 * percpu_ref_reinit - re-initialize a percpu refcount
 * @ref: percpu_ref to re-initialize
 *
 * Re-initialize @ref so that it's in the same state as when it finished
 * percpu_ref_init().  @ref must have been initialized successfully, killed
 * and reached 0 but not exited.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_reinit(struct percpu_ref *ref)
{
        unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
        int cpu;

        BUG_ON(!percpu_count);
        WARN_ON_ONCE(!percpu_ref_is_zero(ref));

        atomic_long_set(&ref->count, 1 + PERCPU_COUNT_BIAS);

        /*
         * Restore per-cpu operation.  smp_store_release() is paired with
         * smp_read_barrier_depends() in __ref_is_percpu() and guarantees
         * that the zeroing is visible to all percpu accesses which can see
         * the following __PERCPU_REF_ATOMIC_DEAD clearing.
         */
        for_each_possible_cpu(cpu)
                *per_cpu_ptr(percpu_count, cpu) = 0;

        smp_store_release(&ref->percpu_count_ptr,
                          ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
}
EXPORT_SYMBOL_GPL(percpu_ref_reinit);
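
/*
 * A hedged sketch of the kill -> zero -> reinit cycle described above.  It
 * assumes a variant of the earlier hypothetical my_obj whose release
 * callback signals a completion (obj->ref_done) instead of freeing the
 * object:
 *
 *	percpu_ref_kill(&obj->ref);
 *	wait_for_completion(&obj->ref_done);	// release ran: count hit zero
 *	percpu_ref_reinit(&obj->ref);		// percpu mode again, count == 1
 */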

/**
 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
 * @ref: percpu_ref to kill
 * @confirm_kill: optional confirmation callback
 *
 * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
 * @confirm_kill is not NULL.  @confirm_kill, which may not block, will be
 * called after @ref is seen as dead from all CPUs at which point all
 * further invocations of percpu_ref_tryget_live() will fail.  See
 * percpu_ref_tryget_live() for details.
 *
 * This function normally doesn't block and can be called from any context
 * but it may block if @confirm_kill is specified and @ref is already in
 * the process of switching to atomic mode by percpu_ref_switch_to_atomic().
 *
 * Due to the way percpu_ref is implemented, @confirm_kill will be called
 * after at least one full sched RCU grace period has passed but this is an
 * implementation detail and must not be depended upon.
 */
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
                                 percpu_ref_func_t *confirm_kill)
{
        WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
                  "%s called more than once on %pf!", __func__, ref->release);

        ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
        __percpu_ref_switch_to_atomic(ref, confirm_kill);
        percpu_ref_put(ref);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
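
/*
 * A hedged sketch of percpu_ref_kill_and_confirm(); the names below are
 * hypothetical and only the percpu_ref calls are from this API:
 *
 *	static void my_obj_confirm_kill(struct percpu_ref *ref)
 *	{
 *		// By the time this runs, @ref is seen as dead on all CPUs
 *		// and every new percpu_ref_tryget_live() fails, so no new
 *		// users can slip in.  Must not block.
 *	}
 *
 *	percpu_ref_kill_and_confirm(&obj->ref, my_obj_confirm_kill);
 */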