// SPDX-License-Identifier: GPL-2.0
/*
 * Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/debugobjects.h>

#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
static DEFINE_SPINLOCK(percpu_counters_lock);
#endif

#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER

static const struct debug_obj_descr percpu_counter_debug_descr;

static bool percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
{
	struct percpu_counter *fbc = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		percpu_counter_destroy(fbc);
		debug_object_free(fbc, &percpu_counter_debug_descr);
		return true;
	default:
		return false;
	}
}

static const struct debug_obj_descr percpu_counter_debug_descr = {
	.name = "percpu_counter",
	.fixup_free = percpu_counter_fixup_free,
};

static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{
	debug_object_init(fbc, &percpu_counter_debug_descr);
	debug_object_activate(fbc, &percpu_counter_debug_descr);
}

static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{
	debug_object_deactivate(fbc, &percpu_counter_debug_descr);
	debug_object_free(fbc, &percpu_counter_debug_descr);
}

#else /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{ }
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{ }
#endif /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */

void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	for_each_possible_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		*pcount = 0;
	}
	fbc->count = amount;
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_set);
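/*
 * Usage sketch (illustrative, not part of this kernel file): because this
 * zeroes every possible CPU's slot and stores the new value centrally, it
 * is the way to overwrite a live, already-initialized counter:
 *
 *	percpu_counter_set(&fbc, 0);	// reset an existing counter
 */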

/*
 * This function is both preempt and irq safe. The former is due to explicit
 * preemption disable. The latter is guaranteed by the fact that the slow path
 * is explicitly protected by an irq-safe spinlock whereas the fast path uses
 * this_cpu_add(), which is irq-safe by definition. Hence there is no need to
 * muck with irq state before calling this one.
 */
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;

	preempt_disable();
	count = __this_cpu_read(*fbc->counters) + amount;
	if (abs(count) >= batch) {
		unsigned long flags;

		raw_spin_lock_irqsave(&fbc->lock, flags);
		fbc->count += count;
		__this_cpu_sub(*fbc->counters, count - amount);
		raw_spin_unlock_irqrestore(&fbc->lock, flags);
	} else {
		this_cpu_add(*fbc->counters, amount);
	}
	preempt_enable();
}
EXPORT_SYMBOL(percpu_counter_add_batch);
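/*
 * Usage sketch (assumed caller, not part of this file; nr_dirty and
 * MY_BATCH are hypothetical): updates accumulate in the local per-CPU
 * slot until its magnitude reaches the batch, at which point the whole
 * delta is folded into fbc->count under the lock.
 *
 *	static struct percpu_counter nr_dirty;
 *
 *	percpu_counter_add_batch(&nr_dirty, 1, MY_BATCH);
 *	// percpu_counter_add() is the same call with percpu_counter_batch.
 */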

/*
 * For a percpu_counter with a big batch, the deviation of its count can
 * be big, and there may be a requirement to reduce that deviation, e.g.
 * when the counter's batch is decreased at runtime to get better accuracy.
 * That can be achieved by running this sync function on each CPU.
 */
void percpu_counter_sync(struct percpu_counter *fbc)
{
	unsigned long flags;
	s64 count;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	count = __this_cpu_read(*fbc->counters);
	fbc->count += count;
	__this_cpu_sub(*fbc->counters, count);
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_sync);
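/*
 * Sketch (assumed usage; my_sync_one is hypothetical): to cut the
 * deviation after lowering a counter's batch at runtime, drive the sync
 * on every CPU, e.g. via on_each_cpu(), which is safe here because the
 * flush takes an irq-safe lock:
 *
 *	static void my_sync_one(void *arg)
 *	{
 *		percpu_counter_sync(arg);
 *	}
 *
 *	on_each_cpu(my_sync_one, &fbc, 1);	// wait == 1: all CPUs done
 */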

static s64 __percpu_counter_sum_mask(struct percpu_counter *fbc,
				     const struct cpumask *cpu_mask)
{
	s64 ret;
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	ret = fbc->count;
	for_each_cpu(cpu, cpu_mask) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		ret += *pcount;
	}
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
	return ret;
}

/*
 * Add up all the per-cpu counts, return the result. This is a more accurate
 * but much slower version of percpu_counter_read_positive().
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum_mask(fbc, cpu_online_mask);
}
EXPORT_SYMBOL(__percpu_counter_sum);

/*
 * This is a slower version of percpu_counter_sum(), as it traverses all
 * possible CPUs. Use it only where accurate data is needed in the presence
 * of CPUs getting offlined.
 */
s64 percpu_counter_sum_all(struct percpu_counter *fbc)
{
	return __percpu_counter_sum_mask(fbc, cpu_possible_mask);
}
EXPORT_SYMBOL(percpu_counter_sum_all);
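/*
 * Contrast (explanatory note, not from this file): the hotplug-dead
 * callback below folds an offlined CPU's slot into fbc->count, so the
 * online-mask sum is normally enough; percpu_counter_sum_all() also reads
 * the slots of currently-offline CPUs for callers racing with offlining:
 *
 *	s64 fast  = percpu_counter_sum(&fbc);		// online CPUs
 *	s64 exact = percpu_counter_sum_all(&fbc);	// possible CPUs
 */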

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
			  struct lock_class_key *key)
{
	unsigned long flags __maybe_unused;

	raw_spin_lock_init(&fbc->lock);
	lockdep_set_class(&fbc->lock, key);
	fbc->count = amount;
	fbc->counters = alloc_percpu_gfp(s32, gfp);
	if (!fbc->counters)
		return -ENOMEM;

	debug_percpu_counter_activate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	INIT_LIST_HEAD(&fbc->list);
	spin_lock_irqsave(&percpu_counters_lock, flags);
	list_add(&fbc->list, &percpu_counters);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
	return 0;
}
EXPORT_SYMBOL(__percpu_counter_init);

void percpu_counter_destroy(struct percpu_counter *fbc)
{
	unsigned long flags __maybe_unused;

	if (!fbc->counters)
		return;

	debug_percpu_counter_deactivate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	spin_lock_irqsave(&percpu_counters_lock, flags);
	list_del(&fbc->list);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
	free_percpu(fbc->counters);
	fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);
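/*
 * Lifecycle sketch (illustrative caller, not part of this file): users go
 * through the percpu_counter_init() wrapper, which supplies the lockdep
 * class key and lands in __percpu_counter_init() above.
 *
 *	struct percpu_counter events;
 *
 *	if (percpu_counter_init(&events, 0, GFP_KERNEL))
 *		return -ENOMEM;
 *	percpu_counter_add(&events, 1);
 *	pr_info("events ~ %lld\n", percpu_counter_read(&events));
 *	percpu_counter_destroy(&events);
 */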

int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);

static int compute_batch_value(unsigned int cpu)
{
	int nr = num_online_cpus();

	percpu_counter_batch = max(32, nr*2);
	return 0;
}
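/*
 * Worked values (explanatory note): with the max(32, nr*2) rule above,
 * 4 online CPUs keep the floor of 32, while 64 online CPUs yield a batch
 * of 128, trading larger per-CPU drift for fewer fbc->lock acquisitions.
 */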

static int percpu_counter_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	struct percpu_counter *fbc;

	compute_batch_value(cpu);

	spin_lock_irq(&percpu_counters_lock);
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;

		raw_spin_lock(&fbc->lock);
		pcount = per_cpu_ptr(fbc->counters, cpu);
		fbc->count += *pcount;
		*pcount = 0;
		raw_spin_unlock(&fbc->lock);
	}
	spin_unlock_irq(&percpu_counters_lock);
#endif
	return 0;
}

/*
 * Compare counter against given value.
 * Return 1 if greater, 0 if equal and -1 if less.
 */
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	s64 count;

	count = percpu_counter_read(fbc);
	/* Check to see if rough count will be sufficient for comparison */
	if (abs(count - rhs) > (batch * num_online_cpus())) {
		if (count > rhs)
			return 1;
		else
			return -1;
	}
	/* Need to use precise count */
	count = percpu_counter_sum(fbc);
	if (count > rhs)
		return 1;
	else if (count < rhs)
		return -1;
	else
		return 0;
}
EXPORT_SYMBOL(__percpu_counter_compare);
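/*
 * Usage sketch (hypothetical caller; nr_used and limit are illustrative):
 * the rough per-CPU read decides cheaply whenever |count - rhs| exceeds
 * the worst-case drift of batch * num_online_cpus(); only closer calls
 * pay for the precise sum. percpu_counter_compare() is the wrapper that
 * passes percpu_counter_batch for the batch argument.
 *
 *	if (percpu_counter_compare(&nr_used, limit) >= 0)
 *		return -ENOSPC;	// at or over the limit
 */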

static int __init percpu_counter_startup(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "lib/percpu_cnt:online",
				compute_batch_value, NULL);
	WARN_ON(ret < 0);
	ret = cpuhp_setup_state_nocalls(CPUHP_PERCPU_CNT_DEAD,
					"lib/percpu_cnt:dead", NULL,
					percpu_counter_cpu_dead);
	WARN_ON(ret < 0);
	return 0;
}
module_init(percpu_counter_startup);