/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */
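
/*
 * Typical usage (an illustrative sketch, not part of this header;
 * "nr_things" is a hypothetical counter name):
 *
 *	struct percpu_counter nr_things;
 *
 *	if (percpu_counter_init(&nr_things, 0, GFP_KERNEL))
 *		return -ENOMEM;
 *	percpu_counter_inc(&nr_things);
 *	...
 *	percpu_counter_dec(&nr_things);
 *	percpu_counter_destroy(&nr_things);
 */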

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>

/* percpu_counter batch for local add or sub */
#define PERCPU_COUNTER_LOCAL_BATCH	INT_MAX

#ifdef CONFIG_SMP

struct percpu_counter {
	raw_spinlock_t lock;
	s64 count;
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;
};

extern int percpu_counter_batch;

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
			  struct lock_class_key *key);

#define percpu_counter_init(fbc, value, gfp)			\
	({							\
		static struct lock_class_key __key;		\
								\
		__percpu_counter_init(fbc, value, gfp, &__key);	\
	})

void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
			      s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
void percpu_counter_sync(struct percpu_counter *fbc);

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
}

static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}
/*
 * With percpu_counter_add_local() and percpu_counter_sub_local(), counts
 * are accumulated in the local per-CPU counter and are not folded into
 * fbc->count until the local count overflows PERCPU_COUNTER_LOCAL_BATCH.
 * This makes counter writes efficient.
 * But percpu_counter_sum(), instead of percpu_counter_read(), needs to be
 * used to add up the counts from each CPU to account for all the local
 * counts. So percpu_counter_add_local() and percpu_counter_sub_local()
 * should be used when a counter is updated frequently and read rarely,
 * as in the sketch below.
 */
static inline void
percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_batch(fbc, amount, PERCPU_COUNTER_LOCAL_BATCH);
}
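
/*
 * Usage sketch (illustrative only; "example_events" and the helpers are
 * hypothetical, not defined by this header): a write-mostly event counter
 * pairs the cheap local add with a full sum on the rare read side.
 *
 *	static struct percpu_counter example_events;
 *
 *	static void example_hit(void)
 *	{
 *		percpu_counter_add_local(&example_events, 1);
 *	}
 *
 *	static s64 example_total(void)
 *	{
 *		return percpu_counter_sum(&example_events);
 *	}
 */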

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	s64 ret = __percpu_counter_sum(fbc);
	return ret < 0 ? 0 : ret;
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * It is possible for percpu_counter_read() to return a small negative
 * number for a counter that should never be negative: per-CPU deltas that
 * have not yet been folded into fbc->count can make the cached value
 * transiently undershoot the true sum.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	/* Prevent reloads of fbc->count */
	s64 ret = READ_ONCE(fbc->count);

	if (ret >= 0)
		return ret;
	return 0;
}
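
/*
 * Worked illustration (hypothetical numbers): if the true value is 0 but
 * one CPU still holds an unflushed per-CPU delta of +1, fbc->count may be
 * -1. percpu_counter_read() then reports -1, percpu_counter_read_positive()
 * clamps the read to 0, and percpu_counter_sum() folds the deltas back to 0.
 */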

static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
	return (fbc->counters != NULL);
}

#else /* !CONFIG_SMP */

struct percpu_counter {
	s64 count;
};

static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
				      gfp_t gfp)
{
	fbc->count = amount;
	return 0;
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	if (fbc->count > rhs)
		return 1;
	else if (fbc->count < rhs)
		return -1;
	else
		return 0;
}

static inline int
__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	return percpu_counter_compare(fbc, rhs);
}

static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}

/* non-SMP percpu_counter_add_local() is the same as percpu_counter_add() */
static inline void
percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, amount);
}

static inline void
percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	percpu_counter_add(fbc, amount);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * percpu_counter is intended to track positive numbers. In the UP case the
 * number should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}

static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
	return true;
}

static inline void percpu_counter_sync(struct percpu_counter *fbc)
{
}
#endif /* CONFIG_SMP */

static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}

static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}

static inline void
percpu_counter_sub_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_local(fbc, -amount);
}

#endif /* _LINUX_PERCPU_COUNTER_H */