1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_PERCPU_COUNTER_H
3 #define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */
10 #include <linux/spinlock.h>
11 #include <linux/smp.h>
12 #include <linux/list.h>
13 #include <linux/threads.h>
14 #include <linux/percpu.h>
15 #include <linux/types.h>
17 /* percpu_counter batch for local add or sub */
18 #define PERCPU_COUNTER_LOCAL_BATCH INT_MAX
22 struct percpu_counter
{
25 #ifdef CONFIG_HOTPLUG_CPU
26 struct list_head list
; /* All percpu_counters are on a list */
28 s32 __percpu
*counters
;
31 extern int percpu_counter_batch
;
33 int __percpu_counter_init(struct percpu_counter
*fbc
, s64 amount
, gfp_t gfp
,
34 struct lock_class_key
*key
);
36 #define percpu_counter_init(fbc, value, gfp) \
38 static struct lock_class_key __key; \
40 __percpu_counter_init(fbc, value, gfp, &__key); \
43 void percpu_counter_destroy(struct percpu_counter
*fbc
);
44 void percpu_counter_set(struct percpu_counter
*fbc
, s64 amount
);
45 void percpu_counter_add_batch(struct percpu_counter
*fbc
, s64 amount
,
47 s64
__percpu_counter_sum(struct percpu_counter
*fbc
);
48 int __percpu_counter_compare(struct percpu_counter
*fbc
, s64 rhs
, s32 batch
);
49 void percpu_counter_sync(struct percpu_counter
*fbc
);
51 static inline int percpu_counter_compare(struct percpu_counter
*fbc
, s64 rhs
)
53 return __percpu_counter_compare(fbc
, rhs
, percpu_counter_batch
);
56 static inline void percpu_counter_add(struct percpu_counter
*fbc
, s64 amount
)
58 percpu_counter_add_batch(fbc
, amount
, percpu_counter_batch
);
62 * With percpu_counter_add_local() and percpu_counter_sub_local(), counts
63 * are accumulated in local per cpu counter and not in fbc->count until
64 * local count overflows PERCPU_COUNTER_LOCAL_BATCH. This makes counter
66 * But percpu_counter_sum(), instead of percpu_counter_read(), needs to be
67 * used to add up the counts from each CPU to account for all the local
68 * counts. So percpu_counter_add_local() and percpu_counter_sub_local()
69 * should be used when a counter is updated frequently and read rarely.
72 percpu_counter_add_local(struct percpu_counter
*fbc
, s64 amount
)
74 percpu_counter_add_batch(fbc
, amount
, PERCPU_COUNTER_LOCAL_BATCH
);
77 static inline s64
percpu_counter_sum_positive(struct percpu_counter
*fbc
)
79 s64 ret
= __percpu_counter_sum(fbc
);
80 return ret
< 0 ? 0 : ret
;
83 static inline s64
percpu_counter_sum(struct percpu_counter
*fbc
)
85 return __percpu_counter_sum(fbc
);
88 static inline s64
percpu_counter_read(struct percpu_counter
*fbc
)
94 * It is possible for the percpu_counter_read() to return a small negative
95 * number for some counter which should never be negative.
98 static inline s64
percpu_counter_read_positive(struct percpu_counter
*fbc
)
100 /* Prevent reloads of fbc->count */
101 s64 ret
= READ_ONCE(fbc
->count
);
108 static inline bool percpu_counter_initialized(struct percpu_counter
*fbc
)
110 return (fbc
->counters
!= NULL
);
113 #else /* !CONFIG_SMP */
115 struct percpu_counter
{
119 static inline int percpu_counter_init(struct percpu_counter
*fbc
, s64 amount
,
/* UP destroy: nothing was allocated, so nothing to free. */
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}
130 static inline void percpu_counter_set(struct percpu_counter
*fbc
, s64 amount
)
135 static inline int percpu_counter_compare(struct percpu_counter
*fbc
, s64 rhs
)
137 if (fbc
->count
> rhs
)
139 else if (fbc
->count
< rhs
)
146 __percpu_counter_compare(struct percpu_counter
*fbc
, s64 rhs
, s32 batch
)
148 return percpu_counter_compare(fbc
, rhs
);
152 percpu_counter_add(struct percpu_counter
*fbc
, s64 amount
)
155 fbc
->count
+= amount
;
159 /* non-SMP percpu_counter_add_local is the same with percpu_counter_add */
161 percpu_counter_add_local(struct percpu_counter
*fbc
, s64 amount
)
163 percpu_counter_add(fbc
, amount
);
167 percpu_counter_add_batch(struct percpu_counter
*fbc
, s64 amount
, s32 batch
)
169 percpu_counter_add(fbc
, amount
);
172 static inline s64
percpu_counter_read(struct percpu_counter
*fbc
)
178 * percpu_counter is intended to track positive numbers. In the UP case the
179 * number should never be negative.
181 static inline s64
percpu_counter_read_positive(struct percpu_counter
*fbc
)
186 static inline s64
percpu_counter_sum_positive(struct percpu_counter
*fbc
)
188 return percpu_counter_read_positive(fbc
);
191 static inline s64
percpu_counter_sum(struct percpu_counter
*fbc
)
193 return percpu_counter_read(fbc
);
196 static inline bool percpu_counter_initialized(struct percpu_counter
*fbc
)
/* UP: no per-CPU deltas exist, so syncing is a no-op. */
static inline void percpu_counter_sync(struct percpu_counter *fbc)
{
}
204 #endif /* CONFIG_SMP */
/* Increment the counter by one. */
static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}
/* Decrement the counter by one. */
static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}
216 static inline void percpu_counter_sub(struct percpu_counter
*fbc
, s64 amount
)
218 percpu_counter_add(fbc
, -amount
);
222 percpu_counter_sub_local(struct percpu_counter
*fbc
, s64 amount
)
224 percpu_counter_add_local(fbc
, -amount
);
227 #endif /* _LINUX_PERCPU_COUNTER_H */