include/atomic.h (xfsprogs, release v6.10.1)

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011 RedHat, Inc.
 * All Rights Reserved.
 */
#ifndef __ATOMIC_H__
#define __ATOMIC_H__

/*
 * Atomics are provided by liburcu.
 *
 * The API, and guidelines for which operations provide memory barriers,
 * are documented here:
 *
 * https://github.com/urcu/userspace-rcu/blob/master/doc/uatomic-api.md
 *
 * Unlike the kernel, the same interface supports 32- and 64-bit atomic
 * integers.
 */
#include <urcu/uatomic.h>
#include "spinlock.h"

typedef int32_t	atomic_t;
typedef int64_t	atomic64_t;

#define atomic_read(a)		uatomic_read(a)
#define atomic_set(a, v)	uatomic_set(a, v)
#define atomic_add(v, a)	uatomic_add(a, v)
#define atomic_sub(v, a)	uatomic_sub(a, v)
#define atomic_inc(a)		uatomic_inc(a)
#define atomic_dec(a)		uatomic_dec(a)
#define atomic_inc_return(a)	uatomic_add_return(a, 1)
#define atomic_dec_return(a)	uatomic_sub_return(a, 1)
#define atomic_dec_and_test(a)	(atomic_dec_return(a) == 0)
#define cmpxchg(a, o, n)	uatomic_cmpxchg(a, o, n)

static inline bool atomic_add_unless(atomic_t *a, int v, int u)
{
	int r = atomic_read(a);
	int n, o;

	do {
		o = r;
		if (o == u)
			break;
		n = o + v;
		r = uatomic_cmpxchg(a, o, n);
	} while (r != o);

	return o != u;
}

static inline bool atomic_inc_not_zero(atomic_t *a)
{
	return atomic_add_unless(a, 1, 0);
}

static inline bool atomic_dec_and_lock(atomic_t *a, spinlock_t *lock)
{
	if (atomic_add_unless(a, -1, 1))
		return 0;

	spin_lock(lock);
	if (atomic_dec_and_test(a))
		return 1;
	spin_unlock(lock);
	return 0;
}
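
/*
 * Illustrative sketch only, not part of the original header: a hypothetical
 * reference-counted object built on the helpers above. The names "struct
 * foo", "foo_get" and "foo_put" are invented for the example.
 *
 *	struct foo {
 *		atomic_t	refcount;
 *		spinlock_t	lock;
 *	};
 *
 *	// Take a reference only if the object is still live (refcount != 0).
 *	static bool foo_get(struct foo *f)
 *	{
 *		return atomic_inc_not_zero(&f->refcount);
 *	}
 *
 *	// Drop a reference. If this was the last one, atomic_dec_and_lock()
 *	// returns with the lock held so the object can be torn down safely.
 *	static void foo_put(struct foo *f)
 *	{
 *		if (atomic_dec_and_lock(&f->refcount, &f->lock)) {
 *			spin_unlock(&f->lock);
 *			free(f);
 *		}
 *	}
 */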

#ifdef HAVE_LIBURCU_ATOMIC64
/*
 * On most (64-bit) platforms, liburcu can handle 64-bit atomic counter
 * updates, so we preferentially use that.
 */
#define atomic64_read(a)	uatomic_read(a)
#define atomic64_set(a, v)	uatomic_set(a, v)
#define atomic64_add(v, a)	uatomic_add(a, v)
#define atomic64_sub(v, a)	uatomic_sub(a, v)
#define atomic64_inc(a)		uatomic_inc(a)
#define atomic64_dec(a)		uatomic_dec(a)
#else
/*
 * If we don't detect support for that, emulate it with a lock. Currently
 * there are only three atomic64_t counters in userspace and none of them are
 * performance critical, so we serialize them all with a single mutex since
 * the kernel atomic64_t API doesn't have an _init call.
 */
extern pthread_mutex_t	atomic64_lock;

static inline int64_t
atomic64_read(atomic64_t *a)
{
	int64_t	ret;

	pthread_mutex_lock(&atomic64_lock);
	ret = *a;
	pthread_mutex_unlock(&atomic64_lock);
	return ret;
}

static inline void
atomic64_add(int64_t v, atomic64_t *a)
{
	pthread_mutex_lock(&atomic64_lock);
	(*a) += v;
	pthread_mutex_unlock(&atomic64_lock);
}

static inline void
atomic64_set(atomic64_t *a, int64_t v)
{
	pthread_mutex_lock(&atomic64_lock);
	(*a) = v;
	pthread_mutex_unlock(&atomic64_lock);
}

#define atomic64_inc(a)		atomic64_add(1, (a))
#define atomic64_dec(a)		atomic64_add(-1, (a))
#define atomic64_sub(v, a)	atomic64_add(-(v), (a))

#endif	/* HAVE_LIBURCU_ATOMIC64 */

#define __smp_mb()		cmm_smp_mb()

/* from compiler_types.h */
/*
 * __unqual_scalar_typeof(x) - Declare an unqualified scalar type, leaving
 * non-scalar types unchanged.
 */
/*
 * Prefer C11 _Generic for better compile-times and simpler code. Note 'char'
 * is not type-compatible with 'signed char', and we define a separate case.
 */
#define __scalar_type_to_expr_cases(type)			\
		unsigned type:	(unsigned type)0,		\
		signed type:	(signed type)0

#define __unqual_scalar_typeof(x) typeof(			\
		_Generic((x),					\
			char:	(char)0,			\
			__scalar_type_to_expr_cases(char),	\
			__scalar_type_to_expr_cases(short),	\
			__scalar_type_to_expr_cases(int),	\
			__scalar_type_to_expr_cases(long),	\
			__scalar_type_to_expr_cases(long long),	\
			default: (x)))
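
/*
 * Illustrative note, not part of the original header: given a qualified
 * scalar such as
 *
 *	const volatile unsigned long flags = 0;
 *	__unqual_scalar_typeof(flags) tmp = flags;
 *
 * "tmp" is a plain (non-const, non-volatile) unsigned long. This is what
 * lets __READ_ONCE() below perform a volatile load but hand back a
 * non-volatile value. Non-scalar types fall through to the "default:" case
 * and keep their original type.
 */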

/* Is this type a native word size -- useful for atomic operations */
#define __native_word(t) \
	(sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
	 sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))

#define compiletime_assert(a, s)	BUILD_BUG_ON(!(a))

#define compiletime_assert_atomic_type(t)			\
	compiletime_assert(__native_word(t),			\
		"Need native word sized stores/loads for atomicity.")

/* from rwonce.h */
/*
 * Yes, this permits 64-bit accesses on 32-bit architectures. These will
 * actually be atomic in some cases (namely Armv7 + LPAE), but for others we
 * rely on the access being split into 2x32-bit accesses for a 32-bit quantity
 * (e.g. a virtual address) and a strong prevailing wind.
 */
#define compiletime_assert_rwonce_type(t)					\
	compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long),	\
		"Unsupported access size for {READ,WRITE}_ONCE().")

/*
 * Use __READ_ONCE() instead of READ_ONCE() if you do not require any
 * atomicity. Note that this may result in tears!
 */
#ifndef __READ_ONCE
#define __READ_ONCE(x)	(*(const volatile __unqual_scalar_typeof(x) *)&(x))
#endif

#define READ_ONCE(x)						\
({								\
	compiletime_assert_rwonce_type(x);			\
	__READ_ONCE(x);						\
})

#define __WRITE_ONCE(x, val)					\
do {								\
	*(volatile typeof(x) *)&(x) = (val);			\
} while (0)

#define WRITE_ONCE(x, val)					\
do {								\
	compiletime_assert_rwonce_type(x);			\
	__WRITE_ONCE(x, val);					\
} while (0)
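
/*
 * Illustrative sketch, not part of the original header: READ_ONCE() and
 * WRITE_ONCE() stop the compiler from tearing, fusing or hoisting accesses
 * to a flag that is polled without a lock. The names "shutdown_requested"
 * and "do_more_work" are invented for the example.
 *
 *	static int shutdown_requested;
 *
 *	// writer thread: request shutdown with a single, untorn store
 *	WRITE_ONCE(shutdown_requested, 1);
 *
 *	// reader thread: re-load the flag on every iteration
 *	while (!READ_ONCE(shutdown_requested))
 *		do_more_work();
 */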

/* from barrier.h */
#ifndef __smp_store_release
#define __smp_store_release(p, v)				\
do {								\
	compiletime_assert_atomic_type(*p);			\
	__smp_mb();						\
	WRITE_ONCE(*p, v);					\
} while (0)
#endif

#ifndef __smp_load_acquire
#define __smp_load_acquire(p)					\
({								\
	__unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p);	\
	compiletime_assert_atomic_type(*p);			\
	__smp_mb();						\
	(typeof(*p))___p1;					\
})
#endif

#ifndef smp_store_release
#define smp_store_release(p, v)		__smp_store_release((p), (v))
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p)		__smp_load_acquire(p)
#endif
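
/*
 * Illustrative sketch, not part of the original header: a single-producer,
 * single-consumer handoff using the barriers above. The names "struct msg",
 * "msg_data", "msg_ready", "build_msg" and "consume_msg" are invented for
 * the example.
 *
 *	static struct msg *msg_data;
 *	static int msg_ready;
 *
 *	// producer: publish the payload first, then set the flag with
 *	// release semantics so the consumer cannot observe the flag before
 *	// the data it guards.
 *	msg_data = build_msg();
 *	smp_store_release(&msg_ready, 1);
 *
 *	// consumer: an acquire load of the flag orders the later read of
 *	// msg_data after it.
 *	if (smp_load_acquire(&msg_ready))
 *		consume_msg(msg_data);
 */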

#endif /* __ATOMIC_H__ */