// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011 RedHat, Inc.
 */
#ifndef __ATOMIC_H__
#define __ATOMIC_H__

/*
 * Atomics are provided by liburcu.
 *
 * The API, and guidelines for which operations provide memory barriers, are
 * documented here:
 *
 * https://github.com/urcu/userspace-rcu/blob/master/doc/uatomic-api.md
 *
 * Unlike the kernel, the same interface supports 32 and 64 bit atomic
 * integers.
 */
#include <stdint.h>
#include <stdbool.h>
#include <pthread.h>
#include <urcu/uatomic.h>

#include "spinlock.h"

typedef int32_t		atomic_t;
typedef int64_t		atomic64_t;

#define atomic_read(a)		uatomic_read(a)
#define atomic_set(a, v)	uatomic_set(a, v)
#define atomic_add(v, a)	uatomic_add(a, v)
#define atomic_sub(v, a)	uatomic_sub(a, v)
#define atomic_inc(a)		uatomic_inc(a)
#define atomic_dec(a)		uatomic_dec(a)
#define atomic_inc_return(a)	uatomic_add_return(a, 1)
#define atomic_dec_return(a)	uatomic_sub_return(a, 1)
#define atomic_dec_and_test(a)	(atomic_dec_return(a) == 0)
#define cmpxchg(a, o, n)	uatomic_cmpxchg(a, o, n)
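
/*
 * Example (illustrative sketch; 'state', OLD and NEW are hypothetical):
 * cmpxchg() returns the value that was in memory before the operation, so
 * the swap happened only if that return value equals the expected old value:
 *
 *	atomic_t	state;
 *
 *	if (cmpxchg(&state, OLD, NEW) == OLD) {
 *		// we performed the OLD -> NEW transition
 *	}
 */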

static inline bool atomic_add_unless(atomic_t *a, int v, int u)
{
	int r = atomic_read(a);
	int n, o;

	do {
		o = r;
		if (o == u)
			break;
		n = o + v;
		r = uatomic_cmpxchg(a, o, n);
	} while (r != o);

	/* Return true if we performed the addition. */
	return o != u;
}

static inline bool atomic_inc_not_zero(atomic_t *a)
{
	return atomic_add_unless(a, 1, 0);
}
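
/*
 * Example (illustrative sketch; struct foo and foo_get() are hypothetical):
 * atomic_inc_not_zero() is the usual way to take a reference on an object
 * only while it is still live, i.e. before the last reference was dropped:
 *
 *	struct foo {
 *		atomic_t	refcount;
 *	};
 *
 *	static bool foo_get(struct foo *f)
 *	{
 *		return atomic_inc_not_zero(&f->refcount);
 *	}
 */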

static inline bool atomic_dec_and_lock(atomic_t *a, spinlock_t *lock)
{
	/* Fast path: the count cannot hit zero, no lock needed. */
	if (atomic_add_unless(a, -1, 1))
		return false;

	/* We may hit zero, so take the lock and recheck under it. */
	spin_lock(lock);
	if (atomic_dec_and_test(a))
		return true;	/* hit zero; return with the lock held */
	spin_unlock(lock);
	return false;
}
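
/*
 * Example (illustrative sketch; obj, list_lock, list_del() and obj_destroy()
 * are hypothetical): the classic "free on last put" pattern, where the lock
 * protects the list that the dying object must be removed from:
 *
 *	if (atomic_dec_and_lock(&obj->refcount, &list_lock)) {
 *		list_del(&obj->list);
 *		spin_unlock(&list_lock);
 *		obj_destroy(obj);
 *	}
 */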

#ifdef HAVE_LIBURCU_ATOMIC64
/*
 * On most (64-bit) platforms, liburcu can handle 64-bit atomic counter
 * updates, so we preferentially use that.
 */
#define atomic64_read(a)	uatomic_read(a)
#define atomic64_set(a, v)	uatomic_set(a, v)
#define atomic64_add(v, a)	uatomic_add(a, v)
#define atomic64_sub(v, a)	uatomic_sub(a, v)
#define atomic64_inc(a)		uatomic_inc(a)
#define atomic64_dec(a)		uatomic_dec(a)
#else
/*
 * If we don't detect support for that, emulate it with a lock. Currently
 * there are only three atomic64_t counters in userspace and none of them are
 * performance critical, so we serialize them all with a single mutex since
 * the kernel atomic64_t API doesn't have an _init call.
 */
extern pthread_mutex_t	atomic64_lock;

static inline int64_t
atomic64_read(atomic64_t *a)
{
	int64_t	ret;

	pthread_mutex_lock(&atomic64_lock);
	ret = *a;
	pthread_mutex_unlock(&atomic64_lock);
	return ret;
}

static inline void
atomic64_add(int64_t v, atomic64_t *a)
{
	pthread_mutex_lock(&atomic64_lock);
	*a += v;
	pthread_mutex_unlock(&atomic64_lock);
}

static inline void
atomic64_set(atomic64_t *a, int64_t v)
{
	pthread_mutex_lock(&atomic64_lock);
	*a = v;
	pthread_mutex_unlock(&atomic64_lock);
}

#define atomic64_inc(a)		atomic64_add(1, (a))
#define atomic64_dec(a)		atomic64_add(-1, (a))
#define atomic64_sub(v, a)	atomic64_add(-(v), (a))

#endif	/* HAVE_LIBURCU_ATOMIC64 */
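
/*
 * Example (illustrative sketch; 'bytes_written', 'len' and 'total' are
 * hypothetical): callers use the atomic64_t API identically whichever
 * implementation was selected above; they cannot tell whether liburcu or
 * the mutex fallback is in effect:
 *
 *	atomic64_t	bytes_written;
 *
 *	atomic64_set(&bytes_written, 0);
 *	atomic64_add(len, &bytes_written);
 *	total = atomic64_read(&bytes_written);
 */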

#define __smp_mb()		cmm_smp_mb()

/* from compiler_types.h */
/*
 * __unqual_scalar_typeof(x) - Declare an unqualified scalar type, leaving
 *			       non-scalar types unchanged.
 *
 * Prefer C11 _Generic for better compile-times and simpler code. Note that
 * 'char' is not type-compatible with 'signed char', so we define a separate
 * case for it.
 */
#define __scalar_type_to_expr_cases(type)				\
		unsigned type:	(unsigned type)0,			\
		signed type:	(signed type)0

#define __unqual_scalar_typeof(x) typeof(				\
		_Generic((x),						\
			 char:	(char)0,				\
			 __scalar_type_to_expr_cases(char),		\
			 __scalar_type_to_expr_cases(short),		\
			 __scalar_type_to_expr_cases(int),		\
			 __scalar_type_to_expr_cases(long),		\
			 __scalar_type_to_expr_cases(long long),	\
			 default: (x)))
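
/*
 * Example (illustrative): _Generic drops qualifiers from the controlling
 * expression, so __unqual_scalar_typeof() lets a qualified source be copied
 * into a plain local variable:
 *
 *	const volatile int		x;
 *	__unqual_scalar_typeof(x)	y;	// 'y' has type 'int'
 */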

/* Is this type a native word size -- useful for atomic operations */
#define __native_word(t) \
	(sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
	 sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))

#define compiletime_assert(a, s)	BUILD_BUG_ON(!(a))

#define compiletime_assert_atomic_type(t)				\
	compiletime_assert(__native_word(t),				\
		"Need native word sized stores/loads for atomicity.")

/*
 * Yes, this permits 64-bit accesses on 32-bit architectures. These will
 * actually be atomic in some cases (namely Armv7 + LPAE), but for others we
 * rely on the access being split into 2x32-bit accesses for a 32-bit quantity
 * (e.g. a virtual address) and a strong prevailing wind.
 */
#define compiletime_assert_rwonce_type(t)					\
	compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long),	\
		"Unsupported access size for {READ,WRITE}_ONCE().")

/*
 * Use __READ_ONCE() instead of READ_ONCE() if you do not require any
 * atomicity. Note that this may result in tears!
 */
#define __READ_ONCE(x)	(*(const volatile __unqual_scalar_typeof(x) *)&(x))

#define READ_ONCE(x)							\
({									\
	compiletime_assert_rwonce_type(x);				\
	__READ_ONCE(x);							\
})

#define __WRITE_ONCE(x, val)						\
do {									\
	*(volatile typeof(x) *)&(x) = (val);				\
} while (0)

#define WRITE_ONCE(x, val)						\
do {									\
	compiletime_assert_rwonce_type(x);				\
	__WRITE_ONCE(x, val);						\
} while (0)
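
/*
 * Example (illustrative sketch; 'stop' and do_work() are hypothetical):
 * READ_ONCE()/WRITE_ONCE() force exactly one untorn access per evaluation,
 * so the compiler can neither cache the load across loop iterations nor
 * split or duplicate the store:
 *
 *	// worker thread
 *	while (!READ_ONCE(stop))
 *		do_work();
 *
 *	// controlling thread
 *	WRITE_ONCE(stop, 1);
 */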

#ifndef __smp_store_release
#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	WRITE_ONCE(*p, v);						\
} while (0)
#endif

#ifndef __smp_load_acquire
#define __smp_load_acquire(p)						\
({									\
	__unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p);		\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	(typeof(*p))___p1;						\
})
#endif

#ifndef smp_store_release
#define smp_store_release(p, v)	__smp_store_release((p), (v))
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p)	__smp_load_acquire(p)
#endif
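
/*
 * Example (illustrative sketch; 'data' and 'ready' are hypothetical shared
 * variables): a release/acquire pair publishes data safely across threads.
 * The store-release orders the write to 'data' before the write to 'ready',
 * and the load-acquire guarantees that a reader which observes 'ready' also
 * observes 'data':
 *
 *	// producer
 *	data = 42;
 *	smp_store_release(&ready, 1);
 *
 *	// consumer
 *	while (!smp_load_acquire(&ready))
 *		;
 *	assert(data == 42);
 */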

#endif	/* __ATOMIC_H__ */