/* Internal macros for atomic operations for GNU C Library.
   Copyright (C) 2002-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef _ATOMIC_H
#define _ATOMIC_H 1

/* This header defines three types of macros:

   - atomic arithmetic and logic operations on memory.  They all
     have the prefix "atomic_".

   - conditionally atomic operations of the same kinds.  These
     always behave identically but can be faster when atomicity
     is not really needed since only one thread has access to
     the memory location.  In that case the code is slower in
     the multi-thread case.  The interfaces have the prefix
     "catomic_".

   - support functions like barriers.  They also have the prefix
     "atomic_".

   Architectures must provide a few low-level macros (the compare
   and exchange definitions).  All others are optional.  They
   should only be provided if the architecture has specific
   support for the operation.

   As <atomic.h> macros are usually heavily nested and often use local
   variables to make sure side-effects are evaluated properly, use a
   per-macro unique prefix for macro-local variables.  This file uses
   an __atgN_ prefix where N is different in each macro.  */

#include <stdlib.h>

#include <atomic-machine.h>

/* Wrapper macros to call pre_NN_post (mem, ...) where NN is the
   bit width of *MEM.  The calling macro puts parens around MEM
   and following args.  */
#define __atomic_val_bysize(pre, post, mem, ...) \
  ({ \
    __typeof ((__typeof (*(mem))) *(mem)) __atg1_result; \
    if (sizeof (*mem) == 1) \
      __atg1_result = pre##_8_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 2) \
      __atg1_result = pre##_16_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 4) \
      __atg1_result = pre##_32_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 8) \
      __atg1_result = pre##_64_##post (mem, __VA_ARGS__); \
    else \
      abort (); \
    __atg1_result; \
  })
#define __atomic_bool_bysize(pre, post, mem, ...) \
  ({ \
    int __atg2_result; \
    if (sizeof (*mem) == 1) \
      __atg2_result = pre##_8_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 2) \
      __atg2_result = pre##_16_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 4) \
      __atg2_result = pre##_32_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 8) \
      __atg2_result = pre##_64_##post (mem, __VA_ARGS__); \
    else \
      abort (); \
    __atg2_result; \
  })

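/* Illustrative sketch (an editorial addition, not part of the original
   header): for a 4-byte *MEM the wrappers above dispatch to the 32-bit
   architecture primitive, so with a hypothetical "int slot;"

     __atomic_val_bysize (__arch_compare_and_exchange_val, acq, &slot, 1, 0)

   evaluates __arch_compare_and_exchange_val_32_acq (&slot, 1, 0).  */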

/* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL.
   Return the old *MEM value.  */
#if !defined atomic_compare_and_exchange_val_acq \
    && defined __arch_compare_and_exchange_val_32_acq
# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  __atomic_val_bysize (__arch_compare_and_exchange_val, acq, \
		       mem, newval, oldval)
#endif


#ifndef catomic_compare_and_exchange_val_acq
# ifdef __arch_c_compare_and_exchange_val_32_acq
#  define catomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  __atomic_val_bysize (__arch_c_compare_and_exchange_val, acq, \
		       mem, newval, oldval)
# else
#  define catomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  atomic_compare_and_exchange_val_acq (mem, newval, oldval)
# endif
#endif


#ifndef catomic_compare_and_exchange_val_rel
# ifndef atomic_compare_and_exchange_val_rel
#  define catomic_compare_and_exchange_val_rel(mem, newval, oldval) \
  catomic_compare_and_exchange_val_acq (mem, newval, oldval)
# else
#  define catomic_compare_and_exchange_val_rel(mem, newval, oldval) \
  atomic_compare_and_exchange_val_rel (mem, newval, oldval)
# endif
#endif


#ifndef atomic_compare_and_exchange_val_rel
# define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \
  atomic_compare_and_exchange_val_acq (mem, newval, oldval)
#endif


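/* Illustrative usage sketch (an editorial addition, not part of the
   original header); FLAG and its 0/1 encoding are hypothetical.  Claim a
   one-shot flag: store 1 in FLAG only if it still holds 0, and report
   whether this caller won the race.  */
#if 0
static int flag;

static int
try_claim_flag (void)
{
  /* The old value is returned; 0 means we installed the 1.  */
  return atomic_compare_and_exchange_val_acq (&flag, 1, 0) == 0;
}
#endif
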
/* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL.
   Return zero if *MEM was changed or non-zero if no exchange happened.  */
#ifndef atomic_compare_and_exchange_bool_acq
# ifdef __arch_compare_and_exchange_bool_32_acq
#  define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  __atomic_bool_bysize (__arch_compare_and_exchange_bool, acq, \
			mem, newval, oldval)
# else
#  define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  ({ /* Cannot use __oldval here, because macros later in this file might \
	call this macro with __oldval argument.  */ \
     __typeof (oldval) __atg3_old = (oldval); \
     atomic_compare_and_exchange_val_acq (mem, newval, __atg3_old) \
       != __atg3_old; \
  })
# endif
#endif


#ifndef catomic_compare_and_exchange_bool_acq
# ifdef __arch_c_compare_and_exchange_bool_32_acq
#  define catomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  __atomic_bool_bysize (__arch_c_compare_and_exchange_bool, acq, \
			mem, newval, oldval)
# else
#  define catomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  ({ /* Cannot use __oldval here, because macros later in this file might \
	call this macro with __oldval argument.  */ \
     __typeof (oldval) __atg4_old = (oldval); \
     catomic_compare_and_exchange_val_acq (mem, newval, __atg4_old) \
       != __atg4_old; \
  })
# endif
#endif


/* Store NEWVALUE in *MEM and return the old value.  */
#ifndef atomic_exchange_acq
# define atomic_exchange_acq(mem, newvalue) \
  ({ __typeof ((__typeof (*(mem))) *(mem)) __atg5_oldval; \
     __typeof (mem) __atg5_memp = (mem); \
     __typeof ((__typeof (*(mem))) *(mem)) __atg5_value = (newvalue); \
     \
     do \
       __atg5_oldval = *__atg5_memp; \
     while (__builtin_expect \
	    (atomic_compare_and_exchange_bool_acq (__atg5_memp, __atg5_value, \
						   __atg5_oldval), 0)); \
     \
     __atg5_oldval; })
#endif

#ifndef atomic_exchange_rel
# define atomic_exchange_rel(mem, newvalue) atomic_exchange_acq (mem, newvalue)
#endif

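/* Illustrative usage sketch (an editorial addition, not part of the
   original header); LOCK and its 0/1 encoding are hypothetical.  A
   minimal test-and-set lock built on the exchange macros above.  */
#if 0
static int lock;

static void
toy_lock (void)
{
  /* Spin until the previous value was 0, i.e. we installed the 1.  */
  while (atomic_exchange_acq (&lock, 1) != 0)
    ;
}

static void
toy_unlock (void)
{
  atomic_exchange_rel (&lock, 0);
}
#endif
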

/* Add VALUE to *MEM and return the old value of *MEM.  */
#ifndef atomic_exchange_and_add_acq
# ifdef atomic_exchange_and_add
#  define atomic_exchange_and_add_acq(mem, value) \
  atomic_exchange_and_add (mem, value)
# else
#  define atomic_exchange_and_add_acq(mem, value) \
  ({ __typeof (*(mem)) __atg6_oldval; \
     __typeof (mem) __atg6_memp = (mem); \
     __typeof (*(mem)) __atg6_value = (value); \
     \
     do \
       __atg6_oldval = *__atg6_memp; \
     while (__builtin_expect \
	    (atomic_compare_and_exchange_bool_acq (__atg6_memp, \
						   __atg6_oldval \
						   + __atg6_value, \
						   __atg6_oldval), 0)); \
     \
     __atg6_oldval; })
# endif
#endif

#ifndef atomic_exchange_and_add_rel
# define atomic_exchange_and_add_rel(mem, value) \
  atomic_exchange_and_add_acq (mem, value)
#endif

#ifndef atomic_exchange_and_add
# define atomic_exchange_and_add(mem, value) \
  atomic_exchange_and_add_acq (mem, value)
#endif

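/* Illustrative usage sketch (an editorial addition, not part of the
   original header); NEXT_TICKET is hypothetical.  Fetch-and-add hands
   out unique ticket numbers: each caller receives the old value.  */
#if 0
static unsigned int next_ticket;

static unsigned int
take_ticket (void)
{
  return atomic_exchange_and_add (&next_ticket, 1);
}
#endif
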
#ifndef catomic_exchange_and_add
# define catomic_exchange_and_add(mem, value) \
  ({ __typeof (*(mem)) __atg7_oldv; \
     __typeof (mem) __atg7_memp = (mem); \
     __typeof (*(mem)) __atg7_value = (value); \
     \
     do \
       __atg7_oldv = *__atg7_memp; \
     while (__builtin_expect \
	    (catomic_compare_and_exchange_bool_acq (__atg7_memp, \
						    __atg7_oldv \
						    + __atg7_value, \
						    __atg7_oldv), 0)); \
     \
     __atg7_oldv; })
#endif


#ifndef atomic_max
# define atomic_max(mem, value) \
  do { \
    __typeof (*(mem)) __atg8_oldval; \
    __typeof (mem) __atg8_memp = (mem); \
    __typeof (*(mem)) __atg8_value = (value); \
    do { \
      __atg8_oldval = *__atg8_memp; \
      if (__atg8_oldval >= __atg8_value) \
	break; \
    } while (__builtin_expect \
	     (atomic_compare_and_exchange_bool_acq (__atg8_memp, __atg8_value, \
						    __atg8_oldval), 0)); \
  } while (0)
#endif


#ifndef catomic_max
# define catomic_max(mem, value) \
  do { \
    __typeof (*(mem)) __atg9_oldv; \
    __typeof (mem) __atg9_memp = (mem); \
    __typeof (*(mem)) __atg9_value = (value); \
    do { \
      __atg9_oldv = *__atg9_memp; \
      if (__atg9_oldv >= __atg9_value) \
	break; \
    } while (__builtin_expect \
	     (catomic_compare_and_exchange_bool_acq (__atg9_memp, \
						     __atg9_value, \
						     __atg9_oldv), 0)); \
  } while (0)
#endif


#ifndef atomic_min
# define atomic_min(mem, value) \
  do { \
    __typeof (*(mem)) __atg10_oldval; \
    __typeof (mem) __atg10_memp = (mem); \
    __typeof (*(mem)) __atg10_value = (value); \
    do { \
      __atg10_oldval = *__atg10_memp; \
      if (__atg10_oldval <= __atg10_value) \
	break; \
    } while (__builtin_expect \
	     (atomic_compare_and_exchange_bool_acq (__atg10_memp, \
						    __atg10_value, \
						    __atg10_oldval), 0)); \
  } while (0)
#endif

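/* Illustrative usage sketch (an editorial addition, not part of the
   original header); PEAK_USAGE is hypothetical.  Record a running
   high-water mark that only ever moves upward.  */
#if 0
static unsigned long int peak_usage;

static void
note_usage (unsigned long int current)
{
  atomic_max (&peak_usage, current);
}
#endif
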

#ifndef atomic_add
# define atomic_add(mem, value) (void) atomic_exchange_and_add ((mem), (value))
#endif


#ifndef catomic_add
# define catomic_add(mem, value) \
  (void) catomic_exchange_and_add ((mem), (value))
#endif


#ifndef atomic_increment
# define atomic_increment(mem) atomic_add ((mem), 1)
#endif


#ifndef catomic_increment
# define catomic_increment(mem) catomic_add ((mem), 1)
#endif


#ifndef atomic_increment_val
# define atomic_increment_val(mem) (atomic_exchange_and_add ((mem), 1) + 1)
#endif


#ifndef catomic_increment_val
# define catomic_increment_val(mem) (catomic_exchange_and_add ((mem), 1) + 1)
#endif


/* Add one to *MEM and return true iff it's now zero.  */
#ifndef atomic_increment_and_test
# define atomic_increment_and_test(mem) \
  (atomic_exchange_and_add ((mem), 1) + 1 == 0)
#endif


#ifndef atomic_decrement
# define atomic_decrement(mem) atomic_add ((mem), -1)
#endif


#ifndef catomic_decrement
# define catomic_decrement(mem) catomic_add ((mem), -1)
#endif


#ifndef atomic_decrement_val
# define atomic_decrement_val(mem) (atomic_exchange_and_add ((mem), -1) - 1)
#endif


#ifndef catomic_decrement_val
# define catomic_decrement_val(mem) (catomic_exchange_and_add ((mem), -1) - 1)
#endif


/* Subtract 1 from *MEM and return true iff it's now zero.  */
#ifndef atomic_decrement_and_test
# define atomic_decrement_and_test(mem) \
  (atomic_exchange_and_add ((mem), -1) == 1)
#endif


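/* Illustrative usage sketch (an editorial addition, not part of the
   original header); REFCOUNT is hypothetical.  The caller that drops
   the count to zero is the one that must free the object.  */
#if 0
static int refcount;

static int
release_ref (void)
{
  /* True only for the call that made the count reach zero.  */
  return atomic_decrement_and_test (&refcount);
}
#endif
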
/* Decrement *MEM if it is > 0, and return the old value.  */
#ifndef atomic_decrement_if_positive
# define atomic_decrement_if_positive(mem) \
  ({ __typeof (*(mem)) __atg11_oldval; \
     __typeof (mem) __atg11_memp = (mem); \
     \
     do \
       { \
	 __atg11_oldval = *__atg11_memp; \
	 if (__glibc_unlikely (__atg11_oldval <= 0)) \
	   break; \
       } \
     while (__builtin_expect \
	    (atomic_compare_and_exchange_bool_acq (__atg11_memp, \
						   __atg11_oldval - 1, \
						   __atg11_oldval), 0)); \
     __atg11_oldval; })
#endif


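/* Illustrative usage sketch (an editorial addition, not part of the
   original header); SEM_COUNT is hypothetical.  A non-blocking
   "try-wait": succeed only if the counter was still positive.  */
#if 0
static int sem_count;

static int
sem_trywait_fast (void)
{
  /* The old value is returned; it was decremented iff it was > 0.  */
  return atomic_decrement_if_positive (&sem_count) > 0;
}
#endif
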
#ifndef atomic_add_negative
# define atomic_add_negative(mem, value) \
  ({ __typeof (value) __atg12_value = (value); \
     atomic_exchange_and_add (mem, __atg12_value) < -__atg12_value; })
#endif


#ifndef atomic_add_zero
# define atomic_add_zero(mem, value) \
  ({ __typeof (value) __atg13_value = (value); \
     atomic_exchange_and_add (mem, __atg13_value) == -__atg13_value; })
#endif


#ifndef atomic_bit_set
# define atomic_bit_set(mem, bit) \
  (void) atomic_bit_test_set (mem, bit)
#endif


#ifndef atomic_bit_test_set
# define atomic_bit_test_set(mem, bit) \
  ({ __typeof (*(mem)) __atg14_old; \
     __typeof (mem) __atg14_memp = (mem); \
     __typeof (*(mem)) __atg14_mask = ((__typeof (*(mem))) 1 << (bit)); \
     \
     do \
       __atg14_old = (*__atg14_memp); \
     while (__builtin_expect \
	    (atomic_compare_and_exchange_bool_acq (__atg14_memp, \
						   __atg14_old | __atg14_mask, \
						   __atg14_old), 0)); \
     \
     __atg14_old & __atg14_mask; })
#endif

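/* Illustrative usage sketch (an editorial addition, not part of the
   original header); FLAGS_WORD and bit 0 are hypothetical.  Claim a
   slot exactly once across threads.  */
#if 0
static unsigned long int flags_word;

static int
claim_slot0 (void)
{
  /* True only for the thread that flipped the bit from 0 to 1.  */
  return atomic_bit_test_set (&flags_word, 0) == 0;
}
#endif
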
/* Atomically *mem &= mask.  */
#ifndef atomic_and
# define atomic_and(mem, mask) \
  do { \
    __typeof (*(mem)) __atg15_old; \
    __typeof (mem) __atg15_memp = (mem); \
    __typeof (*(mem)) __atg15_mask = (mask); \
    \
    do \
      __atg15_old = (*__atg15_memp); \
    while (__builtin_expect \
	   (atomic_compare_and_exchange_bool_acq (__atg15_memp, \
						  __atg15_old & __atg15_mask, \
						  __atg15_old), 0)); \
  } while (0)
#endif

#ifndef catomic_and
# define catomic_and(mem, mask) \
  do { \
    __typeof (*(mem)) __atg20_old; \
    __typeof (mem) __atg20_memp = (mem); \
    __typeof (*(mem)) __atg20_mask = (mask); \
    \
    do \
      __atg20_old = (*__atg20_memp); \
    while (__builtin_expect \
	   (catomic_compare_and_exchange_bool_acq (__atg20_memp, \
						   __atg20_old & __atg20_mask, \
						   __atg20_old), 0)); \
  } while (0)
#endif

/* Atomically *mem &= mask and return the old value of *mem.  */
#ifndef atomic_and_val
# define atomic_and_val(mem, mask) \
  ({ __typeof (*(mem)) __atg16_old; \
     __typeof (mem) __atg16_memp = (mem); \
     __typeof (*(mem)) __atg16_mask = (mask); \
     \
     do \
       __atg16_old = (*__atg16_memp); \
     while (__builtin_expect \
	    (atomic_compare_and_exchange_bool_acq (__atg16_memp, \
						   __atg16_old & __atg16_mask, \
						   __atg16_old), 0)); \
     \
     __atg16_old; })
#endif

/* Atomically *mem |= mask.  */
#ifndef atomic_or
# define atomic_or(mem, mask) \
  do { \
    __typeof (*(mem)) __atg17_old; \
    __typeof (mem) __atg17_memp = (mem); \
    __typeof (*(mem)) __atg17_mask = (mask); \
    \
    do \
      __atg17_old = (*__atg17_memp); \
    while (__builtin_expect \
	   (atomic_compare_and_exchange_bool_acq (__atg17_memp, \
						  __atg17_old | __atg17_mask, \
						  __atg17_old), 0)); \
  } while (0)
#endif

#ifndef catomic_or
# define catomic_or(mem, mask) \
  do { \
    __typeof (*(mem)) __atg18_old; \
    __typeof (mem) __atg18_memp = (mem); \
    __typeof (*(mem)) __atg18_mask = (mask); \
    \
    do \
      __atg18_old = (*__atg18_memp); \
    while (__builtin_expect \
	   (catomic_compare_and_exchange_bool_acq (__atg18_memp, \
						   __atg18_old | __atg18_mask, \
						   __atg18_old), 0)); \
  } while (0)
#endif

/* Atomically *mem |= mask and return the old value of *mem.  */
#ifndef atomic_or_val
# define atomic_or_val(mem, mask) \
  ({ __typeof (*(mem)) __atg19_old; \
     __typeof (mem) __atg19_memp = (mem); \
     __typeof (*(mem)) __atg19_mask = (mask); \
     \
     do \
       __atg19_old = (*__atg19_memp); \
     while (__builtin_expect \
	    (atomic_compare_and_exchange_bool_acq (__atg19_memp, \
						   __atg19_old | __atg19_mask, \
						   __atg19_old), 0)); \
     \
     __atg19_old; })
#endif

#ifndef atomic_full_barrier
# define atomic_full_barrier() __asm ("" ::: "memory")
#endif


#ifndef atomic_read_barrier
# define atomic_read_barrier() atomic_full_barrier ()
#endif


#ifndef atomic_write_barrier
# define atomic_write_barrier() atomic_full_barrier ()
#endif


#ifndef atomic_forced_read
# define atomic_forced_read(x) \
  ({ __typeof (x) __x; __asm ("" : "=r" (__x) : "0" (x)); __x; })
#endif

/* This is equal to 1 iff the architecture supports 64b atomic operations.  */
#ifndef __HAVE_64B_ATOMICS
# error Unable to determine if 64-bit atomics are present.
#endif

/* The following functions are a subset of the atomic operations provided by
   C11.  Usually, a function named atomic_OP_MO(args) is equivalent to C11's
   atomic_OP_explicit(args, memory_order_MO); exceptions noted below.  */

/* Each arch can request to use compiler built-ins for C11 atomics.  If it
   does, all atomics will be based on these.  */
#if USE_ATOMIC_COMPILER_BUILTINS

/* We require 32b atomic operations; some archs also support 64b atomic
   operations.  */
void __atomic_link_error (void);
# if __HAVE_64B_ATOMICS == 1
#  define __atomic_check_size(mem) \
   if ((sizeof (*mem) != 4) && (sizeof (*mem) != 8)) \
     __atomic_link_error ();
# else
#  define __atomic_check_size(mem) \
   if (sizeof (*mem) != 4) \
     __atomic_link_error ();
# endif
/* We additionally provide 8b and 16b atomic loads and stores; we do not yet
   need other atomic operations of such sizes, and restricting the support to
   loads and stores makes this easier for archs that do not have native
   support for atomic operations to less-than-word-sized data.  */
# if __HAVE_64B_ATOMICS == 1
#  define __atomic_check_size_ls(mem) \
   if ((sizeof (*mem) != 1) && (sizeof (*mem) != 2) && (sizeof (*mem) != 4) \
       && (sizeof (*mem) != 8)) \
     __atomic_link_error ();
# else
#  define __atomic_check_size_ls(mem) \
   if ((sizeof (*mem) != 1) && (sizeof (*mem) != 2) && (sizeof (*mem) != 4)) \
     __atomic_link_error ();
# endif

# define atomic_thread_fence_acquire() \
  __atomic_thread_fence (__ATOMIC_ACQUIRE)
# define atomic_thread_fence_release() \
  __atomic_thread_fence (__ATOMIC_RELEASE)
# define atomic_thread_fence_seq_cst() \
  __atomic_thread_fence (__ATOMIC_SEQ_CST)

# define atomic_load_relaxed(mem) \
  ({ __atomic_check_size_ls ((mem)); \
     __atomic_load_n ((mem), __ATOMIC_RELAXED); })
# define atomic_load_acquire(mem) \
  ({ __atomic_check_size_ls ((mem)); \
     __atomic_load_n ((mem), __ATOMIC_ACQUIRE); })

# define atomic_store_relaxed(mem, val) \
  do { \
    __atomic_check_size_ls ((mem)); \
    __atomic_store_n ((mem), (val), __ATOMIC_RELAXED); \
  } while (0)
# define atomic_store_release(mem, val) \
  do { \
    __atomic_check_size_ls ((mem)); \
    __atomic_store_n ((mem), (val), __ATOMIC_RELEASE); \
  } while (0)

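/* Illustrative usage sketch (an editorial addition, not part of the
   original header); PUBLISHED is hypothetical.  Pair a release store
   with an acquire load so that everything written before publish() is
   visible to a consumer that observes the pointer.  */
#if 0
static void *published;

static void
publish (void *p)
{
  atomic_store_release (&published, p);
}

static void *
consume (void)
{
  return atomic_load_acquire (&published);
}
#endif
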
/* On failure, this CAS has memory_order_relaxed semantics.  */
# define atomic_compare_exchange_weak_relaxed(mem, expected, desired) \
  ({ __atomic_check_size ((mem)); \
     __atomic_compare_exchange_n ((mem), (expected), (desired), 1, \
				  __ATOMIC_RELAXED, __ATOMIC_RELAXED); })
# define atomic_compare_exchange_weak_acquire(mem, expected, desired) \
  ({ __atomic_check_size ((mem)); \
     __atomic_compare_exchange_n ((mem), (expected), (desired), 1, \
				  __ATOMIC_ACQUIRE, __ATOMIC_RELAXED); })
# define atomic_compare_exchange_weak_release(mem, expected, desired) \
  ({ __atomic_check_size ((mem)); \
     __atomic_compare_exchange_n ((mem), (expected), (desired), 1, \
				  __ATOMIC_RELEASE, __ATOMIC_RELAXED); })

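/* Illustrative usage sketch (an editorial addition, not part of the
   original header); COUNTER and the doubling update are hypothetical.
   On failure the weak CAS refreshes EXPECTED with the current value,
   so the loop simply recomputes the desired value and retries.  */
#if 0
static int counter;

static void
double_counter (void)
{
  int expected = atomic_load_relaxed (&counter);
  while (!atomic_compare_exchange_weak_acquire (&counter, &expected,
						expected * 2))
    ;
}
#endif
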
# define atomic_exchange_relaxed(mem, desired) \
  ({ __atomic_check_size ((mem)); \
     __atomic_exchange_n ((mem), (desired), __ATOMIC_RELAXED); })
# define atomic_exchange_acquire(mem, desired) \
  ({ __atomic_check_size ((mem)); \
     __atomic_exchange_n ((mem), (desired), __ATOMIC_ACQUIRE); })
# define atomic_exchange_release(mem, desired) \
  ({ __atomic_check_size ((mem)); \
     __atomic_exchange_n ((mem), (desired), __ATOMIC_RELEASE); })

# define atomic_fetch_add_relaxed(mem, operand) \
  ({ __atomic_check_size ((mem)); \
     __atomic_fetch_add ((mem), (operand), __ATOMIC_RELAXED); })
# define atomic_fetch_add_acquire(mem, operand) \
  ({ __atomic_check_size ((mem)); \
     __atomic_fetch_add ((mem), (operand), __ATOMIC_ACQUIRE); })
# define atomic_fetch_add_release(mem, operand) \
  ({ __atomic_check_size ((mem)); \
     __atomic_fetch_add ((mem), (operand), __ATOMIC_RELEASE); })
# define atomic_fetch_add_acq_rel(mem, operand) \
  ({ __atomic_check_size ((mem)); \
     __atomic_fetch_add ((mem), (operand), __ATOMIC_ACQ_REL); })

# define atomic_fetch_and_relaxed(mem, operand) \
  ({ __atomic_check_size ((mem)); \
     __atomic_fetch_and ((mem), (operand), __ATOMIC_RELAXED); })
# define atomic_fetch_and_acquire(mem, operand) \
  ({ __atomic_check_size ((mem)); \
     __atomic_fetch_and ((mem), (operand), __ATOMIC_ACQUIRE); })
# define atomic_fetch_and_release(mem, operand) \
  ({ __atomic_check_size ((mem)); \
     __atomic_fetch_and ((mem), (operand), __ATOMIC_RELEASE); })

# define atomic_fetch_or_relaxed(mem, operand) \
  ({ __atomic_check_size ((mem)); \
     __atomic_fetch_or ((mem), (operand), __ATOMIC_RELAXED); })
# define atomic_fetch_or_acquire(mem, operand) \
  ({ __atomic_check_size ((mem)); \
     __atomic_fetch_or ((mem), (operand), __ATOMIC_ACQUIRE); })
# define atomic_fetch_or_release(mem, operand) \
  ({ __atomic_check_size ((mem)); \
     __atomic_fetch_or ((mem), (operand), __ATOMIC_RELEASE); })

# define atomic_fetch_xor_release(mem, operand) \
  ({ __atomic_check_size ((mem)); \
     __atomic_fetch_xor ((mem), (operand), __ATOMIC_RELEASE); })

#else /* !USE_ATOMIC_COMPILER_BUILTINS */

/* By default, we assume that read, write, and full barriers are equivalent
   to acquire, release, and seq_cst barriers.  Archs for which this does not
   hold have to provide custom definitions of the fences.  */
# ifndef atomic_thread_fence_acquire
#  define atomic_thread_fence_acquire() atomic_read_barrier ()
# endif
# ifndef atomic_thread_fence_release
#  define atomic_thread_fence_release() atomic_write_barrier ()
# endif
# ifndef atomic_thread_fence_seq_cst
#  define atomic_thread_fence_seq_cst() atomic_full_barrier ()
# endif

# ifndef atomic_load_relaxed
#  define atomic_load_relaxed(mem) \
   ({ __typeof ((__typeof (*(mem))) *(mem)) __atg100_val; \
      __asm ("" : "=r" (__atg100_val) : "0" (*(mem))); \
      __atg100_val; })
# endif
# ifndef atomic_load_acquire
#  define atomic_load_acquire(mem) \
   ({ __typeof (*(mem)) __atg101_val = atomic_load_relaxed (mem); \
      atomic_thread_fence_acquire (); \
      __atg101_val; })
# endif

# ifndef atomic_store_relaxed
/* XXX Use inline asm here?  */
#  define atomic_store_relaxed(mem, val) do { *(mem) = (val); } while (0)
# endif
# ifndef atomic_store_release
#  define atomic_store_release(mem, val) \
   do { \
     atomic_thread_fence_release (); \
     atomic_store_relaxed ((mem), (val)); \
   } while (0)
# endif

/* On failure, this CAS has memory_order_relaxed semantics.  */
/* XXX This potentially has one branch more than necessary, but archs
   currently do not define a CAS that returns both the previous value and
   the success flag.  */
# ifndef atomic_compare_exchange_weak_acquire
#  define atomic_compare_exchange_weak_acquire(mem, expected, desired) \
   ({ typeof (*(expected)) __atg102_expected = *(expected); \
      *(expected) = \
	atomic_compare_and_exchange_val_acq ((mem), (desired), *(expected)); \
      *(expected) == __atg102_expected; })
# endif
# ifndef atomic_compare_exchange_weak_relaxed
/* XXX Fall back to CAS with acquire MO because archs do not define a weaker
   CAS.  */
#  define atomic_compare_exchange_weak_relaxed(mem, expected, desired) \
   atomic_compare_exchange_weak_acquire ((mem), (expected), (desired))
# endif
# ifndef atomic_compare_exchange_weak_release
#  define atomic_compare_exchange_weak_release(mem, expected, desired) \
   ({ typeof (*(expected)) __atg103_expected = *(expected); \
      *(expected) = \
	atomic_compare_and_exchange_val_rel ((mem), (desired), *(expected)); \
      *(expected) == __atg103_expected; })
# endif

/* XXX Fall back to acquire MO because archs do not define a weaker
   atomic_exchange.  */
# ifndef atomic_exchange_relaxed
#  define atomic_exchange_relaxed(mem, val) \
   atomic_exchange_acq ((mem), (val))
# endif
# ifndef atomic_exchange_acquire
#  define atomic_exchange_acquire(mem, val) \
   atomic_exchange_acq ((mem), (val))
# endif
# ifndef atomic_exchange_release
#  define atomic_exchange_release(mem, val) \
   atomic_exchange_rel ((mem), (val))
# endif

# ifndef atomic_fetch_add_acquire
#  define atomic_fetch_add_acquire(mem, operand) \
   atomic_exchange_and_add_acq ((mem), (operand))
# endif
# ifndef atomic_fetch_add_relaxed
/* XXX Fall back to acquire MO because the MO semantics of
   atomic_exchange_and_add are not documented; the generic version falls back
   to atomic_exchange_and_add_acq if atomic_exchange_and_add is not defined,
   and vice versa.  */
#  define atomic_fetch_add_relaxed(mem, operand) \
   atomic_fetch_add_acquire ((mem), (operand))
# endif
# ifndef atomic_fetch_add_release
#  define atomic_fetch_add_release(mem, operand) \
   atomic_exchange_and_add_rel ((mem), (operand))
# endif
# ifndef atomic_fetch_add_acq_rel
#  define atomic_fetch_add_acq_rel(mem, operand) \
   ({ atomic_thread_fence_release (); \
      atomic_exchange_and_add_acq ((mem), (operand)); })
# endif

/* XXX Fall back to acquire MO because archs do not define a weaker
   atomic_and_val.  */
# ifndef atomic_fetch_and_relaxed
#  define atomic_fetch_and_relaxed(mem, operand) \
   atomic_fetch_and_acquire ((mem), (operand))
# endif
/* XXX The default for atomic_and_val has acquire semantics, but this is not
   documented.  */
# ifndef atomic_fetch_and_acquire
#  define atomic_fetch_and_acquire(mem, operand) \
   atomic_and_val ((mem), (operand))
# endif
# ifndef atomic_fetch_and_release
/* XXX This unnecessarily has acquire MO.  */
#  define atomic_fetch_and_release(mem, operand) \
   ({ atomic_thread_fence_release (); \
      atomic_and_val ((mem), (operand)); })
# endif

/* XXX The default for atomic_or_val has acquire semantics, but this is not
   documented.  */
# ifndef atomic_fetch_or_acquire
#  define atomic_fetch_or_acquire(mem, operand) \
   atomic_or_val ((mem), (operand))
# endif
/* XXX Fall back to acquire MO because archs do not define a weaker
   atomic_or_val.  */
# ifndef atomic_fetch_or_relaxed
#  define atomic_fetch_or_relaxed(mem, operand) \
   atomic_fetch_or_acquire ((mem), (operand))
# endif
/* XXX Contains an unnecessary acquire MO because archs do not define a weaker
   atomic_or_val.  */
# ifndef atomic_fetch_or_release
#  define atomic_fetch_or_release(mem, operand) \
   ({ atomic_thread_fence_release (); \
      atomic_fetch_or_acquire ((mem), (operand)); })
# endif

# ifndef atomic_fetch_xor_release
/* Failing the atomic_compare_exchange_weak_release reloads the value in
   __atg104_expected, so we need only do the XOR again and retry.  */
#  define atomic_fetch_xor_release(mem, operand) \
   ({ __typeof (mem) __atg104_memp = (mem); \
      __typeof (*(mem)) __atg104_expected = (*__atg104_memp); \
      __typeof (*(mem)) __atg104_desired; \
      __typeof (*(mem)) __atg104_op = (operand); \
      \
      do \
	__atg104_desired = __atg104_expected ^ __atg104_op; \
      while (__glibc_unlikely \
	     (atomic_compare_exchange_weak_release ( \
		__atg104_memp, &__atg104_expected, __atg104_desired) \
	      == 0)); \
      __atg104_expected; })
# endif

#endif /* !USE_ATOMIC_COMPILER_BUILTINS */

/* This operation does not affect synchronization semantics but can be used
   in the body of a spin loop to potentially improve its efficiency.  */
#ifndef atomic_spin_nop
# define atomic_spin_nop() do { /* nothing */ } while (0)
#endif

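/* Illustrative usage sketch (an editorial addition, not part of the
   original header); READY is hypothetical.  Spin until a flag is set,
   hinting to the CPU on each iteration.  */
#if 0
static int ready;

static void
wait_until_ready (void)
{
  while (atomic_load_acquire (&ready) == 0)
    atomic_spin_nop ();
}
#endif
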
/* ATOMIC_EXCHANGE_USES_CAS is non-zero if atomic_exchange operations
   are implemented based on a CAS loop; otherwise, this is zero and we assume
   that the atomic_exchange operations could provide better performance
   than a CAS loop.  */
#ifndef ATOMIC_EXCHANGE_USES_CAS
# error ATOMIC_EXCHANGE_USES_CAS has to be defined.
#endif

#endif /* atomic.h */