/* Internal macros for atomic operations for GNU C Library.
   Copyright (C) 2002-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#ifndef _ATOMIC_H
#define _ATOMIC_H	1

/* This header defines three types of macros:

   - atomic arithmetic and logic operations on memory.  They all
     have the prefix "atomic_".

   - conditionally atomic operations of the same kinds.  These
     behave identically but can be faster when atomicity
     is not really needed, since only one thread has access to
     the memory location.  The price is that the code is slower
     in the multi-thread case.  The interfaces have the prefix
     "catomic_".

   - support functions like barriers.  They also have the prefix
     "atomic_".

   Architectures must provide a few lowlevel macros (the compare
   and exchange definitions).  All others are optional.  They
   should only be provided if the architecture has specific
   support for the operation.

   As <atomic.h> macros are usually heavily nested and often use local
   variables to make sure side-effects are evaluated properly, use for
   macro local variables a per-macro unique prefix.  This file uses
   __atgN_ prefix where N is different in each macro.  */

#include <stdlib.h>

#include <atomic-machine.h>

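/* Example (illustrative sketch; `foo_refcount' is a hypothetical counter):
   the "atomic_" forms are always safe to use concurrently, while the
   "catomic_" forms are meant for memory that the caller knows is being
   accessed by only one thread at that point.

     static int foo_refcount;

     atomic_increment (&foo_refcount);     (always atomic)
     catomic_increment (&foo_refcount);    (caller guarantees exclusive access)
*/
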
/* Wrapper macros to call pre_NN_post (mem, ...) where NN is the
   bit width of *MEM.  The calling macro puts parens around MEM
   and following args.  */
#define __atomic_val_bysize(pre, post, mem, ...) \
  ({ \
    __typeof ((__typeof (*(mem))) *(mem)) __atg1_result; \
    if (sizeof (*mem) == 1) \
      __atg1_result = pre##_8_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 2) \
      __atg1_result = pre##_16_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 4) \
      __atg1_result = pre##_32_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 8) \
      __atg1_result = pre##_64_##post (mem, __VA_ARGS__); \
    else \
      abort (); \
    __atg1_result; \
  })
#define __atomic_bool_bysize(pre, post, mem, ...) \
  ({ \
    int __atg2_result; \
    if (sizeof (*mem) == 1) \
      __atg2_result = pre##_8_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 2) \
      __atg2_result = pre##_16_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 4) \
      __atg2_result = pre##_32_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 8) \
      __atg2_result = pre##_64_##post (mem, __VA_ARGS__); \
    else \
      abort (); \
    __atg2_result; \
  })

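/* Example (illustrative sketch): for a 4-byte object,

     __atomic_val_bysize (__arch_compare_and_exchange_val, acq,
			  mem, newval, oldval)

   selects __arch_compare_and_exchange_val_32_acq (mem, newval, oldval).
   The size checks are compile-time constants, so only the selected branch
   survives in the generated code.  */
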
/* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL.
   Return the old *MEM value.  */
#if !defined atomic_compare_and_exchange_val_acq \
    && defined __arch_compare_and_exchange_val_32_acq
# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  __atomic_val_bysize (__arch_compare_and_exchange_val,acq, \
		       mem, newval, oldval)
#endif

#ifndef catomic_compare_and_exchange_val_acq
# ifdef __arch_c_compare_and_exchange_val_32_acq
#  define catomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  __atomic_val_bysize (__arch_c_compare_and_exchange_val,acq, \
		       mem, newval, oldval)
# else
#  define catomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  atomic_compare_and_exchange_val_acq (mem, newval, oldval)
# endif
#endif

#ifndef catomic_compare_and_exchange_val_rel
# ifndef atomic_compare_and_exchange_val_rel
#  define catomic_compare_and_exchange_val_rel(mem, newval, oldval) \
  catomic_compare_and_exchange_val_acq (mem, newval, oldval)
# else
#  define catomic_compare_and_exchange_val_rel(mem, newval, oldval) \
  atomic_compare_and_exchange_val_rel (mem, newval, oldval)
# endif
#endif

#ifndef atomic_compare_and_exchange_val_rel
# define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \
  atomic_compare_and_exchange_val_acq (mem, newval, oldval)
#endif

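/* Example (illustrative sketch; `mem' points to a hypothetical counter):
   the value-returning CAS is typically wrapped in a retry loop that
   recomputes the new value from the freshly observed old value.

     __typeof (*mem) oldv, newv;
     do
       {
	 oldv = *mem;
	 newv = oldv * 2;
       }
     while (atomic_compare_and_exchange_val_acq (mem, newv, oldv) != oldv);
*/
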
/* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL.
   Return zero if *MEM was changed or non-zero if no exchange happened.  */
#ifndef atomic_compare_and_exchange_bool_acq
# ifdef __arch_compare_and_exchange_bool_32_acq
#  define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  __atomic_bool_bysize (__arch_compare_and_exchange_bool,acq, \
			mem, newval, oldval)
# else
#  define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  ({ /* Cannot use __oldval here, because macros later in this file might \
	call this macro with __oldval argument.  */ \
     __typeof (oldval) __atg3_old = (oldval); \
     atomic_compare_and_exchange_val_acq (mem, newval, __atg3_old) \
       != __atg3_old; \
  })
# endif
#endif

#ifndef catomic_compare_and_exchange_bool_acq
# ifdef __arch_c_compare_and_exchange_bool_32_acq
#  define catomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  __atomic_bool_bysize (__arch_c_compare_and_exchange_bool,acq, \
			mem, newval, oldval)
# else
#  define catomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  ({ /* Cannot use __oldval here, because macros later in this file might \
	call this macro with __oldval argument.  */ \
     __typeof (oldval) __atg4_old = (oldval); \
     catomic_compare_and_exchange_val_acq (mem, newval, __atg4_old) \
       != __atg4_old; \
  })
# endif
#endif

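/* Example (illustrative sketch; `once_flag' is hypothetical): note the
   inverted sense of the boolean CAS; it returns zero when the exchange
   actually happened.

     if (atomic_compare_and_exchange_bool_acq (&once_flag, 1, 0) == 0)
       {
	 ... this thread won the race and runs the initialization ...
       }
*/
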
/* Store NEWVALUE in *MEM and return the old value.  */
#ifndef atomic_exchange_acq
# define atomic_exchange_acq(mem, newvalue) \
  ({ __typeof ((__typeof (*(mem))) *(mem)) __atg5_oldval; \
     __typeof (mem) __atg5_memp = (mem); \
     __typeof ((__typeof (*(mem))) *(mem)) __atg5_value = (newvalue); \
     \
     do \
       __atg5_oldval = *__atg5_memp; \
     while (__builtin_expect \
	    (atomic_compare_and_exchange_bool_acq (__atg5_memp, __atg5_value, \
						   __atg5_oldval), 0)); \
     \
     __atg5_oldval; })
#endif

#ifndef atomic_exchange_rel
# define atomic_exchange_rel(mem, newvalue) atomic_exchange_acq (mem, newvalue)
#endif

/* Add VALUE to *MEM and return the old value of *MEM.  */
#ifndef atomic_exchange_and_add_acq
# ifdef atomic_exchange_and_add
#  define atomic_exchange_and_add_acq(mem, value) \
  atomic_exchange_and_add (mem, value)
# else
#  define atomic_exchange_and_add_acq(mem, value) \
  ({ __typeof (*(mem)) __atg6_oldval; \
     __typeof (mem) __atg6_memp = (mem); \
     __typeof (*(mem)) __atg6_value = (value); \
     \
     do \
       __atg6_oldval = *__atg6_memp; \
     while (__builtin_expect \
	    (atomic_compare_and_exchange_bool_acq (__atg6_memp, \
						   __atg6_oldval \
						   + __atg6_value, \
						   __atg6_oldval), 0)); \
     \
     __atg6_oldval; })
# endif
#endif

#ifndef atomic_exchange_and_add_rel
# define atomic_exchange_and_add_rel(mem, value) \
  atomic_exchange_and_add_acq(mem, value)
#endif

#ifndef atomic_exchange_and_add
# define atomic_exchange_and_add(mem, value) \
  atomic_exchange_and_add_acq(mem, value)
#endif

#ifndef catomic_exchange_and_add
# define catomic_exchange_and_add(mem, value) \
  ({ __typeof (*(mem)) __atg7_oldv; \
     __typeof (mem) __atg7_memp = (mem); \
     __typeof (*(mem)) __atg7_value = (value); \
     \
     do \
       __atg7_oldv = *__atg7_memp; \
     while (__builtin_expect \
	    (catomic_compare_and_exchange_bool_acq (__atg7_memp, \
						    __atg7_oldv \
						    + __atg7_value, \
						    __atg7_oldv), 0)); \
     \
     __atg7_oldv; })
#endif

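/* Example (illustrative sketch; `nwaiters' is hypothetical):
   atomic_exchange_and_add returns the value *MEM held before the addition.

     unsigned int prev = atomic_exchange_and_add (&nwaiters, 1);
     ... prev is the old count; nwaiters now holds prev + 1 ...
*/
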
#ifndef atomic_max
# define atomic_max(mem, value) \
  do { \
    __typeof (*(mem)) __atg8_oldval; \
    __typeof (mem) __atg8_memp = (mem); \
    __typeof (*(mem)) __atg8_value = (value); \
    do { \
      __atg8_oldval = *__atg8_memp; \
      if (__atg8_oldval >= __atg8_value) \
	break; \
    } while (__builtin_expect \
	     (atomic_compare_and_exchange_bool_acq (__atg8_memp, __atg8_value, \
						    __atg8_oldval), 0)); \
  } while (0)
#endif

#ifndef catomic_max
# define catomic_max(mem, value) \
  do { \
    __typeof (*(mem)) __atg9_oldv; \
    __typeof (mem) __atg9_memp = (mem); \
    __typeof (*(mem)) __atg9_value = (value); \
    do { \
      __atg9_oldv = *__atg9_memp; \
      if (__atg9_oldv >= __atg9_value) \
	break; \
    } while (__builtin_expect \
	     (catomic_compare_and_exchange_bool_acq (__atg9_memp, \
						     __atg9_value, \
						     __atg9_oldv), 0)); \
  } while (0)
#endif

#ifndef atomic_min
# define atomic_min(mem, value) \
  do { \
    __typeof (*(mem)) __atg10_oldval; \
    __typeof (mem) __atg10_memp = (mem); \
    __typeof (*(mem)) __atg10_value = (value); \
    do { \
      __atg10_oldval = *__atg10_memp; \
      if (__atg10_oldval <= __atg10_value) \
	break; \
    } while (__builtin_expect \
	     (atomic_compare_and_exchange_bool_acq (__atg10_memp, \
						    __atg10_value, \
						    __atg10_oldval), 0)); \
  } while (0)
#endif

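/* Example (illustrative sketch; `peak_usage' and `current_usage' are
   hypothetical): atomic_max raises *MEM to VALUE only if VALUE is larger,
   retrying if another thread changes *MEM in between.

     atomic_max (&peak_usage, current_usage);
*/
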
#ifndef atomic_add
# define atomic_add(mem, value) (void) atomic_exchange_and_add ((mem), (value))
#endif

#ifndef catomic_add
# define catomic_add(mem, value) \
  (void) catomic_exchange_and_add ((mem), (value))
#endif

#ifndef atomic_increment
# define atomic_increment(mem) atomic_add ((mem), 1)
#endif

#ifndef catomic_increment
# define catomic_increment(mem) catomic_add ((mem), 1)
#endif

#ifndef atomic_increment_val
# define atomic_increment_val(mem) (atomic_exchange_and_add ((mem), 1) + 1)
#endif

#ifndef catomic_increment_val
# define catomic_increment_val(mem) (catomic_exchange_and_add ((mem), 1) + 1)
#endif

/* Add one to *MEM and return true iff it's now zero.  */
#ifndef atomic_increment_and_test
# define atomic_increment_and_test(mem) \
  (atomic_exchange_and_add ((mem), 1) + 1 == 0)
#endif

#ifndef atomic_decrement
# define atomic_decrement(mem) atomic_add ((mem), -1)
#endif

#ifndef catomic_decrement
# define catomic_decrement(mem) catomic_add ((mem), -1)
#endif

#ifndef atomic_decrement_val
# define atomic_decrement_val(mem) (atomic_exchange_and_add ((mem), -1) - 1)
#endif

#ifndef catomic_decrement_val
# define catomic_decrement_val(mem) (catomic_exchange_and_add ((mem), -1) - 1)
#endif

/* Subtract 1 from *MEM and return true iff it's now zero.  */
#ifndef atomic_decrement_and_test
# define atomic_decrement_and_test(mem) \
  (atomic_exchange_and_add ((mem), -1) == 1)
#endif

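/* Example (illustrative sketch; `obj' and `free_object' are hypothetical):
   the usual reference-count release pattern.

     if (atomic_decrement_and_test (&obj->refcount))
       free_object (obj);
*/
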
/* Decrement *MEM if it is > 0, and return the old value.  */
#ifndef atomic_decrement_if_positive
# define atomic_decrement_if_positive(mem) \
  ({ __typeof (*(mem)) __atg11_oldval; \
     __typeof (mem) __atg11_memp = (mem); \
     \
     do \
       { \
	 __atg11_oldval = *__atg11_memp; \
	 if (__glibc_unlikely (__atg11_oldval <= 0)) \
	   break; \
       } \
     while (__builtin_expect \
	    (atomic_compare_and_exchange_bool_acq (__atg11_memp, \
						   __atg11_oldval - 1, \
						   __atg11_oldval), 0)); \
     __atg11_oldval; })
#endif

#ifndef atomic_add_negative
# define atomic_add_negative(mem, value) \
  ({ __typeof (value) __atg12_value = (value); \
     atomic_exchange_and_add (mem, __atg12_value) < -__atg12_value; })
#endif

#ifndef atomic_add_zero
# define atomic_add_zero(mem, value) \
  ({ __typeof (value) __atg13_value = (value); \
     atomic_exchange_and_add (mem, __atg13_value) == -__atg13_value; })
#endif

#ifndef atomic_bit_set
# define atomic_bit_set(mem, bit) \
  (void) atomic_bit_test_set(mem, bit)
#endif

#ifndef atomic_bit_test_set
# define atomic_bit_test_set(mem, bit) \
  ({ __typeof (*(mem)) __atg14_old; \
     __typeof (mem) __atg14_memp = (mem); \
     __typeof (*(mem)) __atg14_mask = ((__typeof (*(mem))) 1 << (bit)); \
     \
     do \
       __atg14_old = (*__atg14_memp); \
     while (__builtin_expect \
	    (atomic_compare_and_exchange_bool_acq (__atg14_memp, \
						   __atg14_old | __atg14_mask, \
						   __atg14_old), 0)); \
     \
     __atg14_old & __atg14_mask; })
#endif

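/* Example (illustrative sketch; `flags' and `INIT_BIT' are hypothetical):
   atomic_bit_test_set returns the previous state of the bit, so a zero
   result means this thread is the one that set it.

     if (atomic_bit_test_set (&flags, INIT_BIT) == 0)
       {
	 ... perform one-time initialization ...
       }
*/
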
/* Atomically *mem &= mask.  */
#ifndef atomic_and
# define atomic_and(mem, mask) \
  do { \
    __typeof (*(mem)) __atg15_old; \
    __typeof (mem) __atg15_memp = (mem); \
    __typeof (*(mem)) __atg15_mask = (mask); \
    \
    do \
      __atg15_old = (*__atg15_memp); \
    while (__builtin_expect \
	   (atomic_compare_and_exchange_bool_acq (__atg15_memp, \
						  __atg15_old & __atg15_mask, \
						  __atg15_old), 0)); \
  } while (0)
#endif

#ifndef catomic_and
# define catomic_and(mem, mask) \
  do { \
    __typeof (*(mem)) __atg20_old; \
    __typeof (mem) __atg20_memp = (mem); \
    __typeof (*(mem)) __atg20_mask = (mask); \
    \
    do \
      __atg20_old = (*__atg20_memp); \
    while (__builtin_expect \
	   (catomic_compare_and_exchange_bool_acq (__atg20_memp, \
						   __atg20_old & __atg20_mask, \
						   __atg20_old), 0)); \
  } while (0)
#endif

/* Atomically *mem &= mask and return the old value of *mem.  */
#ifndef atomic_and_val
# define atomic_and_val(mem, mask) \
  ({ __typeof (*(mem)) __atg16_old; \
     __typeof (mem) __atg16_memp = (mem); \
     __typeof (*(mem)) __atg16_mask = (mask); \
     \
     do \
       __atg16_old = (*__atg16_memp); \
     while (__builtin_expect \
	    (atomic_compare_and_exchange_bool_acq (__atg16_memp, \
						   __atg16_old & __atg16_mask, \
						   __atg16_old), 0)); \
     \
     __atg16_old; })
#endif

/* Atomically *mem |= mask.  */
#ifndef atomic_or
# define atomic_or(mem, mask) \
  do { \
    __typeof (*(mem)) __atg17_old; \
    __typeof (mem) __atg17_memp = (mem); \
    __typeof (*(mem)) __atg17_mask = (mask); \
    \
    do \
      __atg17_old = (*__atg17_memp); \
    while (__builtin_expect \
	   (atomic_compare_and_exchange_bool_acq (__atg17_memp, \
						  __atg17_old | __atg17_mask, \
						  __atg17_old), 0)); \
  } while (0)
#endif

#ifndef catomic_or
# define catomic_or(mem, mask) \
  do { \
    __typeof (*(mem)) __atg18_old; \
    __typeof (mem) __atg18_memp = (mem); \
    __typeof (*(mem)) __atg18_mask = (mask); \
    \
    do \
      __atg18_old = (*__atg18_memp); \
    while (__builtin_expect \
	   (catomic_compare_and_exchange_bool_acq (__atg18_memp, \
						   __atg18_old | __atg18_mask, \
						   __atg18_old), 0)); \
  } while (0)
#endif

/* Atomically *mem |= mask and return the old value of *mem.  */
#ifndef atomic_or_val
# define atomic_or_val(mem, mask) \
  ({ __typeof (*(mem)) __atg19_old; \
     __typeof (mem) __atg19_memp = (mem); \
     __typeof (*(mem)) __atg19_mask = (mask); \
     \
     do \
       __atg19_old = (*__atg19_memp); \
     while (__builtin_expect \
	    (atomic_compare_and_exchange_bool_acq (__atg19_memp, \
						   __atg19_old | __atg19_mask, \
						   __atg19_old), 0)); \
     \
     __atg19_old; })
#endif

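/* Example (illustrative sketch; `state' and `DIRTY_MASK' are hypothetical):
   the _val variants let the caller inspect the bits that were already set.

     unsigned int prev = atomic_or_val (&state, DIRTY_MASK);
     if ((prev & DIRTY_MASK) == 0)
       {
	 ... this thread is the one that marked the state dirty ...
       }
*/
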
#ifndef atomic_full_barrier
# define atomic_full_barrier() __asm ("" ::: "memory")
#endif

#ifndef atomic_read_barrier
# define atomic_read_barrier() atomic_full_barrier ()
#endif

#ifndef atomic_write_barrier
# define atomic_write_barrier() atomic_full_barrier ()
#endif

#ifndef atomic_forced_read
# define atomic_forced_read(x) \
  ({ __typeof (x) __x; __asm ("" : "=r" (__x) : "0" (x)); __x; })
#endif

/* This is equal to 1 iff the architecture supports 64b atomic operations.  */
#ifndef __HAVE_64B_ATOMICS
#error Unable to determine if 64-bit atomics are present.
#endif

/* The following functions are a subset of the atomic operations provided by
   C11.  Usually, a function named atomic_OP_MO(args) is equivalent to C11's
   atomic_OP_explicit(args, memory_order_MO); exceptions noted below.  */

/* Each arch can request to use compiler built-ins for C11 atomics.  If it
   does, all atomics will be based on these.  */
#if USE_ATOMIC_COMPILER_BUILTINS

/* We require 32b atomic operations; some archs also support 64b atomic
   operations.  */
void __atomic_link_error (void);
# if __HAVE_64B_ATOMICS == 1
#  define __atomic_check_size(mem) \
   if ((sizeof (*mem) != 4) && (sizeof (*mem) != 8)) \
     __atomic_link_error ();
# else
#  define __atomic_check_size(mem) \
   if (sizeof (*mem) != 4) \
     __atomic_link_error ();
# endif
/* We additionally provide 8b and 16b atomic loads and stores; we do not yet
   need other atomic operations of such sizes, and restricting the support to
   loads and stores makes this easier for archs that do not have native
   support for atomic operations to less-than-word-sized data.  */
# if __HAVE_64B_ATOMICS == 1
#  define __atomic_check_size_ls(mem) \
   if ((sizeof (*mem) != 1) && (sizeof (*mem) != 2) && (sizeof (*mem) != 4) \
       && (sizeof (*mem) != 8)) \
     __atomic_link_error ();
# else
#  define __atomic_check_size_ls(mem) \
   if ((sizeof (*mem) != 1) && (sizeof (*mem) != 2) && (sizeof (*mem) != 4)) \
     __atomic_link_error ();
# endif

# define atomic_thread_fence_acquire() \
  __atomic_thread_fence (__ATOMIC_ACQUIRE)
# define atomic_thread_fence_release() \
  __atomic_thread_fence (__ATOMIC_RELEASE)
# define atomic_thread_fence_seq_cst() \
  __atomic_thread_fence (__ATOMIC_SEQ_CST)

# define atomic_load_relaxed(mem) \
  ({ __atomic_check_size_ls((mem)); \
     __atomic_load_n ((mem), __ATOMIC_RELAXED); })
# define atomic_load_acquire(mem) \
  ({ __atomic_check_size_ls((mem)); \
     __atomic_load_n ((mem), __ATOMIC_ACQUIRE); })

# define atomic_store_relaxed(mem, val) \
  do { \
    __atomic_check_size_ls((mem)); \
    __atomic_store_n ((mem), (val), __ATOMIC_RELAXED); \
  } while (0)
# define atomic_store_release(mem, val) \
  do { \
    __atomic_check_size_ls((mem)); \
    __atomic_store_n ((mem), (val), __ATOMIC_RELEASE); \
  } while (0)

/* On failure, this CAS has memory_order_relaxed semantics.  */
# define atomic_compare_exchange_weak_relaxed(mem, expected, desired) \
  ({ __atomic_check_size((mem)); \
     __atomic_compare_exchange_n ((mem), (expected), (desired), 1, \
				  __ATOMIC_RELAXED, __ATOMIC_RELAXED); })
# define atomic_compare_exchange_weak_acquire(mem, expected, desired) \
  ({ __atomic_check_size((mem)); \
     __atomic_compare_exchange_n ((mem), (expected), (desired), 1, \
				  __ATOMIC_ACQUIRE, __ATOMIC_RELAXED); })
# define atomic_compare_exchange_weak_release(mem, expected, desired) \
  ({ __atomic_check_size((mem)); \
     __atomic_compare_exchange_n ((mem), (expected), (desired), 1, \
				  __ATOMIC_RELEASE, __ATOMIC_RELAXED); })

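/* Example (illustrative sketch; `struct item', `head' and `node' are
   hypothetical): the weak CAS takes a pointer to the expected value and
   updates it on failure, so a retry loop need not reload *MEM explicitly.
   Pushing onto a lock-free stack:

     struct item *expected = atomic_load_relaxed (&head);
     do
       node->next = expected;
     while (!atomic_compare_exchange_weak_release (&head, &expected, node));
*/
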
# define atomic_exchange_relaxed(mem, desired) \
  ({ __atomic_check_size((mem)); \
     __atomic_exchange_n ((mem), (desired), __ATOMIC_RELAXED); })
# define atomic_exchange_acquire(mem, desired) \
  ({ __atomic_check_size((mem)); \
     __atomic_exchange_n ((mem), (desired), __ATOMIC_ACQUIRE); })
# define atomic_exchange_release(mem, desired) \
  ({ __atomic_check_size((mem)); \
     __atomic_exchange_n ((mem), (desired), __ATOMIC_RELEASE); })

# define atomic_fetch_add_relaxed(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_add ((mem), (operand), __ATOMIC_RELAXED); })
# define atomic_fetch_add_acquire(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_add ((mem), (operand), __ATOMIC_ACQUIRE); })
# define atomic_fetch_add_release(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_add ((mem), (operand), __ATOMIC_RELEASE); })
# define atomic_fetch_add_acq_rel(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_add ((mem), (operand), __ATOMIC_ACQ_REL); })

# define atomic_fetch_and_relaxed(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_and ((mem), (operand), __ATOMIC_RELAXED); })
# define atomic_fetch_and_acquire(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_and ((mem), (operand), __ATOMIC_ACQUIRE); })
# define atomic_fetch_and_release(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_and ((mem), (operand), __ATOMIC_RELEASE); })

# define atomic_fetch_or_relaxed(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_or ((mem), (operand), __ATOMIC_RELAXED); })
# define atomic_fetch_or_acquire(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_or ((mem), (operand), __ATOMIC_ACQUIRE); })
# define atomic_fetch_or_release(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_or ((mem), (operand), __ATOMIC_RELEASE); })

# define atomic_fetch_xor_release(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_xor ((mem), (operand), __ATOMIC_RELEASE); })

#else /* !USE_ATOMIC_COMPILER_BUILTINS  */

/* By default, we assume that read, write, and full barriers are equivalent
   to acquire, release, and seq_cst barriers.  Archs for which this does not
   hold have to provide custom definitions of the fences.  */
# ifndef atomic_thread_fence_acquire
#  define atomic_thread_fence_acquire() atomic_read_barrier ()
# endif
# ifndef atomic_thread_fence_release
#  define atomic_thread_fence_release() atomic_write_barrier ()
# endif
# ifndef atomic_thread_fence_seq_cst
#  define atomic_thread_fence_seq_cst() atomic_full_barrier ()
# endif

# ifndef atomic_load_relaxed
#  define atomic_load_relaxed(mem) \
   ({ __typeof ((__typeof (*(mem))) *(mem)) __atg100_val; \
      __asm ("" : "=r" (__atg100_val) : "0" (*(mem))); \
      __atg100_val; })
# endif
# ifndef atomic_load_acquire
#  define atomic_load_acquire(mem) \
   ({ __typeof (*(mem)) __atg101_val = atomic_load_relaxed (mem); \
      atomic_thread_fence_acquire (); \
      __atg101_val; })
# endif

# ifndef atomic_store_relaxed
/* XXX Use inline asm here?  */
#  define atomic_store_relaxed(mem, val) do { *(mem) = (val); } while (0)
# endif
# ifndef atomic_store_release
#  define atomic_store_release(mem, val) \
   do { \
     atomic_thread_fence_release (); \
     atomic_store_relaxed ((mem), (val)); \
   } while (0)
# endif

/* On failure, this CAS has memory_order_relaxed semantics.  */
/* XXX This potentially has one branch more than necessary, but archs
   currently do not define a CAS that returns both the previous value and
   the success flag.  */
# ifndef atomic_compare_exchange_weak_acquire
#  define atomic_compare_exchange_weak_acquire(mem, expected, desired) \
   ({ typeof (*(expected)) __atg102_expected = *(expected); \
      *(expected) = \
	atomic_compare_and_exchange_val_acq ((mem), (desired), *(expected)); \
      *(expected) == __atg102_expected; })
# endif
# ifndef atomic_compare_exchange_weak_relaxed
/* XXX Fall back to CAS with acquire MO because archs do not define a weaker
   CAS.  */
#  define atomic_compare_exchange_weak_relaxed(mem, expected, desired) \
   atomic_compare_exchange_weak_acquire ((mem), (expected), (desired))
# endif
# ifndef atomic_compare_exchange_weak_release
#  define atomic_compare_exchange_weak_release(mem, expected, desired) \
   ({ typeof (*(expected)) __atg103_expected = *(expected); \
      *(expected) = \
	atomic_compare_and_exchange_val_rel ((mem), (desired), *(expected)); \
      *(expected) == __atg103_expected; })
# endif

/* XXX Fall back to acquire MO because archs do not define a weaker
   atomic_exchange.  */
# ifndef atomic_exchange_relaxed
#  define atomic_exchange_relaxed(mem, val) \
   atomic_exchange_acq ((mem), (val))
# endif
# ifndef atomic_exchange_acquire
#  define atomic_exchange_acquire(mem, val) \
   atomic_exchange_acq ((mem), (val))
# endif
# ifndef atomic_exchange_release
#  define atomic_exchange_release(mem, val) \
   atomic_exchange_rel ((mem), (val))
# endif

# ifndef atomic_fetch_add_acquire
#  define atomic_fetch_add_acquire(mem, operand) \
   atomic_exchange_and_add_acq ((mem), (operand))
# endif
# ifndef atomic_fetch_add_relaxed
/* XXX Fall back to acquire MO because the MO semantics of
   atomic_exchange_and_add are not documented; the generic version falls back
   to atomic_exchange_and_add_acq if atomic_exchange_and_add is not defined,
   and vice versa.  */
#  define atomic_fetch_add_relaxed(mem, operand) \
   atomic_fetch_add_acquire ((mem), (operand))
# endif
# ifndef atomic_fetch_add_release
#  define atomic_fetch_add_release(mem, operand) \
   atomic_exchange_and_add_rel ((mem), (operand))
# endif
# ifndef atomic_fetch_add_acq_rel
#  define atomic_fetch_add_acq_rel(mem, operand) \
   ({ atomic_thread_fence_release (); \
      atomic_exchange_and_add_acq ((mem), (operand)); })
# endif

/* XXX Fall back to acquire MO because archs do not define a weaker
   atomic_and_val.  */
# ifndef atomic_fetch_and_relaxed
#  define atomic_fetch_and_relaxed(mem, operand) \
   atomic_fetch_and_acquire ((mem), (operand))
# endif
/* XXX The default for atomic_and_val has acquire semantics, but this is not
   documented.  */
# ifndef atomic_fetch_and_acquire
#  define atomic_fetch_and_acquire(mem, operand) \
   atomic_and_val ((mem), (operand))
# endif
# ifndef atomic_fetch_and_release
/* XXX This unnecessarily has acquire MO.  */
#  define atomic_fetch_and_release(mem, operand) \
   ({ atomic_thread_fence_release (); \
      atomic_and_val ((mem), (operand)); })
# endif

/* XXX The default for atomic_or_val has acquire semantics, but this is not
   documented.  */
# ifndef atomic_fetch_or_acquire
#  define atomic_fetch_or_acquire(mem, operand) \
   atomic_or_val ((mem), (operand))
# endif
/* XXX Fall back to acquire MO because archs do not define a weaker
   atomic_or_val.  */
# ifndef atomic_fetch_or_relaxed
#  define atomic_fetch_or_relaxed(mem, operand) \
   atomic_fetch_or_acquire ((mem), (operand))
# endif
/* XXX Contains an unnecessary acquire MO because archs do not define a weaker
   atomic_or_val.  */
# ifndef atomic_fetch_or_release
#  define atomic_fetch_or_release(mem, operand) \
   ({ atomic_thread_fence_release (); \
      atomic_fetch_or_acquire ((mem), (operand)); })
# endif

# ifndef atomic_fetch_xor_release
/* Failing the atomic_compare_exchange_weak_release reloads the value in
   __atg104_expected, so we need only do the XOR again and retry.  */
#  define atomic_fetch_xor_release(mem, operand) \
   ({ __typeof (mem) __atg104_memp = (mem); \
      __typeof (*(mem)) __atg104_expected = (*__atg104_memp); \
      __typeof (*(mem)) __atg104_desired; \
      __typeof (*(mem)) __atg104_op = (operand); \
      \
      do \
	__atg104_desired = __atg104_expected ^ __atg104_op; \
      while (__glibc_unlikely \
	     (atomic_compare_exchange_weak_release ( \
		__atg104_memp, &__atg104_expected, __atg104_desired) \
	      == 0)); \
      __atg104_expected; })
# endif

#endif /* !USE_ATOMIC_COMPILER_BUILTINS  */

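/* Example (illustrative sketch; `data', `ready' and `use' are hypothetical):
   a release store paired with an acquire load orders the data handoff with
   either implementation above.

     ... producer ...
     data = 42;
     atomic_store_release (&ready, 1);

     ... consumer ...
     if (atomic_load_acquire (&ready) != 0)
       use (data);
*/
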
/* This operation does not affect synchronization semantics but can be used
   in the body of a spin loop to potentially improve its efficiency.  */
#ifndef atomic_spin_nop
# define atomic_spin_nop() do { /* nothing */ } while (0)
#endif

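/* Example (illustrative sketch; `lock' is a hypothetical int): a simple
   test-and-test-and-set spin lock that uses atomic_spin_nop while waiting.

     while (atomic_exchange_acquire (&lock, 1) != 0)
       while (atomic_load_relaxed (&lock) != 0)
	 atomic_spin_nop ();
     ... critical section ...
     atomic_store_release (&lock, 0);
*/
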
/* ATOMIC_EXCHANGE_USES_CAS is non-zero if atomic_exchange operations
   are implemented based on a CAS loop; otherwise, this is zero and we assume
   that the atomic_exchange operations could provide better performance
   than a CAS loop.  */
#ifndef ATOMIC_EXCHANGE_USES_CAS
# error ATOMIC_EXCHANGE_USES_CAS has to be defined.
#endif

#endif	/* atomic.h */