/* Internal macros for atomic operations for GNU C Library.
   Copyright (C) 2002-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#ifndef _ATOMIC_H
#define _ATOMIC_H	1
/* This header defines three types of macros:

   - atomic arithmetic and logic operation on memory.  They all
     have the prefix "atomic_".

   - conditionally atomic operations of the same kinds.  These
     always behave identical but can be faster when atomicity
     is not really needed since only one thread has access to
     the memory location.  In that case the code is slower in
     the multi-thread case.  The interfaces have the prefix
     "catomic_".

   - support functions like barriers.  They also have the prefix
     "atomic_".

   Architectures must provide a few lowlevel macros (the compare
   and exchange definitions).  All others are optional.  They
   should only be provided if the architecture has specific
   support for the operation.

   As <atomic.h> macros are usually heavily nested and often use local
   variables to make sure side-effects are evaluated properly, use for
   macro local variables a per-macro unique prefix.  This file uses
   __atgN_ prefix where N is different in each macro.  */
#include <stdlib.h>

#include <atomic-machine.h>
/* Wrapper macros to call pre_NN_post (mem, ...) where NN is the
   bit width of *MEM.  The calling macro puts parens around MEM
   and following args.  The result has the (possibly de-qualified)
   type of *MEM; an unsupported operand size aborts at runtime.  */
#define __atomic_val_bysize(pre, post, mem, ...) \
  ({ \
    __typeof ((__typeof (*(mem))) *(mem)) __atg1_result; \
    if (sizeof (*mem) == 1) \
      __atg1_result = pre##_8_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 2) \
      __atg1_result = pre##_16_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 4) \
      __atg1_result = pre##_32_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 8) \
      __atg1_result = pre##_64_##post (mem, __VA_ARGS__); \
    else \
      abort (); \
    __atg1_result; \
  })
/* Like __atomic_val_bysize, but the dispatched pre_NN_post macros
   return a success/failure flag, so the result is a plain int.  */
#define __atomic_bool_bysize(pre, post, mem, ...) \
  ({ \
    int __atg2_result; \
    if (sizeof (*mem) == 1) \
      __atg2_result = pre##_8_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 2) \
      __atg2_result = pre##_16_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 4) \
      __atg2_result = pre##_32_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 8) \
      __atg2_result = pre##_64_##post (mem, __VA_ARGS__); \
    else \
      abort (); \
    __atg2_result; \
  })
/* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL.
   Return the old *MEM value.  */
#if !defined atomic_compare_and_exchange_val_acq \
    && defined __arch_compare_and_exchange_val_32_acq
# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  __atomic_val_bysize (__arch_compare_and_exchange_val,acq, \
		       mem, newval, oldval)
#endif

#ifndef catomic_compare_and_exchange_val_acq
# ifdef __arch_c_compare_and_exchange_val_32_acq
#  define catomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  __atomic_val_bysize (__arch_c_compare_and_exchange_val,acq, \
		       mem, newval, oldval)
# else
#  define catomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  atomic_compare_and_exchange_val_acq (mem, newval, oldval)
# endif
#endif

#ifndef catomic_compare_and_exchange_val_rel
# ifndef atomic_compare_and_exchange_val_rel
#  define catomic_compare_and_exchange_val_rel(mem, newval, oldval) \
  catomic_compare_and_exchange_val_acq (mem, newval, oldval)
# else
#  define catomic_compare_and_exchange_val_rel(mem, newval, oldval) \
  atomic_compare_and_exchange_val_rel (mem, newval, oldval)
# endif
#endif

#ifndef atomic_compare_and_exchange_val_rel
# define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \
  atomic_compare_and_exchange_val_acq (mem, newval, oldval)
#endif
/* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL.
   Return zero if *MEM was changed or non-zero if no exchange happened.  */
#ifndef atomic_compare_and_exchange_bool_acq
# ifdef __arch_compare_and_exchange_bool_32_acq
#  define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  __atomic_bool_bysize (__arch_compare_and_exchange_bool,acq, \
			mem, newval, oldval)
# else
#  define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  ({ /* Cannot use __oldval here, because macros later in this file might \
	call this macro with __oldval argument.	 */ \
     __typeof (oldval) __atg3_old = (oldval); \
     atomic_compare_and_exchange_val_acq (mem, newval, __atg3_old) \
       != __atg3_old; \
  })
# endif
#endif

#ifndef catomic_compare_and_exchange_bool_acq
# ifdef __arch_c_compare_and_exchange_bool_32_acq
#  define catomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  __atomic_bool_bysize (__arch_c_compare_and_exchange_bool,acq, \
			mem, newval, oldval)
# else
#  define catomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  ({ /* Cannot use __oldval here, because macros later in this file might \
	call this macro with __oldval argument.	 */ \
     __typeof (oldval) __atg4_old = (oldval); \
     catomic_compare_and_exchange_val_acq (mem, newval, __atg4_old) \
       != __atg4_old; \
  })
# endif
#endif
/* Store NEWVALUE in *MEM and return the old value.
   Generic fallback: a CAS loop that retries until the exchange wins.  */
#ifndef atomic_exchange_acq
# define atomic_exchange_acq(mem, newvalue) \
  ({ __typeof ((__typeof (*(mem))) *(mem)) __atg5_oldval; \
     __typeof (mem) __atg5_memp = (mem); \
     __typeof ((__typeof (*(mem))) *(mem)) __atg5_value = (newvalue); \
									      \
     do \
       __atg5_oldval = *__atg5_memp; \
     while (__builtin_expect \
	    (atomic_compare_and_exchange_bool_acq (__atg5_memp, __atg5_value, \
						   __atg5_oldval), 0)); \
									      \
     __atg5_oldval; })
#endif

#ifndef atomic_exchange_rel
# define atomic_exchange_rel(mem, newvalue) atomic_exchange_acq (mem, newvalue)
#endif
/* Add VALUE to *MEM and return the old value of *MEM.  */
#ifndef atomic_exchange_and_add_acq
# ifdef atomic_exchange_and_add
#  define atomic_exchange_and_add_acq(mem, value) \
  atomic_exchange_and_add (mem, value)
# else
#  define atomic_exchange_and_add_acq(mem, value) \
  ({ __typeof (*(mem)) __atg6_oldval; \
     __typeof (mem) __atg6_memp = (mem); \
     __typeof (*(mem)) __atg6_value = (value); \
									      \
     do \
       __atg6_oldval = *__atg6_memp; \
     while (__builtin_expect \
	    (atomic_compare_and_exchange_bool_acq (__atg6_memp, \
						   __atg6_oldval \
						   + __atg6_value, \
						   __atg6_oldval), 0)); \
									      \
     __atg6_oldval; })
# endif
#endif

#ifndef atomic_exchange_and_add_rel
# define atomic_exchange_and_add_rel(mem, value) \
  atomic_exchange_and_add_acq(mem, value)
#endif

#ifndef atomic_exchange_and_add
# define atomic_exchange_and_add(mem, value) \
  atomic_exchange_and_add_acq(mem, value)
#endif

/* Conditionally-atomic variant; may use the cheaper __arch_c_* CAS.  */
#ifndef catomic_exchange_and_add
# define catomic_exchange_and_add(mem, value) \
  ({ __typeof (*(mem)) __atg7_oldv; \
     __typeof (mem) __atg7_memp = (mem); \
     __typeof (*(mem)) __atg7_value = (value); \
									      \
     do \
       __atg7_oldv = *__atg7_memp; \
     while (__builtin_expect \
	    (catomic_compare_and_exchange_bool_acq (__atg7_memp, \
						    __atg7_oldv \
						    + __atg7_value, \
						    __atg7_oldv), 0)); \
									      \
     __atg7_oldv; })
#endif
/* Raise *MEM to VALUE if it is currently smaller; CAS loop, exits
   early once the stored value is already >= VALUE.  */
#ifndef atomic_max
# define atomic_max(mem, value) \
  do { \
    __typeof (*(mem)) __atg8_oldval; \
    __typeof (mem) __atg8_memp = (mem); \
    __typeof (*(mem)) __atg8_value = (value); \
    do { \
      __atg8_oldval = *__atg8_memp; \
      if (__atg8_oldval >= __atg8_value) \
	break; \
    } while (__builtin_expect \
	     (atomic_compare_and_exchange_bool_acq (__atg8_memp, __atg8_value,\
						    __atg8_oldval), 0)); \
  } while (0)
#endif

#ifndef catomic_max
# define catomic_max(mem, value) \
  do { \
    __typeof (*(mem)) __atg9_oldv; \
    __typeof (mem) __atg9_memp = (mem); \
    __typeof (*(mem)) __atg9_value = (value); \
    do { \
      __atg9_oldv = *__atg9_memp; \
      if (__atg9_oldv >= __atg9_value) \
	break; \
    } while (__builtin_expect \
	     (catomic_compare_and_exchange_bool_acq (__atg9_memp, \
						     __atg9_value, \
						     __atg9_oldv), 0)); \
  } while (0)
#endif

/* Lower *MEM to VALUE if it is currently larger.  */
#ifndef atomic_min
# define atomic_min(mem, value) \
  do { \
    __typeof (*(mem)) __atg10_oldval; \
    __typeof (mem) __atg10_memp = (mem); \
    __typeof (*(mem)) __atg10_value = (value); \
    do { \
      __atg10_oldval = *__atg10_memp; \
      if (__atg10_oldval <= __atg10_value) \
	break; \
    } while (__builtin_expect \
	     (atomic_compare_and_exchange_bool_acq (__atg10_memp, \
						    __atg10_value, \
						    __atg10_oldval), 0)); \
  } while (0)
#endif
/* Convenience wrappers built on atomic_exchange_and_add.  */
#ifndef atomic_add
# define atomic_add(mem, value) (void) atomic_exchange_and_add ((mem), (value))
#endif

#ifndef catomic_add
# define catomic_add(mem, value) \
  (void) catomic_exchange_and_add ((mem), (value))
#endif

#ifndef atomic_increment
# define atomic_increment(mem) atomic_add ((mem), 1)
#endif

#ifndef catomic_increment
# define catomic_increment(mem) catomic_add ((mem), 1)
#endif

#ifndef atomic_increment_val
# define atomic_increment_val(mem) (atomic_exchange_and_add ((mem), 1) + 1)
#endif

#ifndef catomic_increment_val
# define catomic_increment_val(mem) (catomic_exchange_and_add ((mem), 1) + 1)
#endif

/* Add one to *MEM and return true iff it's now zero.  */
#ifndef atomic_increment_and_test
# define atomic_increment_and_test(mem) \
  (atomic_exchange_and_add ((mem), 1) + 1 == 0)
#endif

#ifndef atomic_decrement
# define atomic_decrement(mem) atomic_add ((mem), -1)
#endif

#ifndef catomic_decrement
# define catomic_decrement(mem) catomic_add ((mem), -1)
#endif

#ifndef atomic_decrement_val
# define atomic_decrement_val(mem) (atomic_exchange_and_add ((mem), -1) - 1)
#endif

#ifndef catomic_decrement_val
# define catomic_decrement_val(mem) (catomic_exchange_and_add ((mem), -1) - 1)
#endif

/* Subtract 1 from *MEM and return true iff it's now zero.  */
#ifndef atomic_decrement_and_test
# define atomic_decrement_and_test(mem) \
  (atomic_exchange_and_add ((mem), -1) == 1)
#endif
/* Decrement *MEM if it is > 0, and return the old value.
   CAS loop; bails out without writing when the value is <= 0.  */
#ifndef atomic_decrement_if_positive
# define atomic_decrement_if_positive(mem) \
  ({ __typeof (*(mem)) __atg11_oldval; \
     __typeof (mem) __atg11_memp = (mem); \
									      \
     do \
       { \
	 __atg11_oldval = *__atg11_memp; \
	 if (__glibc_unlikely (__atg11_oldval <= 0)) \
	   break; \
       } \
     while (__builtin_expect \
	    (atomic_compare_and_exchange_bool_acq (__atg11_memp, \
						   __atg11_oldval - 1, \
						   __atg11_oldval), 0)); \
     __atg11_oldval; })
#endif
/* Add VALUE to *MEM and return true iff the new value is negative.  */
#ifndef atomic_add_negative
# define atomic_add_negative(mem, value) \
  ({ __typeof (value) __atg12_value = (value); \
     atomic_exchange_and_add (mem, __atg12_value) < -__atg12_value; })
#endif

/* Add VALUE to *MEM and return true iff the new value is zero.  */
#ifndef atomic_add_zero
# define atomic_add_zero(mem, value) \
  ({ __typeof (value) __atg13_value = (value); \
     atomic_exchange_and_add (mem, __atg13_value) == -__atg13_value; })
#endif

/* Set bit BIT in *MEM, discarding the previous value.  */
#ifndef atomic_bit_set
# define atomic_bit_set(mem, bit) \
  (void) atomic_bit_test_set(mem, bit)
#endif
/* Set bit BIT in *MEM and return the previous value of that bit
   (non-zero iff it was already set).  */
#ifndef atomic_bit_test_set
# define atomic_bit_test_set(mem, bit) \
  ({ __typeof (*(mem)) __atg14_old; \
     __typeof (mem) __atg14_memp = (mem); \
     __typeof (*(mem)) __atg14_mask = ((__typeof (*(mem))) 1 << (bit)); \
									      \
     do \
       __atg14_old = (*__atg14_memp); \
     while (__builtin_expect \
	    (atomic_compare_and_exchange_bool_acq (__atg14_memp, \
						   __atg14_old | __atg14_mask,\
						   __atg14_old), 0)); \
									      \
     __atg14_old & __atg14_mask; })
#endif
/* Atomically *mem &= mask.  */
#ifndef atomic_and
# define atomic_and(mem, mask) \
  do { \
    __typeof (*(mem)) __atg15_old; \
    __typeof (mem) __atg15_memp = (mem); \
    __typeof (*(mem)) __atg15_mask = (mask); \
									      \
    do \
      __atg15_old = (*__atg15_memp); \
    while (__builtin_expect \
	   (atomic_compare_and_exchange_bool_acq (__atg15_memp, \
						  __atg15_old & __atg15_mask, \
						  __atg15_old), 0)); \
  } while (0)
#endif

/* Conditionally-atomic variant of atomic_and.  */
#ifndef catomic_and
# define catomic_and(mem, mask) \
  do { \
    __typeof (*(mem)) __atg20_old; \
    __typeof (mem) __atg20_memp = (mem); \
    __typeof (*(mem)) __atg20_mask = (mask); \
									      \
    do \
      __atg20_old = (*__atg20_memp); \
    while (__builtin_expect \
	   (catomic_compare_and_exchange_bool_acq (__atg20_memp, \
						   __atg20_old & __atg20_mask,\
						   __atg20_old), 0)); \
  } while (0)
#endif

/* Atomically *mem &= mask and return the old value of *mem.  */
#ifndef atomic_and_val
# define atomic_and_val(mem, mask) \
  ({ __typeof (*(mem)) __atg16_old; \
     __typeof (mem) __atg16_memp = (mem); \
     __typeof (*(mem)) __atg16_mask = (mask); \
									      \
     do \
       __atg16_old = (*__atg16_memp); \
     while (__builtin_expect \
	    (atomic_compare_and_exchange_bool_acq (__atg16_memp, \
						   __atg16_old & __atg16_mask,\
						   __atg16_old), 0)); \
									      \
     __atg16_old; })
#endif
/* Atomically *mem |= mask.  */
#ifndef atomic_or
# define atomic_or(mem, mask) \
  do { \
    __typeof (*(mem)) __atg17_old; \
    __typeof (mem) __atg17_memp = (mem); \
    __typeof (*(mem)) __atg17_mask = (mask); \
									      \
    do \
      __atg17_old = (*__atg17_memp); \
    while (__builtin_expect \
	   (atomic_compare_and_exchange_bool_acq (__atg17_memp, \
						  __atg17_old | __atg17_mask, \
						  __atg17_old), 0)); \
  } while (0)
#endif

/* Conditionally-atomic variant of atomic_or.  */
#ifndef catomic_or
# define catomic_or(mem, mask) \
  do { \
    __typeof (*(mem)) __atg18_old; \
    __typeof (mem) __atg18_memp = (mem); \
    __typeof (*(mem)) __atg18_mask = (mask); \
									      \
    do \
      __atg18_old = (*__atg18_memp); \
    while (__builtin_expect \
	   (catomic_compare_and_exchange_bool_acq (__atg18_memp, \
						   __atg18_old | __atg18_mask,\
						   __atg18_old), 0)); \
  } while (0)
#endif

/* Atomically *mem |= mask and return the old value of *mem.  */
#ifndef atomic_or_val
# define atomic_or_val(mem, mask) \
  ({ __typeof (*(mem)) __atg19_old; \
     __typeof (mem) __atg19_memp = (mem); \
     __typeof (*(mem)) __atg19_mask = (mask); \
									      \
     do \
       __atg19_old = (*__atg19_memp); \
     while (__builtin_expect \
	    (atomic_compare_and_exchange_bool_acq (__atg19_memp, \
						   __atg19_old | __atg19_mask,\
						   __atg19_old), 0)); \
									      \
     __atg19_old; })
#endif
/* Fallback barriers: a compiler barrier only.  Archs with weaker memory
   models override these in atomic-machine.h with real fence instructions.  */
#ifndef atomic_full_barrier
# define atomic_full_barrier() __asm ("" ::: "memory")
#endif

#ifndef atomic_read_barrier
# define atomic_read_barrier() atomic_full_barrier ()
#endif

#ifndef atomic_write_barrier
# define atomic_write_barrier() atomic_full_barrier ()
#endif

/* Force a reload of X from memory (defeats compiler caching of the value)
   without any ordering guarantee.  */
#ifndef atomic_forced_read
# define atomic_forced_read(x) \
  ({ __typeof (x) __x; __asm ("" : "=r" (__x) : "0" (x)); __x; })
#endif
527 /* This is equal to 1 iff the architecture supports 64b atomic operations. */
528 #ifndef __HAVE_64B_ATOMICS
529 #error Unable to determine if 64-bit atomics are present.
/* The following functions are a subset of the atomic operations provided by
   C11.  Usually, a function named atomic_OP_MO(args) is equivalent to C11's
   atomic_OP_explicit(args, memory_order_MO); exceptions noted below.  */
536 /* Each arch can request to use compiler built-ins for C11 atomics. If it
537 does, all atomics will be based on these. */
538 #if USE_ATOMIC_COMPILER_BUILTINS
540 /* We require 32b atomic operations; some archs also support 64b atomic
542 void __atomic_link_error (void);
543 # if __HAVE_64B_ATOMICS == 1
544 # define __atomic_check_size(mem) \
545 if ((sizeof (*mem) != 4) && (sizeof (*mem) != 8)) \
546 __atomic_link_error ();
548 # define __atomic_check_size(mem) \
549 if (sizeof (*mem) != 4) \
550 __atomic_link_error ();
552 /* We additionally provide 8b and 16b atomic loads and stores; we do not yet
553 need other atomic operations of such sizes, and restricting the support to
554 loads and stores makes this easier for archs that do not have native
555 support for atomic operations to less-than-word-sized data. */
556 # if __HAVE_64B_ATOMICS == 1
557 # define __atomic_check_size_ls(mem) \
558 if ((sizeof (*mem) != 1) && (sizeof (*mem) != 2) && (sizeof (*mem) != 4) \
559 && (sizeof (*mem) != 8)) \
560 __atomic_link_error ();
562 # define __atomic_check_size_ls(mem) \
563 if ((sizeof (*mem) != 1) && (sizeof (*mem) != 2) && sizeof (*mem) != 4) \
564 __atomic_link_error ();
/* C11 fences and loads/stores mapped directly onto compiler built-ins.  */
# define atomic_thread_fence_acquire() \
  __atomic_thread_fence (__ATOMIC_ACQUIRE)
# define atomic_thread_fence_release() \
  __atomic_thread_fence (__ATOMIC_RELEASE)
# define atomic_thread_fence_seq_cst() \
  __atomic_thread_fence (__ATOMIC_SEQ_CST)

# define atomic_load_relaxed(mem) \
  ({ __atomic_check_size_ls((mem)); \
     __atomic_load_n ((mem), __ATOMIC_RELAXED); })
# define atomic_load_acquire(mem) \
  ({ __atomic_check_size_ls((mem)); \
     __atomic_load_n ((mem), __ATOMIC_ACQUIRE); })

# define atomic_store_relaxed(mem, val) \
  do { \
    __atomic_check_size_ls((mem)); \
    __atomic_store_n ((mem), (val), __ATOMIC_RELAXED); \
  } while (0)
# define atomic_store_release(mem, val) \
  do { \
    __atomic_check_size_ls((mem)); \
    __atomic_store_n ((mem), (val), __ATOMIC_RELEASE); \
  } while (0)
/* On failure, this CAS has memory_order_relaxed semantics.  */
# define atomic_compare_exchange_weak_relaxed(mem, expected, desired) \
  ({ __atomic_check_size((mem)); \
     __atomic_compare_exchange_n ((mem), (expected), (desired), 1, \
				  __ATOMIC_RELAXED, __ATOMIC_RELAXED); })
# define atomic_compare_exchange_weak_acquire(mem, expected, desired) \
  ({ __atomic_check_size((mem)); \
     __atomic_compare_exchange_n ((mem), (expected), (desired), 1, \
				  __ATOMIC_ACQUIRE, __ATOMIC_RELAXED); })
# define atomic_compare_exchange_weak_release(mem, expected, desired) \
  ({ __atomic_check_size((mem)); \
     __atomic_compare_exchange_n ((mem), (expected), (desired), 1, \
				  __ATOMIC_RELEASE, __ATOMIC_RELAXED); })

# define atomic_exchange_relaxed(mem, desired) \
  ({ __atomic_check_size((mem)); \
     __atomic_exchange_n ((mem), (desired), __ATOMIC_RELAXED); })
# define atomic_exchange_acquire(mem, desired) \
  ({ __atomic_check_size((mem)); \
     __atomic_exchange_n ((mem), (desired), __ATOMIC_ACQUIRE); })
# define atomic_exchange_release(mem, desired) \
  ({ __atomic_check_size((mem)); \
     __atomic_exchange_n ((mem), (desired), __ATOMIC_RELEASE); })
/* Fetch-and-op operations mapped onto compiler built-ins.  */
# define atomic_fetch_add_relaxed(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_add ((mem), (operand), __ATOMIC_RELAXED); })
# define atomic_fetch_add_acquire(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_add ((mem), (operand), __ATOMIC_ACQUIRE); })
# define atomic_fetch_add_release(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_add ((mem), (operand), __ATOMIC_RELEASE); })
# define atomic_fetch_add_acq_rel(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_add ((mem), (operand), __ATOMIC_ACQ_REL); })

# define atomic_fetch_and_relaxed(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_and ((mem), (operand), __ATOMIC_RELAXED); })
# define atomic_fetch_and_acquire(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_and ((mem), (operand), __ATOMIC_ACQUIRE); })
# define atomic_fetch_and_release(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_and ((mem), (operand), __ATOMIC_RELEASE); })

# define atomic_fetch_or_relaxed(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_or ((mem), (operand), __ATOMIC_RELAXED); })
# define atomic_fetch_or_acquire(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_or ((mem), (operand), __ATOMIC_ACQUIRE); })
# define atomic_fetch_or_release(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_or ((mem), (operand), __ATOMIC_RELEASE); })

# define atomic_fetch_xor_release(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_xor ((mem), (operand), __ATOMIC_RELEASE); })
653 #else /* !USE_ATOMIC_COMPILER_BUILTINS */
655 /* By default, we assume that read, write, and full barriers are equivalent
656 to acquire, release, and seq_cst barriers. Archs for which this does not
657 hold have to provide custom definitions of the fences. */
658 # ifndef atomic_thread_fence_acquire
659 # define atomic_thread_fence_acquire() atomic_read_barrier ()
661 # ifndef atomic_thread_fence_release
662 # define atomic_thread_fence_release() atomic_write_barrier ()
664 # ifndef atomic_thread_fence_seq_cst
665 # define atomic_thread_fence_seq_cst() atomic_full_barrier ()
/* Fallback load/store: plain accesses with compiler barriers / fences.  */
# ifndef atomic_load_relaxed
#  define atomic_load_relaxed(mem) \
   ({ __typeof ((__typeof (*(mem))) *(mem)) __atg100_val; \
   __asm ("" : "=r" (__atg100_val) : "0" (*(mem))); \
   __atg100_val; })
# endif
# ifndef atomic_load_acquire
#  define atomic_load_acquire(mem) \
   ({ __typeof (*(mem)) __atg101_val = atomic_load_relaxed (mem); \
   atomic_thread_fence_acquire (); \
   __atg101_val; })
# endif

# ifndef atomic_store_relaxed
/* XXX Use inline asm here?  */
#  define atomic_store_relaxed(mem, val) do { *(mem) = (val); } while (0)
# endif
# ifndef atomic_store_release
#  define atomic_store_release(mem, val) \
   do { \
     atomic_thread_fence_release (); \
     atomic_store_relaxed ((mem), (val)); \
   } while (0)
# endif
/* On failure, this CAS has memory_order_relaxed semantics.  */
/* XXX This potentially has one branch more than necessary, but archs
   currently do not define a CAS that returns both the previous value and
   the success flag.  */
# ifndef atomic_compare_exchange_weak_acquire
#  define atomic_compare_exchange_weak_acquire(mem, expected, desired) \
   ({ typeof (*(expected)) __atg102_expected = *(expected); \
   *(expected) = \
     atomic_compare_and_exchange_val_acq ((mem), (desired), *(expected)); \
   *(expected) == __atg102_expected; })
# endif
# ifndef atomic_compare_exchange_weak_relaxed
/* XXX Fall back to CAS with acquire MO because archs do not define a weaker
   CAS.  */
#  define atomic_compare_exchange_weak_relaxed(mem, expected, desired) \
   atomic_compare_exchange_weak_acquire ((mem), (expected), (desired))
# endif
# ifndef atomic_compare_exchange_weak_release
#  define atomic_compare_exchange_weak_release(mem, expected, desired) \
   ({ typeof (*(expected)) __atg103_expected = *(expected); \
   *(expected) = \
     atomic_compare_and_exchange_val_rel ((mem), (desired), *(expected)); \
   *(expected) == __atg103_expected; })
# endif
/* XXX Fall back to acquire MO because archs do not define a weaker
   atomic_exchange.  */
# ifndef atomic_exchange_relaxed
#  define atomic_exchange_relaxed(mem, val) \
   atomic_exchange_acq ((mem), (val))
# endif
# ifndef atomic_exchange_acquire
#  define atomic_exchange_acquire(mem, val) \
   atomic_exchange_acq ((mem), (val))
# endif
# ifndef atomic_exchange_release
#  define atomic_exchange_release(mem, val) \
   atomic_exchange_rel ((mem), (val))
# endif
/* Fetch-add fallbacks built on atomic_exchange_and_add.  */
# ifndef atomic_fetch_add_acquire
#  define atomic_fetch_add_acquire(mem, operand) \
   atomic_exchange_and_add_acq ((mem), (operand))
# endif
# ifndef atomic_fetch_add_relaxed
/* XXX Fall back to acquire MO because the MO semantics of
   atomic_exchange_and_add are not documented; the generic version falls back
   to atomic_exchange_and_add_acq if atomic_exchange_and_add is not defined,
   and vice versa.  */
#  define atomic_fetch_add_relaxed(mem, operand) \
   atomic_fetch_add_acquire ((mem), (operand))
# endif
# ifndef atomic_fetch_add_release
#  define atomic_fetch_add_release(mem, operand) \
   atomic_exchange_and_add_rel ((mem), (operand))
# endif
# ifndef atomic_fetch_add_acq_rel
#  define atomic_fetch_add_acq_rel(mem, operand) \
   ({ atomic_thread_fence_release (); \
      atomic_exchange_and_add_acq ((mem), (operand)); })
# endif
/* XXX Fall back to acquire MO because archs do not define a weaker
   atomic_and_val.  */
# ifndef atomic_fetch_and_relaxed
#  define atomic_fetch_and_relaxed(mem, operand) \
   atomic_fetch_and_acquire ((mem), (operand))
# endif
/* XXX The default for atomic_and_val has acquire semantics, but this is not
   documented.  */
# ifndef atomic_fetch_and_acquire
#  define atomic_fetch_and_acquire(mem, operand) \
   atomic_and_val ((mem), (operand))
# endif
# ifndef atomic_fetch_and_release
/* XXX This unnecessarily has acquire MO.  */
#  define atomic_fetch_and_release(mem, operand) \
   ({ atomic_thread_fence_release (); \
      atomic_and_val ((mem), (operand)); })
# endif

/* XXX The default for atomic_or_val has acquire semantics, but this is not
   documented.  */
# ifndef atomic_fetch_or_acquire
#  define atomic_fetch_or_acquire(mem, operand) \
   atomic_or_val ((mem), (operand))
# endif
/* XXX Fall back to acquire MO because archs do not define a weaker
   atomic_or_val.  */
# ifndef atomic_fetch_or_relaxed
#  define atomic_fetch_or_relaxed(mem, operand) \
   atomic_fetch_or_acquire ((mem), (operand))
# endif
/* XXX Contains an unnecessary acquire MO because archs do not define a weaker
   atomic_or_val.  */
# ifndef atomic_fetch_or_release
#  define atomic_fetch_or_release(mem, operand) \
   ({ atomic_thread_fence_release (); \
      atomic_fetch_or_acquire ((mem), (operand)); })
# endif
794 # ifndef atomic_fetch_xor_release
795 /* Failing the atomic_compare_exchange_weak_release reloads the value in
796 __atg104_expected, so we need only do the XOR again and retry. */
797 # define atomic_fetch_xor_release(mem, operand) \
798 ({ __typeof (mem) __atg104_memp = (mem); \
799 __typeof (*(mem)) __atg104_expected = (*__atg104_memp); \
800 __typeof (*(mem)) __atg104_desired; \
801 __typeof (*(mem)) __atg104_op = (operand); \
804 __atg104_desired = __atg104_expected ^ __atg104_op; \
805 while (__glibc_unlikely \
806 (atomic_compare_exchange_weak_release ( \
807 __atg104_memp, &__atg104_expected, __atg104_desired) \
809 __atg104_expected; })
812 #endif /* !USE_ATOMIC_COMPILER_BUILTINS */
814 /* This operation does not affect synchronization semantics but can be used
815 in the body of a spin loop to potentially improve its efficiency. */
816 #ifndef atomic_spin_nop
817 # define atomic_spin_nop() do { /* nothing */ } while (0)
820 /* ATOMIC_EXCHANGE_USES_CAS is non-zero if atomic_exchange operations
821 are implemented based on a CAS loop; otherwise, this is zero and we assume
822 that the atomic_exchange operations could provide better performance
824 #ifndef ATOMIC_EXCHANGE_USES_CAS
825 # error ATOMIC_EXCHANGE_USES_CAS has to be defined.
828 #endif /* atomic.h */