/* Internal macros for atomic operations for GNU C Library.
   Copyright (C) 2002-2016 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef _ATOMIC_H
#define _ATOMIC_H 1

/* This header defines three types of macros:

   - atomic arithmetic and logic operations on memory.  They all
     have the prefix "atomic_".

   - conditionally atomic operations of the same kinds.  These
     always behave identically but can be faster when atomicity
     is not really needed, since only one thread has access to
     the memory location.  In that case the code is slower in
     the multi-thread case.  The interfaces have the prefix
     "catomic_".

   - support functions like barriers.  They also have the prefix
     "atomic_".

   Architectures must provide a few lowlevel macros (the compare
   and exchange definitions).  All others are optional.  They
   should only be provided if the architecture has specific
   support for the operation.

   As <atomic.h> macros are usually heavily nested and often use local
   variables to make sure side effects are evaluated properly, use a
   per-macro unique prefix for macro-local variables.  This file uses
   the __atgN_ prefix, where N is different in each macro.  */

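/* Illustrative usage sketch (not part of this header): both families are
   used the same way; the catomic_ variants are only appropriate when the
   caller knows that no other thread can access the location concurrently.
   The variable "foo_counter" below is hypothetical.

     static int foo_counter;

     void
     foo_count_event (void)
     {
       atomic_increment (&foo_counter);     // always safe

     }

     void
     foo_count_event_single_threaded (void)
     {
       catomic_increment (&foo_counter);    // only without concurrent access
     }
*/
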
#include <stdlib.h>

#include <atomic-machine.h>

/* Wrapper macros to call pre_NN_post (mem, ...) where NN is the
   bit width of *MEM.  The calling macro puts parens around MEM
   and following args.  */
#define __atomic_val_bysize(pre, post, mem, ...) \
  ({ \
    __typeof (*mem) __atg1_result; \
    if (sizeof (*mem) == 1) \
      __atg1_result = pre##_8_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 2) \
      __atg1_result = pre##_16_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 4) \
      __atg1_result = pre##_32_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 8) \
      __atg1_result = pre##_64_##post (mem, __VA_ARGS__); \
    else \
      abort (); \
    __atg1_result; \
  })
#define __atomic_bool_bysize(pre, post, mem, ...) \
  ({ \
    int __atg2_result; \
    if (sizeof (*mem) == 1) \
      __atg2_result = pre##_8_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 2) \
      __atg2_result = pre##_16_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 4) \
      __atg2_result = pre##_32_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 8) \
      __atg2_result = pre##_64_##post (mem, __VA_ARGS__); \
    else \
      abort (); \
    __atg2_result; \
  })
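
/* Illustrative sketch (not part of this header): with a 4-byte *MEM,

     __atomic_val_bysize (__arch_compare_and_exchange_val, acq,
                          mem, newval, oldval)

   selects the 32-bit branch and pastes the tokens together, effectively
   calling

     __arch_compare_and_exchange_val_32_acq (mem, newval, oldval)

   so an architecture only needs to define the pre_NN_post macros for the
   operand sizes it supports; other sizes abort at run time.  */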


/* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL.
   Return the old *MEM value.  */
#if !defined atomic_compare_and_exchange_val_acq \
    && defined __arch_compare_and_exchange_val_32_acq
# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  __atomic_val_bysize (__arch_compare_and_exchange_val,acq, \
                       mem, newval, oldval)
#endif


#ifndef catomic_compare_and_exchange_val_acq
# ifdef __arch_c_compare_and_exchange_val_32_acq
#  define catomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  __atomic_val_bysize (__arch_c_compare_and_exchange_val,acq, \
                       mem, newval, oldval)
# else
#  define catomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  atomic_compare_and_exchange_val_acq (mem, newval, oldval)
# endif
#endif


#ifndef catomic_compare_and_exchange_val_rel
# ifndef atomic_compare_and_exchange_val_rel
#  define catomic_compare_and_exchange_val_rel(mem, newval, oldval) \
  catomic_compare_and_exchange_val_acq (mem, newval, oldval)
# else
#  define catomic_compare_and_exchange_val_rel(mem, newval, oldval) \
  atomic_compare_and_exchange_val_rel (mem, newval, oldval)
# endif
#endif


#ifndef atomic_compare_and_exchange_val_rel
# define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \
  atomic_compare_and_exchange_val_acq (mem, newval, oldval)
#endif
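
/* Illustrative sketch (not part of this header): because the _val CAS
   returns the previous value of *MEM, a failed attempt already tells the
   caller what *MEM contained, so the retry loop can reuse it.  "foo_state"
   is hypothetical.

     int foo_state;

     void
     foo_set_flag (int flag)
     {
       int cur = foo_state;
       int old;
       while ((old = atomic_compare_and_exchange_val_acq (&foo_state,
                                                           cur | flag, cur))
              != cur)
         cur = old;
     }
*/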


/* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL.
   Return zero if *MEM was changed or non-zero if no exchange happened.  */
#ifndef atomic_compare_and_exchange_bool_acq
# ifdef __arch_compare_and_exchange_bool_32_acq
#  define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  __atomic_bool_bysize (__arch_compare_and_exchange_bool,acq, \
                        mem, newval, oldval)
# else
#  define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  ({ /* Cannot use __oldval here, because macros later in this file might \
        call this macro with __oldval argument.  */ \
     __typeof (oldval) __atg3_old = (oldval); \
     atomic_compare_and_exchange_val_acq (mem, newval, __atg3_old) \
       != __atg3_old; \
  })
# endif
#endif


#ifndef catomic_compare_and_exchange_bool_acq
# ifdef __arch_c_compare_and_exchange_bool_32_acq
#  define catomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  __atomic_bool_bysize (__arch_c_compare_and_exchange_bool,acq, \
                        mem, newval, oldval)
# else
#  define catomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  ({ /* Cannot use __oldval here, because macros later in this file might \
        call this macro with __oldval argument.  */ \
     __typeof (oldval) __atg4_old = (oldval); \
     catomic_compare_and_exchange_val_acq (mem, newval, __atg4_old) \
       != __atg4_old; \
  })
# endif
#endif
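
/* Illustrative sketch (not part of this header): the _bool CAS only reports
   whether the exchange happened; zero means *MEM was changed.  "foo_claimed"
   is hypothetical.

     int foo_claimed;

     // Returns non-zero iff this caller claimed the resource.
     int
     foo_try_claim (void)
     {
       return atomic_compare_and_exchange_bool_acq (&foo_claimed, 1, 0) == 0;
     }
*/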


/* Store NEWVALUE in *MEM and return the old value.  */
#ifndef atomic_exchange_acq
# define atomic_exchange_acq(mem, newvalue) \
  ({ __typeof (*(mem)) __atg5_oldval; \
     __typeof (mem) __atg5_memp = (mem); \
     __typeof (*(mem)) __atg5_value = (newvalue); \
 \
     do \
       __atg5_oldval = *__atg5_memp; \
     while (__builtin_expect \
            (atomic_compare_and_exchange_bool_acq (__atg5_memp, __atg5_value, \
                                                   __atg5_oldval), 0)); \
 \
     __atg5_oldval; })
#endif

#ifndef atomic_exchange_rel
# define atomic_exchange_rel(mem, newvalue) atomic_exchange_acq (mem, newvalue)
#endif
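
/* Illustrative sketch (not part of this header): atomic_exchange_acq is an
   unconditional swap, e.g. for taking over a pointer exactly once.
   "foo_pending" is hypothetical.

     void *foo_pending;

     void *
     foo_take_pending (void)
     {
       return atomic_exchange_acq (&foo_pending, NULL);
     }
*/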


/* Add VALUE to *MEM and return the old value of *MEM.  */
#ifndef atomic_exchange_and_add_acq
# ifdef atomic_exchange_and_add
#  define atomic_exchange_and_add_acq(mem, value) \
  atomic_exchange_and_add (mem, value)
# else
#  define atomic_exchange_and_add_acq(mem, value) \
  ({ __typeof (*(mem)) __atg6_oldval; \
     __typeof (mem) __atg6_memp = (mem); \
     __typeof (*(mem)) __atg6_value = (value); \
 \
     do \
       __atg6_oldval = *__atg6_memp; \
     while (__builtin_expect \
            (atomic_compare_and_exchange_bool_acq (__atg6_memp, \
                                                   __atg6_oldval \
                                                   + __atg6_value, \
                                                   __atg6_oldval), 0)); \
 \
     __atg6_oldval; })
# endif
#endif

#ifndef atomic_exchange_and_add_rel
# define atomic_exchange_and_add_rel(mem, value) \
  atomic_exchange_and_add_acq(mem, value)
#endif

#ifndef atomic_exchange_and_add
# define atomic_exchange_and_add(mem, value) \
  atomic_exchange_and_add_acq(mem, value)
#endif

#ifndef catomic_exchange_and_add
# define catomic_exchange_and_add(mem, value) \
  ({ __typeof (*(mem)) __atg7_oldv; \
     __typeof (mem) __atg7_memp = (mem); \
     __typeof (*(mem)) __atg7_value = (value); \
 \
     do \
       __atg7_oldv = *__atg7_memp; \
     while (__builtin_expect \
            (catomic_compare_and_exchange_bool_acq (__atg7_memp, \
                                                    __atg7_oldv \
                                                    + __atg7_value, \
                                                    __atg7_oldv), 0)); \
 \
     __atg7_oldv; })
#endif
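
/* Illustrative sketch (not part of this header): atomic_exchange_and_add is
   a fetch-and-add, so its result is the counter value from before the
   addition.  "foo_refs" is hypothetical.

     int foo_refs;

     void
     foo_ref (void)
     {
       atomic_exchange_and_add (&foo_refs, 1);
     }

     // Returns non-zero when the last reference was dropped.
     int
     foo_unref (void)
     {
       return atomic_exchange_and_add (&foo_refs, -1) == 1;
     }
*/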


#ifndef atomic_max
# define atomic_max(mem, value) \
  do { \
    __typeof (*(mem)) __atg8_oldval; \
    __typeof (mem) __atg8_memp = (mem); \
    __typeof (*(mem)) __atg8_value = (value); \
    do { \
      __atg8_oldval = *__atg8_memp; \
      if (__atg8_oldval >= __atg8_value) \
        break; \
    } while (__builtin_expect \
             (atomic_compare_and_exchange_bool_acq (__atg8_memp, __atg8_value, \
                                                    __atg8_oldval), 0)); \
  } while (0)
#endif


#ifndef catomic_max
# define catomic_max(mem, value) \
  do { \
    __typeof (*(mem)) __atg9_oldv; \
    __typeof (mem) __atg9_memp = (mem); \
    __typeof (*(mem)) __atg9_value = (value); \
    do { \
      __atg9_oldv = *__atg9_memp; \
      if (__atg9_oldv >= __atg9_value) \
        break; \
    } while (__builtin_expect \
             (catomic_compare_and_exchange_bool_acq (__atg9_memp, \
                                                     __atg9_value, \
                                                     __atg9_oldv), 0)); \
  } while (0)
#endif


#ifndef atomic_min
# define atomic_min(mem, value) \
  do { \
    __typeof (*(mem)) __atg10_oldval; \
    __typeof (mem) __atg10_memp = (mem); \
    __typeof (*(mem)) __atg10_value = (value); \
    do { \
      __atg10_oldval = *__atg10_memp; \
      if (__atg10_oldval <= __atg10_value) \
        break; \
    } while (__builtin_expect \
             (atomic_compare_and_exchange_bool_acq (__atg10_memp, \
                                                    __atg10_value, \
                                                    __atg10_oldval), 0)); \
  } while (0)
#endif
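
/* Illustrative sketch (not part of this header): atomic_max only updates
   *MEM when the new value is larger, which is convenient for keeping a
   high-water mark.  "foo_peak" is hypothetical.

     size_t foo_peak;

     void
     foo_note_usage (size_t current)
     {
       atomic_max (&foo_peak, current);
     }
*/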


#ifndef atomic_add
# define atomic_add(mem, value) (void) atomic_exchange_and_add ((mem), (value))
#endif


#ifndef catomic_add
# define catomic_add(mem, value) \
  (void) catomic_exchange_and_add ((mem), (value))
#endif


#ifndef atomic_increment
# define atomic_increment(mem) atomic_add ((mem), 1)
#endif


#ifndef catomic_increment
# define catomic_increment(mem) catomic_add ((mem), 1)
#endif


#ifndef atomic_increment_val
# define atomic_increment_val(mem) (atomic_exchange_and_add ((mem), 1) + 1)
#endif


#ifndef catomic_increment_val
# define catomic_increment_val(mem) (catomic_exchange_and_add ((mem), 1) + 1)
#endif


/* Add one to *MEM and return true iff it's now zero.  */
#ifndef atomic_increment_and_test
# define atomic_increment_and_test(mem) \
  (atomic_exchange_and_add ((mem), 1) + 1 == 0)
#endif


#ifndef atomic_decrement
# define atomic_decrement(mem) atomic_add ((mem), -1)
#endif


#ifndef catomic_decrement
# define catomic_decrement(mem) catomic_add ((mem), -1)
#endif


#ifndef atomic_decrement_val
# define atomic_decrement_val(mem) (atomic_exchange_and_add ((mem), -1) - 1)
#endif


#ifndef catomic_decrement_val
# define catomic_decrement_val(mem) (catomic_exchange_and_add ((mem), -1) - 1)
#endif


/* Subtract 1 from *MEM and return true iff it's now zero.  */
#ifndef atomic_decrement_and_test
# define atomic_decrement_and_test(mem) \
  (atomic_exchange_and_add ((mem), -1) == 1)
#endif


/* Decrement *MEM if it is > 0, and return the old value.  */
#ifndef atomic_decrement_if_positive
# define atomic_decrement_if_positive(mem) \
  ({ __typeof (*(mem)) __atg11_oldval; \
     __typeof (mem) __atg11_memp = (mem); \
 \
     do \
       { \
         __atg11_oldval = *__atg11_memp; \
         if (__glibc_unlikely (__atg11_oldval <= 0)) \
           break; \
       } \
     while (__builtin_expect \
            (atomic_compare_and_exchange_bool_acq (__atg11_memp, \
                                                   __atg11_oldval - 1, \
                                                   __atg11_oldval), 0)); \
     __atg11_oldval; })
#endif
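
/* Illustrative sketch (not part of this header): atomic_decrement_if_positive
   never takes a positive count below zero, which matches the non-blocking
   part of a semaphore-style wait.  "foo_tokens" is hypothetical.

     int foo_tokens;

     // Returns non-zero on success, zero when no token was available.
     int
     foo_try_take_token (void)
     {
       return atomic_decrement_if_positive (&foo_tokens) > 0;
     }
*/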


#ifndef atomic_add_negative
# define atomic_add_negative(mem, value) \
  ({ __typeof (value) __atg12_value = (value); \
     atomic_exchange_and_add (mem, __atg12_value) < -__atg12_value; })
#endif


#ifndef atomic_add_zero
# define atomic_add_zero(mem, value) \
  ({ __typeof (value) __atg13_value = (value); \
     atomic_exchange_and_add (mem, __atg13_value) == -__atg13_value; })
#endif


#ifndef atomic_bit_set
# define atomic_bit_set(mem, bit) \
  (void) atomic_bit_test_set(mem, bit)
#endif


#ifndef atomic_bit_test_set
# define atomic_bit_test_set(mem, bit) \
  ({ __typeof (*(mem)) __atg14_old; \
     __typeof (mem) __atg14_memp = (mem); \
     __typeof (*(mem)) __atg14_mask = ((__typeof (*(mem))) 1 << (bit)); \
 \
     do \
       __atg14_old = (*__atg14_memp); \
     while (__builtin_expect \
            (atomic_compare_and_exchange_bool_acq (__atg14_memp, \
                                                   __atg14_old | __atg14_mask, \
                                                   __atg14_old), 0)); \
 \
     __atg14_old & __atg14_mask; })
#endif
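
/* Illustrative sketch (not part of this header): atomic_bit_test_set sets
   bit BIT of *MEM and evaluates to non-zero iff that bit was already set,
   so it can arbitrate one-time work.  "foo_flags" is hypothetical.

     unsigned int foo_flags;

     void
     foo_init_once (void)
     {
       if (atomic_bit_test_set (&foo_flags, 0) == 0)
         {
           // First caller: perform the one-time initialization here.
         }
     }
*/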

/* Atomically *mem &= mask.  */
#ifndef atomic_and
# define atomic_and(mem, mask) \
  do { \
    __typeof (*(mem)) __atg15_old; \
    __typeof (mem) __atg15_memp = (mem); \
    __typeof (*(mem)) __atg15_mask = (mask); \
 \
    do \
      __atg15_old = (*__atg15_memp); \
    while (__builtin_expect \
           (atomic_compare_and_exchange_bool_acq (__atg15_memp, \
                                                  __atg15_old & __atg15_mask, \
                                                  __atg15_old), 0)); \
  } while (0)
#endif

#ifndef catomic_and
# define catomic_and(mem, mask) \
  do { \
    __typeof (*(mem)) __atg20_old; \
    __typeof (mem) __atg20_memp = (mem); \
    __typeof (*(mem)) __atg20_mask = (mask); \
 \
    do \
      __atg20_old = (*__atg20_memp); \
    while (__builtin_expect \
           (catomic_compare_and_exchange_bool_acq (__atg20_memp, \
                                                   __atg20_old & __atg20_mask, \
                                                   __atg20_old), 0)); \
  } while (0)
#endif

/* Atomically *mem &= mask and return the old value of *mem.  */
#ifndef atomic_and_val
# define atomic_and_val(mem, mask) \
  ({ __typeof (*(mem)) __atg16_old; \
     __typeof (mem) __atg16_memp = (mem); \
     __typeof (*(mem)) __atg16_mask = (mask); \
 \
     do \
       __atg16_old = (*__atg16_memp); \
     while (__builtin_expect \
            (atomic_compare_and_exchange_bool_acq (__atg16_memp, \
                                                   __atg16_old & __atg16_mask, \
                                                   __atg16_old), 0)); \
 \
     __atg16_old; })
#endif
/* Atomically *mem |= mask.  */
#ifndef atomic_or
# define atomic_or(mem, mask) \
  do { \
    __typeof (*(mem)) __atg17_old; \
    __typeof (mem) __atg17_memp = (mem); \
    __typeof (*(mem)) __atg17_mask = (mask); \
 \
    do \
      __atg17_old = (*__atg17_memp); \
    while (__builtin_expect \
           (atomic_compare_and_exchange_bool_acq (__atg17_memp, \
                                                  __atg17_old | __atg17_mask, \
                                                  __atg17_old), 0)); \
  } while (0)
#endif

#ifndef catomic_or
# define catomic_or(mem, mask) \
  do { \
    __typeof (*(mem)) __atg18_old; \
    __typeof (mem) __atg18_memp = (mem); \
    __typeof (*(mem)) __atg18_mask = (mask); \
 \
    do \
      __atg18_old = (*__atg18_memp); \
    while (__builtin_expect \
           (catomic_compare_and_exchange_bool_acq (__atg18_memp, \
                                                   __atg18_old | __atg18_mask, \
                                                   __atg18_old), 0)); \
  } while (0)
#endif

/* Atomically *mem |= mask and return the old value of *mem.  */
#ifndef atomic_or_val
# define atomic_or_val(mem, mask) \
  ({ __typeof (*(mem)) __atg19_old; \
     __typeof (mem) __atg19_memp = (mem); \
     __typeof (*(mem)) __atg19_mask = (mask); \
 \
     do \
       __atg19_old = (*__atg19_memp); \
     while (__builtin_expect \
            (atomic_compare_and_exchange_bool_acq (__atg19_memp, \
                                                   __atg19_old | __atg19_mask, \
                                                   __atg19_old), 0)); \
 \
     __atg19_old; })
#endif

#ifndef atomic_full_barrier
# define atomic_full_barrier() __asm ("" ::: "memory")
#endif


#ifndef atomic_read_barrier
# define atomic_read_barrier() atomic_full_barrier ()
#endif


#ifndef atomic_write_barrier
# define atomic_write_barrier() atomic_full_barrier ()
#endif


#ifndef atomic_forced_read
# define atomic_forced_read(x) \
  ({ __typeof (x) __x; __asm ("" : "=r" (__x) : "0" (x)); __x; })
#endif

/* This is equal to 1 iff the architecture supports 64b atomic operations.  */
#ifndef __HAVE_64B_ATOMICS
#error Unable to determine if 64-bit atomics are present.
#endif

/* The following functions are a subset of the atomic operations provided by
   C11.  Usually, a function named atomic_OP_MO(args) is equivalent to C11's
   atomic_OP_explicit(args, memory_order_MO); exceptions noted below.  */

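/* Illustrative sketch (not part of this header): under this naming scheme,
   for an atomic object x,

     atomic_fetch_add_release (&x, 1);

   corresponds to C11's

     atomic_fetch_add_explicit (&x, 1, memory_order_release);

   with the memory order folded into the macro name.  */
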
/* Each arch can request to use compiler built-ins for C11 atomics.  If it
   does, all atomics will be based on these.  */
#if USE_ATOMIC_COMPILER_BUILTINS

/* We require 32b atomic operations; some archs also support 64b atomic
   operations.  */
void __atomic_link_error (void);
# if __HAVE_64B_ATOMICS == 1
#  define __atomic_check_size(mem) \
   if ((sizeof (*mem) != 4) && (sizeof (*mem) != 8)) \
     __atomic_link_error ();
# else
#  define __atomic_check_size(mem) \
   if (sizeof (*mem) != 4) \
     __atomic_link_error ();
# endif

# define atomic_thread_fence_acquire() \
  __atomic_thread_fence (__ATOMIC_ACQUIRE)
# define atomic_thread_fence_release() \
  __atomic_thread_fence (__ATOMIC_RELEASE)
# define atomic_thread_fence_seq_cst() \
  __atomic_thread_fence (__ATOMIC_SEQ_CST)

# define atomic_load_relaxed(mem) \
  ({ __atomic_check_size((mem)); __atomic_load_n ((mem), __ATOMIC_RELAXED); })
# define atomic_load_acquire(mem) \
  ({ __atomic_check_size((mem)); __atomic_load_n ((mem), __ATOMIC_ACQUIRE); })

# define atomic_store_relaxed(mem, val) \
  do { \
    __atomic_check_size((mem)); \
    __atomic_store_n ((mem), (val), __ATOMIC_RELAXED); \
  } while (0)
# define atomic_store_release(mem, val) \
  do { \
    __atomic_check_size((mem)); \
    __atomic_store_n ((mem), (val), __ATOMIC_RELEASE); \
  } while (0)

/* On failure, this CAS has memory_order_relaxed semantics.  */
# define atomic_compare_exchange_weak_relaxed(mem, expected, desired) \
  ({ __atomic_check_size((mem)); \
     __atomic_compare_exchange_n ((mem), (expected), (desired), 1, \
                                  __ATOMIC_RELAXED, __ATOMIC_RELAXED); })
# define atomic_compare_exchange_weak_acquire(mem, expected, desired) \
  ({ __atomic_check_size((mem)); \
     __atomic_compare_exchange_n ((mem), (expected), (desired), 1, \
                                  __ATOMIC_ACQUIRE, __ATOMIC_RELAXED); })
# define atomic_compare_exchange_weak_release(mem, expected, desired) \
  ({ __atomic_check_size((mem)); \
     __atomic_compare_exchange_n ((mem), (expected), (desired), 1, \
                                  __ATOMIC_RELEASE, __ATOMIC_RELAXED); })

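/* Illustrative sketch (not part of this header): unlike the older _bool CAS
   above, these C11-style macros update *EXPECTED with the value observed on
   failure, and the weak variants may fail spuriously, hence the loop.
   "foo_val" is hypothetical.

     int foo_val;

     void
     foo_add_flag (int flag)
     {
       int expected = atomic_load_relaxed (&foo_val);
       while (!atomic_compare_exchange_weak_acquire (&foo_val, &expected,
                                                     expected | flag))
         ;
     }
*/
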
# define atomic_exchange_relaxed(mem, desired) \
  ({ __atomic_check_size((mem)); \
     __atomic_exchange_n ((mem), (desired), __ATOMIC_RELAXED); })
# define atomic_exchange_acquire(mem, desired) \
  ({ __atomic_check_size((mem)); \
     __atomic_exchange_n ((mem), (desired), __ATOMIC_ACQUIRE); })
# define atomic_exchange_release(mem, desired) \
  ({ __atomic_check_size((mem)); \
     __atomic_exchange_n ((mem), (desired), __ATOMIC_RELEASE); })

# define atomic_fetch_add_relaxed(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_add ((mem), (operand), __ATOMIC_RELAXED); })
# define atomic_fetch_add_acquire(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_add ((mem), (operand), __ATOMIC_ACQUIRE); })
# define atomic_fetch_add_release(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_add ((mem), (operand), __ATOMIC_RELEASE); })
# define atomic_fetch_add_acq_rel(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_add ((mem), (operand), __ATOMIC_ACQ_REL); })

# define atomic_fetch_and_relaxed(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_and ((mem), (operand), __ATOMIC_RELAXED); })
# define atomic_fetch_and_acquire(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_and ((mem), (operand), __ATOMIC_ACQUIRE); })
# define atomic_fetch_and_release(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_and ((mem), (operand), __ATOMIC_RELEASE); })

# define atomic_fetch_or_relaxed(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_or ((mem), (operand), __ATOMIC_RELAXED); })
# define atomic_fetch_or_acquire(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_or ((mem), (operand), __ATOMIC_ACQUIRE); })
# define atomic_fetch_or_release(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_or ((mem), (operand), __ATOMIC_RELEASE); })

# define atomic_fetch_xor_release(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_xor ((mem), (operand), __ATOMIC_RELEASE); })

#else /* !USE_ATOMIC_COMPILER_BUILTINS */

/* By default, we assume that read, write, and full barriers are equivalent
   to acquire, release, and seq_cst barriers.  Archs for which this does not
   hold have to provide custom definitions of the fences.  */
# ifndef atomic_thread_fence_acquire
#  define atomic_thread_fence_acquire() atomic_read_barrier ()
# endif
# ifndef atomic_thread_fence_release
#  define atomic_thread_fence_release() atomic_write_barrier ()
# endif
# ifndef atomic_thread_fence_seq_cst
#  define atomic_thread_fence_seq_cst() atomic_full_barrier ()
# endif

# ifndef atomic_load_relaxed
#  define atomic_load_relaxed(mem) \
   ({ __typeof (*(mem)) __atg100_val; \
      __asm ("" : "=r" (__atg100_val) : "0" (*(mem))); \
      __atg100_val; })
# endif
# ifndef atomic_load_acquire
#  define atomic_load_acquire(mem) \
   ({ __typeof (*(mem)) __atg101_val = atomic_load_relaxed (mem); \
      atomic_thread_fence_acquire (); \
      __atg101_val; })
# endif

# ifndef atomic_store_relaxed
/* XXX Use inline asm here?  */
#  define atomic_store_relaxed(mem, val) do { *(mem) = (val); } while (0)
# endif
# ifndef atomic_store_release
#  define atomic_store_release(mem, val) \
   do { \
     atomic_thread_fence_release (); \
     atomic_store_relaxed ((mem), (val)); \
   } while (0)
# endif

/* On failure, this CAS has memory_order_relaxed semantics.  */
/* XXX This potentially has one branch more than necessary, but archs
   currently do not define a CAS that returns both the previous value and
   the success flag.  */
# ifndef atomic_compare_exchange_weak_acquire
#  define atomic_compare_exchange_weak_acquire(mem, expected, desired) \
   ({ typeof (*(expected)) __atg102_expected = *(expected); \
      *(expected) = \
        atomic_compare_and_exchange_val_acq ((mem), (desired), *(expected)); \
      *(expected) == __atg102_expected; })
# endif
# ifndef atomic_compare_exchange_weak_relaxed
/* XXX Fall back to CAS with acquire MO because archs do not define a weaker
   CAS.  */
#  define atomic_compare_exchange_weak_relaxed(mem, expected, desired) \
   atomic_compare_exchange_weak_acquire ((mem), (expected), (desired))
# endif
# ifndef atomic_compare_exchange_weak_release
#  define atomic_compare_exchange_weak_release(mem, expected, desired) \
   ({ typeof (*(expected)) __atg103_expected = *(expected); \
      *(expected) = \
        atomic_compare_and_exchange_val_rel ((mem), (desired), *(expected)); \
      *(expected) == __atg103_expected; })
# endif

/* XXX Fall back to acquire MO because archs do not define a weaker
   atomic_exchange.  */
# ifndef atomic_exchange_relaxed
#  define atomic_exchange_relaxed(mem, val) \
   atomic_exchange_acq ((mem), (val))
# endif
# ifndef atomic_exchange_acquire
#  define atomic_exchange_acquire(mem, val) \
   atomic_exchange_acq ((mem), (val))
# endif
# ifndef atomic_exchange_release
#  define atomic_exchange_release(mem, val) \
   atomic_exchange_rel ((mem), (val))
# endif

# ifndef atomic_fetch_add_acquire
#  define atomic_fetch_add_acquire(mem, operand) \
   atomic_exchange_and_add_acq ((mem), (operand))
# endif
# ifndef atomic_fetch_add_relaxed
/* XXX Fall back to acquire MO because the MO semantics of
   atomic_exchange_and_add are not documented; the generic version falls back
   to atomic_exchange_and_add_acq if atomic_exchange_and_add is not defined,
   and vice versa.  */
#  define atomic_fetch_add_relaxed(mem, operand) \
   atomic_fetch_add_acquire ((mem), (operand))
# endif
# ifndef atomic_fetch_add_release
#  define atomic_fetch_add_release(mem, operand) \
   atomic_exchange_and_add_rel ((mem), (operand))
# endif
# ifndef atomic_fetch_add_acq_rel
#  define atomic_fetch_add_acq_rel(mem, operand) \
   ({ atomic_thread_fence_release (); \
      atomic_exchange_and_add_acq ((mem), (operand)); })
# endif

/* XXX Fall back to acquire MO because archs do not define a weaker
   atomic_and_val.  */
# ifndef atomic_fetch_and_relaxed
#  define atomic_fetch_and_relaxed(mem, operand) \
   atomic_fetch_and_acquire ((mem), (operand))
# endif
/* XXX The default for atomic_and_val has acquire semantics, but this is not
   documented.  */
# ifndef atomic_fetch_and_acquire
#  define atomic_fetch_and_acquire(mem, operand) \
   atomic_and_val ((mem), (operand))
# endif
# ifndef atomic_fetch_and_release
/* XXX This unnecessarily has acquire MO.  */
#  define atomic_fetch_and_release(mem, operand) \
   ({ atomic_thread_fence_release (); \
      atomic_and_val ((mem), (operand)); })
# endif

/* XXX The default for atomic_or_val has acquire semantics, but this is not
   documented.  */
# ifndef atomic_fetch_or_acquire
#  define atomic_fetch_or_acquire(mem, operand) \
   atomic_or_val ((mem), (operand))
# endif
/* XXX Fall back to acquire MO because archs do not define a weaker
   atomic_or_val.  */
# ifndef atomic_fetch_or_relaxed
#  define atomic_fetch_or_relaxed(mem, operand) \
   atomic_fetch_or_acquire ((mem), (operand))
# endif
/* XXX Contains an unnecessary acquire MO because archs do not define a weaker
   atomic_or_val.  */
# ifndef atomic_fetch_or_release
#  define atomic_fetch_or_release(mem, operand) \
   ({ atomic_thread_fence_release (); \
      atomic_fetch_or_acquire ((mem), (operand)); })
# endif

# ifndef atomic_fetch_xor_release
#  define atomic_fetch_xor_release(mem, operand) \
   ({ __typeof (*(mem)) __atg104_old; \
      __typeof (mem) __atg104_memp = (mem); \
      __typeof (*(mem)) __atg104_op = (operand); \
 \
      do \
        __atg104_old = (*__atg104_memp); \
      while (__builtin_expect \
             (atomic_compare_and_exchange_bool_rel ( \
                __atg104_memp, __atg104_old ^ __atg104_op, __atg104_old), 0)); \
 \
      __atg104_old; })
# endif

#endif /* !USE_ATOMIC_COMPILER_BUILTINS */

/* This operation does not affect synchronization semantics but can be used
   in the body of a spin loop to potentially improve its efficiency.  */
#ifndef atomic_spin_nop
# define atomic_spin_nop() do { /* nothing */ } while (0)
#endif
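
/* Illustrative sketch (not part of this header): a typical spin loop pairs a
   load (acquire MO, so that data written before the flag was set becomes
   visible) with atomic_spin_nop between iterations.  "foo_ready" is
   hypothetical.

     int foo_ready;

     void
     foo_wait_until_ready (void)
     {
       while (atomic_load_acquire (&foo_ready) == 0)
         atomic_spin_nop ();
     }
*/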

#endif /* atomic.h */