1 From: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61164
3 --- trunk/libitm/local_atomic 2015/08/20 17:43:55 227039
4 +++ trunk/libitm/local_atomic 2015/08/20 17:55:24 227040
6 #ifndef _GLIBCXX_ATOMIC
7 #define _GLIBCXX_ATOMIC 1
9 -#undef __always_inline
10 -#define __always_inline __attribute__((always_inline))
11 +#define __libitm_always_inline __attribute__((always_inline))
13 // #pragma GCC system_header
19 - inline __always_inline memory_order
20 + inline __libitm_always_inline memory_order
21 __calculate_memory_order(memory_order __m) noexcept
23 const bool __cond1 = __m == memory_order_release;
28 - inline __always_inline void
29 + inline __libitm_always_inline void
30 atomic_thread_fence(memory_order __m) noexcept
32 __atomic_thread_fence (__m);
35 - inline __always_inline void
36 + inline __libitm_always_inline void
37 atomic_signal_fence(memory_order __m) noexcept
39 __atomic_thread_fence (__m);
41 // Conversion to ATOMIC_FLAG_INIT.
42 atomic_flag(bool __i) noexcept : __atomic_flag_base({ __i }) { }
44 - __always_inline bool
45 + __libitm_always_inline bool
46 test_and_set(memory_order __m = memory_order_seq_cst) noexcept
48 return __atomic_test_and_set (&_M_i, __m);
51 - __always_inline bool
52 + __libitm_always_inline bool
53 test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
55 return __atomic_test_and_set (&_M_i, __m);
58 - __always_inline void
59 + __libitm_always_inline void
60 clear(memory_order __m = memory_order_seq_cst) noexcept
62 // __glibcxx_assert(__m != memory_order_consume);
64 __atomic_clear (&_M_i, __m);
67 - __always_inline void
68 + __libitm_always_inline void
69 clear(memory_order __m = memory_order_seq_cst) volatile noexcept
71 // __glibcxx_assert(__m != memory_order_consume);
73 is_lock_free() const volatile noexcept
74 { return __atomic_is_lock_free (sizeof (_M_i), &_M_i); }
76 - __always_inline void
77 + __libitm_always_inline void
78 store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
80 // __glibcxx_assert(__m != memory_order_acquire);
82 __atomic_store_n(&_M_i, __i, __m);
85 - __always_inline void
86 + __libitm_always_inline void
88 memory_order __m = memory_order_seq_cst) volatile noexcept
91 __atomic_store_n(&_M_i, __i, __m);
94 - __always_inline __int_type
95 + __libitm_always_inline __int_type
96 load(memory_order __m = memory_order_seq_cst) const noexcept
98 // __glibcxx_assert(__m != memory_order_release);
100 return __atomic_load_n(&_M_i, __m);
103 - __always_inline __int_type
104 + __libitm_always_inline __int_type
105 load(memory_order __m = memory_order_seq_cst) const volatile noexcept
107 // __glibcxx_assert(__m != memory_order_release);
108 @@ -494,21 +493,21 @@
109 return __atomic_load_n(&_M_i, __m);
112 - __always_inline __int_type
113 + __libitm_always_inline __int_type
114 exchange(__int_type __i,
115 memory_order __m = memory_order_seq_cst) noexcept
117 return __atomic_exchange_n(&_M_i, __i, __m);
120 - __always_inline __int_type
121 + __libitm_always_inline __int_type
122 exchange(__int_type __i,
123 memory_order __m = memory_order_seq_cst) volatile noexcept
125 return __atomic_exchange_n(&_M_i, __i, __m);
128 - __always_inline bool
129 + __libitm_always_inline bool
130 compare_exchange_weak(__int_type& __i1, __int_type __i2,
131 memory_order __m1, memory_order __m2) noexcept
134 return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1, __m1, __m2);
137 - __always_inline bool
138 + __libitm_always_inline bool
139 compare_exchange_weak(__int_type& __i1, __int_type __i2,
141 memory_order __m2) volatile noexcept
143 return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1, __m1, __m2);
146 - __always_inline bool
147 + __libitm_always_inline bool
148 compare_exchange_weak(__int_type& __i1, __int_type __i2,
149 memory_order __m = memory_order_seq_cst) noexcept
152 __calculate_memory_order(__m));
155 - __always_inline bool
156 + __libitm_always_inline bool
157 compare_exchange_weak(__int_type& __i1, __int_type __i2,
158 memory_order __m = memory_order_seq_cst) volatile noexcept
161 __calculate_memory_order(__m));
164 - __always_inline bool
165 + __libitm_always_inline bool
166 compare_exchange_strong(__int_type& __i1, __int_type __i2,
167 memory_order __m1, memory_order __m2) noexcept
170 return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0, __m1, __m2);
173 - __always_inline bool
174 + __libitm_always_inline bool
175 compare_exchange_strong(__int_type& __i1, __int_type __i2,
177 memory_order __m2) volatile noexcept
179 return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0, __m1, __m2);
182 - __always_inline bool
183 + __libitm_always_inline bool
184 compare_exchange_strong(__int_type& __i1, __int_type __i2,
185 memory_order __m = memory_order_seq_cst) noexcept
188 __calculate_memory_order(__m));
191 - __always_inline bool
192 + __libitm_always_inline bool
193 compare_exchange_strong(__int_type& __i1, __int_type __i2,
194 memory_order __m = memory_order_seq_cst) volatile noexcept
196 @@ -586,52 +585,52 @@
197 __calculate_memory_order(__m));
200 - __always_inline __int_type
201 + __libitm_always_inline __int_type
202 fetch_add(__int_type __i,
203 memory_order __m = memory_order_seq_cst) noexcept
204 { return __atomic_fetch_add(&_M_i, __i, __m); }
206 - __always_inline __int_type
207 + __libitm_always_inline __int_type
208 fetch_add(__int_type __i,
209 memory_order __m = memory_order_seq_cst) volatile noexcept
210 { return __atomic_fetch_add(&_M_i, __i, __m); }
212 - __always_inline __int_type
213 + __libitm_always_inline __int_type
214 fetch_sub(__int_type __i,
215 memory_order __m = memory_order_seq_cst) noexcept
216 { return __atomic_fetch_sub(&_M_i, __i, __m); }
218 - __always_inline __int_type
219 + __libitm_always_inline __int_type
220 fetch_sub(__int_type __i,
221 memory_order __m = memory_order_seq_cst) volatile noexcept
222 { return __atomic_fetch_sub(&_M_i, __i, __m); }
224 - __always_inline __int_type
225 + __libitm_always_inline __int_type
226 fetch_and(__int_type __i,
227 memory_order __m = memory_order_seq_cst) noexcept
228 { return __atomic_fetch_and(&_M_i, __i, __m); }
230 - __always_inline __int_type
231 + __libitm_always_inline __int_type
232 fetch_and(__int_type __i,
233 memory_order __m = memory_order_seq_cst) volatile noexcept
234 { return __atomic_fetch_and(&_M_i, __i, __m); }
236 - __always_inline __int_type
237 + __libitm_always_inline __int_type
238 fetch_or(__int_type __i,
239 memory_order __m = memory_order_seq_cst) noexcept
240 { return __atomic_fetch_or(&_M_i, __i, __m); }
242 - __always_inline __int_type
243 + __libitm_always_inline __int_type
244 fetch_or(__int_type __i,
245 memory_order __m = memory_order_seq_cst) volatile noexcept
246 { return __atomic_fetch_or(&_M_i, __i, __m); }
248 - __always_inline __int_type
249 + __libitm_always_inline __int_type
250 fetch_xor(__int_type __i,
251 memory_order __m = memory_order_seq_cst) noexcept
252 { return __atomic_fetch_xor(&_M_i, __i, __m); }
254 - __always_inline __int_type
255 + __libitm_always_inline __int_type
256 fetch_xor(__int_type __i,
257 memory_order __m = memory_order_seq_cst) volatile noexcept
258 { return __atomic_fetch_xor(&_M_i, __i, __m); }
260 is_lock_free() const volatile noexcept
261 { return __atomic_is_lock_free (sizeof (_M_p), &_M_p); }
263 - __always_inline void
264 + __libitm_always_inline void
265 store(__pointer_type __p,
266 memory_order __m = memory_order_seq_cst) noexcept
269 __atomic_store_n(&_M_p, __p, __m);
272 - __always_inline void
273 + __libitm_always_inline void
274 store(__pointer_type __p,
275 memory_order __m = memory_order_seq_cst) volatile noexcept
278 __atomic_store_n(&_M_p, __p, __m);
281 - __always_inline __pointer_type
282 + __libitm_always_inline __pointer_type
283 load(memory_order __m = memory_order_seq_cst) const noexcept
285 // __glibcxx_assert(__m != memory_order_release);
287 return __atomic_load_n(&_M_p, __m);
290 - __always_inline __pointer_type
291 + __libitm_always_inline __pointer_type
292 load(memory_order __m = memory_order_seq_cst) const volatile noexcept
294 // __glibcxx_assert(__m != memory_order_release);
295 @@ -773,21 +772,21 @@
296 return __atomic_load_n(&_M_p, __m);
299 - __always_inline __pointer_type
300 + __libitm_always_inline __pointer_type
301 exchange(__pointer_type __p,
302 memory_order __m = memory_order_seq_cst) noexcept
304 return __atomic_exchange_n(&_M_p, __p, __m);
307 - __always_inline __pointer_type
308 + __libitm_always_inline __pointer_type
309 exchange(__pointer_type __p,
310 memory_order __m = memory_order_seq_cst) volatile noexcept
312 return __atomic_exchange_n(&_M_p, __p, __m);
315 - __always_inline bool
316 + __libitm_always_inline bool
317 compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
319 memory_order __m2) noexcept
321 return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0, __m1, __m2);
324 - __always_inline bool
325 + __libitm_always_inline bool
326 compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
328 memory_order __m2) volatile noexcept
329 @@ -811,22 +810,22 @@
330 return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0, __m1, __m2);
333 - __always_inline __pointer_type
334 + __libitm_always_inline __pointer_type
335 fetch_add(ptrdiff_t __d,
336 memory_order __m = memory_order_seq_cst) noexcept
337 { return __atomic_fetch_add(&_M_p, __d, __m); }
339 - __always_inline __pointer_type
340 + __libitm_always_inline __pointer_type
341 fetch_add(ptrdiff_t __d,
342 memory_order __m = memory_order_seq_cst) volatile noexcept
343 { return __atomic_fetch_add(&_M_p, __d, __m); }
345 - __always_inline __pointer_type
346 + __libitm_always_inline __pointer_type
347 fetch_sub(ptrdiff_t __d,
348 memory_order __m = memory_order_seq_cst) noexcept
349 { return __atomic_fetch_sub(&_M_p, __d, __m); }
351 - __always_inline __pointer_type
352 + __libitm_always_inline __pointer_type
353 fetch_sub(ptrdiff_t __d,
354 memory_order __m = memory_order_seq_cst) volatile noexcept
355 { return __atomic_fetch_sub(&_M_p, __d, __m); }
356 @@ -870,67 +869,67 @@
358 is_lock_free() const volatile noexcept { return _M_base.is_lock_free(); }
360 - __always_inline void
361 + __libitm_always_inline void
362 store(bool __i, memory_order __m = memory_order_seq_cst) noexcept
363 { _M_base.store(__i, __m); }
365 - __always_inline void
366 + __libitm_always_inline void
367 store(bool __i, memory_order __m = memory_order_seq_cst) volatile noexcept
368 { _M_base.store(__i, __m); }
370 - __always_inline bool
371 + __libitm_always_inline bool
372 load(memory_order __m = memory_order_seq_cst) const noexcept
373 { return _M_base.load(__m); }
375 - __always_inline bool
376 + __libitm_always_inline bool
377 load(memory_order __m = memory_order_seq_cst) const volatile noexcept
378 { return _M_base.load(__m); }
380 - __always_inline bool
381 + __libitm_always_inline bool
382 exchange(bool __i, memory_order __m = memory_order_seq_cst) noexcept
383 { return _M_base.exchange(__i, __m); }
385 - __always_inline bool
386 + __libitm_always_inline bool
388 memory_order __m = memory_order_seq_cst) volatile noexcept
389 { return _M_base.exchange(__i, __m); }
391 - __always_inline bool
392 + __libitm_always_inline bool
393 compare_exchange_weak(bool& __i1, bool __i2, memory_order __m1,
394 memory_order __m2) noexcept
395 { return _M_base.compare_exchange_weak(__i1, __i2, __m1, __m2); }
397 - __always_inline bool
398 + __libitm_always_inline bool
399 compare_exchange_weak(bool& __i1, bool __i2, memory_order __m1,
400 memory_order __m2) volatile noexcept
401 { return _M_base.compare_exchange_weak(__i1, __i2, __m1, __m2); }
403 - __always_inline bool
404 + __libitm_always_inline bool
405 compare_exchange_weak(bool& __i1, bool __i2,
406 memory_order __m = memory_order_seq_cst) noexcept
407 { return _M_base.compare_exchange_weak(__i1, __i2, __m); }
409 - __always_inline bool
410 + __libitm_always_inline bool
411 compare_exchange_weak(bool& __i1, bool __i2,
412 memory_order __m = memory_order_seq_cst) volatile noexcept
413 { return _M_base.compare_exchange_weak(__i1, __i2, __m); }
415 - __always_inline bool
416 + __libitm_always_inline bool
417 compare_exchange_strong(bool& __i1, bool __i2, memory_order __m1,
418 memory_order __m2) noexcept
419 { return _M_base.compare_exchange_strong(__i1, __i2, __m1, __m2); }
421 - __always_inline bool
422 + __libitm_always_inline bool
423 compare_exchange_strong(bool& __i1, bool __i2, memory_order __m1,
424 memory_order __m2) volatile noexcept
425 { return _M_base.compare_exchange_strong(__i1, __i2, __m1, __m2); }
427 - __always_inline bool
428 + __libitm_always_inline bool
429 compare_exchange_strong(bool& __i1, bool __i2,
430 memory_order __m = memory_order_seq_cst) noexcept
431 { return _M_base.compare_exchange_strong(__i1, __i2, __m); }
433 - __always_inline bool
434 + __libitm_always_inline bool
435 compare_exchange_strong(bool& __i1, bool __i2,
436 memory_order __m = memory_order_seq_cst) volatile noexcept
437 { return _M_base.compare_exchange_strong(__i1, __i2, __m); }
438 @@ -980,11 +979,11 @@
439 store(_Tp __i, memory_order _m = memory_order_seq_cst) noexcept
440 { __atomic_store(&_M_i, &__i, _m); }
442 - __always_inline void
443 + __libitm_always_inline void
444 store(_Tp __i, memory_order _m = memory_order_seq_cst) volatile noexcept
445 { __atomic_store(&_M_i, &__i, _m); }
447 - __always_inline _Tp
448 + __libitm_always_inline _Tp
449 load(memory_order _m = memory_order_seq_cst) const noexcept
456 - __always_inline _Tp
457 + __libitm_always_inline _Tp
458 load(memory_order _m = memory_order_seq_cst) const volatile noexcept
465 - __always_inline _Tp
466 + __libitm_always_inline _Tp
467 exchange(_Tp __i, memory_order _m = memory_order_seq_cst) noexcept
470 @@ -1008,7 +1007,7 @@
474 - __always_inline _Tp
475 + __libitm_always_inline _Tp
477 memory_order _m = memory_order_seq_cst) volatile noexcept
479 @@ -1017,50 +1016,50 @@
483 - __always_inline bool
484 + __libitm_always_inline bool
485 compare_exchange_weak(_Tp& __e, _Tp __i, memory_order __s,
486 memory_order __f) noexcept
488 return __atomic_compare_exchange(&_M_i, &__e, &__i, true, __s, __f);
491 - __always_inline bool
492 + __libitm_always_inline bool
493 compare_exchange_weak(_Tp& __e, _Tp __i, memory_order __s,
494 memory_order __f) volatile noexcept
496 return __atomic_compare_exchange(&_M_i, &__e, &__i, true, __s, __f);
499 - __always_inline bool
500 + __libitm_always_inline bool
501 compare_exchange_weak(_Tp& __e, _Tp __i,
502 memory_order __m = memory_order_seq_cst) noexcept
503 { return compare_exchange_weak(__e, __i, __m, __m); }
505 - __always_inline bool
506 + __libitm_always_inline bool
507 compare_exchange_weak(_Tp& __e, _Tp __i,
508 memory_order __m = memory_order_seq_cst) volatile noexcept
509 { return compare_exchange_weak(__e, __i, __m, __m); }
511 - __always_inline bool
512 + __libitm_always_inline bool
513 compare_exchange_strong(_Tp& __e, _Tp __i, memory_order __s,
514 memory_order __f) noexcept
516 return __atomic_compare_exchange(&_M_i, &__e, &__i, false, __s, __f);
519 - __always_inline bool
520 + __libitm_always_inline bool
521 compare_exchange_strong(_Tp& __e, _Tp __i, memory_order __s,
522 memory_order __f) volatile noexcept
524 return __atomic_compare_exchange(&_M_i, &__e, &__i, false, __s, __f);
527 - __always_inline bool
528 + __libitm_always_inline bool
529 compare_exchange_strong(_Tp& __e, _Tp __i,
530 memory_order __m = memory_order_seq_cst) noexcept
531 { return compare_exchange_strong(__e, __i, __m, __m); }
533 - __always_inline bool
534 + __libitm_always_inline bool
535 compare_exchange_strong(_Tp& __e, _Tp __i,
536 memory_order __m = memory_order_seq_cst) volatile noexcept
537 { return compare_exchange_strong(__e, __i, __m, __m); }
538 @@ -1153,46 +1152,46 @@
539 is_lock_free() const volatile noexcept
540 { return _M_b.is_lock_free(); }
542 - __always_inline void
543 + __libitm_always_inline void
544 store(__pointer_type __p,
545 memory_order __m = memory_order_seq_cst) noexcept
546 { return _M_b.store(__p, __m); }
548 - __always_inline void
549 + __libitm_always_inline void
550 store(__pointer_type __p,
551 memory_order __m = memory_order_seq_cst) volatile noexcept
552 { return _M_b.store(__p, __m); }
554 - __always_inline __pointer_type
555 + __libitm_always_inline __pointer_type
556 load(memory_order __m = memory_order_seq_cst) const noexcept
557 { return _M_b.load(__m); }
559 - __always_inline __pointer_type
560 + __libitm_always_inline __pointer_type
561 load(memory_order __m = memory_order_seq_cst) const volatile noexcept
562 { return _M_b.load(__m); }
564 - __always_inline __pointer_type
565 + __libitm_always_inline __pointer_type
566 exchange(__pointer_type __p,
567 memory_order __m = memory_order_seq_cst) noexcept
568 { return _M_b.exchange(__p, __m); }
570 - __always_inline __pointer_type
571 + __libitm_always_inline __pointer_type
572 exchange(__pointer_type __p,
573 memory_order __m = memory_order_seq_cst) volatile noexcept
574 { return _M_b.exchange(__p, __m); }
576 - __always_inline bool
577 + __libitm_always_inline bool
578 compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
579 memory_order __m1, memory_order __m2) noexcept
580 { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); }
582 - __always_inline bool
583 + __libitm_always_inline bool
584 compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
586 memory_order __m2) volatile noexcept
587 { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); }
589 - __always_inline bool
590 + __libitm_always_inline bool
591 compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
592 memory_order __m = memory_order_seq_cst) noexcept
594 @@ -1200,7 +1199,7 @@
595 __calculate_memory_order(__m));
598 - __always_inline bool
599 + __libitm_always_inline bool
600 compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
601 memory_order __m = memory_order_seq_cst) volatile noexcept
603 @@ -1208,18 +1207,18 @@
604 __calculate_memory_order(__m));
607 - __always_inline bool
608 + __libitm_always_inline bool
609 compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
610 memory_order __m1, memory_order __m2) noexcept
611 { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); }
613 - __always_inline bool
614 + __libitm_always_inline bool
615 compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
617 memory_order __m2) volatile noexcept
618 { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); }
620 - __always_inline bool
621 + __libitm_always_inline bool
622 compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
623 memory_order __m = memory_order_seq_cst) noexcept
625 @@ -1227,7 +1226,7 @@
626 __calculate_memory_order(__m));
629 - __always_inline bool
630 + __libitm_always_inline bool
631 compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
632 memory_order __m = memory_order_seq_cst) volatile noexcept
634 @@ -1235,22 +1234,22 @@
635 __calculate_memory_order(__m));
638 - __always_inline __pointer_type
639 + __libitm_always_inline __pointer_type
640 fetch_add(ptrdiff_t __d,
641 memory_order __m = memory_order_seq_cst) noexcept
642 { return _M_b.fetch_add(__d, __m); }
644 - __always_inline __pointer_type
645 + __libitm_always_inline __pointer_type
646 fetch_add(ptrdiff_t __d,
647 memory_order __m = memory_order_seq_cst) volatile noexcept
648 { return _M_b.fetch_add(__d, __m); }
650 - __always_inline __pointer_type
651 + __libitm_always_inline __pointer_type
652 fetch_sub(ptrdiff_t __d,
653 memory_order __m = memory_order_seq_cst) noexcept
654 { return _M_b.fetch_sub(__d, __m); }
656 - __always_inline __pointer_type
657 + __libitm_always_inline __pointer_type
658 fetch_sub(ptrdiff_t __d,
659 memory_order __m = memory_order_seq_cst) volatile noexcept
660 { return _M_b.fetch_sub(__d, __m); }
661 @@ -1544,98 +1543,98 @@
664 // Function definitions, atomic_flag operations.
665 - inline __always_inline bool
666 + inline __libitm_always_inline bool
667 atomic_flag_test_and_set_explicit(atomic_flag* __a,
668 memory_order __m) noexcept
669 { return __a->test_and_set(__m); }
671 - inline __always_inline bool
672 + inline __libitm_always_inline bool
673 atomic_flag_test_and_set_explicit(volatile atomic_flag* __a,
674 memory_order __m) noexcept
675 { return __a->test_and_set(__m); }
677 - inline __always_inline void
678 + inline __libitm_always_inline void
679 atomic_flag_clear_explicit(atomic_flag* __a, memory_order __m) noexcept
682 - inline __always_inline void
683 + inline __libitm_always_inline void
684 atomic_flag_clear_explicit(volatile atomic_flag* __a,
685 memory_order __m) noexcept
688 - inline __always_inline bool
689 + inline __libitm_always_inline bool
690 atomic_flag_test_and_set(atomic_flag* __a) noexcept
691 { return atomic_flag_test_and_set_explicit(__a, memory_order_seq_cst); }
693 - inline __always_inline bool
694 + inline __libitm_always_inline bool
695 atomic_flag_test_and_set(volatile atomic_flag* __a) noexcept
696 { return atomic_flag_test_and_set_explicit(__a, memory_order_seq_cst); }
698 - inline __always_inline void
699 + inline __libitm_always_inline void
700 atomic_flag_clear(atomic_flag* __a) noexcept
701 { atomic_flag_clear_explicit(__a, memory_order_seq_cst); }
703 - inline __always_inline void
704 + inline __libitm_always_inline void
705 atomic_flag_clear(volatile atomic_flag* __a) noexcept
706 { atomic_flag_clear_explicit(__a, memory_order_seq_cst); }
709 // Function templates generally applicable to atomic types.
710 template<typename _ITp>
711 - __always_inline bool
712 + __libitm_always_inline bool
713 atomic_is_lock_free(const atomic<_ITp>* __a) noexcept
714 { return __a->is_lock_free(); }
716 template<typename _ITp>
717 - __always_inline bool
718 + __libitm_always_inline bool
719 atomic_is_lock_free(const volatile atomic<_ITp>* __a) noexcept
720 { return __a->is_lock_free(); }
722 template<typename _ITp>
723 - __always_inline void
724 + __libitm_always_inline void
725 atomic_init(atomic<_ITp>* __a, _ITp __i) noexcept;
727 template<typename _ITp>
728 - __always_inline void
729 + __libitm_always_inline void
730 atomic_init(volatile atomic<_ITp>* __a, _ITp __i) noexcept;
732 template<typename _ITp>
733 - __always_inline void
734 + __libitm_always_inline void
735 atomic_store_explicit(atomic<_ITp>* __a, _ITp __i,
736 memory_order __m) noexcept
737 { __a->store(__i, __m); }
739 template<typename _ITp>
740 - __always_inline void
741 + __libitm_always_inline void
742 atomic_store_explicit(volatile atomic<_ITp>* __a, _ITp __i,
743 memory_order __m) noexcept
744 { __a->store(__i, __m); }
746 template<typename _ITp>
747 - __always_inline _ITp
748 + __libitm_always_inline _ITp
749 atomic_load_explicit(const atomic<_ITp>* __a, memory_order __m) noexcept
750 { return __a->load(__m); }
752 template<typename _ITp>
753 - __always_inline _ITp
754 + __libitm_always_inline _ITp
755 atomic_load_explicit(const volatile atomic<_ITp>* __a,
756 memory_order __m) noexcept
757 { return __a->load(__m); }
759 template<typename _ITp>
760 - __always_inline _ITp
761 + __libitm_always_inline _ITp
762 atomic_exchange_explicit(atomic<_ITp>* __a, _ITp __i,
763 memory_order __m) noexcept
764 { return __a->exchange(__i, __m); }
766 template<typename _ITp>
767 - __always_inline _ITp
768 + __libitm_always_inline _ITp
769 atomic_exchange_explicit(volatile atomic<_ITp>* __a, _ITp __i,
770 memory_order __m) noexcept
771 { return __a->exchange(__i, __m); }
773 template<typename _ITp>
774 - __always_inline bool
775 + __libitm_always_inline bool
776 atomic_compare_exchange_weak_explicit(atomic<_ITp>* __a,
777 _ITp* __i1, _ITp __i2,
779 @@ -1643,7 +1642,7 @@
780 { return __a->compare_exchange_weak(*__i1, __i2, __m1, __m2); }
782 template<typename _ITp>
783 - __always_inline bool
784 + __libitm_always_inline bool
785 atomic_compare_exchange_weak_explicit(volatile atomic<_ITp>* __a,
786 _ITp* __i1, _ITp __i2,
788 @@ -1651,7 +1650,7 @@
789 { return __a->compare_exchange_weak(*__i1, __i2, __m1, __m2); }
791 template<typename _ITp>
792 - __always_inline bool
793 + __libitm_always_inline bool
794 atomic_compare_exchange_strong_explicit(atomic<_ITp>* __a,
795 _ITp* __i1, _ITp __i2,
797 @@ -1659,7 +1658,7 @@
798 { return __a->compare_exchange_strong(*__i1, __i2, __m1, __m2); }
800 template<typename _ITp>
801 - __always_inline bool
802 + __libitm_always_inline bool
803 atomic_compare_exchange_strong_explicit(volatile atomic<_ITp>* __a,
804 _ITp* __i1, _ITp __i2,
806 @@ -1668,37 +1667,37 @@
809 template<typename _ITp>
810 - __always_inline void
811 + __libitm_always_inline void
812 atomic_store(atomic<_ITp>* __a, _ITp __i) noexcept
813 { atomic_store_explicit(__a, __i, memory_order_seq_cst); }
815 template<typename _ITp>
816 - __always_inline void
817 + __libitm_always_inline void
818 atomic_store(volatile atomic<_ITp>* __a, _ITp __i) noexcept
819 { atomic_store_explicit(__a, __i, memory_order_seq_cst); }
821 template<typename _ITp>
822 - __always_inline _ITp
823 + __libitm_always_inline _ITp
824 atomic_load(const atomic<_ITp>* __a) noexcept
825 { return atomic_load_explicit(__a, memory_order_seq_cst); }
827 template<typename _ITp>
828 - __always_inline _ITp
829 + __libitm_always_inline _ITp
830 atomic_load(const volatile atomic<_ITp>* __a) noexcept
831 { return atomic_load_explicit(__a, memory_order_seq_cst); }
833 template<typename _ITp>
834 - __always_inline _ITp
835 + __libitm_always_inline _ITp
836 atomic_exchange(atomic<_ITp>* __a, _ITp __i) noexcept
837 { return atomic_exchange_explicit(__a, __i, memory_order_seq_cst); }
839 template<typename _ITp>
840 - __always_inline _ITp
841 + __libitm_always_inline _ITp
842 atomic_exchange(volatile atomic<_ITp>* __a, _ITp __i) noexcept
843 { return atomic_exchange_explicit(__a, __i, memory_order_seq_cst); }
845 template<typename _ITp>
846 - __always_inline bool
847 + __libitm_always_inline bool
848 atomic_compare_exchange_weak(atomic<_ITp>* __a,
849 _ITp* __i1, _ITp __i2) noexcept
851 @@ -1708,7 +1707,7 @@
854 template<typename _ITp>
855 - __always_inline bool
856 + __libitm_always_inline bool
857 atomic_compare_exchange_weak(volatile atomic<_ITp>* __a,
858 _ITp* __i1, _ITp __i2) noexcept
860 @@ -1718,7 +1717,7 @@
863 template<typename _ITp>
864 - __always_inline bool
865 + __libitm_always_inline bool
866 atomic_compare_exchange_strong(atomic<_ITp>* __a,
867 _ITp* __i1, _ITp __i2) noexcept
869 @@ -1728,7 +1727,7 @@
872 template<typename _ITp>
873 - __always_inline bool
874 + __libitm_always_inline bool
875 atomic_compare_exchange_strong(volatile atomic<_ITp>* __a,
876 _ITp* __i1, _ITp __i2) noexcept
878 @@ -1742,158 +1741,158 @@
879 // integral types as specified in the standard, excluding address
881 template<typename _ITp>
882 - __always_inline _ITp
883 + __libitm_always_inline _ITp
884 atomic_fetch_add_explicit(__atomic_base<_ITp>* __a, _ITp __i,
885 memory_order __m) noexcept
886 { return __a->fetch_add(__i, __m); }
888 template<typename _ITp>
889 - __always_inline _ITp
890 + __libitm_always_inline _ITp
891 atomic_fetch_add_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
892 memory_order __m) noexcept
893 { return __a->fetch_add(__i, __m); }
895 template<typename _ITp>
896 - __always_inline _ITp
897 + __libitm_always_inline _ITp
898 atomic_fetch_sub_explicit(__atomic_base<_ITp>* __a, _ITp __i,
899 memory_order __m) noexcept
900 { return __a->fetch_sub(__i, __m); }
902 template<typename _ITp>
903 - __always_inline _ITp
904 + __libitm_always_inline _ITp
905 atomic_fetch_sub_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
906 memory_order __m) noexcept
907 { return __a->fetch_sub(__i, __m); }
909 template<typename _ITp>
910 - __always_inline _ITp
911 + __libitm_always_inline _ITp
912 atomic_fetch_and_explicit(__atomic_base<_ITp>* __a, _ITp __i,
913 memory_order __m) noexcept
914 { return __a->fetch_and(__i, __m); }
916 template<typename _ITp>
917 - __always_inline _ITp
918 + __libitm_always_inline _ITp
919 atomic_fetch_and_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
920 memory_order __m) noexcept
921 { return __a->fetch_and(__i, __m); }
923 template<typename _ITp>
924 - __always_inline _ITp
925 + __libitm_always_inline _ITp
926 atomic_fetch_or_explicit(__atomic_base<_ITp>* __a, _ITp __i,
927 memory_order __m) noexcept
928 { return __a->fetch_or(__i, __m); }
930 template<typename _ITp>
931 - __always_inline _ITp
932 + __libitm_always_inline _ITp
933 atomic_fetch_or_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
934 memory_order __m) noexcept
935 { return __a->fetch_or(__i, __m); }
937 template<typename _ITp>
938 - __always_inline _ITp
939 + __libitm_always_inline _ITp
940 atomic_fetch_xor_explicit(__atomic_base<_ITp>* __a, _ITp __i,
941 memory_order __m) noexcept
942 { return __a->fetch_xor(__i, __m); }
944 template<typename _ITp>
945 - __always_inline _ITp
946 + __libitm_always_inline _ITp
947 atomic_fetch_xor_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
948 memory_order __m) noexcept
949 { return __a->fetch_xor(__i, __m); }
951 template<typename _ITp>
952 - __always_inline _ITp
953 + __libitm_always_inline _ITp
954 atomic_fetch_add(__atomic_base<_ITp>* __a, _ITp __i) noexcept
955 { return atomic_fetch_add_explicit(__a, __i, memory_order_seq_cst); }
957 template<typename _ITp>
958 - __always_inline _ITp
959 + __libitm_always_inline _ITp
960 atomic_fetch_add(volatile __atomic_base<_ITp>* __a, _ITp __i) noexcept
961 { return atomic_fetch_add_explicit(__a, __i, memory_order_seq_cst); }
963 template<typename _ITp>
964 - __always_inline _ITp
965 + __libitm_always_inline _ITp
966 atomic_fetch_sub(__atomic_base<_ITp>* __a, _ITp __i) noexcept
967 { return atomic_fetch_sub_explicit(__a, __i, memory_order_seq_cst); }
969 template<typename _ITp>
970 - __always_inline _ITp
971 + __libitm_always_inline _ITp
972 atomic_fetch_sub(volatile __atomic_base<_ITp>* __a, _ITp __i) noexcept
973 { return atomic_fetch_sub_explicit(__a, __i, memory_order_seq_cst); }
975 template<typename _ITp>
976 - __always_inline _ITp
977 + __libitm_always_inline _ITp
978 atomic_fetch_and(__atomic_base<_ITp>* __a, _ITp __i) noexcept
979 { return atomic_fetch_and_explicit(__a, __i, memory_order_seq_cst); }
981 template<typename _ITp>
982 - __always_inline _ITp
983 + __libitm_always_inline _ITp
984 atomic_fetch_and(volatile __atomic_base<_ITp>* __a, _ITp __i) noexcept
985 { return atomic_fetch_and_explicit(__a, __i, memory_order_seq_cst); }
987 template<typename _ITp>
988 - __always_inline _ITp
989 + __libitm_always_inline _ITp
990 atomic_fetch_or(__atomic_base<_ITp>* __a, _ITp __i) noexcept
991 { return atomic_fetch_or_explicit(__a, __i, memory_order_seq_cst); }
993 template<typename _ITp>
994 - __always_inline _ITp
995 + __libitm_always_inline _ITp
996 atomic_fetch_or(volatile __atomic_base<_ITp>* __a, _ITp __i) noexcept
997 { return atomic_fetch_or_explicit(__a, __i, memory_order_seq_cst); }
999 template<typename _ITp>
1000 - __always_inline _ITp
1001 + __libitm_always_inline _ITp
1002 atomic_fetch_xor(__atomic_base<_ITp>* __a, _ITp __i) noexcept
1003 { return atomic_fetch_xor_explicit(__a, __i, memory_order_seq_cst); }
1005 template<typename _ITp>
1006 - __always_inline _ITp
1007 + __libitm_always_inline _ITp
1008 atomic_fetch_xor(volatile __atomic_base<_ITp>* __a, _ITp __i) noexcept
1009 { return atomic_fetch_xor_explicit(__a, __i, memory_order_seq_cst); }
1012 // Partial specializations for pointers.
1013 template<typename _ITp>
1014 - __always_inline _ITp*
1015 + __libitm_always_inline _ITp*
1016 atomic_fetch_add_explicit(atomic<_ITp*>* __a, ptrdiff_t __d,
1017 memory_order __m) noexcept
1018 { return __a->fetch_add(__d, __m); }
1020 template<typename _ITp>
1021 - __always_inline _ITp*
1022 + __libitm_always_inline _ITp*
1023 atomic_fetch_add_explicit(volatile atomic<_ITp*>* __a, ptrdiff_t __d,
1024 memory_order __m) noexcept
1025 { return __a->fetch_add(__d, __m); }
1027 template<typename _ITp>
1028 - __always_inline _ITp*
1029 + __libitm_always_inline _ITp*
1030 atomic_fetch_add(volatile atomic<_ITp*>* __a, ptrdiff_t __d) noexcept
1031 { return __a->fetch_add(__d); }
1033 template<typename _ITp>
1034 - __always_inline _ITp*
1035 + __libitm_always_inline _ITp*
1036 atomic_fetch_add(atomic<_ITp*>* __a, ptrdiff_t __d) noexcept
1037 { return __a->fetch_add(__d); }
1039 template<typename _ITp>
1040 - __always_inline _ITp*
1041 + __libitm_always_inline _ITp*
1042 atomic_fetch_sub_explicit(volatile atomic<_ITp*>* __a,
1043 ptrdiff_t __d, memory_order __m) noexcept
1044 { return __a->fetch_sub(__d, __m); }
1046 template<typename _ITp>
1047 - __always_inline _ITp*
1048 + __libitm_always_inline _ITp*
1049 atomic_fetch_sub_explicit(atomic<_ITp*>* __a, ptrdiff_t __d,
1050 memory_order __m) noexcept
1051 { return __a->fetch_sub(__d, __m); }
1053 template<typename _ITp>
1054 - __always_inline _ITp*
1055 + __libitm_always_inline _ITp*
1056 atomic_fetch_sub(volatile atomic<_ITp*>* __a, ptrdiff_t __d) noexcept
1057 { return __a->fetch_sub(__d); }
1059 template<typename _ITp>
1060 - __always_inline _ITp*
1061 + __libitm_always_inline _ITp*
1062 atomic_fetch_sub(atomic<_ITp*>* __a, ptrdiff_t __d) noexcept
1063 { return __a->fetch_sub(__d); }