//===-- sanitizer_atomic_clang_x86.h ----------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ATOMIC_CLANG_X86_H
#define SANITIZER_ATOMIC_CLANG_X86_H

namespace __sanitizer {

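// Processor-level yield for spin-wait loops: the PAUSE instruction hints to
// the CPU that this is a busy-wait, which saves power and avoids a pipeline
// flush on loop exit, while the empty asm statements act as compiler-only
// memory barriers around the loop.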
INLINE void proc_yield(int cnt) {
  __asm__ __volatile__("" ::: "memory");
  for (int i = 0; i < cnt; i++)
    __asm__ __volatile__("pause");
  __asm__ __volatile__("" ::: "memory");
}

template<typename T>
INLINE typename T::Type atomic_load(
    const volatile T *a, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_consume
      | memory_order_acquire | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));
  typename T::Type v;

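  // An aligned access no wider than the native word is a single MOV, which
  // x86 performs atomically; only an 8-byte access on a 32-bit target needs
  // the MMX-based path below.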
  if (sizeof(*a) < 8 || sizeof(void*) == 8) {
    // Assume that aligned loads are atomic.
    if (mo == memory_order_relaxed) {
      v = a->val_dont_use;
    } else if (mo == memory_order_consume) {
      // Assume that processor respects data dependencies
      // (and that compiler won't break them).
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      __asm__ __volatile__("" ::: "memory");
    } else if (mo == memory_order_acquire) {
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      // On x86 loads are implicitly acquire.
      __asm__ __volatile__("" ::: "memory");
    } else {  // seq_cst
      // On x86 a plain MOV is enough for a seq_cst load.
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      __asm__ __volatile__("" ::: "memory");
    }
  } else {
    // 64-bit load on 32-bit platform.
    __asm__ __volatile__(
        "movq %1, %%mm0;"  // Use mmx reg for 64-bit atomic moves
        "movq %%mm0, %0;"  // (ptr could be read-only)
        "emms;"            // Empty mmx state/Reset FP regs
        : "=m" (v)
        : "m" (a->val_dont_use)
        : // mark the mmx registers as clobbered
#ifdef __MMX__
          "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
#endif  // #ifdef __MMX__
          "memory");
  }
  return v;
}

template<typename T>
INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_release
      | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));

  if (sizeof(*a) < 8 || sizeof(void*) == 8) {
    // Assume that aligned stores are atomic.
    if (mo == memory_order_relaxed) {
      a->val_dont_use = v;
    } else if (mo == memory_order_release) {
      // On x86 stores are implicitly release.
      __asm__ __volatile__("" ::: "memory");
      a->val_dont_use = v;
      __asm__ __volatile__("" ::: "memory");
    } else {  // seq_cst
      // On x86 stores are implicitly release.
      __asm__ __volatile__("" ::: "memory");
      a->val_dont_use = v;
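      // The trailing full barrier (GCC's __sync_synchronize builtin) upgrades
      // the implicit release ordering of the store to seq_cst.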
      __sync_synchronize();
    }
  } else {
    // 64-bit store on 32-bit platform.
    __asm__ __volatile__(
        "movq %1, %%mm0;"  // Use mmx reg for 64-bit atomic moves
        "movq %%mm0, %0;"
        "emms;"            // Empty mmx state/Reset FP regs
        : "=m" (a->val_dont_use)
        : "m" (v)
        : // mark the mmx registers as clobbered
#ifdef __MMX__
          "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
#endif  // #ifdef __MMX__
          "memory");
    if (mo == memory_order_seq_cst)
      __sync_synchronize();
  }
}
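
// Illustrative usage (a sketch only; atomic_uint32_t and the memory_order
// constants are expected to come from sanitizer_atomic.h, the intended
// include point for this header):
//   atomic_uint32_t flag;
//   atomic_store(&flag, 1u, memory_order_release);
//   u32 v = atomic_load(&flag, memory_order_acquire);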

}  // namespace __sanitizer

#endif  // #ifndef SANITIZER_ATOMIC_CLANG_X86_H