/* Copyright (C) 2002-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef _X86_64_ATOMIC_MACHINE_H
#define _X86_64_ATOMIC_MACHINE_H 1

#include <stdint.h>
#include <tls.h>                  /* For tcbhead_t.  */
#include <libc-pointer-arith.h>   /* For cast_to_integer.  */

typedef int8_t atomic8_t;
typedef uint8_t uatomic8_t;
typedef int_fast8_t atomic_fast8_t;
typedef uint_fast8_t uatomic_fast8_t;

typedef int16_t atomic16_t;
typedef uint16_t uatomic16_t;
typedef int_fast16_t atomic_fast16_t;
typedef uint_fast16_t uatomic_fast16_t;

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;


#ifndef LOCK_PREFIX
# ifdef UP
#  define LOCK_PREFIX   /* nothing */
# else
#  define LOCK_PREFIX "lock;"
# endif
#endif

#define __HAVE_64B_ATOMICS 1
#define USE_ATOMIC_COMPILER_BUILTINS 1
#define ATOMIC_EXCHANGE_USES_CAS 0

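/* Editorial note on the feature macros above (not in the original source;
   stated as these macros are commonly understood in glibc):
   __HAVE_64B_ATOMICS advertises native 64-bit atomic operations,
   USE_ATOMIC_COMPILER_BUILTINS routes the C11-like atomic macros to the
   compiler's __atomic_* builtins, and ATOMIC_EXCHANGE_USES_CAS == 0 records
   that atomic exchange is a single XCHG instruction rather than a CAS loop
   on this architecture.  */
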
#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  __sync_val_compare_and_swap (mem, oldval, newval)
#define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  (! __sync_bool_compare_and_swap (mem, oldval, newval))


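/* Illustrative usage (editorial note, hypothetical names): the _val_ form
   returns the value previously stored in *mem; the _bool_ form returns
   nonzero when the exchange did NOT take place.  For example:

     int expected = 0;
     int old = atomic_compare_and_exchange_val_acq (&word, 1, expected);

   If old == expected, the store of 1 happened; otherwise old holds the
   conflicting value found in memory.  */
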
#define __arch_c_compare_and_exchange_val_8_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("cmpl $0, %%fs:%P5\n\t" \
                       "je 0f\n\t" \
                       "lock\n" \
                       "0:\tcmpxchgb %b2, %1" \
                       : "=a" (ret), "=m" (*mem) \
                       : "q" (newval), "m" (*mem), "0" (oldval), \
                         "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })

#define __arch_c_compare_and_exchange_val_16_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("cmpl $0, %%fs:%P5\n\t" \
                       "je 0f\n\t" \
                       "lock\n" \
                       "0:\tcmpxchgw %w2, %1" \
                       : "=a" (ret), "=m" (*mem) \
                       : "q" (newval), "m" (*mem), "0" (oldval), \
                         "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })

#define __arch_c_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("cmpl $0, %%fs:%P5\n\t" \
                       "je 0f\n\t" \
                       "lock\n" \
                       "0:\tcmpxchgl %2, %1" \
                       : "=a" (ret), "=m" (*mem) \
                       : "q" (newval), "m" (*mem), "0" (oldval), \
                         "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })

#define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("cmpl $0, %%fs:%P5\n\t" \
                       "je 0f\n\t" \
                       "lock\n" \
                       "0:\tcmpxchgq %q2, %1" \
                       : "=a" (ret), "=m" (*mem) \
                       : "q" ((atomic64_t) cast_to_integer (newval)), \
                         "m" (*mem), \
                         "0" ((atomic64_t) cast_to_integer (oldval)), \
                         "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })


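/* Editorial note: the "c"-prefixed variants above (and the catomic_* macros
   built on the same pattern below) test the multiple_threads flag in the TCB
   (addressed through %fs) and branch over the LOCK prefix while the process
   is still single-threaded, so the cheaper unlocked instruction is used
   until a second thread exists.  */
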
/* Note that we need no lock prefix; XCHG with a memory operand is
   implicitly locked.  */
#define atomic_exchange_acq(mem, newvalue) \
  ({ __typeof (*mem) result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile ("xchgb %b0, %1" \
                         : "=q" (result), "=m" (*mem) \
                         : "0" (newvalue), "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile ("xchgw %w0, %1" \
                         : "=r" (result), "=m" (*mem) \
                         : "0" (newvalue), "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile ("xchgl %0, %1" \
                         : "=r" (result), "=m" (*mem) \
                         : "0" (newvalue), "m" (*mem)); \
     else \
       __asm __volatile ("xchgq %q0, %1" \
                         : "=r" (result), "=m" (*mem) \
                         : "0" ((atomic64_t) cast_to_integer (newvalue)), \
                           "m" (*mem)); \
     result; })


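/* Illustrative usage (editorial note, hypothetical names): the macro yields
   the previous contents of *mem:

     int prev = atomic_exchange_acq (&state, 1);

   Afterwards prev holds the old value of state, and state is 1.  */
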
#define __arch_exchange_and_add_body(lock, mem, value) \
  ({ __typeof (*mem) result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (lock "xaddb %b0, %1" \
                         : "=q" (result), "=m" (*mem) \
                         : "0" (value), "m" (*mem), \
                           "i" (offsetof (tcbhead_t, multiple_threads))); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (lock "xaddw %w0, %1" \
                         : "=r" (result), "=m" (*mem) \
                         : "0" (value), "m" (*mem), \
                           "i" (offsetof (tcbhead_t, multiple_threads))); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (lock "xaddl %0, %1" \
                         : "=r" (result), "=m" (*mem) \
                         : "0" (value), "m" (*mem), \
                           "i" (offsetof (tcbhead_t, multiple_threads))); \
     else \
       __asm __volatile (lock "xaddq %q0, %1" \
                         : "=r" (result), "=m" (*mem) \
                         : "0" ((atomic64_t) cast_to_integer (value)), \
                           "m" (*mem), \
                           "i" (offsetof (tcbhead_t, multiple_threads))); \
     result; })

#define atomic_exchange_and_add(mem, value) \
  __sync_fetch_and_add (mem, value)

#define __arch_exchange_and_add_cprefix \
  "cmpl $0, %%fs:%P4\n\tje 0f\n\tlock\n0:\t"

#define catomic_exchange_and_add(mem, value) \
  __arch_exchange_and_add_body (__arch_exchange_and_add_cprefix, mem, value)


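/* Illustrative usage (editorial note, hypothetical names):
   atomic_exchange_and_add is a fetch-and-add, returning the value seen
   before the addition:

     unsigned int old = atomic_exchange_and_add (&counter, 1);  */
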
#define __arch_add_body(lock, pfx, mem, value) \
  do { \
    if (__builtin_constant_p (value) && (value) == 1) \
      pfx##_increment (mem); \
    else if (__builtin_constant_p (value) && (value) == -1) \
      pfx##_decrement (mem); \
    else if (sizeof (*mem) == 1) \
      __asm __volatile (lock "addb %b1, %0" \
                        : "=m" (*mem) \
                        : "iq" (value), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "addw %w1, %0" \
                        : "=m" (*mem) \
                        : "ir" (value), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "addl %1, %0" \
                        : "=m" (*mem) \
                        : "ir" (value), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      __asm __volatile (lock "addq %q1, %0" \
                        : "=m" (*mem) \
                        : "ir" ((atomic64_t) cast_to_integer (value)), \
                          "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
  } while (0)

#define atomic_add(mem, value) \
  __arch_add_body (LOCK_PREFIX, atomic, mem, value)

#define __arch_add_cprefix \
  "cmpl $0, %%fs:%P3\n\tje 0f\n\tlock\n0:\t"

#define catomic_add(mem, value) \
  __arch_add_body (__arch_add_cprefix, catomic, mem, value)


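/* Editorial note: when the addend is the compile-time constant 1 or -1,
   __arch_add_body above dispatches to pfx##_increment or pfx##_decrement
   (the atomic_* or catomic_* variants), which emit the shorter INC/DEC
   forms instead of ADD with an immediate.  */
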
#define atomic_add_negative(mem, value) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "addb %b2, %0; sets %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "iq" (value), "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "addw %w2, %0; sets %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" (value), "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "addl %2, %0; sets %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" (value), "m" (*mem)); \
     else \
       __asm __volatile (LOCK_PREFIX "addq %q2, %0; sets %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" ((atomic64_t) cast_to_integer (value)), \
                           "m" (*mem)); \
     __result; })


#define atomic_add_zero(mem, value) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "addb %b2, %0; setz %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "iq" (value), "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "addw %w2, %0; setz %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" (value), "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "addl %2, %0; setz %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" (value), "m" (*mem)); \
     else \
       __asm __volatile (LOCK_PREFIX "addq %q2, %0; setz %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" ((atomic64_t) cast_to_integer (value)), \
                           "m" (*mem)); \
     __result; })


#define __arch_increment_body(lock, mem) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (lock "incb %b0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "incw %w0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "incl %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      __asm __volatile (lock "incq %q0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
  } while (0)

#define atomic_increment(mem) __arch_increment_body (LOCK_PREFIX, mem)

#define __arch_increment_cprefix \
  "cmpl $0, %%fs:%P2\n\tje 0f\n\tlock\n0:\t"

#define catomic_increment(mem) \
  __arch_increment_body (__arch_increment_cprefix, mem)


#define atomic_increment_and_test(mem) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "incb %b0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "incw %w0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "incl %0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else \
       __asm __volatile (LOCK_PREFIX "incq %q0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     __result; })


#define __arch_decrement_body(lock, mem) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (lock "decb %b0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "decw %w0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "decl %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      __asm __volatile (lock "decq %q0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
  } while (0)

#define atomic_decrement(mem) __arch_decrement_body (LOCK_PREFIX, mem)

#define __arch_decrement_cprefix \
  "cmpl $0, %%fs:%P2\n\tje 0f\n\tlock\n0:\t"

#define catomic_decrement(mem) \
  __arch_decrement_body (__arch_decrement_cprefix, mem)


#define atomic_decrement_and_test(mem) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "decb %b0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "decw %w0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "decl %0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else \
       __asm __volatile (LOCK_PREFIX "decq %q0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     __result; })


#define atomic_bit_set(mem, bit) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (LOCK_PREFIX "orb %b2, %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), "iq" (1L << (bit))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (LOCK_PREFIX "orw %w2, %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), "ir" (1L << (bit))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (LOCK_PREFIX "orl %2, %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), "ir" (1L << (bit))); \
    else if (__builtin_constant_p (bit) && (bit) < 32) \
      __asm __volatile (LOCK_PREFIX "orq %2, %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), "i" (1L << (bit))); \
    else \
      __asm __volatile (LOCK_PREFIX "orq %q2, %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), "r" (1UL << (bit))); \
  } while (0)


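/* Editorial note: in the 64-bit branch of atomic_bit_set, a constant bit
   position below 32 keeps 1L << bit within a sign-extended 32-bit
   immediate, so ORQ can take an "i" operand; larger or non-constant bit
   positions force the mask into a register ("r" constraint).  */
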
#define atomic_bit_test_set(mem, bit) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "btsb %3, %1; setc %0" \
                         : "=q" (__result), "=m" (*mem) \
                         : "m" (*mem), "iq" (bit)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "btsw %3, %1; setc %0" \
                         : "=q" (__result), "=m" (*mem) \
                         : "m" (*mem), "ir" (bit)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "btsl %3, %1; setc %0" \
                         : "=q" (__result), "=m" (*mem) \
                         : "m" (*mem), "ir" (bit)); \
     else \
       __asm __volatile (LOCK_PREFIX "btsq %3, %1; setc %0" \
                         : "=q" (__result), "=m" (*mem) \
                         : "m" (*mem), "ir" (bit)); \
     __result; })


#define atomic_spin_nop() asm ("rep; nop")


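/* Editorial note: "rep; nop" in atomic_spin_nop above is the encoding of
   the PAUSE instruction, the conventional x86 spin-wait hint; it throttles
   the spinning core and is friendlier to an SMT sibling than a tight loop
   of ordinary NOPs.  */
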
#define __arch_and_body(lock, mem, mask) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (lock "andb %b1, %0" \
                        : "=m" (*mem) \
                        : "iq" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "andw %w1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "andl %1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      __asm __volatile (lock "andq %q1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
  } while (0)

#define __arch_cprefix \
  "cmpl $0, %%fs:%P3\n\tje 0f\n\tlock\n0:\t"

#define atomic_and(mem, mask) __arch_and_body (LOCK_PREFIX, mem, mask)

#define catomic_and(mem, mask) __arch_and_body (__arch_cprefix, mem, mask)


#define __arch_or_body(lock, mem, mask) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (lock "orb %b1, %0" \
                        : "=m" (*mem) \
                        : "iq" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "orw %w1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "orl %1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      __asm __volatile (lock "orq %q1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
  } while (0)

#define atomic_or(mem, mask) __arch_or_body (LOCK_PREFIX, mem, mask)

#define catomic_or(mem, mask) __arch_or_body (__arch_cprefix, mem, mask)

/* We don't use mfence because it is supposedly slower due to having to
   provide stronger guarantees (e.g., regarding self-modifying code).  A
   locked read-modify-write of a dummy stack word is itself a full barrier
   on x86-64 and is cheaper in practice.  */
#define atomic_full_barrier() \
  __asm __volatile (LOCK_PREFIX "orl $0, (%%rsp)" ::: "memory")
#define atomic_read_barrier() __asm ("" ::: "memory")
#define atomic_write_barrier() __asm ("" ::: "memory")

#endif  /* atomic-machine.h */