]> git.ipfire.org Git - thirdparty/glibc.git/blame - sysdeps/x86_64/bits/atomic.h
* nscd/nscd.c (parse_opt): One more conversion to use send instead
[thirdparty/glibc.git] / sysdeps / x86_64 / bits / atomic.h
CommitLineData
/* Copyright (C) 2002, 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
20#include <stdint.h>
11bf311e 21#include <tls.h> /* For tcbhead_t. */
c10c099c
UD
22
23
/* Fixed-width, fast, pointer-sized and maximum-width integer aliases
   used by the generic atomic-operation macros below.  */
typedef int8_t atomic8_t;
typedef uint8_t uatomic8_t;
typedef int_fast8_t atomic_fast8_t;
typedef uint_fast8_t uatomic_fast8_t;

typedef int16_t atomic16_t;
typedef uint16_t uatomic16_t;
typedef int_fast16_t atomic_fast16_t;
typedef uint_fast16_t uatomic_fast16_t;

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;
48
49
/* On uniprocessor builds (UP) the bus-lock prefix is unnecessary
   overhead; otherwise prepend "lock;" to the RMW instructions.  */
#ifndef LOCK_PREFIX
# ifdef UP
#  define LOCK_PREFIX	/* nothing */
# else
#  define LOCK_PREFIX "lock;"
# endif
#endif
57
58
038a1a9f
UD
59#if __GNUC_PREREQ (4, 1)
60# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
61 __sync_val_compare_and_swap (mem, oldval, newval)
62# define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
63 (! __sync_bool_compare_and_swap (mem, oldval, newval))
64#else
65# define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
5a3ab2fc 66 ({ __typeof (*mem) ret; \
bd4f43b4 67 __asm __volatile (LOCK_PREFIX "cmpxchgb %b2, %1" \
c10c099c 68 : "=a" (ret), "=m" (*mem) \
abfd53d1 69 : "q" (newval), "m" (*mem), "0" (oldval)); \
c10c099c
UD
70 ret; })
71
038a1a9f 72# define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
5a3ab2fc 73 ({ __typeof (*mem) ret; \
bd4f43b4 74 __asm __volatile (LOCK_PREFIX "cmpxchgw %w2, %1" \
c10c099c 75 : "=a" (ret), "=m" (*mem) \
abfd53d1 76 : "r" (newval), "m" (*mem), "0" (oldval)); \
c10c099c
UD
77 ret; })
78
038a1a9f 79# define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
5a3ab2fc 80 ({ __typeof (*mem) ret; \
bd4f43b4 81 __asm __volatile (LOCK_PREFIX "cmpxchgl %2, %1" \
c10c099c 82 : "=a" (ret), "=m" (*mem) \
abfd53d1 83 : "r" (newval), "m" (*mem), "0" (oldval)); \
c10c099c
UD
84 ret; })
85
038a1a9f 86# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
5a3ab2fc 87 ({ __typeof (*mem) ret; \
bd4f43b4 88 __asm __volatile (LOCK_PREFIX "cmpxchgq %q2, %1" \
c10c099c 89 : "=a" (ret), "=m" (*mem) \
11bf311e
UD
90 : "r" ((long int) (newval)), "m" (*mem), \
91 "0" ((long int) (oldval))); \
92 ret; })
038a1a9f 93#endif
11bf311e
UD
94
95
/* Conditional ("c") compare-and-swap variants: skip the costly LOCK
   prefix while the process is known to be single-threaded.  The
   multiple_threads flag in the TCB (addressed through %fs, operand %P5)
   is tested; when it is zero the branch jumps over the standalone
   "lock" prefix byte so the CMPXCHG executes unlocked.  */
#define __arch_c_compare_and_exchange_val_8_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret;						      \
     __asm __volatile ("cmpl $0, %%fs:%P5\n\t"				      \
		       "je 0f\n\t"					      \
		       "lock\n"						      \
		       "0:\tcmpxchgb %b2, %1"				      \
		       : "=a" (ret), "=m" (*mem)			      \
		       : "q" (newval), "m" (*mem), "0" (oldval),	      \
			 "i" (offsetof (tcbhead_t, multiple_threads)));	      \
     ret; })

#define __arch_c_compare_and_exchange_val_16_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret;						      \
     __asm __volatile ("cmpl $0, %%fs:%P5\n\t"				      \
		       "je 0f\n\t"					      \
		       "lock\n"						      \
		       "0:\tcmpxchgw %w2, %1"				      \
		       : "=a" (ret), "=m" (*mem)			      \
		       : "q" (newval), "m" (*mem), "0" (oldval),	      \
			 "i" (offsetof (tcbhead_t, multiple_threads)));	      \
     ret; })

#define __arch_c_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret;						      \
     __asm __volatile ("cmpl $0, %%fs:%P5\n\t"				      \
		       "je 0f\n\t"					      \
		       "lock\n"						      \
		       "0:\tcmpxchgl %2, %1"				      \
		       : "=a" (ret), "=m" (*mem)			      \
		       : "q" (newval), "m" (*mem), "0" (oldval),	      \
			 "i" (offsetof (tcbhead_t, multiple_threads)));	      \
     ret; })

#define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret;						      \
     __asm __volatile ("cmpl $0, %%fs:%P5\n\t"				      \
		       "je 0f\n\t"					      \
		       "lock\n"						      \
		       "0:\tcmpxchgq %q2, %1"				      \
		       : "=a" (ret), "=m" (*mem)			      \
		       : "q" ((long int) (newval)), "m" (*mem),		      \
			 "0" ((long int) (oldval)),			      \
			 "i" (offsetof (tcbhead_t, multiple_threads)));	      \
     ret; })
140
141
/* Atomic exchange: store NEWVALUE into *MEM and return the previous
   value.  Note that we need no lock prefix: XCHG with a memory operand
   is implicitly locked on x86.  */
#define atomic_exchange_acq(mem, newvalue) \
  ({ __typeof (*mem) result;						      \
     if (sizeof (*mem) == 1)						      \
       __asm __volatile ("xchgb %b0, %1"				      \
			 : "=q" (result), "=m" (*mem)			      \
			 : "0" (newvalue), "m" (*mem));			      \
     else if (sizeof (*mem) == 2)					      \
       __asm __volatile ("xchgw %w0, %1"				      \
			 : "=r" (result), "=m" (*mem)			      \
			 : "0" (newvalue), "m" (*mem));			      \
     else if (sizeof (*mem) == 4)					      \
       __asm __volatile ("xchgl %0, %1"					      \
			 : "=r" (result), "=m" (*mem)			      \
			 : "0" (newvalue), "m" (*mem));			      \
     else								      \
       __asm __volatile ("xchgq %q0, %1"				      \
			 : "=r" (result), "=m" (*mem)			      \
			 : "0" ((long) (newvalue)), "m" (*mem));	      \
     result; })
162
163
/* Fetch-and-add body shared by the locked and conditional variants.
   LOCK is either LOCK_PREFIX or the conditional prefix string; the
   trailing "i" operand (TCB offset of multiple_threads) is referenced
   only as %P4 by the conditional prefix and is otherwise unused.  */
#define __arch_exchange_and_add_body(lock, mem, value) \
  ({ __typeof (*mem) result;						      \
     if (sizeof (*mem) == 1)						      \
       __asm __volatile (lock "xaddb %b0, %1"				      \
			 : "=q" (result), "=m" (*mem)			      \
			 : "0" (value), "m" (*mem),			      \
			   "i" (offsetof (tcbhead_t, multiple_threads)));     \
     else if (sizeof (*mem) == 2)					      \
       __asm __volatile (lock "xaddw %w0, %1"				      \
			 : "=r" (result), "=m" (*mem)			      \
			 : "0" (value), "m" (*mem),			      \
			   "i" (offsetof (tcbhead_t, multiple_threads)));     \
     else if (sizeof (*mem) == 4)					      \
       __asm __volatile (lock "xaddl %0, %1"				      \
			 : "=r" (result), "=m" (*mem)			      \
			 : "0" (value), "m" (*mem),			      \
			   "i" (offsetof (tcbhead_t, multiple_threads)));     \
     else								      \
       __asm __volatile (lock "xaddq %q0, %1"				      \
			 : "=r" (result), "=m" (*mem)			      \
			 : "0" ((long) (value)), "m" (*mem),		      \
			   "i" (offsetof (tcbhead_t, multiple_threads)));     \
     result; })
187
038a1a9f
UD
188#if __GNUC_PREREQ (4, 1)
189# define atomic_exchange_and_add(mem, value) \
190 __sync_fetch_and_add (mem, value)
191#else
192# define atomic_exchange_and_add(mem, value) \
11bf311e 193 __arch_exchange_and_add_body (LOCK_PREFIX, mem, value)
038a1a9f 194#endif
11bf311e
UD
195
196#define __arch_exchange_and_add_cprefix \
197 "cmpl $0, %%fs:%P4\n\tje 0f\n\tlock\n0:\t"
198
199#define catomic_exchange_and_add(mem, value) \
200 __arch_exchange_and_add_body (__arch_exchange_and_add_cprefix, mem, value)
201
202
/* Add VALUE to *MEM (no result).  Constant +1/-1 are routed to the
   cheaper increment/decrement macros via the PFX token (atomic or
   catomic).  The trailing "i" operand is referenced only as %P3 by the
   conditional prefix string.  */
#define __arch_add_body(lock, pfx, mem, value) \
  do {									      \
    if (__builtin_constant_p (value) && (value) == 1)			      \
      pfx##_increment (mem);						      \
    else if (__builtin_constant_p (value) && (value) == -1)		      \
      pfx##_decrement (mem);						      \
    else if (sizeof (*mem) == 1)					      \
      __asm __volatile (lock "addb %b1, %0"				      \
			: "=m" (*mem)					      \
			: "iq" (value), "m" (*mem),			      \
			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else if (sizeof (*mem) == 2)					      \
      __asm __volatile (lock "addw %w1, %0"				      \
			: "=m" (*mem)					      \
			: "ir" (value), "m" (*mem),			      \
			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else if (sizeof (*mem) == 4)					      \
      __asm __volatile (lock "addl %1, %0"				      \
			: "=m" (*mem)					      \
			: "ir" (value), "m" (*mem),			      \
			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else								      \
      __asm __volatile (lock "addq %q1, %0"				      \
			: "=m" (*mem)					      \
			: "ir" ((long) (value)), "m" (*mem),		      \
			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
  } while (0)

#define atomic_add(mem, value) \
  __arch_add_body (LOCK_PREFIX, atomic, mem, value)

#define __arch_add_cprefix \
  "cmpl $0, %%fs:%P3\n\tje 0f\n\tlock\n0:\t"

#define catomic_add(mem, value) \
  __arch_add_body (__arch_add_cprefix, catomic, mem, value)
c10c099c
UD
239
240
/* Add VALUE to *MEM; __result is nonzero iff the new value is negative
   (SETS captures the sign flag produced by the locked ADD).  */
#define atomic_add_negative(mem, value) \
  ({ unsigned char __result;						      \
     if (sizeof (*mem) == 1)						      \
       __asm __volatile (LOCK_PREFIX "addb %b2, %0; sets %1"		      \
			 : "=m" (*mem), "=qm" (__result)		      \
			 : "iq" (value), "m" (*mem));			      \
     else if (sizeof (*mem) == 2)					      \
       __asm __volatile (LOCK_PREFIX "addw %w2, %0; sets %1"		      \
			 : "=m" (*mem), "=qm" (__result)		      \
			 : "ir" (value), "m" (*mem));			      \
     else if (sizeof (*mem) == 4)					      \
       __asm __volatile (LOCK_PREFIX "addl %2, %0; sets %1"		      \
			 : "=m" (*mem), "=qm" (__result)		      \
			 : "ir" (value), "m" (*mem));			      \
     else								      \
       __asm __volatile (LOCK_PREFIX "addq %q2, %0; sets %1"		      \
			 : "=m" (*mem), "=qm" (__result)		      \
			 : "ir" ((long) (value)), "m" (*mem));		      \
     __result; })
260
261
/* Add VALUE to *MEM; __result is nonzero iff the new value is zero
   (SETZ captures the zero flag produced by the locked ADD).  */
#define atomic_add_zero(mem, value) \
  ({ unsigned char __result;						      \
     if (sizeof (*mem) == 1)						      \
       __asm __volatile (LOCK_PREFIX "addb %b2, %0; setz %1"		      \
			 : "=m" (*mem), "=qm" (__result)		      \
			 : "iq" (value), "m" (*mem));			      \
     else if (sizeof (*mem) == 2)					      \
       __asm __volatile (LOCK_PREFIX "addw %w2, %0; setz %1"		      \
			 : "=m" (*mem), "=qm" (__result)		      \
			 : "ir" (value), "m" (*mem));			      \
     else if (sizeof (*mem) == 4)					      \
       __asm __volatile (LOCK_PREFIX "addl %2, %0; setz %1"		      \
			 : "=m" (*mem), "=qm" (__result)		      \
			 : "ir" (value), "m" (*mem));			      \
     else								      \
       __asm __volatile (LOCK_PREFIX "addq %q2, %0; setz %1"		      \
			 : "=m" (*mem), "=qm" (__result)		      \
			 : "ir" ((long) (value)), "m" (*mem));		      \
     __result; })
281
282
11bf311e
UD
/* Increment *MEM by one.  The trailing "i" operand (TCB offset of
   multiple_threads) is referenced only as %P2 by the conditional
   prefix string used by catomic_increment.  */
#define __arch_increment_body(lock, mem) \
  do {									      \
    if (sizeof (*mem) == 1)						      \
      __asm __volatile (lock "incb %b0"					      \
			: "=m" (*mem)					      \
			: "m" (*mem),					      \
			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else if (sizeof (*mem) == 2)					      \
      __asm __volatile (lock "incw %w0"					      \
			: "=m" (*mem)					      \
			: "m" (*mem),					      \
			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else if (sizeof (*mem) == 4)					      \
      __asm __volatile (lock "incl %0"					      \
			: "=m" (*mem)					      \
			: "m" (*mem),					      \
			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else								      \
      __asm __volatile (lock "incq %q0"					      \
			: "=m" (*mem)					      \
			: "m" (*mem),					      \
			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
  } while (0)

#define atomic_increment(mem) __arch_increment_body (LOCK_PREFIX, mem)

#define __arch_increment_cprefix \
  "cmpl $0, %%fs:%P2\n\tje 0f\n\tlock\n0:\t"

#define catomic_increment(mem) \
  __arch_increment_body (__arch_increment_cprefix, mem)
8099361e 314
c10c099c
UD
315
/* Increment *MEM; __result is nonzero iff the new value is zero
   (SETE captures the zero flag produced by the locked INC).  */
#define atomic_increment_and_test(mem) \
  ({ unsigned char __result;						      \
     if (sizeof (*mem) == 1)						      \
       __asm __volatile (LOCK_PREFIX "incb %b0; sete %1"		      \
			 : "=m" (*mem), "=qm" (__result)		      \
			 : "m" (*mem));					      \
     else if (sizeof (*mem) == 2)					      \
       __asm __volatile (LOCK_PREFIX "incw %w0; sete %1"		      \
			 : "=m" (*mem), "=qm" (__result)		      \
			 : "m" (*mem));					      \
     else if (sizeof (*mem) == 4)					      \
       __asm __volatile (LOCK_PREFIX "incl %0; sete %1"			      \
			 : "=m" (*mem), "=qm" (__result)		      \
			 : "m" (*mem));					      \
     else								      \
       __asm __volatile (LOCK_PREFIX "incq %q0; sete %1"		      \
			 : "=m" (*mem), "=qm" (__result)		      \
			 : "m" (*mem));					      \
     __result; })
335
336
11bf311e
UD
/* Decrement *MEM by one.  Mirror of __arch_increment_body; the "i"
   operand is referenced only as %P2 by the conditional prefix.  */
#define __arch_decrement_body(lock, mem) \
  do {									      \
    if (sizeof (*mem) == 1)						      \
      __asm __volatile (lock "decb %b0"					      \
			: "=m" (*mem)					      \
			: "m" (*mem),					      \
			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else if (sizeof (*mem) == 2)					      \
      __asm __volatile (lock "decw %w0"					      \
			: "=m" (*mem)					      \
			: "m" (*mem),					      \
			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else if (sizeof (*mem) == 4)					      \
      __asm __volatile (lock "decl %0"					      \
			: "=m" (*mem)					      \
			: "m" (*mem),					      \
			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else								      \
      __asm __volatile (lock "decq %q0"					      \
			: "=m" (*mem)					      \
			: "m" (*mem),					      \
			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
  } while (0)

#define atomic_decrement(mem) __arch_decrement_body (LOCK_PREFIX, mem)

#define __arch_decrement_cprefix \
  "cmpl $0, %%fs:%P2\n\tje 0f\n\tlock\n0:\t"

#define catomic_decrement(mem) \
  __arch_decrement_body (__arch_decrement_cprefix, mem)
8099361e 368
c10c099c
UD
369
/* Decrement *MEM; __result is nonzero iff the new value is zero
   (SETE captures the zero flag produced by the locked DEC).  */
#define atomic_decrement_and_test(mem) \
  ({ unsigned char __result;						      \
     if (sizeof (*mem) == 1)						      \
       __asm __volatile (LOCK_PREFIX "decb %b0; sete %1"		      \
			 : "=m" (*mem), "=qm" (__result)		      \
			 : "m" (*mem));					      \
     else if (sizeof (*mem) == 2)					      \
       __asm __volatile (LOCK_PREFIX "decw %w0; sete %1"		      \
			 : "=m" (*mem), "=qm" (__result)		      \
			 : "m" (*mem));					      \
     else if (sizeof (*mem) == 4)					      \
       __asm __volatile (LOCK_PREFIX "decl %0; sete %1"			      \
			 : "=m" (*mem), "=qm" (__result)		      \
			 : "m" (*mem));					      \
     else								      \
       __asm __volatile (LOCK_PREFIX "decq %q0; sete %1"		      \
			 : "=m" (*mem), "=qm" (__result)		      \
			 : "m" (*mem));					      \
     __result; })
389
390
/* Atomically set bit number BIT in *MEM using a locked OR with a
   one-bit mask.  For 8-byte objects a constant BIT below 32 can be an
   immediate; otherwise the mask must live in a register.
   NOTE(review): for a constant BIT == 31, 1L << 31 does not fit a
   sign-extended 32-bit immediate for orq — presumably callers only use
   small constant bit numbers here; verify before relying on bit 31.  */
#define atomic_bit_set(mem, bit) \
  do {									      \
    if (sizeof (*mem) == 1)						      \
      __asm __volatile (LOCK_PREFIX "orb %b2, %0"			      \
			: "=m" (*mem)					      \
			: "m" (*mem), "iq" (1L << (bit)));		      \
    else if (sizeof (*mem) == 2)					      \
      __asm __volatile (LOCK_PREFIX "orw %w2, %0"			      \
			: "=m" (*mem)					      \
			: "m" (*mem), "ir" (1L << (bit)));		      \
    else if (sizeof (*mem) == 4)					      \
      __asm __volatile (LOCK_PREFIX "orl %2, %0"			      \
			: "=m" (*mem)					      \
			: "m" (*mem), "ir" (1L << (bit)));		      \
    else if (__builtin_constant_p (bit) && (bit) < 32)			      \
      __asm __volatile (LOCK_PREFIX "orq %2, %0"			      \
			: "=m" (*mem)					      \
			: "m" (*mem), "i" (1L << (bit)));		      \
    else								      \
      __asm __volatile (LOCK_PREFIX "orq %q2, %0"			      \
			: "=m" (*mem)					      \
			: "m" (*mem), "r" (1UL << (bit)));		      \
  } while (0)
c10c099c
UD
414
415
/* Atomically set bit BIT in *MEM and return its previous value
   (BTS copies the old bit into CF; SETC captures it).  */
#define atomic_bit_test_set(mem, bit) \
  ({ unsigned char __result;						      \
     if (sizeof (*mem) == 1)						      \
       __asm __volatile (LOCK_PREFIX "btsb %3, %1; setc %0"		      \
			 : "=q" (__result), "=m" (*mem)			      \
			 : "m" (*mem), "iq" (bit));			      \
     else if (sizeof (*mem) == 2)					      \
       __asm __volatile (LOCK_PREFIX "btsw %3, %1; setc %0"		      \
			 : "=q" (__result), "=m" (*mem)			      \
			 : "m" (*mem), "ir" (bit));			      \
     else if (sizeof (*mem) == 4)					      \
       __asm __volatile (LOCK_PREFIX "btsl %3, %1; setc %0"		      \
			 : "=q" (__result), "=m" (*mem)			      \
			 : "m" (*mem), "ir" (bit));			      \
     else								      \
       __asm __volatile (LOCK_PREFIX "btsq %3, %1; setc %0"		      \
			 : "=q" (__result), "=m" (*mem)			      \
			 : "m" (*mem), "ir" (bit));			      \
     __result; })
f377d022
UD
435
436
/* Spin-wait hint: "rep; nop" encodes the PAUSE instruction.  */
#define atomic_delay() asm ("rep; nop")
11bf311e
UD
438
439
/* Atomically AND MASK into *MEM (no result returned).
   The operand-size suffix is chosen from sizeof (*mem); the %b/%w/%q
   operand modifiers force the matching register name for the MASK
   operand.  Fix: previously the width modifier annotated the memory
   operand %0 ("andw %1, %w0"), where it is ignored, so a MASK of a
   wider mode placed in a register would be printed with the wrong
   register width and fail to assemble; the 64-bit case additionally
   widens MASK to long, matching the addq/xaddq macros above.  */
#define atomic_and(mem, mask) \
  do {									      \
    if (sizeof (*mem) == 1)						      \
      __asm __volatile (LOCK_PREFIX "andb %b1, %0"			      \
			: "=m" (*mem)					      \
			: "iq" (mask), "m" (*mem));			      \
    else if (sizeof (*mem) == 2)					      \
      __asm __volatile (LOCK_PREFIX "andw %w1, %0"			      \
			: "=m" (*mem)					      \
			: "ir" (mask), "m" (*mem));			      \
    else if (sizeof (*mem) == 4)					      \
      __asm __volatile (LOCK_PREFIX "andl %1, %0"			      \
			: "=m" (*mem)					      \
			: "ir" (mask), "m" (*mem));			      \
    else								      \
      __asm __volatile (LOCK_PREFIX "andq %q1, %0"			      \
			: "=m" (*mem)					      \
			: "ir" ((long) (mask)), "m" (*mem));		      \
  } while (0)
459
460
/* Atomically OR MASK into *MEM.  LOCK is LOCK_PREFIX or the conditional
   single-thread prefix; the trailing "i" operand (TCB offset of
   multiple_threads, %P3 in the prefix string) is unused by the plain
   LOCK_PREFIX expansion.  Fix: the %w/%q width modifiers now annotate
   the MASK operand %1 ("orw %w1, %0") instead of the memory operand,
   where they are ignored — a register-allocated MASK of a different
   mode would otherwise be printed with the wrong register width; the
   64-bit case also widens MASK to long, matching the addq macro.  */
#define __arch_or_body(lock, mem, mask) \
  do {									      \
    if (sizeof (*mem) == 1)						      \
      __asm __volatile (lock "orb %b1, %0"				      \
			: "=m" (*mem)					      \
			: "iq" (mask), "m" (*mem),			      \
			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else if (sizeof (*mem) == 2)					      \
      __asm __volatile (lock "orw %w1, %0"				      \
			: "=m" (*mem)					      \
			: "ir" (mask), "m" (*mem),			      \
			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else if (sizeof (*mem) == 4)					      \
      __asm __volatile (lock "orl %1, %0"				      \
			: "=m" (*mem)					      \
			: "ir" (mask), "m" (*mem),			      \
			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else								      \
      __asm __volatile (lock "orq %q1, %0"				      \
			: "=m" (*mem)					      \
			: "ir" ((long) (mask)), "m" (*mem),		      \
			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
  } while (0)

#define atomic_or(mem, mask) __arch_or_body (LOCK_PREFIX, mem, mask)

#define __arch_or_cprefix \
  "cmpl $0, %%fs:%P3\n\tje 0f\n\tlock\n0:\t"

#define catomic_or(mem, mask) __arch_or_body (__arch_or_cprefix, mem, mask)