/* Copyright (C) 2002-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <stdint.h>
#include <tls.h>   /* For tcbhead_t.  */
#include <libc-internal.h>


typedef int8_t atomic8_t;
typedef uint8_t uatomic8_t;
typedef int_fast8_t atomic_fast8_t;
typedef uint_fast8_t uatomic_fast8_t;

typedef int16_t atomic16_t;
typedef uint16_t uatomic16_t;
typedef int_fast16_t atomic_fast16_t;
typedef uint_fast16_t uatomic_fast16_t;

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;


#ifndef LOCK_PREFIX
# ifdef UP
#  define LOCK_PREFIX /* nothing */
# else
#  define LOCK_PREFIX "lock;"
# endif
#endif

#define __HAVE_64B_ATOMICS 1
#if __GNUC_PREREQ (4, 7)
#define USE_ATOMIC_COMPILER_BUILTINS 1
#else
#define USE_ATOMIC_COMPILER_BUILTINS 0
#endif
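/* All of the 8-, 16-, 32- and 64-bit operations below are available on
   x86_64, hence __HAVE_64B_ATOMICS.  When built with GCC 4.7 or newer the
   generic atomic machinery may map the C11-style operations to the
   compiler's __atomic builtins rather than the inline assembly here; that
   is what USE_ATOMIC_COMPILER_BUILTINS advertises.  */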

#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  __sync_val_compare_and_swap (mem, oldval, newval)
#define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  (! __sync_bool_compare_and_swap (mem, oldval, newval))
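/* Usage sketch (with a purely illustrative flag variable `owner'): the
   _val_ form returns the value previously in *MEM, while the _bool_ form
   returns zero iff the exchange took place, hence the negation of the GCC
   builtin above.

     static int owner;
     if (atomic_compare_and_exchange_bool_acq (&owner, 1, 0) == 0)
       ... this thread performed the 0 -> 1 transition ...  */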


#define __arch_c_compare_and_exchange_val_8_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("cmpl $0, %%fs:%P5\n\t" \
                       "je 0f\n\t" \
                       "lock\n" \
                       "0:\tcmpxchgb %b2, %1" \
                       : "=a" (ret), "=m" (*mem) \
                       : "q" (newval), "m" (*mem), "0" (oldval), \
                         "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })

#define __arch_c_compare_and_exchange_val_16_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("cmpl $0, %%fs:%P5\n\t" \
                       "je 0f\n\t" \
                       "lock\n" \
                       "0:\tcmpxchgw %w2, %1" \
                       : "=a" (ret), "=m" (*mem) \
                       : "q" (newval), "m" (*mem), "0" (oldval), \
                         "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })

#define __arch_c_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("cmpl $0, %%fs:%P5\n\t" \
                       "je 0f\n\t" \
                       "lock\n" \
                       "0:\tcmpxchgl %2, %1" \
                       : "=a" (ret), "=m" (*mem) \
                       : "q" (newval), "m" (*mem), "0" (oldval), \
                         "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })

#define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("cmpl $0, %%fs:%P5\n\t" \
                       "je 0f\n\t" \
                       "lock\n" \
                       "0:\tcmpxchgq %q2, %1" \
                       : "=a" (ret), "=m" (*mem) \
                       : "q" ((atomic64_t) cast_to_integer (newval)), \
                         "m" (*mem), \
                         "0" ((atomic64_t) cast_to_integer (oldval)), \
                         "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })
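/* The __arch_c_* macros above back the catomic_* operations: the
   "cmpl $0, %%fs:%P5; je 0f; lock" prologue tests the multiple_threads
   field of the TCB (reached through %fs on x86_64) and branches over the
   lock prefix while the process is still single-threaded, avoiding the
   cost of a locked cycle.  */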


/* Note that we need no lock prefix.  */
#define atomic_exchange_acq(mem, newvalue) \
  ({ __typeof (*mem) result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile ("xchgb %b0, %1" \
                         : "=q" (result), "=m" (*mem) \
                         : "0" (newvalue), "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile ("xchgw %w0, %1" \
                         : "=r" (result), "=m" (*mem) \
                         : "0" (newvalue), "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile ("xchgl %0, %1" \
                         : "=r" (result), "=m" (*mem) \
                         : "0" (newvalue), "m" (*mem)); \
     else \
       __asm __volatile ("xchgq %q0, %1" \
                         : "=r" (result), "=m" (*mem) \
                         : "0" ((atomic64_t) cast_to_integer (newvalue)), \
                           "m" (*mem)); \
     result; })
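/* xchg with a memory operand is implicitly locked, which is why the macro
   above needs no lock prefix.  Illustrative use, a simple test-and-set
   spinlock (lock_word is hypothetical):

     static int lock_word;
     while (atomic_exchange_acq (&lock_word, 1) != 0)
       atomic_delay ();
     ... critical section, then store 0 to release ...  */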


#define __arch_exchange_and_add_body(lock, mem, value) \
  ({ __typeof (*mem) result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (lock "xaddb %b0, %1" \
                         : "=q" (result), "=m" (*mem) \
                         : "0" (value), "m" (*mem), \
                           "i" (offsetof (tcbhead_t, multiple_threads))); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (lock "xaddw %w0, %1" \
                         : "=r" (result), "=m" (*mem) \
                         : "0" (value), "m" (*mem), \
                           "i" (offsetof (tcbhead_t, multiple_threads))); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (lock "xaddl %0, %1" \
                         : "=r" (result), "=m" (*mem) \
                         : "0" (value), "m" (*mem), \
                           "i" (offsetof (tcbhead_t, multiple_threads))); \
     else \
       __asm __volatile (lock "xaddq %q0, %1" \
                         : "=r" (result), "=m" (*mem) \
                         : "0" ((atomic64_t) cast_to_integer (value)), \
                           "m" (*mem), \
                           "i" (offsetof (tcbhead_t, multiple_threads))); \
     result; })

#define atomic_exchange_and_add(mem, value) \
  __sync_fetch_and_add (mem, value)

#define __arch_exchange_and_add_cprefix \
  "cmpl $0, %%fs:%P4\n\tje 0f\n\tlock\n0:\t"

#define catomic_exchange_and_add(mem, value) \
  __arch_exchange_and_add_body (__arch_exchange_and_add_cprefix, mem, value)
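/* atomic_exchange_and_add returns the value *MEM held before the addition;
   catomic_exchange_and_add does the same but, via the cprefix string,
   skips the lock prefix while single-threaded.  Illustrative example with
   a hypothetical counter:

     static int nwaiters;
     int prev = atomic_exchange_and_add (&nwaiters, 1);
     ... prev is the number of earlier registrations ...  */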


#define __arch_add_body(lock, pfx, mem, value) \
  do { \
    if (__builtin_constant_p (value) && (value) == 1) \
      pfx##_increment (mem); \
    else if (__builtin_constant_p (value) && (value) == -1) \
      pfx##_decrement (mem); \
    else if (sizeof (*mem) == 1) \
      __asm __volatile (lock "addb %b1, %0" \
                        : "=m" (*mem) \
                        : "iq" (value), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "addw %w1, %0" \
                        : "=m" (*mem) \
                        : "ir" (value), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "addl %1, %0" \
                        : "=m" (*mem) \
                        : "ir" (value), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      __asm __volatile (lock "addq %q1, %0" \
                        : "=m" (*mem) \
                        : "ir" ((atomic64_t) cast_to_integer (value)), \
                          "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
  } while (0)

#define atomic_add(mem, value) \
  __arch_add_body (LOCK_PREFIX, atomic, mem, value)

#define __arch_add_cprefix \
  "cmpl $0, %%fs:%P3\n\tje 0f\n\tlock\n0:\t"

#define catomic_add(mem, value) \
  __arch_add_body (__arch_add_cprefix, catomic, mem, value)
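/* __arch_add_body folds a constant value of +1 or -1 into the increment
   and decrement operations defined below, which encode more compactly than
   an add with an immediate; all other values use a plain locked add.  */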


#define atomic_add_negative(mem, value) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "addb %b2, %0; sets %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "iq" (value), "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "addw %w2, %0; sets %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" (value), "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "addl %2, %0; sets %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" (value), "m" (*mem)); \
     else \
       __asm __volatile (LOCK_PREFIX "addq %q2, %0; sets %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" ((atomic64_t) cast_to_integer (value)), \
                           "m" (*mem)); \
     __result; })
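/* atomic_add_negative adds VALUE to *MEM and returns nonzero iff the
   result is negative; "sets" captures the sign flag left by the locked
   add.  */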


#define atomic_add_zero(mem, value) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "addb %b2, %0; setz %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "iq" (value), "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "addw %w2, %0; setz %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" (value), "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "addl %2, %0; setz %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" (value), "m" (*mem)); \
     else \
       __asm __volatile (LOCK_PREFIX "addq %q2, %0; setz %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" ((atomic64_t) cast_to_integer (value)), \
                           "m" (*mem)); \
     __result; })
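/* atomic_add_zero is the analogous test for a zero result, using "setz"
   to capture the zero flag.  */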


#define __arch_increment_body(lock, mem) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (lock "incb %b0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "incw %w0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "incl %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      __asm __volatile (lock "incq %q0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
  } while (0)

#define atomic_increment(mem) __arch_increment_body (LOCK_PREFIX, mem)

#define __arch_increment_cprefix \
  "cmpl $0, %%fs:%P2\n\tje 0f\n\tlock\n0:\t"

#define catomic_increment(mem) \
  __arch_increment_body (__arch_increment_cprefix, mem)


#define atomic_increment_and_test(mem) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "incb %b0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "incw %w0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "incl %0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else \
       __asm __volatile (LOCK_PREFIX "incq %q0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     __result; })
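/* atomic_increment_and_test returns nonzero iff the value in *MEM became
   zero as a result of the increment ("sete" tests the zero flag).  */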


#define __arch_decrement_body(lock, mem) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (lock "decb %b0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "decw %w0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "decl %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      __asm __volatile (lock "decq %q0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
  } while (0)

#define atomic_decrement(mem) __arch_decrement_body (LOCK_PREFIX, mem)

#define __arch_decrement_cprefix \
  "cmpl $0, %%fs:%P2\n\tje 0f\n\tlock\n0:\t"

#define catomic_decrement(mem) \
  __arch_decrement_body (__arch_decrement_cprefix, mem)


#define atomic_decrement_and_test(mem) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "decb %b0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "decw %w0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "decl %0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else \
       __asm __volatile (LOCK_PREFIX "decq %q0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     __result; })
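/* atomic_decrement_and_test returns nonzero iff the decrement brought *MEM
   to zero, which is the usual shape of a reference-count drop.  Sketch
   with purely illustrative names (obj, refcount, free_object):

     if (atomic_decrement_and_test (&obj->refcount))
       free_object (obj);  */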


#define atomic_bit_set(mem, bit) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (LOCK_PREFIX "orb %b2, %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), "iq" (1L << (bit))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (LOCK_PREFIX "orw %w2, %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), "ir" (1L << (bit))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (LOCK_PREFIX "orl %2, %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), "ir" (1L << (bit))); \
    else if (__builtin_constant_p (bit) && (bit) < 32) \
      __asm __volatile (LOCK_PREFIX "orq %2, %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), "i" (1L << (bit))); \
    else \
      __asm __volatile (LOCK_PREFIX "orq %q2, %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), "r" (1UL << (bit))); \
  } while (0)
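/* For the 64-bit case of atomic_bit_set, orq cannot take a full 64-bit
   immediate, so only a constant bit position below 32 uses the immediate
   form; higher or non-constant bit numbers build the mask in a register
   instead.  */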


#define atomic_bit_test_set(mem, bit) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "btsb %3, %1; setc %0" \
                         : "=q" (__result), "=m" (*mem) \
                         : "m" (*mem), "iq" (bit)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "btsw %3, %1; setc %0" \
                         : "=q" (__result), "=m" (*mem) \
                         : "m" (*mem), "ir" (bit)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "btsl %3, %1; setc %0" \
                         : "=q" (__result), "=m" (*mem) \
                         : "m" (*mem), "ir" (bit)); \
     else \
       __asm __volatile (LOCK_PREFIX "btsq %3, %1; setc %0" \
                         : "=q" (__result), "=m" (*mem) \
                         : "m" (*mem), "ir" (bit)); \
     __result; })
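/* atomic_bit_test_set sets bit BIT in *MEM and returns its previous value:
   "bts" deposits the old bit in the carry flag and "setc" extracts it.
   Illustrative use (flags is hypothetical):

     if (atomic_bit_test_set (&flags, 3) == 0)
       ... this thread is the one that set bit 3 ...  */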


#define atomic_delay() asm ("rep; nop")
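/* "rep; nop" assembles to the PAUSE instruction, which hints to the
   processor that this is a spin-wait loop, throttling the loop and easing
   contention with a sibling hyper-thread.  */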


#define __arch_and_body(lock, mem, mask) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (lock "andb %b1, %0" \
                        : "=m" (*mem) \
                        : "iq" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "andw %w1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "andl %1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      __asm __volatile (lock "andq %q1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
  } while (0)

#define __arch_cprefix \
  "cmpl $0, %%fs:%P3\n\tje 0f\n\tlock\n0:\t"

#define atomic_and(mem, mask) __arch_and_body (LOCK_PREFIX, mem, mask)

#define catomic_and(mem, mask) __arch_and_body (__arch_cprefix, mem, mask)


#define __arch_or_body(lock, mem, mask) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (lock "orb %b1, %0" \
                        : "=m" (*mem) \
                        : "iq" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "orw %w1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "orl %1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      __asm __volatile (lock "orq %q1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
  } while (0)

#define atomic_or(mem, mask) __arch_or_body (LOCK_PREFIX, mem, mask)

#define catomic_or(mem, mask) __arch_or_body (__arch_cprefix, mem, mask)
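/* atomic_and stores *MEM & MASK and atomic_or stores *MEM | MASK back into
   *MEM; the catomic_ forms share __arch_cprefix and therefore skip the
   lock prefix in single-threaded processes.  Illustrative use with a
   hypothetical flags word and bit:

     catomic_or (&descr->flags, SOME_FLAG);  */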