/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <stdint.h>
#include <tls.h>	/* For tcbhead_t.  */
#include <libc-internal.h>


typedef int8_t atomic8_t;
typedef uint8_t uatomic8_t;
typedef int_fast8_t atomic_fast8_t;
typedef uint_fast8_t uatomic_fast8_t;

typedef int16_t atomic16_t;
typedef uint16_t uatomic16_t;
typedef int_fast16_t atomic_fast16_t;
typedef uint_fast16_t uatomic_fast16_t;

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;

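/* In uniprocessor (UP) builds the lock prefix is unnecessary, since a
   single instruction cannot be interleaved with another processor's
   accesses; on SMP it makes the read-modify-write atomic system-wide.  */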
#ifndef LOCK_PREFIX
# ifdef UP
#  define LOCK_PREFIX	/* nothing */
# else
#  define LOCK_PREFIX "lock;"
# endif
#endif


#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  __sync_val_compare_and_swap (mem, oldval, newval)
#define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  (! __sync_bool_compare_and_swap (mem, oldval, newval))

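/* Illustrative use (not part of this header): a CAS loop that adds one to
   a hypothetical COUNTER variable.  The val variant returns the value that
   was observed in memory, so the exchange succeeded exactly when the
   returned value equals OLD; the bool variant instead returns zero on
   success (note the negation above).

     int old, cur = counter;
     do
       old = cur;
     while ((cur = atomic_compare_and_exchange_val_acq (&counter,
                                                        old + 1, old))
            != old);

   The __arch_c_* variants below back the catomic_* family: they test the
   multiple_threads flag of the TCB (tcbhead_t, addressed through %fs) and
   jump over the lock prefix when the process is single-threaded, so the
   bus lock is only paid when it can matter.  */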
#define __arch_c_compare_and_exchange_val_8_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("cmpl $0, %%fs:%P5\n\t" \
                       "je 0f\n\t" \
                       "lock\n" \
                       "0:\tcmpxchgb %b2, %1" \
                       : "=a" (ret), "=m" (*mem) \
                       : "q" (newval), "m" (*mem), "0" (oldval), \
                         "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })

#define __arch_c_compare_and_exchange_val_16_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("cmpl $0, %%fs:%P5\n\t" \
                       "je 0f\n\t" \
                       "lock\n" \
                       "0:\tcmpxchgw %w2, %1" \
                       : "=a" (ret), "=m" (*mem) \
                       : "q" (newval), "m" (*mem), "0" (oldval), \
                         "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })

#define __arch_c_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("cmpl $0, %%fs:%P5\n\t" \
                       "je 0f\n\t" \
                       "lock\n" \
                       "0:\tcmpxchgl %2, %1" \
                       : "=a" (ret), "=m" (*mem) \
                       : "q" (newval), "m" (*mem), "0" (oldval), \
                         "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })

#define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("cmpl $0, %%fs:%P5\n\t" \
                       "je 0f\n\t" \
                       "lock\n" \
                       "0:\tcmpxchgq %q2, %1" \
                       : "=a" (ret), "=m" (*mem) \
                       : "q" ((atomic64_t) cast_to_integer (newval)), \
                         "m" (*mem), \
                         "0" ((atomic64_t) cast_to_integer (oldval)), \
                         "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })


/* Note that we need no lock prefix.  */
#define atomic_exchange_acq(mem, newvalue) \
  ({ __typeof (*mem) result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile ("xchgb %b0, %1" \
                         : "=q" (result), "=m" (*mem) \
                         : "0" (newvalue), "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile ("xchgw %w0, %1" \
                         : "=r" (result), "=m" (*mem) \
                         : "0" (newvalue), "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile ("xchgl %0, %1" \
                         : "=r" (result), "=m" (*mem) \
                         : "0" (newvalue), "m" (*mem)); \
     else \
       __asm __volatile ("xchgq %q0, %1" \
                         : "=r" (result), "=m" (*mem) \
                         : "0" ((atomic64_t) cast_to_integer (newvalue)), \
                           "m" (*mem)); \
     result; })

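/* xadd atomically adds its source into memory and hands back the previous
   value in the source register.  The body takes the lock string as a
   parameter so it can be shared with the conditional ("catomic") variant;
   the trailing "i" operand exists only so that the conditional prefix
   string can refer to the TCB's multiple_threads field as %P4.  */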
#define __arch_exchange_and_add_body(lock, mem, value) \
  ({ __typeof (*mem) result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (lock "xaddb %b0, %1" \
                         : "=q" (result), "=m" (*mem) \
                         : "0" (value), "m" (*mem), \
                           "i" (offsetof (tcbhead_t, multiple_threads))); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (lock "xaddw %w0, %1" \
                         : "=r" (result), "=m" (*mem) \
                         : "0" (value), "m" (*mem), \
                           "i" (offsetof (tcbhead_t, multiple_threads))); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (lock "xaddl %0, %1" \
                         : "=r" (result), "=m" (*mem) \
                         : "0" (value), "m" (*mem), \
                           "i" (offsetof (tcbhead_t, multiple_threads))); \
     else \
       __asm __volatile (lock "xaddq %q0, %1" \
                         : "=r" (result), "=m" (*mem) \
                         : "0" ((atomic64_t) cast_to_integer (value)), \
                           "m" (*mem), \
                           "i" (offsetof (tcbhead_t, multiple_threads))); \
     result; })

#define atomic_exchange_and_add(mem, value) \
  __sync_fetch_and_add (mem, value)

#define __arch_exchange_and_add_cprefix \
  "cmpl $0, %%fs:%P4\n\tje 0f\n\tlock\n0:\t"

#define catomic_exchange_and_add(mem, value) \
  __arch_exchange_and_add_body (__arch_exchange_and_add_cprefix, mem, value)

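/* Constant additions of 1 and -1 are routed to the increment/decrement
   macros, which use the shorter inc/dec encodings; everything else goes
   through a locked add.  */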
#define __arch_add_body(lock, pfx, mem, value) \
  do { \
    if (__builtin_constant_p (value) && (value) == 1) \
      pfx##_increment (mem); \
    else if (__builtin_constant_p (value) && (value) == -1) \
      pfx##_decrement (mem); \
    else if (sizeof (*mem) == 1) \
      __asm __volatile (lock "addb %b1, %0" \
                        : "=m" (*mem) \
                        : "iq" (value), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "addw %w1, %0" \
                        : "=m" (*mem) \
                        : "ir" (value), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "addl %1, %0" \
                        : "=m" (*mem) \
                        : "ir" (value), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      __asm __volatile (lock "addq %q1, %0" \
                        : "=m" (*mem) \
                        : "ir" ((atomic64_t) cast_to_integer (value)), \
                          "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
  } while (0)

#define atomic_add(mem, value) \
  __arch_add_body (LOCK_PREFIX, atomic, mem, value)

#define __arch_add_cprefix \
  "cmpl $0, %%fs:%P3\n\tje 0f\n\tlock\n0:\t"

#define catomic_add(mem, value) \
  __arch_add_body (__arch_add_cprefix, catomic, mem, value)

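/* Add VALUE to *MEM and return nonzero iff the result is negative; the
   sign flag left by the locked add is captured with sets.  */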
#define atomic_add_negative(mem, value) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "addb %b2, %0; sets %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "iq" (value), "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "addw %w2, %0; sets %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" (value), "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "addl %2, %0; sets %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" (value), "m" (*mem)); \
     else \
       __asm __volatile (LOCK_PREFIX "addq %q2, %0; sets %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" ((atomic64_t) cast_to_integer (value)), \
                           "m" (*mem)); \
     __result; })

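/* Add VALUE to *MEM and return nonzero iff the result is zero; the zero
   flag is captured with setz.  */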
#define atomic_add_zero(mem, value) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "addb %b2, %0; setz %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "iq" (value), "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "addw %w2, %0; setz %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" (value), "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "addl %2, %0; setz %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" (value), "m" (*mem)); \
     else \
       __asm __volatile (LOCK_PREFIX "addq %q2, %0; setz %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" ((atomic64_t) cast_to_integer (value)), \
                           "m" (*mem)); \
     __result; })


#define __arch_increment_body(lock, mem) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (lock "incb %b0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "incw %w0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "incl %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      __asm __volatile (lock "incq %q0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
  } while (0)

#define atomic_increment(mem) __arch_increment_body (LOCK_PREFIX, mem)

#define __arch_increment_cprefix \
  "cmpl $0, %%fs:%P2\n\tje 0f\n\tlock\n0:\t"

#define catomic_increment(mem) \
  __arch_increment_body (__arch_increment_cprefix, mem)

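/* Increment *MEM and return nonzero iff the new value is zero.  */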
#define atomic_increment_and_test(mem) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "incb %b0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "incw %w0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "incl %0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else \
       __asm __volatile (LOCK_PREFIX "incq %q0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     __result; })


#define __arch_decrement_body(lock, mem) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (lock "decb %b0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "decw %w0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "decl %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      __asm __volatile (lock "decq %q0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
  } while (0)

#define atomic_decrement(mem) __arch_decrement_body (LOCK_PREFIX, mem)

#define __arch_decrement_cprefix \
  "cmpl $0, %%fs:%P2\n\tje 0f\n\tlock\n0:\t"

#define catomic_decrement(mem) \
  __arch_decrement_body (__arch_decrement_cprefix, mem)

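/* Decrement *MEM and return nonzero iff the new value is zero.  */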
#define atomic_decrement_and_test(mem) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "decb %b0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "decw %w0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "decl %0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else \
       __asm __volatile (LOCK_PREFIX "decq %q0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     __result; })

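/* Set bit BIT of *MEM with a locked or.  The 64-bit case uses an immediate
   mask only for constant bit numbers below 32, since orq accepts just a
   sign-extended 32-bit immediate; otherwise the mask is materialized in a
   register.  */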
#define atomic_bit_set(mem, bit) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (LOCK_PREFIX "orb %b2, %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), "iq" (1L << (bit))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (LOCK_PREFIX "orw %w2, %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), "ir" (1L << (bit))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (LOCK_PREFIX "orl %2, %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), "ir" (1L << (bit))); \
    else if (__builtin_constant_p (bit) && (bit) < 32) \
      __asm __volatile (LOCK_PREFIX "orq %2, %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), "i" (1L << (bit))); \
    else \
      __asm __volatile (LOCK_PREFIX "orq %q2, %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), "r" (1UL << (bit))); \
  } while (0)

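/* Atomically set bit BIT of *MEM and return its previous value; bts leaves
   the old bit in the carry flag, which setc copies out.  */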
#define atomic_bit_test_set(mem, bit) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "btsb %3, %1; setc %0" \
                         : "=q" (__result), "=m" (*mem) \
                         : "m" (*mem), "iq" (bit)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "btsw %3, %1; setc %0" \
                         : "=q" (__result), "=m" (*mem) \
                         : "m" (*mem), "ir" (bit)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "btsl %3, %1; setc %0" \
                         : "=q" (__result), "=m" (*mem) \
                         : "m" (*mem), "ir" (bit)); \
     else \
       __asm __volatile (LOCK_PREFIX "btsq %3, %1; setc %0" \
                         : "=q" (__result), "=m" (*mem) \
                         : "m" (*mem), "ir" (bit)); \
     __result; })

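/* "rep; nop" encodes the pause instruction, the conventional hint used
   inside spin-wait loops.  */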
#define atomic_delay() asm ("rep; nop")

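/* The and/or bodies follow the same pattern as the add body above; the
   shared __arch_cprefix string performs the single-thread test, naming the
   offsetof operand as %P3.  */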
#define __arch_and_body(lock, mem, mask) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (lock "andb %b1, %0" \
                        : "=m" (*mem) \
                        : "iq" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "andw %w1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "andl %1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      __asm __volatile (lock "andq %q1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
  } while (0)

#define __arch_cprefix \
  "cmpl $0, %%fs:%P3\n\tje 0f\n\tlock\n0:\t"

#define atomic_and(mem, mask) __arch_and_body (LOCK_PREFIX, mem, mask)

#define catomic_and(mem, mask) __arch_and_body (__arch_cprefix, mem, mask)


#define __arch_or_body(lock, mem, mask) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (lock "orb %b1, %0" \
                        : "=m" (*mem) \
                        : "iq" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "orw %w1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "orl %1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      __asm __volatile (lock "orq %q1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
  } while (0)

#define atomic_or(mem, mask) __arch_or_body (LOCK_PREFIX, mem, mask)

#define catomic_or(mem, mask) __arch_or_body (__arch_cprefix, mem, mask)