/* sysdeps/alpha/bits/atomic.h  */
/* Copyright (C) 2003-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <http://www.gnu.org/licenses/>.  */
#include <stdint.h>

/* Fixed-width and fast atomic type aliases used by the generic
   atomic machinery.  */

typedef int8_t atomic8_t;
typedef uint8_t uatomic8_t;
typedef int_fast8_t atomic_fast8_t;
typedef uint_fast8_t uatomic_fast8_t;

typedef int16_t atomic16_t;
typedef uint16_t uatomic16_t;
typedef int_fast16_t atomic_fast16_t;
typedef uint_fast16_t uatomic_fast16_t;

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;
/* Alpha has 64-bit load-locked/store-conditional (ldq_l/stq_c), so
   64-bit atomics are available natively.  */
#define __HAVE_64B_ATOMICS 1
/* This port uses the hand-written asm sequences below rather than the
   compiler's __atomic builtins.  */
#define USE_ATOMIC_COMPILER_BUILTINS 0

/* __MB expands to a memory-barrier instruction inside the asm
   templates below; on a uniprocessor (UP) build no barrier is needed.
   NOTE(review): the #ifdef UP conditional was reconstructed — the
   "# define" indentation shows these lived inside a conditional.  */
#ifdef UP
# define __MB		/* nothing */
#else
# define __MB		"	mb\n"
#endif
/* Compare and exchange.  For all of the "xxx" routines, we expect a
   "__prev" and a "__cmp" variable to be provided by the enclosing scope,
   in which values are returned.  */
/* 8-bit compare-and-exchange.  A single byte cannot be stored with
   LL/SC, so operate on the containing aligned quadword: load-locked
   the quadword, extract and compare the byte, insert the new byte,
   and store-conditional; retry at 1: if the SC fails.  On a compare
   mismatch, branch to 2: leaving __cmp == 0.  mb1/mb2 are barrier
   insertion points (either __MB or "").  */
#define __arch_compare_and_exchange_xxx_8_int(mem, new, old, mb1, mb2)  \
({                                                                      \
  unsigned long __tmp, __snew, __addr64;                                \
  __asm__ __volatile__ (                                                \
		mb1                                                     \
	"	andnot	%[__addr8],7,%[__addr64]\n"                     \
	"	insbl	%[__new],%[__addr8],%[__snew]\n"                \
	"1:	ldq_l	%[__tmp],0(%[__addr64])\n"                      \
	"	extbl	%[__tmp],%[__addr8],%[__prev]\n"                \
	"	cmpeq	%[__prev],%[__old],%[__cmp]\n"                  \
	"	beq	%[__cmp],2f\n"                                  \
	"	mskbl	%[__tmp],%[__addr8],%[__tmp]\n"                 \
	"	or	%[__snew],%[__tmp],%[__tmp]\n"                  \
	"	stq_c	%[__tmp],0(%[__addr64])\n"                      \
	"	beq	%[__tmp],1b\n"                                  \
		mb2                                                     \
	"2:"                                                            \
	: [__prev] "=&r" (__prev),                                      \
	  [__snew] "=&r" (__snew),                                      \
	  [__tmp] "=&r" (__tmp),                                        \
	  [__cmp] "=&r" (__cmp),                                        \
	  [__addr64] "=&r" (__addr64)                                   \
	: [__addr8] "r" (mem),                                          \
	  [__old] "Ir" ((uint64_t)(uint8_t)(uint64_t)(old)),            \
	  [__new] "r" (new)                                             \
	: "memory");                                                    \
})
/* 16-bit compare-and-exchange; same quadword-masking technique as the
   8-bit variant, using the word (inswl/extwl/mskwl) byte-manipulation
   instructions.  */
#define __arch_compare_and_exchange_xxx_16_int(mem, new, old, mb1, mb2) \
({                                                                      \
  unsigned long __tmp, __snew, __addr64;                                \
  __asm__ __volatile__ (                                                \
		mb1                                                     \
	"	andnot	%[__addr16],7,%[__addr64]\n"                    \
	"	inswl	%[__new],%[__addr16],%[__snew]\n"               \
	"1:	ldq_l	%[__tmp],0(%[__addr64])\n"                      \
	"	extwl	%[__tmp],%[__addr16],%[__prev]\n"               \
	"	cmpeq	%[__prev],%[__old],%[__cmp]\n"                  \
	"	beq	%[__cmp],2f\n"                                  \
	"	mskwl	%[__tmp],%[__addr16],%[__tmp]\n"                \
	"	or	%[__snew],%[__tmp],%[__tmp]\n"                  \
	"	stq_c	%[__tmp],0(%[__addr64])\n"                      \
	"	beq	%[__tmp],1b\n"                                  \
		mb2                                                     \
	"2:"                                                            \
	: [__prev] "=&r" (__prev),                                      \
	  [__snew] "=&r" (__snew),                                      \
	  [__tmp] "=&r" (__tmp),                                        \
	  [__cmp] "=&r" (__cmp),                                        \
	  [__addr64] "=&r" (__addr64)                                   \
	: [__addr16] "r" (mem),                                         \
	  [__old] "Ir" ((uint64_t)(uint16_t)(uint64_t)(old)),           \
	  [__new] "r" (new)                                             \
	: "memory");                                                    \
})
/* 32-bit compare-and-exchange using longword LL/SC directly.  __cmp
   doubles as the store-conditional success flag; on mismatch, branch
   to 2: with __cmp == 0.  */
#define __arch_compare_and_exchange_xxx_32_int(mem, new, old, mb1, mb2) \
({                                                                      \
  __asm__ __volatile__ (                                                \
		mb1                                                     \
	"1:	ldl_l	%[__prev],%[__mem]\n"                           \
	"	cmpeq	%[__prev],%[__old],%[__cmp]\n"                  \
	"	beq	%[__cmp],2f\n"                                  \
	"	mov	%[__new],%[__cmp]\n"                            \
	"	stl_c	%[__cmp],%[__mem]\n"                            \
	"	beq	%[__cmp],1b\n"                                  \
		mb2                                                     \
	"2:"                                                            \
	: [__prev] "=&r" (__prev),                                      \
	  [__cmp] "=&r" (__cmp)                                         \
	: [__mem] "m" (*(mem)),                                         \
	  [__old] "Ir" ((uint64_t)(atomic32_t)(uint64_t)(old)),         \
	  [__new] "Ir" (new)                                            \
	: "memory");                                                    \
})
/* 64-bit compare-and-exchange using quadword LL/SC directly.  */
#define __arch_compare_and_exchange_xxx_64_int(mem, new, old, mb1, mb2) \
({                                                                      \
  __asm__ __volatile__ (                                                \
		mb1                                                     \
	"1:	ldq_l	%[__prev],%[__mem]\n"                           \
	"	cmpeq	%[__prev],%[__old],%[__cmp]\n"                  \
	"	beq	%[__cmp],2f\n"                                  \
	"	mov	%[__new],%[__cmp]\n"                            \
	"	stq_c	%[__cmp],%[__mem]\n"                            \
	"	beq	%[__cmp],1b\n"                                  \
		mb2                                                     \
	"2:"                                                            \
	: [__prev] "=&r" (__prev),                                      \
	  [__cmp] "=&r" (__cmp)                                         \
	: [__mem] "m" (*(mem)),                                         \
	  [__old] "Ir" ((uint64_t)(old)),                               \
	  [__new] "Ir" (new)                                            \
	: "memory");                                                    \
})
/* For all "bool" routines, we return FALSE if exchange successful.  */
/* Bool variants: result is !__cmp, i.e. FALSE (0) when the exchange
   succeeded.  __prev/__cmp are the locals the xxx macros expect.  */
#define __arch_compare_and_exchange_bool_8_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp;                                     \
   __arch_compare_and_exchange_xxx_8_int(mem, new, old, mb1, mb2);      \
   !__cmp; })

#define __arch_compare_and_exchange_bool_16_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp;                                     \
   __arch_compare_and_exchange_xxx_16_int(mem, new, old, mb1, mb2);     \
   !__cmp; })

#define __arch_compare_and_exchange_bool_32_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp;                                     \
   __arch_compare_and_exchange_xxx_32_int(mem, new, old, mb1, mb2);     \
   !__cmp; })

#define __arch_compare_and_exchange_bool_64_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp;                                     \
   __arch_compare_and_exchange_xxx_64_int(mem, new, old, mb1, mb2);     \
   !__cmp; })
/* For all "val" routines, return the old value whether exchange
   successful or not.  */
/* Val variants: result is the previous memory value, converted back to
   the pointed-to type.  */
#define __arch_compare_and_exchange_val_8_int(mem, new, old, mb1, mb2)  \
({ unsigned long __prev; int __cmp;                                     \
   __arch_compare_and_exchange_xxx_8_int(mem, new, old, mb1, mb2);      \
   (typeof (*mem))__prev; })

#define __arch_compare_and_exchange_val_16_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp;                                     \
   __arch_compare_and_exchange_xxx_16_int(mem, new, old, mb1, mb2);     \
   (typeof (*mem))__prev; })

#define __arch_compare_and_exchange_val_32_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp;                                     \
   __arch_compare_and_exchange_xxx_32_int(mem, new, old, mb1, mb2);     \
   (typeof (*mem))__prev; })

#define __arch_compare_and_exchange_val_64_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp;                                     \
   __arch_compare_and_exchange_xxx_64_int(mem, new, old, mb1, mb2);     \
   (typeof (*mem))__prev; })
/* Compare and exchange with "acquire" semantics, ie barrier after.  */
/* Acquire ordering: no barrier before the LL/SC sequence, __MB after.  */
#define atomic_compare_and_exchange_bool_acq(mem, new, old)     \
  __atomic_bool_bysize (__arch_compare_and_exchange_bool, int,  \
		        mem, new, old, "", __MB)

#define atomic_compare_and_exchange_val_acq(mem, new, old)      \
  __atomic_val_bysize (__arch_compare_and_exchange_val, int,    \
		       mem, new, old, "", __MB)
/* Compare and exchange with "release" semantics, ie barrier before.  */
/* Release ordering: __MB before the LL/SC sequence, no barrier after.  */
#define atomic_compare_and_exchange_bool_rel(mem, new, old)     \
  __atomic_bool_bysize (__arch_compare_and_exchange_bool, int,  \
		        mem, new, old, __MB, "")

#define atomic_compare_and_exchange_val_rel(mem, new, old)      \
  __atomic_val_bysize (__arch_compare_and_exchange_val, int,    \
		       mem, new, old, __MB, "")
/* Atomically store value and return the previous value.  */
/* 8-bit atomic exchange via the quadword-masking technique (see the
   8-bit CAS above); __ret receives the previous byte value.  */
#define __arch_exchange_8_int(mem, value, mb1, mb2)                     \
({                                                                      \
  unsigned long __tmp, __addr64, __sval; __typeof(*mem) __ret;          \
  __asm__ __volatile__ (                                                \
		mb1                                                     \
	"	andnot	%[__addr8],7,%[__addr64]\n"                     \
	"	insbl	%[__value],%[__addr8],%[__sval]\n"              \
	"1:	ldq_l	%[__tmp],0(%[__addr64])\n"                      \
	"	extbl	%[__tmp],%[__addr8],%[__ret]\n"                 \
	"	mskbl	%[__tmp],%[__addr8],%[__tmp]\n"                 \
	"	or	%[__sval],%[__tmp],%[__tmp]\n"                  \
	"	stq_c	%[__tmp],0(%[__addr64])\n"                      \
	"	beq	%[__tmp],1b\n"                                  \
		mb2                                                     \
	: [__ret] "=&r" (__ret),                                        \
	  [__sval] "=&r" (__sval),                                      \
	  [__tmp] "=&r" (__tmp),                                        \
	  [__addr64] "=&r" (__addr64)                                   \
	: [__addr8] "r" (mem),                                          \
	  [__value] "r" (value)                                         \
	: "memory");                                                    \
  __ret; })
/* 16-bit atomic exchange; word variant of the 8-bit sequence.  */
#define __arch_exchange_16_int(mem, value, mb1, mb2)                    \
({                                                                      \
  unsigned long __tmp, __addr64, __sval; __typeof(*mem) __ret;          \
  __asm__ __volatile__ (                                                \
		mb1                                                     \
	"	andnot	%[__addr16],7,%[__addr64]\n"                    \
	"	inswl	%[__value],%[__addr16],%[__sval]\n"             \
	"1:	ldq_l	%[__tmp],0(%[__addr64])\n"                      \
	"	extwl	%[__tmp],%[__addr16],%[__ret]\n"                \
	"	mskwl	%[__tmp],%[__addr16],%[__tmp]\n"                \
	"	or	%[__sval],%[__tmp],%[__tmp]\n"                  \
	"	stq_c	%[__tmp],0(%[__addr64])\n"                      \
	"	beq	%[__tmp],1b\n"                                  \
		mb2                                                     \
	: [__ret] "=&r" (__ret),                                        \
	  [__sval] "=&r" (__sval),                                      \
	  [__tmp] "=&r" (__tmp),                                        \
	  [__addr64] "=&r" (__addr64)                                   \
	: [__addr16] "r" (mem),                                         \
	  [__value] "r" (value)                                         \
	: "memory");                                                    \
  __ret; })
/* 32-bit atomic exchange using longword LL/SC; retry at 1: until the
   store-conditional succeeds.  */
#define __arch_exchange_32_int(mem, value, mb1, mb2)                    \
({                                                                      \
  signed int __tmp; __typeof(*mem) __ret;                               \
  __asm__ __volatile__ (                                                \
		mb1                                                     \
	"1:	ldl_l	%[__ret],%[__mem]\n"                            \
	"	mov	%[__val],%[__tmp]\n"                            \
	"	stl_c	%[__tmp],%[__mem]\n"                            \
	"	beq	%[__tmp],1b\n"                                  \
		mb2                                                     \
	: [__ret] "=&r" (__ret),                                        \
	  [__tmp] "=&r" (__tmp)                                         \
	: [__mem] "m" (*(mem)),                                         \
	  [__val] "Ir" (value)                                          \
	: "memory");                                                    \
  __ret; })
/* 64-bit atomic exchange using quadword LL/SC.  */
#define __arch_exchange_64_int(mem, value, mb1, mb2)                    \
({                                                                      \
  unsigned long __tmp; __typeof(*mem) __ret;                            \
  __asm__ __volatile__ (                                                \
		mb1                                                     \
	"1:	ldq_l	%[__ret],%[__mem]\n"                            \
	"	mov	%[__val],%[__tmp]\n"                            \
	"	stq_c	%[__tmp],%[__mem]\n"                            \
	"	beq	%[__tmp],1b\n"                                  \
		mb2                                                     \
	: [__ret] "=&r" (__ret),                                        \
	  [__tmp] "=&r" (__tmp)                                         \
	: [__mem] "m" (*(mem)),                                         \
	  [__val] "Ir" (value)                                          \
	: "memory");                                                    \
  __ret; })
/* Size-dispatched exchange with acquire (barrier after) or release
   (barrier before) ordering.  */
#define atomic_exchange_acq(mem, value) \
  __atomic_val_bysize (__arch_exchange, int, mem, value, "", __MB)

#define atomic_exchange_rel(mem, value) \
  __atomic_val_bysize (__arch_exchange, int, mem, value, __MB, "")
/* Atomically add value and return the previous (unincremented) value.  */
/* 8- and 16-bit atomic add are not implemented; trap if ever reached.  */
#define __arch_exchange_and_add_8_int(mem, value, mb1, mb2) \
  ({ __builtin_trap (); 0; })

#define __arch_exchange_and_add_16_int(mem, value, mb1, mb2) \
  ({ __builtin_trap (); 0; })
/* 32-bit atomic fetch-and-add via longword LL/SC; __ret is the value
   before the addition.  */
#define __arch_exchange_and_add_32_int(mem, value, mb1, mb2)            \
({                                                                      \
  signed int __tmp; __typeof(*mem) __ret;                               \
  __asm__ __volatile__ (                                                \
		mb1                                                     \
	"1:	ldl_l	%[__ret],%[__mem]\n"                            \
	"	addl	%[__ret],%[__val],%[__tmp]\n"                   \
	"	stl_c	%[__tmp],%[__mem]\n"                            \
	"	beq	%[__tmp],1b\n"                                  \
		mb2                                                     \
	: [__ret] "=&r" (__ret),                                        \
	  [__tmp] "=&r" (__tmp)                                         \
	: [__mem] "m" (*(mem)),                                         \
	  [__val] "Ir" ((signed int)(value))                            \
	: "memory");                                                    \
  __ret; })
/* 64-bit atomic fetch-and-add via quadword LL/SC.  */
#define __arch_exchange_and_add_64_int(mem, value, mb1, mb2)            \
({                                                                      \
  unsigned long __tmp; __typeof(*mem) __ret;                            \
  __asm__ __volatile__ (                                                \
		mb1                                                     \
	"1:	ldq_l	%[__ret],%[__mem]\n"                            \
	"	addq	%[__ret],%[__val],%[__tmp]\n"                   \
	"	stq_c	%[__tmp],%[__mem]\n"                            \
	"	beq	%[__tmp],1b\n"                                  \
		mb2                                                     \
	: [__ret] "=&r" (__ret),                                        \
	  [__tmp] "=&r" (__tmp)                                         \
	: [__mem] "m" (*(mem)),                                         \
	  [__val] "Ir" ((unsigned long)(value))                         \
	: "memory");                                                    \
  __ret; })
/* ??? Barrier semantics for atomic_exchange_and_add appear to be
   undefined.  Use full barrier for now, as that's safe.  */
#define atomic_exchange_and_add(mem, value) \
  __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, __MB, __MB)
/* ??? Blah, I'm lazy.  Implement these later.  Can do better than the
   compare-and-exchange loop provided by generic code.

#define atomic_decrement_if_positive(mem)
#define atomic_bit_test_set(mem, bit)

*/
/* Explicit barriers: full and read barriers use "mb", the write
   barrier uses the cheaper "wmb".  NOTE(review): the #ifndef UP guard
   was reconstructed — the "# define" indentation shows these lived
   inside a conditional, matching the empty __MB on UP builds.  */
#ifndef UP
# define atomic_full_barrier()	__asm ("mb" : : : "memory");
# define atomic_read_barrier()	__asm ("mb" : : : "memory");
# define atomic_write_barrier()	__asm ("wmb" : : : "memory");
#endif