/* Copyright (C) 2003-2016 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <http://www.gnu.org/licenses/>.  */

#include <stdint.h>

typedef int8_t atomic8_t;
typedef uint8_t uatomic8_t;
typedef int_fast8_t atomic_fast8_t;
typedef uint_fast8_t uatomic_fast8_t;

typedef int16_t atomic16_t;
typedef uint16_t uatomic16_t;
typedef int_fast16_t atomic_fast16_t;
typedef uint_fast16_t uatomic_fast16_t;

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;

#define __HAVE_64B_ATOMICS 1
#define USE_ATOMIC_COMPILER_BUILTINS 0


#ifdef UP
# define __MB /* nothing */
#else
# define __MB " mb\n"
#endif


/* Compare and exchange.  For all of the "xxx" routines, we expect a
   "__prev" and a "__cmp" variable to be provided by the enclosing scope,
   in which values are returned.  */

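/* Usage sketch: a caller of one of the "xxx" routines wraps it in a
   statement expression that supplies the two result variables, roughly

     ({ unsigned long __prev; int __cmp;
        __arch_compare_and_exchange_xxx_32_int (mem, new, old, "", __MB);
        (int) __prev; })

   which is the pattern the "bool" and "val" wrappers below follow.  */
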
#define __arch_compare_and_exchange_xxx_8_int(mem, new, old, mb1, mb2) \
({ \
  unsigned long __tmp, __snew, __addr64; \
  __asm__ __volatile__ ( \
        mb1 \
        " andnot %[__addr8],7,%[__addr64]\n" \
        " insbl %[__new],%[__addr8],%[__snew]\n" \
        "1: ldq_l %[__tmp],0(%[__addr64])\n" \
        " extbl %[__tmp],%[__addr8],%[__prev]\n" \
        " cmpeq %[__prev],%[__old],%[__cmp]\n" \
        " beq %[__cmp],2f\n" \
        " mskbl %[__tmp],%[__addr8],%[__tmp]\n" \
        " or %[__snew],%[__tmp],%[__tmp]\n" \
        " stq_c %[__tmp],0(%[__addr64])\n" \
        " beq %[__tmp],1b\n" \
        mb2 \
        "2:" \
        : [__prev] "=&r" (__prev), \
          [__snew] "=&r" (__snew), \
          [__tmp] "=&r" (__tmp), \
          [__cmp] "=&r" (__cmp), \
          [__addr64] "=&r" (__addr64) \
        : [__addr8] "r" (mem), \
          [__old] "Ir" ((uint64_t)(uint8_t)(uint64_t)(old)), \
          [__new] "r" (new) \
        : "memory"); \
})

#define __arch_compare_and_exchange_xxx_16_int(mem, new, old, mb1, mb2) \
({ \
  unsigned long __tmp, __snew, __addr64; \
  __asm__ __volatile__ ( \
        mb1 \
        " andnot %[__addr16],7,%[__addr64]\n" \
        " inswl %[__new],%[__addr16],%[__snew]\n" \
        "1: ldq_l %[__tmp],0(%[__addr64])\n" \
        " extwl %[__tmp],%[__addr16],%[__prev]\n" \
        " cmpeq %[__prev],%[__old],%[__cmp]\n" \
        " beq %[__cmp],2f\n" \
        " mskwl %[__tmp],%[__addr16],%[__tmp]\n" \
        " or %[__snew],%[__tmp],%[__tmp]\n" \
        " stq_c %[__tmp],0(%[__addr64])\n" \
        " beq %[__tmp],1b\n" \
        mb2 \
        "2:" \
        : [__prev] "=&r" (__prev), \
          [__snew] "=&r" (__snew), \
          [__tmp] "=&r" (__tmp), \
          [__cmp] "=&r" (__cmp), \
          [__addr64] "=&r" (__addr64) \
        : [__addr16] "r" (mem), \
          [__old] "Ir" ((uint64_t)(uint16_t)(uint64_t)(old)), \
          [__new] "r" (new) \
        : "memory"); \
})

#define __arch_compare_and_exchange_xxx_32_int(mem, new, old, mb1, mb2) \
({ \
  __asm__ __volatile__ ( \
        mb1 \
        "1: ldl_l %[__prev],%[__mem]\n" \
        " cmpeq %[__prev],%[__old],%[__cmp]\n" \
        " beq %[__cmp],2f\n" \
        " mov %[__new],%[__cmp]\n" \
        " stl_c %[__cmp],%[__mem]\n" \
        " beq %[__cmp],1b\n" \
        mb2 \
        "2:" \
        : [__prev] "=&r" (__prev), \
          [__cmp] "=&r" (__cmp) \
        : [__mem] "m" (*(mem)), \
          [__old] "Ir" ((uint64_t)(atomic32_t)(uint64_t)(old)), \
          [__new] "Ir" (new) \
        : "memory"); \
})

#define __arch_compare_and_exchange_xxx_64_int(mem, new, old, mb1, mb2) \
({ \
  __asm__ __volatile__ ( \
        mb1 \
        "1: ldq_l %[__prev],%[__mem]\n" \
        " cmpeq %[__prev],%[__old],%[__cmp]\n" \
        " beq %[__cmp],2f\n" \
        " mov %[__new],%[__cmp]\n" \
        " stq_c %[__cmp],%[__mem]\n" \
        " beq %[__cmp],1b\n" \
        mb2 \
        "2:" \
        : [__prev] "=&r" (__prev), \
          [__cmp] "=&r" (__cmp) \
        : [__mem] "m" (*(mem)), \
          [__old] "Ir" ((uint64_t)(old)), \
          [__new] "Ir" (new) \
        : "memory"); \
})

/* For all "bool" routines, we return FALSE if exchange successful.  */

#define __arch_compare_and_exchange_bool_8_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp; \
   __arch_compare_and_exchange_xxx_8_int(mem, new, old, mb1, mb2); \
   !__cmp; })

#define __arch_compare_and_exchange_bool_16_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp; \
   __arch_compare_and_exchange_xxx_16_int(mem, new, old, mb1, mb2); \
   !__cmp; })

#define __arch_compare_and_exchange_bool_32_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp; \
   __arch_compare_and_exchange_xxx_32_int(mem, new, old, mb1, mb2); \
   !__cmp; })

#define __arch_compare_and_exchange_bool_64_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp; \
   __arch_compare_and_exchange_xxx_64_int(mem, new, old, mb1, mb2); \
   !__cmp; })

/* For all "val" routines, return the old value whether exchange
   successful or not.  */

#define __arch_compare_and_exchange_val_8_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp; \
   __arch_compare_and_exchange_xxx_8_int(mem, new, old, mb1, mb2); \
   (typeof (*mem))__prev; })

#define __arch_compare_and_exchange_val_16_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp; \
   __arch_compare_and_exchange_xxx_16_int(mem, new, old, mb1, mb2); \
   (typeof (*mem))__prev; })

#define __arch_compare_and_exchange_val_32_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp; \
   __arch_compare_and_exchange_xxx_32_int(mem, new, old, mb1, mb2); \
   (typeof (*mem))__prev; })

#define __arch_compare_and_exchange_val_64_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp; \
   __arch_compare_and_exchange_xxx_64_int(mem, new, old, mb1, mb2); \
   (typeof (*mem))__prev; })

/* Compare and exchange with "acquire" semantics, i.e. barrier after.  */

#define atomic_compare_and_exchange_bool_acq(mem, new, old) \
  __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \
                        mem, new, old, "", __MB)

#define atomic_compare_and_exchange_val_acq(mem, new, old) \
  __atomic_val_bysize (__arch_compare_and_exchange_val, int, \
                       mem, new, old, "", __MB)

/* Compare and exchange with "release" semantics, i.e. barrier before.  */

#define atomic_compare_and_exchange_bool_rel(mem, new, old) \
  __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \
                        mem, new, old, __MB, "")

#define atomic_compare_and_exchange_val_rel(mem, new, old) \
  __atomic_val_bysize (__arch_compare_and_exchange_val, int, \
                       mem, new, old, __MB, "")

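/* Usage sketch for the CAS wrappers above (variable names are only
   illustrative):

     int lock = 0;
     // Acquire form: barrier after the exchange.
     if (atomic_compare_and_exchange_bool_acq (&lock, 1, 0) == 0)
       ;  // exchange succeeded, lock is now held
     // Release form: barrier before the exchange.
     int seen = atomic_compare_and_exchange_val_rel (&lock, 0, 1);

   The "bool" form yields 0 on success; the "val" form yields the value
   observed in memory, equal to the expected old value on success.  */
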

/* Atomically store value and return the previous value.  */

#define __arch_exchange_8_int(mem, value, mb1, mb2) \
({ \
  unsigned long __tmp, __addr64, __sval; __typeof(*mem) __ret; \
  __asm__ __volatile__ ( \
        mb1 \
        " andnot %[__addr8],7,%[__addr64]\n" \
        " insbl %[__value],%[__addr8],%[__sval]\n" \
        "1: ldq_l %[__tmp],0(%[__addr64])\n" \
        " extbl %[__tmp],%[__addr8],%[__ret]\n" \
        " mskbl %[__tmp],%[__addr8],%[__tmp]\n" \
        " or %[__sval],%[__tmp],%[__tmp]\n" \
        " stq_c %[__tmp],0(%[__addr64])\n" \
        " beq %[__tmp],1b\n" \
        mb2 \
        : [__ret] "=&r" (__ret), \
          [__sval] "=&r" (__sval), \
          [__tmp] "=&r" (__tmp), \
          [__addr64] "=&r" (__addr64) \
        : [__addr8] "r" (mem), \
          [__value] "r" (value) \
        : "memory"); \
  __ret; })

#define __arch_exchange_16_int(mem, value, mb1, mb2) \
({ \
  unsigned long __tmp, __addr64, __sval; __typeof(*mem) __ret; \
  __asm__ __volatile__ ( \
        mb1 \
        " andnot %[__addr16],7,%[__addr64]\n" \
        " inswl %[__value],%[__addr16],%[__sval]\n" \
        "1: ldq_l %[__tmp],0(%[__addr64])\n" \
        " extwl %[__tmp],%[__addr16],%[__ret]\n" \
        " mskwl %[__tmp],%[__addr16],%[__tmp]\n" \
        " or %[__sval],%[__tmp],%[__tmp]\n" \
        " stq_c %[__tmp],0(%[__addr64])\n" \
        " beq %[__tmp],1b\n" \
        mb2 \
        : [__ret] "=&r" (__ret), \
          [__sval] "=&r" (__sval), \
          [__tmp] "=&r" (__tmp), \
          [__addr64] "=&r" (__addr64) \
        : [__addr16] "r" (mem), \
          [__value] "r" (value) \
        : "memory"); \
  __ret; })

#define __arch_exchange_32_int(mem, value, mb1, mb2) \
({ \
  signed int __tmp; __typeof(*mem) __ret; \
  __asm__ __volatile__ ( \
        mb1 \
        "1: ldl_l %[__ret],%[__mem]\n" \
        " mov %[__val],%[__tmp]\n" \
        " stl_c %[__tmp],%[__mem]\n" \
        " beq %[__tmp],1b\n" \
        mb2 \
        : [__ret] "=&r" (__ret), \
          [__tmp] "=&r" (__tmp) \
        : [__mem] "m" (*(mem)), \
          [__val] "Ir" (value) \
        : "memory"); \
  __ret; })

#define __arch_exchange_64_int(mem, value, mb1, mb2) \
({ \
  unsigned long __tmp; __typeof(*mem) __ret; \
  __asm__ __volatile__ ( \
        mb1 \
        "1: ldq_l %[__ret],%[__mem]\n" \
        " mov %[__val],%[__tmp]\n" \
        " stq_c %[__tmp],%[__mem]\n" \
        " beq %[__tmp],1b\n" \
        mb2 \
        : [__ret] "=&r" (__ret), \
          [__tmp] "=&r" (__tmp) \
        : [__mem] "m" (*(mem)), \
          [__val] "Ir" (value) \
        : "memory"); \
  __ret; })

#define atomic_exchange_acq(mem, value) \
  __atomic_val_bysize (__arch_exchange, int, mem, value, "", __MB)

#define atomic_exchange_rel(mem, value) \
  __atomic_val_bysize (__arch_exchange, int, mem, value, __MB, "")

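/* Usage sketch (the flag variable is only illustrative):

     static int flag;
     int was = atomic_exchange_acq (&flag, 1);   // returns the prior value
     // ... work ...
     atomic_exchange_rel (&flag, 0);             // barrier precedes the store

   Both forms always perform the store; unlike CAS there is no expected
   old value to compare against.  */
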

/* Atomically add value and return the previous (unincremented) value.  */

#define __arch_exchange_and_add_8_int(mem, value, mb1, mb2) \
  ({ __builtin_trap (); 0; })

#define __arch_exchange_and_add_16_int(mem, value, mb1, mb2) \
  ({ __builtin_trap (); 0; })

#define __arch_exchange_and_add_32_int(mem, value, mb1, mb2) \
({ \
  signed int __tmp; __typeof(*mem) __ret; \
  __asm__ __volatile__ ( \
        mb1 \
        "1: ldl_l %[__ret],%[__mem]\n" \
        " addl %[__ret],%[__val],%[__tmp]\n" \
        " stl_c %[__tmp],%[__mem]\n" \
        " beq %[__tmp],1b\n" \
        mb2 \
        : [__ret] "=&r" (__ret), \
          [__tmp] "=&r" (__tmp) \
        : [__mem] "m" (*(mem)), \
          [__val] "Ir" ((signed int)(value)) \
        : "memory"); \
  __ret; })

#define __arch_exchange_and_add_64_int(mem, value, mb1, mb2) \
({ \
  unsigned long __tmp; __typeof(*mem) __ret; \
  __asm__ __volatile__ ( \
        mb1 \
        "1: ldq_l %[__ret],%[__mem]\n" \
        " addq %[__ret],%[__val],%[__tmp]\n" \
        " stq_c %[__tmp],%[__mem]\n" \
        " beq %[__tmp],1b\n" \
        mb2 \
        : [__ret] "=&r" (__ret), \
          [__tmp] "=&r" (__tmp) \
        : [__mem] "m" (*(mem)), \
          [__val] "Ir" ((unsigned long)(value)) \
        : "memory"); \
  __ret; })

/* ??? Barrier semantics for atomic_exchange_and_add appear to be
   undefined.  Use full barrier for now, as that's safe.  */
#define atomic_exchange_and_add(mem, value) \
  __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, __MB, __MB)

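/* Usage sketch (the counter is only illustrative):

     static long counter;
     long before = atomic_exchange_and_add (&counter, 5);
     // before holds the pre-increment value; counter is now before + 5.

   The 8- and 16-bit variants trap, since the LL/SC sequences above are
   only provided for 32- and 64-bit objects.  */
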

/* ??? Blah, I'm lazy.  Implement these later.  Can do better than the
   compare-and-exchange loop provided by generic code.

#define atomic_decrement_if_positive(mem)
#define atomic_bit_test_set(mem, bit)

*/
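
/* One possible LL/SC shape for atomic_decrement_if_positive, sketched
   here only as an illustration (an untested assumption; 64-bit case, no
   barriers shown):

     "1: ldq_l %[__ret],%[__mem]\n"
     "   ble %[__ret],2f\n"           // skip the store if value <= 0
     "   subq %[__ret],1,%[__tmp]\n"
     "   stq_c %[__tmp],%[__mem]\n"
     "   beq %[__tmp],1b\n"
     "2:"

   returning __ret, the value observed before the decrement.  */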

#ifndef UP
# define atomic_full_barrier()  __asm ("mb" : : : "memory");
# define atomic_read_barrier()  __asm ("mb" : : : "memory");
# define atomic_write_barrier() __asm ("wmb" : : : "memory");
#endif
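
/* Usage sketch (variable names are only illustrative):

     shared_data = 42;
     atomic_write_barrier ();   // order the data store before the flag store
     data_ready = 1;

   When UP is defined, these definitions are omitted and the generic
   fallback definitions apply instead.  */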