/* Copyright (C) 2003-2016 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <http://www.gnu.org/licenses/>.  */

#include <stdint.h>

typedef int8_t atomic8_t;
typedef uint8_t uatomic8_t;
typedef int_fast8_t atomic_fast8_t;
typedef uint_fast8_t uatomic_fast8_t;

typedef int16_t atomic16_t;
typedef uint16_t uatomic16_t;
typedef int_fast16_t atomic_fast16_t;
typedef uint_fast16_t uatomic_fast16_t;

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;

#define __HAVE_64B_ATOMICS 1
#define USE_ATOMIC_COMPILER_BUILTINS 0


#ifdef UP
# define __MB   /* nothing */
#else
# define __MB   " mb\n"
#endif


/* Compare and exchange.  For all of the "xxx" routines, we expect a
   "__prev" and a "__cmp" variable to be provided by the enclosing scope,
   in which values are returned.  */

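/* Illustrative sketch only (not part of the interface): the "xxx" macros
   below are meant to be wrapped by the "bool"/"val" macros further down,
   roughly like this:

     ({ unsigned long __prev; int __cmp;
        __arch_compare_and_exchange_xxx_32_int (mem, new, old, "", __MB);
        (typeof (*mem)) __prev; })
*/
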
#define __arch_compare_and_exchange_xxx_8_int(mem, new, old, mb1, mb2) \
({ \
  unsigned long __tmp, __snew, __addr64; \
  __asm__ __volatile__ ( \
       mb1 \
       " andnot %[__addr8],7,%[__addr64]\n" \
       " insbl %[__new],%[__addr8],%[__snew]\n" \
       "1: ldq_l %[__tmp],0(%[__addr64])\n" \
       " extbl %[__tmp],%[__addr8],%[__prev]\n" \
       " cmpeq %[__prev],%[__old],%[__cmp]\n" \
       " beq %[__cmp],2f\n" \
       " mskbl %[__tmp],%[__addr8],%[__tmp]\n" \
       " or %[__snew],%[__tmp],%[__tmp]\n" \
       " stq_c %[__tmp],0(%[__addr64])\n" \
       " beq %[__tmp],1b\n" \
       mb2 \
       "2:" \
       : [__prev] "=&r" (__prev), \
         [__snew] "=&r" (__snew), \
         [__tmp] "=&r" (__tmp), \
         [__cmp] "=&r" (__cmp), \
         [__addr64] "=&r" (__addr64) \
       : [__addr8] "r" (mem), \
         [__old] "Ir" ((uint64_t)(uint8_t)(uint64_t)(old)), \
         [__new] "r" (new) \
       : "memory"); \
})

#define __arch_compare_and_exchange_xxx_16_int(mem, new, old, mb1, mb2) \
({ \
  unsigned long __tmp, __snew, __addr64; \
  __asm__ __volatile__ ( \
       mb1 \
       " andnot %[__addr16],7,%[__addr64]\n" \
       " inswl %[__new],%[__addr16],%[__snew]\n" \
       "1: ldq_l %[__tmp],0(%[__addr64])\n" \
       " extwl %[__tmp],%[__addr16],%[__prev]\n" \
       " cmpeq %[__prev],%[__old],%[__cmp]\n" \
       " beq %[__cmp],2f\n" \
       " mskwl %[__tmp],%[__addr16],%[__tmp]\n" \
       " or %[__snew],%[__tmp],%[__tmp]\n" \
       " stq_c %[__tmp],0(%[__addr64])\n" \
       " beq %[__tmp],1b\n" \
       mb2 \
       "2:" \
       : [__prev] "=&r" (__prev), \
         [__snew] "=&r" (__snew), \
         [__tmp] "=&r" (__tmp), \
         [__cmp] "=&r" (__cmp), \
         [__addr64] "=&r" (__addr64) \
       : [__addr16] "r" (mem), \
         [__old] "Ir" ((uint64_t)(uint16_t)(uint64_t)(old)), \
         [__new] "r" (new) \
       : "memory"); \
})

#define __arch_compare_and_exchange_xxx_32_int(mem, new, old, mb1, mb2) \
({ \
  __asm__ __volatile__ ( \
       mb1 \
       "1: ldl_l %[__prev],%[__mem]\n" \
       " cmpeq %[__prev],%[__old],%[__cmp]\n" \
       " beq %[__cmp],2f\n" \
       " mov %[__new],%[__cmp]\n" \
       " stl_c %[__cmp],%[__mem]\n" \
       " beq %[__cmp],1b\n" \
       mb2 \
       "2:" \
       : [__prev] "=&r" (__prev), \
         [__cmp] "=&r" (__cmp) \
       : [__mem] "m" (*(mem)), \
         [__old] "Ir" ((uint64_t)(atomic32_t)(uint64_t)(old)), \
         [__new] "Ir" (new) \
       : "memory"); \
})

#define __arch_compare_and_exchange_xxx_64_int(mem, new, old, mb1, mb2) \
({ \
  __asm__ __volatile__ ( \
       mb1 \
       "1: ldq_l %[__prev],%[__mem]\n" \
       " cmpeq %[__prev],%[__old],%[__cmp]\n" \
       " beq %[__cmp],2f\n" \
       " mov %[__new],%[__cmp]\n" \
       " stq_c %[__cmp],%[__mem]\n" \
       " beq %[__cmp],1b\n" \
       mb2 \
       "2:" \
       : [__prev] "=&r" (__prev), \
         [__cmp] "=&r" (__cmp) \
       : [__mem] "m" (*(mem)), \
         [__old] "Ir" ((uint64_t)(old)), \
         [__new] "Ir" (new) \
       : "memory"); \
})

/* For all "bool" routines, we return FALSE if the exchange was successful.  */

#define __arch_compare_and_exchange_bool_8_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp; \
   __arch_compare_and_exchange_xxx_8_int(mem, new, old, mb1, mb2); \
   !__cmp; })

#define __arch_compare_and_exchange_bool_16_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp; \
   __arch_compare_and_exchange_xxx_16_int(mem, new, old, mb1, mb2); \
   !__cmp; })

#define __arch_compare_and_exchange_bool_32_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp; \
   __arch_compare_and_exchange_xxx_32_int(mem, new, old, mb1, mb2); \
   !__cmp; })

#define __arch_compare_and_exchange_bool_64_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp; \
   __arch_compare_and_exchange_xxx_64_int(mem, new, old, mb1, mb2); \
   !__cmp; })

/* For all "val" routines, return the old value whether the exchange
   was successful or not.  */

#define __arch_compare_and_exchange_val_8_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp; \
   __arch_compare_and_exchange_xxx_8_int(mem, new, old, mb1, mb2); \
   (typeof (*mem))__prev; })

#define __arch_compare_and_exchange_val_16_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp; \
   __arch_compare_and_exchange_xxx_16_int(mem, new, old, mb1, mb2); \
   (typeof (*mem))__prev; })

#define __arch_compare_and_exchange_val_32_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp; \
   __arch_compare_and_exchange_xxx_32_int(mem, new, old, mb1, mb2); \
   (typeof (*mem))__prev; })

#define __arch_compare_and_exchange_val_64_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp; \
   __arch_compare_and_exchange_xxx_64_int(mem, new, old, mb1, mb2); \
   (typeof (*mem))__prev; })

/* Compare and exchange with "acquire" semantics, i.e. barrier after.  */

#define atomic_compare_and_exchange_bool_acq(mem, new, old) \
  __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \
                        mem, new, old, "", __MB)

#define atomic_compare_and_exchange_val_acq(mem, new, old) \
  __atomic_val_bysize (__arch_compare_and_exchange_val, int, \
                       mem, new, old, "", __MB)

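/* Usage sketch, illustrative only ("lock" and "LOCKED" are hypothetical
   names, not defined by this file):

     if (!atomic_compare_and_exchange_bool_acq (lock, LOCKED, 0))
       ... the exchange succeeded and *lock now holds LOCKED ...

   atomic_compare_and_exchange_val_acq instead returns the value observed
   in *mem, so comparing it against the expected old value tells the
   caller whether the exchange took place.  */
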
/* Compare and exchange with "release" semantics, i.e. barrier before.  */

#define atomic_compare_and_exchange_val_rel(mem, new, old) \
  __atomic_val_bysize (__arch_compare_and_exchange_val, int, \
                       mem, new, old, __MB, "")


/* Atomically store value and return the previous value.  */

#define __arch_exchange_8_int(mem, value, mb1, mb2) \
({ \
  unsigned long __tmp, __addr64, __sval; __typeof(*mem) __ret; \
  __asm__ __volatile__ ( \
       mb1 \
       " andnot %[__addr8],7,%[__addr64]\n" \
       " insbl %[__value],%[__addr8],%[__sval]\n" \
       "1: ldq_l %[__tmp],0(%[__addr64])\n" \
       " extbl %[__tmp],%[__addr8],%[__ret]\n" \
       " mskbl %[__tmp],%[__addr8],%[__tmp]\n" \
       " or %[__sval],%[__tmp],%[__tmp]\n" \
       " stq_c %[__tmp],0(%[__addr64])\n" \
       " beq %[__tmp],1b\n" \
       mb2 \
       : [__ret] "=&r" (__ret), \
         [__sval] "=&r" (__sval), \
         [__tmp] "=&r" (__tmp), \
         [__addr64] "=&r" (__addr64) \
       : [__addr8] "r" (mem), \
         [__value] "r" (value) \
       : "memory"); \
  __ret; })

#define __arch_exchange_16_int(mem, value, mb1, mb2) \
({ \
  unsigned long __tmp, __addr64, __sval; __typeof(*mem) __ret; \
  __asm__ __volatile__ ( \
       mb1 \
       " andnot %[__addr16],7,%[__addr64]\n" \
       " inswl %[__value],%[__addr16],%[__sval]\n" \
       "1: ldq_l %[__tmp],0(%[__addr64])\n" \
       " extwl %[__tmp],%[__addr16],%[__ret]\n" \
       " mskwl %[__tmp],%[__addr16],%[__tmp]\n" \
       " or %[__sval],%[__tmp],%[__tmp]\n" \
       " stq_c %[__tmp],0(%[__addr64])\n" \
       " beq %[__tmp],1b\n" \
       mb2 \
       : [__ret] "=&r" (__ret), \
         [__sval] "=&r" (__sval), \
         [__tmp] "=&r" (__tmp), \
         [__addr64] "=&r" (__addr64) \
       : [__addr16] "r" (mem), \
         [__value] "r" (value) \
       : "memory"); \
  __ret; })

#define __arch_exchange_32_int(mem, value, mb1, mb2) \
({ \
  signed int __tmp; __typeof(*mem) __ret; \
  __asm__ __volatile__ ( \
       mb1 \
       "1: ldl_l %[__ret],%[__mem]\n" \
       " mov %[__val],%[__tmp]\n" \
       " stl_c %[__tmp],%[__mem]\n" \
       " beq %[__tmp],1b\n" \
       mb2 \
       : [__ret] "=&r" (__ret), \
         [__tmp] "=&r" (__tmp) \
       : [__mem] "m" (*(mem)), \
         [__val] "Ir" (value) \
       : "memory"); \
  __ret; })

#define __arch_exchange_64_int(mem, value, mb1, mb2) \
({ \
  unsigned long __tmp; __typeof(*mem) __ret; \
  __asm__ __volatile__ ( \
       mb1 \
       "1: ldq_l %[__ret],%[__mem]\n" \
       " mov %[__val],%[__tmp]\n" \
       " stq_c %[__tmp],%[__mem]\n" \
       " beq %[__tmp],1b\n" \
       mb2 \
       : [__ret] "=&r" (__ret), \
         [__tmp] "=&r" (__tmp) \
       : [__mem] "m" (*(mem)), \
         [__val] "Ir" (value) \
       : "memory"); \
  __ret; })

#define atomic_exchange_acq(mem, value) \
  __atomic_val_bysize (__arch_exchange, int, mem, value, "", __MB)

#define atomic_exchange_rel(mem, value) \
  __atomic_val_bysize (__arch_exchange, int, mem, value, __MB, "")

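/* Usage sketch, illustrative only ("flag" is a hypothetical variable):

     int old = atomic_exchange_acq (&flag, 1);

   stores 1 into flag and returns the previous contents, with the memory
   barrier emitted after the operation (and before it for the _rel form).  */
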
/* Atomically add value and return the previous (unincremented) value.  */

#define __arch_exchange_and_add_8_int(mem, value, mb1, mb2) \
  ({ __builtin_trap (); 0; })

#define __arch_exchange_and_add_16_int(mem, value, mb1, mb2) \
  ({ __builtin_trap (); 0; })

#define __arch_exchange_and_add_32_int(mem, value, mb1, mb2) \
({ \
  signed int __tmp; __typeof(*mem) __ret; \
  __asm__ __volatile__ ( \
       mb1 \
       "1: ldl_l %[__ret],%[__mem]\n" \
       " addl %[__ret],%[__val],%[__tmp]\n" \
       " stl_c %[__tmp],%[__mem]\n" \
       " beq %[__tmp],1b\n" \
       mb2 \
       : [__ret] "=&r" (__ret), \
         [__tmp] "=&r" (__tmp) \
       : [__mem] "m" (*(mem)), \
         [__val] "Ir" ((signed int)(value)) \
       : "memory"); \
  __ret; })

#define __arch_exchange_and_add_64_int(mem, value, mb1, mb2) \
({ \
  unsigned long __tmp; __typeof(*mem) __ret; \
  __asm__ __volatile__ ( \
       mb1 \
       "1: ldq_l %[__ret],%[__mem]\n" \
       " addq %[__ret],%[__val],%[__tmp]\n" \
       " stq_c %[__tmp],%[__mem]\n" \
       " beq %[__tmp],1b\n" \
       mb2 \
       : [__ret] "=&r" (__ret), \
         [__tmp] "=&r" (__tmp) \
       : [__mem] "m" (*(mem)), \
         [__val] "Ir" ((unsigned long)(value)) \
       : "memory"); \
  __ret; })

/* ??? Barrier semantics for atomic_exchange_and_add appear to be
   undefined.  Use a full barrier for now, as that's safe.  */
#define atomic_exchange_and_add(mem, value) \
  __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, __MB, __MB)

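/* Usage sketch, illustrative only ("counter" is a hypothetical variable):

     long previous = atomic_exchange_and_add (&counter, 1);

   increments counter and yields its value from before the addition.  */
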
/* ??? Not yet implemented.  These could do better than the
   compare-and-exchange loop provided by the generic code.

#define atomic_decrement_if_positive(mem)
#define atomic_bit_test_set(mem, bit)

*/

#ifndef UP
# define atomic_full_barrier()  __asm ("mb" : : : "memory");
# define atomic_read_barrier()  __asm ("mb" : : : "memory");
# define atomic_write_barrier() __asm ("wmb" : : : "memory");
#endif
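
/* Illustrative note (hypothetical names, not part of this file):
   atomic_write_barrier is the usual fence placed before publishing a
   store, e.g.

     node->next = head;
     atomic_write_barrier ();
     head = node;
*/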