/* Atomic operations.  sparc32 version.
   Copyright (C) 2003-2016 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Jakub Jelinek <jakub@redhat.com>, 2003.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef _ATOMIC_MACHINE_H
#define _ATOMIC_MACHINE_H 1

#include <stdint.h>

/* Type aliases the generic atomic.h machinery expects every port to
   provide, mapped onto the <stdint.h> fixed-width and "fast" types.  */
typedef int8_t atomic8_t;
typedef uint8_t uatomic8_t;
typedef int_fast8_t atomic_fast8_t;
typedef uint_fast8_t uatomic_fast8_t;

typedef int16_t atomic16_t;
typedef uint16_t uatomic16_t;
typedef int_fast16_t atomic_fast16_t;
typedef uint_fast16_t uatomic_fast16_t;

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;

/* No 64-bit atomic operations are available on 32-bit sparc.  */
#define __HAVE_64B_ATOMICS 0
/* Use the hand-written implementations below, not the compiler's
   __atomic_* builtins.  */
#define USE_ATOMIC_COMPILER_BUILTINS 0


/* We have no compare and swap, just test and set.
   The following implementation contends on 64 global locks
   per library and assumes no variable will be accessed using atomic.h
   macros from two different libraries.  */

/* Put the lock array in a linkonce section so the linker merges the
   copies that end up in multiple objects.  */
__make_section_unallocated
  (".gnu.linkonce.b.__sparc32_atomic_locks, \"aw\", %nobits");

/* 64 one-byte ldstub spin locks, selected by hashing the target
   address (see __sparc32_atomic_do_lock).  Hidden visibility: each
   library gets its own private copy, per the comment above.  */
volatile unsigned char __sparc32_atomic_locks[64]
  __attribute__ ((nocommon, section (".gnu.linkonce.b.__sparc32_atomic_locks"
				     __sec_comment),
		  visibility ("hidden")));

/* Acquire the global lock guarding ADDR: hash the address down to one
   of the 64 lock bytes, then spin on ldstub (atomic test-and-set of a
   byte to 0xff) until the previous value was zero, i.e. until we are
   the thread that took the lock.  */
#define __sparc32_atomic_do_lock(addr) \
  do								      \
    {								      \
      unsigned int __old_lock;					      \
      unsigned int __idx = (((long) addr >> 2) ^ ((long) addr >> 12)) \
			   & 63;				      \
      do							      \
	__asm __volatile ("ldstub %1, %0"			      \
			  : "=r" (__old_lock),			      \
			    "=m" (__sparc32_atomic_locks[__idx])      \
			  : "m" (__sparc32_atomic_locks[__idx])	      \
			  : "memory");				      \
      while (__old_lock);					      \
    }								      \
  while (0)

/* Release the lock guarding ADDR: recompute the same hash as the lock
   macro and clear the lock byte.  The empty asm is a compiler barrier
   keeping the protected stores from being moved past the unlock.  */
#define __sparc32_atomic_do_unlock(addr) \
  do								      \
    {								      \
      __sparc32_atomic_locks[(((long) addr >> 2)		      \
			      ^ ((long) addr >> 12)) & 63] = 0;	      \
      __asm __volatile ("" ::: "memory");			      \
    }								      \
  while (0)

/* Lock variant for the 24-bit operations: the top byte of the 32-bit
   value at ADDR itself serves as the ldstub lock, so spin directly on
   the first (most significant, big-endian) byte of *ADDR.  */
#define __sparc32_atomic_do_lock24(addr) \
  do								      \
    {								      \
      unsigned int __old_lock;					      \
      do							      \
	__asm __volatile ("ldstub %1, %0"			      \
			  : "=r" (__old_lock), "=m" (*(addr))	      \
			  : "m" (*(addr))			      \
			  : "memory");				      \
      while (__old_lock);					      \
    }								      \
  while (0)

/* Release the embedded 24-bit lock: compiler barrier first so the
   protected stores complete, then clear the lock byte (the most
   significant byte of *ADDR on big-endian sparc).  */
#define __sparc32_atomic_do_unlock24(addr) \
  do								      \
    {								      \
      __asm __volatile ("" ::: "memory");			      \
      *(char *) (addr) = 0;					      \
    }								      \
  while (0)


#ifndef SHARED
/* 32-bit compare-and-swap via the v9 "cas" instruction, used by
   statically linked code when it detects at run time that it is on a
   v9 CPU.  The operands are pinned to %g1/%g5/%g6 because the
   instruction is emitted as a raw .word and thus cannot let the
   compiler pick registers.  The unions reinterpret OLDVAL/NEWVAL as
   uint32_t without an aliasing-violating cast.  */
# define __v9_compare_and_exchange_val_32_acq(mem, newval, oldval) \
({union { __typeof (oldval) a; uint32_t v; } oldval_arg = { .a = (oldval) }; \
  union { __typeof (newval) a; uint32_t v; } newval_arg = { .a = (newval) }; \
  register uint32_t __acev_tmp __asm ("%g6");			      \
  register __typeof (mem) __acev_mem __asm ("%g1") = (mem);	      \
  register uint32_t __acev_oldval __asm ("%g5");		      \
  __acev_tmp = newval_arg.v;					      \
  __acev_oldval = oldval_arg.v;					      \
  /* .word 0xcde05005 is cas [%g1], %g5, %g6.  Can't use cas here though, \
     because as will then mark the object file as V8+ arch.  */	      \
  __asm __volatile (".word 0xcde05005"				      \
		    : "+r" (__acev_tmp), "=m" (*__acev_mem)	      \
		    : "r" (__acev_oldval), "m" (*__acev_mem),	      \
		      "r" (__acev_mem) : "memory");		      \
  (__typeof (oldval)) __acev_tmp; })
#endif

/* The only basic operation needed is compare and exchange.  */
/* Pre-v9 lock-based CAS: returns the old contents of *MEM; NEWVAL is
   stored only if the old contents equal OLDVAL.  Atomicity comes from
   holding the hashed global lock around the read-test-write.  */
#define __v7_compare_and_exchange_val_acq(mem, newval, oldval) \
  ({ __typeof (mem) __acev_memp = (mem);			      \
     __typeof (*mem) __acev_ret;				      \
     __typeof (*mem) __acev_newval = (newval);			      \
								      \
     __sparc32_atomic_do_lock (__acev_memp);			      \
     __acev_ret = *__acev_memp;					      \
     if (__acev_ret == (oldval))				      \
       *__acev_memp = __acev_newval;				      \
     __sparc32_atomic_do_unlock (__acev_memp);			      \
     __acev_ret; })

/* Boolean flavor of the lock-based CAS: returns 0 if the exchange was
   performed (old contents equalled OLDVAL), 1 otherwise.  */
#define __v7_compare_and_exchange_bool_acq(mem, newval, oldval) \
  ({ __typeof (mem) __aceb_memp = (mem);			      \
     int __aceb_ret;						      \
     __typeof (*mem) __aceb_newval = (newval);			      \
								      \
     __sparc32_atomic_do_lock (__aceb_memp);			      \
     __aceb_ret = 0;						      \
     if (*__aceb_memp == (oldval))				      \
       *__aceb_memp = __aceb_newval;				      \
     else							      \
       __aceb_ret = 1;						      \
     __sparc32_atomic_do_unlock (__aceb_memp);			      \
     __aceb_ret; })

/* Atomically store NEWVAL into *MEM and yield the previous contents,
   serialized through the hashed global ldstub lock for MEM.  */
#define __v7_exchange_acq(mem, newval) \
  ({ __typeof (mem) __xchg_memp = (mem);			      \
     __typeof (*mem) __xchg_prev;				      \
     __typeof (*mem) __xchg_val = (newval);			      \
								      \
     __sparc32_atomic_do_lock (__xchg_memp);			      \
     __xchg_prev = *__xchg_memp;				      \
     *__xchg_memp = __xchg_val;					      \
     __sparc32_atomic_do_unlock (__xchg_memp);			      \
     __xchg_prev; })

/* Atomically add VALUE to *MEM and yield the value *MEM held before
   the addition, serialized through the hashed global lock.  */
#define __v7_exchange_and_add(mem, value) \
  ({ __typeof (mem) __xaa_memp = (mem);				      \
     __typeof (*mem) __xaa_prev;				      \
								      \
     __sparc32_atomic_do_lock (__xaa_memp);			      \
     __xaa_prev = *__xaa_memp;					      \
     *__xaa_memp = __xaa_prev + (value);			      \
     __sparc32_atomic_do_unlock (__xaa_memp);			      \
     __xaa_prev; })

/* Special versions, which guarantee that top 8 bits of all values
   are cleared and use those bits as the ldstub lock.  */
/* 24-bit CAS.  On success the plain store of NEWVAL (whose top byte
   is guaranteed clear) itself releases the embedded lock; only the
   failure path needs an explicit unlock.  */
#define __v7_compare_and_exchange_val_24_acq(mem, newval, oldval) \
  ({ __typeof (mem) __acev_memp = (mem);			      \
     __typeof (*mem) __acev_ret;				      \
     __typeof (*mem) __acev_newval = (newval);			      \
								      \
     __sparc32_atomic_do_lock24 (__acev_memp);			      \
     __acev_ret = *__acev_memp & 0xffffff;			      \
     if (__acev_ret == (oldval))				      \
       *__acev_memp = __acev_newval;				      \
     else							      \
       __sparc32_atomic_do_unlock24 (__acev_memp);		      \
     __asm __volatile ("" ::: "memory");			      \
     __acev_ret; })

/* 24-bit exchange with release semantics: return the old 24-bit value
   and store NEWVAL; the store of NEWVAL (top byte clear) doubles as
   the release of the embedded ldstub lock.  */
#define __v7_exchange_24_rel(mem, newval) \
  ({ __typeof (mem) __acev_memp = (mem);			      \
     __typeof (*mem) __acev_ret;				      \
     __typeof (*mem) __acev_newval = (newval);			      \
								      \
     __sparc32_atomic_do_lock24 (__acev_memp);			      \
     __acev_ret = *__acev_memp & 0xffffff;			      \
     *__acev_memp = __acev_newval;				      \
     __asm __volatile ("" ::: "memory");			      \
     __acev_ret; })

#ifdef SHARED

/* When dynamically linked, we assume pre-v9 libraries are only ever
   used on pre-v9 CPU.  */
# define __atomic_is_v9 0

/* All generic atomic operations forward straight to the lock-based
   v7 implementations.  */
# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  __v7_compare_and_exchange_val_acq (mem, newval, oldval)

# define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  __v7_compare_and_exchange_bool_acq (mem, newval, oldval)

# define atomic_exchange_acq(mem, newval) \
  __v7_exchange_acq (mem, newval)

# define atomic_exchange_and_add(mem, value) \
  __v7_exchange_and_add (mem, value)

/* The 24-bit operations are only defined for 4-byte objects.  */
# define atomic_compare_and_exchange_val_24_acq(mem, newval, oldval) \
  ({								      \
     if (sizeof (*mem) != 4)					      \
       abort ();						      \
     __v7_compare_and_exchange_val_24_acq (mem, newval, oldval); })

# define atomic_exchange_24_rel(mem, newval) \
  ({								      \
     if (sizeof (*mem) != 4)					      \
       abort ();						      \
     __v7_exchange_24_rel (mem, newval); })

/* On a pre-v9 CPU a compiler barrier is sufficient.  */
# define atomic_full_barrier() __asm ("" ::: "memory")
# define atomic_read_barrier() atomic_full_barrier ()
# define atomic_write_barrier() atomic_full_barrier ()

#else

/* In libc.a/libpthread.a etc. we don't know if we'll be run on
   pre-v9 or v9 CPU.  To be interoperable with dynamically linked
   apps on v9 CPUs e.g. with process shared primitives, use cas insn
   on v9 CPUs and ldstub on pre-v9.  */

/* Declared weak so taking its address can be tested below: the symbol
   may be absent in some static-link configurations.  */
extern uint64_t _dl_hwcap __attribute__((weak));
/* Nonzero iff the HWCAP bits say we are on a v9 CPU; biased to expect
   v9 when the hwcap word is available at all.  */
# define __atomic_is_v9 \
  (__builtin_expect (&_dl_hwcap != 0, 1) \
   && __builtin_expect (_dl_hwcap & HWCAP_SPARC_V9, HWCAP_SPARC_V9))

/* Runtime-dispatched CAS: v9 cas instruction when available, the
   lock-based v7 fallback otherwise.  Only 4-byte objects supported.  */
# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  ({								      \
     __typeof (*mem) __acev_wret;				      \
     if (sizeof (*mem) != 4)					      \
       abort ();						      \
     if (__atomic_is_v9)					      \
       __acev_wret						      \
	 = __v9_compare_and_exchange_val_32_acq (mem, newval, oldval);\
     else							      \
       __acev_wret						      \
	 = __v7_compare_and_exchange_val_acq (mem, newval, oldval);   \
     __acev_wret; })

/* Runtime-dispatched boolean CAS: 0 on success, nonzero on failure.
   The v9 path derives the boolean by comparing the value returned by
   the cas instruction against the expected old value.  */
# define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  ({								      \
     int __acev_wret;						      \
     if (sizeof (*mem) != 4)					      \
       abort ();						      \
     if (__atomic_is_v9)					      \
       {							      \
	 __typeof (oldval) __acev_woldval = (oldval);		      \
	 __acev_wret						      \
	   = __v9_compare_and_exchange_val_32_acq (mem, newval,	      \
						   __acev_woldval)    \
	     != __acev_woldval;					      \
       }							      \
     else							      \
       __acev_wret						      \
	 = __v7_compare_and_exchange_bool_acq (mem, newval, oldval);  \
     __acev_wret; })

/* Runtime-dispatched exchange.  On v9 there is no plain swap
   instruction usable here, so it is synthesized as a CAS retry loop
   (read the current value, cas it against NEWVAL, repeat on
   interference).  */
# define atomic_exchange_rel(mem, newval) \
  ({								      \
     __typeof (*mem) __acev_wret;				      \
     if (sizeof (*mem) != 4)					      \
       abort ();						      \
     if (__atomic_is_v9)					      \
       {							      \
	 __typeof (mem) __acev_wmemp = (mem);			      \
	 __typeof (*(mem)) __acev_wval = (newval);		      \
	 do							      \
	   __acev_wret = *__acev_wmemp;				      \
	 while (__builtin_expect					      \
		(__v9_compare_and_exchange_val_32_acq (__acev_wmemp,  \
						       __acev_wval,   \
						       __acev_wret)   \
		 != __acev_wret, 0));				      \
       }							      \
     else							      \
       __acev_wret = __v7_exchange_acq (mem, newval);		      \
     __acev_wret; })

/* Runtime-dispatched 24-bit CAS.  On v9 a plain 32-bit cas works
   because 24-bit values are guaranteed to have a clear top byte; only
   the pre-v9 path needs the embedded-lock variant.  */
# define atomic_compare_and_exchange_val_24_acq(mem, newval, oldval) \
  ({								      \
     __typeof (*mem) __acev_wret;				      \
     if (sizeof (*mem) != 4)					      \
       abort ();						      \
     if (__atomic_is_v9)					      \
       __acev_wret						      \
	 = __v9_compare_and_exchange_val_32_acq (mem, newval, oldval);\
     else							      \
       __acev_wret						      \
	 = __v7_compare_and_exchange_val_24_acq (mem, newval, oldval);\
     __acev_wret; })

/* Runtime-dispatched 24-bit exchange: a plain 32-bit exchange on v9
   (top byte is clear by the 24-bit invariant), the embedded-lock
   variant on pre-v9.  */
# define atomic_exchange_24_rel(mem, newval) \
  ({								      \
     __typeof (*mem) __acev_w24ret;				      \
     if (sizeof (*mem) != 4)					      \
       abort ();						      \
     if (__atomic_is_v9)					      \
       __acev_w24ret = atomic_exchange_rel (mem, newval);	      \
     else							      \
       __acev_w24ret = __v7_exchange_24_rel (mem, newval);	      \
     __acev_w24ret; })

/* Memory barriers.  On v9 use membar, encoded as a raw .word so the
   assembler does not mark the object file as V8+ arch; on pre-v9 a
   compiler barrier is all that is required.  */
#define atomic_full_barrier() \
  do {								      \
    if (__atomic_is_v9)						      \
      /* membar #LoadLoad | #LoadStore | #StoreLoad | #StoreStore */  \
      __asm __volatile (".word 0x8143e00f" : : : "memory");	      \
    else							      \
      __asm __volatile ("" : : : "memory");			      \
  } while (0)

#define atomic_read_barrier() \
  do {								      \
    if (__atomic_is_v9)						      \
      /* membar #LoadLoad | #LoadStore */			      \
      __asm __volatile (".word 0x8143e005" : : : "memory");	      \
    else							      \
      __asm __volatile ("" : : : "memory");			      \
  } while (0)

#define atomic_write_barrier() \
  do {								      \
    if (__atomic_is_v9)						      \
      /* membar #LoadStore | #StoreStore */			      \
      __asm __volatile (".word 0x8143e00c" : : : "memory");	      \
    else							      \
      __asm __volatile ("" : : : "memory");			      \
  } while (0)

#endif

#include <sysdep.h>

#endif /* atomic-machine.h */