/* Atomic operations used inside libc.  Linux/SH version.
   Copyright (C) 2003-2013 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <stdint.h>


typedef int8_t atomic8_t;
typedef uint8_t uatomic8_t;
typedef int_fast8_t atomic_fast8_t;
typedef uint_fast8_t uatomic_fast8_t;

typedef int16_t atomic16_t;
typedef uint16_t uatomic16_t;
typedef int_fast16_t atomic_fast16_t;
typedef uint_fast16_t uatomic_fast16_t;

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;

/* The SH kernel implements gUSA ("g" User Space Atomicity) support
   for user-space atomicity; the atomicity macros below use this scheme.

   References:
   Niibe Yutaka, "gUSA: Simple and Efficient User Space Atomicity
   Emulation with Little Kernel Modification", Linux Conference 2002,
   Japan.  http://lc.linux.or.jp/lc2002/papers/niibe0919h.pdf (in
   Japanese).

   B.N. Bershad, D. Redell, and J. Ellis, "Fast Mutual Exclusion for
   Uniprocessors", Proceedings of the Fifth Architectural Support for
   Programming Languages and Operating Systems (ASPLOS), pp. 223-233,
   October 1992.  http://www.cs.washington.edu/homes/bershad/Papers/Rcs.ps

   SuperH ABI:
      r15: -(size of atomic instruction sequence) < 0
      r0:  end point
      r1:  saved stack pointer
*/

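/* Illustrative sketch only (not part of this interface): with the ABI
   above, an atomic 32-bit increment of the word addressed by r4 (the
   registers r4 and r5 are chosen arbitrarily here) would look roughly
   like

        mova    1f,r0           ! r0 = end point (address of label 1)
        mov     r15,r1          ! r1 = saved stack pointer
        .align  2
        mov     #(0f-1f),r15    ! r15 = -(size of the 0:..1: sequence)
     0: mov.l   @r4,r5          ! load the old value
        add     #1,r5           ! compute the new value
        mov.l   r5,@r4          ! store it back
     1: mov     r1,r15          ! leave the region, restore the stack pointer

   While r15 holds the negative region size and the PC lies between the
   start of the region (r0 + r15) and the end point (r0), the kernel
   treats the sequence as a restartable critical section: a thread
   preempted inside it is resumed at the start of the region, so the
   load-modify-store is observed as atomic (on uniprocessors, as in the
   papers cited above).  */
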
#if __GNUC_PREREQ (4, 7)
# define rNOSP "u"
#else
# define rNOSP "r"
#endif

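/* rNOSP is the constraint used for asm operands of the gUSA sequences
   below.  Because those sequences temporarily replace r15 (the stack
   pointer) with the negative region size, their operands must never be
   allocated to r15; GCC 4.7 and later provide the SH "u" constraint for
   that, while older compilers only offer plain "r".  */

/* Compare *MEM against OLDVAL and, if they are equal, store NEWVAL; the
   value found in *MEM is returned either way.  The load, compare,
   conditional branch and store between the 0: and 1: labels form the
   restartable gUSA region.  */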
#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
  ({ __typeof (*(mem)) __result; \
     __asm __volatile ("\
        mova 1f,r0\n\
        .align 2\n\
        mov r15,r1\n\
        mov #(0f-1f),r15\n\
     0: mov.b @%1,%0\n\
        cmp/eq %0,%3\n\
        bf 1f\n\
        mov.b %2,@%1\n\
     1: mov r1,r15"\
        : "=&r" (__result) : rNOSP (mem), rNOSP (newval), rNOSP (oldval) \
        : "r0", "r1", "t", "memory"); \
     __result; })

#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
  ({ __typeof (*(mem)) __result; \
     __asm __volatile ("\
        mova 1f,r0\n\
        .align 2\n\
        mov r15,r1\n\
        mov #(0f-1f),r15\n\
     0: mov.w @%1,%0\n\
        cmp/eq %0,%3\n\
        bf 1f\n\
        mov.w %2,@%1\n\
     1: mov r1,r15"\
        : "=&r" (__result) : rNOSP (mem), rNOSP (newval), rNOSP (oldval) \
        : "r0", "r1", "t", "memory"); \
     __result; })

#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  ({ __typeof (*(mem)) __result; \
     __asm __volatile ("\
        mova 1f,r0\n\
        .align 2\n\
        mov r15,r1\n\
        mov #(0f-1f),r15\n\
     0: mov.l @%1,%0\n\
        cmp/eq %0,%3\n\
        bf 1f\n\
        mov.l %2,@%1\n\
     1: mov r1,r15"\
        : "=&r" (__result) : rNOSP (mem), rNOSP (newval), rNOSP (oldval) \
        : "r0", "r1", "t", "memory"); \
     __result; })

/* XXX We do not really need 64-bit compare-and-exchange.  At least
   not at the moment.  Using it would mean causing portability
   problems since not many other 32-bit architectures have support for
   such an operation.  So don't define any code for now.  */

# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  (abort (), (__typeof (*mem)) 0)

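/* Atomically add VALUE to *MEM and return the value *MEM held before
   the addition.  For the 1-, 2- and 4-byte cases the load-add-store
   between the 0: and 1: labels is a gUSA region; other sizes fall back
   to the (unsupported, aborting) 64-bit compare-and-exchange.  */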
#define atomic_exchange_and_add(mem, value) \
  ({ __typeof (*(mem)) __result, __tmp, __value = (value); \
     if (sizeof (*(mem)) == 1) \
       __asm __volatile ("\
        mova 1f,r0\n\
        .align 2\n\
        mov r15,r1\n\
        mov #(0f-1f),r15\n\
     0: mov.b @%2,%0\n\
        mov %1,r2\n\
        add %0,r2\n\
        mov.b r2,@%2\n\
     1: mov r1,r15"\
        : "=&r" (__result), "=&r" (__tmp) : rNOSP (mem), "1" (__value) \
        : "r0", "r1", "r2", "memory"); \
     else if (sizeof (*(mem)) == 2) \
       __asm __volatile ("\
        mova 1f,r0\n\
        .align 2\n\
        mov r15,r1\n\
        mov #(0f-1f),r15\n\
     0: mov.w @%2,%0\n\
        mov %1,r2\n\
        add %0,r2\n\
        mov.w r2,@%2\n\
     1: mov r1,r15"\
        : "=&r" (__result), "=&r" (__tmp) : rNOSP (mem), "1" (__value) \
        : "r0", "r1", "r2", "memory"); \
     else if (sizeof (*(mem)) == 4) \
       __asm __volatile ("\
        mova 1f,r0\n\
        .align 2\n\
        mov r15,r1\n\
        mov #(0f-1f),r15\n\
     0: mov.l @%2,%0\n\
        mov %1,r2\n\
        add %0,r2\n\
        mov.l r2,@%2\n\
     1: mov r1,r15"\
        : "=&r" (__result), "=&r" (__tmp) : rNOSP (mem), "1" (__value) \
        : "r0", "r1", "r2", "memory"); \
     else \
       { \
         __typeof (mem) memp = (mem); \
         do \
           __result = *memp; \
         while (__arch_compare_and_exchange_val_64_acq \
                (memp, __result + __value, __result) != __result); \
         (void) __value; \
       } \
     __result; })

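/* Atomically add VALUE to *MEM, discarding the previous value.  */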
#define atomic_add(mem, value) \
  (void) ({ __typeof (*(mem)) __tmp, __value = (value); \
            if (sizeof (*(mem)) == 1) \
              __asm __volatile ("\
        mova 1f,r0\n\
        mov r15,r1\n\
        .align 2\n\
        mov #(0f-1f),r15\n\
     0: mov.b @%1,r2\n\
        add %0,r2\n\
        mov.b r2,@%1\n\
     1: mov r1,r15"\
        : "=&r" (__tmp) : rNOSP (mem), "0" (__value) \
        : "r0", "r1", "r2", "memory"); \
            else if (sizeof (*(mem)) == 2) \
              __asm __volatile ("\
        mova 1f,r0\n\
        mov r15,r1\n\
        .align 2\n\
        mov #(0f-1f),r15\n\
     0: mov.w @%1,r2\n\
        add %0,r2\n\
        mov.w r2,@%1\n\
     1: mov r1,r15"\
        : "=&r" (__tmp) : rNOSP (mem), "0" (__value) \
        : "r0", "r1", "r2", "memory"); \
            else if (sizeof (*(mem)) == 4) \
              __asm __volatile ("\
        mova 1f,r0\n\
        mov r15,r1\n\
        .align 2\n\
        mov #(0f-1f),r15\n\
     0: mov.l @%1,r2\n\
        add %0,r2\n\
        mov.l r2,@%1\n\
     1: mov r1,r15"\
        : "=&r" (__tmp) : rNOSP (mem), "0" (__value) \
        : "r0", "r1", "r2", "memory"); \
            else \
              { \
                __typeof (*(mem)) oldval; \
                __typeof (mem) memp = (mem); \
                do \
                  oldval = *memp; \
                while (__arch_compare_and_exchange_val_64_acq \
                       (memp, oldval + __value, oldval) != oldval); \
                (void) __value; \
              } \
            })

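/* Atomically add VALUE to *MEM and return nonzero iff the resulting
   value is negative: after the gUSA region, SHAL shifts the sign bit of
   the sum into T and MOVT copies T into the result.  */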
#define atomic_add_negative(mem, value) \
  ({ unsigned char __result; \
     __typeof (*(mem)) __tmp, __value = (value); \
     if (sizeof (*(mem)) == 1) \
       __asm __volatile ("\
        mova 1f,r0\n\
        mov r15,r1\n\
        .align 2\n\
        mov #(0f-1f),r15\n\
     0: mov.b @%2,r2\n\
        add %1,r2\n\
        mov.b r2,@%2\n\
     1: mov r1,r15\n\
        shal r2\n\
        movt %0"\
        : "=r" (__result), "=&r" (__tmp) : rNOSP (mem), "1" (__value) \
        : "r0", "r1", "r2", "t", "memory"); \
     else if (sizeof (*(mem)) == 2) \
       __asm __volatile ("\
        mova 1f,r0\n\
        mov r15,r1\n\
        .align 2\n\
        mov #(0f-1f),r15\n\
     0: mov.w @%2,r2\n\
        add %1,r2\n\
        mov.w r2,@%2\n\
     1: mov r1,r15\n\
        shal r2\n\
        movt %0"\
        : "=r" (__result), "=&r" (__tmp) : rNOSP (mem), "1" (__value) \
        : "r0", "r1", "r2", "t", "memory"); \
     else if (sizeof (*(mem)) == 4) \
       __asm __volatile ("\
        mova 1f,r0\n\
        mov r15,r1\n\
        .align 2\n\
        mov #(0f-1f),r15\n\
     0: mov.l @%2,r2\n\
        add %1,r2\n\
        mov.l r2,@%2\n\
     1: mov r1,r15\n\
        shal r2\n\
        movt %0"\
        : "=r" (__result), "=&r" (__tmp) : rNOSP (mem), "1" (__value) \
        : "r0", "r1", "r2", "t", "memory"); \
     else \
       abort (); \
     __result; })

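/* Atomically add VALUE to *MEM and return nonzero iff the resulting
   value is zero: TST sets T when the sum is zero and MOVT copies T into
   the result.  */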
#define atomic_add_zero(mem, value) \
  ({ unsigned char __result; \
     __typeof (*(mem)) __tmp, __value = (value); \
     if (sizeof (*(mem)) == 1) \
       __asm __volatile ("\
        mova 1f,r0\n\
        mov r15,r1\n\
        .align 2\n\
        mov #(0f-1f),r15\n\
     0: mov.b @%2,r2\n\
        add %1,r2\n\
        mov.b r2,@%2\n\
     1: mov r1,r15\n\
        tst r2,r2\n\
        movt %0"\
        : "=r" (__result), "=&r" (__tmp) : rNOSP (mem), "1" (__value) \
        : "r0", "r1", "r2", "t", "memory"); \
     else if (sizeof (*(mem)) == 2) \
       __asm __volatile ("\
        mova 1f,r0\n\
        mov r15,r1\n\
        .align 2\n\
        mov #(0f-1f),r15\n\
     0: mov.w @%2,r2\n\
        add %1,r2\n\
        mov.w r2,@%2\n\
     1: mov r1,r15\n\
        tst r2,r2\n\
        movt %0"\
        : "=r" (__result), "=&r" (__tmp) : rNOSP (mem), "1" (__value) \
        : "r0", "r1", "r2", "t", "memory"); \
     else if (sizeof (*(mem)) == 4) \
       __asm __volatile ("\
        mova 1f,r0\n\
        mov r15,r1\n\
        .align 2\n\
        mov #(0f-1f),r15\n\
     0: mov.l @%2,r2\n\
        add %1,r2\n\
        mov.l r2,@%2\n\
     1: mov r1,r15\n\
        tst r2,r2\n\
        movt %0"\
        : "=r" (__result), "=&r" (__tmp) : rNOSP (mem), "1" (__value) \
        : "r0", "r1", "r2", "t", "memory"); \
     else \
       abort (); \
     __result; })

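/* Increment or decrement *MEM by one and return nonzero iff the new
   value is zero, in terms of atomic_add_zero.  */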
#define atomic_increment_and_test(mem) atomic_add_zero((mem), 1)
#define atomic_decrement_and_test(mem) atomic_add_zero((mem), -1)

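/* Atomically set bit BIT of *MEM; the previous value is not returned.  */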
#define atomic_bit_set(mem, bit) \
  (void) ({ unsigned int __mask = 1 << (bit); \
            if (sizeof (*(mem)) == 1) \
              __asm __volatile ("\
        mova 1f,r0\n\
        mov r15,r1\n\
        .align 2\n\
        mov #(0f-1f),r15\n\
     0: mov.b @%0,r2\n\
        or %1,r2\n\
        mov.b r2,@%0\n\
     1: mov r1,r15"\
        : : rNOSP (mem), rNOSP (__mask) \
        : "r0", "r1", "r2", "memory"); \
            else if (sizeof (*(mem)) == 2) \
              __asm __volatile ("\
        mova 1f,r0\n\
        mov r15,r1\n\
        .align 2\n\
        mov #(0f-1f),r15\n\
     0: mov.w @%0,r2\n\
        or %1,r2\n\
        mov.w r2,@%0\n\
     1: mov r1,r15"\
        : : rNOSP (mem), rNOSP (__mask) \
        : "r0", "r1", "r2", "memory"); \
            else if (sizeof (*(mem)) == 4) \
              __asm __volatile ("\
        mova 1f,r0\n\
        mov r15,r1\n\
        .align 2\n\
        mov #(0f-1f),r15\n\
     0: mov.l @%0,r2\n\
        or %1,r2\n\
        mov.l r2,@%0\n\
     1: mov r1,r15"\
        : : rNOSP (mem), rNOSP (__mask) \
        : "r0", "r1", "r2", "memory"); \
            else \
              abort (); \
            })

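/* Atomically set bit BIT of *MEM and return nonzero iff that bit was set
   before.  The old value is copied to r3 inside the gUSA region and
   ANDed with the mask once the region has been left.  */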
#define atomic_bit_test_set(mem, bit) \
  ({ unsigned int __mask = 1 << (bit); \
     unsigned int __result = __mask; \
     if (sizeof (*(mem)) == 1) \
       __asm __volatile ("\
        mova 1f,r0\n\
        .align 2\n\
        mov r15,r1\n\
        mov #(0f-1f),r15\n\
     0: mov.b @%2,r2\n\
        mov r2,r3\n\
        or %1,r2\n\
        mov.b r2,@%2\n\
     1: mov r1,r15\n\
        and r3,%0"\
        : "=&r" (__result), "=&r" (__mask) \
        : rNOSP (mem), "0" (__result), "1" (__mask) \
        : "r0", "r1", "r2", "r3", "memory"); \
     else if (sizeof (*(mem)) == 2) \
       __asm __volatile ("\
        mova 1f,r0\n\
        .align 2\n\
        mov r15,r1\n\
        mov #(0f-1f),r15\n\
     0: mov.w @%2,r2\n\
        mov r2,r3\n\
        or %1,r2\n\
        mov.w r2,@%2\n\
     1: mov r1,r15\n\
        and r3,%0"\
        : "=&r" (__result), "=&r" (__mask) \
        : rNOSP (mem), "0" (__result), "1" (__mask) \
        : "r0", "r1", "r2", "r3", "memory"); \
     else if (sizeof (*(mem)) == 4) \
       __asm __volatile ("\
        mova 1f,r0\n\
        .align 2\n\
        mov r15,r1\n\
        mov #(0f-1f),r15\n\
     0: mov.l @%2,r2\n\
        mov r2,r3\n\
        or %1,r2\n\
        mov.l r2,@%2\n\
     1: mov r1,r15\n\
        and r3,%0"\
        : "=&r" (__result), "=&r" (__mask) \
        : rNOSP (mem), "0" (__result), "1" (__mask) \
        : "r0", "r1", "r2", "r3", "memory"); \
     else \
       abort (); \
     __result; })