/* Atomic operations used inside libc.  Linux/SH version.
   Copyright (C) 2003-2024 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#define __HAVE_64B_ATOMICS 0
#define USE_ATOMIC_COMPILER_BUILTINS 0

/* XXX Is this actually correct?  */
#define ATOMIC_EXCHANGE_USES_CAS 1

/* The SH kernel implements gUSA ("g" User Space Atomicity) support
   for user-space atomicity.  The atomicity macros below use this
   scheme.

  Reference:
    Niibe Yutaka, "gUSA: Simple and Efficient User Space Atomicity
    Emulation with Little Kernel Modification", Linux Conference 2002,
    Japan.  http://lc.linux.or.jp/lc2002/papers/niibe0919h.pdf (in
    Japanese).

    B. N. Bershad, D. Redell, and J. Ellis, "Fast Mutual Exclusion for
    Uniprocessors", Proceedings of the Fifth International Conference
    on Architectural Support for Programming Languages and Operating
    Systems (ASPLOS), pp. 223-233, October 1992.
    http://www.cs.washington.edu/homes/bershad/Papers/Rcs.ps

  SuperH gUSA ABI:
      r15: -(size of the atomic instruction sequence) < 0
      r0:  end point of the sequence
      r1:  saved stack pointer
*/
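
/* Illustrative sketch, not part of the original comment above: while
   r15 is negative the kernel treats the task as being inside an
   atomic sequence, and if it must preempt the task it restarts
   execution at r0 + r15 (the start of the sequence, since r0 holds
   the end point and r15 the negative size) instead of resuming in
   the middle.  Assuming no preemption, the 32-bit compare-and-swap
   sequence below is therefore equivalent to this C sketch:

     __typeof (*mem) __result = *mem;   // 0: mov.l @%1,%0
     if (__result == oldval)            //    cmp/eq %0,%3 ; bf 1f
       *mem = newval;                   //    mov.l %2,@%1
     // __result is the value of        // 1: mov r1,r15
     // the whole expression.
*/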

/* Compare *MEM against OLDVAL and, on a match, store NEWVAL; return
   the value *MEM held before the operation (acquire semantics).  */
#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
  ({ __typeof (*(mem)) __result; \
     __asm __volatile ("\
        mova 1f,r0\n\
        .align 2\n\
        mov r15,r1\n\
        mov #(0f-1f),r15\n\
     0: mov.b @%1,%0\n\
        cmp/eq %0,%3\n\
        bf 1f\n\
        mov.b %2,@%1\n\
     1: mov r1,r15"\
        : "=&r" (__result) : "u" (mem), "u" (newval), "u" (oldval) \
        : "r0", "r1", "t", "memory"); \
     __result; })

#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
  ({ __typeof (*(mem)) __result; \
     __asm __volatile ("\
        mova 1f,r0\n\
        .align 2\n\
        mov r15,r1\n\
        mov #(0f-1f),r15\n\
     0: mov.w @%1,%0\n\
        cmp/eq %0,%3\n\
        bf 1f\n\
        mov.w %2,@%1\n\
     1: mov r1,r15"\
        : "=&r" (__result) : "u" (mem), "u" (newval), "u" (oldval) \
        : "r0", "r1", "t", "memory"); \
     __result; })

#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  ({ __typeof (*(mem)) __result; \
     __asm __volatile ("\
        mova 1f,r0\n\
        .align 2\n\
        mov r15,r1\n\
        mov #(0f-1f),r15\n\
     0: mov.l @%1,%0\n\
        cmp/eq %0,%3\n\
        bf 1f\n\
        mov.l %2,@%1\n\
     1: mov r1,r15"\
        : "=&r" (__result) : "u" (mem), "u" (newval), "u" (oldval) \
        : "r0", "r1", "t", "memory"); \
     __result; })

/* XXX We do not really need 64-bit compare-and-exchange.  At least
   not at the moment.  Using it would cause portability problems,
   since few other 32-bit architectures support such an operation.
   So don't define any code for now.  */

#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  (abort (), (__typeof (*mem)) 0)

/* Add VALUE to *MEM and return the value *MEM held before the
   addition.  */
#define atomic_exchange_and_add(mem, value) \
  ({ __typeof (*(mem)) __result, __tmp, __value = (value); \
     if (sizeof (*(mem)) == 1) \
       __asm __volatile ("\
        mova 1f,r0\n\
        .align 2\n\
        mov r15,r1\n\
        mov #(0f-1f),r15\n\
     0: mov.b @%2,%0\n\
        mov %1,r2\n\
        add %0,r2\n\
        mov.b r2,@%2\n\
     1: mov r1,r15"\
        : "=&r" (__result), "=&r" (__tmp) : "u" (mem), "1" (__value) \
        : "r0", "r1", "r2", "memory"); \
     else if (sizeof (*(mem)) == 2) \
       __asm __volatile ("\
        mova 1f,r0\n\
        .align 2\n\
        mov r15,r1\n\
        mov #(0f-1f),r15\n\
     0: mov.w @%2,%0\n\
        mov %1,r2\n\
        add %0,r2\n\
        mov.w r2,@%2\n\
     1: mov r1,r15"\
        : "=&r" (__result), "=&r" (__tmp) : "u" (mem), "1" (__value) \
        : "r0", "r1", "r2", "memory"); \
     else if (sizeof (*(mem)) == 4) \
       __asm __volatile ("\
        mova 1f,r0\n\
        .align 2\n\
        mov r15,r1\n\
        mov #(0f-1f),r15\n\
     0: mov.l @%2,%0\n\
        mov %1,r2\n\
        add %0,r2\n\
        mov.l r2,@%2\n\
     1: mov r1,r15"\
        : "=&r" (__result), "=&r" (__tmp) : "u" (mem), "1" (__value) \
        : "r0", "r1", "r2", "memory"); \
     else \
       { \
         /* Unreachable: the 64-bit CAS above aborts.  Retry until the
            CAS succeeds, i.e. until it returns the expected value.  */ \
         __typeof (mem) memp = (mem); \
         do \
           __result = *memp; \
         while (__arch_compare_and_exchange_val_64_acq \
                 (memp, __result + __value, __result) != __result); \
         (void) __value; \
       } \
     __result; })

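/* Usage sketch for atomic_exchange_and_add; the variable names are
   hypothetical, not part of this header:

     int counter = 0;
     int old = atomic_exchange_and_add (&counter, 5);
     // old == 0, counter == 5; the load, add and store ran as one
     // restartable gUSA sequence.
*/
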
/* Add VALUE to *MEM without returning the old value.  */
#define atomic_add(mem, value) \
  (void) ({ __typeof (*(mem)) __tmp, __value = (value); \
     if (sizeof (*(mem)) == 1) \
       __asm __volatile ("\
        mova 1f,r0\n\
        mov r15,r1\n\
        .align 2\n\
        mov #(0f-1f),r15\n\
     0: mov.b @%1,r2\n\
        add %0,r2\n\
        mov.b r2,@%1\n\
     1: mov r1,r15"\
        : "=&r" (__tmp) : "u" (mem), "0" (__value) \
        : "r0", "r1", "r2", "memory"); \
     else if (sizeof (*(mem)) == 2) \
       __asm __volatile ("\
        mova 1f,r0\n\
        mov r15,r1\n\
        .align 2\n\
        mov #(0f-1f),r15\n\
     0: mov.w @%1,r2\n\
        add %0,r2\n\
        mov.w r2,@%1\n\
     1: mov r1,r15"\
        : "=&r" (__tmp) : "u" (mem), "0" (__value) \
        : "r0", "r1", "r2", "memory"); \
     else if (sizeof (*(mem)) == 4) \
       __asm __volatile ("\
        mova 1f,r0\n\
        mov r15,r1\n\
        .align 2\n\
        mov #(0f-1f),r15\n\
     0: mov.l @%1,r2\n\
        add %0,r2\n\
        mov.l r2,@%1\n\
     1: mov r1,r15"\
        : "=&r" (__tmp) : "u" (mem), "0" (__value) \
        : "r0", "r1", "r2", "memory"); \
     else \
       { \
         /* Unreachable: the 64-bit CAS above aborts.  */ \
         __typeof (*(mem)) oldval; \
         __typeof (mem) memp = (mem); \
         do \
           oldval = *memp; \
         while (__arch_compare_and_exchange_val_64_acq \
                 (memp, oldval + __value, oldval) != oldval); \
         (void) __value; \
       } \
     })

/* Add VALUE to *MEM and return nonzero iff the result is negative.  */
#define atomic_add_negative(mem, value) \
  ({ unsigned char __result; \
     __typeof (*(mem)) __tmp, __value = (value); \
     if (sizeof (*(mem)) == 1) \
       __asm __volatile ("\
        mova 1f,r0\n\
        mov r15,r1\n\
        .align 2\n\
        mov #(0f-1f),r15\n\
     0: mov.b @%2,r2\n\
        add %1,r2\n\
        mov.b r2,@%2\n\
     1: mov r1,r15\n\
        shal r2\n\
        movt %0"\
        : "=r" (__result), "=&r" (__tmp) : "u" (mem), "1" (__value) \
        : "r0", "r1", "r2", "t", "memory"); \
     else if (sizeof (*(mem)) == 2) \
       __asm __volatile ("\
        mova 1f,r0\n\
        mov r15,r1\n\
        .align 2\n\
        mov #(0f-1f),r15\n\
     0: mov.w @%2,r2\n\
        add %1,r2\n\
        mov.w r2,@%2\n\
     1: mov r1,r15\n\
        shal r2\n\
        movt %0"\
        : "=r" (__result), "=&r" (__tmp) : "u" (mem), "1" (__value) \
        : "r0", "r1", "r2", "t", "memory"); \
     else if (sizeof (*(mem)) == 4) \
       __asm __volatile ("\
        mova 1f,r0\n\
        mov r15,r1\n\
        .align 2\n\
        mov #(0f-1f),r15\n\
     0: mov.l @%2,r2\n\
        add %1,r2\n\
        mov.l r2,@%2\n\
     1: mov r1,r15\n\
        shal r2\n\
        movt %0"\
        : "=r" (__result), "=&r" (__tmp) : "u" (mem), "1" (__value) \
        : "r0", "r1", "r2", "t", "memory"); \
     else \
       abort (); \
     __result; })

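/* Usage sketch for atomic_add_negative; the variable names are
   hypothetical.  The "shal r2 ; movt %0" tail above copies the sign
   bit of the sum into the result:

     int level = 1;
     if (atomic_add_negative (&level, -2))
       ;   // taken: 1 + -2 == -1 is negative
*/
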
/* Add VALUE to *MEM and return nonzero iff the result is zero.  */
#define atomic_add_zero(mem, value) \
  ({ unsigned char __result; \
     __typeof (*(mem)) __tmp, __value = (value); \
     if (sizeof (*(mem)) == 1) \
       __asm __volatile ("\
        mova 1f,r0\n\
        mov r15,r1\n\
        .align 2\n\
        mov #(0f-1f),r15\n\
     0: mov.b @%2,r2\n\
        add %1,r2\n\
        mov.b r2,@%2\n\
     1: mov r1,r15\n\
        tst r2,r2\n\
        movt %0"\
        : "=r" (__result), "=&r" (__tmp) : "u" (mem), "1" (__value) \
        : "r0", "r1", "r2", "t", "memory"); \
     else if (sizeof (*(mem)) == 2) \
       __asm __volatile ("\
        mova 1f,r0\n\
        mov r15,r1\n\
        .align 2\n\
        mov #(0f-1f),r15\n\
     0: mov.w @%2,r2\n\
        add %1,r2\n\
        mov.w r2,@%2\n\
     1: mov r1,r15\n\
        tst r2,r2\n\
        movt %0"\
        : "=r" (__result), "=&r" (__tmp) : "u" (mem), "1" (__value) \
        : "r0", "r1", "r2", "t", "memory"); \
     else if (sizeof (*(mem)) == 4) \
       __asm __volatile ("\
        mova 1f,r0\n\
        mov r15,r1\n\
        .align 2\n\
        mov #(0f-1f),r15\n\
     0: mov.l @%2,r2\n\
        add %1,r2\n\
        mov.l r2,@%2\n\
     1: mov r1,r15\n\
        tst r2,r2\n\
        movt %0"\
        : "=r" (__result), "=&r" (__tmp) : "u" (mem), "1" (__value) \
        : "r0", "r1", "r2", "t", "memory"); \
     else \
       abort (); \
     __result; })

#define atomic_increment_and_test(mem) atomic_add_zero ((mem), 1)
#define atomic_decrement_and_test(mem) atomic_add_zero ((mem), -1)

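/* Usage sketch for the *_and_test forms; the variable names are
   hypothetical.  The "tst r2,r2 ; movt %0" tail in atomic_add_zero
   reports whether the sum is exactly zero:

     int refs = 2;
     atomic_decrement_and_test (&refs);     // returns 0: refs == 1
     if (atomic_decrement_and_test (&refs))
       ;   // taken: refs reached 0
*/
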
/* Set bit BIT of *MEM without returning the old value.  */
#define atomic_bit_set(mem, bit) \
  (void) ({ unsigned int __mask = 1 << (bit); \
     if (sizeof (*(mem)) == 1) \
       __asm __volatile ("\
        mova 1f,r0\n\
        mov r15,r1\n\
        .align 2\n\
        mov #(0f-1f),r15\n\
     0: mov.b @%0,r2\n\
        or %1,r2\n\
        mov.b r2,@%0\n\
     1: mov r1,r15"\
        : : "u" (mem), "u" (__mask) \
        : "r0", "r1", "r2", "memory"); \
     else if (sizeof (*(mem)) == 2) \
       __asm __volatile ("\
        mova 1f,r0\n\
        mov r15,r1\n\
        .align 2\n\
        mov #(0f-1f),r15\n\
     0: mov.w @%0,r2\n\
        or %1,r2\n\
        mov.w r2,@%0\n\
     1: mov r1,r15"\
        : : "u" (mem), "u" (__mask) \
        : "r0", "r1", "r2", "memory"); \
     else if (sizeof (*(mem)) == 4) \
       __asm __volatile ("\
        mova 1f,r0\n\
        mov r15,r1\n\
        .align 2\n\
        mov #(0f-1f),r15\n\
     0: mov.l @%0,r2\n\
        or %1,r2\n\
        mov.l r2,@%0\n\
     1: mov r1,r15"\
        : : "u" (mem), "u" (__mask) \
        : "r0", "r1", "r2", "memory"); \
     else \
       abort (); \
     })

/* Set bit BIT of *MEM and return nonzero iff it was already set.  */
#define atomic_bit_test_set(mem, bit) \
  ({ unsigned int __mask = 1 << (bit); \
     unsigned int __result = __mask; \
     if (sizeof (*(mem)) == 1) \
       __asm __volatile ("\
        mova 1f,r0\n\
        .align 2\n\
        mov r15,r1\n\
        mov #(0f-1f),r15\n\
     0: mov.b @%2,r2\n\
        mov r2,r3\n\
        or %1,r2\n\
        mov.b r2,@%2\n\
     1: mov r1,r15\n\
        and r3,%0"\
        : "=&r" (__result), "=&r" (__mask) \
        : "u" (mem), "0" (__result), "1" (__mask) \
        : "r0", "r1", "r2", "r3", "memory"); \
     else if (sizeof (*(mem)) == 2) \
       __asm __volatile ("\
        mova 1f,r0\n\
        .align 2\n\
        mov r15,r1\n\
        mov #(0f-1f),r15\n\
     0: mov.w @%2,r2\n\
        mov r2,r3\n\
        or %1,r2\n\
        mov.w r2,@%2\n\
     1: mov r1,r15\n\
        and r3,%0"\
        : "=&r" (__result), "=&r" (__mask) \
        : "u" (mem), "0" (__result), "1" (__mask) \
        : "r0", "r1", "r2", "r3", "memory"); \
     else if (sizeof (*(mem)) == 4) \
       __asm __volatile ("\
        mova 1f,r0\n\
        .align 2\n\
        mov r15,r1\n\
        mov #(0f-1f),r15\n\
     0: mov.l @%2,r2\n\
        mov r2,r3\n\
        or r2,%1\n\
        mov.l %1,@%2\n\
     1: mov r1,r15\n\
        and r3,%0"\
        : "=&r" (__result), "=&r" (__mask) \
        : "u" (mem), "0" (__result), "1" (__mask) \
        : "r0", "r1", "r2", "r3", "memory"); \
     else \
       abort (); \
     __result; })
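
/* Usage sketch for the bit operations; the variable names are
   hypothetical.  atomic_bit_set discards the old word, while
   atomic_bit_test_set returns old & mask, i.e. nonzero iff the bit
   was already set:

     unsigned int flags = 0;
     atomic_bit_set (&flags, 0);                      // flags == 1
     unsigned int was = atomic_bit_test_set (&flags, 1);
     // was == 0 (bit 1 had been clear), flags == 3
*/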