/* Low-level functions for atomic operations.  Mips version.
   Copyright (C) 2005-2013 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef _MIPS_BITS_ATOMIC_H
#define _MIPS_BITS_ATOMIC_H 1

#include <stdint.h>
#include <inttypes.h>
#include <sgidefs.h>

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;

#if _MIPS_SIM == _ABIO32
#define MIPS_PUSH_MIPS2 ".set mips2\n\t"
#else
#define MIPS_PUSH_MIPS2
#endif

/* See the comments in <sys/asm.h> about the use of the sync instruction.  */
#ifndef MIPS_SYNC
# define MIPS_SYNC sync
#endif

/* Certain revisions of the R10000 Processor need an LL/SC Workaround
   enabled.  Revisions before 3.0 misbehave on atomic operations, and
   Revs 2.6 and lower deadlock after several seconds due to other errata.

   To quote the R10K Errata:
      Workaround: The basic idea is to inhibit the four instructions
      from simultaneously becoming active in R10000.  Padding all
      ll/sc sequences with nops or changing the looping branch in the
      routines to a branch likely (which is always predicted taken
      by R10000) will work.  The nops should go after the loop, and the
      number of them should be 28.  This number could be decremented for
      each additional instruction in the ll/sc loop such as the lock
      modifier(s) between the ll and sc, the looping branch and its
      delay slot.  For typical short routines with one ll/sc loop, any
      instructions after the loop could also count as a decrement.  The
      nop workaround pollutes the cache more but would be a few cycles
      faster if all the code is in the cache and the looping branch
      is predicted not taken.  */


#ifdef _MIPS_ARCH_R10000
#define R10K_BEQZ_INSN "beqzl"
#else
#define R10K_BEQZ_INSN "beqz"
#endif

#define MIPS_SYNC_STR_2(X) #X
#define MIPS_SYNC_STR_1(X) MIPS_SYNC_STR_2(X)
#define MIPS_SYNC_STR MIPS_SYNC_STR_1(MIPS_SYNC)
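
/* Expansion note (illustrative): the two-level helper is needed because
   MIPS_SYNC is itself a macro.  MIPS_SYNC_STR_1 expands its argument
   first, and MIPS_SYNC_STR_2 then stringizes the result, so with the
   default definition above MIPS_SYNC_STR yields the string literal
   "sync", ready for splicing into the asm templates below.  */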

#if __GNUC_PREREQ (4, 8) || (defined __mips16 && __GNUC_PREREQ (4, 7))
/* The __atomic_* builtins are available in GCC 4.7 and later, but MIPS
   support for their efficient implementation was added only in GCC 4.8.
   We still want to use them even with GCC 4.7 for MIPS16 code, where we
   have no assembly alternative available and want to avoid the __sync_*
   builtins if at all possible.  */

/* Compare and exchange.
   For all "bool" routines, we return FALSE if the exchange is
   successful.  */

# define __arch_compare_and_exchange_bool_8_int(mem, newval, oldval, model) \
  (abort (), 0)

# define __arch_compare_and_exchange_bool_16_int(mem, newval, oldval, model) \
  (abort (), 0)

# define __arch_compare_and_exchange_bool_32_int(mem, newval, oldval, model) \
  ({                                                                    \
    typeof (*mem) __oldval = (oldval);                                  \
    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0,   \
                                  model, __ATOMIC_RELAXED);             \
  })

# define __arch_compare_and_exchange_val_8_int(mem, newval, oldval, model) \
  (abort (), (typeof (*mem)) 0)

# define __arch_compare_and_exchange_val_16_int(mem, newval, oldval, model) \
  (abort (), (typeof (*mem)) 0)

# define __arch_compare_and_exchange_val_32_int(mem, newval, oldval, model) \
  ({                                                                    \
    typeof (*mem) __oldval = (oldval);                                  \
    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0,    \
                                 model, __ATOMIC_RELAXED);              \
    __oldval;                                                           \
  })
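
/* Note on the "val" routine above: on failure __atomic_compare_exchange_n
   writes the value actually observed in *mem back into __oldval, so
   returning __oldval yields the prior memory contents whether or not the
   exchange succeeded.  */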

# if _MIPS_SIM == _ABIO32
/* We can't do an atomic 64-bit operation in O32.  */
# define __arch_compare_and_exchange_bool_64_int(mem, newval, oldval, model) \
  (abort (), 0)
# define __arch_compare_and_exchange_val_64_int(mem, newval, oldval, model) \
  (abort (), (typeof (*mem)) 0)
# else
# define __arch_compare_and_exchange_bool_64_int(mem, newval, oldval, model) \
  __arch_compare_and_exchange_bool_32_int (mem, newval, oldval, model)
# define __arch_compare_and_exchange_val_64_int(mem, newval, oldval, model) \
  __arch_compare_and_exchange_val_32_int (mem, newval, oldval, model)
# endif

/* Compare and exchange with "acquire" semantics, i.e. barrier after.  */

# define atomic_compare_and_exchange_bool_acq(mem, new, old)            \
  __atomic_bool_bysize (__arch_compare_and_exchange_bool, int,          \
                        mem, new, old, __ATOMIC_ACQUIRE)

# define atomic_compare_and_exchange_val_acq(mem, new, old)             \
  __atomic_val_bysize (__arch_compare_and_exchange_val, int,            \
                       mem, new, old, __ATOMIC_ACQUIRE)

/* Compare and exchange with "release" semantics, i.e. barrier before.  */

# define atomic_compare_and_exchange_bool_rel(mem, new, old)            \
  __atomic_bool_bysize (__arch_compare_and_exchange_bool, int,          \
                        mem, new, old, __ATOMIC_RELEASE)

# define atomic_compare_and_exchange_val_rel(mem, new, old)             \
  __atomic_val_bysize (__arch_compare_and_exchange_val, int,            \
                       mem, new, old, __ATOMIC_RELEASE)
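
/* Usage sketch (hypothetical caller, not part of this header): since the
   "bool" routines return FALSE on success, a spin acquire of a lock word
   initialized to 0 reads:

     while (atomic_compare_and_exchange_bool_acq (&lock, 1, 0))
       continue;  -- retry until we observed 0 and installed 1

   atomic_compare_and_exchange_val_acq would instead return the value
   seen in memory, which equals 0 exactly when the swap happened.  */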


/* Atomic exchange (without compare).  */

# define __arch_exchange_8_int(mem, newval, model) \
  (abort (), (typeof (*mem)) 0)

# define __arch_exchange_16_int(mem, newval, model) \
  (abort (), (typeof (*mem)) 0)

# define __arch_exchange_32_int(mem, newval, model) \
  __atomic_exchange_n (mem, newval, model)

# if _MIPS_SIM == _ABIO32
/* We can't do an atomic 64-bit operation in O32.  */
# define __arch_exchange_64_int(mem, newval, model) \
  (abort (), (typeof (*mem)) 0)
# else
# define __arch_exchange_64_int(mem, newval, model) \
  __atomic_exchange_n (mem, newval, model)
# endif

# define atomic_exchange_acq(mem, value) \
  __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_ACQUIRE)

# define atomic_exchange_rel(mem, value) \
  __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_RELEASE)
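
/* Usage sketch (illustrative): atomic_exchange_acq unconditionally stores
   the new value and returns the previous one, e.g. a test-and-set style
   grab of a hypothetical flag:

     if (atomic_exchange_acq (&flag, 1) == 0)
       ;  -- the flag was clear and we are the one that set it  */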


/* Atomically add value and return the previous (unincremented) value.  */

# define __arch_exchange_and_add_8_int(mem, value, model) \
  (abort (), (typeof (*mem)) 0)

# define __arch_exchange_and_add_16_int(mem, value, model) \
  (abort (), (typeof (*mem)) 0)

# define __arch_exchange_and_add_32_int(mem, value, model) \
  __atomic_fetch_add (mem, value, model)

# if _MIPS_SIM == _ABIO32
/* We can't do an atomic 64-bit operation in O32.  */
# define __arch_exchange_and_add_64_int(mem, value, model) \
  (abort (), (typeof (*mem)) 0)
# else
# define __arch_exchange_and_add_64_int(mem, value, model) \
  __atomic_fetch_add (mem, value, model)
# endif

# define atomic_exchange_and_add_acq(mem, value)                        \
  __atomic_val_bysize (__arch_exchange_and_add, int, mem, value,        \
                       __ATOMIC_ACQUIRE)

# define atomic_exchange_and_add_rel(mem, value)                        \
  __atomic_val_bysize (__arch_exchange_and_add, int, mem, value,        \
                       __ATOMIC_RELEASE)
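
/* Usage sketch (illustrative): fetch-and-add returns the value before the
   addition, so bumping a hypothetical reference count and testing whether
   it was previously zero reads:

     if (atomic_exchange_and_add_acq (&refcnt, 1) == 0)
       ;  -- this was the first reference  */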

#elif defined __mips16 /* !__GNUC_PREREQ (4, 7) */
/* This implementation using __sync* builtins will be removed once glibc
   requires GCC 4.7 or later to build.  */

# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  __sync_val_compare_and_swap ((mem), (oldval), (newval))
# define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  (!__sync_bool_compare_and_swap ((mem), (oldval), (newval)))

# define atomic_exchange_acq(mem, newval) \
  __sync_lock_test_and_set ((mem), (newval))

# define atomic_exchange_and_add(mem, val) \
  __sync_fetch_and_add ((mem), (val))

# define atomic_bit_test_set(mem, bit)                                  \
  ({ __typeof (bit) __bit = (bit);                                      \
     (__sync_fetch_and_or ((mem), 1 << (__bit)) & (1 << (__bit))); })

# define atomic_and(mem, mask) (void) __sync_fetch_and_and ((mem), (mask))
# define atomic_and_val(mem, mask) __sync_fetch_and_and ((mem), (mask))

# define atomic_or(mem, mask) (void) __sync_fetch_and_or ((mem), (mask))
# define atomic_or_val(mem, mask) __sync_fetch_and_or ((mem), (mask))
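
/* Note (descriptive): atomic_bit_test_set ORs in the requested bit and
   masks the builtin's return value, so the result is nonzero exactly when
   the bit was already set.  Also note that __sync_lock_test_and_set is
   documented by GCC as an acquire barrier only, which matches the _acq
   name it implements here.  */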

#else /* !__mips16 && !__GNUC_PREREQ (4, 8) */
/* This implementation using inline assembly will be removed once glibc
   requires GCC 4.8 or later to build.  */

/* Compare and exchange.  For all of the "xxx" routines, we expect a
   "__prev" and a "__cmp" variable to be provided by the enclosing scope,
   in which values are returned.  */

# define __arch_compare_and_exchange_xxx_8_int(mem, newval, oldval, rel, acq) \
  (abort (), __prev = 0, __cmp = 0, (void) __cmp)

# define __arch_compare_and_exchange_xxx_16_int(mem, newval, oldval, rel, acq) \
  (abort (), __prev = 0, __cmp = 0, (void) __cmp)

# define __arch_compare_and_exchange_xxx_32_int(mem, newval, oldval, rel, acq) \
  __asm__ __volatile__ (                                                \
    ".set push\n\t"                                                     \
    MIPS_PUSH_MIPS2                                                     \
    rel "\n"                                                            \
    "1:\t"                                                              \
    "ll %0,%5\n\t"                                                      \
    "move %1,$0\n\t"                                                    \
    "bne %0,%3,2f\n\t"                                                  \
    "move %1,%4\n\t"                                                    \
    "sc %1,%2\n\t"                                                      \
    R10K_BEQZ_INSN" %1,1b\n"                                            \
    acq "\n\t"                                                          \
    ".set pop\n"                                                        \
    "2:\n\t"                                                            \
    : "=&r" (__prev), "=&r" (__cmp), "=m" (*mem)                        \
    : "r" (oldval), "r" (newval), "m" (*mem)                            \
    : "memory")

# if _MIPS_SIM == _ABIO32
/* We can't do an atomic 64-bit operation in O32.  */
# define __arch_compare_and_exchange_xxx_64_int(mem, newval, oldval, rel, acq) \
  (abort (), __prev = 0, __cmp = 0, (void) __cmp)
# else
# define __arch_compare_and_exchange_xxx_64_int(mem, newval, oldval, rel, acq) \
  __asm__ __volatile__ ("\n"                                            \
    ".set push\n\t"                                                     \
    MIPS_PUSH_MIPS2                                                     \
    rel "\n"                                                            \
    "1:\t"                                                              \
    "lld %0,%5\n\t"                                                     \
    "move %1,$0\n\t"                                                    \
    "bne %0,%3,2f\n\t"                                                  \
    "move %1,%4\n\t"                                                    \
    "scd %1,%2\n\t"                                                     \
    R10K_BEQZ_INSN" %1,1b\n"                                            \
    acq "\n\t"                                                          \
    ".set pop\n"                                                        \
    "2:\n\t"                                                            \
    : "=&r" (__prev), "=&r" (__cmp), "=m" (*mem)                        \
    : "r" (oldval), "r" (newval), "m" (*mem)                            \
    : "memory")
# endif

/* For all "bool" routines, we return FALSE if the exchange is
   successful.  */

# define __arch_compare_and_exchange_bool_8_int(mem, new, old, rel, acq) \
({ typeof (*mem) __prev __attribute__ ((unused)); int __cmp;            \
   __arch_compare_and_exchange_xxx_8_int(mem, new, old, rel, acq);      \
   !__cmp; })

# define __arch_compare_and_exchange_bool_16_int(mem, new, old, rel, acq) \
({ typeof (*mem) __prev __attribute__ ((unused)); int __cmp;            \
   __arch_compare_and_exchange_xxx_16_int(mem, new, old, rel, acq);     \
   !__cmp; })

# define __arch_compare_and_exchange_bool_32_int(mem, new, old, rel, acq) \
({ typeof (*mem) __prev __attribute__ ((unused)); int __cmp;            \
   __arch_compare_and_exchange_xxx_32_int(mem, new, old, rel, acq);     \
   !__cmp; })

# define __arch_compare_and_exchange_bool_64_int(mem, new, old, rel, acq) \
({ typeof (*mem) __prev __attribute__ ((unused)); int __cmp;            \
   __arch_compare_and_exchange_xxx_64_int(mem, new, old, rel, acq);     \
   !__cmp; })
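
/* Note (descriptive): the "bool" wrappers declare __prev only because the
   xxx sequences write to it; __attribute__ ((unused)) keeps
   unused-variable warnings quiet since the boolean result is derived from
   __cmp alone.  */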

/* For all "val" routines, return the old value whether the exchange
   was successful or not.  */

# define __arch_compare_and_exchange_val_8_int(mem, new, old, rel, acq) \
({ typeof (*mem) __prev; int __cmp;                                     \
   __arch_compare_and_exchange_xxx_8_int(mem, new, old, rel, acq);      \
   (typeof (*mem))__prev; })

# define __arch_compare_and_exchange_val_16_int(mem, new, old, rel, acq) \
({ typeof (*mem) __prev; int __cmp;                                     \
   __arch_compare_and_exchange_xxx_16_int(mem, new, old, rel, acq);     \
   (typeof (*mem))__prev; })

# define __arch_compare_and_exchange_val_32_int(mem, new, old, rel, acq) \
({ typeof (*mem) __prev; int __cmp;                                     \
   __arch_compare_and_exchange_xxx_32_int(mem, new, old, rel, acq);     \
   (typeof (*mem))__prev; })

# define __arch_compare_and_exchange_val_64_int(mem, new, old, rel, acq) \
({ typeof (*mem) __prev; int __cmp;                                     \
   __arch_compare_and_exchange_xxx_64_int(mem, new, old, rel, acq);     \
   (typeof (*mem))__prev; })

/* Compare and exchange with "acquire" semantics, i.e. barrier after.  */

# define atomic_compare_and_exchange_bool_acq(mem, new, old)            \
  __atomic_bool_bysize (__arch_compare_and_exchange_bool, int,          \
                        mem, new, old, "", MIPS_SYNC_STR)

# define atomic_compare_and_exchange_val_acq(mem, new, old)             \
  __atomic_val_bysize (__arch_compare_and_exchange_val, int,            \
                       mem, new, old, "", MIPS_SYNC_STR)

/* Compare and exchange with "release" semantics, i.e. barrier before.  */

# define atomic_compare_and_exchange_bool_rel(mem, new, old)            \
  __atomic_bool_bysize (__arch_compare_and_exchange_bool, int,          \
                        mem, new, old, MIPS_SYNC_STR, "")

# define atomic_compare_and_exchange_val_rel(mem, new, old)             \
  __atomic_val_bysize (__arch_compare_and_exchange_val, int,            \
                       mem, new, old, MIPS_SYNC_STR, "")


/* Atomic exchange (without compare).  */

# define __arch_exchange_xxx_8_int(mem, newval, rel, acq) \
  (abort (), (typeof (*mem)) 0)

# define __arch_exchange_xxx_16_int(mem, newval, rel, acq) \
  (abort (), (typeof (*mem)) 0)

# define __arch_exchange_xxx_32_int(mem, newval, rel, acq)              \
({ typeof (*mem) __prev; int __cmp;                                     \
   __asm__ __volatile__ ("\n"                                           \
     ".set push\n\t"                                                    \
     MIPS_PUSH_MIPS2                                                    \
     rel "\n"                                                           \
     "1:\t"                                                             \
     "ll %0,%4\n\t"                                                     \
     "move %1,%3\n\t"                                                   \
     "sc %1,%2\n\t"                                                     \
     R10K_BEQZ_INSN" %1,1b\n"                                           \
     acq "\n\t"                                                         \
     ".set pop\n"                                                       \
     "2:\n\t"                                                           \
     : "=&r" (__prev), "=&r" (__cmp), "=m" (*mem)                       \
     : "r" (newval), "m" (*mem)                                         \
     : "memory");                                                       \
   __prev; })
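
/* Note (descriptive): nothing branches to the "2:" label in the exchange
   sequences; the unconditional swap never skips the store, so the label
   appears to remain only for symmetry with the CAS template above.  */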

# if _MIPS_SIM == _ABIO32
/* We can't do an atomic 64-bit operation in O32.  */
# define __arch_exchange_xxx_64_int(mem, newval, rel, acq) \
  (abort (), (typeof (*mem)) 0)
# else
# define __arch_exchange_xxx_64_int(mem, newval, rel, acq)              \
({ typeof (*mem) __prev; int __cmp;                                     \
   __asm__ __volatile__ ("\n"                                           \
     ".set push\n\t"                                                    \
     MIPS_PUSH_MIPS2                                                    \
     rel "\n"                                                           \
     "1:\n"                                                             \
     "lld %0,%4\n\t"                                                    \
     "move %1,%3\n\t"                                                   \
     "scd %1,%2\n\t"                                                    \
     R10K_BEQZ_INSN" %1,1b\n"                                           \
     acq "\n\t"                                                         \
     ".set pop\n"                                                       \
     "2:\n\t"                                                           \
     : "=&r" (__prev), "=&r" (__cmp), "=m" (*mem)                       \
     : "r" (newval), "m" (*mem)                                         \
     : "memory");                                                       \
   __prev; })
# endif

# define atomic_exchange_acq(mem, value) \
  __atomic_val_bysize (__arch_exchange_xxx, int, mem, value, "", MIPS_SYNC_STR)

# define atomic_exchange_rel(mem, value) \
  __atomic_val_bysize (__arch_exchange_xxx, int, mem, value, MIPS_SYNC_STR, "")


/* Atomically add value and return the previous (unincremented) value.  */

# define __arch_exchange_and_add_8_int(mem, newval, rel, acq) \
  (abort (), (typeof (*mem)) 0)

# define __arch_exchange_and_add_16_int(mem, newval, rel, acq) \
  (abort (), (typeof (*mem)) 0)

# define __arch_exchange_and_add_32_int(mem, value, rel, acq)           \
({ typeof (*mem) __prev; int __cmp;                                     \
   __asm__ __volatile__ ("\n"                                           \
     ".set push\n\t"                                                    \
     MIPS_PUSH_MIPS2                                                    \
     rel "\n"                                                           \
     "1:\t"                                                             \
     "ll %0,%4\n\t"                                                     \
     "addu %1,%0,%3\n\t"                                                \
     "sc %1,%2\n\t"                                                     \
     R10K_BEQZ_INSN" %1,1b\n"                                           \
     acq "\n\t"                                                         \
     ".set pop\n"                                                       \
     "2:\n\t"                                                           \
     : "=&r" (__prev), "=&r" (__cmp), "=m" (*mem)                       \
     : "r" (value), "m" (*mem)                                          \
     : "memory");                                                       \
   __prev; })
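
/* Note (descriptive): addu computes __prev + value into __cmp, which SC
   then attempts to store; the unincremented value stays in __prev and is
   the result of the statement expression.  The 64-bit variant below is
   identical except for lld/daddu/scd.  */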

# if _MIPS_SIM == _ABIO32
/* We can't do an atomic 64-bit operation in O32.  */
# define __arch_exchange_and_add_64_int(mem, value, rel, acq) \
  (abort (), (typeof (*mem)) 0)
# else
# define __arch_exchange_and_add_64_int(mem, value, rel, acq)           \
({ typeof (*mem) __prev; int __cmp;                                     \
   __asm__ __volatile__ (                                               \
     ".set push\n\t"                                                    \
     MIPS_PUSH_MIPS2                                                    \
     rel "\n"                                                           \
     "1:\t"                                                             \
     "lld %0,%4\n\t"                                                    \
     "daddu %1,%0,%3\n\t"                                               \
     "scd %1,%2\n\t"                                                    \
     R10K_BEQZ_INSN" %1,1b\n"                                           \
     acq "\n\t"                                                         \
     ".set pop\n"                                                       \
     "2:\n\t"                                                           \
     : "=&r" (__prev), "=&r" (__cmp), "=m" (*mem)                       \
     : "r" (value), "m" (*mem)                                          \
     : "memory");                                                       \
   __prev; })
# endif

# define atomic_exchange_and_add_acq(mem, value)                        \
  __atomic_val_bysize (__arch_exchange_and_add, int, mem, value,        \
                       "", MIPS_SYNC_STR)

# define atomic_exchange_and_add_rel(mem, value)                        \
  __atomic_val_bysize (__arch_exchange_and_add, int, mem, value,        \
                       MIPS_SYNC_STR, "")

#endif /* !__mips16 && !__GNUC_PREREQ (4, 8) */

/* TODO: More atomic operations could be implemented efficiently; only the
   basic requirements are done.  */

#ifdef __mips16
# define atomic_full_barrier() __sync_synchronize ()

#else /* !__mips16 */
# define atomic_full_barrier()                                          \
  __asm__ __volatile__ (".set push\n\t"                                 \
                        MIPS_PUSH_MIPS2                                 \
                        MIPS_SYNC_STR "\n\t"                            \
                        ".set pop" : : : "memory")
#endif /* !__mips16 */
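
/* Note (descriptive): atomic_full_barrier emits a SYNC (or defers to
   __sync_synchronize for MIPS16), ordering all memory accesses that
   precede the barrier before all accesses that follow it.  */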

#endif /* bits/atomic.h */