/* Source: glibc — sysdeps/m68k/m680x0/m68020/bits/atomic.h
   (recovered from a git.ipfire.org gitweb blob page; extraction artifacts
   such as embedded line numbers and dropped lines have been repaired).  */
1 /* Copyright (C) 2003-2014 Free Software Foundation, Inc.
2 This file is part of the GNU C Library.
3 Contributed by Andreas Schwab <schwab@suse.de>, 2003.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library. If not, see
17 <http://www.gnu.org/licenses/>. */
/* Fixed-width and "fast" integer aliases used by the generic glibc
   atomic-operation machinery (include/atomic.h) to name operand types
   of each supported width.  */

typedef int8_t atomic8_t;
typedef uint8_t uatomic8_t;
typedef int_fast8_t atomic_fast8_t;
typedef uint_fast8_t uatomic_fast8_t;

typedef int16_t atomic16_t;
typedef uint16_t uatomic16_t;
typedef int_fast16_t atomic_fast16_t;
typedef uint_fast16_t uatomic_fast16_t;

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;

/* The m68020 `cas2' instruction can compare-and-swap two 32-bit words
   at once, so 64-bit atomic operations are available.  */
#define __HAVE_64B_ATOMICS 1
/* Atomics here are hand-written inline asm, not compiler builtins.  */
#define USE_ATOMIC_COMPILER_BUILTINS 0
/* Atomically compare the byte *MEM with OLDVAL and, if equal, store NEWVAL.
   Evaluates to the previous value of *MEM.  The m68020 `cas' instruction
   performs the compare and store in a single indivisible operation; on
   mismatch, `cas' loads the current value into the compare operand, which
   is exactly the value this statement expression must yield.  */
#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
  ({ __typeof (*(mem)) __ret;						      \
     __asm __volatile ("cas%.b %0,%2,%1"				      \
		       : "=d" (__ret), "+m" (*(mem))			      \
		       : "d" (newval), "0" (oldval));			      \
     __ret; })
/* 16-bit compare-and-exchange; same contract as the 8-bit variant but
   using the word form of `cas'.  Returns the previous value of *MEM.  */
#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
  ({ __typeof (*(mem)) __ret;						      \
     __asm __volatile ("cas%.w %0,%2,%1"				      \
		       : "=d" (__ret), "+m" (*(mem))			      \
		       : "d" (newval), "0" (oldval));			      \
     __ret; })
/* 32-bit compare-and-exchange; same contract as the 8-bit variant but
   using the long form of `cas'.  Returns the previous value of *MEM.  */
#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  ({ __typeof (*(mem)) __ret;						      \
     __asm __volatile ("cas%.l %0,%2,%1"				      \
		       : "=d" (__ret), "+m" (*(mem))			      \
		       : "d" (newval), "0" (oldval));			      \
     __ret; })
/* 64-bit compare-and-exchange built on `cas2', which atomically compares
   and swaps two 32-bit words at two addresses — here the high word at
   __memp and the low word at __memp + 4 (m68k is big-endian).  %R0/%R1
   name the second (low-order) register of each 64-bit register pair.
   The "memory" clobber stands in for an output constraint on *__memp.  */
#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*(mem)) __ret;						      \
     __typeof (mem) __memp = (mem);					      \
     __asm __volatile ("cas2%.l %0:%R0,%1:%R1,(%2):(%3)"		      \
		       : "=d" (__ret)					      \
		       : "d" (newval), "r" (__memp),			      \
			 "r" ((char *) __memp + 4), "0" (oldval)	      \
		       : "memory");					      \
     __ret; })
/* Atomically store NEWVALUE into *MEM and evaluate to the previous value.
   Implemented as a `cas' retry loop: on a failed compare, `cas' reloads
   the current value into the compare register and clears Z, so `jbne 1b'
   retries with the refreshed expected value until the store succeeds.
   The size dispatch on sizeof (*(mem)) is resolved at compile time.  */
#define atomic_exchange_acq(mem, newvalue) \
  ({ __typeof (*(mem)) __result = *(mem);				      \
     if (sizeof (*(mem)) == 1)						      \
       __asm __volatile ("1: cas%.b %0,%2,%1;"				      \
			 "   jbne 1b"					      \
			 : "=d" (__result), "+m" (*(mem))		      \
			 : "d" (newvalue), "0" (__result));		      \
     else if (sizeof (*(mem)) == 2)					      \
       __asm __volatile ("1: cas%.w %0,%2,%1;"				      \
			 "   jbne 1b"					      \
			 : "=d" (__result), "+m" (*(mem))		      \
			 : "d" (newvalue), "0" (__result));		      \
     else if (sizeof (*(mem)) == 4)					      \
       __asm __volatile ("1: cas%.l %0,%2,%1;"				      \
			 "   jbne 1b"					      \
			 : "=d" (__result), "+m" (*(mem))		      \
			 : "d" (newvalue), "0" (__result));		      \
     else								      \
       {								      \
	 /* 64-bit: swap both 32-bit halves with cas2.  */		      \
	 __typeof (mem) __memp = (mem);					      \
	 __asm __volatile ("1: cas2%.l %0:%R0,%1:%R1,(%2):(%3);"	      \
			   "   jbne 1b"					      \
			   : "=d" (__result)				      \
			   : "d" (newvalue), "r" (__memp),		      \
			     "r" ((char *) __memp + 4), "0" (__result)	      \
			   : "memory");					      \
       }								      \
     __result; })
/* Atomically add VALUE to *MEM and evaluate to the OLD value of *MEM.
   Each width computes old+value into a scratch register (__temp) and
   publishes it with `cas'/`cas2'; a failed compare refreshes __result
   with the current value and `jbne 1b' retries.
   64-bit path: %0/%1/%2 are the high words of __result/__temp/value and
   %R0/%R1/%R2 the low words (big-endian register pairs), so the sum is
   formed low-word-first (`add') with the carry folded into the high word
   (`addx').  NOTE(review): the scraped original appeared to apply `addx'
   to the low halves — that would propagate the carry the wrong way;
   verify this ordering against upstream glibc.  */
#define atomic_exchange_and_add(mem, value) \
  ({ __typeof (*(mem)) __result = *(mem);				      \
     __typeof (*(mem)) __temp;						      \
     if (sizeof (*(mem)) == 1)						      \
       __asm __volatile ("1: move%.b %0,%2;"				      \
			 "   add%.b %3,%2;"				      \
			 "   cas%.b %0,%2,%1;"				      \
			 "   jbne 1b"					      \
			 : "=d" (__result), "+m" (*(mem)),		      \
			   "=&d" (__temp)				      \
			 : "d" (value), "0" (__result));		      \
     else if (sizeof (*(mem)) == 2)					      \
       __asm __volatile ("1: move%.w %0,%2;"				      \
			 "   add%.w %3,%2;"				      \
			 "   cas%.w %0,%2,%1;"				      \
			 "   jbne 1b"					      \
			 : "=d" (__result), "+m" (*(mem)),		      \
			   "=&d" (__temp)				      \
			 : "d" (value), "0" (__result));		      \
     else if (sizeof (*(mem)) == 4)					      \
       __asm __volatile ("1: move%.l %0,%2;"				      \
			 "   add%.l %3,%2;"				      \
			 "   cas%.l %0,%2,%1;"				      \
			 "   jbne 1b"					      \
			 : "=d" (__result), "+m" (*(mem)),		      \
			   "=&d" (__temp)				      \
			 : "d" (value), "0" (__result));		      \
     else								      \
       {								      \
	 __typeof (mem) __memp = (mem);					      \
	 __asm __volatile ("1: move%.l %0,%1;"				      \
			   "   move%.l %R0,%R1;"			      \
			   "   add%.l %R2,%R1;"				      \
			   "   addx%.l %2,%1;"				      \
			   "   cas2%.l %0:%R0,%1:%R1,(%3):(%4);"	      \
			   "   jbne 1b"					      \
			   : "=d" (__result), "=&d" (__temp)		      \
			   : "d" (value), "r" (__memp),			      \
			     "r" ((char *) __memp + 4), "0" (__result)	      \
			   : "memory");					      \
       }								      \
     __result; })
/* Atomically add VALUE to *MEM, discarding the result.  For 8/16/32-bit
   operands a single memory-destination `add' suffices (it is atomic on
   the m68020 bus); "id" allows an immediate or data-register source.
   The 64-bit path must use a cas2 retry loop, adding low-word-first with
   the carry extended into the high word via `addx'.  NOTE(review): the
   scraped original appeared to apply `addx' to the low halves; verify
   this ordering against upstream glibc.  */
#define atomic_add(mem, value) \
  (void) ({ if (sizeof (*(mem)) == 1)					      \
	      __asm __volatile ("add%.b %1,%0"				      \
				: "+m" (*(mem))				      \
				: "id" (value));			      \
	    else if (sizeof (*(mem)) == 2)				      \
	      __asm __volatile ("add%.w %1,%0"				      \
				: "+m" (*(mem))				      \
				: "id" (value));			      \
	    else if (sizeof (*(mem)) == 4)				      \
	      __asm __volatile ("add%.l %1,%0"				      \
				: "+m" (*(mem))				      \
				: "id" (value));			      \
	    else							      \
	      {								      \
		__typeof (mem) __memp = (mem);				      \
		__typeof (*(mem)) __oldval = *__memp;			      \
		__typeof (*(mem)) __temp;				      \
		__asm __volatile ("1: move%.l %0,%1;"			      \
				  "   move%.l %R0,%R1;"			      \
				  "   add%.l %R2,%R1;"			      \
				  "   addx%.l %2,%1;"			      \
				  "   cas2%.l %0:%R0,%1:%R1,(%3):(%4);"	      \
				  "   jbne 1b"				      \
				  : "=d" (__oldval), "=&d" (__temp)	      \
				  : "d" (value), "r" (__memp),		      \
				    "r" ((char *) __memp + 4),		      \
				    "0" (__oldval)			      \
				  : "memory");				      \
	      }								      \
	    })
/* Atomically increment *MEM; evaluate to nonzero iff the NEW value is 0.
   Small widths use a memory-destination `addq' followed by `seq' on the
   resulting Z flag.  64-bit: increment the low word (`addq'), extend the
   carry into the high word (`addx' with a zero register, operand %5) —
   `addx' leaves Z set only if the full 64-bit result is zero, so `seq'
   must run before `cas2' (which clobbers Z for its own compare).  On a
   failed cas2, `jbne 1b' retries with the refreshed old value.  */
#define atomic_increment_and_test(mem) \
  ({ char __result;							      \
     if (sizeof (*(mem)) == 1)						      \
       __asm __volatile ("addq%.b %#1,%1; seq %0"			      \
			 : "=dm" (__result), "+m" (*(mem)));		      \
     else if (sizeof (*(mem)) == 2)					      \
       __asm __volatile ("addq%.w %#1,%1; seq %0"			      \
			 : "=dm" (__result), "+m" (*(mem)));		      \
     else if (sizeof (*(mem)) == 4)					      \
       __asm __volatile ("addq%.l %#1,%1; seq %0"			      \
			 : "=dm" (__result), "+m" (*(mem)));		      \
     else								      \
       {								      \
	 __typeof (mem) __memp = (mem);					      \
	 __typeof (*(mem)) __oldval = *__memp;				      \
	 __typeof (*(mem)) __temp;					      \
	 __asm __volatile ("1: move%.l %1,%2;"				      \
			   "   move%.l %R1,%R2;"			      \
			   "   addq%.l %#1,%R2;"			      \
			   "   addx%.l %5,%2;"				      \
			   "   seq %0;"					      \
			   "   cas2%.l %1:%R1,%2:%R2,(%3):(%4);"	      \
			   "   jbne 1b"					      \
			   : "=&dm" (__result), "=d" (__oldval),	      \
			     "=&d" (__temp)				      \
			   : "r" (__memp), "r" ((char *) __memp + 4),	      \
			     "d" (0), "1" (__oldval)			      \
			   : "memory");					      \
       }								      \
     __result; })
/* Atomically decrement *MEM; evaluate to nonzero iff the NEW value is 0.
   Mirror image of atomic_increment_and_test: `subq' on the low word,
   `subx' (with a zero register, operand %5) borrows into the high word,
   `seq' captures the 64-bit zero flag before `cas2' clobbers it.  */
#define atomic_decrement_and_test(mem) \
  ({ char __result;							      \
     if (sizeof (*(mem)) == 1)						      \
       __asm __volatile ("subq%.b %#1,%1; seq %0"			      \
			 : "=dm" (__result), "+m" (*(mem)));		      \
     else if (sizeof (*(mem)) == 2)					      \
       __asm __volatile ("subq%.w %#1,%1; seq %0"			      \
			 : "=dm" (__result), "+m" (*(mem)));		      \
     else if (sizeof (*(mem)) == 4)					      \
       __asm __volatile ("subq%.l %#1,%1; seq %0"			      \
			 : "=dm" (__result), "+m" (*(mem)));		      \
     else								      \
       {								      \
	 __typeof (mem) __memp = (mem);					      \
	 __typeof (*(mem)) __oldval = *__memp;				      \
	 __typeof (*(mem)) __temp;					      \
	 __asm __volatile ("1: move%.l %1,%2;"				      \
			   "   move%.l %R1,%R2;"			      \
			   "   subq%.l %#1,%R2;"			      \
			   "   subx%.l %5,%2;"				      \
			   "   seq %0;"					      \
			   "   cas2%.l %1:%R1,%2:%R2,(%3):(%4);"	      \
			   "   jbne 1b"					      \
			   : "=&dm" (__result), "=d" (__oldval),	      \
			     "=&d" (__temp)				      \
			   : "r" (__memp), "r" ((char *) __memp + 4),	      \
			     "d" (0), "1" (__oldval)			      \
			   : "memory");					      \
       }								      \
     __result; })
/* Atomically set bit BIT (LSB = 0) of *MEM.  `bfset' numbers bits from
   the most significant end, hence the width*8 - bit - 1 conversion.
   "di" permits the offset as an immediate or a data register.  */
#define atomic_bit_set(mem, bit) \
  __asm __volatile ("bfset %0{%1,#1}"					      \
		    : "+m" (*(mem))					      \
		    : "di" (sizeof (*(mem)) * 8 - (bit) - 1))
/* Atomically set bit BIT (LSB = 0) of *MEM; evaluate to nonzero iff the
   bit was already set.  `bfset' sets Z from the field's PREVIOUS value,
   so `sne' yields the old bit state.  Bit offset converted as in
   atomic_bit_set (bfset counts from the most significant bit).  */
#define atomic_bit_test_set(mem, bit) \
  ({ char __result;							      \
     __asm __volatile ("bfset %1{%2,#1}; sne %0"			      \
		       : "=dm" (__result), "+m" (*(mem))		      \
		       : "di" (sizeof (*(mem)) * 8 - (bit) - 1));	      \
     __result; })