/* glibc: sysdeps/powerpc/atomic-machine.h (via git.ipfire.org mirror).  */
1 /* Atomic operations. PowerPC Common version.
2 Copyright (C) 2003-2021 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4 Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
5
6 The GNU C Library is free software; you can redistribute it and/or
7 modify it under the terms of the GNU Lesser General Public
8 License as published by the Free Software Foundation; either
9 version 2.1 of the License, or (at your option) any later version.
10
11 The GNU C Library is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Lesser General Public License for more details.
15
16 You should have received a copy of the GNU Lesser General Public
17 License along with the GNU C Library; if not, see
18 <https://www.gnu.org/licenses/>. */
19
/*
 * Never include sysdeps/powerpc/atomic-machine.h directly.
 * Always use include/atomic.h, which will include either
 * sysdeps/powerpc/powerpc32/atomic-machine.h
 * or
 * sysdeps/powerpc/powerpc64/atomic-machine.h
 * as appropriate; those headers in turn include this file.
 */
28
29 #include <stdint.h>
30
/* Signed/unsigned 32-bit atomic value types.  */
typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

/* Signed/unsigned 64-bit atomic value types (the 64-bit operations
   themselves come from the powerpc32/powerpc64 wrapper headers).  */
typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

/* Pointer-width and maximum-width atomic value types.  */
typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;
45
/*
 * Powerpc does not have byte and halfword forms of load and reserve and
 * store conditional.  So for powerpc we stub out the 8- and 16-bit forms.
 */
/* 8-bit compare-and-swap: unsupported on this architecture; any use
   aborts at run time.  The comma expression keeps the macro usable in
   value context (type int).  */
#define __arch_compare_and_exchange_bool_8_acq(mem, newval, oldval) \
  (abort (), 0)

/* 16-bit compare-and-swap: likewise unsupported; aborts at run time.  */
#define __arch_compare_and_exchange_bool_16_acq(mem, newval, oldval) \
  (abort (), 0)
55
/* Acquire barrier appended after a successful lwarx/stwcx. sequence
   (see the *_acq macros below).  */
#define __ARCH_ACQ_INSTR "isync"
/* Release barrier emitted before a store sequence; a more specific
   sysdeps header may predefine a cheaper instruction.  */
#ifndef __ARCH_REL_INSTR
# define __ARCH_REL_INSTR "sync"
#endif
60
/* Hint strings spliced directly after the lwarx instruction in the
   sequences below (presumably the ISA lock acquire/release hint —
   confirm against the CPU-specific powerpc sysdeps headers); they
   default to empty unless such a header defines them.  */
#ifndef MUTEX_HINT_ACQ
# define MUTEX_HINT_ACQ
#endif
#ifndef MUTEX_HINT_REL
# define MUTEX_HINT_REL
#endif
67
68 #define atomic_full_barrier() __asm ("sync" ::: "memory")
69
/* 32-bit compare-and-exchange with acquire semantics: if *MEM equals
   OLDVAL, store NEWVAL; evaluate to the value *MEM held before the
   operation either way.  The lwarx/stwcx. pair retries ("bne- 1b")
   until the reservation survives; the __ARCH_ACQ_INSTR (isync) at
   label 2 runs on both the success and the failure path.  __tmp is
   early-clobbered ("=&r") because it is written before all inputs are
   consumed; cr0 is clobbered by cmpw and stwcx..  */
#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  ({ \
    __typeof (*(mem)) __tmp; \
    __typeof (mem) __memp = (mem); \
    __asm __volatile ( \
      "1: lwarx %0,0,%1" MUTEX_HINT_ACQ "\n" \
      " cmpw %0,%2\n" \
      " bne 2f\n" \
      " stwcx. %3,0,%1\n" \
      " bne- 1b\n" \
      "2: " __ARCH_ACQ_INSTR \
      : "=&r" (__tmp) \
      : "b" (__memp), "r" (oldval), "r" (newval) \
      : "cr0", "memory"); \
    __tmp; \
  })
86
/* Release-semantics variant of the 32-bit compare-and-exchange: the
   release barrier (__ARCH_REL_INSTR) executes before the lwarx/stwcx.
   loop, and no acquire barrier follows label 2.  Evaluates to the
   value *MEM held before the operation.  */
#define __arch_compare_and_exchange_val_32_rel(mem, newval, oldval) \
  ({ \
    __typeof (*(mem)) __tmp; \
    __typeof (mem) __memp = (mem); \
    __asm __volatile (__ARCH_REL_INSTR "\n" \
      "1: lwarx %0,0,%1" MUTEX_HINT_REL "\n" \
      " cmpw %0,%2\n" \
      " bne 2f\n" \
      " stwcx. %3,0,%1\n" \
      " bne- 1b\n" \
      "2: " \
      : "=&r" (__tmp) \
      : "b" (__memp), "r" (oldval), "r" (newval) \
      : "cr0", "memory"); \
    __tmp; \
  })
103
/* Atomically store VALUE into *MEM; evaluate to the previous contents,
   with acquire semantics (__ARCH_ACQ_INSTR after the loop).
   NOTE(review): MEM is used unparenthesized here (__typeof (*mem)),
   unlike the CAS macros above — harmless for the simple lvalue
   arguments used in practice, but inconsistent.  */
#define __arch_atomic_exchange_32_acq(mem, value) \
  ({ \
    __typeof (*mem) __val; \
    __asm __volatile ( \
      "1: lwarx %0,0,%2" MUTEX_HINT_ACQ "\n" \
      " stwcx. %3,0,%2\n" \
      " bne- 1b\n" \
      " " __ARCH_ACQ_INSTR \
      : "=&r" (__val), "=m" (*mem) \
      : "b" (mem), "r" (value), "m" (*mem) \
      : "cr0", "memory"); \
    __val; \
  })
117
/* Atomically store VALUE into *MEM; evaluate to the previous contents,
   with release semantics: __ARCH_REL_INSTR precedes the loop and no
   barrier follows it.  */
#define __arch_atomic_exchange_32_rel(mem, value) \
  ({ \
    __typeof (*mem) __val; \
    __asm __volatile (__ARCH_REL_INSTR "\n" \
      "1: lwarx %0,0,%2" MUTEX_HINT_REL "\n" \
      " stwcx. %3,0,%2\n" \
      " bne- 1b" \
      : "=&r" (__val), "=m" (*mem) \
      : "b" (mem), "r" (value), "m" (*mem) \
      : "cr0", "memory"); \
    __val; \
  })
130
/* Atomically add VALUE to *MEM; evaluate to the value *MEM held before
   the addition.  No barrier instruction is emitted, so this form
   provides atomicity only, with no ordering guarantee.  */
#define __arch_atomic_exchange_and_add_32(mem, value) \
  ({ \
    __typeof (*mem) __val, __tmp; \
    __asm __volatile ("1: lwarx %0,0,%3\n" \
      " add %1,%0,%4\n" \
      " stwcx. %1,0,%3\n" \
      " bne- 1b" \
      : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
      : "b" (mem), "r" (value), "m" (*mem) \
      : "cr0", "memory"); \
    __val; \
  })
143
/* Atomically add VALUE to *MEM; evaluate to the pre-addition value of
   *MEM, with acquire semantics (trailing __ARCH_ACQ_INSTR after the
   successful store-conditional).  */
#define __arch_atomic_exchange_and_add_32_acq(mem, value) \
  ({ \
    __typeof (*mem) __val, __tmp; \
    __asm __volatile ("1: lwarx %0,0,%3" MUTEX_HINT_ACQ "\n" \
      " add %1,%0,%4\n" \
      " stwcx. %1,0,%3\n" \
      " bne- 1b\n" \
      __ARCH_ACQ_INSTR \
      : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
      : "b" (mem), "r" (value), "m" (*mem) \
      : "cr0", "memory"); \
    __val; \
  })
157
/* Atomically add VALUE to *MEM; evaluate to the pre-addition value of
   *MEM, with release semantics (__ARCH_REL_INSTR before the loop).  */
#define __arch_atomic_exchange_and_add_32_rel(mem, value) \
  ({ \
    __typeof (*mem) __val, __tmp; \
    __asm __volatile (__ARCH_REL_INSTR "\n" \
      "1: lwarx %0,0,%3" MUTEX_HINT_REL "\n" \
      " add %1,%0,%4\n" \
      " stwcx. %1,0,%3\n" \
      " bne- 1b" \
      : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
      : "b" (mem), "r" (value), "m" (*mem) \
      : "cr0", "memory"); \
    __val; \
  })
171
/* Atomically add 1 to *MEM; evaluate to the new (incremented) value.
   No ordering barrier is emitted.  */
#define __arch_atomic_increment_val_32(mem) \
  ({ \
    __typeof (*(mem)) __val; \
    __asm __volatile ("1: lwarx %0,0,%2\n" \
      " addi %0,%0,1\n" \
      " stwcx. %0,0,%2\n" \
      " bne- 1b" \
      : "=&b" (__val), "=m" (*mem) \
      : "b" (mem), "m" (*mem) \
      : "cr0", "memory"); \
    __val; \
  })
184
/* Atomically subtract 1 from *MEM; evaluate to the new (decremented)
   value.  No ordering barrier is emitted.  */
#define __arch_atomic_decrement_val_32(mem) \
  ({ \
    __typeof (*(mem)) __val; \
    __asm __volatile ("1: lwarx %0,0,%2\n" \
      " subi %0,%0,1\n" \
      " stwcx. %0,0,%2\n" \
      " bne- 1b" \
      : "=&b" (__val), "=m" (*mem) \
      : "b" (mem), "m" (*mem) \
      : "cr0", "memory"); \
    __val; \
  })
197
/* If *MEM > 0 (signed cmpwi), atomically decrement it.  Evaluate to
   the value *MEM held before the (possible) decrement; the "ble 2f"
   skips the store when the old value is <= 0.  The isync at label 2
   gives acquire semantics on both exit paths.
   NOTE(review): __val is declared plain int rather than
   __typeof (*mem) — this 32-bit worker is int-only by construction.  */
#define __arch_atomic_decrement_if_positive_32(mem) \
  ({ int __val, __tmp; \
    __asm __volatile ("1: lwarx %0,0,%3\n" \
      " cmpwi 0,%0,0\n" \
      " addi %1,%0,-1\n" \
      " ble 2f\n" \
      " stwcx. %1,0,%3\n" \
      " bne- 1b\n" \
      "2: " __ARCH_ACQ_INSTR \
      : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
      : "b" (mem), "m" (*mem) \
      : "cr0", "memory"); \
    __val; \
  })
212
/* Compare *MEM with OLDVAL and, if equal, store NEWVAL, with acquire
   semantics.  Evaluates to the value *MEM held before the operation.
   Only 4- and 8-byte operands are supported; other sizes abort.  The
   64-bit worker is supplied by the powerpc32/powerpc64 wrapper header
   that includes this file.  MEM is parenthesized inside sizeof: it is
   a macro argument and may be an arbitrary expression.  */
#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  ({ \
    __typeof (*(mem)) __result; \
    if (sizeof (*(mem)) == 4) \
      __result = __arch_compare_and_exchange_val_32_acq (mem, newval, oldval); \
    else if (sizeof (*(mem)) == 8) \
      __result = __arch_compare_and_exchange_val_64_acq (mem, newval, oldval); \
    else \
      abort (); \
    __result; \
  })
224
/* Compare *MEM with OLDVAL and, if equal, store NEWVAL, with release
   semantics.  Evaluates to the value *MEM held before the operation.
   Only 4- and 8-byte operands are supported; other sizes abort.  MEM
   is parenthesized inside sizeof because it may be an arbitrary
   expression.  */
#define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \
  ({ \
    __typeof (*(mem)) __result; \
    if (sizeof (*(mem)) == 4) \
      __result = __arch_compare_and_exchange_val_32_rel (mem, newval, oldval); \
    else if (sizeof (*(mem)) == 8) \
      __result = __arch_compare_and_exchange_val_64_rel (mem, newval, oldval); \
    else \
      abort (); \
    __result; \
  })
236
/* Atomically store VALUE into *MEM with acquire semantics; evaluates
   to the previous contents of *MEM.  Only 4- and 8-byte operands are
   supported; other sizes abort.  MEM is parenthesized inside sizeof
   because it may be an arbitrary expression.  */
#define atomic_exchange_acq(mem, value) \
  ({ \
    __typeof (*(mem)) __result; \
    if (sizeof (*(mem)) == 4) \
      __result = __arch_atomic_exchange_32_acq (mem, value); \
    else if (sizeof (*(mem)) == 8) \
      __result = __arch_atomic_exchange_64_acq (mem, value); \
    else \
      abort (); \
    __result; \
  })
248
/* Atomically store VALUE into *MEM with release semantics; evaluates
   to the previous contents of *MEM.  Only 4- and 8-byte operands are
   supported; other sizes abort.  MEM is parenthesized inside sizeof
   because it may be an arbitrary expression.  */
#define atomic_exchange_rel(mem, value) \
  ({ \
    __typeof (*(mem)) __result; \
    if (sizeof (*(mem)) == 4) \
      __result = __arch_atomic_exchange_32_rel (mem, value); \
    else if (sizeof (*(mem)) == 8) \
      __result = __arch_atomic_exchange_64_rel (mem, value); \
    else \
      abort (); \
    __result; \
  })
260
/* Atomically add VALUE to *MEM with no ordering guarantee; evaluates
   to the value *MEM held before the addition.  Only 4- and 8-byte
   operands are supported; other sizes abort.  MEM is parenthesized
   inside sizeof because it may be an arbitrary expression.  */
#define atomic_exchange_and_add(mem, value) \
  ({ \
    __typeof (*(mem)) __result; \
    if (sizeof (*(mem)) == 4) \
      __result = __arch_atomic_exchange_and_add_32 (mem, value); \
    else if (sizeof (*(mem)) == 8) \
      __result = __arch_atomic_exchange_and_add_64 (mem, value); \
    else \
      abort (); \
    __result; \
  })
/* Atomically add VALUE to *MEM with acquire semantics; evaluates to
   the value *MEM held before the addition.  Only 4- and 8-byte
   operands are supported; other sizes abort.  MEM is parenthesized
   inside sizeof because it may be an arbitrary expression.  */
#define atomic_exchange_and_add_acq(mem, value) \
  ({ \
    __typeof (*(mem)) __result; \
    if (sizeof (*(mem)) == 4) \
      __result = __arch_atomic_exchange_and_add_32_acq (mem, value); \
    else if (sizeof (*(mem)) == 8) \
      __result = __arch_atomic_exchange_and_add_64_acq (mem, value); \
    else \
      abort (); \
    __result; \
  })
/* Atomically add VALUE to *MEM with release semantics; evaluates to
   the value *MEM held before the addition.  Only 4- and 8-byte
   operands are supported; other sizes abort.  MEM is parenthesized
   inside sizeof because it may be an arbitrary expression.  */
#define atomic_exchange_and_add_rel(mem, value) \
  ({ \
    __typeof (*(mem)) __result; \
    if (sizeof (*(mem)) == 4) \
      __result = __arch_atomic_exchange_and_add_32_rel (mem, value); \
    else if (sizeof (*(mem)) == 8) \
      __result = __arch_atomic_exchange_and_add_64_rel (mem, value); \
    else \
      abort (); \
    __result; \
  })
294
/* Atomically add 1 to *MEM; evaluates to the incremented value.
   Supported operand sizes are 4 and 8 bytes; anything else aborts.  */
#define atomic_increment_val(mem) \
  ({ \
    __typeof (*(mem)) __res; \
    if (sizeof (*(mem)) == 8) \
      __res = __arch_atomic_increment_val_64 (mem); \
    else if (sizeof (*(mem)) == 4) \
      __res = __arch_atomic_increment_val_32 (mem); \
    else \
      abort (); \
    __res; \
  })
306
307 #define atomic_increment(mem) ({ atomic_increment_val (mem); (void) 0; })
308
/* Atomically subtract 1 from *MEM; evaluates to the decremented value.
   Supported operand sizes are 4 and 8 bytes; anything else aborts.  */
#define atomic_decrement_val(mem) \
  ({ \
    __typeof (*(mem)) __res; \
    if (sizeof (*(mem)) == 8) \
      __res = __arch_atomic_decrement_val_64 (mem); \
    else if (sizeof (*(mem)) == 4) \
      __res = __arch_atomic_decrement_val_32 (mem); \
    else \
      abort (); \
    __res; \
  })
320
321 #define atomic_decrement(mem) ({ atomic_decrement_val (mem); (void) 0; })
322
323
/* Decrement *MEM if it is > 0, and return the old value.  Only 4- and
   8-byte operands are supported; other sizes abort.  MEM is
   parenthesized inside sizeof because it may be an arbitrary
   expression.  */
#define atomic_decrement_if_positive(mem) \
  ({ __typeof (*(mem)) __result; \
     if (sizeof (*(mem)) == 4) \
       __result = __arch_atomic_decrement_if_positive_32 (mem); \
     else if (sizeof (*(mem)) == 8) \
       __result = __arch_atomic_decrement_if_positive_64 (mem); \
     else \
       abort (); \
     __result; \
  })