/* Atomic operations.  PowerPC64 version.
   Copyright (C) 2003-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

/* POWER6 adds a "Mutex Hint" to the Load and Reserve instruction.
   This is a hint to the hardware to expect additional updates adjacent
   to the lock word or not.  If we are acquiring a Mutex, the hint
   should be true.  Otherwise we are releasing a Mutex or doing a simple
   atomic operation.  In that case we don't expect additional updates
   adjacent to the lock word after the Store Conditional and the hint
   should be false.  */

#if defined _ARCH_PWR6 || defined _ARCH_PWR6X
# define MUTEX_HINT_ACQ ",1"
# define MUTEX_HINT_REL ",0"
#else
# define MUTEX_HINT_ACQ
# define MUTEX_HINT_REL
#endif

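/* For illustration: the hint macros above are pasted directly after the
   larx operand list, so on POWER6 an acquiring sequence below assembles its
   load-and-reserve as "lwarx %0,0,%2,1" (further stores to the lock word
   are expected), a releasing or plain atomic sequence as "lwarx %0,0,%2,0",
   and on older processors the hint operand is simply omitted, leaving
   "lwarx %0,0,%2".  */
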
#define __HAVE_64B_ATOMICS 1
#define USE_ATOMIC_COMPILER_BUILTINS 0

/* The 32-bit exchange_bool is different on powerpc64 because the subf
   does signed 64-bit arithmetic while the lwarx is a 32-bit unsigned
   (load word and zero the high 32 bits) load.
   In powerpc64 register values are 64-bit by default, including oldval.
   The value in oldval may carry unknown sign extension, while lwarx loads
   the 32-bit value as unsigned.  So we explicitly clear the high 32 bits
   in oldval.  */
#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval)         \
({                                                                            \
  unsigned int __tmp, __tmp2;                                                 \
  __asm __volatile ("   clrldi  %1,%1,32\n"                                   \
                    "1: lwarx   %0,0,%2" MUTEX_HINT_ACQ "\n"                  \
                    "   subf.   %0,%1,%0\n"                                   \
                    "   bne     2f\n"                                         \
                    "   stwcx.  %4,0,%2\n"                                    \
                    "   bne-    1b\n"                                         \
                    "2: " __ARCH_ACQ_INSTR                                    \
                    : "=&r" (__tmp), "=r" (__tmp2)                            \
                    : "b" (mem), "1" (oldval), "r" (newval)                   \
                    : "cr0", "memory");                                       \
  __tmp != 0;                                                                 \
})

#define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval)         \
({                                                                            \
  unsigned int __tmp, __tmp2;                                                 \
  __asm __volatile (__ARCH_REL_INSTR "\n"                                     \
                    "   clrldi  %1,%1,32\n"                                   \
                    "1: lwarx   %0,0,%2" MUTEX_HINT_REL "\n"                  \
                    "   subf.   %0,%1,%0\n"                                   \
                    "   bne     2f\n"                                         \
                    "   stwcx.  %4,0,%2\n"                                    \
                    "   bne-    1b\n"                                         \
                    "2: "                                                     \
                    : "=&r" (__tmp), "=r" (__tmp2)                            \
                    : "b" (mem), "1" (oldval), "r" (newval)                   \
                    : "cr0", "memory");                                       \
  __tmp != 0;                                                                 \
})

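#if 0
/* Illustrative sketch only; the function name is hypothetical and the block
   is not compiled.  It shows why the clrldi above matters: a negative 'int'
   oldval such as -1 may arrive sign-extended in its 64-bit register
   (0xffffffffffffffff), while lwarx zero-extends the word from memory
   (0x00000000ffffffff), so without clearing the high 32 bits the subf.
   comparison could never report equality.  */
static int
__example_bool_cas_32 (unsigned int *mem)
{
  /* Evaluates to 0 when *mem contained (unsigned int) -1 and 0 was stored.  */
  return __arch_compare_and_exchange_bool_32_acq (mem, 0, -1);
}
#endif
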
/*
 * Only powerpc64 processors support Load doubleword and reserve index (ldarx)
 * and Store doubleword conditional indexed (stdcx) instructions.  So here
 * we define the 64-bit forms.
 */
#define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval)         \
({                                                                            \
  unsigned long __tmp;                                                        \
  __asm __volatile (                                                          \
                    "1: ldarx   %0,0,%1" MUTEX_HINT_ACQ "\n"                  \
                    "   subf.   %0,%2,%0\n"                                   \
                    "   bne     2f\n"                                         \
                    "   stdcx.  %3,0,%1\n"                                    \
                    "   bne-    1b\n"                                         \
                    "2: " __ARCH_ACQ_INSTR                                    \
                    : "=&r" (__tmp)                                           \
                    : "b" (mem), "r" (oldval), "r" (newval)                   \
                    : "cr0", "memory");                                       \
  __tmp != 0;                                                                 \
})

#define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval)         \
({                                                                            \
  unsigned long __tmp;                                                        \
  __asm __volatile (__ARCH_REL_INSTR "\n"                                     \
                    "1: ldarx   %0,0,%1" MUTEX_HINT_REL "\n"                  \
                    "   subf.   %0,%2,%0\n"                                   \
                    "   bne     2f\n"                                         \
                    "   stdcx.  %3,0,%1\n"                                    \
                    "   bne-    1b\n"                                         \
                    "2: "                                                     \
                    : "=&r" (__tmp)                                           \
                    : "b" (mem), "r" (oldval), "r" (newval)                   \
                    : "cr0", "memory");                                       \
  __tmp != 0;                                                                 \
})

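#if 0
/* Illustrative sketch only; the names are hypothetical and the block is not
   compiled.  A minimal test-and-set style lock built on the 64-bit forms
   above: the bool macros evaluate to 0 when the old value matched and the
   new value was stored, so 0 here means the lock changed hands.  */
static int
__example_try_lock_64 (long *lock)
{
  /* Acquire: 0 -> 1 with acquire semantics; evaluates to 0 on success.  */
  return __arch_compare_and_exchange_bool_64_acq (lock, 1L, 0L);
}

static int
__example_unlock_64 (long *lock)
{
  /* Release: 1 -> 0 with release semantics; evaluates to 0 on success.  */
  return __arch_compare_and_exchange_bool_64_rel (lock, 0L, 1L);
}
#endif
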
#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval)          \
  ({                                                                          \
      __typeof (*(mem)) __tmp;                                                \
      __typeof (mem) __memp = (mem);                                          \
      __asm __volatile (                                                      \
                        "1: ldarx   %0,0,%1" MUTEX_HINT_ACQ "\n"              \
                        "   cmpd    %0,%2\n"                                  \
                        "   bne     2f\n"                                     \
                        "   stdcx.  %3,0,%1\n"                                \
                        "   bne-    1b\n"                                     \
                        "2: " __ARCH_ACQ_INSTR                                \
                        : "=&r" (__tmp)                                       \
                        : "b" (__memp), "r" (oldval), "r" (newval)            \
                        : "cr0", "memory");                                   \
      __tmp;                                                                  \
  })

#define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval)          \
  ({                                                                          \
      __typeof (*(mem)) __tmp;                                                \
      __typeof (mem) __memp = (mem);                                          \
      __asm __volatile (__ARCH_REL_INSTR "\n"                                 \
                        "1: ldarx   %0,0,%1" MUTEX_HINT_REL "\n"              \
                        "   cmpd    %0,%2\n"                                  \
                        "   bne     2f\n"                                     \
                        "   stdcx.  %3,0,%1\n"                                \
                        "   bne-    1b\n"                                     \
                        "2: "                                                 \
                        : "=&r" (__tmp)                                       \
                        : "b" (__memp), "r" (oldval), "r" (newval)            \
                        : "cr0", "memory");                                   \
      __tmp;                                                                  \
  })

#define __arch_atomic_exchange_64_acq(mem, value)                            \
    ({                                                                        \
      __typeof (*mem) __val;                                                  \
      __asm __volatile (__ARCH_REL_INSTR "\n"                                 \
                        "1: ldarx   %0,0,%2" MUTEX_HINT_ACQ "\n"              \
                        "   stdcx.  %3,0,%2\n"                                \
                        "   bne-    1b\n"                                     \
                        " " __ARCH_ACQ_INSTR                                  \
                        : "=&r" (__val), "=m" (*mem)                          \
                        : "b" (mem), "r" (value), "m" (*mem)                  \
                        : "cr0", "memory");                                   \
      __val;                                                                  \
    })

#define __arch_atomic_exchange_64_rel(mem, value)                            \
    ({                                                                        \
      __typeof (*mem) __val;                                                  \
      __asm __volatile (__ARCH_REL_INSTR "\n"                                 \
                        "1: ldarx   %0,0,%2" MUTEX_HINT_REL "\n"              \
                        "   stdcx.  %3,0,%2\n"                                \
                        "   bne-    1b"                                       \
                        : "=&r" (__val), "=m" (*mem)                          \
                        : "b" (mem), "r" (value), "m" (*mem)                  \
                        : "cr0", "memory");                                   \
      __val;                                                                  \
    })

#define __arch_atomic_exchange_and_add_64(mem, value)                        \
    ({                                                                        \
      __typeof (*mem) __val, __tmp;                                           \
      __asm __volatile ("1: ldarx   %0,0,%3\n"                                \
                        "   add     %1,%0,%4\n"                               \
                        "   stdcx.  %1,0,%3\n"                                \
                        "   bne-    1b"                                       \
                        : "=&b" (__val), "=&r" (__tmp), "=m" (*mem)           \
                        : "b" (mem), "r" (value), "m" (*mem)                  \
                        : "cr0", "memory");                                   \
      __val;                                                                  \
    })

#define __arch_atomic_exchange_and_add_64_acq(mem, value)                    \
    ({                                                                        \
      __typeof (*mem) __val, __tmp;                                           \
      __asm __volatile ("1: ldarx   %0,0,%3" MUTEX_HINT_ACQ "\n"              \
                        "   add     %1,%0,%4\n"                               \
                        "   stdcx.  %1,0,%3\n"                                \
                        "   bne-    1b\n"                                     \
                        __ARCH_ACQ_INSTR                                      \
                        : "=&b" (__val), "=&r" (__tmp), "=m" (*mem)           \
                        : "b" (mem), "r" (value), "m" (*mem)                  \
                        : "cr0", "memory");                                   \
      __val;                                                                  \
    })

#define __arch_atomic_exchange_and_add_64_rel(mem, value)                    \
    ({                                                                        \
      __typeof (*mem) __val, __tmp;                                           \
      __asm __volatile (__ARCH_REL_INSTR "\n"                                 \
                        "1: ldarx   %0,0,%3" MUTEX_HINT_REL "\n"              \
                        "   add     %1,%0,%4\n"                               \
                        "   stdcx.  %1,0,%3\n"                                \
                        "   bne-    1b"                                       \
                        : "=&b" (__val), "=&r" (__tmp), "=m" (*mem)           \
                        : "b" (mem), "r" (value), "m" (*mem)                  \
                        : "cr0", "memory");                                   \
      __val;                                                                  \
    })

#define __arch_atomic_increment_val_64(mem)                                  \
    ({                                                                        \
      __typeof (*(mem)) __val;                                                \
      __asm __volatile ("1: ldarx   %0,0,%2\n"                                \
                        "   addi    %0,%0,1\n"                                \
                        "   stdcx.  %0,0,%2\n"                                \
                        "   bne-    1b"                                       \
                        : "=&b" (__val), "=m" (*mem)                          \
                        : "b" (mem), "m" (*mem)                               \
                        : "cr0", "memory");                                   \
      __val;                                                                  \
    })

#define __arch_atomic_decrement_val_64(mem)                                  \
    ({                                                                        \
      __typeof (*(mem)) __val;                                                \
      __asm __volatile ("1: ldarx   %0,0,%2\n"                                \
                        "   subi    %0,%0,1\n"                                \
                        "   stdcx.  %0,0,%2\n"                                \
                        "   bne-    1b"                                       \
                        : "=&b" (__val), "=m" (*mem)                          \
                        : "b" (mem), "m" (*mem)                               \
                        : "cr0", "memory");                                   \
      __val;                                                                  \
    })

#define __arch_atomic_decrement_if_positive_64(mem)                          \
  ({ int __val, __tmp;                                                        \
     __asm __volatile ("1: ldarx   %0,0,%3\n"                                 \
                       "   cmpdi   0,%0,0\n"                                  \
                       "   addi    %1,%0,-1\n"                                \
                       "   ble     2f\n"                                      \
                       "   stdcx.  %1,0,%3\n"                                 \
                       "   bne-    1b\n"                                      \
                       "2: " __ARCH_ACQ_INSTR                                 \
                       : "=&b" (__val), "=&r" (__tmp), "=m" (*mem)            \
                       : "b" (mem), "m" (*mem)                                \
                       : "cr0", "memory");                                    \
     __val;                                                                   \
  })

/*
 * All powerpc64 processors support the new "light weight" sync (lwsync).
 */
#define atomic_read_barrier()	__asm ("lwsync" ::: "memory")
/*
 * "light weight" sync can also be used for the release barrier.
 */
#ifndef UP
# define __ARCH_REL_INSTR	"lwsync"
#endif
#define atomic_write_barrier()	__asm ("lwsync" ::: "memory")

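#if 0
/* Illustrative sketch only; the variables and functions are hypothetical and
   the block is not compiled.  It shows where the lwsync-based barriers above
   would sit in a simple publish/consume pattern.  */
static int __example_data;
static volatile int __example_flag;

static void
__example_publish (int value)
{
  __example_data = value;
  atomic_write_barrier ();	/* Order the data store before the flag store.  */
  __example_flag = 1;
}

static int
__example_consume (void)
{
  while (__example_flag == 0)
    ;				/* Spin until the producer sets the flag.  */
  atomic_read_barrier ();	/* Order the flag load before the data load.  */
  return __example_data;
}
#endif
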
/*
 * Include the rest of the atomic ops macros which are common to both
 * powerpc32 and powerpc64.
 */
#include_next <bits/atomic.h>