/* Linux-specific atomic operations for PA Linux.
   Copyright (C) 2008-2014 Free Software Foundation, Inc.
   Based on code contributed by CodeSourcery for ARM EABI Linux.
   Modifications for PA Linux by Helge Deller <deller@gmx.de>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

#define EFAULT 14
#define EBUSY 16
#define ENOSYS 251

/* All PA-RISC implementations supported by Linux have strongly
   ordered loads and stores.  Only cache flushes and purges can be
   delayed.  The data cache implementations are all globally
   coherent.  Thus, there is no need to synchronize memory accesses.

   GCC automatically issues an asm memory barrier when it encounters
   a __sync_synchronize builtin.  Thus, we do not need to define this
   builtin.

   We implement byte, short, int and 8-byte versions of each atomic
   operation using the kernel helpers defined below.  */

/* A privileged instruction to crash a userspace program with SIGILL.  */
#define ABORT_INSTRUCTION asm ("iitlbp %r0,(%sr0, %r0)")

/* Determine kernel LWS function call (0=32-bit, 1=64-bit userspace).  */
#define LWS_CAS (sizeof(long) == 4 ? 0 : 1)

/* Kernel helper for compare and exchange of a 32-bit value.  */
static inline long
__kernel_cmpxchg (int oldval, int newval, int *mem)
{
  register unsigned long lws_mem asm("r26") = (unsigned long) (mem);
  register long lws_ret asm("r28");
  register long lws_errno asm("r21");
  register int lws_old asm("r25") = oldval;
  register int lws_new asm("r24") = newval;
  asm volatile ( "ble 0xb0(%%sr2, %%r0)	\n\t"
                 "ldi %5, %%r20		\n\t"
    : "=r" (lws_ret), "=r" (lws_errno), "=r" (lws_mem),
      "=r" (lws_old), "=r" (lws_new)
    : "i" (LWS_CAS), "2" (lws_mem), "3" (lws_old), "4" (lws_new)
    : "r1", "r20", "r22", "r23", "r29", "r31", "memory"
  );
  if (__builtin_expect (lws_errno == -EFAULT || lws_errno == -ENOSYS, 0))
    ABORT_INSTRUCTION;

  /* If the kernel LWS call succeeded (lws_errno == 0), lws_ret contains
     the old value from memory.  If this value is equal to OLDVAL, the
     new value was written to memory.  If not, return -EBUSY.  */
  if (!lws_errno && lws_ret != oldval)
    lws_errno = -EBUSY;

  return lws_errno;
}
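
/* Editorial sketch (not part of the original source): how a caller builds
   an atomic read-modify-write on top of __kernel_cmpxchg.  The name
   atomic_add_example is hypothetical; the FETCH_AND_OP_WORD macro below
   generates the real entry points with this same retry-on-EBUSY pattern.
   Guarded by #if 0 so it is never compiled.  */
#if 0
static int
atomic_add_example (int *ptr, int val)
{
  int old;

  do
    old = *ptr;				/* sample the current value */
  while (__kernel_cmpxchg (old, old + val, ptr) != 0);	/* retry on -EBUSY */

  return old;				/* the value before the addition */
}
#endif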

/* Kernel helper for compare and exchange of 1-, 2-, 4- and 8-byte values.
   VAL_SIZE is the log2 of the operand size in bytes: 0 = 1 byte,
   1 = 2 bytes, 2 = 4 bytes and 3 = 8 bytes.  */
static inline long
__kernel_cmpxchg2 (void *oldval, void *newval, void *mem, int val_size)
{
  register unsigned long lws_mem asm("r26") = (unsigned long) (mem);
  register long lws_ret asm("r28");
  register long lws_errno asm("r21");
  register unsigned long lws_old asm("r25") = (unsigned long) oldval;
  register unsigned long lws_new asm("r24") = (unsigned long) newval;
  register int lws_size asm("r23") = val_size;
  asm volatile ( "ble 0xb0(%%sr2, %%r0)	\n\t"
                 "ldi %2, %%r20		\n\t"
    : "=r" (lws_ret), "=r" (lws_errno)
    : "i" (2), "r" (lws_mem), "r" (lws_old), "r" (lws_new), "r" (lws_size)
    : "r1", "r20", "r22", "r29", "r31", "fr4", "memory"
  );
  if (__builtin_expect (lws_errno == -EFAULT || lws_errno == -ENOSYS, 0))
    ABORT_INSTRUCTION;

  /* If the kernel LWS call succeeded but the compare failed (nonzero
     return value), report -EBUSY so the caller retries.  */
  if (!lws_errno && lws_ret)
    lws_errno = -EBUSY;

  return lws_errno;
}
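
/* Editorial sketch (an addition, not original code): a direct 16-bit
   compare-and-swap through __kernel_cmpxchg2.  Index 1 selects the 2-byte
   operation; the macro instantiations below use 0 for bytes and 3 for
   8-byte values.  Guarded by #if 0 so it is never compiled.  */
#if 0
static long
cas_short_example (short *ptr, short expected, short desired)
{
  /* Returns 0 on success, -EBUSY if *ptr no longer equals EXPECTED.  */
  return __kernel_cmpxchg2 (&expected, &desired, ptr, 1);
}
#endif
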
#define HIDDEN __attribute__ ((visibility ("hidden")))

/* Big endian masks  */
#define INVERT_MASK_1 24
#define INVERT_MASK_2 16

#define MASK_1 0xffu
#define MASK_2 0xffffu

#define FETCH_AND_OP_2(OP, PFX_OP, INF_OP, TYPE, WIDTH, INDEX)	\
  TYPE HIDDEN							\
  __sync_fetch_and_##OP##_##WIDTH (TYPE *ptr, TYPE val)		\
  {								\
    TYPE tmp, newval;						\
    int failure;						\
								\
    do {							\
      tmp = *ptr;						\
      newval = PFX_OP (tmp INF_OP val);				\
      failure = __kernel_cmpxchg2 (&tmp, &newval, ptr, INDEX);	\
    } while (failure != 0);					\
								\
    return tmp;							\
  }
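
/* For illustration (an editorial addition): FETCH_AND_OP_2 (add, , +,
   short, 2, 1) expands to the function below.  Guarded by #if 0 because
   the instantiation that follows emits the real definition.  */
#if 0
short HIDDEN
__sync_fetch_and_add_2 (short *ptr, short val)
{
  short tmp, newval;
  int failure;

  do {
    tmp = *ptr;				/* sample the current value */
    newval = (tmp + val);		/* compute the updated value */
    failure = __kernel_cmpxchg2 (&tmp, &newval, ptr, 1);  /* 1 = 2 bytes */
  } while (failure != 0);		/* retry until the CAS succeeds */

  return tmp;				/* return the old value */
}
#endif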

FETCH_AND_OP_2 (add, , +, long long, 8, 3)
FETCH_AND_OP_2 (sub, , -, long long, 8, 3)
FETCH_AND_OP_2 (or, , |, long long, 8, 3)
FETCH_AND_OP_2 (and, , &, long long, 8, 3)
FETCH_AND_OP_2 (xor, , ^, long long, 8, 3)
FETCH_AND_OP_2 (nand, ~, &, long long, 8, 3)

FETCH_AND_OP_2 (add, , +, short, 2, 1)
FETCH_AND_OP_2 (sub, , -, short, 2, 1)
FETCH_AND_OP_2 (or, , |, short, 2, 1)
FETCH_AND_OP_2 (and, , &, short, 2, 1)
FETCH_AND_OP_2 (xor, , ^, short, 2, 1)
FETCH_AND_OP_2 (nand, ~, &, short, 2, 1)

FETCH_AND_OP_2 (add, , +, signed char, 1, 0)
FETCH_AND_OP_2 (sub, , -, signed char, 1, 0)
FETCH_AND_OP_2 (or, , |, signed char, 1, 0)
FETCH_AND_OP_2 (and, , &, signed char, 1, 0)
FETCH_AND_OP_2 (xor, , ^, signed char, 1, 0)
FETCH_AND_OP_2 (nand, ~, &, signed char, 1, 0)

#define OP_AND_FETCH_2(OP, PFX_OP, INF_OP, TYPE, WIDTH, INDEX)	\
  TYPE HIDDEN							\
  __sync_##OP##_and_fetch_##WIDTH (TYPE *ptr, TYPE val)		\
  {								\
    TYPE tmp, newval;						\
    int failure;						\
								\
    do {							\
      tmp = *ptr;						\
      newval = PFX_OP (tmp INF_OP val);				\
      failure = __kernel_cmpxchg2 (&tmp, &newval, ptr, INDEX);	\
    } while (failure != 0);					\
								\
    return PFX_OP (tmp INF_OP val);				\
  }

OP_AND_FETCH_2 (add, , +, long long, 8, 3)
OP_AND_FETCH_2 (sub, , -, long long, 8, 3)
OP_AND_FETCH_2 (or, , |, long long, 8, 3)
OP_AND_FETCH_2 (and, , &, long long, 8, 3)
OP_AND_FETCH_2 (xor, , ^, long long, 8, 3)
OP_AND_FETCH_2 (nand, ~, &, long long, 8, 3)

OP_AND_FETCH_2 (add, , +, short, 2, 1)
OP_AND_FETCH_2 (sub, , -, short, 2, 1)
OP_AND_FETCH_2 (or, , |, short, 2, 1)
OP_AND_FETCH_2 (and, , &, short, 2, 1)
OP_AND_FETCH_2 (xor, , ^, short, 2, 1)
OP_AND_FETCH_2 (nand, ~, &, short, 2, 1)

OP_AND_FETCH_2 (add, , +, signed char, 1, 0)
OP_AND_FETCH_2 (sub, , -, signed char, 1, 0)
OP_AND_FETCH_2 (or, , |, signed char, 1, 0)
OP_AND_FETCH_2 (and, , &, signed char, 1, 0)
OP_AND_FETCH_2 (xor, , ^, signed char, 1, 0)
OP_AND_FETCH_2 (nand, ~, &, signed char, 1, 0)

#define FETCH_AND_OP_WORD(OP, PFX_OP, INF_OP)			\
  int HIDDEN							\
  __sync_fetch_and_##OP##_4 (int *ptr, int val)			\
  {								\
    int failure, tmp;						\
								\
    do {							\
      tmp = *ptr;						\
      failure = __kernel_cmpxchg (tmp, PFX_OP (tmp INF_OP val), ptr);	\
    } while (failure != 0);					\
								\
    return tmp;							\
  }

FETCH_AND_OP_WORD (add, , +)
FETCH_AND_OP_WORD (sub, , -)
FETCH_AND_OP_WORD (or, , |)
FETCH_AND_OP_WORD (and, , &)
FETCH_AND_OP_WORD (xor, , ^)
FETCH_AND_OP_WORD (nand, ~, &)

#define OP_AND_FETCH_WORD(OP, PFX_OP, INF_OP)			\
  int HIDDEN							\
  __sync_##OP##_and_fetch_4 (int *ptr, int val)			\
  {								\
    int tmp, failure;						\
								\
    do {							\
      tmp = *ptr;						\
      failure = __kernel_cmpxchg (tmp, PFX_OP (tmp INF_OP val), ptr);	\
    } while (failure != 0);					\
								\
    return PFX_OP (tmp INF_OP val);				\
  }

OP_AND_FETCH_WORD (add, , +)
OP_AND_FETCH_WORD (sub, , -)
OP_AND_FETCH_WORD (or, , |)
OP_AND_FETCH_WORD (and, , &)
OP_AND_FETCH_WORD (xor, , ^)
OP_AND_FETCH_WORD (nand, ~, &)

typedef unsigned char bool;

#define COMPARE_AND_SWAP_2(TYPE, WIDTH, INDEX)			\
  TYPE HIDDEN							\
  __sync_val_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval,	\
				       TYPE newval)		\
  {								\
    TYPE actual_oldval;						\
    int fail;							\
								\
    while (1)							\
      {								\
	actual_oldval = *ptr;					\
								\
	if (__builtin_expect (oldval != actual_oldval, 0))	\
	  return actual_oldval;					\
								\
	fail = __kernel_cmpxchg2 (&actual_oldval, &newval, ptr, INDEX);	\
								\
	if (__builtin_expect (!fail, 1))			\
	  return actual_oldval;					\
      }								\
  }								\
								\
  bool HIDDEN							\
  __sync_bool_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval,	\
					TYPE newval)		\
  {								\
    int failure = __kernel_cmpxchg2 (&oldval, &newval, ptr, INDEX);	\
    /* Success is a zero return from the kernel helper,		\
       matching the 4-byte version below.  */			\
    return (failure == 0);					\
  }

COMPARE_AND_SWAP_2 (long long, 8, 3)
COMPARE_AND_SWAP_2 (short, 2, 1)
COMPARE_AND_SWAP_2 (char, 1, 0)
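
/* Editorial sketch (an addition): intended semantics of the generated bool
   variant; it returns true exactly when OLDVAL matched and NEWVAL was
   stored.  Guarded by #if 0 so it is never compiled.  */
#if 0
static void
bool_cas_example (short *flag)
{
  /* Attempt a one-shot 0 -> 1 transition.  */
  if (__sync_bool_compare_and_swap_2 (flag, 0, 1))
    {
      /* We won the race; *flag is now 1.  */
    }
  else
    {
      /* Another thread changed *flag first.  */
    }
}
#endif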

int HIDDEN
__sync_val_compare_and_swap_4 (int *ptr, int oldval, int newval)
{
  int actual_oldval, fail;

  while (1)
    {
      actual_oldval = *ptr;

      if (__builtin_expect (oldval != actual_oldval, 0))
	return actual_oldval;

      fail = __kernel_cmpxchg (actual_oldval, newval, ptr);

      if (__builtin_expect (!fail, 1))
	return actual_oldval;
    }
}

bool HIDDEN
__sync_bool_compare_and_swap_4 (int *ptr, int oldval, int newval)
{
  int failure = __kernel_cmpxchg (oldval, newval, ptr);
  return (failure == 0);
}

#define SYNC_LOCK_TEST_AND_SET_2(TYPE, WIDTH, INDEX)		\
  TYPE HIDDEN							\
  __sync_lock_test_and_set_##WIDTH (TYPE *ptr, TYPE val)	\
  {								\
    TYPE oldval;						\
    int failure;						\
								\
    do {							\
      oldval = *ptr;						\
      failure = __kernel_cmpxchg2 (&oldval, &val, ptr, INDEX);	\
    } while (failure != 0);					\
								\
    return oldval;						\
  }

SYNC_LOCK_TEST_AND_SET_2 (long long, 8, 3)
SYNC_LOCK_TEST_AND_SET_2 (short, 2, 1)
SYNC_LOCK_TEST_AND_SET_2 (signed char, 1, 0)

int HIDDEN
__sync_lock_test_and_set_4 (int *ptr, int val)
{
  int failure, oldval;

  do {
    oldval = *ptr;
    failure = __kernel_cmpxchg (oldval, val, ptr);
  } while (failure != 0);

  return oldval;
}

#define SYNC_LOCK_RELEASE_2(TYPE, WIDTH, INDEX)			\
  void HIDDEN							\
  __sync_lock_release_##WIDTH (TYPE *ptr)			\
  {								\
    TYPE failure, oldval, zero = 0;				\
								\
    do {							\
      oldval = *ptr;						\
      failure = __kernel_cmpxchg2 (&oldval, &zero, ptr, INDEX);	\
    } while (failure != 0);					\
  }

SYNC_LOCK_RELEASE_2 (long long, 8, 3)
SYNC_LOCK_RELEASE_2 (short, 2, 1)
SYNC_LOCK_RELEASE_2 (signed char, 1, 0)

void HIDDEN
__sync_lock_release_4 (int *ptr)
{
  int failure, oldval;

  do {
    oldval = *ptr;
    failure = __kernel_cmpxchg (oldval, 0, ptr);
  } while (failure != 0);
}
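
/* Editorial sketch (an addition, not original code): how the test-and-set
   and release entry points pair up as a minimal spinlock.  The names
   lock_example, spinlock_acquire_example and spinlock_release_example are
   hypothetical.  Guarded by #if 0 so it is never compiled.  */
#if 0
static int lock_example;

static void
spinlock_acquire_example (void)
{
  /* Atomically store 1 and fetch the previous value; a previous value
     of 0 means the lock was free and we now own it.  */
  while (__sync_lock_test_and_set_4 (&lock_example, 1) != 0)
    continue;				/* spin until the lock is free */
}

static void
spinlock_release_example (void)
{
  __sync_lock_release_4 (&lock_example);	/* atomically store 0 */
}
#endif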