/* linux-atomic.c — Linux-specific atomic operations for PA-RISC
   (libgcc/config/pa/linux-atomic.c).  */
/* Linux-specific atomic operations for PA Linux.
   Copyright (C) 2008-2014 Free Software Foundation, Inc.
   Based on code contributed by CodeSourcery for ARM EABI Linux.
   Modifications for PA Linux by Helge Deller <deller@gmx.de>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */
/* All PA-RISC implementations supported by linux have strongly
   ordered loads and stores.  Only cache flushes and purges can be
   delayed.  The data cache implementations are all globally
   coherent.  Thus, there is no need to synchronize memory accesses.

   GCC automatically issues an asm memory barrier when it encounters
   a __sync_synchronize builtin.  Thus, we do not need to define this
   builtin.

   We implement byte, short and int versions of each atomic operation
   using the kernel helper defined below.  There is no support for
   64-bit operations yet.  */
/* Errno values returned (negated) by the kernel LWS helper.  libgcc is
   built freestanding, so the parisc <asm/errno.h> values are spelled
   out here.  NOTE(review): these three defines were dropped by the
   scrape (they are referenced by __kernel_cmpxchg below); restored from
   the upstream file — confirm against the original.  */
#define EFAULT	14
#define EBUSY	16
#define ENOSYS	251

/* A privileged instruction to crash a userspace program with SIGILL.  */
#define ABORT_INSTRUCTION asm ("iitlbp %r0,(%sr0, %r0)")

/* Determine kernel LWS function call (0=32-bit, 1=64-bit userspace).  */
#define LWS_CAS (sizeof(long) == 4 ? 0 : 1)
50 /* Kernel helper for compare-and-exchange a 32-bit value. */
52 __kernel_cmpxchg (int oldval
, int newval
, int *mem
)
54 register unsigned long lws_mem
asm("r26") = (unsigned long) (mem
);
55 register long lws_ret
asm("r28");
56 register long lws_errno
asm("r21");
57 register int lws_old
asm("r25") = oldval
;
58 register int lws_new
asm("r24") = newval
;
59 asm volatile ( "ble 0xb0(%%sr2, %%r0) \n\t"
61 : "=r" (lws_ret
), "=r" (lws_errno
), "=r" (lws_mem
),
62 "=r" (lws_old
), "=r" (lws_new
)
63 : "i" (LWS_CAS
), "2" (lws_mem
), "3" (lws_old
), "4" (lws_new
)
64 : "r1", "r20", "r22", "r23", "r29", "r31", "memory"
66 if (__builtin_expect (lws_errno
== -EFAULT
|| lws_errno
== -ENOSYS
, 0))
69 /* If the kernel LWS call succeeded (lws_errno == 0), lws_ret contains
70 the old value from memory. If this value is equal to OLDVAL, the
71 new value was written to memory. If not, return -EBUSY. */
72 if (!lws_errno
&& lws_ret
!= oldval
)
79 __kernel_cmpxchg2 (void * oldval
, void * newval
, void *mem
, int val_size
)
81 register unsigned long lws_mem
asm("r26") = (unsigned long) (mem
);
82 register long lws_ret
asm("r28");
83 register long lws_errno
asm("r21");
84 register unsigned long lws_old
asm("r25") = (unsigned long) oldval
;
85 register unsigned long lws_new
asm("r24") = (unsigned long) newval
;
86 register int lws_size
asm("r23") = val_size
;
87 asm volatile ( "ble 0xb0(%%sr2, %%r0) \n\t"
89 : "=r" (lws_ret
), "=r" (lws_errno
)
90 : "i" (2), "r" (lws_mem
), "r" (lws_old
), "r" (lws_new
), "r" (lws_size
)
91 : "r1", "r20", "r22", "r29", "r31", "fr4", "memory"
93 if (__builtin_expect (lws_errno
== -EFAULT
|| lws_errno
== -ENOSYS
, 0))
96 /* If the kernel LWS call fails, retrun EBUSY */
97 if (!lws_errno
&& lws_ret
)
/* All entry points below are exported from libgcc with hidden
   visibility: they resolve within the DSO only.  */
#define HIDDEN __attribute__ ((visibility ("hidden")))

/* Big endian masks.  NOTE(review): MASK_1 was dropped by the scrape;
   restored from upstream — confirm.  */
#define INVERT_MASK_1 24
#define INVERT_MASK_2 16

#define MASK_1 0xffu
#define MASK_2 0xffffu
/* Emit __sync_fetch_and_<OP>_<WIDTH>: atomically apply
   "PFX_OP (old INF_OP val)" to *PTR via a cmpxchg2 retry loop and
   return the value *PTR held before the update.  INDEX is the
   log2-size code passed to __kernel_cmpxchg2.  */
#define FETCH_AND_OP_2(OP, PFX_OP, INF_OP, TYPE, WIDTH, INDEX)		\
  TYPE HIDDEN								\
  __sync_fetch_and_##OP##_##WIDTH (TYPE *ptr, TYPE val)			\
  {									\
    TYPE tmp, newval;							\
    int failure;							\
									\
    do {								\
      tmp = *ptr;							\
      newval = PFX_OP (tmp INF_OP val);					\
      failure = __kernel_cmpxchg2 (&tmp, &newval, ptr, INDEX);		\
    } while (failure != 0);						\
									\
    return tmp;								\
  }
127 FETCH_AND_OP_2 (add
, , +, long long, 8, 3)
128 FETCH_AND_OP_2 (sub
, , -, long long, 8, 3)
129 FETCH_AND_OP_2 (or, , |, long long, 8, 3)
130 FETCH_AND_OP_2 (and, , &, long long, 8, 3)
131 FETCH_AND_OP_2 (xor, , ^, long long, 8, 3)
132 FETCH_AND_OP_2 (nand
, ~, &, long long, 8, 3)
134 FETCH_AND_OP_2 (add
, , +, short, 2, 1)
135 FETCH_AND_OP_2 (sub
, , -, short, 2, 1)
136 FETCH_AND_OP_2 (or, , |, short, 2, 1)
137 FETCH_AND_OP_2 (and, , &, short, 2, 1)
138 FETCH_AND_OP_2 (xor, , ^, short, 2, 1)
139 FETCH_AND_OP_2 (nand
, ~, &, short, 2, 1)
141 FETCH_AND_OP_2 (add
, , +, signed char, 1, 0)
142 FETCH_AND_OP_2 (sub
, , -, signed char, 1, 0)
143 FETCH_AND_OP_2 (or, , |, signed char, 1, 0)
144 FETCH_AND_OP_2 (and, , &, signed char, 1, 0)
145 FETCH_AND_OP_2 (xor, , ^, signed char, 1, 0)
146 FETCH_AND_OP_2 (nand
, ~, &, signed char, 1, 0)
/* Emit __sync_<OP>_and_fetch_<WIDTH>: like FETCH_AND_OP_2, but return
   the NEW value, recomputed from the tmp that finally won the CAS.  */
#define OP_AND_FETCH_2(OP, PFX_OP, INF_OP, TYPE, WIDTH, INDEX)		\
  TYPE HIDDEN								\
  __sync_##OP##_and_fetch_##WIDTH (TYPE *ptr, TYPE val)			\
  {									\
    TYPE tmp, newval;							\
    int failure;							\
									\
    do {								\
      tmp = *ptr;							\
      newval = PFX_OP (tmp INF_OP val);					\
    failure = __kernel_cmpxchg2 (&tmp, &newval, ptr, INDEX);		\
    } while (failure != 0);						\
									\
    return PFX_OP (tmp INF_OP val);					\
  }
164 OP_AND_FETCH_2 (add
, , +, long long, 8, 3)
165 OP_AND_FETCH_2 (sub
, , -, long long, 8, 3)
166 OP_AND_FETCH_2 (or, , |, long long, 8, 3)
167 OP_AND_FETCH_2 (and, , &, long long, 8, 3)
168 OP_AND_FETCH_2 (xor, , ^, long long, 8, 3)
169 OP_AND_FETCH_2 (nand
, ~, &, long long, 8, 3)
171 OP_AND_FETCH_2 (add
, , +, short, 2, 1)
172 OP_AND_FETCH_2 (sub
, , -, short, 2, 1)
173 OP_AND_FETCH_2 (or, , |, short, 2, 1)
174 OP_AND_FETCH_2 (and, , &, short, 2, 1)
175 OP_AND_FETCH_2 (xor, , ^, short, 2, 1)
176 OP_AND_FETCH_2 (nand
, ~, &, short, 2, 1)
178 OP_AND_FETCH_2 (add
, , +, signed char, 1, 0)
179 OP_AND_FETCH_2 (sub
, , -, signed char, 1, 0)
180 OP_AND_FETCH_2 (or, , |, signed char, 1, 0)
181 OP_AND_FETCH_2 (and, , &, signed char, 1, 0)
182 OP_AND_FETCH_2 (xor, , ^, signed char, 1, 0)
183 OP_AND_FETCH_2 (nand
, ~, &, signed char, 1, 0)
/* Emit __sync_fetch_and_<OP>_4: 32-bit fetch-and-op via the word-sized
   __kernel_cmpxchg retry loop; returns the pre-update value.  */
#define FETCH_AND_OP_WORD(OP, PFX_OP, INF_OP)				\
  int HIDDEN								\
  __sync_fetch_and_##OP##_4 (int *ptr, int val)				\
  {									\
    int failure, tmp;							\
									\
    do {								\
      tmp = *ptr;							\
      failure = __kernel_cmpxchg (tmp, PFX_OP (tmp INF_OP val), ptr);	\
    } while (failure != 0);						\
									\
    return tmp;								\
  }
199 FETCH_AND_OP_WORD (add
, , +)
200 FETCH_AND_OP_WORD (sub
, , -)
201 FETCH_AND_OP_WORD (or, , |)
202 FETCH_AND_OP_WORD (and, , &)
203 FETCH_AND_OP_WORD (xor, , ^)
204 FETCH_AND_OP_WORD (nand
, ~, &)
/* Emit __sync_<OP>_and_fetch_4: 32-bit op-and-fetch; returns the NEW
   value, recomputed from the tmp that won the CAS.  */
#define OP_AND_FETCH_WORD(OP, PFX_OP, INF_OP)				\
  int HIDDEN								\
  __sync_##OP##_and_fetch_4 (int *ptr, int val)				\
  {									\
    int tmp, failure;							\
									\
    do {								\
      tmp = *ptr;							\
      failure = __kernel_cmpxchg (tmp, PFX_OP (tmp INF_OP val), ptr);	\
    } while (failure != 0);						\
									\
    return PFX_OP (tmp INF_OP val);					\
  }
220 OP_AND_FETCH_WORD (add
, , +)
221 OP_AND_FETCH_WORD (sub
, , -)
222 OP_AND_FETCH_WORD (or, , |)
223 OP_AND_FETCH_WORD (and, , &)
224 OP_AND_FETCH_WORD (xor, , ^)
225 OP_AND_FETCH_WORD (nand
, ~, &)
227 typedef unsigned char bool;
229 #define COMPARE_AND_SWAP_2(TYPE, WIDTH, INDEX) \
231 __sync_val_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval, \
234 TYPE actual_oldval; \
239 actual_oldval = *ptr; \
241 if (__builtin_expect (oldval != actual_oldval, 0)) \
242 return actual_oldval; \
244 fail = __kernel_cmpxchg2 (&actual_oldval, &newval, ptr, INDEX); \
246 if (__builtin_expect (!fail, 1)) \
247 return actual_oldval; \
252 __sync_bool_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval, \
255 int failure = __kernel_cmpxchg2 (&oldval, &newval, ptr, INDEX); \
256 return (failure != 0); \
259 COMPARE_AND_SWAP_2 (long long, 8, 3)
260 COMPARE_AND_SWAP_2 (short, 2, 1)
261 COMPARE_AND_SWAP_2 (char, 1, 0)
264 __sync_val_compare_and_swap_4 (int *ptr
, int oldval
, int newval
)
266 int actual_oldval
, fail
;
270 actual_oldval
= *ptr
;
272 if (__builtin_expect (oldval
!= actual_oldval
, 0))
273 return actual_oldval
;
275 fail
= __kernel_cmpxchg (actual_oldval
, newval
, ptr
);
277 if (__builtin_expect (!fail
, 1))
278 return actual_oldval
;
283 __sync_bool_compare_and_swap_4 (int *ptr
, int oldval
, int newval
)
285 int failure
= __kernel_cmpxchg (oldval
, newval
, ptr
);
286 return (failure
== 0);
289 #define SYNC_LOCK_TEST_AND_SET_2(TYPE, WIDTH, INDEX) \
291 __sync_lock_test_and_set_##WIDTH (TYPE *ptr, TYPE val) \
298 failure = __kernel_cmpxchg2 (&oldval, &val, ptr, INDEX); \
299 } while (failure != 0); \
304 SYNC_LOCK_TEST_AND_SET_2 (long long, 8, 3)
305 SYNC_LOCK_TEST_AND_SET_2 (short, 2, 1)
306 SYNC_LOCK_TEST_AND_SET_2 (signed char, 1, 0)
309 __sync_lock_test_and_set_4 (int *ptr
, int val
)
315 failure
= __kernel_cmpxchg (oldval
, val
, ptr
);
316 } while (failure
!= 0);
321 #define SYNC_LOCK_RELEASE_2(TYPE, WIDTH, INDEX) \
323 __sync_lock_release_##WIDTH (TYPE *ptr) \
325 TYPE failure, oldval, zero = 0; \
329 failure = __kernel_cmpxchg2 (&oldval, &zero, ptr, INDEX); \
330 } while (failure != 0); \
333 SYNC_LOCK_RELEASE_2 (long long, 8, 3)
334 SYNC_LOCK_RELEASE_2 (short, 2, 1)
335 SYNC_LOCK_RELEASE_2 (signed char, 1, 0)
338 __sync_lock_release_4 (int *ptr
)
344 failure
= __kernel_cmpxchg (oldval
, 0, ptr
);
345 } while (failure
!= 0);