/* Linux-specific atomic operations for PA Linux.
   Copyright (C) 2008-2021 Free Software Foundation, Inc.
   Based on code contributed by CodeSourcery for ARM EABI Linux.
   Modifications for PA Linux by Helge Deller <deller@gmx.de>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

#define EFAULT	14
#define EBUSY	16
#define ENOSYS	251

/* PA-RISC 2.0 supports out-of-order execution for loads and stores.
   Thus, we need to synchronize memory accesses.  For more info, see:
   "Advanced Performance Features of the 64-bit PA-8000" by Doug Hunt.

   We implement byte, short, int and 64-bit versions of each atomic
   operation using the kernel helpers defined below.  */

/* Determine kernel LWS function call (0=32-bit, 1=64-bit userspace).  */
#define LWS_CAS (sizeof(long) == 4 ? 0 : 1)

/* Kernel helper for compare-and-exchange a 32-bit value.  */
static inline long
__kernel_cmpxchg (volatile void *mem, int oldval, int newval)
{
  register unsigned long lws_mem asm("r26") = (unsigned long) (mem);
  register int lws_old asm("r25") = oldval;
  register int lws_new asm("r24") = newval;
  register long lws_ret   asm("r28");
  register long lws_errno asm("r21");
  asm volatile (	"ble	0xb0(%%sr2, %%r0)	\n\t"
			"ldi	%2, %%r20		\n\t"
	: "=r" (lws_ret), "=r" (lws_errno)
	: "i" (LWS_CAS), "r" (lws_mem), "r" (lws_old), "r" (lws_new)
	: "r1", "r20", "r22", "r23", "r29", "r31", "memory"
  );

  /* If the kernel LWS call succeeded (lws_errno == 0), lws_ret contains
     the old value from memory.  If this value is equal to OLDVAL, the
     new value was written to memory.  If not, return -EBUSY.  */
  if (!lws_errno && lws_ret != oldval)
    return -EBUSY;

  return lws_errno;
}
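
/* Illustrative sketch, not part of the upstream file: callers build
   lock-free read-modify-write loops on top of __kernel_cmpxchg by
   re-reading the location and retrying until the helper reports
   success, exactly as the __sync_* implementations below do.  The
   function name is hypothetical.  */
static inline int __attribute__ ((unused))
__example_fetch_and_increment (volatile int *p)
{
  int old;
  long failure;

  do {
    old = __atomic_load_n (p, __ATOMIC_RELAXED);
    /* Non-zero means either a lost race (-EBUSY) or a kernel error;
       retry in both cases, as the helpers below do.  */
    failure = __kernel_cmpxchg (p, old, old + 1);
  } while (failure != 0);

  return old;
}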

/* Kernel helper for compare-and-exchange a 8, 16, 32 or 64-bit value.  */
static inline long
__kernel_cmpxchg2 (volatile void *mem, const void *oldval, const void *newval,
		   int val_size)
{
  register unsigned long lws_mem asm("r26") = (unsigned long) (mem);
  register unsigned long lws_old asm("r25") = (unsigned long) oldval;
  register unsigned long lws_new asm("r24") = (unsigned long) newval;
  register int lws_size asm("r23") = val_size;
  register long lws_ret   asm("r28");
  register long lws_errno asm("r21");
  asm volatile (	"ble	0xb0(%%sr2, %%r0)	\n\t"
			"ldi	%6, %%r20		\n\t"
	: "=r" (lws_ret), "=r" (lws_errno), "+r" (lws_mem),
	  "+r" (lws_old), "+r" (lws_new), "+r" (lws_size)
	: "i" (2)
	: "r1", "r20", "r22", "r29", "r31", "fr4", "memory"
  );

  /* If the kernel LWS call is successful, lws_ret contains 0.  */
  if (__builtin_expect (lws_ret == 0, 1))
    return 0;

  /* If the kernel LWS call fails with no error, return -EBUSY.  */
  if (__builtin_expect (!lws_errno, 0))
    return -EBUSY;

  return lws_errno;
}
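
/* Illustrative sketch, not part of the upstream file: unlike
   __kernel_cmpxchg, this helper takes OLDVAL/NEWVAL by address plus a
   size index (0 = 1 byte, 1 = 2 bytes, 2 = 4 bytes, 3 = 8 bytes, as
   used by the INDEX arguments below), which is what lets 32-bit
   userspace CAS a 64-bit value.  The function name is hypothetical.  */
static inline long __attribute__ ((unused))
__example_cas_short (volatile short unsigned int *p,
		     short unsigned int expected,
		     short unsigned int desired)
{
  /* Size index 1 selects the 2-byte variant.  */
  return __kernel_cmpxchg2 (p, &expected, &desired, 1);
}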

#define HIDDEN __attribute__ ((visibility ("hidden")))

/* Big endian masks  */
#define INVERT_MASK_1 24
#define INVERT_MASK_2 16

#define MASK_1 0xffu
#define MASK_2 0xffffu

#define FETCH_AND_OP_2(OP, PFX_OP, INF_OP, TYPE, WIDTH, INDEX)		\
  TYPE HIDDEN								\
  __sync_fetch_and_##OP##_##WIDTH (volatile void *ptr, TYPE val)	\
  {									\
    TYPE tmp, newval;							\
    long failure;							\
									\
    do {								\
      tmp = __atomic_load_n ((volatile TYPE *)ptr, __ATOMIC_RELAXED);	\
      newval = PFX_OP (tmp INF_OP val);					\
      failure = __kernel_cmpxchg2 (ptr, &tmp, &newval, INDEX);		\
    } while (failure != 0);						\
									\
    return tmp;								\
  }

FETCH_AND_OP_2 (add,   , +, long long unsigned int, 8, 3)
FETCH_AND_OP_2 (sub,   , -, long long unsigned int, 8, 3)
FETCH_AND_OP_2 (or,    , |, long long unsigned int, 8, 3)
FETCH_AND_OP_2 (and,   , &, long long unsigned int, 8, 3)
FETCH_AND_OP_2 (xor,   , ^, long long unsigned int, 8, 3)
FETCH_AND_OP_2 (nand, ~, &, long long unsigned int, 8, 3)

FETCH_AND_OP_2 (add,   , +, short unsigned int, 2, 1)
FETCH_AND_OP_2 (sub,   , -, short unsigned int, 2, 1)
FETCH_AND_OP_2 (or,    , |, short unsigned int, 2, 1)
FETCH_AND_OP_2 (and,   , &, short unsigned int, 2, 1)
FETCH_AND_OP_2 (xor,   , ^, short unsigned int, 2, 1)
FETCH_AND_OP_2 (nand, ~, &, short unsigned int, 2, 1)

FETCH_AND_OP_2 (add,   , +, unsigned char, 1, 0)
FETCH_AND_OP_2 (sub,   , -, unsigned char, 1, 0)
FETCH_AND_OP_2 (or,    , |, unsigned char, 1, 0)
FETCH_AND_OP_2 (and,   , &, unsigned char, 1, 0)
FETCH_AND_OP_2 (xor,   , ^, unsigned char, 1, 0)
FETCH_AND_OP_2 (nand, ~, &, unsigned char, 1, 0)
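
/* Illustrative sketch, not part of the upstream file: GCC resolves the
   legacy __sync builtins on sub-word and 8-byte operands to the hidden
   entry points generated above (here __sync_fetch_and_add_2).  The
   function name is hypothetical.  */
static short unsigned int __attribute__ ((unused))
__example_counter_bump (volatile short unsigned int *counter)
{
  /* Returns the value the counter held *before* the addition.  */
  return __sync_fetch_and_add (counter, 1);
}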

#define OP_AND_FETCH_2(OP, PFX_OP, INF_OP, TYPE, WIDTH, INDEX)		\
  TYPE HIDDEN								\
  __sync_##OP##_and_fetch_##WIDTH (volatile void *ptr, TYPE val)	\
  {									\
    TYPE tmp, newval;							\
    long failure;							\
									\
    do {								\
      tmp = __atomic_load_n ((volatile TYPE *)ptr, __ATOMIC_RELAXED);	\
      newval = PFX_OP (tmp INF_OP val);					\
      failure = __kernel_cmpxchg2 (ptr, &tmp, &newval, INDEX);		\
    } while (failure != 0);						\
									\
    return PFX_OP (tmp INF_OP val);					\
  }

OP_AND_FETCH_2 (add,   , +, long long unsigned int, 8, 3)
OP_AND_FETCH_2 (sub,   , -, long long unsigned int, 8, 3)
OP_AND_FETCH_2 (or,    , |, long long unsigned int, 8, 3)
OP_AND_FETCH_2 (and,   , &, long long unsigned int, 8, 3)
OP_AND_FETCH_2 (xor,   , ^, long long unsigned int, 8, 3)
OP_AND_FETCH_2 (nand, ~, &, long long unsigned int, 8, 3)

OP_AND_FETCH_2 (add,   , +, short unsigned int, 2, 1)
OP_AND_FETCH_2 (sub,   , -, short unsigned int, 2, 1)
OP_AND_FETCH_2 (or,    , |, short unsigned int, 2, 1)
OP_AND_FETCH_2 (and,   , &, short unsigned int, 2, 1)
OP_AND_FETCH_2 (xor,   , ^, short unsigned int, 2, 1)
OP_AND_FETCH_2 (nand, ~, &, short unsigned int, 2, 1)

OP_AND_FETCH_2 (add,   , +, unsigned char, 1, 0)
OP_AND_FETCH_2 (sub,   , -, unsigned char, 1, 0)
OP_AND_FETCH_2 (or,    , |, unsigned char, 1, 0)
OP_AND_FETCH_2 (and,   , &, unsigned char, 1, 0)
OP_AND_FETCH_2 (xor,   , ^, unsigned char, 1, 0)
OP_AND_FETCH_2 (nand, ~, &, unsigned char, 1, 0)
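
/* Illustrative sketch, not part of the upstream file: the OP_AND_FETCH
   forms recompute and return the *new* value, while the FETCH_AND_OP
   forms above return the old one.  The function name is hypothetical.  */
static unsigned char __attribute__ ((unused))
__example_set_flags (volatile unsigned char *flags, unsigned char bits)
{
  /* Returns the flag byte as it reads after the OR took effect.  */
  return __sync_or_and_fetch (flags, bits);
}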

#define FETCH_AND_OP_WORD(OP, PFX_OP, INF_OP)				\
  unsigned int HIDDEN							\
  __sync_fetch_and_##OP##_4 (volatile void *ptr, unsigned int val)	\
  {									\
    unsigned int tmp;							\
    long failure;							\
									\
    do {								\
      tmp = __atomic_load_n ((volatile unsigned int *)ptr,		\
			     __ATOMIC_RELAXED);				\
      failure = __kernel_cmpxchg (ptr, tmp, PFX_OP (tmp INF_OP val));	\
    } while (failure != 0);						\
									\
    return tmp;								\
  }

FETCH_AND_OP_WORD (add,   , +)
FETCH_AND_OP_WORD (sub,   , -)
FETCH_AND_OP_WORD (or,    , |)
FETCH_AND_OP_WORD (and,   , &)
FETCH_AND_OP_WORD (xor,   , ^)
FETCH_AND_OP_WORD (nand, ~, &)

#define OP_AND_FETCH_WORD(OP, PFX_OP, INF_OP)				\
  unsigned int HIDDEN							\
  __sync_##OP##_and_fetch_4 (volatile void *ptr, unsigned int val)	\
  {									\
    unsigned int tmp;							\
    long failure;							\
									\
    do {								\
      tmp = __atomic_load_n ((volatile unsigned int *)ptr,		\
			     __ATOMIC_RELAXED);				\
      failure = __kernel_cmpxchg (ptr, tmp, PFX_OP (tmp INF_OP val));	\
    } while (failure != 0);						\
									\
    return PFX_OP (tmp INF_OP val);					\
  }

OP_AND_FETCH_WORD (add,   , +)
OP_AND_FETCH_WORD (sub,   , -)
OP_AND_FETCH_WORD (or,    , |)
OP_AND_FETCH_WORD (and,   , &)
OP_AND_FETCH_WORD (xor,   , ^)
OP_AND_FETCH_WORD (nand, ~, &)
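
/* Illustrative note, not part of the upstream file: the 4-byte
   variants pass OLDVAL/NEWVAL by value straight to __kernel_cmpxchg;
   only the 1-, 2- and 8-byte variants need the by-address
   __kernel_cmpxchg2 helper.  A hypothetical word-sized caller:  */
static unsigned int __attribute__ ((unused))
__example_word_add (volatile unsigned int *p, unsigned int n)
{
  return __sync_add_and_fetch (p, n);	/* lowers to __sync_add_and_fetch_4 */
}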

typedef unsigned char bool;

#define COMPARE_AND_SWAP_2(TYPE, WIDTH, INDEX)				\
  TYPE HIDDEN								\
  __sync_val_compare_and_swap_##WIDTH (volatile void *ptr, TYPE oldval,	\
				       TYPE newval)			\
  {									\
    TYPE actual_oldval;							\
    long fail;								\
									\
    while (1)								\
      {									\
	actual_oldval = __atomic_load_n ((volatile TYPE *)ptr,		\
					 __ATOMIC_RELAXED);		\
									\
	if (__builtin_expect (oldval != actual_oldval, 0))		\
	  return actual_oldval;						\
									\
	fail = __kernel_cmpxchg2 (ptr, &actual_oldval, &newval, INDEX);	\
									\
	if (__builtin_expect (!fail, 1))				\
	  return actual_oldval;						\
      }									\
  }									\
									\
  bool HIDDEN								\
  __sync_bool_compare_and_swap_##WIDTH (volatile void *ptr,		\
					TYPE oldval, TYPE newval)	\
  {									\
    long failure = __kernel_cmpxchg2 (ptr, &oldval, &newval, INDEX);	\
    return (failure == 0);						\
  }

COMPARE_AND_SWAP_2 (long long unsigned int, 8, 3)
COMPARE_AND_SWAP_2 (short unsigned int, 2, 1)
COMPARE_AND_SWAP_2 (unsigned char, 1, 0)

unsigned int HIDDEN
__sync_val_compare_and_swap_4 (volatile void *ptr, unsigned int oldval,
			       unsigned int newval)
{
  long fail;
  unsigned int actual_oldval;

  while (1)
    {
      actual_oldval = __atomic_load_n ((volatile unsigned int *)ptr,
				       __ATOMIC_RELAXED);

      if (__builtin_expect (oldval != actual_oldval, 0))
	return actual_oldval;

      fail = __kernel_cmpxchg (ptr, actual_oldval, newval);

      if (__builtin_expect (!fail, 1))
	return actual_oldval;
    }
}

bool HIDDEN
__sync_bool_compare_and_swap_4 (volatile void *ptr, unsigned int oldval,
				unsigned int newval)
{
  long failure = __kernel_cmpxchg (ptr, oldval, newval);
  return (failure == 0);
}
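
/* Illustrative sketch, not part of the upstream file: the val form
   reports what the memory actually contained (useful for retry loops),
   the bool form only whether the swap happened.  The function name is
   hypothetical.  */
static bool __attribute__ ((unused))
__example_try_claim (volatile unsigned int *slot, unsigned int id)
{
  /* Succeeds only if *slot still reads 0, i.e. it is unclaimed.  */
  return __sync_bool_compare_and_swap (slot, 0, id);
}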

#define SYNC_LOCK_TEST_AND_SET_2(TYPE, WIDTH, INDEX)			\
  TYPE HIDDEN								\
  __sync_lock_test_and_set_##WIDTH (volatile void *ptr, TYPE val)	\
  {									\
    TYPE oldval;							\
    long failure;							\
									\
    do {								\
      oldval = __atomic_load_n ((volatile TYPE *)ptr,			\
				__ATOMIC_RELAXED);			\
      failure = __kernel_cmpxchg2 (ptr, &oldval, &val, INDEX);		\
    } while (failure != 0);						\
									\
    return oldval;							\
  }

SYNC_LOCK_TEST_AND_SET_2 (long long unsigned int, 8, 3)
SYNC_LOCK_TEST_AND_SET_2 (short unsigned int, 2, 1)
SYNC_LOCK_TEST_AND_SET_2 (unsigned char, 1, 0)

unsigned int HIDDEN
__sync_lock_test_and_set_4 (volatile void *ptr, unsigned int val)
{
  long failure;
  unsigned int oldval;

  do {
    oldval = __atomic_load_n ((volatile unsigned int *)ptr, __ATOMIC_RELAXED);
    failure = __kernel_cmpxchg (ptr, oldval, val);
  } while (failure != 0);

  return oldval;
}

#define SYNC_LOCK_RELEASE_1(TYPE, WIDTH, INDEX)				\
  void HIDDEN								\
  __sync_lock_release_##WIDTH (volatile void *ptr)			\
  {									\
    TYPE oldval, val = 0;						\
    long failure;							\
									\
    do {								\
      oldval = __atomic_load_n ((volatile TYPE *)ptr,			\
				__ATOMIC_RELAXED);			\
      failure = __kernel_cmpxchg2 (ptr, &oldval, &val, INDEX);		\
    } while (failure != 0);						\
  }

SYNC_LOCK_RELEASE_1 (long long unsigned int, 8, 3)
SYNC_LOCK_RELEASE_1 (short unsigned int, 2, 1)
SYNC_LOCK_RELEASE_1 (unsigned char, 1, 0)

void HIDDEN
__sync_lock_release_4 (volatile void *ptr)
{
  long failure;
  unsigned int oldval;

  do {
    oldval = __atomic_load_n ((volatile unsigned int *)ptr, __ATOMIC_RELAXED);
    failure = __kernel_cmpxchg (ptr, oldval, 0);
  } while (failure != 0);
}
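
/* Illustrative sketch, not part of the upstream file: test-and-set and
   lock-release combine into the classic __sync spinlock idiom.  The
   function names are hypothetical.  */
static void __attribute__ ((unused))
__example_spin_lock (volatile unsigned int *lock)
{
  /* The previous value is returned; 0 means the lock was free and we
     now own it.  */
  while (__sync_lock_test_and_set (lock, 1) != 0)
    ;					/* spin until it was free */
}

static void __attribute__ ((unused))
__example_spin_unlock (volatile unsigned int *lock)
{
  __sync_lock_release (lock);		/* stores 0 back */
}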