/* Linux-specific atomic operations for PA Linux.
   Copyright (C) 2008-2014 Free Software Foundation, Inc.
   Based on code contributed by CodeSourcery for ARM EABI Linux.
   Modifications for PA Linux by Helge Deller <deller@gmx.de>

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

#define EFAULT	14
#define EBUSY	16
#define ENOSYS	251

/* All PA-RISC implementations supported by Linux have strongly
   ordered loads and stores.  Only cache flushes and purges can be
   delayed.  The data cache implementations are all globally
   coherent.  Thus, there is no need to synchronize memory accesses.

   GCC automatically issues an asm memory barrier when it encounters
   a __sync_synchronize builtin.  Thus, we do not need to define this
   builtin.

   We implement byte, short and int versions of each atomic operation
   using the kernel helper defined below.  There is no support for
   64-bit operations yet.  */

/* A privileged instruction to crash a userspace program with SIGILL.  */
#define ABORT_INSTRUCTION asm ("iitlbp %r0,(%sr0, %r0)")

/* Determine kernel LWS function call (0=32-bit, 1=64-bit userspace).  */
#define LWS_CAS (sizeof(unsigned long) == 4 ? 0 : 1)

/* Kernel helper for compare-and-exchange a 32-bit value.  */
static inline long
__kernel_cmpxchg (int oldval, int newval, int *mem)
{
  register unsigned long lws_mem asm("r26") = (unsigned long) (mem);
  register long lws_ret asm("r28");
  register long lws_errno asm("r21");
  register int lws_old asm("r25") = oldval;
  register int lws_new asm("r24") = newval;
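  /* Branch to the kernel's light-weight syscall (LWS) gateway; the
     "ldi" in the delay slot of the "ble" loads the LWS function
     number (LWS_CAS) into r20.  The old memory value comes back in
     r28 (lws_ret) and an error code in r21 (lws_errno), matching the
     register bindings declared above.  */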
  asm volatile (	"ble	0xb0(%%sr2, %%r0)	\n\t"
			"ldi	%5, %%r20		\n\t"
	: "=r" (lws_ret), "=r" (lws_errno), "=r" (lws_mem),
	  "=r" (lws_old), "=r" (lws_new)
	: "i" (LWS_CAS), "2" (lws_mem), "3" (lws_old), "4" (lws_new)
	: "r1", "r20", "r22", "r23", "r29", "r31", "memory"
  );
  if (__builtin_expect (lws_errno == -EFAULT || lws_errno == -ENOSYS, 0))
    ABORT_INSTRUCTION;

  /* If the kernel LWS call succeeded (lws_errno == 0), lws_ret contains
     the old value from memory.  If this value is equal to OLDVAL, the
     new value was written to memory.  If not, return -EBUSY.  */
  if (!lws_errno && lws_ret != oldval)
    lws_errno = -EBUSY;

  return lws_errno;
}

#define HIDDEN __attribute__ ((visibility ("hidden")))

/* Big endian masks.  */
#define INVERT_MASK_1 24
#define INVERT_MASK_2 16

#define MASK_1 0xffu
#define MASK_2 0xffffu

#define FETCH_AND_OP_WORD(OP, PFX_OP, INF_OP)				\
  int HIDDEN								\
  __sync_fetch_and_##OP##_4 (int *ptr, int val)				\
  {									\
    int failure, tmp;							\
									\
    do {								\
      tmp = *ptr;							\
      failure = __kernel_cmpxchg (tmp, PFX_OP (tmp INF_OP val), ptr);	\
    } while (failure != 0);						\
									\
    return tmp;								\
  }
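
/* As a sketch of what the macro produces, FETCH_AND_OP_WORD (add, , +)
   expands to roughly:

     int __sync_fetch_and_add_4 (int *ptr, int val)
     {
       int failure, tmp;
       do {
	 tmp = *ptr;
	 failure = __kernel_cmpxchg (tmp, tmp + val, ptr);
       } while (failure != 0);
       return tmp;
     }

   i.e. read the current value and retry the kernel compare-and-swap
   until no other writer intervenes, then return the old value.  */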

FETCH_AND_OP_WORD (add, , +)
FETCH_AND_OP_WORD (sub, , -)
FETCH_AND_OP_WORD (or, , |)
FETCH_AND_OP_WORD (and, , &)
FETCH_AND_OP_WORD (xor, , ^)
FETCH_AND_OP_WORD (nand, ~, &)

#define NAME_oldval(OP, WIDTH) __sync_fetch_and_##OP##_##WIDTH
#define NAME_newval(OP, WIDTH) __sync_##OP##_and_fetch_##WIDTH
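
/* In SUBWORD_SYNC_OP below, NAME##_##RETURN (OP, WIDTH) first pastes
   into NAME_oldval (OP, WIDTH) or NAME_newval (OP, WIDTH), which then
   expands to the corresponding __sync_* function name; e.g. with
   RETURN = oldval, OP = add and WIDTH = 2 it yields
   __sync_fetch_and_add_2.  */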

/* Implement both __sync_<op>_and_fetch and __sync_fetch_and_<op> for
   subword-sized quantities.  */

#define SUBWORD_SYNC_OP(OP, PFX_OP, INF_OP, TYPE, WIDTH, RETURN)	\
  TYPE HIDDEN								\
  NAME##_##RETURN (OP, WIDTH) (TYPE *ptr, TYPE val)			\
  {									\
    int *wordptr = (int *) ((unsigned long) ptr & ~3);			\
    unsigned int mask, shift, oldval, newval;				\
    int failure;							\
									\
    shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH;	\
    mask = MASK_##WIDTH << shift;					\
									\
    do {								\
      oldval = *wordptr;						\
      newval = ((PFX_OP (((oldval & mask) >> shift)			\
			 INF_OP (unsigned int) val)) << shift) & mask;	\
      newval |= oldval & ~mask;						\
      failure = __kernel_cmpxchg (oldval, newval, wordptr);		\
    } while (failure != 0);						\
									\
    return (RETURN & mask) >> shift;					\
  }
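
/* A worked example of the mask arithmetic: for a byte at big-endian
   address offset 3 within its word, shift = ((3 << 3) ^ INVERT_MASK_1)
   = (24 ^ 24) = 0, so mask = 0xff selects the least-significant byte;
   at offset 0, shift = 24 and the mask selects the most-significant
   byte, matching big-endian byte order.  */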

SUBWORD_SYNC_OP (add, , +, unsigned short, 2, oldval)
SUBWORD_SYNC_OP (sub, , -, unsigned short, 2, oldval)
SUBWORD_SYNC_OP (or, , |, unsigned short, 2, oldval)
SUBWORD_SYNC_OP (and, , &, unsigned short, 2, oldval)
SUBWORD_SYNC_OP (xor, , ^, unsigned short, 2, oldval)
SUBWORD_SYNC_OP (nand, ~, &, unsigned short, 2, oldval)

SUBWORD_SYNC_OP (add, , +, unsigned char, 1, oldval)
SUBWORD_SYNC_OP (sub, , -, unsigned char, 1, oldval)
SUBWORD_SYNC_OP (or, , |, unsigned char, 1, oldval)
SUBWORD_SYNC_OP (and, , &, unsigned char, 1, oldval)
SUBWORD_SYNC_OP (xor, , ^, unsigned char, 1, oldval)
SUBWORD_SYNC_OP (nand, ~, &, unsigned char, 1, oldval)

#define OP_AND_FETCH_WORD(OP, PFX_OP, INF_OP)				\
  int HIDDEN								\
  __sync_##OP##_and_fetch_4 (int *ptr, int val)				\
  {									\
    int tmp, failure;							\
									\
    do {								\
      tmp = *ptr;							\
      failure = __kernel_cmpxchg (tmp, PFX_OP (tmp INF_OP val), ptr);	\
    } while (failure != 0);						\
									\
    return PFX_OP (tmp INF_OP val);					\
  }
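
/* This differs from FETCH_AND_OP_WORD only in its return value; e.g.
   OP_AND_FETCH_WORD (add, , +) defines __sync_add_and_fetch_4, which
   returns the updated value tmp + val rather than the previous
   contents of *ptr.  */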

OP_AND_FETCH_WORD (add, , +)
OP_AND_FETCH_WORD (sub, , -)
OP_AND_FETCH_WORD (or, , |)
OP_AND_FETCH_WORD (and, , &)
OP_AND_FETCH_WORD (xor, , ^)
OP_AND_FETCH_WORD (nand, ~, &)

SUBWORD_SYNC_OP (add, , +, unsigned short, 2, newval)
SUBWORD_SYNC_OP (sub, , -, unsigned short, 2, newval)
SUBWORD_SYNC_OP (or, , |, unsigned short, 2, newval)
SUBWORD_SYNC_OP (and, , &, unsigned short, 2, newval)
SUBWORD_SYNC_OP (xor, , ^, unsigned short, 2, newval)
SUBWORD_SYNC_OP (nand, ~, &, unsigned short, 2, newval)

SUBWORD_SYNC_OP (add, , +, unsigned char, 1, newval)
SUBWORD_SYNC_OP (sub, , -, unsigned char, 1, newval)
SUBWORD_SYNC_OP (or, , |, unsigned char, 1, newval)
SUBWORD_SYNC_OP (and, , &, unsigned char, 1, newval)
SUBWORD_SYNC_OP (xor, , ^, unsigned char, 1, newval)
SUBWORD_SYNC_OP (nand, ~, &, unsigned char, 1, newval)

int HIDDEN
__sync_val_compare_and_swap_4 (int *ptr, int oldval, int newval)
{
  int actual_oldval, fail;

  while (1)
    {
      actual_oldval = *ptr;

      if (__builtin_expect (oldval != actual_oldval, 0))
	return actual_oldval;

      fail = __kernel_cmpxchg (actual_oldval, newval, ptr);

      if (__builtin_expect (!fail, 1))
	return actual_oldval;
    }
}
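
/* Note the retry loop above: a nonzero result from __kernel_cmpxchg
   (-EBUSY) means another thread modified *ptr between our load and
   the kernel compare-and-swap, so we reread and either return the
   mismatching value or try the swap again.  */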

#define SUBWORD_VAL_CAS(TYPE, WIDTH)					\
  TYPE HIDDEN								\
  __sync_val_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval,		\
				       TYPE newval)			\
  {									\
    int *wordptr = (int *)((unsigned long) ptr & ~3), fail;		\
    unsigned int mask, shift, actual_oldval, actual_newval;		\
									\
    shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH;	\
    mask = MASK_##WIDTH << shift;					\
									\
    while (1)								\
      {									\
	actual_oldval = *wordptr;					\
									\
	if (__builtin_expect (((actual_oldval & mask) >> shift)		\
			      != (unsigned int) oldval, 0))		\
	  return (actual_oldval & mask) >> shift;			\
									\
	actual_newval = (actual_oldval & ~mask)				\
			| (((unsigned int) newval << shift) & mask);	\
									\
	fail = __kernel_cmpxchg (actual_oldval, actual_newval,		\
				 wordptr);				\
									\
	if (__builtin_expect (!fail, 1))				\
	  return (actual_oldval & mask) >> shift;			\
      }									\
  }

SUBWORD_VAL_CAS (unsigned short, 2)
SUBWORD_VAL_CAS (unsigned char, 1)

typedef unsigned char bool;

bool HIDDEN
__sync_bool_compare_and_swap_4 (int *ptr, int oldval, int newval)
{
  int failure = __kernel_cmpxchg (oldval, newval, ptr);
  return (failure == 0);
}

#define SUBWORD_BOOL_CAS(TYPE, WIDTH)					\
  bool HIDDEN								\
  __sync_bool_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval,		\
					TYPE newval)			\
  {									\
    TYPE actual_oldval							\
      = __sync_val_compare_and_swap_##WIDTH (ptr, oldval, newval);	\
    return (oldval == actual_oldval);					\
  }

SUBWORD_BOOL_CAS (unsigned short, 2)
SUBWORD_BOOL_CAS (unsigned char, 1)

int HIDDEN
__sync_lock_test_and_set_4 (int *ptr, int val)
{
  int failure, oldval;

  do {
    oldval = *ptr;
    failure = __kernel_cmpxchg (oldval, val, ptr);
  } while (failure != 0);

  return oldval;
}
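
/* __sync_lock_test_and_set is an unconditional exchange: the loop
   simply retries the kernel compare-and-swap until no other writer
   slips in between the load of the old value and the swap, then
   returns the previous contents of *ptr.  */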

#define SUBWORD_TEST_AND_SET(TYPE, WIDTH)				\
  TYPE HIDDEN								\
  __sync_lock_test_and_set_##WIDTH (TYPE *ptr, TYPE val)		\
  {									\
    int failure;							\
    unsigned int oldval, newval, shift, mask;				\
    int *wordptr = (int *) ((unsigned long) ptr & ~3);			\
									\
    shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH;	\
    mask = MASK_##WIDTH << shift;					\
									\
    do {								\
      oldval = *wordptr;						\
      newval = (oldval & ~mask)						\
	       | (((unsigned int) val << shift) & mask);		\
      failure = __kernel_cmpxchg (oldval, newval, wordptr);		\
    } while (failure != 0);						\
									\
    return (oldval & mask) >> shift;					\
  }

SUBWORD_TEST_AND_SET (unsigned short, 2)
SUBWORD_TEST_AND_SET (unsigned char, 1)

#define SYNC_LOCK_RELEASE(TYPE, WIDTH)					\
  void HIDDEN								\
  __sync_lock_release_##WIDTH (TYPE *ptr)				\
  {									\
    *ptr = 0;								\
  }

SYNC_LOCK_RELEASE (int, 4)
SYNC_LOCK_RELEASE (short, 2)
SYNC_LOCK_RELEASE (char, 1)
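
/* A plain store suffices for __sync_lock_release because, as noted at
   the top of this file, PA-RISC loads and stores are strongly ordered;
   no explicit barrier is needed before clearing the lock word.  */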