/* Linux-specific atomic operations for NDS32 Linux.
   Copyright (C) 2012-2020 Free Software Foundation, Inc.

This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

/* We implement byte, short and int versions of each atomic operation
   using the kernel helper defined below.  There is no support for
   64-bit operations yet.  */

/* This function is copied from the NDS32 Linux kernel.  */
static inline int
__kernel_cmpxchg (int oldval, int newval, int *mem)
{
  int temp1, temp2, temp3, offset;

  asm volatile ("msync\tall\n"            /* Full memory barrier.  */
                "movi\t%0, #0\n"
                "1:\n"
                "\tllw\t%1, [%4+%0]\n"    /* Load-linked: %1 = *mem.  */
                "\tsub\t%3, %1, %6\n"     /* %3 = *mem - oldval.  */
                "\tcmovz\t%2, %5, %3\n"   /* If equal, select newval.  */
                "\tcmovn\t%2, %1, %3\n"   /* Else keep the old contents.  */
                "\tscw\t%2, [%4+%0]\n"    /* Store-conditional.  */
                "\tbeqz\t%2, 1b\n"        /* Reservation lost: retry.  */
                : "=&r" (offset), "=&r" (temp3), "=&r" (temp2), "=&r" (temp1)
                : "r" (mem), "r" (newval), "r" (oldval) : "memory");

  /* Zero iff *mem contained oldval and was replaced by newval.  */
  return temp1;
}
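
/* A minimal usage sketch (illustrative, not part of the original file):
   all the helpers below follow this retry pattern; e.g. an atomic
   increment of *ptr would be

     do
       oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);
     while (__kernel_cmpxchg (oldval, oldval + 1, ptr) != 0);
*/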

#define HIDDEN __attribute__ ((visibility ("hidden")))

#ifdef __NDS32_EL__
#define INVERT_MASK_1 0
#define INVERT_MASK_2 0
#else
#define INVERT_MASK_1 24
#define INVERT_MASK_2 16
#endif

#define MASK_1 0xffu
#define MASK_2 0xffffu

#define FETCH_AND_OP_WORD(OP, PFX_OP, INF_OP)                           \
  int HIDDEN                                                            \
  __sync_fetch_and_##OP##_4 (int *ptr, int val)                         \
  {                                                                     \
    int failure, tmp;                                                   \
                                                                        \
    do {                                                                \
      tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);                    \
      failure = __kernel_cmpxchg (tmp, PFX_OP (tmp INF_OP val), ptr);   \
    } while (failure != 0);                                             \
                                                                        \
    return tmp;                                                         \
  }

FETCH_AND_OP_WORD (add, , +)
FETCH_AND_OP_WORD (sub, , -)
FETCH_AND_OP_WORD (or, , |)
FETCH_AND_OP_WORD (and, , &)
FETCH_AND_OP_WORD (xor, , ^)
FETCH_AND_OP_WORD (nand, ~, &)

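/* Illustrative use (not part of the original file): a word-sized
   builtin call such as

     static int counter;
     int prev = __sync_fetch_and_add (&counter, 1);

   is resolved by GCC to __sync_fetch_and_add_4 above when the target
   cannot expand the operation inline, and returns the value `counter'
   held before the increment.  */
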
#define NAME_oldval(OP, WIDTH) __sync_fetch_and_##OP##_##WIDTH
#define NAME_newval(OP, WIDTH) __sync_##OP##_and_fetch_##WIDTH

/* Implement both __sync_<op>_and_fetch and __sync_fetch_and_<op> for
   subword-sized quantities.  */

#define SUBWORD_SYNC_OP(OP, PFX_OP, INF_OP, TYPE, WIDTH, RETURN)        \
  TYPE HIDDEN                                                           \
  NAME##_##RETURN (OP, WIDTH) (TYPE *ptr, TYPE val)                     \
  {                                                                     \
    int *wordptr = (int *) ((unsigned long) ptr & ~3);                  \
    unsigned int mask, shift, oldval, newval;                           \
    int failure;                                                        \
                                                                        \
    shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH;     \
    mask = MASK_##WIDTH << shift;                                       \
                                                                        \
    do {                                                                \
      oldval = __atomic_load_n (wordptr, __ATOMIC_SEQ_CST);             \
      newval = ((PFX_OP (((oldval & mask) >> shift)                     \
                         INF_OP (unsigned int) val)) << shift) & mask;  \
      newval |= oldval & ~mask;                                         \
      failure = __kernel_cmpxchg (oldval, newval, wordptr);             \
    } while (failure != 0);                                             \
                                                                        \
    return (RETURN & mask) >> shift;                                    \
  }

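/* Worked example (illustrative, not part of the original file): on a
   little-endian target a short at address 0x1002 lives in the aligned
   word at 0x1000; then
   shift = ((0x1002 & 3) << 3) ^ INVERT_MASK_2 = 16 and
   mask = 0xffff << 16 = 0xffff0000, so the subword is extracted as
   (word & mask) >> shift.  On big-endian, INVERT_MASK_2 = 16 flips the
   shift to 0, selecting the same two bytes from the other end of the
   word.  */
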
SUBWORD_SYNC_OP (add, , +, unsigned short, 2, oldval)
SUBWORD_SYNC_OP (sub, , -, unsigned short, 2, oldval)
SUBWORD_SYNC_OP (or, , |, unsigned short, 2, oldval)
SUBWORD_SYNC_OP (and, , &, unsigned short, 2, oldval)
SUBWORD_SYNC_OP (xor, , ^, unsigned short, 2, oldval)
SUBWORD_SYNC_OP (nand, ~, &, unsigned short, 2, oldval)

SUBWORD_SYNC_OP (add, , +, unsigned char, 1, oldval)
SUBWORD_SYNC_OP (sub, , -, unsigned char, 1, oldval)
SUBWORD_SYNC_OP (or, , |, unsigned char, 1, oldval)
SUBWORD_SYNC_OP (and, , &, unsigned char, 1, oldval)
SUBWORD_SYNC_OP (xor, , ^, unsigned char, 1, oldval)
SUBWORD_SYNC_OP (nand, ~, &, unsigned char, 1, oldval)

#define OP_AND_FETCH_WORD(OP, PFX_OP, INF_OP)                           \
  int HIDDEN                                                            \
  __sync_##OP##_and_fetch_4 (int *ptr, int val)                         \
  {                                                                     \
    int tmp, failure;                                                   \
                                                                        \
    do {                                                                \
      tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);                    \
      failure = __kernel_cmpxchg (tmp, PFX_OP (tmp INF_OP val), ptr);   \
    } while (failure != 0);                                             \
                                                                        \
    return PFX_OP (tmp INF_OP val);                                     \
  }

OP_AND_FETCH_WORD (add, , +)
OP_AND_FETCH_WORD (sub, , -)
OP_AND_FETCH_WORD (or, , |)
OP_AND_FETCH_WORD (and, , &)
OP_AND_FETCH_WORD (xor, , ^)
OP_AND_FETCH_WORD (nand, ~, &)

SUBWORD_SYNC_OP (add, , +, unsigned short, 2, newval)
SUBWORD_SYNC_OP (sub, , -, unsigned short, 2, newval)
SUBWORD_SYNC_OP (or, , |, unsigned short, 2, newval)
SUBWORD_SYNC_OP (and, , &, unsigned short, 2, newval)
SUBWORD_SYNC_OP (xor, , ^, unsigned short, 2, newval)
SUBWORD_SYNC_OP (nand, ~, &, unsigned short, 2, newval)

SUBWORD_SYNC_OP (add, , +, unsigned char, 1, newval)
SUBWORD_SYNC_OP (sub, , -, unsigned char, 1, newval)
SUBWORD_SYNC_OP (or, , |, unsigned char, 1, newval)
SUBWORD_SYNC_OP (and, , &, unsigned char, 1, newval)
SUBWORD_SYNC_OP (xor, , ^, unsigned char, 1, newval)
SUBWORD_SYNC_OP (nand, ~, &, unsigned char, 1, newval)

int HIDDEN
__sync_val_compare_and_swap_4 (int *ptr, int oldval, int newval)
{
  int actual_oldval, fail;

  while (1)
    {
      actual_oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);

      if (oldval != actual_oldval)
        return actual_oldval;

      fail = __kernel_cmpxchg (actual_oldval, newval, ptr);

      if (!fail)
        return oldval;
    }
}

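/* Illustrative use (not part of the original file): for a hypothetical
   int `word',

     int got = __sync_val_compare_and_swap (&word, expected, desired);

   resolves to the function above and returns the value `word' actually
   held, which equals `expected' exactly when the swap was performed.  */
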
#define SUBWORD_VAL_CAS(TYPE, WIDTH)                                    \
  TYPE HIDDEN                                                           \
  __sync_val_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval,          \
                                       TYPE newval)                     \
  {                                                                     \
    int *wordptr = (int *)((unsigned long) ptr & ~3), fail;             \
    unsigned int mask, shift, actual_oldval, actual_newval;             \
                                                                        \
    shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH;     \
    mask = MASK_##WIDTH << shift;                                       \
                                                                        \
    while (1)                                                           \
      {                                                                 \
        actual_oldval = __atomic_load_n (wordptr, __ATOMIC_SEQ_CST);    \
                                                                        \
        if (((actual_oldval & mask) >> shift) != (unsigned int) oldval) \
          return (actual_oldval & mask) >> shift;                       \
                                                                        \
        actual_newval = (actual_oldval & ~mask)                         \
                        | (((unsigned int) newval << shift) & mask);    \
                                                                        \
        fail = __kernel_cmpxchg (actual_oldval, actual_newval,          \
                                 wordptr);                              \
                                                                        \
        if (!fail)                                                      \
          return oldval;                                                \
      }                                                                 \
  }

SUBWORD_VAL_CAS (unsigned short, 2)
SUBWORD_VAL_CAS (unsigned char, 1)

typedef unsigned char bool;

bool HIDDEN
__sync_bool_compare_and_swap_4 (int *ptr, int oldval, int newval)
{
  int failure = __kernel_cmpxchg (oldval, newval, ptr);
  return (failure == 0);
}

#define SUBWORD_BOOL_CAS(TYPE, WIDTH)                                   \
  bool HIDDEN                                                           \
  __sync_bool_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval,         \
                                        TYPE newval)                    \
  {                                                                     \
    TYPE actual_oldval                                                  \
      = __sync_val_compare_and_swap_##WIDTH (ptr, oldval, newval);      \
    return (oldval == actual_oldval);                                   \
  }

SUBWORD_BOOL_CAS (unsigned short, 2)
SUBWORD_BOOL_CAS (unsigned char, 1)

int HIDDEN
__sync_lock_test_and_set_4 (int *ptr, int val)
{
  int failure, oldval;

  do {
    oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);
    failure = __kernel_cmpxchg (oldval, val, ptr);
  } while (failure != 0);

  return oldval;
}

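/* Note (editorial): __sync_lock_test_and_set is only documented as an
   acquire barrier; the implementation above is stronger than required,
   since __kernel_cmpxchg begins with a full "msync all".  */
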
#define SUBWORD_TEST_AND_SET(TYPE, WIDTH)                               \
  TYPE HIDDEN                                                           \
  __sync_lock_test_and_set_##WIDTH (TYPE *ptr, TYPE val)                \
  {                                                                     \
    int failure;                                                        \
    unsigned int oldval, newval, shift, mask;                           \
    int *wordptr = (int *) ((unsigned long) ptr & ~3);                  \
                                                                        \
    shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH;     \
    mask = MASK_##WIDTH << shift;                                       \
                                                                        \
    do {                                                                \
      oldval = __atomic_load_n (wordptr, __ATOMIC_SEQ_CST);             \
      newval = (oldval & ~mask)                                         \
               | (((unsigned int) val << shift) & mask);                \
      failure = __kernel_cmpxchg (oldval, newval, wordptr);             \
    } while (failure != 0);                                             \
                                                                        \
    return (oldval & mask) >> shift;                                    \
  }

SUBWORD_TEST_AND_SET (unsigned short, 2)
SUBWORD_TEST_AND_SET (unsigned char, 1)

#define SYNC_LOCK_RELEASE(TYPE, WIDTH)                                  \
  void HIDDEN                                                           \
  __sync_lock_release_##WIDTH (TYPE *ptr)                               \
  {                                                                     \
    /* All writes before this point must be seen before we release     \
       the lock itself.  */                                             \
    __builtin_nds32_msync_all ();                                       \
    *ptr = 0;                                                           \
  }

SYNC_LOCK_RELEASE (int, 4)
SYNC_LOCK_RELEASE (short, 2)
SYNC_LOCK_RELEASE (char, 1)
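
/* A minimal spinlock sketch built on the primitives above
   (illustrative only; `lock' is a hypothetical variable):

     static int lock;

     void
     acquire_lock (void)
     {
       while (__sync_lock_test_and_set (&lock, 1) != 0)
         ;
     }

     void
     release_lock (void)
     {
       __sync_lock_release (&lock);
     }

   __sync_lock_test_and_set returns the previous value, so the loop
   exits once this thread atomically observes 0 and stores 1;
   __sync_lock_release issues the barrier and then stores 0.  */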