/* Linux-specific atomic operations for m68k Linux.
   Copyright (C) 2011-2017 Free Software Foundation, Inc.
   Based on code contributed by CodeSourcery for ARM EABI Linux.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

/* Coldfire dropped the CAS instruction from the base M68K ISA.

   GCC automatically issues an asm memory barrier when it encounters
   a __sync_synchronize builtin.  Thus, we do not need to define this
   builtin.

   We implement byte, short and int versions of each atomic operation
   using the kernel helper defined below.  There is no support for
   64-bit operations yet.  */

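/* Illustrative sketch: GCC expands the legacy __sync builtins on this
   target into out-of-line calls to the hidden helpers defined below,
   so for a 32-bit int a call such as

     __sync_fetch_and_add (&counter, 1);

   is expected to end up in __sync_fetch_and_add_4, which loops on the
   kernel compare-and-exchange helper.  "counter" is just a placeholder
   name.  */
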
#include <stdbool.h>

#ifndef __NR_atomic_cmpxchg_32
#define __NR_atomic_cmpxchg_32 335
#endif

/* Kernel helper for compare-and-exchange of a 32-bit value.  */
static inline unsigned
__kernel_cmpxchg (unsigned *mem, unsigned oldval, unsigned newval)
{
  register unsigned *a0 asm("a0") = mem;
  register unsigned d2 asm("d2") = oldval;
  register unsigned d1 asm("d1") = newval;
  register unsigned d0 asm("d0") = __NR_atomic_cmpxchg_32;

  asm volatile ("trap #0"
                : "=r"(d0), "=r"(d1), "=r"(a0)
                : "r"(d0), "r"(d1), "r"(d2), "r"(a0)
                : "memory", "a1");

  return d0;
}

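/* Sketch of the contract assumed throughout this file: the helper
   returns the value that was actually found in *mem, so

     unsigned seen = __kernel_cmpxchg (&word, expected, desired);
     bool swapped = (seen == expected);

   reports success exactly when "desired" was stored.  "word",
   "expected" and "desired" are placeholder names.  */
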
#define HIDDEN __attribute__ ((visibility ("hidden")))

/* Big endian masks */
#define INVERT_MASK_1 24
#define INVERT_MASK_2 16

#define MASK_1 0xffu
#define MASK_2 0xffffu

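/* Worked example of the mask arithmetic, assuming a 16-bit access:
   for a short at ((unsigned long) ptr & 3) == 2 the shift computed
   below is (2 << 3) ^ INVERT_MASK_2 == 16 ^ 16 == 0 and the mask is
   0xffff, i.e. the low-order bits of the containing word, which hold
   bytes 2-3 on a big-endian machine.  At offset 0 the shift is 16 and
   the mask is 0xffff0000, selecting bytes 0-1.  */
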
#define NAME_oldval(OP, WIDTH) __sync_fetch_and_##OP##_##WIDTH
#define NAME_newval(OP, WIDTH) __sync_##OP##_and_fetch_##WIDTH

#define WORD_SYNC_OP(OP, PFX_OP, INF_OP, RETURN) \
  unsigned HIDDEN \
  NAME##_##RETURN (OP, 4) (unsigned *ptr, unsigned val) \
  { \
    unsigned oldval, newval, cmpval = *ptr; \
 \
    do { \
      oldval = cmpval; \
      newval = PFX_OP (oldval INF_OP val); \
      cmpval = __kernel_cmpxchg (ptr, oldval, newval); \
    } while (__builtin_expect (oldval != cmpval, 0)); \
 \
    return RETURN; \
  }

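/* Expansion sketch rather than authoritative output:
   WORD_SYNC_OP (add, , +, oldval) defines

     unsigned __sync_fetch_and_add_4 (unsigned *ptr, unsigned val);

   which recomputes oldval + val and retries the kernel
   compare-and-exchange until no other writer has intervened, then
   returns the value seen before the update.  */
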
#define SUBWORD_SYNC_OP(OP, PFX_OP, INF_OP, TYPE, WIDTH, RETURN) \
  TYPE HIDDEN \
  NAME##_##RETURN (OP, WIDTH) (TYPE *ptr, TYPE sval) \
  { \
    unsigned *wordptr = (unsigned *) ((unsigned long) ptr & ~3); \
    unsigned int mask, shift, oldval, newval, cmpval, wval; \
 \
    shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH; \
    mask = MASK_##WIDTH << shift; \
    wval = (sval & MASK_##WIDTH) << shift; \
 \
    cmpval = *wordptr; \
    do { \
      oldval = cmpval; \
      newval = PFX_OP (oldval INF_OP wval); \
      newval = (newval & mask) | (oldval & ~mask); \
      cmpval = __kernel_cmpxchg (wordptr, oldval, newval); \
    } while (__builtin_expect (oldval != cmpval, 0)); \
 \
    return (RETURN >> shift) & MASK_##WIDTH; \
  }

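/* Likewise a sketch: SUBWORD_SYNC_OP (add, , +, unsigned short, 2,
   oldval) defines __sync_fetch_and_add_2.  The subword variants never
   write outside the aligned 32-bit word containing the operand: they
   read that word, recompute only the masked lane, and then
   compare-and-exchange the whole word.  */
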
WORD_SYNC_OP (add, , +, oldval)
WORD_SYNC_OP (sub, , -, oldval)
WORD_SYNC_OP (or, , |, oldval)
WORD_SYNC_OP (and, , &, oldval)
WORD_SYNC_OP (xor, , ^, oldval)
WORD_SYNC_OP (nand, ~, &, oldval)

SUBWORD_SYNC_OP (add, , +, unsigned short, 2, oldval)
SUBWORD_SYNC_OP (sub, , -, unsigned short, 2, oldval)
SUBWORD_SYNC_OP (or, , |, unsigned short, 2, oldval)
SUBWORD_SYNC_OP (and, , &, unsigned short, 2, oldval)
SUBWORD_SYNC_OP (xor, , ^, unsigned short, 2, oldval)
SUBWORD_SYNC_OP (nand, ~, &, unsigned short, 2, oldval)

SUBWORD_SYNC_OP (add, , +, unsigned char, 1, oldval)
SUBWORD_SYNC_OP (sub, , -, unsigned char, 1, oldval)
SUBWORD_SYNC_OP (or, , |, unsigned char, 1, oldval)
SUBWORD_SYNC_OP (and, , &, unsigned char, 1, oldval)
SUBWORD_SYNC_OP (xor, , ^, unsigned char, 1, oldval)
SUBWORD_SYNC_OP (nand, ~, &, unsigned char, 1, oldval)

WORD_SYNC_OP (add, , +, newval)
WORD_SYNC_OP (sub, , -, newval)
WORD_SYNC_OP (or, , |, newval)
WORD_SYNC_OP (and, , &, newval)
WORD_SYNC_OP (xor, , ^, newval)
WORD_SYNC_OP (nand, ~, &, newval)

SUBWORD_SYNC_OP (add, , +, unsigned short, 2, newval)
SUBWORD_SYNC_OP (sub, , -, unsigned short, 2, newval)
SUBWORD_SYNC_OP (or, , |, unsigned short, 2, newval)
SUBWORD_SYNC_OP (and, , &, unsigned short, 2, newval)
SUBWORD_SYNC_OP (xor, , ^, unsigned short, 2, newval)
SUBWORD_SYNC_OP (nand, ~, &, unsigned short, 2, newval)

SUBWORD_SYNC_OP (add, , +, unsigned char, 1, newval)
SUBWORD_SYNC_OP (sub, , -, unsigned char, 1, newval)
SUBWORD_SYNC_OP (or, , |, unsigned char, 1, newval)
SUBWORD_SYNC_OP (and, , &, unsigned char, 1, newval)
SUBWORD_SYNC_OP (xor, , ^, unsigned char, 1, newval)
SUBWORD_SYNC_OP (nand, ~, &, unsigned char, 1, newval)

unsigned HIDDEN
__sync_val_compare_and_swap_4 (unsigned *ptr, unsigned oldval, unsigned newval)
{
  return __kernel_cmpxchg (ptr, oldval, newval);
}

bool HIDDEN
__sync_bool_compare_and_swap_4 (unsigned *ptr, unsigned oldval,
                                unsigned newval)
{
  return __kernel_cmpxchg (ptr, oldval, newval) == oldval;
}

#define SUBWORD_VAL_CAS(TYPE, WIDTH) \
  TYPE HIDDEN \
  __sync_val_compare_and_swap_##WIDTH (TYPE *ptr, TYPE soldval, \
                                       TYPE snewval) \
  { \
    unsigned *wordptr = (unsigned *)((unsigned long) ptr & ~3); \
    unsigned int mask, shift, woldval, wnewval; \
    unsigned oldval, newval, cmpval; \
 \
    shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH; \
    mask = MASK_##WIDTH << shift; \
    woldval = (soldval & MASK_##WIDTH) << shift; \
    wnewval = (snewval & MASK_##WIDTH) << shift; \
    cmpval = *wordptr; \
 \
    do { \
      oldval = cmpval; \
      if ((oldval & mask) != woldval) \
        break; \
      newval = (oldval & ~mask) | wnewval; \
      cmpval = __kernel_cmpxchg (wordptr, oldval, newval); \
    } while (__builtin_expect (oldval != cmpval, 0)); \
 \
    return (oldval >> shift) & MASK_##WIDTH; \
  }

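/* Note on the loop above: if the selected lane no longer holds the
   expected subword value, the loop breaks out early and the function
   returns the subword value it observed; this matches
   __sync_val_compare_and_swap, which reports the prior contents
   whether or not the swap happened.  */
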
SUBWORD_VAL_CAS (unsigned short, 2)
SUBWORD_VAL_CAS (unsigned char, 1)

#define SUBWORD_BOOL_CAS(TYPE, WIDTH) \
  bool HIDDEN \
  __sync_bool_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval, \
                                        TYPE newval) \
  { \
    return (__sync_val_compare_and_swap_##WIDTH (ptr, oldval, newval) \
            == oldval); \
  }

SUBWORD_BOOL_CAS (unsigned short, 2)
SUBWORD_BOOL_CAS (unsigned char, 1)

#undef NAME_oldval
#define NAME_oldval(OP, WIDTH) __sync_lock_##OP##_##WIDTH
#define COMMA ,

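/* With INF_OP set to COMMA, "PFX_OP (oldval INF_OP val)" in the
   generic loops expands to "(oldval, val)", so the comma operator
   makes the new value simply val.  That turns the read-modify-write
   loop into the atomic exchange required by __sync_lock_test_and_set,
   which still returns the previous contents.  */
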
WORD_SYNC_OP (test_and_set, , COMMA, oldval)
SUBWORD_SYNC_OP (test_and_set, , COMMA, unsigned char, 1, oldval)
SUBWORD_SYNC_OP (test_and_set, , COMMA, unsigned short, 2, oldval)