/* Linux-specific atomic operations for m68k Linux.
   Copyright (C) 2011-2023 Free Software Foundation, Inc.
   Based on code contributed by CodeSourcery for ARM EABI Linux.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

/* Coldfire dropped the CAS instruction from the base M68K ISA.

   GCC automatically issues an asm memory barrier when it encounters
   a __sync_synchronize builtin.  Thus, we do not need to define this
   builtin.

   We implement byte, short and int versions of each atomic operation
   using the kernel helper defined below.  There is no support for
   64-bit operations yet.  */

#include <stdbool.h>

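/* Number of the kernel's atomic_cmpxchg_32 syscall.  The #ifndef
   supplies a hard-coded fallback, presumably for toolchains whose
   kernel headers do not define it.  */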
#ifndef __NR_atomic_cmpxchg_32
#define __NR_atomic_cmpxchg_32 335
#endif

/* Kernel helper for compare-and-exchange a 32-bit value.  */
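/* Calling convention, as set up by the register variables below: the
   syscall number goes in d0, the new value in d1, the expected old
   value in d2 and the address in a0, with "trap #0" entering the
   kernel.  The trap leaves the previous contents of *MEM in d0; the
   callers below rely on a result equal to OLDVAL meaning the exchange
   succeeded.  */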
static inline unsigned
__kernel_cmpxchg (unsigned *mem, unsigned oldval, unsigned newval)
{
  register unsigned *a0 asm("a0") = mem;
  register unsigned d2 asm("d2") = oldval;
  register unsigned d1 asm("d1") = newval;
  register unsigned d0 asm("d0") = __NR_atomic_cmpxchg_32;

  asm volatile ("trap #0"
                : "=r"(d0), "=r"(d1), "=r"(a0)
                : "r"(d0), "r"(d1), "r"(d2), "r"(a0)
                : "memory", "a1");

  return d0;
}

#define HIDDEN __attribute__ ((visibility ("hidden")))

/* Big endian masks */
#define INVERT_MASK_1 24
#define INVERT_MASK_2 16

#define MASK_1 0xffu
#define MASK_2 0xffffu

#define NAME_oldval(OP, WIDTH) __sync_fetch_and_##OP##_##WIDTH
#define NAME_newval(OP, WIDTH) __sync_##OP##_and_fetch_##WIDTH

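/* Expand to the out-of-line 32-bit __sync routine for operation OP:
   with RETURN == oldval the NAME_oldval macro above yields the
   __sync_fetch_and_<op>_4 name (e.g. __sync_fetch_and_add_4), and
   with RETURN == newval the NAME_newval macro yields
   __sync_<op>_and_fetch_4.  The body is a compare-and-exchange retry
   loop built on __kernel_cmpxchg.  */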
#define WORD_SYNC_OP(OP, PFX_OP, INF_OP, RETURN) \
  unsigned HIDDEN \
  NAME##_##RETURN (OP, 4) (unsigned *ptr, unsigned val) \
  { \
    unsigned oldval, newval, cmpval = *ptr; \
    \
    do { \
      oldval = cmpval; \
      newval = PFX_OP (oldval INF_OP val); \
      cmpval = __kernel_cmpxchg (ptr, oldval, newval); \
    } while (__builtin_expect (oldval != cmpval, 0)); \
    \
    return RETURN; \
  }

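/* Byte and halfword operations work on the aligned 32-bit word that
   contains the operand.  SHIFT is the bit position of the subword
   within that big-endian word: for example, a byte at byte offset 3
   gets shift (3 << 3) ^ 24 == 0 (the least significant byte), while
   byte offset 0 gets shift 24 (the most significant byte).  Only the
   bits under MASK are changed; the rest of the word is merged back in
   before the compare-and-exchange.  */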
#define SUBWORD_SYNC_OP(OP, PFX_OP, INF_OP, TYPE, WIDTH, RETURN) \
  TYPE HIDDEN \
  NAME##_##RETURN (OP, WIDTH) (TYPE *ptr, TYPE sval) \
  { \
    unsigned *wordptr = (unsigned *) ((unsigned long) ptr & ~3); \
    unsigned int mask, shift, oldval, newval, cmpval, wval; \
    \
    shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH; \
    mask = MASK_##WIDTH << shift; \
    wval = (sval & MASK_##WIDTH) << shift; \
    \
    cmpval = *wordptr; \
    do { \
      oldval = cmpval; \
      newval = PFX_OP (oldval INF_OP wval); \
      newval = (newval & mask) | (oldval & ~mask); \
      cmpval = __kernel_cmpxchg (wordptr, oldval, newval); \
    } while (__builtin_expect (oldval != cmpval, 0)); \
    \
    return (RETURN >> shift) & MASK_##WIDTH; \
  }

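/* Fetch-and-operate entry points: each returns the value the operand
   had before the operation.  */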
WORD_SYNC_OP (add, , +, oldval)
WORD_SYNC_OP (sub, , -, oldval)
WORD_SYNC_OP (or, , |, oldval)
WORD_SYNC_OP (and, , &, oldval)
WORD_SYNC_OP (xor, , ^, oldval)
WORD_SYNC_OP (nand, ~, &, oldval)

SUBWORD_SYNC_OP (add, , +, unsigned short, 2, oldval)
SUBWORD_SYNC_OP (sub, , -, unsigned short, 2, oldval)
SUBWORD_SYNC_OP (or, , |, unsigned short, 2, oldval)
SUBWORD_SYNC_OP (and, , &, unsigned short, 2, oldval)
SUBWORD_SYNC_OP (xor, , ^, unsigned short, 2, oldval)
SUBWORD_SYNC_OP (nand, ~, &, unsigned short, 2, oldval)

SUBWORD_SYNC_OP (add, , +, unsigned char, 1, oldval)
SUBWORD_SYNC_OP (sub, , -, unsigned char, 1, oldval)
SUBWORD_SYNC_OP (or, , |, unsigned char, 1, oldval)
SUBWORD_SYNC_OP (and, , &, unsigned char, 1, oldval)
SUBWORD_SYNC_OP (xor, , ^, unsigned char, 1, oldval)
SUBWORD_SYNC_OP (nand, ~, &, unsigned char, 1, oldval)

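/* Operate-and-fetch entry points: each returns the updated value.  */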
WORD_SYNC_OP (add, , +, newval)
WORD_SYNC_OP (sub, , -, newval)
WORD_SYNC_OP (or, , |, newval)
WORD_SYNC_OP (and, , &, newval)
WORD_SYNC_OP (xor, , ^, newval)
WORD_SYNC_OP (nand, ~, &, newval)

SUBWORD_SYNC_OP (add, , +, unsigned short, 2, newval)
SUBWORD_SYNC_OP (sub, , -, unsigned short, 2, newval)
SUBWORD_SYNC_OP (or, , |, unsigned short, 2, newval)
SUBWORD_SYNC_OP (and, , &, unsigned short, 2, newval)
SUBWORD_SYNC_OP (xor, , ^, unsigned short, 2, newval)
SUBWORD_SYNC_OP (nand, ~, &, unsigned short, 2, newval)

SUBWORD_SYNC_OP (add, , +, unsigned char, 1, newval)
SUBWORD_SYNC_OP (sub, , -, unsigned char, 1, newval)
SUBWORD_SYNC_OP (or, , |, unsigned char, 1, newval)
SUBWORD_SYNC_OP (and, , &, unsigned char, 1, newval)
SUBWORD_SYNC_OP (xor, , ^, unsigned char, 1, newval)
SUBWORD_SYNC_OP (nand, ~, &, unsigned char, 1, newval)

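/* The 32-bit compare-and-swap entry points map directly onto the
   kernel helper; the boolean form just checks whether the value it
   returns equals OLDVAL.  */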
unsigned HIDDEN
__sync_val_compare_and_swap_4 (unsigned *ptr, unsigned oldval, unsigned newval)
{
  return __kernel_cmpxchg (ptr, oldval, newval);
}

bool HIDDEN
__sync_bool_compare_and_swap_4 (unsigned *ptr, unsigned oldval,
                                unsigned newval)
{
  return __kernel_cmpxchg (ptr, oldval, newval) == oldval;
}

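/* Subword compare-and-swap: shift OLDVAL and NEWVAL into their
   position within the containing word, then retry the word-sized
   compare-and-exchange until it either succeeds or the subword no
   longer matches OLDVAL (in which case the loop exits early and the
   mismatching value is returned).  */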
#define SUBWORD_VAL_CAS(TYPE, WIDTH) \
  TYPE HIDDEN \
  __sync_val_compare_and_swap_##WIDTH (TYPE *ptr, TYPE soldval, \
                                       TYPE snewval) \
  { \
    unsigned *wordptr = (unsigned *)((unsigned long) ptr & ~3); \
    unsigned int mask, shift, woldval, wnewval; \
    unsigned oldval, newval, cmpval; \
    \
    shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH; \
    mask = MASK_##WIDTH << shift; \
    woldval = (soldval & MASK_##WIDTH) << shift; \
    wnewval = (snewval & MASK_##WIDTH) << shift; \
    cmpval = *wordptr; \
    \
    do { \
      oldval = cmpval; \
      if ((oldval & mask) != woldval) \
        break; \
      newval = (oldval & ~mask) | wnewval; \
      cmpval = __kernel_cmpxchg (wordptr, oldval, newval); \
    } while (__builtin_expect (oldval != cmpval, 0)); \
    \
    return (oldval >> shift) & MASK_##WIDTH; \
  }

SUBWORD_VAL_CAS (unsigned short, 2)
SUBWORD_VAL_CAS (unsigned char, 1)

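/* Subword boolean compare-and-swap, layered on the value-returning
   variant above.  */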
#define SUBWORD_BOOL_CAS(TYPE, WIDTH) \
  bool HIDDEN \
  __sync_bool_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval, \
                                        TYPE newval) \
  { \
    return (__sync_val_compare_and_swap_##WIDTH (ptr, oldval, newval) \
            == oldval); \
  }

SUBWORD_BOOL_CAS (unsigned short, 2)
SUBWORD_BOOL_CAS (unsigned char, 1)

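/* __sync_lock_test_and_set is generated from the same loops:
   NAME_oldval is redefined so the expansions below produce the
   __sync_lock_test_and_set_<width> names, and the "operation" is the
   comma operator, so newval = (oldval, val) simply stores VAL while
   the previous value is returned.  */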
#undef NAME_oldval
#define NAME_oldval(OP, WIDTH) __sync_lock_##OP##_##WIDTH
#define COMMA ,

WORD_SYNC_OP (test_and_set, , COMMA, oldval)
SUBWORD_SYNC_OP (test_and_set, , COMMA, unsigned char, 1, oldval)
SUBWORD_SYNC_OP (test_and_set, , COMMA, unsigned short, 2, oldval)