/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/cmpxchg.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/build_bug.h>
#include <linux/compiler.h>

#include <asm/atomic.h>
#include <asm/barrier.h>
#include <asm/lse.h>

/*
 * We need separate acquire parameters for ll/sc and lse, since the full
 * barrier case is generated as release+dmb for the former and
 * acquire+release for the latter.
 */
#define __XCHG_CASE(w, sfx, name, sz, mb, nop_lse, acq, acq_lse, rel, cl)	\
static inline u##sz __xchg_case_##name##sz(u##sz x, volatile void *ptr)	\
{										\
	u##sz ret;								\
	unsigned long tmp;							\
										\
	asm volatile(ARM64_LSE_ATOMIC_INSN(					\
	/* LL/SC */								\
	"	prfm	pstl1strm, %2\n"					\
	"1:	ld" #acq "xr" #sfx "\t%" #w "0, %2\n"				\
	"	st" #rel "xr" #sfx "\t%w1, %" #w "3, %2\n"			\
	"	cbnz	%w1, 1b\n"						\
	"	" #mb,								\
	/* LSE atomics */							\
	"	swp" #acq_lse #rel #sfx "\t%" #w "3, %" #w "0, %2\n"		\
		__nops(3)							\
	"	" #nop_lse)							\
	: "=&r" (ret), "=&r" (tmp), "+Q" (*(u##sz *)ptr)			\
	: "r" (x)								\
	: cl);									\
										\
	return ret;								\
}

__XCHG_CASE(w, b,     ,  8,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, h,     , 16,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w,  ,     , 32,        ,    ,  ,  ,  ,         )
__XCHG_CASE( ,  ,     , 64,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, b, acq_,  8,        ,    , a, a,  , "memory")
__XCHG_CASE(w, h, acq_, 16,        ,    , a, a,  , "memory")
__XCHG_CASE(w,  , acq_, 32,        ,    , a, a,  , "memory")
__XCHG_CASE( ,  , acq_, 64,        ,    , a, a,  , "memory")
__XCHG_CASE(w, b, rel_,  8,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, h, rel_, 16,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w,  , rel_, 32,        ,    ,  ,  , l, "memory")
__XCHG_CASE( ,  , rel_, 64,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, b,  mb_,  8, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w, h,  mb_, 16, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w,  ,  mb_, 32, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE( ,  ,  mb_, 64, dmb ish, nop,  , a, l, "memory")

#undef __XCHG_CASE
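
/*
 * Expansion sketch (illustrative, not part of the original header): the
 * full-barrier instantiation __XCHG_CASE(w,  ,  mb_, 32, dmb ish, nop,
 *  , a, l, "memory") above generates __xchg_case_mb_32(). Its LL/SC body
 * is release + dmb, matching the comment before __XCHG_CASE:
 *
 *	prfm	pstl1strm, %2
 * 1:	ldxr	%w0, %2
 *	stlxr	%w1, %w3, %2
 *	cbnz	%w1, 1b
 *	dmb	ish
 *
 * while the LSE alternative is a single acquire+release swap, padded
 * with nops to the same length for runtime patching:
 *
 *	swpal	%w3, %w0, %2
 */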

#define __XCHG_GEN(sfx)						\
static inline unsigned long __xchg##sfx(unsigned long x,	\
					volatile void *ptr,	\
					int size)		\
{								\
	switch (size) {						\
	case 1:							\
		return __xchg_case##sfx##_8(x, ptr);		\
	case 2:							\
		return __xchg_case##sfx##_16(x, ptr);		\
	case 4:							\
		return __xchg_case##sfx##_32(x, ptr);		\
	case 8:							\
		return __xchg_case##sfx##_64(x, ptr);		\
	default:						\
		BUILD_BUG();					\
	}							\
								\
	unreachable();						\
}

__XCHG_GEN()
__XCHG_GEN(_acq)
__XCHG_GEN(_rel)
__XCHG_GEN(_mb)

#undef __XCHG_GEN

#define __xchg_wrapper(sfx, ptr, x)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__xchg##sfx((unsigned long)(x), (ptr), sizeof(*(ptr))); \
	__ret;								\
})

/* xchg */
#define arch_xchg_relaxed(...)	__xchg_wrapper(    , __VA_ARGS__)
#define arch_xchg_acquire(...)	__xchg_wrapper(_acq, __VA_ARGS__)
#define arch_xchg_release(...)	__xchg_wrapper(_rel, __VA_ARGS__)
#define arch_xchg(...)		__xchg_wrapper( _mb, __VA_ARGS__)
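
/*
 * Usage sketch (hypothetical helper, not part of the original header):
 * arch_xchg() atomically stores a new value and returns the old one with
 * full-barrier semantics; the _relaxed/_acquire/_release variants weaken
 * the ordering accordingly.
 */
static inline unsigned long example_xchg(unsigned long *p, unsigned long new)
{
	/* Full barrier: ordered against accesses on both sides. */
	return arch_xchg(p, new);
}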

#define __CMPXCHG_GEN(sfx)					\
static inline unsigned long __cmpxchg##sfx(volatile void *ptr,	\
					   unsigned long old,	\
					   unsigned long new,	\
					   int size)		\
{								\
	switch (size) {						\
	case 1:							\
		return __cmpxchg_case##sfx##_8(ptr, old, new);	\
	case 2:							\
		return __cmpxchg_case##sfx##_16(ptr, old, new);	\
	case 4:							\
		return __cmpxchg_case##sfx##_32(ptr, old, new);	\
	case 8:							\
		return __cmpxchg_case##sfx##_64(ptr, old, new);	\
	default:						\
		BUILD_BUG();					\
	}							\
								\
	unreachable();						\
}

__CMPXCHG_GEN()
__CMPXCHG_GEN(_acq)
__CMPXCHG_GEN(_rel)
__CMPXCHG_GEN(_mb)

#undef __CMPXCHG_GEN

#define __cmpxchg_wrapper(sfx, ptr, o, n)				\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__cmpxchg##sfx((ptr), (unsigned long)(o),		\
				(unsigned long)(n), sizeof(*(ptr)));	\
	__ret;								\
})

/* cmpxchg */
#define arch_cmpxchg_relaxed(...)	__cmpxchg_wrapper(    , __VA_ARGS__)
#define arch_cmpxchg_acquire(...)	__cmpxchg_wrapper(_acq, __VA_ARGS__)
#define arch_cmpxchg_release(...)	__cmpxchg_wrapper(_rel, __VA_ARGS__)
#define arch_cmpxchg(...)		__cmpxchg_wrapper( _mb, __VA_ARGS__)
#define arch_cmpxchg_local		arch_cmpxchg_relaxed
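
/*
 * Usage sketch (hypothetical helper, not part of the original header):
 * the classic cmpxchg() retry loop. arch_cmpxchg() returns the value it
 * found at *p; the update took effect iff that equals the expected old
 * value.
 */
static inline unsigned long example_fetch_add(unsigned long *p,
					      unsigned long inc)
{
	unsigned long old, seen = READ_ONCE(*p);

	do {
		old = seen;
		seen = arch_cmpxchg(p, old, old + inc);
	} while (seen != old);

	return old;	/* value before the increment */
}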

/* cmpxchg64 */
#define arch_cmpxchg64_relaxed		arch_cmpxchg_relaxed
#define arch_cmpxchg64_acquire		arch_cmpxchg_acquire
#define arch_cmpxchg64_release		arch_cmpxchg_release
#define arch_cmpxchg64			arch_cmpxchg
#define arch_cmpxchg64_local		arch_cmpxchg_local

/* cmpxchg_double */
#define system_has_cmpxchg_double()	1

#define __cmpxchg_double_check(ptr1, ptr2)				\
({									\
	if (sizeof(*(ptr1)) != 8)					\
		BUILD_BUG();						\
	VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1); \
})

#define arch_cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2)			\
({									\
	int __ret;							\
	__cmpxchg_double_check(ptr1, ptr2);				\
	__ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \
				     (unsigned long)(n1), (unsigned long)(n2), \
				     ptr1);				\
	__ret;								\
})

#define arch_cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2)		\
({									\
	int __ret;							\
	__cmpxchg_double_check(ptr1, ptr2);				\
	__ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \
				  (unsigned long)(n1), (unsigned long)(n2), \
				  ptr1);				\
	__ret;								\
})
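
/*
 * Usage sketch (hypothetical types, not part of the original header):
 * arch_cmpxchg_double() compares and swaps two naturally adjacent 64-bit
 * words as one atomic unit, e.g. both halves of a 16-byte-aligned pair,
 * returning nonzero on success.
 */
struct example_pair {
	unsigned long first;
	unsigned long second;
} __attribute__((aligned(16)));

static inline int example_pair_replace(struct example_pair *p,
				       unsigned long o1, unsigned long o2,
				       unsigned long n1, unsigned long n2)
{
	return arch_cmpxchg_double(&p->first, &p->second, o1, o2, n1, n2);
}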

#define __CMPWAIT_CASE(w, sfx, sz)					\
static inline void __cmpwait_case_##sz(volatile void *ptr,		\
				       unsigned long val)		\
{									\
	unsigned long tmp;						\
									\
	asm volatile(							\
	"	sevl\n"							\
	"	wfe\n"							\
	"	ldxr" #sfx "\t%" #w "[tmp], %[v]\n"			\
	"	eor	%" #w "[tmp], %" #w "[tmp], %" #w "[val]\n"	\
	"	cbnz	%" #w "[tmp], 1f\n"				\
	"	wfe\n"							\
	"1:"								\
	: [tmp] "=&r" (tmp), [v] "+Q" (*(unsigned long *)ptr)		\
	: [val] "r" (val));						\
}

__CMPWAIT_CASE(w, b, 8);
__CMPWAIT_CASE(w, h, 16);
__CMPWAIT_CASE(w,  , 32);
__CMPWAIT_CASE( ,  , 64);

#undef __CMPWAIT_CASE

#define __CMPWAIT_GEN(sfx)						\
static inline void __cmpwait##sfx(volatile void *ptr,			\
				  unsigned long val,			\
				  int size)				\
{									\
	switch (size) {							\
	case 1:								\
		return __cmpwait_case##sfx##_8(ptr, (u8)val);		\
	case 2:								\
		return __cmpwait_case##sfx##_16(ptr, (u16)val);		\
	case 4:								\
		return __cmpwait_case##sfx##_32(ptr, val);		\
	case 8:								\
		return __cmpwait_case##sfx##_64(ptr, val);		\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__CMPWAIT_GEN()

#undef __CMPWAIT_GEN

#define __cmpwait_relaxed(ptr, val) \
	__cmpwait((ptr), (unsigned long)(val), sizeof(*(ptr)))
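
/*
 * Usage sketch (hypothetical helper, not part of the original header):
 * __cmpwait_relaxed() parks the CPU in WFE until *ptr is observed to
 * differ from the last value seen (wake-ups may be spurious, hence the
 * loop); it is the building block behind arm64's smp_cond_load_relaxed().
 */
static inline unsigned long example_wait_for_change(unsigned long *p,
						    unsigned long seen)
{
	unsigned long val;

	while ((val = READ_ONCE(*p)) == seen)
		__cmpwait_relaxed(p, seen);

	return val;
}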

#endif	/* __ASM_CMPXCHG_H */