/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_SPINLOCK_H
#define _ASM_IA64_SPINLOCK_H

/*
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * This file is used for SMP configurations only.
 */

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/bitops.h>

#include <linux/atomic.h>
#include <asm/intrinsics.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#define arch_spin_lock_init(x)			((x)->lock = 0)

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail.  The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 * The pad bits in the middle are used to prevent the next_ticket number
 * overflowing into the now_serving number.
 *
 *   31             17  16    15  14                    0
 *  +----------------------------------------------------+
 *  |  now_serving     | padding |   next_ticket         |
 *  +----------------------------------------------------+
 */

#define TICKET_SHIFT	17
#define TICKET_BITS	15
#define TICKET_MASK	((1 << TICKET_BITS) - 1)

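/*
 * For reference (the field names below are descriptive only, not part of
 * this header's interface): a raw lock word w decodes as
 *
 *	next_ticket = w & TICKET_MASK;			   (bits 14..0)
 *	now_serving = (w >> TICKET_SHIFT) & TICKET_MASK;   (bits 31..17)
 *
 * The lock is free exactly when the two fields are equal, which is what
 * the (((w >> TICKET_SHIFT) ^ w) & TICKET_MASK) tests below check.
 * Taking the lock is an ia64_fetchadd(1, ...) on the whole word: it bumps
 * next_ticket and returns the previous word, whose low bits are the
 * caller's ticket.
 */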
static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
	int	*p = (int *)&lock->lock, ticket, serve;

	ticket = ia64_fetchadd(1, p, acq);

	if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
		return;

	ia64_invala();

	for (;;) {
		asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(p) : "memory");

		if (!(((serve >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
			return;
		cpu_relax();
	}
}

static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->lock);

	if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK))
		return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp;
	return 0;
}

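/*
 * Unlocking advances now_serving by one.  With TICKET_SHIFT == 17,
 * now_serving occupies bits 1..15 of the upper halfword of the lock word,
 * so the code below loads that halfword, adds 2 (i.e. 1 << 1), and keeps
 * the pad bit (bit 0 of the halfword) clear with "& ~1".
 */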
static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
	unsigned short	*p = (unsigned short *)&lock->lock + 1, tmp;

	asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
	ACCESS_ONCE(*p) = (tmp + 2) & ~1;
}

static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
	long tmp = ACCESS_ONCE(lock->lock);

	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
}

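/*
 * The lock counts as contended when more than one ticket is outstanding,
 * i.e. next_ticket is at least two ahead of now_serving (one holder plus
 * at least one waiter).
 */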
static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
	long tmp = ACCESS_ONCE(lock->lock);

	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
}

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return !(((lock.lock >> TICKET_SHIFT) ^ lock.lock) & TICKET_MASK);
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}
#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}

static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						 unsigned long flags)
{
	arch_spin_lock(lock);
}

#define arch_read_can_lock(rw)		(*(volatile int *)(rw) >= 0)
#define arch_write_can_lock(rw)		(*(volatile int *)(rw) == 0)

#ifdef ASM_SUPPORTED

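/*
 * flags is the caller's saved psr value.  If a writer holds the lock, the
 * speculative +1 is backed out with a fetchadd of -1 and the code spins on
 * a plain load of the word.  While spinning, interrupts are re-enabled
 * (ssm psr.i) if IA64_PSR_I_BIT was set in flags, and masked again
 * (rsm psr.i) before the fetchadd acquisition is retried, so interrupts
 * are only held off across the actual acquisition attempt.
 */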
static __always_inline void
arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
	__asm__ __volatile__ (
		"tbit.nz p6, p0 = %1,%2\n"
		"br.few 3f\n"
		"1:\n"
		"fetchadd4.rel r2 = [%0], -1;;\n"
		"(p6) ssm psr.i\n"
		"2:\n"
		"hint @pause\n"
		"ld4 r2 = [%0];;\n"
		"cmp4.lt p7,p0 = r2, r0\n"
		"(p7) br.cond.spnt.few 2b\n"
		"(p6) rsm psr.i\n"
		";;\n"
		"3:\n"
		"fetchadd4.acq r2 = [%0], 1;;\n"
		"cmp4.lt p7,p0 = r2, r0\n"
		"(p7) br.cond.spnt.few 1b\n"
		: : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
		: "p6", "p7", "r2", "memory");
}

#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)

#else /* !ASM_SUPPORTED */

#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)

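/*
 * Generic (non-assembly) version: speculatively bump the reader count
 * with an acquire fetchadd.  A negative old value means a writer holds
 * the lock (bit 31 set), so the increment is backed out, the code waits
 * for the word to turn non-negative, and then retries.
 */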
#define arch_read_lock(rw)							\
do {										\
	arch_rwlock_t *__read_lock_ptr = (rw);					\
										\
	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {	\
		ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);		\
		while (*(volatile int *)__read_lock_ptr < 0)			\
			cpu_relax();						\
	}									\
} while (0)

#endif /* !ASM_SUPPORTED */

#define arch_read_unlock(rw)					\
do {								\
	arch_rwlock_t *__read_lock_ptr = (rw);			\
	ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);	\
} while (0)

#ifdef ASM_SUPPORTED

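/*
 * r29 is loaded with 1 << 31 (the write_lock bit) via dep, and ar.ccv with
 * the expected value 0.  The lock word is polled until it reads as zero
 * (no readers, no writer), then a cmpxchg4.acq of r29 against 0 tries to
 * claim it.  As on the read side, interrupts are re-enabled while spinning
 * if IA64_PSR_I_BIT was set in flags.
 */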
static __always_inline void
arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
	__asm__ __volatile__ (
		"tbit.nz p6, p0 = %1, %2\n"
		"mov ar.ccv = r0\n"
		"dep r29 = -1, r0, 31, 1\n"
		"br.few 3f;;\n"
		"1:\n"
		"(p6) ssm psr.i\n"
		"2:\n"
		"hint @pause\n"
		"ld4 r2 = [%0];;\n"
		"cmp4.eq p0,p7 = r0, r2\n"
		"(p7) br.cond.spnt.few 2b\n"
		"(p6) rsm psr.i\n"
		";;\n"
		"3:\n"
		"cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"
		"cmp4.eq p0,p7 = r0, r2\n"
		"(p7) br.cond.spnt.few 1b;;\n"
		: : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
		: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
}

#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)

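/*
 * A single cmpxchg4.acq attempt: swap in the write_lock bit (1 << 31,
 * built with dep) if the lock word is currently 0.  The returned old
 * value is 0 exactly when the lock was free and has now been taken.
 */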
#define arch_write_trylock(rw)						\
({									\
	register long result;						\
									\
	__asm__ __volatile__ (						\
		"mov ar.ccv = r0\n"					\
		"dep r29 = -1, r0, 31, 1;;\n"				\
		"cmpxchg4.acq %0 = [%1], r29, ar.ccv\n"			\
		: "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory");	\
	(result == 0);							\
})

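/*
 * Releasing the write lock only needs to clear bit 31.  The st1.rel below
 * stores zero into byte 3 of the (little-endian) lock word, the byte that
 * holds the write_lock bit, with release ordering, leaving the low-order
 * reader-count bytes untouched.
 */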
static inline void arch_write_unlock(arch_rwlock_t *x)
{
	u8 *y = (u8 *)x;
	barrier();
	asm volatile ("st1.rel.nta [%0] = r0\n\t" :: "r"(y+3) : "memory" );
}

#else /* !ASM_SUPPORTED */

#define arch_write_lock_flags(l, flags) arch_write_lock(l)

#define arch_write_lock(l)							\
({										\
	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);		\
	__u32 *ia64_write_lock_ptr = (__u32 *) (l);				\
	do {									\
		while (*ia64_write_lock_ptr)					\
			ia64_barrier();						\
		ia64_val = ia64_cmpxchg4_acq(ia64_write_lock_ptr, ia64_set_val, 0); \
	} while (ia64_val);							\
})

#define arch_write_trylock(rw)						\
({									\
	__u64 ia64_val;							\
	__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);			\
	ia64_val = ia64_cmpxchg4_acq((__u32 *)(rw), ia64_set_val, 0);	\
	(ia64_val == 0);						\
})

static inline void arch_write_unlock(arch_rwlock_t *x)
{
	barrier();
	x->write_lock = 0;
}

#endif /* !ASM_SUPPORTED */

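/*
 * Snapshot the lock, build an expected value with write_lock clear and a
 * new value with the reader count bumped, then try to install the new
 * value with a single cmpxchg4.acq.  The attempt fails if a writer holds
 * the lock or the word changed since the snapshot.
 */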
static inline int arch_read_trylock(arch_rwlock_t *x)
{
	union {
		arch_rwlock_t lock;
		__u32 word;
	} old, new;
	old.lock = new.lock = *x;
	old.lock.write_lock = new.lock.write_lock = 0;
	++new.lock.read_counter;
	return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
}

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* _ASM_IA64_SPINLOCK_H */