// SPDX-License-Identifier: GPL-2.0
/*
 *    Out of line spinlock code.
 *
 *    Copyright IBM Corp. 2004, 2006
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/io.h>

int spin_retry = -1;

static int __init spin_retry_init(void)
{
	if (spin_retry < 0)
		spin_retry = 1000;
	return 0;
}
early_initcall(spin_retry_init);

/**
 * spin_retry= parameter
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);
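
/*
 * Usage sketch (an editorial note, not from the original file): the
 * retry count can be tuned at boot time on the kernel command line,
 * e.g. "spin_retry=2000". A value of 0 makes contended lock attempts
 * fall through to yielding almost immediately; the default of 1000 is
 * applied by spin_retry_init() above only if no value was given.
 */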

static inline int arch_load_niai4(int *lock)
{
	int owner;

	asm volatile(
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
		"	.long	0xb2fa0040\n"	/* NIAI 4 */
#endif
		"	l	%0,%1\n"
		: "=d" (owner) : "Q" (*lock) : "memory");
	return owner;
}
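
/*
 * Editorial note on the raw opcode above: 0xb2fa... encodes NIAI
 * ("next instruction access intent", part of the zEC12 execution-hint
 * facility); it is emitted as a .long so that assemblers without the
 * mnemonic can still build the file. As I read it, NIAI 4 hints that
 * the next instruction only reads the cache line, while NIAI 8 below
 * hints an exclusive (store) access, which helps avoid bouncing the
 * lock cache line between spinning CPUs.
 */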

static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
{
	int expected = old;

	asm volatile(
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
		"	.long	0xb2fa0080\n"	/* NIAI 8 */
#endif
		"	cs	%0,%3,%1\n"
		: "=d" (old), "=Q" (*lock)
		: "0" (old), "d" (new), "Q" (*lock)
		: "cc", "memory");
	return expected == old;
}
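
/*
 * Illustrative sketch only: CS (compare-and-swap) atomically performs
 * roughly the following, returning the pre-existing lock value in the
 * first operand, so the function reports success exactly when the
 * value it expected is the value it got back:
 *
 *	if (*lock == old) {
 *		*lock = new;	// lock word updated
 *		return 1;
 *	}
 *	return 0;		// someone else changed the lock word
 */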

void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	int cpu = SPINLOCK_LOCKVAL;
	int owner, count;

	/* Pass the virtual CPU to the lock holder if it is not running */
	owner = arch_load_niai4(&lp->lock);
	if (owner && arch_vcpu_is_preempted(~owner))
		smp_yield_cpu(~owner);

	count = spin_retry;
	while (1) {
		owner = arch_load_niai4(&lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (arch_cmpxchg_niai8(&lp->lock, 0, cpu))
				return;
			continue;
		}
		if (count-- >= 0)
			continue;
		count = spin_retry;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR,
		 * yield the CPU unconditionally. For LPAR rely on the
		 * sense running status.
		 */
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner))
			smp_yield_cpu(~owner);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);
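
/*
 * A note on the lock value, as I understand the surrounding code:
 * SPINLOCK_LOCKVAL holds the bitwise complement of the owning CPU
 * number, so a lock word of 0 means "free", a non-zero word identifies
 * the holder, and ~owner recovers the CPU number that is passed to
 * smp_yield_cpu() and arch_vcpu_is_preempted(). The complement is used
 * because CPU 0 would otherwise be indistinguishable from an unlocked
 * lock.
 */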

void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	int cpu = SPINLOCK_LOCKVAL;
	int owner, count;

	local_irq_restore(flags);

	/* Pass the virtual CPU to the lock holder if it is not running */
	owner = arch_load_niai4(&lp->lock);
	if (owner && arch_vcpu_is_preempted(~owner))
		smp_yield_cpu(~owner);

	count = spin_retry;
	while (1) {
		owner = arch_load_niai4(&lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			local_irq_disable();
			if (arch_cmpxchg_niai8(&lp->lock, 0, cpu))
				return;
			local_irq_restore(flags);
			continue;
		}
		if (count-- >= 0)
			continue;
		count = spin_retry;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR,
		 * yield the CPU unconditionally. For LPAR rely on the
		 * sense running status.
		 */
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner))
			smp_yield_cpu(~owner);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);
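
/*
 * As I read the _flags variant: interrupts are re-enabled via
 * local_irq_restore() for the duration of the spin, so pending
 * interrupts are not delayed by lock contention, and they are disabled
 * again only around the actual acquisition attempt; on success the
 * function returns with the lock held and interrupts off, as the
 * irqsave locking path expects.
 */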

int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	int cpu = SPINLOCK_LOCKVAL;
	int owner, count;

	for (count = spin_retry; count > 0; count--) {
		owner = READ_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
				return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);
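
/*
 * Usage sketch, assuming an inline fast-path helper along the lines of
 * arch_spin_trylock_once() in the s390 headers (hypothetical wiring,
 * shown for illustration):
 *
 *	if (!arch_spin_trylock_once(lp))
 *		locked = arch_spin_trylock_retry(lp);
 *
 * Unlike the wait loops above, a trylock must never block, so this
 * function does not yield the CPU to the lock holder.
 */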

void _raw_read_lock_wait(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int owner, old;

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	__RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
#endif
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if (old < 0)
			continue;
		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);
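
/*
 * Lock word layout, as I understand it: bit 31 (0x80000000) set means
 * write-locked, so the word is negative while a writer holds the lock,
 * and the low 31 bits count the readers. A reader therefore waits until
 * the word is non-negative and then tries to bump the count by one.
 * The __RAW_LOCK(..., -1, __RAW_OP_ADD) above presumably backs out the
 * speculative increment made by the z196 inline fast path before
 * settling into the wait loop.
 */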

int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int old;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if (old < 0)
			continue;
		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);
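
/*
 * Note: unlike the wait loop above, this trylock variant never yields
 * to the lock holder; it simply retries up to spin_retry times and
 * then reports failure, since a trylock caller must be able to proceed
 * without ever blocking.
 */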

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

void _raw_write_lock_wait(arch_rwlock_t *rw, int prev)
{
	int count = spin_retry;
	int owner, old;

	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		smp_mb();
		if (old >= 0) {
			prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
			old = prev;
		}
		if ((old & 0x7fffffff) == 0 && prev >= 0)
			break;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);
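
/*
 * Sketch of the z196 path, under my reading of __RAW_LOCK: with the
 * interlocked-access facility, __RAW_OP_OR maps to a single
 * load-and-or instruction that atomically sets the write bit and
 * returns the previous lock word. The loop keeps re-asserting bit 31
 * whenever the word looks writer-free and exits once no readers remain
 * ((old & 0x7fffffff) == 0) and the bit was set by us rather than a
 * competing writer (prev >= 0).
 */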

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

void _raw_write_lock_wait(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int owner, old, prev;

	prev = 0x80000000;
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if (old >= 0 &&
		    __atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000))
			prev = old;
		else
			smp_mb();
		if ((old & 0x7fffffff) == 0 && prev >= 0)
			break;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);
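
/*
 * The pre-z196 fallback achieves the same with compare-and-swap: it
 * first gets bit 31 set via cmpxchg (prev records the word that was
 * replaced, so prev >= 0 means no other writer was ahead of us), then
 * keeps looping until the reader count in the low 31 bits drains to
 * zero.
 */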

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int old;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if (old)
			continue;
		if (__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);
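
/*
 * Note: a writer can only trylock a completely idle lock (word == 0,
 * i.e. no readers and no writer), which is why the cmpxchg swaps 0 for
 * 0x80000000 instead of OR-ing the write bit into a live word.
 */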

void arch_lock_relax(int cpu)
{
	if (!cpu)
		return;
	if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(~cpu))
		return;
	smp_yield_cpu(~cpu);
}
EXPORT_SYMBOL(arch_lock_relax);
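
/*
 * As I understand it, arch_lock_relax() above backs the arch_*_relax()
 * hooks invoked by generic code while it spins: cpu is the complemented
 * owner value read out of a lock word (0 meaning no recorded owner),
 * and on LPAR a directed yield is only worth issuing when the owning
 * virtual CPU is currently preempted.
 */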