/* SPDX-License-Identifier: GPL-2.0 */
/* spinlock.h: 32-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC_SPINLOCK_H
#define __SPARC_SPINLOCK_H

#ifndef __ASSEMBLY__

#include <asm/psr.h>
#include <asm/barrier.h>
#include <asm/processor.h> /* for cpu_relax */

#define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)

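/* The lock word is a single byte: zero means free, 0xff means held.
 * ldstub atomically loads that byte and stores all-ones back, so a
 * zero result means this CPU just took the lock.  The slow path below
 * spins on a plain ldub until the byte clears, then retries the ldstub.
 */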
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__asm__ __volatile__(
	"\n1:\n\t"
	"ldstub	[%0], %%g2\n\t"
	"orcc	%%g2, 0x0, %%g0\n\t"
	"bne,a	2f\n\t"
	" ldub	[%0], %%g2\n\t"
	".subsection	2\n"
	"2:\n\t"
	"orcc	%%g2, 0x0, %%g0\n\t"
	"bne,a	2b\n\t"
	" ldub	[%0], %%g2\n\t"
	"b,a	1b\n\t"
	".previous\n"
	: /* no outputs */
	: "r" (lock)
	: "g2", "memory", "cc");
}
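
/* For reference, the loop above behaves roughly like this sketch
 * using GCC atomic builtins (illustrative only, not part of this
 * header or of the kernel's locking API):
 *
 *	while (__atomic_test_and_set((volatile char *)lock, __ATOMIC_ACQUIRE))
 *		while (*(volatile unsigned char *)lock)
 *			cpu_relax();
 */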

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int result;
	__asm__ __volatile__("ldstub [%1], %0"
			     : "=r" (result)
			     : "r" (lock)
			     : "memory");
	return (result == 0);
}
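
/* Equivalently, as a builtin-based one-liner (sketch, not kernel API):
 *
 *	return !__atomic_test_and_set((volatile char *)lock, __ATOMIC_ACQUIRE);
 */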

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
}
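
/* Unlock is a plain zero store to the lock byte; the "memory" clobber
 * acts as a compiler barrier so critical-section accesses cannot be
 * moved past the release.
 */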

/* Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * XXX This might create some problems with my dual spinlock
 * XXX scheme, deadlocks etc. -DaveM
 *
 * Sort of like atomic_t's on Sparc, but even more clever.
 *
 * ------------------------------------
 * | 24-bit counter            | wlock |  arch_rwlock_t
 * ------------------------------------
 *  31                        8 7      0
 *
 * wlock signifies that either a writer is in, or somebody is
 * updating the counter. If a writer successfully acquires wlock
 * but the counter is non-zero, it has to release the lock and
 * wait until both the counter and wlock are zero.
 *
 * Unfortunately this scheme limits us to ~16,000,000 cpus.
 */
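/* In terms of that layout (SPARC is big-endian, so the wlock byte is
 * byte 3 of the word, which is what the "ldstub [%g1 + 3]" below hits),
 * the two fields decode like this illustrative sketch:
 *
 *	readers = rw->lock >> 8;	// 24-bit reader counter
 *	wlock   = rw->lock & 0xff;	// writer / update flag
 */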
static inline void __arch_read_lock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_enter\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}
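
/* The ldstub in the call's delay slot grabs the wlock byte; the actual
 * counter update happens out of line in ___rw_read_enter, which gets
 * its return address through %g4 because the call clobbers %o7.
 */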

#define arch_read_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__arch_read_lock(lock); \
	local_irq_restore(flags); \
} while(0)

static inline void __arch_read_unlock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_exit\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}

#define arch_read_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__arch_read_unlock(lock); \
	local_irq_restore(flags); \
} while(0)

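/* ___rw_write_enter spins until it owns wlock and the reader count has
 * drained to zero; on return we stamp ~0 into the whole word so both
 * the reader and writer fast paths see the lock as taken.
 */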
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_write_enter\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
	*(volatile __u32 *)&lp->lock = ~0U;
}

static inline void arch_write_unlock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
	"	st	%%g0, [%0]"
	: /* no outputs */
	: "r" (lock)
	: "memory");
}

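/* Grab the wlock byte with ldstub; if readers still hold the counter,
 * back off by clearing the byte again, otherwise claim the whole word.
 */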
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int val;

	/* Try for exclusive access to the wlock byte. */
	__asm__ __volatile__("ldstub [%1 + 3], %0"
			     : "=r" (val)
			     : "r" (&rw->lock)
			     : "memory");

	if (val == 0) {
		val = rw->lock & ~0xff;		/* any readers left? */
		if (val)
			((volatile u8*)&rw->lock)[3] = 0;	/* back off */
		else
			*(volatile u32*)&rw->lock = ~0U;	/* we own it */
	}

	return (val == 0);
}

static inline int __arch_read_trylock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	register int res asm("o0");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_try\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: "=r" (res)
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
	return res;
}

#define arch_read_trylock(lock) \
({	unsigned long flags; \
	int res; \
	local_irq_save(flags); \
	res = __arch_read_trylock(lock); \
	local_irq_restore(flags); \
	res; \
})

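/* The *_lock_flags() variants ignore the saved flags and simply take
 * the lock: this port does not re-enable interrupts while spinning.
 */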
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
#define arch_write_lock_flags(rw, flags) arch_write_lock(rw)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#define arch_read_can_lock(rw) (!((rw)->lock & 0xff))
#define arch_write_can_lock(rw) (!(rw)->lock)

#endif /* !(__ASSEMBLY__) */

#endif /* __SPARC_SPINLOCK_H */