arch/x86/include/asm/spinlock.h
#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <linux/jump_label.h>
#include <linux/atomic.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
#include <asm/bitops.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which support up to 2^16 CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
#else
# define LOCK_PTR_REG "D"
#endif

#if defined(CONFIG_X86_32) && (defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

/* How long a lock should spin before we consider blocking */
#define SPIN_THRESHOLD (1 << 15)

extern struct static_key paravirt_ticketlocks_enabled;
static __always_inline bool static_key_false(struct static_key *key);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
{
        set_bit(0, (volatile unsigned long *)&lock->tickets.tail);
}

#else /* !CONFIG_PARAVIRT_SPINLOCKS */
static __always_inline void __ticket_lock_spinning(arch_spinlock_t *lock,
                                                   __ticket_t ticket)
{
}
static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
                                        __ticket_t ticket)
{
}

#endif /* CONFIG_PARAVIRT_SPINLOCKS */

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
        return lock.tickets.head == lock.tickets.tail;
}

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourself to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 *
 * (A standalone sketch of this scheme follows arch_spin_lock() below.)
 */
static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
        register struct __raw_tickets inc = { .tail = TICKET_LOCK_INC };

        inc = xadd(&lock->tickets, inc);
        if (likely(inc.head == inc.tail))
                goto out;

        inc.tail &= ~TICKET_SLOWPATH_FLAG;
        for (;;) {
                unsigned count = SPIN_THRESHOLD;

                do {
                        if (READ_ONCE(lock->tickets.head) == inc.tail)
                                goto out;
                        cpu_relax();
                } while (--count);
                __ticket_lock_spinning(lock, inc.tail);
        }
out:    barrier();      /* make sure nothing creeps before the lock is taken */
}
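
/*
 * A minimal, self-contained sketch of the same packed ticket-lock idea using
 * C11 atomics, for illustration only: the demo_* names are hypothetical and
 * not part of any kernel API, and the paravirt slowpath machinery
 * (TICKET_SLOWPATH_FLAG, __ticket_lock_spinning()) is deliberately omitted.
 * As in the comment above, a single fetch-add on the combined word takes a
 * ticket (high half) and reads the current head (low half) in one atomic op.
 */
#if 0 /* illustrative userspace sketch, not kernel code */
#include <stdatomic.h>
#include <stdint.h>

struct demo_ticket_lock {
        _Atomic uint32_t head_tail;     /* low 16 bits: head, high 16 bits: tail */
};

static void demo_ticket_lock_acquire(struct demo_ticket_lock *lock)
{
        /* Grab a ticket: bump the tail and read both halves in one atomic op. */
        uint32_t old = atomic_fetch_add_explicit(&lock->head_tail, 1u << 16,
                                                 memory_order_acquire);
        uint16_t ticket = (uint16_t)(old >> 16);

        /* The lock is ours once the head reaches our ticket number. */
        while ((uint16_t)atomic_load_explicit(&lock->head_tail,
                                              memory_order_acquire) != ticket)
                ;       /* a cpu_relax()-style pause would go here */
}

static void demo_ticket_lock_release(struct demo_ticket_lock *lock)
{
        uint32_t old = atomic_load_explicit(&lock->head_tail, memory_order_relaxed);
        uint32_t new;

        /*
         * Advance only the head half; the CAS loop masks off any carry so a
         * head wraparound cannot contaminate the tail.  The kernel avoids the
         * loop entirely by doing a narrow add on lock->tickets.head alone.
         */
        do {
                new = (old & 0xffff0000u) | ((old + 1) & 0x0000ffffu);
        } while (!atomic_compare_exchange_weak_explicit(&lock->head_tail, &old, new,
                                                        memory_order_release,
                                                        memory_order_relaxed));
}
#endif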

static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        arch_spinlock_t old, new;

        old.tickets = READ_ONCE(lock->tickets);
        if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG))
                return 0;

        new.head_tail = old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT);

        /* cmpxchg is a full barrier, so nothing can move before it */
        return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
}

static inline void __ticket_unlock_slowpath(arch_spinlock_t *lock,
                                            arch_spinlock_t old)
{
        arch_spinlock_t new;

        BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);

        /* Perform the unlock on the "before" copy */
        old.tickets.head += TICKET_LOCK_INC;

        /* Clear the slowpath flag */
        new.head_tail = old.head_tail & ~(TICKET_SLOWPATH_FLAG << TICKET_SHIFT);

        /*
         * If the lock is uncontended, clear the flag - use cmpxchg in
         * case it changes behind our back though.
         */
        if (new.tickets.head != new.tickets.tail ||
            cmpxchg(&lock->head_tail, old.head_tail,
                    new.head_tail) != old.head_tail) {
                /*
                 * Lock still has someone queued for it, so wake up an
                 * appropriate waiter.
                 */
                __ticket_unlock_kick(lock, old.tickets.head);
        }
}

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        if (TICKET_SLOWPATH_FLAG &&
            static_key_false(&paravirt_ticketlocks_enabled)) {
                arch_spinlock_t prev;

                prev = *lock;
                add_smp(&lock->tickets.head, TICKET_LOCK_INC);

                /* add_smp() is a full mb() */

                if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG))
                        __ticket_unlock_slowpath(lock, prev);
        } else
                __add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
        struct __raw_tickets tmp = READ_ONCE(lock->tickets);

        return tmp.tail != tmp.head;
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
        struct __raw_tickets tmp = READ_ONCE(lock->tickets);

        return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
}
#define arch_spin_is_contended arch_spin_is_contended
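
/*
 * Worked example (assuming TICKET_LOCK_INC == 1, i.e. no paravirt slowpath
 * flag): with head == 5 and tail == 5 the lock is free; with tail == 6 it is
 * held but uncontended, since tail - head == 1 is not greater than
 * TICKET_LOCK_INC; with tail == 8 there is a holder plus two queued waiters,
 * so arch_spin_is_contended() returns true.
 */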

static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
                                                 unsigned long flags)
{
        arch_spin_lock(lock);
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
        __ticket_t head = ACCESS_ONCE(lock->tickets.head);

        for (;;) {
                struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
                /*
                 * We need to check "unlocked" in a loop, tmp.head == head
                 * can be a false positive because of overflow.
                 */
                if (tmp.head == (tmp.tail & ~TICKET_SLOWPATH_FLAG) ||
                    tmp.head != head)
                        break;

                cpu_relax();
        }
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks using the generic qrwlock with
 * x86 specific optimization.
 */

#include <asm/qrwlock.h>
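
/*
 * A hedged usage sketch of the pattern described above: a writer that can
 * race with readers running in interrupt context disables interrupts around
 * the write lock, while the readers themselves can take a plain read_lock().
 * The demo_* names are hypothetical; the locking calls (DEFINE_RWLOCK,
 * read_lock, write_lock_irqsave) belong to the generic kernel rwlock API
 * rather than anything defined in this header.
 */
#if 0 /* illustrative sketch, not part of this header */
#include <linux/spinlock.h>

static DEFINE_RWLOCK(demo_lock);
static int demo_value;

/* Reader: may run in process or interrupt context. */
static int demo_read(void)
{
        int v;

        read_lock(&demo_lock);
        v = demo_value;
        read_unlock(&demo_lock);
        return v;
}

/* Writer: runs in process context but must block interrupt readers locally. */
static void demo_write(int v)
{
        unsigned long flags;

        write_lock_irqsave(&demo_lock, flags);
        demo_value = v;
        write_unlock_irqrestore(&demo_lock, flags);
}
#endif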

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()

#endif /* _ASM_X86_SPINLOCK_H */