From 2c610022711675ee908b903d242f0b90e1db661f Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Wed, 8 Jun 2016 10:19:51 +0200
Subject: locking/qspinlock: Fix spin_unlock_wait() some more

From: Peter Zijlstra <peterz@infradead.org>

commit 2c610022711675ee908b903d242f0b90e1db661f upstream.

While this prior commit:

  54cf809b9512 ("locking,qspinlock: Fix spin_is_locked() and spin_unlock_wait()")

... fixes spin_is_locked() and spin_unlock_wait() for the usage
in ipc/sem and netfilter, it does not in fact work right for the
usage in task_work and futex.

So while the 2 locks crossed problem:

    spin_lock(A)                spin_lock(B)
    if (!spin_is_locked(B))     spin_unlock_wait(A)
      foo()                       foo();

... works with the smp_mb() injected by both spin_is_locked() and
spin_unlock_wait(), this is not sufficient for:

    flag = 1;
    smp_mb();                   spin_lock()
    spin_unlock_wait()          if (!flag)
                                  // add to lockless list
    // iterate lockless list

... because in this scenario, the store from spin_lock() can be delayed
past the load of flag, uncrossing the variables and losing the
guarantee.
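
To make that pattern concrete, here is an illustrative sketch of the kind of
usage the second example abstracts (this sketch is not part of the original
changelog; cancel_flag, pending_lock, queue_side() and cancel_side() are
invented names, and the list manipulation is reduced to comments):

    /* Sketch only -- invented names, not taken from the affected callers. */
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(pending_lock);
    static int cancel_flag;

    /* Right-hand column above: take the lock, then check the flag. */
    static void queue_side(void)
    {
            spin_lock(&pending_lock);
            if (!READ_ONCE(cancel_flag)) {
                    /* add to lockless list */
            }
            spin_unlock(&pending_lock);
    }

    /* Left-hand column above: set the flag, then wait out any lock holder. */
    static void cancel_side(void)
    {
            WRITE_ONCE(cancel_flag, 1);
            smp_mb();
            spin_unlock_wait(&pending_lock);
            /* iterate lockless list: relies on either having waited for
             * queue_side()'s critical section, or on queue_side() having
             * observed cancel_flag == 1 */
    }

With the old generic code, queue_side()'s slowpath could (load-)acquire the
lock before its _Q_LOCKED_VAL store became visible, so cancel_side() could
observe the lock as unlocked and walk the list while queue_side() was still
adding to it without having seen cancel_flag.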

This patch reworks spin_is_locked() and spin_unlock_wait() to work in
both cases by exploiting the observation that while the lock byte
store can be delayed, the contender must have registered itself
visibly in other state contained in the word.

It also allows for architectures to override both functions, as PPC
and ARM64 have an additional issue for which we currently have no
generic solution.
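
For reference, an architecture would hook the new #ifndef guards roughly as
follows (an illustrative sketch only, not part of this patch; the header
path and the "xyz" architecture are hypothetical):

    /* arch/xyz/include/asm/qspinlock.h -- hypothetical placement */

    /* Suppress the generic declaration and the definition in qspinlock.c ... */
    #define queued_spin_unlock_wait queued_spin_unlock_wait
    /* ... and provide an arch-specific implementation instead. */
    extern void queued_spin_unlock_wait(struct qspinlock *lock);

    #include <asm-generic/qspinlock.h>

queued_spin_is_locked() can be overridden the same way through its own
#ifndef guard.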

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Giovanni Gherdovich <ggherdovich@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Pan Xinhui <xinhui.pan@linux.vnet.ibm.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman Long <waiman.long@hpe.com>
Cc: Will Deacon <will.deacon@arm.com>
Fixes: 54cf809b9512 ("locking,qspinlock: Fix spin_is_locked() and spin_unlock_wait()")
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 include/asm-generic/qspinlock.h |   53 +++++++++++------------------------
 kernel/locking/qspinlock.c      |   60 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 77 insertions(+), 36 deletions(-)

--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -22,37 +22,33 @@
 #include <asm-generic/qspinlock_types.h>
 
 /**
+ * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
+ * @lock : Pointer to queued spinlock structure
+ *
+ * There is a very slight possibility of live-lock if the lockers keep coming
+ * and the waiter is just unfortunate enough to not see any unlock state.
+ */
+#ifndef queued_spin_unlock_wait
+extern void queued_spin_unlock_wait(struct qspinlock *lock);
+#endif
+
+/**
  * queued_spin_is_locked - is the spinlock locked?
  * @lock: Pointer to queued spinlock structure
  * Return: 1 if it is locked, 0 otherwise
  */
+#ifndef queued_spin_is_locked
 static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
 {
 	/*
-	 * queued_spin_lock_slowpath() can ACQUIRE the lock before
-	 * issuing the unordered store that sets _Q_LOCKED_VAL.
-	 *
-	 * See both smp_cond_acquire() sites for more detail.
-	 *
-	 * This however means that in code like:
-	 *
-	 *   spin_lock(A)              spin_lock(B)
-	 *   spin_unlock_wait(B)       spin_is_locked(A)
-	 *   do_something()            do_something()
-	 *
-	 * Both CPUs can end up running do_something() because the store
-	 * setting _Q_LOCKED_VAL will pass through the loads in
-	 * spin_unlock_wait() and/or spin_is_locked().
+	 * See queued_spin_unlock_wait().
 	 *
-	 * Avoid this by issuing a full memory barrier between the spin_lock()
-	 * and the loads in spin_unlock_wait() and spin_is_locked().
-	 *
-	 * Note that regular mutual exclusion doesn't care about this
-	 * delayed store.
+	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
+	 * isn't immediately observable.
 	 */
-	smp_mb();
-	return atomic_read(&lock->val) & _Q_LOCKED_MASK;
+	return atomic_read(&lock->val);
 }
+#endif
 
 /**
  * queued_spin_value_unlocked - is the spinlock structure unlocked?
@@ -122,21 +118,6 @@ static __always_inline void queued_spin_
 }
 #endif
 
-/**
- * queued_spin_unlock_wait - wait until current lock holder releases the lock
- * @lock : Pointer to queued spinlock structure
- *
- * There is a very slight possibility of live-lock if the lockers keep coming
- * and the waiter is just unfortunate enough to not see any unlock state.
- */
-static inline void queued_spin_unlock_wait(struct qspinlock *lock)
-{
-	/* See queued_spin_is_locked() */
-	smp_mb();
-	while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
-		cpu_relax();
-}
-
 #ifndef virt_spin_lock
 static __always_inline bool virt_spin_lock(struct qspinlock *lock)
 {
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -267,6 +267,66 @@ static __always_inline u32 __pv_wait_he
 #define queued_spin_lock_slowpath	native_queued_spin_lock_slowpath
 #endif
 
+/*
+ * queued_spin_lock_slowpath() can (load-)ACQUIRE the lock before
+ * issuing an _unordered_ store to set _Q_LOCKED_VAL.
+ *
+ * This means that the store can be delayed, but no later than the
+ * store-release from the unlock. This means that simply observing
+ * _Q_LOCKED_VAL is not sufficient to determine if the lock is acquired.
+ *
+ * There are two paths that can issue the unordered store:
+ *
+ *  (1) clear_pending_set_locked():	*,1,0 -> *,0,1
+ *
+ *  (2) set_locked():			t,0,0 -> t,0,1 ; t != 0
+ *      atomic_cmpxchg_relaxed():	t,0,0 -> 0,0,1
+ *
+ * However, in both cases we have other !0 state we've set before to queue
+ * ourselves:
+ *
+ * For (1) we have the atomic_cmpxchg_acquire() that set _Q_PENDING_VAL, our
+ * load is constrained by that ACQUIRE to not pass before that, and thus must
+ * observe the store.
+ *
+ * For (2) we have a more interesting scenario. We enqueue ourselves using
+ * xchg_tail(), which ends up being a RELEASE. This in itself is not
+ * sufficient, however that is followed by an smp_cond_acquire() on the same
+ * word, giving a RELEASE->ACQUIRE ordering. This again constrains our load and
+ * guarantees we must observe that store.
+ *
+ * Therefore both cases have other !0 state that is observable before the
+ * unordered locked byte store comes through. This means we can use that to
+ * wait for the lock store, and then wait for an unlock.
+ */
+#ifndef queued_spin_unlock_wait
+void queued_spin_unlock_wait(struct qspinlock *lock)
+{
+	u32 val;
+
+	for (;;) {
+		val = atomic_read(&lock->val);
+
+		if (!val) /* not locked, we're done */
+			goto done;
+
+		if (val & _Q_LOCKED_MASK) /* locked, go wait for unlock */
+			break;
+
+		/* not locked, but pending, wait until we observe the lock */
+		cpu_relax();
+	}
+
+	/* any unlock is good */
+	while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
+		cpu_relax();
+
+done:
+	smp_rmb(); /* CTRL + RMB -> ACQUIRE */
+}
+EXPORT_SYMBOL(queued_spin_unlock_wait);
+#endif
+
 #endif /* _GEN_PV_LOCK_SLOWPATH */
 
 /**