From 93b465c2e186d96fb90012ba0f9372eb9952e732 Mon Sep 17 00:00:00 2001
From: Juan Gutierrez <jgutierrez@ti.com>
Date: Tue, 6 Sep 2011 09:30:16 +0300
Subject: hwspinlock/core: use a mutex to protect the radix tree

From: Juan Gutierrez <jgutierrez@ti.com>

commit 93b465c2e186d96fb90012ba0f9372eb9952e732 upstream.

Since we're using non-atomic radix tree allocations, we
should be protecting the tree using a mutex and not a
spinlock.

Non-atomic allocations and process context locking are good enough,
as the tree is manipulated only when locks are registered/
unregistered/requested/freed.

The locks themselves are still protected by spinlocks of course,
and mutexes are not involved in the locking/unlocking paths.

Signed-off-by: Juan Gutierrez <jgutierrez@ti.com>
[ohad@wizery.com: rewrite the commit log, #include mutex.h, add minor
commentary]
[ohad@wizery.com: update register/unregister parts in hwspinlock.txt]
Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>

---
 Documentation/hwspinlock.txt         |   18 +++++---------
 drivers/hwspinlock/hwspinlock_core.c |   45 +++++++++++++++--------------------
 2 files changed, 27 insertions(+), 36 deletions(-)

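Background for the change below: the hwspinlock radix tree is declared with GFP_KERNEL, so radix_tree_insert() may allocate tree nodes and therefore sleep, which is illegal while holding a spinlock. A minimal, self-contained sketch of the pattern the patch moves to (the example_* names are hypothetical, not part of the patch):

#include <linux/mutex.h>
#include <linux/radix-tree.h>

/* GFP_KERNEL: insertions may allocate nodes, and allocation may sleep */
static RADIX_TREE(example_tree, GFP_KERNEL);
static DEFINE_MUTEX(example_tree_lock);

static int example_insert(unsigned long id, void *item)
{
	int ret;

	/*
	 * Process context only: radix_tree_insert() may sleep while
	 * allocating, which under a held spinlock would trigger a
	 * "sleeping function called from invalid context" warning.
	 */
	mutex_lock(&example_tree_lock);
	ret = radix_tree_insert(&example_tree, id, item);
	mutex_unlock(&example_tree_lock);

	return ret;
}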
--- a/Documentation/hwspinlock.txt
+++ b/Documentation/hwspinlock.txt
@@ -39,23 +39,20 @@ independent, drivers.
 in case an unused hwspinlock isn't available. Users of this
 API will usually want to communicate the lock's id to the remote core
 before it can be used to achieve synchronization.
- Can be called from an atomic context (this function will not sleep) but
- not from within interrupt context.
+ Should be called from a process context (might sleep).

 struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
 - assign a specific hwspinlock id and return its address, or NULL
 if that hwspinlock is already in use. Usually board code will
 be calling this function in order to reserve specific hwspinlock
 ids for predefined purposes.
- Can be called from an atomic context (this function will not sleep) but
- not from within interrupt context.
+ Should be called from a process context (might sleep).

 int hwspin_lock_free(struct hwspinlock *hwlock);
 - free a previously-assigned hwspinlock; returns 0 on success, or an
 appropriate error code on failure (e.g. -EINVAL if the hwspinlock
 is already free).
- Can be called from an atomic context (this function will not sleep) but
- not from within interrupt context.
+ Should be called from a process context (might sleep).

 int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int timeout);
 - lock a previously-assigned hwspinlock with a timeout limit (specified in
@@ -232,15 +229,14 @@ int hwspinlock_example2(void)

 int hwspin_lock_register(struct hwspinlock *hwlock);
 - to be called from the underlying platform-specific implementation, in
- order to register a new hwspinlock instance. Can be called from an atomic
- context (this function will not sleep) but not from within interrupt
- context. Returns 0 on success, or appropriate error code on failure.
+ order to register a new hwspinlock instance. Should be called from
+ a process context (this function might sleep).
+ Returns 0 on success, or appropriate error code on failure.

 struct hwspinlock *hwspin_lock_unregister(unsigned int id);
 - to be called from the underlying vendor-specific implementation, in order
 to unregister an existing (and unused) hwspinlock instance.
- Can be called from an atomic context (will not sleep) but not from
- within interrupt context.
+ Should be called from a process context (this function might sleep).
 Returns the address of hwspinlock on success, or NULL on error (e.g.
 if the hwspinlock is still in use).

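To make the documented calling convention above concrete, a hedged usage sketch follows (hypothetical caller, error handling trimmed): the request/free management calls now belong in process context, while taking and releasing the lock itself remains non-sleeping:

#include <linux/hwspinlock.h>

static int example_user(void)
{
	struct hwspinlock *hwlock;
	int ret;

	/* may sleep after this patch: process context only */
	hwlock = hwspin_lock_request();
	if (!hwlock)
		return -EBUSY;

	/* locking never sleeps: the per-lock spinlock is still used */
	ret = hwspin_lock_timeout(hwlock, 10);	/* timeout in msecs */
	if (!ret) {
		/* ... access the resource shared with the remote core ... */
		hwspin_unlock(hwlock);
	}

	/* may sleep: mutates the radix tree under the new mutex */
	hwspin_lock_free(hwlock);
	return ret;
}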
--- a/drivers/hwspinlock/hwspinlock_core.c
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -26,6 +26,7 @@
 #include <linux/radix-tree.h>
 #include <linux/hwspinlock.h>
 #include <linux/pm_runtime.h>
+#include <linux/mutex.h>

 #include "hwspinlock_internal.h"

@@ -52,10 +53,12 @@
 static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);

 /*
- * Synchronization of access to the tree is achieved using this spinlock,
+ * Synchronization of access to the tree is achieved using this mutex,
  * as the radix-tree API requires that users provide all synchronisation.
+ * A mutex is needed because we're using non-atomic radix tree allocations.
  */
-static DEFINE_SPINLOCK(hwspinlock_tree_lock);
+static DEFINE_MUTEX(hwspinlock_tree_lock);
+

 /**
  * __hwspin_trylock() - attempt to lock a specific hwspinlock
@@ -261,8 +264,7 @@ EXPORT_SYMBOL_GPL(__hwspin_unlock);
  * This function should be called from the underlying platform-specific
  * implementation, to register a new hwspinlock instance.
  *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context.
+ * Should be called from a process context (might sleep)
  *
  * Returns 0 on success, or an appropriate error code on failure
  */
@@ -279,7 +281,7 @@ int hwspin_lock_register(struct hwspinlo

 	spin_lock_init(&hwlock->lock);

-	spin_lock(&hwspinlock_tree_lock);
+	mutex_lock(&hwspinlock_tree_lock);

 	ret = radix_tree_insert(&hwspinlock_tree, hwlock->id, hwlock);
 	if (ret)
@@ -293,7 +295,7 @@ int hwspin_lock_register(struct hwspinlo
 	WARN_ON(tmp != hwlock);

 out:
-	spin_unlock(&hwspinlock_tree_lock);
+	mutex_unlock(&hwspinlock_tree_lock);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_register);
@@ -305,8 +307,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_register);
  * This function should be called from the underlying platform-specific
  * implementation, to unregister an existing (and unused) hwspinlock.
  *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context.
+ * Should be called from a process context (might sleep)
  *
  * Returns the address of hwspinlock @id on success, or NULL on failure
  */
@@ -315,7 +316,7 @@ struct hwspinlock *hwspin_lock_unregiste
 	struct hwspinlock *hwlock = NULL;
 	int ret;

-	spin_lock(&hwspinlock_tree_lock);
+	mutex_lock(&hwspinlock_tree_lock);

 	/* make sure the hwspinlock is not in use (tag is set) */
 	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
@@ -331,7 +332,7 @@ struct hwspinlock *hwspin_lock_unregiste
 	}

 out:
-	spin_unlock(&hwspinlock_tree_lock);
+	mutex_unlock(&hwspinlock_tree_lock);
 	return hwlock;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_unregister);
@@ -400,9 +401,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_get_id);
  * to the remote core before it can be used for synchronization (to get the
  * id of a given hwlock, use hwspin_lock_get_id()).
  *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context (simply because there is no use case for
- * that yet).
+ * Should be called from a process context (might sleep)
  *
  * Returns the address of the assigned hwspinlock, or NULL on error
  */
@@ -411,7 +410,7 @@ struct hwspinlock *hwspin_lock_request(v
 	struct hwspinlock *hwlock;
 	int ret;

-	spin_lock(&hwspinlock_tree_lock);
+	mutex_lock(&hwspinlock_tree_lock);

 	/* look for an unused lock */
 	ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
@@ -431,7 +430,7 @@ struct hwspinlock *hwspin_lock_request(v
 		hwlock = NULL;

 out:
-	spin_unlock(&hwspinlock_tree_lock);
+	mutex_unlock(&hwspinlock_tree_lock);
 	return hwlock;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_request);
@@ -445,9 +444,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_request);
  * Usually early board code will be calling this function in order to
  * reserve specific hwspinlock ids for predefined purposes.
  *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context (simply because there is no use case for
- * that yet).
+ * Should be called from a process context (might sleep)
  *
  * Returns the address of the assigned hwspinlock, or NULL on error
  */
@@ -456,7 +453,7 @@ struct hwspinlock *hwspin_lock_request_s
 	struct hwspinlock *hwlock;
 	int ret;

-	spin_lock(&hwspinlock_tree_lock);
+	mutex_lock(&hwspinlock_tree_lock);

 	/* make sure this hwspinlock exists */
 	hwlock = radix_tree_lookup(&hwspinlock_tree, id);
@@ -482,7 +479,7 @@ struct hwspinlock *hwspin_lock_request_s
 		hwlock = NULL;

 out:
-	spin_unlock(&hwspinlock_tree_lock);
+	mutex_unlock(&hwspinlock_tree_lock);
 	return hwlock;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
@@ -495,9 +492,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_request_sp
  * Should only be called with an @hwlock that was retrieved from
  * an earlier call to omap_hwspin_lock_request{_specific}.
  *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context (simply because there is no use case for
- * that yet).
+ * Should be called from a process context (might sleep)
  *
  * Returns 0 on success, or an appropriate error code on failure
  */
@@ -511,7 +506,7 @@ int hwspin_lock_free(struct hwspinlock *
 		return -EINVAL;
 	}

-	spin_lock(&hwspinlock_tree_lock);
+	mutex_lock(&hwspinlock_tree_lock);

 	/* make sure the hwspinlock is used */
 	ret = radix_tree_tag_get(&hwspinlock_tree, hwlock->id,
@@ -538,7 +533,7 @@ int hwspin_lock_free(struct hwspinlock *
 	module_put(hwlock->owner);

 out:
-	spin_unlock(&hwspinlock_tree_lock);
+	mutex_unlock(&hwspinlock_tree_lock);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_free);
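The resulting design splits cleanly: the mutex serializes only the slow management paths that mutate the radix tree (register/unregister/request/free), while each lock keeps its own spinlock so the lock/unlock fast path stays usable from atomic context. A simplified sketch of that unchanged fast path (hypothetical helper; the real code is __hwspin_trylock() above):

static int example_trylock(struct hwspinlock *hwlock)
{
	/* no allocation happens here, so a spinlock is safe */
	if (!spin_trylock(&hwlock->lock))
		return -EBUSY;

	/* ... then try the hardware lock via hwlock->ops->trylock() ... */

	return 0;
}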