// SPDX-License-Identifier: GPL-2.0
/*
 * Hardware spinlock framework
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/radix-tree.h>
#include <linux/hwspinlock.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include <linux/of.h>

#include "hwspinlock_internal.h"

/* radix tree tags */
#define HWSPINLOCK_UNUSED	(0) /* tags an hwspinlock as unused */

/*
 * A radix tree is used to maintain the available hwspinlock instances.
 * The tree associates hwspinlock pointers with their integer key id,
 * and provides an easy-to-use API which makes the hwspinlock core code simple
 * and easy to read.
 *
 * Radix trees are quick on lookups, and reasonably efficient in terms of
 * storage, especially with high density usages such as this framework
 * requires (a continuous range of integer keys, beginning with zero, is
 * used as the IDs of the hwspinlock instances).
 *
 * The radix tree API supports tagging items in the tree, which this
 * framework uses to mark unused hwspinlock instances (see the
 * HWSPINLOCK_UNUSED tag above). As a result, the process of querying the
 * tree, looking for an unused hwspinlock instance, is now reduced to a
 * single radix tree API call.
 */
static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);

/*
 * Synchronization of access to the tree is achieved using this mutex,
 * as the radix-tree API requires that users provide all synchronisation.
 * A mutex is needed because we're using non-atomic radix tree allocations.
 */
static DEFINE_MUTEX(hwspinlock_tree_lock);


/**
 * __hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 * @mode: controls whether local interrupts are disabled or not
 * @flags: a pointer to where the caller's interrupt state will be saved (if
 *         requested)
 *
 * This function attempts to lock an hwspinlock, and will immediately
 * fail if the hwspinlock is already taken.
 *
 * Caution: If the mode is HWLOCK_RAW, the caller must itself serialize access
 * to the hardware lock, e.g. with a mutex or a spinlock. This mode exists for
 * scenarios where time-consuming or sleepable operations must be performed
 * while holding the hardware lock, so a sleepable lock (such as a mutex) is
 * needed to protect them.
 *
 * If the mode is not HWLOCK_RAW, upon a successful return from this function,
 * preemption (and possibly interrupts) is disabled, so the caller must not
 * sleep, and is advised to release the hwspinlock as soon as possible. This is
 * required in order to minimize remote cores polling on the hardware
 * interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether he wants their previous state to be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_trylock, spin_trylock_irq and
 * spin_trylock_irqsave.
 *
 * Returns 0 if we successfully locked the hwspinlock or -EBUSY if
 * the hwspinlock was already taken.
 * This function will never sleep.
 */
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	int ret;

	BUG_ON(!hwlock);
	BUG_ON(!flags && mode == HWLOCK_IRQSTATE);

	/*
	 * This spin_lock{_irq, _irqsave} serves three purposes:
	 *
	 * 1. Disable preemption, in order to minimize the period of time
	 *    in which the hwspinlock is taken. This is important in order
	 *    to minimize the possible polling on the hardware interconnect
	 *    by a remote user of this lock.
	 * 2. Make the hwspinlock SMP-safe (so we can take it from
	 *    additional contexts on the local host).
	 * 3. Ensure that in_atomic/might_sleep checks catch potential
	 *    problems with hwspinlock usage (e.g. scheduler checks like
	 *    'scheduling while atomic' etc.)
	 */
	switch (mode) {
	case HWLOCK_IRQSTATE:
		ret = spin_trylock_irqsave(&hwlock->lock, *flags);
		break;
	case HWLOCK_IRQ:
		ret = spin_trylock_irq(&hwlock->lock);
		break;
	case HWLOCK_RAW:
		ret = 1;
		break;
	default:
		ret = spin_trylock(&hwlock->lock);
		break;
	}

	/* is lock already taken by another context on the local cpu ? */
	if (!ret)
		return -EBUSY;

	/* try to take the hwspinlock device */
	ret = hwlock->bank->ops->trylock(hwlock);

	/* if hwlock is already taken, undo spin_trylock_* and exit */
	if (!ret) {
		switch (mode) {
		case HWLOCK_IRQSTATE:
			spin_unlock_irqrestore(&hwlock->lock, *flags);
			break;
		case HWLOCK_IRQ:
			spin_unlock_irq(&hwlock->lock);
			break;
		case HWLOCK_RAW:
			/* Nothing to do */
			break;
		default:
			spin_unlock(&hwlock->lock);
			break;
		}

		return -EBUSY;
	}

	/*
	 * We can be sure the other core's memory operations
	 * are observable to us only _after_ we successfully take
	 * the hwspinlock, and we must make sure that subsequent memory
	 * operations (both reads and writes) will not be reordered before
	 * we actually took the hwspinlock.
	 *
	 * Note: the implicit memory barrier of the spinlock above is too
	 * early, so we need this additional explicit memory barrier.
	 */
	mb();

	return 0;
}
EXPORT_SYMBOL_GPL(__hwspin_trylock);
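
/*
 * Usage sketch (illustrative only, assuming the static inline wrappers
 * declared in <linux/hwspinlock.h> and a previously requested hwlock):
 * callers do not normally invoke __hwspin_trylock() directly, but go
 * through wrappers such as hwspin_trylock_irqsave():
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	ret = hwspin_trylock_irqsave(hwlock, &flags);
 *	if (ret)
 *		return ret;	(-EBUSY: the lock is held elsewhere)
 *
 *	... short, non-sleeping critical section ...
 *
 *	hwspin_unlock_irqrestore(hwlock, &flags);
 */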

/**
 * __hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @timeout: timeout value in msecs
 * @mode: mode which controls whether local interrupts are disabled or not
 * @flags: a pointer to where the caller's interrupt state will be saved (if
 *         requested)
 *
 * This function locks the given @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up after @timeout msecs have elapsed.
 *
 * Caution: If the mode is HWLOCK_RAW, the caller must itself serialize access
 * to the hardware lock, e.g. with a mutex or a spinlock. This mode exists for
 * scenarios where time-consuming or sleepable operations must be performed
 * while holding the hardware lock, so a sleepable lock (such as a mutex) is
 * needed to protect them.
 *
 * If the mode is not HWLOCK_RAW, upon a successful return from this function,
 * preemption is disabled (and possibly local interrupts, too), so the caller
 * must not sleep, and is advised to release the hwspinlock as soon as possible.
 * This is required in order to minimize remote cores polling on the
 * hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether he wants their previous state to be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_lock, spin_lock_irq and spin_lock_irqsave.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @timeout msecs). The function will never sleep.
 */
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
					int mode, unsigned long *flags)
{
	int ret;
	unsigned long expire;

	expire = msecs_to_jiffies(to) + jiffies;

	for (;;) {
		/* Try to take the hwspinlock */
		ret = __hwspin_trylock(hwlock, mode, flags);
		if (ret != -EBUSY)
			break;

		/*
		 * The lock is already taken, let's check if the user wants
		 * us to try again
		 */
		if (time_is_before_eq_jiffies(expire))
			return -ETIMEDOUT;

		/*
		 * Allow platform-specific relax handlers to prevent
		 * hogging the interconnect (no sleeping, though)
		 */
		if (hwlock->bank->ops->relax)
			hwlock->bank->ops->relax(hwlock);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(__hwspin_lock_timeout);

/**
 * __hwspin_unlock() - unlock a specific hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @mode: controls whether local interrupts need to be restored or not
 * @flags: previous caller's interrupt state to restore (if requested)
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * (possibly) enable interrupts or restore their previous state.
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 *
 * The user decides whether local interrupts should be enabled or not, and
 * if yes, whether he wants their previous state to be restored. It is up
 * to the user to choose the appropriate @mode of operation, exactly the
 * same way users decide between spin_unlock, spin_unlock_irq and
 * spin_unlock_irqrestore.
 *
 * The function will never sleep.
 */
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	BUG_ON(!hwlock);
	BUG_ON(!flags && mode == HWLOCK_IRQSTATE);

	/*
	 * We must make sure that memory operations (both reads and writes),
	 * done before unlocking the hwspinlock, will not be reordered
	 * after the lock is released.
	 *
	 * That's the purpose of this explicit memory barrier.
	 *
	 * Note: the memory barrier induced by the spin_unlock below is too
	 * late; the other core is going to access memory soon after it will
	 * take the hwspinlock, and by then we want to be sure our memory
	 * operations are already observable.
	 */
	mb();

	hwlock->bank->ops->unlock(hwlock);

	/* Undo the spin_trylock{_irq, _irqsave} called while locking */
	switch (mode) {
	case HWLOCK_IRQSTATE:
		spin_unlock_irqrestore(&hwlock->lock, *flags);
		break;
	case HWLOCK_IRQ:
		spin_unlock_irq(&hwlock->lock);
		break;
	case HWLOCK_RAW:
		/* Nothing to do */
		break;
	default:
		spin_unlock(&hwlock->lock);
		break;
	}
}
EXPORT_SYMBOL_GPL(__hwspin_unlock);
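
/*
 * Usage sketch (illustrative only): the timeout and unlock wrappers from
 * <linux/hwspinlock.h> pair up as follows, assuming a previously requested
 * hwlock and a 100 msec timeout chosen purely as an example:
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	ret = hwspin_lock_timeout_irqsave(hwlock, 100, &flags);
 *	if (ret)
 *		return ret;	(-ETIMEDOUT: the remote user kept the lock)
 *
 *	... short, non-sleeping critical section ...
 *
 *	hwspin_unlock_irqrestore(hwlock, &flags);
 */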

/**
 * of_hwspin_lock_simple_xlate - translate hwlock_spec to return a lock id
 * @hwlock_spec: hwlock specifier as found in the device tree
 *
 * This is a simple translation function, suitable for hwspinlock platform
 * drivers that only have a lock specifier length of 1.
 *
 * Returns a relative index of the lock within a specified bank on success,
 * or -EINVAL on invalid specifier cell count.
 */
static inline int
of_hwspin_lock_simple_xlate(const struct of_phandle_args *hwlock_spec)
{
	if (WARN_ON(hwlock_spec->args_count != 1))
		return -EINVAL;

	return hwlock_spec->args[0];
}

/**
 * of_hwspin_lock_get_id() - get lock id for an OF phandle-based specific lock
 * @np: device node from which to request the specific hwlock
 * @index: index of the hwlock in the list of values
 *
 * This function provides a means for DT users of the hwspinlock module to
 * get the global lock id of a specific hwspinlock using the phandle of the
 * hwspinlock device, so that it can be requested using the normal
 * hwspin_lock_request_specific() API.
 *
 * Returns the global lock id number on success, -EPROBE_DEFER if the hwspinlock
 * device is not yet registered, -EINVAL on invalid args specifier value or an
 * appropriate error as returned from the OF parsing of the DT client node.
 */
int of_hwspin_lock_get_id(struct device_node *np, int index)
{
	struct of_phandle_args args;
	struct hwspinlock *hwlock;
	struct radix_tree_iter iter;
	void **slot;
	int id;
	int ret;

	ret = of_parse_phandle_with_args(np, "hwlocks", "#hwlock-cells", index,
					 &args);
	if (ret)
		return ret;

	/* Find the hwspinlock device: we need its base_id */
	ret = -EPROBE_DEFER;
	rcu_read_lock();
	radix_tree_for_each_slot(slot, &hwspinlock_tree, &iter, 0) {
		hwlock = radix_tree_deref_slot(slot);
		if (unlikely(!hwlock))
			continue;
		if (radix_tree_deref_retry(hwlock)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}

		if (hwlock->bank->dev->of_node == args.np) {
			ret = 0;
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out;

	id = of_hwspin_lock_simple_xlate(&args);
	if (id < 0 || id >= hwlock->bank->num_locks) {
		ret = -EINVAL;
		goto out;
	}
	id += hwlock->bank->base_id;

out:
	of_node_put(args.np);
	return ret ? ret : id;
}
EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id);
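
/*
 * Usage sketch (illustrative; node and label names are hypothetical): with a
 * DT consumer node along the lines of
 *
 *	node {
 *		...
 *		hwlocks = <&hwlock_provider 2>;
 *	};
 *
 * a client driver can map the phandle to a global lock id and then request
 * that specific lock:
 *
 *	struct hwspinlock *hwlock;
 *	int id;
 *
 *	id = of_hwspin_lock_get_id(dev->of_node, 0);
 *	if (id < 0)
 *		return id;	(may be -EPROBE_DEFER)
 *
 *	hwlock = hwspin_lock_request_specific(id);
 *	if (!hwlock)
 *		return -EBUSY;
 */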

static int hwspin_lock_register_single(struct hwspinlock *hwlock, int id)
{
	struct hwspinlock *tmp;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	ret = radix_tree_insert(&hwspinlock_tree, id, hwlock);
	if (ret) {
		if (ret == -EEXIST)
			pr_err("hwspinlock id %d already exists!\n", id);
		goto out;
	}

	/* mark this hwspinlock as available */
	tmp = radix_tree_tag_set(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);

	/* self-sanity check which should never fail */
	WARN_ON(tmp != hwlock);

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return ret;
}

static struct hwspinlock *hwspin_lock_unregister_single(unsigned int id)
{
	struct hwspinlock *hwlock = NULL;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* make sure the hwspinlock is not in use (tag is set) */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_err("hwspinlock %d still in use (or not present)\n", id);
		goto out;
	}

	hwlock = radix_tree_delete(&hwspinlock_tree, id);
	if (!hwlock) {
		pr_err("failed to delete hwspinlock %d\n", id);
		goto out;
	}

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}

/**
 * hwspin_lock_register() - register a new hw spinlock device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 * @dev: the backing device
 * @ops: hwspinlock handlers for this device
 * @base_id: id of the first hardware spinlock in this bank
 * @num_locks: number of hwspinlocks provided by this device
 *
 * This function should be called from the underlying platform-specific
 * implementation, to register a new hwspinlock device instance.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
		const struct hwspinlock_ops *ops, int base_id, int num_locks)
{
	struct hwspinlock *hwlock;
	int ret = 0, i;

	if (!bank || !ops || !dev || !num_locks || !ops->trylock ||
							!ops->unlock) {
		pr_err("invalid parameters\n");
		return -EINVAL;
	}

	bank->dev = dev;
	bank->ops = ops;
	bank->base_id = base_id;
	bank->num_locks = num_locks;

	for (i = 0; i < num_locks; i++) {
		hwlock = &bank->lock[i];

		spin_lock_init(&hwlock->lock);
		hwlock->bank = bank;

		ret = hwspin_lock_register_single(hwlock, base_id + i);
		if (ret)
			goto reg_failed;
	}

	return 0;

reg_failed:
	while (--i >= 0)
		hwspin_lock_unregister_single(base_id + i);
	return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_register);
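
/*
 * Registration sketch (illustrative only; num_locks, io_base and
 * my_hwspinlock_ops are hypothetical names, error handling trimmed):
 * a platform driver typically allocates a bank with room for all of its
 * locks, points each lock's priv at its register, and registers the bank
 * from its probe routine:
 *
 *	struct hwspinlock_device *bank;
 *	int i, ret;
 *
 *	bank = kzalloc(sizeof(*bank) + num_locks * sizeof(struct hwspinlock),
 *		       GFP_KERNEL);
 *	for (i = 0; i < num_locks; i++)
 *		bank->lock[i].priv = io_base + i * sizeof(u32);
 *
 *	ret = hwspin_lock_register(bank, &pdev->dev, &my_hwspinlock_ops,
 *				   0, num_locks);
 */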

/**
 * hwspin_lock_unregister() - unregister an hw spinlock device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 *
 * This function should be called from the underlying platform-specific
 * implementation, to unregister an existing (and unused) hwspinlock.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_unregister(struct hwspinlock_device *bank)
{
	struct hwspinlock *hwlock, *tmp;
	int i;

	for (i = 0; i < bank->num_locks; i++) {
		hwlock = &bank->lock[i];

		tmp = hwspin_lock_unregister_single(bank->base_id + i);
		if (!tmp)
			return -EBUSY;

		/* self-sanity check that should never fail */
		WARN_ON(tmp != hwlock);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hwspin_lock_unregister);

/**
 * __hwspin_lock_request() - tag an hwspinlock as used and power it up
 * @hwlock: the target hwspinlock
 *
 * This is an internal function that prepares an hwspinlock instance
 * before it is given to the user. The function assumes that
 * hwspinlock_tree_lock is taken.
 *
 * Returns 0 or positive to indicate success, and a negative value to
 * indicate an error (with the appropriate error code)
 */
static int __hwspin_lock_request(struct hwspinlock *hwlock)
{
	struct device *dev = hwlock->bank->dev;
	struct hwspinlock *tmp;
	int ret;

	/* prevent underlying implementation from being removed */
	if (!try_module_get(dev->driver->owner)) {
		dev_err(dev, "%s: can't get owner\n", __func__);
		return -EINVAL;
	}

	/* notify PM core that power is now needed */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "%s: can't power on device\n", __func__);
		pm_runtime_put_noidle(dev);
		module_put(dev->driver->owner);
		return ret;
	}

	/* mark hwspinlock as used, should not fail */
	tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock_to_id(hwlock),
							HWSPINLOCK_UNUSED);

	/* self-sanity check that should never fail */
	WARN_ON(tmp != hwlock);

	return ret;
}

/**
 * hwspin_lock_get_id() - retrieve id number of a given hwspinlock
 * @hwlock: a valid hwspinlock instance
 *
 * Returns the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
 */
int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
	if (!hwlock) {
		pr_err("invalid hwlock\n");
		return -EINVAL;
	}

	return hwlock_to_id(hwlock);
}
EXPORT_SYMBOL_GPL(hwspin_lock_get_id);

/**
 * hwspin_lock_request() - request an hwspinlock
 *
 * This function should be called by users of the hwspinlock device,
 * in order to dynamically assign them an unused hwspinlock.
 * Usually the user of this lock will then have to communicate the lock's id
 * to the remote core before it can be used for synchronization (to get the
 * id of a given hwlock, use hwspin_lock_get_id()).
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *hwspin_lock_request(void)
{
	struct hwspinlock *hwlock;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* look for an unused lock */
	ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
						0, 1, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_warn("a free hwspinlock is not available\n");
		hwlock = NULL;
		goto out;
	}

	/* sanity check that should never fail */
	WARN_ON(ret > 1);

	/* mark as used and power up */
	ret = __hwspin_lock_request(hwlock);
	if (ret < 0)
		hwlock = NULL;

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request);
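
/*
 * Usage sketch (illustrative only): dynamically allocating a lock and
 * sharing its id with a remote processor; how the id is transported
 * (mailbox, shared memory, etc.) is entirely up to the caller:
 *
 *	struct hwspinlock *hwlock;
 *	int id;
 *
 *	hwlock = hwspin_lock_request();
 *	if (!hwlock)
 *		return -EBUSY;
 *
 *	id = hwspin_lock_get_id(hwlock);
 *	(send "id" to the remote core so both sides use the same lock)
 */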

/**
 * hwspin_lock_request_specific() - request for a specific hwspinlock
 * @id: index of the specific hwspinlock that is requested
 *
 * This function should be called by users of the hwspinlock module,
 * in order to assign them a specific hwspinlock.
 * Usually early board code will be calling this function in order to
 * reserve specific hwspinlock ids for predefined purposes.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
	struct hwspinlock *hwlock;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* make sure this hwspinlock exists */
	hwlock = radix_tree_lookup(&hwspinlock_tree, id);
	if (!hwlock) {
		pr_warn("hwspinlock %u does not exist\n", id);
		goto out;
	}

	/* sanity check (this shouldn't happen) */
	WARN_ON(hwlock_to_id(hwlock) != id);

	/* make sure this hwspinlock is unused */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_warn("hwspinlock %u is already in use\n", id);
		hwlock = NULL;
		goto out;
	}

	/* mark as used and power up */
	ret = __hwspin_lock_request(hwlock);
	if (ret < 0)
		hwlock = NULL;

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);

/**
 * hwspin_lock_free() - free a specific hwspinlock
 * @hwlock: the specific hwspinlock to free
 *
 * This function marks @hwlock as free again.
 * Should only be called with an @hwlock that was retrieved from
 * an earlier call to hwspin_lock_request{_specific}.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_free(struct hwspinlock *hwlock)
{
	struct device *dev;
	struct hwspinlock *tmp;
	int ret;

	if (!hwlock) {
		pr_err("invalid hwlock\n");
		return -EINVAL;
	}

	dev = hwlock->bank->dev;
	mutex_lock(&hwspinlock_tree_lock);

	/* make sure the hwspinlock is used */
	ret = radix_tree_tag_get(&hwspinlock_tree, hwlock_to_id(hwlock),
							HWSPINLOCK_UNUSED);
	if (ret == 1) {
		dev_err(dev, "%s: hwlock is already free\n", __func__);
		dump_stack();
		ret = -EINVAL;
		goto out;
	}

	/* notify the underlying device that power is not needed */
	ret = pm_runtime_put(dev);
	if (ret < 0)
		goto out;

	/* mark this hwspinlock as available */
	tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock_to_id(hwlock),
							HWSPINLOCK_UNUSED);

	/* sanity check (this shouldn't happen) */
	WARN_ON(tmp != hwlock);

	module_put(dev->driver->owner);

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_free);
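
/*
 * Usage sketch (illustrative only): every successful hwspin_lock_request()
 * or hwspin_lock_request_specific() should eventually be balanced by a call
 * to hwspin_lock_free(), typically from the client's cleanup or remove path:
 *
 *	ret = hwspin_lock_free(hwlock);
 *	if (ret)
 *		dev_warn(dev, "failed to free hwlock: %d\n", ret);
 */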

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Hardware spinlock interface");
MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");