DEFINE_PER_CPU_ALIGNED(struct rqspinlock_held, rqspinlock_held_locks);
EXPORT_SYMBOL_GPL(rqspinlock_held_locks);
-static bool is_lock_released(rqspinlock_t *lock, u32 mask, struct rqspinlock_timeout *ts)
+static bool is_lock_released(rqspinlock_t *lock, u32 mask)
{
if (!(atomic_read_acquire(&lock->val) & (mask)))
return true;
return false;
}
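
For readers outside the kernel tree, the test above is just an acquire-ordered load followed by a mask check. The standalone sketch below shows the same pattern with C11 atomics; it is an illustration only, and the names demo_is_released() and DEMO_LOCKED_MASK are invented here, not taken from rqspinlock.

/*
 * Standalone illustration (not rqspinlock code): an acquire-ordered
 * "are the bits I care about clear?" test, written with C11 atomics.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define DEMO_LOCKED_MASK 0xffu    /* hypothetical locked-byte mask */

static bool demo_is_released(_Atomic unsigned int *val, unsigned int mask)
{
    /*
     * Acquire ordering so that a later attempt to take the lock cannot be
     * reordered before this observation of the lock word.
     */
    return !(atomic_load_explicit(val, memory_order_acquire) & mask);
}

int main(void)
{
    _Atomic unsigned int lockword = 0;

    printf("released: %d\n", demo_is_released(&lockword, DEMO_LOCKED_MASK)); /* 1 */
    atomic_store(&lockword, 1);
    printf("released: %d\n", demo_is_released(&lockword, DEMO_LOCKED_MASK)); /* 0 */
    return 0;
}
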
-static noinline int check_deadlock_AA(rqspinlock_t *lock, u32 mask,
- struct rqspinlock_timeout *ts)
+static noinline int check_deadlock_AA(rqspinlock_t *lock)
{
struct rqspinlock_held *rqh = this_cpu_ptr(&rqspinlock_held_locks);
int cnt = min(RES_NR_HELD, rqh->cnt);
* more locks, which reduce to ABBA). This is not exhaustive, and we rely on
* timeouts as the final line of defense.
*/
-static noinline int check_deadlock_ABBA(rqspinlock_t *lock, u32 mask,
- struct rqspinlock_timeout *ts)
+static noinline int check_deadlock_ABBA(rqspinlock_t *lock, u32 mask)
{
struct rqspinlock_held *rqh = this_cpu_ptr(&rqspinlock_held_locks);
int rqh_cnt = min(RES_NR_HELD, rqh->cnt);
* Let's ensure to break out of this loop if the lock is available for
* us to potentially acquire.
*/
- if (is_lock_released(lock, mask, ts))
+ if (is_lock_released(lock, mask))
return 0;
/*
return 0;
}
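
To make the two checks above easier to follow: the AA check asks whether the current context already holds the lock it is about to spin on, and the ABBA check asks whether another context holds that lock while waiting on one of ours. The compressed userspace sketch below walks a held-locks table the same way; the struct, the table size, and every demo_* name are invented for illustration and only approximate the kernel's per-CPU bookkeeping (the real ABBA check scans every other CPU's table rather than a single "remote" one).

/*
 * Simplified userspace sketch of the held-locks bookkeeping that the
 * AA/ABBA checks walk. Illustration only; does not mirror the kernel's
 * per-CPU structures.
 */
#include <errno.h>
#include <stdio.h>

#define DEMO_NR_HELD 31           /* hypothetical, stands in for RES_NR_HELD */

struct demo_held {
    int cnt;
    void *locks[DEMO_NR_HELD];
};

/* AA: am I already holding the lock I am about to spin on? */
static int demo_check_aa(struct demo_held *me, void *lock)
{
    for (int i = 0; i < me->cnt && i < DEMO_NR_HELD; i++)
        if (me->locks[i] == lock)
            return -EDEADLK;
    return 0;
}

/*
 * ABBA: does the other context hold the lock I want while waiting on a
 * lock I hold?  Here the remote context's pending lock is passed in
 * explicitly to keep the sketch short.
 */
static int demo_check_abba(struct demo_held *me, void *want,
                           struct demo_held *remote, void *remote_wants)
{
    int holds_mine = 0, holds_want = 0;

    for (int i = 0; i < remote->cnt && i < DEMO_NR_HELD; i++)
        if (remote->locks[i] == want)
            holds_want = 1;
    for (int i = 0; i < me->cnt && i < DEMO_NR_HELD; i++)
        if (me->locks[i] == remote_wants)
            holds_mine = 1;
    return (holds_want && holds_mine) ? -EDEADLK : 0;
}

int main(void)
{
    int a, b;                              /* two dummy "locks" */
    struct demo_held cpu0 = { .cnt = 1, .locks = { &a } };
    struct demo_held cpu1 = { .cnt = 1, .locks = { &b } };

    /* cpu0 wants b while holding a; cpu1 wants a while holding b: ABBA. */
    printf("AA:   %d\n", demo_check_aa(&cpu0, &a));              /* -EDEADLK */
    printf("ABBA: %d\n", demo_check_abba(&cpu0, &b, &cpu1, &a)); /* -EDEADLK */
    return 0;
}
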
-static noinline int check_deadlock(rqspinlock_t *lock, u32 mask,
- struct rqspinlock_timeout *ts)
+static noinline int check_deadlock(rqspinlock_t *lock, u32 mask)
{
int ret;
- ret = check_deadlock_AA(lock, mask, ts);
+ ret = check_deadlock_AA(lock);
if (ret)
return ret;
- ret = check_deadlock_ABBA(lock, mask, ts);
+ ret = check_deadlock_ABBA(lock, mask);
if (ret)
return ret;
*/
if (prev + NSEC_PER_MSEC < time) {
ts->cur = time;
- return check_deadlock(lock, mask, ts);
+ return check_deadlock(lock, mask);
}
return 0;
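
The timeout path above only pays for the deadlock scan roughly once per millisecond while the CPU keeps spinning. The standalone sketch below reproduces that throttle pattern so it can be compiled and run outside the kernel; clock_gettime() stands in for the kernel's time source, and every demo_* name is hypothetical.

/*
 * Sketch of a "run the expensive check at most once per millisecond"
 * throttle, mirroring the prev + NSEC_PER_MSEC comparison above.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define DEMO_NSEC_PER_MSEC 1000000ULL

struct demo_timeout {
    uint64_t cur;    /* timestamp of the last expensive check */
};

static uint64_t demo_now_ns(void)
{
    struct timespec t;

    clock_gettime(CLOCK_MONOTONIC, &t);
    return (uint64_t)t.tv_sec * 1000000000ULL + (uint64_t)t.tv_nsec;
}

/* Returns 1 when the caller should run the deadlock check now. */
static int demo_should_check(struct demo_timeout *ts)
{
    uint64_t time = demo_now_ns();

    if (ts->cur + DEMO_NSEC_PER_MSEC < time) {
        ts->cur = time;    /* remember when we last paid the cost */
        return 1;
    }
    return 0;
}

int main(void)
{
    struct demo_timeout ts = { .cur = demo_now_ns() };
    int checks = 0;

    /* Busy loop: despite a million iterations, the check runs rarely. */
    for (int i = 0; i < 1000000; i++)
        checks += demo_should_check(&ts);
    printf("expensive checks run: %d\n", checks);
    return 0;
}
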