*/
static void arm_smmu_cmdq_shared_lock(struct arm_smmu_cmdq *cmdq)
{
- int val;
-
/*
- * We can try to avoid the cmpxchg() loop by simply incrementing the
- * lock counter. When held in exclusive state, the lock counter is set
- * to INT_MIN so these increments won't hurt as the value will remain
- * negative.
+ * When held in exclusive state, the lock counter is set to INT_MIN
+ * so these increments won't hurt as the value will remain negative.
+ * The increment also records us as a shared waiter, so the lock cannot
+ * be re-taken in exclusive state until we are done.
*/
if (atomic_fetch_inc_relaxed(&cmdq->lock) >= 0)
return;
- do {
- val = atomic_cond_read_relaxed(&cmdq->lock, VAL >= 0);
- } while (atomic_cmpxchg_relaxed(&cmdq->lock, val, val + 1) != val);
+ /*
+ * Someone else is holding the lock in exclusive state, so wait for
+ * them to finish. Since we have already incremented the lock counter,
+ * no new exclusive lock can be acquired until we are done. We don't
+ * need the value returned by the read; we only care that the
+ * exclusive lock has been released. Once it has, the sign bit is
+ * cleared and our increment leaves the counter positive, so waiting
+ * for a positive value is sufficient.
+ */
+ atomic_cond_read_relaxed(&cmdq->lock, VAL > 0);
}
static void arm_smmu_cmdq_shared_unlock(struct arm_smmu_cmdq *cmdq)
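For illustration, here is a minimal userspace sketch of the reworked shared-lock path, written against C11 <stdatomic.h> rather than the kernel atomic_t API. The sketch_* names, the struct definition and the plain spin loop are assumptions made for this sketch only; the real code waits via atomic_cond_read_relaxed() instead of busy-spinning, and the relaxed ordering simply mirrors the diff above.

#include <limits.h>
#include <stdatomic.h>

/* Hypothetical stand-in for the lock field of struct arm_smmu_cmdq. */
struct sketch_cmdq {
	atomic_int lock;	/* sign bit (INT_MIN) set => held exclusively */
};

static void sketch_shared_lock(struct sketch_cmdq *cmdq)
{
	/*
	 * Optimistically bump the shared count. A non-negative old value
	 * means nobody holds the lock exclusively, so we are done.
	 */
	if (atomic_fetch_add_explicit(&cmdq->lock, 1,
				      memory_order_relaxed) >= 0)
		return;

	/*
	 * An exclusive holder has set INT_MIN. Our increment is already in
	 * the counter, so just wait for the sign bit to be cleared; the
	 * counter then becomes positive and we hold the lock in shared state.
	 */
	while (atomic_load_explicit(&cmdq->lock, memory_order_relaxed) <= 0)
		;	/* the kernel waits via atomic_cond_read_relaxed() */
}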
__ret; \
})
+/*
+ * Only clear the sign bit when releasing the exclusive lock. This
+ * preserves the counts of any shared_lock() waiters, so they can
+ * proceed immediately and an exclusive locker cannot re-take the lock
+ * ahead of them in a tight loop.
+ */
#define arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags) \
({ \
- atomic_set_release(&cmdq->lock, 0); \
+ atomic_fetch_andnot_release(INT_MIN, &cmdq->lock); \
local_irq_restore(flags); \
})
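Continuing the same hypothetical sketch (same headers and struct sketch_cmdq as above), the exclusive side below shows why clearing only the sign bit matters: exclusive acquisition in this sketch only succeeds from a counter of exactly zero, so shared waiters recorded while the lock was held exclusively keep any new exclusive locker out until they have drained, which is what the comments in the diff rely on. The interrupt save/restore done by the *_irqsave/*_irqrestore macros is omitted here.

static int sketch_exclusive_trylock(struct sketch_cmdq *cmdq)
{
	int idle = 0;

	/*
	 * Exclusive acquisition only succeeds when the counter is exactly
	 * zero: no shared holders and no recorded shared waiters.
	 */
	return atomic_compare_exchange_strong_explicit(&cmdq->lock, &idle,
						       INT_MIN,
						       memory_order_acquire,
						       memory_order_relaxed);
}

static void sketch_exclusive_unlock(struct sketch_cmdq *cmdq)
{
	/*
	 * Clear only the sign bit (~INT_MIN == INT_MAX), with release
	 * ordering. Increments made by shared waiters in the meantime are
	 * preserved, so the lock drops straight into shared state and
	 * sketch_exclusive_trylock() keeps failing until they finish.
	 */
	atomic_fetch_and_explicit(&cmdq->lock, ~INT_MIN,
				  memory_order_release);
}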