struct dma_fence_cb *cur, *tmp;
struct list_head cb_list;
- lockdep_assert_held(fence->lock);
+ dma_fence_assert_held(fence);
if (unlikely(test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&fence->flags)))
if (WARN_ON(!fence))
return;
- spin_lock_irqsave(fence->lock, flags);
+ dma_fence_lock_irqsave(fence, flags);
dma_fence_signal_timestamp_locked(fence, timestamp);
- spin_unlock_irqrestore(fence->lock, flags);
+ dma_fence_unlock_irqrestore(fence, flags);
}
EXPORT_SYMBOL(dma_fence_signal_timestamp);
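/*
 * Usage sketch (hypothetical driver code, not part of this patch):
 * signal a fence from an interrupt handler with the timestamp the
 * hardware captured. foo_ring and foo_read_hw_timestamp() are made-up
 * names.
 */
static irqreturn_t foo_fence_irq(int irq, void *data)
{
	struct foo_ring *ring = data;
	ktime_t ts = ns_to_ktime(foo_read_hw_timestamp(ring));

	dma_fence_signal_timestamp(ring->active_fence, ts);
	return IRQ_HANDLED;
}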
unsigned long flags;
bool ret;
- spin_lock_irqsave(fence->lock, flags);
+ dma_fence_lock_irqsave(fence, flags);
ret = dma_fence_check_and_signal_locked(fence);
- spin_unlock_irqrestore(fence->lock, flags);
+ dma_fence_unlock_irqrestore(fence, flags);
return ret;
}
tmp = dma_fence_begin_signalling();
- spin_lock_irqsave(fence->lock, flags);
+ dma_fence_lock_irqsave(fence, flags);
dma_fence_signal_timestamp_locked(fence, ktime_get());
- spin_unlock_irqrestore(fence->lock, flags);
+ dma_fence_unlock_irqrestore(fence, flags);
dma_fence_end_signalling(tmp);
}
* don't leave chains dangling. We set the error flag first
* so that the callbacks know this signal is due to an error.
*/
- spin_lock_irqsave(fence->lock, flags);
+ dma_fence_lock_irqsave(fence, flags);
fence->error = -EDEADLK;
dma_fence_signal_locked(fence);
- spin_unlock_irqrestore(fence->lock, flags);
+ dma_fence_unlock_irqrestore(fence, flags);
}
ops = rcu_dereference(fence->ops);
const struct dma_fence_ops *ops;
bool was_set;
- lockdep_assert_held(fence->lock);
+ dma_fence_assert_held(fence);
was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
&fence->flags);
{
unsigned long flags;
- spin_lock_irqsave(fence->lock, flags);
+ dma_fence_lock_irqsave(fence, flags);
__dma_fence_enable_signaling(fence);
- spin_unlock_irqrestore(fence->lock, flags);
+ dma_fence_unlock_irqrestore(fence, flags);
}
EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
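/*
 * Usage sketch (hypothetical, not part of this patch): turn signaling
 * on so the fence makes progress even without a waiter, then check it
 * opportunistically. foo_retire_job() is a made-up helper.
 */
static void foo_poll_fence(struct dma_fence *fence)
{
	dma_fence_enable_sw_signaling(fence);
	if (dma_fence_is_signaled(fence))
		foo_retire_job(fence);
}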
return -ENOENT;
}
- spin_lock_irqsave(fence->lock, flags);
-
+ dma_fence_lock_irqsave(fence, flags);
if (__dma_fence_enable_signaling(fence)) {
cb->func = func;
list_add_tail(&cb->node, &fence->cb_list);
INIT_LIST_HEAD(&cb->node);
ret = -ENOENT;
}
-
- spin_unlock_irqrestore(fence->lock, flags);
+ dma_fence_unlock_irqrestore(fence, flags);
return ret;
}
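/*
 * Usage sketch (hypothetical, not part of this patch): complete a
 * waiter from the callback. A -ENOENT return means the fence already
 * signaled and the callback was never installed, so the caller
 * completes on its own. struct foo_waiter { struct dma_fence_cb cb;
 * struct completion done; } is a made-up type.
 */
static void foo_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct foo_waiter *w = container_of(cb, struct foo_waiter, cb);

	complete(&w->done);
}

static void foo_wait(struct dma_fence *fence, struct foo_waiter *w)
{
	init_completion(&w->done);
	if (dma_fence_add_callback(fence, &w->cb, foo_fence_cb) == -ENOENT)
		complete(&w->done);	/* already signaled */
	wait_for_completion(&w->done);
}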
unsigned long flags;
int status;
- spin_lock_irqsave(fence->lock, flags);
+ dma_fence_lock_irqsave(fence, flags);
status = dma_fence_get_status_locked(fence);
- spin_unlock_irqrestore(fence->lock, flags);
+ dma_fence_unlock_irqrestore(fence, flags);
return status;
}
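/*
 * Usage sketch (hypothetical): dma_fence_get_status() returns 0 while
 * the fence is pending, 1 once it signaled successfully, and a
 * negative errno if it signaled with an error.
 */
static int foo_job_result(struct dma_fence *fence)
{
	int status = dma_fence_get_status(fence);

	if (!status)
		return -EBUSY;			/* still pending */
	return status < 0 ? status : 0;		/* error or success */
}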
unsigned long flags;
bool ret;
- spin_lock_irqsave(fence->lock, flags);
-
+ dma_fence_lock_irqsave(fence, flags);
ret = !list_empty(&cb->node);
if (ret)
list_del_init(&cb->node);
-
- spin_unlock_irqrestore(fence->lock, flags);
+ dma_fence_unlock_irqrestore(fence, flags);
return ret;
}
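/*
 * Usage sketch (hypothetical, continuing the foo_waiter example): give
 * up a bounded wait. A true return means the callback was detached
 * before it ran and will never fire.
 */
static int foo_wait_timeout(struct dma_fence *fence, struct foo_waiter *w)
{
	if (!wait_for_completion_timeout(&w->done, msecs_to_jiffies(100))) {
		if (dma_fence_remove_callback(fence, &w->cb))
			return -ETIMEDOUT;	/* safely detached */
		/* the callback raced us and already completed w->done */
	}
	return 0;
}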
unsigned long flags;
signed long ret = timeout ? timeout : 1;
- spin_lock_irqsave(fence->lock, flags);
+ dma_fence_lock_irqsave(fence, flags);
if (dma_fence_test_signaled_flag(fence))
goto out;
__set_current_state(TASK_INTERRUPTIBLE);
else
__set_current_state(TASK_UNINTERRUPTIBLE);
- spin_unlock_irqrestore(fence->lock, flags);
+ dma_fence_unlock_irqrestore(fence, flags);
ret = schedule_timeout(ret);
- spin_lock_irqsave(fence->lock, flags);
+ dma_fence_lock_irqsave(fence, flags);
if (ret > 0 && intr && signal_pending(current))
ret = -ERESTARTSYS;
}
__set_current_state(TASK_RUNNING);
out:
- spin_unlock_irqrestore(fence->lock, flags);
+ dma_fence_unlock_irqrestore(fence, flags);
return ret;
}
EXPORT_SYMBOL(dma_fence_default_wait);
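/*
 * Note (sketch, not part of this patch): drivers rarely call this
 * directly. Leaving .wait unset in dma_fence_ops makes
 * dma_fence_wait_timeout() fall back to dma_fence_default_wait(), as
 * in this hypothetical ops table:
 */
static const struct dma_fence_ops foo_fence_ops = {
	.get_driver_name = foo_get_driver_name,
	.get_timeline_name = foo_get_timeline_name,
	/* .wait left NULL: the core uses dma_fence_default_wait() */
};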
static void __wait_for_callbacks(struct dma_fence *f)
{
- spin_lock_irq(f->lock);
- spin_unlock_irq(f->lock);
+ unsigned long flags;
+
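+ /*
+ * The critical section is intentionally empty: callbacks run with the
+ * fence lock held, so taking and dropping the lock after the fence
+ * signaled guarantees every callback has finished.
+ */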
+ dma_fence_lock_irqsave(f, flags);
+ dma_fence_unlock_irqrestore(f, flags);
}
static int thread_signal_callback(void *arg)
struct sync_timeline *parent = dma_fence_parent(fence);
unsigned long flags;
- spin_lock_irqsave(fence->lock, flags);
+ dma_fence_lock_irqsave(fence, flags);
if (!list_empty(&pt->link)) {
list_del(&pt->link);
rb_erase(&pt->node, &parent->pt_tree);
}
- spin_unlock_irqrestore(fence->lock, flags);
+ dma_fence_unlock_irqrestore(fence, flags);
sync_timeline_put(parent);
dma_fence_free(fence);
struct sync_pt *pt = dma_fence_to_sync_pt(fence);
unsigned long flags;
- spin_lock_irqsave(fence->lock, flags);
+ dma_fence_lock_irqsave(fence, flags);
if (test_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags)) {
if (ktime_before(deadline, pt->deadline))
pt->deadline = deadline;
pt->deadline = deadline;
__set_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags);
}
- spin_unlock_irqrestore(fence->lock, flags);
+ dma_fence_unlock_irqrestore(fence, flags);
}
static const struct dma_fence_ops timeline_fence_ops = {
goto put_fence;
}
- spin_lock_irqsave(fence->lock, flags);
+ dma_fence_lock_irqsave(fence, flags);
if (!test_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags)) {
ret = -ENOENT;
goto unlock;
}
data.deadline_ns = ktime_to_ns(pt->deadline);
- spin_unlock_irqrestore(fence->lock, flags);
+ dma_fence_unlock_irqrestore(fence, flags);
dma_fence_put(fence);
return 0;
unlock:
- spin_unlock_irqrestore(fence->lock, flags);
+ dma_fence_unlock_irqrestore(fence, flags);
put_fence:
dma_fence_put(fence);
if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence)
return false;
- spin_lock_irqsave(fence->lock, flags);
+ dma_fence_lock_irqsave(fence, flags);
if (!dma_fence_is_signaled_locked(fence))
dma_fence_set_error(fence, -ENODATA);
- spin_unlock_irqrestore(fence->lock, flags);
+ dma_fence_unlock_irqrestore(fence, flags);
while (!dma_fence_is_signaled(fence) &&
ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0)
dma_fence_put(vm->last_unlocked);
dma_fence_wait(vm->last_tlb_flush, false);
/* Make sure that all fence callbacks have completed */
- spin_lock_irqsave(vm->last_tlb_flush->lock, flags);
- spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags);
+ dma_fence_lock_irqsave(vm->last_tlb_flush, flags);
+ dma_fence_unlock_irqrestore(vm->last_tlb_flush, flags);
dma_fence_put(vm->last_tlb_flush);
list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
* sure that the dma_fence structure isn't freed up.
*/
rcu_read_lock();
- lock = vm->last_tlb_flush->lock;
+ lock = dma_fence_spinlock(vm->last_tlb_flush);
rcu_read_unlock();
spin_lock_irqsave(lock, flags);
{
struct dma_fence_cb *cur, *tmp;
- lockdep_assert_held(fence->lock);
+ dma_fence_assert_held(fence);
list_for_each_entry_safe(cur, tmp, list, node) {
INIT_LIST_HEAD(&cur->node);
* nesting rules for the fence->lock; the inner lock is always the
* older lock.
*/
- spin_lock_irqsave(fence->lock, flags);
+ dma_fence_lock_irqsave(fence, flags);
if (prev)
- spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
+ spin_lock_nested(dma_fence_spinlock(prev),
+ SINGLE_DEPTH_NESTING);
/*
* A does the cmpxchg first, and so it sees C or NULL, as before, or
*/
while (cmpxchg(__active_fence_slot(active), prev, fence) != prev) {
if (prev) {
- spin_unlock(prev->lock);
+ spin_unlock(dma_fence_spinlock(prev));
dma_fence_put(prev);
}
- spin_unlock_irqrestore(fence->lock, flags);
+ dma_fence_unlock_irqrestore(fence, flags);
prev = i915_active_fence_get(active);
GEM_BUG_ON(prev == fence);
- spin_lock_irqsave(fence->lock, flags);
+ dma_fence_lock_irqsave(fence, flags);
if (prev)
- spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
+ spin_lock_nested(dma_fence_spinlock(prev),
+ SINGLE_DEPTH_NESTING);
}
/*
*/
if (prev) {
__list_del_entry(&active->cb.node);
- spin_unlock(prev->lock); /* serialise with prev->cb_list */
+ /* serialise with prev->cb_list */
+ spin_unlock(dma_fence_spinlock(prev));
}
list_add_tail(&active->cb.node, &fence->cb_list);
- spin_unlock_irqrestore(fence->lock, flags);
+ dma_fence_unlock_irqrestore(fence, flags);
return prev;
}
static inline bool
nouveau_cli_work_ready(struct dma_fence *fence)
{
+ unsigned long flags;
bool ret = true;
- spin_lock_irq(fence->lock);
+ dma_fence_lock_irqsave(fence, flags);
if (!dma_fence_is_signaled_locked(fence))
ret = false;
- spin_unlock_irq(fence->lock);
+ dma_fence_unlock_irqrestore(fence, flags);
if (ret == true)
dma_fence_put(fence);
struct dma_fence *parent;
unsigned long flags;
- spin_lock_irqsave(&fence->lock, flags);
+ dma_fence_lock_irqsave(f, flags);
/* If we already have an earlier deadline, keep it: */
if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, &f->flags) &&
ktime_before(fence->deadline, deadline)) {
- spin_unlock_irqrestore(&fence->lock, flags);
+ dma_fence_unlock_irqrestore(f, flags);
return;
}
fence->deadline = deadline;
set_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, &f->flags);
- spin_unlock_irqrestore(&fence->lock, flags);
+ dma_fence_unlock_irqrestore(f, flags);
/*
* smp_load_acquire() to ensure that if we are racing another
unsigned long irq_flags;
bool signaled;
- spin_lock_irqsave(fence->lock, irq_flags);
+ dma_fence_lock_irqsave(fence, irq_flags);
signaled = test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags);
if (!signaled)
dma_fence_set_error(fence, error);
- spin_unlock_irqrestore(fence->lock, irq_flags);
+ dma_fence_unlock_irqrestore(fence, irq_flags);
return signaled;
}
} while (1);
}
+/**
+ * dma_fence_spinlock - return pointer to the spinlock protecting the fence
+ * @fence: the fence to get the lock from
+ *
+ * Return a pointer to the external lock protecting the fence.
+ */
+static inline spinlock_t *dma_fence_spinlock(struct dma_fence *fence)
+{
+ return fence->lock;
+}
+
+/**
+ * dma_fence_lock_irqsave - lock the fence and save IRQ state
+ * @fence: the fence to lock
+ * @flags: where to store the CPU flags
+ *
+ * Lock the fence, preventing it from changing to the signaled state.
+ */
+#define dma_fence_lock_irqsave(fence, flags) \
+ spin_lock_irqsave(dma_fence_spinlock(fence), flags)
+
+/**
+ * dma_fence_unlock_irqrestore - unlock the fence and restore IRQ state
+ * @fence: the fence to unlock
+ * @flags: the CPU flags to restore
+ *
+ * Unlock the fence, allowing it to transition to the signaled state again.
+ */
+#define dma_fence_unlock_irqrestore(fence, flags) \
+ spin_unlock_irqrestore(dma_fence_spinlock(fence), flags)
+
+/**
+ * dma_fence_assert_held - lockdep assertion that fence is locked
+ * @fence: the fence which should be locked
+ */
+#define dma_fence_assert_held(fence) \
+ lockdep_assert_held(dma_fence_spinlock(fence))
+
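/*
 * Canonical usage sketch for the new helpers (hypothetical driver
 * code, mirroring the conversions above): flag an error on a fence
 * unless it already signaled.
 */
static void foo_fence_flag_error(struct dma_fence *fence, int error)
{
	unsigned long flags;

	dma_fence_lock_irqsave(fence, flags);
	if (!dma_fence_is_signaled_locked(fence))
		dma_fence_set_error(fence, error);
	dma_fence_unlock_irqrestore(fence, flags);
}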
#ifdef CONFIG_LOCKDEP
bool dma_fence_begin_signalling(void);
void dma_fence_end_signalling(bool cookie);