]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
dma-buf: abstract fence locking v2
authorChristian König <christian.koenig@amd.com>
Thu, 9 Oct 2025 08:40:06 +0000 (10:40 +0200)
committerChristian König <christian.koenig@amd.com>
Mon, 23 Feb 2026 15:14:19 +0000 (16:14 +0100)
Add dma_fence_lock_irqsave() and dma_fence_unlock_irqrestore() wrappers
and mechanically apply them everywhere.

Just a pre-requisite cleanup for a follow up patch.

v2: add some missing i915 bits, add abstraction for lockdep assertion as
    well
v3: one more suggestion by Tvrtko

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Link: https://lore.kernel.org/r/20260219160822.1529-4-christian.koenig@amd.com
12 files changed:
drivers/dma-buf/dma-fence.c
drivers/dma-buf/st-dma-fence.c
drivers/dma-buf/sw_sync.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
drivers/gpu/drm/i915/i915_active.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/scheduler/sched_fence.c
drivers/gpu/drm/xe/xe_sched_job.c
include/linux/dma-fence.h

index 3279d82ffa987b3dad5e9d37462da001762b1efd..698260c49f52362d87949e15f34f2a54cef5ef87 100644 (file)
@@ -366,7 +366,7 @@ void dma_fence_signal_timestamp_locked(struct dma_fence *fence,
        struct dma_fence_cb *cur, *tmp;
        struct list_head cb_list;
 
-       lockdep_assert_held(fence->lock);
+       dma_fence_assert_held(fence);
 
        if (unlikely(test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
                                      &fence->flags)))
@@ -414,9 +414,9 @@ void dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp)
        if (WARN_ON(!fence))
                return;
 
-       spin_lock_irqsave(fence->lock, flags);
+       dma_fence_lock_irqsave(fence, flags);
        dma_fence_signal_timestamp_locked(fence, timestamp);
-       spin_unlock_irqrestore(fence->lock, flags);
+       dma_fence_unlock_irqrestore(fence, flags);
 }
 EXPORT_SYMBOL(dma_fence_signal_timestamp);
 
@@ -475,9 +475,9 @@ bool dma_fence_check_and_signal(struct dma_fence *fence)
        unsigned long flags;
        bool ret;
 
-       spin_lock_irqsave(fence->lock, flags);
+       dma_fence_lock_irqsave(fence, flags);
        ret = dma_fence_check_and_signal_locked(fence);
-       spin_unlock_irqrestore(fence->lock, flags);
+       dma_fence_unlock_irqrestore(fence, flags);
 
        return ret;
 }
@@ -503,9 +503,9 @@ void dma_fence_signal(struct dma_fence *fence)
 
        tmp = dma_fence_begin_signalling();
 
-       spin_lock_irqsave(fence->lock, flags);
+       dma_fence_lock_irqsave(fence, flags);
        dma_fence_signal_timestamp_locked(fence, ktime_get());
-       spin_unlock_irqrestore(fence->lock, flags);
+       dma_fence_unlock_irqrestore(fence, flags);
 
        dma_fence_end_signalling(tmp);
 }
@@ -606,10 +606,10 @@ void dma_fence_release(struct kref *kref)
                 * don't leave chains dangling. We set the error flag first
                 * so that the callbacks know this signal is due to an error.
                 */
-               spin_lock_irqsave(fence->lock, flags);
+               dma_fence_lock_irqsave(fence, flags);
                fence->error = -EDEADLK;
                dma_fence_signal_locked(fence);
-               spin_unlock_irqrestore(fence->lock, flags);
+               dma_fence_unlock_irqrestore(fence, flags);
        }
 
        ops = rcu_dereference(fence->ops);
@@ -639,7 +639,7 @@ static bool __dma_fence_enable_signaling(struct dma_fence *fence)
        const struct dma_fence_ops *ops;
        bool was_set;
 
-       lockdep_assert_held(fence->lock);
+       dma_fence_assert_held(fence);
 
        was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
                                   &fence->flags);
@@ -675,9 +675,9 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(fence->lock, flags);
+       dma_fence_lock_irqsave(fence, flags);
        __dma_fence_enable_signaling(fence);
-       spin_unlock_irqrestore(fence->lock, flags);
+       dma_fence_unlock_irqrestore(fence, flags);
 }
 EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
 
@@ -717,8 +717,7 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
                return -ENOENT;
        }
 
-       spin_lock_irqsave(fence->lock, flags);
-
+       dma_fence_lock_irqsave(fence, flags);
        if (__dma_fence_enable_signaling(fence)) {
                cb->func = func;
                list_add_tail(&cb->node, &fence->cb_list);
@@ -726,8 +725,7 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
                INIT_LIST_HEAD(&cb->node);
                ret = -ENOENT;
        }
-
-       spin_unlock_irqrestore(fence->lock, flags);
+       dma_fence_unlock_irqrestore(fence, flags);
 
        return ret;
 }
@@ -750,9 +748,9 @@ int dma_fence_get_status(struct dma_fence *fence)
        unsigned long flags;
        int status;
 
-       spin_lock_irqsave(fence->lock, flags);
+       dma_fence_lock_irqsave(fence, flags);
        status = dma_fence_get_status_locked(fence);
-       spin_unlock_irqrestore(fence->lock, flags);
+       dma_fence_unlock_irqrestore(fence, flags);
 
        return status;
 }
@@ -782,13 +780,11 @@ dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
        unsigned long flags;
        bool ret;
 
-       spin_lock_irqsave(fence->lock, flags);
-
+       dma_fence_lock_irqsave(fence, flags);
        ret = !list_empty(&cb->node);
        if (ret)
                list_del_init(&cb->node);
-
-       spin_unlock_irqrestore(fence->lock, flags);
+       dma_fence_unlock_irqrestore(fence, flags);
 
        return ret;
 }
@@ -827,7 +823,7 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
        unsigned long flags;
        signed long ret = timeout ? timeout : 1;
 
-       spin_lock_irqsave(fence->lock, flags);
+       dma_fence_lock_irqsave(fence, flags);
 
        if (dma_fence_test_signaled_flag(fence))
                goto out;
@@ -851,11 +847,11 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
                        __set_current_state(TASK_INTERRUPTIBLE);
                else
                        __set_current_state(TASK_UNINTERRUPTIBLE);
-               spin_unlock_irqrestore(fence->lock, flags);
+               dma_fence_unlock_irqrestore(fence, flags);
 
                ret = schedule_timeout(ret);
 
-               spin_lock_irqsave(fence->lock, flags);
+               dma_fence_lock_irqsave(fence, flags);
                if (ret > 0 && intr && signal_pending(current))
                        ret = -ERESTARTSYS;
        }
@@ -865,7 +861,7 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
        __set_current_state(TASK_RUNNING);
 
 out:
-       spin_unlock_irqrestore(fence->lock, flags);
+       dma_fence_unlock_irqrestore(fence, flags);
        return ret;
 }
 EXPORT_SYMBOL(dma_fence_default_wait);
index 73ed6fd48a13fb4a2d0c5e9bf71c34b67c93d5d1..5d0d9abc6e2180e5ca783d33aaabc42c2dbd82e1 100644 (file)
@@ -410,8 +410,10 @@ struct race_thread {
 
 static void __wait_for_callbacks(struct dma_fence *f)
 {
-       spin_lock_irq(f->lock);
-       spin_unlock_irq(f->lock);
+       unsigned long flags;
+
+       dma_fence_lock_irqsave(f, flags);
+       dma_fence_unlock_irqrestore(f, flags);
 }
 
 static int thread_signal_callback(void *arg)
index 963a72324d16e3f6de32ae2aa7827624d7baf1e0..8df20b0218a9eef03c1a81241fcc86ef544f8a68 100644 (file)
@@ -156,12 +156,12 @@ static void timeline_fence_release(struct dma_fence *fence)
        struct sync_timeline *parent = dma_fence_parent(fence);
        unsigned long flags;
 
-       spin_lock_irqsave(fence->lock, flags);
+       dma_fence_lock_irqsave(fence, flags);
        if (!list_empty(&pt->link)) {
                list_del(&pt->link);
                rb_erase(&pt->node, &parent->pt_tree);
        }
-       spin_unlock_irqrestore(fence->lock, flags);
+       dma_fence_unlock_irqrestore(fence, flags);
 
        sync_timeline_put(parent);
        dma_fence_free(fence);
@@ -179,7 +179,7 @@ static void timeline_fence_set_deadline(struct dma_fence *fence, ktime_t deadlin
        struct sync_pt *pt = dma_fence_to_sync_pt(fence);
        unsigned long flags;
 
-       spin_lock_irqsave(fence->lock, flags);
+       dma_fence_lock_irqsave(fence, flags);
        if (test_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags)) {
                if (ktime_before(deadline, pt->deadline))
                        pt->deadline = deadline;
@@ -187,7 +187,7 @@ static void timeline_fence_set_deadline(struct dma_fence *fence, ktime_t deadlin
                pt->deadline = deadline;
                __set_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags);
        }
-       spin_unlock_irqrestore(fence->lock, flags);
+       dma_fence_unlock_irqrestore(fence, flags);
 }
 
 static const struct dma_fence_ops timeline_fence_ops = {
@@ -431,13 +431,13 @@ static int sw_sync_ioctl_get_deadline(struct sync_timeline *obj, unsigned long a
                goto put_fence;
        }
 
-       spin_lock_irqsave(fence->lock, flags);
+       dma_fence_lock_irqsave(fence, flags);
        if (!test_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags)) {
                ret = -ENOENT;
                goto unlock;
        }
        data.deadline_ns = ktime_to_ns(pt->deadline);
-       spin_unlock_irqrestore(fence->lock, flags);
+       dma_fence_unlock_irqrestore(fence, flags);
 
        dma_fence_put(fence);
 
@@ -450,7 +450,7 @@ static int sw_sync_ioctl_get_deadline(struct sync_timeline *obj, unsigned long a
        return 0;
 
 unlock:
-       spin_unlock_irqrestore(fence->lock, flags);
+       dma_fence_unlock_irqrestore(fence, flags);
 put_fence:
        dma_fence_put(fence);
 
index 4638a686a84e758f8e120d2864059846ded90ea0..7c047f5a154922a3114da0fce445cfcd4005bad5 100644 (file)
@@ -479,10 +479,10 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
        if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence)
                return false;
 
-       spin_lock_irqsave(fence->lock, flags);
+       dma_fence_lock_irqsave(fence, flags);
        if (!dma_fence_is_signaled_locked(fence))
                dma_fence_set_error(fence, -ENODATA);
-       spin_unlock_irqrestore(fence->lock, flags);
+       dma_fence_unlock_irqrestore(fence, flags);
 
        while (!dma_fence_is_signaled(fence) &&
               ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0)
index f2beb980e3c3af0841fe412df3740aaea8776f2f..8b095087feb41b5794fdb5360d03c897c3eb2725 100644 (file)
@@ -2785,8 +2785,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
        dma_fence_put(vm->last_unlocked);
        dma_fence_wait(vm->last_tlb_flush, false);
        /* Make sure that all fence callbacks have completed */
-       spin_lock_irqsave(vm->last_tlb_flush->lock, flags);
-       spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags);
+       dma_fence_lock_irqsave(vm->last_tlb_flush, flags);
+       dma_fence_unlock_irqrestore(vm->last_tlb_flush, flags);
        dma_fence_put(vm->last_tlb_flush);
 
        list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
index 806d62ed61efff7be545ac3419e2657aa27704fe..a914ceec90aa96646e941c1cbe284e8487f765ac 100644 (file)
@@ -639,7 +639,7 @@ static inline uint64_t amdgpu_vm_tlb_seq(struct amdgpu_vm *vm)
         * sure that the dma_fence structure isn't freed up.
         */
        rcu_read_lock();
-       lock = vm->last_tlb_flush->lock;
+       lock = dma_fence_spinlock(vm->last_tlb_flush);
        rcu_read_unlock();
 
        spin_lock_irqsave(lock, flags);
index a2b413982ce646ebb440c5da75766b1c236191d5..c10ac0ab3bfa808e1b48a1a201c0208341494251 100644 (file)
@@ -148,7 +148,7 @@ __dma_fence_signal__notify(struct dma_fence *fence,
 {
        struct dma_fence_cb *cur, *tmp;
 
-       lockdep_assert_held(fence->lock);
+       dma_fence_assert_held(fence);
 
        list_for_each_entry_safe(cur, tmp, list, node) {
                INIT_LIST_HEAD(&cur->node);
index 25c46d7b1ea7ce3213ebdf5bc1d4f85f79640a08..cd44cbfb53b5b7cf408f21ec46bbb53e77a4de6f 100644 (file)
@@ -1045,9 +1045,10 @@ __i915_active_fence_set(struct i915_active_fence *active,
         * nesting rules for the fence->lock; the inner lock is always the
         * older lock.
         */
-       spin_lock_irqsave(fence->lock, flags);
+       dma_fence_lock_irqsave(fence, flags);
        if (prev)
-               spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
+               spin_lock_nested(dma_fence_spinlock(prev),
+                                SINGLE_DEPTH_NESTING);
 
        /*
         * A does the cmpxchg first, and so it sees C or NULL, as before, or
@@ -1061,17 +1062,18 @@ __i915_active_fence_set(struct i915_active_fence *active,
         */
        while (cmpxchg(__active_fence_slot(active), prev, fence) != prev) {
                if (prev) {
-                       spin_unlock(prev->lock);
+                       spin_unlock(dma_fence_spinlock(prev));
                        dma_fence_put(prev);
                }
-               spin_unlock_irqrestore(fence->lock, flags);
+               dma_fence_unlock_irqrestore(fence, flags);
 
                prev = i915_active_fence_get(active);
                GEM_BUG_ON(prev == fence);
 
-               spin_lock_irqsave(fence->lock, flags);
+               dma_fence_lock_irqsave(fence, flags);
                if (prev)
-                       spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
+                       spin_lock_nested(dma_fence_spinlock(prev),
+                                        SINGLE_DEPTH_NESTING);
        }
 
        /*
@@ -1088,10 +1090,11 @@ __i915_active_fence_set(struct i915_active_fence *active,
         */
        if (prev) {
                __list_del_entry(&active->cb.node);
-               spin_unlock(prev->lock); /* serialise with prev->cb_list */
+               /* serialise with prev->cb_list */
+               spin_unlock(dma_fence_spinlock(prev));
        }
        list_add_tail(&active->cb.node, &fence->cb_list);
-       spin_unlock_irqrestore(fence->lock, flags);
+       dma_fence_unlock_irqrestore(fence, flags);
 
        return prev;
 }
index cb22237ac17d9e5f23706eca200f998be85c9fc7..17c114645d9f9f75124766532e0de475be8ed060 100644 (file)
@@ -156,12 +156,13 @@ nouveau_name(struct drm_device *dev)
 static inline bool
 nouveau_cli_work_ready(struct dma_fence *fence)
 {
+       unsigned long flags;
        bool ret = true;
 
-       spin_lock_irq(fence->lock);
+       dma_fence_lock_irqsave(fence, flags);
        if (!dma_fence_is_signaled_locked(fence))
                ret = false;
-       spin_unlock_irq(fence->lock);
+       dma_fence_unlock_irqrestore(fence, flags);
 
        if (ret == true)
                dma_fence_put(fence);
index a27786cb86fb671e58bc4148e1df071f8392faee..096fe28aa9c9eec763d4995b396a88b1e095480f 100644 (file)
@@ -156,19 +156,19 @@ static void drm_sched_fence_set_deadline_finished(struct dma_fence *f,
        struct dma_fence *parent;
        unsigned long flags;
 
-       spin_lock_irqsave(&fence->lock, flags);
+       dma_fence_lock_irqsave(f, flags);
 
        /* If we already have an earlier deadline, keep it: */
        if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, &f->flags) &&
            ktime_before(fence->deadline, deadline)) {
-               spin_unlock_irqrestore(&fence->lock, flags);
+               dma_fence_unlock_irqrestore(f, flags);
                return;
        }
 
        fence->deadline = deadline;
        set_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, &f->flags);
 
-       spin_unlock_irqrestore(&fence->lock, flags);
+       dma_fence_unlock_irqrestore(f, flags);
 
        /*
         * smp_load_aquire() to ensure that if we are racing another
index 3927666fe5566e372d7ee19885452cadf75a27e2..ae5b38b2a884ae324c4b040ce7d5e42333801e86 100644 (file)
@@ -190,11 +190,11 @@ static bool xe_fence_set_error(struct dma_fence *fence, int error)
        unsigned long irq_flags;
        bool signaled;
 
-       spin_lock_irqsave(fence->lock, irq_flags);
+       dma_fence_lock_irqsave(fence, irq_flags);
        signaled = test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags);
        if (!signaled)
                dma_fence_set_error(fence, error);
-       spin_unlock_irqrestore(fence->lock, irq_flags);
+       dma_fence_unlock_irqrestore(fence, irq_flags);
 
        return signaled;
 }
index 9ff2c4a09cdcd918362f9d4609b53f55d9088bbe..85d6eac9fa852210a0760a36c7ddc39e396928cb 100644 (file)
@@ -377,6 +377,44 @@ dma_fence_get_rcu_safe(struct dma_fence __rcu **fencep)
        } while (1);
 }
 
+/**
+ * dma_fence_spinlock - return pointer to the spinlock protecting the fence
+ * @fence: the fence to get the lock from
+ *
+ * Return the pointer to the external lock protecting the fence.
+ */
+static inline spinlock_t *dma_fence_spinlock(struct dma_fence *fence)
+{
+       return fence->lock;
+}
+
+/**
+ * dma_fence_lock_irqsave - irqsave lock the fence
+ * @fence: the fence to lock
+ * @flags: where to store the CPU flags.
+ *
+ * Lock the fence, preventing it from changing to the signaled state.
+ */
+#define dma_fence_lock_irqsave(fence, flags)   \
+       spin_lock_irqsave(dma_fence_spinlock(fence), flags)
+
+/**
+ * dma_fence_unlock_irqrestore - unlock the fence and irqrestore
+ * @fence: the fence to unlock
+ * @flags: the CPU flags to restore
+ *
+ * Unlock the fence, allowing it to change its state to signaled again.
+ */
+#define dma_fence_unlock_irqrestore(fence, flags)      \
+       spin_unlock_irqrestore(dma_fence_spinlock(fence), flags)
+
+/**
+ * dma_fence_assert_held - lockdep assertion that fence is locked
+ * @fence: the fence which should be locked
+ */
+#define dma_fence_assert_held(fence)   \
+       lockdep_assert_held(dma_fence_spinlock(fence))
+
 #ifdef CONFIG_LOCKDEP
 bool dma_fence_begin_signalling(void);
 void dma_fence_end_signalling(bool cookie);