dma-fence: Change signature of __dma_fence_is_later
author    Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
          Thu, 15 May 2025 09:49:56 +0000 (10:49 +0100)
committer Christian König <christian.koenig@amd.com>
          Thu, 15 May 2025 13:05:29 +0000 (15:05 +0200)
With the goal of reducing the need for drivers to touch (and dereference)
fence->ops, change the prototype of __dma_fence_is_later() to take the
fence itself instead of fence->ops.
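
At a call site the conversion looks like this (a minimal before/after
sketch modelled on the sw_sync hunk below, where fence is the driver's
struct dma_fence and parent->value is the timeline value):

  Before (the caller dereferences fence->ops itself):

	return !__dma_fence_is_later(fence->seqno, parent->value, fence->ops);

  After (pass the fence; the helper reads fence->ops internally):

	return !__dma_fence_is_later(fence, fence->seqno, parent->value);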

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Link: https://lore.kernel.org/r/20250515095004.28318-2-tvrtko.ursulin@igalia.com
Signed-off-by: Christian König <christian.koenig@amd.com>
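
A note on the comparison the helper performs (unchanged by this patch):
the default 32-bit path is wraparound-safe because it tests the sign of
the unsigned 32-bit difference rather than comparing the raw values, e.g.:

	/* With f1 = 2 and f2 = 0xfffffffe the unsigned 32-bit difference
	 * is 4, so (int)4 > 0 and f1 is correctly treated as later even
	 * though it is numerically smaller (the seqno wrapped around).
	 */
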
drivers/dma-buf/dma-fence-chain.c
drivers/dma-buf/sw_sync.c
drivers/gpu/drm/xe/xe_hw_fence.c
drivers/gpu/drm/xe/xe_sched_job.c
include/linux/dma-fence.h

diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c
index 9663ba1bb6ac3446acf4092814f92c56edf24cc3..90424f23fd733af6ac03bf6bed449464963cb08a 100644
--- a/drivers/dma-buf/dma-fence-chain.c
+++ b/drivers/dma-buf/dma-fence-chain.c
@@ -252,7 +252,7 @@ void dma_fence_chain_init(struct dma_fence_chain *chain,
        chain->prev_seqno = 0;
 
        /* Try to reuse the context of the previous chain node. */
-       if (prev_chain && __dma_fence_is_later(seqno, prev->seqno, prev->ops)) {
+       if (prev_chain && __dma_fence_is_later(prev, seqno, prev->seqno)) {
                context = prev->context;
                chain->prev_seqno = prev->seqno;
        } else {
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 4f27ee93a00c260d736896e76d845584766b62d8..3c20f1d31cf542ffba31d8808d933f9aeb0cf249 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -170,7 +170,7 @@ static bool timeline_fence_signaled(struct dma_fence *fence)
 {
        struct sync_timeline *parent = dma_fence_parent(fence);
 
-       return !__dma_fence_is_later(fence->seqno, parent->value, fence->ops);
+       return !__dma_fence_is_later(fence, fence->seqno, parent->value);
 }
 
 static void timeline_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
diff --git a/drivers/gpu/drm/xe/xe_hw_fence.c b/drivers/gpu/drm/xe/xe_hw_fence.c
index 0b4f12be3692ab206d623c3403d8c9c69fe4f2c2..03eb8c6d16169668f15cdec32440ce3880fc63d7 100644
--- a/drivers/gpu/drm/xe/xe_hw_fence.c
+++ b/drivers/gpu/drm/xe/xe_hw_fence.c
@@ -165,7 +165,7 @@ static bool xe_hw_fence_signaled(struct dma_fence *dma_fence)
        u32 seqno = xe_map_rd(xe, &fence->seqno_map, 0, u32);
 
        return dma_fence->error ||
-               !__dma_fence_is_later(dma_fence->seqno, seqno, dma_fence->ops);
+               !__dma_fence_is_later(dma_fence, dma_fence->seqno, seqno);
 }
 
 static bool xe_hw_fence_enable_signaling(struct dma_fence *dma_fence)
diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
index 1905ca5909658bf37f939e2dbc412e3ebc8032b3..f0a6ce610948bfcafe8acfff7d2fa237627e1167 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.c
+++ b/drivers/gpu/drm/xe/xe_sched_job.c
@@ -216,15 +216,17 @@ void xe_sched_job_set_error(struct xe_sched_job *job, int error)
 
 bool xe_sched_job_started(struct xe_sched_job *job)
 {
+       struct dma_fence *fence = dma_fence_chain_contained(job->fence);
        struct xe_lrc *lrc = job->q->lrc[0];
 
-       return !__dma_fence_is_later(xe_sched_job_lrc_seqno(job),
-                                    xe_lrc_start_seqno(lrc),
-                                    dma_fence_chain_contained(job->fence)->ops);
+       return !__dma_fence_is_later(fence,
+                                    xe_sched_job_lrc_seqno(job),
+                                    xe_lrc_start_seqno(lrc));
 }
 
 bool xe_sched_job_completed(struct xe_sched_job *job)
 {
+       struct dma_fence *fence = dma_fence_chain_contained(job->fence);
        struct xe_lrc *lrc = job->q->lrc[0];
 
        /*
@@ -232,9 +234,9 @@ bool xe_sched_job_completed(struct xe_sched_job *job)
         * parallel handshake is done.
         */
 
-       return !__dma_fence_is_later(xe_sched_job_lrc_seqno(job),
-                                    xe_lrc_seqno(lrc),
-                                    dma_fence_chain_contained(job->fence)->ops);
+       return !__dma_fence_is_later(fence,
+                                    xe_sched_job_lrc_seqno(job),
+                                    xe_lrc_seqno(lrc));
 }
 
 void xe_sched_job_arm(struct xe_sched_job *job)
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index b12776883d1430b0b7c6bf564fa56be34855020d..48b5202c531d3b1a4aa929485d16d5acf845516c 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -441,21 +441,20 @@ dma_fence_is_signaled(struct dma_fence *fence)
 
 /**
  * __dma_fence_is_later - return if f1 is chronologically later than f2
+ * @fence: fence in whose context to do the comparison
  * @f1: the first fence's seqno
  * @f2: the second fence's seqno from the same context
- * @ops: dma_fence_ops associated with the seqno
  *
  * Returns true if f1 is chronologically later than f2. Both fences must be
  * from the same context, since a seqno is not common across contexts.
  */
-static inline bool __dma_fence_is_later(u64 f1, u64 f2,
-                                       const struct dma_fence_ops *ops)
+static inline bool __dma_fence_is_later(struct dma_fence *fence, u64 f1, u64 f2)
 {
        /* This is for backward compatibility with drivers which can only handle
         * 32bit sequence numbers. Use a 64bit compare when the driver says to
         * do so.
         */
-       if (ops->use_64bit_seqno)
+       if (fence->ops->use_64bit_seqno)
                return f1 > f2;
 
        return (int)(lower_32_bits(f1) - lower_32_bits(f2)) > 0;
@@ -475,7 +474,7 @@ static inline bool dma_fence_is_later(struct dma_fence *f1,
        if (WARN_ON(f1->context != f2->context))
                return false;
 
-       return __dma_fence_is_later(f1->seqno, f2->seqno, f1->ops);
+       return __dma_fence_is_later(f1, f1->seqno, f2->seqno);
 }
 
 /**