]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
drm/xe: Create a helper function to init job's user fence
authorNirmoy Das <nirmoy.das@intel.com>
Thu, 21 Mar 2024 16:11:42 +0000 (17:11 +0100)
committerLucas De Marchi <lucas.demarchi@intel.com>
Tue, 26 Mar 2024 22:40:19 +0000 (15:40 -0700)
Refactor xe_sync_entry_signal so it doesn't have to
modify the xe_sched_job struct; instead, create a new helper
function to set user fence values for a job.

v2: Move the sync type check to xe_sched_job_init_user_fence (Lucas)

Cc: Lucas De Marchi <lucas.demarchi@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240321161142.4954-1-nirmoy.das@intel.com
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
drivers/gpu/drm/xe/xe_exec.c
drivers/gpu/drm/xe/xe_sched_job.c
drivers/gpu/drm/xe/xe_sched_job.h
drivers/gpu/drm/xe/xe_sync.c
drivers/gpu/drm/xe/xe_sync.h
drivers/gpu/drm/xe/xe_vm.c

index 7692ebfe7d474bfbe3eac1542572d79a1307ea91..9d53ef8c49cc95901baeabab33ab9d739a8fc93f 100644 (file)
@@ -249,7 +249,7 @@ retry:
                                goto err_unlock_list;
                        }
                        for (i = 0; i < num_syncs; i++)
-                               xe_sync_entry_signal(&syncs[i], NULL, fence);
+                               xe_sync_entry_signal(&syncs[i], fence);
                        xe_exec_queue_last_fence_set(q, vm, fence);
                        dma_fence_put(fence);
                }
@@ -359,9 +359,10 @@ retry:
                drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, &job->drm.s_fence->finished,
                                         DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_WRITE);
 
-       for (i = 0; i < num_syncs; i++)
-               xe_sync_entry_signal(&syncs[i], job,
-                                    &job->drm.s_fence->finished);
+       for (i = 0; i < num_syncs; i++) {
+               xe_sync_entry_signal(&syncs[i], &job->drm.s_fence->finished);
+               xe_sched_job_init_user_fence(job, &syncs[i]);
+       }
 
        if (xe_exec_queue_is_lr(q))
                q->ring_ops->emit_job(job);
index 8151ddafb940756d87dbca45e6d3407354535ce4..add5a8b89be8fc1755bc195af5c62a7ce6fad10c 100644 (file)
@@ -5,6 +5,7 @@
 
 #include "xe_sched_job.h"
 
+#include <drm/xe_drm.h>
 #include <linux/dma-fence-array.h>
 #include <linux/slab.h>
 
@@ -15,6 +16,7 @@
 #include "xe_hw_fence.h"
 #include "xe_lrc.h"
 #include "xe_macros.h"
+#include "xe_sync_types.h"
 #include "xe_trace.h"
 #include "xe_vm.h"
 
@@ -278,6 +280,22 @@ int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm)
        return drm_sched_job_add_dependency(&job->drm, fence);
 }
 
+/**
+ * xe_sched_job_init_user_fence - Initialize user_fence for the job
+ * @job: job whose user_fence needs an init
+ * @sync: sync to be used to init user_fence
+ */
+void xe_sched_job_init_user_fence(struct xe_sched_job *job,
+                                 struct xe_sync_entry *sync)
+{
+       if (sync->type != DRM_XE_SYNC_TYPE_USER_FENCE)
+               return;
+
+       job->user_fence.used = true;
+       job->user_fence.addr = sync->addr;
+       job->user_fence.value = sync->timeline_value;
+}
+
 struct xe_sched_job_snapshot *
 xe_sched_job_snapshot_capture(struct xe_sched_job *job)
 {
index f1a660648cf00c0c289ab9510615040eabf8780c..c75018f4660dcc191abffcb63c5d58396312d287 100644 (file)
@@ -10,6 +10,7 @@
 
 struct drm_printer;
 struct xe_vm;
+struct xe_sync_entry;
 
 #define XE_SCHED_HANG_LIMIT 1
 #define XE_SCHED_JOB_TIMEOUT LONG_MAX
@@ -58,6 +59,8 @@ void xe_sched_job_arm(struct xe_sched_job *job);
 void xe_sched_job_push(struct xe_sched_job *job);
 
 int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm);
+void xe_sched_job_init_user_fence(struct xe_sched_job *job,
+                                 struct xe_sync_entry *sync);
 
 static inline struct xe_sched_job *
 to_xe_sched_job(struct drm_sched_job *drm)
index 02c9577fe418516bcb891174b9599b6c0b2903bf..65f1f16282356d41c2f7c6ff9f4fdcbcd9cfc951 100644 (file)
@@ -224,8 +224,7 @@ int xe_sync_entry_add_deps(struct xe_sync_entry *sync, struct xe_sched_job *job)
        return 0;
 }
 
-void xe_sync_entry_signal(struct xe_sync_entry *sync, struct xe_sched_job *job,
-                         struct dma_fence *fence)
+void xe_sync_entry_signal(struct xe_sync_entry *sync, struct dma_fence *fence)
 {
        if (!(sync->flags & DRM_XE_SYNC_FLAG_SIGNAL))
                return;
@@ -254,10 +253,6 @@ void xe_sync_entry_signal(struct xe_sync_entry *sync, struct xe_sched_job *job,
                        user_fence_put(sync->ufence);
                        dma_fence_put(fence);
                }
-       } else if (sync->type == DRM_XE_SYNC_TYPE_USER_FENCE) {
-               job->user_fence.used = true;
-               job->user_fence.addr = sync->addr;
-               job->user_fence.value = sync->timeline_value;
        }
 }
 
index 0fd0d51208e627c9be72eef661c160458db6f5a4..3e03396af2c6fad92c5cf9e8cf1411caf36c6488 100644 (file)
@@ -26,7 +26,6 @@ int xe_sync_entry_wait(struct xe_sync_entry *sync);
 int xe_sync_entry_add_deps(struct xe_sync_entry *sync,
                           struct xe_sched_job *job);
 void xe_sync_entry_signal(struct xe_sync_entry *sync,
-                         struct xe_sched_job *job,
                          struct dma_fence *fence);
 void xe_sync_entry_cleanup(struct xe_sync_entry *sync);
 struct dma_fence *
index d82d7cd27123ec75e15d3ef9dc779f45ae97e5d8..694fbb546372cb03997d4ea9060c94502e96af7a 100644 (file)
@@ -1700,7 +1700,7 @@ next:
                xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence;
        if (last_op) {
                for (i = 0; i < num_syncs; i++)
-                       xe_sync_entry_signal(&syncs[i], NULL, fence);
+                       xe_sync_entry_signal(&syncs[i], fence);
        }
 
        return fence;
@@ -1774,7 +1774,7 @@ next:
 
        if (last_op) {
                for (i = 0; i < num_syncs; i++)
-                       xe_sync_entry_signal(&syncs[i], NULL,
+                       xe_sync_entry_signal(&syncs[i],
                                             cf ? &cf->base : fence);
        }
 
@@ -1835,7 +1835,7 @@ static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
                fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
                if (last_op) {
                        for (i = 0; i < num_syncs; i++)
-                               xe_sync_entry_signal(&syncs[i], NULL, fence);
+                               xe_sync_entry_signal(&syncs[i], fence);
                }
        }
 
@@ -2056,7 +2056,7 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
                                struct dma_fence *fence =
                                        xe_exec_queue_last_fence_get(wait_exec_queue, vm);
 
-                               xe_sync_entry_signal(&syncs[i], NULL, fence);
+                               xe_sync_entry_signal(&syncs[i], fence);
                                dma_fence_put(fence);
                        }
                }
@@ -2934,7 +2934,7 @@ static int vm_bind_ioctl_signal_fences(struct xe_vm *vm,
                return PTR_ERR(fence);
 
        for (i = 0; i < num_syncs; i++)
-               xe_sync_entry_signal(&syncs[i], NULL, fence);
+               xe_sync_entry_signal(&syncs[i], fence);
 
        xe_exec_queue_last_fence_set(to_wait_exec_queue(vm, q), vm,
                                     fence);