goto err_unlock_list;
}
for (i = 0; i < num_syncs; i++)
- xe_sync_entry_signal(&syncs[i], NULL, fence);
+ xe_sync_entry_signal(&syncs[i], fence);
xe_exec_queue_last_fence_set(q, vm, fence);
dma_fence_put(fence);
}
drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, &job->drm.s_fence->finished,
DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_WRITE);
- for (i = 0; i < num_syncs; i++)
- xe_sync_entry_signal(&syncs[i], job,
- &job->drm.s_fence->finished);
+ for (i = 0; i < num_syncs; i++) {
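+ /* Out-syncs get the job's finished fence; user-fence syncs are instead recorded on the job */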
+ xe_sync_entry_signal(&syncs[i], &job->drm.s_fence->finished);
+ xe_sched_job_init_user_fence(job, &syncs[i]);
+ }
if (xe_exec_queue_is_lr(q))
q->ring_ops->emit_job(job);
#include "xe_sched_job.h"
+#include <drm/xe_drm.h>
#include <linux/dma-fence-array.h>
#include <linux/slab.h>
#include "xe_hw_fence.h"
#include "xe_lrc.h"
#include "xe_macros.h"
+#include "xe_sync_types.h"
#include "xe_trace.h"
#include "xe_vm.h"
return drm_sched_job_add_dependency(&job->drm, fence);
}
+/**
+ * xe_sched_job_init_user_fence - Initialize user_fence for the job
+ * @job: job whose user_fence is to be initialized
+ * @sync: sync entry used to initialize the user_fence
+ */
+void xe_sched_job_init_user_fence(struct xe_sched_job *job,
+ struct xe_sync_entry *sync)
+{
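+ /* Only user-fence syncs carry an address/value pair to write back */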
+ if (sync->type != DRM_XE_SYNC_TYPE_USER_FENCE)
+ return;
+
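+ /* Record the write so it is performed once the job completes */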
+ job->user_fence.used = true;
+ job->user_fence.addr = sync->addr;
+ job->user_fence.value = sync->timeline_value;
+}
+
struct xe_sched_job_snapshot *
xe_sched_job_snapshot_capture(struct xe_sched_job *job)
{
struct drm_printer;
struct xe_vm;
+struct xe_sync_entry;
#define XE_SCHED_HANG_LIMIT 1
#define XE_SCHED_JOB_TIMEOUT LONG_MAX
void xe_sched_job_push(struct xe_sched_job *job);
int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm);
+void xe_sched_job_init_user_fence(struct xe_sched_job *job,
+ struct xe_sync_entry *sync);
static inline struct xe_sched_job *
to_xe_sched_job(struct drm_sched_job *drm)
return 0;
}
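+/*
+ * Signal the sync entry with @fence; a no-op unless the entry was
+ * created with DRM_XE_SYNC_FLAG_SIGNAL set.
+ */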
-void xe_sync_entry_signal(struct xe_sync_entry *sync, struct xe_sched_job *job,
- struct dma_fence *fence)
+void xe_sync_entry_signal(struct xe_sync_entry *sync, struct dma_fence *fence)
{
if (!(sync->flags & DRM_XE_SYNC_FLAG_SIGNAL))
return;
user_fence_put(sync->ufence);
dma_fence_put(fence);
}
- } else if (sync->type == DRM_XE_SYNC_TYPE_USER_FENCE) {
- job->user_fence.used = true;
- job->user_fence.addr = sync->addr;
- job->user_fence.value = sync->timeline_value;
}
}
int xe_sync_entry_add_deps(struct xe_sync_entry *sync,
struct xe_sched_job *job);
void xe_sync_entry_signal(struct xe_sync_entry *sync,
- struct xe_sched_job *job,
struct dma_fence *fence);
void xe_sync_entry_cleanup(struct xe_sync_entry *sync);
struct dma_fence *
xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence;
if (last_op) {
for (i = 0; i < num_syncs; i++)
- xe_sync_entry_signal(&syncs[i], NULL, fence);
+ xe_sync_entry_signal(&syncs[i], fence);
}
return fence;
if (last_op) {
for (i = 0; i < num_syncs; i++)
- xe_sync_entry_signal(&syncs[i], NULL,
+ xe_sync_entry_signal(&syncs[i],
cf ? &cf->base : fence);
}
fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
if (last_op) {
for (i = 0; i < num_syncs; i++)
- xe_sync_entry_signal(&syncs[i], NULL, fence);
+ xe_sync_entry_signal(&syncs[i], fence);
}
}
struct dma_fence *fence =
xe_exec_queue_last_fence_get(wait_exec_queue, vm);
- xe_sync_entry_signal(&syncs[i], NULL, fence);
+ xe_sync_entry_signal(&syncs[i], fence);
dma_fence_put(fence);
}
}
return PTR_ERR(fence);
for (i = 0; i < num_syncs; i++)
- xe_sync_entry_signal(&syncs[i], NULL, fence);
+ xe_sync_entry_signal(&syncs[i], fence);
xe_exec_queue_last_fence_set(to_wait_exec_queue(vm, q), vm,
fence);