dw[i++] = MI_ARB_ON_OFF | MI_ARB_DISABLE; /* Enabled again below */
- i = emit_bb_start(job->batch_addr[0], BIT(8), dw, i);
+ i = emit_bb_start(job->ptrs[0].batch_addr, BIT(8), dw, i);
if (!IS_SRIOV_VF(gt_to_xe(job->q->gt))) {
/* XXX: Do we need this? Leaving for now. */
dw[i++] = preparser_disable(false);
}
- i = emit_bb_start(job->batch_addr[1], BIT(8), dw, i);
+ i = emit_bb_start(job->ptrs[1].batch_addr, BIT(8), dw, i);
dw[i++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | job->migrate_flush_flags |
MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_IMM_DW;
xe_gt_assert(gt, job->q->width <= 1); /* no parallel submission for GSCCS */
__emit_job_gen12_simple(job, job->q->lrc,
- job->batch_addr[0],
+ job->ptrs[0].batch_addr,
xe_sched_job_lrc_seqno(job));
}
for (i = 0; i < job->q->width; ++i)
__emit_job_gen12_simple(job, job->q->lrc + i,
- job->batch_addr[i],
+ job->ptrs[i].batch_addr,
xe_sched_job_lrc_seqno(job));
}
/* FIXME: Not doing parallel handshake for now */
for (i = 0; i < job->q->width; ++i)
__emit_job_gen12_video(job, job->q->lrc + i,
- job->batch_addr[i],
+ job->ptrs[i].batch_addr,
xe_sched_job_lrc_seqno(job));
}
for (i = 0; i < job->q->width; ++i)
__emit_job_gen12_render_compute(job, job->q->lrc + i,
- job->batch_addr[i],
+ job->ptrs[i].batch_addr,
xe_sched_job_lrc_seqno(job));
}
#include "xe_sched_job.h"
#include <drm/xe_drm.h>
-#include <linux/dma-fence-array.h>
+#include <linux/dma-fence-chain.h>
#include <linux/slab.h>
#include "xe_device.h"
xe_sched_job_slab =
kmem_cache_create("xe_sched_job",
sizeof(struct xe_sched_job) +
- sizeof(u64), 0,
+ sizeof(struct xe_job_ptrs), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!xe_sched_job_slab)
return -ENOMEM;
xe_sched_job_parallel_slab =
kmem_cache_create("xe_sched_job_parallel",
sizeof(struct xe_sched_job) +
- sizeof(u64) *
+ sizeof(struct xe_job_ptrs) *
XE_HW_ENGINE_MAX_INSTANCE, 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!xe_sched_job_parallel_slab) {
return gt_to_xe(job->q->gt);
}
+/* Free unused pre-allocated fences */
+static void xe_sched_job_free_fences(struct xe_sched_job *job)
+{
+ int i;
+
+ for (i = 0; i < job->q->width; ++i) {
+ struct xe_job_ptrs *ptrs = &job->ptrs[i];
+
+ if (ptrs->lrc_fence)
+ xe_lrc_free_seqno_fence(ptrs->lrc_fence);
+ if (ptrs->chain_fence)
+ dma_fence_chain_free(ptrs->chain_fence);
+ }
+}
+
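Note: the hunks above and below assume a per-engine-instance container, struct xe_job_ptrs, replacing the old flat u64 batch_addr[] array. Its actual definition belongs in xe_sched_job_types.h; a minimal sketch, inferred from how the members are used in this patch, would be:

struct xe_job_ptrs {
	/* Pre-allocated, not yet initialized LRC seqno fence; consumed by xe_sched_job_arm() */
	struct dma_fence *lrc_fence;
	/* Pre-allocated fence-chain node linking this instance's fence to the previous one */
	struct dma_fence_chain *chain_fence;
	/* Batch buffer address for this engine instance */
	u64 batch_addr;
};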
struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
u64 *batch_addr)
{
- struct xe_sched_job *job;
- struct dma_fence **fences;
bool is_migration = xe_sched_job_is_migration(q);
+ struct xe_sched_job *job;
int err;
- int i, j;
+ int i;
u32 width;
/* only a kernel context can submit a vm-less job */
XE_WARN_ON(!q->vm && !(q->flags & EXEC_QUEUE_FLAG_KERNEL));
- /* Migration and kernel engines have their own locking */
- if (!(q->flags & (EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_VM))) {
- lockdep_assert_held(&q->vm->lock);
- if (!xe_vm_in_lr_mode(q->vm))
- xe_vm_assert_held(q->vm);
- }
-
job = job_alloc(xe_exec_queue_is_parallel(q) || is_migration);
if (!job)
return ERR_PTR(-ENOMEM);
if (err)
goto err_free;
- if (!xe_exec_queue_is_parallel(q)) {
- job->fence = xe_lrc_create_seqno_fence(q->lrc);
- if (IS_ERR(job->fence)) {
- err = PTR_ERR(job->fence);
- goto err_sched_job;
- }
- job->lrc_seqno = job->fence->seqno;
- } else {
- struct dma_fence_array *cf;
+ for (i = 0; i < q->width; ++i) {
+ struct dma_fence *fence = xe_lrc_alloc_seqno_fence();
+ struct dma_fence_chain *chain;
- fences = kmalloc_array(q->width, sizeof(*fences), GFP_KERNEL);
- if (!fences) {
- err = -ENOMEM;
+ if (IS_ERR(fence)) {
+ err = PTR_ERR(fence);
goto err_sched_job;
}
+ job->ptrs[i].lrc_fence = fence;
- for (j = 0; j < q->width; ++j) {
- fences[j] = xe_lrc_create_seqno_fence(q->lrc + j);
- if (IS_ERR(fences[j])) {
- err = PTR_ERR(fences[j]);
- goto err_fences;
- }
- if (!j)
- job->lrc_seqno = fences[0]->seqno;
- }
+ if (i + 1 == q->width)
+ continue;
- cf = dma_fence_array_create(q->width, fences,
- q->parallel.composite_fence_ctx,
- q->parallel.composite_fence_seqno++,
- false);
- if (!cf) {
- --q->parallel.composite_fence_seqno;
+ chain = dma_fence_chain_alloc();
+ if (!chain) {
err = -ENOMEM;
- goto err_fences;
+ goto err_sched_job;
}
-
- job->fence = &cf->base;
+ job->ptrs[i].chain_fence = chain;
}
width = q->width;
if (is_migration)
	width = 2;
for (i = 0; i < width; ++i)
- job->batch_addr[i] = batch_addr[i];
+ job->ptrs[i].batch_addr = batch_addr[i];
xe_pm_runtime_get_noresume(job_to_xe(job));
trace_xe_sched_job_create(job);
return job;
-err_fences:
- for (j = j - 1; j >= 0; --j) {
- --q->lrc[j].fence_ctx.next_seqno;
- dma_fence_put(fences[j]);
- }
- kfree(fences);
err_sched_job:
+ xe_sched_job_free_fences(job);
drm_sched_job_cleanup(&job->drm);
err_free:
xe_exec_queue_put(q);
container_of(ref, struct xe_sched_job, refcount);
struct xe_device *xe = job_to_xe(job);
+ xe_sched_job_free_fences(job);
xe_exec_queue_put(job->q);
dma_fence_put(job->fence);
drm_sched_job_cleanup(&job->drm);
xe_pm_runtime_put(xe);
}
-void xe_sched_job_set_error(struct xe_sched_job *job, int error)
+/* Set the error status under the fence to avoid racing with signaling */
+static bool xe_fence_set_error(struct dma_fence *fence, int error)
{
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags))
- return;
+ unsigned long irq_flags;
+ bool signaled;
+
+ spin_lock_irqsave(fence->lock, irq_flags);
+ signaled = test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags);
+ if (!signaled)
+ dma_fence_set_error(fence, error);
+ spin_unlock_irqrestore(fence->lock, irq_flags);
- dma_fence_set_error(job->fence, error);
+ return signaled;
+}
- if (dma_fence_is_array(job->fence)) {
- struct dma_fence_array *array =
- to_dma_fence_array(job->fence);
- struct dma_fence **child = array->fences;
- unsigned int nchild = array->num_fences;
+void xe_sched_job_set_error(struct xe_sched_job *job, int error)
+{
+ if (xe_fence_set_error(job->fence, error))
+ return;
- do {
- struct dma_fence *current_fence = *child++;
+ if (dma_fence_is_chain(job->fence)) {
+ struct dma_fence *iter;
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &current_fence->flags))
- continue;
- dma_fence_set_error(current_fence, error);
- } while (--nchild);
+ dma_fence_chain_for_each(iter, job->fence)
+ xe_fence_set_error(dma_fence_chain_contained(iter),
+ error);
}
trace_xe_sched_job_set_error(job);
return !__dma_fence_is_later(xe_sched_job_lrc_seqno(job),
xe_lrc_start_seqno(lrc),
- dma_fence_array_first(job->fence)->ops);
+ dma_fence_chain_contained(job->fence)->ops);
}
bool xe_sched_job_completed(struct xe_sched_job *job)
return !__dma_fence_is_later(xe_sched_job_lrc_seqno(job),
xe_lrc_seqno(lrc),
- dma_fence_array_first(job->fence)->ops);
+ dma_fence_chain_contained(job->fence)->ops);
}
void xe_sched_job_arm(struct xe_sched_job *job)
{
struct xe_exec_queue *q = job->q;
+ struct dma_fence *fence, *prev;
struct xe_vm *vm = q->vm;
+ u64 seqno = 0;
+ int i;
+
+ /* Migration and kernel engines have their own locking */
+ if (IS_ENABLED(CONFIG_LOCKDEP) &&
+ !(q->flags & (EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_VM))) {
+ lockdep_assert_held(&q->vm->lock);
+ if (!xe_vm_in_lr_mode(q->vm))
+ xe_vm_assert_held(q->vm);
+ }
if (vm && !xe_sched_job_is_migration(q) && !xe_vm_in_lr_mode(vm) &&
(vm->batch_invalidate_tlb || vm->tlb_flush_seqno != q->tlb_flush_seqno)) {
job->ring_ops_flush_tlb = true;
}
+ /* Arm the pre-allocated fences */
+ for (i = 0; i < q->width; prev = fence, ++i) {
+ struct dma_fence_chain *chain;
+
+ fence = job->ptrs[i].lrc_fence;
+ xe_lrc_init_seqno_fence(&q->lrc[i], fence);
+ job->ptrs[i].lrc_fence = NULL;
+ if (!i) {
+ job->lrc_seqno = fence->seqno;
+ continue;
+ } else {
+ xe_assert(gt_to_xe(q->gt), job->lrc_seqno == fence->seqno);
+ }
+
+ chain = job->ptrs[i - 1].chain_fence;
+ dma_fence_chain_init(chain, prev, fence, seqno++);
+ job->ptrs[i - 1].chain_fence = NULL;
+ fence = &chain->base;
+ }
+
+ job->fence = fence;
drm_sched_job_arm(&job->drm);
}
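For reference, a sketch of the fence topology the arming loop above produces (illustrative names, assuming a parallel queue of width 3): each instance's pre-allocated LRC seqno fence is initialized against its LRC, and the width - 1 chain nodes link them so the exposed job fence only signals once every per-instance fence has signalled.

/*
 *   f0 = ptrs[0].lrc_fence, f1 = ptrs[1].lrc_fence, f2 = ptrs[2].lrc_fence
 *
 *   chain0 = ptrs[0].chain_fence;  dma_fence_chain_init(chain0, f0, f1, 0);
 *   chain1 = ptrs[1].chain_fence;  dma_fence_chain_init(chain1, &chain0->base, f2, 1);
 *
 *   job->fence = &chain1->base;    signals after f0, f1 and f2 have all signalled
 *
 * For width == 1 no chain node is consumed and job->fence is simply f0.
 */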
snapshot->batch_addr_len = q->width;
for (i = 0; i < q->width; i++)
- snapshot->batch_addr[i] = xe_device_uncanonicalize_addr(xe, job->batch_addr[i]);
+ snapshot->batch_addr[i] =
+ xe_device_uncanonicalize_addr(xe, job->ptrs[i].batch_addr);
return snapshot;
}