struct drm_sched_job *s_job;
bool restore_replay = false;
- list_for_each_entry(s_job, &sched->base.pending_list, list) {
- struct drm_sched_fence *s_fence = s_job->s_fence;
- struct dma_fence *hw_fence = s_fence->parent;
-
+ drm_sched_for_each_pending_job(s_job, &sched->base, NULL) {
restore_replay |= to_xe_sched_job(s_job)->restore_replay;
- if (restore_replay || (hw_fence && !dma_fence_is_signaled(hw_fence)))
+ if (restore_replay || !drm_sched_job_is_signaled(s_job))
sched->base.ops->run_job(s_job);
}
}
return drm_sched_invalidate_job(&job->drm, threshold);
}
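
The helper-based walk above folds the hand-rolled hardware-fence check into drm_sched_job_is_signaled(). For reference, the condition being replaced looked like this; the helper's exact semantics (including how it treats a job whose hardware fence has not been created yet) live on the drm_sched side, so treat this as a sketch with a hypothetical name, not the real definition:

/*
 * Sketch of the condition the removed lines computed: a job needs to
 * be run again only while it has a hardware fence that has not
 * signaled yet.
 */
static bool sketch_job_needs_resubmit(struct drm_sched_job *s_job)
{
	struct dma_fence *hw_fence = s_job->s_fence->parent;

	return hw_fence && !dma_fence_is_signaled(hw_fence);
}
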
-static inline void xe_sched_add_pending_job(struct xe_gpu_scheduler *sched,
- struct xe_sched_job *job)
-{
- spin_lock(&sched->base.job_list_lock);
- list_add(&job->drm.list, &sched->base.pending_list);
- spin_unlock(&sched->base.job_list_lock);
-}
-
/**
* xe_sched_first_pending_job() - Find the first unsignaled pending job
* @sched: Xe GPU scheduler
static inline
struct xe_sched_job *xe_sched_first_pending_job(struct xe_gpu_scheduler *sched)
{
- struct xe_sched_job *job, *r_job = NULL;
-
- spin_lock(&sched->base.job_list_lock);
- list_for_each_entry(job, &sched->base.pending_list, drm.list) {
- struct drm_sched_fence *s_fence = job->drm.s_fence;
- struct dma_fence *hw_fence = s_fence->parent;
+ struct drm_sched_job *job;

- if (hw_fence && !dma_fence_is_signaled(hw_fence)) {
- r_job = job;
- break;
- }
- }
- spin_unlock(&sched->base.job_list_lock);
+ drm_sched_for_each_pending_job(job, &sched->base, NULL)
+ if (!drm_sched_job_is_signaled(job))
+ return to_xe_sched_job(job);

- return r_job;
+ return NULL;
}
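
Both conversions above lean on drm_sched_for_each_pending_job(), which this patch only uses, never defines. As a rough mental model, and an assumption rather than the upstream definition, the NULL-start form behaves like the open-coded pending_list walk it replaces, with the job_list_lock handling folded into the helper:

/*
 * Hypothetical approximation only: drm_sched_job instances hang off
 * the scheduler's pending_list via their 'list' member, as the removed
 * walks show. The real iterator also takes a third argument (NULL at
 * every call site in this file) and is assumed to handle locking.
 */
#define sketch_for_each_pending_job(job, sched) \
	list_for_each_entry((job), &(sched)->pending_list, list)
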
static inline int
struct xe_exec_queue *q = ge->q;
struct xe_guc *guc = exec_queue_to_guc(q);
struct xe_gpu_scheduler *sched = &ge->sched;
- struct xe_sched_job *job;
+ struct drm_sched_job *job;
bool wedged = false;
xe_gt_assert(guc_to_gt(guc), xe_exec_queue_is_lr(q));
if (!exec_queue_killed(q) && !xe_lrc_ring_is_idle(q->lrc[0]))
xe_devcoredump(q, NULL, "LR job cleanup, guc_id=%d", q->guc->id);
- xe_hw_fence_irq_stop(q->fence_irq);
+ drm_sched_for_each_pending_job(job, &sched->base, NULL)
+ xe_sched_job_set_error(to_xe_sched_job(job), -ECANCELED);
xe_sched_submission_start(sched);
-
- spin_lock(&sched->base.job_list_lock);
- list_for_each_entry(job, &sched->base.pending_list, drm.list)
- xe_sched_job_set_error(job, -ECANCELED);
- spin_unlock(&sched->base.job_list_lock);
-
- xe_hw_fence_irq_start(q->fence_irq);
}
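
Note the new shape of the cleanup path: rather than bracketing the walk with xe_hw_fence_irq_stop()/xe_hw_fence_irq_start() to hold fence state stable, every job still on the pending list is marked bad directly, which completes it. Pulled out as a standalone sketch (hypothetical helper name, same calls as above):

/*
 * Complete every pending job with -ECANCELED so nothing is left
 * waiting on hardware that will never respond.
 */
static void sketch_cancel_pending_jobs(struct xe_gpu_scheduler *sched)
{
	struct drm_sched_job *job;

	drm_sched_for_each_pending_job(job, &sched->base, NULL)
		xe_sched_job_set_error(to_xe_sched_job(job), -ECANCELED);
}
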
#define ADJUST_FIVE_PERCENT(__t) mul_u64_u32_div(__t, 105, 100)
guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
{
struct xe_sched_job *job = to_xe_sched_job(drm_job);
- struct xe_sched_job *tmp_job;
+ struct drm_sched_job *tmp_job;
struct xe_exec_queue *q = job->q;
struct xe_gpu_scheduler *sched = &q->guc->sched;
struct xe_guc *guc = exec_queue_to_guc(q);
struct xe_device *xe = guc_to_xe(guc);
int err = -ETIME;
pid_t pid = -1;
- int i = 0;
bool wedged = false, skip_timeout_check;
xe_gt_assert(guc_to_gt(guc), !xe_exec_queue_is_lr(q));
__deregister_exec_queue(guc, q);
}
- /* Stop fence signaling */
- xe_hw_fence_irq_stop(q->fence_irq);
+ /* Mark all outstanding jobs as bad, thus completing them */
+ xe_sched_job_set_error(job, err);
+ drm_sched_for_each_pending_job(tmp_job, &sched->base, NULL)
+ xe_sched_job_set_error(to_xe_sched_job(tmp_job), -ECANCELED);
- /*
- * Fence state now stable, stop / start scheduler which cleans up any
- * fences that are complete
- */
- xe_sched_add_pending_job(sched, job);
xe_sched_submission_start(sched);
if (xe_exec_queue_is_multi_queue(q))
else
xe_guc_exec_queue_trigger_cleanup(q);
- /* Mark all outstanding jobs as bad, thus completing them */
- spin_lock(&sched->base.job_list_lock);
- list_for_each_entry(tmp_job, &sched->base.pending_list, drm.list)
- xe_sched_job_set_error(tmp_job, !i++ ? err : -ECANCELED);
- spin_unlock(&sched->base.job_list_lock);
-
- /* Start fence signaling */
- xe_hw_fence_irq_start(q->fence_irq);
-
- return DRM_GPU_SCHED_STAT_RESET;
+ /*
+ * We want the job added back to the pending list so it gets freed; this
+ * is what DRM_GPU_SCHED_STAT_NO_HANG does.
+ */
+ return DRM_GPU_SCHED_STAT_NO_HANG;
sched_enable:
set_exec_queue_pending_tdr_exit(q);
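
Taken together, the timeout path now reduces to: give the timed-out job the primary error, cancel everything else still pending, restart submission, and let DRM_GPU_SCHED_STAT_NO_HANG re-insert the job on the pending list so the normal free path reclaims it. A condensed sketch of that flow, not the full handler (which also covers wedging, deregistration and the skip-timeout cases):

static enum drm_gpu_sched_stat
sketch_timedout_job(struct drm_sched_job *drm_job)
{
	struct xe_sched_job *job = to_xe_sched_job(drm_job);
	struct xe_gpu_scheduler *sched = &job->q->guc->sched;
	struct drm_sched_job *tmp_job;

	/* The timed-out job itself carries the primary error... */
	xe_sched_job_set_error(job, -ETIME);

	/* ...every other job still pending is collateral damage */
	drm_sched_for_each_pending_job(tmp_job, &sched->base, NULL)
		xe_sched_job_set_error(to_xe_sched_job(tmp_job), -ECANCELED);

	xe_sched_submission_start(sched);

	/*
	 * NO_HANG puts the job back on the pending list, where the
	 * normal completion path frees it.
	 */
	return DRM_GPU_SCHED_STAT_NO_HANG;
}
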
struct xe_exec_queue *q)
{
struct xe_gpu_scheduler *sched = &q->guc->sched;
- struct xe_sched_job *job = NULL, *__job;
+ struct xe_sched_job *job = NULL;
+ struct drm_sched_job *s_job;
bool restore_replay = false;
- list_for_each_entry(__job, &sched->base.pending_list, drm.list) {
- job = __job;
+ drm_sched_for_each_pending_job(s_job, &sched->base, NULL) {
+ job = to_xe_sched_job(s_job);
restore_replay |= job->restore_replay;
if (restore_replay) {
xe_gt_dbg(guc_to_gt(guc), "Replay JOB - guc_id=%d, seqno=%d",
* created after resfix done.
*/
if (q->guc->id != index ||
- !READ_ONCE(q->guc->sched.base.pause_submit))
+ !drm_sched_is_stopped(&q->guc->sched.base))
continue;
guc_exec_queue_unpause(guc, q);
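
The drm_sched_is_stopped() call substitutes for peeking at the scheduler's pause_submit flag directly, as the removed line did. Assumed shape of the accessor, not the upstream definition:

static inline bool sketch_sched_is_stopped(struct drm_gpu_scheduler *sched)
{
	return READ_ONCE(sched->pause_submit);
}
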
snapshot->multi_queue.primary = xe_exec_queue_multi_queue_primary(q)->guc->id;
snapshot->multi_queue.pos = q->multi_queue.pos;
}
- spin_lock(&sched->base.job_list_lock);
- snapshot->pending_list_size = list_count_nodes(&sched->base.pending_list);
- snapshot->pending_list = kmalloc_array(snapshot->pending_list_size,
- sizeof(struct pending_list_snapshot),
- GFP_ATOMIC);
-
- if (snapshot->pending_list) {
- struct xe_sched_job *job_iter;
-
- i = 0;
- list_for_each_entry(job_iter, &sched->base.pending_list, drm.list) {
- snapshot->pending_list[i].seqno =
- xe_sched_job_seqno(job_iter);
- snapshot->pending_list[i].fence =
- dma_fence_is_signaled(job_iter->fence) ? 1 : 0;
- snapshot->pending_list[i].finished =
- dma_fence_is_signaled(&job_iter->drm.s_fence->finished)
- ? 1 : 0;
- i++;
- }
- }
-
- spin_unlock(&sched->base.job_list_lock);
return snapshot;
}
drm_printf(p, "\tMulti queue primary GuC ID: %d\n", snapshot->multi_queue.primary);
drm_printf(p, "\tMulti queue position: %d\n", snapshot->multi_queue.pos);
}
-
- for (i = 0; snapshot->pending_list && i < snapshot->pending_list_size;
- i++)
- drm_printf(p, "\tJob: seqno=%d, fence=%d, finished=%d\n",
- snapshot->pending_list[i].seqno,
- snapshot->pending_list[i].fence,
- snapshot->pending_list[i].finished);
}
/**
xe_lrc_snapshot_free(snapshot->lrc[i]);
kfree(snapshot->lrc);
}
- kfree(snapshot->pending_list);
kfree(snapshot);
}