#define EXEC_QUEUE_STATE_KILLED (1 << 7)
#define EXEC_QUEUE_STATE_WEDGED (1 << 8)
#define EXEC_QUEUE_STATE_BANNED (1 << 9)
-#define EXEC_QUEUE_STATE_CHECK_TIMEOUT (1 << 10)
-#define EXEC_QUEUE_STATE_PENDING_RESUME (1 << 11)
-#define EXEC_QUEUE_STATE_PENDING_TDR_EXIT (1 << 12)
-#define EXEC_QUEUE_STATE_IDLE_SKIP_SUSPEND (1 << 13)
+#define EXEC_QUEUE_STATE_PENDING_RESUME (1 << 10)
+#define EXEC_QUEUE_STATE_IDLE_SKIP_SUSPEND (1 << 11)
static bool exec_queue_registered(struct xe_exec_queue *q)
{
return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_REGISTERED;
}

static void set_exec_queue_wedged(struct xe_exec_queue *q)
{
atomic_or(EXEC_QUEUE_STATE_WEDGED, &q->guc->state);
}
-static bool exec_queue_check_timeout(struct xe_exec_queue *q)
-{
- return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_CHECK_TIMEOUT;
-}
-
-static void set_exec_queue_check_timeout(struct xe_exec_queue *q)
-{
- atomic_or(EXEC_QUEUE_STATE_CHECK_TIMEOUT, &q->guc->state);
-}
-
-static void clear_exec_queue_check_timeout(struct xe_exec_queue *q)
-{
- atomic_and(~EXEC_QUEUE_STATE_CHECK_TIMEOUT, &q->guc->state);
-}
-
static bool exec_queue_pending_resume(struct xe_exec_queue *q)
{
return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_RESUME;
}

static void clear_exec_queue_pending_resume(struct xe_exec_queue *q)
{
atomic_and(~EXEC_QUEUE_STATE_PENDING_RESUME, &q->guc->state);
}
-static bool exec_queue_pending_tdr_exit(struct xe_exec_queue *q)
-{
- return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_TDR_EXIT;
-}
-
-static void set_exec_queue_pending_tdr_exit(struct xe_exec_queue *q)
-{
- atomic_or(EXEC_QUEUE_STATE_PENDING_TDR_EXIT, &q->guc->state);
-}
-
-static void clear_exec_queue_pending_tdr_exit(struct xe_exec_queue *q)
-{
- atomic_and(~EXEC_QUEUE_STATE_PENDING_TDR_EXIT, &q->guc->state);
-}
-
static bool exec_queue_idle_skip_suspend(struct xe_exec_queue *q)
{
return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_IDLE_SKIP_SUSPEND;
}
WRITE_ONCE(group->banned, true);
set_exec_queue_reset(primary);
- if (!exec_queue_banned(primary) && !exec_queue_check_timeout(primary))
+ if (!exec_queue_banned(primary))
xe_guc_exec_queue_trigger_cleanup(primary);
mutex_lock(&group->list_lock);
list_for_each_entry(eq, &group->list, multi_queue.link) {
set_exec_queue_reset(eq);
- if (!exec_queue_banned(eq) && !exec_queue_check_timeout(eq))
+ if (!exec_queue_banned(eq))
xe_guc_exec_queue_trigger_cleanup(eq);
}
mutex_unlock(&group->list_lock);
} else {
set_exec_queue_reset(q);
- if (!exec_queue_banned(q) && !exec_queue_check_timeout(q))
+ if (!exec_queue_banned(q))
xe_guc_exec_queue_trigger_cleanup(q);
}
}
return xe_sched_invalidate_job(job, 2);
}
- ctx_timestamp = lower_32_bits(xe_lrc_ctx_timestamp(q->lrc[0]));
+ ctx_timestamp = lower_32_bits(xe_lrc_timestamp(q->lrc[0]));
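+
+ /*
+ * If the sampled CTX timestamp has not advanced since the previous TDR
+ * pass, the context has made no forward progress; treat the job as hung.
+ */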
+ if (ctx_timestamp == job->sample_timestamp) {
+ xe_gt_warn(gt, "Check job timeout: seqno=%u, lrc_seqno=%u, guc_id=%d, timestamp stuck",
+ xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job),
+ q->guc->id);
+
+ return xe_sched_invalidate_job(job, 0);
+ }
+
+ job->sample_timestamp = ctx_timestamp;
ctx_job_timestamp = xe_lrc_ctx_job_timestamp(q->lrc[0]);
/*
}
/*
- * XXX: Sampling timeout doesn't work in wedged mode as we have to
- * modify scheduling state to read timestamp. We could read the
- * timestamp from a register to accumulate current running time but this
- * doesn't work for SRIOV. For now assuming timeouts in wedged mode are
- * genuine timeouts.
+ * Check whether the job has actually timed out; if not, rearm the TDR and
+ * let the job keep running
*/
+ if (!skip_timeout_check && !check_timeout(q, job))
+ goto rearm;
+
if (!exec_queue_killed(q))
wedged = guc_submit_hint_wedged(exec_queue_to_guc(q));
- /* Engine state now stable, disable scheduling to check timestamp */
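+ /* Timeout confirmed above (or check skipped), ban the queue from running again */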
+ set_exec_queue_banned(q);
+
+ /* Kick job / queue off hardware */
if (!wedged && (exec_queue_enabled(q) || exec_queue_pending_disable(q))) {
int ret;
if (!ret || xe_guc_read_stopped(guc))
goto trigger_reset;
- /*
- * Flag communicates to G2H handler that schedule
- * disable originated from a timeout check. The G2H then
- * avoid triggering cleanup or deregistering the exec
- * queue.
- */
- set_exec_queue_check_timeout(q);
disable_scheduling(q, skip_timeout_check);
}
xe_devcoredump(q, job,
"Schedule disable failed to respond, guc_id=%d, ret=%d, guc_read=%d",
q->guc->id, ret, xe_guc_read_stopped(guc));
- set_exec_queue_banned(q);
xe_gt_reset_async(q->gt);
xe_sched_tdr_queue_imm(sched);
goto rearm;
}
}
- /*
- * Check if job is actually timed out, if so restart job execution and TDR
- */
- if (!wedged && !skip_timeout_check && !check_timeout(q, job) &&
- !exec_queue_reset(q) && exec_queue_registered(q)) {
- clear_exec_queue_check_timeout(q);
- goto sched_enable;
- }
-
if (q->vm && q->vm->xef) {
process_name = q->vm->xef->process_name;
pid = q->vm->xef->pid;
if (!wedged && (q->flags & EXEC_QUEUE_FLAG_KERNEL ||
(q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q)))) {
if (!xe_sched_invalidate_job(job, 2)) {
- clear_exec_queue_check_timeout(q);
xe_gt_reset_async(q->gt);
goto rearm;
}
}
- set_exec_queue_banned(q);
-
/* Mark all outstanding jobs as bad, thus completing them */
xe_sched_job_set_error(job, err);
drm_sched_for_each_pending_job(tmp_job, &sched->base, NULL)
*/
return DRM_GPU_SCHED_STAT_NO_HANG;
-sched_enable:
- set_exec_queue_pending_tdr_exit(q);
- enable_scheduling(q);
rearm:
/*
* XXX: Ideally want to adjust timeout based on current execution time
q->guc->id);
}
- if (pending_enable && !pending_resume &&
- !exec_queue_pending_tdr_exit(q)) {
+ if (pending_enable && !pending_resume) {
clear_exec_queue_registered(q);
xe_gt_dbg(guc_to_gt(guc), "Replay REGISTER - guc_id=%d",
q->guc->id);
if (pending_enable) {
clear_exec_queue_enabled(q);
clear_exec_queue_pending_resume(q);
- clear_exec_queue_pending_tdr_exit(q);
clear_exec_queue_pending_enable(q);
xe_gt_dbg(guc_to_gt(guc), "Replay ENABLE - guc_id=%d",
q->guc->id);
if (!pending_enable)
set_exec_queue_enabled(q);
clear_exec_queue_pending_disable(q);
- clear_exec_queue_check_timeout(q);
xe_gt_dbg(guc_to_gt(guc), "Replay DISABLE - guc_id=%d",
q->guc->id);
}
q->guc->resume_time = ktime_get();
clear_exec_queue_pending_resume(q);
- clear_exec_queue_pending_tdr_exit(q);
clear_exec_queue_pending_enable(q);
smp_wmb();
wake_up_all(&guc->ct.wq);
} else {
- bool check_timeout = exec_queue_check_timeout(q);
-
xe_gt_assert(guc_to_gt(guc), runnable_state == 0);
xe_gt_assert(guc_to_gt(guc), exec_queue_pending_disable(q));
suspend_fence_signal(q);
clear_exec_queue_pending_disable(q);
} else {
- if (exec_queue_banned(q) || check_timeout) {
+ if (exec_queue_banned(q)) {
smp_wmb();
wake_up_all(&guc->ct.wq);
}
- if (!check_timeout && exec_queue_destroyed(q)) {
+ if (exec_queue_destroyed(q)) {
/*
* Make sure to clear the pending_disable only
* after sampling the destroyed state. We want
*
* Returns: ctx timestamp value
*/
-u64 xe_lrc_ctx_timestamp(struct xe_lrc *lrc)
+static u64 xe_lrc_ctx_timestamp(struct xe_lrc *lrc)
{
struct xe_device *xe = lrc_to_xe(lrc);
struct iosys_map map;
}
/**
- * xe_lrc_update_timestamp() - Update ctx timestamp
+ * xe_lrc_timestamp() - Current ctx timestamp
* @lrc: Pointer to the lrc.
- * @old_ts: Old timestamp value
*
- * Populate @old_ts current saved ctx timestamp, read new ctx timestamp and
- * update saved value. With support for active contexts, the calculation may be
- * slightly racy, so follow a read-again logic to ensure that the context is
- * still active before returning the right timestamp.
+ * Return the latest ctx timestamp. With support for active contexts, the
+ * calculation may be slightly racy, so follow a read-again logic to ensure that
+ * the context is still active before returning the right timestamp.
*
* Returns: New ctx timestamp value
*/
-u64 xe_lrc_update_timestamp(struct xe_lrc *lrc, u64 *old_ts)
+u64 xe_lrc_timestamp(struct xe_lrc *lrc)
{
- u64 lrc_ts, reg_ts;
+ u64 lrc_ts, reg_ts;
+ u64 new_ts = lrc->ctx_timestamp; /* fall back to the last saved value */
u32 engine_id;
- *old_ts = lrc->ctx_timestamp;
-
lrc_ts = xe_lrc_ctx_timestamp(lrc);
/* CTX_TIMESTAMP mmio read is invalid on VF, so return the LRC value */
if (IS_SRIOV_VF(lrc_to_xe(lrc))) {
- lrc->ctx_timestamp = lrc_ts;
+ new_ts = lrc_ts;
goto done;
}
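+
+ /*
+ * While the context is running, its LRC slot holds the CONTEXT_ACTIVE
+ * marker rather than a timestamp, so read the live counter from the
+ * engine register instead.
+ */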
if (lrc_ts == CONTEXT_ACTIVE) {
engine_id = xe_lrc_engine_id(lrc);
if (!get_ctx_timestamp(lrc, engine_id, &reg_ts))
- lrc->ctx_timestamp = reg_ts;
+ new_ts = reg_ts;
/* read lrc again to ensure context is still active */
lrc_ts = xe_lrc_ctx_timestamp(lrc);
* be a separate if condition.
*/
if (lrc_ts != CONTEXT_ACTIVE)
- lrc->ctx_timestamp = lrc_ts;
+ new_ts = lrc_ts;
done:
+ return new_ts;
+}
+
+/**
+ * xe_lrc_update_timestamp() - Update ctx timestamp
+ * @lrc: Pointer to the lrc.
+ * @old_ts: Old timestamp value
+ *
+ * Populate @old_ts with the currently saved ctx timestamp, then read the new
+ * ctx timestamp and update the saved value.
+ *
+ * Returns: New ctx timestamp value
+ */
+u64 xe_lrc_update_timestamp(struct xe_lrc *lrc, u64 *old_ts)
+{
+ *old_ts = lrc->ctx_timestamp;
+ lrc->ctx_timestamp = xe_lrc_timestamp(lrc);
+
trace_xe_lrc_update_timestamp(lrc, *old_ts);
return lrc->ctx_timestamp;
return 0;
}
-static int emit_copy_timestamp(struct xe_lrc *lrc, u32 *dw, int i)
+static int emit_copy_timestamp(struct xe_device *xe, struct xe_lrc *lrc,
+ u32 *dw, int i)
{
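+ /* Snapshot the current CTX_TIMESTAMP into this job's timestamp slot */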
dw[i++] = MI_STORE_REGISTER_MEM | MI_SRM_USE_GGTT | MI_SRM_ADD_CS_OFFSET;
dw[i++] = RING_CTX_TIMESTAMP(0).addr;
dw[i++] = xe_lrc_ctx_job_timestamp_ggtt_addr(lrc);
dw[i++] = 0;
+ /*
+ * A VF cannot read CTX_TIMESTAMP over MMIO, so the TDR samples the
+ * value saved in the LRC instead. Re-save the current counter into the
+ * CTX timestamp slot here, after the job timestamp copy above, so the
+ * sampled CTX timestamp is never behind the job timestamp and the
+ * unsigned delta computed in the TDR cannot wrap around.
+ */
+ if (IS_SRIOV_VF(xe)) {
+ dw[i++] = MI_STORE_REGISTER_MEM | MI_SRM_USE_GGTT |
+ MI_SRM_ADD_CS_OFFSET;
+ dw[i++] = RING_CTX_TIMESTAMP(0).addr;
+ dw[i++] = xe_lrc_ctx_timestamp_ggtt_addr(lrc);
+ dw[i++] = 0;
+ }
+
return i;
}
*head = lrc->ring.tail;
- i = emit_copy_timestamp(lrc, dw, i);
+ i = emit_copy_timestamp(gt_to_xe(gt), lrc, dw, i);
if (job->ring_ops_flush_tlb) {
dw[i++] = preparser_disable(true);
*head = lrc->ring.tail;
- i = emit_copy_timestamp(lrc, dw, i);
+ i = emit_copy_timestamp(xe, lrc, dw, i);
dw[i++] = preparser_disable(true);
*head = lrc->ring.tail;
- i = emit_copy_timestamp(lrc, dw, i);
+ i = emit_copy_timestamp(xe, lrc, dw, i);
dw[i++] = preparser_disable(true);
if (lacks_render)
struct xe_lrc *lrc, u32 *head,
u32 seqno)
{
+ struct xe_gt *gt = job->q->gt;
+ struct xe_device *xe = gt_to_xe(gt);
u32 saddr = xe_lrc_start_seqno_ggtt_addr(lrc);
u32 dw[MAX_JOB_SIZE_DW], i = 0;
*head = lrc->ring.tail;
- i = emit_copy_timestamp(lrc, dw, i);
+ i = emit_copy_timestamp(xe, lrc, dw, i);
i = emit_store_imm_ggtt(saddr, seqno, dw, i);