#define EXEC_QUEUE_STATE_EXTRA_REF (1 << 11)
#define EXEC_QUEUE_STATE_PENDING_RESUME (1 << 12)
#define EXEC_QUEUE_STATE_PENDING_TDR_EXIT (1 << 13)
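+/*
+ * Set when an idle queue's suspend skipped the GuC scheduling disable; the
+ * deferred ring-tail write happens on resume.
+ */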
+#define EXEC_QUEUE_STATE_IDLE_SKIP_SUSPEND (1 << 14)
static bool exec_queue_registered(struct xe_exec_queue *q)
{
	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_REGISTERED;
}

static void clear_exec_queue_pending_tdr_exit(struct xe_exec_queue *q)
{
	atomic_and(~EXEC_QUEUE_STATE_PENDING_TDR_EXIT, &q->guc->state);
}

+static bool exec_queue_idle_skip_suspend(struct xe_exec_queue *q)
+{
+	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_IDLE_SKIP_SUSPEND;
+}
+
+static void set_exec_queue_idle_skip_suspend(struct xe_exec_queue *q)
+{
+	atomic_or(EXEC_QUEUE_STATE_IDLE_SKIP_SUSPEND, &q->guc->state);
+}
+
+static void clear_exec_queue_idle_skip_suspend(struct xe_exec_queue *q)
+{
+	atomic_and(~EXEC_QUEUE_STATE_IDLE_SKIP_SUSPEND, &q->guc->state);
+}
+
static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q)
{
	return (atomic_read(&q->guc->state) &
	if (!job->restore_replay || job->last_replay) {
		if (xe_exec_queue_is_parallel(q))
			wq_item_append(q);
-		else
+		else if (!exec_queue_idle_skip_suspend(q))
			xe_lrc_set_ring_tail(lrc, lrc->ring.tail);
		job->last_replay = false;
	}
{
	struct xe_exec_queue *q = msg->private_data;
	struct xe_guc *guc = exec_queue_to_guc(q);
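+	/* Idle queues may complete a suspend without a GuC scheduling disable */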
+	bool idle_skip_suspend = xe_exec_queue_idle_skip_suspend(q);

-	if (guc_exec_queue_allowed_to_change_state(q) && !exec_queue_suspended(q) &&
-	    exec_queue_enabled(q)) {
+	if (!idle_skip_suspend && guc_exec_queue_allowed_to_change_state(q) &&
+	    !exec_queue_suspended(q) && exec_queue_enabled(q)) {
		wait_event(guc->ct.wq, vf_recovery(guc) ||
			   ((q->guc->resume_time != RESUME_PENDING ||
			     xe_guc_read_stopped(guc)) && !exec_queue_pending_disable(q)));
			disable_scheduling(q, false);
		}
	} else if (q->guc->suspend_pending) {
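+		/* Record the skipped disable so resume restores the ring tail */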
+		if (idle_skip_suspend)
+			set_exec_queue_idle_skip_suspend(q);
		set_exec_queue_suspended(q);
		suspend_fence_signal(q);
	}
}
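
+/*
+ * sched_context() - Resubmit an already-enabled exec queue to the GuC
+ *
+ * Writes the current ring tail and sends a SCHED_CONTEXT H2G so the GuC picks
+ * up the new work, avoiding a full scheduling disable/enable round trip. Only
+ * valid for registered, non-parallel queues with no pending disable.
+ */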
+static void sched_context(struct xe_exec_queue *q)
+{
+	struct xe_guc *guc = exec_queue_to_guc(q);
+	struct xe_lrc *lrc = q->lrc[0];
+	u32 action[] = {
+		XE_GUC_ACTION_SCHED_CONTEXT,
+		q->guc->id,
+	};
+
+	xe_gt_assert(guc_to_gt(guc), !xe_exec_queue_is_parallel(q));
+	xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q));
+	xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
+	xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q));
+
+	trace_xe_exec_queue_submit(q);
+
+	xe_lrc_set_ring_tail(lrc, lrc->ring.tail);
+	xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0);
+}
+
static void __guc_exec_queue_process_msg_resume(struct xe_sched_msg *msg)
{
	struct xe_exec_queue *q = msg->private_data;

	if (guc_exec_queue_allowed_to_change_state(q)) {
		clear_exec_queue_suspended(q);
		if (!exec_queue_enabled(q)) {
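+			/* Perform the ring-tail write skipped at suspend */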
+			if (exec_queue_idle_skip_suspend(q)) {
+				struct xe_lrc *lrc = q->lrc[0];
+
+				clear_exec_queue_idle_skip_suspend(q);
+				xe_lrc_set_ring_tail(lrc, lrc->ring.tail);
+			}
			q->guc->resume_time = RESUME_PENDING;
			set_exec_queue_pending_resume(q);
			enable_scheduling(q);
+		} else if (exec_queue_idle_skip_suspend(q)) {
+			/* Scheduling was never disabled; just resubmit */
+			clear_exec_queue_idle_skip_suspend(q);
+			sched_context(q);
		}
	} else {
		clear_exec_queue_suspended(q);
+		clear_exec_queue_idle_skip_suspend(q);
	}
}