drm/xe: Skip exec queue schedule toggle if queue is idle during suspend
author		Matthew Brost <matthew.brost@intel.com>
		Fri, 12 Dec 2025 18:28:44 +0000 (10:28 -0800)
committer	Matthew Brost <matthew.brost@intel.com>
		Mon, 15 Dec 2025 21:54:18 +0000 (13:54 -0800)
If an exec queue is idle, there is no need to issue a schedule disable
to the GuC when suspending the queue’s execution. Opportunistically skip
this step if the queue is idle and not a parallel queue. Parallel queues
must have their scheduling state flipped in the GuC due to limitations
in how submission is implemented in run_job().
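
Concretely, the check boils down to the new helper added in
xe_exec_queue.h (shown in full in the diff below):

	/* A queue may skip the GuC schedule disable only when it is idle
	 * and not parallel; parallel queues always submit via
	 * wq_item_append() in run_job(), so their GuC scheduling state
	 * must be toggled for a resubmission to take effect.
	 */
	static inline bool xe_exec_queue_idle_skip_suspend(struct xe_exec_queue *q)
	{
		return !xe_exec_queue_is_parallel(q) && xe_exec_queue_is_idle(q);
	}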

Also, if all pagefault queues can skip the schedule disable during a
switch to dma-fence mode, do not schedule a resume for the pagefault
queues after the next submission.
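
The resulting group-suspend behavior looks roughly like this (condensed
from the xe_hw_engine_group.c hunk below; the list and field names
follow the surrounding driver code and are illustrative):

	/* Only schedule a resume after the next submission if at least one
	 * pagefault queue actually issued a schedule disable; if every
	 * queue was idle, no resume is needed.
	 */
	bool need_resume = false;

	list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
		need_resume |= !xe_exec_queue_idle_skip_suspend(q);
		q->ops->suspend(q);
	}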

v2:
 - Don't touch the LRC tail if the queue is suspended but enabled in run_job
   (CI)

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patch.msgid.link/20251212182847.1683222-5-matthew.brost@intel.com
drivers/gpu/drm/xe/xe_exec_queue.h
drivers/gpu/drm/xe/xe_guc_submit.c
drivers/gpu/drm/xe/xe_hw_engine_group.c

index 10abed98fb6b68a8a646d9ca14856ebf73e8e61d..b5ad975d7e97fad5be67a1c330795533d243ba2f 100644 (file)
@@ -162,4 +162,21 @@ int xe_exec_queue_contexts_hwsp_rebase(struct xe_exec_queue *q, void *scratch);
 
 struct xe_lrc *xe_exec_queue_lrc(struct xe_exec_queue *q);
 
+/**
+ * xe_exec_queue_idle_skip_suspend() - Can exec queue skip suspend
+ * @q: The exec_queue
+ *
+ * If an exec queue is not parallel and is idle, the suspend steps can be
+ * skipped in the submission backend, immediately signaling the suspend fence.
+ * Parallel queues cannot skip this step due to limitations in the submission
+ * backend.
+ *
+ * Return: True if the exec queue is idle and can skip suspend steps, False
+ * otherwise
+ */
+static inline bool xe_exec_queue_idle_skip_suspend(struct xe_exec_queue *q)
+{
+       return !xe_exec_queue_is_parallel(q) && xe_exec_queue_is_idle(q);
+}
+
 #endif
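
A simplified usage sketch for the new helper (hypothetical caller; the
real consumers are in the xe_guc_submit.c hunks below, which also manage
the suspend/skip state bits):

	/* Decide up front whether a suspend needs a GuC round trip at all:
	 * an idle, non-parallel queue can have its suspend fence signaled
	 * immediately.
	 */
	if (xe_exec_queue_idle_skip_suspend(q))
		suspend_fence_signal(q);	/* no schedule disable issued */
	else
		disable_scheduling(q, false);	/* full GuC schedule disable */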
index 18cac5594d6ab6dded29a3efd1e2bf1e86370ccf..43fd2069f9b2e3f5a180769f87240d23064e1f54 100644 (file)
@@ -75,6 +75,7 @@ exec_queue_to_guc(struct xe_exec_queue *q)
 #define EXEC_QUEUE_STATE_EXTRA_REF             (1 << 11)
 #define EXEC_QUEUE_STATE_PENDING_RESUME                (1 << 12)
 #define EXEC_QUEUE_STATE_PENDING_TDR_EXIT      (1 << 13)
+#define EXEC_QUEUE_STATE_IDLE_SKIP_SUSPEND     (1 << 14)
 
 static bool exec_queue_registered(struct xe_exec_queue *q)
 {
@@ -266,6 +267,21 @@ static void clear_exec_queue_pending_tdr_exit(struct xe_exec_queue *q)
        atomic_and(~EXEC_QUEUE_STATE_PENDING_TDR_EXIT, &q->guc->state);
 }
 
+static bool exec_queue_idle_skip_suspend(struct xe_exec_queue *q)
+{
+       return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_IDLE_SKIP_SUSPEND;
+}
+
+static void set_exec_queue_idle_skip_suspend(struct xe_exec_queue *q)
+{
+       atomic_or(EXEC_QUEUE_STATE_IDLE_SKIP_SUSPEND, &q->guc->state);
+}
+
+static void clear_exec_queue_idle_skip_suspend(struct xe_exec_queue *q)
+{
+       atomic_and(~EXEC_QUEUE_STATE_IDLE_SKIP_SUSPEND, &q->guc->state);
+}
+
 static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q)
 {
        return (atomic_read(&q->guc->state) &
@@ -1118,7 +1134,7 @@ static void submit_exec_queue(struct xe_exec_queue *q, struct xe_sched_job *job)
        if (!job->restore_replay || job->last_replay) {
                if (xe_exec_queue_is_parallel(q))
                        wq_item_append(q);
-               else
+               else if (!exec_queue_idle_skip_suspend(q))
                        xe_lrc_set_ring_tail(lrc, lrc->ring.tail);
                job->last_replay = false;
        }
@@ -1906,9 +1922,10 @@ static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg)
 {
        struct xe_exec_queue *q = msg->private_data;
        struct xe_guc *guc = exec_queue_to_guc(q);
+       bool idle_skip_suspend = xe_exec_queue_idle_skip_suspend(q);
 
-       if (guc_exec_queue_allowed_to_change_state(q) && !exec_queue_suspended(q) &&
-           exec_queue_enabled(q)) {
+       if (!idle_skip_suspend && guc_exec_queue_allowed_to_change_state(q) &&
+           !exec_queue_suspended(q) && exec_queue_enabled(q)) {
                wait_event(guc->ct.wq, vf_recovery(guc) ||
                           ((q->guc->resume_time != RESUME_PENDING ||
                           xe_guc_read_stopped(guc)) && !exec_queue_pending_disable(q)));
@@ -1927,11 +1944,33 @@ static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg)
                        disable_scheduling(q, false);
                }
        } else if (q->guc->suspend_pending) {
+               if (idle_skip_suspend)
+                       set_exec_queue_idle_skip_suspend(q);
                set_exec_queue_suspended(q);
                suspend_fence_signal(q);
        }
 }
 
+static void sched_context(struct xe_exec_queue *q)
+{
+       struct xe_guc *guc = exec_queue_to_guc(q);
+       struct xe_lrc *lrc = q->lrc[0];
+       u32 action[] = {
+               XE_GUC_ACTION_SCHED_CONTEXT,
+               q->guc->id,
+       };
+
+       xe_gt_assert(guc_to_gt(guc), !xe_exec_queue_is_parallel(q));
+       xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q));
+       xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
+       xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q));
+
+       trace_xe_exec_queue_submit(q);
+
+       xe_lrc_set_ring_tail(lrc, lrc->ring.tail);
+       xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0);
+}
+
 static void __guc_exec_queue_process_msg_resume(struct xe_sched_msg *msg)
 {
        struct xe_exec_queue *q = msg->private_data;
@@ -1939,12 +1978,22 @@ static void __guc_exec_queue_process_msg_resume(struct xe_sched_msg *msg)
        if (guc_exec_queue_allowed_to_change_state(q)) {
                clear_exec_queue_suspended(q);
                if (!exec_queue_enabled(q)) {
+                       if (exec_queue_idle_skip_suspend(q)) {
+                               struct xe_lrc *lrc = q->lrc[0];
+
+                               clear_exec_queue_idle_skip_suspend(q);
+                               xe_lrc_set_ring_tail(lrc, lrc->ring.tail);
+                       }
                        q->guc->resume_time = RESUME_PENDING;
                        set_exec_queue_pending_resume(q);
                        enable_scheduling(q);
+               } else if (exec_queue_idle_skip_suspend(q)) {
+                       clear_exec_queue_idle_skip_suspend(q);
+                       sched_context(q);
                }
        } else {
                clear_exec_queue_suspended(q);
+               clear_exec_queue_idle_skip_suspend(q);
        }
 }
 
index 290205a266b8b708574541b45cf0e26a691d508a..4d9263a1a20807efd81df72347869cc12855c860 100644 (file)
@@ -205,7 +205,7 @@ static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group
                        continue;
 
                xe_gt_stats_incr(q->gt, XE_GT_STATS_ID_HW_ENGINE_GROUP_SUSPEND_LR_QUEUE_COUNT, 1);
-               need_resume = true;
+               need_resume |= !xe_exec_queue_idle_skip_suspend(q);
                q->ops->suspend(q);
        }