From: Matthew Brost <matthew.brost@intel.com>
Date: Wed, 8 Oct 2025 21:45:26 +0000 (-0700)
Subject: drm/xe/vf: Add debug prints for GuC replaying state during VF recovery
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=1521fad9ad876238161fc25d91b7274a4a0b63d9;p=thirdparty%2Fkernel%2Flinux.git

drm/xe/vf: Add debug prints for GuC replaying state during VF recovery

These prints are helpful for manually verifying that the GuC state
machine correctly replays state during a VF post-migration recovery.
All replay paths were manually verified to trigger and work during
testing.

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Tomasz Lis
Link: https://lore.kernel.org/r/20251008214532.3442967-29-matthew.brost@intel.com
---

diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index b4c09604c5ea3..e9aa0625ce60d 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -2037,21 +2037,27 @@ void xe_guc_submit_stop(struct xe_guc *guc)
 }
 
-static void guc_exec_queue_revert_pending_state_change(struct xe_exec_queue *q)
+static void guc_exec_queue_revert_pending_state_change(struct xe_guc *guc,
+						       struct xe_exec_queue *q)
 {
 	bool pending_enable, pending_disable, pending_resume;
 
 	pending_enable = exec_queue_pending_enable(q);
 	pending_resume = exec_queue_pending_resume(q);
-	if (pending_enable && pending_resume)
+	if (pending_enable && pending_resume) {
 		q->guc->needs_resume = true;
+		xe_gt_dbg(guc_to_gt(guc), "Replay RESUME - guc_id=%d",
+			  q->guc->id);
+	}
 
 	if (pending_enable && !pending_resume &&
 	    !exec_queue_pending_tdr_exit(q)) {
 		clear_exec_queue_registered(q);
 		if (xe_exec_queue_is_lr(q))
 			xe_exec_queue_put(q);
+		xe_gt_dbg(guc_to_gt(guc), "Replay REGISTER - guc_id=%d",
+			  q->guc->id);
 	}
 
 	if (pending_enable) {
@@ -2059,6 +2065,8 @@ static void guc_exec_queue_revert_pending_state_change(struct xe_exec_queue *q)
 		clear_exec_queue_pending_resume(q);
 		clear_exec_queue_pending_tdr_exit(q);
 		clear_exec_queue_pending_enable(q);
+		xe_gt_dbg(guc_to_gt(guc), "Replay ENABLE - guc_id=%d",
+			  q->guc->id);
 	}
 
 	if (exec_queue_destroyed(q) && exec_queue_registered(q)) {
@@ -2068,6 +2076,8 @@ static void guc_exec_queue_revert_pending_state_change(struct xe_exec_queue *q)
 		else
 			q->guc->needs_cleanup = true;
 		clear_exec_queue_extra_ref(q);
+		xe_gt_dbg(guc_to_gt(guc), "Replay CLEANUP - guc_id=%d",
+			  q->guc->id);
 	}
 
 	pending_disable = exec_queue_pending_disable(q);
@@ -2075,6 +2085,8 @@ static void guc_exec_queue_revert_pending_state_change(struct xe_exec_queue *q)
 	if (pending_disable && exec_queue_suspended(q)) {
 		clear_exec_queue_suspended(q);
 		q->guc->needs_suspend = true;
+		xe_gt_dbg(guc_to_gt(guc), "Replay SUSPEND - guc_id=%d",
+			  q->guc->id);
 	}
 
 	if (pending_disable) {
@@ -2082,6 +2094,8 @@ static void guc_exec_queue_revert_pending_state_change(struct xe_exec_queue *q)
 		set_exec_queue_enabled(q);
 		clear_exec_queue_pending_disable(q);
 		clear_exec_queue_check_timeout(q);
+		xe_gt_dbg(guc_to_gt(guc), "Replay DISABLE - guc_id=%d",
+			  q->guc->id);
 	}
 
 	q->guc->resume_time = 0;
@@ -2107,7 +2121,7 @@ static void guc_exec_queue_pause(struct xe_guc *guc, struct xe_exec_queue *q)
 	else
 		cancel_delayed_work_sync(&sched->base.work_tdr);
 
-	guc_exec_queue_revert_pending_state_change(q);
+	guc_exec_queue_revert_pending_state_change(guc, q);
 
 	if (xe_exec_queue_is_parallel(q)) {
 		struct xe_device *xe = guc_to_xe(guc);
@@ -2222,6 +2236,9 @@ static void guc_exec_queue_unpause_prepare(struct xe_guc *guc,
 	list_for_each_entry(s_job, &sched->base.pending_list, list) {
 		job = to_xe_sched_job(s_job);
 
+		xe_gt_dbg(guc_to_gt(guc), "Replay JOB - guc_id=%d, seqno=%d",
+			  q->guc->id, xe_sched_job_seqno(job));
+
 		q->ring_ops->emit_job(job);
 		job->skip_emit = true;
 	}
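
--
Note: the xe_gt_dbg() messages above only reach dmesg when DRM driver
debugging is enabled. As a rough sketch of how one might watch the
replay paths fire during a migration test (assuming the standard
drm.debug module parameter, where bit 0x2 is the DRM_UT_DRIVER category
used by drm_dbg()-based helpers such as xe_gt_dbg()):

	echo 0x2 > /sys/module/drm/parameters/debug
	dmesg -w | grep 'Replay'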