Revert "drm/xe/vf: Post migration, repopulate ring area for pending request"
author    Matthew Brost <matthew.brost@intel.com>
          Thu, 2 Oct 2025 23:38:23 +0000 (01:38 +0200)
committer Matthew Brost <matthew.brost@intel.com>
          Sat, 4 Oct 2025 03:36:24 +0000 (20:36 -0700)
This reverts commit a0dda25d24e636df5c30a9370464b7cebc709faf.

Due to a change in the VF migration recovery design, this code
is no longer needed.

v3:
 - Add commit message (Michal / Lucas)

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20251002233824.203417-3-michal.wajdeczko@intel.com
drivers/gpu/drm/xe/xe_exec_queue.c
drivers/gpu/drm/xe/xe_exec_queue.h
drivers/gpu/drm/xe/xe_guc_submit.c
drivers/gpu/drm/xe/xe_guc_submit.h
drivers/gpu/drm/xe/xe_sriov_vf.c

index db3f869b53f39df3b2d7ee61344b8d48946927f4..9a251abe85f99449462ff6e5bfa65f5924af3ff8 100644 (file)
@@ -1146,27 +1146,3 @@ int xe_exec_queue_contexts_hwsp_rebase(struct xe_exec_queue *q, void *scratch)
 
        return err;
 }
-
-/**
- * xe_exec_queue_jobs_ring_restore - Re-emit ring commands of requests pending on given queue.
- * @q: the &xe_exec_queue struct instance
- */
-void xe_exec_queue_jobs_ring_restore(struct xe_exec_queue *q)
-{
-       struct xe_gpu_scheduler *sched = &q->guc->sched;
-       struct xe_sched_job *job;
-
-       /*
-        * This routine is used within VF migration recovery. This means
-        * using the lock here introduces a restriction: we cannot wait
-        * for any GFX HW response while the lock is taken.
-        */
-       spin_lock(&sched->base.job_list_lock);
-       list_for_each_entry(job, &sched->base.pending_list, drm.list) {
-               if (xe_sched_job_is_error(job))
-                       continue;
-
-               q->ring_ops->emit_job(job);
-       }
-       spin_unlock(&sched->base.job_list_lock);
-}
index 15ec852e7f7e78fcfe4fbaa22709601d8287b803..8821ceb838d0b1adc872ad42b138b4a79961459b 100644 (file)
@@ -92,7 +92,6 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q);
 
 int xe_exec_queue_contexts_hwsp_rebase(struct xe_exec_queue *q, void *scratch);
 
-void xe_exec_queue_jobs_ring_restore(struct xe_exec_queue *q);
-
 struct xe_lrc *xe_exec_queue_lrc(struct xe_exec_queue *q);
+
 #endif
index 3ac0950f55bef5719348d43e41cefccf6eef489a..16f78376f196686f6016bb07075315ee19053242 100644 (file)
@@ -845,30 +845,6 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job)
        return fence;
 }
 
-/**
- * xe_guc_jobs_ring_rebase - Re-emit ring commands of requests pending
- * on all queues under a guc.
- * @guc: the &xe_guc struct instance
- */
-void xe_guc_jobs_ring_rebase(struct xe_guc *guc)
-{
-       struct xe_exec_queue *q;
-       unsigned long index;
-
-       /*
-        * This routine is used within VF migration recovery. This means
-        * using the lock here introduces a restriction: we cannot wait
-        * for any GFX HW response while the lock is taken.
-        */
-       mutex_lock(&guc->submission_state.lock);
-       xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
-               if (exec_queue_killed_or_banned_or_wedged(q))
-                       continue;
-               xe_exec_queue_jobs_ring_restore(q);
-       }
-       mutex_unlock(&guc->submission_state.lock);
-}
-
 static void guc_exec_queue_free_job(struct drm_sched_job *drm_job)
 {
        struct xe_sched_job *job = to_xe_sched_job(drm_job);
index 78c3f07e31a0a0b0dc74c3df853617ad5c19565d..5b4a0a6fd818d4001d3ce1e67e6fb0b619822150 100644 (file)
@@ -36,8 +36,6 @@ int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
 int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len);
 int xe_guc_error_capture_handler(struct xe_guc *guc, u32 *msg, u32 len);
 
-void xe_guc_jobs_ring_rebase(struct xe_guc *guc);
-
 struct xe_guc_submit_exec_queue_snapshot *
 xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q);
 void
index cdd9f8e78b2a236c88c541d856715dbd724af5ff..d59e2b55cc9500ee68ab7bdf0030ce15489609dd 100644 (file)
@@ -334,7 +334,6 @@ static int gt_vf_post_migration_fixups(struct xe_gt *gt)
                err = xe_guc_contexts_hwsp_rebase(&gt->uc.guc, buf);
                if (err)
                        goto out;
-               xe_guc_jobs_ring_rebase(&gt->uc.guc);
                xe_guc_ct_fixup_messages_with_ggtt(&gt->uc.guc.ct, shift);
        }