spin_unlock_irq(&gt->sriov.vf.migration.lock);
xe_guc_ct_flush_and_stop(&gt->uc.guc.ct);
- xe_guc_submit_pause(&gt->uc.guc);
+ xe_guc_submit_pause_vf(&gt->uc.guc);
xe_tlb_inval_reset(&gt->tlb_inval);
return false;
static void vf_post_migration_rearm(struct xe_gt *gt)
{
xe_guc_ct_restart(&gt->uc.guc.ct);
- xe_guc_submit_unpause_prepare(&gt->uc.guc);
+ xe_guc_submit_unpause_prepare_vf(&gt->uc.guc);
}
static void vf_post_migration_kickstart(struct xe_gt *gt)
{
- xe_guc_submit_unpause(&gt->uc.guc);
+ xe_guc_submit_unpause_vf(&gt->uc.guc);
}
static void vf_post_migration_abort(struct xe_gt *gt)
}
/**
- * xe_guc_submit_pause - Stop further runs of submission tasks on given GuC.
+ * xe_guc_submit_pause_vf - Stop further runs of submission tasks for VF.
* @guc: the &xe_guc struct instance whose scheduler is to be disabled
*/
-void xe_guc_submit_pause(struct xe_guc *guc)
+void xe_guc_submit_pause_vf(struct xe_guc *guc)
{
struct xe_exec_queue *q;
unsigned long index;
+ xe_gt_assert(guc_to_gt(guc), IS_SRIOV_VF(guc_to_xe(guc)));
xe_gt_assert(guc_to_gt(guc), vf_recovery(guc));
mutex_lock(&guc->submission_state.lock);
}
/**
- * xe_guc_submit_unpause_prepare - Prepare unpause submission tasks on given GuC.
+ * xe_guc_submit_unpause_prepare_vf - Prepare to unpause submission tasks for VF.
* @guc: the &xe_guc struct instance whose scheduler is to be prepared for unpause
*/
-void xe_guc_submit_unpause_prepare(struct xe_guc *guc)
+void xe_guc_submit_unpause_prepare_vf(struct xe_guc *guc)
{
struct xe_exec_queue *q;
unsigned long index;
+ xe_gt_assert(guc_to_gt(guc), IS_SRIOV_VF(guc_to_xe(guc)));
xe_gt_assert(guc_to_gt(guc), vf_recovery(guc));
mutex_lock(&guc->submission_state.lock);
}
/**
- * xe_guc_submit_unpause - Allow further runs of submission tasks on given GuC.
+ * xe_guc_submit_unpause_vf - Allow further runs of submission tasks for VF.
* @guc: the &xe_guc struct instance whose scheduler is to be enabled
*/
-void xe_guc_submit_unpause(struct xe_guc *guc)
+void xe_guc_submit_unpause_vf(struct xe_guc *guc)
{
struct xe_exec_queue *q;
unsigned long index;
+ xe_gt_assert(guc_to_gt(guc), IS_SRIOV_VF(guc_to_xe(guc)));
+
mutex_lock(&guc->submission_state.lock);
xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
/*
void xe_guc_submit_reset_wait(struct xe_guc *guc);
void xe_guc_submit_stop(struct xe_guc *guc);
int xe_guc_submit_start(struct xe_guc *guc);
-void xe_guc_submit_pause(struct xe_guc *guc);
-void xe_guc_submit_unpause(struct xe_guc *guc);
-void xe_guc_submit_unpause_prepare(struct xe_guc *guc);
void xe_guc_submit_pause_abort(struct xe_guc *guc);
+void xe_guc_submit_pause_vf(struct xe_guc *guc);
+void xe_guc_submit_unpause_vf(struct xe_guc *guc);
+void xe_guc_submit_unpause_prepare_vf(struct xe_guc *guc);
void xe_guc_submit_wedge(struct xe_guc *guc);
int xe_guc_read_stopped(struct xe_guc *guc);
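
For readability, below is an illustrative condensation of the VF post-migration call ordering that the caller-side hunks above establish. It is a sketch only, not part of the patch: vf_post_migration_sketch() is a hypothetical wrapper, the fixup step between pause and rearm is elided in this excerpt, and only the xe_* calls and struct paths taken verbatim from the diff are assumed to exist.

/*
 * Sketch only (not patch content): hypothetical wrapper condensing the
 * recovery ordering shown in the vf_post_migration_*() hunks above.
 */
static void vf_post_migration_sketch(struct xe_gt *gt)
{
	/* Quiesce: stop CT traffic, pause VF submission, reset TLB inval. */
	xe_guc_ct_flush_and_stop(&gt->uc.guc.ct);
	xe_guc_submit_pause_vf(&gt->uc.guc);
	xe_tlb_inval_reset(&gt->tlb_inval);

	/* ... post-migration fixups happen here (outside this excerpt) ... */

	/* Rearm: bring CT back up and prepare queues for unpause. */
	xe_guc_ct_restart(&gt->uc.guc.ct);
	xe_guc_submit_unpause_prepare_vf(&gt->uc.guc);

	/* Kickstart: allow submission tasks to run again. */
	xe_guc_submit_unpause_vf(&gt->uc.guc);
}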