spin_lock(&gt->sriov.vf.migration.lock);
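+ /* Only start recovery if it is not already queued and teardown has not begun */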
- if (!gt->sriov.vf.migration.recovery_queued) {
+ if (!gt->sriov.vf.migration.recovery_queued &&
+     !gt->sriov.vf.migration.recovery_teardown) {
gt->sriov.vf.migration.recovery_queued = true;
WRITE_ONCE(gt->sriov.vf.migration.recovery_inprogress, true);
vf_post_migration_recovery(gt);
}
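+/*
+ * Devm release action: block further recovery queuing via recovery_teardown,
+ * then cancel any recovery work that is already queued or running.
+ */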
+static void vf_migration_fini(void *arg)
+{
+ struct xe_gt *gt = arg;
+
+ spin_lock_irq(&gt->sriov.vf.migration.lock);
+ gt->sriov.vf.migration.recovery_teardown = true;
+ spin_unlock_irq(&gt->sriov.vf.migration.lock);
+
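+ /* New recovery can no longer be queued; cancel anything queued or in flight */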
+ cancel_work_sync(&gt->sriov.vf.migration.worker);
+}
+
/**
* xe_gt_sriov_vf_init_early() - GT VF init early
* @gt: the &xe_gt
return 0;
}
+/**
+ * xe_gt_sriov_vf_init() - GT VF init
+ * @gt: the &xe_gt
+ *
+ * Return: 0 on success, errno on failure
+ */
+int xe_gt_sriov_vf_init(struct xe_gt *gt)
+{
+ if (!xe_sriov_vf_migration_supported(gt_to_xe(gt)))
+ return 0;
+
+ /*
+ * We want to tear down VF post-migration recovery early during driver
+ * unload. Devm actions run in reverse order of registration, so adding
+ * this action late during driver load makes it run early during unload.
+ */
+ return devm_add_action_or_reset(gt_to_xe(gt)->drm.dev,
+ vf_migration_fini, gt);
+}
+
/**
* xe_gt_sriov_vf_recovery_pending() - VF post migration recovery pending
* @gt: the &xe_gt
void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt);
int xe_gt_sriov_vf_init_early(struct xe_gt *gt);
+int xe_gt_sriov_vf_init(struct xe_gt *gt);
bool xe_gt_sriov_vf_recovery_pending(struct xe_gt *gt);
u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt);
struct xe_gt_sriov_vf_migration {
/** @worker: VF migration recovery worker */
struct work_struct worker;
- /** @lock: Protects recovery_queued */
+ /** @lock: Protects recovery_queued and recovery_teardown */
spinlock_t lock;
/** @scratch: Scratch memory for VF recovery */
void *scratch;
+ /** @recovery_teardown: VF post migration recovery is being torn down */
+ bool recovery_teardown;
/** @recovery_queued: VF post migration recovery is queued */
bool recovery_queued;
/** @recovery_inprogress: VF post migration recovery in progress */