static int vf_post_migration_fixups(struct xe_gt *gt)
{
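+ /* Reuse the scratch buffer preallocated in xe_gt_sriov_vf_init_early() */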
+ void *buf = gt->sriov.vf.migration.scratch;
s64 shift;
- void *buf;
int err;
- buf = kmalloc(post_migration_scratch_size(gt_to_xe(gt)), GFP_ATOMIC);
- if (!buf)
- return -ENOMEM;
-
err = xe_gt_sriov_vf_query_config(gt);
if (err)
- goto out;
+ return err;
shift = xe_gt_sriov_vf_ggtt_shift(gt);
if (shift) {
xe_gt_sriov_vf_default_lrcs_hwsp_rebase(gt);
err = xe_guc_contexts_hwsp_rebase(&gt->uc.guc, buf);
if (err)
- goto out;
+ return err;
}
-out:
- kfree(buf);
- return err;
+ return 0;
}
static void vf_post_migration_kickstart(struct xe_gt *gt)
*/
int xe_gt_sriov_vf_init_early(struct xe_gt *gt)
{
+ void *buf;
+
if (!xe_sriov_vf_migration_supported(gt_to_xe(gt)))
return 0;
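+
+ /*
+  * Preallocate the post-migration scratch buffer as a drmm-managed
+  * allocation, so it is freed together with the device and the
+  * recovery path no longer needs a GFP_ATOMIC kmalloc.
+  */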
+ buf = drmm_kmalloc(&gt_to_xe(gt)->drm,
+ post_migration_scratch_size(gt_to_xe(gt)),
+ GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ gt->sriov.vf.migration.scratch = buf;
spin_lock_init(&gt->sriov.vf.migration.lock);
INIT_WORK(&gt->sriov.vf.migration.worker, migration_worker_func);
struct work_struct worker;
/** @lock: Protects recovery_queued */
spinlock_t lock;
+ /** @scratch: Scratch memory for VF recovery */
+ void *scratch;
/** @recovery_queued: VF post migration recovery is queued */
bool recovery_queued;
/** @recovery_inprogress: VF post migration recovery in progress */