git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
drm/xe/vf: Remove memory allocations from VF post migration recovery
author Matthew Brost <matthew.brost@intel.com>
Wed, 8 Oct 2025 21:45:09 +0000 (14:45 -0700)
committer Matthew Brost <matthew.brost@intel.com>
Thu, 9 Oct 2025 10:22:29 +0000 (03:22 -0700)
VF post migration recovery is the path of dma-fence signaling / reclaim,
avoid memory allocations in this path.

v3:
 - s/lrc_wa_bb/scratch (Tomasz)

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Tomasz Lis <tomasz.lis@intel.com>
Link: https://lore.kernel.org/r/20251008214532.3442967-12-matthew.brost@intel.com
drivers/gpu/drm/xe/xe_gt_sriov_vf.c
drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h

index 93612ea5029fb15edcb283b6b68be8ac15bd3876..737243fd3d40a16ff63ba56ca645eb163f978b2d 100644 (file)
@@ -1165,17 +1165,13 @@ static size_t post_migration_scratch_size(struct xe_device *xe)
 
 static int vf_post_migration_fixups(struct xe_gt *gt)
 {
+       void *buf = gt->sriov.vf.migration.scratch;
        s64 shift;
-       void *buf;
        int err;
 
-       buf = kmalloc(post_migration_scratch_size(gt_to_xe(gt)), GFP_ATOMIC);
-       if (!buf)
-               return -ENOMEM;
-
        err = xe_gt_sriov_vf_query_config(gt);
        if (err)
-               goto out;
+               return err;
 
        shift = xe_gt_sriov_vf_ggtt_shift(gt);
        if (shift) {
@@ -1183,12 +1179,10 @@ static int vf_post_migration_fixups(struct xe_gt *gt)
                xe_gt_sriov_vf_default_lrcs_hwsp_rebase(gt);
                err = xe_guc_contexts_hwsp_rebase(&gt->uc.guc, buf);
                if (err)
-                       goto out;
+                       return err;
        }
 
-out:
-       kfree(buf);
-       return err;
+       return 0;
 }
 
 static void vf_post_migration_kickstart(struct xe_gt *gt)
@@ -1273,9 +1267,18 @@ static void migration_worker_func(struct work_struct *w)
  */
 int xe_gt_sriov_vf_init_early(struct xe_gt *gt)
 {
+       void *buf;
+
        if (!xe_sriov_vf_migration_supported(gt_to_xe(gt)))
                return 0;
 
+       buf = drmm_kmalloc(&gt_to_xe(gt)->drm,
+                          post_migration_scratch_size(gt_to_xe(gt)),
+                          GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       gt->sriov.vf.migration.scratch = buf;
        spin_lock_init(&gt->sriov.vf.migration.lock);
        INIT_WORK(&gt->sriov.vf.migration.worker, migration_worker_func);
 
index b2c8e8c89c3016825e535ab8f252d289386e8e96..e753646debc4d1f9a5b25d8941543e794bf0bd82 100644 (file)
@@ -55,6 +55,8 @@ struct xe_gt_sriov_vf_migration {
        struct work_struct worker;
        /** @lock: Protects recovery_queued */
        spinlock_t lock;
+       /** @scratch: Scratch memory for VF recovery */
+       void *scratch;
        /** @recovery_queued: VF post migration recovery in queued */
        bool recovery_queued;
        /** @recovery_inprogress: VF post migration recovery in progress */