git.ipfire.org Git - thirdparty/linux.git/commitdiff
drm/xe: add XE_BO_FLAG_PINNED_LATE_RESTORE
author: Matthew Auld <matthew.auld@intel.com>
Thu, 3 Apr 2025 10:24:45 +0000 (11:24 +0100)
committer: Matthew Auld <matthew.auld@intel.com>
Fri, 4 Apr 2025 10:41:05 +0000 (11:41 +0100)
With the idea of having more pinned objects using the blitter engine
where possible, during suspend/resume, mark the pinned objects which
can be done during the late phase once submission/migration has been
setup. Start out simple with lrc and page-tables from userspace.

v2:
 - s/early_restore/late_restore; early restore was way too bold with too
   many places being impacted at once.
v3:
 - Split late vs early into separate lists, to align with newly added
   apply-to-pinned infra.
v4:
 - Rebase.
v5:
 - Make sure we restore the late phase kernel_bo_present in igpu.

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Satyanarayana K V P <satyanarayana.k.v.p@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Satyanarayana K V P <satyanarayana.k.v.p@intel.com>
Link: https://lore.kernel.org/r/20250403102440.266113-13-matthew.auld@intel.com
drivers/gpu/drm/xe/tests/xe_bo.c
drivers/gpu/drm/xe/xe_bo.c
drivers/gpu/drm/xe/xe_bo.h
drivers/gpu/drm/xe/xe_bo_evict.c
drivers/gpu/drm/xe/xe_bo_evict.h
drivers/gpu/drm/xe/xe_device_types.h
drivers/gpu/drm/xe/xe_lrc.c
drivers/gpu/drm/xe/xe_pm.c
drivers/gpu/drm/xe/xe_pt.c

index 9fde67ca989f9ff1333e1d79314b139d673af5dc..230eb824550ffc82c3dc544fde617989f3317d33 100644 (file)
@@ -252,7 +252,7 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
 
                for_each_gt(__gt, xe, id)
                        xe_gt_sanitize(__gt);
-               err = xe_bo_restore_kernel(xe);
+               err = xe_bo_restore_early(xe);
                /*
                 * Snapshotting the CTB and copying back a potentially old
                 * version seems risky, depending on what might have been
@@ -273,7 +273,7 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
                        goto cleanup_all;
                }
 
-               err = xe_bo_restore_user(xe);
+               err = xe_bo_restore_late(xe);
                if (err) {
                        KUNIT_FAIL(test, "restore user err=%pe\n", ERR_PTR(err));
                        goto cleanup_all;
index 6668a1a5eb93555ace5109d65bd664583d8c58ef..2166087fca090217c57b12e22f4068b885917e06 100644 (file)
@@ -1121,7 +1121,7 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
                goto out_unlock_bo;
        }
 
-       if (xe_bo_is_user(bo)) {
+       if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) {
                struct xe_migrate *migrate;
                struct dma_fence *fence;
 
@@ -1216,7 +1216,7 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
                goto out_backup;
        }
 
-       if (xe_bo_is_user(bo)) {
+       if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) {
                struct xe_migrate *migrate;
                struct dma_fence *fence;
 
@@ -2187,7 +2187,7 @@ int xe_bo_pin_external(struct xe_bo *bo)
                        return err;
 
                spin_lock(&xe->pinned.lock);
-               list_add_tail(&bo->pinned_link, &xe->pinned.external);
+               list_add_tail(&bo->pinned_link, &xe->pinned.late.external);
                spin_unlock(&xe->pinned.lock);
        }
 
@@ -2232,7 +2232,10 @@ int xe_bo_pin(struct xe_bo *bo)
 
        if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) {
                spin_lock(&xe->pinned.lock);
-               list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
+               if (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)
+                       list_add_tail(&bo->pinned_link, &xe->pinned.late.kernel_bo_present);
+               else
+                       list_add_tail(&bo->pinned_link, &xe->pinned.early.kernel_bo_present);
                spin_unlock(&xe->pinned.lock);
        }
 
index 3d6e4902dff351700f171d8cb26bdaf0206dfd5d..f7e716f59948624975ab871a2f63c61fcdfcabb3 100644 (file)
 #define XE_BO_FLAG_NEEDS_2M            BIT(16)
 #define XE_BO_FLAG_GGTT_INVALIDATE     BIT(17)
 #define XE_BO_FLAG_PINNED_NORESTORE    BIT(18)
-#define XE_BO_FLAG_GGTT0                BIT(19)
-#define XE_BO_FLAG_GGTT1                BIT(20)
-#define XE_BO_FLAG_GGTT2                BIT(21)
-#define XE_BO_FLAG_GGTT3                BIT(22)
+#define XE_BO_FLAG_PINNED_LATE_RESTORE BIT(19)
+#define XE_BO_FLAG_GGTT0                BIT(20)
+#define XE_BO_FLAG_GGTT1                BIT(21)
+#define XE_BO_FLAG_GGTT2                BIT(22)
+#define XE_BO_FLAG_GGTT3                BIT(23)
 #define XE_BO_FLAG_GGTT_ALL             (XE_BO_FLAG_GGTT0 | \
                                         XE_BO_FLAG_GGTT1 | \
                                         XE_BO_FLAG_GGTT2 | \
index f83444f7f34d824dd3574208a16c03c6fccbd5be..2bf74eb7f2817d0e280e969cc75a1e9721ae59f3 100644 (file)
@@ -91,10 +91,14 @@ int xe_bo_evict_all(struct xe_device *xe)
                }
        }
 
-       ret = xe_bo_apply_to_pinned(xe, &xe->pinned.external,
-                                   &xe->pinned.external,
+       ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.external,
+                                   &xe->pinned.late.external,
                                    xe_bo_evict_pinned);
 
+       if (!ret)
+               ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
+                                           &xe->pinned.late.evicted, xe_bo_evict_pinned);
+
        /*
         * Wait for all user BO to be evicted as those evictions depend on the
         * memory moved below.
@@ -105,8 +109,8 @@ int xe_bo_evict_all(struct xe_device *xe)
        if (ret)
                return ret;
 
-       return xe_bo_apply_to_pinned(xe, &xe->pinned.kernel_bo_present,
-                                    &xe->pinned.evicted,
+       return xe_bo_apply_to_pinned(xe, &xe->pinned.early.kernel_bo_present,
+                                    &xe->pinned.early.evicted,
                                     xe_bo_evict_pinned);
 }
 
@@ -137,13 +141,14 @@ static int xe_bo_restore_and_map_ggtt(struct xe_bo *bo)
         * We expect validate to trigger a move VRAM and our move code
         * should setup the iosys map.
         */
-       xe_assert(xe, !iosys_map_is_null(&bo->vmap));
+       xe_assert(xe, !(bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE) ||
+                 !iosys_map_is_null(&bo->vmap));
 
        return 0;
 }
 
 /**
- * xe_bo_restore_kernel - restore kernel BOs to VRAM
+ * xe_bo_restore_early - restore early phase kernel BOs to VRAM
  *
  * @xe: xe device
  *
@@ -153,34 +158,44 @@ static int xe_bo_restore_and_map_ggtt(struct xe_bo *bo)
  * This function should be called early, before trying to init the GT, on device
  * resume.
  */
-int xe_bo_restore_kernel(struct xe_device *xe)
+int xe_bo_restore_early(struct xe_device *xe)
 {
-       return xe_bo_apply_to_pinned(xe, &xe->pinned.evicted,
-                                    &xe->pinned.kernel_bo_present,
+       return xe_bo_apply_to_pinned(xe, &xe->pinned.early.evicted,
+                                    &xe->pinned.early.kernel_bo_present,
                                     xe_bo_restore_and_map_ggtt);
 }
 
 /**
- * xe_bo_restore_user - restore pinned user BOs to VRAM
+ * xe_bo_restore_late - restore pinned late phase BOs
  *
  * @xe: xe device
  *
- * Move pinned user BOs from temporary (typically system) memory to VRAM via
- * CPU. All moves done via TTM calls.
+ * Move pinned user and kernel BOs which can use blitter from temporary
+ * (typically system) memory to VRAM. All moves done via TTM calls.
  *
  * This function should be called late, after GT init, on device resume.
  */
-int xe_bo_restore_user(struct xe_device *xe)
+int xe_bo_restore_late(struct xe_device *xe)
 {
        struct xe_tile *tile;
        int ret, id;
 
+       ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.evicted,
+                                   &xe->pinned.late.kernel_bo_present,
+                                   xe_bo_restore_and_map_ggtt);
+
+       for_each_tile(tile, xe, id)
+               xe_tile_migrate_wait(tile);
+
+       if (ret)
+               return ret;
+
        if (!IS_DGFX(xe))
                return 0;
 
        /* Pinned user memory in VRAM should be validated on resume */
-       ret = xe_bo_apply_to_pinned(xe, &xe->pinned.external,
-                                   &xe->pinned.external,
+       ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.external,
+                                   &xe->pinned.late.external,
                                    xe_bo_restore_pinned);
 
        /* Wait for restore to complete */
@@ -195,8 +210,8 @@ static void xe_bo_pci_dev_remove_pinned(struct xe_device *xe)
        struct xe_tile *tile;
        unsigned int id;
 
-       (void)xe_bo_apply_to_pinned(xe, &xe->pinned.external,
-                                   &xe->pinned.external,
+       (void)xe_bo_apply_to_pinned(xe, &xe->pinned.late.external,
+                                   &xe->pinned.late.external,
                                    xe_bo_dma_unmap_pinned);
        for_each_tile(tile, xe, id)
                xe_tile_migrate_wait(tile);
@@ -241,8 +256,11 @@ static void xe_bo_pinned_fini(void *arg)
 {
        struct xe_device *xe = arg;
 
-       (void)xe_bo_apply_to_pinned(xe, &xe->pinned.kernel_bo_present,
-                                   &xe->pinned.kernel_bo_present,
+       (void)xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
+                                   &xe->pinned.late.kernel_bo_present,
+                                   xe_bo_dma_unmap_pinned);
+       (void)xe_bo_apply_to_pinned(xe, &xe->pinned.early.kernel_bo_present,
+                                   &xe->pinned.early.kernel_bo_present,
                                    xe_bo_dma_unmap_pinned);
 }
 
@@ -259,9 +277,11 @@ static void xe_bo_pinned_fini(void *arg)
 int xe_bo_pinned_init(struct xe_device *xe)
 {
        spin_lock_init(&xe->pinned.lock);
-       INIT_LIST_HEAD(&xe->pinned.kernel_bo_present);
-       INIT_LIST_HEAD(&xe->pinned.external);
-       INIT_LIST_HEAD(&xe->pinned.evicted);
+       INIT_LIST_HEAD(&xe->pinned.early.kernel_bo_present);
+       INIT_LIST_HEAD(&xe->pinned.early.evicted);
+       INIT_LIST_HEAD(&xe->pinned.late.kernel_bo_present);
+       INIT_LIST_HEAD(&xe->pinned.late.evicted);
+       INIT_LIST_HEAD(&xe->pinned.late.external);
 
        return devm_add_action_or_reset(xe->drm.dev, xe_bo_pinned_fini, xe);
 }
index 0708d50ddfa8c6c3844434c09aa96ecfba7d8173..d63eb3fc5cc914ca75d19cd719fa028d2d6e8ea2 100644 (file)
@@ -9,8 +9,8 @@
 struct xe_device;
 
 int xe_bo_evict_all(struct xe_device *xe);
-int xe_bo_restore_kernel(struct xe_device *xe);
-int xe_bo_restore_user(struct xe_device *xe);
+int xe_bo_restore_early(struct xe_device *xe);
+int xe_bo_restore_late(struct xe_device *xe);
 
 void xe_bo_pci_dev_remove_all(struct xe_device *xe);
 
index 0cc14daf75d3760e8533a38092666bf9ad6f9fb4..a68cc47ce014df4dc53de1d3789ded2d66fc5102 100644 (file)
@@ -422,12 +422,22 @@ struct xe_device {
        struct {
                /** @pinned.lock: protected pinned BO list state */
                spinlock_t lock;
-               /** @pinned.kernel_bo_present: pinned kernel BO that are present */
-               struct list_head kernel_bo_present;
-               /** @pinned.evicted: pinned BO that have been evicted */
-               struct list_head evicted;
-               /** @pinned.external: pinned external and dma-buf. */
-               struct list_head external;
+               /** @pinned.early: early pinned lists */
+               struct {
+                       /** @pinned.early.kernel_bo_present: pinned kernel BO that are present */
+                       struct list_head kernel_bo_present;
+                       /** @pinned.early.evicted: pinned BO that have been evicted */
+                       struct list_head evicted;
+               } early;
+               /** @pinned.late: late pinned lists */
+               struct {
+                       /** @pinned.late.kernel_bo_present: pinned kernel BO that are present */
+                       struct list_head kernel_bo_present;
+                       /** @pinned.late.evicted: pinned BO that have been evicted */
+                       struct list_head evicted;
+                       /** @pinned.external: pinned external and dma-buf. */
+                       struct list_head external;
+               } late;
        } pinned;
 
        /** @ufence_wq: user fence wait queue */
index 2639a3dfc9f7b3f420acbaf2c54c40efeb7bf354..855c8acaf3f1e74d2c2fcbab75710384e9c344df 100644 (file)
@@ -896,6 +896,7 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
        void *init_data = NULL;
        u32 arb_enable;
        u32 lrc_size;
+       u32 bo_flags;
        int err;
 
        kref_init(&lrc->refcount);
@@ -904,15 +905,18 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
        if (xe_gt_has_indirect_ring_state(gt))
                lrc->flags |= XE_LRC_FLAG_INDIRECT_RING_STATE;
 
+       bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT |
+                  XE_BO_FLAG_GGTT_INVALIDATE;
+       if (vm && vm->xef) /* userspace */
+               bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE;
+
        /*
         * FIXME: Perma-pinning LRC as we don't yet support moving GGTT address
         * via VM bind calls.
         */
        lrc->bo = xe_bo_create_pin_map(xe, tile, vm, lrc_size,
                                       ttm_bo_type_kernel,
-                                      XE_BO_FLAG_VRAM_IF_DGFX(tile) |
-                                      XE_BO_FLAG_GGTT |
-                                      XE_BO_FLAG_GGTT_INVALIDATE);
+                                      bo_flags);
        if (IS_ERR(lrc->bo))
                return PTR_ERR(lrc->bo);
 
index 7b6b754ad6eb787429d6b80f4e4182028dcd631d..4e112fbacada45ec588656aa59a0b8cc35827bcd 100644 (file)
@@ -188,7 +188,7 @@ int xe_pm_resume(struct xe_device *xe)
         * This only restores pinned memory which is the memory required for the
         * GT(s) to resume.
         */
-       err = xe_bo_restore_kernel(xe);
+       err = xe_bo_restore_early(xe);
        if (err)
                goto err;
 
@@ -199,7 +199,7 @@ int xe_pm_resume(struct xe_device *xe)
 
        xe_display_pm_resume(xe);
 
-       err = xe_bo_restore_user(xe);
+       err = xe_bo_restore_late(xe);
        if (err)
                goto err;
 
@@ -484,7 +484,7 @@ int xe_pm_runtime_resume(struct xe_device *xe)
                 * This only restores pinned memory which is the memory
                 * required for the GT(s) to resume.
                 */
-               err = xe_bo_restore_kernel(xe);
+               err = xe_bo_restore_early(xe);
                if (err)
                        goto out;
        }
@@ -497,7 +497,7 @@ int xe_pm_runtime_resume(struct xe_device *xe)
        xe_display_pm_runtime_resume(xe);
 
        if (xe->d3cold.allowed) {
-               err = xe_bo_restore_user(xe);
+               err = xe_bo_restore_late(xe);
                if (err)
                        goto out;
        }
index 33839b25d7086625b22ef14aa6714c91be3633fb..49f2908f30d31d8db98655c5066d55cd765ac154 100644 (file)
@@ -103,6 +103,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
 {
        struct xe_pt *pt;
        struct xe_bo *bo;
+       u32 bo_flags;
        int err;
 
        if (level) {
@@ -115,14 +116,16 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
        if (!pt)
                return ERR_PTR(-ENOMEM);
 
+       bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+                  XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE | XE_BO_FLAG_PINNED |
+                  XE_BO_FLAG_NO_RESV_EVICT | XE_BO_FLAG_PAGETABLE;
+       if (vm->xef) /* userspace */
+               bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE;
+
        pt->level = level;
        bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4K,
                                  ttm_bo_type_kernel,
-                                 XE_BO_FLAG_VRAM_IF_DGFX(tile) |
-                                 XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE |
-                                 XE_BO_FLAG_PINNED |
-                                 XE_BO_FLAG_NO_RESV_EVICT |
-                                 XE_BO_FLAG_PAGETABLE);
+                                 bo_flags);
        if (IS_ERR(bo)) {
                err = PTR_ERR(bo);
                goto err_kfree;