for_each_gt(__gt, xe, id)
xe_gt_sanitize(__gt);
- err = xe_bo_restore_kernel(xe);
+ err = xe_bo_restore_early(xe);
/*
* Snapshotting the CTB and copying back a potentially old
* version seems risky, depending on what might have been
goto cleanup_all;
}
- err = xe_bo_restore_user(xe);
+ err = xe_bo_restore_late(xe);
if (err) {
KUNIT_FAIL(test, "restore user err=%pe\n", ERR_PTR(err));
goto cleanup_all;
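Taken together, the KUnit hunks above exercise the whole evict/restore cycle under the new names. A condensed sketch of that cycle, assuming the surrounding test looks roughly like the excerpt (this hypothetical function elides the KUNIT_FAIL reporting around each step):

static int evict_restore_cycle_sketch(struct xe_device *xe)
{
	struct xe_gt *__gt;
	unsigned int id;
	int err;

	err = xe_bo_evict_all(xe);	/* push pinned BOs out of VRAM */
	if (err)
		return err;

	for_each_gt(__gt, xe, id)
		xe_gt_sanitize(__gt);	/* wipe GT state, as across suspend */

	err = xe_bo_restore_early(xe);	/* kernel BOs needed before GT init */
	if (err)
		return err;

	/* on a real resume, GT init happens between the two restore phases */

	return xe_bo_restore_late(xe);	/* user BOs + late-restore kernel BOs */
}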
goto out_unlock_bo;
}
- if (xe_bo_is_user(bo)) {
+ if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) {
struct xe_migrate *migrate;
struct dma_fence *fence;
goto out_backup;
}
- if (xe_bo_is_user(bo)) {
+ if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) {
struct xe_migrate *migrate;
struct dma_fence *fence;
return err;
spin_lock(&xe->pinned.lock);
- list_add_tail(&bo->pinned_link, &xe->pinned.external);
+ list_add_tail(&bo->pinned_link, &xe->pinned.late.external);
spin_unlock(&xe->pinned.lock);
}
if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) {
spin_lock(&xe->pinned.lock);
- list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
+ if (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)
+ list_add_tail(&bo->pinned_link, &xe->pinned.late.kernel_bo_present);
+ else
+ list_add_tail(&bo->pinned_link, &xe->pinned.early.kernel_bo_present);
spin_unlock(&xe->pinned.lock);
}
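The two hunks above pick the list a freshly pinned BO lands on. Read as a single rule, with a hypothetical helper that is not part of the patch:

static struct list_head *pinned_list_for_bo(struct xe_device *xe,
					    struct xe_bo *bo)
{
	/* external and dma-buf BOs are always restorable late */
	if (xe_bo_is_user(bo))
		return &xe->pinned.late.external;

	/* kernel BOs split on whether the blitter may restore them */
	if (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)
		return &xe->pinned.late.kernel_bo_present;

	return &xe->pinned.early.kernel_bo_present;
}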
#define XE_BO_FLAG_NEEDS_2M BIT(16)
#define XE_BO_FLAG_GGTT_INVALIDATE BIT(17)
#define XE_BO_FLAG_PINNED_NORESTORE BIT(18)
-#define XE_BO_FLAG_GGTT0 BIT(19)
-#define XE_BO_FLAG_GGTT1 BIT(20)
-#define XE_BO_FLAG_GGTT2 BIT(21)
-#define XE_BO_FLAG_GGTT3 BIT(22)
+#define XE_BO_FLAG_PINNED_LATE_RESTORE BIT(19)
+#define XE_BO_FLAG_GGTT0 BIT(20)
+#define XE_BO_FLAG_GGTT1 BIT(21)
+#define XE_BO_FLAG_GGTT2 BIT(22)
+#define XE_BO_FLAG_GGTT3 BIT(23)
#define XE_BO_FLAG_GGTT_ALL (XE_BO_FLAG_GGTT0 | \
XE_BO_FLAG_GGTT1 | \
XE_BO_FLAG_GGTT2 | \
}
}
- ret = xe_bo_apply_to_pinned(xe, &xe->pinned.external,
- &xe->pinned.external,
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.external,
+ &xe->pinned.late.external,
xe_bo_evict_pinned);
+ if (!ret)
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
+ &xe->pinned.late.evicted, xe_bo_evict_pinned);
+
/*
* Wait for all user BOs to be evicted, as those evictions depend on the
* memory moved below.
if (ret)
return ret;
- return xe_bo_apply_to_pinned(xe, &xe->pinned.kernel_bo_present,
- &xe->pinned.evicted,
+ return xe_bo_apply_to_pinned(xe, &xe->pinned.early.kernel_bo_present,
+ &xe->pinned.early.evicted,
xe_bo_evict_pinned);
}
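With both call sites updated, xe_bo_evict_all() drains three lists in a fixed order; per the in-code comment, the user/late evictions depend on memory that is only moved afterwards, so the early kernel list goes last. A condensed sketch with the intermediate waits elided:

static int evict_all_order_sketch(struct xe_device *xe)
{
	int ret;

	/* late lists first, while their dependencies are still resident */
	ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.external,
				    &xe->pinned.late.external,
				    xe_bo_evict_pinned);
	if (!ret)
		ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
					    &xe->pinned.late.evicted,
					    xe_bo_evict_pinned);
	if (ret)
		return ret;

	/* ... wait for the evictions above to land ... */

	/* early kernel BOs last; these are restored by CPU before GT init */
	return xe_bo_apply_to_pinned(xe, &xe->pinned.early.kernel_bo_present,
				     &xe->pinned.early.evicted,
				     xe_bo_evict_pinned);
}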
* We expect validate to trigger a move to VRAM and our move code
* should set up the iosys map.
*/
- xe_assert(xe, !iosys_map_is_null(&bo->vmap));
+ xe_assert(xe, !(bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE) ||
+ !iosys_map_is_null(&bo->vmap));
return 0;
}
/**
- * xe_bo_restore_kernel - restore kernel BOs to VRAM
+ * xe_bo_restore_early - restore early phase kernel BOs to VRAM
*
* @xe: xe device
*
* This function should be called early, before trying to init the GT, on device
* resume.
*/
-int xe_bo_restore_kernel(struct xe_device *xe)
+int xe_bo_restore_early(struct xe_device *xe)
{
- return xe_bo_apply_to_pinned(xe, &xe->pinned.evicted,
- &xe->pinned.kernel_bo_present,
+ return xe_bo_apply_to_pinned(xe, &xe->pinned.early.evicted,
+ &xe->pinned.early.kernel_bo_present,
xe_bo_restore_and_map_ggtt);
}
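Every list transition in this patch goes through xe_bo_apply_to_pinned(). Its body is not part of this diff, so the sketch below is an assumption about its contract, inferred from the call sites: run @pinned_fn on each BO of @pinned_list and collect the processed BOs on @new_list, taking xe->pinned.lock around the list surgery only. The local still_in_list is needed because several callers pass the same list for both arguments:

static int apply_to_pinned_sketch(struct xe_device *xe,
				  struct list_head *pinned_list,
				  struct list_head *new_list,
				  int (*pinned_fn)(struct xe_bo *bo))
{
	LIST_HEAD(still_in_list);	/* keeps pinned_list == new_list from looping */
	struct xe_bo *bo;
	int ret = 0;

	spin_lock(&xe->pinned.lock);
	while (!ret &&
	       (bo = list_first_entry_or_null(pinned_list, typeof(*bo),
					      pinned_link))) {
		xe_bo_get(bo);		/* keep the BO alive across the unlock */
		list_move_tail(&bo->pinned_link, &still_in_list);
		spin_unlock(&xe->pinned.lock);

		ret = pinned_fn(bo);	/* evict, restore or unmap callback */

		xe_bo_put(bo);
		spin_lock(&xe->pinned.lock);
	}
	/* simplified: on error, the failing BO also ends up on @new_list */
	list_splice_tail(&still_in_list, new_list);
	spin_unlock(&xe->pinned.lock);

	return ret;
}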
/**
- * xe_bo_restore_user - restore pinned user BOs to VRAM
+ * xe_bo_restore_late - restore pinned late phase BOs
*
* @xe: xe device
*
- * Move pinned user BOs from temporary (typically system) memory to VRAM via
- * CPU. All moves done via TTM calls.
+ * Move pinned user and kernel BOs which can use the blitter from temporary
+ * (typically system) memory to VRAM. All moves are done via TTM calls.
*
* This function should be called late, after GT init, on device resume.
*/
-int xe_bo_restore_user(struct xe_device *xe)
+int xe_bo_restore_late(struct xe_device *xe)
{
struct xe_tile *tile;
int ret, id;
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.evicted,
+ &xe->pinned.late.kernel_bo_present,
+ xe_bo_restore_and_map_ggtt);
+
+ for_each_tile(tile, xe, id)
+ xe_tile_migrate_wait(tile);
+
+ if (ret)
+ return ret;
+
if (!IS_DGFX(xe))
return 0;
/* Pinned user memory in VRAM should be validated on resume */
- ret = xe_bo_apply_to_pinned(xe, &xe->pinned.external,
- &xe->pinned.external,
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.external,
+ &xe->pinned.late.external,
xe_bo_restore_pinned);
/* Wait for restore to complete */
struct xe_tile *tile;
unsigned int id;
- (void)xe_bo_apply_to_pinned(xe, &xe->pinned.external,
- &xe->pinned.external,
+ (void)xe_bo_apply_to_pinned(xe, &xe->pinned.late.external,
+ &xe->pinned.late.external,
xe_bo_dma_unmap_pinned);
for_each_tile(tile, xe, id)
xe_tile_migrate_wait(tile);
{
struct xe_device *xe = arg;
- (void)xe_bo_apply_to_pinned(xe, &xe->pinned.kernel_bo_present,
- &xe->pinned.kernel_bo_present,
+ (void)xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
+ &xe->pinned.late.kernel_bo_present,
+ xe_bo_dma_unmap_pinned);
+ (void)xe_bo_apply_to_pinned(xe, &xe->pinned.early.kernel_bo_present,
+ &xe->pinned.early.kernel_bo_present,
xe_bo_dma_unmap_pinned);
}
int xe_bo_pinned_init(struct xe_device *xe)
{
spin_lock_init(&xe->pinned.lock);
- INIT_LIST_HEAD(&xe->pinned.kernel_bo_present);
- INIT_LIST_HEAD(&xe->pinned.external);
- INIT_LIST_HEAD(&xe->pinned.evicted);
+ INIT_LIST_HEAD(&xe->pinned.early.kernel_bo_present);
+ INIT_LIST_HEAD(&xe->pinned.early.evicted);
+ INIT_LIST_HEAD(&xe->pinned.late.kernel_bo_present);
+ INIT_LIST_HEAD(&xe->pinned.late.evicted);
+ INIT_LIST_HEAD(&xe->pinned.late.external);
return devm_add_action_or_reset(xe->drm.dev, xe_bo_pinned_fini, xe);
}
struct xe_device;
int xe_bo_evict_all(struct xe_device *xe);
-int xe_bo_restore_kernel(struct xe_device *xe);
-int xe_bo_restore_user(struct xe_device *xe);
+int xe_bo_restore_early(struct xe_device *xe);
+int xe_bo_restore_late(struct xe_device *xe);
void xe_bo_pci_dev_remove_all(struct xe_device *xe);
struct {
/** @pinned.lock: protected pinned BO list state */
spinlock_t lock;
- /** @pinned.kernel_bo_present: pinned kernel BO that are present */
- struct list_head kernel_bo_present;
- /** @pinned.evicted: pinned BO that have been evicted */
- struct list_head evicted;
- /** @pinned.external: pinned external and dma-buf. */
- struct list_head external;
+ /** @pinned.early: early pinned lists */
+ struct {
+ /** @pinned.early.kernel_bo_present: pinned kernel BOs that are present */
+ struct list_head kernel_bo_present;
+ /** @pinned.early.evicted: pinned BOs that have been evicted */
+ struct list_head evicted;
+ } early;
+ /** @pinned.late: late pinned lists */
+ struct {
+ /** @pinned.late.kernel_bo_present: pinned kernel BOs that are present */
+ struct list_head kernel_bo_present;
+ /** @pinned.late.evicted: pinned BOs that have been evicted */
+ struct list_head evicted;
+ /** @pinned.late.external: pinned external and dma-buf BOs. */
+ struct list_head external;
+ } late;
} pinned;
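Assembled from the hunk above, the resulting layout of xe->pinned is a pair of nested phase structs (kernel-doc trimmed here for brevity):

struct {
	spinlock_t lock;	/* protects every list below */
	struct {
		struct list_head kernel_bo_present;	/* CPU-restored before GT init */
		struct list_head evicted;
	} early;
	struct {
		struct list_head kernel_bo_present;	/* blitter-restorable after GT init */
		struct list_head evicted;
		struct list_head external;	/* pinned external and dma-buf BOs */
	} late;
} pinned;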
/** @ufence_wq: user fence wait queue */
void *init_data = NULL;
u32 arb_enable;
u32 lrc_size;
+ u32 bo_flags;
int err;
kref_init(&lrc->refcount);
if (xe_gt_has_indirect_ring_state(gt))
lrc->flags |= XE_LRC_FLAG_INDIRECT_RING_STATE;
+ bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE;
+ if (vm && vm->xef) /* userspace */
+ bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE;
+
/*
* FIXME: Perma-pinning LRC as we don't yet support moving GGTT address
* via VM bind calls.
*/
lrc->bo = xe_bo_create_pin_map(xe, tile, vm, lrc_size,
ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ bo_flags);
if (IS_ERR(lrc->bo))
return PTR_ERR(lrc->bo);
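The vm && vm->xef test above reappears for page-table BOs at the end of this patch; both sites encode the same rule, which a hypothetical helper (purely illustrative, not in the patch) makes explicit:

/* BOs backing a userspace-owned VM may be restored late, once the
 * migration engine is available to blit them back into VRAM. */
static u32 xe_bo_late_restore_flag(const struct xe_vm *vm)
{
	return (vm && vm->xef) ? XE_BO_FLAG_PINNED_LATE_RESTORE : 0;
}

Both allocation sites would then reduce to bo_flags |= xe_bo_late_restore_flag(vm);.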
* This only restores pinned memory which is the memory required for the
* GT(s) to resume.
*/
- err = xe_bo_restore_kernel(xe);
+ err = xe_bo_restore_early(xe);
if (err)
goto err;
xe_display_pm_resume(xe);
- err = xe_bo_restore_user(xe);
+ err = xe_bo_restore_late(xe);
if (err)
goto err;
* This only restores pinned memory which is the memory
* required for the GT(s) to resume.
*/
- err = xe_bo_restore_kernel(xe);
+ err = xe_bo_restore_early(xe);
if (err)
goto out;
}
xe_display_pm_runtime_resume(xe);
if (xe->d3cold.allowed) {
- err = xe_bo_restore_user(xe);
+ err = xe_bo_restore_late(xe);
if (err)
goto out;
}
{
struct xe_pt *pt;
struct xe_bo *bo;
+ u32 bo_flags;
int err;
if (level) {
if (!pt)
return ERR_PTR(-ENOMEM);
+ bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE | XE_BO_FLAG_PINNED |
+ XE_BO_FLAG_NO_RESV_EVICT | XE_BO_FLAG_PAGETABLE;
+ if (vm->xef) /* userspace */
+ bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE;
+
pt->level = level;
bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4K,
ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE |
- XE_BO_FLAG_PINNED |
- XE_BO_FLAG_NO_RESV_EVICT |
- XE_BO_FLAG_PAGETABLE);
+ bo_flags);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
goto err_kfree;