kvfree(sa_manager->cpu_ptr);
sa_manager->bo = NULL;
+ sa_manager->shadow = NULL;
}
/**
* @size: number of bytes to allocate
* @guard: number of bytes to exclude from suballocations
* @align: alignment for each suballocated chunk
+ * @flags: XE_SA_BO_MANAGER_FLAG_* flags controlling optional suballocator features
*
* Prepares the suballocation manager for suballocations.
*
* Return: a pointer to the &xe_sa_manager or an ERR_PTR on failure.
*/
-struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 guard, u32 align)
+struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size,
+ u32 guard, u32 align, u32 flags)
{
struct xe_device *xe = tile_to_xe(tile);
struct xe_sa_manager *sa_manager;
memset(sa_manager->cpu_ptr, 0, bo->ttm.base.size);
}
+ if (flags & XE_SA_BO_MANAGER_FLAG_SHADOW) {
+ struct xe_bo *shadow;
+
+ ret = drmm_mutex_init(&xe->drm, &sa_manager->swap_guard);
+ if (ret)
+ return ERR_PTR(ret);
+
+ shadow = xe_managed_bo_create_pin_map(xe, tile, size,
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_PINNED_NORESTORE);
+ if (IS_ERR(shadow)) {
+ drm_err(&xe->drm, "Failed to prepare %uKiB BO for SA manager (%pe)\n",
+ size / SZ_1K, shadow);
+ return ERR_CAST(shadow);
+ }
+ sa_manager->shadow = shadow;
+ }
+
drm_suballoc_manager_init(&sa_manager->base, managed_size, align);
ret = drmm_add_action_or_reset(&xe->drm, xe_sa_bo_manager_fini,
sa_manager);
return sa_manager;
}
+/**
+ * xe_sa_bo_swap_shadow() - Swap the SA BO with shadow BO.
+ * @sa_manager: the XE sub allocator manager
+ *
+ * Swaps the sub-allocator primary buffer object with shadow buffer object.
+ * The caller must hold the manager's swap_guard mutex (see
+ * xe_sa_bo_swap_guard()) and the manager must have been initialized with
+ * XE_SA_BO_MANAGER_FLAG_SHADOW so that a shadow BO exists.
+ *
+ * Return: None.
+ */
+void xe_sa_bo_swap_shadow(struct xe_sa_manager *sa_manager)
+{
+ struct xe_device *xe = tile_to_xe(sa_manager->bo->tile);
+
+ xe_assert(xe, sa_manager->shadow);
+ lockdep_assert_held(&sa_manager->swap_guard);
+
+ /* Exchange primary and shadow BOs; new suballocations will now be
+  * served out of what used to be the shadow BO.
+  */
+ swap(sa_manager->bo, sa_manager->shadow);
+ /* Keep the cached CPU pointer in sync with the new primary BO.
+  * NOTE(review): cpu_ptr is left stale for iomem mappings -
+  * presumably unused in that case; confirm.
+  */
+ if (!sa_manager->bo->vmap.is_iomem)
+ sa_manager->cpu_ptr = sa_manager->bo->vmap.vaddr;
+}
+
+/**
+ * xe_sa_bo_sync_shadow() - Sync the SA Shadow BO with primary BO.
+ * @sa_bo: the sub-allocator buffer object.
+ *
+ * Synchronize sub-allocator shadow buffer object with primary buffer object.
+ * Copies the bytes covered by @sa_bo from the primary BO's CPU mapping into
+ * the same offset range of the shadow BO, so the two BOs remain
+ * interchangeable across xe_sa_bo_swap_shadow().
+ *
+ * The caller must hold the manager's swap_guard mutex and the manager must
+ * have been initialized with XE_SA_BO_MANAGER_FLAG_SHADOW.
+ *
+ * Return: None.
+ */
+void xe_sa_bo_sync_shadow(struct drm_suballoc *sa_bo)
+{
+ struct xe_sa_manager *sa_manager = to_xe_sa_manager(sa_bo->manager);
+ struct xe_device *xe = tile_to_xe(sa_manager->bo->tile);
+
+ xe_assert(xe, sa_manager->shadow);
+ lockdep_assert_held(&sa_manager->swap_guard);
+
+ /* Mirror this suballocation into the shadow BO at the same offset. */
+ xe_map_memcpy_to(xe, &sa_manager->shadow->vmap,
+ drm_suballoc_soffset(sa_bo),
+ xe_sa_bo_cpu_addr(sa_bo),
+ drm_suballoc_size(sa_bo));
+}
+
/**
* __xe_sa_bo_new() - Make a suballocation but use custom gfp flags.
* @sa_manager: the &xe_sa_manager
struct dma_fence;
struct xe_tile;
-struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 guard, u32 align);
+#define XE_SA_BO_MANAGER_FLAG_SHADOW BIT(0)
+struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size,
+ u32 guard, u32 align, u32 flags);
struct drm_suballoc *__xe_sa_bo_new(struct xe_sa_manager *sa_manager, u32 size, gfp_t gfp);
+/* Convenience wrapper: SZ_4K guard and no flags (i.e. no shadow BO). */
static inline struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 align)
{
- return __xe_sa_bo_manager_init(tile, size, SZ_4K, align);
+ return __xe_sa_bo_manager_init(tile, size, SZ_4K, align, 0);
}
/**
drm_suballoc_soffset(sa);
}
+void xe_sa_bo_swap_shadow(struct xe_sa_manager *sa_manager);
+void xe_sa_bo_sync_shadow(struct drm_suballoc *sa_bo);
+
+/**
+ * xe_sa_bo_swap_guard() - Retrieve the SA BO swap guard within sub-allocator.
+ * @sa_manager: the &xe_sa_manager
+ *
+ * The returned mutex must be held by callers of xe_sa_bo_swap_shadow() and
+ * xe_sa_bo_sync_shadow().
+ *
+ * Return: Sub-allocator swap guard mutex.
+ */
+static inline struct mutex *xe_sa_bo_swap_guard(struct xe_sa_manager *sa_manager)
+{
+ return &sa_manager->swap_guard;
+}
+
#endif
offset = 0;
xe_map_memset(xe, &sa_manager->bo->vmap, offset, MI_NOOP,
bb_pool_size);
+ xe_map_memset(xe, &sa_manager->shadow->vmap, offset, MI_NOOP,
+ bb_pool_size);
offset = bb_pool_size - sizeof(u32);
xe_map_wr(xe, &sa_manager->bo->vmap, offset, u32, MI_BATCH_BUFFER_END);
+ xe_map_wr(xe, &sa_manager->shadow->vmap, offset, u32, MI_BATCH_BUFFER_END);
ctx->mem.ccs_bb_pool = sa_manager;