From: Satyanarayana K V P
Date: Tue, 18 Nov 2025 12:07:44 +0000 (+0000)
Subject: drm/xe/sa: Shadow buffer support in the sub-allocator pool
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=1f2cf5295cdba71818288c9e495b4ef5097565ed;p=thirdparty%2Flinux.git

drm/xe/sa: Shadow buffer support in the sub-allocator pool

The existing sub-allocator is limited to managing a single buffer
object. This enhancement introduces shadow buffer functionality to
support scenarios requiring dual buffer management.

The changes include shadow buffer object creation capability,
management of both primary and shadow buffers, and appropriate
locking mechanisms for thread-safe operations.

This enables more flexible buffer allocation strategies in scenarios
where shadow buffering is required.

Signed-off-by: Satyanarayana K V P
Suggested-by: Matthew Brost
Cc: Michal Wajdeczko
Cc: Matthew Auld
Reviewed-by: Matthew Brost
Signed-off-by: Matthew Brost
Link: https://patch.msgid.link/20251118120745.3460172-2-satyanarayana.k.v.p@intel.com
---

diff --git a/drivers/gpu/drm/xe/xe_guc_buf.c b/drivers/gpu/drm/xe/xe_guc_buf.c
index 3ce4425001303..c36fc31e04383 100644
--- a/drivers/gpu/drm/xe/xe_guc_buf.c
+++ b/drivers/gpu/drm/xe/xe_guc_buf.c
@@ -30,7 +30,7 @@ static int guc_buf_cache_init(struct xe_guc_buf_cache *cache, u32 size)
 	struct xe_gt *gt = cache_to_gt(cache);
 	struct xe_sa_manager *sam;
 
-	sam = __xe_sa_bo_manager_init(gt_to_tile(gt), size, 0, sizeof(u32));
+	sam = __xe_sa_bo_manager_init(gt_to_tile(gt), size, 0, sizeof(u32), 0);
 	if (IS_ERR(sam))
 		return PTR_ERR(sam);
 	cache->sam = sam;
diff --git a/drivers/gpu/drm/xe/xe_sa.c b/drivers/gpu/drm/xe/xe_sa.c
index 63a5263dcf1b1..a87c1436c7c10 100644
--- a/drivers/gpu/drm/xe/xe_sa.c
+++ b/drivers/gpu/drm/xe/xe_sa.c
@@ -29,6 +29,7 @@ static void xe_sa_bo_manager_fini(struct drm_device *drm, void *arg)
 		kvfree(sa_manager->cpu_ptr);
 
 	sa_manager->bo = NULL;
+	sa_manager->shadow = NULL;
 }
 
 /**
@@ -37,12 +38,14 @@ static void xe_sa_bo_manager_fini(struct drm_device *drm, void *arg)
  * @size: number of bytes to allocate
  * @guard: number of bytes to exclude from suballocations
  * @align: alignment for each suballocated chunk
+ * @flags: flags for suballocator
  *
  * Prepares the suballocation manager for suballocations.
  *
  * Return: a pointer to the &xe_sa_manager or an ERR_PTR on failure.
  */
-struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 guard, u32 align)
+struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size,
+					      u32 guard, u32 align, u32 flags)
 {
 	struct xe_device *xe = tile_to_xe(tile);
 	struct xe_sa_manager *sa_manager;
@@ -79,6 +82,26 @@ struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u3
 		memset(sa_manager->cpu_ptr, 0, bo->ttm.base.size);
 	}
 
+	if (flags & XE_SA_BO_MANAGER_FLAG_SHADOW) {
+		struct xe_bo *shadow;
+
+		ret = drmm_mutex_init(&xe->drm, &sa_manager->swap_guard);
+		if (ret)
+			return ERR_PTR(ret);
+
+		shadow = xe_managed_bo_create_pin_map(xe, tile, size,
+						      XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+						      XE_BO_FLAG_GGTT |
+						      XE_BO_FLAG_GGTT_INVALIDATE |
+						      XE_BO_FLAG_PINNED_NORESTORE);
+		if (IS_ERR(shadow)) {
+			drm_err(&xe->drm, "Failed to prepare %uKiB BO for SA manager (%pe)\n",
+				size / SZ_1K, shadow);
+			return ERR_CAST(shadow);
+		}
+		sa_manager->shadow = shadow;
+	}
+
 	drm_suballoc_manager_init(&sa_manager->base, managed_size, align);
 	ret = drmm_add_action_or_reset(&xe->drm, xe_sa_bo_manager_fini,
 				       sa_manager);
@@ -88,6 +111,48 @@ struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u3
 	return sa_manager;
 }
 
+/**
+ * xe_sa_bo_swap_shadow() - Swap the SA BO with shadow BO.
+ * @sa_manager: the XE sub allocator manager
+ *
+ * Swaps the sub-allocator primary buffer object with shadow buffer object.
+ *
+ * Return: None.
+ */
+void xe_sa_bo_swap_shadow(struct xe_sa_manager *sa_manager)
+{
+	struct xe_device *xe = tile_to_xe(sa_manager->bo->tile);
+
+	xe_assert(xe, sa_manager->shadow);
+	lockdep_assert_held(&sa_manager->swap_guard);
+
+	swap(sa_manager->bo, sa_manager->shadow);
+	if (!sa_manager->bo->vmap.is_iomem)
+		sa_manager->cpu_ptr = sa_manager->bo->vmap.vaddr;
+}
+
+/**
+ * xe_sa_bo_sync_shadow() - Sync the SA Shadow BO with primary BO.
+ * @sa_bo: the sub-allocator buffer object.
+ *
+ * Synchronize sub-allocator shadow buffer object with primary buffer object.
+ *
+ * Return: None.
+ */
+void xe_sa_bo_sync_shadow(struct drm_suballoc *sa_bo)
+{
+	struct xe_sa_manager *sa_manager = to_xe_sa_manager(sa_bo->manager);
+	struct xe_device *xe = tile_to_xe(sa_manager->bo->tile);
+
+	xe_assert(xe, sa_manager->shadow);
+	lockdep_assert_held(&sa_manager->swap_guard);
+
+	xe_map_memcpy_to(xe, &sa_manager->shadow->vmap,
+			 drm_suballoc_soffset(sa_bo),
+			 xe_sa_bo_cpu_addr(sa_bo),
+			 drm_suballoc_size(sa_bo));
+}
+
 /**
  * __xe_sa_bo_new() - Make a suballocation but use custom gfp flags.
  * @sa_manager: the &xe_sa_manager
diff --git a/drivers/gpu/drm/xe/xe_sa.h b/drivers/gpu/drm/xe/xe_sa.h
index 1be7443508361..05e9a4e00e789 100644
--- a/drivers/gpu/drm/xe/xe_sa.h
+++ b/drivers/gpu/drm/xe/xe_sa.h
@@ -14,12 +14,14 @@
 struct dma_fence;
 struct xe_tile;
 
-struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 guard, u32 align);
+#define XE_SA_BO_MANAGER_FLAG_SHADOW	BIT(0)
+struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size,
+					      u32 guard, u32 align, u32 flags);
 struct drm_suballoc *__xe_sa_bo_new(struct xe_sa_manager *sa_manager, u32 size, gfp_t gfp);
 
 static inline struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 align)
 {
-	return __xe_sa_bo_manager_init(tile, size, SZ_4K, align);
+	return __xe_sa_bo_manager_init(tile, size, SZ_4K, align, 0);
 }
 
 /**
@@ -69,4 +71,18 @@ static inline void *xe_sa_bo_cpu_addr(struct drm_suballoc *sa)
 		drm_suballoc_soffset(sa);
 }
 
+void xe_sa_bo_swap_shadow(struct xe_sa_manager *sa_manager);
+void xe_sa_bo_sync_shadow(struct drm_suballoc *sa_bo);
+
+/**
+ * xe_sa_bo_swap_guard() - Retrieve the SA BO swap guard within sub-allocator.
+ * @sa_manager: the &xe_sa_manager
+ *
+ * Return: Sub-allocator swap guard mutex.
+ */
+static inline struct mutex *xe_sa_bo_swap_guard(struct xe_sa_manager *sa_manager)
+{
+	return &sa_manager->swap_guard;
+}
+
 #endif
diff --git a/drivers/gpu/drm/xe/xe_sa_types.h b/drivers/gpu/drm/xe/xe_sa_types.h
index cb7238799dcb2..1085c9c37d6b6 100644
--- a/drivers/gpu/drm/xe/xe_sa_types.h
+++ b/drivers/gpu/drm/xe/xe_sa_types.h
@@ -12,6 +12,9 @@ struct xe_bo;
 struct xe_sa_manager {
 	struct drm_suballoc_manager base;
 	struct xe_bo *bo;
+	struct xe_bo *shadow;
+	/** @swap_guard: Timeline guard for updating @bo and @shadow */
+	struct mutex swap_guard;
 	void *cpu_ptr;
 	bool is_iomem;
 };
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
index 797a4b8662262..9959d619addcf 100644
--- a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
+++ b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
@@ -162,9 +162,12 @@ static int alloc_bb_pool(struct xe_tile *tile, struct xe_sriov_vf_ccs_ctx *ctx)
 
 	offset = 0;
 	xe_map_memset(xe, &sa_manager->bo->vmap, offset, MI_NOOP, bb_pool_size);
+	xe_map_memset(xe, &sa_manager->shadow->vmap, offset, MI_NOOP,
+		      bb_pool_size);
 
 	offset = bb_pool_size - sizeof(u32);
 	xe_map_wr(xe, &sa_manager->bo->vmap, offset, u32, MI_BATCH_BUFFER_END);
+	xe_map_wr(xe, &sa_manager->shadow->vmap, offset, u32, MI_BATCH_BUFFER_END);
 
 	ctx->mem.ccs_bb_pool = sa_manager;