git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
drm/amdgpu: use multiple entities in amdgpu_move_blit
Author:    Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
AuthorDate:    Tue, 3 Feb 2026 10:22:12 +0000 (11:22 +0100)
Committer: Alex Deucher <alexander.deucher@amd.com>
CommitDate:    Mon, 30 Mar 2026 19:16:52 +0000 (15:16 -0400)
Thanks to "drm/ttm: rework pipelined eviction fence handling", ttm
can deal correctly with moves and evictions being executed from
different contexts.

Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Acked-by: Felix Kuehling <felix.kuehling@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h

index 375c3414b0a6cfeac7874038bc42e7c4825f0f24..afaaab6496def0babfd105ad1751a1b9719c389c 100644 (file)
@@ -387,9 +387,11 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
        struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
+       struct amdgpu_ttm_buffer_entity *entity;
        struct amdgpu_copy_mem src, dst;
        struct dma_fence *fence = NULL;
        int r;
+       u32 e;
 
        src.bo = bo;
        dst.bo = bo;
@@ -398,8 +400,12 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
        src.offset = 0;
        dst.offset = 0;
 
+       e = atomic_inc_return(&adev->mman.next_move_entity) %
+                             adev->mman.num_move_entities;
+       entity = &adev->mman.move_entities[e];
+
        r = amdgpu_ttm_copy_mem_to_mem(adev,
-                                      &adev->mman.move_entities[0],
+                                      entity,
                                       &src, &dst,
                                       new_mem->size,
                                       amdgpu_bo_encrypted(abo),
@@ -411,9 +417,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
        if (old_mem->mem_type == TTM_PL_VRAM &&
            (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
                struct dma_fence *wipe_fence = NULL;
-
-               r = amdgpu_fill_buffer(&adev->mman.move_entities[0],
-                                      abo, 0, NULL, &wipe_fence,
+               r = amdgpu_fill_buffer(entity, abo, 0, NULL, &wipe_fence,
                                       AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);
                if (r) {
                        goto error;
@@ -2392,6 +2396,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
                }
 
                adev->mman.num_move_entities = num_move_entities;
+               atomic_set(&adev->mman.next_move_entity, 0);
                for (i = 0; i < num_move_entities; i++) {
                        r = amdgpu_ttm_buffer_entity_init(
                                &adev->mman.gtt_mgr,
index cf32db3defb16a5fada81b33761bf27f33d61a21..3b19736114461a835359dbff362c44ff6057eac9 100644 (file)
@@ -76,6 +76,7 @@ struct amdgpu_mman {
        atomic_t next_clear_entity;
        u32 num_clear_entities;
        struct amdgpu_ttm_buffer_entity move_entities[TTM_NUM_MOVE_FENCES];
+       atomic_t next_move_entity;
        u32 num_move_entities;
 
        struct amdgpu_vram_mgr vram_mgr;