drm/amdgpu: pass the entity to use to amdgpu_ttm_map_buffer
Author:     Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
AuthorDate: Mon, 17 Nov 2025 14:42:31 +0000 (15:42 +0100)
Committer:  Alex Deucher <alexander.deucher@amd.com>
CommitDate: Mon, 8 Dec 2025 19:31:23 +0000 (14:31 -0500)
This way the caller can select the entity it wants to use.

Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
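
The change replaces an implicit choice (a hard-coded default entity, or a
"delayed" boolean resolved deep inside the helpers) with an entity passed
explicitly by the caller. Below is a minimal stand-alone C sketch of that
refactoring pattern; the types and names (job_entity, submit_job_old,
submit_job_new) are invented for illustration and are not the amdgpu API.

/*
 * Illustrative sketch only: a caller-selected "entity" (execution
 * context) is threaded through the helpers, instead of a boolean
 * that each helper translates into an entity on its own.
 * All names here are hypothetical.
 */
#include <stdio.h>

struct job_entity {
	const char *name;	/* which queue/context runs the work */
};

/* Before: the helper picks the entity from a flag. */
static void submit_job_old(int delayed)
{
	struct job_entity clear = { "clear" }, dflt = { "default" };
	struct job_entity *e = delayed ? &clear : &dflt;

	printf("old: running on %s entity\n", e->name);
}

/* After: the caller passes the entity it wants to use. */
static void submit_job_new(struct job_entity *entity)
{
	printf("new: running on %s entity\n", entity->name);
}

int main(void)
{
	struct job_entity move = { "move" }, clear = { "clear" };

	submit_job_old(1);
	/* The decision now lives at the call site, made once. */
	submit_job_new(&move);
	submit_job_new(&clear);
	return 0;
}

The benefit, as in the diff below, is that the boolean parameter disappears
from the intermediate helpers and each call site states directly which
entity runs its jobs.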
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index dcf53a78a28ab233667e91d4319f61830dd325cd..ff7ca20cea4eafd7f722af33c88f0a2fe1f0ac38 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -176,6 +176,7 @@ amdgpu_ttm_job_submit(struct amdgpu_device *adev, struct amdgpu_job *job, u32 nu
 
 /**
  * amdgpu_ttm_map_buffer - Map memory into the GART windows
+ * @entity: entity to run the window setup job
  * @bo: buffer object to map
  * @mem: memory object to map
  * @mm_cur: range to map
@@ -187,7 +188,8 @@ amdgpu_ttm_job_submit(struct amdgpu_device *adev, struct amdgpu_job *job, u32 nu
  * Setup one of the GART windows to access a specific piece of memory or return
  * the physical address for local memory.
  */
-static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
+static int amdgpu_ttm_map_buffer(struct amdgpu_ttm_buffer_entity *entity,
+                                struct ttm_buffer_object *bo,
                                 struct ttm_resource *mem,
                                 struct amdgpu_res_cursor *mm_cur,
                                 unsigned int window,
@@ -234,7 +236,7 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
        num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
        num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
 
-       r = amdgpu_job_alloc_with_ib(adev, &adev->mman.default_entity.base,
+       r = amdgpu_job_alloc_with_ib(adev, &entity->base,
                                     AMDGPU_FENCE_OWNER_UNDEFINED,
                                     num_dw * 4 + num_bytes,
                                     AMDGPU_IB_POOL_DELAYED, &job,
@@ -274,6 +276,7 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
 /**
  * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
  * @adev: amdgpu device
+ * @entity: entity to run the jobs
  * @src: buffer/address where to read from
  * @dst: buffer/address where to write to
  * @size: number of bytes to copy
@@ -288,6 +291,7 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
  */
 __attribute__((nonnull))
 static int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
+                                     struct amdgpu_ttm_buffer_entity *entity,
                                      const struct amdgpu_copy_mem *src,
                                      const struct amdgpu_copy_mem *dst,
                                      uint64_t size, bool tmz,
@@ -319,12 +323,12 @@ static int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
                cur_size = min3(src_mm.size, dst_mm.size, 256ULL << 20);
 
                /* Map src to window 0 and dst to window 1. */
-               r = amdgpu_ttm_map_buffer(src->bo, src->mem, &src_mm,
+               r = amdgpu_ttm_map_buffer(entity, src->bo, src->mem, &src_mm,
                                          0, tmz, &cur_size, &from);
                if (r)
                        goto error;
 
-               r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, &dst_mm,
+               r = amdgpu_ttm_map_buffer(entity, dst->bo, dst->mem, &dst_mm,
                                          1, tmz, &cur_size, &to);
                if (r)
                        goto error;
@@ -393,7 +397,9 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
        src.offset = 0;
        dst.offset = 0;
 
-       r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
+       r = amdgpu_ttm_copy_mem_to_mem(adev,
+                                      &adev->mman.move_entity,
+                                      &src, &dst,
                                       new_mem->size,
                                       amdgpu_bo_encrypted(abo),
                                       bo->base.resv, &fence);
@@ -2336,17 +2342,16 @@ error_free_entity:
 }
 
 static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
+                                 struct amdgpu_ttm_buffer_entity *entity,
                                  unsigned int num_dw,
                                  struct dma_resv *resv,
                                  bool vm_needs_flush,
                                  struct amdgpu_job **job,
-                                 bool delayed, u64 k_job_id)
+                                 u64 k_job_id)
 {
        enum amdgpu_ib_pool_type pool = AMDGPU_IB_POOL_DELAYED;
        int r;
-       struct drm_sched_entity *entity = delayed ? &adev->mman.clear_entity.base :
-                                                   &adev->mman.default_entity.base;
-       r = amdgpu_job_alloc_with_ib(adev, entity,
+       r = amdgpu_job_alloc_with_ib(adev, &entity->base,
                                     AMDGPU_FENCE_OWNER_UNDEFINED,
                                     num_dw * 4, pool, job, k_job_id);
        if (r)
@@ -2389,8 +2394,8 @@ int amdgpu_copy_buffer(struct amdgpu_device *adev, uint64_t src_offset,
        max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
        num_loops = DIV_ROUND_UP(byte_count, max_bytes);
        num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
-       r = amdgpu_ttm_prepare_job(adev, num_dw,
-                                  resv, vm_needs_flush, &job, false,
+       r = amdgpu_ttm_prepare_job(adev, &adev->mman.move_entity, num_dw,
+                                  resv, vm_needs_flush, &job,
                                   AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER);
        if (r)
                goto error_free;
@@ -2415,11 +2420,13 @@ error_free:
        return r;
 }
 
-static int amdgpu_ttm_fill_mem(struct amdgpu_device *adev, uint32_t src_data,
+static int amdgpu_ttm_fill_mem(struct amdgpu_device *adev,
+                              struct amdgpu_ttm_buffer_entity *entity,
+                              uint32_t src_data,
                               uint64_t dst_addr, uint32_t byte_count,
                               struct dma_resv *resv,
                               struct dma_fence **fence,
-                              bool vm_needs_flush, bool delayed,
+                              bool vm_needs_flush,
                               u64 k_job_id)
 {
        unsigned int num_loops, num_dw;
@@ -2431,8 +2438,8 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_device *adev, uint32_t src_data,
        max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
        num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes);
        num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8);
-       r = amdgpu_ttm_prepare_job(adev, num_dw, resv, vm_needs_flush,
-                                  &job, delayed, k_job_id);
+       r = amdgpu_ttm_prepare_job(adev, entity, num_dw, resv,
+                                  vm_needs_flush, &job, k_job_id);
        if (r)
                return r;
 
@@ -2493,13 +2500,14 @@ int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
                /* Never clear more than 256MiB at once to avoid timeouts */
                size = min(cursor.size, 256ULL << 20);
 
-               r = amdgpu_ttm_map_buffer(&bo->tbo, bo->tbo.resource, &cursor,
+               r = amdgpu_ttm_map_buffer(&adev->mman.clear_entity,
+                                         &bo->tbo, bo->tbo.resource, &cursor,
                                          1, false, &size, &addr);
                if (r)
                        goto err;
 
-               r = amdgpu_ttm_fill_mem(adev, 0, addr, size, resv,
-                                       &next, true, true,
+               r = amdgpu_ttm_fill_mem(adev, &adev->mman.clear_entity, 0, addr, size, resv,
+                                       &next, true,
                                        AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER);
                if (r)
                        goto err;
@@ -2523,10 +2531,14 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
                        u64 k_job_id)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+       struct amdgpu_ttm_buffer_entity *entity;
        struct dma_fence *fence = NULL;
        struct amdgpu_res_cursor dst;
        int r;
 
+       entity = delayed ? &adev->mman.clear_entity :
+                          &adev->mman.move_entity;
+
        if (!adev->mman.buffer_funcs_enabled) {
                dev_err(adev->dev,
                        "Trying to clear memory with ring turned off.\n");
@@ -2543,13 +2555,14 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
                /* Never fill more than 256MiB at once to avoid timeouts */
                cur_size = min(dst.size, 256ULL << 20);
 
-               r = amdgpu_ttm_map_buffer(&bo->tbo, bo->tbo.resource, &dst,
+               r = amdgpu_ttm_map_buffer(&adev->mman.default_entity,
+                                         &bo->tbo, bo->tbo.resource, &dst,
                                          1, false, &cur_size, &to);
                if (r)
                        goto error;
 
-               r = amdgpu_ttm_fill_mem(adev, src_data, to, cur_size, resv,
-                                       &next, true, delayed, k_job_id);
+               r = amdgpu_ttm_fill_mem(adev, entity, src_data, to, cur_size, resv,
+                                       &next, true, k_job_id);
                if (r)
                        goto error;
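
With the entity threaded through explicitly, amdgpu_ttm_prepare_job and
amdgpu_ttm_fill_mem drop their "delayed" flag: amdgpu_fill_buffer resolves
it once into clear_entity or move_entity for the fill jobs, amdgpu_move_blit
and amdgpu_copy_buffer use move_entity, and amdgpu_ttm_clear_buffer uses
clear_entity.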