git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
drm/amdgpu: pass the entity to use to ttm public functions
author: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Mon, 17 Nov 2025 14:53:15 +0000 (15:53 +0100)
committer: Alex Deucher <alexander.deucher@amd.com>
Mon, 8 Dec 2025 19:31:27 +0000 (14:31 -0500)
This way the caller can select the one it wants to use.

Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Acked-by: Felix Kuehling <felix.kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c

index 3636b757c97491423f3549e5cae50a219a90c94c..a050167e76a4bad93d2b3a803cefa07f4fc81e3c 100644 (file)
@@ -37,7 +37,8 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
 
        stime = ktime_get();
        for (i = 0; i < n; i++) {
-               r = amdgpu_copy_buffer(adev, saddr, daddr, size, NULL, &fence,
+               r = amdgpu_copy_buffer(adev, &adev->mman.default_entity,
+                                      saddr, daddr, size, NULL, &fence,
                                       false, 0);
                if (r)
                        goto exit_do_move;
index fe51087a54a9a11f18bc2c0d932d44ff2b94220d..415d8d88bbb63de37de1d2e3c0762cbf06d35c8b 100644 (file)
@@ -1333,8 +1333,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
        if (r)
                goto out;
 
-       r = amdgpu_fill_buffer(abo, 0, &bo->base._resv, &fence, true,
-                              AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE);
+       r = amdgpu_fill_buffer(&adev->mman.clear_entity, abo, 0, &bo->base._resv,
+                              &fence, AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE);
        if (WARN_ON(r))
                goto out;
 
index ff7ca20cea4eafd7f722af33c88f0a2fe1f0ac38..aff0da62cc7f9b007753179642387c93a9ffafd9 100644 (file)
@@ -356,7 +356,7 @@ static int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
                                                             write_compress_disable));
                }
 
-               r = amdgpu_copy_buffer(adev, from, to, cur_size, resv,
+               r = amdgpu_copy_buffer(adev, entity, from, to, cur_size, resv,
                                       &next, true, copy_flags);
                if (r)
                        goto error;
@@ -411,8 +411,9 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
            (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
                struct dma_fence *wipe_fence = NULL;
 
-               r = amdgpu_fill_buffer(abo, 0, NULL, &wipe_fence,
-                                      false, AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);
+               r = amdgpu_fill_buffer(&adev->mman.move_entity,
+                                      abo, 0, NULL, &wipe_fence,
+                                      AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);
                if (r) {
                        goto error;
                } else if (wipe_fence) {
@@ -2370,7 +2371,9 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
                                                   DMA_RESV_USAGE_BOOKKEEP);
 }
 
-int amdgpu_copy_buffer(struct amdgpu_device *adev, uint64_t src_offset,
+int amdgpu_copy_buffer(struct amdgpu_device *adev,
+                      struct amdgpu_ttm_buffer_entity *entity,
+                      uint64_t src_offset,
                       uint64_t dst_offset, uint32_t byte_count,
                       struct dma_resv *resv,
                       struct dma_fence **fence,
@@ -2394,7 +2397,7 @@ int amdgpu_copy_buffer(struct amdgpu_device *adev, uint64_t src_offset,
        max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
        num_loops = DIV_ROUND_UP(byte_count, max_bytes);
        num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
-       r = amdgpu_ttm_prepare_job(adev, &adev->mman.move_entity, num_dw,
+       r = amdgpu_ttm_prepare_job(adev, entity, num_dw,
                                   resv, vm_needs_flush, &job,
                                   AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER);
        if (r)
@@ -2523,22 +2526,18 @@ err:
        return r;
 }
 
-int amdgpu_fill_buffer(struct amdgpu_bo *bo,
-                       uint32_t src_data,
-                       struct dma_resv *resv,
-                       struct dma_fence **f,
-                       bool delayed,
-                       u64 k_job_id)
+int amdgpu_fill_buffer(struct amdgpu_ttm_buffer_entity *entity,
+                      struct amdgpu_bo *bo,
+                      uint32_t src_data,
+                      struct dma_resv *resv,
+                      struct dma_fence **f,
+                      u64 k_job_id)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-       struct amdgpu_ttm_buffer_entity *entity;
        struct dma_fence *fence = NULL;
        struct amdgpu_res_cursor dst;
        int r;
 
-       entity = delayed ? &adev->mman.clear_entity :
-                          &adev->mman.move_entity;
-
        if (!adev->mman.buffer_funcs_enabled) {
                dev_err(adev->dev,
                        "Trying to clear memory with ring turned off.\n");
@@ -2555,13 +2554,13 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
                /* Never fill more than 256MiB at once to avoid timeouts */
                cur_size = min(dst.size, 256ULL << 20);
 
-               r = amdgpu_ttm_map_buffer(&adev->mman.default_entity,
-                                         &bo->tbo, bo->tbo.resource, &dst,
+               r = amdgpu_ttm_map_buffer(entity, &bo->tbo, bo->tbo.resource, &dst,
                                          1, false, &cur_size, &to);
                if (r)
                        goto error;
 
-               r = amdgpu_ttm_fill_mem(adev, entity, src_data, to, cur_size, resv,
+               r = amdgpu_ttm_fill_mem(adev, entity,
+                                       src_data, to, cur_size, resv,
                                        &next, true, k_job_id);
                if (r)
                        goto error;
index 8a4ea9910b6d633178c064352bef881dc46397f6..72488124aa59a712eed9a61aefa7f77e38593916 100644 (file)
@@ -167,7 +167,9 @@ int amdgpu_ttm_init(struct amdgpu_device *adev);
 void amdgpu_ttm_fini(struct amdgpu_device *adev);
 void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
                                        bool enable);
-int amdgpu_copy_buffer(struct amdgpu_device *adev, uint64_t src_offset,
+int amdgpu_copy_buffer(struct amdgpu_device *adev,
+                      struct amdgpu_ttm_buffer_entity *entity,
+                      uint64_t src_offset,
                       uint64_t dst_offset, uint32_t byte_count,
                       struct dma_resv *resv,
                       struct dma_fence **fence,
@@ -175,12 +177,12 @@ int amdgpu_copy_buffer(struct amdgpu_device *adev, uint64_t src_offset,
 int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
                            struct dma_resv *resv,
                            struct dma_fence **fence);
-int amdgpu_fill_buffer(struct amdgpu_bo *bo,
-                       uint32_t src_data,
-                       struct dma_resv *resv,
-                       struct dma_fence **fence,
-                       bool delayed,
-                       u64 k_job_id);
+int amdgpu_fill_buffer(struct amdgpu_ttm_buffer_entity *entity,
+                      struct amdgpu_bo *bo,
+                      uint32_t src_data,
+                      struct dma_resv *resv,
+                      struct dma_fence **f,
+                      u64 k_job_id);
 
 int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
 void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
index f99d8274c488d8132f9d97968465b9e6c06684af..4e0629cbd7f83dc1d7b1c9fb8404b24427a0dc10 100644 (file)
@@ -157,7 +157,8 @@ svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
                        goto out_unlock;
                }
 
-               r = amdgpu_copy_buffer(adev, gart_s, gart_d, size * PAGE_SIZE,
+               r = amdgpu_copy_buffer(adev, entity,
+                                      gart_s, gart_d, size * PAGE_SIZE,
                                       NULL, &next, true, 0);
                if (r) {
                        dev_err(adev->dev, "fail %d to copy memory\n", r);