git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
drm/amdgpu: introduce amdgpu_sdma_set_vm_pte_scheds
author: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Tue, 9 Sep 2025 13:39:24 +0000 (15:39 +0200)
committer: Alex Deucher <alexander.deucher@amd.com>
Mon, 23 Feb 2026 19:16:30 +0000 (14:16 -0500)
All sdma versions used the same logic, so add a helper and move the
common code to a single place.

---
v2: pass amdgpu_vm_pte_funcs as well
v3: drop all the *_set_vm_pte_funcs one liners
v5: rebased
---

Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
13 files changed:
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v7_1.c
drivers/gpu/drm/amd/amdgpu/si_dma.c

index 447e734c362beee445310ff8a8664f4261774060..af4042387f3b1cbcf3b27e3ab28a7130df82f085 100644 (file)
@@ -1536,6 +1536,8 @@ struct dma_fence *amdgpu_device_enforce_isolation(struct amdgpu_device *adev,
 bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev);
 ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring);
 ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset);
+void amdgpu_sdma_set_vm_pte_scheds(struct amdgpu_device *adev,
+                                  const struct amdgpu_vm_pte_funcs *vm_pte_funcs);
 
 /* atpx handler */
 #if defined(CONFIG_VGA_SWITCHEROO)
index f2beb980e3c3af0841fe412df3740aaea8776f2f..3a5ca6df2fdfa8d9d670f775367b33fd6bada488 100644 (file)
@@ -3210,3 +3210,20 @@ void amdgpu_vm_print_task_info(struct amdgpu_device *adev,
                task_info->process_name, task_info->tgid,
                task_info->task.comm, task_info->task.pid);
 }
+
+void amdgpu_sdma_set_vm_pte_scheds(struct amdgpu_device *adev,
+                                  const struct amdgpu_vm_pte_funcs *vm_pte_funcs)
+{
+       struct drm_gpu_scheduler *sched;
+       int i;
+
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               if (adev->sdma.has_page_queue)
+                       sched = &adev->sdma.instance[i].page.sched;
+               else
+                       sched = &adev->sdma.instance[i].ring.sched;
+               adev->vm_manager.vm_pte_scheds[i] = sched;
+       }
+       adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
+       adev->vm_manager.vm_pte_funcs = vm_pte_funcs;
+}
index 9e8715b4739da2111fdb8fd1ab1c665d4e41006e..22780c09177d88589b79ef01e487d9e672eff436 100644 (file)
@@ -53,7 +53,6 @@ static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
 static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev);
 static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev);
 static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
-static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
 static int cik_sdma_soft_reset(struct amdgpu_ip_block *ip_block);
 
 u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
@@ -919,6 +918,14 @@ static void cik_enable_sdma_mgls(struct amdgpu_device *adev,
        }
 }
 
+static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
+       .copy_pte_num_dw = 7,
+       .copy_pte = cik_sdma_vm_copy_pte,
+
+       .write_pte = cik_sdma_vm_write_pte,
+       .set_pte_pde = cik_sdma_vm_set_pte_pde,
+};
+
 static int cik_sdma_early_init(struct amdgpu_ip_block *ip_block)
 {
        struct amdgpu_device *adev = ip_block->adev;
@@ -933,7 +940,7 @@ static int cik_sdma_early_init(struct amdgpu_ip_block *ip_block)
        cik_sdma_set_ring_funcs(adev);
        cik_sdma_set_irq_funcs(adev);
        cik_sdma_set_buffer_funcs(adev);
-       cik_sdma_set_vm_pte_funcs(adev);
+       amdgpu_sdma_set_vm_pte_scheds(adev, &cik_sdma_vm_pte_funcs);
 
        return 0;
 }
@@ -1337,26 +1344,6 @@ static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
        adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
 }
 
-static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
-       .copy_pte_num_dw = 7,
-       .copy_pte = cik_sdma_vm_copy_pte,
-
-       .write_pte = cik_sdma_vm_write_pte,
-       .set_pte_pde = cik_sdma_vm_set_pte_pde,
-};
-
-static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
-{
-       unsigned i;
-
-       adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
-       for (i = 0; i < adev->sdma.num_instances; i++) {
-               adev->vm_manager.vm_pte_scheds[i] =
-                       &adev->sdma.instance[i].ring.sched;
-       }
-       adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
-}
-
 const struct amdgpu_ip_block_version cik_sdma_ip_block =
 {
        .type = AMD_IP_BLOCK_TYPE_SDMA,
index 92ce580647cdc241bb91908a045c00430852c145..0090ace49024f43151323416295094d81a19bd76 100644 (file)
@@ -51,7 +51,6 @@
 
 static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev);
 static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev);
-static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev);
 static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev);
 
 MODULE_FIRMWARE("amdgpu/topaz_sdma.bin");
@@ -809,6 +808,14 @@ static void sdma_v2_4_ring_emit_wreg(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, val);
 }
 
+static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
+       .copy_pte_num_dw = 7,
+       .copy_pte = sdma_v2_4_vm_copy_pte,
+
+       .write_pte = sdma_v2_4_vm_write_pte,
+       .set_pte_pde = sdma_v2_4_vm_set_pte_pde,
+};
+
 static int sdma_v2_4_early_init(struct amdgpu_ip_block *ip_block)
 {
        struct amdgpu_device *adev = ip_block->adev;
@@ -822,7 +829,7 @@ static int sdma_v2_4_early_init(struct amdgpu_ip_block *ip_block)
 
        sdma_v2_4_set_ring_funcs(adev);
        sdma_v2_4_set_buffer_funcs(adev);
-       sdma_v2_4_set_vm_pte_funcs(adev);
+       amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v2_4_vm_pte_funcs);
        sdma_v2_4_set_irq_funcs(adev);
 
        return 0;
@@ -1232,26 +1239,6 @@ static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev)
        adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
 }
 
-static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
-       .copy_pte_num_dw = 7,
-       .copy_pte = sdma_v2_4_vm_copy_pte,
-
-       .write_pte = sdma_v2_4_vm_write_pte,
-       .set_pte_pde = sdma_v2_4_vm_set_pte_pde,
-};
-
-static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
-{
-       unsigned i;
-
-       adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
-       for (i = 0; i < adev->sdma.num_instances; i++) {
-               adev->vm_manager.vm_pte_scheds[i] =
-                       &adev->sdma.instance[i].ring.sched;
-       }
-       adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
-}
-
 const struct amdgpu_ip_block_version sdma_v2_4_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_SDMA,
        .major = 2,
index 1c076bd1cf73eb1ee92510359f7fc18ecb958b2f..2526d393162ace11e50668fbe27937a886c1ee0d 100644 (file)
@@ -51,7 +51,6 @@
 
 static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev);
 static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev);
-static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev);
 static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev);
 
 MODULE_FIRMWARE("amdgpu/tonga_sdma.bin");
@@ -1082,6 +1081,14 @@ static void sdma_v3_0_ring_emit_wreg(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, val);
 }
 
+static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
+       .copy_pte_num_dw = 7,
+       .copy_pte = sdma_v3_0_vm_copy_pte,
+
+       .write_pte = sdma_v3_0_vm_write_pte,
+       .set_pte_pde = sdma_v3_0_vm_set_pte_pde,
+};
+
 static int sdma_v3_0_early_init(struct amdgpu_ip_block *ip_block)
 {
        struct amdgpu_device *adev = ip_block->adev;
@@ -1102,7 +1109,7 @@ static int sdma_v3_0_early_init(struct amdgpu_ip_block *ip_block)
 
        sdma_v3_0_set_ring_funcs(adev);
        sdma_v3_0_set_buffer_funcs(adev);
-       sdma_v3_0_set_vm_pte_funcs(adev);
+       amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v3_0_vm_pte_funcs);
        sdma_v3_0_set_irq_funcs(adev);
 
        return 0;
@@ -1674,26 +1681,6 @@ static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev)
        adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
 }
 
-static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
-       .copy_pte_num_dw = 7,
-       .copy_pte = sdma_v3_0_vm_copy_pte,
-
-       .write_pte = sdma_v3_0_vm_write_pte,
-       .set_pte_pde = sdma_v3_0_vm_set_pte_pde,
-};
-
-static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
-{
-       unsigned i;
-
-       adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
-       for (i = 0; i < adev->sdma.num_instances; i++) {
-               adev->vm_manager.vm_pte_scheds[i] =
-                        &adev->sdma.instance[i].ring.sched;
-       }
-       adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
-}
-
 const struct amdgpu_ip_block_version sdma_v3_0_ip_block =
 {
        .type = AMD_IP_BLOCK_TYPE_SDMA,
index f38004e6064e5c98f8bce183180670c37667d2b5..a35d9951e22a9329bc57a2b5234db88b2083dd26 100644 (file)
@@ -129,7 +129,6 @@ static const struct amdgpu_hwip_reg_entry sdma_reg_list_4_0[] = {
 
 static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev);
 static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev);
-static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev);
 static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev);
 static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev);
 
@@ -1751,6 +1750,14 @@ static bool sdma_v4_0_fw_support_paging_queue(struct amdgpu_device *adev)
        }
 }
 
+static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = {
+       .copy_pte_num_dw = 7,
+       .copy_pte = sdma_v4_0_vm_copy_pte,
+
+       .write_pte = sdma_v4_0_vm_write_pte,
+       .set_pte_pde = sdma_v4_0_vm_set_pte_pde,
+};
+
 static int sdma_v4_0_early_init(struct amdgpu_ip_block *ip_block)
 {
        struct amdgpu_device *adev = ip_block->adev;
@@ -1769,7 +1776,7 @@ static int sdma_v4_0_early_init(struct amdgpu_ip_block *ip_block)
 
        sdma_v4_0_set_ring_funcs(adev);
        sdma_v4_0_set_buffer_funcs(adev);
-       sdma_v4_0_set_vm_pte_funcs(adev);
+       amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v4_0_vm_pte_funcs);
        sdma_v4_0_set_irq_funcs(adev);
        sdma_v4_0_set_ras_funcs(adev);
 
@@ -2615,30 +2622,6 @@ static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev)
                adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
 }
 
-static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = {
-       .copy_pte_num_dw = 7,
-       .copy_pte = sdma_v4_0_vm_copy_pte,
-
-       .write_pte = sdma_v4_0_vm_write_pte,
-       .set_pte_pde = sdma_v4_0_vm_set_pte_pde,
-};
-
-static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
-{
-       struct drm_gpu_scheduler *sched;
-       unsigned i;
-
-       adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
-       for (i = 0; i < adev->sdma.num_instances; i++) {
-               if (adev->sdma.has_page_queue)
-                       sched = &adev->sdma.instance[i].page.sched;
-               else
-                       sched = &adev->sdma.instance[i].ring.sched;
-               adev->vm_manager.vm_pte_scheds[i] = sched;
-       }
-       adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
-}
-
 static void sdma_v4_0_get_ras_error_count(uint32_t value,
                                        uint32_t instance,
                                        uint32_t *sec_count)
index a1443990d5c60d499dde5f1427e5a7fa09d3d70d..7f77367848d43daf97f11565f300dcee24225f5c 100644 (file)
@@ -104,7 +104,6 @@ static const struct amdgpu_hwip_reg_entry sdma_reg_list_4_4_2[] = {
 
 static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev);
 static void sdma_v4_4_2_set_buffer_funcs(struct amdgpu_device *adev);
-static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev);
 static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev);
 static void sdma_v4_4_2_set_ras_funcs(struct amdgpu_device *adev);
 static void sdma_v4_4_2_update_reset_mask(struct amdgpu_device *adev);
@@ -1347,6 +1346,14 @@ static const struct amdgpu_sdma_funcs sdma_v4_4_2_sdma_funcs = {
        .soft_reset_kernel_queue = &sdma_v4_4_2_soft_reset_engine,
 };
 
+static const struct amdgpu_vm_pte_funcs sdma_v4_4_2_vm_pte_funcs = {
+       .copy_pte_num_dw = 7,
+       .copy_pte = sdma_v4_4_2_vm_copy_pte,
+
+       .write_pte = sdma_v4_4_2_vm_write_pte,
+       .set_pte_pde = sdma_v4_4_2_vm_set_pte_pde,
+};
+
 static int sdma_v4_4_2_early_init(struct amdgpu_ip_block *ip_block)
 {
        struct amdgpu_device *adev = ip_block->adev;
@@ -1362,7 +1369,7 @@ static int sdma_v4_4_2_early_init(struct amdgpu_ip_block *ip_block)
 
        sdma_v4_4_2_set_ring_funcs(adev);
        sdma_v4_4_2_set_buffer_funcs(adev);
-       sdma_v4_4_2_set_vm_pte_funcs(adev);
+       amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v4_4_2_vm_pte_funcs);
        sdma_v4_4_2_set_irq_funcs(adev);
        sdma_v4_4_2_set_ras_funcs(adev);
        return 0;
@@ -2316,30 +2323,6 @@ static void sdma_v4_4_2_set_buffer_funcs(struct amdgpu_device *adev)
                adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
 }
 
-static const struct amdgpu_vm_pte_funcs sdma_v4_4_2_vm_pte_funcs = {
-       .copy_pte_num_dw = 7,
-       .copy_pte = sdma_v4_4_2_vm_copy_pte,
-
-       .write_pte = sdma_v4_4_2_vm_write_pte,
-       .set_pte_pde = sdma_v4_4_2_vm_set_pte_pde,
-};
-
-static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev)
-{
-       struct drm_gpu_scheduler *sched;
-       unsigned i;
-
-       adev->vm_manager.vm_pte_funcs = &sdma_v4_4_2_vm_pte_funcs;
-       for (i = 0; i < adev->sdma.num_instances; i++) {
-               if (adev->sdma.has_page_queue)
-                       sched = &adev->sdma.instance[i].page.sched;
-               else
-                       sched = &adev->sdma.instance[i].ring.sched;
-               adev->vm_manager.vm_pte_scheds[i] = sched;
-       }
-       adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
-}
-
 /**
  * sdma_v4_4_2_update_reset_mask - update  reset mask for SDMA
  * @adev: Pointer to the AMDGPU device structure
index e3a035c9feceefc1b0717bd1e1e5701462d64fef..52f4e9e099cbf2f5dd5860768fb6929848377d97 100644 (file)
@@ -110,7 +110,6 @@ static const struct amdgpu_hwip_reg_entry sdma_reg_list_5_0[] = {
 
 static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev);
 static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev);
-static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev);
 static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev);
 static int sdma_v5_0_stop_queue(struct amdgpu_ring *ring);
 static int sdma_v5_0_restore_queue(struct amdgpu_ring *ring);
@@ -1357,6 +1356,13 @@ static const struct amdgpu_sdma_funcs sdma_v5_0_sdma_funcs = {
        .soft_reset_kernel_queue = &sdma_v5_0_soft_reset_engine,
 };
 
+static const struct amdgpu_vm_pte_funcs sdma_v5_0_vm_pte_funcs = {
+       .copy_pte_num_dw = 7,
+       .copy_pte = sdma_v5_0_vm_copy_pte,
+       .write_pte = sdma_v5_0_vm_write_pte,
+       .set_pte_pde = sdma_v5_0_vm_set_pte_pde,
+};
+
 static int sdma_v5_0_early_init(struct amdgpu_ip_block *ip_block)
 {
        struct amdgpu_device *adev = ip_block->adev;
@@ -1368,7 +1374,7 @@ static int sdma_v5_0_early_init(struct amdgpu_ip_block *ip_block)
 
        sdma_v5_0_set_ring_funcs(adev);
        sdma_v5_0_set_buffer_funcs(adev);
-       sdma_v5_0_set_vm_pte_funcs(adev);
+       amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v5_0_vm_pte_funcs);
        sdma_v5_0_set_irq_funcs(adev);
        sdma_v5_0_set_mqd_funcs(adev);
 
@@ -2052,27 +2058,6 @@ static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev)
        }
 }
 
-static const struct amdgpu_vm_pte_funcs sdma_v5_0_vm_pte_funcs = {
-       .copy_pte_num_dw = 7,
-       .copy_pte = sdma_v5_0_vm_copy_pte,
-       .write_pte = sdma_v5_0_vm_write_pte,
-       .set_pte_pde = sdma_v5_0_vm_set_pte_pde,
-};
-
-static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev)
-{
-       unsigned i;
-
-       if (adev->vm_manager.vm_pte_funcs == NULL) {
-               adev->vm_manager.vm_pte_funcs = &sdma_v5_0_vm_pte_funcs;
-               for (i = 0; i < adev->sdma.num_instances; i++) {
-                       adev->vm_manager.vm_pte_scheds[i] =
-                               &adev->sdma.instance[i].ring.sched;
-               }
-               adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
-       }
-}
-
 const struct amdgpu_ip_block_version sdma_v5_0_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_SDMA,
        .major = 5,
index feebaa8cd9b14881c6e6b0898458ddd255cf489d..0a8269795241d1c7c45789e25a1d242b15fe1744 100644 (file)
@@ -111,7 +111,6 @@ static const struct amdgpu_hwip_reg_entry sdma_reg_list_5_2[] = {
 
 static void sdma_v5_2_set_ring_funcs(struct amdgpu_device *adev);
 static void sdma_v5_2_set_buffer_funcs(struct amdgpu_device *adev);
-static void sdma_v5_2_set_vm_pte_funcs(struct amdgpu_device *adev);
 static void sdma_v5_2_set_irq_funcs(struct amdgpu_device *adev);
 static int sdma_v5_2_stop_queue(struct amdgpu_ring *ring);
 static int sdma_v5_2_restore_queue(struct amdgpu_ring *ring);
@@ -1248,6 +1247,13 @@ static void sdma_v5_2_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
        amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
 }
 
+static const struct amdgpu_vm_pte_funcs sdma_v5_2_vm_pte_funcs = {
+       .copy_pte_num_dw = 7,
+       .copy_pte = sdma_v5_2_vm_copy_pte,
+       .write_pte = sdma_v5_2_vm_write_pte,
+       .set_pte_pde = sdma_v5_2_vm_set_pte_pde,
+};
+
 static int sdma_v5_2_early_init(struct amdgpu_ip_block *ip_block)
 {
        struct amdgpu_device *adev = ip_block->adev;
@@ -1259,7 +1265,7 @@ static int sdma_v5_2_early_init(struct amdgpu_ip_block *ip_block)
 
        sdma_v5_2_set_ring_funcs(adev);
        sdma_v5_2_set_buffer_funcs(adev);
-       sdma_v5_2_set_vm_pte_funcs(adev);
+       amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v5_2_vm_pte_funcs);
        sdma_v5_2_set_irq_funcs(adev);
        sdma_v5_2_set_mqd_funcs(adev);
 
@@ -2056,27 +2062,6 @@ static void sdma_v5_2_set_buffer_funcs(struct amdgpu_device *adev)
        }
 }
 
-static const struct amdgpu_vm_pte_funcs sdma_v5_2_vm_pte_funcs = {
-       .copy_pte_num_dw = 7,
-       .copy_pte = sdma_v5_2_vm_copy_pte,
-       .write_pte = sdma_v5_2_vm_write_pte,
-       .set_pte_pde = sdma_v5_2_vm_set_pte_pde,
-};
-
-static void sdma_v5_2_set_vm_pte_funcs(struct amdgpu_device *adev)
-{
-       unsigned i;
-
-       if (adev->vm_manager.vm_pte_funcs == NULL) {
-               adev->vm_manager.vm_pte_funcs = &sdma_v5_2_vm_pte_funcs;
-               for (i = 0; i < adev->sdma.num_instances; i++) {
-                       adev->vm_manager.vm_pte_scheds[i] =
-                               &adev->sdma.instance[i].ring.sched;
-               }
-               adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
-       }
-}
-
 const struct amdgpu_ip_block_version sdma_v5_2_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_SDMA,
        .major = 5,
index b40126f5d3ef5505c43827cd29112ac03d193459..264336432849838d191329346685495ddf284a10 100644 (file)
@@ -120,7 +120,6 @@ static const struct amdgpu_hwip_reg_entry sdma_reg_list_6_0[] = {
 
 static void sdma_v6_0_set_ring_funcs(struct amdgpu_device *adev);
 static void sdma_v6_0_set_buffer_funcs(struct amdgpu_device *adev);
-static void sdma_v6_0_set_vm_pte_funcs(struct amdgpu_device *adev);
 static void sdma_v6_0_set_irq_funcs(struct amdgpu_device *adev);
 static int sdma_v6_0_start(struct amdgpu_device *adev);
 
@@ -1280,6 +1279,13 @@ static void sdma_v6_0_get_csa_info(struct amdgpu_device *adev,
        csa_info->alignment = SDMA6_CSA_ALIGNMENT;
 }
 
+static const struct amdgpu_vm_pte_funcs sdma_v6_0_vm_pte_funcs = {
+       .copy_pte_num_dw = 7,
+       .copy_pte = sdma_v6_0_vm_copy_pte,
+       .write_pte = sdma_v6_0_vm_write_pte,
+       .set_pte_pde = sdma_v6_0_vm_set_pte_pde,
+};
+
 static int sdma_v6_0_early_init(struct amdgpu_ip_block *ip_block)
 {
        struct amdgpu_device *adev = ip_block->adev;
@@ -1308,7 +1314,7 @@ static int sdma_v6_0_early_init(struct amdgpu_ip_block *ip_block)
 
        sdma_v6_0_set_ring_funcs(adev);
        sdma_v6_0_set_buffer_funcs(adev);
-       sdma_v6_0_set_vm_pte_funcs(adev);
+       amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v6_0_vm_pte_funcs);
        sdma_v6_0_set_irq_funcs(adev);
        sdma_v6_0_set_mqd_funcs(adev);
        sdma_v6_0_set_ras_funcs(adev);
@@ -1893,25 +1899,6 @@ static void sdma_v6_0_set_buffer_funcs(struct amdgpu_device *adev)
        adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
 }
 
-static const struct amdgpu_vm_pte_funcs sdma_v6_0_vm_pte_funcs = {
-       .copy_pte_num_dw = 7,
-       .copy_pte = sdma_v6_0_vm_copy_pte,
-       .write_pte = sdma_v6_0_vm_write_pte,
-       .set_pte_pde = sdma_v6_0_vm_set_pte_pde,
-};
-
-static void sdma_v6_0_set_vm_pte_funcs(struct amdgpu_device *adev)
-{
-       unsigned i;
-
-       adev->vm_manager.vm_pte_funcs = &sdma_v6_0_vm_pte_funcs;
-       for (i = 0; i < adev->sdma.num_instances; i++) {
-               adev->vm_manager.vm_pte_scheds[i] =
-                       &adev->sdma.instance[i].ring.sched;
-       }
-       adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
-}
-
 const struct amdgpu_ip_block_version sdma_v6_0_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_SDMA,
        .major = 6,
index 8d16ef257bcb9dff2f57e21b3a688587c503e7b5..f938be0524cd17c863e7a4fbcc55c85808b4e109 100644 (file)
@@ -119,7 +119,6 @@ static const struct amdgpu_hwip_reg_entry sdma_reg_list_7_0[] = {
 
 static void sdma_v7_0_set_ring_funcs(struct amdgpu_device *adev);
 static void sdma_v7_0_set_buffer_funcs(struct amdgpu_device *adev);
-static void sdma_v7_0_set_vm_pte_funcs(struct amdgpu_device *adev);
 static void sdma_v7_0_set_irq_funcs(struct amdgpu_device *adev);
 static int sdma_v7_0_start(struct amdgpu_device *adev);
 
@@ -1264,6 +1263,13 @@ static void sdma_v7_0_get_csa_info(struct amdgpu_device *adev,
        csa_info->alignment = SDMA7_CSA_ALIGNMENT;
 }
 
+static const struct amdgpu_vm_pte_funcs sdma_v7_0_vm_pte_funcs = {
+       .copy_pte_num_dw = 8,
+       .copy_pte = sdma_v7_0_vm_copy_pte,
+       .write_pte = sdma_v7_0_vm_write_pte,
+       .set_pte_pde = sdma_v7_0_vm_set_pte_pde,
+};
+
 static int sdma_v7_0_early_init(struct amdgpu_ip_block *ip_block)
 {
        struct amdgpu_device *adev = ip_block->adev;
@@ -1294,7 +1300,7 @@ static int sdma_v7_0_early_init(struct amdgpu_ip_block *ip_block)
 
        sdma_v7_0_set_ring_funcs(adev);
        sdma_v7_0_set_buffer_funcs(adev);
-       sdma_v7_0_set_vm_pte_funcs(adev);
+       amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v7_0_vm_pte_funcs);
        sdma_v7_0_set_irq_funcs(adev);
        sdma_v7_0_set_mqd_funcs(adev);
        adev->sdma.get_csa_info = &sdma_v7_0_get_csa_info;
@@ -1843,25 +1849,6 @@ static void sdma_v7_0_set_buffer_funcs(struct amdgpu_device *adev)
        adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
 }
 
-static const struct amdgpu_vm_pte_funcs sdma_v7_0_vm_pte_funcs = {
-       .copy_pte_num_dw = 8,
-       .copy_pte = sdma_v7_0_vm_copy_pte,
-       .write_pte = sdma_v7_0_vm_write_pte,
-       .set_pte_pde = sdma_v7_0_vm_set_pte_pde,
-};
-
-static void sdma_v7_0_set_vm_pte_funcs(struct amdgpu_device *adev)
-{
-       unsigned i;
-
-       adev->vm_manager.vm_pte_funcs = &sdma_v7_0_vm_pte_funcs;
-       for (i = 0; i < adev->sdma.num_instances; i++) {
-               adev->vm_manager.vm_pte_scheds[i] =
-                       &adev->sdma.instance[i].ring.sched;
-       }
-       adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
-}
-
 const struct amdgpu_ip_block_version sdma_v7_0_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_SDMA,
        .major = 7,
index 0824cba48f2e7904187bc1eeca519a6608ff2056..3de76afe3e454697671c647e56c1cf96afec79cf 100644 (file)
@@ -110,7 +110,6 @@ static const struct amdgpu_hwip_reg_entry sdma_reg_list_7_1[] = {
 
 static void sdma_v7_1_set_ring_funcs(struct amdgpu_device *adev);
 static void sdma_v7_1_set_buffer_funcs(struct amdgpu_device *adev);
-static void sdma_v7_1_set_vm_pte_funcs(struct amdgpu_device *adev);
 static void sdma_v7_1_set_irq_funcs(struct amdgpu_device *adev);
 static int sdma_v7_1_inst_start(struct amdgpu_device *adev,
                                uint32_t inst_mask);
@@ -1248,6 +1247,13 @@ static void sdma_v7_1_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
        amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
 }
 
+static const struct amdgpu_vm_pte_funcs sdma_v7_1_vm_pte_funcs = {
+       .copy_pte_num_dw = 8,
+       .copy_pte = sdma_v7_1_vm_copy_pte,
+       .write_pte = sdma_v7_1_vm_write_pte,
+       .set_pte_pde = sdma_v7_1_vm_set_pte_pde,
+};
+
 static int sdma_v7_1_early_init(struct amdgpu_ip_block *ip_block)
 {
        struct amdgpu_device *adev = ip_block->adev;
@@ -1261,7 +1267,7 @@ static int sdma_v7_1_early_init(struct amdgpu_ip_block *ip_block)
 
        sdma_v7_1_set_ring_funcs(adev);
        sdma_v7_1_set_buffer_funcs(adev);
-       sdma_v7_1_set_vm_pte_funcs(adev);
+       amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v7_1_vm_pte_funcs);
        sdma_v7_1_set_irq_funcs(adev);
        sdma_v7_1_set_mqd_funcs(adev);
 
@@ -1753,25 +1759,6 @@ static void sdma_v7_1_set_buffer_funcs(struct amdgpu_device *adev)
        adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
 }
 
-static const struct amdgpu_vm_pte_funcs sdma_v7_1_vm_pte_funcs = {
-       .copy_pte_num_dw = 8,
-       .copy_pte = sdma_v7_1_vm_copy_pte,
-       .write_pte = sdma_v7_1_vm_write_pte,
-       .set_pte_pde = sdma_v7_1_vm_set_pte_pde,
-};
-
-static void sdma_v7_1_set_vm_pte_funcs(struct amdgpu_device *adev)
-{
-       unsigned i;
-
-       adev->vm_manager.vm_pte_funcs = &sdma_v7_1_vm_pte_funcs;
-       for (i = 0; i < adev->sdma.num_instances; i++) {
-               adev->vm_manager.vm_pte_scheds[i] =
-                       &adev->sdma.instance[i].ring.sched;
-       }
-       adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
-}
-
 const struct amdgpu_ip_block_version sdma_v7_1_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_SDMA,
        .major = 7,
index 74fcaa340d9b15088f9a9fec8ff270ea9262ef2e..3e58feb2d5e4f107104e6256b2797e732a1d7a03 100644 (file)
@@ -37,7 +37,6 @@ const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
 
 static void si_dma_set_ring_funcs(struct amdgpu_device *adev);
 static void si_dma_set_buffer_funcs(struct amdgpu_device *adev);
-static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev);
 static void si_dma_set_irq_funcs(struct amdgpu_device *adev);
 
 /**
@@ -473,6 +472,14 @@ static void si_dma_ring_emit_wreg(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, val);
 }
 
+static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
+       .copy_pte_num_dw = 5,
+       .copy_pte = si_dma_vm_copy_pte,
+
+       .write_pte = si_dma_vm_write_pte,
+       .set_pte_pde = si_dma_vm_set_pte_pde,
+};
+
 static int si_dma_early_init(struct amdgpu_ip_block *ip_block)
 {
        struct amdgpu_device *adev = ip_block->adev;
@@ -481,7 +488,7 @@ static int si_dma_early_init(struct amdgpu_ip_block *ip_block)
 
        si_dma_set_ring_funcs(adev);
        si_dma_set_buffer_funcs(adev);
-       si_dma_set_vm_pte_funcs(adev);
+       amdgpu_sdma_set_vm_pte_scheds(adev, &si_dma_vm_pte_funcs);
        si_dma_set_irq_funcs(adev);
 
        return 0;
@@ -830,26 +837,6 @@ static void si_dma_set_buffer_funcs(struct amdgpu_device *adev)
        adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
 }
 
-static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
-       .copy_pte_num_dw = 5,
-       .copy_pte = si_dma_vm_copy_pte,
-
-       .write_pte = si_dma_vm_write_pte,
-       .set_pte_pde = si_dma_vm_set_pte_pde,
-};
-
-static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
-{
-       unsigned i;
-
-       adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
-       for (i = 0; i < adev->sdma.num_instances; i++) {
-               adev->vm_manager.vm_pte_scheds[i] =
-                       &adev->sdma.instance[i].ring.sched;
-       }
-       adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
-}
-
 const struct amdgpu_ip_block_version si_dma_ip_block =
 {
        .type = AMD_IP_BLOCK_TYPE_SDMA,