bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev);
ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring);
ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset);
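+/* common helper to set up VM PTE update scheduling across the SDMA engines */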
+void amdgpu_sdma_set_vm_pte_scheds(struct amdgpu_device *adev,
+				   const struct amdgpu_vm_pte_funcs *vm_pte_funcs);
/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
			task_info->process_name, task_info->tgid,
			task_info->task.comm, task_info->task.pid);
}
+
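+/**
+ * amdgpu_sdma_set_vm_pte_scheds - register SDMA schedulers for VM PTE updates
+ * @adev: amdgpu device pointer
+ * @vm_pte_funcs: generation-specific PTE update callbacks to install
+ *
+ * Points the VM manager at one GPU scheduler per SDMA instance, preferring
+ * the dedicated page queue where the hardware provides one, and installs
+ * the supplied PTE update callbacks.
+ */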
+void amdgpu_sdma_set_vm_pte_scheds(struct amdgpu_device *adev,
+				   const struct amdgpu_vm_pte_funcs *vm_pte_funcs)
+{
+	struct drm_gpu_scheduler *sched;
+	int i;
+
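+	/* use the dedicated page queue for PTE updates when the engine has one */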
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		if (adev->sdma.has_page_queue)
+			sched = &adev->sdma.instance[i].page.sched;
+		else
+			sched = &adev->sdma.instance[i].ring.sched;
+		adev->vm_manager.vm_pte_scheds[i] = sched;
+	}
+	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
+	adev->vm_manager.vm_pte_funcs = vm_pte_funcs;
+}
static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
-static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
static int cik_sdma_soft_reset(struct amdgpu_ip_block *ip_block);
u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
	}
}
+static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
+	.copy_pte_num_dw = 7,
+	.copy_pte = cik_sdma_vm_copy_pte,
+
+	.write_pte = cik_sdma_vm_write_pte,
+	.set_pte_pde = cik_sdma_vm_set_pte_pde,
+};
+
static int cik_sdma_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	cik_sdma_set_ring_funcs(adev);
	cik_sdma_set_irq_funcs(adev);
	cik_sdma_set_buffer_funcs(adev);
-	cik_sdma_set_vm_pte_funcs(adev);
+	amdgpu_sdma_set_vm_pte_scheds(adev, &cik_sdma_vm_pte_funcs);
	return 0;
}
	adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}
-static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
-	.copy_pte_num_dw = 7,
-	.copy_pte = cik_sdma_vm_copy_pte,
-
-	.write_pte = cik_sdma_vm_write_pte,
-	.set_pte_pde = cik_sdma_vm_set_pte_pde,
-};
-
-static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
-{
-	unsigned i;
-
-	adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
-	for (i = 0; i < adev->sdma.num_instances; i++) {
-		adev->vm_manager.vm_pte_scheds[i] =
-			&adev->sdma.instance[i].ring.sched;
-	}
-	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
-}
-
const struct amdgpu_ip_block_version cik_sdma_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SDMA,
static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev);
-static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev);
MODULE_FIRMWARE("amdgpu/topaz_sdma.bin");
	amdgpu_ring_write(ring, val);
}
+static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
+	.copy_pte_num_dw = 7,
+	.copy_pte = sdma_v2_4_vm_copy_pte,
+
+	.write_pte = sdma_v2_4_vm_write_pte,
+	.set_pte_pde = sdma_v2_4_vm_set_pte_pde,
+};
+
static int sdma_v2_4_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	sdma_v2_4_set_ring_funcs(adev);
	sdma_v2_4_set_buffer_funcs(adev);
-	sdma_v2_4_set_vm_pte_funcs(adev);
+	amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v2_4_vm_pte_funcs);
	sdma_v2_4_set_irq_funcs(adev);
	return 0;
	adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}
-static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
-	.copy_pte_num_dw = 7,
-	.copy_pte = sdma_v2_4_vm_copy_pte,
-
-	.write_pte = sdma_v2_4_vm_write_pte,
-	.set_pte_pde = sdma_v2_4_vm_set_pte_pde,
-};
-
-static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
-{
-	unsigned i;
-
-	adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
-	for (i = 0; i < adev->sdma.num_instances; i++) {
-		adev->vm_manager.vm_pte_scheds[i] =
-			&adev->sdma.instance[i].ring.sched;
-	}
-	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
-}
-
const struct amdgpu_ip_block_version sdma_v2_4_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 2,
static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev);
-static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev);
MODULE_FIRMWARE("amdgpu/tonga_sdma.bin");
	amdgpu_ring_write(ring, val);
}
+static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
+	.copy_pte_num_dw = 7,
+	.copy_pte = sdma_v3_0_vm_copy_pte,
+
+	.write_pte = sdma_v3_0_vm_write_pte,
+	.set_pte_pde = sdma_v3_0_vm_set_pte_pde,
+};
+
static int sdma_v3_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	sdma_v3_0_set_ring_funcs(adev);
	sdma_v3_0_set_buffer_funcs(adev);
-	sdma_v3_0_set_vm_pte_funcs(adev);
+	amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v3_0_vm_pte_funcs);
	sdma_v3_0_set_irq_funcs(adev);
	return 0;
	adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}
-static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
-	.copy_pte_num_dw = 7,
-	.copy_pte = sdma_v3_0_vm_copy_pte,
-
-	.write_pte = sdma_v3_0_vm_write_pte,
-	.set_pte_pde = sdma_v3_0_vm_set_pte_pde,
-};
-
-static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
-{
-	unsigned i;
-
-	adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
-	for (i = 0; i < adev->sdma.num_instances; i++) {
-		adev->vm_manager.vm_pte_scheds[i] =
-			&adev->sdma.instance[i].ring.sched;
-	}
-	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
-}
-
const struct amdgpu_ip_block_version sdma_v3_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SDMA,
static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev);
-static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev);
	}
}
+static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = {
+	.copy_pte_num_dw = 7,
+	.copy_pte = sdma_v4_0_vm_copy_pte,
+
+	.write_pte = sdma_v4_0_vm_write_pte,
+	.set_pte_pde = sdma_v4_0_vm_set_pte_pde,
+};
+
static int sdma_v4_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	sdma_v4_0_set_ring_funcs(adev);
	sdma_v4_0_set_buffer_funcs(adev);
-	sdma_v4_0_set_vm_pte_funcs(adev);
+	amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v4_0_vm_pte_funcs);
	sdma_v4_0_set_irq_funcs(adev);
	sdma_v4_0_set_ras_funcs(adev);
	adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}
-static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = {
-	.copy_pte_num_dw = 7,
-	.copy_pte = sdma_v4_0_vm_copy_pte,
-
-	.write_pte = sdma_v4_0_vm_write_pte,
-	.set_pte_pde = sdma_v4_0_vm_set_pte_pde,
-};
-
-static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
-{
-	struct drm_gpu_scheduler *sched;
-	unsigned i;
-
-	adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
-	for (i = 0; i < adev->sdma.num_instances; i++) {
-		if (adev->sdma.has_page_queue)
-			sched = &adev->sdma.instance[i].page.sched;
-		else
-			sched = &adev->sdma.instance[i].ring.sched;
-		adev->vm_manager.vm_pte_scheds[i] = sched;
-	}
-	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
-}
-
static void sdma_v4_0_get_ras_error_count(uint32_t value,
					  uint32_t instance,
					  uint32_t *sec_count)
static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_buffer_funcs(struct amdgpu_device *adev);
-static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_ras_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_update_reset_mask(struct amdgpu_device *adev);
	.soft_reset_kernel_queue = &sdma_v4_4_2_soft_reset_engine,
};
+static const struct amdgpu_vm_pte_funcs sdma_v4_4_2_vm_pte_funcs = {
+	.copy_pte_num_dw = 7,
+	.copy_pte = sdma_v4_4_2_vm_copy_pte,
+
+	.write_pte = sdma_v4_4_2_vm_write_pte,
+	.set_pte_pde = sdma_v4_4_2_vm_set_pte_pde,
+};
+
static int sdma_v4_4_2_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	sdma_v4_4_2_set_ring_funcs(adev);
	sdma_v4_4_2_set_buffer_funcs(adev);
-	sdma_v4_4_2_set_vm_pte_funcs(adev);
+	amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v4_4_2_vm_pte_funcs);
	sdma_v4_4_2_set_irq_funcs(adev);
	sdma_v4_4_2_set_ras_funcs(adev);
	return 0;
	adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}
-static const struct amdgpu_vm_pte_funcs sdma_v4_4_2_vm_pte_funcs = {
-	.copy_pte_num_dw = 7,
-	.copy_pte = sdma_v4_4_2_vm_copy_pte,
-
-	.write_pte = sdma_v4_4_2_vm_write_pte,
-	.set_pte_pde = sdma_v4_4_2_vm_set_pte_pde,
-};
-
-static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev)
-{
-	struct drm_gpu_scheduler *sched;
-	unsigned i;
-
-	adev->vm_manager.vm_pte_funcs = &sdma_v4_4_2_vm_pte_funcs;
-	for (i = 0; i < adev->sdma.num_instances; i++) {
-		if (adev->sdma.has_page_queue)
-			sched = &adev->sdma.instance[i].page.sched;
-		else
-			sched = &adev->sdma.instance[i].ring.sched;
-		adev->vm_manager.vm_pte_scheds[i] = sched;
-	}
-	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
-}
-
/**
 * sdma_v4_4_2_update_reset_mask - update reset mask for SDMA
 * @adev: Pointer to the AMDGPU device structure
static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev);
-static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev);
static int sdma_v5_0_stop_queue(struct amdgpu_ring *ring);
static int sdma_v5_0_restore_queue(struct amdgpu_ring *ring);
	.soft_reset_kernel_queue = &sdma_v5_0_soft_reset_engine,
};
+static const struct amdgpu_vm_pte_funcs sdma_v5_0_vm_pte_funcs = {
+	.copy_pte_num_dw = 7,
+	.copy_pte = sdma_v5_0_vm_copy_pte,
+	.write_pte = sdma_v5_0_vm_write_pte,
+	.set_pte_pde = sdma_v5_0_vm_set_pte_pde,
+};
+
static int sdma_v5_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	sdma_v5_0_set_ring_funcs(adev);
	sdma_v5_0_set_buffer_funcs(adev);
-	sdma_v5_0_set_vm_pte_funcs(adev);
+	amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v5_0_vm_pte_funcs);
	sdma_v5_0_set_irq_funcs(adev);
	sdma_v5_0_set_mqd_funcs(adev);
	}
}
-static const struct amdgpu_vm_pte_funcs sdma_v5_0_vm_pte_funcs = {
-	.copy_pte_num_dw = 7,
-	.copy_pte = sdma_v5_0_vm_copy_pte,
-	.write_pte = sdma_v5_0_vm_write_pte,
-	.set_pte_pde = sdma_v5_0_vm_set_pte_pde,
-};
-
-static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev)
-{
-	unsigned i;
-
-	if (adev->vm_manager.vm_pte_funcs == NULL) {
-		adev->vm_manager.vm_pte_funcs = &sdma_v5_0_vm_pte_funcs;
-		for (i = 0; i < adev->sdma.num_instances; i++) {
-			adev->vm_manager.vm_pte_scheds[i] =
-				&adev->sdma.instance[i].ring.sched;
-		}
-		adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
-	}
-}
-
const struct amdgpu_ip_block_version sdma_v5_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 5,
static void sdma_v5_2_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v5_2_set_buffer_funcs(struct amdgpu_device *adev);
-static void sdma_v5_2_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v5_2_set_irq_funcs(struct amdgpu_device *adev);
static int sdma_v5_2_stop_queue(struct amdgpu_ring *ring);
static int sdma_v5_2_restore_queue(struct amdgpu_ring *ring);
	amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}
+static const struct amdgpu_vm_pte_funcs sdma_v5_2_vm_pte_funcs = {
+	.copy_pte_num_dw = 7,
+	.copy_pte = sdma_v5_2_vm_copy_pte,
+	.write_pte = sdma_v5_2_vm_write_pte,
+	.set_pte_pde = sdma_v5_2_vm_set_pte_pde,
+};
+
static int sdma_v5_2_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	sdma_v5_2_set_ring_funcs(adev);
	sdma_v5_2_set_buffer_funcs(adev);
-	sdma_v5_2_set_vm_pte_funcs(adev);
+	amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v5_2_vm_pte_funcs);
	sdma_v5_2_set_irq_funcs(adev);
	sdma_v5_2_set_mqd_funcs(adev);
	}
}
-static const struct amdgpu_vm_pte_funcs sdma_v5_2_vm_pte_funcs = {
-	.copy_pte_num_dw = 7,
-	.copy_pte = sdma_v5_2_vm_copy_pte,
-	.write_pte = sdma_v5_2_vm_write_pte,
-	.set_pte_pde = sdma_v5_2_vm_set_pte_pde,
-};
-
-static void sdma_v5_2_set_vm_pte_funcs(struct amdgpu_device *adev)
-{
-	unsigned i;
-
-	if (adev->vm_manager.vm_pte_funcs == NULL) {
-		adev->vm_manager.vm_pte_funcs = &sdma_v5_2_vm_pte_funcs;
-		for (i = 0; i < adev->sdma.num_instances; i++) {
-			adev->vm_manager.vm_pte_scheds[i] =
-				&adev->sdma.instance[i].ring.sched;
-		}
-		adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
-	}
-}
-
const struct amdgpu_ip_block_version sdma_v5_2_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 5,
static void sdma_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v6_0_set_buffer_funcs(struct amdgpu_device *adev);
-static void sdma_v6_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int sdma_v6_0_start(struct amdgpu_device *adev);
	csa_info->alignment = SDMA6_CSA_ALIGNMENT;
}
+static const struct amdgpu_vm_pte_funcs sdma_v6_0_vm_pte_funcs = {
+	.copy_pte_num_dw = 7,
+	.copy_pte = sdma_v6_0_vm_copy_pte,
+	.write_pte = sdma_v6_0_vm_write_pte,
+	.set_pte_pde = sdma_v6_0_vm_set_pte_pde,
+};
+
static int sdma_v6_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	sdma_v6_0_set_ring_funcs(adev);
	sdma_v6_0_set_buffer_funcs(adev);
-	sdma_v6_0_set_vm_pte_funcs(adev);
+	amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v6_0_vm_pte_funcs);
	sdma_v6_0_set_irq_funcs(adev);
	sdma_v6_0_set_mqd_funcs(adev);
	sdma_v6_0_set_ras_funcs(adev);
	adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}
-static const struct amdgpu_vm_pte_funcs sdma_v6_0_vm_pte_funcs = {
-	.copy_pte_num_dw = 7,
-	.copy_pte = sdma_v6_0_vm_copy_pte,
-	.write_pte = sdma_v6_0_vm_write_pte,
-	.set_pte_pde = sdma_v6_0_vm_set_pte_pde,
-};
-
-static void sdma_v6_0_set_vm_pte_funcs(struct amdgpu_device *adev)
-{
-	unsigned i;
-
-	adev->vm_manager.vm_pte_funcs = &sdma_v6_0_vm_pte_funcs;
-	for (i = 0; i < adev->sdma.num_instances; i++) {
-		adev->vm_manager.vm_pte_scheds[i] =
-			&adev->sdma.instance[i].ring.sched;
-	}
-	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
-}
-
const struct amdgpu_ip_block_version sdma_v6_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 6,
static void sdma_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v7_0_set_buffer_funcs(struct amdgpu_device *adev);
-static void sdma_v7_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int sdma_v7_0_start(struct amdgpu_device *adev);
	csa_info->alignment = SDMA7_CSA_ALIGNMENT;
}
+static const struct amdgpu_vm_pte_funcs sdma_v7_0_vm_pte_funcs = {
+	.copy_pte_num_dw = 8,
+	.copy_pte = sdma_v7_0_vm_copy_pte,
+	.write_pte = sdma_v7_0_vm_write_pte,
+	.set_pte_pde = sdma_v7_0_vm_set_pte_pde,
+};
+
static int sdma_v7_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	sdma_v7_0_set_ring_funcs(adev);
	sdma_v7_0_set_buffer_funcs(adev);
-	sdma_v7_0_set_vm_pte_funcs(adev);
+	amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v7_0_vm_pte_funcs);
	sdma_v7_0_set_irq_funcs(adev);
	sdma_v7_0_set_mqd_funcs(adev);
	adev->sdma.get_csa_info = &sdma_v7_0_get_csa_info;
	adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}
-static const struct amdgpu_vm_pte_funcs sdma_v7_0_vm_pte_funcs = {
-	.copy_pte_num_dw = 8,
-	.copy_pte = sdma_v7_0_vm_copy_pte,
-	.write_pte = sdma_v7_0_vm_write_pte,
-	.set_pte_pde = sdma_v7_0_vm_set_pte_pde,
-};
-
-static void sdma_v7_0_set_vm_pte_funcs(struct amdgpu_device *adev)
-{
-	unsigned i;
-
-	adev->vm_manager.vm_pte_funcs = &sdma_v7_0_vm_pte_funcs;
-	for (i = 0; i < adev->sdma.num_instances; i++) {
-		adev->vm_manager.vm_pte_scheds[i] =
-			&adev->sdma.instance[i].ring.sched;
-	}
-	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
-}
-
const struct amdgpu_ip_block_version sdma_v7_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 7,
static void sdma_v7_1_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v7_1_set_buffer_funcs(struct amdgpu_device *adev);
-static void sdma_v7_1_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v7_1_set_irq_funcs(struct amdgpu_device *adev);
static int sdma_v7_1_inst_start(struct amdgpu_device *adev,
				uint32_t inst_mask);
	amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}
+static const struct amdgpu_vm_pte_funcs sdma_v7_1_vm_pte_funcs = {
+	.copy_pte_num_dw = 8,
+	.copy_pte = sdma_v7_1_vm_copy_pte,
+	.write_pte = sdma_v7_1_vm_write_pte,
+	.set_pte_pde = sdma_v7_1_vm_set_pte_pde,
+};
+
static int sdma_v7_1_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	sdma_v7_1_set_ring_funcs(adev);
	sdma_v7_1_set_buffer_funcs(adev);
-	sdma_v7_1_set_vm_pte_funcs(adev);
+	amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v7_1_vm_pte_funcs);
	sdma_v7_1_set_irq_funcs(adev);
	sdma_v7_1_set_mqd_funcs(adev);
	adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}
-static const struct amdgpu_vm_pte_funcs sdma_v7_1_vm_pte_funcs = {
-	.copy_pte_num_dw = 8,
-	.copy_pte = sdma_v7_1_vm_copy_pte,
-	.write_pte = sdma_v7_1_vm_write_pte,
-	.set_pte_pde = sdma_v7_1_vm_set_pte_pde,
-};
-
-static void sdma_v7_1_set_vm_pte_funcs(struct amdgpu_device *adev)
-{
-	unsigned i;
-
-	adev->vm_manager.vm_pte_funcs = &sdma_v7_1_vm_pte_funcs;
-	for (i = 0; i < adev->sdma.num_instances; i++) {
-		adev->vm_manager.vm_pte_scheds[i] =
-			&adev->sdma.instance[i].ring.sched;
-	}
-	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
-}
-
const struct amdgpu_ip_block_version sdma_v7_1_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 7,
static void si_dma_set_ring_funcs(struct amdgpu_device *adev);
static void si_dma_set_buffer_funcs(struct amdgpu_device *adev);
-static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev);
static void si_dma_set_irq_funcs(struct amdgpu_device *adev);
/**
	amdgpu_ring_write(ring, val);
}
+static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
+	.copy_pte_num_dw = 5,
+	.copy_pte = si_dma_vm_copy_pte,
+
+	.write_pte = si_dma_vm_write_pte,
+	.set_pte_pde = si_dma_vm_set_pte_pde,
+};
+
static int si_dma_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	si_dma_set_ring_funcs(adev);
	si_dma_set_buffer_funcs(adev);
-	si_dma_set_vm_pte_funcs(adev);
+	amdgpu_sdma_set_vm_pte_scheds(adev, &si_dma_vm_pte_funcs);
	si_dma_set_irq_funcs(adev);
	return 0;
	adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}
-static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
-	.copy_pte_num_dw = 5,
-	.copy_pte = si_dma_vm_copy_pte,
-
-	.write_pte = si_dma_vm_write_pte,
-	.set_pte_pde = si_dma_vm_set_pte_pde,
-};
-
-static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
-{
-	unsigned i;
-
-	adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
-	for (i = 0; i < adev->sdma.num_instances; i++) {
-		adev->vm_manager.vm_pte_scheds[i] =
-			&adev->sdma.instance[i].ring.sched;
-	}
-	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
-}
-
const struct amdgpu_ip_block_version si_dma_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SDMA,