#define NUM_SDMA(x) hweight32(x)
+struct amdgpu_sdma_csa_info {
+ u32 size;
+ u32 alignment;
+};
+
struct amdgpu_sdma_funcs {
 int (*stop_kernel_queue)(struct amdgpu_ring *ring);
 int (*start_kernel_queue)(struct amdgpu_ring *ring);
};

struct amdgpu_sdma {
 struct list_head reset_callback_list;
 bool no_user_submission;
 bool disable_uq;
+ void (*get_csa_info)(struct amdgpu_device *adev,
+ struct amdgpu_sdma_csa_info *csa_info);
};
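
For context, a consumer of the new callback might look like the sketch below. This is not part of the patch: amdgpu_sdma_get_csa_size() is a hypothetical helper, and ALIGN() assumes a power-of-two alignment, which holds for the values this series defines.

static u32 amdgpu_sdma_get_csa_size(struct amdgpu_device *adev)
{
    struct amdgpu_sdma_csa_info csa_info = {};

    /* IP versions that predate the callback report no CSA */
    if (!adev->sdma.get_csa_info)
        return 0;

    adev->sdma.get_csa_info(adev, &csa_info);

    /* round the size up to the required alignment (power of two) */
    return ALIGN(csa_info.size, csa_info.alignment);
}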
+/* all sizes are in bytes */
+#define SDMA6_CSA_SIZE 32
+#define SDMA6_CSA_ALIGNMENT 4
+
+static void sdma_v6_0_get_csa_info(struct amdgpu_device *adev,
+ struct amdgpu_sdma_csa_info *csa_info)
+{
+ csa_info->size = SDMA6_CSA_SIZE;
+ csa_info->alignment = SDMA6_CSA_ALIGNMENT;
+}
+
static int sdma_v6_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
sdma_v6_0_set_irq_funcs(adev);
sdma_v6_0_set_mqd_funcs(adev);
sdma_v6_0_set_ras_funcs(adev);
+ adev->sdma.get_csa_info = &sdma_v6_0_get_csa_info;
return 0;
}
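
Because the getter is wired up in early_init, which runs before the sw_init/hw_init stages in the amdgpu IP block sequence, any later allocation code can query adev->sdma.get_csa_info without knowing which SDMA generation is present.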
amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}
+/* all sizes are in bytes */
+#define SDMA7_CSA_SIZE 32
+#define SDMA7_CSA_ALIGNMENT 4
+
+static void sdma_v7_0_get_csa_info(struct amdgpu_device *adev,
+ struct amdgpu_sdma_csa_info *csa_info)
+{
+ csa_info->size = SDMA7_CSA_SIZE;
+ csa_info->alignment = SDMA7_CSA_ALIGNMENT;
+}
+
static int sdma_v7_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
sdma_v7_0_set_vm_pte_funcs(adev);
sdma_v7_0_set_irq_funcs(adev);
sdma_v7_0_set_mqd_funcs(adev);
+ adev->sdma.get_csa_info = &sdma_v7_0_get_csa_info;
return 0;
}
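
The per-version defines keep each getter trivial. A future SDMA generation with different context save area requirements would only need its own pair of defines and a getter, leaving consumers untouched; the sketch below is purely illustrative, with invented version number and values.

/* hypothetical future IP version, values invented for illustration */
#define SDMA8_CSA_SIZE 64
#define SDMA8_CSA_ALIGNMENT 8

static void sdma_v8_0_get_csa_info(struct amdgpu_device *adev,
    struct amdgpu_sdma_csa_info *csa_info)
{
    csa_info->size = SDMA8_CSA_SIZE;
    csa_info->alignment = SDMA8_CSA_ALIGNMENT;
}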