}
static const struct amdgpu_buffer_funcs sdma_v4_0_buffer_funcs = {
+ /* 1 << 22 == 0x400000 (4 MiB): same per-packet copy limit as before,
+ * rewritten as a shift for symmetry with the v4.4 table below.
+ */
- .copy_max_bytes = 0x400000,
+ .copy_max_bytes = 1 << 22,
 .copy_num_dw = 7,
 .emit_copy_buffer = sdma_v4_0_emit_copy_buffer,
- .fill_max_bytes = 0x400000,
+ /* Fill limit matches the copy limit (4 MiB), unchanged in value. */
+ .fill_max_bytes = 1 << 22,
+ .fill_num_dw = 5,
+ .emit_fill_buffer = sdma_v4_0_emit_fill_buffer,
+};
+
+/* Buffer-funcs table selected for SDMA IP v4.4.0 and newer (see
+ * sdma_v4_0_set_buffer_funcs): raises the per-packet copy/fill limit
+ * from 4 MiB to 1 GiB (1 << 30). Presumably the v4.4 engine's COPY/FILL
+ * packets carry a wider byte-count field -- NOTE(review): confirm
+ * against the SDMA 4.4 packet spec. Emit callbacks and DW counts are
+ * shared with the v4.0 table.
+ */
+static const struct amdgpu_buffer_funcs sdma_v4_4_buffer_funcs = {
+ .copy_max_bytes = 1 << 30,
+ .copy_num_dw = 7,
+ .emit_copy_buffer = sdma_v4_0_emit_copy_buffer,
+
+ .fill_max_bytes = 1 << 30,
 .fill_num_dw = 5,
 .emit_fill_buffer = sdma_v4_0_emit_fill_buffer,
};
static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev)
{
- adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;
+ /* SDMA IP v4.4.0+ gets the table with the larger (1 << 30)
+ * per-packet copy/fill limit; older IPs keep the 4 MiB table.
+ */
+ if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >= IP_VERSION(4, 4, 0))
+ adev->mman.buffer_funcs = &sdma_v4_4_buffer_funcs;
+ else
+ adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;
 if (adev->sdma.has_page_queue)
 adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].page;
 else