write_compress_disable));
}
- r = amdgpu_copy_buffer(adev, from, to, cur_size, resv,
+ r = amdgpu_copy_buffer(adev, entity, from, to, cur_size, resv,
&next, true, copy_flags);
if (r)
goto error;
(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
struct dma_fence *wipe_fence = NULL;
- r = amdgpu_fill_buffer(abo, 0, NULL, &wipe_fence,
- false, AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);
+ r = amdgpu_fill_buffer(&adev->mman.move_entity,
+ abo, 0, NULL, &wipe_fence,
+ AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);
if (r) {
goto error;
} else if (wipe_fence) {
DMA_RESV_USAGE_BOOKKEEP);
}
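
With the `delayed` flag gone, the wipe-on-release path above names its submission queue explicitly. A minimal sketch of the resulting call site, assuming the surrounding move-blit context (abo, r, and the error label come from the hunk above; the fence bookkeeping is abbreviated, not the full upstream hunk):

	if (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) {
		struct dma_fence *wipe_fence = NULL;

		/* Fill on the move entity so the wipe is ordered with the
		 * eviction copy that was just submitted. */
		r = amdgpu_fill_buffer(&adev->mman.move_entity, abo, 0, NULL,
				       &wipe_fence,
				       AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);
		if (r)
			goto error;
		if (wipe_fence)
			dma_resv_add_fence(abo->tbo.base.resv, wipe_fence,
					   DMA_RESV_USAGE_BOOKKEEP);
	}
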
-int amdgpu_copy_buffer(struct amdgpu_device *adev, uint64_t src_offset,
+int amdgpu_copy_buffer(struct amdgpu_device *adev,
+ struct amdgpu_ttm_buffer_entity *entity,
+ uint64_t src_offset,
uint64_t dst_offset, uint32_t byte_count,
struct dma_resv *resv,
struct dma_fence **fence,
max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
num_loops = DIV_ROUND_UP(byte_count, max_bytes);
num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
- r = amdgpu_ttm_prepare_job(adev, &adev->mman.move_entity, num_dw,
+ r = amdgpu_ttm_prepare_job(adev, entity, num_dw,
resv, vm_needs_flush, &job,
AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER);
if (r)
return r;
}
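
For existing callers the conversion is mechanical: the previously implicit move_entity becomes an argument. A before/after sketch, assuming a caller with src/dst GPU addresses already resolved (the trailing bool is vm_needs_flush, matching the prepare_job call above; copy_flags as in the first hunk):

	/* Before: queue hard-wired to adev->mman.move_entity in the helper */
	r = amdgpu_copy_buffer(adev, src, dst, size, resv, &fence,
			       vm_needs_flush, copy_flags);

	/* After: identical submission, entity spelled out by the caller */
	r = amdgpu_copy_buffer(adev, &adev->mman.move_entity, src, dst, size,
			       resv, &fence, vm_needs_flush, copy_flags);
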
-int amdgpu_fill_buffer(struct amdgpu_bo *bo,
- uint32_t src_data,
- struct dma_resv *resv,
- struct dma_fence **f,
- bool delayed,
- u64 k_job_id)
+int amdgpu_fill_buffer(struct amdgpu_ttm_buffer_entity *entity,
+ struct amdgpu_bo *bo,
+ uint32_t src_data,
+ struct dma_resv *resv,
+ struct dma_fence **f,
+ u64 k_job_id)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- struct amdgpu_ttm_buffer_entity *entity;
struct dma_fence *fence = NULL;
struct amdgpu_res_cursor dst;
int r;
- entity = delayed ? &adev->mman.clear_entity :
- &adev->mman.move_entity;
-
if (!adev->mman.buffer_funcs_enabled) {
dev_err(adev->dev,
"Trying to clear memory with ring turned off.\n");
/* Never fill more than 256MiB at once to avoid timeouts */
cur_size = min(dst.size, 256ULL << 20);
- r = amdgpu_ttm_map_buffer(&adev->mman.default_entity,
- &bo->tbo, bo->tbo.resource, &dst,
+ r = amdgpu_ttm_map_buffer(entity, &bo->tbo, bo->tbo.resource, &dst,
1, false, &cur_size, &to);
if (r)
goto error;
- r = amdgpu_ttm_fill_mem(adev, entity, src_data, to, cur_size, resv,
+ r = amdgpu_ttm_fill_mem(adev, entity,
+ src_data, to, cur_size, resv,
&next, true, k_job_id);
if (r)
goto error;
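
The removed `entity = delayed ? ...` selection is now the caller's job, and the mapping is one-to-one. A sketch of the conversion (the two entity names are exactly the ones the deleted lines chose between):

	/* delayed == true used to select the background clear entity */
	r = amdgpu_fill_buffer(&adev->mman.clear_entity, bo, 0, resv, &fence,
			       k_job_id);

	/* delayed == false used to select the move entity */
	r = amdgpu_fill_buffer(&adev->mman.move_entity, bo, 0, resv, &fence,
			       k_job_id);
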
void amdgpu_ttm_fini(struct amdgpu_device *adev);
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
bool enable);
-int amdgpu_copy_buffer(struct amdgpu_device *adev, uint64_t src_offset,
+int amdgpu_copy_buffer(struct amdgpu_device *adev,
+ struct amdgpu_ttm_buffer_entity *entity,
+ uint64_t src_offset,
uint64_t dst_offset, uint32_t byte_count,
struct dma_resv *resv,
struct dma_fence **fence,
int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
struct dma_resv *resv,
struct dma_fence **fence);
-int amdgpu_fill_buffer(struct amdgpu_bo *bo,
- uint32_t src_data,
- struct dma_resv *resv,
- struct dma_fence **fence,
- bool delayed,
- u64 k_job_id);
+int amdgpu_fill_buffer(struct amdgpu_ttm_buffer_entity *entity,
+ struct amdgpu_bo *bo,
+ uint32_t src_data,
+ struct dma_resv *resv,
+ struct dma_fence **f,
+ u64 k_job_id);
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
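
With the queue choice now part of both prototypes, out-of-file users pick an entity per call. A hypothetical helper (the name example_clear_bo is illustrative and the job id is reused from the hunks above, neither is part of this patch) showing intended usage of the new amdgpu_fill_buffer() signature:

	/* Hypothetical caller: clear a BO on the dedicated clear entity so
	 * the fill does not contend with evictions on move_entity. */
	static int example_clear_bo(struct amdgpu_device *adev,
				    struct amdgpu_bo *bo,
				    struct dma_resv *resv,
				    struct dma_fence **f)
	{
		/* job id is illustrative; use the one matching the caller */
		return amdgpu_fill_buffer(&adev->mman.clear_entity, bo, 0,
					  resv, f,
					  AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);
	}
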