}
r = amdgpu_copy_buffer(ring, from, to, cur_size, resv,
- &next, false, true, copy_flags);
+ &next, true, copy_flags);
if (r)
goto error;
}
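For clarity at this call site: the argument that disappears is direct_submit (the old false), and the true that remains is vm_needs_flush. A sketch of the resulting call, assuming the from/to/cur_size/next locals shown in this hunk:

	r = amdgpu_copy_buffer(ring, from, to, cur_size, resv,
			       &next,  /* fence */
			       true,   /* vm_needs_flush */
			       copy_flags);
	if (r)
		goto error;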
static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
- bool direct_submit,
unsigned int num_dw,
struct dma_resv *resv,
bool vm_needs_flush,
struct amdgpu_job **job,
bool delayed, u64 k_job_id)
{
- enum amdgpu_ib_pool_type pool = direct_submit ?
- AMDGPU_IB_POOL_DIRECT :
- AMDGPU_IB_POOL_DELAYED;
+ enum amdgpu_ib_pool_type pool = AMDGPU_IB_POOL_DELAYED;
int r;
struct drm_sched_entity *entity = delayed ? &adev->mman.low_pr :
&adev->mman.high_pr;
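Taken together, the helper as implied by this hunk now looks roughly as follows (IB allocation body elided; the k_job_id plumbing stays as shown above):

static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
				  unsigned int num_dw,
				  struct dma_resv *resv,
				  bool vm_needs_flush,
				  struct amdgpu_job **job,
				  bool delayed, u64 k_job_id)
{
	/* All TTM moves now come from the delayed IB pool and go
	 * through one of the two scheduler entities below.
	 */
	enum amdgpu_ib_pool_type pool = AMDGPU_IB_POOL_DELAYED;
	int r;
	struct drm_sched_entity *entity = delayed ? &adev->mman.low_pr :
						    &adev->mman.high_pr;

	/* ... allocation against 'pool' and 'entity' unchanged ... */
}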
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
uint64_t dst_offset, uint32_t byte_count,
struct dma_resv *resv,
- struct dma_fence **fence, bool direct_submit,
+ struct dma_fence **fence,
bool vm_needs_flush, uint32_t copy_flags)
{
struct amdgpu_device *adev = ring->adev;
unsigned int i;
int r;
- if (!direct_submit && !ring->sched.ready) {
+ if (!ring->sched.ready) {
dev_err(adev->dev,
"Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}
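The guard itself gets simpler: with the direct path gone there is no way to bypass the GPU scheduler, so an unready ring is simply an error. A minimal sketch of the post-patch check, as implied by the hunk above:

	/* No direct-submission fallback any more: if the ring's DRM
	 * scheduler is not ready, buffer moves cannot be serviced.
	 */
	if (!ring->sched.ready) {
		dev_err(adev->dev,
			"Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}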
max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
num_loops = DIV_ROUND_UP(byte_count, max_bytes);
num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
- r = amdgpu_ttm_prepare_job(adev, direct_submit, num_dw,
+ r = amdgpu_ttm_prepare_job(adev, num_dw,
resv, vm_needs_flush, &job, false,
AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER);
	if (r)
		return r;
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > num_dw);
- if (direct_submit)
- r = amdgpu_job_submit_direct(job, ring, fence);
- else
- *fence = amdgpu_job_submit(job);
+ *fence = amdgpu_job_submit(job);
if (r)
goto error_free;
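Since amdgpu_job_submit() hands the job to the scheduler entity and returns the job's finished fence rather than an error code, r is left untouched here; the following if (r) only reflects the earlier amdgpu_ttm_prepare_job() result, which was already checked above. A sketch of the resulting tail of amdgpu_copy_buffer(), with the error_free path assumed unchanged outside this hunk:

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	/* Scheduler submission only: amdgpu_job_submit() returns the
	 * finished fence; no error is set at this point.
	 */
	*fence = amdgpu_job_submit(job);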
max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes);
num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8);
- r = amdgpu_ttm_prepare_job(adev, false, num_dw, resv, vm_needs_flush,
+ r = amdgpu_ttm_prepare_job(adev, num_dw, resv, vm_needs_flush,
&job, delayed, k_job_id);
if (r)
return r;
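Finally, the exported prototype shrinks by one parameter; the matching declaration (presumably in amdgpu_ttm.h) and any remaining callers passing a direct_submit argument need the same update. Sketch of the post-patch signature:

int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
		       uint64_t dst_offset, uint32_t byte_count,
		       struct dma_resv *resv,
		       struct dma_fence **fence,
		       bool vm_needs_flush, uint32_t copy_flags);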