* @mem: memory object to map
* @mm_cur: range to map
* @window: which GART window to use
- * @ring: DMA ring to use for the copy
* @tmz: if we should set up a TMZ enabled mapping
* @size: in: number of bytes to map; out: number of bytes mapped
* @addr: resulting address inside the MC address space
*/
static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
struct ttm_resource *mem,
struct amdgpu_res_cursor *mm_cur,
- unsigned int window, struct amdgpu_ring *ring,
+ unsigned int window,
bool tmz, uint64_t *size, uint64_t *addr)
{
- struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
unsigned int offset, num_pages, num_dw, num_bytes;
uint64_t src_addr, dst_addr;
+ struct amdgpu_ring *ring;
struct amdgpu_job *job;
void *cpu_addr;
uint64_t flags;
amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
dst_addr, num_bytes, 0);
+ ring = adev->mman.buffer_funcs_ring;
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > num_dw);
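/*
 * Context, not part of the patch: amdgpu_ttm_map_buffer() makes the range
 * at @mm_cur visible in the MC address space by copying its PTEs into one
 * of the small GART windows reserved for the buffer mover. A sketch of the
 * address it hands back, assuming the usual window layout of
 * AMDGPU_GTT_MAX_TRANSFER_SIZE pages per window:
 *
 *	*addr = adev->gmc.gart_start +
 *		(u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
 *			AMDGPU_GPU_PAGE_SIZE +
 *		offset;
 */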
struct dma_resv *resv,
struct dma_fence **f)
{
- struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct amdgpu_res_cursor src_mm, dst_mm;
struct dma_fence *fence = NULL;
int r = 0;
/* Map src to window 0 and dst to window 1. */
r = amdgpu_ttm_map_buffer(src->bo, src->mem, &src_mm,
- 0, ring, tmz, &cur_size, &from);
+ 0, tmz, &cur_size, &from);
if (r)
goto error;
r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, &dst_mm,
- 1, ring, tmz, &cur_size, &to);
+ 1, tmz, &cur_size, &to);
if (r)
goto error;
write_compress_disable));
}
- r = amdgpu_copy_buffer(ring, from, to, cur_size, resv,
+ r = amdgpu_copy_buffer(adev, from, to, cur_size, resv,
&next, true, copy_flags);
if (r)
goto error;
DMA_RESV_USAGE_BOOKKEEP);
}
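/*
 * Illustrative sketch, not part of the patch: with the ring parameter
 * gone, a user of the mover only needs the device. example_copy() is a
 * hypothetical caller; amdgpu_copy_buffer() is the updated API from this
 * change.
 */
static int example_copy(struct amdgpu_device *adev, uint64_t src,
			uint64_t dst, uint32_t size, struct dma_resv *resv)
{
	struct dma_fence *fence = NULL;
	int r;

	/* The SDMA ring is resolved internally from
	 * adev->mman.buffer_funcs_ring; callers no longer touch it. */
	r = amdgpu_copy_buffer(adev, src, dst, size, resv,
			       &fence, false, 0);
	if (r)
		return r;

	dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return 0;
}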
-int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
+int amdgpu_copy_buffer(struct amdgpu_device *adev, uint64_t src_offset,
uint64_t dst_offset, uint32_t byte_count,
struct dma_resv *resv,
struct dma_fence **fence,
bool vm_needs_flush, uint32_t copy_flags)
{
- struct amdgpu_device *adev = ring->adev;
unsigned int num_loops, num_dw;
+ struct amdgpu_ring *ring;
struct amdgpu_job *job;
uint32_t max_bytes;
unsigned int i;
int r;
+ ring = adev->mman.buffer_funcs_ring;
+
if (!ring->sched.ready) {
dev_err(adev->dev,
"Trying to move memory with ring turned off.\n");
return -EINVAL;
}
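/*
 * Elided in this hunk: the copy is split into engine-sized chunks before
 * the job is allocated. Roughly, matching the declarations above
 * (assumption: the standard buffer_funcs fields):
 *
 *	max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
 *	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
 *	num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
 */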
-static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
+static int amdgpu_ttm_fill_mem(struct amdgpu_device *adev, uint32_t src_data,
uint64_t dst_addr, uint32_t byte_count,
struct dma_resv *resv,
struct dma_fence **fence,
bool vm_needs_flush, bool delayed,
u64 k_job_id)
{
- struct amdgpu_device *adev = ring->adev;
unsigned int num_loops, num_dw;
+ struct amdgpu_ring *ring;
struct amdgpu_job *job;
uint32_t max_bytes;
unsigned int i;
byte_count -= cur_size;
}
+ ring = adev->mman.buffer_funcs_ring;
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > num_dw);
*fence = amdgpu_job_submit(job);
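/*
 * Note, not part of the patch: the fill IB is built entirely through
 * adev-based emit helpers, so the ring is only resolved right before
 * amdgpu_ring_pad_ib() and submission, the two places that actually need
 * it. The copy path above follows the same pattern.
 */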
struct dma_fence **fence)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct amdgpu_res_cursor cursor;
u64 addr;
int r = 0;
size = min(cursor.size, 256ULL << 20);
r = amdgpu_ttm_map_buffer(&bo->tbo, bo->tbo.resource, &cursor,
- 1, ring, false, &size, &addr);
+ 1, false, &size, &addr);
if (r)
goto err;
- r = amdgpu_ttm_fill_mem(ring, 0, addr, size, resv,
+ r = amdgpu_ttm_fill_mem(adev, 0, addr, size, resv,
&next, true, true,
AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER);
if (r)
u64 k_job_id)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct dma_fence *fence = NULL;
struct amdgpu_res_cursor dst;
int r;
cur_size = min(dst.size, 256ULL << 20);
r = amdgpu_ttm_map_buffer(&bo->tbo, bo->tbo.resource, &dst,
- 1, ring, false, &cur_size, &to);
+ 1, false, &cur_size, &to);
if (r)
goto error;
- r = amdgpu_ttm_fill_mem(ring, src_data, to, cur_size, resv,
+ r = amdgpu_ttm_fill_mem(adev, src_data, to, cur_size, resv,
&next, true, delayed, k_job_id);
if (r)
goto error;
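/*
 * Net effect across all four movers (sketch, summarizing the hunks above):
 * callers hand in only the device, and each function resolves the SDMA
 * ring itself:
 *
 *	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 *	... build job->ibs[0] with adev-based emit helpers ...
 *	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
 *	*fence = amdgpu_job_submit(job);
 */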