if (r)
goto out;
- r = amdgpu_fill_buffer(&adev->mman.clear_entities[0], abo, 0, &bo->base._resv,
+ r = amdgpu_fill_buffer(amdgpu_ttm_next_clear_entity(adev),
+ abo, 0, &bo->base._resv,
&fence, AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE);
if (WARN_ON(r))
goto out;
adev->mman.clear_entities = kcalloc(num_clear_entities,
sizeof(struct amdgpu_ttm_buffer_entity),
GFP_KERNEL);
+ atomic_set(&adev->mman.next_clear_entity, 0);
if (!adev->mman.clear_entities)
goto error_free_default_entity;
struct amdgpu_res_cursor dst;
int r;
- if (!adev->mman.buffer_funcs_enabled) {
- dev_err(adev->dev,
- "Trying to clear memory with ring turned off.\n");
+ if (!entity)
return -EINVAL;
- }
amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &dst);
return r;
}
+/**
+ * amdgpu_ttm_next_clear_entity - pick a clear entity in round-robin order
+ * @adev: amdgpu device object
+ *
+ * Cycles through adev->mman.clear_entities using the next_clear_entity
+ * atomic counter, so concurrent callers spread their clear jobs across
+ * the available entities.
+ *
+ * Returns a pointer to the selected entity, or NULL when no clear
+ * entities were set up (num_clear_entities == 0, e.g. allocation failed).
+ */
+struct amdgpu_ttm_buffer_entity *
+amdgpu_ttm_next_clear_entity(struct amdgpu_device *adev)
+{
+	struct amdgpu_mman *mman = &adev->mman;
+	u32 i;
+
+	if (mman->num_clear_entities == 0)
+		return NULL;
+
+	/*
+	 * atomic_inc_return() yields a signed int; the conversion to
+	 * unsigned before the modulo keeps the index in range even after
+	 * the counter wraps past INT_MAX, at the cost of one uneven step
+	 * at the wrap point.
+	 */
+	i = atomic_inc_return(&mman->next_clear_entity) %
+		mman->num_clear_entities;
+	return &mman->clear_entities[i];
+}
+
/**
* amdgpu_ttm_evict_resources - evict memory buffers
* @adev: amdgpu device object
/* @default_entity: for workarounds, has no gart windows */
struct amdgpu_ttm_buffer_entity default_entity;
struct amdgpu_ttm_buffer_entity *clear_entities;
+ atomic_t next_clear_entity;
u32 num_clear_entities;
struct amdgpu_ttm_buffer_entity move_entities[TTM_NUM_MOVE_FENCES];
u32 num_move_entities;
struct dma_resv *resv,
struct dma_fence **f,
u64 k_job_id);
+struct amdgpu_ttm_buffer_entity *amdgpu_ttm_next_clear_entity(struct amdgpu_device *adev);
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);