mutex_unlock(&uq_mgr->userq_mutex);
}
-void amdgpu_evf_mgr_attach_fence(struct amdgpu_eviction_fence_mgr *evf_mgr,
-				 struct amdgpu_bo *bo)
+int amdgpu_evf_mgr_attach_fence(struct amdgpu_eviction_fence_mgr *evf_mgr,
+				struct amdgpu_bo *bo)
 {
 	struct dma_fence *ev_fence = amdgpu_evf_mgr_get_fence(evf_mgr);
+	struct ttm_operation_ctx ctx = { false, false };
 	struct dma_resv *resv = bo->tbo.base.resv;
+	int ret = 0;
+
+	/* ev_fence may be NULL (no eviction fence currently armed).
+	 * dma_fence_is_signaled() dereferences its argument, so guard it;
+	 * dma_fence_put(NULL) below is a safe no-op.
+	 */
+	if (ev_fence && !dma_fence_is_signaled(ev_fence)) {
+		/* Validate the BO into an allowed placement first so the
+		 * eviction fence is attached to a resident buffer; only
+		 * attach the fence when validation succeeded.
+		 */
+		amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
+		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+		if (!ret)
+			dma_resv_add_fence(resv, ev_fence,
+					   DMA_RESV_USAGE_BOOKKEEP);
+	}
-	dma_resv_add_fence(resv, ev_fence, DMA_RESV_USAGE_BOOKKEEP);
 	dma_fence_put(ev_fence);
+	return ret;
 }
int amdgpu_evf_mgr_rearm(struct amdgpu_eviction_fence_mgr *evf_mgr,
return ev_fence;
}
-void amdgpu_evf_mgr_attach_fence(struct amdgpu_eviction_fence_mgr *evf_mgr,
- struct amdgpu_bo *bo);
+int amdgpu_evf_mgr_attach_fence(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_bo *bo);
int amdgpu_evf_mgr_rearm(struct amdgpu_eviction_fence_mgr *evf_mgr,
struct drm_exec *exec);
void amdgpu_evf_mgr_detach_fence(struct amdgpu_eviction_fence_mgr *evf_mgr,
amdgpu_vm_bo_update_shared(abo);
bo_va = amdgpu_vm_bo_find(vm, abo);
- if (!bo_va)
+ if (!bo_va) {
bo_va = amdgpu_vm_bo_add(adev, vm, abo);
- else
+ r = amdgpu_evf_mgr_attach_fence(&fpriv->evf_mgr, abo);
+ if (r)
+ goto out_unlock;
+ } else {
++bo_va->ref_count;
+ }
- amdgpu_evf_mgr_attach_fence(&fpriv->evf_mgr, abo);
drm_exec_fini(&exec);
/* Validate and add eviction fence to DMABuf imports with dynamic