Commit 9ce4aef9a5b1 ("drm/gpuvm: take GEM lock inside
drm_gpuvm_bo_obtain_prealloc()") updated drm_gpuvm_bo_obtain_prealloc()
to take locks internally, which means that it is only usable in
immediate mode.

drm_gpuvm_bo_obtain(), on the other hand, requires the caller to take
the locks, and is therefore only usable in staged mode. This means that
there is now one obtain variant for each mode GPUVM can be used in.

To reflect this, add a warning when the function is used in immediate
mode, and rename it with a _locked() suffix to make it clear that the
caller is required to take the locks.
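
The expected calling pattern is thus roughly the following sketch,
mirroring the pvr and nouveau updates below (gpuvm, obj and vm_bo stand
in for the driver's own pointers):

  /* caller holds the lock protecting the GEMs gpuva list */
  dma_resv_lock(obj->resv, NULL);
  vm_bo = drm_gpuvm_bo_obtain_locked(gpuvm, obj);
  dma_resv_unlock(obj->resv);
  if (IS_ERR(vm_bo))
          return PTR_ERR(vm_bo);
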
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Signed-off-by: Alice Ryhl <aliceryhl@google.com>
Link: https://patch.msgid.link/20260108-gpuvm-rust-v2-2-dbd014005a0b@google.com
[ Slightly reword commit message to refer to commit 9ce4aef9a5b1
  ("drm/gpuvm: take GEM lock inside drm_gpuvm_bo_obtain_prealloc()").
  - Danilo ]
Signed-off-by: Danilo Krummrich <dakr@kernel.org>
---
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
* count of the &drm_gpuvm_bo accordingly. If not found, allocates a new
* &drm_gpuvm_bo.
*
+ * Requires the lock for the GEMs gpuva list.
+ *
* A new &drm_gpuvm_bo is added to the GEMs gpuva list.
*
* Returns: a pointer to the &drm_gpuvm_bo on success, an ERR_PTR on failure
*/
struct drm_gpuvm_bo *
-drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
- struct drm_gem_object *obj)
+drm_gpuvm_bo_obtain_locked(struct drm_gpuvm *gpuvm,
+ struct drm_gem_object *obj)
{
struct drm_gpuvm_bo *vm_bo;
+ /*
+ * In immediate mode this would require the caller to hold the GEMs
+ * gpuva mutex, but it's not okay to allocate while holding that lock,
+ * and this function allocates. Immediate mode drivers should use
+ * drm_gpuvm_bo_obtain_prealloc() instead.
+ */
+ drm_WARN_ON(gpuvm->drm, drm_gpuvm_immediate_mode(gpuvm));
+
vm_bo = drm_gpuvm_bo_find(gpuvm, obj);
if (vm_bo)
return vm_bo;
[...]
return vm_bo;
}
-EXPORT_SYMBOL_GPL(drm_gpuvm_bo_obtain);
+EXPORT_SYMBOL_GPL(drm_gpuvm_bo_obtain_locked);
/**
* drm_gpuvm_bo_obtain_prealloc() - obtains an instance of the &drm_gpuvm_bo
diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c
bind_op->type = PVR_VM_BIND_TYPE_MAP;
dma_resv_lock(obj->resv, NULL);
- bind_op->gpuvm_bo = drm_gpuvm_bo_obtain(&vm_ctx->gpuvm_mgr, obj);
+ bind_op->gpuvm_bo = drm_gpuvm_bo_obtain_locked(&vm_ctx->gpuvm_mgr, obj);
dma_resv_unlock(obj->resv);
if (IS_ERR(bind_op->gpuvm_bo))
return PTR_ERR(bind_op->gpuvm_bo);
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
* embedded in any larger driver structure. The GEM object holds a list of
* drm_gpuvm_bo, which in turn holds a list of msm_gem_vma. A linked vma
* holds a reference to the vm_bo, and drops it when the vma is unlinked.
- * So we just need to call drm_gpuvm_bo_obtain() to return a ref to an
+ * So we just need to call drm_gpuvm_bo_obtain_locked() to return a ref to an
* existing vm_bo, or create a new one. Once the vma is linked, the ref
* to the vm_bo can be dropped (since the vma is holding one).
*/
if (!obj)
return &vma->base;
- vm_bo = drm_gpuvm_bo_obtain(&vm->base, obj);
+ vm_bo = drm_gpuvm_bo_obtain_locked(&vm->base, obj);
if (IS_ERR(vm_bo)) {
ret = PTR_ERR(vm_bo);
goto err_va_remove;
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
return -ENOENT;
dma_resv_lock(obj->resv, NULL);
- op->vm_bo = drm_gpuvm_bo_obtain(&uvmm->base, obj);
+ op->vm_bo = drm_gpuvm_bo_obtain_locked(&uvmm->base, obj);
dma_resv_unlock(obj->resv);
if (IS_ERR(op->vm_bo))
return PTR_ERR(op->vm_bo);
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
xe_bo_assert_held(bo);
- vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
+ vm_bo = drm_gpuvm_bo_obtain_locked(vma->gpuva.vm, &bo->ttm.base);
if (IS_ERR(vm_bo)) {
xe_vma_free(vma);
return ERR_CAST(vm_bo);
[...]
if (err)
return ERR_PTR(err);
- vm_bo = drm_gpuvm_bo_obtain(&vm->gpuvm, obj);
+ vm_bo = drm_gpuvm_bo_obtain_locked(&vm->gpuvm, obj);
if (IS_ERR(vm_bo)) {
xe_bo_unlock(bo);
return ERR_CAST(vm_bo);
diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
struct drm_gem_object *obj);
struct drm_gpuvm_bo *
-drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
- struct drm_gem_object *obj);
+drm_gpuvm_bo_obtain_locked(struct drm_gpuvm *gpuvm,
+ struct drm_gem_object *obj);
struct drm_gpuvm_bo *
drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *vm_bo);