Convert to using the gpuvm's resv object for serializing access to the VM.
This way we can use the drm_exec helper for dealing with deadlock
detection and backoff.
This will let us deal with upcoming locking order conflicts with the
VM_BIND implementation (ie. in some scenarios we need to acquire the obj
lock first, for example to iterate all the VMs an obj is bound in, and in
other scenarios we need to acquire the VM lock first).
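The resulting pattern (a sketch of the msm_gem_lock_vm_and_obj() helper
added below, with error handling elided) looks roughly like:

    struct drm_exec exec;

    /* Lock the VM's resv obj and, if distinct, the GEM obj's resv: */
    drm_exec_init(&exec, 0, 2);
    drm_exec_until_all_locked (&exec) {
        ret = drm_exec_lock_obj(&exec, drm_gpuvm_resv_obj(&vm->base));
        if (!ret && (obj->resv != drm_gpuvm_resv(&vm->base)))
            ret = drm_exec_lock_obj(&exec, obj);
        drm_exec_retry_on_contention(&exec);
    }

    /* ... operate on the VM and/or obj ... */

    drm_exec_fini(&exec);   /* drop locks */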
Signed-off-by: Rob Clark <robdclark@chromium.org>
Signed-off-by: Rob Clark <robin.clark@oss.qualcomm.com>
Tested-by: Antonino Maniscalco <antomani103@gmail.com>
Reviewed-by: Antonino Maniscalco <antomani103@gmail.com>
Patchwork: https://patchwork.freedesktop.org/patch/661478/
static void detach_vm(struct drm_gem_object *obj, struct msm_gem_vm *vm)
{
msm_gem_assert_locked(obj);
+ drm_gpuvm_resv_assert_held(&vm->base);
struct drm_gpuvm_bo *vm_bo = drm_gpuvm_bo_find(&vm->base, obj);
if (vm_bo) {
static void msm_gem_close(struct drm_gem_object *obj, struct drm_file *file)
{
struct msm_context *ctx = file->driver_priv;
+ struct drm_exec exec;
update_ctx_mem(file, -obj->size);
dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ, false,
msecs_to_jiffies(1000));
- msm_gem_lock(obj);
+ msm_gem_lock_vm_and_obj(&exec, obj, ctx->vm);
put_iova_spaces(obj, &ctx->vm->base, true);
detach_vm(obj, ctx->vm);
- msm_gem_unlock(obj);
+ drm_exec_fini(&exec); /* drop locks */
}
/*
struct msm_gem_vm *vm, uint64_t *iova,
u64 range_start, u64 range_end)
{
+ struct drm_exec exec;
int ret;
- msm_gem_lock(obj);
+ msm_gem_lock_vm_and_obj(&exec, obj, vm);
ret = get_and_pin_iova_range_locked(obj, vm, iova, range_start, range_end);
- msm_gem_unlock(obj);
+ drm_exec_fini(&exec); /* drop locks */
return ret;
}
struct msm_gem_vm *vm, uint64_t *iova)
{
struct msm_gem_vma *vma;
+ struct drm_exec exec;
int ret = 0;
- msm_gem_lock(obj);
+ msm_gem_lock_vm_and_obj(&exec, obj, vm);
vma = get_vma_locked(obj, vm, 0, U64_MAX);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
} else {
*iova = vma->base.va.addr;
}
- msm_gem_unlock(obj);
+ drm_exec_fini(&exec); /* drop locks */
return ret;
}
int msm_gem_set_iova(struct drm_gem_object *obj,
struct msm_gem_vm *vm, uint64_t iova)
{
+ struct drm_exec exec;
int ret = 0;
- msm_gem_lock(obj);
+ msm_gem_lock_vm_and_obj(&exec, obj, vm);
if (!iova) {
ret = clear_iova(obj, vm);
} else {
ret = -EBUSY;
}
}
- msm_gem_unlock(obj);
+ drm_exec_fini(&exec); /* drop locks */
return ret;
}
struct msm_gem_vm *vm)
{
struct msm_gem_vma *vma;
+ struct drm_exec exec;
- msm_gem_lock(obj);
+ msm_gem_lock_vm_and_obj(&exec, obj, vm);
vma = lookup_vma(obj, vm);
if (vma) {
msm_gem_unpin_locked(obj);
}
detach_vm(obj, vm);
- msm_gem_unlock(obj);
+ drm_exec_fini(&exec); /* drop locks */
}
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct drm_device *dev = obj->dev;
struct msm_drm_private *priv = dev->dev_private;
+ struct drm_exec exec;
mutex_lock(&priv->obj_lock);
list_del(&msm_obj->node);
mutex_unlock(&priv->obj_lock);
+ /*
+ * We need to lock any VMs the object is still attached to, but not
+ * the object itself (see explanation in msm_gem_assert_locked()),
+ * so just open-code this special case:
+ */
+ drm_exec_init(&exec, 0, 0);
+ drm_exec_until_all_locked (&exec) {
+ struct drm_gpuvm_bo *vm_bo;
+ drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
+ drm_exec_lock_obj(&exec, drm_gpuvm_resv_obj(vm_bo->vm));
+ drm_exec_retry_on_contention(&exec);
+ }
+ }
put_iova_spaces(obj, NULL, true);
+ drm_exec_fini(&exec); /* drop locks */
if (drm_gem_is_imported(obj)) {
GEM_WARN_ON(msm_obj->vaddr);
*/
struct drm_mm mm;
- /** @mm_lock: protects @mm node allocation/removal */
- struct spinlock mm_lock;
-
- /** @vm_lock: protects gpuvm insert/remove/traverse */
- struct mutex vm_lock;
-
/** @mmu: The mmu object which manages the pgtables */
struct msm_mmu *mmu;
dma_resv_unlock(obj->resv);
}
+/**
+ * msm_gem_lock_vm_and_obj() - Helper to lock an obj + VM
+ * @exec: the exec context helper which will be initialized
+ * @obj: the GEM object to lock
+ * @vm: the VM to lock
+ *
+ * Operations which modify a VM frequently need to lock both the VM and
+ * the object being mapped/unmapped/etc. This helper uses drm_exec to
+ * acquire both locks, dealing with potential deadlock/backoff scenarios
+ * which arise when multiple locks are involved.
+ */
+static inline int
+msm_gem_lock_vm_and_obj(struct drm_exec *exec,
+ struct drm_gem_object *obj,
+ struct msm_gem_vm *vm)
+{
+ int ret = 0;
+
+ drm_exec_init(exec, 0, 2);
+ drm_exec_until_all_locked (exec) {
+ ret = drm_exec_lock_obj(exec, drm_gpuvm_resv_obj(&vm->base));
+ if (!ret && (obj->resv != drm_gpuvm_resv(&vm->base)))
+ ret = drm_exec_lock_obj(exec, obj);
+ drm_exec_retry_on_contention(exec);
+ if (GEM_WARN_ON(ret))
+ break;
+ }
+
+ return ret;
+}
+
static inline void
msm_gem_assert_locked(struct drm_gem_object *obj)
{
return count;
}
+static bool
+with_vm_locks(struct ww_acquire_ctx *ticket,
+ void (*fn)(struct drm_gem_object *obj),
+ struct drm_gem_object *obj)
+{
+ /*
+ * Track last locked entry for unwinding locks in error and
+ * success paths
+ */
+ struct drm_gpuvm_bo *vm_bo, *last_locked = NULL;
+ int ret = 0;
+
+ drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
+ struct dma_resv *resv = drm_gpuvm_resv(vm_bo->vm);
+
+ if (resv == obj->resv)
+ continue;
+
+ ret = dma_resv_lock(resv, ticket);
+
+ /*
+ * Since we already skip the case when the VM and obj
+ * share a resv (ie. _NO_SHARE objs), we don't expect
+ * to hit a double-locking scenario... which the lock
+ * unwinding cannot really cope with.
+ */
+ WARN_ON(ret == -EALREADY);
+
+ /*
+ * Don't bother with the slow-lock / backoff / retry sequence;
+ * if we can't get the lock, just give up and move on to the
+ * next object.
+ */
+ if (ret)
+ goto out_unlock;
+
+ /*
+ * Hold a ref to prevent the vm_bo from being freed
+ * and removed from the obj's gpuva list, as that would
+ * result in missing the unlock below
+ */
+ drm_gpuvm_bo_get(vm_bo);
+
+ last_locked = vm_bo;
+ }
+
+ fn(obj);
+
+out_unlock:
+ if (last_locked) {
+ drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
+ struct dma_resv *resv = drm_gpuvm_resv(vm_bo->vm);
+
+ if (resv == obj->resv)
+ continue;
+
+ dma_resv_unlock(resv);
+
+ /* Drop the ref taken while locking: */
+ drm_gpuvm_bo_put(vm_bo);
+
+ if (last_locked == vm_bo)
+ break;
+ }
+ }
+
+ return ret == 0;
+}
+
static bool
purge(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket)
{
if (msm_gem_active(obj))
return false;
- msm_gem_purge(obj);
-
- return true;
+ return with_vm_locks(ticket, msm_gem_purge, obj);
}
static bool
if (msm_gem_active(obj))
return false;
- msm_gem_evict(obj);
-
- return true;
+ return with_vm_locks(ticket, msm_gem_evict, obj);
}
static bool
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
struct msm_drm_private *priv = shrinker->private_data;
+ struct ww_acquire_ctx ticket;
struct {
struct drm_gem_lru *lru;
bool (*shrink)(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket);
drm_gem_lru_scan(stages[i].lru, nr,
&stages[i].remaining,
stages[i].shrink,
- NULL);
+ &ticket);
nr -= stages[i].freed;
freed += stages[i].freed;
remaining += stages[i].remaining;
/* This is where we make sure all the bo's are reserved and pin'd: */
static int submit_lock_objects(struct msm_gem_submit *submit)
{
+ unsigned flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
int ret;
- drm_exec_init(&submit->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, submit->nr_bos);
+ drm_exec_init(&submit->exec, flags, submit->nr_bos);
drm_exec_until_all_locked (&submit->exec) {
+ ret = drm_exec_lock_obj(&submit->exec,
+ drm_gpuvm_resv_obj(&submit->vm->base));
+ drm_exec_retry_on_contention(&submit->exec);
+ if (ret)
+ goto error;
for (unsigned i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = submit->bos[i].obj;
ret = drm_exec_prepare_obj(&submit->exec, obj, 1);
GEM_WARN_ON(vma->mapped);
- spin_lock(&vm->mm_lock);
+ drm_gpuvm_resv_assert_held(&vm->base);
+
if (vma->base.va.addr)
drm_mm_remove_node(&vma->node);
- spin_unlock(&vm->mm_lock);
- mutex_lock(&vm->vm_lock);
drm_gpuva_remove(&vma->base);
drm_gpuva_unlink(&vma->base);
- mutex_unlock(&vm->vm_lock);
kfree(vma);
}
struct msm_gem_vma *vma;
int ret;
+ drm_gpuvm_resv_assert_held(&vm->base);
+
vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (!vma)
return ERR_PTR(-ENOMEM);
if (vm->managed) {
- spin_lock(&vm->mm_lock);
ret = drm_mm_insert_node_in_range(&vm->mm, &vma->node,
obj->size, PAGE_SIZE, 0,
range_start, range_end, 0);
- spin_unlock(&vm->mm_lock);
if (ret)
goto err_free_vma;
drm_gpuva_init(&vma->base, range_start, range_end - range_start, obj, 0);
vma->mapped = false;
- mutex_lock(&vm->vm_lock);
ret = drm_gpuva_insert(&vm->base, &vma->base);
- mutex_unlock(&vm->vm_lock);
if (ret)
goto err_free_range;
goto err_va_remove;
}
- mutex_lock(&vm->vm_lock);
drm_gpuvm_bo_extobj_add(vm_bo);
drm_gpuva_link(&vma->base, vm_bo);
- mutex_unlock(&vm->vm_lock);
GEM_WARN_ON(drm_gpuvm_bo_put(vm_bo));
return vma;
err_va_remove:
- mutex_lock(&vm->vm_lock);
drm_gpuva_remove(&vma->base);
- mutex_unlock(&vm->vm_lock);
err_free_range:
if (vm->managed)
drm_mm_remove_node(&vma->node);
msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name,
u64 va_start, u64 va_size, bool managed)
{
+ /*
+ * We mostly want to use DRM_GPUVM_RESV_PROTECTED, except that
+ * makes drm_gpuvm_bo_evict() a no-op for extobjs (ie. we lose
+ * tracking that an extobj is evicted) :facepalm:
+ */
enum drm_gpuvm_flags flags = 0;
struct msm_gem_vm *vm;
struct drm_gem_object *dummy_gem;
va_start, va_size, 0, 0, &msm_gpuvm_ops);
drm_gem_object_put(dummy_gem);
- spin_lock_init(&vm->mm_lock);
- mutex_init(&vm->vm_lock);
-
vm->mmu = mmu;
vm->managed = managed;