return 0;
}
-static void put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm, bool close);
+static void put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm,
+ bool close, const char *reason);
static void detach_vm(struct drm_gem_object *obj, struct drm_gpuvm *vm)
{
drm_gpuvm_bo_for_each_va (vma, vm_bo) {
if (vma->vm != vm)
continue;
- msm_gem_vma_unmap(vma);
+ msm_gem_vma_unmap(vma, "detach");
msm_gem_vma_close(vma);
break;
}
MAX_SCHEDULE_TIMEOUT);
msm_gem_lock_vm_and_obj(&exec, obj, ctx->vm);
- put_iova_spaces(obj, ctx->vm, true);
+ put_iova_spaces(obj, ctx->vm, true, "close");
detach_vm(obj, ctx->vm);
drm_exec_fini(&exec); /* drop locks */
}
* mapping.
*/
static void
-put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm, bool close)
+put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm,
+ bool close, const char *reason)
{
struct drm_gpuvm_bo *vm_bo, *tmp;
drm_gpuvm_bo_get(vm_bo);
drm_gpuvm_bo_for_each_va_safe (vma, vmatmp, vm_bo) {
- msm_gem_vma_unmap(vma);
+ msm_gem_vma_unmap(vma, reason);
if (close)
msm_gem_vma_close(vma);
}
if (!vma)
return 0;
- msm_gem_vma_unmap(vma);
+ msm_gem_vma_unmap(vma, NULL);
msm_gem_vma_close(vma);
return 0;
GEM_WARN_ON(!is_purgeable(msm_obj));
/* Get rid of any iommu mapping(s): */
- put_iova_spaces(obj, NULL, false);
+ put_iova_spaces(obj, NULL, false, "purge");
msm_gem_vunmap(obj);
GEM_WARN_ON(is_unevictable(msm_obj));
/* Get rid of any iommu mapping(s): */
- put_iova_spaces(obj, NULL, false);
+ put_iova_spaces(obj, NULL, false, "evict");
drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
drm_exec_retry_on_contention(&exec);
}
}
- put_iova_spaces(obj, NULL, true);
+ put_iova_spaces(obj, NULL, true, "free");
drm_exec_fini(&exec); /* drop locks */
}
/** @range: size of region to unmap */
uint64_t range;
+ /** @reason: The reason for the unmap */
+ const char *reason;
+
/**
* @queue_id: The id of the submitqueue the operation is performed
* on, or zero for (in particular) UNMAP ops triggered outside of
static void
vm_unmap_op(struct msm_gem_vm *vm, const struct msm_vm_unmap_op *op)
{
-	vm_log(vm, "unmap", op->iova, op->range, op->queue_id);
+	/* Fall back to the generic "unmap" tag when no reason was supplied: */
+	vm_log(vm, op->reason ? op->reason : "unmap",
+	       op->iova, op->range, op->queue_id);
	vm->mmu->funcs->unmap(vm->mmu, op->iova, op->range);
}
}
/* Actually unmap memory for the vma */
-void msm_gem_vma_unmap(struct drm_gpuva *vma)
+void msm_gem_vma_unmap(struct drm_gpuva *vma, const char *reason)
{
struct msm_gem_vm *vm = to_msm_vm(vma->vm);
struct msm_gem_vma *msm_vma = to_msm_vma(vma);
vm_unmap_op(vm, &(struct msm_vm_unmap_op){
.iova = vma->va.addr,
.range = vma->va.range,
+ .reason = reason,
});
if (!vm->managed)
drm_exec_retry_on_contention(&exec);
}
- msm_gem_vma_unmap(vma);
+ msm_gem_vma_unmap(vma, "close");
msm_gem_vma_close(vma);
if (obj) {