return 0;
}
-static void
-amdgpu_gem_update_bo_mapping(struct drm_file *filp,
- struct amdgpu_bo_va *bo_va,
- uint32_t operation,
- uint64_t point,
- struct dma_fence *fence,
- struct drm_syncobj *syncobj,
- struct dma_fence_chain *chain)
-{
- struct amdgpu_bo *bo = bo_va ? bo_va->base.bo : NULL;
- struct amdgpu_fpriv *fpriv = filp->driver_priv;
- struct amdgpu_vm *vm = &fpriv->vm;
- struct dma_fence *last_update;
-
- if (!syncobj)
- return;
-
- /* Find the last update fence */
- switch (operation) {
- case AMDGPU_VA_OP_MAP:
- case AMDGPU_VA_OP_REPLACE:
- if (bo && (bo->tbo.base.resv == vm->root.bo->tbo.base.resv))
- last_update = vm->last_update;
- else
- last_update = bo_va->last_pt_update;
- break;
- case AMDGPU_VA_OP_UNMAP:
- case AMDGPU_VA_OP_CLEAR:
- last_update = fence;
- break;
- default:
- return;
- }
-
- /* Add fence to timeline */
- if (!point)
- drm_syncobj_replace_fence(syncobj, last_update);
- else
- drm_syncobj_add_point(syncobj, chain, last_update, point);
-}
-
static struct dma_fence *
amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
			struct amdgpu_vm *vm,
			struct amdgpu_bo_va *bo_va,
			uint32_t operation)
{
- struct dma_fence *fence = dma_fence_get_stub();
+ struct dma_fence *clear_fence = dma_fence_get_stub();
+ struct dma_fence *last_update = NULL;
int r;
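+ /* Nothing to do if the VM isn't ready yet; return the (stub) clear fence. */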
if (!amdgpu_vm_ready(vm))
- return fence;
+ return clear_fence;
- r = amdgpu_vm_clear_freed(adev, vm, &fence);
+ /* First clear freed BOs and get a fence for that work, if any. */
+ r = amdgpu_vm_clear_freed(adev, vm, &clear_fence);
if (r)
goto error;
+ /* For MAP/REPLACE we also need to update the BO mappings. */
if (operation == AMDGPU_VA_OP_MAP ||
operation == AMDGPU_VA_OP_REPLACE) {
r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
	goto error;
}
+ /* Always update PDEs after we touched the mappings. */
r = amdgpu_vm_update_pdes(adev, vm, false);
+ if (r)
+ goto error;
+
+ /*
+ * Decide which fence represents the "last update" for this VM/BO:
+ *
+ * - For MAP/REPLACE we want the PT update fence, which is tracked as
+ * either vm->last_update (for always-valid BOs) or bo_va->last_pt_update
+ * (for per-BO updates).
+ *
+ * - For UNMAP/CLEAR we rely on the fence returned by
+ * amdgpu_vm_clear_freed(), which already covers the page table work
+ * for the removed mappings.
+ */
+ switch (operation) {
+ case AMDGPU_VA_OP_MAP:
+ case AMDGPU_VA_OP_REPLACE:
+ if (bo_va && bo_va->base.bo) {
+ if (amdgpu_vm_is_bo_always_valid(vm, bo_va->base.bo)) {
+ if (vm->last_update)
+ last_update = dma_fence_get(vm->last_update);
+ } else {
+ if (bo_va->last_pt_update)
+ last_update = dma_fence_get(bo_va->last_pt_update);
+ }
+ }
+ break;
+ case AMDGPU_VA_OP_UNMAP:
+ case AMDGPU_VA_OP_CLEAR:
+ if (clear_fence)
+ last_update = dma_fence_get(clear_fence);
+ break;
+ default:
+ break;
+ }
error:
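+ /* -ERESTARTSYS just means the ioctl was interrupted and will be restarted; don't report it. */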
if (r && r != -ERESTARTSYS)
DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
- return fence;
+ /*
+ * If we managed to pick a more specific last-update fence, prefer it
+ * over the generic clear_fence and drop the extra reference to the
+ * latter.
+ */
+ if (last_update) {
+ dma_fence_put(clear_fence);
+ return last_update;
+ }
+
+ return clear_fence;
}
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
uint64_t vm_size;
int r = 0;
+ /* Validate virtual address range against reserved regions. */
if (args->va_address < AMDGPU_VA_RESERVED_BOTTOM) {
dev_dbg(dev->dev,
"va_address 0x%llx is in reserved area 0x%llx\n",
return -EINVAL;
}
+ /* Validate operation type. */
switch (args->operation) {
case AMDGPU_VA_OP_MAP:
case AMDGPU_VA_OP_UNMAP:
abo = NULL;
}
+ /* Add input syncobj fences (if any) for synchronization. */
r = amdgpu_gem_add_input_fence(filp,
args->input_fence_syncobj_handles,
args->num_syncobj_handles);
if (r)
	goto error;
}
+ /* Resolve the BO-VA mapping for this VM/BO combination. */
if (abo) {
bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
if (!bo_va) {
bo_va = NULL;
}
+ /*
+ * Prepare the timeline syncobj node if the user requested a VM
+ * timeline update. This only allocates/looks up the syncobj and
+ * chain node; the actual fence is attached later.
+ */
r = amdgpu_gem_update_timeline_node(filp,
args->vm_timeline_syncobj_out,
args->vm_timeline_point,
default:
break;
}
+
+ /*
+ * Once the VA operation is done, update the VM and obtain the fence
+ * that represents the last relevant update for this mapping. This
+ * fence can then be exported to the user-visible VM timeline.
+ */
if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !adev->debug_vm) {
fence = amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
args->operation);
- if (timeline_syncobj)
- amdgpu_gem_update_bo_mapping(filp, bo_va,
- args->operation,
- args->vm_timeline_point,
- fence, timeline_syncobj,
- timeline_chain);
- else
- dma_fence_put(fence);
+ if (timeline_syncobj && fence) {
+ if (!args->vm_timeline_point) {
+ /* Replace the existing fence when no point is given. */
+ drm_syncobj_replace_fence(timeline_syncobj,
+ fence);
+ } else {
+ /* Attach the last-update fence at a specific point. */
+ drm_syncobj_add_point(timeline_syncobj,
+ timeline_chain,
+ fence,
+ args->vm_timeline_point);
+ }
+ }
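+ /* The syncobj (if used) takes its own reference to the fence, so drop ours. */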
+ dma_fence_put(fence);
}