From: Matthew Brost
Date: Thu, 25 Apr 2024 04:55:03 +0000 (-0700)
Subject: drm/xe: Move migrate to prefetch to op_lock_and_prep function
X-Git-Tag: v6.11-rc1~141^2~26^2~192
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=0a34c12449de4b09f74808c6f6c39205ee5071f0;p=thirdparty%2Fkernel%2Flinux.git

drm/xe: Move migrate to prefetch to op_lock_and_prep function

All non-binding operations in VM bind IOCTL should be in the lock and
prepare step rather than the execution step. Move prefetch to conform
to this pattern.

v2:
 - Rebase
 - New function names (Oak)
 - Update stale comment (Oak)

Cc: Oak Zeng
Signed-off-by: Matthew Brost
Reviewed-by: Oak Zeng
Link: https://patchwork.freedesktop.org/patch/msgid/20240425045513.1913039-4-matthew.brost@intel.com
---

diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index d0905d98de8cc..e7be99acaff2e 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1937,20 +1937,10 @@ static const u32 region_to_mem_type[] = {
 
 static struct dma_fence *
 xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
-	       struct xe_exec_queue *q, u32 region,
-	       struct xe_sync_entry *syncs, u32 num_syncs,
-	       bool first_op, bool last_op)
+	       struct xe_exec_queue *q, struct xe_sync_entry *syncs,
+	       u32 num_syncs, bool first_op, bool last_op)
 {
 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
-	int err;
-
-	xe_assert(vm->xe, region < ARRAY_SIZE(region_to_mem_type));
-
-	if (!xe_vma_has_no_bo(vma)) {
-		err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]);
-		if (err)
-			return ERR_PTR(err);
-	}
 
 	if (vma->tile_mask != (vma->tile_present & ~vma->tile_invalidated)) {
 		return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
@@ -2490,8 +2480,7 @@ static struct dma_fence *op_execute(struct xe_vm *vm, struct xe_vma *vma,
 				     op->flags & XE_VMA_OP_LAST);
 		break;
 	case DRM_GPUVA_OP_PREFETCH:
-		fence = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region,
-				       op->syncs, op->num_syncs,
+		fence = xe_vm_prefetch(vm, vma, op->q, op->syncs, op->num_syncs,
 				       op->flags & XE_VMA_OP_FIRST,
 				       op->flags & XE_VMA_OP_LAST);
 		break;
@@ -2722,9 +2711,20 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
 					    false);
 		break;
 	case DRM_GPUVA_OP_PREFETCH:
+	{
+		struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
+		u32 region = op->prefetch.region;
+
+		xe_assert(vm->xe, region <= ARRAY_SIZE(region_to_mem_type));
+
 		err = vma_lock_and_validate(exec,
-					    gpuva_to_vma(op->base.prefetch.va), true);
+					    gpuva_to_vma(op->base.prefetch.va),
+					    false);
+		if (!err && !xe_vma_has_no_bo(vma))
+			err = xe_bo_migrate(xe_vma_bo(vma),
+					    region_to_mem_type[region]);
 		break;
+	}
 	default:
 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
 	}
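
For reference, a sketch of how the DRM_GPUVA_OP_PREFETCH case in
op_lock_and_prep() reads once the hunks above are applied. This is only a
reconstruction from the diff; the surrounding declarations (exec, vm, op,
err) and the helpers used are the ones already present in
drivers/gpu/drm/xe/xe_vm.c:

	case DRM_GPUVA_OP_PREFETCH:
	{
		struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
		u32 region = op->prefetch.region;

		xe_assert(vm->xe, region <= ARRAY_SIZE(region_to_mem_type));

		/* Lock the VMA's BO under the drm_exec context; no separate
		 * validate is requested here (third argument is false).
		 */
		err = vma_lock_and_validate(exec,
					    gpuva_to_vma(op->base.prefetch.va),
					    false);
		/*
		 * The migrate that xe_vm_prefetch() used to perform now runs
		 * here, in the lock-and-prepare step rather than at execute
		 * time, matching the other non-binding operations.
		 */
		if (!err && !xe_vma_has_no_bo(vma))
			err = xe_bo_migrate(xe_vma_bo(vma),
					    region_to_mem_type[region]);
		break;
	}

With this in place, op_execute() only has to call xe_vm_prefetch() for the
(re)bind side of the operation, which is why the region argument drops out
of its signature.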