drm/xe: Move migrate to prefetch to op_lock_and_prep function
author Matthew Brost <matthew.brost@intel.com>
Thu, 25 Apr 2024 04:55:03 +0000 (21:55 -0700)
committer Matthew Brost <matthew.brost@intel.com>
Fri, 26 Apr 2024 19:09:57 +0000 (12:09 -0700)
All non-binding operations in the VM bind IOCTL should be done in the lock
and prepare step rather than in the execution step. Move prefetch to conform
to this pattern.
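
For context, the split this series converges on is a two-phase flow:
op_lock_and_prep() takes locks, validates BOs and performs any blocking work
such as migration, while op_execute() only issues the bind and returns a
fence. Below is a minimal, hypothetical userspace-style sketch of that split;
the names (prefetch_op, migrate_bo, lock_and_prep, execute) are invented for
illustration and are not the driver's actual API beyond the two xe_vm.c entry
points named above.

/*
 * Sketch only: hypothetical stand-ins for the real xe_vm.c machinery.
 * Only the phase split (blocking prep first, fence-producing execute
 * second) mirrors the driver.
 */
#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

struct prefetch_op {
	bool has_bo;		/* VMA backed by a BO? */
	unsigned int region;	/* target memory region */
	bool migrated;		/* set once the BO has been moved */
};

/* Stand-in for xe_bo_migrate(): blocking work belongs in the prep phase. */
static int migrate_bo(struct prefetch_op *op)
{
	if (op->region > 2)
		return -EINVAL;
	op->migrated = true;
	return 0;
}

/* Phase 1: lock, validate and do all blocking prep (cf. op_lock_and_prep). */
static int lock_and_prep(struct prefetch_op *op)
{
	/* locking/validation of the VMA would happen here */
	if (op->has_bo)
		return migrate_bo(op);
	return 0;
}

/* Phase 2: only issue the bind; no migration or other blocking setup left. */
static int execute(const struct prefetch_op *op)
{
	printf("bind issued (migrated=%d)\n", op->migrated);
	return 0;
}

int main(void)
{
	struct prefetch_op op = { .has_bo = true, .region = 1 };
	int err = lock_and_prep(&op);

	if (!err)
		err = execute(&op);
	return err ? 1 : 0;
}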

v2:
 - Rebase
 - New function names (Oak)
 - Update stale comment (Oak)

Cc: Oak Zeng <oak.zeng@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Oak Zeng <oak.zeng@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240425045513.1913039-4-matthew.brost@intel.com
drivers/gpu/drm/xe/xe_vm.c

index d0905d98de8ccd0f0f605d547417ceceb39ebe99..e7be99acaff2eb89d2fdef370bc95910988ee24f 100644
@@ -1937,20 +1937,10 @@ static const u32 region_to_mem_type[] = {
 
 static struct dma_fence *
 xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
-              struct xe_exec_queue *q, u32 region,
-              struct xe_sync_entry *syncs, u32 num_syncs,
-              bool first_op, bool last_op)
+              struct xe_exec_queue *q, struct xe_sync_entry *syncs,
+              u32 num_syncs, bool first_op, bool last_op)
 {
        struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
-       int err;
-
-       xe_assert(vm->xe, region < ARRAY_SIZE(region_to_mem_type));
-
-       if (!xe_vma_has_no_bo(vma)) {
-               err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]);
-               if (err)
-                       return ERR_PTR(err);
-       }
 
        if (vma->tile_mask != (vma->tile_present & ~vma->tile_invalidated)) {
                return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
@@ -2490,8 +2480,7 @@ static struct dma_fence *op_execute(struct xe_vm *vm, struct xe_vma *vma,
                                     op->flags & XE_VMA_OP_LAST);
                break;
        case DRM_GPUVA_OP_PREFETCH:
-               fence = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region,
-                                      op->syncs, op->num_syncs,
+               fence = xe_vm_prefetch(vm, vma, op->q, op->syncs, op->num_syncs,
                                       op->flags & XE_VMA_OP_FIRST,
                                       op->flags & XE_VMA_OP_LAST);
                break;
@@ -2722,9 +2711,20 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
                                            false);
                break;
        case DRM_GPUVA_OP_PREFETCH:
+       {
+               struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
+               u32 region = op->prefetch.region;
+
+               xe_assert(vm->xe, region <= ARRAY_SIZE(region_to_mem_type));
+
                err = vma_lock_and_validate(exec,
-                                           gpuva_to_vma(op->base.prefetch.va), true);
+                                           gpuva_to_vma(op->base.prefetch.va),
+                                           false);
+               if (!err && !xe_vma_has_no_bo(vma))
+                       err = xe_bo_migrate(xe_vma_bo(vma),
+                                           region_to_mem_type[region]);
                break;
+       }
        default:
                drm_warn(&vm->xe->drm, "NOT POSSIBLE");
        }