drm/xe: Move ufence check to op_lock_and_prep
author Matthew Brost <matthew.brost@intel.com>
Thu, 25 Apr 2024 04:55:10 +0000 (21:55 -0700)
committer Matthew Brost <matthew.brost@intel.com>
Fri, 26 Apr 2024 19:10:05 +0000 (12:10 -0700)
Rather than checking for an unsignaled ufence at unbind time, check for
this during the op_lock_and_prep function. This helps with the
transition to 1 job per VM bind IOCTL.

v2:
 - Rebase
v3:
 - Fix typo in commit message (Oak)

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Oak Zeng <oak.zeng@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240425045513.1913039-11-matthew.brost@intel.com
drivers/gpu/drm/xe/xe_vm.c

index 2f19372aaad54282c5e960afcc851a5af04502bd..40c1258c3282e704084d92f85b60d33a0fd7c817 100644
@@ -1653,16 +1653,6 @@ xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
 
        trace_xe_vma_unbind(vma);
 
-       if (vma->ufence) {
-               struct xe_user_fence * const f = vma->ufence;
-
-               if (!xe_sync_ufence_get_status(f))
-                       return ERR_PTR(-EBUSY);
-
-               vma->ufence = NULL;
-               xe_sync_ufence_put(f);
-       }
-
        if (number_tiles > 1) {
                fences = kmalloc_array(number_tiles, sizeof(*fences),
                                       GFP_KERNEL);
@@ -2717,6 +2707,21 @@ static int vma_lock_and_validate(struct drm_exec *exec, struct xe_vma *vma,
        return err;
 }
 
+static int check_ufence(struct xe_vma *vma)
+{
+       if (vma->ufence) {
+               struct xe_user_fence * const f = vma->ufence;
+
+               if (!xe_sync_ufence_get_status(f))
+                       return -EBUSY;
+
+               vma->ufence = NULL;
+               xe_sync_ufence_put(f);
+       }
+
+       return 0;
+}
+
 static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
                            struct xe_vma_op *op)
 {
@@ -2729,6 +2734,10 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
                                            op->map.immediate);
                break;
        case DRM_GPUVA_OP_REMAP:
+               err = check_ufence(gpuva_to_vma(op->base.remap.unmap->va));
+               if (err)
+                       break;
+
                err = vma_lock_and_validate(exec,
                                            gpuva_to_vma(op->base.remap.unmap->va),
                                            false);
@@ -2738,6 +2747,10 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
                        err = vma_lock_and_validate(exec, op->remap.next, true);
                break;
        case DRM_GPUVA_OP_UNMAP:
+               err = check_ufence(gpuva_to_vma(op->base.unmap.va));
+               if (err)
+                       break;
+
                err = vma_lock_and_validate(exec,
                                            gpuva_to_vma(op->base.unmap.va),
                                            false);