]> git.ipfire.org Git - thirdparty/linux.git/commitdiff
drm/xe: Fixup unwind on VM ops errors
authorMatthew Brost <matthew.brost@intel.com>
Mon, 14 Aug 2023 03:19:20 +0000 (20:19 -0700)
committerRodrigo Vivi <rodrigo.vivi@intel.com>
Thu, 21 Dec 2023 16:40:52 +0000 (11:40 -0500)
Remap ops have 3 parts: unmap, prev, and next. The commit step can fail
on any of these. Add a flag for each of these so the unwind is only done
for the steps that have been committed.

v2: (Rodrigo) Use bit macros

Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
drivers/gpu/drm/xe/xe_vm.c
drivers/gpu/drm/xe/xe_vm_types.h

index a774f9632ddae3a561cfff4d06805e84ea86d8e8..71f61806df77ba94cf4dcee23dc2bb55955dcb2c 100644 (file)
@@ -2622,18 +2622,25 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
        switch (op->base.op) {
        case DRM_GPUVA_OP_MAP:
                err |= xe_vm_insert_vma(vm, op->map.vma);
+               if (!err)
+                       op->flags |= XE_VMA_OP_COMMITTED;
                break;
        case DRM_GPUVA_OP_REMAP:
                prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
                                 true);
+               op->flags |= XE_VMA_OP_COMMITTED;
 
                if (op->remap.prev) {
                        err |= xe_vm_insert_vma(vm, op->remap.prev);
+                       if (!err)
+                               op->flags |= XE_VMA_OP_PREV_COMMITTED;
                        if (!err && op->remap.skip_prev)
                                op->remap.prev = NULL;
                }
                if (op->remap.next) {
                        err |= xe_vm_insert_vma(vm, op->remap.next);
+                       if (!err)
+                               op->flags |= XE_VMA_OP_NEXT_COMMITTED;
                        if (!err && op->remap.skip_next)
                                op->remap.next = NULL;
                }
@@ -2646,15 +2653,15 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
                break;
        case DRM_GPUVA_OP_UNMAP:
                prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
+               op->flags |= XE_VMA_OP_COMMITTED;
                break;
        case DRM_GPUVA_OP_PREFETCH:
-               /* Nothing to do */
+               op->flags |= XE_VMA_OP_COMMITTED;
                break;
        default:
                XE_WARN_ON("NOT POSSIBLE");
        }
 
-       op->flags |= XE_VMA_OP_COMMITTED;
        return err;
 }
 
@@ -2859,7 +2866,8 @@ static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op)
 }
 
 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
-                            bool post_commit)
+                            bool post_commit, bool prev_post_commit,
+                            bool next_post_commit)
 {
        lockdep_assert_held_write(&vm->lock);
 
@@ -2886,11 +2894,11 @@ static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
                struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va);
 
                if (op->remap.prev) {
-                       prep_vma_destroy(vm, op->remap.prev, post_commit);
+                       prep_vma_destroy(vm, op->remap.prev, prev_post_commit);
                        xe_vma_destroy_unlocked(op->remap.prev);
                }
                if (op->remap.next) {
-                       prep_vma_destroy(vm, op->remap.next, post_commit);
+                       prep_vma_destroy(vm, op->remap.next, next_post_commit);
                        xe_vma_destroy_unlocked(op->remap.next);
                }
                down_read(&vm->userptr.notifier_lock);
@@ -3029,7 +3037,9 @@ static int vm_bind_ioctl_ops_commit(struct xe_vm *vm,
 
 unwind:
        list_for_each_entry_reverse(op, ops_list, link)
-               xe_vma_op_unwind(vm, op, op->flags & XE_VMA_OP_COMMITTED);
+               xe_vma_op_unwind(vm, op, op->flags & XE_VMA_OP_COMMITTED,
+                                op->flags & XE_VMA_OP_PREV_COMMITTED,
+                                op->flags & XE_VMA_OP_NEXT_COMMITTED);
        list_for_each_entry_safe(op, next, ops_list, link)
                xe_vma_op_cleanup(vm, op);
 
@@ -3056,7 +3066,7 @@ static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
                drm_gpuva_for_each_op(__op, __ops) {
                        struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
 
-                       xe_vma_op_unwind(vm, op, false);
+                       xe_vma_op_unwind(vm, op, false, false, false);
                }
        }
 }
index 40ce8953bacbb2a0026f05d829d1957a364abe59..dfbc53e56a8696b55ce900f9682e8491d7af1767 100644 (file)
@@ -370,11 +370,15 @@ struct xe_vma_op_prefetch {
 /** enum xe_vma_op_flags - flags for VMA operation */
 enum xe_vma_op_flags {
        /** @XE_VMA_OP_FIRST: first VMA operation for a set of syncs */
-       XE_VMA_OP_FIRST         = BIT(0),
+       XE_VMA_OP_FIRST                 = BIT(0),
        /** @XE_VMA_OP_LAST: last VMA operation for a set of syncs */
-       XE_VMA_OP_LAST          = BIT(1),
+       XE_VMA_OP_LAST                  = BIT(1),
        /** @XE_VMA_OP_COMMITTED: VMA operation committed */
-       XE_VMA_OP_COMMITTED     = BIT(2),
+       XE_VMA_OP_COMMITTED             = BIT(2),
+       /** @XE_VMA_OP_PREV_COMMITTED: Previous VMA operation committed */
+       XE_VMA_OP_PREV_COMMITTED        = BIT(3),
+       /** @XE_VMA_OP_NEXT_COMMITTED: Next VMA operation committed */
+       XE_VMA_OP_NEXT_COMMITTED        = BIT(4),
 };
 
 /** struct xe_vma_op - VMA operation */