git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
drm/xe/vm: Remove VM_BIND_OP macro
authorFrancois Dugast <francois.dugast@intel.com>
Wed, 20 Sep 2023 19:29:25 +0000 (15:29 -0400)
committerRodrigo Vivi <rodrigo.vivi@intel.com>
Thu, 21 Dec 2023 16:43:10 +0000 (11:43 -0500)
This macro was necessary when bind operations were shifted, but this
is no longer the case, so remove it to simplify the code.

Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
drivers/gpu/drm/xe/xe_vm.c

index 3ae911ade7e48d7bfb666055169d534749dddbf7..1a0546beef878ce383b71fda1dda7fe59d0fa3e8 100644 (file)
@@ -2273,8 +2273,6 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
        }
 }
 
-#define VM_BIND_OP(op) (op & 0xffff)
-
 static void vm_set_async_error(struct xe_vm *vm, int err)
 {
        lockdep_assert_held(&vm->lock);
@@ -2290,7 +2288,7 @@ static int vm_bind_ioctl_lookup_vma(struct xe_vm *vm, struct xe_bo *bo,
 
        lockdep_assert_held(&vm->lock);
 
-       switch (VM_BIND_OP(op)) {
+       switch (op) {
        case XE_VM_BIND_OP_MAP:
        case XE_VM_BIND_OP_MAP_USERPTR:
                vma = xe_vm_find_overlapping_vma(vm, addr, range);
@@ -2400,10 +2398,10 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
 
        vm_dbg(&vm->xe->drm,
               "op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx",
-              VM_BIND_OP(operation), (ULL)addr, (ULL)range,
+              operation, (ULL)addr, (ULL)range,
               (ULL)bo_offset_or_userptr);
 
-       switch (VM_BIND_OP(operation)) {
+       switch (operation) {
        case XE_VM_BIND_OP_MAP:
        case XE_VM_BIND_OP_MAP_USERPTR:
                ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
@@ -3246,50 +3244,48 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
                        *async = !!(flags & XE_VM_BIND_FLAG_ASYNC);
                } else if (XE_IOCTL_DBG(xe, !*async) ||
                           XE_IOCTL_DBG(xe, !(flags & XE_VM_BIND_FLAG_ASYNC)) ||
-                          XE_IOCTL_DBG(xe, VM_BIND_OP(op) ==
-                                       XE_VM_BIND_OP_RESTART)) {
+                          XE_IOCTL_DBG(xe, op == XE_VM_BIND_OP_RESTART)) {
                        err = -EINVAL;
                        goto free_bind_ops;
                }
 
                if (XE_IOCTL_DBG(xe, !*async &&
-                                VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL)) {
+                                op == XE_VM_BIND_OP_UNMAP_ALL)) {
                        err = -EINVAL;
                        goto free_bind_ops;
                }
 
                if (XE_IOCTL_DBG(xe, !*async &&
-                                VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH)) {
+                                op == XE_VM_BIND_OP_PREFETCH)) {
                        err = -EINVAL;
                        goto free_bind_ops;
                }
 
-               if (XE_IOCTL_DBG(xe, VM_BIND_OP(op) >
-                                XE_VM_BIND_OP_PREFETCH) ||
+               if (XE_IOCTL_DBG(xe, op > XE_VM_BIND_OP_PREFETCH) ||
                    XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
                    XE_IOCTL_DBG(xe, obj && is_null) ||
                    XE_IOCTL_DBG(xe, obj_offset && is_null) ||
-                   XE_IOCTL_DBG(xe, VM_BIND_OP(op) != XE_VM_BIND_OP_MAP &&
+                   XE_IOCTL_DBG(xe, op != XE_VM_BIND_OP_MAP &&
                                 is_null) ||
                    XE_IOCTL_DBG(xe, !obj &&
-                                VM_BIND_OP(op) == XE_VM_BIND_OP_MAP &&
+                                op == XE_VM_BIND_OP_MAP &&
                                 !is_null) ||
                    XE_IOCTL_DBG(xe, !obj &&
-                                VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
+                                op == XE_VM_BIND_OP_UNMAP_ALL) ||
                    XE_IOCTL_DBG(xe, addr &&
-                                VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
+                                op == XE_VM_BIND_OP_UNMAP_ALL) ||
                    XE_IOCTL_DBG(xe, range &&
-                                VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
+                                op == XE_VM_BIND_OP_UNMAP_ALL) ||
                    XE_IOCTL_DBG(xe, obj &&
-                                VM_BIND_OP(op) == XE_VM_BIND_OP_MAP_USERPTR) ||
+                                op == XE_VM_BIND_OP_MAP_USERPTR) ||
                    XE_IOCTL_DBG(xe, obj &&
-                                VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH) ||
+                                op == XE_VM_BIND_OP_PREFETCH) ||
                    XE_IOCTL_DBG(xe, region &&
-                                VM_BIND_OP(op) != XE_VM_BIND_OP_PREFETCH) ||
+                                op != XE_VM_BIND_OP_PREFETCH) ||
                    XE_IOCTL_DBG(xe, !(BIT(region) &
                                       xe->info.mem_region_mask)) ||
                    XE_IOCTL_DBG(xe, obj &&
-                                VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP)) {
+                                op == XE_VM_BIND_OP_UNMAP)) {
                        err = -EINVAL;
                        goto free_bind_ops;
                }
@@ -3297,9 +3293,9 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
                if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) ||
                    XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
                    XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
-                   XE_IOCTL_DBG(xe, !range && VM_BIND_OP(op) !=
+                   XE_IOCTL_DBG(xe, !range && op !=
                                 XE_VM_BIND_OP_RESTART &&
-                                VM_BIND_OP(op) != XE_VM_BIND_OP_UNMAP_ALL)) {
+                                op != XE_VM_BIND_OP_UNMAP_ALL)) {
                        err = -EINVAL;
                        goto free_bind_ops;
                }
@@ -3363,7 +3359,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                goto release_vm_lock;
        }
 
-       if (VM_BIND_OP(bind_ops[0].op) == XE_VM_BIND_OP_RESTART) {
+       if (bind_ops[0].op == XE_VM_BIND_OP_RESTART) {
                if (XE_IOCTL_DBG(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
                        err = -EOPNOTSUPP;
                if (XE_IOCTL_DBG(xe, !err && args->num_syncs))