drm/xe: Retain vma flags when recreating and splitting vmas for madvise
author     Thomas Hellström <thomas.hellstrom@linux.intel.com>
           Wed, 15 Oct 2025 17:07:25 +0000 (19:07 +0200)
committer  Lucas De Marchi <lucas.demarchi@intel.com>
           Tue, 21 Oct 2025 00:03:39 +0000 (17:03 -0700)
When splitting and restoring vmas for madvise, we only copied the
XE_VMA_SYSTEM_ALLOCATOR flag. That meant we lost flags for read_only,
dumpable and sparse (in case anyone would call madvise for the latter).

Instead, define a mask of relevant flags and ensure all of them are
replicated. To simplify this and make the code a bit less fragile,
remove the conversion to VMA_CREATE flags and instead just pass around
the gpuva flags after the initial conversion from user-space.
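
For illustration, a minimal sketch of the resulting pattern (condensed
from the diff below, not verbatim driver code): one mask gathers every
gpuva flag that must survive a VMA split or recreate, and a single
masked copy replaces the per-flag booleans.

	#define XE_VMA_CREATE_MASK (               \
		XE_VMA_READ_ONLY |                 \
		XE_VMA_DUMPABLE |                  \
		XE_VMA_SYSTEM_ALLOCATOR |          \
		DRM_GPUVA_SPARSE)

	/* One masked copy instead of four separate bool assignments. */
	op->map.vma_flags = vma->gpuva.flags & XE_VMA_CREATE_MASK;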

Fixes: a2eb8aec3ebe ("drm/xe: Reset VMA attributes to default in SVM garbage collector")
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20251015170726.178685-1-thomas.hellstrom@linux.intel.com
(cherry picked from commit b3af8658ec70f2196190c66103478352286aba3b)
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
drivers/gpu/drm/xe/xe_pt.c
drivers/gpu/drm/xe/xe_vm.c
drivers/gpu/drm/xe/xe_vm_types.h

diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index a1c88f9a6c7635e964a8715492d58e4809bc25eb..07f96bda638af486e5fe16badd98140f39f54c91 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -2022,7 +2022,7 @@ static int op_prepare(struct xe_vm *vm,
        case DRM_GPUVA_OP_MAP:
                if ((!op->map.immediate && xe_vm_in_fault_mode(vm) &&
                     !op->map.invalidate_on_bind) ||
-                   op->map.is_cpu_addr_mirror)
+                   (op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR))
                        break;
 
                err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma,
@@ -2252,7 +2252,7 @@ static void op_commit(struct xe_vm *vm,
        switch (op->base.op) {
        case DRM_GPUVA_OP_MAP:
                if ((!op->map.immediate && xe_vm_in_fault_mode(vm)) ||
-                   op->map.is_cpu_addr_mirror)
+                   (op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR))
                        break;
 
                bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence,
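
The two xe_pt.c hunks above replace the dropped op->map.is_cpu_addr_mirror
bool with a direct test on the flags word. A hypothetical inline helper
(not part of the patch) naming the predicate they now open-code:

	/* Hypothetical helper, not in the patch: the test both hunks
	 * above now perform inline on op->map.vma_flags. */
	static inline bool xe_vma_op_map_is_cpu_addr_mirror(const struct xe_vma_op_map *map)
	{
		return map->vma_flags & XE_VMA_SYSTEM_ALLOCATOR;
	}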
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index f602b874e0547591d9008333c18f3de0634c48c7..f7a0931eb66cd9b9649a8574765832efaca91298 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -616,6 +616,12 @@ static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask,
                        vops->pt_update_ops[i].num_ops += inc_val;
 }
 
+#define XE_VMA_CREATE_MASK (               \
+       XE_VMA_READ_ONLY |                  \
+       XE_VMA_DUMPABLE |                   \
+       XE_VMA_SYSTEM_ALLOCATOR |           \
+       DRM_GPUVA_SPARSE)
+
 static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
                                  u8 tile_mask)
 {
@@ -628,8 +634,7 @@ static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
        op->base.map.gem.offset = vma->gpuva.gem.offset;
        op->map.vma = vma;
        op->map.immediate = true;
-       op->map.dumpable = vma->gpuva.flags & XE_VMA_DUMPABLE;
-       op->map.is_null = xe_vma_is_null(vma);
+       op->map.vma_flags = vma->gpuva.flags & XE_VMA_CREATE_MASK;
 }
 
 static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma,
@@ -932,11 +937,6 @@ static void xe_vma_free(struct xe_vma *vma)
                kfree(vma);
 }
 
-#define VMA_CREATE_FLAG_READ_ONLY              BIT(0)
-#define VMA_CREATE_FLAG_IS_NULL                        BIT(1)
-#define VMA_CREATE_FLAG_DUMPABLE               BIT(2)
-#define VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR    BIT(3)
-
 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
                                    struct xe_bo *bo,
                                    u64 bo_offset_or_userptr,
@@ -947,11 +947,8 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
        struct xe_vma *vma;
        struct xe_tile *tile;
        u8 id;
-       bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
-       bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
-       bool dumpable = (flags & VMA_CREATE_FLAG_DUMPABLE);
-       bool is_cpu_addr_mirror =
-               (flags & VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR);
+       bool is_null = (flags & DRM_GPUVA_SPARSE);
+       bool is_cpu_addr_mirror = (flags & XE_VMA_SYSTEM_ALLOCATOR);
 
        xe_assert(vm->xe, start < end);
        xe_assert(vm->xe, end < vm->size);
@@ -972,10 +969,6 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
                if (!vma)
                        return ERR_PTR(-ENOMEM);
 
-               if (is_cpu_addr_mirror)
-                       vma->gpuva.flags |= XE_VMA_SYSTEM_ALLOCATOR;
-               if (is_null)
-                       vma->gpuva.flags |= DRM_GPUVA_SPARSE;
                if (bo)
                        vma->gpuva.gem.obj = &bo->ttm.base;
        }
@@ -986,10 +979,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
        vma->gpuva.vm = &vm->gpuvm;
        vma->gpuva.va.addr = start;
        vma->gpuva.va.range = end - start + 1;
-       if (read_only)
-               vma->gpuva.flags |= XE_VMA_READ_ONLY;
-       if (dumpable)
-               vma->gpuva.flags |= XE_VMA_DUMPABLE;
+       vma->gpuva.flags = flags;
 
        for_each_tile(tile, vm->xe, id)
                vma->tile_mask |= 0x1 << id;
@@ -2272,12 +2262,14 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
                if (__op->op == DRM_GPUVA_OP_MAP) {
                        op->map.immediate =
                                flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
-                       op->map.read_only =
-                               flags & DRM_XE_VM_BIND_FLAG_READONLY;
-                       op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
-                       op->map.is_cpu_addr_mirror = flags &
-                               DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR;
-                       op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
+                       if (flags & DRM_XE_VM_BIND_FLAG_READONLY)
+                               op->map.vma_flags |= XE_VMA_READ_ONLY;
+                       if (flags & DRM_XE_VM_BIND_FLAG_NULL)
+                               op->map.vma_flags |= DRM_GPUVA_SPARSE;
+                       if (flags & DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR)
+                               op->map.vma_flags |= XE_VMA_SYSTEM_ALLOCATOR;
+                       if (flags & DRM_XE_VM_BIND_FLAG_DUMPABLE)
+                               op->map.vma_flags |= XE_VMA_DUMPABLE;
                        op->map.pat_index = pat_index;
                        op->map.invalidate_on_bind =
                                __xe_vm_needs_clear_scratch_pages(vm, flags);
@@ -2590,14 +2582,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
                                .pat_index = op->map.pat_index,
                        };
 
-                       flags |= op->map.read_only ?
-                               VMA_CREATE_FLAG_READ_ONLY : 0;
-                       flags |= op->map.is_null ?
-                               VMA_CREATE_FLAG_IS_NULL : 0;
-                       flags |= op->map.dumpable ?
-                               VMA_CREATE_FLAG_DUMPABLE : 0;
-                       flags |= op->map.is_cpu_addr_mirror ?
-                               VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
+                       flags |= op->map.vma_flags & XE_VMA_CREATE_MASK;
 
                        vma = new_vma(vm, &op->base.map, &default_attr,
                                      flags);
@@ -2606,7 +2591,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
 
                        op->map.vma = vma;
                        if (((op->map.immediate || !xe_vm_in_fault_mode(vm)) &&
-                            !op->map.is_cpu_addr_mirror) ||
+                            !(op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR)) ||
                            op->map.invalidate_on_bind)
                                xe_vma_ops_incr_pt_update_ops(vops,
                                                              op->tile_mask, 1);
@@ -2637,18 +2622,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
                        op->remap.start = xe_vma_start(old);
                        op->remap.range = xe_vma_size(old);
 
-                       flags |= op->base.remap.unmap->va->flags &
-                               XE_VMA_READ_ONLY ?
-                               VMA_CREATE_FLAG_READ_ONLY : 0;
-                       flags |= op->base.remap.unmap->va->flags &
-                               DRM_GPUVA_SPARSE ?
-                               VMA_CREATE_FLAG_IS_NULL : 0;
-                       flags |= op->base.remap.unmap->va->flags &
-                               XE_VMA_DUMPABLE ?
-                               VMA_CREATE_FLAG_DUMPABLE : 0;
-                       flags |= xe_vma_is_cpu_addr_mirror(old) ?
-                               VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
-
+                       flags |= op->base.remap.unmap->va->flags & XE_VMA_CREATE_MASK;
                        if (op->base.remap.prev) {
                                vma = new_vma(vm, op->base.remap.prev,
                                              &old->attr, flags);
@@ -4212,7 +4186,7 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
        struct xe_vma_ops vops;
        struct drm_gpuva_ops *ops = NULL;
        struct drm_gpuva_op *__op;
-       bool is_cpu_addr_mirror = false;
+       unsigned int vma_flags = 0;
        bool remap_op = false;
        struct xe_vma_mem_attr tmp_attr;
        u16 default_pat;
@@ -4242,15 +4216,17 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
                                vma = gpuva_to_vma(op->base.unmap.va);
                                XE_WARN_ON(!xe_vma_has_default_mem_attrs(vma));
                                default_pat = vma->attr.default_pat_index;
+                               vma_flags = vma->gpuva.flags;
                        }
 
                        if (__op->op == DRM_GPUVA_OP_REMAP) {
                                vma = gpuva_to_vma(op->base.remap.unmap->va);
                                default_pat = vma->attr.default_pat_index;
+                               vma_flags = vma->gpuva.flags;
                        }
 
                        if (__op->op == DRM_GPUVA_OP_MAP) {
-                               op->map.is_cpu_addr_mirror = true;
+                               op->map.vma_flags |= vma_flags & XE_VMA_CREATE_MASK;
                                op->map.pat_index = default_pat;
                        }
                } else {
@@ -4259,11 +4235,7 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
                                xe_assert(vm->xe, !remap_op);
                                xe_assert(vm->xe, xe_vma_has_no_bo(vma));
                                remap_op = true;
-
-                               if (xe_vma_is_cpu_addr_mirror(vma))
-                                       is_cpu_addr_mirror = true;
-                               else
-                                       is_cpu_addr_mirror = false;
+                               vma_flags = vma->gpuva.flags;
                        }
 
                        if (__op->op == DRM_GPUVA_OP_MAP) {
@@ -4272,10 +4244,10 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
                                /*
                                 * In case of madvise ops DRM_GPUVA_OP_MAP is
                                 * always after DRM_GPUVA_OP_REMAP, so ensure
-                                * we assign op->map.is_cpu_addr_mirror true
-                                * if REMAP is for xe_vma_is_cpu_addr_mirror vma
+                                * to propagate the flags from the vma we're
+                                * unmapping.
                                 */
-                               op->map.is_cpu_addr_mirror = is_cpu_addr_mirror;
+                               op->map.vma_flags |= vma_flags & XE_VMA_CREATE_MASK;
                        }
                }
                print_op(vm->xe, __op);
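
In the xe_vm_alloc_vma() hunks above, flags flow from the VMA being
unmapped or remapped into the MAP op that recreates it. A condensed
sketch of that flow (a simplified skeleton, assuming the driver's
gpuva_op_to_vma_op() container_of-style conversion):

	unsigned int vma_flags = 0;
	struct drm_gpuva_op *__op;

	drm_gpuva_for_each_op(__op, ops) {
		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);

		if (__op->op == DRM_GPUVA_OP_REMAP)
			/* Remember the flags of the VMA being split. */
			vma_flags = gpuva_to_vma(op->base.remap.unmap->va)->gpuva.flags;
		else if (__op->op == DRM_GPUVA_OP_MAP)
			/* Replay only the creation-relevant subset. */
			op->map.vma_flags |= vma_flags & XE_VMA_CREATE_MASK;
	}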
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 413353e1c2253bbdb6f8b051d416229cb4c8fcd3..a3b422b27ae817b9bb1c02622c9f709d5e04a096 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -345,17 +345,10 @@ struct xe_vm {
 struct xe_vma_op_map {
        /** @vma: VMA to map */
        struct xe_vma *vma;
+       unsigned int vma_flags;
        /** @immediate: Immediate bind */
        bool immediate;
        /** @read_only: Read only */
-       bool read_only;
-       /** @is_null: is NULL binding */
-       bool is_null;
-       /** @is_cpu_addr_mirror: is CPU address mirror binding */
-       bool is_cpu_addr_mirror;
-       /** @dumpable: whether BO is dumped on GPU hang */
-       bool dumpable;
-       /** @invalidate: invalidate the VMA before bind */
        bool invalidate_on_bind;
        /** @pat_index: The pat index to use for this operation. */
        u16 pat_index;
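
As a sanity check on the fix (illustrative arithmetic only, not driver
code): with the mask in place every relevant bit of a split VMA survives,
where previously only the system-allocator bit did.

	unsigned int old_flags = XE_VMA_READ_ONLY | XE_VMA_DUMPABLE |
				 XE_VMA_SYSTEM_ALLOCATOR;

	/* After the fix: every bit in XE_VMA_CREATE_MASK is carried
	 * over, so new_flags == old_flags. */
	unsigned int new_flags = old_flags & XE_VMA_CREATE_MASK;

	/* Before the fix: only the system-allocator bit was copied. */
	unsigned int buggy_flags = old_flags & XE_VMA_SYSTEM_ALLOCATOR;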