drm/xe: Pass a drm_pagemap pointer around with the memory advise attributes
author	Thomas Hellström <thomas.hellstrom@linux.intel.com>
	Fri, 19 Dec 2025 11:33:08 +0000 (12:33 +0100)
committer	Thomas Hellström <thomas.hellstrom@linux.intel.com>
	Tue, 23 Dec 2025 09:00:47 +0000 (10:00 +0100)
As a consequence, struct xe_vma_mem_attr can't simply be assigned
or freed without taking the reference counting of individual members
into account. Also add helpers to do that.

v2:
- Move some calls to xe_vma_mem_attr_fini() to xe_vma_free(). (Matt Brost)
v3:
- Rebase.

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com> #v2
Link: https://patch.msgid.link/20251219113320.183860-13-thomas.hellstrom@linux.intel.com
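
A minimal userspace sketch of the ownership rule the new helpers enforce.
"pagemap" and "mem_attr" are illustrative stand-ins for drm_pagemap and
struct xe_vma_mem_attr; the toy refcount stands in for drm_pagemap's
reference counting.

#include <stdlib.h>

struct pagemap { int refcount; };

static struct pagemap *pagemap_get(struct pagemap *p)
{
	if (p)
		p->refcount++;
	return p;
}

static void pagemap_put(struct pagemap *p)
{
	if (p && --p->refcount == 0)
		free(p);
}

struct mem_attr { struct pagemap *dpagemap; };

/* Like xe_vma_mem_attr_fini(): drop the member's reference. */
static void mem_attr_fini(struct mem_attr *attr)
{
	pagemap_put(attr->dpagemap);
}

/*
 * Like xe_vma_mem_attr_copy(): drop the destination's old reference,
 * structure-assign, then take a reference for the new copy. A bare
 * "*to = *from" would alias the pointer without a get, so the two
 * eventual fini calls would put the same reference twice.
 */
static void mem_attr_copy(struct mem_attr *to, struct mem_attr *from)
{
	mem_attr_fini(to);
	*to = *from;
	pagemap_get(to->dpagemap);
}

int main(void)
{
	struct pagemap *p = calloc(1, sizeof(*p));
	struct mem_attr a = { .dpagemap = pagemap_get(p) };	/* refcount 1 */
	struct mem_attr b = {};	/* zero-init so the first fini is a no-op */

	mem_attr_copy(&b, &a);	/* refcount 2 */
	mem_attr_fini(&a);	/* refcount 1 */
	mem_attr_fini(&b);	/* refcount 0: freed */
	return 0;
}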
drivers/gpu/drm/xe/xe_svm.c
drivers/gpu/drm/xe/xe_vm.c
drivers/gpu/drm/xe/xe_vm.h
drivers/gpu/drm/xe/xe_vm_madvise.c
drivers/gpu/drm/xe/xe_vm_types.h

drivers/gpu/drm/xe/xe_svm.c
index 92c04d0e93f1e45362e0e6e66f9dd3fc9f4ed573..68b214d5e273dc2f537e23a4c57a078af88328e9 100644
@@ -290,10 +290,14 @@ static int __xe_svm_garbage_collector(struct xe_vm *vm,
 
 static void xe_vma_set_default_attributes(struct xe_vma *vma)
 {
-       vma->attr.preferred_loc.devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE;
-       vma->attr.preferred_loc.migration_policy = DRM_XE_MIGRATE_ALL_PAGES;
-       vma->attr.pat_index = vma->attr.default_pat_index;
-       vma->attr.atomic_access = DRM_XE_ATOMIC_UNDEFINED;
+       struct xe_vma_mem_attr default_attr = {
+               .preferred_loc.devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE,
+               .preferred_loc.migration_policy = DRM_XE_MIGRATE_ALL_PAGES,
+               .pat_index = vma->attr.default_pat_index,
+               .atomic_access = DRM_XE_ATOMIC_UNDEFINED,
+       };
+
+       xe_vma_mem_attr_copy(&vma->attr, &default_attr);
 }
 
 static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 start, u64 end)
drivers/gpu/drm/xe/xe_vm.c
index 0b8412574777943e203489930dd40881f6b6b4a3..351046c9587b74384dbdcfe2b67f8095e7841a16 100644
@@ -957,14 +957,37 @@ free_ops:
        return fence;
 }
 
+static void xe_vma_mem_attr_fini(struct xe_vma_mem_attr *attr)
+{
+       drm_pagemap_put(attr->preferred_loc.dpagemap);
+}
+
 static void xe_vma_free(struct xe_vma *vma)
 {
+       xe_vma_mem_attr_fini(&vma->attr);
+
        if (xe_vma_is_userptr(vma))
                kfree(to_userptr_vma(vma));
        else
                kfree(vma);
 }
 
+/**
+ * xe_vma_mem_attr_copy() - copy an xe_vma_mem_attr structure.
+ * @to: Destination.
+ * @from: Source.
+ *
+ * Copies an xe_vma_mem_attr structure taking care to get reference
+ * counting of individual members right.
+ */
+void xe_vma_mem_attr_copy(struct xe_vma_mem_attr *to, struct xe_vma_mem_attr *from)
+{
+       xe_vma_mem_attr_fini(to);
+       *to = *from;
+       if (to->preferred_loc.dpagemap)
+               drm_pagemap_get(to->preferred_loc.dpagemap);
+}
+
 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
                                    struct xe_bo *bo,
                                    u64 bo_offset_or_userptr,
@@ -1015,8 +1038,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
        if (vm->xe->info.has_atomic_enable_pte_bit)
                vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
 
-       vma->attr = *attr;
-
+       xe_vma_mem_attr_copy(&vma->attr, attr);
        if (bo) {
                struct drm_gpuvm_bo *vm_bo;
 
@@ -4324,7 +4346,7 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
        struct drm_gpuva_op *__op;
        unsigned int vma_flags = 0;
        bool remap_op = false;
-       struct xe_vma_mem_attr tmp_attr;
+       struct xe_vma_mem_attr tmp_attr = {};
        u16 default_pat;
        int err;
 
@@ -4419,7 +4441,7 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
                         * VMA, so they can be assigned to newly MAP created vma.
                         */
                        if (is_madvise)
-                               tmp_attr = vma->attr;
+                               xe_vma_mem_attr_copy(&tmp_attr, &vma->attr);
 
                        xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va), NULL);
                } else if (__op->op == DRM_GPUVA_OP_MAP) {
@@ -4429,12 +4451,13 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
                         * copy them to new vma.
                         */
                        if (is_madvise)
-                               vma->attr = tmp_attr;
+                               xe_vma_mem_attr_copy(&vma->attr, &tmp_attr);
                }
        }
 
        xe_vm_unlock(vm);
        drm_gpuva_ops_free(&vm->gpuvm, ops);
+       xe_vma_mem_attr_fini(&tmp_attr);
        return 0;
 
 unwind_ops:
@@ -4532,3 +4555,4 @@ int xe_vm_alloc_cpu_addr_mirror_vma(struct xe_vm *vm, uint64_t start, uint64_t r
 
        return xe_vm_alloc_vma(vm, &map_req, false);
 }
+
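
The tmp_attr discipline in xe_vm_alloc_vma() above, condensed (editor's
annotation; error paths and unrelated code elided with "..."). The "= {}"
initializer matters because xe_vma_mem_attr_copy() begins by fini-ing its
destination, which must hold a NULL dpagemap rather than stack garbage:

struct xe_vma_mem_attr tmp_attr = {};	/* .dpagemap starts out NULL */
...
if (is_madvise)
	xe_vma_mem_attr_copy(&tmp_attr, &vma->attr);	/* get: +1 on dpagemap */
xe_vma_destroy(...);			/* old VMA's fini drops its reference */
...
if (is_madvise)
	xe_vma_mem_attr_copy(&vma->attr, &tmp_attr);	/* get: +1 for the new VMA */
...
xe_vma_mem_attr_fini(&tmp_attr);	/* put: balance tmp_attr's reference */

The final fini runs whether or not the madvise path executed; with a NULL
dpagemap it is a no-op put.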
drivers/gpu/drm/xe/xe_vm.h
index 361f10b3c453083d2c35f3913811086e02452a54..7d11ca47d73edea81aad1802381bffd980286963 100644
@@ -414,4 +414,5 @@ static inline struct drm_exec *xe_vm_validation_exec(struct xe_vm *vm)
 #define xe_vm_has_valid_gpu_mapping(tile, tile_present, tile_invalidated)      \
        ((READ_ONCE(tile_present) & ~READ_ONCE(tile_invalidated)) & BIT((tile)->id))
 
+void xe_vma_mem_attr_copy(struct xe_vma_mem_attr *to, struct xe_vma_mem_attr *from);
 #endif
drivers/gpu/drm/xe/xe_vm_madvise.c
index cad3cf627c3f2ff21e88c40c90bf92a6a7a6573d..9553008409d104f798d738823238372e27419cf9 100644
@@ -95,6 +95,7 @@ static void madvise_preferred_mem_loc(struct xe_device *xe, struct xe_vm *vm,
                         */
                        vmas[i]->attr.preferred_loc.migration_policy =
                                                op->preferred_mem_loc.migration_policy;
+                       vmas[i]->attr.preferred_loc.dpagemap = NULL;
                }
        }
 }
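
Context for the added line (editor's note; the code shape below is
illustrative, not from the patch): per the kernel-doc added to
xe_vm_types.h below, a non-NULL dpagemap names the preferred pagemap
directly, and devmem_fd is consulted only when dpagemap is NULL.

struct drm_pagemap *dpagemap = attr->preferred_loc.dpagemap;

if (!dpagemap) {
	/* Interpret devmem_fd, including special values such as
	 * DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE. */
}

So when userspace selects a preferred location by fd, the old dpagemap
pointer is reset to NULL; otherwise it would keep overriding the new
choice.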
drivers/gpu/drm/xe/xe_vm_types.h
index cfce18762aa7708db305582b086adb804bb79657..5876a966ed24feaab451eb152b5a18befd039220 100644
@@ -20,6 +20,8 @@
 #include "xe_range_fence.h"
 #include "xe_userptr.h"
 
+struct drm_pagemap;
+
 struct xe_bo;
 struct xe_svm_range;
 struct xe_sync_entry;
@@ -65,6 +67,13 @@ struct xe_vma_mem_attr {
                 * closest device memory respectively.
                 */
                u32 devmem_fd;
+               /**
+                * @preferred_loc.dpagemap: Reference-counted pointer to the drm_pagemap preferred
+                * for migration on a SVM page-fault. The pointer is protected by the
+                * vm lock, and is %NULL if @devmem_fd should be consulted for special
+                * values.
+                */
+               struct drm_pagemap *dpagemap;
        } preferred_loc;
 
        /**