static void xe_vma_set_default_attributes(struct xe_vma *vma)
{
- vma->attr.preferred_loc.devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE;
- vma->attr.preferred_loc.migration_policy = DRM_XE_MIGRATE_ALL_PAGES;
- vma->attr.pat_index = vma->attr.default_pat_index;
- vma->attr.atomic_access = DRM_XE_ATOMIC_UNDEFINED;
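+ /*
+ * The copy below overwrites all of vma->attr, so default_pat_index
+ * must be carried over explicitly.
+ */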
+ struct xe_vma_mem_attr default_attr = {
+ .preferred_loc.devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE,
+ .preferred_loc.migration_policy = DRM_XE_MIGRATE_ALL_PAGES,
+ .pat_index = vma->attr.default_pat_index,
+ .default_pat_index = vma->attr.default_pat_index,
+ .atomic_access = DRM_XE_ATOMIC_UNDEFINED,
+ };
+
+ xe_vma_mem_attr_copy(&vma->attr, &default_attr);
}
static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 start, u64 end)
return fence;
}
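+/*
+ * Drop the references held by an xe_vma_mem_attr: currently only the
+ * preferred-location dpagemap. drm_pagemap_put() handles a NULL pointer,
+ * so this is safe for attributes that never had a dpagemap set.
+ */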
+static void xe_vma_mem_attr_fini(struct xe_vma_mem_attr *attr)
+{
+ drm_pagemap_put(attr->preferred_loc.dpagemap);
+}
+
static void xe_vma_free(struct xe_vma *vma)
{
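+ /* Drop the preferred-location dpagemap reference, if any. */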
+ xe_vma_mem_attr_fini(&vma->attr);
+
if (xe_vma_is_userptr(vma))
kfree(to_userptr_vma(vma));
else
kfree(vma);
}
+/**
+ * xe_vma_mem_attr_copy() - copy an xe_vma_mem_attr structure.
+ * @to: Destination.
+ * @from: Source.
+ *
+ * Copies an xe_vma_mem_attr structure, taking care to get the reference
+ * counting of individual members right: any reference held by @to is
+ * dropped before the copy, and a new reference is taken on @from's
+ * dpagemap, if present.
+ */
+void xe_vma_mem_attr_copy(struct xe_vma_mem_attr *to, struct xe_vma_mem_attr *from)
+{
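+ /* Put the reference @to may already hold before overwriting it. */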
+ xe_vma_mem_attr_fini(to);
+ *to = *from;
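+ /* The struct copy duplicated the pointer; take a reference for @to. */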
+ if (to->preferred_loc.dpagemap)
+ drm_pagemap_get(to->preferred_loc.dpagemap);
+}
+
static struct xe_vma *xe_vma_create(struct xe_vm *vm,
struct xe_bo *bo,
u64 bo_offset_or_userptr,
if (vm->xe->info.has_atomic_enable_pte_bit)
vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
- vma->attr = *attr;
-
+ xe_vma_mem_attr_copy(&vma->attr, attr);
+
if (bo) {
struct drm_gpuvm_bo *vm_bo;
struct drm_gpuva_op *__op;
unsigned int vma_flags = 0;
bool remap_op = false;
- struct xe_vma_mem_attr tmp_attr;
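+ /*
+ * Zero-initialize: xe_vma_mem_attr_copy() puts the destination's old
+ * dpagemap reference, so tmp_attr must start out valid.
+ */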
+ struct xe_vma_mem_attr tmp_attr = {};
u16 default_pat;
int err;
* VMA, so they can be assigned to newly MAP created vma.
*/
if (is_madvise)
- tmp_attr = vma->attr;
+ xe_vma_mem_attr_copy(&tmp_attr, &vma->attr);
xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va), NULL);
} else if (__op->op == DRM_GPUVA_OP_MAP) {
* copy them to new vma.
*/
if (is_madvise)
- vma->attr = tmp_attr;
+ xe_vma_mem_attr_copy(&vma->attr, &tmp_attr);
}
}
xe_vm_unlock(vm);
drm_gpuva_ops_free(&vm->gpuvm, ops);
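+ /* Drop the reference tmp_attr may still hold after the madvise copy. */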
+ xe_vma_mem_attr_fini(&tmp_attr);
return 0;
unwind_ops:
return xe_vm_alloc_vma(vm, &map_req, false);
}
+
#include "xe_range_fence.h"
#include "xe_userptr.h"
+struct drm_pagemap;
+
struct xe_bo;
struct xe_svm_range;
struct xe_sync_entry;
* closest device memory respectively.
*/
u32 devmem_fd;
+ /**
+ * @preferred_loc.dpagemap: Reference-counted pointer to the drm_pagemap
+ * preferred for migration on an SVM page fault. The pointer is
+ * protected by the vm lock, and is %NULL if @devmem_fd should be
+ * consulted for special values.
+ */
+ struct drm_pagemap *dpagemap;
} preferred_loc;
/**