return 0;
}
-static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 range_start, u64 range_end)
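+/* Reset a VMA's memory attributes to their default values */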
+static void xe_vma_set_default_attributes(struct xe_vma *vma)
+{
+ vma->attr.preferred_loc.devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE;
+ vma->attr.preferred_loc.migration_policy = DRM_XE_MIGRATE_ALL_PAGES;
+ vma->attr.pat_index = vma->attr.default_pat_index;
+ vma->attr.atomic_access = DRM_XE_ATOMIC_UNDEFINED;
+}
+
+static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 start, u64 end)
{
struct xe_vma *vma;
- struct xe_vma_mem_attr default_attr = {
- .preferred_loc = {
- .devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE,
- .migration_policy = DRM_XE_MIGRATE_ALL_PAGES,
- },
- .atomic_access = DRM_XE_ATOMIC_UNDEFINED,
- };
- int err = 0;
+ bool has_default_attr;
+ int err;
- vma = xe_vm_find_vma_by_addr(vm, range_start);
+ vma = xe_vm_find_vma_by_addr(vm, start);
if (!vma)
return -EINVAL;
- if (xe_vma_has_default_mem_attrs(vma))
- return 0;
-
vm_dbg(&vm->xe->drm, "Existing VMA start=0x%016llx, vma_end=0x%016llx",
xe_vma_start(vma), xe_vma_end(vma));
- if (xe_vma_start(vma) == range_start && xe_vma_end(vma) == range_end) {
- default_attr.pat_index = vma->attr.default_pat_index;
- default_attr.default_pat_index = vma->attr.default_pat_index;
- vma->attr = default_attr;
- } else {
- vm_dbg(&vm->xe->drm, "Split VMA start=0x%016llx, vma_end=0x%016llx",
- range_start, range_end);
- err = xe_vm_alloc_cpu_addr_mirror_vma(vm, range_start, range_end - range_start);
- if (err) {
- drm_warn(&vm->xe->drm, "VMA SPLIT failed: %pe\n", ERR_PTR(err));
- xe_vm_kill(vm, true);
- return err;
- }
+ has_default_attr = xe_vma_has_default_mem_attrs(vma);
+
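+ /*
+  * A VMA that already has default attributes needs no work while SVM
+  * mappings exist for it; without mappings, widen [start, end) to the
+  * whole VMA. If the attributes differ but the range matches the VMA
+  * exactly, reset them in place.
+  */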
+ if (has_default_attr) {
+ if (xe_svm_has_mapping(vm, xe_vma_start(vma), xe_vma_end(vma)))
+ return 0;
+
+ start = xe_vma_start(vma);
+ end = xe_vma_end(vma);
+ } else if (xe_vma_start(vma) == start && xe_vma_end(vma) == end) {
+ xe_vma_set_default_attributes(vma);
+ }
+
+ xe_vm_find_cpu_addr_mirror_vma_range(vm, &start, &end);
+
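+ /* Nothing to remap if the range still matches a VMA with default attributes */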
+ if (xe_vma_start(vma) == start && xe_vma_end(vma) == end && has_default_attr)
+ return 0;
+
+ vm_dbg(&vm->xe->drm, "New VMA start=0x%016llx, vma_end=0x%016llx", start, end);
+
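+ /*
+  * Remap [start, end) as a new CPU address mirror VMA. Failure here is
+  * fatal for the VM, so warn and kill it.
+  */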
+ err = xe_vm_alloc_cpu_addr_mirror_vma(vm, start, end - start);
+ if (err) {
+ drm_warn(&vm->xe->drm, "New VMA MAP failed: %pe\n", ERR_PTR(err));
+ xe_vm_kill(vm, true);
+ return err;
}
/*