has_default_attr = xe_vma_has_default_mem_attrs(vma);
if (has_default_attr) {
-       if (xe_svm_has_mapping(vm, xe_vma_start(vma), xe_vma_end(vma)))
-               return 0;
-
        start = xe_vma_start(vma);
        end = xe_vma_end(vma);
} else if (xe_vma_start(vma) == start && xe_vma_end(vma) == end) {
switch (operation) {
case DRM_XE_VM_BIND_OP_MAP:
-       if (flags & DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR)
+       if (flags & DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR) {
                xe_vm_find_cpu_addr_mirror_vma_range(vm, &range_start, &range_end);
+               vops->flags |= XE_VMA_OPS_FLAG_ALLOW_SVM_UNMAP;
+       }
        fallthrough;
case DRM_XE_VM_BIND_OP_MAP_USERPTR: {
        if (xe_vma_is_cpu_addr_mirror(vma) &&
            xe_svm_has_mapping(vm, xe_vma_start(vma),
-                              xe_vma_end(vma)))
+                              xe_vma_end(vma)) &&
+           !(vops->flags & XE_VMA_OPS_FLAG_ALLOW_SVM_UNMAP))
                return -EBUSY;
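
Taken together, the hunks above invert the old policy: instead of bailing out early (the removed return 0) or rejecting the bind outright with -EBUSY whenever SVM mappings are live in the target range, the MAP path that targets a CPU-address-mirror range now records XE_VMA_OPS_FLAG_ALLOW_SVM_UNMAP in vops->flags, and the busy check fires only for callers that did not opt in. A stand-alone sketch of that gating pattern (the demo_* names are invented for illustration; this is not driver code):

#include <stdbool.h>
#include <stdio.h>

#define DEMO_ALLOW_SVM_UNMAP (1u << 4)

/* Reject the map only when a live mapping exists in the range and the
 * caller did not opt in to having it unmapped. */
static int demo_check_map(bool is_mirror, bool has_svm_mapping,
                          unsigned int flags)
{
        if (is_mirror && has_svm_mapping && !(flags & DEMO_ALLOW_SVM_UNMAP))
                return -16;     /* stands in for -EBUSY */
        return 0;
}

int main(void)
{
        printf("%d\n", demo_check_map(true, true, 0));                    /* -16 */
        printf("%d\n", demo_check_map(true, true, DEMO_ALLOW_SVM_UNMAP)); /* 0 */
        return 0;
}

The same opt-in is set on the non-madvise path in the next fragment:
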
if (!xe_vma_is_cpu_addr_mirror(vma))
if (is_madvise)
        vops.flags |= XE_VMA_OPS_FLAG_MADVISE;
+ else
+       vops.flags |= XE_VMA_OPS_FLAG_ALLOW_SVM_UNMAP;
err = vm_bind_ioctl_ops_parse(vm, ops, &vops);
if (err)
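
Note that vops is a local struct in this fragment, hence vops.flags rather than vops->flags. The added else branch makes the helper's two modes explicit and mutually exclusive: madvise callers keep XE_VMA_OPS_FLAG_MADVISE as before, while every non-madvise caller now opts in to SVM unmap before vm_bind_ioctl_ops_parse() runs. A minimal sketch of that selection, reusing the bit positions from the defines at the end of this section (demo_* names invented, not driver code):

#include <assert.h>

#define DEMO_FLAG_MADVISE         (1u << 1)
#define DEMO_FLAG_ALLOW_SVM_UNMAP (1u << 4)

/* Exactly one of the two modes is chosen per operation list. */
static unsigned int demo_mode_flags(int is_madvise)
{
        return is_madvise ? DEMO_FLAG_MADVISE : DEMO_FLAG_ALLOW_SVM_UNMAP;
}

int main(void)
{
        assert(demo_mode_flags(1) == DEMO_FLAG_MADVISE);
        assert(demo_mode_flags(0) == DEMO_FLAG_ALLOW_SVM_UNMAP);
        return 0;
}
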
static bool is_cpu_addr_vma_with_default_attr(struct xe_vma *vma)
{
        return vma && xe_vma_is_cpu_addr_mirror(vma) &&
-              !xe_svm_has_mapping(xe_vma_vm(vma), xe_vma_start(vma), xe_vma_end(vma)) &&
               xe_vma_has_default_mem_attrs(vma);
}
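
With the xe_svm_has_mapping() term dropped, the helper answers a narrower question: is this a CPU-address-mirror VMA that still carries default memory attributes? Whether live SVM mappings may be torn down is no longer folded into the predicate; it is decided at operation-parse time through XE_VMA_OPS_FLAG_ALLOW_SVM_UNMAP. A before/after sketch of the narrowing (demo_* names invented, not driver code):

#include <stdbool.h>
#include <stdio.h>

struct demo_vma { bool mirror, default_attrs, has_svm; };

/* Before: the predicate also refused VMAs with live SVM mappings. */
static bool demo_default_attr_before(const struct demo_vma *v)
{
        return v && v->mirror && !v->has_svm && v->default_attrs;
}

/* After: live mappings no longer matter here; tearing them down is decided
 * at parse time via the ALLOW_SVM_UNMAP opt-in instead. */
static bool demo_default_attr_after(const struct demo_vma *v)
{
        return v && v->mirror && v->default_attrs;
}

int main(void)
{
        struct demo_vma v = { true, true, true };

        printf("%d %d\n", demo_default_attr_before(&v), /* 0 */
               demo_default_attr_after(&v));            /* 1 */
        return 0;
}
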
#define XE_VMA_OPS_FLAG_MADVISE BIT(1)
#define XE_VMA_OPS_ARRAY_OF_BINDS BIT(2)
#define XE_VMA_OPS_FLAG_SKIP_TLB_WAIT BIT(3)
+#define XE_VMA_OPS_FLAG_ALLOW_SVM_UNMAP BIT(4)
        u32 flags;
#ifdef TEST_VM_OPS_ERROR
        /** @inject_error: inject error to test error handling */
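
For reference, BIT(n) in the defines above is the kernel's (1UL << (n)) helper from <linux/bits.h>, so the new flag takes the next free bit after XE_VMA_OPS_FLAG_SKIP_TLB_WAIT in the xe_vma_ops flags word.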