 	struct drm_gpuva_ops *ops;
 	struct drm_gpuva_op *__op;
 	struct drm_gpuvm_bo *vm_bo;
+	u64 range_start = addr;
 	u64 range_end = addr + range;
 	int err;
 	switch (operation) {
 	case DRM_XE_VM_BIND_OP_MAP:
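+		/*
+		 * For CPU address mirror binds, let
+		 * xe_vm_find_cpu_addr_mirror_vma_range() adjust range_start
+		 * and range_end before the map request is built below.
+		 */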
+		if (flags & DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR)
+			xe_vm_find_cpu_addr_mirror_vma_range(vm, &range_start, &range_end);
+
+		fallthrough;
 	case DRM_XE_VM_BIND_OP_MAP_USERPTR: {
 		struct drm_gpuvm_map_req map_req = {
-			.map.va.addr = addr,
-			.map.va.range = range,
+			.map.va.addr = range_start,
+			.map.va.range = range_end - range_start,
 			.map.gem.obj = obj,
 			.map.gem.offset = bo_offset_or_userptr,
 		};