return err;
}
+/*
+ * Map the access type, fault type, and fault level from the current
+ * bspec encoding to the uapi abstraction. The mapping is currently
+ * 1-to-1, with the access type being the only notable exception: it
+ * carries additional prefetch-status data that must be masked out.
+ */
+static u8 xe_to_user_access_type(u8 access_type)
+{
+ return access_type & XE_PAGEFAULT_ACCESS_TYPE_MASK;
+}
+
+static u8 xe_to_user_fault_type(u8 fault_type)
+{
+ return fault_type;
+}
+
+static u8 xe_to_user_fault_level(u8 fault_level)
+{
+ return fault_level;
+}
+
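+/*
+ * fill_faults - copy the VM fault array out to userspace
+ *
+ * Snapshot up to args->size bytes worth of entries from the VM fault
+ * list, translate each entry to its uapi form, and copy the result to
+ * the user buffer at args->data.
+ */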
+static int fill_faults(struct xe_vm *vm,
+ struct drm_xe_vm_get_property *args)
+{
+ struct xe_vm_fault __user *usr_ptr = u64_to_user_ptr(args->data);
+ struct xe_vm_fault *fault_list, fault_entry = { 0 };
+ struct xe_vm_fault_entry *entry;
+ int ret = 0, i = 0, count, entry_size;
+
+ entry_size = sizeof(struct xe_vm_fault);
+ count = args->size / entry_size;
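+ /* The caller has validated args->size as a non-zero multiple of entry_size */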
+
+ fault_list = kcalloc(count, sizeof(struct xe_vm_fault), GFP_KERNEL);
+ if (!fault_list)
+ return -ENOMEM;
+
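+ /*
+ * Snapshot the fault list under the spinlock; copy_to_user() may
+ * fault, so it must run only after the lock has been dropped.
+ */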
+ spin_lock(&vm->faults.lock);
+ list_for_each_entry(entry, &vm->faults.list, list) {
+ if (i == count)
+ break;
+
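+ /* Userspace is given fault addresses in canonical (sign-extended) form */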
+ fault_entry.address = xe_device_canonicalize_addr(vm->xe, entry->address);
+ fault_entry.address_precision = entry->address_precision;
+
+ fault_entry.access_type = xe_to_user_access_type(entry->access_type);
+ fault_entry.fault_type = xe_to_user_fault_type(entry->fault_type);
+ fault_entry.fault_level = xe_to_user_fault_level(entry->fault_level);
+
+ memcpy(&fault_list[i], &fault_entry, entry_size);
+
+ i++;
+ }
+ spin_unlock(&vm->faults.lock);
+
+ ret = copy_to_user(usr_ptr, fault_list, args->size);
+
+ kfree(fault_list);
+ return ret ? -EFAULT : 0;
+}
+
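+/*
+ * Dispatch on the requested property. A zero args->size acts as a
+ * size query: the required buffer size is reported back through
+ * args->size and no data is copied.
+ */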
+static int xe_vm_get_property_helper(struct xe_vm *vm,
+ struct drm_xe_vm_get_property *args)
+{
+ size_t size;
+
+ switch (args->property) {
+ case DRM_XE_VM_GET_PROPERTY_FAULTS:
+ spin_lock(&vm->faults.lock);
+ size = size_mul(sizeof(struct xe_vm_fault), vm->faults.len);
+ spin_unlock(&vm->faults.lock);
+
+ if (!args->size) {
+ args->size = size;
+ return 0;
+ }
+
+ /*
+ * The number of faults may grow between calls to
+ * xe_vm_get_property_ioctl, so accept any requested size up to
+ * the current size of the VM fault array. The requested size
+ * must also be a multiple of sizeof(struct xe_vm_fault).
+ */
+ if (args->size > size || args->size % sizeof(struct xe_vm_fault))
+ return -EINVAL;
+
+ return fill_faults(vm, args);
+ }
+ return -EINVAL;
+}
+
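+/**
+ * xe_vm_get_property_ioctl - query a property of a VM
+ * @drm: DRM device
+ * @data: ioctl payload, a struct drm_xe_vm_get_property
+ * @file: DRM file
+ *
+ * Userspace is expected to call this twice: once with args->size set
+ * to zero to learn the required buffer size, and again with args->data
+ * pointing at a buffer of that size to receive the property. A minimal
+ * usage sketch (illustrative only; the ioctl macro name assumes the
+ * accompanying uapi change):
+ *
+ * struct drm_xe_vm_get_property args = {
+ * .vm_id = vm_id,
+ * .property = DRM_XE_VM_GET_PROPERTY_FAULTS,
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_VM_GET_PROPERTY, &args);
+ * args.data = (uintptr_t)calloc(1, args.size);
+ * ioctl(fd, DRM_IOCTL_XE_VM_GET_PROPERTY, &args);
+ *
+ * Return: 0 on success, -EINVAL on malformed input, -ENOENT if the VM
+ * is not found, -EFAULT if the copy-out to userspace fails.
+ */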
+int xe_vm_get_property_ioctl(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct xe_device *xe = to_xe_device(drm);
+ struct xe_file *xef = to_xe_file(file);
+ struct drm_xe_vm_get_property *args = data;
+ struct xe_vm *vm;
+ int ret = 0;
+
+ if (XE_IOCTL_DBG(xe, (args->reserved[0] || args->reserved[1] ||
+ args->reserved[2])))
+ return -EINVAL;
+
+ vm = xe_vm_lookup(xef, args->vm_id);
+ if (XE_IOCTL_DBG(xe, !vm))
+ return -ENOENT;
+
+ ret = xe_vm_get_property_helper(vm, args);
+
+ xe_vm_put(vm);
+ return ret;
+}
+
/**
* xe_vm_bind_kernel_bo - bind a kernel BO to a VM
* @vm: VM to bind the BO to