MODULE_PARM_DESC(limit,
"Maximum number of grants that may be mapped by one mapping request");
-/* True in PV mode, false otherwise */
-static int use_ptemod;
-
static void unmap_grant_pages(struct gntdev_grant_map *map,
int offset, int pages);
NULL == add->pages ||
NULL == add->being_removed)
goto err;
- if (use_ptemod) {
+ if (xen_pv_domain()) {
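+ /*
+  * A PV guest also maps the grants into the kernel address space,
+  * so a second set of map/unmap ops is needed for
+  * gnttab_map_refs()/gnttab_unmap_refs().
+  */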
add->kmap_ops = kvmalloc_array(count, sizeof(add->kmap_ops[0]),
GFP_KERNEL);
add->kunmap_ops = kvmalloc_array(count, sizeof(add->kunmap_ops[0]),
add->grants[i].ref = INVALID_GRANT_REF;
add->map_ops[i].handle = INVALID_GRANT_HANDLE;
add->unmap_ops[i].handle = INVALID_GRANT_HANDLE;
- if (use_ptemod) {
+ if (xen_pv_domain()) {
add->kmap_ops[i].handle = INVALID_GRANT_HANDLE;
add->kunmap_ops[i].handle = INVALID_GRANT_HANDLE;
}
if (!refcount_dec_and_test(&map->users))
return;
- if (map->pages && !use_ptemod) {
+ if (map->pages && !xen_pv_domain()) {
/*
* Increment the reference count. This ensures that the
* subsequent call to unmap_grant_pages() will not wind up
* re-entering itself.
*/
}
- if (use_ptemod && map->notifier_init)
+ if (xen_pv_domain() && map->notifier_init)
mmu_interval_notifier_remove(&map->notifier);
if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
size_t alloced = 0;
int i, err = 0;
- if (!use_ptemod) {
+ if (!xen_pv_domain()) {
/* Note: it could already be mapped */
if (map->map_ops[0].handle != INVALID_GRANT_HANDLE)
return 0;
if (map->flags & GNTMAP_device_map)
map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr;
- if (use_ptemod) {
+ if (xen_pv_domain()) {
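+ /*
+  * Mirror the kernel-map handle into kunmap_ops so the
+  * grant can be released again later.
+  */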
if (map->kmap_ops[i].status == GNTST_okay) {
alloced++;
map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
map->unmap_ops[offset+i].handle,
map->unmap_ops[offset+i].status);
map->unmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
- if (use_ptemod) {
+ if (xen_pv_domain()) {
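+ /*
+  * Count only fully released kernel mappings; the total is
+  * subtracted from map->live_grants once the batch completes.
+  */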
if (map->kunmap_ops[offset + i].status == GNTST_okay &&
map->kunmap_ops[offset + i].handle != INVALID_GRANT_HANDLE)
successful_unmaps++;
}
map->unmap_data.unmap_ops = map->unmap_ops + offset;
- map->unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
+ map->unmap_data.kunmap_ops = xen_pv_domain() ? map->kunmap_ops + offset : NULL;
map->unmap_data.pages = map->pages + offset;
map->unmap_data.count = pages;
map->unmap_data.done = __unmap_grant_pages_done;
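+ /*
+  * The batch is then handed to gnttab_unmap_refs_async(), which
+  * defers unmapping of any page that still has extra references
+  * before calling __unmap_grant_pages_done().
+  */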
#ifdef CONFIG_XEN_GNTDEV_DMABUF
case IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS:
- return gntdev_ioctl_dmabuf_exp_from_refs(priv, use_ptemod, ptr);
+ return gntdev_ioctl_dmabuf_exp_from_refs(priv, ptr);
case IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED:
return gntdev_ioctl_dmabuf_exp_wait_released(priv, ptr);
vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP);
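+ /*
+  * In PV mode the grant mappings live in this mm's page tables;
+  * they cannot follow the VMA across fork(), so forbid copying.
+  */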
- if (use_ptemod)
+ if (xen_pv_domain())
vm_flags_set(vma, VM_DONTCOPY);
vma->vm_private_data = map;
map->pages_vm_start = vma->vm_start;
- if (use_ptemod) {
+ if (xen_pv_domain()) {
err = mmu_interval_notifier_insert_locked(
&map->notifier, vma->vm_mm, vma->vm_start,
vma->vm_end - vma->vm_start, &gntdev_mmu_ops);
}
mutex_unlock(&priv->lock);
- if (use_ptemod) {
+ if (xen_pv_domain()) {
/*
* gntdev takes the address of the PTE in find_grant_ptes() and
* passes it to the hypervisor in gntdev_map_grant_pages(). The
* purpose of the notifier is to prevent the hypervisor pointer
* to the PTE from going stale.
*/
if (err)
goto out_put_map;
- if (!use_ptemod) {
+ if (!xen_pv_domain()) {
err = vm_map_pages_zero(vma, map->pages, map->count);
if (err)
goto out_put_map;
out_unlock_put:
mutex_unlock(&priv->lock);
out_put_map:
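+ /* Undo any grant mappings already installed in the page tables. */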
- if (use_ptemod)
+ if (xen_pv_domain())
unmap_grant_pages(map, 0, map->count);
gntdev_put_map(priv, map);
return err;
if (!xen_domain())
return -ENODEV;
- use_ptemod = xen_pv_domain();
-
err = misc_register(&gntdev_miscdev);
if (err != 0) {
pr_err("Could not register gntdev device\n");