vduse_domain_free_iova(iovad, dma_addr, size);
}
-void *vduse_domain_alloc_coherent(struct vduse_iova_domain *domain,
- size_t size, dma_addr_t *dma_addr,
- gfp_t flag)
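+/*
+ * Map the caller-allocated buffer @orig of @size bytes into the
+ * consistent IOVA domain.  Returns the IOVA on success or
+ * DMA_MAPPING_ERROR on failure; @orig stays owned by the caller,
+ * which frees it on both the error and the release paths.
+ */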
+dma_addr_t vduse_domain_alloc_coherent(struct vduse_iova_domain *domain,
+ size_t size, void *orig)
{
struct iova_domain *iovad = &domain->consistent_iovad;
unsigned long limit = domain->iova_limit;
dma_addr_t iova = vduse_domain_alloc_iova(iovad, size, limit);
- void *orig = alloc_pages_exact(size, flag);
- if (!iova || !orig)
- goto err;
+ if (!iova)
+ return DMA_MAPPING_ERROR;
spin_lock(&domain->iotlb_lock);
 	if (vduse_iotlb_add_range(domain, (u64)iova, (u64)iova + size - 1,
 				  virt_to_phys(orig), VHOST_MAP_RW,
 				  domain->file, (u64)iova)) {
 		spin_unlock(&domain->iotlb_lock);
 		goto err;
 	}
spin_unlock(&domain->iotlb_lock);
-	*dma_addr = iova;
-	return orig;
+	return iova;
err:
-	*dma_addr = DMA_MAPPING_ERROR;
-	if (orig)
-		free_pages_exact(orig, size);
-	if (iova)
-		vduse_domain_free_iova(iovad, iova, size);
-	return NULL;
+	vduse_domain_free_iova(iovad, iova, size);
+	return DMA_MAPPING_ERROR;
}
 void vduse_domain_free_coherent(struct vduse_iova_domain *domain, size_t size,
 				dma_addr_t dma_addr, unsigned long attrs)
 {
struct iova_domain *iovad = &domain->consistent_iovad;
struct vhost_iotlb_map *map;
struct vdpa_map_file *map_file;
- phys_addr_t pa;
spin_lock(&domain->iotlb_lock);
 	map = vhost_iotlb_itree_first(domain->iotlb, (u64)dma_addr,
 				      (u64)dma_addr + size - 1);
 	if (WARN_ON(!map)) {
 		spin_unlock(&domain->iotlb_lock);
 		return;
 	}
map_file = (struct vdpa_map_file *)map->opaque;
fput(map_file->file);
kfree(map_file);
- pa = map->addr;
vhost_iotlb_map_free(domain->iotlb, map);
spin_unlock(&domain->iotlb_lock);
vduse_domain_free_iova(iovad, dma_addr, size);
- free_pages_exact(phys_to_virt(pa), size);
}
static vm_fault_t vduse_domain_mmap_fault(struct vm_fault *vmf)
 void vduse_domain_unmap_page(struct vduse_iova_domain *domain,
 			     dma_addr_t dma_addr, size_t size,
 			     enum dma_data_direction dir, unsigned long attrs);
-void *vduse_domain_alloc_coherent(struct vduse_iova_domain *domain,
- size_t size, dma_addr_t *dma_addr,
- gfp_t flag);
+dma_addr_t vduse_domain_alloc_coherent(struct vduse_iova_domain *domain,
+ size_t size, void *orig);
void vduse_domain_free_coherent(struct vduse_iova_domain *domain, size_t size,
dma_addr_t dma_addr, unsigned long attrs);
 static void *vduse_dev_alloc_coherent(union virtio_map token, size_t size,
 				      dma_addr_t *dma_addr, gfp_t flag)
 {
struct vduse_dev *vdev;
struct vduse_iova_domain *domain;
- unsigned long iova;
void *addr;
*dma_addr = DMA_MAPPING_ERROR;
if (!token.group)
return NULL;
- vdev = token.group->dev;
- domain = vdev->domain;
- addr = vduse_domain_alloc_coherent(domain, size,
- (dma_addr_t *)&iova, flag);
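+	/*
+	 * Allocate the backing pages before looking up the device; the
+	 * domain helper now only reserves an IOVA and installs the
+	 * IOTLB mapping for them.
+	 */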
+ addr = alloc_pages_exact(size, flag);
if (!addr)
return NULL;
- *dma_addr = (dma_addr_t)iova;
+ vdev = token.group->dev;
+ domain = vdev->domain;
+ *dma_addr = vduse_domain_alloc_coherent(domain, size, addr);
+ if (*dma_addr == DMA_MAPPING_ERROR)
+ goto err;
return addr;
+
+err:
+ free_pages_exact(addr, size);
+ return NULL;
}
 static void vduse_dev_free_coherent(union virtio_map token, size_t size,
 				    void *vaddr, dma_addr_t dma_addr,
 				    unsigned long attrs)
domain = vdev->domain;
vduse_domain_free_coherent(domain, size, dma_addr, attrs);
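+	/* The IOTLB entry and IOVA are released; now free the backing pages. */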
+ free_pages_exact(vaddr, size);
}
static bool vduse_dev_need_sync(union virtio_map token, dma_addr_t dma_addr)