EXPORT_SYMBOL_GPL(virtio_max_dma_size);

static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
-			       dma_addr_t *dma_handle, gfp_t flag,
+			       dma_addr_t *map_handle, gfp_t flag,
			       union virtio_map map)
{
	if (vring_use_map_api(vdev)) {
		return dma_alloc_coherent(map.dma_dev, size,
-					  dma_handle, flag);
+					  map_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);

		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
-			*dma_handle = (dma_addr_t)phys_addr;
+			*map_handle = (dma_addr_t)phys_addr;

			/*
			 * Sanity check: make sure we didn't truncate
			 * the address; warn and abort if we end up
			 * with an unrepresentable address.
			 */
-			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
+			if (WARN_ON_ONCE(*map_handle != phys_addr)) {
				free_pages_exact(queue, PAGE_ALIGN(size));
				return NULL;
			}
		}
		return queue;
	}
}

static void vring_free_queue(struct virtio_device *vdev, size_t size,
-			     void *queue, dma_addr_t dma_handle,
+			     void *queue, dma_addr_t map_handle,
			     union virtio_map map)
{
	if (vring_use_map_api(vdev))
-		dma_free_coherent(map.dma_dev, size, queue, dma_handle);
+		dma_free_coherent(map.dma_dev, size, queue, map_handle);
	else
		free_pages_exact(queue, PAGE_ALIGN(size));
}