/* DMA, allocation, and size information */
bool we_own_ring;
- /* Device used for doing DMA */
- struct device *dma_dev;
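+ /*
+  * Token handed to the mapping API.  For typical transports this is
+  * presumably just the device doing DMA, e.g. (a sketch, not the
+  * authoritative definition):
+  *
+  *	union virtio_map {
+  *		struct device *dma_dev;
+  *	};
+  */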
+ union virtio_map map;
#ifdef DEBUG
/* They're supposed to lock for us. */
static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
- struct device *dma_dev)
+ union virtio_map map)
{
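+ /*
+  * Transports that map buffers (vring_use_map_api()) need the ring
+  * allocated coherently against the mapping token; otherwise the
+  * device accesses guest memory directly and plain pages suffice.
+  */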
if (vring_use_map_api(vdev)) {
- return dma_alloc_coherent(dma_dev, size,
+ return dma_alloc_coherent(map.dma_dev, size,
dma_handle, flag);
} else {
void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
static void vring_free_queue(struct virtio_device *vdev, size_t size,
void *queue, dma_addr_t dma_handle,
- struct device *dma_dev)
+ union virtio_map map)
{
if (vring_use_map_api(vdev))
- dma_free_coherent(dma_dev, size, queue, dma_handle);
+ dma_free_coherent(map.dma_dev, size, queue, dma_handle);
else
free_pages_exact(queue, PAGE_ALIGN(size));
}
*/
static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
- return vq->dma_dev;
+ return vq->map.dma_dev;
}
/* Map one sg entry. */
}
static void vring_free_split(struct vring_virtqueue_split *vring_split,
- struct virtio_device *vdev, struct device *dma_dev)
+ struct virtio_device *vdev,
+ union virtio_map map)
{
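+ /* Free the ring through the same mapping token it was allocated with. */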
vring_free_queue(vdev, vring_split->queue_size_in_bytes,
vring_split->vring.desc,
vring_split->queue_dma_addr,
- dma_dev);
+ map);
kfree(vring_split->desc_state);
kfree(vring_split->desc_extra);
u32 num,
unsigned int vring_align,
bool may_reduce_num,
- struct device *dma_dev)
+ union virtio_map map)
{
void *queue = NULL;
dma_addr_t dma_addr;
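+ /* Try the full ring first; when may_reduce_num allows it, retry with
+  * progressively smaller rings, down to a single page. */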
queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
&dma_addr,
GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
- dma_dev);
+ map);
if (queue)
break;
if (!may_reduce_num)
/* Try to get a single page. You are my only hope! */
queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
&dma_addr, GFP_KERNEL | __GFP_ZERO,
- dma_dev);
+ map);
}
if (!queue)
return -ENOMEM;
bool (*notify)(struct virtqueue *),
void (*callback)(struct virtqueue *),
const char *name,
- struct device *dma_dev)
+ union virtio_map map)
{
struct vring_virtqueue *vq;
int err;
#else
vq->broken = false;
#endif
- vq->dma_dev = dma_dev;
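+ /* Keep the token: later allocation, map/unmap and free all go through it. */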
+ vq->map = map;
vq->use_map_api = vring_use_map_api(vdev);
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
bool (*notify)(struct virtqueue *),
void (*callback)(struct virtqueue *),
const char *name,
- struct device *dma_dev)
+ union virtio_map map)
{
struct vring_virtqueue_split vring_split = {};
struct virtqueue *vq;
int err;
err = vring_alloc_queue_split(&vring_split, vdev, num, vring_align,
- may_reduce_num, dma_dev);
+ may_reduce_num, map);
if (err)
return NULL;
vq = __vring_new_virtqueue_split(index, &vring_split, vdev, weak_barriers,
- context, notify, callback, name, dma_dev);
+ context, notify, callback, name, map);
if (!vq) {
- vring_free_split(&vring_split, vdev, dma_dev);
+ vring_free_split(&vring_split, vdev, map);
return NULL;
}
err = vring_alloc_queue_split(&vring_split, vdev, num,
vq->split.vring_align,
vq->split.may_reduce_num,
- vring_dma_dev(vq));
+ vq->map);
if (err)
goto err;
return 0;
err_state_extra:
- vring_free_split(&vring_split, vdev, vring_dma_dev(vq));
+ vring_free_split(&vring_split, vdev, vq->map);
err:
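+ /* Resize failed; put the old ring back into service. */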
virtqueue_reinit_split(vq);
return -ENOMEM;
static void vring_free_packed(struct vring_virtqueue_packed *vring_packed,
struct virtio_device *vdev,
- struct device *dma_dev)
+ union virtio_map map)
{
if (vring_packed->vring.desc)
vring_free_queue(vdev, vring_packed->ring_size_in_bytes,
vring_packed->vring.desc,
vring_packed->ring_dma_addr,
- dma_dev);
+ map);
if (vring_packed->vring.driver)
vring_free_queue(vdev, vring_packed->event_size_in_bytes,
vring_packed->vring.driver,
vring_packed->driver_event_dma_addr,
- dma_dev);
+ map);
if (vring_packed->vring.device)
vring_free_queue(vdev, vring_packed->event_size_in_bytes,
vring_packed->vring.device,
vring_packed->device_event_dma_addr,
- dma_dev);
+ map);
kfree(vring_packed->desc_state);
kfree(vring_packed->desc_extra);
static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
struct virtio_device *vdev,
- u32 num, struct device *dma_dev)
+ u32 num, union virtio_map map)
{
struct vring_packed_desc *ring;
struct vring_packed_desc_event *driver, *device;
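+ /*
+  * The descriptor ring and both event suppression areas are allocated
+  * against the same token; on failure, vring_free_packed() below
+  * releases whatever was already set up.
+  */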
ring = vring_alloc_queue(vdev, ring_size_in_bytes,
&ring_dma_addr,
GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
- dma_dev);
+ map);
if (!ring)
goto err;
driver = vring_alloc_queue(vdev, event_size_in_bytes,
&driver_event_dma_addr,
GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
- dma_dev);
+ map);
if (!driver)
goto err;
device = vring_alloc_queue(vdev, event_size_in_bytes,
&device_event_dma_addr,
GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
- dma_dev);
+ map);
if (!device)
goto err;
return 0;
err:
- vring_free_packed(vring_packed, vdev, dma_dev);
+ vring_free_packed(vring_packed, vdev, map);
return -ENOMEM;
}
bool (*notify)(struct virtqueue *),
void (*callback)(struct virtqueue *),
const char *name,
- struct device *dma_dev)
+ union virtio_map map)
{
struct vring_virtqueue *vq;
int err;
vq->broken = false;
#endif
vq->packed_ring = true;
- vq->dma_dev = dma_dev;
+ vq->map = map;
vq->use_map_api = vring_use_map_api(vdev);
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
bool (*notify)(struct virtqueue *),
void (*callback)(struct virtqueue *),
const char *name,
- struct device *dma_dev)
+ union virtio_map map)
{
struct vring_virtqueue_packed vring_packed = {};
struct virtqueue *vq;
- if (vring_alloc_queue_packed(&vring_packed, vdev, num, dma_dev))
+ if (vring_alloc_queue_packed(&vring_packed, vdev, num, map))
return NULL;
vq = __vring_new_virtqueue_packed(index, &vring_packed, vdev, weak_barriers,
- context, notify, callback, name, dma_dev);
+ context, notify, callback, name, map);
if (!vq) {
- vring_free_packed(&vring_packed, vdev, dma_dev);
+ vring_free_packed(&vring_packed, vdev, map);
return NULL;
}
struct virtio_device *vdev = _vq->vdev;
int err;
- if (vring_alloc_queue_packed(&vring_packed, vdev, num, vring_dma_dev(vq)))
+ if (vring_alloc_queue_packed(&vring_packed, vdev, num, vq->map))
goto err_ring;
err = vring_alloc_state_extra_packed(&vring_packed);
return 0;
err_state_extra:
- vring_free_packed(&vring_packed, vdev, vring_dma_dev(vq));
+ vring_free_packed(&vring_packed, vdev, vq->map);
err_ring:
virtqueue_reinit_packed(vq);
return -ENOMEM;
struct vring_virtqueue *vq = to_vvq(_vq);
if (vq->use_map_api)
- return vring_dma_dev(vq);
+ return vq->map.dma_dev;
else
return NULL;
}
void (*callback)(struct virtqueue *),
const char *name)
{
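+ /* Without an explicit token, DMA is done by the transport's parent device. */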
+ union virtio_map map = {.dma_dev = vdev->dev.parent};
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
return vring_create_virtqueue_packed(index, num, vring_align,
vdev, weak_barriers, may_reduce_num,
- context, notify, callback, name, vdev->dev.parent);
+ context, notify, callback, name, map);
return vring_create_virtqueue_split(index, num, vring_align,
vdev, weak_barriers, may_reduce_num,
- context, notify, callback, name, vdev->dev.parent);
+ context, notify, callback, name, map);
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);
-struct virtqueue *vring_create_virtqueue_dma(
+struct virtqueue *vring_create_virtqueue_map(
unsigned int index,
unsigned int num,
unsigned int vring_align,
bool (*notify)(struct virtqueue *),
void (*callback)(struct virtqueue *),
const char *name,
- struct device *dma_dev)
+ union virtio_map map)
{
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
return vring_create_virtqueue_packed(index, num, vring_align,
vdev, weak_barriers, may_reduce_num,
- context, notify, callback, name, dma_dev);
+ context, notify, callback, name, map);
return vring_create_virtqueue_split(index, num, vring_align,
vdev, weak_barriers, may_reduce_num,
- context, notify, callback, name, dma_dev);
+ context, notify, callback, name, map);
}
-EXPORT_SYMBOL_GPL(vring_create_virtqueue_dma);
+EXPORT_SYMBOL_GPL(vring_create_virtqueue_map);
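+/*
+ * Example (a sketch, names assumed): a transport whose parent device
+ * does the DMA would pass that device as the token:
+ *
+ *	union virtio_map map = { .dma_dev = &pci_dev->dev };
+ *
+ *	vq = vring_create_virtqueue_map(index, num, SMP_CACHE_BYTES,
+ *					vdev, true, true, ctx, notify,
+ *					callback, "requestq", map);
+ */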
/**
* virtqueue_resize - resize the vring of vq
const char *name)
{
struct vring_virtqueue_split vring_split = {};
+ union virtio_map map = {.dma_dev = vdev->dev.parent};
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
struct vring_virtqueue_packed vring_packed = {};
return __vring_new_virtqueue_packed(index, &vring_packed,
vdev, weak_barriers,
context, notify, callback,
- name, vdev->dev.parent);
+ name, map);
}
vring_init(&vring_split.vring, num, pages, vring_align);
return __vring_new_virtqueue_split(index, &vring_split, vdev, weak_barriers,
context, notify, callback, name,
- vdev->dev.parent);
+ map);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
vq->packed.ring_size_in_bytes,
vq->packed.vring.desc,
vq->packed.ring_dma_addr,
- vring_dma_dev(vq));
+ vq->map);
vring_free_queue(vq->vq.vdev,
vq->packed.event_size_in_bytes,
vq->packed.vring.driver,
vq->packed.driver_event_dma_addr,
- vring_dma_dev(vq));
+ vq->map);
vring_free_queue(vq->vq.vdev,
vq->packed.event_size_in_bytes,
vq->packed.vring.device,
vq->packed.device_event_dma_addr,
- vring_dma_dev(vq));
+ vq->map);
kfree(vq->packed.desc_state);
kfree(vq->packed.desc_extra);
vq->split.queue_size_in_bytes,
vq->split.vring.desc,
vq->split.queue_dma_addr,
- vring_dma_dev(vq));
+ vq->map);
}
}
if (!vq->packed_ring) {