#define LAST_ADD_TIME_INVALID(vq)
#endif
+/*
+ * Ring layout of a virtqueue: split (the VIRTIO 1.x default) or packed
+ * (negotiated via VIRTIO_F_RING_PACKED).  Selects which entry of the
+ * per-layout ops tables (split_ops/packed_ops) services the queue.
+ */
+enum vq_layout {
+	VQ_LAYOUT_SPLIT = 0,
+	VQ_LAYOUT_PACKED,
+};
+
/* Per-descriptor driver bookkeeping for the split ring layout. */
struct vring_desc_state_split {
	void *data;			/* Data for callback. */
	size_t event_size_in_bytes;	/* NOTE(review): size of the event area
					 * associated with this buffer — confirm
					 * against the code that sets it. */
};
+struct vring_virtqueue;
+
+/*
+ * Per-layout virtqueue operations.  Each member has a split and a packed
+ * implementation (see split_ops/packed_ops); calls are dispatched on
+ * vq->layout via VIRTQUEUE_CALL()/VOID_VIRTQUEUE_CALL().
+ */
+struct virtqueue_ops {
+	/* Expose scatterlists to the device as a single buffer. */
+	int (*add)(struct vring_virtqueue *vq, struct scatterlist *sgs[],
+		   unsigned int total_sg, unsigned int out_sgs,
+		   unsigned int in_sgs, void *data,
+		   void *ctx, bool premapped, gfp_t gfp);
+	/* Fetch the next used buffer's cookie (NULL if none is ready). */
+	void *(*get)(struct vring_virtqueue *vq, unsigned int *len, void **ctx);
+	/* Returns true if the device should be notified. */
+	bool (*kick_prepare)(struct vring_virtqueue *vq);
+	void (*disable_cb)(struct vring_virtqueue *vq);
+	bool (*enable_cb_delayed)(struct vring_virtqueue *vq);
+	/* Re-arm callbacks; returns an opaque index consumed by poll(). */
+	unsigned int (*enable_cb_prepare)(struct vring_virtqueue *vq);
+	bool (*poll)(const struct vring_virtqueue *vq,
+		     unsigned int last_used_idx);
+	void *(*detach_unused_buf)(struct vring_virtqueue *vq);
+	bool (*more_used)(const struct vring_virtqueue *vq);
+	int (*resize)(struct vring_virtqueue *vq, u32 num);
+	void (*reset)(struct vring_virtqueue *vq);
+};
+
struct vring_virtqueue {
struct virtqueue vq;
- /* Is this a packed ring? */
- bool packed_ring;
-
/* Is DMA API used? */
bool use_map_api;
/* Host publishes avail event idx */
bool event;
+ enum vq_layout layout;
+
/* Head of free buffer list. */
unsigned int free_head;
/* Number we've added since last sync. */
#define to_vvq(_vq) container_of_const(_vq, struct vring_virtqueue, vq)
+
+/* True when this virtqueue uses the packed ring layout. */
+static inline bool virtqueue_is_packed(const struct vring_virtqueue *vq)
+{
+	return vq->layout == VQ_LAYOUT_PACKED;
+}
+
static bool virtqueue_use_indirect(const struct vring_virtqueue *vq,
unsigned int total_sg)
{
{
vq->vq.num_free = num;
- if (vq->packed_ring)
+ if (virtqueue_is_packed(vq))
vq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR);
else
vq->last_used_idx = 0;
return 0;
}
+static const struct virtqueue_ops split_ops;
+
static struct virtqueue *__vring_new_virtqueue_split(unsigned int index,
struct vring_virtqueue_split *vring_split,
struct virtio_device *vdev,
if (!vq)
return NULL;
- vq->packed_ring = false;
+ vq->layout = VQ_LAYOUT_SPLIT;
vq->vq.callback = callback;
vq->vq.vdev = vdev;
vq->vq.name = name;
/* we need to reset the desc.flags. For more, see is_used_desc_packed() */
memset(vq->packed.vring.desc, 0, vq->packed.ring_size_in_bytes);
-
virtqueue_init(vq, vq->packed.vring.num);
virtqueue_vring_init_packed(&vq->packed, !!vq->vq.callback);
}
+static const struct virtqueue_ops packed_ops;
+
static struct virtqueue *__vring_new_virtqueue_packed(unsigned int index,
struct vring_virtqueue_packed *vring_packed,
struct virtio_device *vdev,
#else
vq->broken = false;
#endif
- vq->packed_ring = true;
+ vq->layout = VQ_LAYOUT_PACKED;
vq->map = map;
vq->use_map_api = vring_use_map_api(vdev);
return -ENOMEM;
}
+/* Dispatch table for the split ring layout (forward-declared above). */
+static const struct virtqueue_ops split_ops = {
+	.add = virtqueue_add_split,
+	.get = virtqueue_get_buf_ctx_split,
+	.kick_prepare = virtqueue_kick_prepare_split,
+	.disable_cb = virtqueue_disable_cb_split,
+	.enable_cb_delayed = virtqueue_enable_cb_delayed_split,
+	.enable_cb_prepare = virtqueue_enable_cb_prepare_split,
+	.poll = virtqueue_poll_split,
+	.detach_unused_buf = virtqueue_detach_unused_buf_split,
+	.more_used = more_used_split,
+	.resize = virtqueue_resize_split,
+	.reset = virtqueue_reset_split,
+};
+
+/* Dispatch table for the packed ring layout (forward-declared above). */
+static const struct virtqueue_ops packed_ops = {
+	.add = virtqueue_add_packed,
+	.get = virtqueue_get_buf_ctx_packed,
+	.kick_prepare = virtqueue_kick_prepare_packed,
+	.disable_cb = virtqueue_disable_cb_packed,
+	.enable_cb_delayed = virtqueue_enable_cb_delayed_packed,
+	.enable_cb_prepare = virtqueue_enable_cb_prepare_packed,
+	.poll = virtqueue_poll_packed,
+	.detach_unused_buf = virtqueue_detach_unused_buf_packed,
+	.more_used = more_used_packed,
+	.resize = virtqueue_resize_packed,
+	.reset = virtqueue_reset_packed,
+};
+
+
static int virtqueue_disable_and_recycle(struct virtqueue *_vq,
void (*recycle)(struct virtqueue *vq, void *buf))
{
* Generic functions and exported symbols.
*/
+/*
+ * Dispatch a virtqueue_ops method on vq according to its ring layout and
+ * yield the callee's return value (GCC statement expression).  Ops that
+ * return void must use VOID_VIRTQUEUE_CALL() instead, because
+ * typeof(void-expression) is not a valid declaration type.
+ *
+ * Macro hygiene: both locals carry the __VIRTQUEUE_CALL_ prefix so that
+ * argument expressions in __VA_ARGS__ which reference a caller-local
+ * "ret" (or "vq") are not captured by the macro's own declarations.
+ */
+#define VIRTQUEUE_CALL(vq, op, ...)					\
+	({								\
+		typeof(vq) __VIRTQUEUE_CALL_vq = (vq);			\
+		typeof(split_ops.op(__VIRTQUEUE_CALL_vq,		\
+				    ##__VA_ARGS__)) __VIRTQUEUE_CALL_ret; \
+									\
+		switch (__VIRTQUEUE_CALL_vq->layout) {			\
+		case VQ_LAYOUT_SPLIT:					\
+			__VIRTQUEUE_CALL_ret =				\
+				split_ops.op(__VIRTQUEUE_CALL_vq,	\
+					     ##__VA_ARGS__);		\
+			break;						\
+		case VQ_LAYOUT_PACKED:					\
+			__VIRTQUEUE_CALL_ret =				\
+				packed_ops.op(__VIRTQUEUE_CALL_vq,	\
+					      ##__VA_ARGS__);		\
+			break;						\
+		default:						\
+			BUG();						\
+			break;						\
+		}							\
+		__VIRTQUEUE_CALL_ret;					\
+})
+
+
+/*
+ * As VIRTQUEUE_CALL(), but for ops that return void: no result variable
+ * is declared, since typeof() cannot be applied to a void expression.
+ */
+#define VOID_VIRTQUEUE_CALL(vq, op, ...)				\
+	({								\
+		typeof(vq) __VIRTQUEUE_CALL_vq = (vq);			\
+									\
+		switch (__VIRTQUEUE_CALL_vq->layout) {			\
+		case VQ_LAYOUT_SPLIT:					\
+			split_ops.op(__VIRTQUEUE_CALL_vq, ##__VA_ARGS__); \
+			break;						\
+		case VQ_LAYOUT_PACKED:					\
+			packed_ops.op(__VIRTQUEUE_CALL_vq, ##__VA_ARGS__); \
+			break;						\
+		default:						\
+			BUG();						\
+			break;						\
+		}							\
+})
+
+
static inline int virtqueue_add(struct virtqueue *_vq,
struct scatterlist *sgs[],
unsigned int total_sg,
{
struct vring_virtqueue *vq = to_vvq(_vq);
- return vq->packed_ring ? virtqueue_add_packed(vq, sgs, total_sg,
- out_sgs, in_sgs, data, ctx, premapped, gfp) :
- virtqueue_add_split(vq, sgs, total_sg,
- out_sgs, in_sgs, data, ctx, premapped, gfp);
+ return VIRTQUEUE_CALL(vq, add, sgs, total_sg,
+ out_sgs, in_sgs, data,
+ ctx, premapped, gfp);
}
/**
{
struct vring_virtqueue *vq = to_vvq(_vq);
- return vq->packed_ring ? virtqueue_kick_prepare_packed(vq) :
- virtqueue_kick_prepare_split(vq);
+ return VIRTQUEUE_CALL(vq, kick_prepare);
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
{
struct vring_virtqueue *vq = to_vvq(_vq);
- return vq->packed_ring ? virtqueue_get_buf_ctx_packed(vq, len, ctx) :
- virtqueue_get_buf_ctx_split(vq, len, ctx);
+ return VIRTQUEUE_CALL(vq, get, len, ctx);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);
{
struct vring_virtqueue *vq = to_vvq(_vq);
- if (vq->packed_ring)
- virtqueue_disable_cb_packed(vq);
- else
- virtqueue_disable_cb_split(vq);
+ VOID_VIRTQUEUE_CALL(vq, disable_cb);
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
if (vq->event_triggered)
vq->event_triggered = false;
- return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(vq) :
- virtqueue_enable_cb_prepare_split(vq);
+ return VIRTQUEUE_CALL(vq, enable_cb_prepare);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
return false;
virtio_mb(vq->weak_barriers);
- return vq->packed_ring ? virtqueue_poll_packed(vq, last_used_idx) :
- virtqueue_poll_split(vq, last_used_idx);
+
+ return VIRTQUEUE_CALL(vq, poll, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
if (vq->event_triggered)
data_race(vq->event_triggered = false);
- return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(vq) :
- virtqueue_enable_cb_delayed_split(vq);
+ return VIRTQUEUE_CALL(vq, enable_cb_delayed);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
{
struct vring_virtqueue *vq = to_vvq(_vq);
- return vq->packed_ring ? virtqueue_detach_unused_buf_packed(vq) :
- virtqueue_detach_unused_buf_split(vq);
+ return VIRTQUEUE_CALL(vq, detach_unused_buf);
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
static inline bool more_used(const struct vring_virtqueue *vq)
{
- return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
+ return VIRTQUEUE_CALL(vq, more_used);
}
/**
if (!num)
return -EINVAL;
- if ((vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num) == num)
+ if (virtqueue_get_vring_size(_vq) == num)
return 0;
err = virtqueue_disable_and_recycle(_vq, recycle);
if (recycle_done)
recycle_done(_vq);
- if (vq->packed_ring)
- err = virtqueue_resize_packed(vq, num);
- else
- err = virtqueue_resize_split(vq, num);
+ err = VIRTQUEUE_CALL(vq, resize, num);
err_reset = virtqueue_enable_after_reset(_vq);
if (err_reset)
if (recycle_done)
recycle_done(_vq);
- if (vq->packed_ring)
- virtqueue_reset_packed(vq);
- else
- virtqueue_reset_split(vq);
+ VOID_VIRTQUEUE_CALL(vq, reset);
return virtqueue_enable_after_reset(_vq);
}
struct vring_virtqueue *vq = to_vvq(_vq);
if (vq->we_own_ring) {
- if (vq->packed_ring) {
+ if (virtqueue_is_packed(vq)) {
vring_free_queue(vq->vq.vdev,
vq->packed.ring_size_in_bytes,
vq->packed.vring.desc,
vq->map);
}
}
- if (!vq->packed_ring) {
+ if (!virtqueue_is_packed(vq)) {
kfree(vq->split.desc_state);
kfree(vq->split.desc_extra);
}
struct vring_virtqueue *vq = to_vvq(_vq);
u16 next;
- if (vq->packed_ring)
+ if (virtqueue_is_packed(vq))
next = (vq->packed.next_avail_idx &
~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR))) |
vq->packed.avail_wrap_counter <<
const struct vring_virtqueue *vq = to_vvq(_vq);
- return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
+ return virtqueue_is_packed(vq) ? vq->packed.vring.num :
+ vq->split.vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
BUG_ON(!vq->we_own_ring);
- if (vq->packed_ring)
+ if (virtqueue_is_packed(vq))
return vq->packed.ring_dma_addr;
return vq->split.queue_dma_addr;
BUG_ON(!vq->we_own_ring);
- if (vq->packed_ring)
+ if (virtqueue_is_packed(vq))
return vq->packed.driver_event_dma_addr;
return vq->split.queue_dma_addr +
BUG_ON(!vq->we_own_ring);
- if (vq->packed_ring)
+ if (virtqueue_is_packed(vq))
return vq->packed.device_event_dma_addr;
return vq->split.queue_dma_addr +