int (*add)(struct vring_virtqueue *vq, struct scatterlist *sgs[],
unsigned int total_sg, unsigned int out_sgs,
unsigned int in_sgs, void *data,
- void *ctx, bool premapped, gfp_t gfp);
+ void *ctx, bool premapped, gfp_t gfp,
+ unsigned long attr);
void *(*get)(struct vring_virtqueue *vq, unsigned int *len, void **ctx);
bool (*kick_prepare)(struct vring_virtqueue *vq);
void (*disable_cb)(struct vring_virtqueue *vq);
/* Map one sg entry. */
static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist *sg,
enum dma_data_direction direction, dma_addr_t *addr,
- u32 *len, bool premapped)
+ u32 *len, bool premapped, unsigned long attr)
{
if (premapped) {
*addr = sg_dma_address(sg);
*/
*addr = virtqueue_map_page_attrs(&vq->vq, sg_page(sg),
sg->offset, sg->length,
- direction, 0);
+ direction, attr);
if (vring_mapping_error(vq, *addr))
return -ENOMEM;
void *data,
void *ctx,
bool premapped,
- gfp_t gfp)
+ gfp_t gfp,
+ unsigned long attr)
{
struct vring_desc_extra *extra;
struct scatterlist *sg;
if (++sg_count != total_sg)
flags |= VRING_DESC_F_NEXT;
- if (vring_map_one_sg(vq, sg, DMA_TO_DEVICE, &addr, &len, premapped))
+ if (vring_map_one_sg(vq, sg, DMA_TO_DEVICE, &addr, &len,
+ premapped, attr))
goto unmap_release;
/* Note that we trust indirect descriptor
if (++sg_count != total_sg)
flags |= VRING_DESC_F_NEXT;
- if (vring_map_one_sg(vq, sg, DMA_FROM_DEVICE, &addr, &len, premapped))
+ if (vring_map_one_sg(vq, sg, DMA_FROM_DEVICE, &addr, &len,
+ premapped, attr))
goto unmap_release;
/* Note that we trust indirect descriptor
void *data,
bool premapped,
gfp_t gfp,
- u16 id)
+ u16 id,
+ unsigned long attr)
{
struct vring_desc_extra *extra;
struct vring_packed_desc *desc;
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
if (vring_map_one_sg(vq, sg, n < out_sgs ?
DMA_TO_DEVICE : DMA_FROM_DEVICE,
- &addr, &len, premapped))
+ &addr, &len, premapped, attr))
goto unmap_release;
desc[i].flags = cpu_to_le16(n < out_sgs ?
void *data,
void *ctx,
bool premapped,
- gfp_t gfp)
+ gfp_t gfp,
+ unsigned long attr)
{
struct vring_packed_desc *desc;
struct scatterlist *sg;
id = vq->free_head;
BUG_ON(id == vq->packed.vring.num);
err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs,
- in_sgs, data, premapped,
- gfp, id);
+ in_sgs, data, premapped, gfp,
+ id, attr);
if (err != -ENOMEM) {
END_USE(vq);
return err;
if (vring_map_one_sg(vq, sg, n < out_sgs ?
DMA_TO_DEVICE : DMA_FROM_DEVICE,
- &addr, &len, premapped))
+ &addr, &len, premapped, attr))
goto unmap_release;
flags = cpu_to_le16(vq->packed.avail_used_flags |
void *data,
void *ctx,
bool premapped,
- gfp_t gfp)
+ gfp_t gfp,
+ unsigned long attr)
{
struct vring_packed_desc *desc;
struct scatterlist *sg;
if (virtqueue_use_indirect(vq, total_sg)) {
err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs,
in_sgs, data, premapped, gfp,
- vq->packed.next_avail_idx);
+ vq->packed.next_avail_idx,
+ attr);
if (err != -ENOMEM) {
END_USE(vq);
return err;
if (vring_map_one_sg(vq, sg, n < out_sgs ?
DMA_TO_DEVICE : DMA_FROM_DEVICE,
- &addr, &len, premapped))
+ &addr, &len, premapped, attr))
goto unmap_release;
flags |= cpu_to_le16(vq->packed.avail_used_flags);
void *data,
void *ctx,
bool premapped,
- gfp_t gfp)
+ gfp_t gfp,
+ unsigned long attr)
{
struct vring_virtqueue *vq = to_vvq(_vq);
return VIRTQUEUE_CALL(vq, add, sgs, total_sg,
out_sgs, in_sgs, data,
- ctx, premapped, gfp);
+ ctx, premapped, gfp, attr);
}
/**
total_sg++;
}
return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
- data, NULL, false, gfp);
+ data, NULL, false, gfp, 0);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
void *data,
gfp_t gfp)
{
- return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, false, gfp);
+ return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, false, gfp, 0);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
void *data,
gfp_t gfp)
{
- return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, true, gfp);
+ return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, true, gfp, 0);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf_premapped);
void *data,
gfp_t gfp)
{
- return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, false, gfp);
+ return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, false, gfp, 0);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
+/**
+ * virtqueue_add_inbuf_cache_clean - expose cache-clean input buffers to other end
+ * @vq: the struct virtqueue we're talking about.
+ * @sg: scatterlist (must be well-formed and terminated!)
+ * @num: the number of entries in @sg writable by other side
+ * @data: the token identifying the buffer.
+ * @gfp: how to do memory allocations (if necessary).
+ *
+ * Same as virtqueue_add_inbuf(), but passes DMA_ATTR_CPU_CACHE_CLEAN to
+ * indicate that the CPU will not dirty any cacheline overlapping this buffer
+ * while it is available to the device, and to suppress overlapping-cacheline
+ * warnings in DMA debug builds.
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
+ */
+int virtqueue_add_inbuf_cache_clean(struct virtqueue *vq,
+ struct scatterlist *sg, unsigned int num,
+ void *data,
+ gfp_t gfp)
+{
+ return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, false, gfp,
+ DMA_ATTR_CPU_CACHE_CLEAN);
+}
+EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_cache_clean);
+
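/*
 * Usage sketch: a hypothetical driver posting a receive buffer that it
 * promises not to write to (not even adjacent bytes sharing a cacheline)
 * until the device hands it back, so DMA_ATTR_CPU_CACHE_CLEAN can be passed
 * down to the mapping via the helper above. The demo_post_rx_buf() name and
 * the rx_vq/buf/len parameters are illustrative, not taken from this patch.
 */
static int demo_post_rx_buf(struct virtqueue *rx_vq, void *buf, unsigned int len)
{
	struct scatterlist sg;
	int err;

	sg_init_one(&sg, buf, len);

	/* Single-entry sg list; buf doubles as the completion token. */
	err = virtqueue_add_inbuf_cache_clean(rx_vq, &sg, 1, buf, GFP_ATOMIC);
	if (err)
		return err;

	virtqueue_kick(rx_vq);
	return 0;
}
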
/**
* virtqueue_add_inbuf_ctx - expose input buffers to other end
* @vq: the struct virtqueue we're talking about.
void *ctx,
gfp_t gfp)
{
- return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, false, gfp);
+ return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, false, gfp, 0);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
void *ctx,
gfp_t gfp)
{
- return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, true, gfp);
+ return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, true, gfp, 0);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_premapped);