virtio: add virtqueue_add_inbuf_cache_clean API
author     Michael S. Tsirkin <mst@redhat.com>
           Mon, 29 Dec 2025 18:25:23 +0000 (13:25 -0500)
committer  Michael S. Tsirkin <mst@redhat.com>
           Fri, 2 Jan 2026 11:22:49 +0000 (06:22 -0500)
Add virtqueue_add_inbuf_cache_clean() for passing DMA_ATTR_CPU_CACHE_CLEAN
to virtqueue operations. This suppresses DMA debug cacheline overlap
warnings for buffers where proper cache management is ensured by the
caller.

Message-ID: <e50d38c974859e731e50bda7a0ee5691debf5bc4.1767601130.git.mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
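
As a hedged sketch of how a caller might use the new helper (the function name, buffer, and GFP flag below are illustrative, not part of this patch), a receive refill path where the CPU neither reads nor writes the buffer while it is posted could look like this:

#include <linux/scatterlist.h>
#include <linux/virtio.h>

/*
 * Illustrative only: repost a receive buffer that the CPU will not touch
 * until the device hands it back, so its cachelines stay clean while the
 * buffer is available to the device.
 */
static int demo_refill_rx(struct virtqueue *vq, void *buf, unsigned int len)
{
	struct scatterlist sg;
	int err;

	sg_init_one(&sg, buf, len);

	/* Caller guarantees cache cleanliness, so the new variant is safe. */
	err = virtqueue_add_inbuf_cache_clean(vq, &sg, 1, buf, GFP_ATOMIC);
	if (err)
		return err;

	virtqueue_kick(vq);
	return 0;
}

Apart from the DMA attribute, the wrapper behaves exactly like virtqueue_add_inbuf(), so error handling and kicking are unchanged.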
drivers/virtio/virtio_ring.c
include/linux/virtio.h

diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 95e320b23624dcdd24a911ed6d498dfc845c8615..4fe0f78df5eccda3b66b5aaae778b6fce84cd9c9 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -174,7 +174,8 @@ struct virtqueue_ops {
        int (*add)(struct vring_virtqueue *vq, struct scatterlist *sgs[],
                   unsigned int total_sg, unsigned int out_sgs,
                   unsigned int in_sgs, void *data,
-                  void *ctx, bool premapped, gfp_t gfp);
+                  void *ctx, bool premapped, gfp_t gfp,
+                  unsigned long attr);
        void *(*get)(struct vring_virtqueue *vq, unsigned int *len, void **ctx);
        bool (*kick_prepare)(struct vring_virtqueue *vq);
        void (*disable_cb)(struct vring_virtqueue *vq);
@@ -444,7 +445,7 @@ static int vring_mapping_error(const struct vring_virtqueue *vq,
 /* Map one sg entry. */
 static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist *sg,
                            enum dma_data_direction direction, dma_addr_t *addr,
-                           u32 *len, bool premapped)
+                           u32 *len, bool premapped, unsigned long attr)
 {
        if (premapped) {
                *addr = sg_dma_address(sg);
@@ -472,7 +473,7 @@ static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist
         */
        *addr = virtqueue_map_page_attrs(&vq->vq, sg_page(sg),
                                         sg->offset, sg->length,
-                                        direction, 0);
+                                        direction, attr);
 
        if (vring_mapping_error(vq, *addr))
                return -ENOMEM;
@@ -603,7 +604,8 @@ static inline int virtqueue_add_split(struct vring_virtqueue *vq,
                                      void *data,
                                      void *ctx,
                                      bool premapped,
-                                     gfp_t gfp)
+                                     gfp_t gfp,
+                                     unsigned long attr)
 {
        struct vring_desc_extra *extra;
        struct scatterlist *sg;
@@ -675,7 +677,8 @@ static inline int virtqueue_add_split(struct vring_virtqueue *vq,
                        if (++sg_count != total_sg)
                                flags |= VRING_DESC_F_NEXT;
 
-                       if (vring_map_one_sg(vq, sg, DMA_TO_DEVICE, &addr, &len, premapped))
+                       if (vring_map_one_sg(vq, sg, DMA_TO_DEVICE, &addr, &len,
+                                            premapped, attr))
                                goto unmap_release;
 
                        /* Note that we trust indirect descriptor
@@ -694,7 +697,8 @@ static inline int virtqueue_add_split(struct vring_virtqueue *vq,
                        if (++sg_count != total_sg)
                                flags |= VRING_DESC_F_NEXT;
 
-                       if (vring_map_one_sg(vq, sg, DMA_FROM_DEVICE, &addr, &len, premapped))
+                       if (vring_map_one_sg(vq, sg, DMA_FROM_DEVICE, &addr, &len,
+                                            premapped, attr))
                                goto unmap_release;
 
                        /* Note that we trust indirect descriptor
@@ -1487,7 +1491,8 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
                                         void *data,
                                         bool premapped,
                                         gfp_t gfp,
-                                        u16 id)
+                                        u16 id,
+                                        unsigned long attr)
 {
        struct vring_desc_extra *extra;
        struct vring_packed_desc *desc;
@@ -1516,7 +1521,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
                for (sg = sgs[n]; sg; sg = sg_next(sg)) {
                        if (vring_map_one_sg(vq, sg, n < out_sgs ?
                                             DMA_TO_DEVICE : DMA_FROM_DEVICE,
-                                            &addr, &len, premapped))
+                                            &addr, &len, premapped, attr))
                                goto unmap_release;
 
                        desc[i].flags = cpu_to_le16(n < out_sgs ?
@@ -1615,7 +1620,8 @@ static inline int virtqueue_add_packed(struct vring_virtqueue *vq,
                                       void *data,
                                       void *ctx,
                                       bool premapped,
-                                      gfp_t gfp)
+                                      gfp_t gfp,
+                                      unsigned long attr)
 {
        struct vring_packed_desc *desc;
        struct scatterlist *sg;
@@ -1642,8 +1648,8 @@ static inline int virtqueue_add_packed(struct vring_virtqueue *vq,
                id = vq->free_head;
                BUG_ON(id == vq->packed.vring.num);
                err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs,
-                                                   in_sgs, data, premapped,
-                                                   gfp, id);
+                                                   in_sgs, data, premapped, gfp,
+                                                   id, attr);
                if (err != -ENOMEM) {
                        END_USE(vq);
                        return err;
@@ -1679,7 +1685,7 @@ static inline int virtqueue_add_packed(struct vring_virtqueue *vq,
 
                        if (vring_map_one_sg(vq, sg, n < out_sgs ?
                                             DMA_TO_DEVICE : DMA_FROM_DEVICE,
-                                            &addr, &len, premapped))
+                                            &addr, &len, premapped, attr))
                                goto unmap_release;
 
                        flags = cpu_to_le16(vq->packed.avail_used_flags |
@@ -1772,7 +1778,8 @@ static inline int virtqueue_add_packed_in_order(struct vring_virtqueue *vq,
                                                void *data,
                                                void *ctx,
                                                bool premapped,
-                                               gfp_t gfp)
+                                               gfp_t gfp,
+                                               unsigned long attr)
 {
        struct vring_packed_desc *desc;
        struct scatterlist *sg;
@@ -1799,7 +1806,8 @@ static inline int virtqueue_add_packed_in_order(struct vring_virtqueue *vq,
        if (virtqueue_use_indirect(vq, total_sg)) {
                err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs,
                                                    in_sgs, data, premapped, gfp,
-                                                   vq->packed.next_avail_idx);
+                                                   vq->packed.next_avail_idx,
+                                                   attr);
                if (err != -ENOMEM) {
                        END_USE(vq);
                        return err;
@@ -1838,7 +1846,7 @@ static inline int virtqueue_add_packed_in_order(struct vring_virtqueue *vq,
 
                        if (vring_map_one_sg(vq, sg, n < out_sgs ?
                                             DMA_TO_DEVICE : DMA_FROM_DEVICE,
-                                            &addr, &len, premapped))
+                                            &addr, &len, premapped, attr))
                                goto unmap_release;
 
                        flags |= cpu_to_le16(vq->packed.avail_used_flags);
@@ -2781,13 +2789,14 @@ static inline int virtqueue_add(struct virtqueue *_vq,
                                void *data,
                                void *ctx,
                                bool premapped,
-                               gfp_t gfp)
+                               gfp_t gfp,
+                               unsigned long attr)
 {
        struct vring_virtqueue *vq = to_vvq(_vq);
 
        return VIRTQUEUE_CALL(vq, add, sgs, total_sg,
                              out_sgs, in_sgs, data,
-                             ctx, premapped, gfp);
+                             ctx, premapped, gfp, attr);
 }
 
 /**
@@ -2825,7 +2834,7 @@ int virtqueue_add_sgs(struct virtqueue *_vq,
                        total_sg++;
        }
        return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
-                            data, NULL, false, gfp);
+                            data, NULL, false, gfp, 0);
 }
 EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
 
@@ -2847,7 +2856,7 @@ int virtqueue_add_outbuf(struct virtqueue *vq,
                         void *data,
                         gfp_t gfp)
 {
-       return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, false, gfp);
+       return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, false, gfp, 0);
 }
 EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
 
@@ -2870,7 +2879,7 @@ int virtqueue_add_outbuf_premapped(struct virtqueue *vq,
                                   void *data,
                                   gfp_t gfp)
 {
-       return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, true, gfp);
+       return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, true, gfp, 0);
 }
 EXPORT_SYMBOL_GPL(virtqueue_add_outbuf_premapped);
 
@@ -2892,10 +2901,38 @@ int virtqueue_add_inbuf(struct virtqueue *vq,
                        void *data,
                        gfp_t gfp)
 {
-       return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, false, gfp);
+       return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, false, gfp, 0);
 }
 EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
 
+/**
+ * virtqueue_add_inbuf_cache_clean - expose input buffers with cache clean
+ * @vq: the struct virtqueue we're talking about.
+ * @sg: scatterlist (must be well-formed and terminated!)
+ * @num: the number of entries in @sg writable by other side
+ * @data: the token identifying the buffer.
+ * @gfp: how to do memory allocations (if necessary).
+ *
+ * Same as virtqueue_add_inbuf but passes DMA_ATTR_CPU_CACHE_CLEAN to indicate
+ * that the CPU will not dirty any cacheline overlapping this buffer while it
+ * is available, and to suppress overlapping cacheline warnings in DMA debug
+ * builds.
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
+ */
+int virtqueue_add_inbuf_cache_clean(struct virtqueue *vq,
+                                   struct scatterlist *sg, unsigned int num,
+                                   void *data,
+                                   gfp_t gfp)
+{
+       return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, false, gfp,
+                            DMA_ATTR_CPU_CACHE_CLEAN);
+}
+EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_cache_clean);
+
 /**
  * virtqueue_add_inbuf_ctx - expose input buffers to other end
  * @vq: the struct virtqueue we're talking about.
@@ -2916,7 +2953,7 @@ int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
                        void *ctx,
                        gfp_t gfp)
 {
-       return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, false, gfp);
+       return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, false, gfp, 0);
 }
 EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
 
@@ -2941,7 +2978,7 @@ int virtqueue_add_inbuf_premapped(struct virtqueue *vq,
                                  void *ctx,
                                  gfp_t gfp)
 {
-       return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, true, gfp);
+       return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, true, gfp, 0);
 }
 EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_premapped);
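
The guarantee documented in the new kerneldoc above covers whole cachelines, not just the buffer's own bytes. A hedged illustration of the layout concern (struct names and sizes are hypothetical): if CPU-written driver bookkeeping shared a cacheline with the posted data area, the caller could not give that guarantee, so such state is best kept in separately cacheline-aligned storage.

#include <linux/cache.h>
#include <linux/types.h>

/*
 * Hypothetical layout: CPU-written bookkeeping lives in its own cacheline,
 * and the receive data area is only written by the device while posted, so
 * the CPU never dirties a cacheline that overlaps the posted buffer.
 */
struct demo_rx_slot {
	void *token;		/* written by the CPU at any time */
	u32 len;		/* filled in when the buffer is returned */
} ____cacheline_aligned;

struct demo_rx_buf {
	u8 data[2048];		/* device-written while available */
} ____cacheline_aligned;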
 
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 3626eb6947282a2a3b65ca4515f2edf8087333c4..63bb05ece8c58fe50d79694d29bddea1ee36c005 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -62,6 +62,11 @@ int virtqueue_add_inbuf(struct virtqueue *vq,
                        void *data,
                        gfp_t gfp);
 
+int virtqueue_add_inbuf_cache_clean(struct virtqueue *vq,
+                                   struct scatterlist sg[], unsigned int num,
+                                   void *data,
+                                   gfp_t gfp);
+
 int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
                            struct scatterlist sg[], unsigned int num,
                            void *data,