From 03f05c4eeb7bc5019deb25f7415a7af8dc3fdd3f Mon Sep 17 00:00:00 2001
From: Jason Wang
Date: Tue, 30 Dec 2025 14:46:44 +0800
Subject: [PATCH] virtio_ring: determine descriptor flags at one time
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

Let's determine the last descriptor by counting the number of sgs.
This is consistent with the packed virtqueue implementation and eases
the future in-order implementation.

Acked-by: Eugenio Pérez
Reviewed-by: Xuan Zhuo
Signed-off-by: Jason Wang
Signed-off-by: Michael S. Tsirkin
Message-Id: <20251230064649.55597-15-jasowang@redhat.com>
---
 drivers/virtio/virtio_ring.c | 28 +++++++++++++---------------
 1 file changed, 13 insertions(+), 15 deletions(-)

diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index d0904ac0aa933..e55b26a030371 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -574,7 +574,7 @@ static inline int virtqueue_add_split(struct vring_virtqueue *vq,
 	struct vring_desc_extra *extra;
 	struct scatterlist *sg;
 	struct vring_desc *desc;
-	unsigned int i, n, avail, descs_used, prev, err_idx;
+	unsigned int i, n, avail, descs_used, err_idx, sg_count = 0;
 	int head;
 	bool indirect;
 
@@ -634,42 +634,40 @@ static inline int virtqueue_add_split(struct vring_virtqueue *vq,
 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
 			dma_addr_t addr;
 			u32 len;
+			u16 flags = 0;
+
+			if (++sg_count != total_sg)
+				flags |= VRING_DESC_F_NEXT;
 
 			if (vring_map_one_sg(vq, sg, DMA_TO_DEVICE, &addr, &len, premapped))
 				goto unmap_release;
 
-			prev = i;
 			/* Note that we trust indirect descriptor
 			 * table since it use stream DMA mapping.
 			 */
-			i = virtqueue_add_desc_split(vq, desc, extra, i, addr, len,
-						     VRING_DESC_F_NEXT,
-						     premapped);
+			i = virtqueue_add_desc_split(vq, desc, extra, i, addr,
+						     len, flags, premapped);
 		}
 	}
 	for (; n < (out_sgs + in_sgs); n++) {
 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
 			dma_addr_t addr;
 			u32 len;
+			u16 flags = VRING_DESC_F_WRITE;
+
+			if (++sg_count != total_sg)
+				flags |= VRING_DESC_F_NEXT;
 
 			if (vring_map_one_sg(vq, sg, DMA_FROM_DEVICE, &addr, &len, premapped))
 				goto unmap_release;
 
-			prev = i;
 			/* Note that we trust indirect descriptor
 			 * table since it use stream DMA mapping.
 			 */
-			i = virtqueue_add_desc_split(vq, desc, extra, i, addr, len,
-						     VRING_DESC_F_NEXT |
-						     VRING_DESC_F_WRITE,
-						     premapped);
+			i = virtqueue_add_desc_split(vq, desc, extra, i, addr,
+						     len, flags, premapped);
 		}
 	}
-	/* Last one doesn't continue. */
-	desc[prev].flags &= cpu_to_virtio16(vq->vq.vdev, ~VRING_DESC_F_NEXT);
-	if (!indirect && vring_need_unmap_buffer(vq, &extra[prev]))
-		vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
-			~VRING_DESC_F_NEXT;
 
 	if (indirect) {
 		/* Now that the indirect table is filled in, map it. */
-- 
2.47.3
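
As a quick illustration of the approach (a minimal user-space sketch, not
kernel code): the flags for every descriptor can be decided up front from a
running sg count, so the last descriptor never needs a post-loop fixup. Only
the VRING_DESC_F_NEXT/VRING_DESC_F_WRITE values follow the virtio spec; the
rest is illustrative and assumes one sg entry per sgs[] slot for brevity.

#include <stdint.h>
#include <stdio.h>

#define VRING_DESC_F_NEXT  1	/* descriptor chains on via the next field */
#define VRING_DESC_F_WRITE 2	/* buffer is device write-only */

static void show_flags(unsigned int out_sgs, unsigned int in_sgs)
{
	unsigned int total_sg = out_sgs + in_sgs;
	unsigned int sg_count = 0;
	unsigned int n;

	/* Device-readable (out) buffers come first. */
	for (n = 0; n < out_sgs; n++) {
		uint16_t flags = 0;

		/* Every descriptor except the very last one chains on. */
		if (++sg_count != total_sg)
			flags |= VRING_DESC_F_NEXT;
		printf("desc %u: flags=0x%x\n", sg_count - 1, (unsigned int)flags);
	}

	/* Device-writable (in) buffers follow. */
	for (n = 0; n < in_sgs; n++) {
		uint16_t flags = VRING_DESC_F_WRITE;

		if (++sg_count != total_sg)
			flags |= VRING_DESC_F_NEXT;
		printf("desc %u: flags=0x%x\n", sg_count - 1, (unsigned int)flags);
	}
}

int main(void)
{
	show_flags(2, 1);	/* two out descriptors followed by one in descriptor */
	return 0;
}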