virtio_ring: factor out core logic for updating last_used_idx
author      Jason Wang <jasowang@redhat.com>
            Tue, 30 Dec 2025 06:46:46 +0000 (14:46 +0800)
committer   Michael S. Tsirkin <mst@redhat.com>
            Wed, 31 Dec 2025 10:39:18 +0000 (05:39 -0500)
Factor out the core logic for updating last_used_idx so it can be reused
by the packed in-order implementation.

Acked-by: Eugenio Pérez <eperezma@redhat.com>
Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Tested-by: Lei Yang <leiyang@redhat.com>
Reviewed-by: Eugenio Pérez <eperezma@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Message-Id: <20251230064649.55597-17-jasowang@redhat.com>
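
In outline, the patch extracts the tail of virtqueue_get_buf_ctx_packed()
into a new helper, update_last_used_idx_packed(), so that any packed
completion path can finish a used buffer the same way. A condensed sketch
of the intended call pattern (id, last_used and used_wrap_counter are
decoded from vq->last_used_idx by the caller beforehand, as in the full
function in the diff below):

        /* sketch of the caller's tail after this patch */
        ret = vq->packed.desc_state[id].data;
        detach_buf_packed(vq, id, ctx);
        update_last_used_idx_packed(vq, id, last_used, used_wrap_counter);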

drivers/virtio/virtio_ring.c

index 27f69859ccf370075bdae01c161a3c5be81daae9..3389aad6f5a87d4b16284fe6779bdae93e0f4e2f 100644
@@ -1754,6 +1754,30 @@ static bool more_used_packed(const struct vring_virtqueue *vq)
        return virtqueue_poll_packed(vq, READ_ONCE(vq->last_used_idx));
 }
 
+static void update_last_used_idx_packed(struct vring_virtqueue *vq,
+                                       u16 id, u16 last_used,
+                                       u16 used_wrap_counter)
+{
+       last_used += vq->packed.desc_state[id].num;
+       if (unlikely(last_used >= vq->packed.vring.num)) {
+               last_used -= vq->packed.vring.num;
+               used_wrap_counter ^= 1;
+       }
+
+       last_used = (last_used | (used_wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
+       WRITE_ONCE(vq->last_used_idx, last_used);
+
+       /*
+        * If we expect an interrupt for the next entry, tell host
+        * by writing event index and flush out the write before
+        * the read in the next get_buf call.
+        */
+       if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
+               virtio_store_mb(vq->weak_barriers,
+                               &vq->packed.vring.driver->off_wrap,
+                               cpu_to_le16(vq->last_used_idx));
+}
+
 static void *virtqueue_get_buf_ctx_packed(struct vring_virtqueue *vq,
                                          unsigned int *len,
                                          void **ctx)
@@ -1797,24 +1821,7 @@ static void *virtqueue_get_buf_ctx_packed(struct vring_virtqueue *vq,
        ret = vq->packed.desc_state[id].data;
        detach_buf_packed(vq, id, ctx);
 
-       last_used += vq->packed.desc_state[id].num;
-       if (unlikely(last_used >= vq->packed.vring.num)) {
-               last_used -= vq->packed.vring.num;
-               used_wrap_counter ^= 1;
-       }
-
-       last_used = (last_used | (used_wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
-       WRITE_ONCE(vq->last_used_idx, last_used);
-
-       /*
-        * If we expect an interrupt for the next entry, tell host
-        * by writing event index and flush out the write before
-        * the read in the next get_buf call.
-        */
-       if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
-               virtio_store_mb(vq->weak_barriers,
-                               &vq->packed.vring.driver->off_wrap,
-                               cpu_to_le16(vq->last_used_idx));
+       update_last_used_idx_packed(vq, id, last_used, used_wrap_counter);
 
        LAST_ADD_TIME_INVALID(vq);
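
For reference, last_used_idx packs the ring position into bits 0..14 and
the used wrap counter into bit 15 (VRING_PACKED_EVENT_F_WRAP_CTR is 15 in
the uapi headers). Below is a standalone userspace sketch, not kernel
code, of the arithmetic the helper performs; advance_last_used() and the
ring size and starting values are made up for illustration:

        #include <stdio.h>
        #include <stdint.h>

        #define VRING_PACKED_EVENT_F_WRAP_CTR 15 /* wrap counter bit */

        /* Mirrors the index math in update_last_used_idx_packed():
         * consume 'num' descriptors, wrap at the ring size, flip the
         * wrap counter on wrap-around, then fold the counter into the
         * top bit of the returned value. */
        static uint16_t advance_last_used(uint16_t last_used, uint16_t num,
                                          uint16_t ring_num, uint16_t *wrap)
        {
                last_used += num;
                if (last_used >= ring_num) {
                        last_used -= ring_num;
                        *wrap ^= 1;
                }
                return last_used | (*wrap << VRING_PACKED_EVENT_F_WRAP_CTR);
        }

        int main(void)
        {
                uint16_t wrap = 1; /* the used wrap counter starts at 1 */
                /* illustrative: 256-entry ring, 4 descriptors consumed
                 * while sitting at index 254, so the update wraps */
                uint16_t raw = advance_last_used(254, 4, 256, &wrap);

                printf("index=%u wrap=%u raw=0x%04x\n",
                       raw & ~(1u << VRING_PACKED_EVENT_F_WRAP_CTR),
                       (unsigned)wrap, (unsigned)raw);
                /* prints: index=2 wrap=0 raw=0x0002 */
                return 0;
        }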