virtio_net: move tx vq operation under tx queue lock
author		Michael S. Tsirkin <mst@redhat.com>
		Tue, 13 Apr 2021 05:35:26 +0000 (01:35 -0400)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
		Tue, 20 Jul 2021 14:02:21 +0000 (16:02 +0200)
[ Upstream commit 5a2f966d0f3fa0ef6dada7ab9eda74cacee96b8a ]

It's unsafe to operate a vq from multiple threads.
Unfortunately this is exactly what we do when invoking
clean tx poll from rx napi.
The same happens with napi-tx even without the
opportunistic cleaning from the receive interrupt: it races
with processing the vq in start_xmit.

As a fix, move everything that deals with the vq under the tx lock.

Fixes: b92f1e6751a6 ("virtio-net: transmit napi")
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
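
For context, the vq operations this patch pulls under the tx queue lock are the ones the old code performed via virtqueue_napi_complete() after __netif_tx_unlock(). A rough sketch of that helper as it appears elsewhere in drivers/net/virtio_net.c (reproduced for illustration only, not part of this patch):

	static void virtqueue_napi_complete(struct napi_struct *napi,
					    struct virtqueue *vq, int processed)
	{
		int opaque;

		/* Re-arm the completion callback: touches vq state. */
		opaque = virtqueue_enable_cb_prepare(vq);
		if (napi_complete_done(napi, processed)) {
			/* Buffers raced in while re-arming: poll again. */
			if (unlikely(virtqueue_poll(vq, opaque)))
				virtqueue_napi_schedule(napi, vq);
		} else {
			/* NAPI stays scheduled, keep callbacks disabled. */
			virtqueue_disable_cb(vq);
		}
	}

Before this change, virtnet_poll_tx() called this helper after dropping the tx queue lock, so these vq accesses could run concurrently with start_xmit() operating on the same vq.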
drivers/net/virtio_net.c

index db9a876035ecbea81a3535c6e04188ddc97eef17..beb0860230932a63b08df38b5a84f3f82a6483d0 100644
@@ -1514,6 +1514,8 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
        struct virtnet_info *vi = sq->vq->vdev->priv;
        unsigned int index = vq2txq(sq->vq);
        struct netdev_queue *txq;
+       int opaque;
+       bool done;
 
        if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
                /* We don't need to enable cb for XDP */
@@ -1523,10 +1525,28 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
 
        txq = netdev_get_tx_queue(vi->dev, index);
        __netif_tx_lock(txq, raw_smp_processor_id());
+       virtqueue_disable_cb(sq->vq);
        free_old_xmit_skbs(sq, true);
+
+       opaque = virtqueue_enable_cb_prepare(sq->vq);
+
+       done = napi_complete_done(napi, 0);
+
+       if (!done)
+               virtqueue_disable_cb(sq->vq);
+
        __netif_tx_unlock(txq);
 
-       virtqueue_napi_complete(napi, sq->vq, 0);
+       if (done) {
+               if (unlikely(virtqueue_poll(sq->vq, opaque))) {
+                       if (napi_schedule_prep(napi)) {
+                               __netif_tx_lock(txq, raw_smp_processor_id());
+                               virtqueue_disable_cb(sq->vq);
+                               __netif_tx_unlock(txq);
+                               __napi_schedule(napi);
+                       }
+               }
+       }
 
        if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
                netif_tx_wake_queue(txq);
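
With this change, virtqueue_disable_cb(), free_old_xmit_skbs() and virtqueue_enable_cb_prepare() all run while the tx queue lock is held, which serializes them against start_xmit(): the core network stack takes the same per-queue lock before calling the driver's ndo_start_xmit. A simplified sketch of the transmit-side ordering (based on net/core/dev.c, surrounding details elided):

	/* __dev_queue_xmit(), simplified: the per-queue tx lock brackets
	 * the driver's ndo_start_xmit, i.e. start_xmit() in virtio_net,
	 * which frees old buffers and adds new ones on the same vq.
	 */
	HARD_TX_LOCK(dev, txq, cpu);
	if (!netif_xmit_stopped(txq))
		skb = dev_hard_start_xmit(skb, dev, txq, &rc);
	HARD_TX_UNLOCK(dev, txq);

The remaining virtqueue_poll() after __netif_tx_unlock() only checks whether more used buffers appeared since virtqueue_enable_cb_prepare(); if so, NAPI is rescheduled, with callbacks disabled again under the lock.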