net: hv_netvsc: fix loss of early receive events from host during channel open.
author     Dipayaan Roy <dipayanroy@linux.microsoft.com>
           Mon, 25 Aug 2025 11:56:27 +0000 (04:56 -0700)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Thu, 4 Sep 2025 13:31:51 +0000 (15:31 +0200)
[ Upstream commit 9448ccd853368582efa9db05db344f8bb9dffe0f ]

The hv_netvsc driver currently enables NAPI after opening the primary and
subchannels. This ordering creates a race: if the Hyper-V host places data
in the host -> guest ring buffer and signals the channel before
napi_enable() has been called, the channel callback will run but
napi_schedule_prep() will return false. As a result, the NAPI poller never
gets scheduled, the data in the ring buffer is not consumed, and the
receive queue can stay stuck indefinitely unless another interrupt happens
to arrive.

Fix this by enabling NAPI and registering it with the RX/TX queues before
the vmbus channel is opened. This guarantees that any early host signal
arriving after the open correctly triggers NAPI scheduling, so the ring
buffer is drained.
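
For reference, a minimal sketch of the corrected ordering on the subchannel
path, using only the kernel APIs and local variables (nvchan, chn_index,
new_sc, netvsc_ring_bytes, netvsc_channel_cb) that appear in the
netvsc_sc_open() hunk below; it is a simplified illustration of the sequence,
not a drop-in replacement for the function:

	/* Arm NAPI and map it to the queues before the channel can signal. */
	napi_enable(&nvchan->napi);
	netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_RX, &nvchan->napi);
	netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_TX, &nvchan->napi);

	/* Any host signal from this point on finds NAPI ready to be scheduled. */
	ret = vmbus_open(new_sc, netvsc_ring_bytes, netvsc_ring_bytes,
			 NULL, 0, netvsc_channel_cb, nvchan);
	if (ret != 0) {
		/* Open failed: unwind the NAPI setup in reverse order. */
		netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_TX, NULL);
		netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_RX, NULL);
		napi_disable(&nvchan->napi);
	}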

Fixes: 76bb5db5c749d ("netvsc: fix use after free on module removal")
Signed-off-by: Dipayaan Roy <dipayanroy@linux.microsoft.com>
Link: https://patch.msgid.link/20250825115627.GA32189@linuxonhyperv3.guj3yctzbm1etfxqx2vob5hsef.xx.internal.cloudapp.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/rndis_filter.c

diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 87ac2a5f180913bf17912d060e10c90e9103d0c7..5f14799b68c5321171b3ce26821ea18cd5c23ae5 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -1811,6 +1811,11 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
 
        /* Enable NAPI handler before init callbacks */
        netif_napi_add(ndev, &net_device->chan_table[0].napi, netvsc_poll);
+       napi_enable(&net_device->chan_table[0].napi);
+       netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX,
+                            &net_device->chan_table[0].napi);
+       netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX,
+                            &net_device->chan_table[0].napi);
 
        /* Open the channel */
        device->channel->next_request_id_callback = vmbus_next_request_id;
@@ -1830,12 +1835,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
        /* Channel is opened */
        netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");
 
-       napi_enable(&net_device->chan_table[0].napi);
-       netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX,
-                            &net_device->chan_table[0].napi);
-       netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX,
-                            &net_device->chan_table[0].napi);
-
        /* Connect with the NetVsp */
        ret = netvsc_connect_vsp(device, net_device, device_info);
        if (ret != 0) {
@@ -1853,14 +1852,14 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
 
 close:
        RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
-       netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX, NULL);
-       netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX, NULL);
-       napi_disable(&net_device->chan_table[0].napi);
 
        /* Now, we can close the channel safely */
        vmbus_close(device->channel);
 
 cleanup:
+       netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX, NULL);
+       netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX, NULL);
+       napi_disable(&net_device->chan_table[0].napi);
        netif_napi_del(&net_device->chan_table[0].napi);
 
 cleanup2:
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 9b8769a8b77a12c4af660661bf3d04247f3709a6..9a92552ee35c2891c112a3612de593be50b29d9b 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -1252,17 +1252,26 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
        new_sc->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
        new_sc->max_pkt_size = NETVSC_MAX_PKT_SIZE;
 
+       /* Enable napi before opening the vmbus channel to avoid races
+        * as the host placing data on the host->guest ring may be left
+        * out if napi was not enabled.
+        */
+       napi_enable(&nvchan->napi);
+       netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_RX,
+                            &nvchan->napi);
+       netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_TX,
+                            &nvchan->napi);
+
        ret = vmbus_open(new_sc, netvsc_ring_bytes,
                         netvsc_ring_bytes, NULL, 0,
                         netvsc_channel_cb, nvchan);
-       if (ret == 0) {
-               napi_enable(&nvchan->napi);
-               netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_RX,
-                                    &nvchan->napi);
-               netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_TX,
-                                    &nvchan->napi);
-       } else {
+       if (ret != 0) {
                netdev_notice(ndev, "sub channel open failed: %d\n", ret);
+               netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_TX,
+                                    NULL);
+               netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_RX,
+                                    NULL);
+               napi_disable(&nvchan->napi);
        }
 
        if (atomic_inc_return(&nvscdev->open_chn) == nvscdev->num_chn)