hv_netvsc: Fix unwanted wakeup after tx_disable
author     Haiyang Zhang <haiyangz@microsoft.com>
           Thu, 28 Mar 2019 19:40:36 +0000 (19:40 +0000)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Wed, 17 Apr 2019 06:37:51 +0000 (08:37 +0200)
[ Upstream commit 1b704c4a1ba95574832e730f23817b651db2aa59 ]

After the queue is stopped, the wakeup mechanism may wake it up again
when ring buffer usage drops below a threshold. This may cause a
NULL-pointer panic in the send path when we have stopped all tx queues
in netvsc_detach and start removing the netvsc device.

This patch fixes it by adding a tx_disable flag to prevent unwanted
queue wakeup.

Fixes: 7b2ee50c0cd5 ("hv_netvsc: common detach logic")
Reported-by: Mohammed Gamal <mgamal@redhat.com>
Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
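
The fix in the diff below follows a common guarded-wakeup pattern: set a flag
before stopping the queues, publish it with a write barrier, and have the
completion path check the flag before re-waking a stopped queue. As a minimal,
self-contained illustration (a user-space sketch, not taken from the patch;
fake_netvsc_device, tx_disable_queue() and tx_complete() are hypothetical
stand-ins for nvdev->tx_disable, netvsc_tx_disable() and
netvsc_send_tx_complete()):

    /* Illustrative user-space analogue of the guard added by this patch. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct fake_netvsc_device {
            atomic_bool tx_disable;   /* mirrors the new nvdev->tx_disable flag */
            bool queue_stopped;       /* stands in for netif_tx_queue_stopped() */
    };

    /* Detach/close path: raise the flag, then stop the queue. */
    static void tx_disable_queue(struct fake_netvsc_device *d)
    {
            /* release ordering plays the role of virt_wmb() in the patch */
            atomic_store_explicit(&d->tx_disable, true, memory_order_release);
            d->queue_stopped = true;
    }

    /* Completion path: wake only if the flag is clear and the ring has room. */
    static void tx_complete(struct fake_netvsc_device *d, int ring_avail_percent)
    {
            if (d->queue_stopped &&
                !atomic_load_explicit(&d->tx_disable, memory_order_acquire) &&
                ring_avail_percent > 20 /* stand-in for RING_AVAIL_PERCENT_HIWATER */)
                    d->queue_stopped = false;   /* i.e. netif_tx_wake_queue() */
    }

    int main(void)
    {
            struct fake_netvsc_device d = { .queue_stopped = false };

            atomic_init(&d.tx_disable, false);
            tx_disable_queue(&d);     /* detach stops the queue */
            tx_complete(&d, 50);      /* without the flag this would re-wake it */
            printf("queue_stopped=%d (queue stays stopped)\n", d.queue_stopped);
            return 0;
    }

The ordering mirrors the patch: the flag is published before the queue is
marked stopped, so a completion that still observes the stopped queue also
observes tx_disable set and skips the wake-up.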

diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index e33a6c672a0a4357e565fa5e1f1b5d12e28d0088..0f07b5978fa1afd99348de486caaf985ce6ae1dc 100644
@@ -779,6 +779,7 @@ struct netvsc_device {
 
        wait_queue_head_t wait_drain;
        bool destroy;
+       bool tx_disable; /* if true, do not wake up queue again */
 
        /* Receive buffer allocated by us but manages by NetVSP */
        void *recv_buf;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 806239b89990d31820d54faa98d8e6e892e53437..a3bb4d5c64f5ac555533cfceefcf8b3cf3bd366d 100644
@@ -107,6 +107,7 @@ static struct netvsc_device *alloc_net_device(void)
 
        init_waitqueue_head(&net_device->wait_drain);
        net_device->destroy = false;
+       net_device->tx_disable = false;
        atomic_set(&net_device->open_cnt, 0);
        net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
        net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
@@ -712,7 +713,7 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
        } else {
                struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
 
-               if (netif_tx_queue_stopped(txq) &&
+               if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
                    (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
                     queue_sends < 1)) {
                        netif_tx_wake_queue(txq);
@@ -865,7 +866,8 @@ static inline int netvsc_send_pkt(
                        netif_tx_stop_queue(txq);
        } else if (ret == -EAGAIN) {
                netif_tx_stop_queue(txq);
-               if (atomic_read(&nvchan->queue_sends) < 1) {
+               if (atomic_read(&nvchan->queue_sends) < 1 &&
+                   !net_device->tx_disable) {
                        netif_tx_wake_queue(txq);
                        ret = -ENOSPC;
                }
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 74b9e51b2b47059f405645a74a141f3487c5a98a..eb92720dd1c4acfd3b7deb7a2855567e8c883e61 100644
@@ -108,6 +108,15 @@ static void netvsc_set_rx_mode(struct net_device *net)
        rcu_read_unlock();
 }
 
+static void netvsc_tx_enable(struct netvsc_device *nvscdev,
+                            struct net_device *ndev)
+{
+       nvscdev->tx_disable = false;
+       virt_wmb(); /* ensure queue wake up mechanism is on */
+
+       netif_tx_wake_all_queues(ndev);
+}
+
 static int netvsc_open(struct net_device *net)
 {
        struct net_device_context *ndev_ctx = netdev_priv(net);
@@ -128,7 +137,7 @@ static int netvsc_open(struct net_device *net)
        rdev = nvdev->extension;
        if (!rdev->link_state) {
                netif_carrier_on(net);
-               netif_tx_wake_all_queues(net);
+               netvsc_tx_enable(nvdev, net);
        }
 
        if (vf_netdev) {
@@ -183,6 +192,17 @@ static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
        }
 }
 
+static void netvsc_tx_disable(struct netvsc_device *nvscdev,
+                             struct net_device *ndev)
+{
+       if (nvscdev) {
+               nvscdev->tx_disable = true;
+               virt_wmb(); /* ensure txq will not wake up after stop */
+       }
+
+       netif_tx_disable(ndev);
+}
+
 static int netvsc_close(struct net_device *net)
 {
        struct net_device_context *net_device_ctx = netdev_priv(net);
@@ -191,7 +211,7 @@ static int netvsc_close(struct net_device *net)
        struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
        int ret;
 
-       netif_tx_disable(net);
+       netvsc_tx_disable(nvdev, net);
 
        /* No need to close rndis filter if it is removed already */
        if (!nvdev)
@@ -893,7 +913,7 @@ static int netvsc_detach(struct net_device *ndev,
 
        /* If device was up (receiving) then shutdown */
        if (netif_running(ndev)) {
-               netif_tx_disable(ndev);
+               netvsc_tx_disable(nvdev, ndev);
 
                ret = rndis_filter_close(nvdev);
                if (ret) {
@@ -1720,7 +1740,7 @@ static void netvsc_link_change(struct work_struct *w)
                if (rdev->link_state) {
                        rdev->link_state = false;
                        netif_carrier_on(net);
-                       netif_tx_wake_all_queues(net);
+                       netvsc_tx_enable(net_device, net);
                } else {
                        notify = true;
                }
@@ -1730,7 +1750,7 @@ static void netvsc_link_change(struct work_struct *w)
                if (!rdev->link_state) {
                        rdev->link_state = true;
                        netif_carrier_off(net);
-                       netif_tx_stop_all_queues(net);
+                       netvsc_tx_disable(net_device, net);
                }
                kfree(event);
                break;
@@ -1739,7 +1759,7 @@ static void netvsc_link_change(struct work_struct *w)
                if (!rdev->link_state) {
                        rdev->link_state = true;
                        netif_carrier_off(net);
-                       netif_tx_stop_all_queues(net);
+                       netvsc_tx_disable(net_device, net);
                        event->event = RNDIS_STATUS_MEDIA_CONNECT;
                        spin_lock_irqsave(&ndev_ctx->lock, flags);
                        list_add(&event->list, &ndev_ctx->reconfig_events);