]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/blob - releases/4.14.112/hv_netvsc-fix-unwanted-wakeup-after-tx_disable.patch
Linux 4.14.112
[thirdparty/kernel/stable-queue.git] / releases / 4.14.112 / hv_netvsc-fix-unwanted-wakeup-after-tx_disable.patch
1 From foo@baz Mon Apr 15 07:47:06 CEST 2019
2 From: Haiyang Zhang <haiyangz@microsoft.com>
3 Date: Thu, 28 Mar 2019 19:40:36 +0000
4 Subject: hv_netvsc: Fix unwanted wakeup after tx_disable
5
6 From: Haiyang Zhang <haiyangz@microsoft.com>
7
8 [ Upstream commit 1b704c4a1ba95574832e730f23817b651db2aa59 ]
9
10 After a queue is stopped, the wakeup mechanism may wake it up again
11 when ring buffer usage is lower than a threshold. This may cause a
12 send-path panic on a NULL pointer when we have stopped all tx queues
13 in netvsc_detach and are removing the netvsc device.
14
15 This patch fixes it by adding a tx_disable flag to prevent unwanted
16 queue wakeup.
17
18 Fixes: 7b2ee50c0cd5 ("hv_netvsc: common detach logic")
19 Reported-by: Mohammed Gamal <mgamal@redhat.com>
20 Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
21 Signed-off-by: David S. Miller <davem@davemloft.net>
22 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
23 ---
24 drivers/net/hyperv/hyperv_net.h | 1 +
25 drivers/net/hyperv/netvsc.c | 6 ++++--
26 drivers/net/hyperv/netvsc_drv.c | 32 ++++++++++++++++++++++++++------
27 3 files changed, 31 insertions(+), 8 deletions(-)
28
29 --- a/drivers/net/hyperv/hyperv_net.h
30 +++ b/drivers/net/hyperv/hyperv_net.h
31 @@ -779,6 +779,7 @@ struct netvsc_device {
32
33 wait_queue_head_t wait_drain;
34 bool destroy;
35 + bool tx_disable; /* if true, do not wake up queue again */
36
37 /* Receive buffer allocated by us but manages by NetVSP */
38 void *recv_buf;
39 --- a/drivers/net/hyperv/netvsc.c
40 +++ b/drivers/net/hyperv/netvsc.c
41 @@ -107,6 +107,7 @@ static struct netvsc_device *alloc_net_d
42
43 init_waitqueue_head(&net_device->wait_drain);
44 net_device->destroy = false;
45 + net_device->tx_disable = false;
46 atomic_set(&net_device->open_cnt, 0);
47 net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
48 net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
49 @@ -712,7 +713,7 @@ static void netvsc_send_tx_complete(stru
50 } else {
51 struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
52
53 - if (netif_tx_queue_stopped(txq) &&
54 + if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
55 (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
56 queue_sends < 1)) {
57 netif_tx_wake_queue(txq);
58 @@ -865,7 +866,8 @@ static inline int netvsc_send_pkt(
59 netif_tx_stop_queue(txq);
60 } else if (ret == -EAGAIN) {
61 netif_tx_stop_queue(txq);
62 - if (atomic_read(&nvchan->queue_sends) < 1) {
63 + if (atomic_read(&nvchan->queue_sends) < 1 &&
64 + !net_device->tx_disable) {
65 netif_tx_wake_queue(txq);
66 ret = -ENOSPC;
67 }
68 --- a/drivers/net/hyperv/netvsc_drv.c
69 +++ b/drivers/net/hyperv/netvsc_drv.c
70 @@ -108,6 +108,15 @@ static void netvsc_set_rx_mode(struct ne
71 rcu_read_unlock();
72 }
73
74 +static void netvsc_tx_enable(struct netvsc_device *nvscdev,
75 + struct net_device *ndev)
76 +{
77 + nvscdev->tx_disable = false;
78 + virt_wmb(); /* ensure queue wake up mechanism is on */
79 +
80 + netif_tx_wake_all_queues(ndev);
81 +}
82 +
83 static int netvsc_open(struct net_device *net)
84 {
85 struct net_device_context *ndev_ctx = netdev_priv(net);
86 @@ -128,7 +137,7 @@ static int netvsc_open(struct net_device
87 rdev = nvdev->extension;
88 if (!rdev->link_state) {
89 netif_carrier_on(net);
90 - netif_tx_wake_all_queues(net);
91 + netvsc_tx_enable(nvdev, net);
92 }
93
94 if (vf_netdev) {
95 @@ -183,6 +192,17 @@ static int netvsc_wait_until_empty(struc
96 }
97 }
98
99 +static void netvsc_tx_disable(struct netvsc_device *nvscdev,
100 + struct net_device *ndev)
101 +{
102 + if (nvscdev) {
103 + nvscdev->tx_disable = true;
104 + virt_wmb(); /* ensure txq will not wake up after stop */
105 + }
106 +
107 + netif_tx_disable(ndev);
108 +}
109 +
110 static int netvsc_close(struct net_device *net)
111 {
112 struct net_device_context *net_device_ctx = netdev_priv(net);
113 @@ -191,7 +211,7 @@ static int netvsc_close(struct net_devic
114 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
115 int ret;
116
117 - netif_tx_disable(net);
118 + netvsc_tx_disable(nvdev, net);
119
120 /* No need to close rndis filter if it is removed already */
121 if (!nvdev)
122 @@ -893,7 +913,7 @@ static int netvsc_detach(struct net_devi
123
124 /* If device was up (receiving) then shutdown */
125 if (netif_running(ndev)) {
126 - netif_tx_disable(ndev);
127 + netvsc_tx_disable(nvdev, ndev);
128
129 ret = rndis_filter_close(nvdev);
130 if (ret) {
131 @@ -1720,7 +1740,7 @@ static void netvsc_link_change(struct wo
132 if (rdev->link_state) {
133 rdev->link_state = false;
134 netif_carrier_on(net);
135 - netif_tx_wake_all_queues(net);
136 + netvsc_tx_enable(net_device, net);
137 } else {
138 notify = true;
139 }
140 @@ -1730,7 +1750,7 @@ static void netvsc_link_change(struct wo
141 if (!rdev->link_state) {
142 rdev->link_state = true;
143 netif_carrier_off(net);
144 - netif_tx_stop_all_queues(net);
145 + netvsc_tx_disable(net_device, net);
146 }
147 kfree(event);
148 break;
149 @@ -1739,7 +1759,7 @@ static void netvsc_link_change(struct wo
150 if (!rdev->link_state) {
151 rdev->link_state = true;
152 netif_carrier_off(net);
153 - netif_tx_stop_all_queues(net);
154 + netvsc_tx_disable(net_device, net);
155 event->event = RNDIS_STATUS_MEDIA_CONNECT;
156 spin_lock_irqsave(&ndev_ctx->lock, flags);
157 list_add(&event->list, &ndev_ctx->reconfig_events);