From 3fb9bd32e120bbb2f3ec4931570704bf5ef616ef Mon Sep 17 00:00:00 2001
From: Haiyang Zhang <haiyangz@microsoft.com>
Date: Thu, 28 Mar 2019 19:40:36 +0000
Subject: hv_netvsc: Fix unwanted wakeup after tx_disable

[ Upstream commit 1b704c4a1ba95574832e730f23817b651db2aa59 ]

After a queue is stopped, the wakeup mechanism may wake it up again
when ring buffer usage drops below a threshold. This can cause a
NULL-pointer panic in the send path once all tx queues have been
stopped in netvsc_detach and removal of the netvsc device has begun.

This patch fixes it by adding a tx_disable flag to prevent unwanted
queue wakeups.

Fixes: 7b2ee50c0cd5 ("hv_netvsc: common detach logic")
Reported-by: Mohammed Gamal <mgamal@redhat.com>
Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/net/hyperv/hyperv_net.h |  1 +
 drivers/net/hyperv/netvsc.c     |  6 ++++--
 drivers/net/hyperv/netvsc_drv.c | 32 ++++++++++++++++++++++++++------
 3 files changed, 31 insertions(+), 8 deletions(-)

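Note: the snippet below is an illustrative, self-contained userspace sketch of
the gating logic this patch introduces; it is not driver code, and the names
tx_ctx and completion_may_wake are invented for the example. It only shows why
a completion-path check of a tx_disable flag keeps a deliberately stopped queue
from being woken again. In the driver itself the flag is paired with virt_wmb()
and the real netif_tx_* helpers shown in the diff below.

	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative stand-in for the per-device state touched by this patch. */
	struct tx_ctx {
		bool queue_stopped;
		bool tx_disable;	/* if true, never wake the queue again */
		int ring_avail_pct;	/* free send-ring space, in percent */
	};

	/* Completion path: wake the queue only if it was stopped for lack of
	 * ring space and the device is not being closed or detached.
	 */
	static bool completion_may_wake(const struct tx_ctx *c, int hiwater_pct)
	{
		return c->queue_stopped && !c->tx_disable &&
		       c->ring_avail_pct > hiwater_pct;
	}

	int main(void)
	{
		struct tx_ctx c = { .queue_stopped = true, .ring_avail_pct = 50 };

		/* Device running: enough ring space again, so a wakeup is allowed. */
		printf("running:   wake=%d\n", completion_may_wake(&c, 20));

		/* The close/detach path sets the flag first, so later send
		 * completions can no longer wake the queue.
		 */
		c.tx_disable = true;
		printf("detaching: wake=%d\n", completion_may_wake(&c, 20));
		return 0;
	}
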
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 42d284669b03..31d8d83c25ac 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -970,6 +970,7 @@ struct netvsc_device {
 
 	wait_queue_head_t wait_drain;
 	bool destroy;
+	bool tx_disable; /* if true, do not wake up queue again */
 
 	/* Receive buffer allocated by us but manages by NetVSP */
 	void *recv_buf;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 1a942feab954..fb12b63439c6 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -110,6 +110,7 @@ static struct netvsc_device *alloc_net_device(void)
 
 	init_waitqueue_head(&net_device->wait_drain);
 	net_device->destroy = false;
+	net_device->tx_disable = false;
 
 	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
 	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
@@ -716,7 +717,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
 	} else {
 		struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
 
-		if (netif_tx_queue_stopped(txq) &&
+		if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
 		    (hv_get_avail_to_write_percent(&channel->outbound) >
 		     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
 			netif_tx_wake_queue(txq);
@@ -871,7 +872,8 @@ static inline int netvsc_send_pkt(
 	} else if (ret == -EAGAIN) {
 		netif_tx_stop_queue(txq);
 		ndev_ctx->eth_stats.stop_queue++;
-		if (atomic_read(&nvchan->queue_sends) < 1) {
+		if (atomic_read(&nvchan->queue_sends) < 1 &&
+		    !net_device->tx_disable) {
 			netif_tx_wake_queue(txq);
 			ndev_ctx->eth_stats.wake_queue++;
 			ret = -ENOSPC;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index c8320405c8f1..9d699bd5f715 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -109,6 +109,15 @@ static void netvsc_set_rx_mode(struct net_device *net)
 	rcu_read_unlock();
 }
 
+static void netvsc_tx_enable(struct netvsc_device *nvscdev,
+			     struct net_device *ndev)
+{
+	nvscdev->tx_disable = false;
+	virt_wmb(); /* ensure queue wake up mechanism is on */
+
+	netif_tx_wake_all_queues(ndev);
+}
+
 static int netvsc_open(struct net_device *net)
 {
 	struct net_device_context *ndev_ctx = netdev_priv(net);
@@ -129,7 +138,7 @@ static int netvsc_open(struct net_device *net)
 	rdev = nvdev->extension;
 	if (!rdev->link_state) {
 		netif_carrier_on(net);
-		netif_tx_wake_all_queues(net);
+		netvsc_tx_enable(nvdev, net);
 	}
 
 	if (vf_netdev) {
@@ -184,6 +193,17 @@ static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
 	}
 }
 
+static void netvsc_tx_disable(struct netvsc_device *nvscdev,
+			      struct net_device *ndev)
+{
+	if (nvscdev) {
+		nvscdev->tx_disable = true;
+		virt_wmb(); /* ensure txq will not wake up after stop */
+	}
+
+	netif_tx_disable(ndev);
+}
+
 static int netvsc_close(struct net_device *net)
 {
 	struct net_device_context *net_device_ctx = netdev_priv(net);
@@ -192,7 +212,7 @@ static int netvsc_close(struct net_device *net)
 	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
 	int ret;
 
-	netif_tx_disable(net);
+	netvsc_tx_disable(nvdev, net);
 
 	/* No need to close rndis filter if it is removed already */
 	if (!nvdev)
@@ -918,7 +938,7 @@ static int netvsc_detach(struct net_device *ndev,
 
 	/* If device was up (receiving) then shutdown */
 	if (netif_running(ndev)) {
-		netif_tx_disable(ndev);
+		netvsc_tx_disable(nvdev, ndev);
 
 		ret = rndis_filter_close(nvdev);
 		if (ret) {
@@ -1899,7 +1919,7 @@ static void netvsc_link_change(struct work_struct *w)
 		if (rdev->link_state) {
 			rdev->link_state = false;
 			netif_carrier_on(net);
-			netif_tx_wake_all_queues(net);
+			netvsc_tx_enable(net_device, net);
 		} else {
 			notify = true;
 		}
@@ -1909,7 +1929,7 @@ static void netvsc_link_change(struct work_struct *w)
 		if (!rdev->link_state) {
 			rdev->link_state = true;
 			netif_carrier_off(net);
-			netif_tx_stop_all_queues(net);
+			netvsc_tx_disable(net_device, net);
 		}
 		kfree(event);
 		break;
@@ -1918,7 +1938,7 @@ static void netvsc_link_change(struct work_struct *w)
 		if (!rdev->link_state) {
 			rdev->link_state = true;
 			netif_carrier_off(net);
-			netif_tx_stop_all_queues(net);
+			netvsc_tx_disable(net_device, net);
 			event->event = RNDIS_STATUS_MEDIA_CONNECT;
 			spin_lock_irqsave(&ndev_ctx->lock, flags);
 			list_add(&event->list, &ndev_ctx->reconfig_events);
-- 
2.19.1
