--- /dev/null
+From 29869d66870a715177bfb505f66a7e0e8bcc89c3 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Tue, 21 Feb 2017 06:21:47 -0800
+Subject: tcp: Revert "tcp: tcp_probe: use spin_lock_bh()"
+
+From: Eric Dumazet <edumazet@google.com>
+
+commit 29869d66870a715177bfb505f66a7e0e8bcc89c3 upstream.
+
+This reverts commit e70ac171658679ecf6bea4bbd9e9325cd6079d2b.
+
+jtcp_rcv_established() is in fact called with hard irqs disabled.
+
+Initial bug report from Ricardo Nabinger Sanchez [1] still needs
+to be investigated, but does not look like a TCP bug.
+
+[1] https://www.spinics.net/lists/netdev/msg420960.html
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: kernel test robot <xiaolong.ye@intel.com>
+Cc: Ricardo Nabinger Sanchez <rnsanchez@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/ipv4/tcp_probe.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/tcp_probe.c
++++ b/net/ipv4/tcp_probe.c
+@@ -117,7 +117,7 @@ static void jtcp_rcv_established(struct
+ (fwmark > 0 && skb->mark == fwmark)) &&
+ (full || tp->snd_cwnd != tcp_probe.lastcwnd)) {
+
+- spin_lock_bh(&tcp_probe.lock);
++ spin_lock(&tcp_probe.lock);
+ /* If log fills, just silently drop */
+ if (tcp_probe_avail() > 1) {
+ struct tcp_log *p = tcp_probe.log + tcp_probe.head;
+@@ -157,7 +157,7 @@ static void jtcp_rcv_established(struct
+ tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1);
+ }
+ tcp_probe.lastcwnd = tp->snd_cwnd;
+- spin_unlock_bh(&tcp_probe.lock);
++ spin_unlock(&tcp_probe.lock);
+
+ wake_up(&tcp_probe.wait);
+ }