]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.19-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 23 Nov 2022 08:04:40 +0000 (09:04 +0100)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 23 Nov 2022 08:04:40 +0000 (09:04 +0100)
added patches:
bpf-test_run-fix-alignment-problem-in-bpf_prog_test_run_skb.patch
kcm-avoid-potential-race-in-kcm_tx_work.patch
kcm-close-race-conditions-on-sk_receive_queue.patch
macvlan-enforce-a-consistent-minimal-mtu.patch
tcp-cdg-allow-tcp_cdg_release-to-be-called-multiple-times.patch

queue-4.19/bpf-test_run-fix-alignment-problem-in-bpf_prog_test_run_skb.patch [new file with mode: 0644]
queue-4.19/kcm-avoid-potential-race-in-kcm_tx_work.patch [new file with mode: 0644]
queue-4.19/kcm-close-race-conditions-on-sk_receive_queue.patch [new file with mode: 0644]
queue-4.19/macvlan-enforce-a-consistent-minimal-mtu.patch [new file with mode: 0644]
queue-4.19/series
queue-4.19/tcp-cdg-allow-tcp_cdg_release-to-be-called-multiple-times.patch [new file with mode: 0644]

diff --git a/queue-4.19/bpf-test_run-fix-alignment-problem-in-bpf_prog_test_run_skb.patch b/queue-4.19/bpf-test_run-fix-alignment-problem-in-bpf_prog_test_run_skb.patch
new file mode 100644 (file)
index 0000000..81946b9
--- /dev/null
@@ -0,0 +1,69 @@
+From d3fd203f36d46aa29600a72d57a1b61af80e4a25 Mon Sep 17 00:00:00 2001
+From: Baisong Zhong <zhongbaisong@huawei.com>
+Date: Wed, 2 Nov 2022 16:16:20 +0800
+Subject: bpf, test_run: Fix alignment problem in bpf_prog_test_run_skb()
+
+From: Baisong Zhong <zhongbaisong@huawei.com>
+
+commit d3fd203f36d46aa29600a72d57a1b61af80e4a25 upstream.
+
+We got a syzkaller problem because of aarch64 alignment fault
+if KFENCE enabled. When the size from user bpf program is an odd
+number, like 399, 407, etc, it will cause the struct skb_shared_info's
+unaligned access. As seen below:
+
+  BUG: KFENCE: use-after-free read in __skb_clone+0x23c/0x2a0 net/core/skbuff.c:1032
+
+  Use-after-free read at 0xffff6254fffac077 (in kfence-#213):
+   __lse_atomic_add arch/arm64/include/asm/atomic_lse.h:26 [inline]
+   arch_atomic_add arch/arm64/include/asm/atomic.h:28 [inline]
+   arch_atomic_inc include/linux/atomic-arch-fallback.h:270 [inline]
+   atomic_inc include/asm-generic/atomic-instrumented.h:241 [inline]
+   __skb_clone+0x23c/0x2a0 net/core/skbuff.c:1032
+   skb_clone+0xf4/0x214 net/core/skbuff.c:1481
+   ____bpf_clone_redirect net/core/filter.c:2433 [inline]
+   bpf_clone_redirect+0x78/0x1c0 net/core/filter.c:2420
+   bpf_prog_d3839dd9068ceb51+0x80/0x330
+   bpf_dispatcher_nop_func include/linux/bpf.h:728 [inline]
+   bpf_test_run+0x3c0/0x6c0 net/bpf/test_run.c:53
+   bpf_prog_test_run_skb+0x638/0xa7c net/bpf/test_run.c:594
+   bpf_prog_test_run kernel/bpf/syscall.c:3148 [inline]
+   __do_sys_bpf kernel/bpf/syscall.c:4441 [inline]
+   __se_sys_bpf+0xad0/0x1634 kernel/bpf/syscall.c:4381
+
+  kfence-#213: 0xffff6254fffac000-0xffff6254fffac196, size=407, cache=kmalloc-512
+
+  allocated by task 15074 on cpu 0 at 1342.585390s:
+   kmalloc include/linux/slab.h:568 [inline]
+   kzalloc include/linux/slab.h:675 [inline]
+   bpf_test_init.isra.0+0xac/0x290 net/bpf/test_run.c:191
+   bpf_prog_test_run_skb+0x11c/0xa7c net/bpf/test_run.c:512
+   bpf_prog_test_run kernel/bpf/syscall.c:3148 [inline]
+   __do_sys_bpf kernel/bpf/syscall.c:4441 [inline]
+   __se_sys_bpf+0xad0/0x1634 kernel/bpf/syscall.c:4381
+   __arm64_sys_bpf+0x50/0x60 kernel/bpf/syscall.c:4381
+
+To fix the problem, we adjust @size so that (@size + @headroom) is a
+multiple of SMP_CACHE_BYTES. So we make sure the struct skb_shared_info
+is aligned to a cache line.
+
+Fixes: 1cf1cae963c2 ("bpf: introduce BPF_PROG_TEST_RUN command")
+Signed-off-by: Baisong Zhong <zhongbaisong@huawei.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Cc: Eric Dumazet <edumazet@google.com>
+Link: https://lore.kernel.org/bpf/20221102081620.1465154-1-zhongbaisong@huawei.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/bpf/test_run.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/bpf/test_run.c
++++ b/net/bpf/test_run.c
+@@ -87,6 +87,7 @@ static void *bpf_test_init(const union b
+       if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
+               return ERR_PTR(-EINVAL);
++      size = SKB_DATA_ALIGN(size);
+       data = kzalloc(size + headroom + tailroom, GFP_USER);
+       if (!data)
+               return ERR_PTR(-ENOMEM);
diff --git a/queue-4.19/kcm-avoid-potential-race-in-kcm_tx_work.patch b/queue-4.19/kcm-avoid-potential-race-in-kcm_tx_work.patch
new file mode 100644 (file)
index 0000000..969d349
--- /dev/null
@@ -0,0 +1,72 @@
+From ec7eede369fe5b0d085ac51fdbb95184f87bfc6c Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 12 Oct 2022 13:34:12 +0000
+Subject: kcm: avoid potential race in kcm_tx_work
+
+From: Eric Dumazet <edumazet@google.com>
+
+commit ec7eede369fe5b0d085ac51fdbb95184f87bfc6c upstream.
+
+syzbot found that kcm_tx_work() could crash [1] in:
+
+       /* Primarily for SOCK_SEQPACKET sockets */
+       if (likely(sk->sk_socket) &&
+           test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
+<<*>>  clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+               sk->sk_write_space(sk);
+       }
+
+I think the reason is that another thread might concurrently
+run in kcm_release() and call sock_orphan(sk) while sk is not
+locked, so kcm_tx_work() finds sk->sk_socket being NULL.
+
+[1]
+BUG: KASAN: null-ptr-deref in instrument_atomic_write include/linux/instrumented.h:86 [inline]
+BUG: KASAN: null-ptr-deref in clear_bit include/asm-generic/bitops/instrumented-atomic.h:41 [inline]
+BUG: KASAN: null-ptr-deref in kcm_tx_work+0xff/0x160 net/kcm/kcmsock.c:742
+Write of size 8 at addr 0000000000000008 by task kworker/u4:3/53
+
+CPU: 0 PID: 53 Comm: kworker/u4:3 Not tainted 5.19.0-rc3-next-20220621-syzkaller #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+Workqueue: kkcmd kcm_tx_work
+Call Trace:
+<TASK>
+__dump_stack lib/dump_stack.c:88 [inline]
+dump_stack_lvl+0xcd/0x134 lib/dump_stack.c:106
+kasan_report+0xbe/0x1f0 mm/kasan/report.c:495
+check_region_inline mm/kasan/generic.c:183 [inline]
+kasan_check_range+0x13d/0x180 mm/kasan/generic.c:189
+instrument_atomic_write include/linux/instrumented.h:86 [inline]
+clear_bit include/asm-generic/bitops/instrumented-atomic.h:41 [inline]
+kcm_tx_work+0xff/0x160 net/kcm/kcmsock.c:742
+process_one_work+0x996/0x1610 kernel/workqueue.c:2289
+worker_thread+0x665/0x1080 kernel/workqueue.c:2436
+kthread+0x2e9/0x3a0 kernel/kthread.c:376
+ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:302
+</TASK>
+
+Fixes: ab7ac4eb9832 ("kcm: Kernel Connection Multiplexor module")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Tom Herbert <tom@herbertland.com>
+Link: https://lore.kernel.org/r/20221012133412.519394-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/kcm/kcmsock.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/kcm/kcmsock.c
++++ b/net/kcm/kcmsock.c
+@@ -1845,10 +1845,10 @@ static int kcm_release(struct socket *so
+       kcm = kcm_sk(sk);
+       mux = kcm->mux;
++      lock_sock(sk);
+       sock_orphan(sk);
+       kfree_skb(kcm->seq_skb);
+-      lock_sock(sk);
+       /* Purge queue under lock to avoid race condition with tx_work trying
+        * to act when queue is nonempty. If tx_work runs after this point
+        * it will just return.
diff --git a/queue-4.19/kcm-close-race-conditions-on-sk_receive_queue.patch b/queue-4.19/kcm-close-race-conditions-on-sk_receive_queue.patch
new file mode 100644 (file)
index 0000000..a423e1f
--- /dev/null
@@ -0,0 +1,165 @@
+From 5121197ecc5db58c07da95eb1ff82b98b121a221 Mon Sep 17 00:00:00 2001
+From: Cong Wang <cong.wang@bytedance.com>
+Date: Sun, 13 Nov 2022 16:51:19 -0800
+Subject: kcm: close race conditions on sk_receive_queue
+
+From: Cong Wang <cong.wang@bytedance.com>
+
+commit 5121197ecc5db58c07da95eb1ff82b98b121a221 upstream.
+
+sk->sk_receive_queue is protected by skb queue lock, but for KCM
+sockets its RX path takes mux->rx_lock to protect more than just
+skb queue. However, kcm_recvmsg() still only grabs the skb queue
+lock, so race conditions still exist.
+
+We can teach kcm_recvmsg() to grab mux->rx_lock too but this would
+introduce a potential performance regression as struct kcm_mux can
+be shared by multiple KCM sockets.
+
+So we have to enforce skb queue lock in requeue_rx_msgs() and handle
+skb peek case carefully in kcm_wait_data(). Fortunately,
+skb_recv_datagram() already handles it nicely and is widely used by
+other sockets, we can just switch to skb_recv_datagram() after
+getting rid of the unnecessary sock lock in kcm_recvmsg() and
+kcm_splice_read(). Side note: SOCK_DONE is not used by KCM sockets,
+so it is safe to get rid of this check too.
+
+I ran the original syzbot reproducer for 30 min without seeing any
+issue.
+
+Fixes: ab7ac4eb9832 ("kcm: Kernel Connection Multiplexor module")
+Reported-by: syzbot+278279efdd2730dd14bf@syzkaller.appspotmail.com
+Reported-by: shaozhengchao <shaozhengchao@huawei.com>
+Cc: Paolo Abeni <pabeni@redhat.com>
+Cc: Tom Herbert <tom@herbertland.com>
+Signed-off-by: Cong Wang <cong.wang@bytedance.com>
+Link: https://lore.kernel.org/r/20221114005119.597905-1-xiyou.wangcong@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/kcm/kcmsock.c |   60 +++++++-----------------------------------------------
+ 1 file changed, 8 insertions(+), 52 deletions(-)
+
+--- a/net/kcm/kcmsock.c
++++ b/net/kcm/kcmsock.c
+@@ -224,7 +224,7 @@ static void requeue_rx_msgs(struct kcm_m
+       struct sk_buff *skb;
+       struct kcm_sock *kcm;
+-      while ((skb = __skb_dequeue(head))) {
++      while ((skb = skb_dequeue(head))) {
+               /* Reset destructor to avoid calling kcm_rcv_ready */
+               skb->destructor = sock_rfree;
+               skb_orphan(skb);
+@@ -1085,53 +1085,18 @@ out_error:
+       return err;
+ }
+-static struct sk_buff *kcm_wait_data(struct sock *sk, int flags,
+-                                   long timeo, int *err)
+-{
+-      struct sk_buff *skb;
+-
+-      while (!(skb = skb_peek(&sk->sk_receive_queue))) {
+-              if (sk->sk_err) {
+-                      *err = sock_error(sk);
+-                      return NULL;
+-              }
+-
+-              if (sock_flag(sk, SOCK_DONE))
+-                      return NULL;
+-
+-              if ((flags & MSG_DONTWAIT) || !timeo) {
+-                      *err = -EAGAIN;
+-                      return NULL;
+-              }
+-
+-              sk_wait_data(sk, &timeo, NULL);
+-
+-              /* Handle signals */
+-              if (signal_pending(current)) {
+-                      *err = sock_intr_errno(timeo);
+-                      return NULL;
+-              }
+-      }
+-
+-      return skb;
+-}
+-
+ static int kcm_recvmsg(struct socket *sock, struct msghdr *msg,
+                      size_t len, int flags)
+ {
++      int noblock = flags & MSG_DONTWAIT;
+       struct sock *sk = sock->sk;
+       struct kcm_sock *kcm = kcm_sk(sk);
+       int err = 0;
+-      long timeo;
+       struct strp_msg *stm;
+       int copied = 0;
+       struct sk_buff *skb;
+-      timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+-
+-      lock_sock(sk);
+-
+-      skb = kcm_wait_data(sk, flags, timeo, &err);
++      skb = skb_recv_datagram(sk, flags, noblock, &err);
+       if (!skb)
+               goto out;
+@@ -1162,14 +1127,11 @@ msg_finished:
+                       /* Finished with message */
+                       msg->msg_flags |= MSG_EOR;
+                       KCM_STATS_INCR(kcm->stats.rx_msgs);
+-                      skb_unlink(skb, &sk->sk_receive_queue);
+-                      kfree_skb(skb);
+               }
+       }
+ out:
+-      release_sock(sk);
+-
++      skb_free_datagram(sk, skb);
+       return copied ? : err;
+ }
+@@ -1177,9 +1139,9 @@ static ssize_t kcm_splice_read(struct so
+                              struct pipe_inode_info *pipe, size_t len,
+                              unsigned int flags)
+ {
++      int noblock = flags & MSG_DONTWAIT;
+       struct sock *sk = sock->sk;
+       struct kcm_sock *kcm = kcm_sk(sk);
+-      long timeo;
+       struct strp_msg *stm;
+       int err = 0;
+       ssize_t copied;
+@@ -1187,11 +1149,7 @@ static ssize_t kcm_splice_read(struct so
+       /* Only support splice for SOCKSEQPACKET */
+-      timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+-
+-      lock_sock(sk);
+-
+-      skb = kcm_wait_data(sk, flags, timeo, &err);
++      skb = skb_recv_datagram(sk, flags, noblock, &err);
+       if (!skb)
+               goto err_out;
+@@ -1219,13 +1177,11 @@ static ssize_t kcm_splice_read(struct so
+        * finish reading the message.
+        */
+-      release_sock(sk);
+-
++      skb_free_datagram(sk, skb);
+       return copied;
+ err_out:
+-      release_sock(sk);
+-
++      skb_free_datagram(sk, skb);
+       return err;
+ }
diff --git a/queue-4.19/macvlan-enforce-a-consistent-minimal-mtu.patch b/queue-4.19/macvlan-enforce-a-consistent-minimal-mtu.patch
new file mode 100644 (file)
index 0000000..a70f897
--- /dev/null
@@ -0,0 +1,45 @@
+From b64085b00044bdf3cd1c9825e9ef5b2e0feae91a Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Fri, 7 Oct 2022 15:57:43 -0700
+Subject: macvlan: enforce a consistent minimal mtu
+
+From: Eric Dumazet <edumazet@google.com>
+
+commit b64085b00044bdf3cd1c9825e9ef5b2e0feae91a upstream.
+
+macvlan should enforce a minimal mtu of 68, even at link creation.
+
+This patch avoids the current behavior (which could lead to crashes
+in ipv6 stack if the link is brought up)
+
+$ ip link add macvlan1 link eno1 mtu 8 type macvlan  # This should fail !
+$ ip link sh dev macvlan1
+5: macvlan1@eno1: <BROADCAST,MULTICAST> mtu 8 qdisc noop
+    state DOWN mode DEFAULT group default qlen 1000
+    link/ether 02:47:6c:24:74:82 brd ff:ff:ff:ff:ff:ff
+$ ip link set macvlan1 mtu 67
+Error: mtu less than device minimum.
+$ ip link set macvlan1 mtu 68
+$ ip link set macvlan1 mtu 8
+Error: mtu less than device minimum.
+
+Fixes: 91572088e3fd ("net: use core MTU range checking in core net infra")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/macvlan.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -1137,7 +1137,7 @@ void macvlan_common_setup(struct net_dev
+ {
+       ether_setup(dev);
+-      dev->min_mtu            = 0;
++      /* ether_setup() has set dev->min_mtu to ETH_MIN_MTU. */
+       dev->max_mtu            = ETH_MAX_MTU;
+       dev->priv_flags        &= ~IFF_TX_SKB_SHARING;
+       netif_keep_dst(dev);
index da394383f1d049e8ce1858de8770b872d35185f2..0e0fb67266b810ac7690e6a65fd096cf147df910 100644 (file)
@@ -99,3 +99,8 @@ misc-vmw_vmci-fix-an-infoleak-in-vmci_host_do_receive_datagram.patch
 scsi-target-tcm_loop-fix-possible-name-leak-in-tcm_l.patch
 input-i8042-fix-leaking-of-platform-device-on-module.patch
 serial-8250-flush-dma-rx-on-rlsi.patch
+macvlan-enforce-a-consistent-minimal-mtu.patch
+tcp-cdg-allow-tcp_cdg_release-to-be-called-multiple-times.patch
+kcm-avoid-potential-race-in-kcm_tx_work.patch
+bpf-test_run-fix-alignment-problem-in-bpf_prog_test_run_skb.patch
+kcm-close-race-conditions-on-sk_receive_queue.patch
diff --git a/queue-4.19/tcp-cdg-allow-tcp_cdg_release-to-be-called-multiple-times.patch b/queue-4.19/tcp-cdg-allow-tcp_cdg_release-to-be-called-multiple-times.patch
new file mode 100644 (file)
index 0000000..37c4b80
--- /dev/null
@@ -0,0 +1,155 @@
+From 72e560cb8c6f80fc2b4afc5d3634a32465e13a51 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Tue, 11 Oct 2022 15:07:48 -0700
+Subject: tcp: cdg: allow tcp_cdg_release() to be called multiple times
+
+From: Eric Dumazet <edumazet@google.com>
+
+commit 72e560cb8c6f80fc2b4afc5d3634a32465e13a51 upstream.
+
+Apparently, mptcp is able to call tcp_disconnect() on an already
+disconnected flow. This is generally fine, unless current congestion
+control is CDG, because it might trigger a double-free [1]
+
+Instead of fixing MPTCP, and future bugs, we can make tcp_disconnect()
+more resilient.
+
+[1]
+BUG: KASAN: double-free in slab_free mm/slub.c:3539 [inline]
+BUG: KASAN: double-free in kfree+0xe2/0x580 mm/slub.c:4567
+
+CPU: 0 PID: 3645 Comm: kworker/0:7 Not tainted 6.0.0-syzkaller-02734-g0326074ff465 #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 09/22/2022
+Workqueue: events mptcp_worker
+Call Trace:
+<TASK>
+__dump_stack lib/dump_stack.c:88 [inline]
+dump_stack_lvl+0xcd/0x134 lib/dump_stack.c:106
+print_address_description mm/kasan/report.c:317 [inline]
+print_report.cold+0x2ba/0x719 mm/kasan/report.c:433
+kasan_report_invalid_free+0x81/0x190 mm/kasan/report.c:462
+____kasan_slab_free+0x18b/0x1c0 mm/kasan/common.c:356
+kasan_slab_free include/linux/kasan.h:200 [inline]
+slab_free_hook mm/slub.c:1759 [inline]
+slab_free_freelist_hook+0x8b/0x1c0 mm/slub.c:1785
+slab_free mm/slub.c:3539 [inline]
+kfree+0xe2/0x580 mm/slub.c:4567
+tcp_disconnect+0x980/0x1e20 net/ipv4/tcp.c:3145
+__mptcp_close_ssk+0x5ca/0x7e0 net/mptcp/protocol.c:2327
+mptcp_do_fastclose net/mptcp/protocol.c:2592 [inline]
+mptcp_worker+0x78c/0xff0 net/mptcp/protocol.c:2627
+process_one_work+0x991/0x1610 kernel/workqueue.c:2289
+worker_thread+0x665/0x1080 kernel/workqueue.c:2436
+kthread+0x2e4/0x3a0 kernel/kthread.c:376
+ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:306
+</TASK>
+
+Allocated by task 3671:
+kasan_save_stack+0x1e/0x40 mm/kasan/common.c:38
+kasan_set_track mm/kasan/common.c:45 [inline]
+set_alloc_info mm/kasan/common.c:437 [inline]
+____kasan_kmalloc mm/kasan/common.c:516 [inline]
+____kasan_kmalloc mm/kasan/common.c:475 [inline]
+__kasan_kmalloc+0xa9/0xd0 mm/kasan/common.c:525
+kmalloc_array include/linux/slab.h:640 [inline]
+kcalloc include/linux/slab.h:671 [inline]
+tcp_cdg_init+0x10d/0x170 net/ipv4/tcp_cdg.c:380
+tcp_init_congestion_control+0xab/0x550 net/ipv4/tcp_cong.c:193
+tcp_reinit_congestion_control net/ipv4/tcp_cong.c:217 [inline]
+tcp_set_congestion_control+0x96c/0xaa0 net/ipv4/tcp_cong.c:391
+do_tcp_setsockopt+0x505/0x2320 net/ipv4/tcp.c:3513
+tcp_setsockopt+0xd4/0x100 net/ipv4/tcp.c:3801
+mptcp_setsockopt+0x35f/0x2570 net/mptcp/sockopt.c:844
+__sys_setsockopt+0x2d6/0x690 net/socket.c:2252
+__do_sys_setsockopt net/socket.c:2263 [inline]
+__se_sys_setsockopt net/socket.c:2260 [inline]
+__x64_sys_setsockopt+0xba/0x150 net/socket.c:2260
+do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80
+entry_SYSCALL_64_after_hwframe+0x63/0xcd
+
+Freed by task 16:
+kasan_save_stack+0x1e/0x40 mm/kasan/common.c:38
+kasan_set_track+0x21/0x30 mm/kasan/common.c:45
+kasan_set_free_info+0x20/0x30 mm/kasan/generic.c:370
+____kasan_slab_free mm/kasan/common.c:367 [inline]
+____kasan_slab_free+0x166/0x1c0 mm/kasan/common.c:329
+kasan_slab_free include/linux/kasan.h:200 [inline]
+slab_free_hook mm/slub.c:1759 [inline]
+slab_free_freelist_hook+0x8b/0x1c0 mm/slub.c:1785
+slab_free mm/slub.c:3539 [inline]
+kfree+0xe2/0x580 mm/slub.c:4567
+tcp_cleanup_congestion_control+0x70/0x120 net/ipv4/tcp_cong.c:226
+tcp_v4_destroy_sock+0xdd/0x750 net/ipv4/tcp_ipv4.c:2254
+tcp_v6_destroy_sock+0x11/0x20 net/ipv6/tcp_ipv6.c:1969
+inet_csk_destroy_sock+0x196/0x440 net/ipv4/inet_connection_sock.c:1157
+tcp_done+0x23b/0x340 net/ipv4/tcp.c:4649
+tcp_rcv_state_process+0x40e7/0x4990 net/ipv4/tcp_input.c:6624
+tcp_v6_do_rcv+0x3fc/0x13c0 net/ipv6/tcp_ipv6.c:1525
+tcp_v6_rcv+0x2e8e/0x3830 net/ipv6/tcp_ipv6.c:1759
+ip6_protocol_deliver_rcu+0x2db/0x1950 net/ipv6/ip6_input.c:439
+ip6_input_finish+0x14c/0x2c0 net/ipv6/ip6_input.c:484
+NF_HOOK include/linux/netfilter.h:302 [inline]
+NF_HOOK include/linux/netfilter.h:296 [inline]
+ip6_input+0x9c/0xd0 net/ipv6/ip6_input.c:493
+dst_input include/net/dst.h:455 [inline]
+ip6_rcv_finish+0x193/0x2c0 net/ipv6/ip6_input.c:79
+ip_sabotage_in net/bridge/br_netfilter_hooks.c:874 [inline]
+ip_sabotage_in+0x1fa/0x260 net/bridge/br_netfilter_hooks.c:865
+nf_hook_entry_hookfn include/linux/netfilter.h:142 [inline]
+nf_hook_slow+0xc5/0x1f0 net/netfilter/core.c:614
+nf_hook.constprop.0+0x3ac/0x650 include/linux/netfilter.h:257
+NF_HOOK include/linux/netfilter.h:300 [inline]
+ipv6_rcv+0x9e/0x380 net/ipv6/ip6_input.c:309
+__netif_receive_skb_one_core+0x114/0x180 net/core/dev.c:5485
+__netif_receive_skb+0x1f/0x1c0 net/core/dev.c:5599
+netif_receive_skb_internal net/core/dev.c:5685 [inline]
+netif_receive_skb+0x12f/0x8d0 net/core/dev.c:5744
+NF_HOOK include/linux/netfilter.h:302 [inline]
+NF_HOOK include/linux/netfilter.h:296 [inline]
+br_pass_frame_up+0x303/0x410 net/bridge/br_input.c:68
+br_handle_frame_finish+0x909/0x1aa0 net/bridge/br_input.c:199
+br_nf_hook_thresh+0x2f8/0x3d0 net/bridge/br_netfilter_hooks.c:1041
+br_nf_pre_routing_finish_ipv6+0x695/0xef0 net/bridge/br_netfilter_ipv6.c:207
+NF_HOOK include/linux/netfilter.h:302 [inline]
+br_nf_pre_routing_ipv6+0x417/0x7c0 net/bridge/br_netfilter_ipv6.c:237
+br_nf_pre_routing+0x1496/0x1fe0 net/bridge/br_netfilter_hooks.c:507
+nf_hook_entry_hookfn include/linux/netfilter.h:142 [inline]
+nf_hook_bridge_pre net/bridge/br_input.c:255 [inline]
+br_handle_frame+0x9c9/0x12d0 net/bridge/br_input.c:399
+__netif_receive_skb_core+0x9fe/0x38f0 net/core/dev.c:5379
+__netif_receive_skb_one_core+0xae/0x180 net/core/dev.c:5483
+__netif_receive_skb+0x1f/0x1c0 net/core/dev.c:5599
+process_backlog+0x3a0/0x7c0 net/core/dev.c:5927
+__napi_poll+0xb3/0x6d0 net/core/dev.c:6494
+napi_poll net/core/dev.c:6561 [inline]
+net_rx_action+0x9c1/0xd90 net/core/dev.c:6672
+__do_softirq+0x1d0/0x9c8 kernel/softirq.c:571
+
+Fixes: 2b0a8c9eee81 ("tcp: add CDG congestion control")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_cdg.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/net/ipv4/tcp_cdg.c
++++ b/net/ipv4/tcp_cdg.c
+@@ -374,6 +374,7 @@ static void tcp_cdg_init(struct sock *sk
+       struct cdg *ca = inet_csk_ca(sk);
+       struct tcp_sock *tp = tcp_sk(sk);
++      ca->gradients = NULL;
+       /* We silently fall back to window = 1 if allocation fails. */
+       if (window > 1)
+               ca->gradients = kcalloc(window, sizeof(ca->gradients[0]),
+@@ -387,6 +388,7 @@ static void tcp_cdg_release(struct sock
+       struct cdg *ca = inet_csk_ca(sk);
+       kfree(ca->gradients);
++      ca->gradients = NULL;
+ }
+ static struct tcp_congestion_ops tcp_cdg __read_mostly = {