--- /dev/null
+From 01bfe5e8e428b475982a98a46cca5755726f3f7f Mon Sep 17 00:00:00 2001
+From: Xin Long <lucien.xin@gmail.com>
+Date: Mon, 3 May 2021 05:11:41 +0800
+Subject: Revert "net/sctp: fix race condition in sctp_destroy_sock"
+
+From: Xin Long <lucien.xin@gmail.com>
+
+commit 01bfe5e8e428b475982a98a46cca5755726f3f7f upstream.
+
+This reverts commit b166a20b07382b8bc1dcee2a448715c9c2c81b5b.
+
+This one has to be reverted as it introduced a deadlock,
+reported by syzbot:
+
+       CPU0                            CPU1
+       ----                            ----
+  lock(&net->sctp.addr_wq_lock);
+                                  lock(slock-AF_INET6);
+                                  lock(&net->sctp.addr_wq_lock);
+  lock(slock-AF_INET6);
+
+CPU0 is the thread of sctp_addr_wq_timeout_handler(), and CPU1
+is that of sctp_close().
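+
+The same ABBA ordering can be sketched in userspace, with two
+pthread mutexes standing in for addr_wq_lock and the socket lock
+(a minimal illustration of the pattern, not kernel code):
+
+  #include <pthread.h>
+  #include <stdio.h>
+  #include <unistd.h>
+
+  static pthread_mutex_t addr_wq_lock = PTHREAD_MUTEX_INITIALIZER;
+  static pthread_mutex_t sock_lock    = PTHREAD_MUTEX_INITIALIZER;
+
+  /* CPU0, like sctp_addr_wq_timeout_handler(): addr_wq_lock, then sock lock */
+  static void *timeout_handler(void *arg)
+  {
+          (void)arg;
+          pthread_mutex_lock(&addr_wq_lock);
+          usleep(1000);                      /* widen the race window */
+          pthread_mutex_lock(&sock_lock);    /* waits forever once CPU1 holds it */
+          pthread_mutex_unlock(&sock_lock);
+          pthread_mutex_unlock(&addr_wq_lock);
+          return NULL;
+  }
+
+  /* CPU1, like sctp_close() under the commit being reverted: it holds the
+   * socket lock while sctp_destroy_sock() takes addr_wq_lock underneath it
+   */
+  static void *close_path(void *arg)
+  {
+          (void)arg;
+          pthread_mutex_lock(&sock_lock);
+          usleep(1000);
+          pthread_mutex_lock(&addr_wq_lock); /* waits forever once CPU0 holds it */
+          pthread_mutex_unlock(&addr_wq_lock);
+          pthread_mutex_unlock(&sock_lock);
+          return NULL;
+  }
+
+  int main(void)
+  {
+          pthread_t a, b;
+
+          pthread_create(&a, NULL, timeout_handler, NULL);
+          pthread_create(&b, NULL, close_path, NULL);
+          pthread_join(a, NULL);             /* typically never returns */
+          pthread_join(b, NULL);
+          puts("no deadlock on this run");
+          return 0;
+  }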
+
+The original issue that commit addressed will be fixed properly
+in the next patch.
+
+Reported-by: syzbot+959223586843e69a2674@syzkaller.appspotmail.com
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sctp/socket.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -1520,9 +1520,11 @@ static void sctp_close(struct sock *sk,
+
+ /* Supposedly, no process has access to the socket, but
+ * the net layers still may.
++ * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
++ * held and that should be grabbed before socket lock.
+ */
+- local_bh_disable();
+- bh_lock_sock(sk);
++ spin_lock_bh(&net->sctp.addr_wq_lock);
++ bh_lock_sock_nested(sk);
+
+ /* Hold the sock, since sk_common_release() will put sock_put()
+ * and we have just a little more cleanup.
+@@ -1531,7 +1533,7 @@ static void sctp_close(struct sock *sk,
+ sk_common_release(sk);
+
+ bh_unlock_sock(sk);
+- local_bh_enable();
++ spin_unlock_bh(&net->sctp.addr_wq_lock);
+
+ sock_put(sk);
+
+@@ -4991,6 +4993,9 @@ static int sctp_init_sock(struct sock *s
+ sk_sockets_allocated_inc(sk);
+ sock_prot_inuse_add(net, sk->sk_prot, 1);
+
++ /* Nothing can fail after this block, otherwise
++ * sctp_destroy_sock() will be called without addr_wq_lock held
++ */
+ if (net->sctp.default_auto_asconf) {
+ spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
+ list_add_tail(&sp->auto_asconf_list,
+@@ -5025,9 +5030,7 @@ static void sctp_destroy_sock(struct soc
+
+ if (sp->do_auto_asconf) {
+ sp->do_auto_asconf = 0;
+- spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
+ list_del(&sp->auto_asconf_list);
+- spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
+ }
+ sctp_endpoint_free(sp->ep);
+ local_bh_disable();
--- /dev/null
+From 34e5b01186858b36c4d7c87e1a025071e8e2401f Mon Sep 17 00:00:00 2001
+From: Xin Long <lucien.xin@gmail.com>
+Date: Mon, 3 May 2021 05:11:42 +0800
+Subject: sctp: delay auto_asconf init until binding the first addr
+
+From: Xin Long <lucien.xin@gmail.com>
+
+commit 34e5b01186858b36c4d7c87e1a025071e8e2401f upstream.
+
+As Or Cohen described:
+
+ If sctp_destroy_sock is called without sock_net(sk)->sctp.addr_wq_lock
+ held and sp->do_auto_asconf is true, then an element is removed
+ from the auto_asconf_splist without any proper locking.
+
+ This can happen in the following functions:
+ 1. In sctp_accept, if sctp_sock_migrate fails.
+ 2. In inet_create or inet6_create, if there is a bpf program
+ attached to BPF_CGROUP_INET_SOCK_CREATE which denies
+ creation of the sctp socket.
+
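+The unlocked removal above is the classic linked-list race: one
+thread walks the list under the lock while another unlinks and
+frees an entry without it. A minimal userspace sketch of that
+pattern (a pthread mutex and an open-coded list standing in for
+addr_wq_lock and auto_asconf_splist; not kernel code, and it will
+typically crash or corrupt the list when run):
+
+  #include <pthread.h>
+  #include <stdlib.h>
+
+  struct node { struct node *prev, *next; };
+
+  static struct node head = { &head, &head }; /* auto_asconf_splist stand-in */
+  static pthread_mutex_t wq_lock = PTHREAD_MUTEX_INITIALIZER;
+
+  static void list_add_tail_(struct node *n)
+  {
+          n->prev = head.prev;
+          n->next = &head;
+          head.prev->next = n;
+          head.prev = n;
+  }
+
+  static void list_del_(struct node *n)
+  {
+          n->prev->next = n->next;
+          n->next->prev = n->prev;
+  }
+
+  /* like sctp_addr_wq_timeout_handler(): traverses under the lock */
+  static void *walker(void *arg)
+  {
+          (void)arg;
+          for (;;) {
+                  pthread_mutex_lock(&wq_lock);
+                  for (struct node *n = head.next; n != &head; n = n->next)
+                          ;                    /* touch every entry */
+                  pthread_mutex_unlock(&wq_lock);
+          }
+          return NULL;
+  }
+
+  int main(void)
+  {
+          pthread_t t;
+
+          pthread_create(&t, NULL, walker, NULL);
+          for (;;) {                           /* like the buggy destroy path */
+                  struct node *n = malloc(sizeof(*n));
+
+                  pthread_mutex_lock(&wq_lock);
+                  list_add_tail_(n);           /* add is locked, as in the kernel */
+                  pthread_mutex_unlock(&wq_lock);
+
+                  list_del_(n);                /* unlocked removal: the race */
+                  free(n);                     /* walker may still follow n */
+          }
+  }
+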
+This patch fixes it by moving the auto_asconf init out of
+sctp_init_sock(), so that inet_create()/inet6_create() no longer
+have to touch the list in sctp_destroy_sock() when calling
+sk_common_release().
+
+It also makes more sense to do the auto_asconf init while binding
+the first addr, as auto_asconf actually requires an ANY addr bind;
+see sctp_addr_wq_timeout_handler().
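+
+Concretely, with this change a socket only joins the list at its
+first successful bind, e.g. (a minimal userspace sketch, assuming
+SCTP support and net.sctp.default_auto_asconf=1):
+
+  #include <arpa/inet.h>
+  #include <netinet/in.h>
+  #include <sys/socket.h>
+
+  int main(void)
+  {
+          int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
+          struct sockaddr_in any = {
+                  .sin_family      = AF_INET,
+                  .sin_addr.s_addr = htonl(INADDR_ANY), /* the ANY bind */
+          };
+
+          /* Before this patch the sock was already on auto_asconf_splist
+           * right after socket(); now it is added here, once bp->port is
+           * first set in sctp_do_bind().
+           */
+          bind(fd, (struct sockaddr *)&any, sizeof(any));
+          return 0;
+  }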
+
+This addresses CVE-2021-23133.
+
+Fixes: 610236587600 ("bpf: Add new cgroup attach type to enable sock modifications")
+Reported-by: Or Cohen <orcohen@paloaltonetworks.com>
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sctp/socket.c | 31 +++++++++++++++++--------------
+ 1 file changed, 17 insertions(+), 14 deletions(-)
+
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -357,6 +357,18 @@ static struct sctp_af *sctp_sockaddr_af(
+ return af;
+ }
+
++static void sctp_auto_asconf_init(struct sctp_sock *sp)
++{
++ struct net *net = sock_net(&sp->inet.sk);
++
++ if (net->sctp.default_auto_asconf) {
++ spin_lock(&net->sctp.addr_wq_lock);
++ list_add_tail(&sp->auto_asconf_list, &net->sctp.auto_asconf_splist);
++ spin_unlock(&net->sctp.addr_wq_lock);
++ sp->do_auto_asconf = 1;
++ }
++}
++
+ /* Bind a local address either to an endpoint or to an association. */
+ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
+ {
+@@ -418,8 +430,10 @@ static int sctp_do_bind(struct sock *sk,
+ return -EADDRINUSE;
+
+ /* Refresh ephemeral port. */
+- if (!bp->port)
++ if (!bp->port) {
+ bp->port = inet_sk(sk)->inet_num;
++ sctp_auto_asconf_init(sp);
++ }
+
+ /* Add the address to the bind address list.
+ * Use GFP_ATOMIC since BHs will be disabled.
+@@ -4993,19 +5007,6 @@ static int sctp_init_sock(struct sock *s
+ sk_sockets_allocated_inc(sk);
+ sock_prot_inuse_add(net, sk->sk_prot, 1);
+
+- /* Nothing can fail after this block, otherwise
+- * sctp_destroy_sock() will be called without addr_wq_lock held
+- */
+- if (net->sctp.default_auto_asconf) {
+- spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
+- list_add_tail(&sp->auto_asconf_list,
+- &net->sctp.auto_asconf_splist);
+- sp->do_auto_asconf = 1;
+- spin_unlock(&sock_net(sk)->sctp.addr_wq_lock);
+- } else {
+- sp->do_auto_asconf = 0;
+- }
+-
+ local_bh_enable();
+
+ return 0;
+@@ -9401,6 +9402,8 @@ static int sctp_sock_migrate(struct sock
+ return err;
+ }
+
++ sctp_auto_asconf_init(newsp);
++
+ /* Move any messages in the old socket's receive queue that are for the
+ * peeled off association to the new socket's receive queue.
+ */