--- /dev/null
+From 8fd54a73b7cda11548154451bdb4bde6d8ff74c7 Mon Sep 17 00:00:00 2001
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+Date: Thu, 4 Feb 2021 18:33:51 +0200
+Subject: net: dsa: call teardown method on probe failure
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+commit 8fd54a73b7cda11548154451bdb4bde6d8ff74c7 upstream.
+
+Since teardown is supposed to undo the effects of the setup method, it
+should be called in the error path for dsa_switch_setup, not just in
+dsa_switch_teardown.
+
+Fixes: 5e3f847a02aa ("net: dsa: Add teardown callback for drivers")
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Link: https://lore.kernel.org/r/20210204163351.2929670-1-vladimir.oltean@nxp.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/dsa/dsa2.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/net/dsa/dsa2.c
++++ b/net/dsa/dsa2.c
+@@ -399,18 +399,21 @@ static int dsa_switch_setup(struct dsa_s
+ ds->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
+ if (!ds->slave_mii_bus) {
+ err = -ENOMEM;
+- goto unregister_notifier;
++ goto teardown;
+ }
+
+ dsa_slave_mii_bus_init(ds);
+
+ err = mdiobus_register(ds->slave_mii_bus);
+ if (err < 0)
+- goto unregister_notifier;
++ goto teardown;
+ }
+
+ return 0;
+
++teardown:
++ if (ds->ops->teardown)
++ ds->ops->teardown(ds);
+ unregister_notifier:
+ dsa_switch_unregister_notifier(ds);
+ unregister_devlink:
--- /dev/null
+From af8085f3a4712c57d0dd415ad543bac85780375c Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.de>
+Date: Fri, 5 Feb 2021 11:36:30 +1100
+Subject: net: fix iteration for sctp transport seq_files
+
+From: NeilBrown <neilb@suse.de>
+
+commit af8085f3a4712c57d0dd415ad543bac85780375c upstream.
+
+The sctp transport seq_file iterators take a reference to the transport
+in the ->start and ->next functions and releases the reference in the
+->show function. The preferred handling for such resources is to
+release them in the subsequent ->next or ->stop function call.
+
+Since Commit 1f4aace60b0e ("fs/seq_file.c: simplify seq_file iteration
+code and interface") there is no guarantee that ->show will be called
+after ->next, so this function can now leak references.
+
+So move the sctp_transport_put() call to ->next and ->stop.
+
+Fixes: 1f4aace60b0e ("fs/seq_file.c: simplify seq_file iteration code and interface")
+Reported-by: Xin Long <lucien.xin@gmail.com>
+Signed-off-by: NeilBrown <neilb@suse.de>
+Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sctp/proc.c | 16 ++++++++++++----
+ 1 file changed, 12 insertions(+), 4 deletions(-)
+
+--- a/net/sctp/proc.c
++++ b/net/sctp/proc.c
+@@ -215,6 +215,12 @@ static void sctp_transport_seq_stop(stru
+ {
+ struct sctp_ht_iter *iter = seq->private;
+
++ if (v && v != SEQ_START_TOKEN) {
++ struct sctp_transport *transport = v;
++
++ sctp_transport_put(transport);
++ }
++
+ sctp_transport_walk_stop(&iter->hti);
+ }
+
+@@ -222,6 +228,12 @@ static void *sctp_transport_seq_next(str
+ {
+ struct sctp_ht_iter *iter = seq->private;
+
++ if (v && v != SEQ_START_TOKEN) {
++ struct sctp_transport *transport = v;
++
++ sctp_transport_put(transport);
++ }
++
+ ++*pos;
+
+ return sctp_transport_get_next(seq_file_net(seq), &iter->hti);
+@@ -277,8 +289,6 @@ static int sctp_assocs_seq_show(struct s
+ sk->sk_rcvbuf);
+ seq_printf(seq, "\n");
+
+- sctp_transport_put(transport);
+-
+ return 0;
+ }
+
+@@ -354,8 +364,6 @@ static int sctp_remaddr_seq_show(struct
+ seq_printf(seq, "\n");
+ }
+
+- sctp_transport_put(transport);
+-
+ return 0;
+ }
+
--- /dev/null
+From 8dc1c444df193701910f5e80b5d4caaf705a8fb0 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 4 Feb 2021 13:31:46 -0800
+Subject: net: gro: do not keep too many GRO packets in napi->rx_list
+
+From: Eric Dumazet <edumazet@google.com>
+
+commit 8dc1c444df193701910f5e80b5d4caaf705a8fb0 upstream.
+
+Commit c80794323e82 ("net: Fix packet reordering caused by GRO and
+listified RX cooperation") had the unfortunate effect of adding
+latencies in common workloads.
+
+Before the patch, GRO packets were immediately passed to
+upper stacks.
+
+After the patch, we can accumulate quite a lot of GRO
+packets (depending on NAPI budget).
+
+My fix is counting in napi->rx_count number of segments
+instead of number of logical packets.
+
+Fixes: c80794323e82 ("net: Fix packet reordering caused by GRO and listified RX cooperation")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Bisected-by: John Sperbeck <jsperbeck@google.com>
+Tested-by: Jian Yang <jianyang@google.com>
+Cc: Maxim Mikityanskiy <maximmi@mellanox.com>
+Reviewed-by: Saeed Mahameed <saeedm@nvidia.com>
+Reviewed-by: Edward Cree <ecree.xilinx@gmail.com>
+Reviewed-by: Alexander Lobakin <alobakin@pm.me>
+Link: https://lore.kernel.org/r/20210204213146.4192368-1-eric.dumazet@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/dev.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -5275,10 +5275,11 @@ static void gro_normal_list(struct napi_
+ /* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
+ * pass the whole batch up to the stack.
+ */
+-static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
++static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs)
+ {
+ list_add_tail(&skb->list, &napi->rx_list);
+- if (++napi->rx_count >= gro_normal_batch)
++ napi->rx_count += segs;
++ if (napi->rx_count >= gro_normal_batch)
+ gro_normal_list(napi);
+ }
+
+@@ -5317,7 +5318,7 @@ static int napi_gro_complete(struct napi
+ }
+
+ out:
+- gro_normal_one(napi, skb);
++ gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
+ return NET_RX_SUCCESS;
+ }
+
+@@ -5608,7 +5609,7 @@ static gro_result_t napi_skb_finish(stru
+ {
+ switch (ret) {
+ case GRO_NORMAL:
+- gro_normal_one(napi, skb);
++ gro_normal_one(napi, skb, 1);
+ break;
+
+ case GRO_DROP:
+@@ -5696,7 +5697,7 @@ static gro_result_t napi_frags_finish(st
+ __skb_push(skb, ETH_HLEN);
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ if (ret == GRO_NORMAL)
+- gro_normal_one(napi, skb);
++ gro_normal_one(napi, skb, 1);
+ break;
+
+ case GRO_DROP:
--- /dev/null
+From 2a80c15812372e554474b1dba0b1d8e467af295d Mon Sep 17 00:00:00 2001
+From: Sabyrzhan Tasbolatov <snovitoll@gmail.com>
+Date: Tue, 2 Feb 2021 15:20:59 +0600
+Subject: net/qrtr: restrict user-controlled length in qrtr_tun_write_iter()
+
+From: Sabyrzhan Tasbolatov <snovitoll@gmail.com>
+
+commit 2a80c15812372e554474b1dba0b1d8e467af295d upstream.
+
+syzbot found WARNING in qrtr_tun_write_iter [1] when write_iter length
+exceeds KMALLOC_MAX_SIZE causing order >= MAX_ORDER condition.
+
+Additionally, there is no check for 0 length write.
+
+[1]
+WARNING: mm/page_alloc.c:5011
+[..]
+Call Trace:
+ alloc_pages_current+0x18c/0x2a0 mm/mempolicy.c:2267
+ alloc_pages include/linux/gfp.h:547 [inline]
+ kmalloc_order+0x2e/0xb0 mm/slab_common.c:837
+ kmalloc_order_trace+0x14/0x120 mm/slab_common.c:853
+ kmalloc include/linux/slab.h:557 [inline]
+ kzalloc include/linux/slab.h:682 [inline]
+ qrtr_tun_write_iter+0x8a/0x180 net/qrtr/tun.c:83
+ call_write_iter include/linux/fs.h:1901 [inline]
+
+Reported-by: syzbot+c2a7e5c5211605a90865@syzkaller.appspotmail.com
+Signed-off-by: Sabyrzhan Tasbolatov <snovitoll@gmail.com>
+Link: https://lore.kernel.org/r/20210202092059.1361381-1-snovitoll@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/qrtr/tun.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/net/qrtr/tun.c
++++ b/net/qrtr/tun.c
+@@ -80,6 +80,12 @@ static ssize_t qrtr_tun_write_iter(struc
+ ssize_t ret;
+ void *kbuf;
+
++ if (!len)
++ return -EINVAL;
++
++ if (len > KMALLOC_MAX_SIZE)
++ return -ENOMEM;
++
+ kbuf = kzalloc(len, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
--- /dev/null
+From a11148e6fcce2ae53f47f0a442d098d860b4f7db Mon Sep 17 00:00:00 2001
+From: Sabyrzhan Tasbolatov <snovitoll@gmail.com>
+Date: Tue, 2 Feb 2021 02:32:33 +0600
+Subject: net/rds: restrict iovecs length for RDS_CMSG_RDMA_ARGS
+
+From: Sabyrzhan Tasbolatov <snovitoll@gmail.com>
+
+commit a11148e6fcce2ae53f47f0a442d098d860b4f7db upstream.
+
+syzbot found WARNING in rds_rdma_extra_size [1] when RDS_CMSG_RDMA_ARGS
+control message is passed with user-controlled
+0x40001 bytes of args->nr_local, causing order >= MAX_ORDER condition.
+
+The exact value 0x40001 can be checked with UIO_MAXIOV which is 0x400.
+So for kcalloc() 0x400 iovecs with sizeof(struct rds_iovec) = 0x10
+is the closest limit, with 0x10 leftover.
+
+Same condition is currently done in rds_cmsg_rdma_args().
+
+[1] WARNING: mm/page_alloc.c:5011
+[..]
+Call Trace:
+ alloc_pages_current+0x18c/0x2a0 mm/mempolicy.c:2267
+ alloc_pages include/linux/gfp.h:547 [inline]
+ kmalloc_order+0x2e/0xb0 mm/slab_common.c:837
+ kmalloc_order_trace+0x14/0x120 mm/slab_common.c:853
+ kmalloc_array include/linux/slab.h:592 [inline]
+ kcalloc include/linux/slab.h:621 [inline]
+ rds_rdma_extra_size+0xb2/0x3b0 net/rds/rdma.c:568
+ rds_rm_size net/rds/send.c:928 [inline]
+
+Reported-by: syzbot+1bd2b07f93745fa38425@syzkaller.appspotmail.com
+Signed-off-by: Sabyrzhan Tasbolatov <snovitoll@gmail.com>
+Acked-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
+Link: https://lore.kernel.org/r/20210201203233.1324704-1-snovitoll@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/rds/rdma.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/rds/rdma.c
++++ b/net/rds/rdma.c
+@@ -532,6 +532,9 @@ int rds_rdma_extra_size(struct rds_rdma_
+ if (args->nr_local == 0)
+ return -EINVAL;
+
++ if (args->nr_local > UIO_MAXIOV)
++ return -EMSGSIZE;
++
+ iov->iov = kcalloc(args->nr_local,
+ sizeof(struct rds_iovec),
+ GFP_KERNEL);
--- /dev/null
+From 3d0bc44d39bca615b72637e340317b7899b7f911 Mon Sep 17 00:00:00 2001
+From: Norbert Slusarek <nslusarek@gmx.net>
+Date: Fri, 5 Feb 2021 13:14:05 +0100
+Subject: net/vmw_vsock: improve locking in vsock_connect_timeout()
+
+From: Norbert Slusarek <nslusarek@gmx.net>
+
+commit 3d0bc44d39bca615b72637e340317b7899b7f911 upstream.
+
+A possible locking issue in vsock_connect_timeout() was recognized by
+Eric Dumazet which might cause a null pointer dereference in
+vsock_transport_cancel_pkt(). This patch assures that
+vsock_transport_cancel_pkt() will be called within the lock, so a race
+condition won't occur which could result in vsk->transport being set to NULL.
+
+Fixes: 380feae0def7 ("vsock: cancel packets when failing to connect")
+Reported-by: Eric Dumazet <eric.dumazet@gmail.com>
+Signed-off-by: Norbert Slusarek <nslusarek@gmx.net>
+Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
+Link: https://lore.kernel.org/r/trinity-f8e0937a-cf0e-4d80-a76e-d9a958ba3ef1-1612535522360@3c-app-gmx-bap12
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/vmw_vsock/af_vsock.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -1099,7 +1099,6 @@ static void vsock_connect_timeout(struct
+ {
+ struct sock *sk;
+ struct vsock_sock *vsk;
+- int cancel = 0;
+
+ vsk = container_of(work, struct vsock_sock, connect_work.work);
+ sk = sk_vsock(vsk);
+@@ -1110,11 +1109,9 @@ static void vsock_connect_timeout(struct
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_err = ETIMEDOUT;
+ sk->sk_error_report(sk);
+- cancel = 1;
++ vsock_transport_cancel_pkt(vsk);
+ }
+ release_sock(sk);
+- if (cancel)
+- vsock_transport_cancel_pkt(vsk);
+
+ sock_put(sk);
+ }
--- /dev/null
+From 3aa6bce9af0e25b735c9c1263739a5639a336ae8 Mon Sep 17 00:00:00 2001
+From: Edwin Peer <edwin.peer@broadcom.com>
+Date: Fri, 5 Feb 2021 17:37:32 -0800
+Subject: net: watchdog: hold device global xmit lock during tx disable
+
+From: Edwin Peer <edwin.peer@broadcom.com>
+
+commit 3aa6bce9af0e25b735c9c1263739a5639a336ae8 upstream.
+
+Prevent netif_tx_disable() running concurrently with dev_watchdog() by
+taking the device global xmit lock. Otherwise, the recommended:
+
+ netif_carrier_off(dev);
+ netif_tx_disable(dev);
+
+driver shutdown sequence can happen after the watchdog has already
+checked carrier, resulting in possible false alarms. This is because
+netif_tx_lock() only sets the frozen bit without maintaining the locks
+on the individual queues.
+
+Fixes: c3f26a269c24 ("netdev: Fix lockdep warnings in multiqueue configurations.")
+Signed-off-by: Edwin Peer <edwin.peer@broadcom.com>
+Reviewed-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/netdevice.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -4044,6 +4044,7 @@ static inline void netif_tx_disable(stru
+
+ local_bh_disable();
+ cpu = smp_processor_id();
++ spin_lock(&dev->tx_global_lock);
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+@@ -4051,6 +4052,7 @@ static inline void netif_tx_disable(stru
+ netif_tx_stop_queue(txq);
+ __netif_tx_unlock(txq);
+ }
++ spin_unlock(&dev->tx_global_lock);
+ local_bh_enable();
+ }
+
--- /dev/null
+From cef4cbff06fbc3be54d6d79ee139edecc2ee8598 Mon Sep 17 00:00:00 2001
+From: Miklos Szeredi <mszeredi@redhat.com>
+Date: Thu, 12 Nov 2020 11:31:55 +0100
+Subject: ovl: expand warning in ovl_d_real()
+
+From: Miklos Szeredi <mszeredi@redhat.com>
+
+commit cef4cbff06fbc3be54d6d79ee139edecc2ee8598 upstream.
+
+There was a syzbot report with this warning but insufficient information...
+
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/overlayfs/super.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -79,7 +79,7 @@ static void ovl_dentry_release(struct de
+ static struct dentry *ovl_d_real(struct dentry *dentry,
+ const struct inode *inode)
+ {
+- struct dentry *real;
++ struct dentry *real = NULL, *lower;
+
+ /* It's an overlay file */
+ if (inode && d_inode(dentry) == inode)
+@@ -98,9 +98,10 @@ static struct dentry *ovl_d_real(struct
+ if (real && !inode && ovl_has_upperdata(d_inode(dentry)))
+ return real;
+
+- real = ovl_dentry_lowerdata(dentry);
+- if (!real)
++ lower = ovl_dentry_lowerdata(dentry);
++ if (!lower)
+ goto bug;
++ real = lower;
+
+ /* Handle recursion */
+ real = d_real(real, inode);
+@@ -108,8 +109,10 @@ static struct dentry *ovl_d_real(struct
+ if (!inode || inode == d_inode(real))
+ return real;
+ bug:
+- WARN(1, "ovl_d_real(%pd4, %s:%lu): real dentry not found\n", dentry,
+- inode ? inode->i_sb->s_id : "NULL", inode ? inode->i_ino : 0);
++ WARN(1, "%s(%pd4, %s:%lu): real dentry (%p/%lu) not found\n",
++ __func__, dentry, inode ? inode->i_sb->s_id : "NULL",
++ inode ? inode->i_ino : 0, real,
++ real && d_inode(real) ? d_inode(real)->i_ino : 0);
+ return dentry;
+ }
+
--- /dev/null
+From 7b5eab57cac45e270a0ad624ba157c5b30b3d44d Mon Sep 17 00:00:00 2001
+From: David Howells <dhowells@redhat.com>
+Date: Wed, 3 Feb 2021 08:47:56 +0000
+Subject: rxrpc: Fix clearance of Tx/Rx ring when releasing a call
+
+From: David Howells <dhowells@redhat.com>
+
+commit 7b5eab57cac45e270a0ad624ba157c5b30b3d44d upstream.
+
+At the end of rxrpc_release_call(), rxrpc_cleanup_ring() is called to clear
+the Rx/Tx skbuff ring, but this doesn't lock the ring whilst it's accessing
+it. Unfortunately, rxrpc_resend() might be trying to retransmit a packet
+concurrently with this - and whilst it does lock the ring, this isn't
+protection against rxrpc_cleanup_call().
+
+Fix this by removing the call to rxrpc_cleanup_ring() from
+rxrpc_release_call(). rxrpc_cleanup_ring() will be called again anyway
+from rxrpc_cleanup_call(). The earlier call is just an optimisation to
+recycle skbuffs more quickly.
+
+Alternative solutions include rxrpc_release_call() could try to cancel the
+work item or wait for it to complete or rxrpc_cleanup_ring() could lock
+when accessing the ring (which would require a bh lock).
+
+This can produce a report like the following:
+
+ BUG: KASAN: use-after-free in rxrpc_send_data_packet+0x19b4/0x1e70 net/rxrpc/output.c:372
+ Read of size 4 at addr ffff888011606e04 by task kworker/0:0/5
+ ...
+ Workqueue: krxrpcd rxrpc_process_call
+ Call Trace:
+ ...
+ kasan_report.cold+0x79/0xd5 mm/kasan/report.c:413
+ rxrpc_send_data_packet+0x19b4/0x1e70 net/rxrpc/output.c:372
+ rxrpc_resend net/rxrpc/call_event.c:266 [inline]
+ rxrpc_process_call+0x1634/0x1f60 net/rxrpc/call_event.c:412
+ process_one_work+0x98d/0x15f0 kernel/workqueue.c:2275
+ ...
+
+ Allocated by task 2318:
+ ...
+ sock_alloc_send_pskb+0x793/0x920 net/core/sock.c:2348
+ rxrpc_send_data+0xb51/0x2bf0 net/rxrpc/sendmsg.c:358
+ rxrpc_do_sendmsg+0xc03/0x1350 net/rxrpc/sendmsg.c:744
+ rxrpc_sendmsg+0x420/0x630 net/rxrpc/af_rxrpc.c:560
+ ...
+
+ Freed by task 2318:
+ ...
+ kfree_skb+0x140/0x3f0 net/core/skbuff.c:704
+ rxrpc_free_skb+0x11d/0x150 net/rxrpc/skbuff.c:78
+ rxrpc_cleanup_ring net/rxrpc/call_object.c:485 [inline]
+ rxrpc_release_call+0x5dd/0x860 net/rxrpc/call_object.c:552
+ rxrpc_release_calls_on_socket+0x21c/0x300 net/rxrpc/call_object.c:579
+ rxrpc_release_sock net/rxrpc/af_rxrpc.c:885 [inline]
+ rxrpc_release+0x263/0x5a0 net/rxrpc/af_rxrpc.c:916
+ __sock_release+0xcd/0x280 net/socket.c:597
+ ...
+
+ The buggy address belongs to the object at ffff888011606dc0
+ which belongs to the cache skbuff_head_cache of size 232
+
+Fixes: 248f219cb8bc ("rxrpc: Rewrite the data and ack handling code")
+Reported-by: syzbot+174de899852504e4a74a@syzkaller.appspotmail.com
+Reported-by: syzbot+3d1c772efafd3c38d007@syzkaller.appspotmail.com
+Signed-off-by: David Howells <dhowells@redhat.com>
+cc: Hillf Danton <hdanton@sina.com>
+Link: https://lore.kernel.org/r/161234207610.653119.5287360098400436976.stgit@warthog.procyon.org.uk
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/rxrpc/call_object.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/net/rxrpc/call_object.c
++++ b/net/rxrpc/call_object.c
+@@ -507,8 +507,6 @@ void rxrpc_release_call(struct rxrpc_soc
+ rxrpc_disconnect_call(call);
+ if (call->security)
+ call->security->free_call_crypto(call);
+-
+- rxrpc_cleanup_ring(call);
+ _leave("");
+ }
+
h8300-fix-preemption-build-ti_pre_count-undefined.patch
usb-dwc3-ulpi-fix-checkpatch-warning.patch
usb-dwc3-ulpi-replace-cpu-based-busyloop-with-protocol-based-one.patch
+rxrpc-fix-clearance-of-tx-rx-ring-when-releasing-a-call.patch
+udp-fix-skb_copy_and_csum_datagram-with-odd-segment-sizes.patch
+net-dsa-call-teardown-method-on-probe-failure.patch
+net-gro-do-not-keep-too-many-gro-packets-in-napi-rx_list.patch
+net-fix-iteration-for-sctp-transport-seq_files.patch
+net-vmw_vsock-improve-locking-in-vsock_connect_timeout.patch
+net-watchdog-hold-device-global-xmit-lock-during-tx-disable.patch
+vsock-virtio-update-credit-only-if-socket-is-not-closed.patch
+vsock-fix-locking-in-vsock_shutdown.patch
+net-rds-restrict-iovecs-length-for-rds_cmsg_rdma_args.patch
+net-qrtr-restrict-user-controlled-length-in-qrtr_tun_write_iter.patch
+ovl-expand-warning-in-ovl_d_real.patch
--- /dev/null
+From 52cbd23a119c6ebf40a527e53f3402d2ea38eccb Mon Sep 17 00:00:00 2001
+From: Willem de Bruijn <willemb@google.com>
+Date: Wed, 3 Feb 2021 14:29:52 -0500
+Subject: udp: fix skb_copy_and_csum_datagram with odd segment sizes
+
+From: Willem de Bruijn <willemb@google.com>
+
+commit 52cbd23a119c6ebf40a527e53f3402d2ea38eccb upstream.
+
+When iteratively computing a checksum with csum_block_add, track the
+offset "pos" to correctly rotate in csum_block_add when offset is odd.
+
+The open coded implementation of skb_copy_and_csum_datagram did this.
+With the switch to __skb_datagram_iter calling csum_and_copy_to_iter,
+pos was reinitialized to 0 on each call.
+
+Bring back the pos by passing it along with the csum to the callback.
+
+Changes v1->v2
+ - pass csum value, instead of csump pointer (Alexander Duyck)
+
+Link: https://lore.kernel.org/netdev/20210128152353.GB27281@optiplex/
+Fixes: 950fcaecd5cc ("datagram: consolidate datagram copy to iter helpers")
+Reported-by: Oliver Graute <oliver.graute@gmail.com>
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Link: https://lore.kernel.org/r/20210203192952.1849843-1-willemdebruijn.kernel@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/uio.h | 8 +++++++-
+ lib/iov_iter.c | 24 ++++++++++++++----------
+ net/core/datagram.c | 12 ++++++++++--
+ 3 files changed, 31 insertions(+), 13 deletions(-)
+
+--- a/include/linux/uio.h
++++ b/include/linux/uio.h
+@@ -261,7 +261,13 @@ static inline void iov_iter_reexpand(str
+ {
+ i->count = count;
+ }
+-size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump, struct iov_iter *i);
++
++struct csum_state {
++ __wsum csum;
++ size_t off;
++};
++
++size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
+ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
+ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
+ size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
+--- a/lib/iov_iter.c
++++ b/lib/iov_iter.c
+@@ -570,12 +570,13 @@ static __wsum csum_and_memcpy(void *to,
+ }
+
+ static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
+- __wsum *csum, struct iov_iter *i)
++ struct csum_state *csstate,
++ struct iov_iter *i)
+ {
+ struct pipe_inode_info *pipe = i->pipe;
++ __wsum sum = csstate->csum;
++ size_t off = csstate->off;
+ size_t n, r;
+- size_t off = 0;
+- __wsum sum = *csum;
+ int idx;
+
+ if (!sanity(i))
+@@ -596,7 +597,8 @@ static size_t csum_and_copy_to_pipe_iter
+ addr += chunk;
+ }
+ i->count -= bytes;
+- *csum = sum;
++ csstate->csum = sum;
++ csstate->off = off;
+ return bytes;
+ }
+
+@@ -1484,18 +1486,19 @@ bool csum_and_copy_from_iter_full(void *
+ }
+ EXPORT_SYMBOL(csum_and_copy_from_iter_full);
+
+-size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
++size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
+ struct iov_iter *i)
+ {
++ struct csum_state *csstate = _csstate;
+ const char *from = addr;
+- __wsum *csum = csump;
+ __wsum sum, next;
+- size_t off = 0;
++ size_t off;
+
+ if (unlikely(iov_iter_is_pipe(i)))
+- return csum_and_copy_to_pipe_iter(addr, bytes, csum, i);
++ return csum_and_copy_to_pipe_iter(addr, bytes, _csstate, i);
+
+- sum = *csum;
++ sum = csstate->csum;
++ off = csstate->off;
+ if (unlikely(iov_iter_is_discard(i))) {
+ WARN_ON(1); /* for now */
+ return 0;
+@@ -1524,7 +1527,8 @@ size_t csum_and_copy_to_iter(const void
+ off += v.iov_len;
+ })
+ )
+- *csum = sum;
++ csstate->csum = sum;
++ csstate->off = off;
+ return bytes;
+ }
+ EXPORT_SYMBOL(csum_and_copy_to_iter);
+--- a/net/core/datagram.c
++++ b/net/core/datagram.c
+@@ -700,8 +700,16 @@ static int skb_copy_and_csum_datagram(co
+ struct iov_iter *to, int len,
+ __wsum *csump)
+ {
+- return __skb_datagram_iter(skb, offset, to, len, true,
+- csum_and_copy_to_iter, csump);
++ struct csum_state csdata = { .csum = *csump };
++ int ret;
++
++ ret = __skb_datagram_iter(skb, offset, to, len, true,
++ csum_and_copy_to_iter, &csdata);
++ if (ret)
++ return ret;
++
++ *csump = csdata.csum;
++ return 0;
+ }
+
+ /**
--- /dev/null
+From 1c5fae9c9a092574398a17facc31c533791ef232 Mon Sep 17 00:00:00 2001
+From: Stefano Garzarella <sgarzare@redhat.com>
+Date: Tue, 9 Feb 2021 09:52:19 +0100
+Subject: vsock: fix locking in vsock_shutdown()
+
+From: Stefano Garzarella <sgarzare@redhat.com>
+
+commit 1c5fae9c9a092574398a17facc31c533791ef232 upstream.
+
+In vsock_shutdown() we touched some socket fields without holding the
+socket lock, such as 'state' and 'sk_flags'.
+
+Also, after the introduction of multi-transport, we are accessing
+'vsk->transport' in vsock_send_shutdown() without holding the lock
+and this call can be made while the connection is in progress, so
+the transport can change in the meantime.
+
+To avoid issues, we hold the socket lock when we enter in
+vsock_shutdown() and release it when we leave.
+
+Among the transports that implement the 'shutdown' callback, only
+hyperv_transport acquired the lock. Since the caller now holds it,
+we no longer take it.
+
+Fixes: d021c344051a ("VSOCK: Introduce VM Sockets")
+Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/vmw_vsock/af_vsock.c | 8 +++++---
+ net/vmw_vsock/hyperv_transport.c | 4 ----
+ 2 files changed, 5 insertions(+), 7 deletions(-)
+
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -808,10 +808,12 @@ static int vsock_shutdown(struct socket
+ */
+
+ sk = sock->sk;
++
++ lock_sock(sk);
+ if (sock->state == SS_UNCONNECTED) {
+ err = -ENOTCONN;
+ if (sk->sk_type == SOCK_STREAM)
+- return err;
++ goto out;
+ } else {
+ sock->state = SS_DISCONNECTING;
+ err = 0;
+@@ -820,10 +822,8 @@ static int vsock_shutdown(struct socket
+ /* Receive and send shutdowns are treated alike. */
+ mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN);
+ if (mode) {
+- lock_sock(sk);
+ sk->sk_shutdown |= mode;
+ sk->sk_state_change(sk);
+- release_sock(sk);
+
+ if (sk->sk_type == SOCK_STREAM) {
+ sock_reset_flag(sk, SOCK_DONE);
+@@ -831,6 +831,8 @@ static int vsock_shutdown(struct socket
+ }
+ }
+
++out:
++ release_sock(sk);
+ return err;
+ }
+
+--- a/net/vmw_vsock/hyperv_transport.c
++++ b/net/vmw_vsock/hyperv_transport.c
+@@ -464,14 +464,10 @@ static void hvs_shutdown_lock_held(struc
+
+ static int hvs_shutdown(struct vsock_sock *vsk, int mode)
+ {
+- struct sock *sk = sk_vsock(vsk);
+-
+ if (!(mode & SEND_SHUTDOWN))
+ return 0;
+
+- lock_sock(sk);
+ hvs_shutdown_lock_held(vsk->trans, mode);
+- release_sock(sk);
+ return 0;
+ }
+
--- /dev/null
+From ce7536bc7398e2ae552d2fabb7e0e371a9f1fe46 Mon Sep 17 00:00:00 2001
+From: Stefano Garzarella <sgarzare@redhat.com>
+Date: Mon, 8 Feb 2021 15:44:54 +0100
+Subject: vsock/virtio: update credit only if socket is not closed
+
+From: Stefano Garzarella <sgarzare@redhat.com>
+
+commit ce7536bc7398e2ae552d2fabb7e0e371a9f1fe46 upstream.
+
+If the socket is closed or is being released, some resources used by
+virtio_transport_space_update() such as 'vsk->trans' may be released.
+
+To avoid a use after free bug we should only update the available credit
+when we are sure the socket is still open and we have the lock held.
+
+Fixes: 06a8fc78367d ("VSOCK: Introduce virtio_vsock_common.ko")
+Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
+Acked-by: Michael S. Tsirkin <mst@redhat.com>
+Link: https://lore.kernel.org/r/20210208144454.84438-1-sgarzare@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/vmw_vsock/virtio_transport_common.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/vmw_vsock/virtio_transport_common.c
++++ b/net/vmw_vsock/virtio_transport_common.c
+@@ -1100,10 +1100,10 @@ void virtio_transport_recv_pkt(struct vi
+
+ vsk = vsock_sk(sk);
+
+- space_available = virtio_transport_space_update(sk, pkt);
+-
+ lock_sock(sk);
+
++ space_available = virtio_transport_space_update(sk, pkt);
++
+ /* Update CID in case it has changed after a transport reset event */
+ vsk->local_addr.svm_cid = dst.svm_cid;
+