--- /dev/null
+From foo@baz Sat Oct 31 10:28:22 AM CET 2020
+From: Vinay Kumar Yadav <vinay.yadav@chelsio.com>
+Date: Mon, 26 Oct 2020 01:05:39 +0530
+Subject: [PATCH stable 5.8 06/22] chelsio/chtls: fix deadlock issue
+
+From: Vinay Kumar Yadav <vinay.yadav@chelsio.com>
+
+[ Upstream commit 28e9dcd9172028263c8225c15c4e329e08475e89 ]
+
+In chtls_pass_establish() we hold the child socket lock via bh_lock_sock()
+and then try to take bh_lock_sock() again in add_to_reap_list(), causing a
+deadlock. Remove bh_lock_sock() from add_to_reap_list() as the lock is
+already held.
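+
+A minimal sketch of the nesting (simplified, not the literal driver code):
+
+	/* chtls_pass_establish() */
+	bh_lock_sock(child);
+	...
+	add_to_reap_list(child);	/* used to call bh_lock_sock(child)
+					 * again and spin on itself forever */
+	bh_unlock_sock(child);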
+
+Fixes: cc35c88ae4db ("crypto : chtls - CPL handler definition")
+Signed-off-by: Vinay Kumar Yadav <vinay.yadav@chelsio.com>
+Link: https://lore.kernel.org/r/20201025193538.31112-1-vinay.yadav@chelsio.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/crypto/chelsio/chtls/chtls_cm.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
++++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
+@@ -1348,7 +1348,6 @@ static void add_to_reap_list(struct sock
+ struct chtls_sock *csk = sk->sk_user_data;
+
+ local_bh_disable();
+- bh_lock_sock(sk);
+ release_tcp_port(sk); /* release the port immediately */
+
+ spin_lock(&reap_list_lock);
+@@ -1357,7 +1356,6 @@ static void add_to_reap_list(struct sock
+ if (!csk->passive_reap_next)
+ schedule_work(&reap_task);
+ spin_unlock(&reap_list_lock);
+- bh_unlock_sock(sk);
+ local_bh_enable();
+ }
+
--- /dev/null
+From foo@baz Sat Oct 31 10:28:22 AM CET 2020
+From: Vinay Kumar Yadav <vinay.yadav@chelsio.com>
+Date: Mon, 26 Oct 2020 01:12:29 +0530
+Subject: [PATCH stable 5.8 07/22] chelsio/chtls: fix memory leaks in CPL handlers
+
+From: Vinay Kumar Yadav <vinay.yadav@chelsio.com>
+
+[ Upstream commit 6daa1da4e262b0cd52ef0acc1989ff22b5540264 ]
+
+The CPL handler functions chtls_pass_open_rpl() and
+chtls_close_listsrv_rpl() should return CPL_RET_BUF_DONE
+so that the caller frees the skb, avoiding a leak.
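+
+The dispatching caller frees the skb only when the handler asks for it,
+roughly (a sketch of the convention, not the exact chtls code):
+
+	ret = handler(cdev, skb);
+	if (ret & CPL_RET_BUF_DONE)
+		kfree_skb(skb);
+
+so returning 0 from these two handlers leaked the skb.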
+
+Fixes: cc35c88ae4db ("crypto : chtls - CPL handler definition")
+Signed-off-by: Vinay Kumar Yadav <vinay.yadav@chelsio.com>
+Link: https://lore.kernel.org/r/20201025194228.31271-1-vinay.yadav@chelsio.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/crypto/chelsio/chtls/chtls_cm.c | 27 ++++++++++++---------------
+ 1 file changed, 12 insertions(+), 15 deletions(-)
+
+--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
++++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
+@@ -696,14 +696,13 @@ static int chtls_pass_open_rpl(struct ch
+ if (rpl->status != CPL_ERR_NONE) {
+ pr_info("Unexpected PASS_OPEN_RPL status %u for STID %u\n",
+ rpl->status, stid);
+- return CPL_RET_BUF_DONE;
++ } else {
++ cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
++ sock_put(listen_ctx->lsk);
++ kfree(listen_ctx);
++ module_put(THIS_MODULE);
+ }
+- cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
+- sock_put(listen_ctx->lsk);
+- kfree(listen_ctx);
+- module_put(THIS_MODULE);
+-
+- return 0;
++ return CPL_RET_BUF_DONE;
+ }
+
+ static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
+@@ -720,15 +719,13 @@ static int chtls_close_listsrv_rpl(struc
+ if (rpl->status != CPL_ERR_NONE) {
+ pr_info("Unexpected CLOSE_LISTSRV_RPL status %u for STID %u\n",
+ rpl->status, stid);
+- return CPL_RET_BUF_DONE;
++ } else {
++ cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
++ sock_put(listen_ctx->lsk);
++ kfree(listen_ctx);
++ module_put(THIS_MODULE);
+ }
+-
+- cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
+- sock_put(listen_ctx->lsk);
+- kfree(listen_ctx);
+- module_put(THIS_MODULE);
+-
+- return 0;
++ return CPL_RET_BUF_DONE;
+ }
+
+ static void chtls_purge_wr_queue(struct sock *sk)
--- /dev/null
+From foo@baz Sat Oct 31 10:28:22 AM CET 2020
+From: Vinay Kumar Yadav <vinay.yadav@chelsio.com>
+Date: Fri, 23 Oct 2020 00:35:57 +0530
+Subject: [PATCH stable 5.8 08/22] chelsio/chtls: fix tls record info to user
+
+From: Vinay Kumar Yadav <vinay.yadav@chelsio.com>
+
+[ Upstream commit 4f3391ce8f5a69e7e6d66d0a3fc654eb6dbdc919 ]
+
+chtls_pt_recvmsg() receives an skb carrying a TLS header followed by
+skbs carrying data, so the data copy must be finalized whenever the
+next skb with a TLS header becomes available. Currently the TLS header
+of the record being copied is overwritten by the next available TLS
+header, which ends up corrupting the data in the user buffer. Fix it
+by finalizing the current record whenever the next skb contains a TLS
+header.
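+
+The receive queue being walked looks roughly like this (illustrative):
+
+	[skb: TLS hdr][skb: data][skb: data][skb: TLS hdr][skb: data] ...
+
+so the copy of one record must be finalized before the header of the
+following record is looked at.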
+
+v1->v2:
+- Improved commit message.
+
+Fixes: 17a7d24aa89d ("crypto: chtls - generic handling of data and hdr")
+Signed-off-by: Vinay Kumar Yadav <vinay.yadav@chelsio.com>
+Link: https://lore.kernel.org/r/20201022190556.21308-1-vinay.yadav@chelsio.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/crypto/chelsio/chtls/chtls_io.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/drivers/crypto/chelsio/chtls/chtls_io.c
++++ b/drivers/crypto/chelsio/chtls/chtls_io.c
+@@ -1549,6 +1549,7 @@ skip_copy:
+ tp->urg_data = 0;
+
+ if ((avail + offset) >= skb->len) {
++ struct sk_buff *next_skb;
+ if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) {
+ tp->copied_seq += skb->len;
+ hws->rcvpld = skb->hdr_len;
+@@ -1558,8 +1559,10 @@ skip_copy:
+ chtls_free_skb(sk, skb);
+ buffers_freed++;
+ hws->copied_seq = 0;
+- if (copied >= target &&
+- !skb_peek(&sk->sk_receive_queue))
++ next_skb = skb_peek(&sk->sk_receive_queue);
++ if (copied >= target && !next_skb)
++ break;
++ if (ULP_SKB_CB(next_skb)->flags & ULPCB_FLAG_TLS_HDR)
+ break;
+ }
+ } while (len > 0);
--- /dev/null
+From foo@baz Sat Oct 31 10:28:22 AM CET 2020
+From: Raju Rangoju <rajur@chelsio.com>
+Date: Fri, 23 Oct 2020 17:28:52 +0530
+Subject: [PATCH stable 5.8 09/22] cxgb4: set up filter action after rewrites
+
+From: Raju Rangoju <rajur@chelsio.com>
+
+[ Upstream commit 937d8420588421eaa5c7aa5c79b26b42abb288ef ]
+
+The current code sets up the filter action field before
+the rewrites are set up. When the action 'switch' is used
+with rewrites, this may result in the first few packets
+that get switched out not having the rewrites applied
+to them.
+
+So, make sure the filter action is set up along with the rewrites,
+or only after everything needed for the rewrites is in place.
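+
+In other words (pseudo-code, helper names made up for illustration only):
+
+	/* before: hardware may start switching before rewrites exist */
+	setup_action(f);		/* drop / steer / switch        */
+	setup_rewrites(f);		/* smac/dmac/vlan/nat rewrites  */
+
+	/* after: rewrites are in place before the action takes effect */
+	setup_rewrites(f);
+	setup_action(f);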
+
+Fixes: 12b276fbf6e0 ("cxgb4: add support to create hash filters")
+Signed-off-by: Raju Rangoju <rajur@chelsio.com>
+Link: https://lore.kernel.org/r/20201023115852.18262-1-rajur@chelsio.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 56 ++++++++++------------
+ drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h | 4 +
+ 2 files changed, 31 insertions(+), 29 deletions(-)
+
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+@@ -145,13 +145,13 @@ static int configure_filter_smac(struct
+ int err;
+
+ /* do a set-tcb for smac-sel and CWR bit.. */
+- err = set_tcb_tflag(adap, f, f->tid, TF_CCTRL_CWR_S, 1, 1);
+- if (err)
+- goto smac_err;
+-
+ err = set_tcb_field(adap, f, f->tid, TCB_SMAC_SEL_W,
+ TCB_SMAC_SEL_V(TCB_SMAC_SEL_M),
+ TCB_SMAC_SEL_V(f->smt->idx), 1);
++ if (err)
++ goto smac_err;
++
++ err = set_tcb_tflag(adap, f, f->tid, TF_CCTRL_CWR_S, 1, 1);
+ if (!err)
+ return 0;
+
+@@ -608,6 +608,7 @@ int set_filter_wr(struct adapter *adapte
+ FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
+ FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
+ FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
++ FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
+ FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
+ f->fs.newvlan == VLAN_REWRITE) |
+ FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
+@@ -625,7 +626,7 @@ int set_filter_wr(struct adapter *adapte
+ FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
+ FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
+ FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
+- fwr->smac_sel = 0;
++ fwr->smac_sel = f->smt->idx;
+ fwr->rx_chan_rx_rpl_iq =
+ htons(FW_FILTER_WR_RX_CHAN_V(0) |
+ FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
+@@ -1019,11 +1020,8 @@ static void mk_act_open_req6(struct filt
+ TX_QUEUE_V(f->fs.nat_mode) |
+ T5_OPT_2_VALID_F |
+ RX_CHANNEL_F |
+- CONG_CNTRL_V((f->fs.action == FILTER_DROP) |
+- (f->fs.dirsteer << 1)) |
+ PACE_V((f->fs.maskhash) |
+- ((f->fs.dirsteerhash) << 1)) |
+- CCTRL_ECN_V(f->fs.action == FILTER_SWITCH));
++ ((f->fs.dirsteerhash) << 1)));
+ }
+
+ static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb,
+@@ -1059,11 +1057,8 @@ static void mk_act_open_req(struct filte
+ TX_QUEUE_V(f->fs.nat_mode) |
+ T5_OPT_2_VALID_F |
+ RX_CHANNEL_F |
+- CONG_CNTRL_V((f->fs.action == FILTER_DROP) |
+- (f->fs.dirsteer << 1)) |
+ PACE_V((f->fs.maskhash) |
+- ((f->fs.dirsteerhash) << 1)) |
+- CCTRL_ECN_V(f->fs.action == FILTER_SWITCH));
++ ((f->fs.dirsteerhash) << 1)));
+ }
+
+ static int cxgb4_set_hash_filter(struct net_device *dev,
+@@ -1722,6 +1717,20 @@ void hash_filter_rpl(struct adapter *ada
+ }
+ return;
+ }
++ switch (f->fs.action) {
++ case FILTER_PASS:
++ if (f->fs.dirsteer)
++ set_tcb_tflag(adap, f, tid,
++ TF_DIRECT_STEER_S, 1, 1);
++ break;
++ case FILTER_DROP:
++ set_tcb_tflag(adap, f, tid, TF_DROP_S, 1, 1);
++ break;
++ case FILTER_SWITCH:
++ set_tcb_tflag(adap, f, tid, TF_LPBK_S, 1, 1);
++ break;
++ }
++
+ break;
+
+ default:
+@@ -1781,22 +1790,11 @@ void filter_rpl(struct adapter *adap, co
+ if (ctx)
+ ctx->result = 0;
+ } else if (ret == FW_FILTER_WR_FLT_ADDED) {
+- int err = 0;
+-
+- if (f->fs.newsmac)
+- err = configure_filter_smac(adap, f);
+-
+- if (!err) {
+- f->pending = 0; /* async setup completed */
+- f->valid = 1;
+- if (ctx) {
+- ctx->result = 0;
+- ctx->tid = idx;
+- }
+- } else {
+- clear_filter(adap, f);
+- if (ctx)
+- ctx->result = err;
++ f->pending = 0; /* async setup completed */
++ f->valid = 1;
++ if (ctx) {
++ ctx->result = 0;
++ ctx->tid = idx;
+ }
+ } else {
+ /* Something went wrong. Issue a warning about the
+--- a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
+@@ -42,6 +42,10 @@
+
+ #define TCB_T_FLAGS_W 1
+
++#define TF_DROP_S 22
++#define TF_DIRECT_STEER_S 23
++#define TF_LPBK_S 59
++
+ #define TF_CCTRL_ECE_S 60
+ #define TF_CCTRL_CWR_S 61
+ #define TF_CCTRL_RFR_S 62
--- /dev/null
+From foo@baz Sat Oct 31 10:28:22 AM CET 2020
+From: Masahiro Fujiwara <fujiwara.masahiro@gmail.com>
+Date: Tue, 27 Oct 2020 20:48:46 +0900
+Subject: [PATCH stable 5.8 10/22] gtp: fix an use-before-init in gtp_newlink()
+
+From: Masahiro Fujiwara <fujiwara.masahiro@gmail.com>
+
+[ Upstream commit 51467431200b91682b89d31317e35dcbca1469ce ]
+
+*_pdp_find() from gtp_encap_recv() would trigger a crash when a peer
+sends GTP packets while a new GTP device is still being created.
+
+RIP: 0010:gtp1_pdp_find.isra.0+0x68/0x90 [gtp]
+<SNIP>
+Call Trace:
+ <IRQ>
+ gtp_encap_recv+0xc2/0x2e0 [gtp]
+ ? gtp1_pdp_find.isra.0+0x90/0x90 [gtp]
+ udp_queue_rcv_one_skb+0x1fe/0x530
+ udp_queue_rcv_skb+0x40/0x1b0
+ udp_unicast_rcv_skb.isra.0+0x78/0x90
+ __udp4_lib_rcv+0x5af/0xc70
+ udp_rcv+0x1a/0x20
+ ip_protocol_deliver_rcu+0xc5/0x1b0
+ ip_local_deliver_finish+0x48/0x50
+ ip_local_deliver+0xe5/0xf0
+ ? ip_protocol_deliver_rcu+0x1b0/0x1b0
+
+gtp_encap_enable() should be called after gtp_hashtable_new(); otherwise
+*_pdp_find() will access the uninitialized hash table.
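+
+Ordering that triggered the crash, simplified from the old gtp_newlink():
+
+	gtp_encap_enable(gtp, data);	  /* UDP rx can now hit gtp_encap_recv() */
+		/* peer sends a GTP packet here */
+		gtp_encap_recv()
+		  gtp1_pdp_find()	  /* gtp->tid_hash is still NULL */
+	gtp_hashtable_new(gtp, hashsize); /* too late */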
+
+Fixes: 1e3a3abd8b28 ("gtp: make GTP sockets in gtp_newlink optional")
+Signed-off-by: Masahiro Fujiwara <fujiwara.masahiro@gmail.com>
+Link: https://lore.kernel.org/r/20201027114846.3924-1-fujiwara.masahiro@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/gtp.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/drivers/net/gtp.c
++++ b/drivers/net/gtp.c
+@@ -667,10 +667,6 @@ static int gtp_newlink(struct net *src_n
+
+ gtp = netdev_priv(dev);
+
+- err = gtp_encap_enable(gtp, data);
+- if (err < 0)
+- return err;
+-
+ if (!data[IFLA_GTP_PDP_HASHSIZE]) {
+ hashsize = 1024;
+ } else {
+@@ -681,12 +677,16 @@ static int gtp_newlink(struct net *src_n
+
+ err = gtp_hashtable_new(gtp, hashsize);
+ if (err < 0)
+- goto out_encap;
++ return err;
++
++ err = gtp_encap_enable(gtp, data);
++ if (err < 0)
++ goto out_hashtable;
+
+ err = register_netdevice(dev);
+ if (err < 0) {
+ netdev_dbg(dev, "failed to register new netdev %d\n", err);
+- goto out_hashtable;
++ goto out_encap;
+ }
+
+ gn = net_generic(dev_net(dev), gtp_net_id);
+@@ -697,11 +697,11 @@ static int gtp_newlink(struct net *src_n
+
+ return 0;
+
++out_encap:
++ gtp_encap_disable(gtp);
+ out_hashtable:
+ kfree(gtp->addr_hash);
+ kfree(gtp->tid_hash);
+-out_encap:
+- gtp_encap_disable(gtp);
+ return err;
+ }
+
--- /dev/null
+From foo@baz Sat Oct 31 10:28:22 AM CET 2020
+From: Ido Schimmel <idosch@nvidia.com>
+Date: Sat, 24 Oct 2020 16:37:32 +0300
+Subject: [PATCH stable 5.8 13/22] mlxsw: core: Fix memory leak on module removal
+
+From: Ido Schimmel <idosch@nvidia.com>
+
+[ Upstream commit adc80b6cfedff6dad8b93d46a5ea2775fd5af9ec ]
+
+Free the devlink instance during the teardown sequence in the non-reload
+case to avoid the following memory leak.
+
+unreferenced object 0xffff888232895000 (size 2048):
+ comm "modprobe", pid 1073, jiffies 4295568857 (age 164.871s)
+ hex dump (first 32 bytes):
+ 00 01 00 00 00 00 ad de 22 01 00 00 00 00 ad de ........".......
+ 10 50 89 32 82 88 ff ff 10 50 89 32 82 88 ff ff .P.2.....P.2....
+ backtrace:
+ [<00000000c704e9a6>] __kmalloc+0x13a/0x2a0
+ [<00000000ee30129d>] devlink_alloc+0xff/0x760
+ [<0000000092ab3e5d>] 0xffffffffa042e5b0
+ [<000000004f3f8a31>] 0xffffffffa042f6ad
+ [<0000000092800b4b>] 0xffffffffa0491df3
+ [<00000000c4843903>] local_pci_probe+0xcb/0x170
+ [<000000006993ded7>] pci_device_probe+0x2c2/0x4e0
+ [<00000000a8e0de75>] really_probe+0x2c5/0xf90
+ [<00000000d42ba75d>] driver_probe_device+0x1eb/0x340
+ [<00000000bcc95e05>] device_driver_attach+0x294/0x300
+ [<000000000e2bc177>] __driver_attach+0x167/0x2f0
+ [<000000007d44cd6e>] bus_for_each_dev+0x148/0x1f0
+ [<000000003cd5a91e>] driver_attach+0x45/0x60
+ [<000000000041ce51>] bus_add_driver+0x3b8/0x720
+ [<00000000f5215476>] driver_register+0x230/0x4e0
+ [<00000000d79356f5>] __pci_register_driver+0x190/0x200
+
+Fixes: a22712a96291 ("mlxsw: core: Fix devlink unregister flow")
+Signed-off-by: Ido Schimmel <idosch@nvidia.com>
+Reported-by: Vadim Pasternak <vadimp@nvidia.com>
+Tested-by: Oleksandr Shamray <oleksandrs@nvidia.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlxsw/core.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
+@@ -1111,6 +1111,8 @@ void mlxsw_core_bus_device_unregister(st
+ if (!reload)
+ devlink_resources_unregister(devlink, NULL);
+ mlxsw_core->bus->fini(mlxsw_core->bus_priv);
++ if (!reload)
++ devlink_free(devlink);
+
+ return;
+
--- /dev/null
+From foo@baz Sat Oct 31 10:28:22 AM CET 2020
+From: Aleksandr Nogikh <nogikh@google.com>
+Date: Wed, 28 Oct 2020 17:07:31 +0000
+Subject: [PATCH stable 5.8 14/22] netem: fix zero division in tabledist
+
+From: Aleksandr Nogikh <nogikh@google.com>
+
+[ Upstream commit eadd1befdd778a1eca57fad058782bd22b4db804 ]
+
+Currently it is possible to craft a special netlink RTM_NEWQDISC
+command that can result in jitter being equal to 0x80000000. It is
+enough to set the 32 bit jitter to 0x02000000 (it will later be
+multiplied by 2^6) or just set the 64 bit jitter via
+TCA_NETEM_JITTER64. This causes an overflow during the generation of
+uniformly distributed numbers in tabledist(), which in turn leads to
+division by zero (sigma != 0, but sigma * 2 is 0).
+
+The related fragment of code needs 32-bit division - see commit
+9b0ed89 ("netem: remove unnecessary 64 bit modulus"), so switching to
+64 bit is not an option.
+
+Fix the issue by keeping the value of jitter within the range that can
+be adequately handled by tabledist() - [0;INT_MAX]. As negative std
+deviation makes no sense, take the absolute value of the passed value
+and cap it at INT_MAX. Inside tabledist(), switch to unsigned 32 bit
+arithmetic in order to prevent overflows.
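+
+A stand-alone userspace illustration of the arithmetic (the real
+conversion and tabledist() live in net/sched/sch_netem.c):
+
+	#include <stdint.h>
+	#include <stdio.h>
+
+	int main(void)
+	{
+		int32_t sigma = (int64_t)0x02000000 << 6; /* 0x80000000 -> INT32_MIN */
+		uint32_t rnd = 12345;
+
+		/* 2 * sigma wraps to 0 in 32-bit arithmetic ...             */
+		printf("2 * sigma = %d\n", (int32_t)(2 * (uint32_t)sigma));
+		/* ... so tabledist()'s "rnd % (2 * sigma)" divides by zero. */
+
+		/* with jitter capped at INT_MAX the unsigned modulus works  */
+		sigma = INT32_MAX;
+		printf("%u\n", rnd % (2 * (uint32_t)sigma));
+		return 0;
+	}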
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Aleksandr Nogikh <nogikh@google.com>
+Reported-by: syzbot+ec762a6342ad0d3c0d8f@syzkaller.appspotmail.com
+Acked-by: Stephen Hemminger <stephen@networkplumber.org>
+Link: https://lore.kernel.org/r/20201028170731.1383332-1-aleksandrnogikh@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/sch_netem.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -330,7 +330,7 @@ static s64 tabledist(s64 mu, s32 sigma,
+
+ /* default uniform distribution */
+ if (dist == NULL)
+- return ((rnd % (2 * sigma)) + mu) - sigma;
++ return ((rnd % (2 * (u32)sigma)) + mu) - sigma;
+
+ t = dist->table[rnd % dist->size];
+ x = (sigma % NETEM_DIST_SCALE) * t;
+@@ -787,6 +787,10 @@ static void get_slot(struct netem_sched_
+ q->slot_config.max_packets = INT_MAX;
+ if (q->slot_config.max_bytes == 0)
+ q->slot_config.max_bytes = INT_MAX;
++
++ /* capping dist_jitter to the range acceptable by tabledist() */
++ q->slot_config.dist_jitter = min_t(__s64, INT_MAX, abs(q->slot_config.dist_jitter));
++
+ q->slot.packets_left = q->slot_config.max_packets;
+ q->slot.bytes_left = q->slot_config.max_bytes;
+ if (q->slot_config.min_delay | q->slot_config.max_delay |
+@@ -1011,6 +1015,9 @@ static int netem_change(struct Qdisc *sc
+ if (tb[TCA_NETEM_SLOT])
+ get_slot(q, tb[TCA_NETEM_SLOT]);
+
++ /* capping jitter to the range acceptable by tabledist() */
++ q->jitter = min_t(s64, abs(q->jitter), INT_MAX);
++
+ return ret;
+
+ get_table_failure:
--- /dev/null
+From foo@baz Sat Oct 31 10:28:22 AM CET 2020
+From: Heiner Kallweit <hkallweit1@gmail.com>
+Date: Thu, 29 Oct 2020 10:18:53 +0100
+Subject: [PATCH stable 5.8 18/22] r8169: fix issue with forced threading in combination with shared interrupts
+
+From: Heiner Kallweit <hkallweit1@gmail.com>
+
+[ Upstream commit 2734a24e6e5d18522fbf599135c59b82ec9b2c9e ]
+
+As reported by Serge, the IRQF_NO_THREAD flag causes an error if the
+interrupt is actually shared and the other driver(s) don't have this
+flag set. This situation can occur if a PCI(e) legacy interrupt is
+used in combination with forced threading.
+There's no good way to deal with this properly, therefore we have to
+remove the IRQF_NO_THREAD flag. To fix the original forced-threading
+issue, switch to napi_schedule().
+
+Fixes: 424a646e072a ("r8169: fix operation under forced interrupt threading")
+Link: https://www.spinics.net/lists/netdev/msg694960.html
+Reported-by: Serge Belyshev <belyshev@depni.sinp.msu.ru>
+Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
+Tested-by: Serge Belyshev <belyshev@depni.sinp.msu.ru>
+Link: https://lore.kernel.org/r/b5b53bfe-35ac-3768-85bf-74d1290cf394@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/realtek/r8169.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -6630,7 +6630,7 @@ static irqreturn_t rtl8169_interrupt(int
+ return IRQ_NONE;
+
+ rtl_irq_disable(tp);
+- napi_schedule_irqoff(&tp->napi);
++ napi_schedule(&tp->napi);
+
+ return IRQ_HANDLED;
+ }
+@@ -6886,7 +6886,7 @@ static int rtl_open(struct net_device *d
+ rtl_request_firmware(tp);
+
+ retval = request_irq(pci_irq_vector(pdev, 0), rtl8169_interrupt,
+- IRQF_NO_THREAD | IRQF_SHARED, dev->name, tp);
++ IRQF_SHARED, dev->name, tp);
+ if (retval < 0)
+ goto err_release_fw_2;
+
--- /dev/null
+From foo@baz Sat Oct 31 10:28:22 AM CET 2020
+From: Andrew Gabbasov <andrew_gabbasov@mentor.com>
+Date: Mon, 26 Oct 2020 05:21:30 -0500
+Subject: [PATCH stable 5.8 19/22] ravb: Fix bit fields checking in ravb_hwtstamp_get()
+
+From: Andrew Gabbasov <andrew_gabbasov@mentor.com>
+
+[ Upstream commit 68b9f0865b1ef545da180c57d54b82c94cb464a4 ]
+
+In the function ravb_hwtstamp_get() in ravb_main.c with the existing
+values for RAVB_RXTSTAMP_TYPE_V2_L2_EVENT (0x2) and RAVB_RXTSTAMP_TYPE_ALL
+(0x6)
+
+if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_V2_L2_EVENT)
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+else if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_ALL)
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+
+the branch for RAVB_RXTSTAMP_TYPE_ALL can never be reached: since ALL
+(0x6) also has the V2_L2_EVENT bit (0x2) set, the first test always
+matches.
+
+This issue can be verified with the 'hwtstamp_config' test program
+(tools/testing/selftests/net/hwtstamp_config.c). Setting the filter type
+to ALL and subsequently retrieving it gives an incorrect value:
+
+$ hwtstamp_config eth0 OFF ALL
+flags = 0
+tx_type = OFF
+rx_filter = ALL
+$ hwtstamp_config eth0
+flags = 0
+tx_type = OFF
+rx_filter = PTP_V2_L2_EVENT
+
+Correct this by converting the if-else chain to a switch statement.
+
+Fixes: c156633f1353 ("Renesas Ethernet AVB driver proper")
+Reported-by: Julia Lawall <julia.lawall@inria.fr>
+Signed-off-by: Andrew Gabbasov <andrew_gabbasov@mentor.com>
+Reviewed-by: Sergei Shtylyov <sergei.shtylyov@gmail.com>
+Link: https://lore.kernel.org/r/20201026102130.29368-1-andrew_gabbasov@mentor.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/renesas/ravb_main.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -1732,12 +1732,16 @@ static int ravb_hwtstamp_get(struct net_
+ config.flags = 0;
+ config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
+ HWTSTAMP_TX_OFF;
+- if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_V2_L2_EVENT)
++ switch (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE) {
++ case RAVB_RXTSTAMP_TYPE_V2_L2_EVENT:
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+- else if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_ALL)
++ break;
++ case RAVB_RXTSTAMP_TYPE_ALL:
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+- else
++ break;
++ default:
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
++ }
+
+ return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
arm64-link-with-z-norelro-regardless-of-config_relocatable.patch
x86-pci-fix-intel_mid_pci.c-build-error-when-acpi-is-not-enabled.patch
efivarfs-replace-invalid-slashes-with-exclamation-marks-in-dentries.patch
+chelsio-chtls-fix-deadlock-issue.patch
+chelsio-chtls-fix-memory-leaks-in-cpl-handlers.patch
+chelsio-chtls-fix-tls-record-info-to-user.patch
+gtp-fix-an-use-before-init-in-gtp_newlink.patch
+mlxsw-core-fix-memory-leak-on-module-removal.patch
+netem-fix-zero-division-in-tabledist.patch
+ravb-fix-bit-fields-checking-in-ravb_hwtstamp_get.patch
+tcp-prevent-low-rmem-stalls-with-so_rcvlowat.patch
+tipc-fix-memory-leak-caused-by-tipc_buf_append.patch
+r8169-fix-issue-with-forced-threading-in-combination-with-shared-interrupts.patch
+cxgb4-set-up-filter-action-after-rewrites.patch
--- /dev/null
+From foo@baz Sat Oct 31 10:28:22 AM CET 2020
+From: Arjun Roy <arjunroy@google.com>
+Date: Fri, 23 Oct 2020 11:47:09 -0700
+Subject: [PATCH stable 5.8 20/22] tcp: Prevent low rmem stalls with SO_RCVLOWAT.
+
+From: Arjun Roy <arjunroy@google.com>
+
+[ Upstream commit 435ccfa894e35e3d4a1799e6ac030e48a7b69ef5 ]
+
+With SO_RCVLOWAT, under memory pressure,
+it is possible to enter a state where:
+
+1. We have not received enough bytes to satisfy SO_RCVLOWAT.
+2. We have not entered buffer pressure (see tcp_rmem_pressure()).
+3. But, we do not have enough buffer space to accept more packets.
+
+In this case, we advertise 0 rwnd (due to #3) but the application does
+not drain the receive queue (no wakeup because of #1 and #2) so the
+flow stalls.
+
+Modify the heuristic for SO_RCVLOWAT so that a wakeup is forced whenever
+we are advertising rwnd <= rcv_mss, to prevent the stall.
+
+Without this patch, setting tcp_rmem to 6143 and disabling TCP
+autotune causes a stalled flow. With this patch, no stall occurs. This
+is with RPC-style traffic with large messages.
+
+Fixes: 03f45c883c6f ("tcp: avoid extra wakeups for SO_RCVLOWAT users")
+Signed-off-by: Arjun Roy <arjunroy@google.com>
+Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
+Acked-by: Neal Cardwell <ncardwell@google.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Link: https://lore.kernel.org/r/20201023184709.217614-1-arjunroy.kdev@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp.c | 2 ++
+ net/ipv4/tcp_input.c | 3 ++-
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -495,6 +495,8 @@ static inline bool tcp_stream_is_readabl
+ return true;
+ if (tcp_rmem_pressure(sk))
+ return true;
++ if (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss)
++ return true;
+ }
+ if (sk->sk_prot->stream_memory_read)
+ return sk->sk_prot->stream_memory_read(sk);
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -4704,7 +4704,8 @@ void tcp_data_ready(struct sock *sk)
+ int avail = tp->rcv_nxt - tp->copied_seq;
+
+ if (avail < sk->sk_rcvlowat && !tcp_rmem_pressure(sk) &&
+- !sock_flag(sk, SOCK_DONE))
++ !sock_flag(sk, SOCK_DONE) &&
++ tcp_receive_window(tp) > inet_csk(sk)->icsk_ack.rcv_mss)
+ return;
+
+ sk->sk_data_ready(sk);
--- /dev/null
+From foo@baz Sat Oct 31 10:28:22 AM CET 2020
+From: Tung Nguyen <tung.q.nguyen@dektech.com.au>
+Date: Tue, 27 Oct 2020 10:24:03 +0700
+Subject: [PATCH stable 5.8 21/22] tipc: fix memory leak caused by tipc_buf_append()
+
+From: Tung Nguyen <tung.q.nguyen@dektech.com.au>
+
+[ Upstream commit ceb1eb2fb609c88363e06618b8d4bbf7815a4e03 ]
+
+Commit ed42989eab57 ("tipc: fix the skb_unshare() in tipc_buf_append()")
+replaced skb_unshare() with skb_copy() to not reduce the data reference
+counter of the original skb intentionally. This is not the correct
+way to handle the cloned skb because it causes a memory leak in the
+following 2 cases:
+ 1/ Sending multicast messages via broadcast link
+ The original skb list is cloned to the local skb list for local
+ destination. After that, the data reference counter of each skb
+ in the original list has the value of 2. This causes each skb not
+ to be freed after receiving ACK:
+ tipc_link_advance_transmq()
+ {
+ ...
+ /* release skb */
+ __skb_unlink(skb, &l->transmq);
+ kfree_skb(skb); <-- memory exists after being freed
+ }
+
+ 2/ Sending multicast messages via replicast link
+ Similar to the above case, each skb cannot be freed after purging
+ the skb list:
+ tipc_mcast_xmit()
+ {
+ ...
+ __skb_queue_purge(pkts); <-- memory exists after being freed
+ }
+
+This commit fixes this issue by using skb_unshare() instead. Besides,
+to avoid the use-after-free error reported by KASAN, the pointer to the
+fragment is set to NULL before calling skb_unshare(), to make sure that
+the original skb is not freed a second time through that pointer in
+case skb_unshare() returns NULL after having already freed the fragment.
+
+Fixes: ed42989eab57 ("tipc: fix the skb_unshare() in tipc_buf_append()")
+Acked-by: Jon Maloy <jmaloy@redhat.com>
+Reported-by: Thang Hoang Ngo <thang.h.ngo@dektech.com.au>
+Signed-off-by: Tung Nguyen <tung.q.nguyen@dektech.com.au>
+Reviewed-by: Xin Long <lucien.xin@gmail.com>
+Acked-by: Cong Wang <xiyou.wangcong@gmail.com>
+Link: https://lore.kernel.org/r/20201027032403.1823-1-tung.q.nguyen@dektech.com.au
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tipc/msg.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/net/tipc/msg.c
++++ b/net/tipc/msg.c
+@@ -140,12 +140,11 @@ int tipc_buf_append(struct sk_buff **hea
+ if (fragid == FIRST_FRAGMENT) {
+ if (unlikely(head))
+ goto err;
+- if (skb_cloned(frag))
+- frag = skb_copy(frag, GFP_ATOMIC);
++ *buf = NULL;
++ frag = skb_unshare(frag, GFP_ATOMIC);
+ if (unlikely(!frag))
+ goto err;
+ head = *headbuf = frag;
+- *buf = NULL;
+ TIPC_SKB_CB(head)->tail = NULL;
+ if (skb_is_nonlinear(head)) {
+ skb_walk_frags(head, tail) {