--- /dev/null
+From ce45ffb815e8e238f05de1630be3969b6bb15e4e Mon Sep 17 00:00:00 2001
+From: Jason Xing <kernelxing@tencent.com>
+Date: Wed, 8 Feb 2023 10:43:33 +0800
+Subject: i40e: add double of VLAN header when computing the max MTU
+
+From: Jason Xing <kernelxing@tencent.com>
+
+commit ce45ffb815e8e238f05de1630be3969b6bb15e4e upstream.
+
+Take the second VLAN HLEN into account when computing the maximum MTU
+size, as other drivers do.
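+
+For illustration (not part of the change), the worst-case frame for a
+given MTU can be sketched in plain C; the driver's I40E_PACKET_HDR_PAD
+macro is expected to cover the same sum:
+
+/* Illustrative sketch: the wire frame may carry two VLAN tags (QinQ),
+ * so the largest frame for a given MTU is the MTU plus the Ethernet
+ * header, the FCS and two VLAN headers. The constants mirror the
+ * kernel's <linux/if_ether.h> and <linux/if_vlan.h> values. */
+#define ETH_HLEN    14  /* Ethernet header */
+#define ETH_FCS_LEN 4   /* frame check sequence */
+#define VLAN_HLEN   4   /* one 802.1Q tag */
+
+static int max_frame_for_mtu(int new_mtu)
+{
+        return new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
+}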
+
+Fixes: 0c8493d90b6b ("i40e: add XDP support for pass and drop actions")
+Signed-off-by: Jason Xing <kernelxing@tencent.com>
+Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
+Tested-by: Chandan Kumar Rout <chandanx.rout@intel.com> (A Contingent Worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -2789,7 +2789,7 @@ static int i40e_change_mtu(struct net_de
+ struct i40e_pf *pf = vsi->back;
+
+ if (i40e_enabled_xdp_vsi(vsi)) {
+- int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
++ int frame_size = new_mtu + I40E_PACKET_HDR_PAD;
+
+ if (frame_size > i40e_max_xdp_frame_size(vsi))
+ return -EINVAL;
--- /dev/null
+From f9cd6a4418bac6a046ee78382423b1ae7565fb24 Mon Sep 17 00:00:00 2001
+From: Jason Xing <kernelxing@tencent.com>
+Date: Wed, 8 Feb 2023 10:43:32 +0800
+Subject: ixgbe: allow to increase MTU to 3K with XDP enabled
+
+From: Jason Xing <kernelxing@tencent.com>
+
+commit f9cd6a4418bac6a046ee78382423b1ae7565fb24 upstream.
+
+Recently I encountered a case where I could not increase the MTU size
+directly from 1500 to a much bigger value with XDP enabled if the
+server was equipped with an ixgbe card, which happened on thousands of
+servers in our production environment. After applying this patch, the
+maximum MTU size can be raised to 3K.
+
+This patch follows the MTU-changing behavior of the i40e/ice drivers.
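+
+For illustration (not part of the change), assuming IXGBE_RXBUFFER_3K is
+3072 bytes, the largest MTU that still fits with XDP enabled works out
+as follows:
+
+/* Illustrative sketch: a 3072-byte buffer minus 22 bytes of L2 overhead
+ * (14-byte Ethernet header + 4-byte FCS + 4-byte VLAN tag) leaves room
+ * for an MTU of up to 3050 bytes. */
+#define ETH_HLEN    14
+#define ETH_FCS_LEN 4
+#define VLAN_HLEN   4
+#define RXBUFFER_3K 3072    /* stand-in for IXGBE_RXBUFFER_3K */
+
+static int max_xdp_mtu(void)
+{
+        return RXBUFFER_3K - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
+}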
+
+References:
+[1] commit 23b44513c3e6 ("ice: allow 3k MTU for XDP")
+[2] commit 0c8493d90b6b ("i40e: add XDP support for pass and drop actions")
+
+Fixes: fabf1bce103a ("ixgbe: Prevent unsupported configurations with XDP")
+Signed-off-by: Jason Xing <kernelxing@tencent.com>
+Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
+Tested-by: Chandan Kumar Rout <chandanx.rout@intel.com> (A Contingent Worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 25 ++++++++++++++++---------
+ 1 file changed, 16 insertions(+), 9 deletions(-)
+
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -6730,6 +6730,18 @@ static void ixgbe_free_all_rx_resources(
+ }
+
+ /**
++ * ixgbe_max_xdp_frame_size - returns the maximum allowed frame size for XDP
++ * @adapter: device handle, pointer to adapter
++ */
++static int ixgbe_max_xdp_frame_size(struct ixgbe_adapter *adapter)
++{
++ if (PAGE_SIZE >= 8192 || adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
++ return IXGBE_RXBUFFER_2K;
++ else
++ return IXGBE_RXBUFFER_3K;
++}
++
++/**
+ * ixgbe_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+@@ -6740,18 +6752,13 @@ static int ixgbe_change_mtu(struct net_d
+ {
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+- if (adapter->xdp_prog) {
++ if (ixgbe_enabled_xdp_adapter(adapter)) {
+ int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN +
+ VLAN_HLEN;
+- int i;
+-
+- for (i = 0; i < adapter->num_rx_queues; i++) {
+- struct ixgbe_ring *ring = adapter->rx_ring[i];
+
+- if (new_frame_size > ixgbe_rx_bufsz(ring)) {
+- e_warn(probe, "Requested MTU size is not supported with XDP\n");
+- return -EINVAL;
+- }
++ if (new_frame_size > ixgbe_max_xdp_frame_size(adapter)) {
++ e_warn(probe, "Requested MTU size is not supported with XDP\n");
++ return -EINVAL;
+ }
+ }
+
--- /dev/null
+From d61615c366a489646a1bfe5b33455f916762d5f4 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl>
+Date: Wed, 8 Feb 2023 10:16:37 +0100
+Subject: net: bgmac: fix BCM5358 support by setting correct flags
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Rafał Miłecki <rafal@milecki.pl>
+
+commit d61615c366a489646a1bfe5b33455f916762d5f4 upstream.
+
+Code blocks handling BCMA_CHIP_ID_BCM5357 and BCMA_CHIP_ID_BCM53572 were
+incorrectly unified. Chip package values are not unique and cannot be
+checked independently. They are meaningful only in the context of a
+given chip.
+
+The BCM5358 and BCM47188 packages share the same value but belong to
+different chips. The code unification resulted in BCM5358 being treated
+as BCM47188, which broke its initialization.
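+
+For illustration (not part of the change), with made-up ID values, the
+point is that a package value identifies nothing on its own and must be
+matched together with the chip ID:
+
+/* Illustrative sketch with placeholder values (the real constants live
+ * in <linux/bcma/bcma.h>): a package value can be reused by different
+ * chips, so a feature check must test the (chip, package) pair. */
+struct chip_info {
+        unsigned int id;    /* e.g. BCMA_CHIP_ID_BCM5357 */
+        unsigned int pkg;   /* e.g. BCMA_PKG_ID_BCM5358 */
+};
+
+static int is_bcm5358_pkg(const struct chip_info *ci)
+{
+        /* 0x5357 and 9 are placeholders; testing ci->pkg alone could
+         * also match a BCM47188 package, which belongs to BCM53572. */
+        return ci->id == 0x5357 && ci->pkg == 9;
+}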
+
+Link: https://github.com/openwrt/openwrt/issues/8278
+Fixes: cb1b0f90acfe ("net: ethernet: bgmac: unify code of the same family")
+Cc: Jon Mason <jdmason@kudzu.us>
+Signed-off-by: Rafał Miłecki <rafal@milecki.pl>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Link: https://lore.kernel.org/r/20230208091637.16291-1-zajec5@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/broadcom/bgmac-bcma.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
++++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
+@@ -228,12 +228,12 @@ static int bgmac_probe(struct bcma_devic
+ bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
+ bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1;
+ bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY;
+- if (ci->pkg == BCMA_PKG_ID_BCM47188 ||
+- ci->pkg == BCMA_PKG_ID_BCM47186) {
++ if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
++ (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) {
+ bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
+ bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
+ }
+- if (ci->pkg == BCMA_PKG_ID_BCM5358)
++ if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM5358)
+ bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_EPHYRMII;
+ break;
+ case BCMA_CHIP_ID_BCM53573:
--- /dev/null
+From 0ed577e7e8e508c24e22ba07713ecc4903e147c3 Mon Sep 17 00:00:00 2001
+From: Siddharth Vadapalli <s-vadapalli@ti.com>
+Date: Thu, 9 Feb 2023 14:14:32 +0530
+Subject: net: ethernet: ti: am65-cpsw: Add RX DMA Channel Teardown Quirk
+
+From: Siddharth Vadapalli <s-vadapalli@ti.com>
+
+commit 0ed577e7e8e508c24e22ba07713ecc4903e147c3 upstream.
+
+In TI's AM62x/AM64x SoCs, successful teardown of RX DMA Channel raises an
+interrupt. The process of servicing this interrupt involves flushing all
+pending RX DMA descriptors and clearing the teardown completion marker
+(TDCM). The am65_cpsw_nuss_rx_packets() function invoked from the RX
+NAPI callback services the interrupt. Thus, it is necessary to wait for
+this handler to run, drain all packets and clear TDCM, before calling
+napi_disable() in am65_cpsw_nuss_common_stop() function post channel
+teardown. If napi_disable() executes before ensuring that TDCM is
+cleared, the TDCM remains set when the interfaces are down, resulting in
+an interrupt storm when the interfaces are brought up again.
+
+Since the interrupt raised to indicate the RX DMA Channel teardown is
+specific to the AM62x and AM64x SoCs, add a quirk for it.
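+
+For illustration (not part of the change), the required ordering in the
+stop path can be sketched as follows, with the driver and K3 DMA helpers
+stubbed out:
+
+/* Illustrative sketch: do not disable RX NAPI until the NAPI handler
+ * has drained the descriptors and cleared TDCM. All helpers below are
+ * stand-ins for the real driver/K3 UDMA APIs. */
+struct rx_stop {
+        int tdcm_cleared;   /* set by the (stubbed) NAPI handler */
+};
+
+static void start_rx_teardown(struct rx_stop *s) { (void)s; }
+static int wait_rx_teardown(struct rx_stop *s, int timeout_ms)
+{
+        (void)timeout_ms;
+        return s->tdcm_cleared; /* 0 on timeout */
+}
+static void napi_disable_rx(struct rx_stop *s) { (void)s; }
+
+static void common_stop_rx(struct rx_stop *s, int has_tdown_irq_quirk)
+{
+        start_rx_teardown(s);
+
+        /* Only AM62x/AM64x raise the teardown IRQ, so the wait is gated
+         * on the quirk; a timeout is merely reported and stop continues. */
+        if (has_tdown_irq_quirk)
+                wait_rx_teardown(s, 1000);
+
+        /* Safe only once TDCM has been drained and cleared. */
+        napi_disable_rx(s);
+}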
+
+Fixes: 4f7cce272403 ("net: ethernet: ti: am65-cpsw: add support for am64x cpsw3g")
+Co-developed-by: Vignesh Raghavendra <vigneshr@ti.com>
+Signed-off-by: Vignesh Raghavendra <vigneshr@ti.com>
+Signed-off-by: Siddharth Vadapalli <s-vadapalli@ti.com>
+Reviewed-by: Roger Quadros <rogerq@kernel.org>
+Link: https://lore.kernel.org/r/20230209084432.189222-1-s-vadapalli@ti.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/ti/am65-cpsw-nuss.c | 12 +++++++++++-
+ drivers/net/ethernet/ti/am65-cpsw-nuss.h | 1 +
+ 2 files changed, 12 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -564,7 +564,15 @@ static int am65_cpsw_nuss_common_stop(st
+ k3_udma_glue_disable_tx_chn(common->tx_chns[i].tx_chn);
+ }
+
++ reinit_completion(&common->tdown_complete);
+ k3_udma_glue_tdown_rx_chn(common->rx_chns.rx_chn, true);
++
++ if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) {
++ i = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000));
++ if (!i)
++ dev_err(common->dev, "rx teardown timeout\n");
++ }
++
+ napi_disable(&common->napi_rx);
+
+ for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
+@@ -786,6 +794,8 @@ static int am65_cpsw_nuss_rx_packets(str
+
+ if (cppi5_desc_is_tdcm(desc_dma)) {
+ dev_dbg(dev, "%s RX tdown flow: %u\n", __func__, flow_idx);
++ if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ)
++ complete(&common->tdown_complete);
+ return 0;
+ }
+
+@@ -2609,7 +2619,7 @@ static const struct am65_cpsw_pdata j721
+ };
+
+ static const struct am65_cpsw_pdata am64x_cpswxg_pdata = {
+- .quirks = 0,
++ .quirks = AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ,
+ .ale_dev_id = "am64-cpswxg",
+ .fdqring_mode = K3_RINGACC_RING_MODE_RING,
+ };
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+@@ -84,6 +84,7 @@ struct am65_cpsw_rx_chn {
+ };
+
+ #define AM65_CPSW_QUIRK_I2027_NO_TX_CSUM BIT(0)
++#define AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ BIT(1)
+
+ struct am65_cpsw_pdata {
+ u32 quirks;
--- /dev/null
+From ee059170b1f7e94e55fa6cadee544e176a6e59c2 Mon Sep 17 00:00:00 2001
+From: Pedro Tammela <pctammela@mojatatu.com>
+Date: Thu, 9 Feb 2023 11:37:39 -0300
+Subject: net/sched: tcindex: update imperfect hash filters respecting rcu
+
+From: Pedro Tammela <pctammela@mojatatu.com>
+
+commit ee059170b1f7e94e55fa6cadee544e176a6e59c2 upstream.
+
+The imperfect hash area can be updated while packets are traversing it,
+which will cause a use-after-free when 'tcf_exts_exec()' is called
+with the destroyed tcf_exts.
+
+CPU 0:               CPU 1:
+tcindex_set_parms    tcindex_classify
+tcindex_lookup
+                     tcindex_lookup
+tcf_exts_change
+                     tcf_exts_exec [UAF]
+
+Stop operating on the shared area directly by using a local copy,
+and update the filter with 'rcu_replace_pointer()'. Delete the old
+filter version only after an RCU grace period has elapsed.
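+
+For illustration (not part of the change), the same publish-then-defer
+pattern, sketched with userspace RCU (liburcu) standing in for the
+kernel's RCU API and for the deferred tcf_queue_work() teardown:
+
+/* Illustrative sketch: update a hash-bucket entry under RCU by linking
+ * in the new filter first and freeing the old one only after a grace
+ * period, so concurrent readers never see freed memory. Readers are
+ * assumed to walk the chain inside rcu_read_lock()/rcu_read_unlock(). */
+#include <stdlib.h>
+#include <urcu.h>
+
+struct filter {
+        int key;
+        struct filter *next;
+        /* ... results / actions ... */
+};
+
+static void replace_filter(struct filter **bucket, struct filter *nf, int key)
+{
+        struct filter **fp, *f;
+
+        /* find the old entry; the caller already verified it exists */
+        for (fp = bucket; (f = rcu_dereference(*fp)) != NULL; fp = &f->next)
+                if (f->key == key)
+                        break;
+        if (!f)
+                return;
+
+        nf->next = f->next;             /* keep the rest of the chain */
+        rcu_assign_pointer(*fp, nf);    /* readers now see the new filter */
+
+        synchronize_rcu();              /* wait out in-flight readers ... */
+        free(f);                        /* ... before releasing the old one */
+}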
+
+Fixes: 9b0d4446b569 ("net: sched: avoid atomic swap in tcf_exts_change")
+Reported-by: valis <sec@valis.email>
+Suggested-by: valis <sec@valis.email>
+Signed-off-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: Pedro Tammela <pctammela@mojatatu.com>
+Link: https://lore.kernel.org/r/20230209143739.279867-1-pctammela@mojatatu.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/cls_tcindex.c | 34 ++++++++++++++++++++++++++++++----
+ 1 file changed, 30 insertions(+), 4 deletions(-)
+
+--- a/net/sched/cls_tcindex.c
++++ b/net/sched/cls_tcindex.c
+@@ -12,6 +12,7 @@
+ #include <linux/errno.h>
+ #include <linux/slab.h>
+ #include <linux/refcount.h>
++#include <linux/rcupdate.h>
+ #include <net/act_api.h>
+ #include <net/netlink.h>
+ #include <net/pkt_cls.h>
+@@ -338,6 +339,7 @@ tcindex_set_parms(struct net *net, struc
+ struct tcf_result cr = {};
+ int err, balloc = 0;
+ struct tcf_exts e;
++ bool update_h = false;
+
+ err = tcf_exts_init(&e, net, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
+ if (err < 0)
+@@ -455,10 +457,13 @@ tcindex_set_parms(struct net *net, struc
+ }
+ }
+
+- if (cp->perfect)
++ if (cp->perfect) {
+ r = cp->perfect + handle;
+- else
+- r = tcindex_lookup(cp, handle) ? : &new_filter_result;
++ } else {
++ /* imperfect area is updated in-place using rcu */
++ update_h = !!tcindex_lookup(cp, handle);
++ r = &new_filter_result;
++ }
+
+ if (r == &new_filter_result) {
+ f = kzalloc(sizeof(*f), GFP_KERNEL);
+@@ -484,7 +489,28 @@ tcindex_set_parms(struct net *net, struc
+
+ rcu_assign_pointer(tp->root, cp);
+
+- if (r == &new_filter_result) {
++ if (update_h) {
++ struct tcindex_filter __rcu **fp;
++ struct tcindex_filter *cf;
++
++ f->result.res = r->res;
++ tcf_exts_change(&f->result.exts, &r->exts);
++
++ /* imperfect area bucket */
++ fp = cp->h + (handle % cp->hash);
++
++ /* lookup the filter, guaranteed to exist */
++ for (cf = rcu_dereference_bh_rtnl(*fp); cf;
++ fp = &cf->next, cf = rcu_dereference_bh_rtnl(*fp))
++ if (cf->key == handle)
++ break;
++
++ f->next = cf->next;
++
++ cf = rcu_replace_pointer(*fp, f, 1);
++ tcf_exts_get_net(&cf->result.exts);
++ tcf_queue_work(&cf->rwork, tcindex_destroy_fexts_work);
++ } else if (r == &new_filter_result) {
+ struct tcindex_filter *nfp;
+ struct tcindex_filter __rcu **fp;
+
--- /dev/null
+From a5b21d8d791cd4db609d0bbcaa9e0c7e019888d1 Mon Sep 17 00:00:00 2001
+From: Andrew Morton <akpm@linux-foundation.org>
+Date: Thu, 2 Feb 2023 18:07:35 -0800
+Subject: revert "squashfs: harden sanity check in squashfs_read_xattr_id_table"
+
+From: Andrew Morton <akpm@linux-foundation.org>
+
+commit a5b21d8d791cd4db609d0bbcaa9e0c7e019888d1 upstream.
+
+This fix was nacked by Phillip, for the reasons identified in the email linked
+below.
+
+Link: https://lkml.kernel.org/r/68f15d67-8945-2728-1f17-5b53a80ec52d@squashfs.org.uk
+Fixes: 72e544b1b28325 ("squashfs: harden sanity check in squashfs_read_xattr_id_table")
+Cc: Alexey Khoroshilov <khoroshilov@ispras.ru>
+Cc: Fedor Pchelkin <pchelkin@ispras.ru>
+Cc: Phillip Lougher <phillip@squashfs.org.uk>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/squashfs/xattr_id.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/squashfs/xattr_id.c
++++ b/fs/squashfs/xattr_id.c
+@@ -76,7 +76,7 @@ __le64 *squashfs_read_xattr_id_table(str
+ /* Sanity check values */
+
+ /* there is always at least one xattr id */
+- if (*xattr_ids <= 0)
++ if (*xattr_ids == 0)
+ return ERR_PTR(-EINVAL);
+
+ len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
--- /dev/null
+From a1221703a0f75a9d81748c516457e0fc76951496 Mon Sep 17 00:00:00 2001
+From: Pietro Borrello <borrello@diag.uniroma1.it>
+Date: Thu, 9 Feb 2023 12:13:05 +0000
+Subject: sctp: sctp_sock_filter(): avoid list_entry() on possibly empty list
+
+From: Pietro Borrello <borrello@diag.uniroma1.it>
+
+commit a1221703a0f75a9d81748c516457e0fc76951496 upstream.
+
+Use list_is_first() to check whether tsp->asoc matches the first
+element of ep->asocs, as the list is not guaranteed to have an entry.
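+
+For illustration (not part of the change), list_is_first() reduces to a
+pointer comparison against the list head, so it never dereferences an
+entry that may not exist:
+
+/* Illustrative sketch mirroring the kernel's list_is_first(): the first
+ * entry is the one whose ->prev points back at the head. Unlike
+ * list_entry() on head->next, this stays a pure pointer comparison and
+ * is safe even when the list is empty. */
+struct list_head {
+        struct list_head *next, *prev;
+};
+
+static int list_is_first(const struct list_head *list,
+                         const struct list_head *head)
+{
+        return list->prev == head;
+}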
+
+Fixes: 8f840e47f190 ("sctp: add the sctp_diag.c file")
+Signed-off-by: Pietro Borrello <borrello@diag.uniroma1.it>
+Acked-by: Xin Long <lucien.xin@gmail.com>
+Link: https://lore.kernel.org/r/20230208-sctp-filter-v2-1-6e1f4017f326@diag.uniroma1.it
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sctp/diag.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/net/sctp/diag.c
++++ b/net/sctp/diag.c
+@@ -343,11 +343,9 @@ static int sctp_sock_filter(struct sctp_
+ struct sctp_comm_param *commp = p;
+ struct sock *sk = ep->base.sk;
+ const struct inet_diag_req_v2 *r = commp->r;
+- struct sctp_association *assoc =
+- list_entry(ep->asocs.next, struct sctp_association, asocs);
+
+ /* find the ep only once through the transports by this condition */
+- if (tsp->asoc != assoc)
++ if (!list_is_first(&tsp->asoc->asocs, &ep->asocs))
+ return 0;
+
+ if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family)
selftest-lkdtm-skip-stack-entropy-test-if-lkdtm-is-not-available.patch
revert-mm-always-release-pages-to-the-buddy-allocator-in-memblock_free_late.patch
net-fix-unwanted-sign-extension-in-netdev_stats_to_stats64.patch
+revert-squashfs-harden-sanity-check-in-squashfs_read_xattr_id_table.patch
+ixgbe-allow-to-increase-mtu-to-3k-with-xdp-enabled.patch
+i40e-add-double-of-vlan-header-when-computing-the-max-mtu.patch
+net-bgmac-fix-bcm5358-support-by-setting-correct-flags.patch
+net-ethernet-ti-am65-cpsw-add-rx-dma-channel-teardown-quirk.patch
+sctp-sctp_sock_filter-avoid-list_entry-on-possibly-empty-list.patch
+net-sched-tcindex-update-imperfect-hash-filters-respecting-rcu.patch