--- /dev/null
+From fd4bc5d9d5c2f39b406cde4ca4dcc2e5c08a06b0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Aug 2022 18:03:46 +0800
+Subject: af_key: Do not call xfrm_probe_algs in parallel
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+[ Upstream commit ba953a9d89a00c078b85f4b190bc1dde66fe16b5 ]
+
+When namespace support was added to xfrm/af_key, it caused the
+previously single-threaded call to xfrm_probe_algs to become
+multi-threaded. This is buggy and needs to be fixed with a mutex.
+
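+For reference, the fix is the usual "serialize a non-reentrant helper
+with a mutex" pattern. A minimal sketch (assuming pfkey_mutex is an
+existing static mutex in af_key.c, as used by the hunk below):
+
+	/* xfrm_probe_algs() updates global algorithm availability state,
+	 * so it must not run concurrently from several pfkey sockets.
+	 */
+	mutex_lock(&pfkey_mutex);
+	xfrm_probe_algs();
+	supp_skb = compose_sadb_supported(hdr, GFP_KERNEL | __GFP_ZERO);
+	mutex_unlock(&pfkey_mutex);
+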
+Reported-by: Abhishek Shah <abhishek.shah@columbia.edu>
+Fixes: 283bc9f35bbb ("xfrm: Namespacify xfrm state/policy locks")
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/key/af_key.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index fb16d7c4e1b8d..20e73643b9c89 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -1697,9 +1697,12 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sad
+ pfk->registered |= (1<<hdr->sadb_msg_satype);
+ }
+
++ mutex_lock(&pfkey_mutex);
+ xfrm_probe_algs();
+
+ supp_skb = compose_sadb_supported(hdr, GFP_KERNEL | __GFP_ZERO);
++ mutex_unlock(&pfkey_mutex);
++
+ if (!supp_skb) {
+ if (hdr->sadb_msg_satype != SADB_SATYPE_UNSPEC)
+ pfk->registered &= ~(1<<hdr->sadb_msg_satype);
+--
+2.35.1
+
--- /dev/null
+From 8b0db0dff2f39452e2a48df2fbd3e4b7f6af76ef Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 22 Aug 2022 11:06:54 -0400
+Subject: bnxt_en: fix LRO/GRO_HW features in ndo_fix_features callback
+
+From: Vikas Gupta <vikas.gupta@broadcom.com>
+
+[ Upstream commit 366c304741729e64d778c80555d9eb422cf5cc89 ]
+
+LRO/GRO_HW should be disabled if there is an attached XDP program.
+BNXT_FLAG_TPA reflects the current LRO/GRO_HW setting, so keying off
+BNXT_FLAG_TPA to disable LRO/GRO_HW causes these features to remain
+permanently disabled once they have been disabled.
+
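+As a rough sketch of the intent (illustrative only, trimmed to the
+relevant checks; bp->xdp_prog and the flag names are taken from the
+hunk below):
+
+	static netdev_features_t bnxt_fix_features(struct net_device *dev,
+						   netdev_features_t features)
+	{
+		struct bnxt *bp = netdev_priv(dev);
+
+		/* Key off the live state (no aggregation rings or an XDP
+		 * program attached) instead of the cached BNXT_FLAG_TPA,
+		 * which would latch LRO/GRO_HW off permanently.
+		 */
+		if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
+			features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
+
+		return features;
+	}
+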
+Fixes: 1dc4c557bfed ("bnxt: adding bnxt_xdp_build_skb to build skb from multibuffer xdp_buff")
+Signed-off-by: Vikas Gupta <vikas.gupta@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index cf9b00576ed36..964354536f9ce 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -11183,10 +11183,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
+ if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
+ features &= ~NETIF_F_NTUPLE;
+
+- if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
+- features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
+-
+- if (!(bp->flags & BNXT_FLAG_TPA))
++ if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
+ features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
+
+ if (!(features & NETIF_F_GRO))
+--
+2.35.1
+
--- /dev/null
+From dc4b8b5d912bd608f5c495a57292016e0103173a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 22 Aug 2022 11:06:53 -0400
+Subject: bnxt_en: fix NQ resource accounting during vf creation on 57500 chips
+
+From: Vikas Gupta <vikas.gupta@broadcom.com>
+
+[ Upstream commit 09a89cc59ad67794a11e1d3dd13c5b3172adcc51 ]
+
+There are 2 issues:
+
+1. We should decrement hw_resc->max_nqs instead of hw_resc->max_irqs
+ with the number of NQs assigned to the VFs. The IRQs are fixed
+ on each function and cannot be re-assigned. Only the NQs are being
+ assigned to the VFs.
+
+2. vf_msix is the total number of NQs to be assigned to the VFs, so
+   we should subtract vf_msix from hw_resc->max_nqs.
+
+Fixes: b16b68918674 ("bnxt_en: Add SR-IOV support for 57500 chips.")
+Signed-off-by: Vikas Gupta <vikas.gupta@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+index a1a2c7a64fd58..c9cf0569451a2 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+@@ -623,7 +623,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
+ hw_resc->max_stat_ctxs -= le16_to_cpu(req->min_stat_ctx) * n;
+ hw_resc->max_vnics -= le16_to_cpu(req->min_vnics) * n;
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+- hw_resc->max_irqs -= vf_msix * n;
++ hw_resc->max_nqs -= vf_msix;
+
+ rc = pf->active_vfs;
+ }
+--
+2.35.1
+
--- /dev/null
+From 8a9efe0033d55398c553d3ca51d512f0c778fde8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 22 Aug 2022 11:06:52 -0400
+Subject: bnxt_en: set missing reload flag in devlink features
+
+From: Vikas Gupta <vikas.gupta@broadcom.com>
+
+[ Upstream commit 574b2bb9692fd3d45ed631ac447176d4679f3010 ]
+
+Add the missing devlink_set_features() call so that the reload_down
+and reload_up callbacks can function.
+
+Fixes: 228ea8c187d8 ("bnxt_en: implement devlink dev reload driver_reinit")
+Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
+Signed-off-by: Vikas Gupta <vikas.gupta@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+index 6b3d4f4c2a75f..d83be40785b89 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+@@ -1246,6 +1246,7 @@ int bnxt_dl_register(struct bnxt *bp)
+ if (rc)
+ goto err_dl_port_unreg;
+
++ devlink_set_features(dl, DEVLINK_F_RELOAD);
+ out:
+ devlink_register(dl);
+ return 0;
+--
+2.35.1
+
--- /dev/null
+From 6d224daabc70e7610e7b51436ba96a3313225b82 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 22 Aug 2022 11:06:51 -0400
+Subject: bnxt_en: Use PAGE_SIZE to init buffer when multi buffer XDP is not in
+ use
+
+From: Pavan Chebbi <pavan.chebbi@broadcom.com>
+
+[ Upstream commit 7dd3de7cb1d657a918c6b2bc673c71e318aa0c05 ]
+
+Using BNXT_PAGE_MODE_BUF_SIZE + offset as the buffer length is not
+sufficient when running single buffer XDP programs that do redirect
+operations. The stack will complain about missing skb tail room. Fix it
+by using PAGE_SIZE when calling xdp_init_buff() for single buffer
+programs.
+
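+The underlying constraint, roughly: when the stack later builds an skb
+from the redirected XDP buffer, the tail of the frame must have room
+reserved for struct skb_shared_info. A sketch of the resulting layout
+(simplified; offset is the Rx headroom as in the hunk below):
+
+	/* single-buffer program: the frame spans the whole page, so the
+	 * end of the page can hold the skb_shared_info tailroom
+	 */
+	xdp_init_buff(xdp, PAGE_SIZE, &rxr->xdp_rxq);
+
+	/* usable payload is then roughly */
+	payload_max = PAGE_SIZE - offset -
+		      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+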
+Fixes: b231c3f3414c ("bnxt: refactor bnxt_rx_xdp to separate xdp_init_buff/xdp_prepare_buff")
+Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
+Signed-off-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1 +
+ drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 10 ++++++++--
+ 2 files changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+index 075c6206325ce..b1b17f9113006 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -2130,6 +2130,7 @@ struct bnxt {
+ #define BNXT_DUMP_CRASH 1
+
+ struct bpf_prog *xdp_prog;
++ u8 xdp_has_frags;
+
+ struct bnxt_ptp_cfg *ptp_cfg;
+ u8 ptp_all_rx_tstamp;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+index f53387ed0167b..c3065ec0a4798 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+@@ -181,6 +181,7 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ struct xdp_buff *xdp)
+ {
+ struct bnxt_sw_rx_bd *rx_buf;
++ u32 buflen = PAGE_SIZE;
+ struct pci_dev *pdev;
+ dma_addr_t mapping;
+ u32 offset;
+@@ -192,7 +193,10 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ mapping = rx_buf->mapping - bp->rx_dma_offset;
+ dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
+
+- xdp_init_buff(xdp, BNXT_PAGE_MODE_BUF_SIZE + offset, &rxr->xdp_rxq);
++ if (bp->xdp_has_frags)
++ buflen = BNXT_PAGE_MODE_BUF_SIZE + offset;
++
++ xdp_init_buff(xdp, buflen, &rxr->xdp_rxq);
+ xdp_prepare_buff(xdp, *data_ptr - offset, offset, *len, false);
+ }
+
+@@ -397,8 +401,10 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
+ netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
+ return -EOPNOTSUPP;
+ }
+- if (prog)
++ if (prog) {
+ tx_xdp = bp->rx_nr_rings;
++ bp->xdp_has_frags = prog->aux->xdp_has_frags;
++ }
+
+ tc = netdev_get_num_tc(dev);
+ if (!tc)
+--
+2.35.1
+
--- /dev/null
+From 40110ae9dfc4d37d494436fe2ee60c59037f4d3d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 19 Aug 2022 11:15:13 -0400
+Subject: bonding: 802.3ad: fix no transmission of LACPDUs
+
+From: Jonathan Toppins <jtoppins@redhat.com>
+
+[ Upstream commit d745b5062ad2b5da90a5e728d7ca884fc07315fd ]
+
+This is caused by the global variable ad_ticks_per_sec being zero, as
+demonstrated by the reproducer script discussed below. This causes
+all timer values in __ad_timer_to_ticks to be zero, resulting
+in the periodic timer never firing.
+
+To reproduce:
+Run the script in
+`tools/testing/selftests/drivers/net/bonding/bond-break-lacpdu-tx.sh` which
+puts bonding into a state where it never transmits LACPDUs.
+
+line 44: ip link add fbond type bond mode 4 miimon 200 \
+ xmit_hash_policy 1 ad_actor_sys_prio 65535 lacp_rate fast
+setting bond param: ad_actor_sys_prio
+given:
+ params.ad_actor_system = 0
+call stack:
+ bond_option_ad_actor_sys_prio()
+ -> bond_3ad_update_ad_actor_settings()
+ -> set ad.system.sys_priority = bond->params.ad_actor_sys_prio
+ -> ad.system.sys_mac_addr = bond->dev->dev_addr; because
+ params.ad_actor_system == 0
+results:
+ ad.system.sys_mac_addr = bond->dev->dev_addr
+
+line 48: ip link set fbond address 52:54:00:3B:7C:A6
+setting bond MAC addr
+call stack:
+ bond->dev->dev_addr = new_mac
+
+line 52: ip link set fbond type bond ad_actor_sys_prio 65535
+setting bond param: ad_actor_sys_prio
+given:
+ params.ad_actor_system = 0
+call stack:
+ bond_option_ad_actor_sys_prio()
+ -> bond_3ad_update_ad_actor_settings()
+ -> set ad.system.sys_priority = bond->params.ad_actor_sys_prio
+ -> ad.system.sys_mac_addr = bond->dev->dev_addr; because
+ params.ad_actor_system == 0
+results:
+ ad.system.sys_mac_addr = bond->dev->dev_addr
+
+line 60: ip link set veth1-bond down master fbond
+given:
+ params.ad_actor_system = 0
+ params.mode = BOND_MODE_8023AD
+ ad.system.sys_mac_addr == bond->dev->dev_addr
+call stack:
+ bond_enslave
+ -> bond_3ad_initialize(); because first slave
+ -> if ad.system.sys_mac_addr != bond->dev->dev_addr
+ return
+results:
+ Nothing is run in bond_3ad_initialize() because dev_addr equals
+ sys_mac_addr leaving the global ad_ticks_per_sec zero as it is
+ never initialized anywhere else.
+
+The if check around the contents of bond_3ad_initialize() is no longer
+needed due to commit 5ee14e6d336f ("bonding: 3ad: apply ad_actor settings
+changes immediately"), which sets ad.system.sys_mac_addr whenever any of
+the bonding parameters whose set function calls
+bond_3ad_update_ad_actor_settings() is changed. If
+ad.system.sys_mac_addr is zero it will be set to the current bond mac
+address, so the if check can never be true.
+
+Fixes: 5ee14e6d336f ("bonding: 3ad: apply ad_actor settings changes immediately")
+Signed-off-by: Jonathan Toppins <jtoppins@redhat.com>
+Acked-by: Jay Vosburgh <jay.vosburgh@canonical.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/bonding/bond_3ad.c | 38 ++++++++++++++--------------------
+ 1 file changed, 16 insertions(+), 22 deletions(-)
+
+diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
+index d7fb33c078e81..1f0120cbe9e80 100644
+--- a/drivers/net/bonding/bond_3ad.c
++++ b/drivers/net/bonding/bond_3ad.c
+@@ -2007,30 +2007,24 @@ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
+ */
+ void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
+ {
+- /* check that the bond is not initialized yet */
+- if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr),
+- bond->dev->dev_addr)) {
+-
+- BOND_AD_INFO(bond).aggregator_identifier = 0;
+-
+- BOND_AD_INFO(bond).system.sys_priority =
+- bond->params.ad_actor_sys_prio;
+- if (is_zero_ether_addr(bond->params.ad_actor_system))
+- BOND_AD_INFO(bond).system.sys_mac_addr =
+- *((struct mac_addr *)bond->dev->dev_addr);
+- else
+- BOND_AD_INFO(bond).system.sys_mac_addr =
+- *((struct mac_addr *)bond->params.ad_actor_system);
++ BOND_AD_INFO(bond).aggregator_identifier = 0;
++ BOND_AD_INFO(bond).system.sys_priority =
++ bond->params.ad_actor_sys_prio;
++ if (is_zero_ether_addr(bond->params.ad_actor_system))
++ BOND_AD_INFO(bond).system.sys_mac_addr =
++ *((struct mac_addr *)bond->dev->dev_addr);
++ else
++ BOND_AD_INFO(bond).system.sys_mac_addr =
++ *((struct mac_addr *)bond->params.ad_actor_system);
+
+- /* initialize how many times this module is called in one
+- * second (should be about every 100ms)
+- */
+- ad_ticks_per_sec = tick_resolution;
++ /* initialize how many times this module is called in one
++ * second (should be about every 100ms)
++ */
++ ad_ticks_per_sec = tick_resolution;
+
+- bond_3ad_initiate_agg_selection(bond,
+- AD_AGGREGATOR_SELECTION_TIMER *
+- ad_ticks_per_sec);
+- }
++ bond_3ad_initiate_agg_selection(bond,
++ AD_AGGREGATOR_SELECTION_TIMER *
++ ad_ticks_per_sec);
+ }
+
+ /**
+--
+2.35.1
+
--- /dev/null
+From 9d77d7dea41f471323eb8a912c506fa3ae2f15b3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 16 Aug 2022 11:47:52 -0500
+Subject: fs: require CAP_SYS_ADMIN in target namespace for idmapped mounts
+
+From: Seth Forshee <sforshee@digitalocean.com>
+
+[ Upstream commit bf1ac16edf6770a92bc75cf2373f1f9feea398a4 ]
+
+Idmapped mounts should not allow a user to map file ownership into a
+range of ids which is not under the control of that user. However, we
+currently don't check whether the mounter is privileged with respect to
+the target user namespace.
+
+Currently no FS_USERNS_MOUNT filesystems support idmapped mounts, thus
+this is not a problem as only CAP_SYS_ADMIN in init_user_ns is allowed
+to set up idmapped mounts. But this could change in the future, so add a
+check to refuse to create idmapped mounts when the mounter does not have
+CAP_SYS_ADMIN in the target user namespace.
+
+Fixes: bd303368b776 ("fs: support mapped mounts of mapped filesystems")
+Signed-off-by: Seth Forshee <sforshee@digitalocean.com>
+Reviewed-by: Christian Brauner (Microsoft) <brauner@kernel.org>
+Link: https://lore.kernel.org/r/20220816164752.2595240-1-sforshee@digitalocean.com
+Signed-off-by: Christian Brauner (Microsoft) <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/namespace.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/fs/namespace.c b/fs/namespace.c
+index e6a7e769d25dd..a59f8d645654a 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -4238,6 +4238,13 @@ static int build_mount_idmapped(const struct mount_attr *attr, size_t usize,
+ err = -EPERM;
+ goto out_fput;
+ }
++
++ /* We're not controlling the target namespace. */
++ if (!ns_capable(mnt_userns, CAP_SYS_ADMIN)) {
++ err = -EPERM;
++ goto out_fput;
++ }
++
+ kattr->mnt_userns = get_user_ns(mnt_userns);
+
+ out_fput:
+--
+2.35.1
+
--- /dev/null
+From 54ea8a24089a1fe6f43c3c267b72cb6d442e91b2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 19 Aug 2022 12:45:52 +0200
+Subject: i40e: Fix incorrect address type for IPv6 flow rules
+
+From: Sylwester Dziedziuch <sylwesterx.dziedziuch@intel.com>
+
+[ Upstream commit bcf3a156429306070afbfda5544f2b492d25e75b ]
+
+It was not possible to create a 1-tuple flow director
+rule for the IPv6 flow type. This was caused by incorrectly
+checking the source IP address when validating the user-provided
+destination IP address.
+
+Fix this by checking the correct ip6dst address instead of ip6src
+in the destination IP address validation for the IPv6 flow type.
+
+Fixes: efca91e89b67 ("i40e: Add flow director support for IPv6")
+Signed-off-by: Sylwester Dziedziuch <sylwesterx.dziedziuch@intel.com>
+Tested-by: Gurucharan <gurucharanx.g@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+index 19704f5c8291c..22a61802a4027 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+@@ -4395,7 +4395,7 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
+ (struct in6_addr *)&ipv6_full_mask))
+ new_mask |= I40E_L3_V6_DST_MASK;
+ else if (ipv6_addr_any((struct in6_addr *)
+- &usr_ip6_spec->ip6src))
++ &usr_ip6_spec->ip6dst))
+ new_mask &= ~I40E_L3_V6_DST_MASK;
+ else
+ return -EOPNOTSUPP;
+--
+2.35.1
+
--- /dev/null
+From 01f463f3a5b27f26ef3e329e80fcbcaf455bc1ce Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 11 Aug 2022 20:21:48 +0200
+Subject: ice: xsk: prohibit usage of non-balanced queue id
+
+From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+
+[ Upstream commit 5a42f112d367bb4700a8a41f5c12724fde6bfbb9 ]
+
+Fix the following scenario:
+1. ethtool -L $IFACE rx 8 tx 96
+2. xdpsock -q 10 -t -z
+
+The above refers to a case where the user would like to attach an XSK
+socket in txonly mode at a queue id that does not have a corresponding
+Rx queue. At this moment ice's XSK logic is tightly bound to act on a
+"queue pair", i.e. both the Tx and Rx queues at a given queue id are
+disabled/enabled and both of them will get an XSK pool assigned, which
+is broken for the presented queue configuration. This results in the
+splat included at the bottom, which is basically an OOB access to the
+Rx ring array.
+
+To fix this, allow using only queue ids within the scope of the
+"combined" queues reported by ethtool. The logic should be rewritten
+later on to allow such configurations, but that would end up as a
+complete rewrite of the control path, so let us go with this temporary
+fix.
+
+[420160.558008] BUG: kernel NULL pointer dereference, address: 0000000000000082
+[420160.566359] #PF: supervisor read access in kernel mode
+[420160.572657] #PF: error_code(0x0000) - not-present page
+[420160.579002] PGD 0 P4D 0
+[420160.582756] Oops: 0000 [#1] PREEMPT SMP NOPTI
+[420160.588396] CPU: 10 PID: 21232 Comm: xdpsock Tainted: G OE 5.19.0-rc7+ #10
+[420160.597893] Hardware name: Intel Corporation S2600WFT/S2600WFT, BIOS SE5C620.86B.02.01.0008.031920191559 03/19/2019
+[420160.609894] RIP: 0010:ice_xsk_pool_setup+0x44/0x7d0 [ice]
+[420160.616968] Code: f3 48 83 ec 40 48 8b 4f 20 48 8b 3f 65 48 8b 04 25 28 00 00 00 48 89 44 24 38 31 c0 48 8d 04 ed 00 00 00 00 48 01 c1 48 8b 11 <0f> b7 92 82 00 00 00 48 85 d2 0f 84 2d 75 00 00 48 8d 72 ff 48 85
+[420160.639421] RSP: 0018:ffffc9002d2afd48 EFLAGS: 00010282
+[420160.646650] RAX: 0000000000000050 RBX: ffff88811d8bdd00 RCX: ffff888112c14ff8
+[420160.655893] RDX: 0000000000000000 RSI: ffff88811d8bdd00 RDI: ffff888109861000
+[420160.665166] RBP: 000000000000000a R08: 000000000000000a R09: 0000000000000000
+[420160.674493] R10: 000000000000889f R11: 0000000000000000 R12: 000000000000000a
+[420160.683833] R13: 000000000000000a R14: 0000000000000000 R15: ffff888117611828
+[420160.693211] FS: 00007fa869fc1f80(0000) GS:ffff8897e0880000(0000) knlGS:0000000000000000
+[420160.703645] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[420160.711783] CR2: 0000000000000082 CR3: 00000001d076c001 CR4: 00000000007706e0
+[420160.721399] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[420160.731045] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[420160.740707] PKRU: 55555554
+[420160.745960] Call Trace:
+[420160.750962] <TASK>
+[420160.755597] ? kmalloc_large_node+0x79/0x90
+[420160.762703] ? __kmalloc_node+0x3f5/0x4b0
+[420160.769341] xp_assign_dev+0xfd/0x210
+[420160.775661] ? shmem_file_read_iter+0x29a/0x420
+[420160.782896] xsk_bind+0x152/0x490
+[420160.788943] __sys_bind+0xd0/0x100
+[420160.795097] ? exit_to_user_mode_prepare+0x20/0x120
+[420160.802801] __x64_sys_bind+0x16/0x20
+[420160.809298] do_syscall_64+0x38/0x90
+[420160.815741] entry_SYSCALL_64_after_hwframe+0x63/0xcd
+[420160.823731] RIP: 0033:0x7fa86a0dd2fb
+[420160.830264] Code: c3 66 0f 1f 44 00 00 48 8b 15 69 8b 0c 00 f7 d8 64 89 02 b8 ff ff ff ff eb bc 0f 1f 44 00 00 f3 0f 1e fa b8 31 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 3d 8b 0c 00 f7 d8 64 89 01 48
+[420160.855410] RSP: 002b:00007ffc1146f618 EFLAGS: 00000246 ORIG_RAX: 0000000000000031
+[420160.866366] RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007fa86a0dd2fb
+[420160.876957] RDX: 0000000000000010 RSI: 00007ffc1146f680 RDI: 0000000000000003
+[420160.887604] RBP: 000055d7113a0520 R08: 00007fa868fb8000 R09: 0000000080000000
+[420160.898293] R10: 0000000000008001 R11: 0000000000000246 R12: 000055d7113a04e0
+[420160.909038] R13: 000055d7113a0320 R14: 000000000000000a R15: 0000000000000000
+[420160.919817] </TASK>
+[420160.925659] Modules linked in: ice(OE) af_packet binfmt_misc nls_iso8859_1 ipmi_ssif intel_rapl_msr intel_rapl_common x86_pkg_temp_thermal intel_powerclamp mei_me coretemp ioatdma mei ipmi_si wmi ipmi_msghandler acpi_pad acpi_power_meter ip_tables x_tables autofs4 ixgbe i40e crct10dif_pclmul crc32_pclmul ghash_clmulni_intel aesni_intel crypto_simd cryptd ahci mdio dca libahci lpc_ich [last unloaded: ice]
+[420160.977576] CR2: 0000000000000082
+[420160.985037] ---[ end trace 0000000000000000 ]---
+[420161.097724] RIP: 0010:ice_xsk_pool_setup+0x44/0x7d0 [ice]
+[420161.107341] Code: f3 48 83 ec 40 48 8b 4f 20 48 8b 3f 65 48 8b 04 25 28 00 00 00 48 89 44 24 38 31 c0 48 8d 04 ed 00 00 00 00 48 01 c1 48 8b 11 <0f> b7 92 82 00 00 00 48 85 d2 0f 84 2d 75 00 00 48 8d 72 ff 48 85
+[420161.134741] RSP: 0018:ffffc9002d2afd48 EFLAGS: 00010282
+[420161.144274] RAX: 0000000000000050 RBX: ffff88811d8bdd00 RCX: ffff888112c14ff8
+[420161.155690] RDX: 0000000000000000 RSI: ffff88811d8bdd00 RDI: ffff888109861000
+[420161.168088] RBP: 000000000000000a R08: 000000000000000a R09: 0000000000000000
+[420161.179295] R10: 000000000000889f R11: 0000000000000000 R12: 000000000000000a
+[420161.190420] R13: 000000000000000a R14: 0000000000000000 R15: ffff888117611828
+[420161.201505] FS: 00007fa869fc1f80(0000) GS:ffff8897e0880000(0000) knlGS:0000000000000000
+[420161.213628] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[420161.223413] CR2: 0000000000000082 CR3: 00000001d076c001 CR4: 00000000007706e0
+[420161.234653] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[420161.245893] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[420161.257052] PKRU: 55555554
+
+Fixes: 2d4238f55697 ("ice: Add support for AF_XDP")
+Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Tested-by: George Kuruvinakunnel <george.kuruvinakunnel@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_xsk.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
+index 49ba8bfdbf047..45f88e6ec25e8 100644
+--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
+@@ -329,6 +329,12 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
+ bool if_running, pool_present = !!pool;
+ int ret = 0, pool_failure = 0;
+
++ if (qid >= vsi->num_rxq || qid >= vsi->num_txq) {
++ netdev_err(vsi->netdev, "Please use queue id in scope of combined queues count\n");
++ pool_failure = -EINVAL;
++ goto failure;
++ }
++
+ if (!is_power_of_2(vsi->rx_rings[qid]->count) ||
+ !is_power_of_2(vsi->tx_rings[qid]->count)) {
+ netdev_err(vsi->netdev, "Please align ring sizes to power of 2\n");
+--
+2.35.1
+
--- /dev/null
+From c0e4dcc64a8363cb3bb66dece034960cc7ff5168 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 11 Aug 2022 20:21:49 +0200
+Subject: ice: xsk: use Rx ring's XDP ring when picking NAPI context
+
+From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+
+[ Upstream commit 9ead7e74bfd6dd54db12ef133b8604add72511de ]
+
+The ice driver allocates per-CPU XDP queues so that the redirect path
+can safely use smp_processor_id() as an index into the array. At the
+same time though, XDP rings are used to pick the NAPI context to call
+napi_schedule() or set NAPIF_STATE_MISSED. When the user reduces the
+queue count, say to 8, while num_possible_cpus() of the underlying
+platform is 44, queue vectors with correlated NAPI contexts will carry
+several XDP queues.
+
+This in turn can result in broken behavior where the NAPI context of
+interest will never be scheduled and the AF_XDP socket will not process
+any traffic.
+
+To fix this, change the way XDP rings are assigned to Rx rings and use
+this information later on when setting the ice_tx_ring::xsk_pool
+pointer. For each Rx ring, grab the associated queue vector and walk
+through the Tx ring's linked list. Once we stumble upon the XDP ring in
+it, assign this ring to ice_rx_ring::xdp_ring.
+
+The previous approach [0] to fixing this issue was for the txonly
+scenario because of the described grouping of XDP rings across queue
+vectors. So, relying on the Rx ring meant that a NAPI context could be
+scheduled for a queue vector without an XDP ring with an associated XSK
+pool.
+
+[0]: https://lore.kernel.org/netdev/20220707161128.54215-1-maciej.fijalkowski@intel.com/
+
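+The core of the rewiring can be summarised as follows (sketch only; it
+condenses the ice_prepare_xdp_rings() and ice_tx_xsk_pool() changes in
+the diff below):
+
+	/* pick the XDP ring through the Rx ring's queue vector, so the
+	 * NAPI context that gets scheduled is the one actually servicing
+	 * this queue id
+	 */
+	ice_for_each_tx_ring(ring, rx_ring->q_vector->tx) {
+		if (ice_ring_is_xdp(ring)) {
+			rx_ring->xdp_ring = ring;
+			break;
+		}
+	}
+
+	/* and hang the XSK pool off that ring, not off xdp_rings[qid] */
+	rx_ring->xdp_ring->xsk_pool = xsk_get_pool_from_qid(netdev, qid);
+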
+Fixes: 2d4238f55697 ("ice: Add support for AF_XDP")
+Fixes: 22bf877e528f ("ice: introduce XDP_TX fallback path")
+Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Tested-by: George Kuruvinakunnel <george.kuruvinakunnel@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice.h | 36 +++++++++++++++--------
+ drivers/net/ethernet/intel/ice/ice_lib.c | 4 +--
+ drivers/net/ethernet/intel/ice/ice_main.c | 25 +++++++++++-----
+ drivers/net/ethernet/intel/ice/ice_xsk.c | 12 ++++----
+ 4 files changed, 48 insertions(+), 29 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
+index 60453b3b8d233..6911cbb7afa50 100644
+--- a/drivers/net/ethernet/intel/ice/ice.h
++++ b/drivers/net/ethernet/intel/ice/ice.h
+@@ -684,8 +684,8 @@ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
+ * ice_xsk_pool - get XSK buffer pool bound to a ring
+ * @ring: Rx ring to use
+ *
+- * Returns a pointer to xdp_umem structure if there is a buffer pool present,
+- * NULL otherwise.
++ * Returns a pointer to xsk_buff_pool structure if there is a buffer pool
++ * present, NULL otherwise.
+ */
+ static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
+ {
+@@ -699,23 +699,33 @@ static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
+ }
+
+ /**
+- * ice_tx_xsk_pool - get XSK buffer pool bound to a ring
+- * @ring: Tx ring to use
++ * ice_tx_xsk_pool - assign XSK buff pool to XDP ring
++ * @vsi: pointer to VSI
++ * @qid: index of a queue to look at XSK buff pool presence
+ *
+- * Returns a pointer to xdp_umem structure if there is a buffer pool present,
+- * NULL otherwise. Tx equivalent of ice_xsk_pool.
++ * Sets XSK buff pool pointer on XDP ring.
++ *
++ * XDP ring is picked from Rx ring, whereas Rx ring is picked based on provided
++ * queue id. Reason for doing so is that queue vectors might have assigned more
++ * than one XDP ring, e.g. when user reduced the queue count on netdev; Rx ring
++ * carries a pointer to one of these XDP rings for its own purposes, such as
++ * handling XDP_TX action, therefore we can piggyback here on the
++ * rx_ring->xdp_ring assignment that was done during XDP rings initialization.
+ */
+-static inline struct xsk_buff_pool *ice_tx_xsk_pool(struct ice_tx_ring *ring)
++static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
+ {
+- struct ice_vsi *vsi = ring->vsi;
+- u16 qid;
++ struct ice_tx_ring *ring;
+
+- qid = ring->q_index - vsi->alloc_txq;
++ ring = vsi->rx_rings[qid]->xdp_ring;
++ if (!ring)
++ return;
+
+- if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
+- return NULL;
++ if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) {
++ ring->xsk_pool = NULL;
++ return;
++ }
+
+- return xsk_get_pool_from_qid(vsi->netdev, qid);
++ ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid);
+ }
+
+ /**
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index d6aafa272fb0b..6c4e1d45235ef 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -1983,8 +1983,8 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
+ if (ret)
+ return ret;
+
+- ice_for_each_xdp_txq(vsi, i)
+- vsi->xdp_rings[i]->xsk_pool = ice_tx_xsk_pool(vsi->xdp_rings[i]);
++ ice_for_each_rxq(vsi, i)
++ ice_tx_xsk_pool(vsi, i);
+
+ return ret;
+ }
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index bfd97a9a8f2e0..3d45e075204e3 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -2581,7 +2581,6 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
+ if (ice_setup_tx_ring(xdp_ring))
+ goto free_xdp_rings;
+ ice_set_ring_xdp(xdp_ring);
+- xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
+ spin_lock_init(&xdp_ring->tx_lock);
+ for (j = 0; j < xdp_ring->count; j++) {
+ tx_desc = ICE_TX_DESC(xdp_ring, j);
+@@ -2589,13 +2588,6 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
+ }
+ }
+
+- ice_for_each_rxq(vsi, i) {
+- if (static_key_enabled(&ice_xdp_locking_key))
+- vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
+- else
+- vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i];
+- }
+-
+ return 0;
+
+ free_xdp_rings:
+@@ -2685,6 +2677,23 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
+ xdp_rings_rem -= xdp_rings_per_v;
+ }
+
++ ice_for_each_rxq(vsi, i) {
++ if (static_key_enabled(&ice_xdp_locking_key)) {
++ vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
++ } else {
++ struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
++ struct ice_tx_ring *ring;
++
++ ice_for_each_tx_ring(ring, q_vector->tx) {
++ if (ice_ring_is_xdp(ring)) {
++ vsi->rx_rings[i]->xdp_ring = ring;
++ break;
++ }
++ }
++ }
++ ice_tx_xsk_pool(vsi, i);
++ }
++
+ /* omit the scheduler update if in reset path; XDP queues will be
+ * taken into account at the end of ice_vsi_rebuild, where
+ * ice_cfg_vsi_lan is being called
+diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
+index 45f88e6ec25e8..e48e29258450f 100644
+--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
+@@ -243,7 +243,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
+ if (err)
+ goto free_buf;
+ ice_set_ring_xdp(xdp_ring);
+- xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
++ ice_tx_xsk_pool(vsi, q_idx);
+ }
+
+ err = ice_vsi_cfg_rxq(rx_ring);
+@@ -359,7 +359,7 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
+ if (if_running) {
+ ret = ice_qp_ena(vsi, qid);
+ if (!ret && pool_present)
+- napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
++ napi_schedule(&vsi->rx_rings[qid]->xdp_ring->q_vector->napi);
+ else if (ret)
+ netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
+ }
+@@ -950,13 +950,13 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
+ if (!ice_is_xdp_ena_vsi(vsi))
+ return -EINVAL;
+
+- if (queue_id >= vsi->num_txq)
++ if (queue_id >= vsi->num_txq || queue_id >= vsi->num_rxq)
+ return -EINVAL;
+
+- if (!vsi->xdp_rings[queue_id]->xsk_pool)
+- return -EINVAL;
++ ring = vsi->rx_rings[queue_id]->xdp_ring;
+
+- ring = vsi->xdp_rings[queue_id];
++ if (!ring->xsk_pool)
++ return -EINVAL;
+
+ /* The idea here is that if NAPI is running, mark a miss, so
+ * it will run again. If not, trigger an interrupt and
+--
+2.35.1
+
--- /dev/null
+From 2799dffc93d0955c33bae1fd601eb5c0146da4d8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Aug 2022 09:50:49 -0700
+Subject: ionic: clear broken state on generation change
+
+From: Shannon Nelson <snelson@pensando.io>
+
+[ Upstream commit 9cb9dadb8f45c67e4310e002c2f221b70312b293 ]
+
+There is a case found in heavy testing where a link flap happens just
+before a firmware Recovery event and the driver gets stuck in the
+BROKEN state. This comes from the driver getting interrupted by a FW
+generation change when coming back up from the link flap, and the call
+to ionic_start_queues() in ionic_link_status_check() fails. This can be
+addressed by having the fw_up code clear the BROKEN bit if seen, rather
+than waiting for a user to manually force the interface down and then
+back up.
+
+Fixes: 9e8eaf8427b6 ("ionic: stop watchdog when in broken state")
+Signed-off-by: Shannon Nelson <snelson@pensando.io>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/pensando/ionic/ionic_lif.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+index 1443f788ee37c..d4226999547e8 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+@@ -2963,6 +2963,9 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
+
+ mutex_lock(&lif->queue_lock);
+
++ if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state))
++ dev_info(ionic->dev, "FW Up: clearing broken state\n");
++
+ err = ionic_qcqs_alloc(lif);
+ if (err)
+ goto err_unlock;
+--
+2.35.1
+
--- /dev/null
+From 812c0f1da3a2b15bb1e2ff35858d23561f4c7855 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Aug 2022 09:50:50 -0700
+Subject: ionic: fix up issues with handling EAGAIN on FW cmds
+
+From: Shannon Nelson <snelson@pensando.io>
+
+[ Upstream commit 0fc4dd452d6c14828eed6369155c75c0ac15bab3 ]
+
+In looping FW update tests we occasionally see the
+FW_ACTIVATE_STATUS command fail while it is in its EAGAIN loop
+waiting for the FW activate step to finish inside the FW. The
+firmware is complaining that the done bit is set when a new
+dev_cmd is going to be processed.
+
+Cleaning the cmd registers and doorbell before exiting the
+wait-for-done loop, and clearing the done bit before the sleep,
+prevents this from occurring.
+
+Fixes: fbfb8031533c ("ionic: Add hardware init and device commands")
+Signed-off-by: Shannon Nelson <snelson@pensando.io>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/pensando/ionic/ionic_main.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
+index 4029b4e021f86..56f93b0305519 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
+@@ -474,8 +474,8 @@ static int __ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds,
+ ionic_opcode_to_str(opcode), opcode,
+ ionic_error_to_str(err), err);
+
+- msleep(1000);
+ iowrite32(0, &idev->dev_cmd_regs->done);
++ msleep(1000);
+ iowrite32(1, &idev->dev_cmd_regs->doorbell);
+ goto try_again;
+ }
+@@ -488,6 +488,8 @@ static int __ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds,
+ return ionic_error_to_errno(err);
+ }
+
++ ionic_dev_cmd_clean(ionic);
++
+ return 0;
+ }
+
+--
+2.35.1
+
--- /dev/null
+From 2b382bed2918d3399c4c7f3acf192c15906157d2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Aug 2022 09:50:51 -0700
+Subject: ionic: VF initial random MAC address if no assigned mac
+
+From: R Mohamed Shah <mohamed@pensando.io>
+
+[ Upstream commit 19058be7c48ceb3e60fa3948e24da1059bd68ee4 ]
+
+Assign a random mac address to the VF interface station
+address if it boots with a zero mac address in order to match
+similar behavior seen in other VF drivers. Handle the errors
+where the older firmware does not allow the VF to set its own
+station address.
+
+Newer firmware will allow the VF to set the station mac address
+if it hasn't already been set administratively through the PF.
+Setting it will also be allowed if the VF has trust.
+
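+The new flow boils down to the following (sketch; ionic_program_mac()
+is the helper introduced below, mac_from_fw stands for the address read
+from the LIF_GETATTR completion, and error handling is trimmed):
+
+	if (is_zero_ether_addr(mac_from_fw)) {
+		eth_hw_addr_random(netdev);	/* pick a random MAC */
+		err = ionic_program_mac(lif, netdev->dev_addr);
+		if (err < 0)
+			return err;
+		if (err > 0)	/* older FW silently ignored the set */
+			return 0;
+	}
+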
+Fixes: fbb39807e9ae ("ionic: support sr-iov operations")
+Signed-off-by: R Mohamed Shah <mohamed@pensando.io>
+Signed-off-by: Shannon Nelson <snelson@pensando.io>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/pensando/ionic/ionic_lif.c | 92 ++++++++++++++++++-
+ 1 file changed, 87 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+index d4226999547e8..0be79c5167813 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+@@ -1564,8 +1564,67 @@ static int ionic_set_features(struct net_device *netdev,
+ return err;
+ }
+
++static int ionic_set_attr_mac(struct ionic_lif *lif, u8 *mac)
++{
++ struct ionic_admin_ctx ctx = {
++ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
++ .cmd.lif_setattr = {
++ .opcode = IONIC_CMD_LIF_SETATTR,
++ .index = cpu_to_le16(lif->index),
++ .attr = IONIC_LIF_ATTR_MAC,
++ },
++ };
++
++ ether_addr_copy(ctx.cmd.lif_setattr.mac, mac);
++ return ionic_adminq_post_wait(lif, &ctx);
++}
++
++static int ionic_get_attr_mac(struct ionic_lif *lif, u8 *mac_addr)
++{
++ struct ionic_admin_ctx ctx = {
++ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
++ .cmd.lif_getattr = {
++ .opcode = IONIC_CMD_LIF_GETATTR,
++ .index = cpu_to_le16(lif->index),
++ .attr = IONIC_LIF_ATTR_MAC,
++ },
++ };
++ int err;
++
++ err = ionic_adminq_post_wait(lif, &ctx);
++ if (err)
++ return err;
++
++ ether_addr_copy(mac_addr, ctx.comp.lif_getattr.mac);
++ return 0;
++}
++
++static int ionic_program_mac(struct ionic_lif *lif, u8 *mac)
++{
++ u8 get_mac[ETH_ALEN];
++ int err;
++
++ err = ionic_set_attr_mac(lif, mac);
++ if (err)
++ return err;
++
++ err = ionic_get_attr_mac(lif, get_mac);
++ if (err)
++ return err;
++
++ /* To deal with older firmware that silently ignores the set attr mac:
++ * doesn't actually change the mac and doesn't return an error, so we
++ * do the get attr to verify whether or not the set actually happened
++ */
++ if (!ether_addr_equal(get_mac, mac))
++ return 1;
++
++ return 0;
++}
++
+ static int ionic_set_mac_address(struct net_device *netdev, void *sa)
+ {
++ struct ionic_lif *lif = netdev_priv(netdev);
+ struct sockaddr *addr = sa;
+ u8 *mac;
+ int err;
+@@ -1574,6 +1633,14 @@ static int ionic_set_mac_address(struct net_device *netdev, void *sa)
+ if (ether_addr_equal(netdev->dev_addr, mac))
+ return 0;
+
++ err = ionic_program_mac(lif, mac);
++ if (err < 0)
++ return err;
++
++ if (err > 0)
++ netdev_dbg(netdev, "%s: SET and GET ATTR Mac are not equal-due to old FW running\n",
++ __func__);
++
+ err = eth_prepare_mac_addr_change(netdev, addr);
+ if (err)
+ return err;
+@@ -3172,6 +3239,7 @@ static int ionic_station_set(struct ionic_lif *lif)
+ .attr = IONIC_LIF_ATTR_MAC,
+ },
+ };
++ u8 mac_address[ETH_ALEN];
+ struct sockaddr addr;
+ int err;
+
+@@ -3180,8 +3248,23 @@ static int ionic_station_set(struct ionic_lif *lif)
+ return err;
+ netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
+ ctx.comp.lif_getattr.mac);
+- if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
+- return 0;
++ ether_addr_copy(mac_address, ctx.comp.lif_getattr.mac);
++
++ if (is_zero_ether_addr(mac_address)) {
++ eth_hw_addr_random(netdev);
++ netdev_dbg(netdev, "Random Mac generated: %pM\n", netdev->dev_addr);
++ ether_addr_copy(mac_address, netdev->dev_addr);
++
++ err = ionic_program_mac(lif, mac_address);
++ if (err < 0)
++ return err;
++
++ if (err > 0) {
++ netdev_dbg(netdev, "%s:SET/GET ATTR Mac are not same-due to old FW running\n",
++ __func__);
++ return 0;
++ }
++ }
+
+ if (!is_zero_ether_addr(netdev->dev_addr)) {
+ /* If the netdev mac is non-zero and doesn't match the default
+@@ -3189,12 +3272,11 @@ static int ionic_station_set(struct ionic_lif *lif)
+ * likely here again after a fw-upgrade reset. We need to be
+ * sure the netdev mac is in our filter list.
+ */
+- if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
+- netdev->dev_addr))
++ if (!ether_addr_equal(mac_address, netdev->dev_addr))
+ ionic_lif_addr_add(lif, netdev->dev_addr);
+ } else {
+ /* Update the netdev mac with the device's mac */
+- memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
++ ether_addr_copy(addr.sa_data, mac_address);
+ addr.sa_family = AF_INET;
+ err = eth_prepare_mac_addr_change(netdev, &addr);
+ if (err) {
+--
+2.35.1
+
--- /dev/null
+From b5e01d178ca9c9db8e50c4ec44d995d35dc3f84f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 1 Aug 2022 17:24:19 -0700
+Subject: ixgbe: stop resetting SYSTIME in ixgbe_ptp_start_cyclecounter
+
+From: Jacob Keller <jacob.e.keller@intel.com>
+
+[ Upstream commit 25d7a5f5a6bb15a2dae0a3f39ea5dda215024726 ]
+
+The ixgbe_ptp_start_cyclecounter is intended to be called whenever the
+cyclecounter parameters need to be changed.
+
+Since commit a9763f3cb54c ("ixgbe: Update PTP to support X550EM_x
+devices"), this function has cleared the SYSTIME registers and reset the
+TSAUXC DISABLE_SYSTIME bit.
+
+While these need to be cleared during ixgbe_ptp_reset, it is wrong to clear
+them during ixgbe_ptp_start_cyclecounter. This function may be called
+during both reset and link status change. When link changes, the SYSTIME
+counter is still operating normally, but the cyclecounter should be updated
+to account for the possibly changed parameters.
+
+Clearing SYSTIME when link changes causes the timecounter to jump because
+the cycle counter now reads zero.
+
+Extract the SYSTIME initialization out to a new function and call this
+during ixgbe_ptp_reset. This prevents the timecounter adjustment and avoids
+an unnecessary reset of the current time.
+
+This also restores the original SYSTIME clearing that occurred during
+ixgbe_ptp_reset before the commit above.
+
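+Conceptually, the split after this patch looks like this (sketch;
+ixgbe_ptp_init_systime() is the new helper factored out below):
+
+	/* full PTP reset: recompute the cyclecounter parameters AND
+	 * restart SYSTIME from zero
+	 */
+	ixgbe_ptp_start_cyclecounter(adapter);
+	ixgbe_ptp_init_systime(adapter);
+
+	/* link status change: only refresh the cyclecounter parameters;
+	 * SYSTIME keeps running, so the timecounter no longer jumps
+	 */
+	ixgbe_ptp_start_cyclecounter(adapter);
+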
+Reported-by: Steve Payne <spayne@aurora.tech>
+Reported-by: Ilya Evenbach <ievenbach@aurora.tech>
+Fixes: a9763f3cb54c ("ixgbe: Update PTP to support X550EM_x devices")
+Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
+Tested-by: Gurucharan <gurucharanx.g@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 59 +++++++++++++++-----
+ 1 file changed, 46 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+index 336426a67ac1b..38cda659f65f4 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+@@ -1208,7 +1208,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
+ struct cyclecounter cc;
+ unsigned long flags;
+ u32 incval = 0;
+- u32 tsauxc = 0;
+ u32 fuse0 = 0;
+
+ /* For some of the boards below this mask is technically incorrect.
+@@ -1243,18 +1242,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
+ case ixgbe_mac_x550em_a:
+ case ixgbe_mac_X550:
+ cc.read = ixgbe_ptp_read_X550;
+-
+- /* enable SYSTIME counter */
+- IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0);
+- IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
+- IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
+- tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
+- IXGBE_WRITE_REG(hw, IXGBE_TSAUXC,
+- tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME);
+- IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS);
+- IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC);
+-
+- IXGBE_WRITE_FLUSH(hw);
+ break;
+ case ixgbe_mac_X540:
+ cc.read = ixgbe_ptp_read_82599;
+@@ -1286,6 +1273,50 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
+ spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+ }
+
++/**
++ * ixgbe_ptp_init_systime - Initialize SYSTIME registers
++ * @adapter: the ixgbe private board structure
++ *
++ * Initialize and start the SYSTIME registers.
++ */
++static void ixgbe_ptp_init_systime(struct ixgbe_adapter *adapter)
++{
++ struct ixgbe_hw *hw = &adapter->hw;
++ u32 tsauxc;
++
++ switch (hw->mac.type) {
++ case ixgbe_mac_X550EM_x:
++ case ixgbe_mac_x550em_a:
++ case ixgbe_mac_X550:
++ tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
++
++ /* Reset SYSTIME registers to 0 */
++ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0);
++ IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
++ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
++
++ /* Reset interrupt settings */
++ IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS);
++ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC);
++
++ /* Activate the SYSTIME counter */
++ IXGBE_WRITE_REG(hw, IXGBE_TSAUXC,
++ tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME);
++ break;
++ case ixgbe_mac_X540:
++ case ixgbe_mac_82599EB:
++ /* Reset SYSTIME registers to 0 */
++ IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
++ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
++ break;
++ default:
++ /* Other devices aren't supported */
++ return;
++ };
++
++ IXGBE_WRITE_FLUSH(hw);
++}
++
+ /**
+ * ixgbe_ptp_reset
+ * @adapter: the ixgbe private board structure
+@@ -1312,6 +1343,8 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
+
+ ixgbe_ptp_start_cyclecounter(adapter);
+
++ ixgbe_ptp_init_systime(adapter);
++
+ spin_lock_irqsave(&adapter->tmreg_lock, flags);
+ timecounter_init(&adapter->hw_tc, &adapter->hw_cc,
+ ktime_to_ns(ktime_get_real()));
+--
+2.35.1
+
--- /dev/null
+From 5cbfca68cfeeb2e8f417010ab5d4183116b0afa1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Aug 2022 12:00:03 -0400
+Subject: mm/smaps: don't access young/dirty bit if pte unpresent
+
+From: Peter Xu <peterx@redhat.com>
+
+[ Upstream commit efd4149342db2df41b1bbe68972ead853b30e444 ]
+
+These bits are only valid when the ptes are present. Introduce two
+booleans for them and set them to false when !pte_present(), for both
+the pte and pmd accounting paths.
+
+The bug was found during code reading and no real world issue has been
+reported, but logically such an error can cause incorrect readings for
+either smaps or smaps_rollup output on quite a few fields.
+
+For example, it could cause over-estimate on values like Shared_Dirty,
+Private_Dirty, Referenced. Or it could also cause under-estimate on
+values like LazyFree, Shared_Clean, Private_Clean.
+
+Link: https://lkml.kernel.org/r/20220805160003.58929-1-peterx@redhat.com
+Fixes: b1d4d9e0cbd0 ("proc/smaps: carefully handle migration entries")
+Fixes: c94b6923fa0a ("/proc/PID/smaps: Add PMD migration entry parsing")
+Signed-off-by: Peter Xu <peterx@redhat.com>
+Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Yang Shi <shy828301@gmail.com>
+Cc: Konstantin Khlebnikov <khlebnikov@openvz.org>
+Cc: Huang Ying <ying.huang@intel.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/proc/task_mmu.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 2d04e3470d4cd..313788bc0c307 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -525,10 +525,12 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
+ struct vm_area_struct *vma = walk->vma;
+ bool locked = !!(vma->vm_flags & VM_LOCKED);
+ struct page *page = NULL;
+- bool migration = false;
++ bool migration = false, young = false, dirty = false;
+
+ if (pte_present(*pte)) {
+ page = vm_normal_page(vma, addr, *pte);
++ young = pte_young(*pte);
++ dirty = pte_dirty(*pte);
+ } else if (is_swap_pte(*pte)) {
+ swp_entry_t swpent = pte_to_swp_entry(*pte);
+
+@@ -558,8 +560,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
+ if (!page)
+ return;
+
+- smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte),
+- locked, migration);
++ smaps_account(mss, page, false, young, dirty, locked, migration);
+ }
+
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+--
+2.35.1
+
--- /dev/null
+From 8a94ed2d30012bf20e25f0abe609c987fd857f2a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 18 Aug 2022 17:32:50 +0300
+Subject: net: dsa: microchip: keep compatibility with device tree blobs with
+ no phy-mode
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit 5fbb08eb7f945c7e8896ea39f03143ce66dfa4c7 ]
+
+DSA has multiple ways of specifying a MAC connection to an internal PHY.
+One requires a DT description like this:
+
+ port@0 {
+ reg = <0>;
+ phy-handle = <&internal_phy>;
+ phy-mode = "internal";
+ };
+
+(which is IMO the recommended approach, as it is the clearest
+description)
+
+but it is also possible to leave the specification as just:
+
+ port@0 {
+ reg = <0>;
+ }
+
+and if the driver implements ds->ops->phy_read and ds->ops->phy_write,
+the DSA framework "knows" it should create a ds->slave_mii_bus, and it
+should connect to a non-OF-based internal PHY on this MDIO bus, at an
+MDIO address equal to the port address.
+
+There is also an intermediary way of describing things:
+
+ port@0 {
+ reg = <0>;
+ phy-handle = <&internal_phy>;
+ };
+
+In case 2, DSA calls phylink_connect_phy() and in case 3, it calls
+phylink_of_phy_connect(). In both cases, phylink_create() has been
+called with a phy_interface_t of PHY_INTERFACE_MODE_NA, and in both
+cases, PHY_INTERFACE_MODE_NA is translated into phy->interface.
+
+It is important to note that phy_device_create() initializes
+dev->interface = PHY_INTERFACE_MODE_GMII, and so, when we use
+phylink_create(PHY_INTERFACE_MODE_NA), no one will override this, and we
+will end up with a PHY_INTERFACE_MODE_GMII interface inherited from the
+PHY.
+
+All this means that in order to maintain compatibility with device tree
+blobs where the phy-mode property is missing, we need to allow the
+"gmii" phy-mode and treat it as "internal".
+
+Fixes: 2c709e0bdad4 ("net: dsa: microchip: ksz8795: add phylink support")
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=216320
+Reported-by: Craig McQueen <craig@mcqueen.id.au>
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Reviewed-by: Alvin Å ipraga <alsi@bang-olufsen.dk>
+Tested-by: Rasmus Villemoes <rasmus.villemoes@prevas.dk>
+Link: https://lore.kernel.org/r/20220818143250.2797111-1-vladimir.oltean@nxp.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/microchip/ksz_common.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
+index 0f02c62b02685..c9389880ad1fa 100644
+--- a/drivers/net/dsa/microchip/ksz_common.c
++++ b/drivers/net/dsa/microchip/ksz_common.c
+@@ -453,9 +453,15 @@ void ksz_phylink_get_caps(struct dsa_switch *ds, int port,
+ if (dev->info->supports_rgmii[port])
+ phy_interface_set_rgmii(config->supported_interfaces);
+
+- if (dev->info->internal_phy[port])
++ if (dev->info->internal_phy[port]) {
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
++ /* Compatibility for phylib's default interface type when the
++ * phy-mode property is absent
++ */
++ __set_bit(PHY_INTERFACE_MODE_GMII,
++ config->supported_interfaces);
++ }
+
+ if (dev->dev_ops->get_caps)
+ dev->dev_ops->get_caps(dev, port, config);
+--
+2.35.1
+
--- /dev/null
+From 12f649c6d241008017ddcc12a39d1b5e6053a336 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Jun 2022 14:12:45 +0530
+Subject: net: dsa: microchip: ksz9477: cleanup the ksz9477_switch_detect
+
+From: Arun Ramadoss <arun.ramadoss@microchip.com>
+
+[ Upstream commit 27faa0aa85f6696d411bbbebaed9f0f723c2a175 ]
+
+ksz9477_switch_detect detects the chip id from location 0x00 and also
+checks gigabit compatibility and the number of ports based on the
+global_options0 register. To prepare for a common ksz switch detect
+function, everything other than the chip id read is moved to
+ksz9477_switch_init.
+
+Signed-off-by: Arun Ramadoss <arun.ramadoss@microchip.com>
+Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/microchip/ksz9477.c | 48 +++++++++++++----------------
+ 1 file changed, 22 insertions(+), 26 deletions(-)
+
+diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
+index ebad795e4e95f..876a801ac23a4 100644
+--- a/drivers/net/dsa/microchip/ksz9477.c
++++ b/drivers/net/dsa/microchip/ksz9477.c
+@@ -1365,12 +1365,30 @@ static u32 ksz9477_get_port_addr(int port, int offset)
+
+ static int ksz9477_switch_detect(struct ksz_device *dev)
+ {
+- u8 data8;
+- u8 id_hi;
+- u8 id_lo;
+ u32 id32;
+ int ret;
+
++ /* read chip id */
++ ret = ksz_read32(dev, REG_CHIP_ID0__1, &id32);
++ if (ret)
++ return ret;
++
++ dev_dbg(dev->dev, "Switch detect: ID=%08x\n", id32);
++
++ dev->chip_id = id32 & 0x00FFFF00;
++
++ return 0;
++}
++
++static int ksz9477_switch_init(struct ksz_device *dev)
++{
++ u8 data8;
++ int ret;
++
++ dev->ds->ops = &ksz9477_switch_ops;
++
++ dev->port_mask = (1 << dev->info->port_cnt) - 1;
++
+ /* turn off SPI DO Edge select */
+ ret = ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8);
+ if (ret)
+@@ -1381,10 +1399,6 @@ static int ksz9477_switch_detect(struct ksz_device *dev)
+ if (ret)
+ return ret;
+
+- /* read chip id */
+- ret = ksz_read32(dev, REG_CHIP_ID0__1, &id32);
+- if (ret)
+- return ret;
+ ret = ksz_read8(dev, REG_GLOBAL_OPTIONS, &data8);
+ if (ret)
+ return ret;
+@@ -1395,10 +1409,7 @@ static int ksz9477_switch_detect(struct ksz_device *dev)
+ /* Default capability is gigabit capable. */
+ dev->features = GBIT_SUPPORT;
+
+- dev_dbg(dev->dev, "Switch detect: ID=%08x%02x\n", id32, data8);
+- id_hi = (u8)(id32 >> 16);
+- id_lo = (u8)(id32 >> 8);
+- if ((id_lo & 0xf) == 3) {
++ if (dev->chip_id == KSZ9893_CHIP_ID) {
+ /* Chip is from KSZ9893 design. */
+ dev_info(dev->dev, "Found KSZ9893\n");
+ dev->features |= IS_9893;
+@@ -1416,21 +1427,6 @@ static int ksz9477_switch_detect(struct ksz_device *dev)
+ if (!(data8 & SW_GIGABIT_ABLE))
+ dev->features &= ~GBIT_SUPPORT;
+ }
+-
+- /* Change chip id to known ones so it can be matched against them. */
+- id32 = (id_hi << 16) | (id_lo << 8);
+-
+- dev->chip_id = id32;
+-
+- return 0;
+-}
+-
+-static int ksz9477_switch_init(struct ksz_device *dev)
+-{
+- dev->ds->ops = &ksz9477_switch_ops;
+-
+- dev->port_mask = (1 << dev->info->port_cnt) - 1;
+-
+ return 0;
+ }
+
+--
+2.35.1
+
--- /dev/null
+From 2ce5aed72b47d4c78a232cddda3c49c0d6d68de4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Jun 2022 14:12:46 +0530
+Subject: net: dsa: microchip: move switch chip_id detection to ksz_common
+
+From: Arun Ramadoss <arun.ramadoss@microchip.com>
+
+[ Upstream commit 91a98917a8839923d404a77c21646ca5fc9e330a ]
+
+KSZ87xx and KSZ88xx have their chip_id at register location 0x00, while
+KSZ9477 compatible switches and LAN937x switches have the same chip_id
+detection at locations 0x01 and 0x02. To provide common switch detect
+functionality for ksz switches, a ksz_switch_detect function is
+introduced.
+
+Signed-off-by: Arun Ramadoss <arun.ramadoss@microchip.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/microchip/ksz8795.c | 48 +--------------
+ drivers/net/dsa/microchip/ksz8795_reg.h | 16 -----
+ drivers/net/dsa/microchip/ksz9477.c | 21 -------
+ drivers/net/dsa/microchip/ksz9477_reg.h | 1 -
+ drivers/net/dsa/microchip/ksz_common.c | 78 +++++++++++++++++++++++--
+ drivers/net/dsa/microchip/ksz_common.h | 19 +++++-
+ 6 files changed, 93 insertions(+), 90 deletions(-)
+
+diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
+index 12a599d5e61a4..3cc51ee5fb6cc 100644
+--- a/drivers/net/dsa/microchip/ksz8795.c
++++ b/drivers/net/dsa/microchip/ksz8795.c
+@@ -1272,7 +1272,7 @@ static void ksz8_config_cpu_port(struct dsa_switch *ds)
+ continue;
+ if (!ksz_is_ksz88x3(dev)) {
+ ksz_pread8(dev, i, regs[P_REMOTE_STATUS], &remote);
+- if (remote & PORT_FIBER_MODE)
++ if (remote & KSZ8_PORT_FIBER_MODE)
+ p->fiber = 1;
+ }
+ if (p->fiber)
+@@ -1424,51 +1424,6 @@ static u32 ksz8_get_port_addr(int port, int offset)
+ return PORT_CTRL_ADDR(port, offset);
+ }
+
+-static int ksz8_switch_detect(struct ksz_device *dev)
+-{
+- u8 id1, id2;
+- u16 id16;
+- int ret;
+-
+- /* read chip id */
+- ret = ksz_read16(dev, REG_CHIP_ID0, &id16);
+- if (ret)
+- return ret;
+-
+- id1 = id16 >> 8;
+- id2 = id16 & SW_CHIP_ID_M;
+-
+- switch (id1) {
+- case KSZ87_FAMILY_ID:
+- if ((id2 != CHIP_ID_94 && id2 != CHIP_ID_95))
+- return -ENODEV;
+-
+- if (id2 == CHIP_ID_95) {
+- u8 val;
+-
+- id2 = 0x95;
+- ksz_read8(dev, REG_PORT_STATUS_0, &val);
+- if (val & PORT_FIBER_MODE)
+- id2 = 0x65;
+- } else if (id2 == CHIP_ID_94) {
+- id2 = 0x94;
+- }
+- break;
+- case KSZ88_FAMILY_ID:
+- if (id2 != CHIP_ID_63)
+- return -ENODEV;
+- break;
+- default:
+- dev_err(dev->dev, "invalid family id: %d\n", id1);
+- return -ENODEV;
+- }
+- id16 &= ~0xff;
+- id16 |= id2;
+- dev->chip_id = id16;
+-
+- return 0;
+-}
+-
+ static int ksz8_switch_init(struct ksz_device *dev)
+ {
+ struct ksz8 *ksz8 = dev->priv;
+@@ -1522,7 +1477,6 @@ static const struct ksz_dev_ops ksz8_dev_ops = {
+ .freeze_mib = ksz8_freeze_mib,
+ .port_init_cnt = ksz8_port_init_cnt,
+ .shutdown = ksz8_reset_switch,
+- .detect = ksz8_switch_detect,
+ .init = ksz8_switch_init,
+ .exit = ksz8_switch_exit,
+ };
+diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h
+index 4109433b6b6c2..b8f6ad7581bcd 100644
+--- a/drivers/net/dsa/microchip/ksz8795_reg.h
++++ b/drivers/net/dsa/microchip/ksz8795_reg.h
+@@ -14,23 +14,10 @@
+ #define KS_PRIO_M 0x3
+ #define KS_PRIO_S 2
+
+-#define REG_CHIP_ID0 0x00
+-
+-#define KSZ87_FAMILY_ID 0x87
+-#define KSZ88_FAMILY_ID 0x88
+-
+-#define REG_CHIP_ID1 0x01
+-
+-#define SW_CHIP_ID_M 0xF0
+-#define SW_CHIP_ID_S 4
+ #define SW_REVISION_M 0x0E
+ #define SW_REVISION_S 1
+ #define SW_START 0x01
+
+-#define CHIP_ID_94 0x60
+-#define CHIP_ID_95 0x90
+-#define CHIP_ID_63 0x30
+-
+ #define KSZ8863_REG_SW_RESET 0x43
+
+ #define KSZ8863_GLOBAL_SOFTWARE_RESET BIT(4)
+@@ -217,8 +204,6 @@
+ #define REG_PORT_4_STATUS_0 0x48
+
+ /* For KSZ8765. */
+-#define PORT_FIBER_MODE BIT(7)
+-
+ #define PORT_REMOTE_ASYM_PAUSE BIT(5)
+ #define PORT_REMOTE_SYM_PAUSE BIT(4)
+ #define PORT_REMOTE_100BTX_FD BIT(3)
+@@ -322,7 +307,6 @@
+
+ #define REG_PORT_CTRL_5 0x05
+
+-#define REG_PORT_STATUS_0 0x08
+ #define REG_PORT_STATUS_1 0x09
+ #define REG_PORT_LINK_MD_CTRL 0x0A
+ #define REG_PORT_LINK_MD_RESULT 0x0B
+diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
+index 876a801ac23a4..bcfdd505ca79a 100644
+--- a/drivers/net/dsa/microchip/ksz9477.c
++++ b/drivers/net/dsa/microchip/ksz9477.c
+@@ -1363,23 +1363,6 @@ static u32 ksz9477_get_port_addr(int port, int offset)
+ return PORT_CTRL_ADDR(port, offset);
+ }
+
+-static int ksz9477_switch_detect(struct ksz_device *dev)
+-{
+- u32 id32;
+- int ret;
+-
+- /* read chip id */
+- ret = ksz_read32(dev, REG_CHIP_ID0__1, &id32);
+- if (ret)
+- return ret;
+-
+- dev_dbg(dev->dev, "Switch detect: ID=%08x\n", id32);
+-
+- dev->chip_id = id32 & 0x00FFFF00;
+-
+- return 0;
+-}
+-
+ static int ksz9477_switch_init(struct ksz_device *dev)
+ {
+ u8 data8;
+@@ -1410,8 +1393,6 @@ static int ksz9477_switch_init(struct ksz_device *dev)
+ dev->features = GBIT_SUPPORT;
+
+ if (dev->chip_id == KSZ9893_CHIP_ID) {
+- /* Chip is from KSZ9893 design. */
+- dev_info(dev->dev, "Found KSZ9893\n");
+ dev->features |= IS_9893;
+
+ /* Chip does not support gigabit. */
+@@ -1419,7 +1400,6 @@ static int ksz9477_switch_init(struct ksz_device *dev)
+ dev->features &= ~GBIT_SUPPORT;
+ dev->phy_port_cnt = 2;
+ } else {
+- dev_info(dev->dev, "Found KSZ9477 or compatible\n");
+ /* Chip uses new XMII register definitions. */
+ dev->features |= NEW_XMII;
+
+@@ -1446,7 +1426,6 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
+ .freeze_mib = ksz9477_freeze_mib,
+ .port_init_cnt = ksz9477_port_init_cnt,
+ .shutdown = ksz9477_reset_switch,
+- .detect = ksz9477_switch_detect,
+ .init = ksz9477_switch_init,
+ .exit = ksz9477_switch_exit,
+ };
+diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
+index 7a2c8d4767aff..077e35ab11b54 100644
+--- a/drivers/net/dsa/microchip/ksz9477_reg.h
++++ b/drivers/net/dsa/microchip/ksz9477_reg.h
+@@ -25,7 +25,6 @@
+
+ #define REG_CHIP_ID2__1 0x0002
+
+-#define CHIP_ID_63 0x63
+ #define CHIP_ID_66 0x66
+ #define CHIP_ID_67 0x67
+ #define CHIP_ID_77 0x77
+diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
+index 92a500e1ccd21..4511e99823f57 100644
+--- a/drivers/net/dsa/microchip/ksz_common.c
++++ b/drivers/net/dsa/microchip/ksz_common.c
+@@ -930,6 +930,72 @@ void ksz_port_stp_state_set(struct dsa_switch *ds, int port,
+ }
+ EXPORT_SYMBOL_GPL(ksz_port_stp_state_set);
+
++static int ksz_switch_detect(struct ksz_device *dev)
++{
++ u8 id1, id2;
++ u16 id16;
++ u32 id32;
++ int ret;
++
++ /* read chip id */
++ ret = ksz_read16(dev, REG_CHIP_ID0, &id16);
++ if (ret)
++ return ret;
++
++ id1 = FIELD_GET(SW_FAMILY_ID_M, id16);
++ id2 = FIELD_GET(SW_CHIP_ID_M, id16);
++
++ switch (id1) {
++ case KSZ87_FAMILY_ID:
++ if (id2 == KSZ87_CHIP_ID_95) {
++ u8 val;
++
++ dev->chip_id = KSZ8795_CHIP_ID;
++
++ ksz_read8(dev, KSZ8_PORT_STATUS_0, &val);
++ if (val & KSZ8_PORT_FIBER_MODE)
++ dev->chip_id = KSZ8765_CHIP_ID;
++ } else if (id2 == KSZ87_CHIP_ID_94) {
++ dev->chip_id = KSZ8794_CHIP_ID;
++ } else {
++ return -ENODEV;
++ }
++ break;
++ case KSZ88_FAMILY_ID:
++ if (id2 == KSZ88_CHIP_ID_63)
++ dev->chip_id = KSZ8830_CHIP_ID;
++ else
++ return -ENODEV;
++ break;
++ default:
++ ret = ksz_read32(dev, REG_CHIP_ID0, &id32);
++ if (ret)
++ return ret;
++
++ dev->chip_rev = FIELD_GET(SW_REV_ID_M, id32);
++ id32 &= ~0xFF;
++
++ switch (id32) {
++ case KSZ9477_CHIP_ID:
++ case KSZ9897_CHIP_ID:
++ case KSZ9893_CHIP_ID:
++ case KSZ9567_CHIP_ID:
++ case LAN9370_CHIP_ID:
++ case LAN9371_CHIP_ID:
++ case LAN9372_CHIP_ID:
++ case LAN9373_CHIP_ID:
++ case LAN9374_CHIP_ID:
++ dev->chip_id = id32;
++ break;
++ default:
++ dev_err(dev->dev,
++ "unsupported switch detected %x)\n", id32);
++ return -ENODEV;
++ }
++ }
++ return 0;
++}
++
+ struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
+ {
+ struct dsa_switch *ds;
+@@ -986,10 +1052,9 @@ int ksz_switch_register(struct ksz_device *dev,
+ mutex_init(&dev->alu_mutex);
+ mutex_init(&dev->vlan_mutex);
+
+- dev->dev_ops = ops;
+-
+- if (dev->dev_ops->detect(dev))
+- return -EINVAL;
++ ret = ksz_switch_detect(dev);
++ if (ret)
++ return ret;
+
+ info = ksz_lookup_info(dev->chip_id);
+ if (!info)
+@@ -998,10 +1063,15 @@ int ksz_switch_register(struct ksz_device *dev,
+ /* Update the compatible info with the probed one */
+ dev->info = info;
+
++ dev_info(dev->dev, "found switch: %s, rev %i\n",
++ dev->info->dev_name, dev->chip_rev);
++
+ ret = ksz_check_device_id(dev);
+ if (ret)
+ return ret;
+
++ dev->dev_ops = ops;
++
+ ret = dev->dev_ops->init(dev);
+ if (ret)
+ return ret;
+diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
+index 8500eaedad67a..e6bc5fb2b1303 100644
+--- a/drivers/net/dsa/microchip/ksz_common.h
++++ b/drivers/net/dsa/microchip/ksz_common.h
+@@ -90,6 +90,7 @@ struct ksz_device {
+
+ /* chip specific data */
+ u32 chip_id;
++ u8 chip_rev;
+ int cpu_port; /* port connected to CPU */
+ int phy_port_cnt;
+ phy_interface_t compat_interface;
+@@ -182,7 +183,6 @@ struct ksz_dev_ops {
+ void (*freeze_mib)(struct ksz_device *dev, int port, bool freeze);
+ void (*port_init_cnt)(struct ksz_device *dev, int port);
+ int (*shutdown)(struct ksz_device *dev);
+- int (*detect)(struct ksz_device *dev);
+ int (*init)(struct ksz_device *dev);
+ void (*exit)(struct ksz_device *dev);
+ };
+@@ -353,6 +353,23 @@ static inline void ksz_regmap_unlock(void *__mtx)
+ #define PORT_RX_ENABLE BIT(1)
+ #define PORT_LEARN_DISABLE BIT(0)
+
++/* Switch ID Defines */
++#define REG_CHIP_ID0 0x00
++
++#define SW_FAMILY_ID_M GENMASK(15, 8)
++#define KSZ87_FAMILY_ID 0x87
++#define KSZ88_FAMILY_ID 0x88
++
++#define KSZ8_PORT_STATUS_0 0x08
++#define KSZ8_PORT_FIBER_MODE BIT(7)
++
++#define SW_CHIP_ID_M GENMASK(7, 4)
++#define KSZ87_CHIP_ID_94 0x6
++#define KSZ87_CHIP_ID_95 0x9
++#define KSZ88_CHIP_ID_63 0x3
++
++#define SW_REV_ID_M GENMASK(7, 4)
++
+ /* Regmap tables generation */
+ #define KSZ_SPI_OP_RD 3
+ #define KSZ_SPI_OP_WR 2
+--
+2.35.1
+
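The new common ksz_switch_detect() first classifies the 16-bit ID word with FIELD_GET() on SW_FAMILY_ID_M and SW_CHIP_ID_M, and only falls back to the 32-bit read for the KSZ9477/LAN937x families. A user-space sketch of that first decoding step, with plain shifts standing in for FIELD_GET() and sample ID words chosen purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define SW_FAMILY_ID_SHIFT      8       /* bits 15:8 of the ID word */
#define SW_CHIP_ID_SHIFT        4       /* bits 7:4 */
#define SW_CHIP_ID_MASK         0xF

static const char *decode_ksz8_id(uint16_t id16)
{
        uint8_t family = id16 >> SW_FAMILY_ID_SHIFT;
        uint8_t chip = (id16 >> SW_CHIP_ID_SHIFT) & SW_CHIP_ID_MASK;

        switch (family) {
        case 0x87:
                if (chip == 0x9)
                        return "KSZ8795 (KSZ8765 if the port is in fiber mode)";
                if (chip == 0x6)
                        return "KSZ8794";
                return "unknown KSZ87xx";
        case 0x88:
                return chip == 0x3 ? "KSZ8830 family" : "unknown KSZ88xx";
        default:
                return "not a KSZ8xxx ID word, try the 32-bit register";
        }
}

int main(void)
{
        /* Example ID words as they might be read from register 0x00. */
        const uint16_t samples[] = { 0x8790, 0x8760, 0x8830, 0x9477 };

        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("0x%04x -> %s\n", (unsigned)samples[i],
                       decode_ksz8_id(samples[i]));
        return 0;
}
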
--- /dev/null
+From 740047cfcfef2608e5e90a41d5f22206978a92c0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Jun 2022 14:12:47 +0530
+Subject: net: dsa: microchip: move tag_protocol to ksz_common
+
+From: Arun Ramadoss <arun.ramadoss@microchip.com>
+
+[ Upstream commit 534a0431e9e68959e2c0d71c141d5b911d66ad7c ]
+
+This patch moves the dsa get_tag_protocol hook to the ksz_common file.
+The tag_protocol is returned based on dev->chip_id.
+
+Signed-off-by: Arun Ramadoss <arun.ramadoss@microchip.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/microchip/ksz8795.c | 13 +------------
+ drivers/net/dsa/microchip/ksz9477.c | 14 +-------------
+ drivers/net/dsa/microchip/ksz_common.c | 24 ++++++++++++++++++++++++
+ drivers/net/dsa/microchip/ksz_common.h | 2 ++
+ 4 files changed, 28 insertions(+), 25 deletions(-)
+
+diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
+index 3cc51ee5fb6cc..041956e3c7b1a 100644
+--- a/drivers/net/dsa/microchip/ksz8795.c
++++ b/drivers/net/dsa/microchip/ksz8795.c
+@@ -898,17 +898,6 @@ static void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
+ }
+ }
+
+-static enum dsa_tag_protocol ksz8_get_tag_protocol(struct dsa_switch *ds,
+- int port,
+- enum dsa_tag_protocol mp)
+-{
+- struct ksz_device *dev = ds->priv;
+-
+- /* ksz88x3 uses the same tag schema as KSZ9893 */
+- return ksz_is_ksz88x3(dev) ?
+- DSA_TAG_PROTO_KSZ9893 : DSA_TAG_PROTO_KSZ8795;
+-}
+-
+ static u32 ksz8_sw_get_phy_flags(struct dsa_switch *ds, int port)
+ {
+ /* Silicon Errata Sheet (DS80000830A):
+@@ -1394,7 +1383,7 @@ static void ksz8_get_caps(struct dsa_switch *ds, int port,
+ }
+
+ static const struct dsa_switch_ops ksz8_switch_ops = {
+- .get_tag_protocol = ksz8_get_tag_protocol,
++ .get_tag_protocol = ksz_get_tag_protocol,
+ .get_phy_flags = ksz8_sw_get_phy_flags,
+ .setup = ksz8_setup,
+ .phy_read = ksz_phy_read16,
+diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
+index bcfdd505ca79a..31be767027feb 100644
+--- a/drivers/net/dsa/microchip/ksz9477.c
++++ b/drivers/net/dsa/microchip/ksz9477.c
+@@ -276,18 +276,6 @@ static void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
+ mutex_unlock(&mib->cnt_mutex);
+ }
+
+-static enum dsa_tag_protocol ksz9477_get_tag_protocol(struct dsa_switch *ds,
+- int port,
+- enum dsa_tag_protocol mp)
+-{
+- enum dsa_tag_protocol proto = DSA_TAG_PROTO_KSZ9477;
+- struct ksz_device *dev = ds->priv;
+-
+- if (dev->features & IS_9893)
+- proto = DSA_TAG_PROTO_KSZ9893;
+- return proto;
+-}
+-
+ static int ksz9477_phy_read16(struct dsa_switch *ds, int addr, int reg)
+ {
+ struct ksz_device *dev = ds->priv;
+@@ -1329,7 +1317,7 @@ static int ksz9477_setup(struct dsa_switch *ds)
+ }
+
+ static const struct dsa_switch_ops ksz9477_switch_ops = {
+- .get_tag_protocol = ksz9477_get_tag_protocol,
++ .get_tag_protocol = ksz_get_tag_protocol,
+ .setup = ksz9477_setup,
+ .phy_read = ksz9477_phy_read16,
+ .phy_write = ksz9477_phy_write16,
+diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
+index 4511e99823f57..0713a40685fa9 100644
+--- a/drivers/net/dsa/microchip/ksz_common.c
++++ b/drivers/net/dsa/microchip/ksz_common.c
+@@ -930,6 +930,30 @@ void ksz_port_stp_state_set(struct dsa_switch *ds, int port,
+ }
+ EXPORT_SYMBOL_GPL(ksz_port_stp_state_set);
+
++enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
++ int port, enum dsa_tag_protocol mp)
++{
++ struct ksz_device *dev = ds->priv;
++ enum dsa_tag_protocol proto = DSA_TAG_PROTO_NONE;
++
++ if (dev->chip_id == KSZ8795_CHIP_ID ||
++ dev->chip_id == KSZ8794_CHIP_ID ||
++ dev->chip_id == KSZ8765_CHIP_ID)
++ proto = DSA_TAG_PROTO_KSZ8795;
++
++ if (dev->chip_id == KSZ8830_CHIP_ID ||
++ dev->chip_id == KSZ9893_CHIP_ID)
++ proto = DSA_TAG_PROTO_KSZ9893;
++
++ if (dev->chip_id == KSZ9477_CHIP_ID ||
++ dev->chip_id == KSZ9897_CHIP_ID ||
++ dev->chip_id == KSZ9567_CHIP_ID)
++ proto = DSA_TAG_PROTO_KSZ9477;
++
++ return proto;
++}
++EXPORT_SYMBOL_GPL(ksz_get_tag_protocol);
++
+ static int ksz_switch_detect(struct ksz_device *dev)
+ {
+ u8 id1, id2;
+diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
+index e6bc5fb2b1303..21db6f79035fa 100644
+--- a/drivers/net/dsa/microchip/ksz_common.h
++++ b/drivers/net/dsa/microchip/ksz_common.h
+@@ -231,6 +231,8 @@ int ksz_port_mdb_del(struct dsa_switch *ds, int port,
+ int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
+ void ksz_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, uint8_t *buf);
++enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
++ int port, enum dsa_tag_protocol mp);
+
+ /* Common register access functions */
+
+--
+2.35.1
+
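ksz_get_tag_protocol() groups the chips by the tag format they emit on the CPU port. The same grouping written as a switch statement, with local enums standing in for the kernel's chip-ID constants and DSA_TAG_PROTO_* values (a sketch of the mapping, not the kernel API):

#include <stdio.h>

enum tag_proto { TAG_NONE, TAG_KSZ8795, TAG_KSZ9893, TAG_KSZ9477 };

enum chip {
        KSZ8765, KSZ8794, KSZ8795, KSZ8830,
        KSZ9893, KSZ9477, KSZ9897, KSZ9567,
        LAN937X,
};

static enum tag_proto tag_protocol_for(enum chip chip)
{
        switch (chip) {
        case KSZ8765:
        case KSZ8794:
        case KSZ8795:
                return TAG_KSZ8795;
        case KSZ8830:
        case KSZ9893:
                return TAG_KSZ9893;
        case KSZ9477:
        case KSZ9897:
        case KSZ9567:
                return TAG_KSZ9477;
        default:
                return TAG_NONE;
        }
}

int main(void)
{
        printf("KSZ8830 -> %d (expect TAG_KSZ9893=%d)\n",
               tag_protocol_for(KSZ8830), TAG_KSZ9893);
        printf("LAN937x -> %d (expect TAG_NONE=%d)\n",
               tag_protocol_for(LAN937X), TAG_NONE);
        return 0;
}

As in the patch, anything not listed (for instance the LAN937x parts recognised by the detect path) falls back to the NONE value.
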
--- /dev/null
+From 9019dbfb39a5f570c0acd29fb46783d67921838b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Jun 2022 14:12:50 +0530
+Subject: net: dsa: microchip: move the port mirror to ksz_common
+
+From: Arun Ramadoss <arun.ramadoss@microchip.com>
+
+[ Upstream commit 00a298bbc23876288b1cd04c38752d8e7ed53ae2 ]
+
+This patch updates the common port mirror add/del dsa_switch_ops in
+ksz_common.c. The individual switch implementations are invoked through
+the ksz_dev_ops function pointers.
+
+Signed-off-by: Arun Ramadoss <arun.ramadoss@microchip.com>
+Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/microchip/ksz8795.c | 13 ++++++-------
+ drivers/net/dsa/microchip/ksz9477.c | 12 ++++++------
+ drivers/net/dsa/microchip/ksz_common.c | 23 +++++++++++++++++++++++
+ drivers/net/dsa/microchip/ksz_common.h | 10 ++++++++++
+ 4 files changed, 45 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
+index 16e946dbd9d42..2e3d24a3260e1 100644
+--- a/drivers/net/dsa/microchip/ksz8795.c
++++ b/drivers/net/dsa/microchip/ksz8795.c
+@@ -1089,12 +1089,10 @@ static int ksz8_port_vlan_del(struct ksz_device *dev, int port,
+ return 0;
+ }
+
+-static int ksz8_port_mirror_add(struct dsa_switch *ds, int port,
++static int ksz8_port_mirror_add(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
+ {
+- struct ksz_device *dev = ds->priv;
+-
+ if (ingress) {
+ ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
+ dev->mirror_rx |= BIT(port);
+@@ -1113,10 +1111,9 @@ static int ksz8_port_mirror_add(struct dsa_switch *ds, int port,
+ return 0;
+ }
+
+-static void ksz8_port_mirror_del(struct dsa_switch *ds, int port,
++static void ksz8_port_mirror_del(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+ {
+- struct ksz_device *dev = ds->priv;
+ u8 data;
+
+ if (mirror->ingress) {
+@@ -1400,8 +1397,8 @@ static const struct dsa_switch_ops ksz8_switch_ops = {
+ .port_fdb_dump = ksz_port_fdb_dump,
+ .port_mdb_add = ksz_port_mdb_add,
+ .port_mdb_del = ksz_port_mdb_del,
+- .port_mirror_add = ksz8_port_mirror_add,
+- .port_mirror_del = ksz8_port_mirror_del,
++ .port_mirror_add = ksz_port_mirror_add,
++ .port_mirror_del = ksz_port_mirror_del,
+ };
+
+ static u32 ksz8_get_port_addr(int port, int offset)
+@@ -1464,6 +1461,8 @@ static const struct ksz_dev_ops ksz8_dev_ops = {
+ .vlan_filtering = ksz8_port_vlan_filtering,
+ .vlan_add = ksz8_port_vlan_add,
+ .vlan_del = ksz8_port_vlan_del,
++ .mirror_add = ksz8_port_mirror_add,
++ .mirror_del = ksz8_port_mirror_del,
+ .shutdown = ksz8_reset_switch,
+ .init = ksz8_switch_init,
+ .exit = ksz8_switch_exit,
+diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
+index 1bb994a9109cd..cd4a3088e9473 100644
+--- a/drivers/net/dsa/microchip/ksz9477.c
++++ b/drivers/net/dsa/microchip/ksz9477.c
+@@ -819,11 +819,10 @@ static int ksz9477_port_mdb_del(struct dsa_switch *ds, int port,
+ return ret;
+ }
+
+-static int ksz9477_port_mirror_add(struct dsa_switch *ds, int port,
++static int ksz9477_port_mirror_add(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
+ {
+- struct ksz_device *dev = ds->priv;
+ u8 data;
+ int p;
+
+@@ -859,10 +858,9 @@ static int ksz9477_port_mirror_add(struct dsa_switch *ds, int port,
+ return 0;
+ }
+
+-static void ksz9477_port_mirror_del(struct dsa_switch *ds, int port,
++static void ksz9477_port_mirror_del(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+ {
+- struct ksz_device *dev = ds->priv;
+ bool in_use = false;
+ u8 data;
+ int p;
+@@ -1335,8 +1333,8 @@ static const struct dsa_switch_ops ksz9477_switch_ops = {
+ .port_fdb_del = ksz9477_port_fdb_del,
+ .port_mdb_add = ksz9477_port_mdb_add,
+ .port_mdb_del = ksz9477_port_mdb_del,
+- .port_mirror_add = ksz9477_port_mirror_add,
+- .port_mirror_del = ksz9477_port_mirror_del,
++ .port_mirror_add = ksz_port_mirror_add,
++ .port_mirror_del = ksz_port_mirror_del,
+ .get_stats64 = ksz_get_stats64,
+ .port_change_mtu = ksz9477_change_mtu,
+ .port_max_mtu = ksz9477_max_mtu,
+@@ -1412,6 +1410,8 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
+ .vlan_filtering = ksz9477_port_vlan_filtering,
+ .vlan_add = ksz9477_port_vlan_add,
+ .vlan_del = ksz9477_port_vlan_del,
++ .mirror_add = ksz9477_port_mirror_add,
++ .mirror_del = ksz9477_port_mirror_del,
+ .shutdown = ksz9477_reset_switch,
+ .init = ksz9477_switch_init,
+ .exit = ksz9477_switch_exit,
+diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
+index 5db2b55152885..676669d353ea6 100644
+--- a/drivers/net/dsa/microchip/ksz_common.c
++++ b/drivers/net/dsa/microchip/ksz_common.c
+@@ -991,6 +991,29 @@ int ksz_port_vlan_del(struct dsa_switch *ds, int port,
+ }
+ EXPORT_SYMBOL_GPL(ksz_port_vlan_del);
+
++int ksz_port_mirror_add(struct dsa_switch *ds, int port,
++ struct dsa_mall_mirror_tc_entry *mirror,
++ bool ingress, struct netlink_ext_ack *extack)
++{
++ struct ksz_device *dev = ds->priv;
++
++ if (!dev->dev_ops->mirror_add)
++ return -EOPNOTSUPP;
++
++ return dev->dev_ops->mirror_add(dev, port, mirror, ingress, extack);
++}
++EXPORT_SYMBOL_GPL(ksz_port_mirror_add);
++
++void ksz_port_mirror_del(struct dsa_switch *ds, int port,
++ struct dsa_mall_mirror_tc_entry *mirror)
++{
++ struct ksz_device *dev = ds->priv;
++
++ if (dev->dev_ops->mirror_del)
++ dev->dev_ops->mirror_del(dev, port, mirror);
++}
++EXPORT_SYMBOL_GPL(ksz_port_mirror_del);
++
+ static int ksz_switch_detect(struct ksz_device *dev)
+ {
+ u8 id1, id2;
+diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
+index 1baa270859aa2..c724cbb437e29 100644
+--- a/drivers/net/dsa/microchip/ksz_common.h
++++ b/drivers/net/dsa/microchip/ksz_common.h
+@@ -187,6 +187,11 @@ struct ksz_dev_ops {
+ struct netlink_ext_ack *extack);
+ int (*vlan_del)(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan);
++ int (*mirror_add)(struct ksz_device *dev, int port,
++ struct dsa_mall_mirror_tc_entry *mirror,
++ bool ingress, struct netlink_ext_ack *extack);
++ void (*mirror_del)(struct ksz_device *dev, int port,
++ struct dsa_mall_mirror_tc_entry *mirror);
+ void (*freeze_mib)(struct ksz_device *dev, int port, bool freeze);
+ void (*port_init_cnt)(struct ksz_device *dev, int port);
+ int (*shutdown)(struct ksz_device *dev);
+@@ -247,6 +252,11 @@ int ksz_port_vlan_add(struct dsa_switch *ds, int port,
+ struct netlink_ext_ack *extack);
+ int ksz_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan);
++int ksz_port_mirror_add(struct dsa_switch *ds, int port,
++ struct dsa_mall_mirror_tc_entry *mirror,
++ bool ingress, struct netlink_ext_ack *extack);
++void ksz_port_mirror_del(struct dsa_switch *ds, int port,
++ struct dsa_mall_mirror_tc_entry *mirror);
+
+ /* Common register access functions */
+
+--
+2.35.1
+
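The wrapper pattern above (also used by the VLAN patch that follows) keeps the dsa_switch_ops entry in common code and dispatches to an optional per-chip callback, returning -EOPNOTSUPP when the chip provides none. A self-contained user-space sketch of that pattern; the struct and chip names are made up, only the NULL-check and -EOPNOTSUPP convention are taken from the patch:

#include <errno.h>
#include <stdio.h>

struct dev_ops {
        /* Optional: left NULL when a chip has no mirroring support. */
        int (*mirror_add)(int port, int ingress);
        void (*mirror_del)(int port);
};

struct device {
        const struct dev_ops *ops;
};

/* Common wrapper: reject the request if the chip provides no hook. */
static int port_mirror_add(struct device *dev, int port, int ingress)
{
        if (!dev->ops->mirror_add)
                return -EOPNOTSUPP;
        return dev->ops->mirror_add(port, ingress);
}

static int chip_a_mirror_add(int port, int ingress)
{
        printf("chip A: mirror %s traffic on port %d\n",
               ingress ? "ingress" : "egress", port);
        return 0;
}

static const struct dev_ops chip_a_ops = { .mirror_add = chip_a_mirror_add };
static const struct dev_ops chip_b_ops = { 0 };         /* no mirroring */

int main(void)
{
        struct device a = { &chip_a_ops };
        struct device b = { &chip_b_ops };

        printf("chip A returned %d\n", port_mirror_add(&a, 1, 1));
        printf("chip B returned %d (-EOPNOTSUPP)\n", port_mirror_add(&b, 1, 1));
        return 0;
}
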
--- /dev/null
+From a090d842091695381a2717e166e7231b90416f61 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Jun 2022 14:12:49 +0530
+Subject: net: dsa: microchip: move vlan functionality to ksz_common
+
+From: Arun Ramadoss <arun.ramadoss@microchip.com>
+
+[ Upstream commit f0d997e31bb307c7aa046c4992c568547fd25195 ]
+
+This patch moves the vlan dsa_switch_ops such as vlan_add, vlan_del and
+vlan_filtering from the individual files ksz8795.c and ksz9477.c to the
+ksz_common.c file.
+
+Signed-off-by: Arun Ramadoss <arun.ramadoss@microchip.com>
+Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/microchip/ksz8795.c | 19 +++++++------
+ drivers/net/dsa/microchip/ksz9477.c | 19 +++++++------
+ drivers/net/dsa/microchip/ksz_common.c | 37 ++++++++++++++++++++++++++
+ drivers/net/dsa/microchip/ksz_common.h | 14 ++++++++++
+ 4 files changed, 69 insertions(+), 20 deletions(-)
+
+diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
+index 041956e3c7b1a..16e946dbd9d42 100644
+--- a/drivers/net/dsa/microchip/ksz8795.c
++++ b/drivers/net/dsa/microchip/ksz8795.c
+@@ -958,11 +958,9 @@ static void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
+ }
+ }
+
+-static int ksz8_port_vlan_filtering(struct dsa_switch *ds, int port, bool flag,
++static int ksz8_port_vlan_filtering(struct ksz_device *dev, int port, bool flag,
+ struct netlink_ext_ack *extack)
+ {
+- struct ksz_device *dev = ds->priv;
+-
+ if (ksz_is_ksz88x3(dev))
+ return -ENOTSUPP;
+
+@@ -987,12 +985,11 @@ static void ksz8_port_enable_pvid(struct ksz_device *dev, int port, bool state)
+ }
+ }
+
+-static int ksz8_port_vlan_add(struct dsa_switch *ds, int port,
++static int ksz8_port_vlan_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+ {
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+- struct ksz_device *dev = ds->priv;
+ struct ksz_port *p = &dev->ports[port];
+ u16 data, new_pvid = 0;
+ u8 fid, member, valid;
+@@ -1060,10 +1057,9 @@ static int ksz8_port_vlan_add(struct dsa_switch *ds, int port,
+ return 0;
+ }
+
+-static int ksz8_port_vlan_del(struct dsa_switch *ds, int port,
++static int ksz8_port_vlan_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+ {
+- struct ksz_device *dev = ds->priv;
+ u16 data, pvid;
+ u8 fid, member, valid;
+
+@@ -1398,9 +1394,9 @@ static const struct dsa_switch_ops ksz8_switch_ops = {
+ .port_bridge_leave = ksz_port_bridge_leave,
+ .port_stp_state_set = ksz8_port_stp_state_set,
+ .port_fast_age = ksz_port_fast_age,
+- .port_vlan_filtering = ksz8_port_vlan_filtering,
+- .port_vlan_add = ksz8_port_vlan_add,
+- .port_vlan_del = ksz8_port_vlan_del,
++ .port_vlan_filtering = ksz_port_vlan_filtering,
++ .port_vlan_add = ksz_port_vlan_add,
++ .port_vlan_del = ksz_port_vlan_del,
+ .port_fdb_dump = ksz_port_fdb_dump,
+ .port_mdb_add = ksz_port_mdb_add,
+ .port_mdb_del = ksz_port_mdb_del,
+@@ -1465,6 +1461,9 @@ static const struct ksz_dev_ops ksz8_dev_ops = {
+ .r_mib_pkt = ksz8_r_mib_pkt,
+ .freeze_mib = ksz8_freeze_mib,
+ .port_init_cnt = ksz8_port_init_cnt,
++ .vlan_filtering = ksz8_port_vlan_filtering,
++ .vlan_add = ksz8_port_vlan_add,
++ .vlan_del = ksz8_port_vlan_del,
+ .shutdown = ksz8_reset_switch,
+ .init = ksz8_switch_init,
+ .exit = ksz8_switch_exit,
+diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
+index 31be767027feb..1bb994a9109cd 100644
+--- a/drivers/net/dsa/microchip/ksz9477.c
++++ b/drivers/net/dsa/microchip/ksz9477.c
+@@ -377,12 +377,10 @@ static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
+ }
+ }
+
+-static int ksz9477_port_vlan_filtering(struct dsa_switch *ds, int port,
++static int ksz9477_port_vlan_filtering(struct ksz_device *dev, int port,
+ bool flag,
+ struct netlink_ext_ack *extack)
+ {
+- struct ksz_device *dev = ds->priv;
+-
+ if (flag) {
+ ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
+ PORT_VLAN_LOOKUP_VID_0, true);
+@@ -396,11 +394,10 @@ static int ksz9477_port_vlan_filtering(struct dsa_switch *ds, int port,
+ return 0;
+ }
+
+-static int ksz9477_port_vlan_add(struct dsa_switch *ds, int port,
++static int ksz9477_port_vlan_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+ {
+- struct ksz_device *dev = ds->priv;
+ u32 vlan_table[3];
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ int err;
+@@ -433,10 +430,9 @@ static int ksz9477_port_vlan_add(struct dsa_switch *ds, int port,
+ return 0;
+ }
+
+-static int ksz9477_port_vlan_del(struct dsa_switch *ds, int port,
++static int ksz9477_port_vlan_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+ {
+- struct ksz_device *dev = ds->priv;
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ u32 vlan_table[3];
+ u16 pvid;
+@@ -1331,9 +1327,9 @@ static const struct dsa_switch_ops ksz9477_switch_ops = {
+ .port_bridge_leave = ksz_port_bridge_leave,
+ .port_stp_state_set = ksz9477_port_stp_state_set,
+ .port_fast_age = ksz_port_fast_age,
+- .port_vlan_filtering = ksz9477_port_vlan_filtering,
+- .port_vlan_add = ksz9477_port_vlan_add,
+- .port_vlan_del = ksz9477_port_vlan_del,
++ .port_vlan_filtering = ksz_port_vlan_filtering,
++ .port_vlan_add = ksz_port_vlan_add,
++ .port_vlan_del = ksz_port_vlan_del,
+ .port_fdb_dump = ksz9477_port_fdb_dump,
+ .port_fdb_add = ksz9477_port_fdb_add,
+ .port_fdb_del = ksz9477_port_fdb_del,
+@@ -1413,6 +1409,9 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
+ .r_mib_stat64 = ksz_r_mib_stats64,
+ .freeze_mib = ksz9477_freeze_mib,
+ .port_init_cnt = ksz9477_port_init_cnt,
++ .vlan_filtering = ksz9477_port_vlan_filtering,
++ .vlan_add = ksz9477_port_vlan_add,
++ .vlan_del = ksz9477_port_vlan_del,
+ .shutdown = ksz9477_reset_switch,
+ .init = ksz9477_switch_init,
+ .exit = ksz9477_switch_exit,
+diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
+index 0713a40685fa9..5db2b55152885 100644
+--- a/drivers/net/dsa/microchip/ksz_common.c
++++ b/drivers/net/dsa/microchip/ksz_common.c
+@@ -954,6 +954,43 @@ enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
+ }
+ EXPORT_SYMBOL_GPL(ksz_get_tag_protocol);
+
++int ksz_port_vlan_filtering(struct dsa_switch *ds, int port,
++ bool flag, struct netlink_ext_ack *extack)
++{
++ struct ksz_device *dev = ds->priv;
++
++ if (!dev->dev_ops->vlan_filtering)
++ return -EOPNOTSUPP;
++
++ return dev->dev_ops->vlan_filtering(dev, port, flag, extack);
++}
++EXPORT_SYMBOL_GPL(ksz_port_vlan_filtering);
++
++int ksz_port_vlan_add(struct dsa_switch *ds, int port,
++ const struct switchdev_obj_port_vlan *vlan,
++ struct netlink_ext_ack *extack)
++{
++ struct ksz_device *dev = ds->priv;
++
++ if (!dev->dev_ops->vlan_add)
++ return -EOPNOTSUPP;
++
++ return dev->dev_ops->vlan_add(dev, port, vlan, extack);
++}
++EXPORT_SYMBOL_GPL(ksz_port_vlan_add);
++
++int ksz_port_vlan_del(struct dsa_switch *ds, int port,
++ const struct switchdev_obj_port_vlan *vlan)
++{
++ struct ksz_device *dev = ds->priv;
++
++ if (!dev->dev_ops->vlan_del)
++ return -EOPNOTSUPP;
++
++ return dev->dev_ops->vlan_del(dev, port, vlan);
++}
++EXPORT_SYMBOL_GPL(ksz_port_vlan_del);
++
+ static int ksz_switch_detect(struct ksz_device *dev)
+ {
+ u8 id1, id2;
+diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
+index 21db6f79035fa..1baa270859aa2 100644
+--- a/drivers/net/dsa/microchip/ksz_common.h
++++ b/drivers/net/dsa/microchip/ksz_common.h
+@@ -180,6 +180,13 @@ struct ksz_dev_ops {
+ void (*r_mib_pkt)(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt);
+ void (*r_mib_stat64)(struct ksz_device *dev, int port);
++ int (*vlan_filtering)(struct ksz_device *dev, int port,
++ bool flag, struct netlink_ext_ack *extack);
++ int (*vlan_add)(struct ksz_device *dev, int port,
++ const struct switchdev_obj_port_vlan *vlan,
++ struct netlink_ext_ack *extack);
++ int (*vlan_del)(struct ksz_device *dev, int port,
++ const struct switchdev_obj_port_vlan *vlan);
+ void (*freeze_mib)(struct ksz_device *dev, int port, bool freeze);
+ void (*port_init_cnt)(struct ksz_device *dev, int port);
+ int (*shutdown)(struct ksz_device *dev);
+@@ -233,6 +240,13 @@ void ksz_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, uint8_t *buf);
+ enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
+ int port, enum dsa_tag_protocol mp);
++int ksz_port_vlan_filtering(struct dsa_switch *ds, int port,
++ bool flag, struct netlink_ext_ack *extack);
++int ksz_port_vlan_add(struct dsa_switch *ds, int port,
++ const struct switchdev_obj_port_vlan *vlan,
++ struct netlink_ext_ack *extack);
++int ksz_port_vlan_del(struct dsa_switch *ds, int port,
++ const struct switchdev_obj_port_vlan *vlan);
+
+ /* Common register access functions */
+
+--
+2.35.1
+
--- /dev/null
+From 359e28f8fadf8ef85c04a391542caefe3abf8164 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Jun 2022 14:12:52 +0530
+Subject: net: dsa: microchip: update the ksz_phylink_get_caps
+
+From: Arun Ramadoss <arun.ramadoss@microchip.com>
+
+[ Upstream commit 7012033ce10e0968e6cb82709aa0ed7f2080b61e ]
+
+This patch assigns the phylink_get_caps in ksz8795 and ksz9477 to
+ksz_phylink_get_caps, and updates their mac_capabilities in the
+respective ksz_dev_ops.
+
+Signed-off-by: Arun Ramadoss <arun.ramadoss@microchip.com>
+Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/microchip/ksz8795.c | 9 +++------
+ drivers/net/dsa/microchip/ksz9477.c | 7 +++----
+ drivers/net/dsa/microchip/ksz_common.c | 3 +++
+ drivers/net/dsa/microchip/ksz_common.h | 2 ++
+ 4 files changed, 11 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
+index 2e3d24a3260e1..c771797fd902f 100644
+--- a/drivers/net/dsa/microchip/ksz8795.c
++++ b/drivers/net/dsa/microchip/ksz8795.c
+@@ -1353,13 +1353,9 @@ static int ksz8_setup(struct dsa_switch *ds)
+ return ksz8_handle_global_errata(ds);
+ }
+
+-static void ksz8_get_caps(struct dsa_switch *ds, int port,
++static void ksz8_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config)
+ {
+- struct ksz_device *dev = ds->priv;
+-
+- ksz_phylink_get_caps(ds, port, config);
+-
+ config->mac_capabilities = MAC_10 | MAC_100;
+
+ /* Silicon Errata Sheet (DS80000830A):
+@@ -1381,7 +1377,7 @@ static const struct dsa_switch_ops ksz8_switch_ops = {
+ .setup = ksz8_setup,
+ .phy_read = ksz_phy_read16,
+ .phy_write = ksz_phy_write16,
+- .phylink_get_caps = ksz8_get_caps,
++ .phylink_get_caps = ksz_phylink_get_caps,
+ .phylink_mac_link_down = ksz_mac_link_down,
+ .port_enable = ksz_enable_port,
+ .get_strings = ksz_get_strings,
+@@ -1463,6 +1459,7 @@ static const struct ksz_dev_ops ksz8_dev_ops = {
+ .vlan_del = ksz8_port_vlan_del,
+ .mirror_add = ksz8_port_mirror_add,
+ .mirror_del = ksz8_port_mirror_del,
++ .get_caps = ksz8_get_caps,
+ .shutdown = ksz8_reset_switch,
+ .init = ksz8_switch_init,
+ .exit = ksz8_switch_exit,
+diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
+index cd4a3088e9473..125124fdefbf4 100644
+--- a/drivers/net/dsa/microchip/ksz9477.c
++++ b/drivers/net/dsa/microchip/ksz9477.c
+@@ -1082,11 +1082,9 @@ static void ksz9477_phy_errata_setup(struct ksz_device *dev, int port)
+ ksz9477_port_mmd_write(dev, port, 0x1c, 0x20, 0xeeee);
+ }
+
+-static void ksz9477_get_caps(struct dsa_switch *ds, int port,
++static void ksz9477_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config)
+ {
+- ksz_phylink_get_caps(ds, port, config);
+-
+ config->mac_capabilities = MAC_10 | MAC_100 | MAC_1000FD |
+ MAC_ASYM_PAUSE | MAC_SYM_PAUSE;
+ }
+@@ -1316,7 +1314,7 @@ static const struct dsa_switch_ops ksz9477_switch_ops = {
+ .phy_read = ksz9477_phy_read16,
+ .phy_write = ksz9477_phy_write16,
+ .phylink_mac_link_down = ksz_mac_link_down,
+- .phylink_get_caps = ksz9477_get_caps,
++ .phylink_get_caps = ksz_phylink_get_caps,
+ .port_enable = ksz_enable_port,
+ .get_strings = ksz_get_strings,
+ .get_ethtool_stats = ksz_get_ethtool_stats,
+@@ -1412,6 +1410,7 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
+ .vlan_del = ksz9477_port_vlan_del,
+ .mirror_add = ksz9477_port_mirror_add,
+ .mirror_del = ksz9477_port_mirror_del,
++ .get_caps = ksz9477_get_caps,
+ .shutdown = ksz9477_reset_switch,
+ .init = ksz9477_switch_init,
+ .exit = ksz9477_switch_exit,
+diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
+index 676669d353ea6..0f02c62b02685 100644
+--- a/drivers/net/dsa/microchip/ksz_common.c
++++ b/drivers/net/dsa/microchip/ksz_common.c
+@@ -456,6 +456,9 @@ void ksz_phylink_get_caps(struct dsa_switch *ds, int port,
+ if (dev->info->internal_phy[port])
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
++
++ if (dev->dev_ops->get_caps)
++ dev->dev_ops->get_caps(dev, port, config);
+ }
+ EXPORT_SYMBOL_GPL(ksz_phylink_get_caps);
+
+diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
+index c724cbb437e29..10f9ef2dbf1ca 100644
+--- a/drivers/net/dsa/microchip/ksz_common.h
++++ b/drivers/net/dsa/microchip/ksz_common.h
+@@ -192,6 +192,8 @@ struct ksz_dev_ops {
+ bool ingress, struct netlink_ext_ack *extack);
+ void (*mirror_del)(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror);
++ void (*get_caps)(struct ksz_device *dev, int port,
++ struct phylink_config *config);
+ void (*freeze_mib)(struct ksz_device *dev, int port, bool freeze);
+ void (*port_init_cnt)(struct ksz_device *dev, int port);
+ int (*shutdown)(struct ksz_device *dev);
+--
+2.35.1
+
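After this patch the common ksz_phylink_get_caps() fills the generic phylink fields and then calls an optional per-chip get_caps() hook, which only sets mac_capabilities. A rough user-space sketch of that flow; the CAP_* flags and structures are local stand-ins, not the kernel's MAC_* capabilities or phylink types:

#include <stdint.h>
#include <stdio.h>

#define CAP_10          (1u << 0)
#define CAP_100         (1u << 1)
#define CAP_1000FD      (1u << 2)
#define CAP_SYM_PAUSE   (1u << 3)

struct phy_config {
        uint32_t mac_capabilities;
};

struct sw_device;

struct sw_ops {
        void (*get_caps)(const struct sw_device *dev, int port,
                         struct phy_config *cfg);
};

struct sw_device {
        const struct sw_ops *ops;
};

/* Common entry point: generic setup first, then the optional chip hook. */
static void common_get_caps(const struct sw_device *dev, int port,
                            struct phy_config *cfg)
{
        cfg->mac_capabilities = 0;      /* common fields would be set here */
        if (dev->ops->get_caps)
                dev->ops->get_caps(dev, port, cfg);
}

/* Per-chip hook, in the spirit of ksz9477_get_caps(). */
static void gige_get_caps(const struct sw_device *dev, int port,
                          struct phy_config *cfg)
{
        (void)dev;
        (void)port;
        cfg->mac_capabilities = CAP_10 | CAP_100 | CAP_1000FD | CAP_SYM_PAUSE;
}

int main(void)
{
        static const struct sw_ops ops = { .get_caps = gige_get_caps };
        const struct sw_device dev = { &ops };
        struct phy_config cfg;

        common_get_caps(&dev, 0, &cfg);
        printf("port 0 mac_capabilities: 0x%x\n",
               (unsigned)cfg.mac_capabilities);
        return 0;
}
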
--- /dev/null
+From 48314205798f8e008a8da0274acdc4bf1f68b753 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 6 Jun 2022 21:49:00 +0200
+Subject: net: ethernet: mtk_eth_soc: enable rx cksum offload for MTK_NETSYS_V2
+
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+
+[ Upstream commit da6e113ff010815fdd21ee1e9af2e8d179a2680f ]
+
+Enable rx checksum offload for mt7986 chipset.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://lore.kernel.org/r/c8699805c18f7fd38315fcb8da2787676d83a32c.1654544585.git.lorenzo@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index 59c9a10f83ba5..6beb3d4873a37 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -1444,8 +1444,8 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
+ int done = 0, bytes = 0;
+
+ while (done < budget) {
++ unsigned int pktlen, *rxdcsum;
+ struct net_device *netdev;
+- unsigned int pktlen;
+ dma_addr_t dma_addr;
+ u32 hash, reason;
+ int mac = 0;
+@@ -1512,7 +1512,13 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
+ pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
+ skb->dev = netdev;
+ skb_put(skb, pktlen);
+- if (trxd.rxd4 & eth->soc->txrx.rx_dma_l4_valid)
++
++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
++ rxdcsum = &trxd.rxd3;
++ else
++ rxdcsum = &trxd.rxd4;
++
++ if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
+@@ -3761,6 +3767,7 @@ static const struct mtk_soc_data mt7986_data = {
+ .txd_size = sizeof(struct mtk_tx_dma_v2),
+ .rxd_size = sizeof(struct mtk_rx_dma_v2),
+ .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
++ .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ },
+--
+2.35.1
+
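The hunk above selects which descriptor word carries the L4-checksum-valid bit: rxd3 on MTK_NETSYS_V2, rxd4 otherwise. A small user-space sketch of that selection, with an assumed bit position and made-up descriptor values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RX_DMA_L4_VALID (1u << 24)      /* example bit position only */

struct rx_desc {
        uint32_t rxd3;
        uint32_t rxd4;
};

static bool rx_csum_ok(const struct rx_desc *d, bool is_netsys_v2)
{
        /* Pick the word that carries the checksum status on this SoC. */
        const uint32_t *rxdcsum = is_netsys_v2 ? &d->rxd3 : &d->rxd4;

        return *rxdcsum & RX_DMA_L4_VALID;
}

int main(void)
{
        const struct rx_desc d = { .rxd3 = RX_DMA_L4_VALID, .rxd4 = 0 };

        printf("v2 layout: %s\n",
               rx_csum_ok(&d, true) ? "CHECKSUM_UNNECESSARY" : "none asserted");
        printf("v1 layout: %s\n",
               rx_csum_ok(&d, false) ? "CHECKSUM_UNNECESSARY" : "none asserted");
        return 0;
}
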
--- /dev/null
+From 59f0257329e9686e8be867b131bace30c73db540 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Aug 2022 14:24:07 +0200
+Subject: net: ethernet: mtk_eth_soc: fix hw hash reporting for MTK_NETSYS_V2
+
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+
+[ Upstream commit 0cf731f9ebb5bf6f252055bebf4463a5c0bd490b ]
+
+Properly report the hw rx hash for the mt7986 chipset according to the
+new dma descriptor layout.
+
+Fixes: 197c9e9b17b11 ("net: ethernet: mtk_eth_soc: introduce support for mt7986 chipset")
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://lore.kernel.org/r/091394ea4e705fbb35f828011d98d0ba33808f69.1661257293.git.lorenzo@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 22 +++++++++++----------
+ drivers/net/ethernet/mediatek/mtk_eth_soc.h | 5 +++++
+ 2 files changed, 17 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index 6beb3d4873a37..dcf0aac0aa65d 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -1513,10 +1513,19 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
+ skb->dev = netdev;
+ skb_put(skb, pktlen);
+
+- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
++ hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
++ if (hash != MTK_RXD5_FOE_ENTRY)
++ skb_set_hash(skb, jhash_1word(hash, 0),
++ PKT_HASH_TYPE_L4);
+ rxdcsum = &trxd.rxd3;
+- else
++ } else {
++ hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
++ if (hash != MTK_RXD4_FOE_ENTRY)
++ skb_set_hash(skb, jhash_1word(hash, 0),
++ PKT_HASH_TYPE_L4);
+ rxdcsum = &trxd.rxd4;
++ }
+
+ if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+@@ -1525,16 +1534,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
+ skb->protocol = eth_type_trans(skb, netdev);
+ bytes += pktlen;
+
+- hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
+- if (hash != MTK_RXD4_FOE_ENTRY) {
+- hash = jhash_1word(hash, 0);
+- skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
+- }
+-
+ reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
+ if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
+- mtk_ppe_check_skb(eth->ppe, skb,
+- trxd.rxd4 & MTK_RXD4_FOE_ENTRY);
++ mtk_ppe_check_skb(eth->ppe, skb, hash);
+
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+index 0a632896451a4..98d6a6d047e32 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -307,6 +307,11 @@
+ #define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */
+ #define RX_DMA_SPECIAL_TAG BIT(22)
+
++/* PDMA descriptor rxd5 */
++#define MTK_RXD5_FOE_ENTRY GENMASK(14, 0)
++#define MTK_RXD5_PPE_CPU_REASON GENMASK(22, 18)
++#define MTK_RXD5_SRC_PORT GENMASK(29, 26)
++
+ #define RX_DMA_GET_SPORT(x) (((x) >> 19) & 0xf)
+ #define RX_DMA_GET_SPORT_V2(x) (((x) >> 26) & 0x7)
+
+--
+2.35.1
+
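The fix extracts the FOE entry index from rxd5 (v2) or rxd4 (v1) and treats a value equal to the field mask as "no entry", in which case no hash is reported. A sketch of that check; the rxd4 mask width is an assumption here and the mixing step merely stands in for jhash_1word():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RXD5_FOE_ENTRY  0x7fffu         /* GENMASK(14, 0), from the patch */
#define RXD4_FOE_ENTRY  0x3fffu         /* assumed width for the v1 field */

static bool foe_hash(uint32_t rxd, uint32_t mask, uint32_t *hash_out)
{
        uint32_t entry = rxd & mask;

        if (entry == mask)              /* all ones means: no flow entry */
                return false;

        /* Any mixing step does for the sketch; the kernel uses jhash_1word(). */
        *hash_out = entry * 2654435761u;
        return true;
}

int main(void)
{
        uint32_t hash;

        if (foe_hash(0x00001234, RXD5_FOE_ENTRY, &hash))
                printf("v2 descriptor: report hash 0x%08x\n", (unsigned)hash);
        if (!foe_hash(RXD5_FOE_ENTRY, RXD5_FOE_ENTRY, &hash))
                printf("v2 descriptor: no FOE entry, skip skb_set_hash\n");
        return 0;
}
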
--- /dev/null
+From 41db6dab8a567b55c4f7d8329d6f24e38ab6d6d0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Aug 2022 10:46:58 -0700
+Subject: net: Fix a data-race around gro_normal_batch.
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit 8db24af3f02ebdbf302196006ebb270c4c3a2706 ]
+
+While reading gro_normal_batch, it can be changed concurrently.
+Thus, we need to add READ_ONCE() to its reader.
+
+Fixes: 323ebb61e32b ("net: use listified RX for handling GRO_NORMAL skbs")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Acked-by: Edward Cree <ecree.xilinx@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/gro.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/net/gro.h b/include/net/gro.h
+index 867656b0739c0..24003dea8fa4d 100644
+--- a/include/net/gro.h
++++ b/include/net/gro.h
+@@ -439,7 +439,7 @@ static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb,
+ {
+ list_add_tail(&skb->list, &napi->rx_list);
+ napi->rx_count += segs;
+- if (napi->rx_count >= gro_normal_batch)
++ if (napi->rx_count >= READ_ONCE(gro_normal_batch))
+ gro_normal_list(napi);
+ }
+
+--
+2.35.1
+
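This and the following data-race patches all apply the same one-line idiom: a sysctl knob that another context may rewrite is read exactly once through READ_ONCE(). A user-space sketch of the idea (GNU C, link with -lpthread); the simplified READ_ONCE/WRITE_ONCE macros below only illustrate the single, untorn access the kernel macros provide for word-sized variables, they are not the kernel implementation:

#include <pthread.h>
#include <stdio.h>

/* Simplified illustrations of the kernel macros (GNU C typeof). */
#define READ_ONCE(x)    (*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&(x) = (v))

static int gro_batch = 8;       /* stand-in for the gro_normal_batch sysctl */

/* "Sysctl writer": retunes the knob while the datapath is running. */
static void *tuner(void *arg)
{
        (void)arg;
        for (int v = 1; v <= 64; v++)
                WRITE_ONCE(gro_batch, v);
        return NULL;
}

int main(void)
{
        pthread_t t;
        long flushes = 0;

        pthread_create(&t, NULL, tuner, NULL);

        for (int count = 1; count <= 1000000; count++) {
                /* Read the knob exactly once per decision, as the patch does. */
                if (count % READ_ONCE(gro_batch) == 0)
                        flushes++;
        }

        pthread_join(&t, NULL);
        printf("flushed %ld batches\n", flushes);
        return 0;
}

The same pattern is what the subsequent patches in this series apply to netdev_budget, netdev_budget_usecs, netdev_unregister_timeout_secs, the busy-poll knobs, somaxconn, tstamp_allow_data, netdev_max_backlog, netdev_tstamp_prequeue and the [rw]mem defaults.
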
--- /dev/null
+From b33f86643b2928077ff7c409c9da2979aa300837 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Aug 2022 10:46:53 -0700
+Subject: net: Fix a data-race around netdev_budget.
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit 2e0c42374ee32e72948559d2ae2f7ba3dc6b977c ]
+
+While reading netdev_budget, it can be changed concurrently.
+Thus, we need to add READ_ONCE() to its reader.
+
+Fixes: 51b0bdedb8e7 ("[NET]: Separate two usages of netdev_max_backlog.")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/dev.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 34282b93c3f60..a330f93629314 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -6647,7 +6647,7 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
+ struct softnet_data *sd = this_cpu_ptr(&softnet_data);
+ unsigned long time_limit = jiffies +
+ usecs_to_jiffies(netdev_budget_usecs);
+- int budget = netdev_budget;
++ int budget = READ_ONCE(netdev_budget);
+ LIST_HEAD(list);
+ LIST_HEAD(repoll);
+
+--
+2.35.1
+
--- /dev/null
+From 226adf47790a9fb77c9b41bfb6e2d585f52ba463 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Aug 2022 10:46:55 -0700
+Subject: net: Fix a data-race around netdev_budget_usecs.
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit fa45d484c52c73f79db2c23b0cdfc6c6455093ad ]
+
+While reading netdev_budget_usecs, it can be changed concurrently.
+Thus, we need to add READ_ONCE() to its reader.
+
+Fixes: 7acf8a1e8a28 ("Replace 2 jiffies with sysctl netdev_budget_usecs to enable softirq tuning")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/dev.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index a330f93629314..19baeaf65a646 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -6646,7 +6646,7 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
+ {
+ struct softnet_data *sd = this_cpu_ptr(&softnet_data);
+ unsigned long time_limit = jiffies +
+- usecs_to_jiffies(netdev_budget_usecs);
++ usecs_to_jiffies(READ_ONCE(netdev_budget_usecs));
+ int budget = READ_ONCE(netdev_budget);
+ LIST_HEAD(list);
+ LIST_HEAD(repoll);
+--
+2.35.1
+
--- /dev/null
+From 7314582bacebe01e1505eda0347669f88e897f18 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Aug 2022 10:46:59 -0700
+Subject: net: Fix a data-race around netdev_unregister_timeout_secs.
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit 05e49cfc89e4f325eebbc62d24dd122e55f94c23 ]
+
+While reading netdev_unregister_timeout_secs, it can be changed
+concurrently. Thus, we need to add READ_ONCE() to its reader.
+
+Fixes: 5aa3afe107d9 ("net: make unregister netdev warning timeout configurable")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Acked-by: Dmitry Vyukov <dvyukov@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/dev.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 19baeaf65a646..a77a979a4bf75 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -10265,7 +10265,7 @@ static struct net_device *netdev_wait_allrefs_any(struct list_head *list)
+ return dev;
+
+ if (time_after(jiffies, warning_time +
+- netdev_unregister_timeout_secs * HZ)) {
++ READ_ONCE(netdev_unregister_timeout_secs) * HZ)) {
+ list_for_each_entry(dev, list, todo_list) {
+ pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
+ dev->name, netdev_refcnt_read(dev));
+--
+2.35.1
+
--- /dev/null
+From a5de9c4a278180b3dfe9b95f444904adbf792e62 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Aug 2022 10:46:51 -0700
+Subject: net: Fix a data-race around sysctl_net_busy_poll.
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit c42b7cddea47503411bfb5f2f93a4154aaffa2d9 ]
+
+While reading sysctl_net_busy_poll, it can be changed concurrently.
+Thus, we need to add READ_ONCE() to its reader.
+
+Fixes: 060212928670 ("net: add low latency socket poll")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/busy_poll.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
+index c4898fcbf923b..f90f0021f5f2d 100644
+--- a/include/net/busy_poll.h
++++ b/include/net/busy_poll.h
+@@ -33,7 +33,7 @@ extern unsigned int sysctl_net_busy_poll __read_mostly;
+
+ static inline bool net_busy_loop_on(void)
+ {
+- return sysctl_net_busy_poll;
++ return READ_ONCE(sysctl_net_busy_poll);
+ }
+
+ static inline bool sk_can_busy_loop(const struct sock *sk)
+--
+2.35.1
+
--- /dev/null
+From 9181e44fd86a23e957c81a25ba3c923fdcb0aac8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Aug 2022 10:46:52 -0700
+Subject: net: Fix a data-race around sysctl_net_busy_read.
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit e59ef36f0795696ab229569c153936bfd068d21c ]
+
+While reading sysctl_net_busy_read, it can be changed concurrently.
+Thus, we need to add READ_ONCE() to its reader.
+
+Fixes: 2d48d67fa8cd ("net: poll/select low latency socket support")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/sock.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/core/sock.c b/net/core/sock.c
+index d672e63a5c2d4..16ab5ef749c60 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -3365,7 +3365,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
+
+ #ifdef CONFIG_NET_RX_BUSY_POLL
+ sk->sk_napi_id = 0;
+- sk->sk_ll_usec = sysctl_net_busy_read;
++ sk->sk_ll_usec = READ_ONCE(sysctl_net_busy_read);
+ #endif
+
+ sk->sk_max_pacing_rate = ~0UL;
+--
+2.35.1
+
--- /dev/null
+From 3672c22135da7cd12695b20f7d8368138a2525a7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Aug 2022 10:47:00 -0700
+Subject: net: Fix a data-race around sysctl_somaxconn.
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit 3c9ba81d72047f2e81bb535d42856517b613aba7 ]
+
+While reading sysctl_somaxconn, it can be changed concurrently.
+Thus, we need to add READ_ONCE() to its reader.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/socket.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/socket.c b/net/socket.c
+index 96300cdc06251..34102aa4ab0a6 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -1801,7 +1801,7 @@ int __sys_listen(int fd, int backlog)
+
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (sock) {
+- somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn;
++ somaxconn = READ_ONCE(sock_net(sock->sk)->core.sysctl_somaxconn);
+ if ((unsigned int)backlog > somaxconn)
+ backlog = somaxconn;
+
+--
+2.35.1
+
--- /dev/null
+From e63c87b60988fdd90bf952958e6b37692366f82c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Aug 2022 10:46:50 -0700
+Subject: net: Fix a data-race around sysctl_tstamp_allow_data.
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit d2154b0afa73c0159b2856f875c6b4fe7cf6a95e ]
+
+While reading sysctl_tstamp_allow_data, it can be changed
+concurrently. Thus, we need to add READ_ONCE() to its reader.
+
+Fixes: b245be1f4db1 ("net-timestamp: no-payload only sysctl")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/skbuff.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 5b3559cb1d827..bebf58464d667 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -4772,7 +4772,7 @@ static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
+ {
+ bool ret;
+
+- if (likely(sysctl_tstamp_allow_data || tsonly))
++ if (likely(READ_ONCE(sysctl_tstamp_allow_data) || tsonly))
+ return true;
+
+ read_lock_bh(&sk->sk_callback_lock);
+--
+2.35.1
+
--- /dev/null
+From 524fc5ee3edabf137b7b27c343f88189c35ddb5b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Aug 2022 10:46:46 -0700
+Subject: net: Fix data-races around netdev_max_backlog.
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit 5dcd08cd19912892586c6082d56718333e2d19db ]
+
+While reading netdev_max_backlog, it can be changed concurrently.
+Thus, we need to add READ_ONCE() to its readers.
+
+While at it, we remove the unnecessary spaces in the doc.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Documentation/admin-guide/sysctl/net.rst | 2 +-
+ net/core/dev.c | 4 ++--
+ net/core/gro_cells.c | 2 +-
+ net/xfrm/espintcp.c | 2 +-
+ net/xfrm/xfrm_input.c | 2 +-
+ 5 files changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/Documentation/admin-guide/sysctl/net.rst b/Documentation/admin-guide/sysctl/net.rst
+index fcd650bdbc7e2..01d9858197832 100644
+--- a/Documentation/admin-guide/sysctl/net.rst
++++ b/Documentation/admin-guide/sysctl/net.rst
+@@ -271,7 +271,7 @@ poll cycle or the number of packets processed reaches netdev_budget.
+ netdev_max_backlog
+ ------------------
+
+-Maximum number of packets, queued on the INPUT side, when the interface
++Maximum number of packets, queued on the INPUT side, when the interface
+ receives packets faster than kernel can process them.
+
+ netdev_rss_key
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 2a7d81cd9e2ea..e1496e626a532 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4623,7 +4623,7 @@ static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
+ struct softnet_data *sd;
+ unsigned int old_flow, new_flow;
+
+- if (qlen < (netdev_max_backlog >> 1))
++ if (qlen < (READ_ONCE(netdev_max_backlog) >> 1))
+ return false;
+
+ sd = this_cpu_ptr(&softnet_data);
+@@ -4671,7 +4671,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
+ if (!netif_running(skb->dev))
+ goto drop;
+ qlen = skb_queue_len(&sd->input_pkt_queue);
+- if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
++ if (qlen <= READ_ONCE(netdev_max_backlog) && !skb_flow_limit(skb, qlen)) {
+ if (qlen) {
+ enqueue:
+ __skb_queue_tail(&sd->input_pkt_queue, skb);
+diff --git a/net/core/gro_cells.c b/net/core/gro_cells.c
+index 541c7a72a28a4..21619c70a82b7 100644
+--- a/net/core/gro_cells.c
++++ b/net/core/gro_cells.c
+@@ -26,7 +26,7 @@ int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
+
+ cell = this_cpu_ptr(gcells->cells);
+
+- if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
++ if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(netdev_max_backlog)) {
+ drop:
+ dev_core_stats_rx_dropped_inc(dev);
+ kfree_skb(skb);
+diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c
+index 82d14eea1b5ad..974eb97b77d22 100644
+--- a/net/xfrm/espintcp.c
++++ b/net/xfrm/espintcp.c
+@@ -168,7 +168,7 @@ int espintcp_queue_out(struct sock *sk, struct sk_buff *skb)
+ {
+ struct espintcp_ctx *ctx = espintcp_getctx(sk);
+
+- if (skb_queue_len(&ctx->out_queue) >= netdev_max_backlog)
++ if (skb_queue_len(&ctx->out_queue) >= READ_ONCE(netdev_max_backlog))
+ return -ENOBUFS;
+
+ __skb_queue_tail(&ctx->out_queue, skb);
+diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
+index 70a8c36f0ba6e..b2f4ec9c537f0 100644
+--- a/net/xfrm/xfrm_input.c
++++ b/net/xfrm/xfrm_input.c
+@@ -782,7 +782,7 @@ int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
+
+ trans = this_cpu_ptr(&xfrm_trans_tasklet);
+
+- if (skb_queue_len(&trans->queue) >= netdev_max_backlog)
++ if (skb_queue_len(&trans->queue) >= READ_ONCE(netdev_max_backlog))
+ return -ENOBUFS;
+
+ BUILD_BUG_ON(sizeof(struct xfrm_trans_cb) > sizeof(skb->cb));
+--
+2.35.1
+
--- /dev/null
+From 0baf89be51ec49ba4e918e83584646afe3e89e89 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Aug 2022 10:46:47 -0700
+Subject: net: Fix data-races around netdev_tstamp_prequeue.
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit 61adf447e38664447526698872e21c04623afb8e ]
+
+While reading netdev_tstamp_prequeue, it can be changed concurrently.
+Thus, we need to add READ_ONCE() to its readers.
+
+Fixes: 3b098e2d7c69 ("net: Consistent skb timestamping")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/dev.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index e1496e626a532..34282b93c3f60 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4927,7 +4927,7 @@ static int netif_rx_internal(struct sk_buff *skb)
+ {
+ int ret;
+
+- net_timestamp_check(netdev_tstamp_prequeue, skb);
++ net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
+
+ trace_netif_rx(skb);
+
+@@ -5280,7 +5280,7 @@ static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
+ int ret = NET_RX_DROP;
+ __be16 type;
+
+- net_timestamp_check(!netdev_tstamp_prequeue, skb);
++ net_timestamp_check(!READ_ONCE(netdev_tstamp_prequeue), skb);
+
+ trace_netif_receive_skb(skb);
+
+@@ -5663,7 +5663,7 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
+ {
+ int ret;
+
+- net_timestamp_check(netdev_tstamp_prequeue, skb);
++ net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
+
+ if (skb_defer_rx_timestamp(skb))
+ return NET_RX_SUCCESS;
+@@ -5693,7 +5693,7 @@ void netif_receive_skb_list_internal(struct list_head *head)
+
+ INIT_LIST_HEAD(&sublist);
+ list_for_each_entry_safe(skb, next, head, list) {
+- net_timestamp_check(netdev_tstamp_prequeue, skb);
++ net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
+ skb_list_del_init(skb);
+ if (!skb_defer_rx_timestamp(skb))
+ list_add_tail(&skb->list, &sublist);
+--
+2.35.1
+
--- /dev/null
+From 95301ce191966591f1a042e386c3fa50f002cf9b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Aug 2022 10:46:44 -0700
+Subject: net: Fix data-races around sysctl_[rw]mem_(max|default).
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit 1227c1771dd2ad44318aa3ab9e3a293b3f34ff2a ]
+
+While reading sysctl_[rw]mem_(max|default), they can be changed
+concurrently. Thus, we need to add READ_ONCE() to their readers.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/filter.c | 4 ++--
+ net/core/sock.c | 8 ++++----
+ net/ipv4/ip_output.c | 2 +-
+ net/ipv4/tcp_output.c | 2 +-
+ net/netfilter/ipvs/ip_vs_sync.c | 4 ++--
+ 5 files changed, 10 insertions(+), 10 deletions(-)
+
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 74f05ed6aff29..60c854e7d98ba 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -5036,14 +5036,14 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname,
+ /* Only some socketops are supported */
+ switch (optname) {
+ case SO_RCVBUF:
+- val = min_t(u32, val, sysctl_rmem_max);
++ val = min_t(u32, val, READ_ONCE(sysctl_rmem_max));
+ val = min_t(int, val, INT_MAX / 2);
+ sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
+ WRITE_ONCE(sk->sk_rcvbuf,
+ max_t(int, val * 2, SOCK_MIN_RCVBUF));
+ break;
+ case SO_SNDBUF:
+- val = min_t(u32, val, sysctl_wmem_max);
++ val = min_t(u32, val, READ_ONCE(sysctl_wmem_max));
+ val = min_t(int, val, INT_MAX / 2);
+ sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
+ WRITE_ONCE(sk->sk_sndbuf,
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 2ff40dd0a7a65..62f69bc3a0e6e 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1100,7 +1100,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
+ * play 'guess the biggest size' games. RCVBUF/SNDBUF
+ * are treated in BSD as hints
+ */
+- val = min_t(u32, val, sysctl_wmem_max);
++ val = min_t(u32, val, READ_ONCE(sysctl_wmem_max));
+ set_sndbuf:
+ /* Ensure val * 2 fits into an int, to prevent max_t()
+ * from treating it as a negative value.
+@@ -1132,7 +1132,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
+ * play 'guess the biggest size' games. RCVBUF/SNDBUF
+ * are treated in BSD as hints
+ */
+- __sock_set_rcvbuf(sk, min_t(u32, val, sysctl_rmem_max));
++ __sock_set_rcvbuf(sk, min_t(u32, val, READ_ONCE(sysctl_rmem_max)));
+ break;
+
+ case SO_RCVBUFFORCE:
+@@ -3307,8 +3307,8 @@ void sock_init_data(struct socket *sock, struct sock *sk)
+ timer_setup(&sk->sk_timer, NULL, 0);
+
+ sk->sk_allocation = GFP_KERNEL;
+- sk->sk_rcvbuf = sysctl_rmem_default;
+- sk->sk_sndbuf = sysctl_wmem_default;
++ sk->sk_rcvbuf = READ_ONCE(sysctl_rmem_default);
++ sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default);
+ sk->sk_state = TCP_CLOSE;
+ sk_set_socket(sk, sock);
+
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 00b4bf26fd932..da8b3cc67234d 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -1712,7 +1712,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
+
+ sk->sk_protocol = ip_hdr(skb)->protocol;
+ sk->sk_bound_dev_if = arg->bound_dev_if;
+- sk->sk_sndbuf = sysctl_wmem_default;
++ sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default);
+ ipc.sockc.mark = fl4.flowi4_mark;
+ err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
+ len, 0, &ipc, &rt, MSG_DONTWAIT);
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index aed0c5f828bef..84314de754f87 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -239,7 +239,7 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
+ if (wscale_ok) {
+ /* Set window scaling on max possible window */
+ space = max_t(u32, space, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
+- space = max_t(u32, space, sysctl_rmem_max);
++ space = max_t(u32, space, READ_ONCE(sysctl_rmem_max));
+ space = min_t(u32, space, *window_clamp);
+ *rcv_wscale = clamp_t(int, ilog2(space) - 15,
+ 0, TCP_MAX_WSCALE);
+diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
+index 9d43277b8b4fe..a56fd0b5a430a 100644
+--- a/net/netfilter/ipvs/ip_vs_sync.c
++++ b/net/netfilter/ipvs/ip_vs_sync.c
+@@ -1280,12 +1280,12 @@ static void set_sock_size(struct sock *sk, int mode, int val)
+ lock_sock(sk);
+ if (mode) {
+ val = clamp_t(int, val, (SOCK_MIN_SNDBUF + 1) / 2,
+- sysctl_wmem_max);
++ READ_ONCE(sysctl_wmem_max));
+ sk->sk_sndbuf = val * 2;
+ sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
+ } else {
+ val = clamp_t(int, val, (SOCK_MIN_RCVBUF + 1) / 2,
+- sysctl_rmem_max);
++ READ_ONCE(sysctl_rmem_max));
+ sk->sk_rcvbuf = val * 2;
+ sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
+ }
+--
+2.35.1
+
--- /dev/null
+From 9f88658514221b6aef62cecd2fa6db0612a62366 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Aug 2022 10:46:57 -0700
+Subject: net: Fix data-races around sysctl_devconf_inherit_init_net.
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit a5612ca10d1aa05624ebe72633e0c8c792970833 ]
+
+While reading sysctl_devconf_inherit_init_net, it can be changed
+concurrently. Thus, we need to add READ_ONCE() to its readers.
+
+Fixes: 856c395cfa63 ("net: introduce a knob to control whether to inherit devconf config")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/netdevice.h | 9 +++++++++
+ net/ipv4/devinet.c | 16 ++++++++++------
+ net/ipv6/addrconf.c | 5 ++---
+ 3 files changed, 21 insertions(+), 9 deletions(-)
+
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 78dd63a5c7c80..db40bc62213bd 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -650,6 +650,15 @@ static inline bool net_has_fallback_tunnels(const struct net *net)
+ #endif
+ }
+
++static inline int net_inherit_devconf(void)
++{
++#if IS_ENABLED(CONFIG_SYSCTL)
++ return READ_ONCE(sysctl_devconf_inherit_init_net);
++#else
++ return 0;
++#endif
++}
++
+ static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
+ {
+ #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index b2366ad540e62..787a44e3222db 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -2682,23 +2682,27 @@ static __net_init int devinet_init_net(struct net *net)
+ #endif
+
+ if (!net_eq(net, &init_net)) {
+- if (IS_ENABLED(CONFIG_SYSCTL) &&
+- sysctl_devconf_inherit_init_net == 3) {
++ switch (net_inherit_devconf()) {
++ case 3:
+ /* copy from the current netns */
+ memcpy(all, current->nsproxy->net_ns->ipv4.devconf_all,
+ sizeof(ipv4_devconf));
+ memcpy(dflt,
+ current->nsproxy->net_ns->ipv4.devconf_dflt,
+ sizeof(ipv4_devconf_dflt));
+- } else if (!IS_ENABLED(CONFIG_SYSCTL) ||
+- sysctl_devconf_inherit_init_net != 2) {
+- /* inherit == 0 or 1: copy from init_net */
++ break;
++ case 0:
++ case 1:
++ /* copy from init_net */
+ memcpy(all, init_net.ipv4.devconf_all,
+ sizeof(ipv4_devconf));
+ memcpy(dflt, init_net.ipv4.devconf_dflt,
+ sizeof(ipv4_devconf_dflt));
++ break;
++ case 2:
++ /* use compiled values */
++ break;
+ }
+- /* else inherit == 2: use compiled values */
+ }
+
+ #ifdef CONFIG_SYSCTL
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 49cc6587dd771..b738eb7e1cae8 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -7158,9 +7158,8 @@ static int __net_init addrconf_init_net(struct net *net)
+ if (!dflt)
+ goto err_alloc_dflt;
+
+- if (IS_ENABLED(CONFIG_SYSCTL) &&
+- !net_eq(net, &init_net)) {
+- switch (sysctl_devconf_inherit_init_net) {
++ if (!net_eq(net, &init_net)) {
++ switch (net_inherit_devconf()) {
+ case 1: /* copy from init_net */
+ memcpy(all, init_net.ipv6.devconf_all,
+ sizeof(ipv6_devconf));
+--
+2.35.1
+
--- /dev/null
+From 371d28cecdf9fc70b8a830cac7566c83b38d4526 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Aug 2022 10:46:56 -0700
+Subject: net: Fix data-races around sysctl_fb_tunnels_only_for_init_net.
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit af67508ea6cbf0e4ea27f8120056fa2efce127dd ]
+
+While reading sysctl_fb_tunnels_only_for_init_net, it can be changed
+concurrently. Thus, we need to add READ_ONCE() to its readers.
+
+Fixes: 79134e6ce2c9 ("net: do not create fallback tunnels for non-default namespaces")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/netdevice.h | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 2563d30736e9a..78dd63a5c7c80 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -640,9 +640,14 @@ extern int sysctl_devconf_inherit_init_net;
+ */
+ static inline bool net_has_fallback_tunnels(const struct net *net)
+ {
+- return !IS_ENABLED(CONFIG_SYSCTL) ||
+- !sysctl_fb_tunnels_only_for_init_net ||
+- (net == &init_net && sysctl_fb_tunnels_only_for_init_net == 1);
++#if IS_ENABLED(CONFIG_SYSCTL)
++ int fb_tunnels_only_for_init_net = READ_ONCE(sysctl_fb_tunnels_only_for_init_net);
++
++ return !fb_tunnels_only_for_init_net ||
++ (net_eq(net, &init_net) && fb_tunnels_only_for_init_net == 1);
++#else
++ return true;
++#endif
+ }
+
+ static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
+--
+2.35.1
+
--- /dev/null
+From e933b1bc5abdaa726245bd7182e8546a8fb2686c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Aug 2022 10:46:54 -0700
+Subject: net: Fix data-races around sysctl_max_skb_frags.
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit 657b991afb89d25fe6c4783b1b75a8ad4563670d ]
+
+While reading sysctl_max_skb_frags, it can be changed concurrently.
+Thus, we need to add READ_ONCE() to its readers.
+
+Fixes: 5f74f82ea34c ("net:Add sysctl_max_skb_frags")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/tcp.c | 4 ++--
+ net/mptcp/protocol.c | 2 +-
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 3ae2ea0488838..3d446773ff2a5 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1000,7 +1000,7 @@ static struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
+
+ i = skb_shinfo(skb)->nr_frags;
+ can_coalesce = skb_can_coalesce(skb, i, page, offset);
+- if (!can_coalesce && i >= sysctl_max_skb_frags) {
++ if (!can_coalesce && i >= READ_ONCE(sysctl_max_skb_frags)) {
+ tcp_mark_push(tp, skb);
+ goto new_segment;
+ }
+@@ -1348,7 +1348,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
+
+ if (!skb_can_coalesce(skb, i, pfrag->page,
+ pfrag->offset)) {
+- if (i >= sysctl_max_skb_frags) {
++ if (i >= READ_ONCE(sysctl_max_skb_frags)) {
+ tcp_mark_push(tp, skb);
+ goto new_segment;
+ }
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 3d90fa9653ef3..513f571a082ba 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -1299,7 +1299,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
+
+ i = skb_shinfo(skb)->nr_frags;
+ can_coalesce = skb_can_coalesce(skb, i, dfrag->page, offset);
+- if (!can_coalesce && i >= sysctl_max_skb_frags) {
++ if (!can_coalesce && i >= READ_ONCE(sysctl_max_skb_frags)) {
+ tcp_mark_push(tcp_sk(ssk), skb);
+ goto alloc_skb;
+ }
+--
+2.35.1
+
--- /dev/null
+From 2270bbd9723fffdb4b9822fa716b765f3ae3177b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Aug 2022 10:46:49 -0700
+Subject: net: Fix data-races around sysctl_optmem_max.
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit 7de6d09f51917c829af2b835aba8bb5040f8e86a ]
+
+While reading sysctl_optmem_max, it can be changed concurrently.
+Thus, we need to add READ_ONCE() to its readers.
+
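+When the same limit is consulted twice in one check (as in the
+sock_kmalloc()-style charge helpers touched below), reading it once into a
+local also keeps both comparisons consistent. A minimal sketch, with a
+hypothetical helper name:
+
+    #include <linux/atomic.h>
+    #include <net/sock.h>	/* sysctl_optmem_max */
+
+    static bool omem_charge_would_fit(atomic_t *omem_alloc, int size)
+    {
+            /* snapshot once so both comparisons see the same limit */
+            int optmem_max = READ_ONCE(sysctl_optmem_max);
+
+            return size <= optmem_max &&
+                   atomic_read(omem_alloc) + size < optmem_max;
+    }
+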
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/bpf_sk_storage.c | 5 +++--
+ net/core/filter.c | 9 +++++----
+ net/core/sock.c | 8 +++++---
+ net/ipv4/ip_sockglue.c | 6 +++---
+ net/ipv6/ipv6_sockglue.c | 4 ++--
+ 5 files changed, 18 insertions(+), 14 deletions(-)
+
+diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
+index 1b7f385643b4c..94374d529ea42 100644
+--- a/net/core/bpf_sk_storage.c
++++ b/net/core/bpf_sk_storage.c
+@@ -310,11 +310,12 @@ BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
+ static int bpf_sk_storage_charge(struct bpf_local_storage_map *smap,
+ void *owner, u32 size)
+ {
++ int optmem_max = READ_ONCE(sysctl_optmem_max);
+ struct sock *sk = (struct sock *)owner;
+
+ /* same check as in sock_kmalloc() */
+- if (size <= sysctl_optmem_max &&
+- atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
++ if (size <= optmem_max &&
++ atomic_read(&sk->sk_omem_alloc) + size < optmem_max) {
+ atomic_add(size, &sk->sk_omem_alloc);
+ return 0;
+ }
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 60c854e7d98ba..063176428086b 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -1214,10 +1214,11 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
+ static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
+ {
+ u32 filter_size = bpf_prog_size(fp->prog->len);
++ int optmem_max = READ_ONCE(sysctl_optmem_max);
+
+ /* same check as in sock_kmalloc() */
+- if (filter_size <= sysctl_optmem_max &&
+- atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
++ if (filter_size <= optmem_max &&
++ atomic_read(&sk->sk_omem_alloc) + filter_size < optmem_max) {
+ atomic_add(filter_size, &sk->sk_omem_alloc);
+ return true;
+ }
+@@ -1548,7 +1549,7 @@ int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
+ if (IS_ERR(prog))
+ return PTR_ERR(prog);
+
+- if (bpf_prog_size(prog->len) > sysctl_optmem_max)
++ if (bpf_prog_size(prog->len) > READ_ONCE(sysctl_optmem_max))
+ err = -ENOMEM;
+ else
+ err = reuseport_attach_prog(sk, prog);
+@@ -1615,7 +1616,7 @@ int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
+ }
+ } else {
+ /* BPF_PROG_TYPE_SOCKET_FILTER */
+- if (bpf_prog_size(prog->len) > sysctl_optmem_max) {
++ if (bpf_prog_size(prog->len) > READ_ONCE(sysctl_optmem_max)) {
+ err = -ENOMEM;
+ goto err_prog_put;
+ }
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 62f69bc3a0e6e..d672e63a5c2d4 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -2535,7 +2535,7 @@ struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
+
+ /* small safe race: SKB_TRUESIZE may differ from final skb->truesize */
+ if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) >
+- sysctl_optmem_max)
++ READ_ONCE(sysctl_optmem_max))
+ return NULL;
+
+ skb = alloc_skb(size, priority);
+@@ -2553,8 +2553,10 @@ struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
+ */
+ void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
+ {
+- if ((unsigned int)size <= sysctl_optmem_max &&
+- atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
++ int optmem_max = READ_ONCE(sysctl_optmem_max);
++
++ if ((unsigned int)size <= optmem_max &&
++ atomic_read(&sk->sk_omem_alloc) + size < optmem_max) {
+ void *mem;
+ /* First do the add, to avoid the race if kmalloc
+ * might sleep.
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index a8a323ecbb54b..e49a61a053a68 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -772,7 +772,7 @@ static int ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval, int optlen)
+
+ if (optlen < GROUP_FILTER_SIZE(0))
+ return -EINVAL;
+- if (optlen > sysctl_optmem_max)
++ if (optlen > READ_ONCE(sysctl_optmem_max))
+ return -ENOBUFS;
+
+ gsf = memdup_sockptr(optval, optlen);
+@@ -808,7 +808,7 @@ static int compat_ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval,
+
+ if (optlen < size0)
+ return -EINVAL;
+- if (optlen > sysctl_optmem_max - 4)
++ if (optlen > READ_ONCE(sysctl_optmem_max) - 4)
+ return -ENOBUFS;
+
+ p = kmalloc(optlen + 4, GFP_KERNEL);
+@@ -1233,7 +1233,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, int optname,
+
+ if (optlen < IP_MSFILTER_SIZE(0))
+ goto e_inval;
+- if (optlen > sysctl_optmem_max) {
++ if (optlen > READ_ONCE(sysctl_optmem_max)) {
+ err = -ENOBUFS;
+ break;
+ }
+diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
+index 222f6bf220ba0..e0dcc7a193df2 100644
+--- a/net/ipv6/ipv6_sockglue.c
++++ b/net/ipv6/ipv6_sockglue.c
+@@ -210,7 +210,7 @@ static int ipv6_set_mcast_msfilter(struct sock *sk, sockptr_t optval,
+
+ if (optlen < GROUP_FILTER_SIZE(0))
+ return -EINVAL;
+- if (optlen > sysctl_optmem_max)
++ if (optlen > READ_ONCE(sysctl_optmem_max))
+ return -ENOBUFS;
+
+ gsf = memdup_sockptr(optval, optlen);
+@@ -244,7 +244,7 @@ static int compat_ipv6_set_mcast_msfilter(struct sock *sk, sockptr_t optval,
+
+ if (optlen < size0)
+ return -EINVAL;
+- if (optlen > sysctl_optmem_max - 4)
++ if (optlen > READ_ONCE(sysctl_optmem_max) - 4)
+ return -ENOBUFS;
+
+ p = kmalloc(optlen + 4, GFP_KERNEL);
+--
+2.35.1
+
--- /dev/null
+From b5e0871608ec656bd87a1d895e8abbacc7204bcc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Aug 2022 10:46:45 -0700
+Subject: net: Fix data-races around weight_p and dev_weight_[rt]x_bias.
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit bf955b5ab8f6f7b0632cdef8e36b14e4f6e77829 ]
+
+While reading weight_p, it can be changed concurrently. Thus, we need
+to add READ_ONCE() to its reader.
+
+Also, dev_[rt]x_weight can be read/written at the same time. So, we
+need to use READ_ONCE() and WRITE_ONCE() for their accesses. Moreover, to
+use the same weight_p while changing dev_[rt]x_weight, we add a mutex
+in proc_do_dev_weight().
+
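+For readability, the resulting proc handler is sketched below in one piece
+(same logic as the hunk further down; the "_sketch" suffix is only to avoid
+suggesting a second function exists):
+
+    static int proc_do_dev_weight_sketch(struct ctl_table *table, int write,
+                                         void *buffer, size_t *lenp, loff_t *ppos)
+    {
+            static DEFINE_MUTEX(dev_weight_mutex);
+            int ret, weight;
+
+            mutex_lock(&dev_weight_mutex);
+            ret = proc_dointvec(table, write, buffer, lenp, ppos);
+            if (!ret && write) {
+                    /* derive both values from one weight_p snapshot */
+                    weight = READ_ONCE(weight_p);
+                    WRITE_ONCE(dev_rx_weight, weight * dev_weight_rx_bias);
+                    WRITE_ONCE(dev_tx_weight, weight * dev_weight_tx_bias);
+            }
+            mutex_unlock(&dev_weight_mutex);
+
+            return ret;
+    }
+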
+Fixes: 3d48b53fb2ae ("net: dev_weight: TX/RX orthogonality")
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/dev.c | 2 +-
+ net/core/sysctl_net_core.c | 15 +++++++++------
+ net/sched/sch_generic.c | 2 +-
+ 3 files changed, 11 insertions(+), 8 deletions(-)
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 30a1603a7225c..2a7d81cd9e2ea 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -5917,7 +5917,7 @@ static int process_backlog(struct napi_struct *napi, int quota)
+ net_rps_action_and_irq_enable(sd);
+ }
+
+- napi->weight = dev_rx_weight;
++ napi->weight = READ_ONCE(dev_rx_weight);
+ while (again) {
+ struct sk_buff *skb;
+
+diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
+index 71a13596ea2bf..725891527814c 100644
+--- a/net/core/sysctl_net_core.c
++++ b/net/core/sysctl_net_core.c
+@@ -234,14 +234,17 @@ static int set_default_qdisc(struct ctl_table *table, int write,
+ static int proc_do_dev_weight(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+ {
+- int ret;
++ static DEFINE_MUTEX(dev_weight_mutex);
++ int ret, weight;
+
++ mutex_lock(&dev_weight_mutex);
+ ret = proc_dointvec(table, write, buffer, lenp, ppos);
+- if (ret != 0)
+- return ret;
+-
+- dev_rx_weight = weight_p * dev_weight_rx_bias;
+- dev_tx_weight = weight_p * dev_weight_tx_bias;
++ if (!ret && write) {
++ weight = READ_ONCE(weight_p);
++ WRITE_ONCE(dev_rx_weight, weight * dev_weight_rx_bias);
++ WRITE_ONCE(dev_tx_weight, weight * dev_weight_tx_bias);
++ }
++ mutex_unlock(&dev_weight_mutex);
+
+ return ret;
+ }
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index dba0b3e24af5e..a64c3c1541118 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -409,7 +409,7 @@ static inline bool qdisc_restart(struct Qdisc *q, int *packets)
+
+ void __qdisc_run(struct Qdisc *q)
+ {
+- int quota = dev_tx_weight;
++ int quota = READ_ONCE(dev_tx_weight);
+ int packets;
+
+ while (qdisc_restart(q, &packets)) {
+--
+2.35.1
+
--- /dev/null
+From 8d590e275cdf797c08acb58439f6911c403a853b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 18 Aug 2022 08:42:05 -0500
+Subject: net: ipa: don't assume SMEM is page-aligned
+
+From: Alex Elder <elder@linaro.org>
+
+[ Upstream commit b8d4380365c515d8e0351f2f46d371738dd19be1 ]
+
+In ipa_smem_init(), a Qualcomm SMEM region is allocated (if needed)
+and then its virtual address is fetched using qcom_smem_get(). The
+physical address associated with that region is also fetched.
+
+The physical address is adjusted so that it is page-aligned, and an
+attempt is made to update the size of the region to compensate for
+any non-zero adjustment.
+
+But that adjustment isn't done properly. The physical address is
+aligned twice, and as a result the size is never actually adjusted.
+
+Fix this by *not* aligning the "addr" local variable, and instead
+making the "phys" local variable be the adjusted "addr" value.
+
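+The intended arithmetic, for reference (same variables as in
+ipa_smem_init(); e.g. with a 4 KiB page size, an addr ending in 0x400
+yields phys = addr - 0x400 and a size grown by that same 0x400 before
+being rounded up):
+
+    addr = qcom_smem_virt_to_phys(virt);	/* unmodified physical address */
+    phys = addr & PAGE_MASK;			/* aligned down to a page boundary */
+    size = PAGE_ALIGN(size + addr - phys);	/* grown and rounded up to a page */
+    iova = phys;				/* we just want a direct mapping */
+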
+Fixes: a0036bb413d5b ("net: ipa: define SMEM memory region for IPA")
+Signed-off-by: Alex Elder <elder@linaro.org>
+Link: https://lore.kernel.org/r/20220818134206.567618-1-elder@linaro.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ipa/ipa_mem.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
+index 1e9eae208e44f..53a1dbeaffa6d 100644
+--- a/drivers/net/ipa/ipa_mem.c
++++ b/drivers/net/ipa/ipa_mem.c
+@@ -568,7 +568,7 @@ static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
+ }
+
+ /* Align the address down and the size up to a page boundary */
+- addr = qcom_smem_virt_to_phys(virt) & PAGE_MASK;
++ addr = qcom_smem_virt_to_phys(virt);
+ phys = addr & PAGE_MASK;
+ size = PAGE_ALIGN(size + addr - phys);
+ iova = phys; /* We just want a direct mapping */
+--
+2.35.1
+
--- /dev/null
+From 292b8a481ede6ce2849dbab56b80584232fe0c32 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 21 Aug 2022 06:08:08 -0700
+Subject: net: ipvtap - add __init/__exit annotations to module init/exit funcs
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Maciej Żenczykowski <maze@google.com>
+
+[ Upstream commit 4b2e3a17e9f279325712b79fb01d1493f9e3e005 ]
+
+These annotations look to have been left out by oversight.
+
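+For reference, a minimal module skeleton with the annotations in place
+(generic names, not taken from ipvtap):
+
+    #include <linux/init.h>
+    #include <linux/module.h>
+
+    static int __init example_init(void)
+    {
+            return 0;	/* __init code is freed once initialization is done */
+    }
+    module_init(example_init);
+
+    static void __exit example_exit(void)
+    {
+            /* __exit code can be discarded when the driver is built in */
+    }
+    module_exit(example_exit);
+
+    MODULE_LICENSE("GPL");
+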
+Cc: Mahesh Bandewar <maheshb@google.com>
+Cc: Sainath Grandhi <sainath.grandhi@intel.com>
+Fixes: 235a9d89da97 ('ipvtap: IP-VLAN based tap driver')
+Signed-off-by: Maciej Żenczykowski <maze@google.com>
+Link: https://lore.kernel.org/r/20220821130808.12143-1-zenczykowski@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ipvlan/ipvtap.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c
+index ef02f2cf5ce13..cbabca167a078 100644
+--- a/drivers/net/ipvlan/ipvtap.c
++++ b/drivers/net/ipvlan/ipvtap.c
+@@ -194,7 +194,7 @@ static struct notifier_block ipvtap_notifier_block __read_mostly = {
+ .notifier_call = ipvtap_device_event,
+ };
+
+-static int ipvtap_init(void)
++static int __init ipvtap_init(void)
+ {
+ int err;
+
+@@ -228,7 +228,7 @@ static int ipvtap_init(void)
+ }
+ module_init(ipvtap_init);
+
+-static void ipvtap_exit(void)
++static void __exit ipvtap_exit(void)
+ {
+ rtnl_link_unregister(&ipvtap_link_ops);
+ unregister_netdevice_notifier(&ipvtap_notifier_block);
+--
+2.35.1
+
--- /dev/null
+From a2f44019fd08b7d03d3f9f544d460d9d48f97816 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Aug 2022 23:54:06 +0200
+Subject: net: lantiq_xrx200: confirm skb is allocated before using
+
+From: Aleksander Jan Bajkowski <olek2@wp.pl>
+
+[ Upstream commit c8b043702dc0894c07721c5b019096cebc8c798f ]
+
+xrx200_hw_receive() assumes build_skb() always works and goes straight
+to skb_reserve(). However, build_skb() can fail under memory pressure.
+
+Add a check for the case where build_skb() fails to allocate and returns NULL.
+
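+With the check in place the receive path reads as below (same code as the
+hunk that follows):
+
+    skb = build_skb(buf, priv->rx_skb_size);
+    if (!skb) {				/* possible under memory pressure */
+            skb_free_frag(buf);		/* hand the page fragment back */
+            net_dev->stats.rx_dropped++;
+            return -ENOMEM;
+    }
+
+    skb_reserve(skb, NET_SKB_PAD);
+    skb_put(skb, len);
+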
+Fixes: e015593573b3 ("net: lantiq_xrx200: convert to build_skb")
+Reported-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Aleksander Jan Bajkowski <olek2@wp.pl>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/lantiq_xrx200.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
+index 5edb68a8aab1e..89314b645c822 100644
+--- a/drivers/net/ethernet/lantiq_xrx200.c
++++ b/drivers/net/ethernet/lantiq_xrx200.c
+@@ -239,6 +239,12 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
+ }
+
+ skb = build_skb(buf, priv->rx_skb_size);
++ if (!skb) {
++ skb_free_frag(buf);
++ net_dev->stats.rx_dropped++;
++ return -ENOMEM;
++ }
++
+ skb_reserve(skb, NET_SKB_PAD);
+ skb_put(skb, len);
+
+--
+2.35.1
+
--- /dev/null
+From 923e36189a7705f3b700a55375d8612f31755dce Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Aug 2022 23:54:07 +0200
+Subject: net: lantiq_xrx200: fix lock under memory pressure
+
+From: Aleksander Jan Bajkowski <olek2@wp.pl>
+
+[ Upstream commit c4b6e9341f930e4dd089231c0414758f5f1f9dbd ]
+
+When the xrx200_hw_receive() function returns -ENOMEM, the NAPI poll
+function immediately returns an error.
+This is incorrect for two reasons:
+* the function terminates without enabling interrupts or scheduling NAPI,
+* the error code (-ENOMEM) is returned instead of the number of received
+packets.
+
+After the first memory allocation failure occurs, packet reception is
+locked due to disabled interrupts from DMA.
+
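+A NAPI poll callback is expected to return how many packets it consumed
+(at most the budget) and to re-enable device interrupts only through
+napi_complete_done(). A minimal sketch of that shape; example_rx_one()
+and example_enable_rx_irq() are hypothetical helpers, not driver API:
+
+    static int example_poll(struct napi_struct *napi, int budget)
+    {
+            int done = 0;
+
+            while (done < budget) {
+                    if (example_rx_one(napi) < 0)
+                            break;	/* e.g. -ENOMEM: stop, don't return the error */
+                    done++;
+            }
+
+            if (done < budget && napi_complete_done(napi, done))
+                    example_enable_rx_irq(napi);
+
+            return done;
+    }
+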
+Fixes: fe1a56420cf2 ("net: lantiq: Add Lantiq / Intel VRX200 Ethernet driver")
+Signed-off-by: Aleksander Jan Bajkowski <olek2@wp.pl>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/lantiq_xrx200.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
+index 89314b645c822..25adce7f0c7c0 100644
+--- a/drivers/net/ethernet/lantiq_xrx200.c
++++ b/drivers/net/ethernet/lantiq_xrx200.c
+@@ -294,7 +294,7 @@ static int xrx200_poll_rx(struct napi_struct *napi, int budget)
+ if (ret == XRX200_DMA_PACKET_IN_PROGRESS)
+ continue;
+ if (ret != XRX200_DMA_PACKET_COMPLETE)
+- return ret;
++ break;
+ rx++;
+ } else {
+ break;
+--
+2.35.1
+
--- /dev/null
+From 79be235465aec53531d6d5a8d5fdcab9dea82112 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Aug 2022 23:54:08 +0200
+Subject: net: lantiq_xrx200: restore buffer if memory allocation failed
+
+From: Aleksander Jan Bajkowski <olek2@wp.pl>
+
+[ Upstream commit c9c3b1775f80fa21f5bff874027d2ccb10f5d90c ]
+
+In a situation where memory allocation fails, an invalid buffer address
+is stored. When this descriptor is used again, the system panics in the
+build_skb() function when accessing memory.
+
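+A condensed view of the refill logic after the fix (same idea as the hunk
+below; "alloc" is the allocation callback already passed to
+xrx200_alloc_buf()):
+
+    void *new_buf = alloc(priv->rx_skb_size);
+
+    if (!new_buf) {
+            /* keep the old buffer so the descriptor never points at
+             * freed or uninitialized memory; the caller drops the packet
+             */
+            ch->rx_buff[ch->dma.desc] = buf;
+            return -ENOMEM;
+    }
+    ch->rx_buff[ch->dma.desc] = new_buf;
+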
+Fixes: 7ea6cd16f159 ("lantiq: net: fix duplicated skb in rx descriptor ring")
+Signed-off-by: Aleksander Jan Bajkowski <olek2@wp.pl>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/lantiq_xrx200.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
+index 25adce7f0c7c0..57f27cc7724e7 100644
+--- a/drivers/net/ethernet/lantiq_xrx200.c
++++ b/drivers/net/ethernet/lantiq_xrx200.c
+@@ -193,6 +193,7 @@ static int xrx200_alloc_buf(struct xrx200_chan *ch, void *(*alloc)(unsigned int
+
+ ch->rx_buff[ch->dma.desc] = alloc(priv->rx_skb_size);
+ if (!ch->rx_buff[ch->dma.desc]) {
++ ch->rx_buff[ch->dma.desc] = buf;
+ ret = -ENOMEM;
+ goto skip;
+ }
+--
+2.35.1
+
--- /dev/null
+From 587eb88b2177476bb5d004b0515332d7c1f3205b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 Aug 2022 10:49:23 +0300
+Subject: net/mlx5: Avoid false positive lockdep warning by adding
+ lock_class_key
+
+From: Moshe Shemesh <moshe@nvidia.com>
+
+[ Upstream commit d59b73a66e5e0682442b6d7b4965364e57078b80 ]
+
+Add a lock_class_key per mlx5 device to avoid a false positive
+"possible circular locking dependency" warning by lockdep, on flows
+which lock more than one mlx5 device, such as adding SF.
+
+kernel log:
+ ======================================================
+ WARNING: possible circular locking dependency detected
+ 5.19.0-rc8+ #2 Not tainted
+ ------------------------------------------------------
+ kworker/u20:0/8 is trying to acquire lock:
+ ffff88812dfe0d98 (&dev->intf_state_mutex){+.+.}-{3:3}, at: mlx5_init_one+0x2e/0x490 [mlx5_core]
+
+ but task is already holding lock:
+ ffff888101aa7898 (&(¬ifier->n_head)->rwsem){++++}-{3:3}, at: blocking_notifier_call_chain+0x5a/0x130
+
+ which lock already depends on the new lock.
+
+ the existing dependency chain (in reverse order) is:
+
+ -> #1 (&(¬ifier->n_head)->rwsem){++++}-{3:3}:
+ down_write+0x90/0x150
+ blocking_notifier_chain_register+0x53/0xa0
+ mlx5_sf_table_init+0x369/0x4a0 [mlx5_core]
+ mlx5_init_one+0x261/0x490 [mlx5_core]
+ probe_one+0x430/0x680 [mlx5_core]
+ local_pci_probe+0xd6/0x170
+ work_for_cpu_fn+0x4e/0xa0
+ process_one_work+0x7c2/0x1340
+ worker_thread+0x6f6/0xec0
+ kthread+0x28f/0x330
+ ret_from_fork+0x1f/0x30
+
+ -> #0 (&dev->intf_state_mutex){+.+.}-{3:3}:
+ __lock_acquire+0x2fc7/0x6720
+ lock_acquire+0x1c1/0x550
+ __mutex_lock+0x12c/0x14b0
+ mlx5_init_one+0x2e/0x490 [mlx5_core]
+ mlx5_sf_dev_probe+0x29c/0x370 [mlx5_core]
+ auxiliary_bus_probe+0x9d/0xe0
+ really_probe+0x1e0/0xaa0
+ __driver_probe_device+0x219/0x480
+ driver_probe_device+0x49/0x130
+ __device_attach_driver+0x1b8/0x280
+ bus_for_each_drv+0x123/0x1a0
+ __device_attach+0x1a3/0x460
+ bus_probe_device+0x1a2/0x260
+ device_add+0x9b1/0x1b40
+ __auxiliary_device_add+0x88/0xc0
+ mlx5_sf_dev_state_change_handler+0x67e/0x9d0 [mlx5_core]
+ blocking_notifier_call_chain+0xd5/0x130
+ mlx5_vhca_state_work_handler+0x2b0/0x3f0 [mlx5_core]
+ process_one_work+0x7c2/0x1340
+ worker_thread+0x59d/0xec0
+ kthread+0x28f/0x330
+ ret_from_fork+0x1f/0x30
+
+ other info that might help us debug this:
+
+ Possible unsafe locking scenario:
+
+ CPU0 CPU1
+ ---- ----
+ lock(&(¬ifier->n_head)->rwsem);
+ lock(&dev->intf_state_mutex);
+ lock(&(¬ifier->n_head)->rwsem);
+ lock(&dev->intf_state_mutex);
+
+ *** DEADLOCK ***
+
+ 4 locks held by kworker/u20:0/8:
+ #0: ffff888150612938 ((wq_completion)mlx5_events){+.+.}-{0:0}, at: process_one_work+0x6e2/0x1340
+ #1: ffff888100cafdb8 ((work_completion)(&work->work)#3){+.+.}-{0:0}, at: process_one_work+0x70f/0x1340
+ #2: ffff888101aa7898 (&(¬ifier->n_head)->rwsem){++++}-{3:3}, at: blocking_notifier_call_chain+0x5a/0x130
+ #3: ffff88813682d0e8 (&dev->mutex){....}-{3:3}, at:__device_attach+0x76/0x460
+
+ stack backtrace:
+ CPU: 6 PID: 8 Comm: kworker/u20:0 Not tainted 5.19.0-rc8+
+ Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.13.0-0-gf21b5a4aeb02-prebuilt.qemu.org 04/01/2014
+ Workqueue: mlx5_events mlx5_vhca_state_work_handler [mlx5_core]
+ Call Trace:
+ <TASK>
+ dump_stack_lvl+0x57/0x7d
+ check_noncircular+0x278/0x300
+ ? print_circular_bug+0x460/0x460
+ ? lock_chain_count+0x20/0x20
+ ? register_lock_class+0x1880/0x1880
+ __lock_acquire+0x2fc7/0x6720
+ ? register_lock_class+0x1880/0x1880
+ ? register_lock_class+0x1880/0x1880
+ lock_acquire+0x1c1/0x550
+ ? mlx5_init_one+0x2e/0x490 [mlx5_core]
+ ? lockdep_hardirqs_on_prepare+0x400/0x400
+ __mutex_lock+0x12c/0x14b0
+ ? mlx5_init_one+0x2e/0x490 [mlx5_core]
+ ? mlx5_init_one+0x2e/0x490 [mlx5_core]
+ ? _raw_read_unlock+0x1f/0x30
+ ? mutex_lock_io_nested+0x1320/0x1320
+ ? __ioremap_caller.constprop.0+0x306/0x490
+ ? mlx5_sf_dev_probe+0x269/0x370 [mlx5_core]
+ ? iounmap+0x160/0x160
+ mlx5_init_one+0x2e/0x490 [mlx5_core]
+ mlx5_sf_dev_probe+0x29c/0x370 [mlx5_core]
+ ? mlx5_sf_dev_remove+0x130/0x130 [mlx5_core]
+ auxiliary_bus_probe+0x9d/0xe0
+ really_probe+0x1e0/0xaa0
+ __driver_probe_device+0x219/0x480
+ ? auxiliary_match_id+0xe9/0x140
+ driver_probe_device+0x49/0x130
+ __device_attach_driver+0x1b8/0x280
+ ? driver_allows_async_probing+0x140/0x140
+ bus_for_each_drv+0x123/0x1a0
+ ? bus_for_each_dev+0x1a0/0x1a0
+ ? lockdep_hardirqs_on_prepare+0x286/0x400
+ ? trace_hardirqs_on+0x2d/0x100
+ __device_attach+0x1a3/0x460
+ ? device_driver_attach+0x1e0/0x1e0
+ ? kobject_uevent_env+0x22d/0xf10
+ bus_probe_device+0x1a2/0x260
+ device_add+0x9b1/0x1b40
+ ? dev_set_name+0xab/0xe0
+ ? __fw_devlink_link_to_suppliers+0x260/0x260
+ ? memset+0x20/0x40
+ ? lockdep_init_map_type+0x21a/0x7d0
+ __auxiliary_device_add+0x88/0xc0
+ ? auxiliary_device_init+0x86/0xa0
+ mlx5_sf_dev_state_change_handler+0x67e/0x9d0 [mlx5_core]
+ blocking_notifier_call_chain+0xd5/0x130
+ mlx5_vhca_state_work_handler+0x2b0/0x3f0 [mlx5_core]
+ ? mlx5_vhca_event_arm+0x100/0x100 [mlx5_core]
+ ? lock_downgrade+0x6e0/0x6e0
+ ? lockdep_hardirqs_on_prepare+0x286/0x400
+ process_one_work+0x7c2/0x1340
+ ? lockdep_hardirqs_on_prepare+0x400/0x400
+ ? pwq_dec_nr_in_flight+0x230/0x230
+ ? rwlock_bug.part.0+0x90/0x90
+ worker_thread+0x59d/0xec0
+ ? process_one_work+0x1340/0x1340
+ kthread+0x28f/0x330
+ ? kthread_complete_and_exit+0x20/0x20
+ ret_from_fork+0x1f/0x30
+ </TASK>
+
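+A minimal sketch of the per-device lock class pattern described above
+(field names follow the patch; error paths omitted):
+
+    /* init: give this device instance its own lockdep class, so lockdep
+     * tracks the intf_state_mutex of each device (e.g. PF and SF) as a
+     * separate class and does not see a false dependency cycle between them
+     */
+    lockdep_register_key(&dev->lock_key);
+    mutex_init(&dev->intf_state_mutex);
+    lockdep_set_class(&dev->intf_state_mutex, &dev->lock_key);
+
+    /* teardown: destroy the mutex before unregistering its key */
+    mutex_destroy(&dev->intf_state_mutex);
+    lockdep_unregister_key(&dev->lock_key);
+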
+Fixes: 6a3273217469 ("net/mlx5: SF, Port function state change support")
+Signed-off-by: Moshe Shemesh <moshe@nvidia.com>
+Reviewed-by: Shay Drory <shayd@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/main.c | 4 ++++
+ include/linux/mlx5/driver.h | 1 +
+ 2 files changed, 5 insertions(+)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index ba2e5232b90be..616207c3b187a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -1472,7 +1472,9 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
+ memcpy(&dev->profile, &profile[profile_idx], sizeof(dev->profile));
+ INIT_LIST_HEAD(&priv->ctx_list);
+ spin_lock_init(&priv->ctx_lock);
++ lockdep_register_key(&dev->lock_key);
+ mutex_init(&dev->intf_state_mutex);
++ lockdep_set_class(&dev->intf_state_mutex, &dev->lock_key);
+
+ mutex_init(&priv->bfregs.reg_head.lock);
+ mutex_init(&priv->bfregs.wc_head.lock);
+@@ -1527,6 +1529,7 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
+ mutex_destroy(&priv->bfregs.wc_head.lock);
+ mutex_destroy(&priv->bfregs.reg_head.lock);
+ mutex_destroy(&dev->intf_state_mutex);
++ lockdep_unregister_key(&dev->lock_key);
+ return err;
+ }
+
+@@ -1545,6 +1548,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
+ mutex_destroy(&priv->bfregs.wc_head.lock);
+ mutex_destroy(&priv->bfregs.reg_head.lock);
+ mutex_destroy(&dev->intf_state_mutex);
++ lockdep_unregister_key(&dev->lock_key);
+ }
+
+ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index 5040cd774c5a3..b0b4ac92354a2 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -773,6 +773,7 @@ struct mlx5_core_dev {
+ enum mlx5_device_state state;
+ /* sync interface state */
+ struct mutex intf_state_mutex;
++ struct lock_class_key lock_key;
+ unsigned long intf_state;
+ struct mlx5_priv priv;
+ struct mlx5_profile profile;
+--
+2.35.1
+
--- /dev/null
+From fadd850f95fdf46ceafb6a51e280548d429fb5e5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 11 Aug 2022 13:46:36 +0200
+Subject: net/mlx5: Disable irq when locking lag_lock
+
+From: Vlad Buslov <vladbu@nvidia.com>
+
+[ Upstream commit 8e93f29422ffe968d7161f91acdf0d47f5323727 ]
+
+The lag_lock is taken from both process and softirq contexts, which results
+in a lockdep warning [0] about a potential deadlock. However, just disabling
+softirqs by using the *_bh spinlock API is not enough, since that would still
+trigger a warning in contexts where the lock is obtained with hard irqs
+disabled. To fix the issue, save the current irq state, disable irqs before
+taking the lock, and re-enable irqs from the saved state after releasing it.
+
+[0]:
+
+[Sun Aug 7 13:12:29 2022] ================================
+[Sun Aug 7 13:12:29 2022] WARNING: inconsistent lock state
+[Sun Aug 7 13:12:29 2022] 5.19.0_for_upstream_debug_2022_08_04_16_06 #1 Not tainted
+[Sun Aug 7 13:12:29 2022] --------------------------------
+[Sun Aug 7 13:12:29 2022] inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W} usage.
+[Sun Aug 7 13:12:29 2022] swapper/0/0 [HC0[0]:SC1[1]:HE1:SE0] takes:
+[Sun Aug 7 13:12:29 2022] ffffffffa06dc0d8 (lag_lock){+.?.}-{2:2}, at: mlx5_lag_is_shared_fdb+0x1f/0x120 [mlx5_core]
+[Sun Aug 7 13:12:29 2022] {SOFTIRQ-ON-W} state was registered at:
+[Sun Aug 7 13:12:29 2022] lock_acquire+0x1c1/0x550
+[Sun Aug 7 13:12:29 2022] _raw_spin_lock+0x2c/0x40
+[Sun Aug 7 13:12:29 2022] mlx5_lag_add_netdev+0x13b/0x480 [mlx5_core]
+[Sun Aug 7 13:12:29 2022] mlx5e_nic_enable+0x114/0x470 [mlx5_core]
+[Sun Aug 7 13:12:29 2022] mlx5e_attach_netdev+0x30e/0x6a0 [mlx5_core]
+[Sun Aug 7 13:12:29 2022] mlx5e_resume+0x105/0x160 [mlx5_core]
+[Sun Aug 7 13:12:29 2022] mlx5e_probe+0xac3/0x14f0 [mlx5_core]
+[Sun Aug 7 13:12:29 2022] auxiliary_bus_probe+0x9d/0xe0
+[Sun Aug 7 13:12:29 2022] really_probe+0x1e0/0xaa0
+[Sun Aug 7 13:12:29 2022] __driver_probe_device+0x219/0x480
+[Sun Aug 7 13:12:29 2022] driver_probe_device+0x49/0x130
+[Sun Aug 7 13:12:29 2022] __driver_attach+0x1e4/0x4d0
+[Sun Aug 7 13:12:29 2022] bus_for_each_dev+0x11e/0x1a0
+[Sun Aug 7 13:12:29 2022] bus_add_driver+0x3f4/0x5a0
+[Sun Aug 7 13:12:29 2022] driver_register+0x20f/0x390
+[Sun Aug 7 13:12:29 2022] __auxiliary_driver_register+0x14e/0x260
+[Sun Aug 7 13:12:29 2022] mlx5e_init+0x38/0x90 [mlx5_core]
+[Sun Aug 7 13:12:29 2022] vhost_iotlb_itree_augment_rotate+0xcb/0x180 [vhost_iotlb]
+[Sun Aug 7 13:12:29 2022] do_one_initcall+0xc4/0x400
+[Sun Aug 7 13:12:29 2022] do_init_module+0x18a/0x620
+[Sun Aug 7 13:12:29 2022] load_module+0x563a/0x7040
+[Sun Aug 7 13:12:29 2022] __do_sys_finit_module+0x122/0x1d0
+[Sun Aug 7 13:12:29 2022] do_syscall_64+0x3d/0x90
+[Sun Aug 7 13:12:29 2022] entry_SYSCALL_64_after_hwframe+0x46/0xb0
+[Sun Aug 7 13:12:29 2022] irq event stamp: 3596508
+[Sun Aug 7 13:12:29 2022] hardirqs last enabled at (3596508): [<ffffffff813687c2>] __local_bh_enable_ip+0xa2/0x100
+[Sun Aug 7 13:12:29 2022] hardirqs last disabled at (3596507): [<ffffffff813687da>] __local_bh_enable_ip+0xba/0x100
+[Sun Aug 7 13:12:29 2022] softirqs last enabled at (3596488): [<ffffffff81368a2a>] irq_exit_rcu+0x11a/0x170
+[Sun Aug 7 13:12:29 2022] softirqs last disabled at (3596495): [<ffffffff81368a2a>] irq_exit_rcu+0x11a/0x170
+[Sun Aug 7 13:12:29 2022]
+ other info that might help us debug this:
+[Sun Aug 7 13:12:29 2022] Possible unsafe locking scenario:
+
+[Sun Aug 7 13:12:29 2022] CPU0
+[Sun Aug 7 13:12:29 2022] ----
+[Sun Aug 7 13:12:29 2022] lock(lag_lock);
+[Sun Aug 7 13:12:29 2022] <Interrupt>
+[Sun Aug 7 13:12:29 2022] lock(lag_lock);
+[Sun Aug 7 13:12:29 2022]
+ *** DEADLOCK ***
+
+[Sun Aug 7 13:12:29 2022] 4 locks held by swapper/0/0:
+[Sun Aug 7 13:12:29 2022] #0: ffffffff84643260 (rcu_read_lock){....}-{1:2}, at: mlx5e_napi_poll+0x43/0x20a0 [mlx5_core]
+[Sun Aug 7 13:12:29 2022] #1: ffffffff84643260 (rcu_read_lock){....}-{1:2}, at: netif_receive_skb_list_internal+0x2d7/0xd60
+[Sun Aug 7 13:12:29 2022] #2: ffff888144a18b58 (&br->hash_lock){+.-.}-{2:2}, at: br_fdb_update+0x301/0x570
+[Sun Aug 7 13:12:29 2022] #3: ffffffff84643260 (rcu_read_lock){....}-{1:2}, at: atomic_notifier_call_chain+0x5/0x1d0
+[Sun Aug 7 13:12:29 2022]
+ stack backtrace:
+[Sun Aug 7 13:12:29 2022] CPU: 0 PID: 0 Comm: swapper/0 Not tainted 5.19.0_for_upstream_debug_2022_08_04_16_06 #1
+[Sun Aug 7 13:12:29 2022] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.13.0-0-gf21b5a4aeb02-prebuilt.qemu.org 04/01/2014
+[Sun Aug 7 13:12:29 2022] Call Trace:
+[Sun Aug 7 13:12:29 2022] <IRQ>
+[Sun Aug 7 13:12:29 2022] dump_stack_lvl+0x57/0x7d
+[Sun Aug 7 13:12:29 2022] mark_lock.part.0.cold+0x5f/0x92
+[Sun Aug 7 13:12:29 2022] ? lock_chain_count+0x20/0x20
+[Sun Aug 7 13:12:29 2022] ? unwind_next_frame+0x1c4/0x1b50
+[Sun Aug 7 13:12:29 2022] ? secondary_startup_64_no_verify+0xcd/0xdb
+[Sun Aug 7 13:12:29 2022] ? mlx5e_napi_poll+0x4e9/0x20a0 [mlx5_core]
+[Sun Aug 7 13:12:29 2022] ? mlx5e_napi_poll+0x4e9/0x20a0 [mlx5_core]
+[Sun Aug 7 13:12:29 2022] ? stack_access_ok+0x1d0/0x1d0
+[Sun Aug 7 13:12:29 2022] ? start_kernel+0x3a7/0x3c5
+[Sun Aug 7 13:12:29 2022] __lock_acquire+0x1260/0x6720
+[Sun Aug 7 13:12:29 2022] ? lock_chain_count+0x20/0x20
+[Sun Aug 7 13:12:29 2022] ? lock_chain_count+0x20/0x20
+[Sun Aug 7 13:12:29 2022] ? register_lock_class+0x1880/0x1880
+[Sun Aug 7 13:12:29 2022] ? mark_lock.part.0+0xed/0x3060
+[Sun Aug 7 13:12:29 2022] ? stack_trace_save+0x91/0xc0
+[Sun Aug 7 13:12:29 2022] lock_acquire+0x1c1/0x550
+[Sun Aug 7 13:12:29 2022] ? mlx5_lag_is_shared_fdb+0x1f/0x120 [mlx5_core]
+[Sun Aug 7 13:12:29 2022] ? lockdep_hardirqs_on_prepare+0x400/0x400
+[Sun Aug 7 13:12:29 2022] ? __lock_acquire+0xd6f/0x6720
+[Sun Aug 7 13:12:29 2022] _raw_spin_lock+0x2c/0x40
+[Sun Aug 7 13:12:29 2022] ? mlx5_lag_is_shared_fdb+0x1f/0x120 [mlx5_core]
+[Sun Aug 7 13:12:29 2022] mlx5_lag_is_shared_fdb+0x1f/0x120 [mlx5_core]
+[Sun Aug 7 13:12:29 2022] mlx5_esw_bridge_rep_vport_num_vhca_id_get+0x1a0/0x600 [mlx5_core]
+[Sun Aug 7 13:12:29 2022] ? mlx5_esw_bridge_update_work+0x90/0x90 [mlx5_core]
+[Sun Aug 7 13:12:29 2022] ? lock_acquire+0x1c1/0x550
+[Sun Aug 7 13:12:29 2022] mlx5_esw_bridge_switchdev_event+0x185/0x8f0 [mlx5_core]
+[Sun Aug 7 13:12:29 2022] ? mlx5_esw_bridge_port_obj_attr_set+0x3e0/0x3e0 [mlx5_core]
+[Sun Aug 7 13:12:29 2022] ? check_chain_key+0x24a/0x580
+[Sun Aug 7 13:12:29 2022] atomic_notifier_call_chain+0xd7/0x1d0
+[Sun Aug 7 13:12:29 2022] br_switchdev_fdb_notify+0xea/0x100
+[Sun Aug 7 13:12:29 2022] ? br_switchdev_set_port_flag+0x310/0x310
+[Sun Aug 7 13:12:29 2022] fdb_notify+0x11b/0x150
+[Sun Aug 7 13:12:29 2022] br_fdb_update+0x34c/0x570
+[Sun Aug 7 13:12:29 2022] ? lock_chain_count+0x20/0x20
+[Sun Aug 7 13:12:29 2022] ? br_fdb_add_local+0x50/0x50
+[Sun Aug 7 13:12:29 2022] ? br_allowed_ingress+0x5f/0x1070
+[Sun Aug 7 13:12:29 2022] ? check_chain_key+0x24a/0x580
+[Sun Aug 7 13:12:29 2022] br_handle_frame_finish+0x786/0x18e0
+[Sun Aug 7 13:12:29 2022] ? check_chain_key+0x24a/0x580
+[Sun Aug 7 13:12:29 2022] ? br_handle_local_finish+0x20/0x20
+[Sun Aug 7 13:12:29 2022] ? __lock_acquire+0xd6f/0x6720
+[Sun Aug 7 13:12:29 2022] ? sctp_inet_bind_verify+0x4d/0x190
+[Sun Aug 7 13:12:29 2022] ? xlog_unpack_data+0x2e0/0x310
+[Sun Aug 7 13:12:29 2022] ? br_handle_local_finish+0x20/0x20
+[Sun Aug 7 13:12:29 2022] br_nf_hook_thresh+0x227/0x380 [br_netfilter]
+[Sun Aug 7 13:12:29 2022] ? setup_pre_routing+0x460/0x460 [br_netfilter]
+[Sun Aug 7 13:12:29 2022] ? br_handle_local_finish+0x20/0x20
+[Sun Aug 7 13:12:29 2022] ? br_nf_pre_routing_ipv6+0x48b/0x69c [br_netfilter]
+[Sun Aug 7 13:12:29 2022] br_nf_pre_routing_finish_ipv6+0x5c2/0xbf0 [br_netfilter]
+[Sun Aug 7 13:12:29 2022] ? br_handle_local_finish+0x20/0x20
+[Sun Aug 7 13:12:29 2022] br_nf_pre_routing_ipv6+0x4c6/0x69c [br_netfilter]
+[Sun Aug 7 13:12:29 2022] ? br_validate_ipv6+0x9e0/0x9e0 [br_netfilter]
+[Sun Aug 7 13:12:29 2022] ? br_nf_forward_arp+0xb70/0xb70 [br_netfilter]
+[Sun Aug 7 13:12:29 2022] ? br_nf_pre_routing+0xacf/0x1160 [br_netfilter]
+[Sun Aug 7 13:12:29 2022] br_handle_frame+0x8a9/0x1270
+[Sun Aug 7 13:12:29 2022] ? br_handle_frame_finish+0x18e0/0x18e0
+[Sun Aug 7 13:12:29 2022] ? register_lock_class+0x1880/0x1880
+[Sun Aug 7 13:12:29 2022] ? br_handle_local_finish+0x20/0x20
+[Sun Aug 7 13:12:29 2022] ? bond_handle_frame+0xf9/0xac0 [bonding]
+[Sun Aug 7 13:12:29 2022] ? br_handle_frame_finish+0x18e0/0x18e0
+[Sun Aug 7 13:12:29 2022] __netif_receive_skb_core+0x7c0/0x2c70
+[Sun Aug 7 13:12:29 2022] ? check_chain_key+0x24a/0x580
+[Sun Aug 7 13:12:29 2022] ? generic_xdp_tx+0x5b0/0x5b0
+[Sun Aug 7 13:12:29 2022] ? __lock_acquire+0xd6f/0x6720
+[Sun Aug 7 13:12:29 2022] ? register_lock_class+0x1880/0x1880
+[Sun Aug 7 13:12:29 2022] ? check_chain_key+0x24a/0x580
+[Sun Aug 7 13:12:29 2022] __netif_receive_skb_list_core+0x2d7/0x8a0
+[Sun Aug 7 13:12:29 2022] ? lock_acquire+0x1c1/0x550
+[Sun Aug 7 13:12:29 2022] ? process_backlog+0x960/0x960
+[Sun Aug 7 13:12:29 2022] ? lockdep_hardirqs_on_prepare+0x129/0x400
+[Sun Aug 7 13:12:29 2022] ? kvm_clock_get_cycles+0x14/0x20
+[Sun Aug 7 13:12:29 2022] netif_receive_skb_list_internal+0x5f4/0xd60
+[Sun Aug 7 13:12:29 2022] ? do_xdp_generic+0x150/0x150
+[Sun Aug 7 13:12:29 2022] ? mlx5e_poll_rx_cq+0xf6b/0x2960 [mlx5_core]
+[Sun Aug 7 13:12:29 2022] ? mlx5e_poll_ico_cq+0x3d/0x1590 [mlx5_core]
+[Sun Aug 7 13:12:29 2022] napi_complete_done+0x188/0x710
+[Sun Aug 7 13:12:29 2022] mlx5e_napi_poll+0x4e9/0x20a0 [mlx5_core]
+[Sun Aug 7 13:12:29 2022] ? __queue_work+0x53c/0xeb0
+[Sun Aug 7 13:12:29 2022] __napi_poll+0x9f/0x540
+[Sun Aug 7 13:12:29 2022] net_rx_action+0x420/0xb70
+[Sun Aug 7 13:12:29 2022] ? napi_threaded_poll+0x470/0x470
+[Sun Aug 7 13:12:29 2022] ? __common_interrupt+0x79/0x1a0
+[Sun Aug 7 13:12:29 2022] __do_softirq+0x271/0x92c
+[Sun Aug 7 13:12:29 2022] irq_exit_rcu+0x11a/0x170
+[Sun Aug 7 13:12:29 2022] common_interrupt+0x7d/0xa0
+[Sun Aug 7 13:12:29 2022] </IRQ>
+[Sun Aug 7 13:12:29 2022] <TASK>
+[Sun Aug 7 13:12:29 2022] asm_common_interrupt+0x22/0x40
+[Sun Aug 7 13:12:29 2022] RIP: 0010:default_idle+0x42/0x60
+[Sun Aug 7 13:12:29 2022] Code: c1 83 e0 07 48 c1 e9 03 83 c0 03 0f b6 14 11 38 d0 7c 04 84 d2 75 14 8b 05 6b f1 22 02 85 c0 7e 07 0f 00 2d 80 3b 4a 00 fb f4 <c3> 48 c7 c7 e0 07 7e 85 e8 21 bd 40 fe eb de 66 66 2e 0f 1f 84 00
+[Sun Aug 7 13:12:29 2022] RSP: 0018:ffffffff84407e18 EFLAGS: 00000242
+[Sun Aug 7 13:12:29 2022] RAX: 0000000000000001 RBX: ffffffff84ec4a68 RCX: 1ffffffff0afc0fc
+[Sun Aug 7 13:12:29 2022] RDX: 0000000000000004 RSI: 0000000000000000 RDI: ffffffff835b1fac
+[Sun Aug 7 13:12:29 2022] RBP: 0000000000000000 R08: 0000000000000001 R09: ffff8884d2c44ac3
+[Sun Aug 7 13:12:29 2022] R10: ffffed109a588958 R11: 00000000ffffffff R12: 0000000000000000
+[Sun Aug 7 13:12:29 2022] R13: ffffffff84efac20 R14: 0000000000000000 R15: dffffc0000000000
+[Sun Aug 7 13:12:29 2022] ? default_idle_call+0xcc/0x460
+[Sun Aug 7 13:12:29 2022] default_idle_call+0xec/0x460
+[Sun Aug 7 13:12:29 2022] do_idle+0x394/0x450
+[Sun Aug 7 13:12:29 2022] ? arch_cpu_idle_exit+0x40/0x40
+[Sun Aug 7 13:12:29 2022] cpu_startup_entry+0x19/0x20
+[Sun Aug 7 13:12:29 2022] rest_init+0x156/0x250
+[Sun Aug 7 13:12:29 2022] arch_call_rest_init+0xf/0x15
+[Sun Aug 7 13:12:29 2022] start_kernel+0x3a7/0x3c5
+[Sun Aug 7 13:12:29 2022] secondary_startup_64_no_verify+0xcd/0xdb
+[Sun Aug 7 13:12:29 2022] </TASK>
+
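+A minimal sketch of the locking pattern after the fix; the wrapper
+function name is illustrative, the rest mirrors the patch:
+
+    static DEFINE_SPINLOCK(lag_lock);
+
+    static bool example_lag_is_active(struct mlx5_core_dev *dev)
+    {
+            struct mlx5_lag *ldev;
+            unsigned long flags;
+            bool res;
+
+            /* safe from process, softirq and hard-irq context alike:
+             * save the current irq state rather than assuming irqs are
+             * enabled on entry
+             */
+            spin_lock_irqsave(&lag_lock, flags);
+            ldev = mlx5_lag_dev(dev);
+            res = ldev && __mlx5_lag_is_active(ldev);
+            spin_unlock_irqrestore(&lag_lock, flags);
+
+            return res;
+    }
+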
+Fixes: ff9b7521468b ("net/mlx5: Bridge, support LAG")
+Signed-off-by: Vlad Buslov <vladbu@nvidia.com>
+Reviewed-by: Mark Bloch <mbloch@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/mellanox/mlx5/core/lag/lag.c | 55 +++++++++++--------
+ 1 file changed, 33 insertions(+), 22 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+index c520edb942ca5..d98acd68af2ec 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+@@ -1067,30 +1067,32 @@ static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev,
+ struct net_device *netdev)
+ {
+ unsigned int fn = mlx5_get_dev_index(dev);
++ unsigned long flags;
+
+ if (fn >= ldev->ports)
+ return;
+
+- spin_lock(&lag_lock);
++ spin_lock_irqsave(&lag_lock, flags);
+ ldev->pf[fn].netdev = netdev;
+ ldev->tracker.netdev_state[fn].link_up = 0;
+ ldev->tracker.netdev_state[fn].tx_enabled = 0;
+- spin_unlock(&lag_lock);
++ spin_unlock_irqrestore(&lag_lock, flags);
+ }
+
+ static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
+ struct net_device *netdev)
+ {
++ unsigned long flags;
+ int i;
+
+- spin_lock(&lag_lock);
++ spin_lock_irqsave(&lag_lock, flags);
+ for (i = 0; i < ldev->ports; i++) {
+ if (ldev->pf[i].netdev == netdev) {
+ ldev->pf[i].netdev = NULL;
+ break;
+ }
+ }
+- spin_unlock(&lag_lock);
++ spin_unlock_irqrestore(&lag_lock, flags);
+ }
+
+ static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
+@@ -1246,12 +1248,13 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
+ bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
+ {
+ struct mlx5_lag *ldev;
++ unsigned long flags;
+ bool res;
+
+- spin_lock(&lag_lock);
++ spin_lock_irqsave(&lag_lock, flags);
+ ldev = mlx5_lag_dev(dev);
+ res = ldev && __mlx5_lag_is_roce(ldev);
+- spin_unlock(&lag_lock);
++ spin_unlock_irqrestore(&lag_lock, flags);
+
+ return res;
+ }
+@@ -1260,12 +1263,13 @@ EXPORT_SYMBOL(mlx5_lag_is_roce);
+ bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
+ {
+ struct mlx5_lag *ldev;
++ unsigned long flags;
+ bool res;
+
+- spin_lock(&lag_lock);
++ spin_lock_irqsave(&lag_lock, flags);
+ ldev = mlx5_lag_dev(dev);
+ res = ldev && __mlx5_lag_is_active(ldev);
+- spin_unlock(&lag_lock);
++ spin_unlock_irqrestore(&lag_lock, flags);
+
+ return res;
+ }
+@@ -1274,13 +1278,14 @@ EXPORT_SYMBOL(mlx5_lag_is_active);
+ bool mlx5_lag_is_master(struct mlx5_core_dev *dev)
+ {
+ struct mlx5_lag *ldev;
++ unsigned long flags;
+ bool res;
+
+- spin_lock(&lag_lock);
++ spin_lock_irqsave(&lag_lock, flags);
+ ldev = mlx5_lag_dev(dev);
+ res = ldev && __mlx5_lag_is_active(ldev) &&
+ dev == ldev->pf[MLX5_LAG_P1].dev;
+- spin_unlock(&lag_lock);
++ spin_unlock_irqrestore(&lag_lock, flags);
+
+ return res;
+ }
+@@ -1289,12 +1294,13 @@ EXPORT_SYMBOL(mlx5_lag_is_master);
+ bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev)
+ {
+ struct mlx5_lag *ldev;
++ unsigned long flags;
+ bool res;
+
+- spin_lock(&lag_lock);
++ spin_lock_irqsave(&lag_lock, flags);
+ ldev = mlx5_lag_dev(dev);
+ res = ldev && __mlx5_lag_is_sriov(ldev);
+- spin_unlock(&lag_lock);
++ spin_unlock_irqrestore(&lag_lock, flags);
+
+ return res;
+ }
+@@ -1303,13 +1309,14 @@ EXPORT_SYMBOL(mlx5_lag_is_sriov);
+ bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev)
+ {
+ struct mlx5_lag *ldev;
++ unsigned long flags;
+ bool res;
+
+- spin_lock(&lag_lock);
++ spin_lock_irqsave(&lag_lock, flags);
+ ldev = mlx5_lag_dev(dev);
+ res = ldev && __mlx5_lag_is_sriov(ldev) &&
+ test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
+- spin_unlock(&lag_lock);
++ spin_unlock_irqrestore(&lag_lock, flags);
+
+ return res;
+ }
+@@ -1352,9 +1359,10 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
+ {
+ struct net_device *ndev = NULL;
+ struct mlx5_lag *ldev;
++ unsigned long flags;
+ int i;
+
+- spin_lock(&lag_lock);
++ spin_lock_irqsave(&lag_lock, flags);
+ ldev = mlx5_lag_dev(dev);
+
+ if (!(ldev && __mlx5_lag_is_roce(ldev)))
+@@ -1373,7 +1381,7 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
+ dev_hold(ndev);
+
+ unlock:
+- spin_unlock(&lag_lock);
++ spin_unlock_irqrestore(&lag_lock, flags);
+
+ return ndev;
+ }
+@@ -1383,10 +1391,11 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
+ struct net_device *slave)
+ {
+ struct mlx5_lag *ldev;
++ unsigned long flags;
+ u8 port = 0;
+ int i;
+
+- spin_lock(&lag_lock);
++ spin_lock_irqsave(&lag_lock, flags);
+ ldev = mlx5_lag_dev(dev);
+ if (!(ldev && __mlx5_lag_is_roce(ldev)))
+ goto unlock;
+@@ -1401,7 +1410,7 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
+ port = ldev->v2p_map[port * ldev->buckets];
+
+ unlock:
+- spin_unlock(&lag_lock);
++ spin_unlock_irqrestore(&lag_lock, flags);
+ return port;
+ }
+ EXPORT_SYMBOL(mlx5_lag_get_slave_port);
+@@ -1422,8 +1431,9 @@ struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev)
+ {
+ struct mlx5_core_dev *peer_dev = NULL;
+ struct mlx5_lag *ldev;
++ unsigned long flags;
+
+- spin_lock(&lag_lock);
++ spin_lock_irqsave(&lag_lock, flags);
+ ldev = mlx5_lag_dev(dev);
+ if (!ldev)
+ goto unlock;
+@@ -1433,7 +1443,7 @@ struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev)
+ ldev->pf[MLX5_LAG_P1].dev;
+
+ unlock:
+- spin_unlock(&lag_lock);
++ spin_unlock_irqrestore(&lag_lock, flags);
+ return peer_dev;
+ }
+ EXPORT_SYMBOL(mlx5_lag_get_peer_mdev);
+@@ -1446,6 +1456,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
+ int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
+ struct mlx5_core_dev **mdev;
+ struct mlx5_lag *ldev;
++ unsigned long flags;
+ int num_ports;
+ int ret, i, j;
+ void *out;
+@@ -1462,7 +1473,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
+
+ memset(values, 0, sizeof(*values) * num_counters);
+
+- spin_lock(&lag_lock);
++ spin_lock_irqsave(&lag_lock, flags);
+ ldev = mlx5_lag_dev(dev);
+ if (ldev && __mlx5_lag_is_active(ldev)) {
+ num_ports = ldev->ports;
+@@ -1472,7 +1483,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
+ num_ports = 1;
+ mdev[MLX5_LAG_P1] = dev;
+ }
+- spin_unlock(&lag_lock);
++ spin_unlock_irqrestore(&lag_lock, flags);
+
+ for (i = 0; i < num_ports; ++i) {
+ u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = {};
+--
+2.35.1
+
--- /dev/null
+From f1e7182ab8ab97dbf3b69d1e55e7f80352a3ef96 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 7 Aug 2022 08:25:28 +0300
+Subject: net/mlx5: Eswitch, Fix forwarding decision to uplink
+
+From: Eli Cohen <elic@nvidia.com>
+
+[ Upstream commit 942fca7e762be39204e5926e91a288a343a97c72 ]
+
+Make sure to modify the rule for uplink forwarding only for the case
+where destination vport number is MLX5_VPORT_UPLINK.
+
+Fixes: 94db33177819 ("net/mlx5: Support multiport eswitch mode")
+Signed-off-by: Eli Cohen <elic@nvidia.com>
+Reviewed-by: Maor Dickman <maord@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index eb79810199d3e..d04739cb793e5 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -427,7 +427,8 @@ esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *f
+ dest[dest_idx].vport.vhca_id =
+ MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
+ dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+- if (mlx5_lag_mpesw_is_activated(esw->dev))
++ if (dest[dest_idx].vport.num == MLX5_VPORT_UPLINK &&
++ mlx5_lag_mpesw_is_activated(esw->dev))
+ dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
+ }
+ if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
+--
+2.35.1
+
--- /dev/null
+From 84f7dfa5c04d080dcb7c735e1d68b1da2b3d5b85 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Mar 2022 17:59:27 +0300
+Subject: net/mlx5: Fix cmd error logging for manage pages cmd
+
+From: Roy Novich <royno@nvidia.com>
+
+[ Upstream commit 090f3e4f4089ab8041ed7d632c7851c2a42fcc10 ]
+
+When the driver unloads, give/reclaim_pages may fail because the PF
+driver is in teardown flow; the current code then leads to the following
+kernel log print: 'failed reclaiming pages: err 0'.
+
+Fix it to get the same behavior as before the cited commits by calling
+mlx5_cmd_check before handling the error state. mlx5_cmd_check verifies
+whether the returned error is an actual error that the driver needs to
+handle, and returns an appropriate value.
+
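+With the ordering fixed, the give_pages() error path reads as below (same
+code as the hunk that follows):
+
+    err = mlx5_cmd_check(dev, err, in, out);	/* returns 0 when the failure is not a real error to handle */
+    if (err) {
+            mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
+                           func_id, npages, err);
+            goto out_dropped;
+    }
+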
+Fixes: 8d564292a166 ("net/mlx5: Remove redundant error on reclaim pages")
+Fixes: 4dac2f10ada0 ("net/mlx5: Remove redundant notify fail on give pages")
+Signed-off-by: Roy Novich <royno@nvidia.com>
+Reviewed-by: Moshe Shemesh <moshe@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+index ec76a8b1acc1c..60596357bfc7a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+@@ -376,8 +376,8 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
+ goto out_dropped;
+ }
+ }
++ err = mlx5_cmd_check(dev, err, in, out);
+ if (err) {
+- err = mlx5_cmd_check(dev, err, in, out);
+ mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
+ func_id, npages, err);
+ goto out_dropped;
+@@ -524,10 +524,13 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
+ dev->priv.reclaim_pages_discard += npages;
+ }
+ /* if triggered by FW event and failed by FW then ignore */
+- if (event && err == -EREMOTEIO)
++ if (event && err == -EREMOTEIO) {
+ err = 0;
++ goto out_free;
++ }
++
++ err = mlx5_cmd_check(dev, err, in, out);
+ if (err) {
+- err = mlx5_cmd_check(dev, err, in, out);
+ mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
+ goto out_free;
+ }
+--
+2.35.1
+
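The reordering above is easier to see in isolation. Below is a standalone C
sketch (illustrative only, not the mlx5 code; fw_exec() and translate_status()
are invented stand-ins) of why translating the status only inside the if (err)
branch produces the misleading 'err 0' message, while translating first keeps
benign teardown errors out of the log:

  #include <stdio.h>

  /* fw_exec() and translate_status() are invented stand-ins */
  static int fw_exec(void)           { return -5; }            /* transport-level failure...        */
  static int translate_status(int e) { (void)e; return 0; }    /* ...that translation deems benign  */

  static void old_flow(void)
  {
      int err = fw_exec();

      if (err) {                       /* raw error is non-zero, so we enter */
          err = translate_status(err); /* ...but translation clears it       */
          printf("old: failed reclaiming pages: err %d\n", err); /* prints "err 0" */
      }
  }

  static void new_flow(void)
  {
      int err = translate_status(fw_exec()); /* translate first */

      if (err) /* benign firmware teardown errors no longer reach the log */
          printf("new: failed reclaiming pages: err %d\n", err);
  }

  int main(void)
  {
      old_flow();
      new_flow();
      return 0;
  }

Running it prints the bogus message only from the old flow.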
--- /dev/null
+From c6273f208f018efb4ffde0421406851a77d671b7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 2 Aug 2022 19:45:36 +0300
+Subject: net/mlx5: LAG, fix logic over MLX5_LAG_FLAG_NDEVS_READY
+
+From: Eli Cohen <elic@nvidia.com>
+
+[ Upstream commit a6e675a66175869b7d87c0e1dd0ddf93e04f8098 ]
+
+Only set MLX5_LAG_FLAG_NDEVS_READY if both netdevices are registered.
+Doing so guarantees that both ldev->pf[MLX5_LAG_P0].dev and
+ldev->pf[MLX5_LAG_P1].dev have valid pointers when
+MLX5_LAG_FLAG_NDEVS_READY is set.
+
+The core issue is asymmetry in setting MLX5_LAG_FLAG_NDEVS_READY and
+clearing it. Setting it is done wrongly when both
+ldev->pf[MLX5_LAG_P0].dev and ldev->pf[MLX5_LAG_P1].dev are set;
+clearing it is done correctly when either ldev->pf[i].netdev is cleared.
+
+Consider the following scenario:
+1. PF0 loads and sets ldev->pf[MLX5_LAG_P0].dev to a valid pointer
+2. PF1 loads and sets both ldev->pf[MLX5_LAG_P1].dev and
+ ldev->pf[MLX5_LAG_P1].netdev with valid pointers. This results in
+ MLX5_LAG_FLAG_NDEVS_READY being set.
+3. PF0 is unloaded before setting dev->pf[MLX5_LAG_P0].netdev.
+ MLX5_LAG_FLAG_NDEVS_READY remains set.
+
+Further execution of mlx5_do_bond() will result in a null pointer
+dereference when calling mlx5_lag_is_multipath().
+
+This patch fixes the following call trace actually encountered:
+
+[ 1293.475195] BUG: kernel NULL pointer dereference, address: 00000000000009a8
+[ 1293.478756] #PF: supervisor read access in kernel mode
+[ 1293.481320] #PF: error_code(0x0000) - not-present page
+[ 1293.483686] PGD 0 P4D 0
+[ 1293.484434] Oops: 0000 [#1] SMP PTI
+[ 1293.485377] CPU: 1 PID: 23690 Comm: kworker/u16:2 Not tainted 5.18.0-rc5_for_upstream_min_debug_2022_05_05_10_13 #1
+[ 1293.488039] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.13.0-0-gf21b5a4aeb02-prebuilt.qemu.org 04/01/2014
+[ 1293.490836] Workqueue: mlx5_lag mlx5_do_bond_work [mlx5_core]
+[ 1293.492448] RIP: 0010:mlx5_lag_is_multipath+0x5/0x50 [mlx5_core]
+[ 1293.494044] Code: e8 70 40 ff e0 48 8b 14 24 48 83 05 5c 1a 1b 00 01 e9 19 ff ff ff 48 83 05 47 1a 1b 00 01 eb d7 0f 1f 44 00 00 0f 1f 44 00 00 <48> 8b 87 a8 09 00 00 48 85 c0 74 26 48 83 05 a7 1b 1b 00 01 41 b8
+[ 1293.498673] RSP: 0018:ffff88811b2fbe40 EFLAGS: 00010202
+[ 1293.500152] RAX: ffff88818a94e1c0 RBX: ffff888165eca6c0 RCX: 0000000000000000
+[ 1293.501841] RDX: 0000000000000001 RSI: ffff88818a94e1c0 RDI: 0000000000000000
+[ 1293.503585] RBP: 0000000000000000 R08: ffff888119886740 R09: ffff888165eca73c
+[ 1293.505286] R10: 0000000000000018 R11: 0000000000000018 R12: ffff88818a94e1c0
+[ 1293.506979] R13: ffff888112729800 R14: 0000000000000000 R15: ffff888112729858
+[ 1293.508753] FS: 0000000000000000(0000) GS:ffff88852cc40000(0000) knlGS:0000000000000000
+[ 1293.510782] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 1293.512265] CR2: 00000000000009a8 CR3: 00000001032d4002 CR4: 0000000000370ea0
+[ 1293.514001] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[ 1293.515806] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+
+Fixes: 8a66e4585979 ("net/mlx5: Change ownership model for lag")
+Signed-off-by: Eli Cohen <elic@nvidia.com>
+Reviewed-by: Maor Dickman <maord@nvidia.com>
+Reviewed-by: Mark Bloch <mbloch@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+index 5d41e19378e09..c520edb942ca5 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+@@ -1234,7 +1234,7 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
+ mlx5_ldev_add_netdev(ldev, dev, netdev);
+
+ for (i = 0; i < ldev->ports; i++)
+- if (!ldev->pf[i].dev)
++ if (!ldev->pf[i].netdev)
+ break;
+
+ if (i >= ldev->ports)
+--
+2.35.1
+
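As a rough illustration of the symmetry argument above, the following
standalone C sketch (struct and field names are simplified stand-ins, not the
mlx5 lag structures) keys the readiness test on the same field whose clearing
also clears the flag:

  #include <stdbool.h>
  #include <stdio.h>

  #define NUM_PORTS 2

  /* simplified stand-in for the lag port bookkeeping */
  struct port {
      void *dev;    /* set early, when the core device loads      */
      void *netdev; /* set later, when the netdev gets registered */
  };

  /* The readiness flag may only be set once every port has a registered
   * netdev, because that is also the condition under which it is cleared. */
  static bool ndevs_ready(const struct port *pf)
  {
      for (int i = 0; i < NUM_PORTS; i++)
          if (!pf[i].netdev) /* the fix: test .netdev, not .dev */
              return false;
      return true;
  }

  int main(void)
  {
      int x, y;
      struct port pf[NUM_PORTS] = {
          { .dev = &x, .netdev = NULL }, /* PF0 loaded, netdev not yet registered */
          { .dev = &y, .netdev = &y },   /* PF1 fully up */
      };

      printf("ready: %s\n", ndevs_ready(pf) ? "yes" : "no"); /* no  */
      pf[0].netdev = &x;
      printf("ready: %s\n", ndevs_ready(pf) ? "yes" : "no"); /* yes */
      return 0;
  }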
--- /dev/null
+From ef71d575c51dc57c685af9ed41996643b71dffbd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 8 Jun 2022 18:38:37 +0300
+Subject: net/mlx5e: Fix wrong application of the LRO state
+
+From: Aya Levin <ayal@nvidia.com>
+
+[ Upstream commit 7b3707fc79044871ab8f3d5fa5e9603155bb5577 ]
+
+The driver caches the packet merge type in the mlx5e_params instance,
+which must be in perfect sync with the netdev_feature's bit.
+Prior to this patch, under certain conditions (*) the LRO state was set
+in mlx5e_params while the netdev_feature's bit was off, causing LRO to
+be applied on the RQs (HW level).
+
+(*) This can happen only on profile init (mlx5e_build_nic_params()),
+when the RQ expects a non-linear SKB and the PCI is fast enough in
+comparison to the link width.
+
+Solution: remove the setting of the packet merge type from
+mlx5e_build_nic_params(), as netdev features are not updated there.
+
+Fixes: 619a8f2a42f1 ("net/mlx5e: Use linear SKB in Striding RQ")
+Signed-off-by: Aya Levin <ayal@nvidia.com>
+Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
+Reviewed-by: Maxim Mikityanskiy <maximmi@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 8 --------
+ 1 file changed, 8 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 087952b84ccb0..62aab20025345 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -4733,14 +4733,6 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
+ /* RQ */
+ mlx5e_build_rq_params(mdev, params);
+
+- /* HW LRO */
+- if (MLX5_CAP_ETH(mdev, lro_cap) &&
+- params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
+- /* No XSK params: checking the availability of striding RQ in general. */
+- if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
+- params->packet_merge.type = slow_pci_heuristic(mdev) ?
+- MLX5E_PACKET_MERGE_NONE : MLX5E_PACKET_MERGE_LRO;
+- }
+ params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
+
+ /* CQ moderation params */
+--
+2.35.1
+
--- /dev/null
+From fce3eed2b49880a2c75f915d27bccbfc472d07d6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Aug 2022 15:28:42 +0300
+Subject: net/mlx5e: Fix wrong tc flag used when set hw-tc-offload off
+
+From: Maor Dickman <maord@nvidia.com>
+
+[ Upstream commit 550f96432e6f6770efdaee0e65239d61431062a1 ]
+
+The cited commit reintroduced the ability to set hw-tc-offload
+in switchdev mode by reusing the NIC mode calls without modifying them
+to support both modes. This can cause an illegal memory access
+when trying to turn hw-tc-offload off.
+
+Fix this by using the right TC_FLAG when checking if tc rules
+are installed while disabling hw-tc-offload.
+
+Fixes: d3cbd4254df8 ("net/mlx5e: Add ndo_set_feature for uplink representor")
+Signed-off-by: Maor Dickman <maord@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 62aab20025345..9e6db779b6efa 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -3678,7 +3678,9 @@ static int set_feature_hw_tc(struct net_device *netdev, bool enable)
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+- if (!enable && mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD))) {
++ int tc_flag = mlx5e_is_uplink_rep(priv) ? MLX5_TC_FLAG(ESW_OFFLOAD) :
++ MLX5_TC_FLAG(NIC_OFFLOAD);
++ if (!enable && mlx5e_tc_num_filters(priv, tc_flag)) {
+ netdev_err(netdev,
+ "Active offloaded tc filters, can't turn hw_tc_offload off\n");
+ return -EINVAL;
+--
+2.35.1
+
--- /dev/null
+From 36ccc945de171a13e9f4965a85b2337a546a0b3c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Jul 2022 21:41:48 +0200
+Subject: net/mlx5e: Properly disable vlan strip on non-UL reps
+
+From: Vlad Buslov <vladbu@nvidia.com>
+
+[ Upstream commit f37044fd759b6bc40b6398a978e0b1acdf717372 ]
+
+When querying mlx5 non-uplink representors' capabilities with ethtool,
+rx-vlan-offload is marked as "off [fixed]". However, it is actually always
+enabled because mlx5e_params->vlan_strip_disable is 0 by default when
+the struct mlx5e_params instance is initialized. Fix the issue by
+explicitly setting vlan_strip_disable to 'true' for non-uplink
+representors.
+
+Fixes: cb67b832921c ("net/mlx5e: Introduce SRIOV VF representors")
+Signed-off-by: Vlad Buslov <vladbu@nvidia.com>
+Reviewed-by: Roi Dayan <roid@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+index f797fd97d305b..7da3dc6261929 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+@@ -662,6 +662,8 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
+
+ params->mqprio.num_tc = 1;
+ params->tunneled_offload_en = false;
++ if (rep->vport != MLX5_VPORT_UPLINK)
++ params->vlan_strip_disable = true;
+
+ mlx5_query_min_inline(mdev, ¶ms->tx_min_inline_mode);
+ }
+--
+2.35.1
+
--- /dev/null
+From 1707ea46020f07a9226c3e1485a74a3f5a25eae2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 19 Aug 2022 14:05:19 +0300
+Subject: net: moxa: get rid of asymmetry in DMA mapping/unmapping
+
+From: Sergei Antonov <saproj@gmail.com>
+
+[ Upstream commit 0ee7828dfc56e97d71e51e6374dc7b4eb2b6e081 ]
+
+Since priv->rx_mapping[i] is mapped in moxart_mac_open(), we
+should unmap it in moxart_mac_stop(). This fixes two warnings.
+
+1. During error unwinding in moxart_mac_probe(): "goto init_fail;",
+then moxart_mac_free_memory() calls dma_unmap_single() with
+priv->rx_mapping[i] pointers zeroed.
+
+WARNING: CPU: 0 PID: 1 at kernel/dma/debug.c:963 check_unmap+0x704/0x980
+DMA-API: moxart-ethernet 92000000.mac: device driver tries to free DMA memory it has not allocated [device address=0x0000000000000000] [size=1600 bytes]
+CPU: 0 PID: 1 Comm: swapper Not tainted 5.19.0+ #60
+Hardware name: Generic DT based system
+ unwind_backtrace from show_stack+0x10/0x14
+ show_stack from dump_stack_lvl+0x34/0x44
+ dump_stack_lvl from __warn+0xbc/0x1f0
+ __warn from warn_slowpath_fmt+0x94/0xc8
+ warn_slowpath_fmt from check_unmap+0x704/0x980
+ check_unmap from debug_dma_unmap_page+0x8c/0x9c
+ debug_dma_unmap_page from moxart_mac_free_memory+0x3c/0xa8
+ moxart_mac_free_memory from moxart_mac_probe+0x190/0x218
+ moxart_mac_probe from platform_probe+0x48/0x88
+ platform_probe from really_probe+0xc0/0x2e4
+
+2. After commands:
+ ip link set dev eth0 down
+ ip link set dev eth0 up
+
+WARNING: CPU: 0 PID: 55 at kernel/dma/debug.c:570 add_dma_entry+0x204/0x2ec
+DMA-API: moxart-ethernet 92000000.mac: cacheline tracking EEXIST, overlapping mappings aren't supported
+CPU: 0 PID: 55 Comm: ip Not tainted 5.19.0+ #57
+Hardware name: Generic DT based system
+ unwind_backtrace from show_stack+0x10/0x14
+ show_stack from dump_stack_lvl+0x34/0x44
+ dump_stack_lvl from __warn+0xbc/0x1f0
+ __warn from warn_slowpath_fmt+0x94/0xc8
+ warn_slowpath_fmt from add_dma_entry+0x204/0x2ec
+ add_dma_entry from dma_map_page_attrs+0x110/0x328
+ dma_map_page_attrs from moxart_mac_open+0x134/0x320
+ moxart_mac_open from __dev_open+0x11c/0x1ec
+ __dev_open from __dev_change_flags+0x194/0x22c
+ __dev_change_flags from dev_change_flags+0x14/0x44
+ dev_change_flags from devinet_ioctl+0x6d4/0x93c
+ devinet_ioctl from inet_ioctl+0x1ac/0x25c
+
+v1 -> v2:
+Extraneous change removed.
+
+Fixes: 6c821bd9edc9 ("net: Add MOXA ART SoCs ethernet driver")
+Signed-off-by: Sergei Antonov <saproj@gmail.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Link: https://lore.kernel.org/r/20220819110519.1230877-1-saproj@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/moxa/moxart_ether.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
+index f11f1cb92025f..3b6beb96ca856 100644
+--- a/drivers/net/ethernet/moxa/moxart_ether.c
++++ b/drivers/net/ethernet/moxa/moxart_ether.c
+@@ -74,11 +74,6 @@ static int moxart_set_mac_address(struct net_device *ndev, void *addr)
+ static void moxart_mac_free_memory(struct net_device *ndev)
+ {
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+- int i;
+-
+- for (i = 0; i < RX_DESC_NUM; i++)
+- dma_unmap_single(&priv->pdev->dev, priv->rx_mapping[i],
+- priv->rx_buf_size, DMA_FROM_DEVICE);
+
+ if (priv->tx_desc_base)
+ dma_free_coherent(&priv->pdev->dev,
+@@ -193,6 +188,7 @@ static int moxart_mac_open(struct net_device *ndev)
+ static int moxart_mac_stop(struct net_device *ndev)
+ {
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
++ int i;
+
+ napi_disable(&priv->napi);
+
+@@ -204,6 +200,11 @@ static int moxart_mac_stop(struct net_device *ndev)
+ /* disable all functions */
+ writel(0, priv->base + REG_MAC_CTRL);
+
++ /* unmap areas mapped in moxart_mac_setup_desc_ring() */
++ for (i = 0; i < RX_DESC_NUM; i++)
++ dma_unmap_single(&priv->pdev->dev, priv->rx_mapping[i],
++ priv->rx_buf_size, DMA_FROM_DEVICE);
++
+ return 0;
+ }
+
+--
+2.35.1
+
--- /dev/null
+From 63e025e1948f786db287ac60d1a1450492a01ece Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 19 Aug 2022 16:24:51 +0800
+Subject: net: phy: Don't WARN for PHY_READY state in mdio_bus_phy_resume()
+
+From: Xiaolei Wang <xiaolei.wang@windriver.com>
+
+[ Upstream commit 6dbe852c379ff032a70a6b13a91914918c82cb07 ]
+
+Some MAC drivers set mac_managed_pm to true in their ->ndo_open()
+callback. So before mac_managed_pm is set to true, we still want to
+leverage mdio_bus_phy_suspend()/resume() for the phy device suspend
+and resume. In this case, the phy device is in the PHY_READY state,
+and we shouldn't warn about this. The check of mac_managed_pm in the
+WARN_ON also seems redundant since we already check it at the entry
+of mdio_bus_phy_resume(), so drop it.
+
+Fixes: 744d23c71af3 ("net: phy: Warn about incorrect mdio_bus_phy_resume() state")
+Signed-off-by: Xiaolei Wang <xiaolei.wang@windriver.com>
+Acked-by: Florian Fainelli <f.fainelli@gmail.com>
+Link: https://lore.kernel.org/r/20220819082451.1992102-1-xiaolei.wang@windriver.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/phy/phy_device.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index 608de5a94165f..f90a21781d8d6 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -316,11 +316,11 @@ static __maybe_unused int mdio_bus_phy_resume(struct device *dev)
+
+ phydev->suspended_by_mdio_bus = 0;
+
+- /* If we managed to get here with the PHY state machine in a state other
+- * than PHY_HALTED this is an indication that something went wrong and
+- * we should most likely be using MAC managed PM and we are not.
++ /* If we manged to get here with the PHY state machine in a state neither
++ * PHY_HALTED nor PHY_READY this is an indication that something went wrong
++ * and we should most likely be using MAC managed PM and we are not.
+ */
+- WARN_ON(phydev->state != PHY_HALTED && !phydev->mac_managed_pm);
++ WARN_ON(phydev->state != PHY_HALTED && phydev->state != PHY_READY);
+
+ ret = phy_init_hw(phydev);
+ if (ret < 0)
+--
+2.35.1
+
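The relaxed assertion boils down to a two-state allow list. A tiny standalone
C sketch (the enum below is a hypothetical subset of the real PHY state
machine, not the kernel's definition):

  #include <stdio.h>

  /* hypothetical subset of the PHY state machine states */
  enum phy_state { PHY_DOWN, PHY_READY, PHY_HALTED, PHY_RUNNING };

  /* After the fix, only states other than HALTED and READY at resume time
   * hint that MAC-managed PM should have been used but was not. */
  static int resume_state_is_suspicious(enum phy_state state)
  {
      return state != PHY_HALTED && state != PHY_READY;
  }

  int main(void)
  {
      printf("HALTED  suspicious? %d\n", resume_state_is_suspicious(PHY_HALTED));  /* 0 */
      printf("READY   suspicious? %d\n", resume_state_is_suspicious(PHY_READY));   /* 0 */
      printf("RUNNING suspicious? %d\n", resume_state_is_suspicious(PHY_RUNNING)); /* 1 */
      return 0;
  }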
--- /dev/null
+From 0dd4fe03156810ab212427474240ea66e35e49f7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Aug 2022 22:34:49 +0200
+Subject: net: stmmac: work around sporadic tx issue on link-up
+
+From: Heiner Kallweit <hkallweit1@gmail.com>
+
+[ Upstream commit a3a57bf07de23fe1ff779e0fdf710aa581c3ff73 ]
+
+This is a follow-up to the discussion in [0]. It seems that at least
+the IP version used on Amlogic SoCs sometimes has a problem if register
+MAC_CTRL_REG is written whilst the chip is still processing a previous
+write. But that's just a guess.
+Adding a delay between the two writes to this register helps, but we can
+also simply omit the offending second write. This patch uses the second
+approach and is based on a suggestion from Qi Duan.
+A benefit of this approach is that we save a few register writes, also
+on unaffected chip versions.
+
+[0] https://www.spinics.net/lists/netdev/msg831526.html
+
+Fixes: bfab27a146ed ("stmmac: add the experimental PCI support")
+Suggested-by: Qi Duan <qi.duan@amlogic.com>
+Suggested-by: Jerome Brunet <jbrunet@baylibre.com>
+Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
+Link: https://lore.kernel.org/r/e99857ce-bd90-5093-ca8c-8cd480b5a0a2@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c | 8 ++++++--
+ drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 9 +++++----
+ 2 files changed, 11 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+index caa4bfc4c1d62..9b6138b117766 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+@@ -258,14 +258,18 @@ EXPORT_SYMBOL_GPL(stmmac_set_mac_addr);
+ /* Enable disable MAC RX/TX */
+ void stmmac_set_mac(void __iomem *ioaddr, bool enable)
+ {
+- u32 value = readl(ioaddr + MAC_CTRL_REG);
++ u32 old_val, value;
++
++ old_val = readl(ioaddr + MAC_CTRL_REG);
++ value = old_val;
+
+ if (enable)
+ value |= MAC_ENABLE_RX | MAC_ENABLE_TX;
+ else
+ value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX);
+
+- writel(value, ioaddr + MAC_CTRL_REG);
++ if (value != old_val)
++ writel(value, ioaddr + MAC_CTRL_REG);
+ }
+
+ void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index c5f33630e7718..78f11dabca056 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -983,10 +983,10 @@ static void stmmac_mac_link_up(struct phylink_config *config,
+ bool tx_pause, bool rx_pause)
+ {
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+- u32 ctrl;
++ u32 old_ctrl, ctrl;
+
+- ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
+- ctrl &= ~priv->hw->link.speed_mask;
++ old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
++ ctrl = old_ctrl & ~priv->hw->link.speed_mask;
+
+ if (interface == PHY_INTERFACE_MODE_USXGMII) {
+ switch (speed) {
+@@ -1061,7 +1061,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
+ if (tx_pause && rx_pause)
+ stmmac_mac_flow_ctrl(priv, duplex);
+
+- writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
++ if (ctrl != old_ctrl)
++ writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
+
+ stmmac_mac_set(priv, priv->ioaddr, true);
+ if (phy && priv->dma_cap.eee) {
+--
+2.35.1
+
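The "omit the offending second write" idea is a read-modify-write that skips
the MMIO store when nothing changed. A standalone C sketch with an in-memory
fake register (register name and bit values here are arbitrary, not taken from
the driver):

  #include <stdint.h>
  #include <stdio.h>

  #define MAC_ENABLE_RX (1u << 2)
  #define MAC_ENABLE_TX (1u << 3)

  /* in-memory stand-in for the MAC_CTRL_REG MMIO register */
  static uint32_t fake_reg;
  static int write_count;

  static uint32_t reg_read(void)        { return fake_reg; }
  static void     reg_write(uint32_t v) { fake_reg = v; write_count++; }

  /* Write the register only when the value actually changes, so a link-up
   * path that follows an earlier enable does not issue a second, redundant
   * (and on some chips problematic) write. */
  static void set_mac(int enable)
  {
      uint32_t old_val = reg_read();
      uint32_t value = old_val;

      if (enable)
          value |= MAC_ENABLE_RX | MAC_ENABLE_TX;
      else
          value &= ~(MAC_ENABLE_RX | MAC_ENABLE_TX);

      if (value != old_val)
          reg_write(value);
  }

  int main(void)
  {
      set_mac(1);  /* first enable: one write     */
      set_mac(1);  /* already enabled: skipped    */
      set_mac(0);  /* disable: one write          */
      printf("writes issued: %d\n", write_count); /* 2 */
      return 0;
  }

Running it reports two writes for the enable/enable/disable sequence instead
of three.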
--- /dev/null
+From 6dc7ffdefc225e1d5f4894087e45aa41053f7392 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 20 Aug 2022 17:38:37 +0200
+Subject: netfilter: ebtables: reject blobs that don't provide all entry points
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 7997eff82828304b780dc0a39707e1946d6f1ebf ]
+
+Harshit Mogalapalli says:
+ In ebt_do_table() function dereferencing 'private->hook_entry[hook]'
+ can lead to NULL pointer dereference. [..] Kernel panic:
+
+general protection fault, probably for non-canonical address 0xdffffc0000000005: 0000 [#1] PREEMPT SMP KASAN
+KASAN: null-ptr-deref in range [0x0000000000000028-0x000000000000002f]
+[..]
+RIP: 0010:ebt_do_table+0x1dc/0x1ce0
+Code: 89 fa 48 c1 ea 03 80 3c 02 00 0f 85 5c 16 00 00 48 b8 00 00 00 00 00 fc ff df 49 8b 6c df 08 48 8d 7d 2c 48 89 fa 48 c1 ea 03 <0f> b6 14 02 48 89 f8 83 e0 07 83 c0 03 38 d0 7c 08 84 d2 0f 85 88
+[..]
+Call Trace:
+ nf_hook_slow+0xb1/0x170
+ __br_forward+0x289/0x730
+ maybe_deliver+0x24b/0x380
+ br_flood+0xc6/0x390
+ br_dev_xmit+0xa2e/0x12c0
+
+For some reason ebtables rejects blobs that provide entry points that are
+not supported by the table, but what it should instead reject is the
+opposite: blobs that DO NOT provide an entry point supported by the table.
+
+t->valid_hooks is the bitmask of hooks (input, forward ...) that will see
+packets. Providing an entry point that is not supported is harmless
+(never called/used), but the inverse isn't: it results in a crash
+because the ebtables traverser doesn't expect a NULL blob for a location
+it's receiving packets for.
+
+Instead of fixing all the individual checks, do what iptables is doing and
+reject all blobs that differ from the expected hooks.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Reported-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Reported-by: syzkaller <syzkaller@googlegroups.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/netfilter_bridge/ebtables.h | 4 ----
+ net/bridge/netfilter/ebtable_broute.c | 8 --------
+ net/bridge/netfilter/ebtable_filter.c | 8 --------
+ net/bridge/netfilter/ebtable_nat.c | 8 --------
+ net/bridge/netfilter/ebtables.c | 8 +-------
+ 5 files changed, 1 insertion(+), 35 deletions(-)
+
+diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
+index a13296d6c7ceb..fd533552a062c 100644
+--- a/include/linux/netfilter_bridge/ebtables.h
++++ b/include/linux/netfilter_bridge/ebtables.h
+@@ -94,10 +94,6 @@ struct ebt_table {
+ struct ebt_replace_kernel *table;
+ unsigned int valid_hooks;
+ rwlock_t lock;
+- /* e.g. could be the table explicitly only allows certain
+- * matches, targets, ... 0 == let it in */
+- int (*check)(const struct ebt_table_info *info,
+- unsigned int valid_hooks);
+ /* the data used by the kernel */
+ struct ebt_table_info *private;
+ struct nf_hook_ops *ops;
+diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
+index 1a11064f99907..8f19253024b0a 100644
+--- a/net/bridge/netfilter/ebtable_broute.c
++++ b/net/bridge/netfilter/ebtable_broute.c
+@@ -36,18 +36,10 @@ static struct ebt_replace_kernel initial_table = {
+ .entries = (char *)&initial_chain,
+ };
+
+-static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
+-{
+- if (valid_hooks & ~(1 << NF_BR_BROUTING))
+- return -EINVAL;
+- return 0;
+-}
+-
+ static const struct ebt_table broute_table = {
+ .name = "broute",
+ .table = &initial_table,
+ .valid_hooks = 1 << NF_BR_BROUTING,
+- .check = check,
+ .me = THIS_MODULE,
+ };
+
+diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
+index cb949436bc0e3..278f324e67524 100644
+--- a/net/bridge/netfilter/ebtable_filter.c
++++ b/net/bridge/netfilter/ebtable_filter.c
+@@ -43,18 +43,10 @@ static struct ebt_replace_kernel initial_table = {
+ .entries = (char *)initial_chains,
+ };
+
+-static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
+-{
+- if (valid_hooks & ~FILTER_VALID_HOOKS)
+- return -EINVAL;
+- return 0;
+-}
+-
+ static const struct ebt_table frame_filter = {
+ .name = "filter",
+ .table = &initial_table,
+ .valid_hooks = FILTER_VALID_HOOKS,
+- .check = check,
+ .me = THIS_MODULE,
+ };
+
+diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
+index 5ee0531ae5061..9066f7f376d57 100644
+--- a/net/bridge/netfilter/ebtable_nat.c
++++ b/net/bridge/netfilter/ebtable_nat.c
+@@ -43,18 +43,10 @@ static struct ebt_replace_kernel initial_table = {
+ .entries = (char *)initial_chains,
+ };
+
+-static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
+-{
+- if (valid_hooks & ~NAT_VALID_HOOKS)
+- return -EINVAL;
+- return 0;
+-}
+-
+ static const struct ebt_table frame_nat = {
+ .name = "nat",
+ .table = &initial_table,
+ .valid_hooks = NAT_VALID_HOOKS,
+- .check = check,
+ .me = THIS_MODULE,
+ };
+
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
+index f2dbefb61ce83..9a0ae59cdc500 100644
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -1040,8 +1040,7 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl,
+ goto free_iterate;
+ }
+
+- /* the table doesn't like it */
+- if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
++ if (repl->valid_hooks != t->valid_hooks)
+ goto free_unlock;
+
+ if (repl->num_counters && repl->num_counters != t->private->nentries) {
+@@ -1231,11 +1230,6 @@ int ebt_register_table(struct net *net, const struct ebt_table *input_table,
+ if (ret != 0)
+ goto free_chainstack;
+
+- if (table->check && table->check(newinfo, table->valid_hooks)) {
+- ret = -EINVAL;
+- goto free_chainstack;
+- }
+-
+ table->private = newinfo;
+ rwlock_init(&table->lock);
+ mutex_lock(&ebt_mutex);
+--
+2.35.1
+
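The difference between the removed per-table checks and the new generic one is
a subset test versus an exact bitmask match. A standalone C sketch with
invented hook bits (not the kernel's NF_BR_* values):

  #include <stdio.h>

  /* hypothetical bridge hook bits */
  #define HOOK_IN      (1u << 0)
  #define HOOK_FORWARD (1u << 1)
  #define HOOK_OUT     (1u << 2)

  #define TABLE_VALID_HOOKS (HOOK_IN | HOOK_FORWARD | HOOK_OUT)

  /* old-style check: only rejects hooks the table does NOT support, so a
   * blob that omits a supported hook slips through with a NULL entry */
  static int old_check(unsigned int blob_hooks)
  {
      return (blob_hooks & ~TABLE_VALID_HOOKS) ? -1 : 0;
  }

  /* new-style check: the blob must provide exactly the hooks the table uses */
  static int new_check(unsigned int blob_hooks)
  {
      return (blob_hooks != TABLE_VALID_HOOKS) ? -1 : 0;
  }

  int main(void)
  {
      unsigned int partial = HOOK_IN | HOOK_OUT; /* forward entry point missing */

      printf("old check: %s\n", old_check(partial) ? "rejected" : "accepted"); /* accepted */
      printf("new check: %s\n", new_check(partial) ? "rejected" : "accepted"); /* rejected */
      return 0;
  }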
--- /dev/null
+From 6921f764c947a2238e422f52e9a6279ff89f3966 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 22 Aug 2022 23:13:00 +0200
+Subject: netfilter: flowtable: add function to invoke garbage collection
+ immediately
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit 759eebbcfafcefa23b59e912396306543764bd3c ]
+
+Expose nf_flow_table_gc_run() to force a garbage collector run from the
+offload infrastructure.
+
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/netfilter/nf_flow_table.h | 1 +
+ net/netfilter/nf_flow_table_core.c | 12 +++++++++---
+ 2 files changed, 10 insertions(+), 3 deletions(-)
+
+diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
+index 64daafd1fc41c..32c25122ab184 100644
+--- a/include/net/netfilter/nf_flow_table.h
++++ b/include/net/netfilter/nf_flow_table.h
+@@ -270,6 +270,7 @@ void flow_offload_refresh(struct nf_flowtable *flow_table,
+
+ struct flow_offload_tuple_rhash *flow_offload_lookup(struct nf_flowtable *flow_table,
+ struct flow_offload_tuple *tuple);
++void nf_flow_table_gc_run(struct nf_flowtable *flow_table);
+ void nf_flow_table_gc_cleanup(struct nf_flowtable *flowtable,
+ struct net_device *dev);
+ void nf_flow_table_cleanup(struct net_device *dev);
+diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
+index f2def06d10709..18453fa25199c 100644
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -442,12 +442,17 @@ static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table,
+ }
+ }
+
++void nf_flow_table_gc_run(struct nf_flowtable *flow_table)
++{
++ nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
++}
++
+ static void nf_flow_offload_work_gc(struct work_struct *work)
+ {
+ struct nf_flowtable *flow_table;
+
+ flow_table = container_of(work, struct nf_flowtable, gc_work.work);
+- nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
++ nf_flow_table_gc_run(flow_table);
+ queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
+ }
+
+@@ -606,10 +611,11 @@ void nf_flow_table_free(struct nf_flowtable *flow_table)
+
+ cancel_delayed_work_sync(&flow_table->gc_work);
+ nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
+- nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
++ nf_flow_table_gc_run(flow_table);
+ nf_flow_table_offload_flush(flow_table);
+ if (nf_flowtable_hw_offload(flow_table))
+- nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
++ nf_flow_table_gc_run(flow_table);
++
+ rhashtable_destroy(&flow_table->rhashtable);
+ }
+ EXPORT_SYMBOL_GPL(nf_flow_table_free);
+--
+2.35.1
+
--- /dev/null
+From f9233156116d145804d08d38233d7c4d66b2c1f0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 18 Nov 2021 22:24:15 +0100
+Subject: netfilter: flowtable: fix stuck flows on cleanup due to pending work
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit 9afb4b27349a499483ae0134282cefd0c90f480f ]
+
+To clear the flow table on flow table free, the following sequence
+normally happens in order:
+
+ 1) gc_step work is stopped to disable any further stats/del requests.
+ 2) All flow table entries are set to teardown state.
+ 3) Run gc_step which will queue HW del work for each flow table entry.
+ 4) Waiting for the above del work to finish (flush).
+ 5) Run gc_step again, deleting all entries from the flow table.
+ 6) Flow table is freed.
+
+But if a flow table entry already has pending HW stats or HW add work,
+step 3 will not queue HW del work (it will be skipped), step 4 will wait
+for the pending add/stats to finish, and step 5 will queue HW del work
+which might execute after the flow table has been freed.
+
+To fix the above, this patch flushes the pending work, then sets the
+teardown flag on all flows in the flowtable and forces a garbage
+collector run to queue work to remove the flows from hardware, then
+flushes this new pending work and (finally) forces another garbage
+collector run to remove the entries from the software flowtable.
+
+Stack trace:
+[47773.882335] BUG: KASAN: use-after-free in down_read+0x99/0x460
+[47773.883634] Write of size 8 at addr ffff888103b45aa8 by task kworker/u20:6/543704
+[47773.885634] CPU: 3 PID: 543704 Comm: kworker/u20:6 Not tainted 5.12.0-rc7+ #2
+[47773.886745] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009)
+[47773.888438] Workqueue: nf_ft_offload_del flow_offload_work_handler [nf_flow_table]
+[47773.889727] Call Trace:
+[47773.890214] dump_stack+0xbb/0x107
+[47773.890818] print_address_description.constprop.0+0x18/0x140
+[47773.892990] kasan_report.cold+0x7c/0xd8
+[47773.894459] kasan_check_range+0x145/0x1a0
+[47773.895174] down_read+0x99/0x460
+[47773.899706] nf_flow_offload_tuple+0x24f/0x3c0 [nf_flow_table]
+[47773.907137] flow_offload_work_handler+0x72d/0xbe0 [nf_flow_table]
+[47773.913372] process_one_work+0x8ac/0x14e0
+[47773.921325]
+[47773.921325] Allocated by task 592159:
+[47773.922031] kasan_save_stack+0x1b/0x40
+[47773.922730] __kasan_kmalloc+0x7a/0x90
+[47773.923411] tcf_ct_flow_table_get+0x3cb/0x1230 [act_ct]
+[47773.924363] tcf_ct_init+0x71c/0x1156 [act_ct]
+[47773.925207] tcf_action_init_1+0x45b/0x700
+[47773.925987] tcf_action_init+0x453/0x6b0
+[47773.926692] tcf_exts_validate+0x3d0/0x600
+[47773.927419] fl_change+0x757/0x4a51 [cls_flower]
+[47773.928227] tc_new_tfilter+0x89a/0x2070
+[47773.936652]
+[47773.936652] Freed by task 543704:
+[47773.937303] kasan_save_stack+0x1b/0x40
+[47773.938039] kasan_set_track+0x1c/0x30
+[47773.938731] kasan_set_free_info+0x20/0x30
+[47773.939467] __kasan_slab_free+0xe7/0x120
+[47773.940194] slab_free_freelist_hook+0x86/0x190
+[47773.941038] kfree+0xce/0x3a0
+[47773.941644] tcf_ct_flow_table_cleanup_work
+
+Original patch description and stack trace by Paul Blakey.
+
+Fixes: c29f74e0df7a ("netfilter: nf_flow_table: hardware offload support")
+Reported-by: Paul Blakey <paulb@nvidia.com>
+Tested-by: Paul Blakey <paulb@nvidia.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/netfilter/nf_flow_table.h | 2 ++
+ net/netfilter/nf_flow_table_core.c | 7 +++----
+ net/netfilter/nf_flow_table_offload.c | 8 ++++++++
+ 3 files changed, 13 insertions(+), 4 deletions(-)
+
+diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
+index 32c25122ab184..9c93e4981b680 100644
+--- a/include/net/netfilter/nf_flow_table.h
++++ b/include/net/netfilter/nf_flow_table.h
+@@ -307,6 +307,8 @@ void nf_flow_offload_stats(struct nf_flowtable *flowtable,
+ struct flow_offload *flow);
+
+ void nf_flow_table_offload_flush(struct nf_flowtable *flowtable);
++void nf_flow_table_offload_flush_cleanup(struct nf_flowtable *flowtable);
++
+ int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
+ struct net_device *dev,
+ enum flow_block_command cmd);
+diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
+index 18453fa25199c..483b18d35cade 100644
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -610,12 +610,11 @@ void nf_flow_table_free(struct nf_flowtable *flow_table)
+ mutex_unlock(&flowtable_lock);
+
+ cancel_delayed_work_sync(&flow_table->gc_work);
++ nf_flow_table_offload_flush(flow_table);
++ /* ... no more pending work after this stage ... */
+ nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
+ nf_flow_table_gc_run(flow_table);
+- nf_flow_table_offload_flush(flow_table);
+- if (nf_flowtable_hw_offload(flow_table))
+- nf_flow_table_gc_run(flow_table);
+-
++ nf_flow_table_offload_flush_cleanup(flow_table);
+ rhashtable_destroy(&flow_table->rhashtable);
+ }
+ EXPORT_SYMBOL_GPL(nf_flow_table_free);
+diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
+index 11b6e19420920..4d1169b634c5f 100644
+--- a/net/netfilter/nf_flow_table_offload.c
++++ b/net/netfilter/nf_flow_table_offload.c
+@@ -1063,6 +1063,14 @@ void nf_flow_offload_stats(struct nf_flowtable *flowtable,
+ flow_offload_queue_work(offload);
+ }
+
++void nf_flow_table_offload_flush_cleanup(struct nf_flowtable *flowtable)
++{
++ if (nf_flowtable_hw_offload(flowtable)) {
++ flush_workqueue(nf_flow_offload_del_wq);
++ nf_flow_table_gc_run(flowtable);
++ }
++}
++
+ void nf_flow_table_offload_flush(struct nf_flowtable *flowtable)
+ {
+ if (nf_flowtable_hw_offload(flowtable)) {
+--
+2.35.1
+
--- /dev/null
+From d1788a0c1a110b3f209334b3b0dfab9837896798 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 22 Aug 2022 11:06:39 +0200
+Subject: netfilter: nf_tables: disallow binding to already bound chain
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit e02f0d3970404bfea385b6edb86f2d936db0ea2b ]
+
+Update nft_data_init() to report EINVAL if the chain is already bound.
+
+Fixes: d0e2c7de92c7 ("netfilter: nf_tables: add NFT_CHAIN_BINDING")
+Reported-by: Gwangun Jung <exsociety@gmail.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_tables_api.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index b2c89e8c2a655..bc690238a3c56 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -9657,6 +9657,8 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
+ return PTR_ERR(chain);
+ if (nft_is_base_chain(chain))
+ return -EOPNOTSUPP;
++ if (nft_chain_is_bound(chain))
++ return -EINVAL;
+ if (desc->flags & NFT_DATA_DESC_SETELEM &&
+ chain->flags & NFT_CHAIN_BINDING)
+ return -EINVAL;
+--
+2.35.1
+
--- /dev/null
+From 68d86ff6f56c33e6eb852f5b6a75af6dfc2e4470 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 21 Aug 2022 10:28:25 +0200
+Subject: netfilter: nf_tables: disallow updates of implicit chain
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit 5dc52d83baac30decf5f3b371d5eb41dfa1d1412 ]
+
+Updates on an existing implicit chain make no sense; disallow this.
+
+Fixes: d0e2c7de92c7 ("netfilter: nf_tables: add NFT_CHAIN_BINDING")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_tables_api.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 4bd6e9427c918..8b6ee9df817fb 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -2574,6 +2574,9 @@ static int nf_tables_newchain(struct sk_buff *skb, const struct nfnl_info *info,
+ nft_ctx_init(&ctx, net, skb, info->nlh, family, table, chain, nla);
+
+ if (chain != NULL) {
++ if (chain->flags & NFT_CHAIN_BINDING)
++ return -EINVAL;
++
+ if (info->nlh->nlmsg_flags & NLM_F_EXCL) {
+ NL_SET_BAD_ATTR(extack, attr);
+ return -EEXIST;
+--
+2.35.1
+
--- /dev/null
+From 462ab2e29e67129d33a3208c51f2ea0ea9f59d91 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 21 Aug 2022 12:41:33 +0200
+Subject: netfilter: nf_tables: do not leave chain stats enabled on error
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit 43eb8949cfdffa764b92bc6c54b87cbe5b0003fe ]
+
+An error might occur later in the nf_tables_addchain() codepath, so
+enable the static key only after the transaction has been created.
+
+Fixes: 9f08ea848117 ("netfilter: nf_tables: keep chain counters away from hot path")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_tables_api.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index e171257739c2f..b2c89e8c2a655 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -2195,9 +2195,9 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
+ struct netlink_ext_ack *extack)
+ {
+ const struct nlattr * const *nla = ctx->nla;
++ struct nft_stats __percpu *stats = NULL;
+ struct nft_table *table = ctx->table;
+ struct nft_base_chain *basechain;
+- struct nft_stats __percpu *stats;
+ struct net *net = ctx->net;
+ char name[NFT_NAME_MAXLEN];
+ struct nft_rule_blob *blob;
+@@ -2235,7 +2235,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
+ return PTR_ERR(stats);
+ }
+ rcu_assign_pointer(basechain->stats, stats);
+- static_branch_inc(&nft_counters_enabled);
+ }
+
+ err = nft_basechain_init(basechain, family, &hook, flags);
+@@ -2318,6 +2317,9 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
+ goto err_unregister_hook;
+ }
+
++ if (stats)
++ static_branch_inc(&nft_counters_enabled);
++
+ table->use++;
+
+ return 0;
+--
+2.35.1
+
--- /dev/null
+From c41964a21dbb3022b6c0c173e14694dedb9cb7fa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 21 Aug 2022 10:52:48 +0200
+Subject: netfilter: nf_tables: make table handle allocation per-netns friendly
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit ab482c6b66a4a8c0a8c0b0f577a785cf9ff1c2e2 ]
+
+The mutex is per-netns, so move table_handle to the pernet area.
+
+*read-write* to 0xffffffff883a01e8 of 8 bytes by task 6542 on cpu 0:
+ nf_tables_newtable+0x6dc/0xc00 net/netfilter/nf_tables_api.c:1221
+ nfnetlink_rcv_batch net/netfilter/nfnetlink.c:513 [inline]
+ nfnetlink_rcv_skb_batch net/netfilter/nfnetlink.c:634 [inline]
+ nfnetlink_rcv+0xa6a/0x13a0 net/netfilter/nfnetlink.c:652
+ netlink_unicast_kernel net/netlink/af_netlink.c:1319 [inline]
+ netlink_unicast+0x652/0x730 net/netlink/af_netlink.c:1345
+ netlink_sendmsg+0x643/0x740 net/netlink/af_netlink.c:1921
+
+Fixes: f102d66b335a ("netfilter: nf_tables: use dedicated mutex to guard transactions")
+Reported-by: Abhishek Shah <abhishek.shah@columbia.edu>
+Reviewed-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/netfilter/nf_tables.h | 1 +
+ net/netfilter/nf_tables_api.c | 3 +--
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index b8890ace0f879..0daad6e63ccb2 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -1635,6 +1635,7 @@ struct nftables_pernet {
+ struct list_head module_list;
+ struct list_head notify_list;
+ struct mutex commit_mutex;
++ u64 table_handle;
+ unsigned int base_seq;
+ u8 validate_state;
+ };
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 8b6ee9df817fb..e171257739c2f 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -32,7 +32,6 @@ static LIST_HEAD(nf_tables_objects);
+ static LIST_HEAD(nf_tables_flowtables);
+ static LIST_HEAD(nf_tables_destroy_list);
+ static DEFINE_SPINLOCK(nf_tables_destroy_list_lock);
+-static u64 table_handle;
+
+ enum {
+ NFT_VALIDATE_SKIP = 0,
+@@ -1235,7 +1234,7 @@ static int nf_tables_newtable(struct sk_buff *skb, const struct nfnl_info *info,
+ INIT_LIST_HEAD(&table->flowtables);
+ table->family = family;
+ table->flags = flags;
+- table->handle = ++table_handle;
++ table->handle = ++nft_net->table_handle;
+ if (table->flags & NFT_TABLE_F_OWNER)
+ table->nlpid = NETLINK_CB(skb).portid;
+
+--
+2.35.1
+
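The fix is purely structural: the handle counter moves next to the per-netns
commit mutex so the lock that is actually held also covers the data it
protects. A standalone C sketch (a drastic simplification, not the real
nftables_pernet layout):

  #include <stdint.h>
  #include <stdio.h>

  /* Drastic simplification of the per-netns state: after the fix the handle
   * counter lives next to the per-netns commit mutex instead of being a
   * file-scope global shared by every namespace. */
  struct pernet_state {
      /* commit_mutex would live here and be held around the increment */
      uint64_t table_handle;
  };

  static uint64_t new_table_handle(struct pernet_state *net)
  {
      return ++net->table_handle; /* serialized per netns by that mutex */
  }

  int main(void)
  {
      struct pernet_state ns_a = { 0 }, ns_b = { 0 };
      unsigned long long h1, h2, h3;

      h1 = new_table_handle(&ns_a);
      h2 = new_table_handle(&ns_a);
      h3 = new_table_handle(&ns_b);

      printf("ns_a handles: %llu %llu\n", h1, h2); /* 1 2 */
      printf("ns_b handle:  %llu\n", h3);          /* 1, independent of ns_a */
      return 0;
  }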
--- /dev/null
+From c1549f137a81faeb9fb3f4342d5e28b07a5f2218 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 21 Aug 2022 16:25:07 +0200
+Subject: netfilter: nft_osf: restrict osf to ipv4, ipv6 and inet families
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit 5f3b7aae14a706d0d7da9f9e39def52ff5fc3d39 ]
+
+As originally intended, restrict this extension to the supported families.
+
+Fixes: b96af92d6eaf ("netfilter: nf_tables: implement Passive OS fingerprint module in nft_osf")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_osf.c | 18 +++++++++++++++---
+ 1 file changed, 15 insertions(+), 3 deletions(-)
+
+diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c
+index 5eed18f90b020..175d666c8d87e 100644
+--- a/net/netfilter/nft_osf.c
++++ b/net/netfilter/nft_osf.c
+@@ -115,9 +115,21 @@ static int nft_osf_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data)
+ {
+- return nft_chain_validate_hooks(ctx->chain, (1 << NF_INET_LOCAL_IN) |
+- (1 << NF_INET_PRE_ROUTING) |
+- (1 << NF_INET_FORWARD));
++ unsigned int hooks;
++
++ switch (ctx->family) {
++ case NFPROTO_IPV4:
++ case NFPROTO_IPV6:
++ case NFPROTO_INET:
++ hooks = (1 << NF_INET_LOCAL_IN) |
++ (1 << NF_INET_PRE_ROUTING) |
++ (1 << NF_INET_FORWARD);
++ break;
++ default:
++ return -EOPNOTSUPP;
++ }
++
++ return nft_chain_validate_hooks(ctx->chain, hooks);
+ }
+
+ static bool nft_osf_reduce(struct nft_regs_track *track,
+--
+2.35.1
+
--- /dev/null
+From 12394e1b36151809f15c2ee835233dfe252e8fb1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 21 Aug 2022 11:55:19 +0200
+Subject: netfilter: nft_payload: do not truncate csum_offset and csum_type
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit 7044ab281febae9e2fa9b0b247693d6026166293 ]
+
+Instead, report ERANGE if csum_offset is too long, and EOPNOTSUPP if the
+csum type is not supported.
+
+Fixes: 7ec3f7b47b8d ("netfilter: nft_payload: add packet mangling support")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_payload.c | 19 +++++++++++++------
+ 1 file changed, 13 insertions(+), 6 deletions(-)
+
+diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
+index 4fee67abfe2c5..eb0e40c297121 100644
+--- a/net/netfilter/nft_payload.c
++++ b/net/netfilter/nft_payload.c
+@@ -740,17 +740,23 @@ static int nft_payload_set_init(const struct nft_ctx *ctx,
+ const struct nlattr * const tb[])
+ {
+ struct nft_payload_set *priv = nft_expr_priv(expr);
++ u32 csum_offset, csum_type = NFT_PAYLOAD_CSUM_NONE;
++ int err;
+
+ priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
+ priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
+ priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
+
+ if (tb[NFTA_PAYLOAD_CSUM_TYPE])
+- priv->csum_type =
+- ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
+- if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
+- priv->csum_offset =
+- ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));
++ csum_type = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
++ if (tb[NFTA_PAYLOAD_CSUM_OFFSET]) {
++ err = nft_parse_u32_check(tb[NFTA_PAYLOAD_CSUM_OFFSET], U8_MAX,
++ &csum_offset);
++ if (err < 0)
++ return err;
++
++ priv->csum_offset = csum_offset;
++ }
+ if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
+ u32 flags;
+
+@@ -761,7 +767,7 @@ static int nft_payload_set_init(const struct nft_ctx *ctx,
+ priv->csum_flags = flags;
+ }
+
+- switch (priv->csum_type) {
++ switch (csum_type) {
+ case NFT_PAYLOAD_CSUM_NONE:
+ case NFT_PAYLOAD_CSUM_INET:
+ break;
+@@ -775,6 +781,7 @@ static int nft_payload_set_init(const struct nft_ctx *ctx,
+ default:
+ return -EOPNOTSUPP;
+ }
++ priv->csum_type = csum_type;
+
+ return nft_parse_register_load(tb[NFTA_PAYLOAD_SREG], &priv->sreg,
+ priv->len);
+--
+2.35.1
+
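The same pattern appears in this and the next nft_payload fix: a user-supplied
32-bit value must be range-checked before it is stored in a u8-sized field,
otherwise it is silently truncated. A standalone C sketch (parse_u32_check()
is an invented stand-in for nft_parse_u32_check()):

  #include <errno.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Range-checked parse in the spirit of nft_parse_u32_check(): reject
   * anything that does not fit the destination field instead of truncating. */
  static int parse_u32_check(uint32_t val, uint32_t max, uint32_t *dest)
  {
      if (val > max)
          return -ERANGE;
      *dest = val;
      return 0;
  }

  int main(void)
  {
      uint32_t from_netlink = 0x104; /* userspace-supplied offset */
      uint8_t  truncated = (uint8_t)from_netlink;
      uint32_t checked;
      int err;

      /* old behaviour: 0x104 silently becomes 0x04 */
      printf("truncated: 0x%02x\n", truncated);

      /* new behaviour: out-of-range value is rejected */
      err = parse_u32_check(from_netlink, UINT8_MAX, &checked);
      printf("checked:   %s\n", err == -ERANGE ? "ERANGE" : "ok");
      return 0;
  }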
--- /dev/null
+From d886a5d79d449a2407f137dc196d76e5a6e39f42 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 21 Aug 2022 11:47:04 +0200
+Subject: netfilter: nft_payload: report ERANGE for too long offset and length
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit 94254f990c07e9ddf1634e0b727fab821c3b5bf9 ]
+
+Instead of truncating offset and length to u8, report ERANGE.
+
+Fixes: 96518518cc41 ("netfilter: add nftables")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_payload.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
+index 2e7ac007cb30f..4fee67abfe2c5 100644
+--- a/net/netfilter/nft_payload.c
++++ b/net/netfilter/nft_payload.c
+@@ -833,6 +833,7 @@ nft_payload_select_ops(const struct nft_ctx *ctx,
+ {
+ enum nft_payload_bases base;
+ unsigned int offset, len;
++ int err;
+
+ if (tb[NFTA_PAYLOAD_BASE] == NULL ||
+ tb[NFTA_PAYLOAD_OFFSET] == NULL ||
+@@ -859,8 +860,13 @@ nft_payload_select_ops(const struct nft_ctx *ctx,
+ if (tb[NFTA_PAYLOAD_DREG] == NULL)
+ return ERR_PTR(-EINVAL);
+
+- offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
+- len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
++ err = nft_parse_u32_check(tb[NFTA_PAYLOAD_OFFSET], U8_MAX, &offset);
++ if (err < 0)
++ return ERR_PTR(err);
++
++ err = nft_parse_u32_check(tb[NFTA_PAYLOAD_LEN], U8_MAX, &len);
++ if (err < 0)
++ return ERR_PTR(err);
+
+ if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
+ base != NFT_PAYLOAD_LL_HEADER && base != NFT_PAYLOAD_INNER_HEADER)
+--
+2.35.1
+
--- /dev/null
+From 9e12fa64de6587bfbfa6e4bb5baf990f891b07e4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 20 Aug 2022 17:54:06 +0200
+Subject: netfilter: nft_tproxy: restrict to prerouting hook
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 18bbc3213383a82b05383827f4b1b882e3f0a5a5 ]
+
+TPROXY is only allowed from prerouting, but nft_tproxy doesn't check this.
+This fixes a crash (null dereference) when using tproxy from e.g. output.
+
+Fixes: 4ed8eb6570a4 ("netfilter: nf_tables: Add native tproxy support")
+Reported-by: Shell Chen <xierch@gmail.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_tproxy.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/net/netfilter/nft_tproxy.c b/net/netfilter/nft_tproxy.c
+index 801f013971dfa..a701ad64f10af 100644
+--- a/net/netfilter/nft_tproxy.c
++++ b/net/netfilter/nft_tproxy.c
+@@ -312,6 +312,13 @@ static int nft_tproxy_dump(struct sk_buff *skb,
+ return 0;
+ }
+
++static int nft_tproxy_validate(const struct nft_ctx *ctx,
++ const struct nft_expr *expr,
++ const struct nft_data **data)
++{
++ return nft_chain_validate_hooks(ctx->chain, 1 << NF_INET_PRE_ROUTING);
++}
++
+ static struct nft_expr_type nft_tproxy_type;
+ static const struct nft_expr_ops nft_tproxy_ops = {
+ .type = &nft_tproxy_type,
+@@ -321,6 +328,7 @@ static const struct nft_expr_ops nft_tproxy_ops = {
+ .destroy = nft_tproxy_destroy,
+ .dump = nft_tproxy_dump,
+ .reduce = NFT_REDUCE_READONLY,
++ .validate = nft_tproxy_validate,
+ };
+
+ static struct nft_expr_type nft_tproxy_type __read_mostly = {
+--
+2.35.1
+
--- /dev/null
+From f82e3f66f5398194287f482e58d5b0d681b6d1da Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 21 Aug 2022 16:32:44 +0200
+Subject: netfilter: nft_tunnel: restrict it to netdev family
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit 01e4092d53bc4fe122a6e4b6d664adbd57528ca3 ]
+
+Only allow to use this expression from NFPROTO_NETDEV family.
+
+Fixes: af308b94a2a4 ("netfilter: nf_tables: add tunnel support")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_tunnel.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c
+index d0f9b1d51b0e9..96b03e0bf74ff 100644
+--- a/net/netfilter/nft_tunnel.c
++++ b/net/netfilter/nft_tunnel.c
+@@ -161,6 +161,7 @@ static const struct nft_expr_ops nft_tunnel_get_ops = {
+
+ static struct nft_expr_type nft_tunnel_type __read_mostly = {
+ .name = "tunnel",
++ .family = NFPROTO_NETDEV,
+ .ops = &nft_tunnel_get_ops,
+ .policy = nft_tunnel_policy,
+ .maxattr = NFTA_TUNNEL_MAX,
+--
+2.35.1
+
--- /dev/null
+From ddb90b8c0594b6f4fe72112e85492ba9f1f1956d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 18 Aug 2022 17:06:21 +0800
+Subject: nfc: pn533: Fix use-after-free bugs caused by pn532_cmd_timeout
+
+From: Duoming Zhou <duoming@zju.edu.cn>
+
+[ Upstream commit f1e941dbf80a9b8bab0bffbc4cbe41cc7f4c6fb6 ]
+
+When the pn532 uart device is detached, pn532_uart_remove()
+is called. But nothing in pn532_uart_remove() deletes the
+cmd_timeout timer, which can cause use-after-free
+bugs. The process is shown below:
+
+ (thread 1) | (thread 2)
+ | pn532_uart_send_frame
+pn532_uart_remove | mod_timer(&pn532->cmd_timeout,...)
+ ... | (wait a time)
+ kfree(pn532) //FREE | pn532_cmd_timeout
+ | pn532_uart_send_frame
+ | pn532->... //USE
+
+This patch adds del_timer_sync() in pn532_uart_remove() in order to
+prevent the use-after-free bugs. What's more, pn53x_unregister_nfc()
+is well synchronized: it sets nfc_dev->shutting_down to true, and no
+syscalls can restart the cmd_timeout timer afterwards.
+
+Fixes: c656aa4c27b1 ("nfc: pn533: add UART phy driver")
+Signed-off-by: Duoming Zhou <duoming@zju.edu.cn>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nfc/pn533/uart.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/nfc/pn533/uart.c b/drivers/nfc/pn533/uart.c
+index 2caf997f9bc94..07596bf5f7d6d 100644
+--- a/drivers/nfc/pn533/uart.c
++++ b/drivers/nfc/pn533/uart.c
+@@ -310,6 +310,7 @@ static void pn532_uart_remove(struct serdev_device *serdev)
+ pn53x_unregister_nfc(pn532->priv);
+ serdev_device_close(serdev);
+ pn53x_common_clean(pn532->priv);
++ del_timer_sync(&pn532->cmd_timeout);
+ kfree_skb(pn532->recv_skb);
+ kfree(pn532);
+ }
+--
+2.35.1
+
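The teardown rule the patch enforces (cancel and wait for the asynchronous
callback before freeing the object it uses) can be shown with a plain pthreads
analog. This is illustrative only and does not use the kernel timer API;
compile with 'cc -pthread':

  #include <pthread.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <unistd.h>

  /* userspace analog of a device object with a delayed callback */
  struct dev {
      pthread_t worker;
      int value;
  };

  static void *delayed_callback(void *arg)
  {
      struct dev *d = arg;

      sleep(1); /* pretend the timer fires one second later */
      printf("callback sees %d\n", d->value); /* safe only because d is not freed yet */
      return NULL;
  }

  int main(void)
  {
      struct dev *d = malloc(sizeof(*d));

      if (!d)
          return 1;
      d->value = 42;
      pthread_create(&d->worker, NULL, delayed_callback, d);

      /* Teardown: the moral equivalent of del_timer_sync() is to wait for
       * the pending callback to finish BEFORE freeing the object; freeing
       * first would let the callback dereference freed memory. */
      pthread_join(d->worker, NULL);
      free(d);

      puts("teardown done");
      return 0;
  }

Swapping the pthread_join() and free() lines reproduces the kind of
use-after-free the patch prevents.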
--- /dev/null
+From 972f8bb83a83f1d7c40353430af15a50e477bd4d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 18 Aug 2022 15:07:05 -0400
+Subject: NFSv4.2 fix problems with __nfs42_ssc_open
+
+From: Olga Kornievskaia <kolga@netapp.com>
+
+[ Upstream commit fcfc8be1e9cf2f12b50dce8b579b3ae54443a014 ]
+
+A destination server, while doing a COPY, shouldn't accept using the
+passed-in filehandle if it's not a regular filehandle.
+
+If alloc_file_pseudo() has failed, we need to decrement a reference
+on the newly created inode; otherwise it leaks.
+
+Reported-by: Al Viro <viro@zeniv.linux.org.uk>
+Fixes: ec4b092508982 ("NFS: inter ssc open")
+Signed-off-by: Olga Kornievskaia <kolga@netapp.com>
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/nfs4file.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
+index e88f6b18445ec..9eb1812878795 100644
+--- a/fs/nfs/nfs4file.c
++++ b/fs/nfs/nfs4file.c
+@@ -340,6 +340,11 @@ static struct file *__nfs42_ssc_open(struct vfsmount *ss_mnt,
+ goto out;
+ }
+
++ if (!S_ISREG(fattr->mode)) {
++ res = ERR_PTR(-EBADF);
++ goto out;
++ }
++
+ res = ERR_PTR(-ENOMEM);
+ len = strlen(SSC_READ_NAME_BODY) + 16;
+ read_name = kzalloc(len, GFP_KERNEL);
+@@ -357,6 +362,7 @@ static struct file *__nfs42_ssc_open(struct vfsmount *ss_mnt,
+ r_ino->i_fop);
+ if (IS_ERR(filep)) {
+ res = ERR_CAST(filep);
++ iput(r_ino);
+ goto out_free_name;
+ }
+
+--
+2.35.1
+
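The first half of the fix is an ordinary "regular files only" guard. A
userspace sketch of the same check using stat(2) (the NFS code applies
S_ISREG to the attributes returned by the server; this sketch is not that
code):

  #include <stdio.h>
  #include <sys/stat.h>

  /* Only regular files are acceptable as a copy source, mirroring the
   * S_ISREG(fattr->mode) check added to __nfs42_ssc_open(). */
  int main(int argc, char **argv)
  {
      struct stat st;

      if (argc < 2) {
          fprintf(stderr, "usage: %s <path>\n", argv[0]);
          return 1;
      }
      if (stat(argv[1], &st) != 0) {
          perror("stat");
          return 1;
      }
      if (!S_ISREG(st.st_mode)) {
          fprintf(stderr, "%s: not a regular file, refusing (EBADF)\n", argv[1]);
          return 1;
      }
      printf("%s: regular file, ok to open as copy source\n", argv[1]);
      return 0;
  }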
--- /dev/null
+From e717e6ab90d3e93c757ffff46c9844ce7eeba973 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Jul 2022 14:32:52 +0200
+Subject: ntfs: fix acl handling
+
+From: Christian Brauner <brauner@kernel.org>
+
+[ Upstream commit 0c3bc7899e6dfb52df1c46118a5a670ae619645f ]
+
+While looking at our current POSIX ACL handling in the context of some
+overlayfs work, I went through a range of other filesystems checking how
+they currently handle them and encountered ntfs3.
+
+The posix_acl_{from,to}_xattr() helpers always need to operate on the
+filesystem idmapping. Since ntfs3 can only be mounted in the initial user
+namespace, the relevant idmapping is init_user_ns.
+
+The posix_acl_{from,to}_xattr() helpers are concerned with translating between
+the kernel internal struct posix_acl{_entry} and the uapi struct
+posix_acl_xattr_{header,entry}; the kernel internal data structure is cached
+filesystem wide.
+
+Additional idmappings such as the caller's idmapping or the mount's idmapping
+are handled higher up in the VFS. Individual filesystems usually do not need to
+concern themselves with these.
+
+The posix_acl_valid() helper is concerned with checking whether the values in
+the kernel internal struct posix_acl can be represented in the filesystem's
+idmapping. IOW, if they can be written to disk. So this helper too needs to
+take the filesystem's idmapping.
+
+Fixes: be71b5cba2e6 ("fs/ntfs3: Add attrib operations")
+Cc: Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
+Cc: ntfs3@lists.linux.dev
+Signed-off-by: Christian Brauner (Microsoft) <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ntfs3/xattr.c | 16 +++++++---------
+ 1 file changed, 7 insertions(+), 9 deletions(-)
+
+diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c
+index 1b8c89dbf6684..3629049decac1 100644
+--- a/fs/ntfs3/xattr.c
++++ b/fs/ntfs3/xattr.c
+@@ -478,8 +478,7 @@ static noinline int ntfs_set_ea(struct inode *inode, const char *name,
+ }
+
+ #ifdef CONFIG_NTFS3_FS_POSIX_ACL
+-static struct posix_acl *ntfs_get_acl_ex(struct user_namespace *mnt_userns,
+- struct inode *inode, int type,
++static struct posix_acl *ntfs_get_acl_ex(struct inode *inode, int type,
+ int locked)
+ {
+ struct ntfs_inode *ni = ntfs_i(inode);
+@@ -514,7 +513,7 @@ static struct posix_acl *ntfs_get_acl_ex(struct user_namespace *mnt_userns,
+
+ /* Translate extended attribute to acl. */
+ if (err >= 0) {
+- acl = posix_acl_from_xattr(mnt_userns, buf, err);
++ acl = posix_acl_from_xattr(&init_user_ns, buf, err);
+ } else if (err == -ENODATA) {
+ acl = NULL;
+ } else {
+@@ -537,8 +536,7 @@ struct posix_acl *ntfs_get_acl(struct inode *inode, int type, bool rcu)
+ if (rcu)
+ return ERR_PTR(-ECHILD);
+
+- /* TODO: init_user_ns? */
+- return ntfs_get_acl_ex(&init_user_ns, inode, type, 0);
++ return ntfs_get_acl_ex(inode, type, 0);
+ }
+
+ static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
+@@ -590,7 +588,7 @@ static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
+ value = kmalloc(size, GFP_NOFS);
+ if (!value)
+ return -ENOMEM;
+- err = posix_acl_to_xattr(mnt_userns, acl, value, size);
++ err = posix_acl_to_xattr(&init_user_ns, acl, value, size);
+ if (err < 0)
+ goto out;
+ flags = 0;
+@@ -641,7 +639,7 @@ static int ntfs_xattr_get_acl(struct user_namespace *mnt_userns,
+ if (!acl)
+ return -ENODATA;
+
+- err = posix_acl_to_xattr(mnt_userns, acl, buffer, size);
++ err = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
+ posix_acl_release(acl);
+
+ return err;
+@@ -665,12 +663,12 @@ static int ntfs_xattr_set_acl(struct user_namespace *mnt_userns,
+ if (!value) {
+ acl = NULL;
+ } else {
+- acl = posix_acl_from_xattr(mnt_userns, value, size);
++ acl = posix_acl_from_xattr(&init_user_ns, value, size);
+ if (IS_ERR(acl))
+ return PTR_ERR(acl);
+
+ if (acl) {
+- err = posix_acl_valid(mnt_userns, acl);
++ err = posix_acl_valid(&init_user_ns, acl);
+ if (err)
+ goto release_and_out;
+ }
+--
+2.35.1
+
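+For reference, a minimal sketch of the decode-and-validate pattern the patch
+above settles on, using the filesystem idmapping (init_user_ns for ntfs3).
+The helper name and error handling are illustrative only, not taken from
+ntfs3:
+
+	#include <linux/err.h>
+	#include <linux/posix_acl.h>
+	#include <linux/posix_acl_xattr.h>
+	#include <linux/user_namespace.h>
+
+	/* Turn a raw ACL xattr blob into a kernel struct posix_acl and check
+	 * that it can be represented in the filesystem's idmapping.
+	 */
+	static struct posix_acl *example_decode_acl(const void *value, size_t size)
+	{
+		struct posix_acl *acl;
+		int err;
+
+		acl = posix_acl_from_xattr(&init_user_ns, value, size);
+		if (IS_ERR_OR_NULL(acl))
+			return acl;
+
+		err = posix_acl_valid(&init_user_ns, acl);
+		if (err) {
+			posix_acl_release(acl);
+			return ERR_PTR(err);
+		}
+		return acl;
+	}
+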
--- /dev/null
+From f21e8c6c49e3a27deb308da11f8cb32ef4d41bc4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 18 Aug 2022 16:06:20 +0800
+Subject: r8152: fix the RX FIFO settings when suspending
+
+From: Hayes Wang <hayeswang@realtek.com>
+
+[ Upstream commit b75d612014447e04abdf0e37ffb8f2fd8b0b49d6 ]
+
+The RX FIFO is reconfigured when suspending, so the related settings have
+to be updated as well. Otherwise, flow control would work abnormally.
+
+BugLink: https://bugzilla.kernel.org/show_bug.cgi?id=216333
+Reported-by: Mark Blakeney <mark.blakeney@bullet-systems.net>
+Fixes: cdf0b86b250f ("r8152: fix a WOL issue")
+Signed-off-by: Hayes Wang <hayeswang@realtek.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/usb/r8152.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index 46c7954d27629..d142ac8fcf6e2 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -5906,6 +5906,11 @@ static void r8153_enter_oob(struct r8152 *tp)
+ ocp_data &= ~NOW_IS_OOB;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+
++ /* RX FIFO settings for OOB */
++ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_OOB);
++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_OOB);
++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_OOB);
++
+ rtl_disable(tp);
+ rtl_reset_bmu(tp);
+
+@@ -6544,6 +6549,11 @@ static void rtl8156_down(struct r8152 *tp)
+ ocp_data &= ~NOW_IS_OOB;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+
++ /* RX FIFO settings for OOB */
++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_FULL, 64 / 16);
++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, 1024 / 16);
++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, 4096 / 16);
++
+ rtl_disable(tp);
+ rtl_reset_bmu(tp);
+
+--
+2.35.1
+
--- /dev/null
+From eabafb634c0b9c6fe1dd1457c4181493dd10343a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 18 Aug 2022 16:06:19 +0800
+Subject: r8152: fix the units of some registers for RTL8156A
+
+From: Hayes Wang <hayeswang@realtek.com>
+
+[ Upstream commit 6dc4df12d741c0fe8f885778a43039e0619b9cd9 ]
+
+The units of PLA_RX_FIFO_FULL and PLA_RX_FIFO_EMPTY are 16 bytes.
+
+Fixes: 195aae321c82 ("r8152: support new chips")
+Signed-off-by: Hayes Wang <hayeswang@realtek.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/usb/r8152.c | 17 ++---------------
+ 1 file changed, 2 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index 0f6efaabaa32b..46c7954d27629 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -6431,21 +6431,8 @@ static void r8156_fc_parameter(struct r8152 *tp)
+ u32 pause_on = tp->fc_pause_on ? tp->fc_pause_on : fc_pause_on_auto(tp);
+ u32 pause_off = tp->fc_pause_off ? tp->fc_pause_off : fc_pause_off_auto(tp);
+
+- switch (tp->version) {
+- case RTL_VER_10:
+- case RTL_VER_11:
+- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 8);
+- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 8);
+- break;
+- case RTL_VER_12:
+- case RTL_VER_13:
+- case RTL_VER_15:
+- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 16);
+- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 16);
+- break;
+- default:
+- break;
+- }
++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 16);
++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 16);
+ }
+
+ static void rtl8156_change_mtu(struct r8152 *tp)
+--
+2.35.1
+
--- /dev/null
+From eeef1f69ba263d3d5366fa90848e549f6189fe58 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Aug 2022 10:46:48 -0700
+Subject: ratelimit: Fix data-races in ___ratelimit().
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit 6bae8ceb90ba76cdba39496db936164fa672b9be ]
+
+rs->interval and rs->burst can be changed concurrently via sysctl
+(e.g. net_ratelimit_state) while they are being read. Thus, we need
+to add READ_ONCE() to their readers.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ lib/ratelimit.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/lib/ratelimit.c b/lib/ratelimit.c
+index e01a93f46f833..ce945c17980b9 100644
+--- a/lib/ratelimit.c
++++ b/lib/ratelimit.c
+@@ -26,10 +26,16 @@
+ */
+ int ___ratelimit(struct ratelimit_state *rs, const char *func)
+ {
++ /* Paired with WRITE_ONCE() in .proc_handler().
++ * Changing two values seperately could be inconsistent
++ * and some message could be lost. (See: net_ratelimit_state).
++ */
++ int interval = READ_ONCE(rs->interval);
++ int burst = READ_ONCE(rs->burst);
+ unsigned long flags;
+ int ret;
+
+- if (!rs->interval)
++ if (!interval)
+ return 1;
+
+ /*
+@@ -44,7 +50,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
+ if (!rs->begin)
+ rs->begin = jiffies;
+
+- if (time_is_before_jiffies(rs->begin + rs->interval)) {
++ if (time_is_before_jiffies(rs->begin + interval)) {
+ if (rs->missed) {
+ if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
+ printk_deferred(KERN_WARNING
+@@ -56,7 +62,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
+ rs->begin = jiffies;
+ rs->printed = 0;
+ }
+- if (rs->burst && rs->burst > rs->printed) {
++ if (burst && burst > rs->printed) {
+ rs->printed++;
+ ret = 1;
+ } else {
+--
+2.35.1
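+The READ_ONCE() calls above only help if every writer publishes the fields
+with WRITE_ONCE(). A minimal sketch of such a writer, with a hypothetical
+helper name (the real updates happen in the sysctl .proc_handler):
+
+	#include <linux/ratelimit.h>
+
+	/* Publish new limits so the lockless readers in ___ratelimit()
+	 * never observe a torn value for interval or burst.
+	 */
+	static void example_update_ratelimit(struct ratelimit_state *rs,
+					     int interval, int burst)
+	{
+		WRITE_ONCE(rs->interval, interval);
+		WRITE_ONCE(rs->burst, burst);
+	}
+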
+
--- /dev/null
+From 70f9b1e2e33bdba82f592198fde80562881f0edb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Aug 2022 14:54:36 +0200
+Subject: Revert "net: macsec: update SCI upon MAC address change."
+
+From: Sabrina Dubroca <sd@queasysnail.net>
+
+[ Upstream commit e82c649e851c9c25367fb7a2a6cf3479187de467 ]
+
+This reverts commit 6fc498bc82929ee23aa2f35a828c6178dfd3f823.
+
+Commit 6fc498bc8292 states:
+
+ SCI should be updated, because it contains MAC in its first 6
+ octets.
+
+That's not entirely correct. The SCI can be based on the MAC address,
+but doesn't have to be. We can also use any 64-bit number as the
+SCI. When the SCI is based on the MAC address, it uses a 16-bit "port
+number" provided by userspace, which commit 6fc498bc8292 overwrites
+with 1.
+
+In addition, changing the SCI after macsec has been set up can just
+confuse the receiver. If we configure the RXSC on the peer based on
+the original SCI, we should keep the same SCI on TX.
+
+When the macsec device is being managed by a userspace key negotiation
+daemon such as wpa_supplicant, commit 6fc498bc8292 would also
+overwrite the SCI defined by userspace.
+
+Fixes: 6fc498bc8292 ("net: macsec: update SCI upon MAC address change.")
+Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
+Link: https://lore.kernel.org/r/9b1a9d28327e7eb54550a92eebda45d25e54dd0d.1660667033.git.sd@queasysnail.net
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/macsec.c | 11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index f354fad05714a..5b0b23e55fa76 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -449,11 +449,6 @@ static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
+ return (struct macsec_eth_header *)skb_mac_header(skb);
+ }
+
+-static sci_t dev_to_sci(struct net_device *dev, __be16 port)
+-{
+- return make_sci(dev->dev_addr, port);
+-}
+-
+ static void __macsec_pn_wrapped(struct macsec_secy *secy,
+ struct macsec_tx_sa *tx_sa)
+ {
+@@ -3622,7 +3617,6 @@ static int macsec_set_mac_address(struct net_device *dev, void *p)
+
+ out:
+ eth_hw_addr_set(dev, addr->sa_data);
+- macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES);
+
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(macsec)) {
+@@ -3960,6 +3954,11 @@ static bool sci_exists(struct net_device *dev, sci_t sci)
+ return false;
+ }
+
++static sci_t dev_to_sci(struct net_device *dev, __be16 port)
++{
++ return make_sci(dev->dev_addr, port);
++}
++
+ static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
+ {
+ struct macsec_dev *macsec = macsec_priv(dev);
+--
+2.35.1
+
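+To make the "first 6 octets" point concrete, here is a standalone sketch of
+how a MAC-based SCI is composed, mirroring the make_sci() helper visible in
+the diff above (the example_ names are hypothetical):
+
+	#include <linux/etherdevice.h>
+	#include <linux/string.h>
+
+	typedef u64 example_sci_t;	/* stands in for macsec's sci_t */
+
+	/* MAC address in the first 6 octets, then the 16-bit port number
+	 * chosen by userspace -- rewriting the SCI on a MAC change would
+	 * silently replace that port.
+	 */
+	static example_sci_t example_make_sci(const u8 *addr, __be16 port)
+	{
+		example_sci_t sci;
+
+		memcpy(&sci, addr, ETH_ALEN);
+		memcpy((u8 *)&sci + ETH_ALEN, &port, sizeof(port));
+		return sci;
+	}
+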
--- /dev/null
+From 3e778a1a8cb85f80a331d51014e7a1a9f549e08f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 27 Jul 2022 17:38:35 +0200
+Subject: Revert "xfrm: update SA curlft.use_time"
+
+From: Antony Antony <antony.antony@secunet.com>
+
+[ Upstream commit 717ada9f10f2de8c4f4d72ad045f3b67a7ced715 ]
+
+This reverts commit af734a26a1a95a9fda51f2abb0c22a7efcafd5ca.
+
+The above commit is a regression according to RFC 2367. A better fix would
+be to use x->lastused, which will be proposed later.
+
+According to RFC 2367, use_time == sadb_lifetime_usetime.
+
+"sadb_lifetime_usetime
+ For CURRENT, the time, in seconds, when association
+ was first used. For HARD and SOFT, the number of
+ seconds after the first use of the association until
+ it expires."
+
+Fixes: af734a26a1a9 ("xfrm: update SA curlft.use_time")
+Signed-off-by: Antony Antony <antony.antony@secunet.com>
+Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/xfrm/xfrm_input.c | 1 -
+ net/xfrm/xfrm_output.c | 1 -
+ 2 files changed, 2 deletions(-)
+
+diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
+index 144238a50f3d4..70a8c36f0ba6e 100644
+--- a/net/xfrm/xfrm_input.c
++++ b/net/xfrm/xfrm_input.c
+@@ -669,7 +669,6 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
+
+ x->curlft.bytes += skb->len;
+ x->curlft.packets++;
+- x->curlft.use_time = ktime_get_real_seconds();
+
+ spin_unlock(&x->lock);
+
+diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
+index 555ab35cd119a..9a5e79a38c679 100644
+--- a/net/xfrm/xfrm_output.c
++++ b/net/xfrm/xfrm_output.c
+@@ -534,7 +534,6 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
+
+ x->curlft.bytes += skb->len;
+ x->curlft.packets++;
+- x->curlft.use_time = ktime_get_real_seconds();
+
+ spin_unlock_bh(&x->lock);
+
+--
+2.35.1
+
--- /dev/null
+From 394a866bea453ffe2a7eaeec184f1228602c83a0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 18 Aug 2022 02:02:13 +0200
+Subject: rose: check NULL rose_loopback_neigh->loopback
+
+From: Bernard Pidoux <f6bvp@free.fr>
+
+[ Upstream commit 3c53cd65dece47dd1f9d3a809f32e59d1d87b2b8 ]
+
+Commit 3b3fd068c56e3fbea30090859216a368398e39bf added a NULL check for
+`rose_loopback_neigh->dev` in rose_loopback_timer() but omitted to
+check rose_loopback_neigh->loopback.
+
+It thus prevents *all* ROSE connections.
+
+The reason is that a special rose_neigh loopback has a NULL device.
+
+/proc/net/rose_neigh illustrates it via the rose_neigh_show() function:
+[...]
+seq_printf(seq, "%05d %-9s %-4s %3d %3d %3s %3s %3lu %3lu",
+ rose_neigh->number,
+ (rose_neigh->loopback) ? "RSLOOP-0" : ax2asc(buf, &rose_neigh->callsign),
+ rose_neigh->dev ? rose_neigh->dev->name : "???",
+ rose_neigh->count,
+
+/proc/net/rose_neigh displays special rose_loopback_neigh->loopback as
+callsign RSLOOP-0:
+
+addr callsign dev count use mode restart t0 tf digipeaters
+00001 RSLOOP-0 ??? 1 2 DCE yes 0 0
+
+By checking rose_loopback_neigh->loopback, rose_rx_call_request() is called
+even when rose_loopback_neigh->dev is NULL. This repairs ROSE connections.
+
+Verification with rose client application FPAC:
+
+FPAC-Node v 4.1.3 (built Aug 5 2022) for LINUX (help = h)
+F6BVP-4 (Commands = ?) : u
+Users - AX.25 Level 2 sessions :
+Port Callsign Callsign AX.25 state ROSE state NetRom status
+axudp F6BVP-5 -> F6BVP-9 Connected Connected ---------
+
+Fixes: 3b3fd068c56e ("rose: Fix Null pointer dereference in rose_send_frame()")
+Signed-off-by: Bernard Pidoux <f6bvp@free.fr>
+Suggested-by: Francois Romieu <romieu@fr.zoreil.com>
+Cc: Thomas DL9SAU Osterried <thomas@osterried.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/rose/rose_loopback.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c
+index 11c45c8c6c164..036d92c0ad794 100644
+--- a/net/rose/rose_loopback.c
++++ b/net/rose/rose_loopback.c
+@@ -96,7 +96,8 @@ static void rose_loopback_timer(struct timer_list *unused)
+ }
+
+ if (frametype == ROSE_CALL_REQUEST) {
+- if (!rose_loopback_neigh->dev) {
++ if (!rose_loopback_neigh->dev &&
++ !rose_loopback_neigh->loopback) {
+ kfree_skb(skb);
+ continue;
+ }
+--
+2.35.1
+
--- /dev/null
+From 453895ef01dc94cb8b758f3e30a3236bdcbb30aa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Aug 2022 17:35:45 +0100
+Subject: rxrpc: Fix locking in rxrpc's sendmsg
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit b0f571ecd7943423c25947439045f0d352ca3dbf ]
+
+Fix three bugs in the rxrpc's sendmsg implementation:
+
+ (1) rxrpc_new_client_call() should release the socket lock when returning
+ an error from rxrpc_get_call_slot().
+
+ (2) rxrpc_wait_for_tx_window_intr() will return without the call mutex
+ held in the event that we're interrupted by a signal whilst waiting
+ for tx space on the socket or relocking the call mutex afterwards.
+
+ Fix this by: (a) moving the unlock/lock of the call mutex up to
+ rxrpc_send_data() such that the lock is not held around all of
+ rxrpc_wait_for_tx_window*() and (b) indicating to higher callers
+ whether we returned with the lock dropped. Note that this means
+ recvmsg() will not block on this call whilst we're waiting.
+
+ (3) After dropping and regaining the call mutex, rxrpc_send_data() needs
+ to go and recheck the state of the tx_pending buffer and the
+ tx_total_len check in case we raced with another sendmsg() on the same
+ call.
+
+Thinking on this some more, it might make sense to have different locks for
+sendmsg() and recvmsg(). There's probably no need to make recvmsg() wait
+for sendmsg(). It does mean that recvmsg() can return MSG_EOR indicating
+that a call is dead before a sendmsg() to that call returns - but that can
+currently happen anyway.
+
+Without fix (2), something like the following can be induced:
+
+ WARNING: bad unlock balance detected!
+ 5.16.0-rc6-syzkaller #0 Not tainted
+ -------------------------------------
+ syz-executor011/3597 is trying to release lock (&call->user_mutex) at:
+ [<ffffffff885163a3>] rxrpc_do_sendmsg+0xc13/0x1350 net/rxrpc/sendmsg.c:748
+ but there are no more locks to release!
+
+ other info that might help us debug this:
+ no locks held by syz-executor011/3597.
+ ...
+ Call Trace:
+ <TASK>
+ __dump_stack lib/dump_stack.c:88 [inline]
+ dump_stack_lvl+0xcd/0x134 lib/dump_stack.c:106
+ print_unlock_imbalance_bug include/trace/events/lock.h:58 [inline]
+ __lock_release kernel/locking/lockdep.c:5306 [inline]
+ lock_release.cold+0x49/0x4e kernel/locking/lockdep.c:5657
+ __mutex_unlock_slowpath+0x99/0x5e0 kernel/locking/mutex.c:900
+ rxrpc_do_sendmsg+0xc13/0x1350 net/rxrpc/sendmsg.c:748
+ rxrpc_sendmsg+0x420/0x630 net/rxrpc/af_rxrpc.c:561
+ sock_sendmsg_nosec net/socket.c:704 [inline]
+ sock_sendmsg+0xcf/0x120 net/socket.c:724
+ ____sys_sendmsg+0x6e8/0x810 net/socket.c:2409
+ ___sys_sendmsg+0xf3/0x170 net/socket.c:2463
+ __sys_sendmsg+0xe5/0x1b0 net/socket.c:2492
+ do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+ do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80
+ entry_SYSCALL_64_after_hwframe+0x44/0xae
+
+[Thanks to Hawkins Jiawei and Khalid Masum for their attempts to fix this]
+
+Fixes: bc5e3a546d55 ("rxrpc: Use MSG_WAITALL to tell sendmsg() to temporarily ignore signals")
+Reported-by: syzbot+7f0483225d0c94cb3441@syzkaller.appspotmail.com
+Signed-off-by: David Howells <dhowells@redhat.com>
+Reviewed-by: Marc Dionne <marc.dionne@auristor.com>
+Tested-by: syzbot+7f0483225d0c94cb3441@syzkaller.appspotmail.com
+cc: Hawkins Jiawei <yin31149@gmail.com>
+cc: Khalid Masum <khalid.masum.92@gmail.com>
+cc: Dan Carpenter <dan.carpenter@oracle.com>
+cc: linux-afs@lists.infradead.org
+Link: https://lore.kernel.org/r/166135894583.600315.7170979436768124075.stgit@warthog.procyon.org.uk
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/rxrpc/call_object.c | 4 +-
+ net/rxrpc/sendmsg.c | 92 ++++++++++++++++++++++++-----------------
+ 2 files changed, 57 insertions(+), 39 deletions(-)
+
+diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
+index 84d0a41096450..6401cdf7a6246 100644
+--- a/net/rxrpc/call_object.c
++++ b/net/rxrpc/call_object.c
+@@ -285,8 +285,10 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
+ _enter("%p,%lx", rx, p->user_call_ID);
+
+ limiter = rxrpc_get_call_slot(p, gfp);
+- if (!limiter)
++ if (!limiter) {
++ release_sock(&rx->sk);
+ return ERR_PTR(-ERESTARTSYS);
++ }
+
+ call = rxrpc_alloc_client_call(rx, srx, gfp, debug_id);
+ if (IS_ERR(call)) {
+diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
+index 1d38e279e2efa..3c3a626459deb 100644
+--- a/net/rxrpc/sendmsg.c
++++ b/net/rxrpc/sendmsg.c
+@@ -51,10 +51,7 @@ static int rxrpc_wait_for_tx_window_intr(struct rxrpc_sock *rx,
+ return sock_intr_errno(*timeo);
+
+ trace_rxrpc_transmit(call, rxrpc_transmit_wait);
+- mutex_unlock(&call->user_mutex);
+ *timeo = schedule_timeout(*timeo);
+- if (mutex_lock_interruptible(&call->user_mutex) < 0)
+- return sock_intr_errno(*timeo);
+ }
+ }
+
+@@ -290,37 +287,48 @@ static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
+ static int rxrpc_send_data(struct rxrpc_sock *rx,
+ struct rxrpc_call *call,
+ struct msghdr *msg, size_t len,
+- rxrpc_notify_end_tx_t notify_end_tx)
++ rxrpc_notify_end_tx_t notify_end_tx,
++ bool *_dropped_lock)
+ {
+ struct rxrpc_skb_priv *sp;
+ struct sk_buff *skb;
+ struct sock *sk = &rx->sk;
++ enum rxrpc_call_state state;
+ long timeo;
+- bool more;
+- int ret, copied;
++ bool more = msg->msg_flags & MSG_MORE;
++ int ret, copied = 0;
+
+ timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
+
+ /* this should be in poll */
+ sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+
++reload:
++ ret = -EPIPE;
+ if (sk->sk_shutdown & SEND_SHUTDOWN)
+- return -EPIPE;
+-
+- more = msg->msg_flags & MSG_MORE;
+-
++ goto maybe_error;
++ state = READ_ONCE(call->state);
++ ret = -ESHUTDOWN;
++ if (state >= RXRPC_CALL_COMPLETE)
++ goto maybe_error;
++ ret = -EPROTO;
++ if (state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
++ state != RXRPC_CALL_SERVER_ACK_REQUEST &&
++ state != RXRPC_CALL_SERVER_SEND_REPLY)
++ goto maybe_error;
++
++ ret = -EMSGSIZE;
+ if (call->tx_total_len != -1) {
+- if (len > call->tx_total_len)
+- return -EMSGSIZE;
+- if (!more && len != call->tx_total_len)
+- return -EMSGSIZE;
++ if (len - copied > call->tx_total_len)
++ goto maybe_error;
++ if (!more && len - copied != call->tx_total_len)
++ goto maybe_error;
+ }
+
+ skb = call->tx_pending;
+ call->tx_pending = NULL;
+ rxrpc_see_skb(skb, rxrpc_skb_seen);
+
+- copied = 0;
+ do {
+ /* Check to see if there's a ping ACK to reply to. */
+ if (call->ackr_reason == RXRPC_ACK_PING_RESPONSE)
+@@ -331,16 +339,8 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
+
+ _debug("alloc");
+
+- if (!rxrpc_check_tx_space(call, NULL)) {
+- ret = -EAGAIN;
+- if (msg->msg_flags & MSG_DONTWAIT)
+- goto maybe_error;
+- ret = rxrpc_wait_for_tx_window(rx, call,
+- &timeo,
+- msg->msg_flags & MSG_WAITALL);
+- if (ret < 0)
+- goto maybe_error;
+- }
++ if (!rxrpc_check_tx_space(call, NULL))
++ goto wait_for_space;
+
+ /* Work out the maximum size of a packet. Assume that
+ * the security header is going to be in the padded
+@@ -468,6 +468,27 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
+ efault:
+ ret = -EFAULT;
+ goto out;
++
++wait_for_space:
++ ret = -EAGAIN;
++ if (msg->msg_flags & MSG_DONTWAIT)
++ goto maybe_error;
++ mutex_unlock(&call->user_mutex);
++ *_dropped_lock = true;
++ ret = rxrpc_wait_for_tx_window(rx, call, &timeo,
++ msg->msg_flags & MSG_WAITALL);
++ if (ret < 0)
++ goto maybe_error;
++ if (call->interruptibility == RXRPC_INTERRUPTIBLE) {
++ if (mutex_lock_interruptible(&call->user_mutex) < 0) {
++ ret = sock_intr_errno(timeo);
++ goto maybe_error;
++ }
++ } else {
++ mutex_lock(&call->user_mutex);
++ }
++ *_dropped_lock = false;
++ goto reload;
+ }
+
+ /*
+@@ -629,6 +650,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
+ enum rxrpc_call_state state;
+ struct rxrpc_call *call;
+ unsigned long now, j;
++ bool dropped_lock = false;
+ int ret;
+
+ struct rxrpc_send_params p = {
+@@ -737,21 +759,13 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
+ ret = rxrpc_send_abort_packet(call);
+ } else if (p.command != RXRPC_CMD_SEND_DATA) {
+ ret = -EINVAL;
+- } else if (rxrpc_is_client_call(call) &&
+- state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
+- /* request phase complete for this client call */
+- ret = -EPROTO;
+- } else if (rxrpc_is_service_call(call) &&
+- state != RXRPC_CALL_SERVER_ACK_REQUEST &&
+- state != RXRPC_CALL_SERVER_SEND_REPLY) {
+- /* Reply phase not begun or not complete for service call. */
+- ret = -EPROTO;
+ } else {
+- ret = rxrpc_send_data(rx, call, msg, len, NULL);
++ ret = rxrpc_send_data(rx, call, msg, len, NULL, &dropped_lock);
+ }
+
+ out_put_unlock:
+- mutex_unlock(&call->user_mutex);
++ if (!dropped_lock)
++ mutex_unlock(&call->user_mutex);
+ error_put:
+ rxrpc_put_call(call, rxrpc_call_put);
+ _leave(" = %d", ret);
+@@ -779,6 +793,7 @@ int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
+ struct msghdr *msg, size_t len,
+ rxrpc_notify_end_tx_t notify_end_tx)
+ {
++ bool dropped_lock = false;
+ int ret;
+
+ _enter("{%d,%s},", call->debug_id, rxrpc_call_states[call->state]);
+@@ -796,7 +811,7 @@ int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
+ case RXRPC_CALL_SERVER_ACK_REQUEST:
+ case RXRPC_CALL_SERVER_SEND_REPLY:
+ ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len,
+- notify_end_tx);
++ notify_end_tx, &dropped_lock);
+ break;
+ case RXRPC_CALL_COMPLETE:
+ read_lock_bh(&call->state_lock);
+@@ -810,7 +825,8 @@ int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
+ break;
+ }
+
+- mutex_unlock(&call->user_mutex);
++ if (!dropped_lock)
++ mutex_unlock(&call->user_mutex);
+ _leave(" = %d", ret);
+ return ret;
+ }
+--
+2.35.1
+
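+The heart of fix (2) is a "did we drop your lock?" handshake between
+rxrpc_send_data() and its callers. A stripped-down sketch of the idiom with
+hypothetical names (not the rxrpc code itself):
+
+	#include <linux/errno.h>
+	#include <linux/mutex.h>
+	#include <linux/types.h>
+
+	/* Called with *lock held.  May sleep without it; *dropped tells the
+	 * caller whether the lock is still held on return, so the caller
+	 * neither unlocks twice nor leaks a held mutex on the signal path.
+	 */
+	static int example_do_locked_work(struct mutex *lock, bool *dropped)
+	{
+		mutex_unlock(lock);
+		*dropped = true;
+
+		/* ... wait for space; a signal may interrupt us here ... */
+
+		if (mutex_lock_interruptible(lock) < 0)
+			return -ERESTARTSYS;	/* still unlocked, *dropped stays true */
+
+		*dropped = false;
+		return 0;
+	}
+
+A caller then takes the mutex, calls the helper, and only unlocks again if
+the helper reports that it did not drop the lock.
+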
mm-uffd-reset-write-protection-when-unregister-with-wp-mode.patch
mm-hugetlb-support-write-faults-in-shared-mappings.patch
mt76-mt7921-fix-command-timeout-in-ap-stop-period.patch
+xfrm-fix-refcount-leak-in-__xfrm_policy_check.patch
+revert-xfrm-update-sa-curlft.use_time.patch
+xfrm-clone-missing-x-lastused-in-xfrm_do_migrate.patch
+af_key-do-not-call-xfrm_probe_algs-in-parallel.patch
+xfrm-policy-fix-metadata-dst-dev-xmit-null-pointer-d.patch
+fs-require-cap_sys_admin-in-target-namespace-for-idm.patch
+revert-net-macsec-update-sci-upon-mac-address-change.patch
+nfsv4.2-fix-problems-with-__nfs42_ssc_open.patch
+sunrpc-rpc-level-errors-should-set-task-tk_rpc_statu.patch
+mm-smaps-don-t-access-young-dirty-bit-if-pte-unprese.patch
+ntfs-fix-acl-handling.patch
+rose-check-null-rose_loopback_neigh-loopback.patch
+r8152-fix-the-units-of-some-registers-for-rtl8156a.patch
+r8152-fix-the-rx-fifo-settings-when-suspending.patch
+nfc-pn533-fix-use-after-free-bugs-caused-by-pn532_cm.patch
+ice-xsk-prohibit-usage-of-non-balanced-queue-id.patch
+ice-xsk-use-rx-ring-s-xdp-ring-when-picking-napi-con.patch
+net-mlx5e-properly-disable-vlan-strip-on-non-ul-reps.patch
+net-mlx5-lag-fix-logic-over-mlx5_lag_flag_ndevs_read.patch
+net-mlx5-eswitch-fix-forwarding-decision-to-uplink.patch
+net-mlx5-disable-irq-when-locking-lag_lock.patch
+net-mlx5-fix-cmd-error-logging-for-manage-pages-cmd.patch
+net-mlx5-avoid-false-positive-lockdep-warning-by-add.patch
+net-mlx5e-fix-wrong-application-of-the-lro-state.patch
+net-mlx5e-fix-wrong-tc-flag-used-when-set-hw-tc-offl.patch
+net-dsa-microchip-ksz9477-cleanup-the-ksz9477_switch.patch
+net-dsa-microchip-move-switch-chip_id-detection-to-k.patch
+net-dsa-microchip-move-tag_protocol-to-ksz_common.patch
+net-dsa-microchip-move-vlan-functionality-to-ksz_com.patch
+net-dsa-microchip-move-the-port-mirror-to-ksz_common.patch
+net-dsa-microchip-update-the-ksz_phylink_get_caps.patch
+net-dsa-microchip-keep-compatibility-with-device-tre.patch
+net-ipa-don-t-assume-smem-is-page-aligned.patch
+net-phy-don-t-warn-for-phy_ready-state-in-mdio_bus_p.patch
+net-moxa-get-rid-of-asymmetry-in-dma-mapping-unmappi.patch
+bonding-802.3ad-fix-no-transmission-of-lacpdus.patch
+net-ipvtap-add-__init-__exit-annotations-to-module-i.patch
+netfilter-ebtables-reject-blobs-that-don-t-provide-a.patch
+netfilter-nft_tproxy-restrict-to-prerouting-hook.patch
+bnxt_en-use-page_size-to-init-buffer-when-multi-buff.patch
+bnxt_en-set-missing-reload-flag-in-devlink-features.patch
+bnxt_en-fix-nq-resource-accounting-during-vf-creatio.patch
+bnxt_en-fix-lro-gro_hw-features-in-ndo_fix_features-.patch
+netfilter-nf_tables-disallow-updates-of-implicit-cha.patch
+netfilter-nf_tables-make-table-handle-allocation-per.patch
+netfilter-nft_payload-report-erange-for-too-long-off.patch
+netfilter-nft_payload-do-not-truncate-csum_offset-an.patch
+netfilter-nf_tables-do-not-leave-chain-stats-enabled.patch
+netfilter-nft_osf-restrict-osf-to-ipv4-ipv6-and-inet.patch
+netfilter-nft_tunnel-restrict-it-to-netdev-family.patch
+netfilter-nf_tables-disallow-binding-to-already-boun.patch
+netfilter-flowtable-add-function-to-invoke-garbage-c.patch
+netfilter-flowtable-fix-stuck-flows-on-cleanup-due-t.patch
+net-fix-data-races-around-sysctl_-rw-mem_-max-defaul.patch
+net-fix-data-races-around-weight_p-and-dev_weight_-r.patch
+net-fix-data-races-around-netdev_max_backlog.patch
+net-fix-data-races-around-netdev_tstamp_prequeue.patch
+ratelimit-fix-data-races-in-___ratelimit.patch
+net-fix-data-races-around-sysctl_optmem_max.patch
+net-fix-a-data-race-around-sysctl_tstamp_allow_data.patch
+net-fix-a-data-race-around-sysctl_net_busy_poll.patch
+net-fix-a-data-race-around-sysctl_net_busy_read.patch
+net-fix-a-data-race-around-netdev_budget.patch
+net-fix-data-races-around-sysctl_max_skb_frags.patch
+net-fix-a-data-race-around-netdev_budget_usecs.patch
+net-fix-data-races-around-sysctl_fb_tunnels_only_for.patch
+net-fix-data-races-around-sysctl_devconf_inherit_ini.patch
+net-fix-a-data-race-around-gro_normal_batch.patch
+net-fix-a-data-race-around-netdev_unregister_timeout.patch
+net-fix-a-data-race-around-sysctl_somaxconn.patch
+ixgbe-stop-resetting-systime-in-ixgbe_ptp_start_cycl.patch
+i40e-fix-incorrect-address-type-for-ipv6-flow-rules.patch
+net-ethernet-mtk_eth_soc-enable-rx-cksum-offload-for.patch
+net-ethernet-mtk_eth_soc-fix-hw-hash-reporting-for-m.patch
+rxrpc-fix-locking-in-rxrpc-s-sendmsg.patch
+ionic-clear-broken-state-on-generation-change.patch
+ionic-fix-up-issues-with-handling-eagain-on-fw-cmds.patch
+ionic-vf-initial-random-mac-address-if-no-assigned-m.patch
+net-stmmac-work-around-sporadic-tx-issue-on-link-up.patch
+net-lantiq_xrx200-confirm-skb-is-allocated-before-us.patch
+net-lantiq_xrx200-fix-lock-under-memory-pressure.patch
+net-lantiq_xrx200-restore-buffer-if-memory-allocatio.patch
--- /dev/null
+From f9162fc34ba42699f1de7b88f6d0548003d7e43e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 Aug 2022 14:55:03 -0400
+Subject: SUNRPC: RPC level errors should set task->tk_rpc_status
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+[ Upstream commit ed06fce0b034b2e25bd93430f5c4cbb28036cc1a ]
+
+Fix up a case in call_encode() where we're failing to set
+task->tk_rpc_status when an RPC-level error occurs.
+
+Fixes: 9c5948c24869 ("SUNRPC: task should be exit if encode return EKEYEXPIRED more times")
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sunrpc/clnt.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 733f9f2260926..c1a01947530f0 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -1888,7 +1888,7 @@ call_encode(struct rpc_task *task)
+ break;
+ case -EKEYEXPIRED:
+ if (!task->tk_cred_retry) {
+- rpc_exit(task, task->tk_status);
++ rpc_call_rpcerror(task, task->tk_status);
+ } else {
+ task->tk_action = call_refresh;
+ task->tk_cred_retry--;
+--
+2.35.1
+
--- /dev/null
+From f76f8b6c3d319dd7d5ce75f3114e2b9ed813dee4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 27 Jul 2022 17:41:22 +0200
+Subject: xfrm: clone missing x->lastused in xfrm_do_migrate
+
+From: Antony Antony <antony.antony@secunet.com>
+
+[ Upstream commit 6aa811acdb76facca0b705f4e4c1d948ccb6af8b ]
+
+x->lastused was not cloned in xfrm_do_migrate(). Add it to the state that
+is cloned during migration.
+
+Fixes: 80c9abaabf42 ("[XFRM]: Extension for dynamic update of endpoint address(es)")
+Signed-off-by: Antony Antony <antony.antony@secunet.com>
+Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/xfrm/xfrm_state.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
+index ccfb172eb5b8d..11d89af9cb55a 100644
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -1592,6 +1592,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
+ x->replay = orig->replay;
+ x->preplay = orig->preplay;
+ x->mapping_maxage = orig->mapping_maxage;
++ x->lastused = orig->lastused;
+ x->new_mapping = 0;
+ x->new_mapping_sport = 0;
+
+--
+2.35.1
+
--- /dev/null
+From c9d7314fdbbb705223b44f368509f685f4a472c4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 24 Jul 2022 17:55:58 +0800
+Subject: xfrm: fix refcount leak in __xfrm_policy_check()
+
+From: Xin Xiong <xiongx18@fudan.edu.cn>
+
+[ Upstream commit 9c9cb23e00ddf45679b21b4dacc11d1ae7961ebe ]
+
+The issue happens on an error path in __xfrm_policy_check(). When fetching
+the object `pols[1]` fails, the function simply returns 0, forgetting to
+decrement the reference count of `pols[0]`,
+which is incremented earlier by either xfrm_sk_policy_lookup() or
+xfrm_policy_lookup(). This may result in memory leaks.
+
+Fix it by decreasing the reference count of `pols[0]` in that path.
+
+Fixes: 134b0fc544ba ("IPsec: propagate security module errors up from flow_cache_lookup")
+Signed-off-by: Xin Xiong <xiongx18@fudan.edu.cn>
+Signed-off-by: Xin Tan <tanxin.ctf@gmail.com>
+Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/xfrm/xfrm_policy.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index f1a0bab920a55..4f8bbb825abcb 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -3599,6 +3599,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
+ if (pols[1]) {
+ if (IS_ERR(pols[1])) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
++ xfrm_pol_put(pols[0]);
+ return 0;
+ }
+ pols[1]->curlft.use_time = ktime_get_real_seconds();
+--
+2.35.1
+
--- /dev/null
+From 794aeff53fea9d492bb1ec9ce4790feaa7d20f54 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 16 Aug 2022 18:30:50 +0300
+Subject: xfrm: policy: fix metadata dst->dev xmit null pointer dereference
+
+From: Nikolay Aleksandrov <razor@blackwall.org>
+
+[ Upstream commit 17ecd4a4db4783392edd4944f5e8268205083f70 ]
+
+When we try to transmit an skb with a metadata_dst attached (i.e. dst->dev
+== NULL) through an xfrm interface, we can hit a null pointer dereference[1]
+in xfrmi_xmit2() -> xfrm_lookup_with_ifid() because the check for a
+loopback skb device in the no-policy case dereferences dst->dev
+unconditionally. Not having dst->dev can be interpreted as it not being
+a loopback device, so just add a check for a null dst_orig->dev.
+
+With this fix, the xfrm interface's Tx error counters go up as usual.
+
+[1] net-next calltrace captured via netconsole:
+ BUG: kernel NULL pointer dereference, address: 00000000000000c0
+ #PF: supervisor read access in kernel mode
+ #PF: error_code(0x0000) - not-present page
+ PGD 0 P4D 0
+ Oops: 0000 [#1] PREEMPT SMP
+ CPU: 1 PID: 7231 Comm: ping Kdump: loaded Not tainted 5.19.0+ #24
+ Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.0-1.fc36 04/01/2014
+ RIP: 0010:xfrm_lookup_with_ifid+0x5eb/0xa60
+ Code: 8d 74 24 38 e8 26 a4 37 00 48 89 c1 e9 12 fc ff ff 49 63 ed 41 83 fd be 0f 85 be 01 00 00 41 be ff ff ff ff 45 31 ed 48 8b 03 <f6> 80 c0 00 00 00 08 75 0f 41 80 bc 24 19 0d 00 00 01 0f 84 1e 02
+ RSP: 0018:ffffb0db82c679f0 EFLAGS: 00010246
+ RAX: 0000000000000000 RBX: ffffd0db7fcad430 RCX: ffffb0db82c67a10
+ RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffffb0db82c67a80
+ RBP: ffffb0db82c67a80 R08: ffffb0db82c67a14 R09: 0000000000000000
+ R10: 0000000000000000 R11: ffff8fa449667dc8 R12: ffffffff966db880
+ R13: 0000000000000000 R14: 00000000ffffffff R15: 0000000000000000
+ FS: 00007ff35c83f000(0000) GS:ffff8fa478480000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 00000000000000c0 CR3: 000000001ebb7000 CR4: 0000000000350ee0
+ Call Trace:
+ <TASK>
+ xfrmi_xmit+0xde/0x460
+ ? tcf_bpf_act+0x13d/0x2a0
+ dev_hard_start_xmit+0x72/0x1e0
+ __dev_queue_xmit+0x251/0xd30
+ ip_finish_output2+0x140/0x550
+ ip_push_pending_frames+0x56/0x80
+ raw_sendmsg+0x663/0x10a0
+ ? try_charge_memcg+0x3fd/0x7a0
+ ? __mod_memcg_lruvec_state+0x93/0x110
+ ? sock_sendmsg+0x30/0x40
+ sock_sendmsg+0x30/0x40
+ __sys_sendto+0xeb/0x130
+ ? handle_mm_fault+0xae/0x280
+ ? do_user_addr_fault+0x1e7/0x680
+ ? kvm_read_and_reset_apf_flags+0x3b/0x50
+ __x64_sys_sendto+0x20/0x30
+ do_syscall_64+0x34/0x80
+ entry_SYSCALL_64_after_hwframe+0x46/0xb0
+ RIP: 0033:0x7ff35cac1366
+ Code: eb 0b 00 f7 d8 64 89 02 48 c7 c0 ff ff ff ff eb b8 0f 1f 00 41 89 ca 64 8b 04 25 18 00 00 00 85 c0 75 11 b8 2c 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 72 c3 90 55 48 83 ec 30 44 89 4c 24 2c 4c 89
+ RSP: 002b:00007fff738e4028 EFLAGS: 00000246 ORIG_RAX: 000000000000002c
+ RAX: ffffffffffffffda RBX: 00007fff738e57b0 RCX: 00007ff35cac1366
+ RDX: 0000000000000040 RSI: 0000557164e4b450 RDI: 0000000000000003
+ RBP: 0000557164e4b450 R08: 00007fff738e7a2c R09: 0000000000000010
+ R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000040
+ R13: 00007fff738e5770 R14: 00007fff738e4030 R15: 0000001d00000001
+ </TASK>
+ Modules linked in: netconsole veth br_netfilter bridge bonding virtio_net [last unloaded: netconsole]
+ CR2: 00000000000000c0
+
+CC: Steffen Klassert <steffen.klassert@secunet.com>
+CC: Daniel Borkmann <daniel@iogearbox.net>
+Fixes: 2d151d39073a ("xfrm: Add possibility to set the default to block if we have no policy")
+Signed-off-by: Nikolay Aleksandrov <razor@blackwall.org>
+Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/xfrm/xfrm_policy.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index 4f8bbb825abcb..cc6ab79609e29 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -3162,7 +3162,7 @@ struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
+ return dst;
+
+ nopol:
+- if (!(dst_orig->dev->flags & IFF_LOOPBACK) &&
++ if ((!dst_orig->dev || !(dst_orig->dev->flags & IFF_LOOPBACK)) &&
+ net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
+ err = -EPERM;
+ goto error;
+--
+2.35.1
+