--- /dev/null
+From 2addf5982f2673e403247651c96dad657d1c0afa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 16 Jun 2023 22:39:39 +0100
+Subject: afs: Fix vlserver probe RTT handling
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit ba00b190670809c1a89326d80de96d714f6004f2 ]
+
+In the same spirit as commit ca57f02295f1 ("afs: Fix fileserver probe
+RTT handling"), don't rule out using a vlserver just because there
+haven't been enough packets yet to calculate a real rtt. Always set the
+server's probe rtt from the estimate provided by rxrpc_kernel_get_srtt,
+which is capped at 1 second.
+
+Without this, accessing a cell for the first time could fail with
+EDESTADDRREQ errors, even though the VL servers are known and have
+responded to a probe.
+
+Fixes: 1d4adfaf6574 ("rxrpc: Make rxrpc_kernel_get_srtt() indicate validity")
+Signed-off-by: Marc Dionne <marc.dionne@auristor.com>
+Signed-off-by: David Howells <dhowells@redhat.com>
+cc: linux-afs@lists.infradead.org
+Link: http://lists.infradead.org/pipermail/linux-afs/2023-June/006746.html
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/afs/vl_probe.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/fs/afs/vl_probe.c b/fs/afs/vl_probe.c
+index d1c7068b4346f..58452b86e6727 100644
+--- a/fs/afs/vl_probe.c
++++ b/fs/afs/vl_probe.c
+@@ -115,8 +115,8 @@ void afs_vlserver_probe_result(struct afs_call *call)
+ }
+ }
+
+- if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) &&
+- rtt_us < server->probe.rtt) {
++ rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us);
++ if (rtt_us < server->probe.rtt) {
+ server->probe.rtt = rtt_us;
+ server->rtt = rtt_us;
+ alist->preferred = index;
+--
+2.39.2
+
--- /dev/null
+From 587e40a3aa56ef357e09f59254cb7291ee20e8fc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 11 Jun 2023 11:23:32 -0500
+Subject: cifs: fix lease break oops in xfstest generic/098
+
+From: Steve French <stfrench@microsoft.com>
+
+[ Upstream commit c774e6779f38bf36f0cce65e30793704bab4b0d7 ]
+
+umount can race with a lease break, so we need to check that
+tcon->ses->server is still valid before sending the lease
+break response.
+
+Reviewed-by: Bharath SM <bharathsm@microsoft.com>
+Reviewed-by: Shyam Prasad N <sprasad@microsoft.com>
+Fixes: 59a556aebc43 ("SMB3: drop reference to cfile before sending oplock break")
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/cifs/file.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 872aebac9f686..bda1ffe6e41f8 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -4929,9 +4929,13 @@ void cifs_oplock_break(struct work_struct *work)
+ * disconnected since oplock already released by the server
+ */
+ if (!oplock_break_cancelled) {
+- rc = tcon->ses->server->ops->oplock_response(tcon, persistent_fid,
++ /* check for server null since can race with kill_sb calling tree disconnect */
++ if (tcon->ses && tcon->ses->server) {
++ rc = tcon->ses->server->ops->oplock_response(tcon, persistent_fid,
+ volatile_fid, net_fid, cinode);
+- cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
++ cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
++ } else
++ pr_warn_once("lease break not sent for unmounted share\n");
+ }
+
+ cifs_done_oplock_break(cinode);
+--
+2.39.2
+
--- /dev/null
+From aae8e4e2c44e6fbcbc3437c79cd13b567bc51bf7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 1 Jun 2023 14:14:23 +0800
+Subject: dm: don't lock fs when the map is NULL during suspend or resume
+
+From: Li Lingfeng <lilingfeng3@huawei.com>
+
+[ Upstream commit 2760904d895279f87196f0fa9ec570c79fe6a2e4 ]
+
+As described in commit 38d11da522aa ("dm: don't lock fs when the map is
+NULL in process of resume"), a deadlock may be triggered between
+do_resume() and do_mount().
+
+This commit preserves the fix from commit 38d11da522aa but moves it to
+where it also serves to fix a similar deadlock between do_suspend()
+and do_mount(). It does so, if the active map is NULL, by clearing
+DM_SUSPEND_LOCKFS_FLAG in dm_suspend() which is called by both
+do_suspend() and do_resume().
+
+Fixes: 38d11da522aa ("dm: don't lock fs when the map is NULL in process of resume")
+Signed-off-by: Li Lingfeng <lilingfeng3@huawei.com>
+Signed-off-by: Mike Snitzer <snitzer@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/dm-ioctl.c | 5 +----
+ drivers/md/dm.c | 4 ++++
+ 2 files changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
+index 438c0b77bb48c..815c41e1ebdb8 100644
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -1145,13 +1145,10 @@ static int do_resume(struct dm_ioctl *param)
+ /* Do we need to load a new map ? */
+ if (new_map) {
+ sector_t old_size, new_size;
+- int srcu_idx;
+
+ /* Suspend if it isn't already suspended */
+- old_map = dm_get_live_table(md, &srcu_idx);
+- if ((param->flags & DM_SKIP_LOCKFS_FLAG) || !old_map)
++ if (param->flags & DM_SKIP_LOCKFS_FLAG)
+ suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
+- dm_put_live_table(md, srcu_idx);
+ if (param->flags & DM_NOFLUSH_FLAG)
+ suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;
+ if (!dm_suspended_md(md))
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 8bc121d394471..d6285a23dc3ed 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -2526,6 +2526,10 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
+ }
+
+ map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
++ if (!map) {
++ /* avoid deadlock with fs/namespace.c:do_mount() */
++ suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
++ }
+
+ r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
+ if (r)
+--
+2.39.2
+
--- /dev/null
+From eaabbbf1f7d4148f7036de4a4a453657abe1da23 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 May 2023 13:33:20 +0300
+Subject: drm/nouveau: add nv_encoder pointer check for NULL
+
+From: Natalia Petrova <n.petrova@fintech.ru>
+
+[ Upstream commit 55b94bb8c42464bad3d2217f6874aa1a85664eac ]
+
+The nv_encoder pointer in nouveau_connector.c can still be NULL when the
+code jumps to the goto label, and it would then be dereferenced.
+This patch adds a NULL check to avoid that.
+
+Found by Linux Verification Center (linuxtesting.org) with SVACE.
+
+Fixes: 3195c5f9784a ("drm/nouveau: set encoder for lvds")
+Signed-off-by: Natalia Petrova <n.petrova@fintech.ru>
+Reviewed-by: Lyude Paul <lyude@redhat.com>
+[Fixed patch title]
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230512103320.82234-1-n.petrova@fintech.ru
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/nouveau/nouveau_connector.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
+index f5c79d367f290..7f8607b97707e 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -729,7 +729,8 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
+ #endif
+
+ nouveau_connector_set_edid(nv_connector, edid);
+- nouveau_connector_set_encoder(connector, nv_encoder);
++ if (nv_encoder)
++ nouveau_connector_set_encoder(connector, nv_encoder);
+ return status;
+ }
+
+--
+2.39.2
+
--- /dev/null
+From 1439c88367d4ad0d2f1bfad5feba0a9dfa3957f1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 May 2023 04:11:56 +0700
+Subject: drm/nouveau: don't detect DSM for non-NVIDIA device
+
+From: Ratchanan Srirattanamet <peathot@hotmail.com>
+
+[ Upstream commit 11d24327c2d7ad7f24fcc44fb00e1fa91ebf6525 ]
+
+The call site of nouveau_dsm_pci_probe() uses a single set of output
+variables for all invocations. So, we must not write anything to them
+unless it's an NVIDIA device. Otherwise, if we are called with another
+device after the NVIDIA device, we'll clobber the result of the NVIDIA
+device.
+
+For example, if the other device doesn't have _PR3 resources, the
+detection later would miss the presence of power resource support, and
+the rest of the code will keep using Optimus DSM, breaking power
+management for that machine.
+
+Also, because we're detecting NVIDIA's DSM, it doesn't make sense to run
+this detection on a non-NVIDIA device anyway. Thus, check at the
+beginning of the detection code if this is an NVIDIA card, and just
+return if it isn't.
+
+This, together with commit d22915d22ded ("drm/nouveau/devinit/tu102-:
+wait for GFW_BOOT_PROGRESS == COMPLETED") developed independently and
+landed earlier, fixes runtime power management of the NVIDIA card in
+Lenovo Legion 5-15ARH05. Without this patch, the GPU resumption code
+will "timeout", sometimes hanging userspace.
+
+As a bonus, we'll also stop preventing _PR3 usage from the bridge for
+unrelated devices, which is always nice, I guess.
+
+Fixes: ccfc2d5cdb02 ("drm/nouveau: Use generic helper to check _PR3 presence")
+Signed-off-by: Ratchanan Srirattanamet <peathot@hotmail.com>
+Closes: https://gitlab.freedesktop.org/drm/nouveau/-/issues/79
+Reviewed-by: Karol Herbst <kherbst@redhat.com>
+Signed-off-by: Karol Herbst <kherbst@redhat.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/DM6PR19MB2780805D4BE1E3F9B3AC96D0BC409@DM6PR19MB2780.namprd19.prod.outlook.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/nouveau/nouveau_acpi.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
+index 7c15f64484281..9c55f205ab663 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
++++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
+@@ -220,6 +220,9 @@ static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out
+ int optimus_funcs;
+ struct pci_dev *parent_pdev;
+
++ if (pdev->vendor != PCI_VENDOR_ID_NVIDIA)
++ return;
++
+ *has_pr3 = false;
+ parent_pdev = pci_upstream_bridge(pdev);
+ if (parent_pdev) {
+--
+2.39.2
+
--- /dev/null
+From 17bfebd79bb4a2d2e1a6ef05415c152f5ce7903c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 May 2023 14:15:26 +0300
+Subject: drm/nouveau/dp: check for NULL nv_connector->native_mode
+
+From: Natalia Petrova <n.petrova@fintech.ru>
+
+[ Upstream commit 20a2ce87fbaf81e4c3dcb631d738e423959eb320 ]
+
+Add a NULL check before calling nouveau_connector_detect_depth() from
+nouveau_connector_get_modes(): the connector pointer is passed on to
+nouveau_connector_detect_depth(), which dereferences the same
+nv_connector->native_mode value, so a NULL native_mode would be
+dereferenced there.
+
+Found by Linux Verification Center (linuxtesting.org) with SVACE.
+
+Fixes: d4c2c99bdc83 ("drm/nouveau/dp: remove broken display depth function, use the improved one")
+
+Signed-off-by: Natalia Petrova <n.petrova@fintech.ru>
+Reviewed-by: Lyude Paul <lyude@redhat.com>
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230512111526.82408-1-n.petrova@fintech.ru
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/nouveau/nouveau_connector.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
+index 96be2ecb86d4d..f5c79d367f290 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -965,7 +965,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
+ /* Determine display colour depth for everything except LVDS now,
+ * DP requires this before mode_valid() is called.
+ */
+- if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
++ if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && nv_connector->native_mode)
+ nouveau_connector_detect_depth(connector);
+
+ /* Find the native mode if this is a digital panel, if we didn't
+@@ -986,7 +986,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
+ * "native" mode as some VBIOS tables require us to use the
+ * pixel clock as part of the lookup...
+ */
+- if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
++ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS && nv_connector->native_mode)
+ nouveau_connector_detect_depth(connector);
+
+ if (nv_encoder->dcb->type == DCB_OUTPUT_TV)
+--
+2.39.2
+
--- /dev/null
+From 5ec3bbc1dfc007fa67914f1fc08e23f56a4e4f50 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Jun 2023 12:02:55 +0200
+Subject: ext4: drop the call to ext4_error() from ext4_get_group_info()
+
+From: Fabio M. De Francesco <fmdefrancesco@gmail.com>
+
+[ Upstream commit f451fd97dd2b78f286379203a47d9d295c467255 ]
+
+A recent patch added a call to ext4_error() which is problematic since
+some callers of the ext4_get_group_info() function may be holding a
+spinlock, whereas ext4_error() must never be called in atomic context.
+
+This triggered a report from Syzbot: "BUG: sleeping function called from
+invalid context in ext4_update_super" (see the link below).
+
+Therefore, drop the call to ext4_error() from ext4_get_group_info(). While
+at it, use eight-character tabs instead of nine-character ones.
+
+Reported-by: syzbot+4acc7d910e617b360859@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/00000000000070575805fdc6cdb2@google.com/
+Fixes: 5354b2af3406 ("ext4: allow ext4_get_group_info() to fail")
+Suggested-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Fabio M. De Francesco <fmdefrancesco@gmail.com>
+Link: https://lore.kernel.org/r/20230614100446.14337-1-fmdefrancesco@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/balloc.c | 20 +++++++++-----------
+ 1 file changed, 9 insertions(+), 11 deletions(-)
+
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index fadcb94e80fa1..7649376fd93d3 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -322,17 +322,15 @@ static ext4_fsblk_t ext4_valid_block_bitmap_padding(struct super_block *sb,
+ struct ext4_group_info *ext4_get_group_info(struct super_block *sb,
+ ext4_group_t group)
+ {
+- struct ext4_group_info **grp_info;
+- long indexv, indexh;
+-
+- if (unlikely(group >= EXT4_SB(sb)->s_groups_count)) {
+- ext4_error(sb, "invalid group %u", group);
+- return NULL;
+- }
+- indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb));
+- indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1);
+- grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv);
+- return grp_info[indexh];
++ struct ext4_group_info **grp_info;
++ long indexv, indexh;
++
++ if (unlikely(group >= EXT4_SB(sb)->s_groups_count))
++ return NULL;
++ indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb));
++ indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1);
++ grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv);
++ return grp_info[indexh];
+ }
+
+ /*
+--
+2.39.2
+
--- /dev/null
+From 77f8dceda9d087981a7fb7743ed807c16825ef1a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Jun 2023 13:02:26 -0700
+Subject: iavf: remove mask from iavf_irq_enable_queues()
+
+From: Ahmed Zaki <ahmed.zaki@intel.com>
+
+[ Upstream commit c37cf54c12cfaa51e7aaf88708167b0d3259e64e ]
+
+Enable more than 32 IRQs by removing the u32 bit mask in
+iavf_irq_enable_queues(). There is no need for the mask as there are no
+callers that select individual IRQs through the bitmask. Also, if the PF
+allocates more than 32 IRQs, this mask will prevent us from using all of
+them.
+
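+To illustrate why a u32 mask caps the driver at 32 vectors, here is a
+minimal stand-alone sketch (not driver code; BIT() is defined locally to
+mirror the kernel macro and printf() stands in for the register write):
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  #define BIT(n) (1ULL << (n))
+
+  int main(void)
+  {
+          uint32_t mask = ~0U;    /* all 32 bits set, like the old "~0" caller */
+          int i;
+
+          /* vector index can go up to 63 per the iAVF spec cited below */
+          for (i = 1; i < 64; i++)
+                  if (mask & BIT(i - 1))  /* always false once i - 1 >= 32 */
+                          printf("vector %d enabled\n", i);
+          return 0;
+  }
+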
+Modify the comment in iavf_register.h to show that the maximum number
+allowed for the IRQ index is 63 as per the iAVF standard 1.0 [1].
+
+link: [1] https://www.intel.com/content/dam/www/public/us/en/documents/product-specifications/ethernet-adaptive-virtual-function-hardware-spec.pdf
+Fixes: 5eae00c57f5e ("i40evf: main driver core")
+Signed-off-by: Ahmed Zaki <ahmed.zaki@intel.com>
+Tested-by: Rafal Romanowski <rafal.romanowski@intel.com>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Reviewed-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Link: https://lore.kernel.org/r/20230608200226.451861-1-anthony.l.nguyen@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/iavf/iavf.h | 2 +-
+ drivers/net/ethernet/intel/iavf/iavf_main.c | 15 ++++++---------
+ drivers/net/ethernet/intel/iavf/iavf_register.h | 2 +-
+ 3 files changed, 8 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
+index 99d2b090a1e61..478a292ac803b 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf.h
++++ b/drivers/net/ethernet/intel/iavf/iavf.h
+@@ -461,7 +461,7 @@ void iavf_set_ethtool_ops(struct net_device *netdev);
+ void iavf_update_stats(struct iavf_adapter *adapter);
+ void iavf_reset_interrupt_capability(struct iavf_adapter *adapter);
+ int iavf_init_interrupt_scheme(struct iavf_adapter *adapter);
+-void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask);
++void iavf_irq_enable_queues(struct iavf_adapter *adapter);
+ void iavf_free_all_tx_resources(struct iavf_adapter *adapter);
+ void iavf_free_all_rx_resources(struct iavf_adapter *adapter);
+
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index 3b62f37b3cf14..a3caab0b6fa2a 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -253,21 +253,18 @@ static void iavf_irq_disable(struct iavf_adapter *adapter)
+ }
+
+ /**
+- * iavf_irq_enable_queues - Enable interrupt for specified queues
++ * iavf_irq_enable_queues - Enable interrupt for all queues
+ * @adapter: board private structure
+- * @mask: bitmap of queues to enable
+ **/
+-void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask)
++void iavf_irq_enable_queues(struct iavf_adapter *adapter)
+ {
+ struct iavf_hw *hw = &adapter->hw;
+ int i;
+
+ for (i = 1; i < adapter->num_msix_vectors; i++) {
+- if (mask & BIT(i - 1)) {
+- wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
+- IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
+- IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
+- }
++ wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
++ IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
++ IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
+ }
+ }
+
+@@ -281,7 +278,7 @@ void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
+ struct iavf_hw *hw = &adapter->hw;
+
+ iavf_misc_irq_enable(adapter);
+- iavf_irq_enable_queues(adapter, ~0);
++ iavf_irq_enable_queues(adapter);
+
+ if (flush)
+ iavf_flush(hw);
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_register.h b/drivers/net/ethernet/intel/iavf/iavf_register.h
+index bf793332fc9d5..a19e88898a0bb 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_register.h
++++ b/drivers/net/ethernet/intel/iavf/iavf_register.h
+@@ -40,7 +40,7 @@
+ #define IAVF_VFINT_DYN_CTL01_INTENA_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTL01_INTENA_SHIFT)
+ #define IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
+ #define IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
+-#define IAVF_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */
++#define IAVF_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...63 */ /* Reset: VFR */
+ #define IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT 0
+ #define IAVF_VFINT_DYN_CTLN1_INTENA_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT)
+ #define IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
+--
+2.39.2
+
--- /dev/null
+From 58ce20c70921292eb2e05de62ecec063f0844e76 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jun 2023 03:25:29 -0700
+Subject: IB/isert: Fix dead lock in ib_isert
+
+From: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
+
+[ Upstream commit 691b0480933f0ce88a81ed1d1a0aff340ff6293a ]
+
+- When an iSER session is released, the ib_isert module takes a mutex
+  lock and releases all pending connections. As part of this, ib_isert
+  destroys the rdma cm_id. To destroy the cm_id, the rdma_cm module sends
+  CM events to the CMA handler of ib_isert, and that handler takes the
+  same mutex lock. Hence it leads to a deadlock between the ib_isert and
+  rdma_cm modules.
+
+- As a fix, build a local list of the pending connections and release
+  the connections outside of the mutex lock, as sketched below.
+
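+The shape of the fix, as an illustrative sketch rather than the actual
+driver code (the entry type, the "pending" list name and the release()
+helper are placeholders, assuming connections linked through a ->node
+list_head):
+
+  LIST_HEAD(drop_conn_list);
+
+  mutex_lock(&isert_np->mutex);
+  list_for_each_entry_safe(conn, n, &isert_np->pending, node)
+          list_move_tail(&conn->node, &drop_conn_list);  /* no release under the lock */
+  mutex_unlock(&isert_np->mutex);
+
+  /* released outside the mutex, so CM callbacks can take it freely */
+  list_for_each_entry_safe(conn, n, &drop_conn_list, node) {
+          list_del_init(&conn->node);
+          release(conn);
+  }
+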
+Calltrace:
+---------
+[ 1229.791410] INFO: task kworker/10:1:642 blocked for more than 120 seconds.
+[ 1229.791416] Tainted: G OE --------- - - 4.18.0-372.9.1.el8.x86_64 #1
+[ 1229.791418] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+[ 1229.791419] task:kworker/10:1 state:D stack: 0 pid: 642 ppid: 2 flags:0x80004000
+[ 1229.791424] Workqueue: ib_cm cm_work_handler [ib_cm]
+[ 1229.791436] Call Trace:
+[ 1229.791438] __schedule+0x2d1/0x830
+[ 1229.791445] ? select_idle_sibling+0x23/0x6f0
+[ 1229.791449] schedule+0x35/0xa0
+[ 1229.791451] schedule_preempt_disabled+0xa/0x10
+[ 1229.791453] __mutex_lock.isra.7+0x310/0x420
+[ 1229.791456] ? select_task_rq_fair+0x351/0x990
+[ 1229.791459] isert_cma_handler+0x224/0x330 [ib_isert]
+[ 1229.791463] ? ttwu_queue_wakelist+0x159/0x170
+[ 1229.791466] cma_cm_event_handler+0x25/0xd0 [rdma_cm]
+[ 1229.791474] cma_ib_handler+0xa7/0x2e0 [rdma_cm]
+[ 1229.791478] cm_process_work+0x22/0xf0 [ib_cm]
+[ 1229.791483] cm_work_handler+0xf4/0xf30 [ib_cm]
+[ 1229.791487] ? move_linked_works+0x6e/0xa0
+[ 1229.791490] process_one_work+0x1a7/0x360
+[ 1229.791491] ? create_worker+0x1a0/0x1a0
+[ 1229.791493] worker_thread+0x30/0x390
+[ 1229.791494] ? create_worker+0x1a0/0x1a0
+[ 1229.791495] kthread+0x10a/0x120
+[ 1229.791497] ? set_kthread_struct+0x40/0x40
+[ 1229.791499] ret_from_fork+0x1f/0x40
+
+[ 1229.791739] INFO: task targetcli:28666 blocked for more than 120 seconds.
+[ 1229.791740] Tainted: G OE --------- - - 4.18.0-372.9.1.el8.x86_64 #1
+[ 1229.791741] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+[ 1229.791742] task:targetcli state:D stack: 0 pid:28666 ppid: 5510 flags:0x00004080
+[ 1229.791743] Call Trace:
+[ 1229.791744] __schedule+0x2d1/0x830
+[ 1229.791746] schedule+0x35/0xa0
+[ 1229.791748] schedule_preempt_disabled+0xa/0x10
+[ 1229.791749] __mutex_lock.isra.7+0x310/0x420
+[ 1229.791751] rdma_destroy_id+0x15/0x20 [rdma_cm]
+[ 1229.791755] isert_connect_release+0x115/0x130 [ib_isert]
+[ 1229.791757] isert_free_np+0x87/0x140 [ib_isert]
+[ 1229.791761] iscsit_del_np+0x74/0x120 [iscsi_target_mod]
+[ 1229.791776] lio_target_np_driver_store+0xe9/0x140 [iscsi_target_mod]
+[ 1229.791784] configfs_write_file+0xb2/0x110
+[ 1229.791788] vfs_write+0xa5/0x1a0
+[ 1229.791792] ksys_write+0x4f/0xb0
+[ 1229.791794] do_syscall_64+0x5b/0x1a0
+[ 1229.791798] entry_SYSCALL_64_after_hwframe+0x65/0xca
+
+Fixes: bd3792205aae ("iser-target: Fix pending connections handling in target stack shutdown sequnce")
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
+Signed-off-by: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
+Link: https://lore.kernel.org/r/20230606102531.162967-2-saravanan.vajravel@broadcom.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/ulp/isert/ib_isert.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index 636d590765f95..fbee14c8f6116 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -2431,6 +2431,7 @@ isert_free_np(struct iscsi_np *np)
+ {
+ struct isert_np *isert_np = np->np_context;
+ struct isert_conn *isert_conn, *n;
++ LIST_HEAD(drop_conn_list);
+
+ if (isert_np->cm_id)
+ rdma_destroy_id(isert_np->cm_id);
+@@ -2450,7 +2451,7 @@ isert_free_np(struct iscsi_np *np)
+ node) {
+ isert_info("cleaning isert_conn %p state (%d)\n",
+ isert_conn, isert_conn->state);
+- isert_connect_release(isert_conn);
++ list_move_tail(&isert_conn->node, &drop_conn_list);
+ }
+ }
+
+@@ -2461,11 +2462,16 @@ isert_free_np(struct iscsi_np *np)
+ node) {
+ isert_info("cleaning isert_conn %p state (%d)\n",
+ isert_conn, isert_conn->state);
+- isert_connect_release(isert_conn);
++ list_move_tail(&isert_conn->node, &drop_conn_list);
+ }
+ }
+ mutex_unlock(&isert_np->mutex);
+
++ list_for_each_entry_safe(isert_conn, n, &drop_conn_list, node) {
++ list_del_init(&isert_conn->node);
++ isert_connect_release(isert_conn);
++ }
++
+ np->np_context = NULL;
+ kfree(isert_np);
+ }
+--
+2.39.2
+
--- /dev/null
+From 3e9d26d93781fc68d87daec0a2c9cd387cb43e3d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jun 2023 03:25:31 -0700
+Subject: IB/isert: Fix incorrect release of isert connection
+
+From: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
+
+[ Upstream commit 699826f4e30ab76a62c238c86fbef7e826639c8d ]
+
+The ib_isert module releases the isert connection in both the
+isert_wait_conn() handler and the isert_free_conn() handler. The
+isert_wait_conn() handler is only expected to wait for the iSCSI
+session logout operation to complete; the isert connection should be
+freed only in the isert_free_conn() handler.
+
+When a bunch of iSER targets is cleared, this issue can lead to a
+use-after-free, as the isert connection is released twice.
+
+Fixes: b02efbfc9a05 ("iser-target: Fix implicit termination of connections")
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
+Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
+Link: https://lore.kernel.org/r/20230606102531.162967-4-saravanan.vajravel@broadcom.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/ulp/isert/ib_isert.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index 6082695a02d88..b71711defb81d 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -2570,8 +2570,6 @@ static void isert_wait_conn(struct iscsi_conn *conn)
+ isert_put_unsol_pending_cmds(conn);
+ isert_wait4cmds(conn);
+ isert_wait4logout(isert_conn);
+-
+- queue_work(isert_release_wq, &isert_conn->release_work);
+ }
+
+ static void isert_free_conn(struct iscsi_conn *conn)
+--
+2.39.2
+
--- /dev/null
+From 502824f3e3b875e87d0bab38438550767f47476a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jun 2023 03:25:30 -0700
+Subject: IB/isert: Fix possible list corruption in CMA handler
+
+From: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
+
+[ Upstream commit 7651e2d6c5b359a28c2d4c904fec6608d1021ca8 ]
+
+When the ib_isert module receives a connection error event, it
+releases the isert session and removes the corresponding list
+node, but it doesn't take the appropriate mutex lock while removing
+the list node. This can lead to linked list corruption.
+
+Fixes: bd3792205aae ("iser-target: Fix pending connections handling in target stack shutdown sequnce")
+Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
+Signed-off-by: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
+Link: https://lore.kernel.org/r/20230606102531.162967-3-saravanan.vajravel@broadcom.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/ulp/isert/ib_isert.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index fbee14c8f6116..6082695a02d88 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -656,9 +656,13 @@ static int
+ isert_connect_error(struct rdma_cm_id *cma_id)
+ {
+ struct isert_conn *isert_conn = cma_id->qp->qp_context;
++ struct isert_np *isert_np = cma_id->context;
+
+ ib_drain_qp(isert_conn->qp);
++
++ mutex_lock(&isert_np->mutex);
+ list_del_init(&isert_conn->node);
++ mutex_unlock(&isert_np->mutex);
+ isert_conn->cm_id = NULL;
+ isert_put_conn(isert_conn);
+
+--
+2.39.2
+
--- /dev/null
+From 223c4e2e20ed79ba872a92f8a32e310ecf99aa1d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jun 2023 13:33:25 +0300
+Subject: IB/uverbs: Fix to consider event queue closing also upon non-blocking
+ mode
+
+From: Yishai Hadas <yishaih@nvidia.com>
+
+[ Upstream commit 62fab312fa1683e812e605db20d4f22de3e3fb2f ]
+
+Fix ib_uverbs_event_read() to consider event queue closing also upon
+non-blocking mode.
+
+Once the queue is closed (e.g. hot-plug flow) all the existing events
+are cleaned-up as part of ib_uverbs_free_event_queue().
+
+An application that uses the non-blocking FD mode should get -EIO in
+that case to let it know that the device was already removed.
+
+Otherwise, it can lose the indication that the device was removed and
+won't recover.
+
+As part of that, refactor the code to have a single flow with regards to
+'is_closed' for both blocking and non-blocking modes.
+
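+For illustration only, a hedged sketch of the user-space pattern this
+matters for (generic code, not taken from any real application;
+handle_event() and wait_for_events() are made-up helpers, and <unistd.h>
+plus <errno.h> are assumed):
+
+  /* fd is the uverbs event FD, opened with O_NONBLOCK */
+  for (;;) {
+          ssize_t n = read(fd, buf, sizeof(buf));
+
+          if (n > 0)
+                  handle_event(buf, n);   /* got an event            */
+          else if (n < 0 && errno == EAGAIN)
+                  wait_for_events(fd);    /* queue is merely empty   */
+          else if (n < 0 && errno == EIO)
+                  break;                  /* device was removed      */
+  }
+
+Without the -EIO return, the last branch is never taken and the loop
+keeps seeing -EAGAIN after the device is gone.
+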
+Fixes: 14e23bd6d221 ("RDMA/core: Fix locking in ib_uverbs_event_read")
+Reviewed-by: Maor Gottlieb <maorg@nvidia.com>
+Signed-off-by: Yishai Hadas <yishaih@nvidia.com>
+Link: https://lore.kernel.org/r/97b00116a1e1e13f8dc4ec38a5ea81cf8c030210.1685960567.git.leon@kernel.org
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/core/uverbs_main.c | 12 +++++-------
+ 1 file changed, 5 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
+index d544340887277..fa937cd268219 100644
+--- a/drivers/infiniband/core/uverbs_main.c
++++ b/drivers/infiniband/core/uverbs_main.c
+@@ -222,8 +222,12 @@ static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
+ spin_lock_irq(&ev_queue->lock);
+
+ while (list_empty(&ev_queue->event_list)) {
+- spin_unlock_irq(&ev_queue->lock);
++ if (ev_queue->is_closed) {
++ spin_unlock_irq(&ev_queue->lock);
++ return -EIO;
++ }
+
++ spin_unlock_irq(&ev_queue->lock);
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+@@ -233,12 +237,6 @@ static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
+ return -ERESTARTSYS;
+
+ spin_lock_irq(&ev_queue->lock);
+-
+- /* If device was disassociated and no event exists set an error */
+- if (list_empty(&ev_queue->event_list) && ev_queue->is_closed) {
+- spin_unlock_irq(&ev_queue->lock);
+- return -EIO;
+- }
+ }
+
+ event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list);
+--
+2.39.2
+
--- /dev/null
+From fa92aeb885f5678b95ee504eeb5c00493fd7beb7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Apr 2023 17:44:14 +0200
+Subject: igb: fix nvm.ops.read() error handling
+
+From: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+
+[ Upstream commit 48a821fd58837800750ec1b3962f0f799630a844 ]
+
+Add error handling to the igb_set_eeprom() function: in case
+nvm.ops.read() fails, just quit with the error code as soon as possible.
+
+Fixes: 9d5c824399de ("igb: PCI-Express 82575 Gigabit Ethernet driver")
+Signed-off-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igb/igb_ethtool.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+index e99e6e44b525a..b2f46004a3d0f 100644
+--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
++++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+@@ -822,6 +822,8 @@ static int igb_set_eeprom(struct net_device *netdev,
+ */
+ ret_val = hw->nvm.ops.read(hw, last_word, 1,
+ &eeprom_buff[last_word - first_word]);
++ if (ret_val)
++ goto out;
+ }
+
+ /* Device's eeprom is always little-endian, word addressable */
+@@ -841,6 +843,7 @@ static int igb_set_eeprom(struct net_device *netdev,
+ hw->nvm.ops.update(hw);
+
+ igb_set_fw_version(adapter);
++out:
+ kfree(eeprom_buff);
+ return ret_val;
+ }
+--
+2.39.2
+
--- /dev/null
+From b5c325b98d54dc68f4747986918a0f85e0142fe0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 May 2023 23:49:36 +0800
+Subject: igc: Clean the TX buffer and TX descriptor ring
+
+From: Muhammad Husaini Zulkifli <muhammad.husaini.zulkifli@intel.com>
+
+[ Upstream commit e43516f5978d11d36511ce63d31d1da4db916510 ]
+
+There could be a race condition during link down where an interrupt
+is generated and igc_clean_tx_irq() is called to perform the
+TX completion. Properly clear the TX buffer/descriptor ring and
+disable the TX queue ring in igc_free_tx_resources() to avoid that.
+
+Kernel trace:
+[ 108.237177] Hardware name: Intel Corporation Tiger Lake Client Platform/TigerLake U DDR4 SODIMM RVP, BIOS TGLIFUI1.R00.4204.A00.2105270302 05/27/2021
+[ 108.237178] RIP: 0010:refcount_warn_saturate+0x55/0x110
+[ 108.242143] RSP: 0018:ffff9e7980003db0 EFLAGS: 00010286
+[ 108.245555] Code: 84 bc 00 00 00 c3 cc cc cc cc 85 f6 74 46 80 3d 20 8c 4d 01 00 75 ee 48 c7 c7 88 f4 03 ab c6 05 10 8c 4d 01 01 e8 0b 10 96 ff <0f> 0b c3 cc cc cc cc 80 3d fc 8b 4d 01 00 75 cb 48 c7 c7 b0 f4 03
+[ 108.250434]
+[ 108.250434] RSP: 0018:ffff9e798125f910 EFLAGS: 00010286
+[ 108.254358] RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000000
+[ 108.259325]
+[ 108.259325] RAX: 0000000000000000 RBX: ffff8ddb935b8000 RCX: 0000000000000027
+[ 108.261868] RDX: ffff8de250a28800 RSI: ffff8de250a1c580 RDI: ffff8de250a1c580
+[ 108.265538] RDX: 0000000000000027 RSI: 0000000000000002 RDI: ffff8de250a9c588
+[ 108.265539] RBP: ffff8ddb935b8000 R08: ffffffffab2655a0 R09: ffff9e798125f898
+[ 108.267914] RBP: ffff8ddb8a5b8d80 R08: 0000005648eba354 R09: 0000000000000000
+[ 108.270196] R10: 0000000000000001 R11: 000000002d2d2d2d R12: ffff9e798125f948
+[ 108.270197] R13: ffff9e798125fa1c R14: ffff8ddb8a5b8d80 R15: 7fffffffffffffff
+[ 108.273001] R10: 000000002d2d2d2d R11: 000000002d2d2d2d R12: ffff8ddb8a5b8ed4
+[ 108.276410] FS: 00007f605851b740(0000) GS:ffff8de250a80000(0000) knlGS:0000000000000000
+[ 108.280597] R13: 00000000000002ac R14: 00000000ffffff99 R15: ffff8ddb92561b80
+[ 108.282966] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 108.282967] CR2: 00007f053c039248 CR3: 0000000185850003 CR4: 0000000000f70ee0
+[ 108.286206] FS: 0000000000000000(0000) GS:ffff8de250a00000(0000) knlGS:0000000000000000
+[ 108.289701] PKRU: 55555554
+[ 108.289702] Call Trace:
+[ 108.289704] <TASK>
+[ 108.293977] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 108.297562] sock_alloc_send_pskb+0x20c/0x240
+[ 108.301494] CR2: 00007f053c03a168 CR3: 0000000184394002 CR4: 0000000000f70ef0
+[ 108.301495] PKRU: 55555554
+[ 108.306464] __ip_append_data.isra.0+0x96f/0x1040
+[ 108.309441] Call Trace:
+[ 108.309443] ? __pfx_ip_generic_getfrag+0x10/0x10
+[ 108.314927] <IRQ>
+[ 108.314928] sock_wfree+0x1c7/0x1d0
+[ 108.318078] ? __pfx_ip_generic_getfrag+0x10/0x10
+[ 108.320276] skb_release_head_state+0x32/0x90
+[ 108.324812] ip_make_skb+0xf6/0x130
+[ 108.327188] skb_release_all+0x16/0x40
+[ 108.330775] ? udp_sendmsg+0x9f3/0xcb0
+[ 108.332626] napi_consume_skb+0x48/0xf0
+[ 108.334134] ? xfrm_lookup_route+0x23/0xb0
+[ 108.344285] igc_poll+0x787/0x1620 [igc]
+[ 108.346659] udp_sendmsg+0x9f3/0xcb0
+[ 108.360010] ? ttwu_do_activate+0x40/0x220
+[ 108.365237] ? __pfx_ip_generic_getfrag+0x10/0x10
+[ 108.366744] ? try_to_wake_up+0x289/0x5e0
+[ 108.376987] ? sock_sendmsg+0x81/0x90
+[ 108.395698] ? __pfx_process_timeout+0x10/0x10
+[ 108.395701] sock_sendmsg+0x81/0x90
+[ 108.409052] __napi_poll+0x29/0x1c0
+[ 108.414279] ____sys_sendmsg+0x284/0x310
+[ 108.419507] net_rx_action+0x257/0x2d0
+[ 108.438216] ___sys_sendmsg+0x7c/0xc0
+[ 108.439723] __do_softirq+0xc1/0x2a8
+[ 108.444950] ? finish_task_switch+0xb4/0x2f0
+[ 108.452077] irq_exit_rcu+0xa9/0xd0
+[ 108.453584] ? __schedule+0x372/0xd00
+[ 108.460713] common_interrupt+0x84/0xa0
+[ 108.467840] ? clockevents_program_event+0x95/0x100
+[ 108.474968] </IRQ>
+[ 108.482096] ? do_nanosleep+0x88/0x130
+[ 108.489224] <TASK>
+[ 108.489225] asm_common_interrupt+0x26/0x40
+[ 108.496353] ? __rseq_handle_notify_resume+0xa9/0x4f0
+[ 108.503478] RIP: 0010:cpu_idle_poll+0x2c/0x100
+[ 108.510607] __sys_sendmsg+0x5d/0xb0
+[ 108.518687] Code: 05 e1 d9 c8 00 65 8b 15 de 64 85 55 85 c0 7f 57 e8 b9 ef ff ff fb 65 48 8b 1c 25 00 cc 02 00 48 8b 03 a8 08 74 0b eb 1c f3 90 <48> 8b 03 a8 08 75 13 8b 05 77 63 cd 00 85 c0 75 ed e8 ce ec ff ff
+[ 108.525817] do_syscall_64+0x44/0xa0
+[ 108.531563] RSP: 0018:ffffffffab203e70 EFLAGS: 00000202
+[ 108.538693] entry_SYSCALL_64_after_hwframe+0x72/0xdc
+[ 108.546775]
+[ 108.546777] RIP: 0033:0x7f605862b7f7
+[ 108.549495] RAX: 0000000000000001 RBX: ffffffffab20c940 RCX: 000000000000003b
+[ 108.551955] Code: 0e 00 f7 d8 64 89 02 48 c7 c0 ff ff ff ff eb b9 0f 1f 00 f3 0f 1e fa 64 8b 04 25 18 00 00 00 85 c0 75 10 b8 2e 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 51 c3 48 83 ec 28 89 54 24 1c 48 89 74 24 10
+[ 108.554068] RDX: 4000000000000000 RSI: 000000002da97f6a RDI: 00000000002b8ff4
+[ 108.559816] RSP: 002b:00007ffc99264058 EFLAGS: 00000246
+[ 108.564178] RBP: 0000000000000000 R08: 00000000002b8ff4 R09: ffff8ddb01554c80
+[ 108.571302] ORIG_RAX: 000000000000002e
+[ 108.571303] RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f605862b7f7
+[ 108.574023] R10: 000000000000015b R11: 000000000000000f R12: ffffffffab20c940
+[ 108.574024] R13: 0000000000000000 R14: ffff8de26fbeef40 R15: ffffffffab20c940
+[ 108.578727] RDX: 0000000000000000 RSI: 00007ffc992640a0 RDI: 0000000000000003
+[ 108.578728] RBP: 00007ffc99264110 R08: 0000000000000000 R09: 175f48ad1c3a9c00
+[ 108.581187] do_idle+0x62/0x230
+[ 108.585890] R10: 0000000000000000 R11: 0000000000000246 R12: 00007ffc992642d8
+[ 108.585891] R13: 00005577814ab2ba R14: 00005577814addf0 R15: 00007f605876d000
+[ 108.587920] cpu_startup_entry+0x1d/0x20
+[ 108.591422] </TASK>
+[ 108.596127] rest_init+0xc5/0xd0
+[ 108.600490] ---[ end trace 0000000000000000 ]---
+
+Test Setup:
+
+DUT:
+- Change mac address on DUT Side. Ensure NIC not having same MAC Address
+- Running udp_tai on DUT side. Let udp_tai running throughout the test
+
+Example:
+./udp_tai -i enp170s0 -P 100000 -p 90 -c 1 -t 0 -u 30004
+
+Host:
+- Perform link up/down every 5 second.
+
+Result:
+Kernel panic will happen on DUT Side.
+
+Fixes: 13b5b7fd6a4a ("igc: Add support for Tx/Rx rings")
+Signed-off-by: Muhammad Husaini Zulkifli <muhammad.husaini.zulkifli@intel.com>
+Tested-by: Naama Meir <naamax.meir@linux.intel.com>
+Reviewed-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igc/igc_main.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index e255b0a004f88..eb7aa8c13f7e5 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -254,6 +254,13 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
+ /* reset BQL for queue */
+ netdev_tx_reset_queue(txring_txq(tx_ring));
+
++ /* Zero out the buffer ring */
++ memset(tx_ring->tx_buffer_info, 0,
++ sizeof(*tx_ring->tx_buffer_info) * tx_ring->count);
++
++ /* Zero out the descriptor ring */
++ memset(tx_ring->desc, 0, tx_ring->size);
++
+ /* reset next_to_use and next_to_clean */
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+@@ -267,7 +274,7 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
+ */
+ void igc_free_tx_resources(struct igc_ring *tx_ring)
+ {
+- igc_clean_tx_ring(tx_ring);
++ igc_disable_tx_ring(tx_ring);
+
+ vfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+--
+2.39.2
+
--- /dev/null
+From eb020662fc14feaa7fca853805904127808061bf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 9 Jun 2023 17:15:02 +0800
+Subject: ipvlan: fix bound dev checking for IPv6 l3s mode
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit ce57adc222aba32431c42632b396e9213d0eb0b8 ]
+
+The commit 59a0b022aa24 ("ipvlan: Make skb->skb_iif track skb->dev for l3s
+mode") fixed ipvlan bonded dev checking by updating skb skb_iif. This fix
+works for IPv4, as in raw_v4_input() the dif is from inet_iif(skb), which
+is skb->skb_iif when there is no route.
+
+But for IPv6, the fix is not enough, because in ipv6_raw_deliver() ->
+raw_v6_match(), the dif is inet6_iif(skb), which returns IP6CB(skb)->iif
+instead of skb->skb_iif if the device is not an l3_slave. To fix the IPv6
+part of the issue, let's set IP6CB(skb)->iif to the correct ifindex.
+
+BTW, ipvlan handles NS/NA specifically. Since it works fine, I will not
+reset IP6CB(skb)->iif when addr->atype is IPVL_ICMPV6.
+
+Fixes: c675e06a98a4 ("ipvlan: decouple l3s mode dependencies from other modes")
+Link: https://bugzilla.redhat.com/show_bug.cgi?id=2196710
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Reviewed-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ipvlan/ipvlan_l3s.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/net/ipvlan/ipvlan_l3s.c b/drivers/net/ipvlan/ipvlan_l3s.c
+index 71712ea25403d..d5b05e8032199 100644
+--- a/drivers/net/ipvlan/ipvlan_l3s.c
++++ b/drivers/net/ipvlan/ipvlan_l3s.c
+@@ -102,6 +102,10 @@ static unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb,
+
+ skb->dev = addr->master->dev;
+ skb->skb_iif = skb->dev->ifindex;
++#if IS_ENABLED(CONFIG_IPV6)
++ if (addr->atype == IPVL_IPV6)
++ IP6CB(skb)->iif = skb->dev->ifindex;
++#endif
+ len = skb->len + ETH_HLEN;
+ ipvlan_count_rx(addr->master, len, true, false);
+ out:
+--
+2.39.2
+
--- /dev/null
+From a7ca85433ba3eca5d27273c1baeb3e6ae6f79a7c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Jun 2023 17:10:48 +0800
+Subject: net: enetc: correct the indexes of highest and 2nd highest TCs
+
+From: Wei Fang <wei.fang@nxp.com>
+
+[ Upstream commit 21225873be1472b7c59ed3650396af0e40578112 ]
+
+For ENETC hardware, the TCs are numbered from 0 to N-1, where N
+is the number of TCs. Numerically higher TC has higher priority.
+It's obvious that the highest priority TC index should be N-1 and
+the 2nd highest priority TC index should be N-2.
+
+However, the previous logic used netdev_get_prio_tc_map() to get
+the indexes of the highest-priority and 2nd highest-priority TCs;
+it does not make sense and is incorrect to pass a TC number to
+netdev_get_prio_tc_map(), which expects a priority. So the driver
+may get the wrong indexes of the two highest-priority TCs, which
+would lead to failing to set up CBS for those TCs.
+
+e.g.
+$ tc qdisc add dev eno0 parent root handle 100: mqprio num_tc 6 \
+ map 0 0 1 1 2 3 4 5 queues 1@0 1@1 1@2 1@3 2@4 2@6 hw 1
+$ tc qdisc replace dev eno0 parent 100:6 cbs idleslope 100000 \
+ sendslope -900000 hicredit 12 locredit -113 offload 1
+$ Error: Specified device failed to setup cbs hardware offload.
+ ^^^^^
+
+In this example, the previous logic deems that the indexes of the two
+highest-priority TCs should be 3 and 2. Actually, the indexes are
+5 and 4, because the number of TCs is 6. So configuring CBS for the
+two highest-priority TCs would fail.
+
+Fixes: c431047c4efe ("enetc: add support Credit Based Shaper(CBS) for hardware offload")
+Signed-off-by: Wei Fang <wei.fang@nxp.com>
+Reviewed-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Reviewed-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/enetc/enetc_qos.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+index 4e9cb1deaf810..c348b6fb0e6f9 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+@@ -197,8 +197,8 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
+ int bw_sum = 0;
+ u8 bw;
+
+- prio_top = netdev_get_prio_tc_map(ndev, tc_nums - 1);
+- prio_next = netdev_get_prio_tc_map(ndev, tc_nums - 2);
++ prio_top = tc_nums - 1;
++ prio_next = tc_nums - 2;
+
+ /* Support highest prio and second prio tc in cbs mode */
+ if (tc != prio_top && tc != prio_next)
+--
+2.39.2
+
--- /dev/null
+From 2737704ac654d2c55da38818d8ee72db11cfcdfe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Jun 2023 09:23:44 -0700
+Subject: net: ethtool: correct MAX attribute value for stats
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 52f79609c0c5b25fddb88e85f25ce08aa7e3fb42 ]
+
+When compiling YNL-generated code, the compiler complains about an
+array initializer being out of bounds. It turns out the MAX value
+for STATS_GRP uses the value for STATS.
+
+This may lead to random corruption in user space (the kernel
+itself doesn't use this value, as it never parses stats).
+
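+For context, a hedged sketch of the uAPI pattern involved (generic names,
+not the real ethtool enums): each attribute set defines its own counter,
+derives MAX from it, and user-space parsers size their tables from that
+MAX.
+
+  enum {
+          EXAMPLE_GRP_UNSPEC,
+          EXAMPLE_GRP_ID,
+          /* ... */
+          __EXAMPLE_GRP_CNT,
+          EXAMPLE_GRP_MAX = (__EXAMPLE_GRP_CNT - 1)   /* must use its own CNT */
+  };
+
+  struct nlattr *tb[EXAMPLE_GRP_MAX + 1];     /* too small if MAX borrows a
+                                               * smaller enum's CNT; parsing
+                                               * then writes past the end
+                                               */
+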
+Fixes: f09ea6fb1272 ("ethtool: add a new command for reading standard stats")
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/uapi/linux/ethtool_netlink.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h
+index f7204bdfe8db1..97e5d303810f2 100644
+--- a/include/uapi/linux/ethtool_netlink.h
++++ b/include/uapi/linux/ethtool_netlink.h
+@@ -743,7 +743,7 @@ enum {
+
+ /* add new constants above here */
+ __ETHTOOL_A_STATS_GRP_CNT,
+- ETHTOOL_A_STATS_GRP_MAX = (__ETHTOOL_A_STATS_CNT - 1)
++ ETHTOOL_A_STATS_GRP_MAX = (__ETHTOOL_A_STATS_GRP_CNT - 1)
+ };
+
+ enum {
+--
+2.39.2
+
--- /dev/null
+From c654a9c3b89849e61670e205805285bf5addf65b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Jun 2023 16:18:02 +0000
+Subject: net: lapbether: only support ethernet devices
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 9eed321cde22fc1afd76eac563ce19d899e0d6b2 ]
+
+It probably makes no sense to support arbitrary network devices
+for lapbether.
+
+syzbot reported:
+
+skbuff: skb_under_panic: text:ffff80008934c100 len:44 put:40 head:ffff0000d18dd200 data:ffff0000d18dd1ea tail:0x16 end:0x140 dev:bond1
+kernel BUG at net/core/skbuff.c:200 !
+Internal error: Oops - BUG: 00000000f2000800 [#1] PREEMPT SMP
+Modules linked in:
+CPU: 0 PID: 5643 Comm: dhcpcd Not tainted 6.4.0-rc5-syzkaller-g4641cff8e810 #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 05/25/2023
+pstate: 60400005 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+pc : skb_panic net/core/skbuff.c:196 [inline]
+pc : skb_under_panic+0x13c/0x140 net/core/skbuff.c:210
+lr : skb_panic net/core/skbuff.c:196 [inline]
+lr : skb_under_panic+0x13c/0x140 net/core/skbuff.c:210
+sp : ffff8000973b7260
+x29: ffff8000973b7270 x28: ffff8000973b7360 x27: dfff800000000000
+x26: ffff0000d85d8150 x25: 0000000000000016 x24: ffff0000d18dd1ea
+x23: ffff0000d18dd200 x22: 000000000000002c x21: 0000000000000140
+x20: 0000000000000028 x19: ffff80008934c100 x18: ffff8000973b68a0
+x17: 0000000000000000 x16: ffff80008a43bfbc x15: 0000000000000202
+x14: 0000000000000000 x13: 0000000000000001 x12: 0000000000000001
+x11: 0000000000000201 x10: 0000000000000000 x9 : f22f7eb937cced00
+x8 : f22f7eb937cced00 x7 : 0000000000000001 x6 : 0000000000000001
+x5 : ffff8000973b6b78 x4 : ffff80008df9ee80 x3 : ffff8000805974f4
+x2 : 0000000000000001 x1 : 0000000100000201 x0 : 0000000000000086
+Call trace:
+skb_panic net/core/skbuff.c:196 [inline]
+skb_under_panic+0x13c/0x140 net/core/skbuff.c:210
+skb_push+0xf0/0x108 net/core/skbuff.c:2409
+ip6gre_header+0xbc/0x738 net/ipv6/ip6_gre.c:1383
+dev_hard_header include/linux/netdevice.h:3137 [inline]
+lapbeth_data_transmit+0x1c4/0x298 drivers/net/wan/lapbether.c:257
+lapb_data_transmit+0x8c/0xb0 net/lapb/lapb_iface.c:447
+lapb_transmit_buffer+0x178/0x204 net/lapb/lapb_out.c:149
+lapb_send_control+0x220/0x320 net/lapb/lapb_subr.c:251
+lapb_establish_data_link+0x94/0xec
+lapb_device_event+0x348/0x4e0
+notifier_call_chain+0x1a4/0x510 kernel/notifier.c:93
+raw_notifier_call_chain+0x3c/0x50 kernel/notifier.c:461
+__dev_notify_flags+0x2bc/0x544
+dev_change_flags+0xd0/0x15c net/core/dev.c:8643
+devinet_ioctl+0x858/0x17e4 net/ipv4/devinet.c:1150
+inet_ioctl+0x2ac/0x4d8 net/ipv4/af_inet.c:979
+sock_do_ioctl+0x134/0x2dc net/socket.c:1201
+sock_ioctl+0x4ec/0x858 net/socket.c:1318
+vfs_ioctl fs/ioctl.c:51 [inline]
+__do_sys_ioctl fs/ioctl.c:870 [inline]
+__se_sys_ioctl fs/ioctl.c:856 [inline]
+__arm64_sys_ioctl+0x14c/0x1c8 fs/ioctl.c:856
+__invoke_syscall arch/arm64/kernel/syscall.c:38 [inline]
+invoke_syscall+0x98/0x2c0 arch/arm64/kernel/syscall.c:52
+el0_svc_common+0x138/0x244 arch/arm64/kernel/syscall.c:142
+do_el0_svc+0x64/0x198 arch/arm64/kernel/syscall.c:191
+el0_svc+0x4c/0x160 arch/arm64/kernel/entry-common.c:647
+el0t_64_sync_handler+0x84/0xfc arch/arm64/kernel/entry-common.c:665
+el0t_64_sync+0x190/0x194 arch/arm64/kernel/entry.S:591
+Code: aa1803e6 aa1903e7 a90023f5 947730f5 (d4210000)
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Martin Schiller <ms@dev.tdt.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wan/lapbether.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
+index 5037ef82be461..75613ac26641f 100644
+--- a/drivers/net/wan/lapbether.c
++++ b/drivers/net/wan/lapbether.c
+@@ -384,6 +384,9 @@ static int lapbeth_new_device(struct net_device *dev)
+
+ ASSERT_RTNL();
+
++ if (dev->type != ARPHRD_ETHER)
++ return -EINVAL;
++
+ ndev = alloc_netdev(sizeof(*lapbeth), "lapb%d", NET_NAME_UNKNOWN,
+ lapbeth_setup);
+ if (!ndev)
+--
+2.39.2
+
--- /dev/null
+From 51047613fb5af81a68093be02764bd8f121b93c1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Jun 2023 12:23:54 -0400
+Subject: net/sched: act_pedit: Parse L3 Header for L4 offset
+
+From: Max Tottenham <mtottenh@akamai.com>
+
+[ Upstream commit 6c02568fd1ae53099b4ab86365c5be1ff15f586b ]
+
+Instead of relying on skb->transport_header being set correctly, parse
+the L3 header length out of the L3 headers for both IPv4 and IPv6 when
+the extended layer op for TCP/UDP is used. This fixes a bug seen when
+GRO is disabled: in that case skb->transport_header is set by
+__netif_receive_skb_core() to point to the L3 header and is only fixed
+up later by the upper protocol layers, but act_pedit receives the SKB
+before those fixups are completed. The existing behavior causes the
+following command to edit the L3 header instead of the UDP header when
+GRO is disabled:
+
+ tc filter add dev eth0 ingress protocol ip flower ip_proto udp \
+ dst_ip 192.168.1.3 action pedit ex munge udp set dport 18053
+
+Also re-introduce a rate-limited warning if we were unable to extract
+the header offset when using the 'ex' interface.
+
+Fixes: 71d0ed7079df ("net/act_pedit: Support using offset relative to the conventional network headers")
+Signed-off-by: Max Tottenham <mtottenh@akamai.com>
+Reviewed-by: Josh Hunt <johunt@akamai.com>
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202305261541.N165u9TZ-lkp@intel.com/
+Reviewed-by: Pedro Tammela <pctammela@mojatatu.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/act_pedit.c | 48 ++++++++++++++++++++++++++++++++++++++-----
+ 1 file changed, 43 insertions(+), 5 deletions(-)
+
+diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
+index e3bc5bb6c60ef..e77da0545b553 100644
+--- a/net/sched/act_pedit.c
++++ b/net/sched/act_pedit.c
+@@ -13,7 +13,10 @@
+ #include <linux/rtnetlink.h>
+ #include <linux/module.h>
+ #include <linux/init.h>
++#include <linux/ip.h>
++#include <linux/ipv6.h>
+ #include <linux/slab.h>
++#include <net/ipv6.h>
+ #include <net/netlink.h>
+ #include <net/pkt_sched.h>
+ #include <linux/tc_act/tc_pedit.h>
+@@ -313,28 +316,58 @@ static bool offset_valid(struct sk_buff *skb, int offset)
+ return true;
+ }
+
+-static void pedit_skb_hdr_offset(struct sk_buff *skb,
++static int pedit_l4_skb_offset(struct sk_buff *skb, int *hoffset, const int header_type)
++{
++ const int noff = skb_network_offset(skb);
++ int ret = -EINVAL;
++ struct iphdr _iph;
++
++ switch (skb->protocol) {
++ case htons(ETH_P_IP): {
++ const struct iphdr *iph = skb_header_pointer(skb, noff, sizeof(_iph), &_iph);
++
++ if (!iph)
++ goto out;
++ *hoffset = noff + iph->ihl * 4;
++ ret = 0;
++ break;
++ }
++ case htons(ETH_P_IPV6):
++ ret = ipv6_find_hdr(skb, hoffset, header_type, NULL, NULL) == header_type ? 0 : -EINVAL;
++ break;
++ }
++out:
++ return ret;
++}
++
++static int pedit_skb_hdr_offset(struct sk_buff *skb,
+ enum pedit_header_type htype, int *hoffset)
+ {
++ int ret = -EINVAL;
+ /* 'htype' is validated in the netlink parsing */
+ switch (htype) {
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
+- if (skb_mac_header_was_set(skb))
++ if (skb_mac_header_was_set(skb)) {
+ *hoffset = skb_mac_offset(skb);
++ ret = 0;
++ }
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK:
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
+ *hoffset = skb_network_offset(skb);
++ ret = 0;
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
++ ret = pedit_l4_skb_offset(skb, hoffset, IPPROTO_TCP);
++ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
+- if (skb_transport_header_was_set(skb))
+- *hoffset = skb_transport_offset(skb);
++ ret = pedit_l4_skb_offset(skb, hoffset, IPPROTO_UDP);
+ break;
+ default:
+ break;
+ }
++ return ret;
+ }
+
+ static int tcf_pedit_act(struct sk_buff *skb, const struct tc_action *a,
+@@ -369,6 +402,7 @@ static int tcf_pedit_act(struct sk_buff *skb, const struct tc_action *a,
+ int hoffset = 0;
+ u32 *ptr, hdata;
+ u32 val;
++ int rc;
+
+ if (tkey_ex) {
+ htype = tkey_ex->htype;
+@@ -377,7 +411,11 @@ static int tcf_pedit_act(struct sk_buff *skb, const struct tc_action *a,
+ tkey_ex++;
+ }
+
+- pedit_skb_hdr_offset(skb, htype, &hoffset);
++ rc = pedit_skb_hdr_offset(skb, htype, &hoffset);
++ if (rc) {
++ pr_info_ratelimited("tc action pedit unable to extract header offset for header type (0x%x)\n", htype);
++ goto bad;
++ }
+
+ if (tkey->offmask) {
+ u8 *d, _d;
+--
+2.39.2
+
--- /dev/null
+From 3c606d8c2e23e0938b50286bf9a2828b54efa314 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Apr 2023 18:25:16 -0300
+Subject: net/sched: act_pedit: remove extra check for key type
+
+From: Pedro Tammela <pctammela@mojatatu.com>
+
+[ Upstream commit 577140180ba28d0d37bc898c7bd6702c83aa106f ]
+
+The netlink parsing already validates the key 'htype'.
+Remove the datapath check as it's redundant.
+
+Reviewed-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Signed-off-by: Pedro Tammela <pctammela@mojatatu.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: 6c02568fd1ae ("net/sched: act_pedit: Parse L3 Header for L4 offset")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/act_pedit.c | 29 +++++++----------------------
+ 1 file changed, 7 insertions(+), 22 deletions(-)
+
+diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
+index 0237d898fabea..e3bc5bb6c60ef 100644
+--- a/net/sched/act_pedit.c
++++ b/net/sched/act_pedit.c
+@@ -313,37 +313,28 @@ static bool offset_valid(struct sk_buff *skb, int offset)
+ return true;
+ }
+
+-static int pedit_skb_hdr_offset(struct sk_buff *skb,
+- enum pedit_header_type htype, int *hoffset)
++static void pedit_skb_hdr_offset(struct sk_buff *skb,
++ enum pedit_header_type htype, int *hoffset)
+ {
+- int ret = -EINVAL;
+-
++ /* 'htype' is validated in the netlink parsing */
+ switch (htype) {
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
+- if (skb_mac_header_was_set(skb)) {
++ if (skb_mac_header_was_set(skb))
+ *hoffset = skb_mac_offset(skb);
+- ret = 0;
+- }
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK:
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
+ *hoffset = skb_network_offset(skb);
+- ret = 0;
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
+- if (skb_transport_header_was_set(skb)) {
++ if (skb_transport_header_was_set(skb))
+ *hoffset = skb_transport_offset(skb);
+- ret = 0;
+- }
+ break;
+ default:
+- ret = -EINVAL;
+ break;
+ }
+-
+- return ret;
+ }
+
+ static int tcf_pedit_act(struct sk_buff *skb, const struct tc_action *a,
+@@ -375,10 +366,9 @@ static int tcf_pedit_act(struct sk_buff *skb, const struct tc_action *a,
+
+ for (i = parms->tcfp_nkeys; i > 0; i--, tkey++) {
+ int offset = tkey->off;
++ int hoffset = 0;
+ u32 *ptr, hdata;
+- int hoffset;
+ u32 val;
+- int rc;
+
+ if (tkey_ex) {
+ htype = tkey_ex->htype;
+@@ -387,12 +377,7 @@ static int tcf_pedit_act(struct sk_buff *skb, const struct tc_action *a,
+ tkey_ex++;
+ }
+
+- rc = pedit_skb_hdr_offset(skb, htype, &hoffset);
+- if (rc) {
+- pr_info("tc action pedit bad header type specified (0x%x)\n",
+- htype);
+- goto bad;
+- }
++ pedit_skb_hdr_offset(skb, htype, &hoffset);
+
+ if (tkey->offmask) {
+ u8 *d, _d;
+--
+2.39.2
+
--- /dev/null
+From e5991706b44cfd222e5692385cf82f04693286b2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Jun 2023 11:34:26 +0200
+Subject: net/sched: cls_api: Fix lockup on flushing explicitly created chain
+
+From: Vlad Buslov <vladbu@nvidia.com>
+
+[ Upstream commit c9a82bec02c339cdda99b37c5e62b3b71fc4209c ]
+
+Mingshuai Ren reports:
+
+When a new chain is added by using tc, one soft lockup alarm will be
+ generated after deleting the prio 0 filter of the chain. To reproduce
+ the problem, perform the following steps:
+(1) tc qdisc add dev eth0 root handle 1: htb default 1
+(2) tc chain add dev eth0
+(3) tc filter del dev eth0 chain 0 parent 1: prio 0
+(4) tc filter add dev eth0 chain 0 parent 1:
+
+Fix the issue by accounting for the additional reference to chains that
+are explicitly created by the RTM_NEWCHAIN message, as opposed to
+implicitly by the RTM_NEWTFILTER message.
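+
+For illustration only (not part of the upstream patch), a minimal
+user-space sketch of the reference accounting, with hypothetical names:
+
+    #include <stdbool.h>
+    #include <stdio.h>
+
+    /* Hypothetical model of a classifier chain's reference counts. */
+    struct chain {
+        unsigned int refcnt;        /* all references */
+        unsigned int action_refcnt; /* references held by tc actions */
+        bool explicitly_created;    /* created via RTM_NEWCHAIN */
+        bool flushing;
+    };
+
+    /* An explicitly created chain keeps one extra user reference, so
+     * "no remaining users" means the non-action refcount is 1 for an
+     * explicit chain and 0 for an implicit one.
+     */
+    static bool chain_users_gone(const struct chain *c)
+    {
+        unsigned int non_act = c->refcnt - c->action_refcnt;
+
+        return non_act == (c->explicitly_created ? 1 : 0);
+    }
+
+    int main(void)
+    {
+        struct chain c = { .refcnt = 1, .action_refcnt = 0,
+                           .explicitly_created = true, .flushing = true };
+
+        if (chain_users_gone(&c))
+            c.flushing = false; /* new filters may be inserted again */
+        printf("flushing=%d\n", c.flushing);
+        return 0;
+    }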
+
+Fixes: 726d061286ce ("net: sched: prevent insertion of new classifiers during chain flush")
+Reported-by: Mingshuai Ren <renmingshuai@huawei.com>
+Closes: https://lore.kernel.org/lkml/87legswvi3.fsf@nvidia.com/T/
+Signed-off-by: Vlad Buslov <vladbu@nvidia.com>
+Link: https://lore.kernel.org/r/20230612093426.2867183-1-vladbu@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/cls_api.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index d88a0946301c5..a5864ddfb8902 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -533,8 +533,8 @@ static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
+ {
+ struct tcf_block *block = chain->block;
+ const struct tcf_proto_ops *tmplt_ops;
++ unsigned int refcnt, non_act_refcnt;
+ bool free_block = false;
+- unsigned int refcnt;
+ void *tmplt_priv;
+
+ mutex_lock(&block->lock);
+@@ -554,13 +554,15 @@ static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
+ * save these to temporary variables.
+ */
+ refcnt = --chain->refcnt;
++ non_act_refcnt = refcnt - chain->action_refcnt;
+ tmplt_ops = chain->tmplt_ops;
+ tmplt_priv = chain->tmplt_priv;
+
+- /* The last dropped non-action reference will trigger notification. */
+- if (refcnt - chain->action_refcnt == 0 && !by_act) {
+- tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
+- block, NULL, 0, 0, false);
++ if (non_act_refcnt == chain->explicitly_created && !by_act) {
++ if (non_act_refcnt == 0)
++ tc_chain_notify_delete(tmplt_ops, tmplt_priv,
++ chain->index, block, NULL, 0, 0,
++ false);
+ /* Last reference to chain, no need to lock. */
+ chain->flushing = false;
+ }
+--
+2.39.2
+
--- /dev/null
+From 5bdfd7c81a51eb21ff816dfb767633251c02e9b6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Jun 2023 08:29:03 +0100
+Subject: net/sched: cls_u32: Fix reference counter leak leading to overflow
+
+From: Lee Jones <lee@kernel.org>
+
+[ Upstream commit 04c55383fa5689357bcdd2c8036725a55ed632bc ]
+
+In the event of a failure in tcf_change_indev(), u32_set_parms() will
+immediately return without decrementing the recently incremented
+reference counter. If this happens enough times, the counter will roll
+over and the reference will be freed, leading to a double free which can
+be used to do 'bad things'.
+
+In order to prevent this, move the point of possible failure above the
+point where the reference counter is incremented. Also save any
+meaningful return values to be applied to the return data at the
+appropriate point in time.
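+
+Purely as a user-space illustration of that ordering (hypothetical
+names, not the cls_u32 code), resolve anything that can fail before any
+reference is taken:
+
+    #include <stdio.h>
+
+    /* Hypothetical resource with a reference count. */
+    struct resource {
+        int refcnt;
+    };
+
+    static void res_get(struct resource *r) { r->refcnt++; }
+
+    /* Stand-in for a lookup that may fail, e.g. resolving an ifindex. */
+    static int lookup_ifindex(int request)
+    {
+        return request > 0 ? request : -1;
+    }
+
+    static int set_parms(struct resource *r, int request, int *out_ifindex)
+    {
+        int ifindex = -1;
+
+        if (request != 0) {
+            ifindex = lookup_ifindex(request);
+            if (ifindex < 0)
+                return -1;  /* fail before touching the refcount */
+        }
+
+        res_get(r);         /* only now is there something to unwind */
+        if (ifindex > 0)
+            *out_ifindex = ifindex;
+        return 0;
+    }
+
+    int main(void)
+    {
+        struct resource r = { .refcnt = 0 };
+        int ifindex = 0;
+
+        if (set_parms(&r, -5, &ifindex))
+            printf("failed early, refcnt still %d\n", r.refcnt);
+        return 0;
+    }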
+
+This issue was caught with KASAN.
+
+Fixes: 705c7091262d ("net: sched: cls_u32: no need to call tcf_exts_change for newly allocated struct")
+Suggested-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Lee Jones <lee@kernel.org>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/cls_u32.c | 18 ++++++++++--------
+ 1 file changed, 10 insertions(+), 8 deletions(-)
+
+diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
+index 5d30db0d71574..0025fa837e857 100644
+--- a/net/sched/cls_u32.c
++++ b/net/sched/cls_u32.c
+@@ -716,12 +716,18 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp,
+ struct nlattr *est, u32 flags,
+ struct netlink_ext_ack *extack)
+ {
+- int err;
++ int err, ifindex = -1;
+
+ err = tcf_exts_validate(net, tp, tb, est, &n->exts, flags, extack);
+ if (err < 0)
+ return err;
+
++ if (tb[TCA_U32_INDEV]) {
++ ifindex = tcf_change_indev(net, tb[TCA_U32_INDEV], extack);
++ if (ifindex < 0)
++ return -EINVAL;
++ }
++
+ if (tb[TCA_U32_LINK]) {
+ u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
+ struct tc_u_hnode *ht_down = NULL, *ht_old;
+@@ -756,13 +762,9 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp,
+ tcf_bind_filter(tp, &n->res, base);
+ }
+
+- if (tb[TCA_U32_INDEV]) {
+- int ret;
+- ret = tcf_change_indev(net, tb[TCA_U32_INDEV], extack);
+- if (ret < 0)
+- return -EINVAL;
+- n->ifindex = ret;
+- }
++ if (ifindex >= 0)
++ n->ifindex = ifindex;
++
+ return 0;
+ }
+
+--
+2.39.2
+
--- /dev/null
+From 3e547e600ce8374d621cf3b0cb1be6ef36ffcd94 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 31 Jan 2023 16:05:12 -0300
+Subject: net/sched: simplify tcf_pedit_act
+
+From: Pedro Tammela <pctammela@mojatatu.com>
+
+[ Upstream commit 95b069382351826c0ae37938070aa82dbeaf288d ]
+
+Remove the check for a negative number of keys, as
+this cannot ever happen.
+
+Reviewed-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Signed-off-by: Pedro Tammela <pctammela@mojatatu.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Stable-dep-of: 6c02568fd1ae ("net/sched: act_pedit: Parse L3 Header for L4 offset")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/act_pedit.c | 137 +++++++++++++++++++++---------------------
+ 1 file changed, 67 insertions(+), 70 deletions(-)
+
+diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
+index 051cd20928599..0237d898fabea 100644
+--- a/net/sched/act_pedit.c
++++ b/net/sched/act_pedit.c
+@@ -349,8 +349,12 @@ static int pedit_skb_hdr_offset(struct sk_buff *skb,
+ static int tcf_pedit_act(struct sk_buff *skb, const struct tc_action *a,
+ struct tcf_result *res)
+ {
++ enum pedit_header_type htype = TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK;
++ enum pedit_cmd cmd = TCA_PEDIT_KEY_EX_CMD_SET;
+ struct tcf_pedit *p = to_pedit(a);
++ struct tcf_pedit_key_ex *tkey_ex;
+ struct tcf_pedit_parms *parms;
++ struct tc_pedit_key *tkey;
+ u32 max_offset;
+ int i;
+
+@@ -366,88 +370,81 @@ static int tcf_pedit_act(struct sk_buff *skb, const struct tc_action *a,
+ tcf_lastuse_update(&p->tcf_tm);
+ tcf_action_update_bstats(&p->common, skb);
+
+- if (parms->tcfp_nkeys > 0) {
+- struct tc_pedit_key *tkey = parms->tcfp_keys;
+- struct tcf_pedit_key_ex *tkey_ex = parms->tcfp_keys_ex;
+- enum pedit_header_type htype =
+- TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK;
+- enum pedit_cmd cmd = TCA_PEDIT_KEY_EX_CMD_SET;
+-
+- for (i = parms->tcfp_nkeys; i > 0; i--, tkey++) {
+- u32 *ptr, hdata;
+- int offset = tkey->off;
+- int hoffset;
+- u32 val;
+- int rc;
+-
+- if (tkey_ex) {
+- htype = tkey_ex->htype;
+- cmd = tkey_ex->cmd;
+-
+- tkey_ex++;
+- }
++ tkey = parms->tcfp_keys;
++ tkey_ex = parms->tcfp_keys_ex;
+
+- rc = pedit_skb_hdr_offset(skb, htype, &hoffset);
+- if (rc) {
+- pr_info("tc action pedit bad header type specified (0x%x)\n",
+- htype);
+- goto bad;
+- }
++ for (i = parms->tcfp_nkeys; i > 0; i--, tkey++) {
++ int offset = tkey->off;
++ u32 *ptr, hdata;
++ int hoffset;
++ u32 val;
++ int rc;
+
+- if (tkey->offmask) {
+- u8 *d, _d;
+-
+- if (!offset_valid(skb, hoffset + tkey->at)) {
+- pr_info("tc action pedit 'at' offset %d out of bounds\n",
+- hoffset + tkey->at);
+- goto bad;
+- }
+- d = skb_header_pointer(skb, hoffset + tkey->at,
+- sizeof(_d), &_d);
+- if (!d)
+- goto bad;
+- offset += (*d & tkey->offmask) >> tkey->shift;
+- }
++ if (tkey_ex) {
++ htype = tkey_ex->htype;
++ cmd = tkey_ex->cmd;
+
+- if (offset % 4) {
+- pr_info("tc action pedit offset must be on 32 bit boundaries\n");
+- goto bad;
+- }
++ tkey_ex++;
++ }
+
+- if (!offset_valid(skb, hoffset + offset)) {
+- pr_info("tc action pedit offset %d out of bounds\n",
+- hoffset + offset);
+- goto bad;
+- }
++ rc = pedit_skb_hdr_offset(skb, htype, &hoffset);
++ if (rc) {
++ pr_info("tc action pedit bad header type specified (0x%x)\n",
++ htype);
++ goto bad;
++ }
+
+- ptr = skb_header_pointer(skb, hoffset + offset,
+- sizeof(hdata), &hdata);
+- if (!ptr)
+- goto bad;
+- /* just do it, baby */
+- switch (cmd) {
+- case TCA_PEDIT_KEY_EX_CMD_SET:
+- val = tkey->val;
+- break;
+- case TCA_PEDIT_KEY_EX_CMD_ADD:
+- val = (*ptr + tkey->val) & ~tkey->mask;
+- break;
+- default:
+- pr_info("tc action pedit bad command (%d)\n",
+- cmd);
++ if (tkey->offmask) {
++ u8 *d, _d;
++
++ if (!offset_valid(skb, hoffset + tkey->at)) {
++ pr_info("tc action pedit 'at' offset %d out of bounds\n",
++ hoffset + tkey->at);
+ goto bad;
+ }
++ d = skb_header_pointer(skb, hoffset + tkey->at,
++ sizeof(_d), &_d);
++ if (!d)
++ goto bad;
++ offset += (*d & tkey->offmask) >> tkey->shift;
++ }
+
+- *ptr = ((*ptr & tkey->mask) ^ val);
+- if (ptr == &hdata)
+- skb_store_bits(skb, hoffset + offset, ptr, 4);
++ if (offset % 4) {
++ pr_info("tc action pedit offset must be on 32 bit boundaries\n");
++ goto bad;
+ }
+
+- goto done;
+- } else {
+- WARN(1, "pedit BUG: index %d\n", p->tcf_index);
++ if (!offset_valid(skb, hoffset + offset)) {
++ pr_info("tc action pedit offset %d out of bounds\n",
++ hoffset + offset);
++ goto bad;
++ }
++
++ ptr = skb_header_pointer(skb, hoffset + offset,
++ sizeof(hdata), &hdata);
++ if (!ptr)
++ goto bad;
++ /* just do it, baby */
++ switch (cmd) {
++ case TCA_PEDIT_KEY_EX_CMD_SET:
++ val = tkey->val;
++ break;
++ case TCA_PEDIT_KEY_EX_CMD_ADD:
++ val = (*ptr + tkey->val) & ~tkey->mask;
++ break;
++ default:
++ pr_info("tc action pedit bad command (%d)\n",
++ cmd);
++ goto bad;
++ }
++
++ *ptr = ((*ptr & tkey->mask) ^ val);
++ if (ptr == &hdata)
++ skb_store_bits(skb, hoffset + offset, ptr, 4);
+ }
+
++ goto done;
++
+ bad:
+ spin_lock(&p->tcf_lock);
+ p->tcf_qstats.overlimits++;
+--
+2.39.2
+
--- /dev/null
+From b3eb3335905ae7a33ea79fcd29abee359b74b6de Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Jun 2023 20:06:04 +0800
+Subject: net: tipc: resize nlattr array to correct size
+
+From: Lin Ma <linma@zju.edu.cn>
+
+[ Upstream commit 44194cb1b6045dea33ae9a0d54fb7e7cd93a2e09 ]
+
+According to nla_parse_nested_deprecated(), tb[] is supposed to be the
+destination array with maxtype+1 elements. The current
+tipc_nl_media_get() and __tipc_nl_media_set() use a larger array than
+necessary. This patch resizes them to the proper size.
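+
+For illustration only (hypothetical constants and a simplified parser,
+not the TIPC code): a parser that indexes the destination array by
+attribute type 0..maxtype needs exactly maxtype+1 slots, so sizing the
+array by a different, larger maximum only wastes stack:
+
+    #include <stdio.h>
+    #include <string.h>
+
+    /* Hypothetical maxima, mirroring the idea of TIPC_NLA_MEDIA_MAX
+     * vs. TIPC_NLA_BEARER_MAX.
+     */
+    #define MEDIA_MAX  3
+    #define BEARER_MAX 5
+
+    struct attr {
+        int type;
+        const char *value;
+    };
+
+    /* Simplified parse: store each attribute at tb[attr->type]. */
+    static void parse(const struct attr *attrs, int n,
+                      const char **tb, int maxtype)
+    {
+        int i;
+
+        memset(tb, 0, sizeof(*tb) * (maxtype + 1));
+        for (i = 0; i < n; i++)
+            if (attrs[i].type <= maxtype)
+                tb[attrs[i].type] = attrs[i].value;
+    }
+
+    int main(void)
+    {
+        const struct attr in[] = { { 1, "name" }, { MEDIA_MAX, "prop" } };
+        /* MEDIA_MAX + 1 slots cover every valid index 0..MEDIA_MAX;
+         * an array of BEARER_MAX + 1 works too, it is just larger
+         * than needed.
+         */
+        const char *tb[MEDIA_MAX + 1];
+
+        parse(in, 2, tb, MEDIA_MAX);
+        printf("%s %s\n", tb[1], tb[MEDIA_MAX]);
+        return 0;
+    }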
+
+Fixes: 1e55417d8fc6 ("tipc: add media set to new netlink api")
+Fixes: 46f15c6794fb ("tipc: add media get/dump to new netlink api")
+Signed-off-by: Lin Ma <linma@zju.edu.cn>
+Reviewed-by: Florian Westphal <fw@strlen.de>
+Reviewed-by: Tung Nguyen <tung.q.nguyen@dektech.com.au>
+Link: https://lore.kernel.org/r/20230614120604.1196377-1-linma@zju.edu.cn
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/tipc/bearer.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
+index 1048607a1528a..dcbae29aa7e0a 100644
+--- a/net/tipc/bearer.c
++++ b/net/tipc/bearer.c
+@@ -1258,7 +1258,7 @@ int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info)
+ struct tipc_nl_msg msg;
+ struct tipc_media *media;
+ struct sk_buff *rep;
+- struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
++ struct nlattr *attrs[TIPC_NLA_MEDIA_MAX + 1];
+
+ if (!info->attrs[TIPC_NLA_MEDIA])
+ return -EINVAL;
+@@ -1307,7 +1307,7 @@ int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
+ int err;
+ char *name;
+ struct tipc_media *m;
+- struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
++ struct nlattr *attrs[TIPC_NLA_MEDIA_MAX + 1];
+
+ if (!info->attrs[TIPC_NLA_MEDIA])
+ return -EINVAL;
+--
+2.39.2
+
--- /dev/null
+From acb490235d4aa45c868fbb232c89f96ba5cbad29 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Jun 2023 02:32:02 +0200
+Subject: netfilter: nf_tables: incorrect error path handling with
+ NFT_MSG_NEWRULE
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit 1240eb93f0616b21c675416516ff3d74798fdc97 ]
+
+In case of error when adding a new rule that refers to an anonymous set,
+deactivate expressions via NFT_TRANS_PREPARE state, not NFT_TRANS_RELEASE.
+Thus, the lookup expression marks anonymous sets as inactive in the next
+generation to ensure they are not reachable in this transaction anymore
+and decrements the set refcount as introduced by c1592a89942e ("netfilter:
+nf_tables: deactivate anonymous set from preparation phase"). The abort
+step takes care of undoing the anonymous set.
+
+This is also consistent with rule deletion, where NFT_TRANS_PREPARE is
+used. Note that this error path is exercised in the preparation step of
+the commit protocol. This patch replaces nf_tables_rule_release() by the
+deactivate and destroy calls, this time with NFT_TRANS_PREPARE.
+
+Due to this incorrect error handling, it is possible to access a
+dangling pointer to the anonymous set that remains in the transaction
+list.
+
+[1009.379054] BUG: KASAN: use-after-free in nft_set_lookup_global+0x147/0x1a0 [nf_tables]
+[1009.379106] Read of size 8 at addr ffff88816c4c8020 by task nft-rule-add/137110
+[1009.379116] CPU: 7 PID: 137110 Comm: nft-rule-add Not tainted 6.4.0-rc4+ #256
+[1009.379128] Call Trace:
+[1009.379132] <TASK>
+[1009.379135] dump_stack_lvl+0x33/0x50
+[1009.379146] ? nft_set_lookup_global+0x147/0x1a0 [nf_tables]
+[1009.379191] print_address_description.constprop.0+0x27/0x300
+[1009.379201] kasan_report+0x107/0x120
+[1009.379210] ? nft_set_lookup_global+0x147/0x1a0 [nf_tables]
+[1009.379255] nft_set_lookup_global+0x147/0x1a0 [nf_tables]
+[1009.379302] nft_lookup_init+0xa5/0x270 [nf_tables]
+[1009.379350] nf_tables_newrule+0x698/0xe50 [nf_tables]
+[1009.379397] ? nf_tables_rule_release+0xe0/0xe0 [nf_tables]
+[1009.379441] ? kasan_unpoison+0x23/0x50
+[1009.379450] nfnetlink_rcv_batch+0x97c/0xd90 [nfnetlink]
+[1009.379470] ? nfnetlink_rcv_msg+0x480/0x480 [nfnetlink]
+[1009.379485] ? __alloc_skb+0xb8/0x1e0
+[1009.379493] ? __alloc_skb+0xb8/0x1e0
+[1009.379502] ? entry_SYSCALL_64_after_hwframe+0x46/0xb0
+[1009.379509] ? unwind_get_return_address+0x2a/0x40
+[1009.379517] ? write_profile+0xc0/0xc0
+[1009.379524] ? avc_lookup+0x8f/0xc0
+[1009.379532] ? __rcu_read_unlock+0x43/0x60
+
+Fixes: 958bee14d071 ("netfilter: nf_tables: use new transaction infrastructure to handle sets")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_tables_api.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index a1f74fd97fb36..35b9f74f0bc61 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -3572,7 +3572,8 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
+ if (flow)
+ nft_flow_rule_destroy(flow);
+ err_release_rule:
+- nf_tables_rule_release(&ctx, rule);
++ nft_rule_expr_deactivate(&ctx, rule, NFT_TRANS_PREPARE);
++ nf_tables_rule_destroy(&ctx, rule);
+ err_release_expr:
+ for (i = 0; i < n; i++) {
+ if (expr_info[i].ops) {
+--
+2.39.2
+
--- /dev/null
+From ee4a6dac60d706bccfaa06c58cc274cf76a3cba9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Jun 2023 11:55:42 +0200
+Subject: netfilter: nf_tables: integrate pipapo into commit protocol
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit 212ed75dc5fb9d1423b3942c8f872a868cda3466 ]
+
+The pipapo set backend follows a copy-on-update approach, maintaining one
+clone of the existing data structure that is being updated. The clone
+and current data structures are swapped via rcu from the commit step.
+
+The existing integration with the commit protocol is flawed because
+there is no operation to clean up the clone if the transaction is
+aborted. Moreover, the data structure swap happens on set element
+activation.
+
+This patch adds two new operations for sets, commit and abort. These new
+operations are invoked from the commit and abort steps, after the
+transactions have been digested, and the pipapo set backend is updated
+to use them.
+
+This patch adds a new ->pending_update field to sets to maintain a list
+of sets that require these new commit and abort operations.
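+
+Purely as an illustration of the copy-on-update idea (a user-space
+sketch with hypothetical names, not the pipapo code): updates only touch
+a clone; commit publishes the clone, abort rebuilds it from the
+published copy:
+
+    #include <stdbool.h>
+    #include <stdio.h>
+    #include <stdlib.h>
+    #include <string.h>
+
+    /* Hypothetical set with one published match and one working clone. */
+    struct match { int nelems; };
+
+    struct set {
+        struct match *current;  /* what lookups see */
+        struct match *clone;    /* what updates modify */
+        bool dirty;             /* clone differs from current */
+    };
+
+    static struct match *clone_of(const struct match *m)
+    {
+        struct match *c = malloc(sizeof(*c));
+
+        if (c)
+            memcpy(c, m, sizeof(*c));
+        return c;
+    }
+
+    static void set_insert(struct set *s)
+    {
+        s->clone->nelems++;     /* only the clone is touched */
+        s->dirty = true;
+    }
+
+    /* Commit: publish the clone and start a fresh working copy. */
+    static void set_commit(struct set *s)
+    {
+        struct match *old = s->current;
+
+        if (!s->dirty)
+            return;
+        s->current = s->clone;
+        s->dirty = false;
+        free(old);
+        s->clone = clone_of(s->current);
+    }
+
+    /* Abort: throw the clone away and re-clone the published copy. */
+    static void set_abort(struct set *s)
+    {
+        struct match *fresh;
+
+        if (!s->dirty)
+            return;
+        fresh = clone_of(s->current);
+        if (!fresh)
+            return;
+        free(s->clone);
+        s->clone = fresh;
+        s->dirty = false;
+    }
+
+    int main(void)
+    {
+        struct match init = { .nelems = 0 };
+        struct set s = { .current = clone_of(&init),
+                         .clone = clone_of(&init), .dirty = false };
+
+        if (!s.current || !s.clone)
+            return 1;
+
+        set_insert(&s);
+        set_abort(&s);          /* lookups never saw the insertion */
+        printf("current=%d clone=%d\n", s.current->nelems, s.clone->nelems);
+
+        set_insert(&s);
+        set_commit(&s);         /* now they do */
+        printf("current=%d clone=%d\n", s.current->nelems, s.clone->nelems);
+
+        free(s.current);
+        free(s.clone);
+        return 0;
+    }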
+
+Fixes: 3c4287f62044 ("nf_tables: Add set type for arbitrary concatenation of ranges")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/netfilter/nf_tables.h | 4 ++-
+ net/netfilter/nf_tables_api.c | 56 +++++++++++++++++++++++++++++++
+ net/netfilter/nft_set_pipapo.c | 55 +++++++++++++++++++++---------
+ 3 files changed, 99 insertions(+), 16 deletions(-)
+
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index 22f67ae935e0b..8bac5a5ca0f11 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -427,7 +427,8 @@ struct nft_set_ops {
+ const struct nft_set *set,
+ const struct nft_set_elem *elem,
+ unsigned int flags);
+-
++ void (*commit)(const struct nft_set *set);
++ void (*abort)(const struct nft_set *set);
+ u64 (*privsize)(const struct nlattr * const nla[],
+ const struct nft_set_desc *desc);
+ bool (*estimate)(const struct nft_set_desc *desc,
+@@ -522,6 +523,7 @@ struct nft_set {
+ u16 policy;
+ u16 udlen;
+ unsigned char *udata;
++ struct list_head pending_update;
+ /* runtime data below here */
+ const struct nft_set_ops *ops ____cacheline_aligned;
+ u16 flags:14,
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index f20244a91d781..a1f74fd97fb36 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -4633,6 +4633,7 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
+
+ set->num_exprs = num_exprs;
+ set->handle = nf_tables_alloc_handle(table);
++ INIT_LIST_HEAD(&set->pending_update);
+
+ err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
+ if (err < 0)
+@@ -8786,10 +8787,25 @@ static void nf_tables_commit_audit_log(struct list_head *adl, u32 generation)
+ }
+ }
+
++static void nft_set_commit_update(struct list_head *set_update_list)
++{
++ struct nft_set *set, *next;
++
++ list_for_each_entry_safe(set, next, set_update_list, pending_update) {
++ list_del_init(&set->pending_update);
++
++ if (!set->ops->commit)
++ continue;
++
++ set->ops->commit(set);
++ }
++}
++
+ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+ {
+ struct nftables_pernet *nft_net = nft_pernet(net);
+ struct nft_trans *trans, *next;
++ LIST_HEAD(set_update_list);
+ struct nft_trans_elem *te;
+ struct nft_chain *chain;
+ struct nft_table *table;
+@@ -8948,6 +8964,11 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+ nf_tables_setelem_notify(&trans->ctx, te->set,
+ &te->elem,
+ NFT_MSG_NEWSETELEM);
++ if (te->set->ops->commit &&
++ list_empty(&te->set->pending_update)) {
++ list_add_tail(&te->set->pending_update,
++ &set_update_list);
++ }
+ nft_trans_destroy(trans);
+ break;
+ case NFT_MSG_DELSETELEM:
+@@ -8961,6 +8982,11 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+ atomic_dec(&te->set->nelems);
+ te->set->ndeact--;
+ }
++ if (te->set->ops->commit &&
++ list_empty(&te->set->pending_update)) {
++ list_add_tail(&te->set->pending_update,
++ &set_update_list);
++ }
+ break;
+ case NFT_MSG_NEWOBJ:
+ if (nft_trans_obj_update(trans)) {
+@@ -9021,6 +9047,8 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+ }
+ }
+
++ nft_set_commit_update(&set_update_list);
++
+ nft_commit_notify(net, NETLINK_CB(skb).portid);
+ nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN);
+ nf_tables_commit_audit_log(&adl, nft_net->base_seq);
+@@ -9077,10 +9105,25 @@ static void nf_tables_abort_release(struct nft_trans *trans)
+ kfree(trans);
+ }
+
++static void nft_set_abort_update(struct list_head *set_update_list)
++{
++ struct nft_set *set, *next;
++
++ list_for_each_entry_safe(set, next, set_update_list, pending_update) {
++ list_del_init(&set->pending_update);
++
++ if (!set->ops->abort)
++ continue;
++
++ set->ops->abort(set);
++ }
++}
++
+ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ {
+ struct nftables_pernet *nft_net = nft_pernet(net);
+ struct nft_trans *trans, *next;
++ LIST_HEAD(set_update_list);
+ struct nft_trans_elem *te;
+
+ if (action == NFNL_ABORT_VALIDATE &&
+@@ -9178,6 +9221,12 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ nft_setelem_remove(net, te->set, &te->elem);
+ if (!nft_setelem_is_catchall(te->set, &te->elem))
+ atomic_dec(&te->set->nelems);
++
++ if (te->set->ops->abort &&
++ list_empty(&te->set->pending_update)) {
++ list_add_tail(&te->set->pending_update,
++ &set_update_list);
++ }
+ break;
+ case NFT_MSG_DELSETELEM:
+ te = (struct nft_trans_elem *)trans->data;
+@@ -9187,6 +9236,11 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ if (!nft_setelem_is_catchall(te->set, &te->elem))
+ te->set->ndeact--;
+
++ if (te->set->ops->abort &&
++ list_empty(&te->set->pending_update)) {
++ list_add_tail(&te->set->pending_update,
++ &set_update_list);
++ }
+ nft_trans_destroy(trans);
+ break;
+ case NFT_MSG_NEWOBJ:
+@@ -9227,6 +9281,8 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ }
+ }
+
++ nft_set_abort_update(&set_update_list);
++
+ synchronize_rcu();
+
+ list_for_each_entry_safe_reverse(trans, next,
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index 06d46d1826347..15e451dc3fc46 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -1600,17 +1600,10 @@ static void pipapo_free_fields(struct nft_pipapo_match *m)
+ }
+ }
+
+-/**
+- * pipapo_reclaim_match - RCU callback to free fields from old matching data
+- * @rcu: RCU head
+- */
+-static void pipapo_reclaim_match(struct rcu_head *rcu)
++static void pipapo_free_match(struct nft_pipapo_match *m)
+ {
+- struct nft_pipapo_match *m;
+ int i;
+
+- m = container_of(rcu, struct nft_pipapo_match, rcu);
+-
+ for_each_possible_cpu(i)
+ kfree(*per_cpu_ptr(m->scratch, i));
+
+@@ -1625,7 +1618,19 @@ static void pipapo_reclaim_match(struct rcu_head *rcu)
+ }
+
+ /**
+- * pipapo_commit() - Replace lookup data with current working copy
++ * pipapo_reclaim_match - RCU callback to free fields from old matching data
++ * @rcu: RCU head
++ */
++static void pipapo_reclaim_match(struct rcu_head *rcu)
++{
++ struct nft_pipapo_match *m;
++
++ m = container_of(rcu, struct nft_pipapo_match, rcu);
++ pipapo_free_match(m);
++}
++
++/**
++ * nft_pipapo_commit() - Replace lookup data with current working copy
+ * @set: nftables API set representation
+ *
+ * While at it, check if we should perform garbage collection on the working
+@@ -1635,7 +1640,7 @@ static void pipapo_reclaim_match(struct rcu_head *rcu)
+ * We also need to create a new working copy for subsequent insertions and
+ * deletions.
+ */
+-static void pipapo_commit(const struct nft_set *set)
++static void nft_pipapo_commit(const struct nft_set *set)
+ {
+ struct nft_pipapo *priv = nft_set_priv(set);
+ struct nft_pipapo_match *new_clone, *old;
+@@ -1660,6 +1665,26 @@ static void pipapo_commit(const struct nft_set *set)
+ priv->clone = new_clone;
+ }
+
++static void nft_pipapo_abort(const struct nft_set *set)
++{
++ struct nft_pipapo *priv = nft_set_priv(set);
++ struct nft_pipapo_match *new_clone, *m;
++
++ if (!priv->dirty)
++ return;
++
++ m = rcu_dereference(priv->match);
++
++ new_clone = pipapo_clone(m);
++ if (IS_ERR(new_clone))
++ return;
++
++ priv->dirty = false;
++
++ pipapo_free_match(priv->clone);
++ priv->clone = new_clone;
++}
++
+ /**
+ * nft_pipapo_activate() - Mark element reference as active given key, commit
+ * @net: Network namespace
+@@ -1667,8 +1692,7 @@ static void pipapo_commit(const struct nft_set *set)
+ * @elem: nftables API element representation containing key data
+ *
+ * On insertion, elements are added to a copy of the matching data currently
+- * in use for lookups, and not directly inserted into current lookup data, so
+- * we'll take care of that by calling pipapo_commit() here. Both
++ * in use for lookups, and not directly inserted into current lookup data. Both
+ * nft_pipapo_insert() and nft_pipapo_activate() are called once for each
+ * element, hence we can't purpose either one as a real commit operation.
+ */
+@@ -1684,8 +1708,6 @@ static void nft_pipapo_activate(const struct net *net,
+
+ nft_set_elem_change_active(net, set, &e->ext);
+ nft_set_elem_clear_busy(&e->ext);
+-
+- pipapo_commit(set);
+ }
+
+ /**
+@@ -1931,7 +1953,6 @@ static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
+ if (i == m->field_count) {
+ priv->dirty = true;
+ pipapo_drop(m, rulemap);
+- pipapo_commit(set);
+ return;
+ }
+
+@@ -2230,6 +2251,8 @@ const struct nft_set_type nft_set_pipapo_type = {
+ .init = nft_pipapo_init,
+ .destroy = nft_pipapo_destroy,
+ .gc_init = nft_pipapo_gc_init,
++ .commit = nft_pipapo_commit,
++ .abort = nft_pipapo_abort,
+ .elemsize = offsetof(struct nft_pipapo_elem, ext),
+ },
+ };
+@@ -2252,6 +2275,8 @@ const struct nft_set_type nft_set_pipapo_avx2_type = {
+ .init = nft_pipapo_init,
+ .destroy = nft_pipapo_destroy,
+ .gc_init = nft_pipapo_gc_init,
++ .commit = nft_pipapo_commit,
++ .abort = nft_pipapo_abort,
+ .elemsize = offsetof(struct nft_pipapo_elem, ext),
+ },
+ };
+--
+2.39.2
+
--- /dev/null
+From a070d74fae2b4ee25ff2082726c4cf26b1451738 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Jun 2023 00:19:12 +0200
+Subject: netfilter: nfnetlink: skip error delivery on batch in case of ENOMEM
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit a1a64a151dae8ac3581c1cbde44b672045cb658b ]
+
+If the caller reports ENOMEM, then stop iterating over the batch and send a
+single netlink message to userspace to report OOM.
+
+Fixes: cbb8125eb40b ("netfilter: nfnetlink: deliver netlink errors on batch completion")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nfnetlink.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
+index 4d7a2a7bbd434..85c74d46cd23a 100644
+--- a/net/netfilter/nfnetlink.c
++++ b/net/netfilter/nfnetlink.c
+@@ -528,7 +528,8 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
+ * processed, this avoids that the same error is
+ * reported several times when replaying the batch.
+ */
+- if (nfnl_err_add(&err_list, nlh, err, &extack) < 0) {
++ if (err == -ENOMEM ||
++ nfnl_err_add(&err_list, nlh, err, &extack) < 0) {
+ /* We failed to enqueue an error, reset the
+ * list of errors and send OOM to userspace
+ * pointing to the batch header.
+--
+2.39.2
+
--- /dev/null
+From c35de945638b606c64840573b6afbca6eb9517e2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Jun 2023 17:12:01 +0530
+Subject: octeontx2-af: fix lbk link credits on cn10k
+
+From: Nithin Dabilpuram <ndabilpuram@marvell.com>
+
+[ Upstream commit 87e12a17eef476bbf768dc3a74419ad461f36fbc ]
+
+Fix the LBK link credits on CN10K to be the same as on CN9K, i.e.
+16 * MAX_LBK_DATA_RATE, instead of the current scheme of
+calculating them based on the LBK buffer length / FIFO size.
+
+Fixes: 6e54e1c5399a ("octeontx2-af: cn10K: Add MTU configuration")
+Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
+Signed-off-by: Naveen Mamindlapalli <naveenm@marvell.com>
+Reviewed-by: Sridhar Samudrala <sridhar.samudrala@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index 6ea14c8bd59b8..dee2f2086bb5d 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -4067,10 +4067,6 @@ int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
+
+ static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
+ {
+- /* CN10k supports 72KB FIFO size and max packet size of 64k */
+- if (rvu->hw->lbk_bufsize == 0x12000)
+- return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16;
+-
+ return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
+ }
+
+--
+2.39.2
+
--- /dev/null
+From 370143a59722629d613fa1ad622d54eb29f30a63 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Jun 2023 17:12:00 +0530
+Subject: octeontx2-af: fixed resource availability check
+
+From: Satha Rao <skoteshwar@marvell.com>
+
+[ Upstream commit 4e635f9d86165e47f5440196f2ebdb258efb8341 ]
+
+The txschq_alloc response has two different arrays to store contiguous
+and non-contiguous schedulers of each level. The requested count should
+be checked for each array separately.
+
+Fixes: 5d9b976d4480 ("octeontx2-af: Support fixed transmit scheduler topology")
+Signed-off-by: Satha Rao <skoteshwar@marvell.com>
+Signed-off-by: Sunil Kovvuri Goutham <sgoutham@marvell.com>
+Signed-off-by: Naveen Mamindlapalli <naveenm@marvell.com>
+Reviewed-by: Sridhar Samudrala <sridhar.samudrala@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index d274d552924a3..6ea14c8bd59b8 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -1885,7 +1885,8 @@ static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
+ free_cnt = rvu_rsrc_free_count(&txsch->schq);
+ }
+
+- if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
++ if (free_cnt < req_schq || req->schq[lvl] > MAX_TXSCHQ_PER_FUNC ||
++ req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC)
+ return NIX_AF_ERR_TLX_ALLOC_FAIL;
+
+ /* If contiguous queues are needed, check for availability */
+--
+2.39.2
+
--- /dev/null
+From 20acc6cdf2603e8611584382ef191a45e87662d9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Jun 2023 18:05:02 +0200
+Subject: ping6: Fix send to link-local addresses with VRF.
+
+From: Guillaume Nault <gnault@redhat.com>
+
+[ Upstream commit 91ffd1bae1dafbb9e34b46813f5b058581d9144d ]
+
+Ping sockets can't send packets when they're bound to a VRF master
+device and the output interface is set to a slave device.
+
+For example, when net.ipv4.ping_group_range is properly set, so that
+ping6 can use ping sockets, the following kind of commands fails:
+ $ ip vrf exec red ping6 fe80::854:e7ff:fe88:4bf1%eth1
+
+What happens is that sk->sk_bound_dev_if is set to the VRF master
+device, but 'oif' is set to the real output device. Since both are set
+but different, ping_v6_sendmsg() sees their value as inconsistent and
+fails.
+
+Fix this by allowing 'oif' to be a slave device of ->sk_bound_dev_if.
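+
+A user-space sketch of the intended check (the helper below is a
+hypothetical stand-in for l3mdev_master_ifindex_by_index(), not the
+kernel code): an output interface is acceptable if it matches the bound
+device or if its VRF master does:
+
+    #include <stdbool.h>
+    #include <stdio.h>
+
+    /* Pretend ifindex 3 is enslaved to VRF master ifindex 10. */
+    static int master_of(int ifindex)
+    {
+        return ifindex == 3 ? 10 : 0;
+    }
+
+    static bool oif_ok(int oif, int bound)
+    {
+        if (!oif || !bound)
+            return true;    /* nothing to cross-check */
+        return oif == bound || master_of(oif) == bound;
+    }
+
+    int main(void)
+    {
+        printf("slave of bound VRF: %d\n", oif_ok(3, 10));  /* 1 */
+        printf("unrelated device:   %d\n", oif_ok(4, 10));  /* 0 */
+        return 0;
+    }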
+
+This fixes the following kselftest failure:
+ $ ./fcnal-test.sh -t ipv6_ping
+ [...]
+ TEST: ping out, vrf device+address bind - ns-B IPv6 LLA [FAIL]
+
+Reported-by: Mirsad Todorovac <mirsad.todorovac@alu.unizg.hr>
+Closes: https://lore.kernel.org/netdev/b6191f90-ffca-dbca-7d06-88a9788def9c@alu.unizg.hr/
+Tested-by: Mirsad Todorovac <mirsad.todorovac@alu.unizg.hr>
+Fixes: 5e457896986e ("net: ipv6: Fix ping to link-local addresses.")
+Signed-off-by: Guillaume Nault <gnault@redhat.com>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Link: https://lore.kernel.org/r/6c8b53108816a8d0d5705ae37bdc5a8322b5e3d9.1686153846.git.gnault@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/ping.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
+index 6ac88fe24a8e0..7fab29f3ce6e8 100644
+--- a/net/ipv6/ping.c
++++ b/net/ipv6/ping.c
+@@ -96,7 +96,8 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ addr_type = ipv6_addr_type(daddr);
+ if ((__ipv6_addr_needs_scope_id(addr_type) && !oif) ||
+ (addr_type & IPV6_ADDR_MAPPED) ||
+- (oif && sk->sk_bound_dev_if && oif != sk->sk_bound_dev_if))
++ (oif && sk->sk_bound_dev_if && oif != sk->sk_bound_dev_if &&
++ l3mdev_master_ifindex_by_index(sock_net(sk), oif) != sk->sk_bound_dev_if))
+ return -EINVAL;
+
+ /* TODO: use ip6_datagram_send_ctl to get options from cmsg */
+--
+2.39.2
+
--- /dev/null
+From 30efe24054cbcad12aedfa572d0f93e4253faba2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jun 2023 13:33:23 +0300
+Subject: RDMA/cma: Always set static rate to 0 for RoCE
+
+From: Mark Zhang <markzhang@nvidia.com>
+
+[ Upstream commit 58030c76cce473b6cfd630bbecb97215def0dff8 ]
+
+Set static rate to 0 as it should be discovered by path query and
+has no meaning for RoCE.
+This also avoids using the rtnl lock and ethtool API, which is
+a bottleneck when trying to set up many rdma-cm connections at the same
+time, especially with multiple processes.
+
+Fixes: 3c86aa70bf67 ("RDMA/cm: Add RDMA CM support for IBoE devices")
+Signed-off-by: Mark Zhang <markzhang@nvidia.com>
+Link: https://lore.kernel.org/r/f72a4f8b667b803aee9fa794069f61afb5839ce4.1685960567.git.leon@kernel.org
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/core/cma.c | 4 ++--
+ include/rdma/ib_addr.h | 23 -----------------------
+ 2 files changed, 2 insertions(+), 25 deletions(-)
+
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index c66d8bf405854..044f9d44001bb 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -3113,7 +3113,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
+ route->path_rec->traffic_class = tos;
+ route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
+ route->path_rec->rate_selector = IB_SA_EQ;
+- route->path_rec->rate = iboe_get_rate(ndev);
++ route->path_rec->rate = IB_RATE_PORT_CURRENT;
+ dev_put(ndev);
+ route->path_rec->packet_life_time_selector = IB_SA_EQ;
+ /* In case ACK timeout is set, use this value to calculate
+@@ -4770,7 +4770,7 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
+ if (!ndev)
+ return -ENODEV;
+
+- ib.rec.rate = iboe_get_rate(ndev);
++ ib.rec.rate = IB_RATE_PORT_CURRENT;
+ ib.rec.hop_limit = 1;
+ ib.rec.mtu = iboe_get_mtu(ndev->mtu);
+
+diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
+index d808dc3d239e8..811a0f11d0dbe 100644
+--- a/include/rdma/ib_addr.h
++++ b/include/rdma/ib_addr.h
+@@ -194,29 +194,6 @@ static inline enum ib_mtu iboe_get_mtu(int mtu)
+ return 0;
+ }
+
+-static inline int iboe_get_rate(struct net_device *dev)
+-{
+- struct ethtool_link_ksettings cmd;
+- int err;
+-
+- rtnl_lock();
+- err = __ethtool_get_link_ksettings(dev, &cmd);
+- rtnl_unlock();
+- if (err)
+- return IB_RATE_PORT_CURRENT;
+-
+- if (cmd.base.speed >= 40000)
+- return IB_RATE_40_GBPS;
+- else if (cmd.base.speed >= 30000)
+- return IB_RATE_30_GBPS;
+- else if (cmd.base.speed >= 20000)
+- return IB_RATE_20_GBPS;
+- else if (cmd.base.speed >= 10000)
+- return IB_RATE_10_GBPS;
+- else
+- return IB_RATE_PORT_CURRENT;
+-}
+-
+ static inline int rdma_link_local_addr(struct in6_addr *addr)
+ {
+ if (addr->s6_addr32[0] == htonl(0xfe800000) &&
+--
+2.39.2
+
--- /dev/null
+From 34d1a6482730e28ac4724e67ad1ceae81513ec2a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jun 2023 13:33:17 +0300
+Subject: RDMA/mlx5: Initiate dropless RQ for RAW Ethernet functions
+
+From: Maher Sanalla <msanalla@nvidia.com>
+
+[ Upstream commit ee4d269eccfea6c17b18281bef482700d898e86f ]
+
+Delay drop data is initiated for PFs that have the rq_delay_drop
+capability and are in the RoCE profile.
+
+However, PFs with the RAW Ethernet profile do not initiate delay drop
+data on function load, causing a kernel panic if delay drop struct
+members are accessed later on when a dropless RQ is created.
+
+Thus, stage the delay drop initialization as part of the RAW Ethernet
+PF loading process.
+
+Fixes: b5ca15ad7e61 ("IB/mlx5: Add proper representors support")
+Signed-off-by: Maher Sanalla <msanalla@nvidia.com>
+Reviewed-by: Maor Gottlieb <maorg@nvidia.com>
+Link: https://lore.kernel.org/r/2e9d386785043d48c38711826eb910315c1de141.1685960567.git.leon@kernel.org
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/mlx5/main.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 2361caa385471..0ebd3c7b2d2a3 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -4376,6 +4376,9 @@ const struct mlx5_ib_profile raw_eth_profile = {
+ STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
+ mlx5_ib_stage_post_ib_reg_umr_init,
+ NULL),
++ STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
++ mlx5_ib_stage_delay_drop_init,
++ mlx5_ib_stage_delay_drop_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_RESTRACK,
+ mlx5_ib_restrack_init,
+ NULL),
+--
+2.39.2
+
--- /dev/null
+From 0a68fbcbb095a596a9eba3561a50397cbfe5aa56 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Apr 2023 01:02:43 +0000
+Subject: RDMA/rtrs: Fix rxe_dealloc_pd warning
+
+From: Li Zhijian <lizhijian@fujitsu.com>
+
+[ Upstream commit 9c29c8c7df0688f358d2df5ddd16c97c2f7292b4 ]
+
+In the current design:
+1. The PD and clt_path->s.dev are shared among connections.
+2. Every con[n]'s cleanup phase calls destroy_con_cq_qp().
+3. clt_path->s.dev is always decreased in destroy_con_cq_qp(), and
+   when clt_path->s.dev becomes zero, the PD is destroyed.
+4. When con[1] fails to create, con[1] does not take clt_path->s.dev,
+   but it still tries to decrease clt_path->s.dev.
+
+So, in case create_cm(con[0]) succeeds but create_cm(con[1]) fails,
+destroy_con_cq_qp(con[1]) will be called first, which will destroy the PD
+while this PD is still taken by con[0].
+
+Here, we refactor the error path of create_cm() and init_conns(), so that
+we do the cleanup in the order they are created.
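+
+For illustration only (a user-space sketch with hypothetical names, not
+the rtrs code): when creation of the i-th object fails, walk the array
+from the start and tear down only the pieces that actually exist, in
+creation order:
+
+    #include <stdio.h>
+    #include <stdlib.h>
+
+    #define NCON 4
+
+    /* Hypothetical connection: the "cm" part may or may not exist yet. */
+    struct con {
+        int has_cm;
+    };
+
+    static struct con *create_con(void)
+    {
+        return calloc(1, sizeof(struct con));
+    }
+
+    static int create_cm(struct con *c, int cid)
+    {
+        if (cid == 2)
+            return -1;  /* simulate a failure on the third connection */
+        c->has_cm = 1;
+        return 0;
+    }
+
+    int main(void)
+    {
+        struct con *cons[NCON] = { NULL };
+        int cid, i, err = 0;
+
+        for (cid = 0; cid < NCON; cid++) {
+            cons[cid] = create_con();
+            if (!cons[cid] || create_cm(cons[cid], cid)) {
+                err = -1;
+                break;
+            }
+        }
+
+        if (err) {
+            /* Clean up in the order the objects were created. */
+            for (i = 0; i <= cid && i < NCON; i++) {
+                if (!cons[i])
+                    break;
+                if (cons[i]->has_cm)
+                    printf("destroy cm %d\n", i);
+                free(cons[i]);
+            }
+        }
+        return 0;
+    }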
+
+The warning occurs when destroying RXE PD whose reference count is not
+zero.
+
+ rnbd_client L597: Mapping device /dev/nvme0n1 on session client, (access_mode: rw, nr_poll_queues: 0)
+ ------------[ cut here ]------------
+ WARNING: CPU: 0 PID: 26407 at drivers/infiniband/sw/rxe/rxe_pool.c:256 __rxe_cleanup+0x13a/0x170 [rdma_rxe]
+ Modules linked in: rpcrdma rdma_ucm ib_iser rnbd_client libiscsi rtrs_client scsi_transport_iscsi rtrs_core rdma_cm iw_cm ib_cm crc32_generic rdma_rxe udp_tunnel ib_uverbs ib_core kmem device_dax nd_pmem dax_pmem nd_vme crc32c_intel fuse nvme_core nfit libnvdimm dm_multipath scsi_dh_rdac scsi_dh_emc scsi_dh_alua dm_mirror dm_region_hash dm_log dm_mod
+ CPU: 0 PID: 26407 Comm: rnbd-client.sh Kdump: loaded Not tainted 6.2.0-rc6-roce-flush+ #53
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.0-0-gd239552ce722-prebuilt.qemu.org 04/01/2014
+ RIP: 0010:__rxe_cleanup+0x13a/0x170 [rdma_rxe]
+ Code: 45 84 e4 0f 84 5a ff ff ff 48 89 ef e8 5f 18 71 f9 84 c0 75 90 be c8 00 00 00 48 89 ef e8 be 89 1f fa 85 c0 0f 85 7b ff ff ff <0f> 0b 41 bc ea ff ff ff e9 71 ff ff ff e8 84 7f 1f fa e9 d0 fe ff
+ RSP: 0018:ffffb09880b6f5f0 EFLAGS: 00010246
+ RAX: 0000000000000000 RBX: ffff99401f15d6a8 RCX: 0000000000000000
+ RDX: 0000000000000001 RSI: ffffffffbac8234b RDI: 00000000ffffffff
+ RBP: ffff99401f15d6d0 R08: 0000000000000001 R09: 0000000000000001
+ R10: 0000000000002d82 R11: 0000000000000000 R12: 0000000000000001
+ R13: ffff994101eff208 R14: ffffb09880b6f6a0 R15: 00000000fffffe00
+ FS: 00007fe113904740(0000) GS:ffff99413bc00000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 00007ff6cde656c8 CR3: 000000001f108004 CR4: 00000000001706f0
+ DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+ DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+ Call Trace:
+ <TASK>
+ rxe_dealloc_pd+0x16/0x20 [rdma_rxe]
+ ib_dealloc_pd_user+0x4b/0x80 [ib_core]
+ rtrs_ib_dev_put+0x79/0xd0 [rtrs_core]
+ destroy_con_cq_qp+0x8a/0xa0 [rtrs_client]
+ init_path+0x1e7/0x9a0 [rtrs_client]
+ ? __pfx_autoremove_wake_function+0x10/0x10
+ ? lock_is_held_type+0xd7/0x130
+ ? rcu_read_lock_sched_held+0x43/0x80
+ ? pcpu_alloc+0x3dd/0x7d0
+ ? rtrs_clt_init_stats+0x18/0x40 [rtrs_client]
+ rtrs_clt_open+0x24f/0x5a0 [rtrs_client]
+ ? __pfx_rnbd_clt_link_ev+0x10/0x10 [rnbd_client]
+ rnbd_clt_map_device+0x6a5/0xe10 [rnbd_client]
+
+Fixes: 6a98d71daea1 ("RDMA/rtrs: client: main functionality")
+Link: https://lore.kernel.org/r/1682384563-2-4-git-send-email-lizhijian@fujitsu.com
+Signed-off-by: Li Zhijian <lizhijian@fujitsu.com>
+Acked-by: Jack Wang <jinpu.wang@ionos.com>
+Tested-by: Jack Wang <jinpu.wang@ionos.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/ulp/rtrs/rtrs-clt.c | 55 +++++++++++---------------
+ 1 file changed, 23 insertions(+), 32 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+index 54eb6556c63db..afe8670f9e555 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+@@ -2028,6 +2028,7 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ return 0;
+ }
+
++/* The caller should do the cleanup in case of error */
+ static int create_cm(struct rtrs_clt_con *con)
+ {
+ struct rtrs_path *s = con->c.path;
+@@ -2050,14 +2051,14 @@ static int create_cm(struct rtrs_clt_con *con)
+ err = rdma_set_reuseaddr(cm_id, 1);
+ if (err != 0) {
+ rtrs_err(s, "Set address reuse failed, err: %d\n", err);
+- goto destroy_cm;
++ return err;
+ }
+ err = rdma_resolve_addr(cm_id, (struct sockaddr *)&clt_path->s.src_addr,
+ (struct sockaddr *)&clt_path->s.dst_addr,
+ RTRS_CONNECT_TIMEOUT_MS);
+ if (err) {
+ rtrs_err(s, "Failed to resolve address, err: %d\n", err);
+- goto destroy_cm;
++ return err;
+ }
+ /*
+ * Combine connection status and session events. This is needed
+@@ -2072,29 +2073,15 @@ static int create_cm(struct rtrs_clt_con *con)
+ if (err == 0)
+ err = -ETIMEDOUT;
+ /* Timedout or interrupted */
+- goto errr;
+- }
+- if (con->cm_err < 0) {
+- err = con->cm_err;
+- goto errr;
++ return err;
+ }
+- if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTING) {
++ if (con->cm_err < 0)
++ return con->cm_err;
++ if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTING)
+ /* Device removal */
+- err = -ECONNABORTED;
+- goto errr;
+- }
++ return -ECONNABORTED;
+
+ return 0;
+-
+-errr:
+- stop_cm(con);
+- mutex_lock(&con->con_mutex);
+- destroy_con_cq_qp(con);
+- mutex_unlock(&con->con_mutex);
+-destroy_cm:
+- destroy_cm(con);
+-
+- return err;
+ }
+
+ static void rtrs_clt_path_up(struct rtrs_clt_path *clt_path)
+@@ -2331,7 +2318,7 @@ static void rtrs_clt_close_work(struct work_struct *work)
+ static int init_conns(struct rtrs_clt_path *clt_path)
+ {
+ unsigned int cid;
+- int err;
++ int err, i;
+
+ /*
+ * On every new session connections increase reconnect counter
+@@ -2347,10 +2334,8 @@ static int init_conns(struct rtrs_clt_path *clt_path)
+ goto destroy;
+
+ err = create_cm(to_clt_con(clt_path->s.con[cid]));
+- if (err) {
+- destroy_con(to_clt_con(clt_path->s.con[cid]));
++ if (err)
+ goto destroy;
+- }
+ }
+ err = alloc_path_reqs(clt_path);
+ if (err)
+@@ -2361,15 +2346,21 @@ static int init_conns(struct rtrs_clt_path *clt_path)
+ return 0;
+
+ destroy:
+- while (cid--) {
+- struct rtrs_clt_con *con = to_clt_con(clt_path->s.con[cid]);
++ /* Make sure we do the cleanup in the order they are created */
++ for (i = 0; i <= cid; i++) {
++ struct rtrs_clt_con *con;
+
+- stop_cm(con);
++ if (!clt_path->s.con[i])
++ break;
+
+- mutex_lock(&con->con_mutex);
+- destroy_con_cq_qp(con);
+- mutex_unlock(&con->con_mutex);
+- destroy_cm(con);
++ con = to_clt_con(clt_path->s.con[i]);
++ if (con->c.cm_id) {
++ stop_cm(con);
++ mutex_lock(&con->con_mutex);
++ destroy_con_cq_qp(con);
++ mutex_unlock(&con->con_mutex);
++ destroy_cm(con);
++ }
+ destroy_con(con);
+ }
+ /*
+--
+2.39.2
+
--- /dev/null
+From b4a73d9031378a1b8dfceec6ac378280df92e4c9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Apr 2023 01:02:42 +0000
+Subject: RDMA/rtrs: Fix the last iu->buf leak in err path
+
+From: Li Zhijian <lizhijian@fujitsu.com>
+
+[ Upstream commit 3bf3a7c6985c625f64e73baefdaa36f1c2045a29 ]
+
+The last iu->buf will leak if ib_dma_mapping_error() fails.
+
+Fixes: c0894b3ea69d ("RDMA/rtrs: core: lib functions shared between client and server modules")
+Link: https://lore.kernel.org/r/1682384563-2-3-git-send-email-lizhijian@fujitsu.com
+Signed-off-by: Li Zhijian <lizhijian@fujitsu.com>
+Acked-by: Guoqing Jiang <guoqing.jiang@linux.dev>
+Acked-by: Jack Wang <jinpu.wang@ionos.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/ulp/rtrs/rtrs.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
+index 4da889103a5ff..4745f33d7104a 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs.c
+@@ -37,8 +37,10 @@ struct rtrs_iu *rtrs_iu_alloc(u32 iu_num, size_t size, gfp_t gfp_mask,
+ goto err;
+
+ iu->dma_addr = ib_dma_map_single(dma_dev, iu->buf, size, dir);
+- if (ib_dma_mapping_error(dma_dev, iu->dma_addr))
++ if (ib_dma_mapping_error(dma_dev, iu->dma_addr)) {
++ kfree(iu->buf);
+ goto err;
++ }
+
+ iu->cqe.done = done;
+ iu->size = size;
+--
+2.39.2
+
--- /dev/null
+From 8fdc0d7a9a2b412372799422250515d5bf2e5879 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 May 2023 12:22:42 -0500
+Subject: RDMA/rxe: Fix packet length checks
+
+From: Bob Pearson <rpearsonhpe@gmail.com>
+
+[ Upstream commit 9a3763e87379c97a78b7c6c6f40720b1e877174f ]
+
+In rxe_net.c a received packet, from udp or loopback, is passed to
+rxe_rcv() in rxe_recv.c as a udp packet. I.e. skb->data is pointing at the
+udp header. But rxe_rcv() makes length checks to verify the packet is long
+enough to hold the roce headers as if it were a roce
+packet, i.e. as if skb->data were pointing at the bth header. A runt
+packet would appear to have 8 more bytes than it actually does, which
+may lead to incorrect behavior.
+
+This patch calls skb_pull() to adjust the skb to point at the bth header
+before calling rxe_rcv() which fixes this error.
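+
+A user-space sketch of the off-by-a-header problem (hypothetical
+constants, not the rxe code): if the length check assumes the buffer
+already starts at the bth but the data still includes the 8-byte udp
+header, a runt packet looks 8 bytes longer than it really is:
+
+    #include <stdio.h>
+
+    #define UDP_HDR_LEN 8   /* sizeof(struct udphdr) */
+    #define BTH_LEN     12  /* RoCE base transport header */
+
+    /* Check written as if 'len' already excluded the udp header. */
+    static int looks_long_enough(int len)
+    {
+        return len >= BTH_LEN;
+    }
+
+    int main(void)
+    {
+        int pkt_len = UDP_HDR_LEN + 6;  /* runt: only 6 bytes after udp */
+
+        /* Without pulling the udp header the runt passes the check. */
+        printf("unpulled: %d\n", looks_long_enough(pkt_len));
+        /* After skipping the udp header the check correctly fails. */
+        printf("pulled:   %d\n", looks_long_enough(pkt_len - UDP_HDR_LEN));
+        return 0;
+    }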
+
+Fixes: 8700e3e7c485 ("Soft RoCE driver")
+Link: https://lore.kernel.org/r/20230517172242.1806340-1-rpearsonhpe@gmail.com
+Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/sw/rxe/rxe_net.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
+index 2cb810cb890a5..be86b879a0d53 100644
+--- a/drivers/infiniband/sw/rxe/rxe_net.c
++++ b/drivers/infiniband/sw/rxe/rxe_net.c
+@@ -179,6 +179,9 @@ static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ pkt->mask = RXE_GRH_MASK;
+ pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph);
+
++ /* remove udp header */
++ skb_pull(skb, sizeof(struct udphdr));
++
+ rxe_rcv(skb);
+
+ return 0;
+@@ -419,6 +422,9 @@ static int rxe_loopback(struct sk_buff *skb, struct rxe_pkt_info *pkt)
+ return -EIO;
+ }
+
++ /* remove udp header */
++ skb_pull(skb, sizeof(struct udphdr));
++
+ rxe_rcv(skb);
+
+ return 0;
+--
+2.39.2
+
--- /dev/null
+From 0492b876f77b7fb433411fcb120adcfc6b68c66e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 2 Jun 2023 11:54:08 +0800
+Subject: RDMA/rxe: Fix the use-before-initialization error of resp_pkts
+
+From: Zhu Yanjun <yanjun.zhu@linux.dev>
+
+[ Upstream commit 2a62b6210ce876c596086ab8fd4c8a0c3d10611a ]
+
+In the following:
+
+ Call Trace:
+ <TASK>
+ __dump_stack lib/dump_stack.c:88 [inline]
+ dump_stack_lvl+0xd9/0x150 lib/dump_stack.c:106
+ assign_lock_key kernel/locking/lockdep.c:982 [inline]
+ register_lock_class+0xdb6/0x1120 kernel/locking/lockdep.c:1295
+ __lock_acquire+0x10a/0x5df0 kernel/locking/lockdep.c:4951
+ lock_acquire kernel/locking/lockdep.c:5691 [inline]
+ lock_acquire+0x1b1/0x520 kernel/locking/lockdep.c:5656
+ __raw_spin_lock_irqsave include/linux/spinlock_api_smp.h:110 [inline]
+ _raw_spin_lock_irqsave+0x3d/0x60 kernel/locking/spinlock.c:162
+ skb_dequeue+0x20/0x180 net/core/skbuff.c:3639
+ drain_resp_pkts drivers/infiniband/sw/rxe/rxe_comp.c:555 [inline]
+ rxe_completer+0x250d/0x3cc0 drivers/infiniband/sw/rxe/rxe_comp.c:652
+ rxe_qp_do_cleanup+0x1be/0x820 drivers/infiniband/sw/rxe/rxe_qp.c:761
+ execute_in_process_context+0x3b/0x150 kernel/workqueue.c:3473
+ __rxe_cleanup+0x21e/0x370 drivers/infiniband/sw/rxe/rxe_pool.c:233
+ rxe_create_qp+0x3f6/0x5f0 drivers/infiniband/sw/rxe/rxe_verbs.c:583
+
+This is a use-before-initialization problem.
+
+It happens because rxe_qp_do_cleanup is called during error unwind before
+the struct has been fully initialized.
+
+Move the initialization of the skb queues earlier.
+
+Fixes: 8700e3e7c485 ("Soft RoCE driver")
+Link: https://lore.kernel.org/r/20230602035408.741534-1-yanjun.zhu@intel.com
+Reported-by: syzbot+eba589d8f49c73d356da@syzkaller.appspotmail.com
+Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/sw/rxe/rxe_qp.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
+index 64c2729f4c0c0..13b237d93a616 100644
+--- a/drivers/infiniband/sw/rxe/rxe_qp.c
++++ b/drivers/infiniband/sw/rxe/rxe_qp.c
+@@ -203,6 +203,9 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
+ spin_lock_init(&qp->rq.producer_lock);
+ spin_lock_init(&qp->rq.consumer_lock);
+
++ skb_queue_head_init(&qp->req_pkts);
++ skb_queue_head_init(&qp->resp_pkts);
++
+ atomic_set(&qp->ssn, 0);
+ atomic_set(&qp->skb_out, 0);
+ }
+@@ -263,8 +266,6 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
+ qp->req.opcode = -1;
+ qp->comp.opcode = -1;
+
+- skb_queue_head_init(&qp->req_pkts);
+-
+ rxe_init_task(&qp->req.task, qp, rxe_requester);
+ rxe_init_task(&qp->comp.task, qp, rxe_completer);
+
+@@ -311,8 +312,6 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
+ }
+ }
+
+- skb_queue_head_init(&qp->resp_pkts);
+-
+ rxe_init_task(&qp->resp.task, qp, rxe_responder);
+
+ qp->resp.opcode = OPCODE_NONE;
+--
+2.39.2
+
--- /dev/null
+From c5aeeeaafd389dd1a060b5ba6dc300cc6682ebd7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 21 Aug 2022 21:16:15 -0400
+Subject: RDMA/rxe: Remove the unused variable obj
+
+From: Zhu Yanjun <yanjun.zhu@linux.dev>
+
+[ Upstream commit f07853582d1f6ed282f8d9a0b1209a87dd761f58 ]
+
+The member variable obj in struct rxe_task is not needed.
+So remove it to save memory.
+
+Link: https://lore.kernel.org/r/20220822011615.805603-4-yanjun.zhu@linux.dev
+Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
+Reviewed-by: Li Zhijian <lizhijian@fujitsu.com>
+Reviewed-by: Bob Pearson <rpearsonhpe@gmail.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Stable-dep-of: 2a62b6210ce8 ("RDMA/rxe: Fix the use-before-initialization error of resp_pkts")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/sw/rxe/rxe_qp.c | 6 +++---
+ drivers/infiniband/sw/rxe/rxe_task.c | 3 +--
+ drivers/infiniband/sw/rxe/rxe_task.h | 3 +--
+ 3 files changed, 5 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
+index d7a968356a9bb..5b78230692fda 100644
+--- a/drivers/infiniband/sw/rxe/rxe_qp.c
++++ b/drivers/infiniband/sw/rxe/rxe_qp.c
+@@ -265,9 +265,9 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
+
+ skb_queue_head_init(&qp->req_pkts);
+
+- rxe_init_task(rxe, &qp->req.task, qp,
++ rxe_init_task(&qp->req.task, qp,
+ rxe_requester, "req");
+- rxe_init_task(rxe, &qp->comp.task, qp,
++ rxe_init_task(&qp->comp.task, qp,
+ rxe_completer, "comp");
+
+ qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
+@@ -315,7 +315,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
+
+ skb_queue_head_init(&qp->resp_pkts);
+
+- rxe_init_task(rxe, &qp->resp.task, qp,
++ rxe_init_task(&qp->resp.task, qp,
+ rxe_responder, "resp");
+
+ qp->resp.opcode = OPCODE_NONE;
+diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
+index 568cf56c236bc..f48882b20d6b2 100644
+--- a/drivers/infiniband/sw/rxe/rxe_task.c
++++ b/drivers/infiniband/sw/rxe/rxe_task.c
+@@ -95,10 +95,9 @@ void rxe_do_task(struct tasklet_struct *t)
+ task->ret = ret;
+ }
+
+-int rxe_init_task(void *obj, struct rxe_task *task,
++int rxe_init_task(struct rxe_task *task,
+ void *arg, int (*func)(void *), char *name)
+ {
+- task->obj = obj;
+ task->arg = arg;
+ task->func = func;
+ snprintf(task->name, sizeof(task->name), "%s", name);
+diff --git a/drivers/infiniband/sw/rxe/rxe_task.h b/drivers/infiniband/sw/rxe/rxe_task.h
+index 11d183fd33386..7f612a1c68a7b 100644
+--- a/drivers/infiniband/sw/rxe/rxe_task.h
++++ b/drivers/infiniband/sw/rxe/rxe_task.h
+@@ -19,7 +19,6 @@ enum {
+ * called again.
+ */
+ struct rxe_task {
+- void *obj;
+ struct tasklet_struct tasklet;
+ int state;
+ spinlock_t state_lock; /* spinlock for task state */
+@@ -35,7 +34,7 @@ struct rxe_task {
+ * arg => parameter to pass to fcn
+ * func => function to call until it returns != 0
+ */
+-int rxe_init_task(void *obj, struct rxe_task *task,
++int rxe_init_task(struct rxe_task *task,
+ void *arg, int (*func)(void *), char *name);
+
+ /* cleanup task */
+--
+2.39.2
+
--- /dev/null
+From 9305cbb1b02cb24806ea16a7ff518cde6587e7f0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Oct 2022 15:01:04 -0500
+Subject: RDMA/rxe: Removed unused name from rxe_task struct
+
+From: Bob Pearson <rpearsonhpe@gmail.com>
+
+[ Upstream commit de669ae8af49ceed0eed44f5b3d51dc62affc5e4 ]
+
+The name field in struct rxe_task is never used. This patch removes it.
+
+Link: https://lore.kernel.org/r/20221021200118.2163-4-rpearsonhpe@gmail.com
+Signed-off-by: Ian Ziemba <ian.ziemba@hpe.com>
+Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Stable-dep-of: 2a62b6210ce8 ("RDMA/rxe: Fix the use-before-initialization error of resp_pkts")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/sw/rxe/rxe_qp.c | 9 +++------
+ drivers/infiniband/sw/rxe/rxe_task.c | 4 +---
+ drivers/infiniband/sw/rxe/rxe_task.h | 4 +---
+ 3 files changed, 5 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
+index 5b78230692fda..64c2729f4c0c0 100644
+--- a/drivers/infiniband/sw/rxe/rxe_qp.c
++++ b/drivers/infiniband/sw/rxe/rxe_qp.c
+@@ -265,10 +265,8 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
+
+ skb_queue_head_init(&qp->req_pkts);
+
+- rxe_init_task(&qp->req.task, qp,
+- rxe_requester, "req");
+- rxe_init_task(&qp->comp.task, qp,
+- rxe_completer, "comp");
++ rxe_init_task(&qp->req.task, qp, rxe_requester);
++ rxe_init_task(&qp->comp.task, qp, rxe_completer);
+
+ qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
+ if (init->qp_type == IB_QPT_RC) {
+@@ -315,8 +313,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
+
+ skb_queue_head_init(&qp->resp_pkts);
+
+- rxe_init_task(&qp->resp.task, qp,
+- rxe_responder, "resp");
++ rxe_init_task(&qp->resp.task, qp, rxe_responder);
+
+ qp->resp.opcode = OPCODE_NONE;
+ qp->resp.msn = 0;
+diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
+index f48882b20d6b2..5aa69947a9791 100644
+--- a/drivers/infiniband/sw/rxe/rxe_task.c
++++ b/drivers/infiniband/sw/rxe/rxe_task.c
+@@ -95,12 +95,10 @@ void rxe_do_task(struct tasklet_struct *t)
+ task->ret = ret;
+ }
+
+-int rxe_init_task(struct rxe_task *task,
+- void *arg, int (*func)(void *), char *name)
++int rxe_init_task(struct rxe_task *task, void *arg, int (*func)(void *))
+ {
+ task->arg = arg;
+ task->func = func;
+- snprintf(task->name, sizeof(task->name), "%s", name);
+ task->destroyed = false;
+
+ tasklet_setup(&task->tasklet, rxe_do_task);
+diff --git a/drivers/infiniband/sw/rxe/rxe_task.h b/drivers/infiniband/sw/rxe/rxe_task.h
+index 7f612a1c68a7b..b3dfd970d1dc6 100644
+--- a/drivers/infiniband/sw/rxe/rxe_task.h
++++ b/drivers/infiniband/sw/rxe/rxe_task.h
+@@ -25,7 +25,6 @@ struct rxe_task {
+ void *arg;
+ int (*func)(void *arg);
+ int ret;
+- char name[16];
+ bool destroyed;
+ };
+
+@@ -34,8 +33,7 @@ struct rxe_task {
+ * arg => parameter to pass to fcn
+ * func => function to call until it returns != 0
+ */
+-int rxe_init_task(struct rxe_task *task,
+- void *arg, int (*func)(void *), char *name);
++int rxe_init_task(struct rxe_task *task, void *arg, int (*func)(void *));
+
+ /* cleanup task */
+ void rxe_cleanup_task(struct rxe_task *task);
+--
+2.39.2
+
--- /dev/null
+From 2412ecd37604837bcad14682d68ae3798615bc32 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 9 Jun 2023 14:05:19 +0300
+Subject: sctp: fix an error code in sctp_sf_eat_auth()
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+[ Upstream commit 75e6def3b26736e7ff80639810098c9074229737 ]
+
+The sctp_sf_eat_auth() function is supposed to return enum
+sctp_disposition values, and returning a kernel error code will cause
+issues in the caller. Change -ENOMEM to SCTP_DISPOSITION_NOMEM.
+
+Fixes: 65b07e5d0d09 ("[SCTP]: API updates to suport SCTP-AUTH extensions.")
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Acked-by: Xin Long <lucien.xin@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sctp/sm_statefuns.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index fb3da4d8f4a34..13acb84b00c2b 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -4478,7 +4478,7 @@ enum sctp_disposition sctp_sf_eat_auth(struct net *net,
+ SCTP_AUTH_NEW_KEY, GFP_ATOMIC);
+
+ if (!ev)
+- return -ENOMEM;
++ return SCTP_DISPOSITION_NOMEM;
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
+ SCTP_ULPEVENT(ev));
+--
+2.39.2
+
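A minimal userspace sketch (illustrative only, not kernel code; the enum and
function names here are made up) of the convention the fix restores: a function
declared to return enum sctp_disposition values must report failure with one of
those constants, because its caller dispatches on the enum and cannot act on a
negative errno such as -ENOMEM:

    #include <stdio.h>
    #include <errno.h>

    enum disposition {                  /* stand-in for enum sctp_disposition */
            DISP_CONSUME = 0,
            DISP_NOMEM,                 /* stand-in for SCTP_DISPOSITION_NOMEM */
    };

    static enum disposition eat_chunk(int alloc_ok)
    {
            if (!alloc_ok) {
                    /* return -ENOMEM;     wrong: the caller cannot interpret -12 */
                    return DISP_NOMEM;  /* right: stay in the enum's value space  */
            }
            return DISP_CONSUME;
    }

    int main(void)
    {
            switch (eat_chunk(0)) {
            case DISP_CONSUME:
                    puts("chunk consumed");
                    break;
            case DISP_NOMEM:
                    puts("allocation failure reported via the enum");
                    break;
            }
            return 0;
    }
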
--- /dev/null
+From 0fc7a9c3e30634a9f9582cbdd6db08f84f2bd019 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 15 Jun 2023 09:34:04 +0100
+Subject: selftests/ptp: Fix timestamp printf format for PTP_SYS_OFFSET
+
+From: Alex Maftei <alex.maftei@amd.com>
+
+[ Upstream commit 76a4c8b82938bc5020b67663db41f451684bf327 ]
+
+Previously, timestamps were printed using "%lld.%u", which is incorrect
+for nanosecond values lower than 100,000,000: the nanoseconds are the
+fractional digits, so their leading zeros are meaningful.
+
+This patch changes the format strings to "%lld.%09u" in order to add
+leading zeros to the nanosecond value.
+
+Fixes: 568ebc5985f5 ("ptp: add the PTP_SYS_OFFSET ioctl to the testptp program")
+Fixes: 4ec54f95736f ("ptp: Fix compiler warnings in the testptp utility")
+Fixes: 6ab0e475f1f3 ("Documentation: fix misc. warnings")
+Signed-off-by: Alex Maftei <alex.maftei@amd.com>
+Acked-by: Richard Cochran <richardcochran@gmail.com>
+Link: https://lore.kernel.org/r/20230615083404.57112-1-alex.maftei@amd.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/ptp/testptp.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/tools/testing/selftests/ptp/testptp.c b/tools/testing/selftests/ptp/testptp.c
+index f7911aaeb0075..aa474febb4712 100644
+--- a/tools/testing/selftests/ptp/testptp.c
++++ b/tools/testing/selftests/ptp/testptp.c
+@@ -492,11 +492,11 @@ int main(int argc, char *argv[])
+ interval = t2 - t1;
+ offset = (t2 + t1) / 2 - tp;
+
+- printf("system time: %lld.%u\n",
++ printf("system time: %lld.%09u\n",
+ (pct+2*i)->sec, (pct+2*i)->nsec);
+- printf("phc time: %lld.%u\n",
++ printf("phc time: %lld.%09u\n",
+ (pct+2*i+1)->sec, (pct+2*i+1)->nsec);
+- printf("system time: %lld.%u\n",
++ printf("system time: %lld.%09u\n",
+ (pct+2*i+2)->sec, (pct+2*i+2)->nsec);
+ printf("system/phc clock time offset is %" PRId64 " ns\n"
+ "system clock time delay is %" PRId64 " ns\n",
+--
+2.39.2
+
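A minimal sketch (not taken from the selftest; the timestamp values are made
up) showing why the "%09u" width matters for the fractional part:

    #include <stdio.h>

    int main(void)
    {
            long long sec = 5;
            unsigned int nsec = 7000000;        /* 0.007 s */

            printf("%lld.%u\n", sec, nsec);     /* "5.7000000"   - reads as 5.7 s */
            printf("%lld.%09u\n", sec, nsec);   /* "5.007000000" - correct        */
            return 0;
    }
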
usb-serial-option-add-quectel-em061kgl-series.patch
serial-lantiq-add-missing-interrupt-ack.patch
usb-dwc3-gadget-reset-num-trbs-before-giving-back-the-request.patch
+rdma-rtrs-fix-the-last-iu-buf-leak-in-err-path.patch
+rdma-rtrs-fix-rxe_dealloc_pd-warning.patch
+rdma-rxe-fix-packet-length-checks.patch
+spi-fsl-dspi-avoid-sck-glitches-with-continuous-tran.patch
+netfilter-nf_tables-integrate-pipapo-into-commit-pro.patch
+netfilter-nfnetlink-skip-error-delivery-on-batch-in-.patch
+netfilter-nf_tables-incorrect-error-path-handling-wi.patch
+net-enetc-correct-the-indexes-of-highest-and-2nd-hig.patch
+ping6-fix-send-to-link-local-addresses-with-vrf.patch
+net-sched-simplify-tcf_pedit_act.patch
+net-sched-act_pedit-remove-extra-check-for-key-type.patch
+net-sched-act_pedit-parse-l3-header-for-l4-offset.patch
+net-sched-cls_u32-fix-reference-counter-leak-leading.patch
+rdma-rxe-remove-the-unused-variable-obj.patch
+rdma-rxe-removed-unused-name-from-rxe_task-struct.patch
+rdma-rxe-fix-the-use-before-initialization-error-of-.patch
+iavf-remove-mask-from-iavf_irq_enable_queues.patch
+octeontx2-af-fixed-resource-availability-check.patch
+octeontx2-af-fix-lbk-link-credits-on-cn10k.patch
+rdma-mlx5-initiate-dropless-rq-for-raw-ethernet-func.patch
+rdma-cma-always-set-static-rate-to-0-for-roce.patch
+ib-uverbs-fix-to-consider-event-queue-closing-also-u.patch
+ib-isert-fix-dead-lock-in-ib_isert.patch
+ib-isert-fix-possible-list-corruption-in-cma-handler.patch
+ib-isert-fix-incorrect-release-of-isert-connection.patch
+net-ethtool-correct-max-attribute-value-for-stats.patch
+ipvlan-fix-bound-dev-checking-for-ipv6-l3s-mode.patch
+sctp-fix-an-error-code-in-sctp_sf_eat_auth.patch
+igc-clean-the-tx-buffer-and-tx-descriptor-ring.patch
+igb-fix-nvm.ops.read-error-handling.patch
+drm-nouveau-don-t-detect-dsm-for-non-nvidia-device.patch
+drm-nouveau-dp-check-for-null-nv_connector-native_mo.patch
+drm-nouveau-add-nv_encoder-pointer-check-for-null.patch
+cifs-fix-lease-break-oops-in-xfstest-generic-098.patch
+ext4-drop-the-call-to-ext4_error-from-ext4_get_group.patch
+net-sched-cls_api-fix-lockup-on-flushing-explicitly-.patch
+net-lapbether-only-support-ethernet-devices.patch
+dm-don-t-lock-fs-when-the-map-is-null-during-suspend.patch
+net-tipc-resize-nlattr-array-to-correct-size.patch
+selftests-ptp-fix-timestamp-printf-format-for-ptp_sy.patch
+afs-fix-vlserver-probe-rtt-handling.patch
--- /dev/null
+From 8561a12da640b4bb527f674d1ac00efc9407c91e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 May 2023 01:34:02 +0300
+Subject: spi: fsl-dspi: avoid SCK glitches with continuous transfers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit c5c31fb71f16ba75bad4ade208abbae225305b65 ]
+
+The DSPI controller has configurable timing for
+
+(a) tCSC: the interval between the assertion of the chip select and the
+ first clock edge
+
+(b) tASC: the interval between the last clock edge and the deassertion
+ of the chip select
+
+What is a bit surprising, but is documented in the figure "Example of
+continuous transfer (CPHA=1, CONT=1)" in the datasheet, is that when the
+chip select stays asserted between multiple TX FIFO writes, the tCSC and
+tASC times still apply. With CONT=1, chip select remains asserted, but
+SCK takes a break and goes to the idle state for tASC + tCSC ns.
+
+In other words, with the default values (of 0 and 0 ns), the SCK
+transition to the idle state and the SCK transition out of the idle
+state happen with no delay in between, so it may appear that a SCK
+cycle has simply gone missing. The resulting timing violation might
+cause data corruption in many peripherals while their chip select is
+asserted.
+
+The driver has device tree bindings for tCSC ("fsl,spi-cs-sck-delay")
+and tASC ("fsl,spi-sck-cs-delay"), but these are only specified to apply
+when the chip select toggles in the first place, and this timing
+characteristic depends on each peripheral. Many peripherals do not have
+explicit timing requirements, so many device trees do not have these
+properties present at all.
+
+Nonetheless, the lack of SCK glitches is a common sense requirement, and
+since the SCK stays in the idle state during transfers for tCSC+tASC ns,
+and that in itself should look like half a cycle, then let's ensure that
+tCSC and tASC are at least a quarter of a SCK period, such that their
+sum is at least half of one.
+
+Fixes: 95bf15f38641 ("spi: fsl-dspi: Add ~50ns delay between cs and sck")
+Reported-by: Lisa Chen (陈敏捷) <minjie.chen@geekplus.com>
+Debugged-by: Lisa Chen (陈敏捷) <minjie.chen@geekplus.com>
+Tested-by: Lisa Chen (陈敏捷) <minjie.chen@geekplus.com>
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Link: https://lore.kernel.org/r/20230529223402.1199503-1-vladimir.oltean@nxp.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-fsl-dspi.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
+index fd004c9db9dc0..0d9201a2999de 100644
+--- a/drivers/spi/spi-fsl-dspi.c
++++ b/drivers/spi/spi-fsl-dspi.c
+@@ -975,7 +975,9 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
+ static int dspi_setup(struct spi_device *spi)
+ {
+ struct fsl_dspi *dspi = spi_controller_get_devdata(spi->controller);
++ u32 period_ns = DIV_ROUND_UP(NSEC_PER_SEC, spi->max_speed_hz);
+ unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0;
++ u32 quarter_period_ns = DIV_ROUND_UP(period_ns, 4);
+ u32 cs_sck_delay = 0, sck_cs_delay = 0;
+ struct fsl_dspi_platform_data *pdata;
+ unsigned char pasc = 0, asc = 0;
+@@ -1003,6 +1005,19 @@ static int dspi_setup(struct spi_device *spi)
+ sck_cs_delay = pdata->sck_cs_delay;
+ }
+
++ /* Since tCSC and tASC apply to continuous transfers too, avoid SCK
++ * glitches of half a cycle by never allowing tCSC + tASC to go below
++ * half a SCK period.
++ */
++ if (cs_sck_delay < quarter_period_ns)
++ cs_sck_delay = quarter_period_ns;
++ if (sck_cs_delay < quarter_period_ns)
++ sck_cs_delay = quarter_period_ns;
++
++ dev_dbg(&spi->dev,
++ "DSPI controller timing params: CS-to-SCK delay %u ns, SCK-to-CS delay %u ns\n",
++ cs_sck_delay, sck_cs_delay);
++
+ clkrate = clk_get_rate(dspi->clk);
+ hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate);
+
+--
+2.39.2
+
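A minimal userspace sketch of the clamping arithmetic the patch adds to
dspi_setup() (DIV_ROUND_UP and NSEC_PER_SEC are open-coded here; the 10 MHz
clock and the 0/50 ns delays are example values, not taken from any device
tree):

    #include <stdio.h>

    #define NSEC_PER_SEC        1000000000U
    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int max_speed_hz = 10000000;             /* 10 MHz SCK         */
            unsigned int cs_sck_delay = 0, sck_cs_delay = 50; /* tCSC, tASC in ns   */

            unsigned int period_ns = DIV_ROUND_UP(NSEC_PER_SEC, max_speed_hz); /* 100 */
            unsigned int quarter_period_ns = DIV_ROUND_UP(period_ns, 4);       /*  25 */

            /* tCSC and tASC also apply between words of a continuous transfer,
             * so keep their sum at no less than half an SCK period. */
            if (cs_sck_delay < quarter_period_ns)
                    cs_sck_delay = quarter_period_ns;
            if (sck_cs_delay < quarter_period_ns)
                    sck_cs_delay = quarter_period_ns;

            printf("tCSC = %u ns, tASC = %u ns\n", cs_sck_delay, sck_cs_delay);
            return 0;
    }
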