From: Sasha Levin Date: Sun, 18 Jun 2023 14:10:48 +0000 (-0400) Subject: Fixes for 6.1 X-Git-Tag: v4.14.319~39 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=4824f0543b2973bbc78b3bcc171cbf31183297a2;p=thirdparty%2Fkernel%2Fstable-queue.git Fixes for 6.1 Signed-off-by: Sasha Levin --- diff --git a/queue-6.1/afs-fix-vlserver-probe-rtt-handling.patch b/queue-6.1/afs-fix-vlserver-probe-rtt-handling.patch new file mode 100644 index 00000000000..d2e90d9be56 --- /dev/null +++ b/queue-6.1/afs-fix-vlserver-probe-rtt-handling.patch @@ -0,0 +1,48 @@ +From be5471e3412303b0cdb986ca7a1ea692e329780f Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 16 Jun 2023 22:39:39 +0100 +Subject: afs: Fix vlserver probe RTT handling + +From: David Howells + +[ Upstream commit ba00b190670809c1a89326d80de96d714f6004f2 ] + +In the same spirit as commit ca57f02295f1 ("afs: Fix fileserver probe +RTT handling"), don't rule out using a vlserver just because there +haven't been enough packets yet to calculate a real rtt. Always set the +server's probe rtt from the estimate provided by rxrpc_kernel_get_srtt, +which is capped at 1 second. + +This could lead to EDESTADDRREQ errors when accessing a cell for the +first time, even though the vl servers are known and have responded to a +probe. + +Fixes: 1d4adfaf6574 ("rxrpc: Make rxrpc_kernel_get_srtt() indicate validity") +Signed-off-by: Marc Dionne +Signed-off-by: David Howells +cc: linux-afs@lists.infradead.org +Link: http://lists.infradead.org/pipermail/linux-afs/2023-June/006746.html +Signed-off-by: Linus Torvalds +Signed-off-by: Sasha Levin +--- + fs/afs/vl_probe.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/fs/afs/vl_probe.c b/fs/afs/vl_probe.c +index d1c7068b4346f..58452b86e6727 100644 +--- a/fs/afs/vl_probe.c ++++ b/fs/afs/vl_probe.c +@@ -115,8 +115,8 @@ void afs_vlserver_probe_result(struct afs_call *call) + } + } + +- if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) && +- rtt_us < server->probe.rtt) { ++ rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us); ++ if (rtt_us < server->probe.rtt) { + server->probe.rtt = rtt_us; + server->rtt = rtt_us; + alist->preferred = index; +-- +2.39.2 + diff --git a/queue-6.1/cifs-fix-lease-break-oops-in-xfstest-generic-098.patch b/queue-6.1/cifs-fix-lease-break-oops-in-xfstest-generic-098.patch new file mode 100644 index 00000000000..d67505d926f --- /dev/null +++ b/queue-6.1/cifs-fix-lease-break-oops-in-xfstest-generic-098.patch @@ -0,0 +1,45 @@ +From 99b3dc2a02b32634d733fbf56f82923de6b7def1 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sun, 11 Jun 2023 11:23:32 -0500 +Subject: cifs: fix lease break oops in xfstest generic/098 + +From: Steve French + +[ Upstream commit c774e6779f38bf36f0cce65e30793704bab4b0d7 ] + +umount can race with lease break so need to check if +tcon->ses->server is still valid to send the lease +break response. 
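In code terms the guard is just a NULL check on each link of the pointer chain before the dereference; a minimal standalone sketch of that pattern, with simplified stand-in types rather than the real cifs structures:

#include <stdio.h>

struct server  { int id; };
struct session { struct server *server; };
struct tcon    { struct session *ses; };

/* Send the response only if the session and server are still attached;
 * otherwise report that the share was already torn down. */
static int send_lease_break(struct tcon *tcon)
{
        if (tcon->ses && tcon->ses->server) {
                printf("lease break sent to server %d\n", tcon->ses->server->id);
                return 0;
        }
        printf("lease break not sent: share already unmounted\n");
        return -1;
}

int main(void)
{
        struct server srv = { .id = 1 };
        struct session ses = { .server = &srv };
        struct tcon live = { .ses = &ses };
        struct tcon torn_down = { .ses = NULL };

        send_lease_break(&live);       /* takes the guarded path */
        send_lease_break(&torn_down);  /* would have crashed without the check */
        return 0;
}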
+ +Reviewed-by: Bharath SM +Reviewed-by: Shyam Prasad N +Fixes: 59a556aebc43 ("SMB3: drop reference to cfile before sending oplock break") +Signed-off-by: Steve French +Signed-off-by: Sasha Levin +--- + fs/cifs/file.c | 8 ++++++-- + 1 file changed, 6 insertions(+), 2 deletions(-) + +diff --git a/fs/cifs/file.c b/fs/cifs/file.c +index 9a4c33ffb75fa..87dcffece7623 100644 +--- a/fs/cifs/file.c ++++ b/fs/cifs/file.c +@@ -5146,9 +5146,13 @@ void cifs_oplock_break(struct work_struct *work) + * disconnected since oplock already released by the server + */ + if (!oplock_break_cancelled) { +- rc = tcon->ses->server->ops->oplock_response(tcon, persistent_fid, ++ /* check for server null since can race with kill_sb calling tree disconnect */ ++ if (tcon->ses && tcon->ses->server) { ++ rc = tcon->ses->server->ops->oplock_response(tcon, persistent_fid, + volatile_fid, net_fid, cinode); +- cifs_dbg(FYI, "Oplock release rc = %d\n", rc); ++ cifs_dbg(FYI, "Oplock release rc = %d\n", rc); ++ } else ++ pr_warn_once("lease break not sent for unmounted share\n"); + } + + cifs_done_oplock_break(cinode); +-- +2.39.2 + diff --git a/queue-6.1/dm-don-t-lock-fs-when-the-map-is-null-during-suspend.patch b/queue-6.1/dm-don-t-lock-fs-when-the-map-is-null-during-suspend.patch new file mode 100644 index 00000000000..64a161450b5 --- /dev/null +++ b/queue-6.1/dm-don-t-lock-fs-when-the-map-is-null-during-suspend.patch @@ -0,0 +1,65 @@ +From d095b9b6985baeff4e10308896522af93b8f8eb4 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 1 Jun 2023 14:14:23 +0800 +Subject: dm: don't lock fs when the map is NULL during suspend or resume + +From: Li Lingfeng + +[ Upstream commit 2760904d895279f87196f0fa9ec570c79fe6a2e4 ] + +As described in commit 38d11da522aa ("dm: don't lock fs when the map is +NULL in process of resume"), a deadlock may be triggered between +do_resume() and do_mount(). + +This commit preserves the fix from commit 38d11da522aa but moves it to +where it also serves to fix a similar deadlock between do_suspend() +and do_mount(). It does so, if the active map is NULL, by clearing +DM_SUSPEND_LOCKFS_FLAG in dm_suspend() which is called by both +do_suspend() and do_resume(). + +Fixes: 38d11da522aa ("dm: don't lock fs when the map is NULL in process of resume") +Signed-off-by: Li Lingfeng +Signed-off-by: Mike Snitzer +Signed-off-by: Sasha Levin +--- + drivers/md/dm-ioctl.c | 5 +---- + drivers/md/dm.c | 4 ++++ + 2 files changed, 5 insertions(+), 4 deletions(-) + +diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c +index 83aecd9250ba6..6ae1c19b82433 100644 +--- a/drivers/md/dm-ioctl.c ++++ b/drivers/md/dm-ioctl.c +@@ -1151,13 +1151,10 @@ static int do_resume(struct dm_ioctl *param) + /* Do we need to load a new map ? 
*/ + if (new_map) { + sector_t old_size, new_size; +- int srcu_idx; + + /* Suspend if it isn't already suspended */ +- old_map = dm_get_live_table(md, &srcu_idx); +- if ((param->flags & DM_SKIP_LOCKFS_FLAG) || !old_map) ++ if (param->flags & DM_SKIP_LOCKFS_FLAG) + suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG; +- dm_put_live_table(md, srcu_idx); + if (param->flags & DM_NOFLUSH_FLAG) + suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG; + if (!dm_suspended_md(md)) +diff --git a/drivers/md/dm.c b/drivers/md/dm.c +index 24284d22f15bc..acf7e7551c941 100644 +--- a/drivers/md/dm.c ++++ b/drivers/md/dm.c +@@ -2801,6 +2801,10 @@ int dm_suspend(struct mapped_device *md, unsigned int suspend_flags) + } + + map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); ++ if (!map) { ++ /* avoid deadlock with fs/namespace.c:do_mount() */ ++ suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG; ++ } + + r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED); + if (r) +-- +2.39.2 + diff --git a/queue-6.1/drm-bridge-ti-sn65dsi86-avoid-possible-buffer-overfl.patch b/queue-6.1/drm-bridge-ti-sn65dsi86-avoid-possible-buffer-overfl.patch new file mode 100644 index 00000000000..4e66ea61081 --- /dev/null +++ b/queue-6.1/drm-bridge-ti-sn65dsi86-avoid-possible-buffer-overfl.patch @@ -0,0 +1,39 @@ +From 5514f5a86b701f9ca45c36e19b730fe7b672f1c9 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 8 Jun 2023 09:24:43 +0800 +Subject: drm/bridge: ti-sn65dsi86: Avoid possible buffer overflow + +From: Su Hui + +[ Upstream commit 95011f267c44a4d1f9ca1769e8a29ab2c559e004 ] + +Smatch error:buffer overflow 'ti_sn_bridge_refclk_lut' 5 <= 5. + +Fixes: cea86c5bb442 ("drm/bridge: ti-sn65dsi86: Implement the pwm_chip") +Signed-off-by: Su Hui +Reviewed-by: Douglas Anderson +Signed-off-by: Douglas Anderson +Link: https://patchwork.freedesktop.org/patch/msgid/20230608012443.839372-1-suhui@nfschina.com +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/bridge/ti-sn65dsi86.c | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c +index aeca9c066bf29..d16775c973c4e 100644 +--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c ++++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c +@@ -298,6 +298,10 @@ static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata) + if (refclk_lut[i] == refclk_rate) + break; + ++ /* avoid buffer overflow and "1" is the default rate in the datasheet. */ ++ if (i >= refclk_lut_size) ++ i = 1; ++ + regmap_update_bits(pdata->regmap, SN_DPPLL_SRC_REG, REFCLK_FREQ_MASK, + REFCLK_FREQ(i)); + +-- +2.39.2 + diff --git a/queue-6.1/drm-nouveau-add-nv_encoder-pointer-check-for-null.patch b/queue-6.1/drm-nouveau-add-nv_encoder-pointer-check-for-null.patch new file mode 100644 index 00000000000..2a18bcf2985 --- /dev/null +++ b/queue-6.1/drm-nouveau-add-nv_encoder-pointer-check-for-null.patch @@ -0,0 +1,43 @@ +From 68047ab2f2629c8ef1a9245db9325b27fa83c8b9 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 12 May 2023 13:33:20 +0300 +Subject: drm/nouveau: add nv_encoder pointer check for NULL + +From: Natalia Petrova + +[ Upstream commit 55b94bb8c42464bad3d2217f6874aa1a85664eac ] + +Pointer nv_encoder could be dereferenced at nouveau_connector.c +in case it's equal to NULL by jumping to goto label. +This patch adds a NULL-check to avoid it. + +Found by Linux Verification Center (linuxtesting.org) with SVACE. 
+ +Fixes: 3195c5f9784a ("drm/nouveau: set encoder for lvds") +Signed-off-by: Natalia Petrova +Reviewed-by: Lyude Paul +[Fixed patch title] +Signed-off-by: Lyude Paul +Link: https://patchwork.freedesktop.org/patch/msgid/20230512103320.82234-1-n.petrova@fintech.ru +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/nouveau/nouveau_connector.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c +index fd984733b8e6f..f40310559d13f 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_connector.c ++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c +@@ -730,7 +730,8 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force) + #endif + + nouveau_connector_set_edid(nv_connector, edid); +- nouveau_connector_set_encoder(connector, nv_encoder); ++ if (nv_encoder) ++ nouveau_connector_set_encoder(connector, nv_encoder); + return status; + } + +-- +2.39.2 + diff --git a/queue-6.1/drm-nouveau-don-t-detect-dsm-for-non-nvidia-device.patch b/queue-6.1/drm-nouveau-don-t-detect-dsm-for-non-nvidia-device.patch new file mode 100644 index 00000000000..a1b3f0f9ef3 --- /dev/null +++ b/queue-6.1/drm-nouveau-don-t-detect-dsm-for-non-nvidia-device.patch @@ -0,0 +1,62 @@ +From 0420c6c1681073ef59a748dcfcb834d7a0104a15 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 24 May 2023 04:11:56 +0700 +Subject: drm/nouveau: don't detect DSM for non-NVIDIA device + +From: Ratchanan Srirattanamet + +[ Upstream commit 11d24327c2d7ad7f24fcc44fb00e1fa91ebf6525 ] + +The call site of nouveau_dsm_pci_probe() uses single set of output +variables for all invocations. So, we must not write anything to them +unless it's an NVIDIA device. Otherwise, if we are called with another +device after the NVIDIA device, we'll clober the result of the NVIDIA +device. + +For example, if the other device doesn't have _PR3 resources, the +detection later would miss the presence of power resource support, and +the rest of the code will keep using Optimus DSM, breaking power +management for that machine. + +Also, because we're detecting NVIDIA's DSM, it doesn't make sense to run +this detection on a non-NVIDIA device anyway. Thus, check at the +beginning of the detection code if this is an NVIDIA card, and just +return if it isn't. + +This, together with commit d22915d22ded ("drm/nouveau/devinit/tu102-: +wait for GFW_BOOT_PROGRESS == COMPLETED") developed independently and +landed earlier, fixes runtime power management of the NVIDIA card in +Lenovo Legion 5-15ARH05. Without this patch, the GPU resumption code +will "timeout", sometimes hanging userspace. + +As a bonus, we'll also stop preventing _PR3 usage from the bridge for +unrelated devices, which is always nice, I guess. 
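The key point is that a probe helper which writes through shared output pointers must return before touching them for devices it does not handle; a small userspace sketch of that rule, using hypothetical stand-in types rather than the nouveau/PCI code:

#include <stdio.h>
#include <stdbool.h>

#define VENDOR_NVIDIA 0x10de

struct pci_dev_stub { unsigned short vendor; bool has_pr3; };

/* Probe helper writing its findings into a shared out-parameter.
 * Returning early for foreign devices keeps the result from an
 * earlier matching device intact. */
static void dsm_probe(const struct pci_dev_stub *pdev, bool *has_pr3)
{
        if (pdev->vendor != VENDOR_NVIDIA)
                return;            /* do not touch *has_pr3 at all */

        *has_pr3 = pdev->has_pr3;  /* only the NVIDIA device reports here */
}

int main(void)
{
        struct pci_dev_stub gpu   = { .vendor = VENDOR_NVIDIA, .has_pr3 = true };
        struct pci_dev_stub other = { .vendor = 0x8086, .has_pr3 = false };
        bool has_pr3 = false;

        dsm_probe(&gpu, &has_pr3);    /* has_pr3 becomes true */
        dsm_probe(&other, &has_pr3);  /* unrelated device; result preserved */

        printf("has_pr3 = %d\n", has_pr3);  /* prints 1 */
        return 0;
}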
+ +Fixes: ccfc2d5cdb02 ("drm/nouveau: Use generic helper to check _PR3 presence") +Signed-off-by: Ratchanan Srirattanamet +Closes: https://gitlab.freedesktop.org/drm/nouveau/-/issues/79 +Reviewed-by: Karol Herbst +Signed-off-by: Karol Herbst +Link: https://patchwork.freedesktop.org/patch/msgid/DM6PR19MB2780805D4BE1E3F9B3AC96D0BC409@DM6PR19MB2780.namprd19.prod.outlook.com +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/nouveau/nouveau_acpi.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c +index 8cf096f841a90..a2ae8c21e4dce 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c ++++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c +@@ -220,6 +220,9 @@ static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out + int optimus_funcs; + struct pci_dev *parent_pdev; + ++ if (pdev->vendor != PCI_VENDOR_ID_NVIDIA) ++ return; ++ + *has_pr3 = false; + parent_pdev = pci_upstream_bridge(pdev); + if (parent_pdev) { +-- +2.39.2 + diff --git a/queue-6.1/drm-nouveau-dp-check-for-null-nv_connector-native_mo.patch b/queue-6.1/drm-nouveau-dp-check-for-null-nv_connector-native_mo.patch new file mode 100644 index 00000000000..c413bdcdf99 --- /dev/null +++ b/queue-6.1/drm-nouveau-dp-check-for-null-nv_connector-native_mo.patch @@ -0,0 +1,53 @@ +From 148752c3b298ebfa22a5b1372417b3fe2d2f326f Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 12 May 2023 14:15:26 +0300 +Subject: drm/nouveau/dp: check for NULL nv_connector->native_mode + +From: Natalia Petrova + +[ Upstream commit 20a2ce87fbaf81e4c3dcb631d738e423959eb320 ] + +Add checking for NULL before calling nouveau_connector_detect_depth() in +nouveau_connector_get_modes() function because nv_connector->native_mode +could be dereferenced there since connector pointer passed to +nouveau_connector_detect_depth() and the same value of +nv_connector->native_mode is used there. + +Found by Linux Verification Center (linuxtesting.org) with SVACE. + +Fixes: d4c2c99bdc83 ("drm/nouveau/dp: remove broken display depth function, use the improved one") + +Signed-off-by: Natalia Petrova +Reviewed-by: Lyude Paul +Signed-off-by: Lyude Paul +Link: https://patchwork.freedesktop.org/patch/msgid/20230512111526.82408-1-n.petrova@fintech.ru +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/nouveau/nouveau_connector.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c +index 1991bbb1d05c3..fd984733b8e6f 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_connector.c ++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c +@@ -966,7 +966,7 @@ nouveau_connector_get_modes(struct drm_connector *connector) + /* Determine display colour depth for everything except LVDS now, + * DP requires this before mode_valid() is called. + */ +- if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS) ++ if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && nv_connector->native_mode) + nouveau_connector_detect_depth(connector); + + /* Find the native mode if this is a digital panel, if we didn't +@@ -987,7 +987,7 @@ nouveau_connector_get_modes(struct drm_connector *connector) + * "native" mode as some VBIOS tables require us to use the + * pixel clock as part of the lookup... 
+ */ +- if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) ++ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS && nv_connector->native_mode) + nouveau_connector_detect_depth(connector); + + if (nv_encoder->dcb->type == DCB_OUTPUT_TV) +-- +2.39.2 + diff --git a/queue-6.1/ext4-drop-the-call-to-ext4_error-from-ext4_get_group.patch b/queue-6.1/ext4-drop-the-call-to-ext4_error-from-ext4_get_group.patch new file mode 100644 index 00000000000..0950eb15b51 --- /dev/null +++ b/queue-6.1/ext4-drop-the-call-to-ext4_error-from-ext4_get_group.patch @@ -0,0 +1,64 @@ +From bf2f7cc67ce43b1e4c6685ee5da67139440c003f Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 14 Jun 2023 12:02:55 +0200 +Subject: ext4: drop the call to ext4_error() from ext4_get_group_info() + +From: Fabio M. De Francesco + +[ Upstream commit f451fd97dd2b78f286379203a47d9d295c467255 ] + +A recent patch added a call to ext4_error() which is problematic since +some callers of the ext4_get_group_info() function may be holding a +spinlock, whereas ext4_error() must never be called in atomic context. + +This triggered a report from Syzbot: "BUG: sleeping function called from +invalid context in ext4_update_super" (see the link below). + +Therefore, drop the call to ext4_error() from ext4_get_group_info(). In +the meantime use eight characters tabs instead of nine characters ones. + +Reported-by: syzbot+4acc7d910e617b360859@syzkaller.appspotmail.com +Closes: https://lore.kernel.org/all/00000000000070575805fdc6cdb2@google.com/ +Fixes: 5354b2af3406 ("ext4: allow ext4_get_group_info() to fail") +Suggested-by: Theodore Ts'o +Signed-off-by: Fabio M. De Francesco +Link: https://lore.kernel.org/r/20230614100446.14337-1-fmdefrancesco@gmail.com +Signed-off-by: Sasha Levin +--- + fs/ext4/balloc.c | 20 +++++++++----------- + 1 file changed, 9 insertions(+), 11 deletions(-) + +diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c +index a38aa33af08ef..8e83b51e3c68a 100644 +--- a/fs/ext4/balloc.c ++++ b/fs/ext4/balloc.c +@@ -322,17 +322,15 @@ static ext4_fsblk_t ext4_valid_block_bitmap_padding(struct super_block *sb, + struct ext4_group_info *ext4_get_group_info(struct super_block *sb, + ext4_group_t group) + { +- struct ext4_group_info **grp_info; +- long indexv, indexh; +- +- if (unlikely(group >= EXT4_SB(sb)->s_groups_count)) { +- ext4_error(sb, "invalid group %u", group); +- return NULL; +- } +- indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb)); +- indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1); +- grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv); +- return grp_info[indexh]; ++ struct ext4_group_info **grp_info; ++ long indexv, indexh; ++ ++ if (unlikely(group >= EXT4_SB(sb)->s_groups_count)) ++ return NULL; ++ indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb)); ++ indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1); ++ grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv); ++ return grp_info[indexh]; + } + + /* +-- +2.39.2 + diff --git a/queue-6.1/iavf-remove-mask-from-iavf_irq_enable_queues.patch b/queue-6.1/iavf-remove-mask-from-iavf_irq_enable_queues.patch new file mode 100644 index 00000000000..3b40747e833 --- /dev/null +++ b/queue-6.1/iavf-remove-mask-from-iavf_irq_enable_queues.patch @@ -0,0 +1,103 @@ +From 17f433701c8fa571918f5c291913e70a4d4860a2 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 8 Jun 2023 13:02:26 -0700 +Subject: iavf: remove mask from iavf_irq_enable_queues() + +From: Ahmed Zaki + +[ Upstream commit c37cf54c12cfaa51e7aaf88708167b0d3259e64e ] + +Enable more than 32 IRQs by 
removing the u32 bit mask in +iavf_irq_enable_queues(). There is no need for the mask as there are no +callers that select individual IRQs through the bitmask. Also, if the PF +allocates more than 32 IRQs, this mask will prevent us from using all of +them. + +Modify the comment in iavf_register.h to show that the maximum number +allowed for the IRQ index is 63 as per the iAVF standard 1.0 [1]. + +link: [1] https://www.intel.com/content/dam/www/public/us/en/documents/product-specifications/ethernet-adaptive-virtual-function-hardware-spec.pdf +Fixes: 5eae00c57f5e ("i40evf: main driver core") +Signed-off-by: Ahmed Zaki +Tested-by: Rafal Romanowski +Reviewed-by: Simon Horman +Reviewed-by: Maciej Fijalkowski +Signed-off-by: Tony Nguyen +Link: https://lore.kernel.org/r/20230608200226.451861-1-anthony.l.nguyen@intel.com +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/intel/iavf/iavf.h | 2 +- + drivers/net/ethernet/intel/iavf/iavf_main.c | 15 ++++++--------- + drivers/net/ethernet/intel/iavf/iavf_register.h | 2 +- + 3 files changed, 8 insertions(+), 11 deletions(-) + +diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h +index 93a998f169de7..6625625f91e47 100644 +--- a/drivers/net/ethernet/intel/iavf/iavf.h ++++ b/drivers/net/ethernet/intel/iavf/iavf.h +@@ -525,7 +525,7 @@ void iavf_set_ethtool_ops(struct net_device *netdev); + void iavf_update_stats(struct iavf_adapter *adapter); + void iavf_reset_interrupt_capability(struct iavf_adapter *adapter); + int iavf_init_interrupt_scheme(struct iavf_adapter *adapter); +-void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask); ++void iavf_irq_enable_queues(struct iavf_adapter *adapter); + void iavf_free_all_tx_resources(struct iavf_adapter *adapter); + void iavf_free_all_rx_resources(struct iavf_adapter *adapter); + +diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c +index 34711a88dbaa0..965d02d7ff80f 100644 +--- a/drivers/net/ethernet/intel/iavf/iavf_main.c ++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c +@@ -359,21 +359,18 @@ static void iavf_irq_disable(struct iavf_adapter *adapter) + } + + /** +- * iavf_irq_enable_queues - Enable interrupt for specified queues ++ * iavf_irq_enable_queues - Enable interrupt for all queues + * @adapter: board private structure +- * @mask: bitmap of queues to enable + **/ +-void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask) ++void iavf_irq_enable_queues(struct iavf_adapter *adapter) + { + struct iavf_hw *hw = &adapter->hw; + int i; + + for (i = 1; i < adapter->num_msix_vectors; i++) { +- if (mask & BIT(i - 1)) { +- wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), +- IAVF_VFINT_DYN_CTLN1_INTENA_MASK | +- IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK); +- } ++ wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), ++ IAVF_VFINT_DYN_CTLN1_INTENA_MASK | ++ IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK); + } + } + +@@ -387,7 +384,7 @@ void iavf_irq_enable(struct iavf_adapter *adapter, bool flush) + struct iavf_hw *hw = &adapter->hw; + + iavf_misc_irq_enable(adapter); +- iavf_irq_enable_queues(adapter, ~0); ++ iavf_irq_enable_queues(adapter); + + if (flush) + iavf_flush(hw); +diff --git a/drivers/net/ethernet/intel/iavf/iavf_register.h b/drivers/net/ethernet/intel/iavf/iavf_register.h +index bf793332fc9d5..a19e88898a0bb 100644 +--- a/drivers/net/ethernet/intel/iavf/iavf_register.h ++++ b/drivers/net/ethernet/intel/iavf/iavf_register.h +@@ -40,7 +40,7 @@ + #define IAVF_VFINT_DYN_CTL01_INTENA_MASK IAVF_MASK(0x1, 
IAVF_VFINT_DYN_CTL01_INTENA_SHIFT) + #define IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3 + #define IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) +-#define IAVF_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */ ++#define IAVF_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...63 */ /* Reset: VFR */ + #define IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT 0 + #define IAVF_VFINT_DYN_CTLN1_INTENA_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT) + #define IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2 +-- +2.39.2 + diff --git a/queue-6.1/ib-isert-fix-dead-lock-in-ib_isert.patch b/queue-6.1/ib-isert-fix-dead-lock-in-ib_isert.patch new file mode 100644 index 00000000000..8f965c9eabd --- /dev/null +++ b/queue-6.1/ib-isert-fix-dead-lock-in-ib_isert.patch @@ -0,0 +1,121 @@ +From eb58e9d5cf0ab04a64006dca7b43d9d910077b2c Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 6 Jun 2023 03:25:29 -0700 +Subject: IB/isert: Fix dead lock in ib_isert + +From: Saravanan Vajravel + +[ Upstream commit 691b0480933f0ce88a81ed1d1a0aff340ff6293a ] + +- When a iSER session is released, ib_isert module is taking a mutex + lock and releasing all pending connections. As part of this, ib_isert + is destroying rdma cm_id. To destroy cm_id, rdma_cm module is sending + CM events to CMA handler of ib_isert. This handler is taking same + mutex lock. Hence it leads to deadlock between ib_isert & rdma_cm + modules. + +- For fix, created local list of pending connections and release the + connection outside of mutex lock. + +Calltrace: +--------- +[ 1229.791410] INFO: task kworker/10:1:642 blocked for more than 120 seconds. +[ 1229.791416] Tainted: G OE --------- - - 4.18.0-372.9.1.el8.x86_64 #1 +[ 1229.791418] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. +[ 1229.791419] task:kworker/10:1 state:D stack: 0 pid: 642 ppid: 2 flags:0x80004000 +[ 1229.791424] Workqueue: ib_cm cm_work_handler [ib_cm] +[ 1229.791436] Call Trace: +[ 1229.791438] __schedule+0x2d1/0x830 +[ 1229.791445] ? select_idle_sibling+0x23/0x6f0 +[ 1229.791449] schedule+0x35/0xa0 +[ 1229.791451] schedule_preempt_disabled+0xa/0x10 +[ 1229.791453] __mutex_lock.isra.7+0x310/0x420 +[ 1229.791456] ? select_task_rq_fair+0x351/0x990 +[ 1229.791459] isert_cma_handler+0x224/0x330 [ib_isert] +[ 1229.791463] ? ttwu_queue_wakelist+0x159/0x170 +[ 1229.791466] cma_cm_event_handler+0x25/0xd0 [rdma_cm] +[ 1229.791474] cma_ib_handler+0xa7/0x2e0 [rdma_cm] +[ 1229.791478] cm_process_work+0x22/0xf0 [ib_cm] +[ 1229.791483] cm_work_handler+0xf4/0xf30 [ib_cm] +[ 1229.791487] ? move_linked_works+0x6e/0xa0 +[ 1229.791490] process_one_work+0x1a7/0x360 +[ 1229.791491] ? create_worker+0x1a0/0x1a0 +[ 1229.791493] worker_thread+0x30/0x390 +[ 1229.791494] ? create_worker+0x1a0/0x1a0 +[ 1229.791495] kthread+0x10a/0x120 +[ 1229.791497] ? set_kthread_struct+0x40/0x40 +[ 1229.791499] ret_from_fork+0x1f/0x40 + +[ 1229.791739] INFO: task targetcli:28666 blocked for more than 120 seconds. +[ 1229.791740] Tainted: G OE --------- - - 4.18.0-372.9.1.el8.x86_64 #1 +[ 1229.791741] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. 
+[ 1229.791742] task:targetcli state:D stack: 0 pid:28666 ppid: 5510 flags:0x00004080 +[ 1229.791743] Call Trace: +[ 1229.791744] __schedule+0x2d1/0x830 +[ 1229.791746] schedule+0x35/0xa0 +[ 1229.791748] schedule_preempt_disabled+0xa/0x10 +[ 1229.791749] __mutex_lock.isra.7+0x310/0x420 +[ 1229.791751] rdma_destroy_id+0x15/0x20 [rdma_cm] +[ 1229.791755] isert_connect_release+0x115/0x130 [ib_isert] +[ 1229.791757] isert_free_np+0x87/0x140 [ib_isert] +[ 1229.791761] iscsit_del_np+0x74/0x120 [iscsi_target_mod] +[ 1229.791776] lio_target_np_driver_store+0xe9/0x140 [iscsi_target_mod] +[ 1229.791784] configfs_write_file+0xb2/0x110 +[ 1229.791788] vfs_write+0xa5/0x1a0 +[ 1229.791792] ksys_write+0x4f/0xb0 +[ 1229.791794] do_syscall_64+0x5b/0x1a0 +[ 1229.791798] entry_SYSCALL_64_after_hwframe+0x65/0xca + +Fixes: bd3792205aae ("iser-target: Fix pending connections handling in target stack shutdown sequnce") +Reviewed-by: Sagi Grimberg +Signed-off-by: Selvin Xavier +Signed-off-by: Saravanan Vajravel +Link: https://lore.kernel.org/r/20230606102531.162967-2-saravanan.vajravel@broadcom.com +Signed-off-by: Leon Romanovsky +Signed-off-by: Sasha Levin +--- + drivers/infiniband/ulp/isert/ib_isert.c | 10 ++++++++-- + 1 file changed, 8 insertions(+), 2 deletions(-) + +diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c +index 7cca171478a22..50d7373b425c0 100644 +--- a/drivers/infiniband/ulp/isert/ib_isert.c ++++ b/drivers/infiniband/ulp/isert/ib_isert.c +@@ -2432,6 +2432,7 @@ isert_free_np(struct iscsi_np *np) + { + struct isert_np *isert_np = np->np_context; + struct isert_conn *isert_conn, *n; ++ LIST_HEAD(drop_conn_list); + + if (isert_np->cm_id) + rdma_destroy_id(isert_np->cm_id); +@@ -2451,7 +2452,7 @@ isert_free_np(struct iscsi_np *np) + node) { + isert_info("cleaning isert_conn %p state (%d)\n", + isert_conn, isert_conn->state); +- isert_connect_release(isert_conn); ++ list_move_tail(&isert_conn->node, &drop_conn_list); + } + } + +@@ -2462,11 +2463,16 @@ isert_free_np(struct iscsi_np *np) + node) { + isert_info("cleaning isert_conn %p state (%d)\n", + isert_conn, isert_conn->state); +- isert_connect_release(isert_conn); ++ list_move_tail(&isert_conn->node, &drop_conn_list); + } + } + mutex_unlock(&isert_np->mutex); + ++ list_for_each_entry_safe(isert_conn, n, &drop_conn_list, node) { ++ list_del_init(&isert_conn->node); ++ isert_connect_release(isert_conn); ++ } ++ + np->np_context = NULL; + kfree(isert_np); + } +-- +2.39.2 + diff --git a/queue-6.1/ib-isert-fix-incorrect-release-of-isert-connection.patch b/queue-6.1/ib-isert-fix-incorrect-release-of-isert-connection.patch new file mode 100644 index 00000000000..2307d822f20 --- /dev/null +++ b/queue-6.1/ib-isert-fix-incorrect-release-of-isert-connection.patch @@ -0,0 +1,45 @@ +From 3d6165ef1513ef16c567050173f74c6b3e83774d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 6 Jun 2023 03:25:31 -0700 +Subject: IB/isert: Fix incorrect release of isert connection + +From: Saravanan Vajravel + +[ Upstream commit 699826f4e30ab76a62c238c86fbef7e826639c8d ] + +The ib_isert module is releasing the isert connection both in +isert_wait_conn() handler as well as isert_free_conn() handler. +In isert_wait_conn() handler, it is expected to wait for iSCSI +session logout operation to complete. It should free the isert +connection only in isert_free_conn() handler. 
+ +When a bunch of iSER target is cleared, this issue can lead to +use-after-free memory issue as isert conn is twice released + +Fixes: b02efbfc9a05 ("iser-target: Fix implicit termination of connections") +Reviewed-by: Sagi Grimberg +Signed-off-by: Saravanan Vajravel +Signed-off-by: Selvin Xavier +Link: https://lore.kernel.org/r/20230606102531.162967-4-saravanan.vajravel@broadcom.com +Signed-off-by: Leon Romanovsky +Signed-off-by: Sasha Levin +--- + drivers/infiniband/ulp/isert/ib_isert.c | 2 -- + 1 file changed, 2 deletions(-) + +diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c +index a02a3caeaa4e7..a7fef3ea77fe3 100644 +--- a/drivers/infiniband/ulp/isert/ib_isert.c ++++ b/drivers/infiniband/ulp/isert/ib_isert.c +@@ -2571,8 +2571,6 @@ static void isert_wait_conn(struct iscsit_conn *conn) + isert_put_unsol_pending_cmds(conn); + isert_wait4cmds(conn); + isert_wait4logout(isert_conn); +- +- queue_work(isert_release_wq, &isert_conn->release_work); + } + + static void isert_free_conn(struct iscsit_conn *conn) +-- +2.39.2 + diff --git a/queue-6.1/ib-isert-fix-possible-list-corruption-in-cma-handler.patch b/queue-6.1/ib-isert-fix-possible-list-corruption-in-cma-handler.patch new file mode 100644 index 00000000000..ca5e172514f --- /dev/null +++ b/queue-6.1/ib-isert-fix-possible-list-corruption-in-cma-handler.patch @@ -0,0 +1,45 @@ +From f78c4e2792fcdc585fbc0501793641d12296c8b7 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 6 Jun 2023 03:25:30 -0700 +Subject: IB/isert: Fix possible list corruption in CMA handler + +From: Saravanan Vajravel + +[ Upstream commit 7651e2d6c5b359a28c2d4c904fec6608d1021ca8 ] + +When ib_isert module receives connection error event, it is +releasing the isert session and removes corresponding list +node but it doesn't take appropriate mutex lock to remove +the list node. 
This can lead to linked list corruption + +Fixes: bd3792205aae ("iser-target: Fix pending connections handling in target stack shutdown sequnce") +Signed-off-by: Selvin Xavier +Signed-off-by: Saravanan Vajravel +Link: https://lore.kernel.org/r/20230606102531.162967-3-saravanan.vajravel@broadcom.com +Signed-off-by: Leon Romanovsky +Signed-off-by: Sasha Levin +--- + drivers/infiniband/ulp/isert/ib_isert.c | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c +index 50d7373b425c0..a02a3caeaa4e7 100644 +--- a/drivers/infiniband/ulp/isert/ib_isert.c ++++ b/drivers/infiniband/ulp/isert/ib_isert.c +@@ -657,9 +657,13 @@ static int + isert_connect_error(struct rdma_cm_id *cma_id) + { + struct isert_conn *isert_conn = cma_id->qp->qp_context; ++ struct isert_np *isert_np = cma_id->context; + + ib_drain_qp(isert_conn->qp); ++ ++ mutex_lock(&isert_np->mutex); + list_del_init(&isert_conn->node); ++ mutex_unlock(&isert_np->mutex); + isert_conn->cm_id = NULL; + isert_put_conn(isert_conn); + +-- +2.39.2 + diff --git a/queue-6.1/ib-uverbs-fix-to-consider-event-queue-closing-also-u.patch b/queue-6.1/ib-uverbs-fix-to-consider-event-queue-closing-also-u.patch new file mode 100644 index 00000000000..ebcae61e1bd --- /dev/null +++ b/queue-6.1/ib-uverbs-fix-to-consider-event-queue-closing-also-u.patch @@ -0,0 +1,69 @@ +From 6c4c91d1aa0959b2f52732e662ce6835e197d801 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 5 Jun 2023 13:33:25 +0300 +Subject: IB/uverbs: Fix to consider event queue closing also upon non-blocking + mode + +From: Yishai Hadas + +[ Upstream commit 62fab312fa1683e812e605db20d4f22de3e3fb2f ] + +Fix ib_uverbs_event_read() to consider event queue closing also upon +non-blocking mode. + +Once the queue is closed (e.g. hot-plug flow) all the existing events +are cleaned-up as part of ib_uverbs_free_event_queue(). + +An application that uses the non-blocking FD mode should get -EIO in +that case to let it knows that the device was removed already. + +Otherwise, it can loose the indication that the device was removed and +won't recover. + +As part of that, refactor the code to have a single flow with regards to +'is_closed' for both blocking and non-blocking modes. 
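After the refactor the closed-queue test runs before the non-blocking test, so a reader whose device has gone away gets -EIO rather than looping on -EAGAIN; a simplified, single-path sketch of that ordering (stand-in structure, no locking — the real code re-runs the same checks under the queue lock, and a blocking reader sleeps and loops):

#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

struct ev_queue { bool is_closed; int pending; };

/* Returns 0 when an event is available, -EIO once the queue is closed
 * (device removed), -EAGAIN for an empty but still-live queue. */
static int event_read_nonblock(struct ev_queue *q)
{
        if (q->pending == 0) {
                if (q->is_closed)   /* closed-state check comes first */
                        return -EIO;
                return -EAGAIN;     /* nothing queued yet, try again later */
        }
        q->pending--;
        return 0;
}

int main(void)
{
        struct ev_queue closed = { .is_closed = true,  .pending = 0 };
        struct ev_queue empty  = { .is_closed = false, .pending = 0 };

        printf("closed queue: %d (expect %d)\n", event_read_nonblock(&closed), -EIO);
        printf("empty queue:  %d (expect %d)\n", event_read_nonblock(&empty), -EAGAIN);
        return 0;
}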
+ +Fixes: 14e23bd6d221 ("RDMA/core: Fix locking in ib_uverbs_event_read") +Reviewed-by: Maor Gottlieb +Signed-off-by: Yishai Hadas +Link: https://lore.kernel.org/r/97b00116a1e1e13f8dc4ec38a5ea81cf8c030210.1685960567.git.leon@kernel.org +Signed-off-by: Leon Romanovsky +Signed-off-by: Sasha Levin +--- + drivers/infiniband/core/uverbs_main.c | 12 +++++------- + 1 file changed, 5 insertions(+), 7 deletions(-) + +diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c +index d544340887277..fa937cd268219 100644 +--- a/drivers/infiniband/core/uverbs_main.c ++++ b/drivers/infiniband/core/uverbs_main.c +@@ -222,8 +222,12 @@ static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue, + spin_lock_irq(&ev_queue->lock); + + while (list_empty(&ev_queue->event_list)) { +- spin_unlock_irq(&ev_queue->lock); ++ if (ev_queue->is_closed) { ++ spin_unlock_irq(&ev_queue->lock); ++ return -EIO; ++ } + ++ spin_unlock_irq(&ev_queue->lock); + if (filp->f_flags & O_NONBLOCK) + return -EAGAIN; + +@@ -233,12 +237,6 @@ static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue, + return -ERESTARTSYS; + + spin_lock_irq(&ev_queue->lock); +- +- /* If device was disassociated and no event exists set an error */ +- if (list_empty(&ev_queue->event_list) && ev_queue->is_closed) { +- spin_unlock_irq(&ev_queue->lock); +- return -EIO; +- } + } + + event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list); +-- +2.39.2 + diff --git a/queue-6.1/ice-fix-xdp-memory-leak-when-nic-is-brought-up-and-d.patch b/queue-6.1/ice-fix-xdp-memory-leak-when-nic-is-brought-up-and-d.patch new file mode 100644 index 00000000000..e4c9fd957be --- /dev/null +++ b/queue-6.1/ice-fix-xdp-memory-leak-when-nic-is-brought-up-and-d.patch @@ -0,0 +1,41 @@ +From 7f5833cc1edddf34caad60f88d6c55f6c9f082b4 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 6 Jun 2023 12:33:58 +0200 +Subject: ice: Fix XDP memory leak when NIC is brought up and down + +From: Kamil Maziarz + +[ Upstream commit 78c50d6961fc05491ebbc71c35d87324b1a4f49a ] + +Fix the buffer leak that occurs while switching +the port up and down with traffic and XDP by +checking for an active XDP program and freeing all empty TX buffers. 
+ +Fixes: efc2214b6047 ("ice: Add support for XDP") +Signed-off-by: Kamil Maziarz +Tested-by: Chandan Kumar Rout (A Contingent Worker at Intel) +Acked-by: Maciej Fijalkowski +Signed-off-by: Tony Nguyen +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/intel/ice/ice_main.c | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c +index 6a50f8ba3940c..4095fe40dfc9b 100644 +--- a/drivers/net/ethernet/intel/ice/ice_main.c ++++ b/drivers/net/ethernet/intel/ice/ice_main.c +@@ -6784,6 +6784,10 @@ int ice_down(struct ice_vsi *vsi) + ice_for_each_txq(vsi, i) + ice_clean_tx_ring(vsi->tx_rings[i]); + ++ if (ice_is_xdp_ena_vsi(vsi)) ++ ice_for_each_xdp_txq(vsi, i) ++ ice_clean_tx_ring(vsi->xdp_rings[i]); ++ + ice_for_each_rxq(vsi, i) + ice_clean_rx_ring(vsi->rx_rings[i]); + +-- +2.39.2 + diff --git a/queue-6.1/igb-fix-extts-capture-value-format-for-82580-i354-i3.patch b/queue-6.1/igb-fix-extts-capture-value-format-for-82580-i354-i3.patch new file mode 100644 index 00000000000..0c9fb68cbe9 --- /dev/null +++ b/queue-6.1/igb-fix-extts-capture-value-format-for-82580-i354-i3.patch @@ -0,0 +1,71 @@ +From 5b2bf77bae84a0da87347e5999211e0f89fac760 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 7 Jun 2023 09:41:16 -0700 +Subject: igb: Fix extts capture value format for 82580/i354/i350 + +From: Yuezhen Luan + +[ Upstream commit 6292d7436cf2f0a2ea8800a1d2cbb155d237818a ] + +82580/i354/i350 features circle-counter-like timestamp registers +that are different with newer i210. The EXTTS capture value in +AUXTSMPx should be converted from raw circle counter value to +timestamp value in resolution of 1 nanosec by the driver. + +This issue can be reproduced on i350 nics, connecting an 1PPS +signal to a SDP pin, and run 'ts2phc' command to read external +1PPS timestamp value. On i210 this works fine, but on i350 the +extts is not correctly converted. + +The i350/i354/82580's SYSTIM and other timestamp registers are +40bit counters, presenting time range of 2^40 ns, that means these +registers overflows every about 1099s. This causes all these regs +can't be used directly in contrast to the newer i210/i211s. + +The igb driver needs to convert these raw register values to +valid time stamp format by using kernel timecounter apis for i350s +families. Here the igb_extts() just forgot to do the convert. 
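Concretely, the capture is a 40-bit cycle count split across two registers (32 low bits plus 8 high bits), it wraps every 2^40 ns, roughly 1099 s, and only after the timecounter conversion does it become a usable timestamp; a standalone sketch of the widening and the wrap arithmetic — illustration only, the driver does the final step with timecounter_cyc2time():

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* pretend register reads: low 32 bits and the top 8 bits */
        uint32_t auxstmpl = 0x89ABCDEF;
        uint32_t auxstmph = 0x000000FF;   /* only bits 7:0 are valid */

        /* widen before shifting so the high byte is not lost */
        uint64_t raw = (uint64_t)auxstmpl + ((uint64_t)(auxstmph & 0xFF) << 32);

        printf("raw 40-bit count: 0x%010llx\n", (unsigned long long)raw);

        /* the counter covers 2^40 ns before wrapping */
        uint64_t wrap_ns = 1ULL << 40;
        printf("wrap period: %llu ns (~%llu s)\n",
               (unsigned long long)wrap_ns,
               (unsigned long long)(wrap_ns / 1000000000ULL));
        return 0;
}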
+ +Fixes: 38970eac41db ("igb: support EXTTS on 82580/i354/i350") +Signed-off-by: Yuezhen Luan +Reviewed-by: Jacob Keller +Tested-by: Pucha Himasekhar Reddy (A Contingent worker at Intel) +Signed-off-by: Tony Nguyen +Reviewed-by: Simon Horman +Link: https://lore.kernel.org/r/20230607164116.3768175-1-anthony.l.nguyen@intel.com +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/intel/igb/igb_main.c | 8 ++++++-- + 1 file changed, 6 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c +index b3aed4e2ca91c..18ffbc892f86c 100644 +--- a/drivers/net/ethernet/intel/igb/igb_main.c ++++ b/drivers/net/ethernet/intel/igb/igb_main.c +@@ -6893,6 +6893,7 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt) + struct e1000_hw *hw = &adapter->hw; + struct ptp_clock_event event; + struct timespec64 ts; ++ unsigned long flags; + + if (pin < 0 || pin >= IGB_N_SDP) + return; +@@ -6900,9 +6901,12 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt) + if (hw->mac.type == e1000_82580 || + hw->mac.type == e1000_i354 || + hw->mac.type == e1000_i350) { +- s64 ns = rd32(auxstmpl); ++ u64 ns = rd32(auxstmpl); + +- ns += ((s64)(rd32(auxstmph) & 0xFF)) << 32; ++ ns += ((u64)(rd32(auxstmph) & 0xFF)) << 32; ++ spin_lock_irqsave(&adapter->tmreg_lock, flags); ++ ns = timecounter_cyc2time(&adapter->tc, ns); ++ spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + ts = ns_to_timespec64(ns); + } else { + ts.tv_nsec = rd32(auxstmpl); +-- +2.39.2 + diff --git a/queue-6.1/igb-fix-nvm.ops.read-error-handling.patch b/queue-6.1/igb-fix-nvm.ops.read-error-handling.patch new file mode 100644 index 00000000000..9eb360a1f69 --- /dev/null +++ b/queue-6.1/igb-fix-nvm.ops.read-error-handling.patch @@ -0,0 +1,44 @@ +From 000910f26bd2f02f84eac31d932f9d8cec24074b Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 25 Apr 2023 17:44:14 +0200 +Subject: igb: fix nvm.ops.read() error handling + +From: Aleksandr Loktionov + +[ Upstream commit 48a821fd58837800750ec1b3962f0f799630a844 ] + +Add error handling into igb_set_eeprom() function, in case +nvm.ops.read() fails just quit with error code asap. 
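The shape of the fix is the usual early-exit-to-cleanup pattern: on a failed read, jump to the label that frees the scratch buffer and propagate the error; a generic standalone sketch with a hypothetical read_words() helper, not the igb code itself:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <errno.h>

/* stand-in for the hardware read; fails for out-of-range offsets */
static int read_words(uint16_t *buf, int offset, int count)
{
        if (offset + count > 64)
                return -EIO;
        for (int i = 0; i < count; i++)
                buf[i] = (uint16_t)(offset + i);
        return 0;
}

static int update_eeprom(int offset, int count)
{
        int ret;
        uint16_t *scratch = malloc(count * sizeof(*scratch));

        if (!scratch)
                return -ENOMEM;

        ret = read_words(scratch, offset, count);
        if (ret)
                goto out;            /* bail out before using stale data */

        /* ... modify scratch and write it back here ... */

out:
        free(scratch);               /* single exit path frees the buffer */
        return ret;
}

int main(void)
{
        printf("in range:     %d\n", update_eeprom(0, 8));    /* 0 */
        printf("out of range: %d\n", update_eeprom(60, 8));   /* -EIO */
        return 0;
}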
+ +Fixes: 9d5c824399de ("igb: PCI-Express 82575 Gigabit Ethernet driver") +Signed-off-by: Aleksandr Loktionov +Signed-off-by: Tony Nguyen +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/intel/igb/igb_ethtool.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c +index ff911af16a4b5..96fa1c420f910 100644 +--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c ++++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c +@@ -822,6 +822,8 @@ static int igb_set_eeprom(struct net_device *netdev, + */ + ret_val = hw->nvm.ops.read(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); ++ if (ret_val) ++ goto out; + } + + /* Device's eeprom is always little-endian, word addressable */ +@@ -841,6 +843,7 @@ static int igb_set_eeprom(struct net_device *netdev, + hw->nvm.ops.update(hw); + + igb_set_fw_version(adapter); ++out: + kfree(eeprom_buff); + return ret_val; + } +-- +2.39.2 + diff --git a/queue-6.1/igc-clean-the-tx-buffer-and-tx-descriptor-ring.patch b/queue-6.1/igc-clean-the-tx-buffer-and-tx-descriptor-ring.patch new file mode 100644 index 00000000000..82e5403097f --- /dev/null +++ b/queue-6.1/igc-clean-the-tx-buffer-and-tx-descriptor-ring.patch @@ -0,0 +1,160 @@ +From 50bbef025c2a933097f8528d24371949ebdf3d0b Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 15 May 2023 23:49:36 +0800 +Subject: igc: Clean the TX buffer and TX descriptor ring + +From: Muhammad Husaini Zulkifli + +[ Upstream commit e43516f5978d11d36511ce63d31d1da4db916510 ] + +There could be a race condition during link down where interrupt +being generated and igc_clean_tx_irq() been called to perform the +TX completion. Properly clear the TX buffer/descriptor ring and +disable the TX Queue ring in igc_free_tx_resources() to avoid that. 
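Zeroing the rings uses the sizeof(*array) * count sizing idiom so the byte count always follows the element type; a small standalone sketch of the same idea with a simplified ring layout, not the real igc structures:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct tx_buffer { void *skb; unsigned int bytecount; };

struct tx_ring {
        struct tx_buffer *buffer_info;  /* one entry per descriptor */
        void *desc;                     /* descriptor ring memory   */
        unsigned int count;             /* number of descriptors    */
        unsigned int size;              /* bytes backing 'desc'     */
};

static void clean_tx_ring(struct tx_ring *ring)
{
        /* element size comes from the pointee, so it cannot go stale */
        memset(ring->buffer_info, 0, sizeof(*ring->buffer_info) * ring->count);
        memset(ring->desc, 0, ring->size);
}

int main(void)
{
        struct tx_ring ring = { .count = 256, .size = 4096 };

        ring.buffer_info = calloc(ring.count, sizeof(*ring.buffer_info));
        ring.desc = malloc(ring.size);
        if (!ring.buffer_info || !ring.desc)
                return 1;

        clean_tx_ring(&ring);
        printf("cleared %u buffers and %u descriptor bytes\n",
               ring.count, ring.size);

        free(ring.buffer_info);
        free(ring.desc);
        return 0;
}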
+ +Kernel trace: +[ 108.237177] Hardware name: Intel Corporation Tiger Lake Client Platform/TigerLake U DDR4 SODIMM RVP, BIOS TGLIFUI1.R00.4204.A00.2105270302 05/27/2021 +[ 108.237178] RIP: 0010:refcount_warn_saturate+0x55/0x110 +[ 108.242143] RSP: 0018:ffff9e7980003db0 EFLAGS: 00010286 +[ 108.245555] Code: 84 bc 00 00 00 c3 cc cc cc cc 85 f6 74 46 80 3d 20 8c 4d 01 00 75 ee 48 c7 c7 88 f4 03 ab c6 05 10 8c 4d 01 01 e8 0b 10 96 ff <0f> 0b c3 cc cc cc cc 80 3d fc 8b 4d 01 00 75 cb 48 c7 c7 b0 f4 03 +[ 108.250434] +[ 108.250434] RSP: 0018:ffff9e798125f910 EFLAGS: 00010286 +[ 108.254358] RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000000 +[ 108.259325] +[ 108.259325] RAX: 0000000000000000 RBX: ffff8ddb935b8000 RCX: 0000000000000027 +[ 108.261868] RDX: ffff8de250a28800 RSI: ffff8de250a1c580 RDI: ffff8de250a1c580 +[ 108.265538] RDX: 0000000000000027 RSI: 0000000000000002 RDI: ffff8de250a9c588 +[ 108.265539] RBP: ffff8ddb935b8000 R08: ffffffffab2655a0 R09: ffff9e798125f898 +[ 108.267914] RBP: ffff8ddb8a5b8d80 R08: 0000005648eba354 R09: 0000000000000000 +[ 108.270196] R10: 0000000000000001 R11: 000000002d2d2d2d R12: ffff9e798125f948 +[ 108.270197] R13: ffff9e798125fa1c R14: ffff8ddb8a5b8d80 R15: 7fffffffffffffff +[ 108.273001] R10: 000000002d2d2d2d R11: 000000002d2d2d2d R12: ffff8ddb8a5b8ed4 +[ 108.276410] FS: 00007f605851b740(0000) GS:ffff8de250a80000(0000) knlGS:0000000000000000 +[ 108.280597] R13: 00000000000002ac R14: 00000000ffffff99 R15: ffff8ddb92561b80 +[ 108.282966] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 +[ 108.282967] CR2: 00007f053c039248 CR3: 0000000185850003 CR4: 0000000000f70ee0 +[ 108.286206] FS: 0000000000000000(0000) GS:ffff8de250a00000(0000) knlGS:0000000000000000 +[ 108.289701] PKRU: 55555554 +[ 108.289702] Call Trace: +[ 108.289704] +[ 108.293977] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 +[ 108.297562] sock_alloc_send_pskb+0x20c/0x240 +[ 108.301494] CR2: 00007f053c03a168 CR3: 0000000184394002 CR4: 0000000000f70ef0 +[ 108.301495] PKRU: 55555554 +[ 108.306464] __ip_append_data.isra.0+0x96f/0x1040 +[ 108.309441] Call Trace: +[ 108.309443] ? __pfx_ip_generic_getfrag+0x10/0x10 +[ 108.314927] +[ 108.314928] sock_wfree+0x1c7/0x1d0 +[ 108.318078] ? __pfx_ip_generic_getfrag+0x10/0x10 +[ 108.320276] skb_release_head_state+0x32/0x90 +[ 108.324812] ip_make_skb+0xf6/0x130 +[ 108.327188] skb_release_all+0x16/0x40 +[ 108.330775] ? udp_sendmsg+0x9f3/0xcb0 +[ 108.332626] napi_consume_skb+0x48/0xf0 +[ 108.334134] ? xfrm_lookup_route+0x23/0xb0 +[ 108.344285] igc_poll+0x787/0x1620 [igc] +[ 108.346659] udp_sendmsg+0x9f3/0xcb0 +[ 108.360010] ? ttwu_do_activate+0x40/0x220 +[ 108.365237] ? __pfx_ip_generic_getfrag+0x10/0x10 +[ 108.366744] ? try_to_wake_up+0x289/0x5e0 +[ 108.376987] ? sock_sendmsg+0x81/0x90 +[ 108.395698] ? __pfx_process_timeout+0x10/0x10 +[ 108.395701] sock_sendmsg+0x81/0x90 +[ 108.409052] __napi_poll+0x29/0x1c0 +[ 108.414279] ____sys_sendmsg+0x284/0x310 +[ 108.419507] net_rx_action+0x257/0x2d0 +[ 108.438216] ___sys_sendmsg+0x7c/0xc0 +[ 108.439723] __do_softirq+0xc1/0x2a8 +[ 108.444950] ? finish_task_switch+0xb4/0x2f0 +[ 108.452077] irq_exit_rcu+0xa9/0xd0 +[ 108.453584] ? __schedule+0x372/0xd00 +[ 108.460713] common_interrupt+0x84/0xa0 +[ 108.467840] ? clockevents_program_event+0x95/0x100 +[ 108.474968] +[ 108.482096] ? do_nanosleep+0x88/0x130 +[ 108.489224] +[ 108.489225] asm_common_interrupt+0x26/0x40 +[ 108.496353] ? 
__rseq_handle_notify_resume+0xa9/0x4f0 +[ 108.503478] RIP: 0010:cpu_idle_poll+0x2c/0x100 +[ 108.510607] __sys_sendmsg+0x5d/0xb0 +[ 108.518687] Code: 05 e1 d9 c8 00 65 8b 15 de 64 85 55 85 c0 7f 57 e8 b9 ef ff ff fb 65 48 8b 1c 25 00 cc 02 00 48 8b 03 a8 08 74 0b eb 1c f3 90 <48> 8b 03 a8 08 75 13 8b 05 77 63 cd 00 85 c0 75 ed e8 ce ec ff ff +[ 108.525817] do_syscall_64+0x44/0xa0 +[ 108.531563] RSP: 0018:ffffffffab203e70 EFLAGS: 00000202 +[ 108.538693] entry_SYSCALL_64_after_hwframe+0x72/0xdc +[ 108.546775] +[ 108.546777] RIP: 0033:0x7f605862b7f7 +[ 108.549495] RAX: 0000000000000001 RBX: ffffffffab20c940 RCX: 000000000000003b +[ 108.551955] Code: 0e 00 f7 d8 64 89 02 48 c7 c0 ff ff ff ff eb b9 0f 1f 00 f3 0f 1e fa 64 8b 04 25 18 00 00 00 85 c0 75 10 b8 2e 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 51 c3 48 83 ec 28 89 54 24 1c 48 89 74 24 10 +[ 108.554068] RDX: 4000000000000000 RSI: 000000002da97f6a RDI: 00000000002b8ff4 +[ 108.559816] RSP: 002b:00007ffc99264058 EFLAGS: 00000246 +[ 108.564178] RBP: 0000000000000000 R08: 00000000002b8ff4 R09: ffff8ddb01554c80 +[ 108.571302] ORIG_RAX: 000000000000002e +[ 108.571303] RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f605862b7f7 +[ 108.574023] R10: 000000000000015b R11: 000000000000000f R12: ffffffffab20c940 +[ 108.574024] R13: 0000000000000000 R14: ffff8de26fbeef40 R15: ffffffffab20c940 +[ 108.578727] RDX: 0000000000000000 RSI: 00007ffc992640a0 RDI: 0000000000000003 +[ 108.578728] RBP: 00007ffc99264110 R08: 0000000000000000 R09: 175f48ad1c3a9c00 +[ 108.581187] do_idle+0x62/0x230 +[ 108.585890] R10: 0000000000000000 R11: 0000000000000246 R12: 00007ffc992642d8 +[ 108.585891] R13: 00005577814ab2ba R14: 00005577814addf0 R15: 00007f605876d000 +[ 108.587920] cpu_startup_entry+0x1d/0x20 +[ 108.591422] +[ 108.596127] rest_init+0xc5/0xd0 +[ 108.600490] ---[ end trace 0000000000000000 ]--- + +Test Setup: + +DUT: +- Change mac address on DUT Side. Ensure NIC not having same MAC Address +- Running udp_tai on DUT side. Let udp_tai running throughout the test + +Example: +./udp_tai -i enp170s0 -P 100000 -p 90 -c 1 -t 0 -u 30004 + +Host: +- Perform link up/down every 5 second. + +Result: +Kernel panic will happen on DUT Side. 
+ +Fixes: 13b5b7fd6a4a ("igc: Add support for Tx/Rx rings") +Signed-off-by: Muhammad Husaini Zulkifli +Tested-by: Naama Meir +Reviewed-by: Maciej Fijalkowski +Signed-off-by: Tony Nguyen +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/intel/igc/igc_main.c | 9 ++++++++- + 1 file changed, 8 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c +index 1d9b70e0ff67f..e5cb76cf2c3f1 100644 +--- a/drivers/net/ethernet/intel/igc/igc_main.c ++++ b/drivers/net/ethernet/intel/igc/igc_main.c +@@ -255,6 +255,13 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring) + /* reset BQL for queue */ + netdev_tx_reset_queue(txring_txq(tx_ring)); + ++ /* Zero out the buffer ring */ ++ memset(tx_ring->tx_buffer_info, 0, ++ sizeof(*tx_ring->tx_buffer_info) * tx_ring->count); ++ ++ /* Zero out the descriptor ring */ ++ memset(tx_ring->desc, 0, tx_ring->size); ++ + /* reset next_to_use and next_to_clean */ + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; +@@ -268,7 +275,7 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring) + */ + void igc_free_tx_resources(struct igc_ring *tx_ring) + { +- igc_clean_tx_ring(tx_ring); ++ igc_disable_tx_ring(tx_ring); + + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; +-- +2.39.2 + diff --git a/queue-6.1/igc-fix-possible-system-crash-when-loading-module.patch b/queue-6.1/igc-fix-possible-system-crash-when-loading-module.patch new file mode 100644 index 00000000000..383c98d9111 --- /dev/null +++ b/queue-6.1/igc-fix-possible-system-crash-when-loading-module.patch @@ -0,0 +1,48 @@ +From aaa7303a5a6199cbcb8c436173495ec7817e724f Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 17 Apr 2023 15:18:39 -0700 +Subject: igc: Fix possible system crash when loading module + +From: Vinicius Costa Gomes + +[ Upstream commit c080fe262f9e73a00934b70c16b1479cf40cd2bd ] + +Guarantee that when probe() is run again, PTM and PCI busmaster will be +in the same state as it was if the driver was never loaded. + +Avoid an i225/i226 hardware issue that PTM requests can be made even +though PCI bus mastering is not enabled. These unexpected PTM requests +can crash some systems. + +So, "force" disable PTM and busmastering before removing the driver, +so they can be re-enabled in the right order during probe(). This is +more like a workaround and should be applicable for i225 and i226, in +any platform. 
+ +Fixes: 1b5d73fb8624 ("igc: Enable PCIe PTM") +Signed-off-by: Vinicius Costa Gomes +Reviewed-by: Muhammad Husaini Zulkifli +Tested-by: Naama Meir +Signed-off-by: Tony Nguyen +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/intel/igc/igc_main.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c +index e5cb76cf2c3f1..3509974c1f8e4 100644 +--- a/drivers/net/ethernet/intel/igc/igc_main.c ++++ b/drivers/net/ethernet/intel/igc/igc_main.c +@@ -6685,6 +6685,9 @@ static void igc_remove(struct pci_dev *pdev) + + igc_ptp_stop(adapter); + ++ pci_disable_ptm(pdev); ++ pci_clear_master(pdev); ++ + set_bit(__IGC_DOWN, &adapter->state); + + del_timer_sync(&adapter->watchdog_timer); +-- +2.39.2 + diff --git a/queue-6.1/ipvlan-fix-bound-dev-checking-for-ipv6-l3s-mode.patch b/queue-6.1/ipvlan-fix-bound-dev-checking-for-ipv6-l3s-mode.patch new file mode 100644 index 00000000000..f0c62d87e8a --- /dev/null +++ b/queue-6.1/ipvlan-fix-bound-dev-checking-for-ipv6-l3s-mode.patch @@ -0,0 +1,50 @@ +From ab18fc9da9b9357c62b1c7e7d8b20470e56e9cd0 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 9 Jun 2023 17:15:02 +0800 +Subject: ipvlan: fix bound dev checking for IPv6 l3s mode + +From: Hangbin Liu + +[ Upstream commit ce57adc222aba32431c42632b396e9213d0eb0b8 ] + +The commit 59a0b022aa24 ("ipvlan: Make skb->skb_iif track skb->dev for l3s +mode") fixed ipvlan bonded dev checking by updating skb skb_iif. This fix +works for IPv4, as in raw_v4_input() the dif is from inet_iif(skb), which +is skb->skb_iif when there is no route. + +But for IPv6, the fix is not enough, because in ipv6_raw_deliver() -> +raw_v6_match(), the dif is inet6_iif(skb), which is returns IP6CB(skb)->iif +instead of skb->skb_iif if it's not a l3_slave. To fix the IPv6 part +issue. Let's set IP6CB(skb)->iif to correct ifindex. + +BTW, ipvlan handles NS/NA specifically. Since it works fine, I will not +reset IP6CB(skb)->iif when addr->atype is IPVL_ICMPV6. + +Fixes: c675e06a98a4 ("ipvlan: decouple l3s mode dependencies from other modes") +Link: https://bugzilla.redhat.com/show_bug.cgi?id=2196710 +Signed-off-by: Hangbin Liu +Reviewed-by: Larysa Zaremba +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ipvlan/ipvlan_l3s.c | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/drivers/net/ipvlan/ipvlan_l3s.c b/drivers/net/ipvlan/ipvlan_l3s.c +index 71712ea25403d..d5b05e8032199 100644 +--- a/drivers/net/ipvlan/ipvlan_l3s.c ++++ b/drivers/net/ipvlan/ipvlan_l3s.c +@@ -102,6 +102,10 @@ static unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb, + + skb->dev = addr->master->dev; + skb->skb_iif = skb->dev->ifindex; ++#if IS_ENABLED(CONFIG_IPV6) ++ if (addr->atype == IPVL_IPV6) ++ IP6CB(skb)->iif = skb->dev->ifindex; ++#endif + len = skb->len + ETH_HLEN; + ipvlan_count_rx(addr->master, len, true, false); + out: +-- +2.39.2 + diff --git a/queue-6.1/net-dsa-felix-fix-taprio-guard-band-overflow-at-10mb.patch b/queue-6.1/net-dsa-felix-fix-taprio-guard-band-overflow-at-10mb.patch new file mode 100644 index 00000000000..fa4eba665f0 --- /dev/null +++ b/queue-6.1/net-dsa-felix-fix-taprio-guard-band-overflow-at-10mb.patch @@ -0,0 +1,52 @@ +From 0fce8160449ab0c45313c7a88fec421d6fafa157 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 13 Jun 2023 20:09:07 +0300 +Subject: net: dsa: felix: fix taprio guard band overflow at 10Mbps with jumbo + frames + +From: Vladimir Oltean + +[ Upstream commit 6ac7a27a8b07588497ed53dfd885df9c72bc67e0 ] + +The DEV_MAC_MAXLEN_CFG register contains a 16-bit value - up to 65535. +Plus 2 * VLAN_HLEN (4), that is up to 65543. + +The picos_per_byte variable is the largest when "speed" is lowest - +SPEED_10 = 10. In that case it is (1000000L * 8) / 10 = 800000. + +Their product - 52434400000 - exceeds 32 bits, which is a problem, +because apparently, a multiplication between two 32-bit factors is +evaluated as 32-bit before being assigned to a 64-bit variable. +In fact it's a problem for any MTU value larger than 5368. + +Cast one of the factors of the multiplication to u64 to force the +multiplication to take place on 64 bits. + +Issue found by Coverity. + +Fixes: 55a515b1f5a9 ("net: dsa: felix: drop oversized frames with tc-taprio instead of hanging the port") +Signed-off-by: Vladimir Oltean +Reviewed-by: Simon Horman +Link: https://lore.kernel.org/r/20230613170907.2413559-1-vladimir.oltean@nxp.com +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + drivers/net/dsa/ocelot/felix_vsc9959.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c +index cc89cff029e1f..5f6af0870dfd6 100644 +--- a/drivers/net/dsa/ocelot/felix_vsc9959.c ++++ b/drivers/net/dsa/ocelot/felix_vsc9959.c +@@ -1253,7 +1253,7 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port) + /* Consider the standard Ethernet overhead of 8 octets preamble+SFD, + * 4 octets FCS, 12 octets IFG. 
+ */ +- needed_bit_time_ps = (maxlen + 24) * picos_per_byte; ++ needed_bit_time_ps = (u64)(maxlen + 24) * picos_per_byte; + + dev_dbg(ocelot->dev, + "port %d: max frame size %d needs %llu ps at speed %d\n", +-- +2.39.2 + diff --git a/queue-6.1/net-enetc-correct-the-indexes-of-highest-and-2nd-hig.patch b/queue-6.1/net-enetc-correct-the-indexes-of-highest-and-2nd-hig.patch new file mode 100644 index 00000000000..bdf29e36a5d --- /dev/null +++ b/queue-6.1/net-enetc-correct-the-indexes-of-highest-and-2nd-hig.patch @@ -0,0 +1,62 @@ +From 8710aff3c1e8e2178b728c087294a07baca0ce8f Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 7 Jun 2023 17:10:48 +0800 +Subject: net: enetc: correct the indexes of highest and 2nd highest TCs + +From: Wei Fang + +[ Upstream commit 21225873be1472b7c59ed3650396af0e40578112 ] + +For ENETC hardware, the TCs are numbered from 0 to N-1, where N +is the number of TCs. Numerically higher TC has higher priority. +It's obvious that the highest priority TC index should be N-1 and +the 2nd highest priority TC index should be N-2. + +However, the previous logic uses netdev_get_prio_tc_map() to get +the indexes of highest priority and 2nd highest priority TCs, it +does not make sense and is incorrect to give a "tc" argument to +netdev_get_prio_tc_map(). So the driver may get the wrong indexes +of the two highest priotiry TCs which would lead to failed to set +the CBS for the two highest priotiry TCs. + +e.g. +$ tc qdisc add dev eno0 parent root handle 100: mqprio num_tc 6 \ + map 0 0 1 1 2 3 4 5 queues 1@0 1@1 1@2 1@3 2@4 2@6 hw 1 +$ tc qdisc replace dev eno0 parent 100:6 cbs idleslope 100000 \ + sendslope -900000 hicredit 12 locredit -113 offload 1 +$ Error: Specified device failed to setup cbs hardware offload. + ^^^^^ + +In this example, the previous logic deems the indexes of the two +highest priotiry TCs should be 3 and 2. Actually, the indexes are +5 and 4, because the number of TCs is 6. So it would be failed to +configure the CBS for the two highest priority TCs. + +Fixes: c431047c4efe ("enetc: add support Credit Based Shaper(CBS) for hardware offload") +Signed-off-by: Wei Fang +Reviewed-by: Vladimir Oltean +Reviewed-by: Maciej Fijalkowski +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/freescale/enetc/enetc_qos.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c +index a8539a8554a13..762849959cc1b 100644 +--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c ++++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c +@@ -189,8 +189,8 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data) + int bw_sum = 0; + u8 bw; + +- prio_top = netdev_get_prio_tc_map(ndev, tc_nums - 1); +- prio_next = netdev_get_prio_tc_map(ndev, tc_nums - 2); ++ prio_top = tc_nums - 1; ++ prio_next = tc_nums - 2; + + /* Support highest prio and second prio tc in cbs mode */ + if (tc != prio_top && tc != prio_next) +-- +2.39.2 + diff --git a/queue-6.1/net-ethtool-correct-max-attribute-value-for-stats.patch b/queue-6.1/net-ethtool-correct-max-attribute-value-for-stats.patch new file mode 100644 index 00000000000..24237529633 --- /dev/null +++ b/queue-6.1/net-ethtool-correct-max-attribute-value-for-stats.patch @@ -0,0 +1,41 @@ +From 06f397efe5f7af3fae011d312e3fd883a37b6352 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 8 Jun 2023 09:23:44 -0700 +Subject: net: ethtool: correct MAX attribute value for stats + +From: Jakub Kicinski + +[ Upstream commit 52f79609c0c5b25fddb88e85f25ce08aa7e3fb42 ] + +When compiling YNL generated code compiler complains about +array-initializer-out-of-bounds. Turns out the MAX value +for STATS_GRP uses the value for STATS. + +This may lead to random corruptions in user space (kernel +itself doesn't use this value as it never parses stats). + +Fixes: f09ea6fb1272 ("ethtool: add a new command for reading standard stats") +Signed-off-by: Jakub Kicinski +Reviewed-by: David Ahern +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + include/uapi/linux/ethtool_netlink.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h +index bb57084ac524a..69f5bec347c20 100644 +--- a/include/uapi/linux/ethtool_netlink.h ++++ b/include/uapi/linux/ethtool_netlink.h +@@ -761,7 +761,7 @@ enum { + + /* add new constants above here */ + __ETHTOOL_A_STATS_GRP_CNT, +- ETHTOOL_A_STATS_GRP_MAX = (__ETHTOOL_A_STATS_CNT - 1) ++ ETHTOOL_A_STATS_GRP_MAX = (__ETHTOOL_A_STATS_GRP_CNT - 1) + }; + + enum { +-- +2.39.2 + diff --git a/queue-6.1/net-lapbether-only-support-ethernet-devices.patch b/queue-6.1/net-lapbether-only-support-ethernet-devices.patch new file mode 100644 index 00000000000..ce87c4e8f0d --- /dev/null +++ b/queue-6.1/net-lapbether-only-support-ethernet-devices.patch @@ -0,0 +1,96 @@ +From 0b54b16d844a79d3d5aeb85415732a457316bf65 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 14 Jun 2023 16:18:02 +0000 +Subject: net: lapbether: only support ethernet devices + +From: Eric Dumazet + +[ Upstream commit 9eed321cde22fc1afd76eac563ce19d899e0d6b2 ] + +It probbaly makes no sense to support arbitrary network devices +for lapbether. + +syzbot reported: + +skbuff: skb_under_panic: text:ffff80008934c100 len:44 put:40 head:ffff0000d18dd200 data:ffff0000d18dd1ea tail:0x16 end:0x140 dev:bond1 +kernel BUG at net/core/skbuff.c:200 ! 
+Internal error: Oops - BUG: 00000000f2000800 [#1] PREEMPT SMP +Modules linked in: +CPU: 0 PID: 5643 Comm: dhcpcd Not tainted 6.4.0-rc5-syzkaller-g4641cff8e810 #0 +Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 05/25/2023 +pstate: 60400005 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--) +pc : skb_panic net/core/skbuff.c:196 [inline] +pc : skb_under_panic+0x13c/0x140 net/core/skbuff.c:210 +lr : skb_panic net/core/skbuff.c:196 [inline] +lr : skb_under_panic+0x13c/0x140 net/core/skbuff.c:210 +sp : ffff8000973b7260 +x29: ffff8000973b7270 x28: ffff8000973b7360 x27: dfff800000000000 +x26: ffff0000d85d8150 x25: 0000000000000016 x24: ffff0000d18dd1ea +x23: ffff0000d18dd200 x22: 000000000000002c x21: 0000000000000140 +x20: 0000000000000028 x19: ffff80008934c100 x18: ffff8000973b68a0 +x17: 0000000000000000 x16: ffff80008a43bfbc x15: 0000000000000202 +x14: 0000000000000000 x13: 0000000000000001 x12: 0000000000000001 +x11: 0000000000000201 x10: 0000000000000000 x9 : f22f7eb937cced00 +x8 : f22f7eb937cced00 x7 : 0000000000000001 x6 : 0000000000000001 +x5 : ffff8000973b6b78 x4 : ffff80008df9ee80 x3 : ffff8000805974f4 +x2 : 0000000000000001 x1 : 0000000100000201 x0 : 0000000000000086 +Call trace: +skb_panic net/core/skbuff.c:196 [inline] +skb_under_panic+0x13c/0x140 net/core/skbuff.c:210 +skb_push+0xf0/0x108 net/core/skbuff.c:2409 +ip6gre_header+0xbc/0x738 net/ipv6/ip6_gre.c:1383 +dev_hard_header include/linux/netdevice.h:3137 [inline] +lapbeth_data_transmit+0x1c4/0x298 drivers/net/wan/lapbether.c:257 +lapb_data_transmit+0x8c/0xb0 net/lapb/lapb_iface.c:447 +lapb_transmit_buffer+0x178/0x204 net/lapb/lapb_out.c:149 +lapb_send_control+0x220/0x320 net/lapb/lapb_subr.c:251 +lapb_establish_data_link+0x94/0xec +lapb_device_event+0x348/0x4e0 +notifier_call_chain+0x1a4/0x510 kernel/notifier.c:93 +raw_notifier_call_chain+0x3c/0x50 kernel/notifier.c:461 +__dev_notify_flags+0x2bc/0x544 +dev_change_flags+0xd0/0x15c net/core/dev.c:8643 +devinet_ioctl+0x858/0x17e4 net/ipv4/devinet.c:1150 +inet_ioctl+0x2ac/0x4d8 net/ipv4/af_inet.c:979 +sock_do_ioctl+0x134/0x2dc net/socket.c:1201 +sock_ioctl+0x4ec/0x858 net/socket.c:1318 +vfs_ioctl fs/ioctl.c:51 [inline] +__do_sys_ioctl fs/ioctl.c:870 [inline] +__se_sys_ioctl fs/ioctl.c:856 [inline] +__arm64_sys_ioctl+0x14c/0x1c8 fs/ioctl.c:856 +__invoke_syscall arch/arm64/kernel/syscall.c:38 [inline] +invoke_syscall+0x98/0x2c0 arch/arm64/kernel/syscall.c:52 +el0_svc_common+0x138/0x244 arch/arm64/kernel/syscall.c:142 +do_el0_svc+0x64/0x198 arch/arm64/kernel/syscall.c:191 +el0_svc+0x4c/0x160 arch/arm64/kernel/entry-common.c:647 +el0t_64_sync_handler+0x84/0xfc arch/arm64/kernel/entry-common.c:665 +el0t_64_sync+0x190/0x194 arch/arm64/kernel/entry.S:591 +Code: aa1803e6 aa1903e7 a90023f5 947730f5 (d4210000) + +Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") +Reported-by: syzbot +Signed-off-by: Eric Dumazet +Cc: Martin Schiller +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + drivers/net/wan/lapbether.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c +index d62a904d2e422..56326f38fe8a3 100644 +--- a/drivers/net/wan/lapbether.c ++++ b/drivers/net/wan/lapbether.c +@@ -384,6 +384,9 @@ static int lapbeth_new_device(struct net_device *dev) + + ASSERT_RTNL(); + ++ if (dev->type != ARPHRD_ETHER) ++ return -EINVAL; ++ + ndev = alloc_netdev(sizeof(*lapbeth), "lapb%d", NET_NAME_UNKNOWN, + lapbeth_setup); + if (!ndev) +-- +2.39.2 + diff --git a/queue-6.1/net-macsec-fix-double-free-of-percpu-stats.patch b/queue-6.1/net-macsec-fix-double-free-of-percpu-stats.patch new file mode 100644 index 00000000000..56c87492a1c --- /dev/null +++ b/queue-6.1/net-macsec-fix-double-free-of-percpu-stats.patch @@ -0,0 +1,63 @@ +From 1d4b5bd687915b7112dc6deaec2609d651efdd76 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 13 Jun 2023 22:22:20 +0300 +Subject: net: macsec: fix double free of percpu stats + +From: Fedor Pchelkin + +[ Upstream commit 0c0cf3db83f8c7c9bb141c2771a34043bcf952ef ] + +Inside macsec_add_dev() we free percpu macsec->secy.tx_sc.stats and +macsec->stats on some of the memory allocation failure paths. However, the +net_device is already registered to that moment: in macsec_newlink(), just +before calling macsec_add_dev(). This means that during unregister process +its priv_destructor - macsec_free_netdev() - will be called and will free +the stats again. + +Remove freeing percpu stats inside macsec_add_dev() because +macsec_free_netdev() will correctly free the already allocated ones. The +pointers to unallocated stats stay NULL, and free_percpu() treats that +correctly. + +Found by Linux Verification Center (linuxtesting.org) with Syzkaller. + +Fixes: 0a28bfd4971f ("net/macsec: Add MACsec skb_metadata_dst Tx Data path support") +Fixes: c09440f7dcb3 ("macsec: introduce IEEE 802.1AE driver") +Signed-off-by: Fedor Pchelkin +Reviewed-by: Sabrina Dubroca +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + drivers/net/macsec.c | 12 +++++------- + 1 file changed, 5 insertions(+), 7 deletions(-) + +diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c +index 038a787943927..983cabf9a0f67 100644 +--- a/drivers/net/macsec.c ++++ b/drivers/net/macsec.c +@@ -3981,17 +3981,15 @@ static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len) + return -ENOMEM; + + secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats); +- if (!secy->tx_sc.stats) { +- free_percpu(macsec->stats); ++ if (!secy->tx_sc.stats) + return -ENOMEM; +- } + + secy->tx_sc.md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL); +- if (!secy->tx_sc.md_dst) { +- free_percpu(secy->tx_sc.stats); +- free_percpu(macsec->stats); ++ if (!secy->tx_sc.md_dst) ++ /* macsec and secy percpu stats will be freed when unregistering ++ * net_device in macsec_free_netdev() ++ */ + return -ENOMEM; +- } + + if (sci == MACSEC_UNDEF_SCI) + sci = dev_to_sci(dev, MACSEC_PORT_ES); +-- +2.39.2 + diff --git a/queue-6.1/net-phylink-report-correct-max-speed-for-qusgmii.patch b/queue-6.1/net-phylink-report-correct-max-speed-for-qusgmii.patch new file mode 100644 index 00000000000..1c23bf8e062 --- /dev/null +++ b/queue-6.1/net-phylink-report-correct-max-speed-for-qusgmii.patch @@ -0,0 +1,45 @@ +From a175418c30161f02416528e00519dfbac66dcb14 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 9 Jun 2023 10:03:04 +0200 +Subject: net: phylink: report correct max speed for QUSGMII + +From: Maxime Chevallier + +[ Upstream commit b9dc1046edfeb7d9dbc2272c8d9ad5a8c47f3199 ] + +Q-USGMII is the quad port version of USGMII, and supports a max speed of +1Gbps on each line. Make so that phylink_interface_max_speed() reports +this information correctly. + +Fixes: ae0e4bb2a0e0 ("net: phylink: Adjust link settings based on rate matching") +Signed-off-by: Maxime Chevallier +Reviewed-by: Russell King (Oracle) +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + drivers/net/phy/phylink.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c +index 4073e8243df3f..acd3405ddc9c6 100644 +--- a/drivers/net/phy/phylink.c ++++ b/drivers/net/phy/phylink.c +@@ -188,6 +188,7 @@ static int phylink_interface_max_speed(phy_interface_t interface) + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_QSGMII: ++ case PHY_INTERFACE_MODE_QUSGMII: + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_GMII: + return SPEED_1000; +@@ -204,7 +205,6 @@ static int phylink_interface_max_speed(phy_interface_t interface) + case PHY_INTERFACE_MODE_10GBASER: + case PHY_INTERFACE_MODE_10GKR: + case PHY_INTERFACE_MODE_USXGMII: +- case PHY_INTERFACE_MODE_QUSGMII: + return SPEED_10000; + + case PHY_INTERFACE_MODE_25GBASER: +-- +2.39.2 + diff --git a/queue-6.1/net-phylink-use-a-dedicated-helper-to-parse-usgmii-c.patch b/queue-6.1/net-phylink-use-a-dedicated-helper-to-parse-usgmii-c.patch new file mode 100644 index 00000000000..cfbe08d3e80 --- /dev/null +++ b/queue-6.1/net-phylink-use-a-dedicated-helper-to-parse-usgmii-c.patch @@ -0,0 +1,86 @@ +From f7e8f5afd398ecda865936f6ceeba3c82ba0c8a2 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 9 Jun 2023 10:03:05 +0200 +Subject: net: phylink: use a dedicated helper to parse usgmii control word + +From: Maxime Chevallier + +[ Upstream commit 923454c0368b8092e9d05c020f50abca577e7290 ] + +Q-USGMII is a derivative of USGMII, that uses a specific formatting for +the 
control word. The layout is close to the USXGMII control word, but +doesn't support speeds over 1Gbps. Use a dedicated decoding logic for +the USGMII control word, re-using USXGMII definitions but only considering +10/100/1000Mbps speeds + +Fixes: 5e61fe157a27 ("net: phy: Introduce QUSGMII PHY mode") +Signed-off-by: Maxime Chevallier +Reviewed-by: Russell King (Oracle) +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + drivers/net/phy/phylink.c | 39 ++++++++++++++++++++++++++++++++++++++- + 1 file changed, 38 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c +index acd3405ddc9c6..5b064a1de92f0 100644 +--- a/drivers/net/phy/phylink.c ++++ b/drivers/net/phy/phylink.c +@@ -3263,6 +3263,41 @@ void phylink_decode_usxgmii_word(struct phylink_link_state *state, + } + EXPORT_SYMBOL_GPL(phylink_decode_usxgmii_word); + ++/** ++ * phylink_decode_usgmii_word() - decode the USGMII word from a MAC PCS ++ * @state: a pointer to a struct phylink_link_state. ++ * @lpa: a 16 bit value which stores the USGMII auto-negotiation word ++ * ++ * Helper for MAC PCS supporting the USGMII protocol and the auto-negotiation ++ * code word. Decode the USGMII code word and populate the corresponding fields ++ * (speed, duplex) into the phylink_link_state structure. The structure for this ++ * word is the same as the USXGMII word, except it only supports speeds up to ++ * 1Gbps. ++ */ ++static void phylink_decode_usgmii_word(struct phylink_link_state *state, ++ uint16_t lpa) ++{ ++ switch (lpa & MDIO_USXGMII_SPD_MASK) { ++ case MDIO_USXGMII_10: ++ state->speed = SPEED_10; ++ break; ++ case MDIO_USXGMII_100: ++ state->speed = SPEED_100; ++ break; ++ case MDIO_USXGMII_1000: ++ state->speed = SPEED_1000; ++ break; ++ default: ++ state->link = false; ++ return; ++ } ++ ++ if (lpa & MDIO_USXGMII_FULL_DUPLEX) ++ state->duplex = DUPLEX_FULL; ++ else ++ state->duplex = DUPLEX_HALF; ++} ++ + /** + * phylink_mii_c22_pcs_decode_state() - Decode MAC PCS state from MII registers + * @state: a pointer to a &struct phylink_link_state. +@@ -3299,9 +3334,11 @@ void phylink_mii_c22_pcs_decode_state(struct phylink_link_state *state, + + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_QSGMII: +- case PHY_INTERFACE_MODE_QUSGMII: + phylink_decode_sgmii_word(state, lpa); + break; ++ case PHY_INTERFACE_MODE_QUSGMII: ++ phylink_decode_usgmii_word(state, lpa); ++ break; + + default: + state->link = false; +-- +2.39.2 + diff --git a/queue-6.1/net-sched-act_pedit-parse-l3-header-for-l4-offset.patch b/queue-6.1/net-sched-act_pedit-parse-l3-header-for-l4-offset.patch new file mode 100644 index 00000000000..cb67a1c84dc --- /dev/null +++ b/queue-6.1/net-sched-act_pedit-parse-l3-header-for-l4-offset.patch @@ -0,0 +1,140 @@ +From a9edd5b2aa760857864eb65619f4206ecf63e779 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 7 Jun 2023 12:23:54 -0400 +Subject: net/sched: act_pedit: Parse L3 Header for L4 offset + +From: Max Tottenham + +[ Upstream commit 6c02568fd1ae53099b4ab86365c5be1ff15f586b ] + +Instead of relying on skb->transport_header being set correctly, opt +instead to parse the L3 header length out of the L3 headers for both +IPv4/IPv6 when the Extended Layer Op for tcp/udp is used. This fixes a +bug if GRO is disabled, when GRO is disabled skb->transport_header is +set by __netif_receive_skb_core() to point to the L3 header, it's later +fixed by the upper protocol layers, but act_pedit will receive the SKB +before the fixups are completed. 
The existing behavior causes the +following to edit the L3 header if GRO is disabled instead of the UDP +header: + + tc filter add dev eth0 ingress protocol ip flower ip_proto udp \ + dst_ip 192.168.1.3 action pedit ex munge udp set dport 18053 + +Also re-introduce a rate-limited warning if we were unable to extract +the header offset when using the 'ex' interface. + +Fixes: 71d0ed7079df ("net/act_pedit: Support using offset relative to +the conventional network headers") +Signed-off-by: Max Tottenham +Reviewed-by: Josh Hunt +Reported-by: kernel test robot +Closes: https://lore.kernel.org/oe-kbuild-all/202305261541.N165u9TZ-lkp@intel.com/ +Reviewed-by: Pedro Tammela +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + net/sched/act_pedit.c | 48 ++++++++++++++++++++++++++++++++++++++----- + 1 file changed, 43 insertions(+), 5 deletions(-) + +diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c +index 48e14cbcd8ffe..180669aa9d097 100644 +--- a/net/sched/act_pedit.c ++++ b/net/sched/act_pedit.c +@@ -13,7 +13,10 @@ + #include + #include + #include ++#include ++#include + #include ++#include + #include + #include + #include +@@ -312,28 +315,58 @@ static bool offset_valid(struct sk_buff *skb, int offset) + return true; + } + +-static void pedit_skb_hdr_offset(struct sk_buff *skb, ++static int pedit_l4_skb_offset(struct sk_buff *skb, int *hoffset, const int header_type) ++{ ++ const int noff = skb_network_offset(skb); ++ int ret = -EINVAL; ++ struct iphdr _iph; ++ ++ switch (skb->protocol) { ++ case htons(ETH_P_IP): { ++ const struct iphdr *iph = skb_header_pointer(skb, noff, sizeof(_iph), &_iph); ++ ++ if (!iph) ++ goto out; ++ *hoffset = noff + iph->ihl * 4; ++ ret = 0; ++ break; ++ } ++ case htons(ETH_P_IPV6): ++ ret = ipv6_find_hdr(skb, hoffset, header_type, NULL, NULL) == header_type ? 
0 : -EINVAL; ++ break; ++ } ++out: ++ return ret; ++} ++ ++static int pedit_skb_hdr_offset(struct sk_buff *skb, + enum pedit_header_type htype, int *hoffset) + { ++ int ret = -EINVAL; + /* 'htype' is validated in the netlink parsing */ + switch (htype) { + case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH: +- if (skb_mac_header_was_set(skb)) ++ if (skb_mac_header_was_set(skb)) { + *hoffset = skb_mac_offset(skb); ++ ret = 0; ++ } + break; + case TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK: + case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4: + case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6: + *hoffset = skb_network_offset(skb); ++ ret = 0; + break; + case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP: ++ ret = pedit_l4_skb_offset(skb, hoffset, IPPROTO_TCP); ++ break; + case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP: +- if (skb_transport_header_was_set(skb)) +- *hoffset = skb_transport_offset(skb); ++ ret = pedit_l4_skb_offset(skb, hoffset, IPPROTO_UDP); + break; + default: + break; + } ++ return ret; + } + + static int tcf_pedit_act(struct sk_buff *skb, const struct tc_action *a, +@@ -368,6 +401,7 @@ static int tcf_pedit_act(struct sk_buff *skb, const struct tc_action *a, + int hoffset = 0; + u32 *ptr, hdata; + u32 val; ++ int rc; + + if (tkey_ex) { + htype = tkey_ex->htype; +@@ -376,7 +410,11 @@ static int tcf_pedit_act(struct sk_buff *skb, const struct tc_action *a, + tkey_ex++; + } + +- pedit_skb_hdr_offset(skb, htype, &hoffset); ++ rc = pedit_skb_hdr_offset(skb, htype, &hoffset); ++ if (rc) { ++ pr_info_ratelimited("tc action pedit unable to extract header offset for header type (0x%x)\n", htype); ++ goto bad; ++ } + + if (tkey->offmask) { + u8 *d, _d; +-- +2.39.2 + diff --git a/queue-6.1/net-sched-act_pedit-remove-extra-check-for-key-type.patch b/queue-6.1/net-sched-act_pedit-remove-extra-check-for-key-type.patch new file mode 100644 index 00000000000..74656a8e669 --- /dev/null +++ b/queue-6.1/net-sched-act_pedit-remove-extra-check-for-key-type.patch @@ -0,0 +1,98 @@ +From 3ee29c20d1f042ec596bc057372a4d9dea2e34e6 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 21 Apr 2023 18:25:16 -0300 +Subject: net/sched: act_pedit: remove extra check for key type + +From: Pedro Tammela + +[ Upstream commit 577140180ba28d0d37bc898c7bd6702c83aa106f ] + +The netlink parsing already validates the key 'htype'. +Remove the datapath check as it's redundant. + +Reviewed-by: Jamal Hadi Salim +Reviewed-by: Simon Horman +Signed-off-by: Pedro Tammela +Signed-off-by: David S. 
Miller +Stable-dep-of: 6c02568fd1ae ("net/sched: act_pedit: Parse L3 Header for L4 offset") +Signed-off-by: Sasha Levin +--- + net/sched/act_pedit.c | 29 +++++++---------------------- + 1 file changed, 7 insertions(+), 22 deletions(-) + +diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c +index 19f6b3fa6a557..48e14cbcd8ffe 100644 +--- a/net/sched/act_pedit.c ++++ b/net/sched/act_pedit.c +@@ -312,37 +312,28 @@ static bool offset_valid(struct sk_buff *skb, int offset) + return true; + } + +-static int pedit_skb_hdr_offset(struct sk_buff *skb, +- enum pedit_header_type htype, int *hoffset) ++static void pedit_skb_hdr_offset(struct sk_buff *skb, ++ enum pedit_header_type htype, int *hoffset) + { +- int ret = -EINVAL; +- ++ /* 'htype' is validated in the netlink parsing */ + switch (htype) { + case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH: +- if (skb_mac_header_was_set(skb)) { ++ if (skb_mac_header_was_set(skb)) + *hoffset = skb_mac_offset(skb); +- ret = 0; +- } + break; + case TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK: + case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4: + case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6: + *hoffset = skb_network_offset(skb); +- ret = 0; + break; + case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP: + case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP: +- if (skb_transport_header_was_set(skb)) { ++ if (skb_transport_header_was_set(skb)) + *hoffset = skb_transport_offset(skb); +- ret = 0; +- } + break; + default: +- ret = -EINVAL; + break; + } +- +- return ret; + } + + static int tcf_pedit_act(struct sk_buff *skb, const struct tc_action *a, +@@ -374,10 +365,9 @@ static int tcf_pedit_act(struct sk_buff *skb, const struct tc_action *a, + + for (i = parms->tcfp_nkeys; i > 0; i--, tkey++) { + int offset = tkey->off; ++ int hoffset = 0; + u32 *ptr, hdata; +- int hoffset; + u32 val; +- int rc; + + if (tkey_ex) { + htype = tkey_ex->htype; +@@ -386,12 +376,7 @@ static int tcf_pedit_act(struct sk_buff *skb, const struct tc_action *a, + tkey_ex++; + } + +- rc = pedit_skb_hdr_offset(skb, htype, &hoffset); +- if (rc) { +- pr_info("tc action pedit bad header type specified (0x%x)\n", +- htype); +- goto bad; +- } ++ pedit_skb_hdr_offset(skb, htype, &hoffset); + + if (tkey->offmask) { + u8 *d, _d; +-- +2.39.2 + diff --git a/queue-6.1/net-sched-cls_api-fix-lockup-on-flushing-explicitly-.patch b/queue-6.1/net-sched-cls_api-fix-lockup-on-flushing-explicitly-.patch new file mode 100644 index 00000000000..448de90efb5 --- /dev/null +++ b/queue-6.1/net-sched-cls_api-fix-lockup-on-flushing-explicitly-.patch @@ -0,0 +1,71 @@ +From 6e9af75ecc51f1e1c4e1c409a5c9240ca3feb4f8 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 12 Jun 2023 11:34:26 +0200 +Subject: net/sched: cls_api: Fix lockup on flushing explicitly created chain + +From: Vlad Buslov + +[ Upstream commit c9a82bec02c339cdda99b37c5e62b3b71fc4209c ] + +Mingshuai Ren reports: + +When a new chain is added by using tc, one soft lockup alarm will be + generated after delete the prio 0 filter of the chain. To reproduce + the problem, perform the following steps: +(1) tc qdisc add dev eth0 root handle 1: htb default 1 +(2) tc chain add dev eth0 +(3) tc filter del dev eth0 chain 0 parent 1: prio 0 +(4) tc filter add dev eth0 chain 0 parent 1: + +Fix the issue by accounting for additional reference to chains that are +explicitly created by RTM_NEWCHAIN message as opposed to implicitly by +RTM_NEWTFILTER message. 
+ +Fixes: 726d061286ce ("net: sched: prevent insertion of new classifiers during chain flush") +Reported-by: Mingshuai Ren +Closes: https://lore.kernel.org/lkml/87legswvi3.fsf@nvidia.com/T/ +Signed-off-by: Vlad Buslov +Link: https://lore.kernel.org/r/20230612093426.2867183-1-vladbu@nvidia.com +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + net/sched/cls_api.c | 12 +++++++----- + 1 file changed, 7 insertions(+), 5 deletions(-) + +diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c +index 0dbfc37d97991..445ab1b0537da 100644 +--- a/net/sched/cls_api.c ++++ b/net/sched/cls_api.c +@@ -552,8 +552,8 @@ static void __tcf_chain_put(struct tcf_chain *chain, bool by_act, + { + struct tcf_block *block = chain->block; + const struct tcf_proto_ops *tmplt_ops; ++ unsigned int refcnt, non_act_refcnt; + bool free_block = false; +- unsigned int refcnt; + void *tmplt_priv; + + mutex_lock(&block->lock); +@@ -573,13 +573,15 @@ static void __tcf_chain_put(struct tcf_chain *chain, bool by_act, + * save these to temporary variables. + */ + refcnt = --chain->refcnt; ++ non_act_refcnt = refcnt - chain->action_refcnt; + tmplt_ops = chain->tmplt_ops; + tmplt_priv = chain->tmplt_priv; + +- /* The last dropped non-action reference will trigger notification. */ +- if (refcnt - chain->action_refcnt == 0 && !by_act) { +- tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index, +- block, NULL, 0, 0, false); ++ if (non_act_refcnt == chain->explicitly_created && !by_act) { ++ if (non_act_refcnt == 0) ++ tc_chain_notify_delete(tmplt_ops, tmplt_priv, ++ chain->index, block, NULL, 0, 0, ++ false); + /* Last reference to chain, no need to lock. */ + chain->flushing = false; + } +-- +2.39.2 + diff --git a/queue-6.1/net-sched-cls_u32-fix-reference-counter-leak-leading.patch b/queue-6.1/net-sched-cls_u32-fix-reference-counter-leak-leading.patch new file mode 100644 index 00000000000..00ef4477542 --- /dev/null +++ b/queue-6.1/net-sched-cls_u32-fix-reference-counter-leak-leading.patch @@ -0,0 +1,78 @@ +From a8755383aa16ee1a62442fd696cd68bf4d737907 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 8 Jun 2023 08:29:03 +0100 +Subject: net/sched: cls_u32: Fix reference counter leak leading to overflow + +From: Lee Jones + +[ Upstream commit 04c55383fa5689357bcdd2c8036725a55ed632bc ] + +In the event of a failure in tcf_change_indev(), u32_set_parms() will +immediately return without decrementing the recently incremented +reference counter. If this happens enough times, the counter will +rollover and the reference freed, leading to a double free which can be +used to do 'bad things'. + +In order to prevent this, move the point of possible failure above the +point where the reference counter is incremented. Also save any +meaningful return values to be applied to the return data at the +appropriate point in time. + +This issue was caught with KASAN. + +Fixes: 705c7091262d ("net: sched: cls_u32: no need to call tcf_exts_change for newly allocated struct") +Suggested-by: Eric Dumazet +Signed-off-by: Lee Jones +Reviewed-by: Eric Dumazet +Acked-by: Jamal Hadi Salim +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + net/sched/cls_u32.c | 18 ++++++++++-------- + 1 file changed, 10 insertions(+), 8 deletions(-) + +diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c +index 34d25f7a0687a..a3477537c102b 100644 +--- a/net/sched/cls_u32.c ++++ b/net/sched/cls_u32.c +@@ -716,13 +716,19 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp, + struct nlattr *est, u32 flags, u32 fl_flags, + struct netlink_ext_ack *extack) + { +- int err; ++ int err, ifindex = -1; + + err = tcf_exts_validate_ex(net, tp, tb, est, &n->exts, flags, + fl_flags, extack); + if (err < 0) + return err; + ++ if (tb[TCA_U32_INDEV]) { ++ ifindex = tcf_change_indev(net, tb[TCA_U32_INDEV], extack); ++ if (ifindex < 0) ++ return -EINVAL; ++ } ++ + if (tb[TCA_U32_LINK]) { + u32 handle = nla_get_u32(tb[TCA_U32_LINK]); + struct tc_u_hnode *ht_down = NULL, *ht_old; +@@ -757,13 +763,9 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp, + tcf_bind_filter(tp, &n->res, base); + } + +- if (tb[TCA_U32_INDEV]) { +- int ret; +- ret = tcf_change_indev(net, tb[TCA_U32_INDEV], extack); +- if (ret < 0) +- return -EINVAL; +- n->ifindex = ret; +- } ++ if (ifindex >= 0) ++ n->ifindex = ifindex; ++ + return 0; + } + +-- +2.39.2 + diff --git a/queue-6.1/net-sched-qdisc_destroy-old-ingress-and-clsact-qdisc.patch b/queue-6.1/net-sched-qdisc_destroy-old-ingress-and-clsact-qdisc.patch new file mode 100644 index 00000000000..b0d81ae0aad --- /dev/null +++ b/queue-6.1/net-sched-qdisc_destroy-old-ingress-and-clsact-qdisc.patch @@ -0,0 +1,246 @@ +From 768422c111725ba8a2ca17553a4fd15955a17a2b Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sat, 10 Jun 2023 20:30:25 -0700 +Subject: net/sched: qdisc_destroy() old ingress and clsact Qdiscs before + grafting + +From: Peilin Ye + +[ Upstream commit 84ad0af0bccd3691cb951c2974c5cb2c10594d4a ] + +mini_Qdisc_pair::p_miniq is a double pointer to mini_Qdisc, initialized +in ingress_init() to point to net_device::miniq_ingress. ingress Qdiscs +access this per-net_device pointer in mini_qdisc_pair_swap(). Similar +for clsact Qdiscs and miniq_egress. + +Unfortunately, after introducing RTNL-unlocked RTM_{NEW,DEL,GET}TFILTER +requests (thanks Hillf Danton for the hint), when replacing ingress or +clsact Qdiscs, for example, the old Qdisc ("@old") could access the same +miniq_{in,e}gress pointer(s) concurrently with the new Qdisc ("@new"), +causing race conditions [1] including a use-after-free bug in +mini_qdisc_pair_swap() reported by syzbot: + + BUG: KASAN: slab-use-after-free in mini_qdisc_pair_swap+0x1c2/0x1f0 net/sched/sch_generic.c:1573 + Write of size 8 at addr ffff888045b31308 by task syz-executor690/14901 +... + Call Trace: + + __dump_stack lib/dump_stack.c:88 [inline] + dump_stack_lvl+0xd9/0x150 lib/dump_stack.c:106 + print_address_description.constprop.0+0x2c/0x3c0 mm/kasan/report.c:319 + print_report mm/kasan/report.c:430 [inline] + kasan_report+0x11c/0x130 mm/kasan/report.c:536 + mini_qdisc_pair_swap+0x1c2/0x1f0 net/sched/sch_generic.c:1573 + tcf_chain_head_change_item net/sched/cls_api.c:495 [inline] + tcf_chain0_head_change.isra.0+0xb9/0x120 net/sched/cls_api.c:509 + tcf_chain_tp_insert net/sched/cls_api.c:1826 [inline] + tcf_chain_tp_insert_unique net/sched/cls_api.c:1875 [inline] + tc_new_tfilter+0x1de6/0x2290 net/sched/cls_api.c:2266 +... + +@old and @new should not affect each other. In other words, @old should +never modify miniq_{in,e}gress after @new, and @new should not update +@old's RCU state. 
+ +Fixing without changing sch_api.c turned out to be difficult (please +refer to Closes: for discussions). Instead, make sure @new's first call +always happen after @old's last call (in {ingress,clsact}_destroy()) has +finished: + +In qdisc_graft(), return -EBUSY if @old has any ongoing filter requests, +and call qdisc_destroy() for @old before grafting @new. + +Introduce qdisc_refcount_dec_if_one() as the counterpart of +qdisc_refcount_inc_nz() used for filter requests. Introduce a +non-static version of qdisc_destroy() that does a TCQ_F_BUILTIN check, +just like qdisc_put() etc. + +Depends on patch "net/sched: Refactor qdisc_graft() for ingress and +clsact Qdiscs". + +[1] To illustrate, the syzkaller reproducer adds ingress Qdiscs under +TC_H_ROOT (no longer possible after commit c7cfbd115001 ("net/sched: +sch_ingress: Only create under TC_H_INGRESS")) on eth0 that has 8 +transmission queues: + + Thread 1 creates ingress Qdisc A (containing mini Qdisc a1 and a2), + then adds a flower filter X to A. + + Thread 2 creates another ingress Qdisc B (containing mini Qdisc b1 and + b2) to replace A, then adds a flower filter Y to B. + + Thread 1 A's refcnt Thread 2 + RTM_NEWQDISC (A, RTNL-locked) + qdisc_create(A) 1 + qdisc_graft(A) 9 + + RTM_NEWTFILTER (X, RTNL-unlocked) + __tcf_qdisc_find(A) 10 + tcf_chain0_head_change(A) + mini_qdisc_pair_swap(A) (1st) + | + | RTM_NEWQDISC (B, RTNL-locked) + RCU sync 2 qdisc_graft(B) + | 1 notify_and_destroy(A) + | + tcf_block_release(A) 0 RTM_NEWTFILTER (Y, RTNL-unlocked) + qdisc_destroy(A) tcf_chain0_head_change(B) + tcf_chain0_head_change_cb_del(A) mini_qdisc_pair_swap(B) (2nd) + mini_qdisc_pair_swap(A) (3rd) | + ... ... + +Here, B calls mini_qdisc_pair_swap(), pointing eth0->miniq_ingress to +its mini Qdisc, b1. Then, A calls mini_qdisc_pair_swap() again during +ingress_destroy(), setting eth0->miniq_ingress to NULL, so ingress +packets on eth0 will not find filter Y in sch_handle_ingress(). + +This is just one of the possible consequences of concurrently accessing +miniq_{in,e}gress pointers. + +Fixes: 7a096d579e8e ("net: sched: ingress: set 'unlocked' flag for Qdisc ops") +Fixes: 87f373921c4e ("net: sched: ingress: set 'unlocked' flag for clsact Qdisc ops") +Reported-by: syzbot+b53a9c0d1ea4ad62da8b@syzkaller.appspotmail.com +Closes: https://lore.kernel.org/r/0000000000006cf87705f79acf1a@google.com/ +Cc: Hillf Danton +Cc: Vlad Buslov +Signed-off-by: Peilin Ye +Acked-by: Jamal Hadi Salim +Signed-off-by: Paolo Abeni +Signed-off-by: Sasha Levin +--- + include/net/sch_generic.h | 8 ++++++++ + net/sched/sch_api.c | 28 +++++++++++++++++++++++----- + net/sched/sch_generic.c | 14 +++++++++++--- + 3 files changed, 42 insertions(+), 8 deletions(-) + +diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h +index 989eb972fcaec..b3e3128402961 100644 +--- a/include/net/sch_generic.h ++++ b/include/net/sch_generic.h +@@ -137,6 +137,13 @@ static inline void qdisc_refcount_inc(struct Qdisc *qdisc) + refcount_inc(&qdisc->refcnt); + } + ++static inline bool qdisc_refcount_dec_if_one(struct Qdisc *qdisc) ++{ ++ if (qdisc->flags & TCQ_F_BUILTIN) ++ return true; ++ return refcount_dec_if_one(&qdisc->refcnt); ++} ++ + /* Intended to be used by unlocked users, when concurrent qdisc release is + * possible. 
+ */ +@@ -650,6 +657,7 @@ void dev_deactivate_many(struct list_head *head); + struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue, + struct Qdisc *qdisc); + void qdisc_reset(struct Qdisc *qdisc); ++void qdisc_destroy(struct Qdisc *qdisc); + void qdisc_put(struct Qdisc *qdisc); + void qdisc_put_unlocked(struct Qdisc *qdisc); + void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len); +diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c +index bd9f523b169a3..01d07e6a68119 100644 +--- a/net/sched/sch_api.c ++++ b/net/sched/sch_api.c +@@ -1083,10 +1083,22 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, + if ((q && q->flags & TCQ_F_INGRESS) || + (new && new->flags & TCQ_F_INGRESS)) { + ingress = 1; +- if (!dev_ingress_queue(dev)) { ++ dev_queue = dev_ingress_queue(dev); ++ if (!dev_queue) { + NL_SET_ERR_MSG(extack, "Device does not have an ingress queue"); + return -ENOENT; + } ++ ++ q = rtnl_dereference(dev_queue->qdisc_sleeping); ++ ++ /* This is the counterpart of that qdisc_refcount_inc_nz() call in ++ * __tcf_qdisc_find() for filter requests. ++ */ ++ if (!qdisc_refcount_dec_if_one(q)) { ++ NL_SET_ERR_MSG(extack, ++ "Current ingress or clsact Qdisc has ongoing filter requests"); ++ return -EBUSY; ++ } + } + + if (dev->flags & IFF_UP) +@@ -1107,8 +1119,16 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, + qdisc_put(old); + } + } else { +- dev_queue = dev_ingress_queue(dev); +- old = dev_graft_qdisc(dev_queue, new); ++ old = dev_graft_qdisc(dev_queue, NULL); ++ ++ /* {ingress,clsact}_destroy() @old before grafting @new to avoid ++ * unprotected concurrent accesses to net_device::miniq_{in,e}gress ++ * pointer(s) in mini_qdisc_pair_swap(). ++ */ ++ qdisc_notify(net, skb, n, classid, old, new, extack); ++ qdisc_destroy(old); ++ ++ dev_graft_qdisc(dev_queue, new); + } + + skip: +@@ -1122,8 +1142,6 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, + + if (new && new->ops->attach) + new->ops->attach(new); +- } else { +- notify_and_destroy(net, skb, n, classid, old, new, extack); + } + + if (dev->flags & IFF_UP) +diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c +index ee43e8ac039ed..a5693e25b2482 100644 +--- a/net/sched/sch_generic.c ++++ b/net/sched/sch_generic.c +@@ -1046,7 +1046,7 @@ static void qdisc_free_cb(struct rcu_head *head) + qdisc_free(q); + } + +-static void qdisc_destroy(struct Qdisc *qdisc) ++static void __qdisc_destroy(struct Qdisc *qdisc) + { + const struct Qdisc_ops *ops = qdisc->ops; + +@@ -1070,6 +1070,14 @@ static void qdisc_destroy(struct Qdisc *qdisc) + call_rcu(&qdisc->rcu, qdisc_free_cb); + } + ++void qdisc_destroy(struct Qdisc *qdisc) ++{ ++ if (qdisc->flags & TCQ_F_BUILTIN) ++ return; ++ ++ __qdisc_destroy(qdisc); ++} ++ + void qdisc_put(struct Qdisc *qdisc) + { + if (!qdisc) +@@ -1079,7 +1087,7 @@ void qdisc_put(struct Qdisc *qdisc) + !refcount_dec_and_test(&qdisc->refcnt)) + return; + +- qdisc_destroy(qdisc); ++ __qdisc_destroy(qdisc); + } + EXPORT_SYMBOL(qdisc_put); + +@@ -1094,7 +1102,7 @@ void qdisc_put_unlocked(struct Qdisc *qdisc) + !refcount_dec_and_rtnl_lock(&qdisc->refcnt)) + return; + +- qdisc_destroy(qdisc); ++ __qdisc_destroy(qdisc); + rtnl_unlock(); + } + EXPORT_SYMBOL(qdisc_put_unlocked); +-- +2.39.2 + diff --git a/queue-6.1/net-sched-refactor-qdisc_graft-for-ingress-and-clsac.patch b/queue-6.1/net-sched-refactor-qdisc_graft-for-ingress-and-clsac.patch new file mode 100644 index 00000000000..211819617d8 --- /dev/null +++ 
b/queue-6.1/net-sched-refactor-qdisc_graft-for-ingress-and-clsac.patch @@ -0,0 +1,73 @@ +From 3fd867d1900661cfae6718aecd9fddfff2c0b053 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sat, 10 Jun 2023 20:30:15 -0700 +Subject: net/sched: Refactor qdisc_graft() for ingress and clsact Qdiscs + +From: Peilin Ye + +[ Upstream commit 2d5f6a8d7aef7852a9ecc555f88c673a1c91754f ] + +Grafting ingress and clsact Qdiscs does not need a for-loop in +qdisc_graft(). Refactor it. No functional changes intended. + +Tested-by: Pedro Tammela +Acked-by: Jamal Hadi Salim +Reviewed-by: Jamal Hadi Salim +Reviewed-by: Vlad Buslov +Signed-off-by: Peilin Ye +Signed-off-by: Paolo Abeni +Stable-dep-of: 84ad0af0bccd ("net/sched: qdisc_destroy() old ingress and clsact Qdiscs before grafting") +Signed-off-by: Sasha Levin +--- + net/sched/sch_api.c | 20 ++++++++++---------- + 1 file changed, 10 insertions(+), 10 deletions(-) + +diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c +index 3907483dae624..bd9f523b169a3 100644 +--- a/net/sched/sch_api.c ++++ b/net/sched/sch_api.c +@@ -1076,12 +1076,12 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, + + if (parent == NULL) { + unsigned int i, num_q, ingress; ++ struct netdev_queue *dev_queue; + + ingress = 0; + num_q = dev->num_tx_queues; + if ((q && q->flags & TCQ_F_INGRESS) || + (new && new->flags & TCQ_F_INGRESS)) { +- num_q = 1; + ingress = 1; + if (!dev_ingress_queue(dev)) { + NL_SET_ERR_MSG(extack, "Device does not have an ingress queue"); +@@ -1097,18 +1097,18 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, + if (new && new->ops->attach && !ingress) + goto skip; + +- for (i = 0; i < num_q; i++) { +- struct netdev_queue *dev_queue = dev_ingress_queue(dev); +- +- if (!ingress) ++ if (!ingress) { ++ for (i = 0; i < num_q; i++) { + dev_queue = netdev_get_tx_queue(dev, i); ++ old = dev_graft_qdisc(dev_queue, new); + +- old = dev_graft_qdisc(dev_queue, new); +- if (new && i > 0) +- qdisc_refcount_inc(new); +- +- if (!ingress) ++ if (new && i > 0) ++ qdisc_refcount_inc(new); + qdisc_put(old); ++ } ++ } else { ++ dev_queue = dev_ingress_queue(dev); ++ old = dev_graft_qdisc(dev_queue, new); + } + + skip: +-- +2.39.2 + diff --git a/queue-6.1/net-sched-simplify-tcf_pedit_act.patch b/queue-6.1/net-sched-simplify-tcf_pedit_act.patch new file mode 100644 index 00000000000..6f8ece0331d --- /dev/null +++ b/queue-6.1/net-sched-simplify-tcf_pedit_act.patch @@ -0,0 +1,194 @@ +From 05868542d0cf828aaf35a45731856d4cd61e1bc7 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 31 Jan 2023 16:05:12 -0300 +Subject: net/sched: simplify tcf_pedit_act + +From: Pedro Tammela + +[ Upstream commit 95b069382351826c0ae37938070aa82dbeaf288d ] + +Remove the check for a negative number of keys as +this cannot ever happen + +Reviewed-by: Jamal Hadi Salim +Reviewed-by: Simon Horman +Signed-off-by: Pedro Tammela +Signed-off-by: Paolo Abeni +Stable-dep-of: 6c02568fd1ae ("net/sched: act_pedit: Parse L3 Header for L4 offset") +Signed-off-by: Sasha Levin +--- + net/sched/act_pedit.c | 137 +++++++++++++++++++++--------------------- + 1 file changed, 67 insertions(+), 70 deletions(-) + +diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c +index 238759c3192e8..19f6b3fa6a557 100644 +--- a/net/sched/act_pedit.c ++++ b/net/sched/act_pedit.c +@@ -348,8 +348,12 @@ static int pedit_skb_hdr_offset(struct sk_buff *skb, + static int tcf_pedit_act(struct sk_buff *skb, const struct tc_action *a, + struct tcf_result *res) + { ++ enum pedit_header_type htype = 
TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK; ++ enum pedit_cmd cmd = TCA_PEDIT_KEY_EX_CMD_SET; + struct tcf_pedit *p = to_pedit(a); ++ struct tcf_pedit_key_ex *tkey_ex; + struct tcf_pedit_parms *parms; ++ struct tc_pedit_key *tkey; + u32 max_offset; + int i; + +@@ -365,88 +369,81 @@ static int tcf_pedit_act(struct sk_buff *skb, const struct tc_action *a, + tcf_lastuse_update(&p->tcf_tm); + tcf_action_update_bstats(&p->common, skb); + +- if (parms->tcfp_nkeys > 0) { +- struct tc_pedit_key *tkey = parms->tcfp_keys; +- struct tcf_pedit_key_ex *tkey_ex = parms->tcfp_keys_ex; +- enum pedit_header_type htype = +- TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK; +- enum pedit_cmd cmd = TCA_PEDIT_KEY_EX_CMD_SET; +- +- for (i = parms->tcfp_nkeys; i > 0; i--, tkey++) { +- u32 *ptr, hdata; +- int offset = tkey->off; +- int hoffset; +- u32 val; +- int rc; +- +- if (tkey_ex) { +- htype = tkey_ex->htype; +- cmd = tkey_ex->cmd; +- +- tkey_ex++; +- } ++ tkey = parms->tcfp_keys; ++ tkey_ex = parms->tcfp_keys_ex; + +- rc = pedit_skb_hdr_offset(skb, htype, &hoffset); +- if (rc) { +- pr_info("tc action pedit bad header type specified (0x%x)\n", +- htype); +- goto bad; +- } ++ for (i = parms->tcfp_nkeys; i > 0; i--, tkey++) { ++ int offset = tkey->off; ++ u32 *ptr, hdata; ++ int hoffset; ++ u32 val; ++ int rc; + +- if (tkey->offmask) { +- u8 *d, _d; +- +- if (!offset_valid(skb, hoffset + tkey->at)) { +- pr_info("tc action pedit 'at' offset %d out of bounds\n", +- hoffset + tkey->at); +- goto bad; +- } +- d = skb_header_pointer(skb, hoffset + tkey->at, +- sizeof(_d), &_d); +- if (!d) +- goto bad; +- offset += (*d & tkey->offmask) >> tkey->shift; +- } ++ if (tkey_ex) { ++ htype = tkey_ex->htype; ++ cmd = tkey_ex->cmd; + +- if (offset % 4) { +- pr_info("tc action pedit offset must be on 32 bit boundaries\n"); +- goto bad; +- } ++ tkey_ex++; ++ } + +- if (!offset_valid(skb, hoffset + offset)) { +- pr_info("tc action pedit offset %d out of bounds\n", +- hoffset + offset); +- goto bad; +- } ++ rc = pedit_skb_hdr_offset(skb, htype, &hoffset); ++ if (rc) { ++ pr_info("tc action pedit bad header type specified (0x%x)\n", ++ htype); ++ goto bad; ++ } + +- ptr = skb_header_pointer(skb, hoffset + offset, +- sizeof(hdata), &hdata); +- if (!ptr) +- goto bad; +- /* just do it, baby */ +- switch (cmd) { +- case TCA_PEDIT_KEY_EX_CMD_SET: +- val = tkey->val; +- break; +- case TCA_PEDIT_KEY_EX_CMD_ADD: +- val = (*ptr + tkey->val) & ~tkey->mask; +- break; +- default: +- pr_info("tc action pedit bad command (%d)\n", +- cmd); ++ if (tkey->offmask) { ++ u8 *d, _d; ++ ++ if (!offset_valid(skb, hoffset + tkey->at)) { ++ pr_info("tc action pedit 'at' offset %d out of bounds\n", ++ hoffset + tkey->at); + goto bad; + } ++ d = skb_header_pointer(skb, hoffset + tkey->at, ++ sizeof(_d), &_d); ++ if (!d) ++ goto bad; ++ offset += (*d & tkey->offmask) >> tkey->shift; ++ } + +- *ptr = ((*ptr & tkey->mask) ^ val); +- if (ptr == &hdata) +- skb_store_bits(skb, hoffset + offset, ptr, 4); ++ if (offset % 4) { ++ pr_info("tc action pedit offset must be on 32 bit boundaries\n"); ++ goto bad; + } + +- goto done; +- } else { +- WARN(1, "pedit BUG: index %d\n", p->tcf_index); ++ if (!offset_valid(skb, hoffset + offset)) { ++ pr_info("tc action pedit offset %d out of bounds\n", ++ hoffset + offset); ++ goto bad; ++ } ++ ++ ptr = skb_header_pointer(skb, hoffset + offset, ++ sizeof(hdata), &hdata); ++ if (!ptr) ++ goto bad; ++ /* just do it, baby */ ++ switch (cmd) { ++ case TCA_PEDIT_KEY_EX_CMD_SET: ++ val = tkey->val; ++ break; ++ case TCA_PEDIT_KEY_EX_CMD_ADD: ++ val = (*ptr + 
tkey->val) & ~tkey->mask; ++ break; ++ default: ++ pr_info("tc action pedit bad command (%d)\n", ++ cmd); ++ goto bad; ++ } ++ ++ *ptr = ((*ptr & tkey->mask) ^ val); ++ if (ptr == &hdata) ++ skb_store_bits(skb, hoffset + offset, ptr, 4); + } + ++ goto done; ++ + bad: + spin_lock(&p->tcf_lock); + p->tcf_qstats.overlimits++; +-- +2.39.2 + diff --git a/queue-6.1/net-tipc-resize-nlattr-array-to-correct-size.patch b/queue-6.1/net-tipc-resize-nlattr-array-to-correct-size.patch new file mode 100644 index 00000000000..54acd0ac320 --- /dev/null +++ b/queue-6.1/net-tipc-resize-nlattr-array-to-correct-size.patch @@ -0,0 +1,51 @@ +From 097a3a9b8b371efdd9fdaa06feac399043c4e8ab Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 14 Jun 2023 20:06:04 +0800 +Subject: net: tipc: resize nlattr array to correct size + +From: Lin Ma + +[ Upstream commit 44194cb1b6045dea33ae9a0d54fb7e7cd93a2e09 ] + +According to nla_parse_nested_deprecated(), the tb[] is supposed to the +destination array with maxtype+1 elements. In current +tipc_nl_media_get() and __tipc_nl_media_set(), a larger array is used +which is unnecessary. This patch resize them to a proper size. + +Fixes: 1e55417d8fc6 ("tipc: add media set to new netlink api") +Fixes: 46f15c6794fb ("tipc: add media get/dump to new netlink api") +Signed-off-by: Lin Ma +Reviewed-by: Florian Westphal +Reviewed-by: Tung Nguyen +Link: https://lore.kernel.org/r/20230614120604.1196377-1-linma@zju.edu.cn +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + net/tipc/bearer.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c +index 53881406e2006..cdcd2731860ba 100644 +--- a/net/tipc/bearer.c ++++ b/net/tipc/bearer.c +@@ -1258,7 +1258,7 @@ int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info) + struct tipc_nl_msg msg; + struct tipc_media *media; + struct sk_buff *rep; +- struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1]; ++ struct nlattr *attrs[TIPC_NLA_MEDIA_MAX + 1]; + + if (!info->attrs[TIPC_NLA_MEDIA]) + return -EINVAL; +@@ -1307,7 +1307,7 @@ int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info) + int err; + char *name; + struct tipc_media *m; +- struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1]; ++ struct nlattr *attrs[TIPC_NLA_MEDIA_MAX + 1]; + + if (!info->attrs[TIPC_NLA_MEDIA]) + return -EINVAL; +-- +2.39.2 + diff --git a/queue-6.1/netfilter-nf_tables-incorrect-error-path-handling-wi.patch b/queue-6.1/netfilter-nf_tables-incorrect-error-path-handling-wi.patch new file mode 100644 index 00000000000..6d449829177 --- /dev/null +++ b/queue-6.1/netfilter-nf_tables-incorrect-error-path-handling-wi.patch @@ -0,0 +1,76 @@ +From 470ac9c9452927ee7da8f34c3dd604f2397bb225 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 8 Jun 2023 02:32:02 +0200 +Subject: netfilter: nf_tables: incorrect error path handling with + NFT_MSG_NEWRULE + +From: Pablo Neira Ayuso + +[ Upstream commit 1240eb93f0616b21c675416516ff3d74798fdc97 ] + +In case of error when adding a new rule that refers to an anonymous set, +deactivate expressions via NFT_TRANS_PREPARE state, not NFT_TRANS_RELEASE. +Thus, the lookup expression marks anonymous sets as inactive in the next +generation to ensure it is not reachable in this transaction anymore and +decrement the set refcount as introduced by c1592a89942e ("netfilter: +nf_tables: deactivate anonymous set from preparation phase"). The abort +step takes care of undoing the anonymous set. 
+ +This is also consistent with rule deletion, where NFT_TRANS_PREPARE is +used. Note that this error path is exercised in the preparation step of +the commit protocol. This patch replaces nf_tables_rule_release() by the +deactivate and destroy calls, this time with NFT_TRANS_PREPARE. + +Due to this incorrect error handling, it is possible to access a +dangling pointer to the anonymous set that remains in the transaction +list. + +[1009.379054] BUG: KASAN: use-after-free in nft_set_lookup_global+0x147/0x1a0 [nf_tables] +[1009.379106] Read of size 8 at addr ffff88816c4c8020 by task nft-rule-add/137110 +[1009.379116] CPU: 7 PID: 137110 Comm: nft-rule-add Not tainted 6.4.0-rc4+ #256 +[1009.379128] Call Trace: +[1009.379132] +[1009.379135] dump_stack_lvl+0x33/0x50 +[1009.379146] ? nft_set_lookup_global+0x147/0x1a0 [nf_tables] +[1009.379191] print_address_description.constprop.0+0x27/0x300 +[1009.379201] kasan_report+0x107/0x120 +[1009.379210] ? nft_set_lookup_global+0x147/0x1a0 [nf_tables] +[1009.379255] nft_set_lookup_global+0x147/0x1a0 [nf_tables] +[1009.379302] nft_lookup_init+0xa5/0x270 [nf_tables] +[1009.379350] nf_tables_newrule+0x698/0xe50 [nf_tables] +[1009.379397] ? nf_tables_rule_release+0xe0/0xe0 [nf_tables] +[1009.379441] ? kasan_unpoison+0x23/0x50 +[1009.379450] nfnetlink_rcv_batch+0x97c/0xd90 [nfnetlink] +[1009.379470] ? nfnetlink_rcv_msg+0x480/0x480 [nfnetlink] +[1009.379485] ? __alloc_skb+0xb8/0x1e0 +[1009.379493] ? __alloc_skb+0xb8/0x1e0 +[1009.379502] ? entry_SYSCALL_64_after_hwframe+0x46/0xb0 +[1009.379509] ? unwind_get_return_address+0x2a/0x40 +[1009.379517] ? write_profile+0xc0/0xc0 +[1009.379524] ? avc_lookup+0x8f/0xc0 +[1009.379532] ? __rcu_read_unlock+0x43/0x60 + +Fixes: 958bee14d071 ("netfilter: nf_tables: use new transaction infrastructure to handle sets") +Signed-off-by: Pablo Neira Ayuso +Signed-off-by: Sasha Levin +--- + net/netfilter/nf_tables_api.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c +index db84d607a413e..13d4913266b4d 100644 +--- a/net/netfilter/nf_tables_api.c ++++ b/net/netfilter/nf_tables_api.c +@@ -3669,7 +3669,8 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info, + if (flow) + nft_flow_rule_destroy(flow); + err_release_rule: +- nf_tables_rule_release(&ctx, rule); ++ nft_rule_expr_deactivate(&ctx, rule, NFT_TRANS_PREPARE); ++ nf_tables_rule_destroy(&ctx, rule); + err_release_expr: + for (i = 0; i < n; i++) { + if (expr_info[i].ops) { +-- +2.39.2 + diff --git a/queue-6.1/netfilter-nf_tables-integrate-pipapo-into-commit-pro.patch b/queue-6.1/netfilter-nf_tables-integrate-pipapo-into-commit-pro.patch new file mode 100644 index 00000000000..6cd49e14051 --- /dev/null +++ b/queue-6.1/netfilter-nf_tables-integrate-pipapo-into-commit-pro.patch @@ -0,0 +1,316 @@ +From 6eee553d193b940828d9aeabc473d51f8a6167c0 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 7 Jun 2023 11:55:42 +0200 +Subject: netfilter: nf_tables: integrate pipapo into commit protocol + +From: Pablo Neira Ayuso + +[ Upstream commit 212ed75dc5fb9d1423b3942c8f872a868cda3466 ] + +The pipapo set backend follows copy-on-update approach, maintaining one +clone of the existing datastructure that is being updated. The clone +and current datastructures are swapped via rcu from the commit step. + +The existing integration with the commit protocol is flawed because +there is no operation to clean up the clone if the transaction is +aborted. 
Moreover, the datastructure swap happens on set element +activation. + +This patch adds two new operations for sets: commit and abort, these new +operations are invoked from the commit and abort steps, after the +transactions have been digested, and it updates the pipapo set backend +to use it. + +This patch adds a new ->pending_update field to sets to maintain a list +of sets that require this new commit and abort operations. + +Fixes: 3c4287f62044 ("nf_tables: Add set type for arbitrary concatenation of ranges") +Signed-off-by: Pablo Neira Ayuso +Signed-off-by: Sasha Levin +--- + include/net/netfilter/nf_tables.h | 4 ++- + net/netfilter/nf_tables_api.c | 56 +++++++++++++++++++++++++++++++ + net/netfilter/nft_set_pipapo.c | 55 +++++++++++++++++++++--------- + 3 files changed, 99 insertions(+), 16 deletions(-) + +diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h +index a1ccf1276f3ee..22e96b7e1b44a 100644 +--- a/include/net/netfilter/nf_tables.h ++++ b/include/net/netfilter/nf_tables.h +@@ -457,7 +457,8 @@ struct nft_set_ops { + const struct nft_set *set, + const struct nft_set_elem *elem, + unsigned int flags); +- ++ void (*commit)(const struct nft_set *set); ++ void (*abort)(const struct nft_set *set); + u64 (*privsize)(const struct nlattr * const nla[], + const struct nft_set_desc *desc); + bool (*estimate)(const struct nft_set_desc *desc, +@@ -552,6 +553,7 @@ struct nft_set { + u16 policy; + u16 udlen; + unsigned char *udata; ++ struct list_head pending_update; + /* runtime data below here */ + const struct nft_set_ops *ops ____cacheline_aligned; + u16 flags:14, +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c +index 437891cb8c417..db84d607a413e 100644 +--- a/net/netfilter/nf_tables_api.c ++++ b/net/netfilter/nf_tables_api.c +@@ -4730,6 +4730,7 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info, + + set->num_exprs = num_exprs; + set->handle = nf_tables_alloc_handle(table); ++ INIT_LIST_HEAD(&set->pending_update); + + err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set); + if (err < 0) +@@ -8992,10 +8993,25 @@ static void nf_tables_commit_audit_log(struct list_head *adl, u32 generation) + } + } + ++static void nft_set_commit_update(struct list_head *set_update_list) ++{ ++ struct nft_set *set, *next; ++ ++ list_for_each_entry_safe(set, next, set_update_list, pending_update) { ++ list_del_init(&set->pending_update); ++ ++ if (!set->ops->commit) ++ continue; ++ ++ set->ops->commit(set); ++ } ++} ++ + static int nf_tables_commit(struct net *net, struct sk_buff *skb) + { + struct nftables_pernet *nft_net = nft_pernet(net); + struct nft_trans *trans, *next; ++ LIST_HEAD(set_update_list); + struct nft_trans_elem *te; + struct nft_chain *chain; + struct nft_table *table; +@@ -9154,6 +9170,11 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) + nf_tables_setelem_notify(&trans->ctx, te->set, + &te->elem, + NFT_MSG_NEWSETELEM); ++ if (te->set->ops->commit && ++ list_empty(&te->set->pending_update)) { ++ list_add_tail(&te->set->pending_update, ++ &set_update_list); ++ } + nft_trans_destroy(trans); + break; + case NFT_MSG_DELSETELEM: +@@ -9167,6 +9188,11 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) + atomic_dec(&te->set->nelems); + te->set->ndeact--; + } ++ if (te->set->ops->commit && ++ list_empty(&te->set->pending_update)) { ++ list_add_tail(&te->set->pending_update, ++ &set_update_list); ++ } + break; + case NFT_MSG_NEWOBJ: + if (nft_trans_obj_update(trans)) { +@@ 
-9227,6 +9253,8 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) + } + } + ++ nft_set_commit_update(&set_update_list); ++ + nft_commit_notify(net, NETLINK_CB(skb).portid); + nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN); + nf_tables_commit_audit_log(&adl, nft_net->base_seq); +@@ -9283,10 +9311,25 @@ static void nf_tables_abort_release(struct nft_trans *trans) + kfree(trans); + } + ++static void nft_set_abort_update(struct list_head *set_update_list) ++{ ++ struct nft_set *set, *next; ++ ++ list_for_each_entry_safe(set, next, set_update_list, pending_update) { ++ list_del_init(&set->pending_update); ++ ++ if (!set->ops->abort) ++ continue; ++ ++ set->ops->abort(set); ++ } ++} ++ + static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) + { + struct nftables_pernet *nft_net = nft_pernet(net); + struct nft_trans *trans, *next; ++ LIST_HEAD(set_update_list); + struct nft_trans_elem *te; + + if (action == NFNL_ABORT_VALIDATE && +@@ -9384,6 +9427,12 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) + nft_setelem_remove(net, te->set, &te->elem); + if (!nft_setelem_is_catchall(te->set, &te->elem)) + atomic_dec(&te->set->nelems); ++ ++ if (te->set->ops->abort && ++ list_empty(&te->set->pending_update)) { ++ list_add_tail(&te->set->pending_update, ++ &set_update_list); ++ } + break; + case NFT_MSG_DELSETELEM: + te = (struct nft_trans_elem *)trans->data; +@@ -9393,6 +9442,11 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) + if (!nft_setelem_is_catchall(te->set, &te->elem)) + te->set->ndeact--; + ++ if (te->set->ops->abort && ++ list_empty(&te->set->pending_update)) { ++ list_add_tail(&te->set->pending_update, ++ &set_update_list); ++ } + nft_trans_destroy(trans); + break; + case NFT_MSG_NEWOBJ: +@@ -9433,6 +9487,8 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) + } + } + ++ nft_set_abort_update(&set_update_list); ++ + synchronize_rcu(); + + list_for_each_entry_safe_reverse(trans, next, +diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c +index 06d46d1826347..15e451dc3fc46 100644 +--- a/net/netfilter/nft_set_pipapo.c ++++ b/net/netfilter/nft_set_pipapo.c +@@ -1600,17 +1600,10 @@ static void pipapo_free_fields(struct nft_pipapo_match *m) + } + } + +-/** +- * pipapo_reclaim_match - RCU callback to free fields from old matching data +- * @rcu: RCU head +- */ +-static void pipapo_reclaim_match(struct rcu_head *rcu) ++static void pipapo_free_match(struct nft_pipapo_match *m) + { +- struct nft_pipapo_match *m; + int i; + +- m = container_of(rcu, struct nft_pipapo_match, rcu); +- + for_each_possible_cpu(i) + kfree(*per_cpu_ptr(m->scratch, i)); + +@@ -1625,7 +1618,19 @@ static void pipapo_reclaim_match(struct rcu_head *rcu) + } + + /** +- * pipapo_commit() - Replace lookup data with current working copy ++ * pipapo_reclaim_match - RCU callback to free fields from old matching data ++ * @rcu: RCU head ++ */ ++static void pipapo_reclaim_match(struct rcu_head *rcu) ++{ ++ struct nft_pipapo_match *m; ++ ++ m = container_of(rcu, struct nft_pipapo_match, rcu); ++ pipapo_free_match(m); ++} ++ ++/** ++ * nft_pipapo_commit() - Replace lookup data with current working copy + * @set: nftables API set representation + * + * While at it, check if we should perform garbage collection on the working +@@ -1635,7 +1640,7 @@ static void pipapo_reclaim_match(struct rcu_head *rcu) + * We also need to create a new working copy for subsequent insertions and + * deletions. 
+ */ +-static void pipapo_commit(const struct nft_set *set) ++static void nft_pipapo_commit(const struct nft_set *set) + { + struct nft_pipapo *priv = nft_set_priv(set); + struct nft_pipapo_match *new_clone, *old; +@@ -1660,6 +1665,26 @@ static void pipapo_commit(const struct nft_set *set) + priv->clone = new_clone; + } + ++static void nft_pipapo_abort(const struct nft_set *set) ++{ ++ struct nft_pipapo *priv = nft_set_priv(set); ++ struct nft_pipapo_match *new_clone, *m; ++ ++ if (!priv->dirty) ++ return; ++ ++ m = rcu_dereference(priv->match); ++ ++ new_clone = pipapo_clone(m); ++ if (IS_ERR(new_clone)) ++ return; ++ ++ priv->dirty = false; ++ ++ pipapo_free_match(priv->clone); ++ priv->clone = new_clone; ++} ++ + /** + * nft_pipapo_activate() - Mark element reference as active given key, commit + * @net: Network namespace +@@ -1667,8 +1692,7 @@ static void pipapo_commit(const struct nft_set *set) + * @elem: nftables API element representation containing key data + * + * On insertion, elements are added to a copy of the matching data currently +- * in use for lookups, and not directly inserted into current lookup data, so +- * we'll take care of that by calling pipapo_commit() here. Both ++ * in use for lookups, and not directly inserted into current lookup data. Both + * nft_pipapo_insert() and nft_pipapo_activate() are called once for each + * element, hence we can't purpose either one as a real commit operation. + */ +@@ -1684,8 +1708,6 @@ static void nft_pipapo_activate(const struct net *net, + + nft_set_elem_change_active(net, set, &e->ext); + nft_set_elem_clear_busy(&e->ext); +- +- pipapo_commit(set); + } + + /** +@@ -1931,7 +1953,6 @@ static void nft_pipapo_remove(const struct net *net, const struct nft_set *set, + if (i == m->field_count) { + priv->dirty = true; + pipapo_drop(m, rulemap); +- pipapo_commit(set); + return; + } + +@@ -2230,6 +2251,8 @@ const struct nft_set_type nft_set_pipapo_type = { + .init = nft_pipapo_init, + .destroy = nft_pipapo_destroy, + .gc_init = nft_pipapo_gc_init, ++ .commit = nft_pipapo_commit, ++ .abort = nft_pipapo_abort, + .elemsize = offsetof(struct nft_pipapo_elem, ext), + }, + }; +@@ -2252,6 +2275,8 @@ const struct nft_set_type nft_set_pipapo_avx2_type = { + .init = nft_pipapo_init, + .destroy = nft_pipapo_destroy, + .gc_init = nft_pipapo_gc_init, ++ .commit = nft_pipapo_commit, ++ .abort = nft_pipapo_abort, + .elemsize = offsetof(struct nft_pipapo_elem, ext), + }, + }; +-- +2.39.2 + diff --git a/queue-6.1/netfilter-nfnetlink-skip-error-delivery-on-batch-in-.patch b/queue-6.1/netfilter-nfnetlink-skip-error-delivery-on-batch-in-.patch new file mode 100644 index 00000000000..88d3686ff8c --- /dev/null +++ b/queue-6.1/netfilter-nfnetlink-skip-error-delivery-on-batch-in-.patch @@ -0,0 +1,36 @@ +From 127943de4f640e3896a4226f996253520ce09a05 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 8 Jun 2023 00:19:12 +0200 +Subject: netfilter: nfnetlink: skip error delivery on batch in case of ENOMEM + +From: Pablo Neira Ayuso + +[ Upstream commit a1a64a151dae8ac3581c1cbde44b672045cb658b ] + +If caller reports ENOMEM, then stop iterating over the batch and send a +single netlink message to userspace to report OOM. 
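+
+Concretely, the check below treats a callback returning -ENOMEM the same
+as a failure to queue the per-message error: both take the existing path
+that resets the error list and reports OOM against the batch header.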
+ +Fixes: cbb8125eb40b ("netfilter: nfnetlink: deliver netlink errors on batch completion") +Signed-off-by: Pablo Neira Ayuso +Signed-off-by: Sasha Levin +--- + net/netfilter/nfnetlink.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c +index ae7146475d17a..c9fbe0f707b5f 100644 +--- a/net/netfilter/nfnetlink.c ++++ b/net/netfilter/nfnetlink.c +@@ -533,7 +533,8 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh, + * processed, this avoids that the same error is + * reported several times when replaying the batch. + */ +- if (nfnl_err_add(&err_list, nlh, err, &extack) < 0) { ++ if (err == -ENOMEM || ++ nfnl_err_add(&err_list, nlh, err, &extack) < 0) { + /* We failed to enqueue an error, reset the + * list of errors and send OOM to userspace + * pointing to the batch header. +-- +2.39.2 + diff --git a/queue-6.1/octeon_ep-add-missing-check-for-ioremap.patch b/queue-6.1/octeon_ep-add-missing-check-for-ioremap.patch new file mode 100644 index 00000000000..80f1b5fce89 --- /dev/null +++ b/queue-6.1/octeon_ep-add-missing-check-for-ioremap.patch @@ -0,0 +1,50 @@ +From b4a784210dc88d58025c3d0a4b12b0493e54b549 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 15 Jun 2023 11:34:00 +0800 +Subject: octeon_ep: Add missing check for ioremap + +From: Jiasheng Jiang + +[ Upstream commit 9a36e2d44d122fe73a2a76ba73f1d50a65cf8210 ] + +Add check for ioremap() and return the error if it fails in order to +guarantee the success of ioremap(). + +Fixes: 862cd659a6fb ("octeon_ep: Add driver framework and device initialization") +Signed-off-by: Jiasheng Jiang +Reviewed-by: Kalesh AP +Link: https://lore.kernel.org/r/20230615033400.2971-1-jiasheng@iscas.ac.cn +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/marvell/octeon_ep/octep_main.c | 7 ++++++- + 1 file changed, 6 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +index b45dd7f04e213..8979dd05e873f 100644 +--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c ++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +@@ -928,6 +928,9 @@ int octep_device_setup(struct octep_device *oct) + oct->mmio[i].hw_addr = + ioremap(pci_resource_start(oct->pdev, i * 2), + pci_resource_len(oct->pdev, i * 2)); ++ if (!oct->mmio[i].hw_addr) ++ goto unmap_prev; ++ + oct->mmio[i].mapped = 1; + } + +@@ -966,7 +969,9 @@ int octep_device_setup(struct octep_device *oct) + return 0; + + unsupported_dev: +- for (i = 0; i < OCTEP_MMIO_REGIONS; i++) ++ i = OCTEP_MMIO_REGIONS; ++unmap_prev: ++ while (i--) + iounmap(oct->mmio[i].hw_addr); + + kfree(oct->conf); +-- +2.39.2 + diff --git a/queue-6.1/octeontx2-af-fix-lbk-link-credits-on-cn10k.patch b/queue-6.1/octeontx2-af-fix-lbk-link-credits-on-cn10k.patch new file mode 100644 index 00000000000..afcb21454a3 --- /dev/null +++ b/queue-6.1/octeontx2-af-fix-lbk-link-credits-on-cn10k.patch @@ -0,0 +1,41 @@ +From 7141b6bb6311dc318754ec0e3e6d58faf55c618a Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 8 Jun 2023 17:12:01 +0530 +Subject: octeontx2-af: fix lbk link credits on cn10k + +From: Nithin Dabilpuram + +[ Upstream commit 87e12a17eef476bbf768dc3a74419ad461f36fbc ] + +Fix LBK link credits on CN10K to be same as CN9K i.e +16 * MAX_LBK_DATA_RATE instead of current scheme of +calculation based on LBK buf length / FIFO size. 
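+
+As a rough check on the constant (assuming credits are expressed in Gbps
+units), 16 * 100 Gbps = 1600, which is the value
+rvu_get_lbk_link_credits() now returns unconditionally, matching the CN9K
+scheme.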
+ +Fixes: 6e54e1c5399a ("octeontx2-af: cn10K: Add MTU configuration") +Signed-off-by: Nithin Dabilpuram +Signed-off-by: Naveen Mamindlapalli +Reviewed-by: Sridhar Samudrala +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 4 ---- + 1 file changed, 4 deletions(-) + +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +index 506c67dd6cd40..8cb2a0181fb9b 100644 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +@@ -4070,10 +4070,6 @@ int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req, + + static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs) + { +- /* CN10k supports 72KB FIFO size and max packet size of 64k */ +- if (rvu->hw->lbk_bufsize == 0x12000) +- return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16; +- + return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */ + } + +-- +2.39.2 + diff --git a/queue-6.1/octeontx2-af-fix-promiscuous-mode.patch b/queue-6.1/octeontx2-af-fix-promiscuous-mode.patch new file mode 100644 index 00000000000..b0e06fc2457 --- /dev/null +++ b/queue-6.1/octeontx2-af-fix-promiscuous-mode.patch @@ -0,0 +1,112 @@ +From cadec6c6f5f90b84a2be1fd2caa5b2d01ec3a6c5 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 8 Jun 2023 10:46:25 +0530 +Subject: octeontx2-af: Fix promiscuous mode + +From: Ratheesh Kannoth + +[ Upstream commit c0e489372a294044feea650b38f38c888eff57a4 ] + +CN10KB silicon introduced a new exact match feature, +which is used for DMAC filtering. The state of installed +DMAC filters in this exact match table is getting corrupted +when promiscuous mode is toggled. Fix this by not touching +Exact match related config when promiscuous mode is toggled. + +Fixes: 2dba9459d2c9 ("octeontx2-af: Wrapper functions for MAC addr add/del/update/reset") +Signed-off-by: Ratheesh Kannoth +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + .../marvell/octeontx2/af/rvu_npc_hash.c | 29 ++----------------- + 1 file changed, 2 insertions(+), 27 deletions(-) + +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c +index 3182adb7b9a80..3b48b635977f6 100644 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c +@@ -1168,10 +1168,8 @@ static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_i + { + struct npc_exact_table *table; + u16 *cnt, old_cnt; +- bool promisc; + + table = rvu->hw->table; +- promisc = table->promisc_mode[drop_mcam_idx]; + + cnt = &table->cnt_cmd_rules[drop_mcam_idx]; + old_cnt = *cnt; +@@ -1183,16 +1181,13 @@ static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_i + + *enable_or_disable_cam = false; + +- if (promisc) +- goto done; +- +- /* If all rules are deleted and not already in promisc mode; disable cam */ ++ /* If all rules are deleted, disable cam */ + if (!*cnt && val < 0) { + *enable_or_disable_cam = true; + goto done; + } + +- /* If rule got added and not already in promisc mode; enable cam */ ++ /* If rule got added, enable cam */ + if (!old_cnt && val > 0) { + *enable_or_disable_cam = true; + goto done; +@@ -1447,7 +1442,6 @@ int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc) + u32 drop_mcam_idx; + bool *promisc; + bool rc; +- u32 cnt; + + table = rvu->hw->table; + +@@ -1470,17 +1464,8 @@ int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc) + return LMAC_AF_ERR_INVALID_PARAM; + } + *promisc = false; +- cnt = __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 0, NULL); + mutex_unlock(&table->lock); + +- /* If no dmac filter entries configured, disable drop rule */ +- if (!cnt) +- rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false); +- else +- rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, !*promisc); +- +- dev_dbg(rvu->dev, "%s: disabled promisc mode (cgx=%d lmac=%d, cnt=%d)\n", +- __func__, cgx_id, lmac_id, cnt); + return 0; + } + +@@ -1498,7 +1483,6 @@ int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc) + u32 drop_mcam_idx; + bool *promisc; + bool rc; +- u32 cnt; + + table = rvu->hw->table; + +@@ -1521,17 +1505,8 @@ int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc) + return LMAC_AF_ERR_INVALID_PARAM; + } + *promisc = true; +- cnt = __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 0, NULL); + mutex_unlock(&table->lock); + +- /* If no dmac filter entries configured, disable drop rule */ +- if (!cnt) +- rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false); +- else +- rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, !*promisc); +- +- dev_dbg(rvu->dev, "%s: Enabled promisc mode (cgx=%d lmac=%d cnt=%d)\n", +- __func__, cgx_id, lmac_id, cnt); + return 0; + } + +-- +2.39.2 + diff --git a/queue-6.1/octeontx2-af-fixed-resource-availability-check.patch b/queue-6.1/octeontx2-af-fixed-resource-availability-check.patch new file mode 100644 index 00000000000..33cc6d8519e --- /dev/null +++ b/queue-6.1/octeontx2-af-fixed-resource-availability-check.patch @@ -0,0 +1,41 @@ +From 5da263639ac54769a12dcf82d20b9301e53c4b37 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 8 Jun 2023 17:12:00 +0530 +Subject: octeontx2-af: fixed resource availability check + +From: Satha Rao + +[ Upstream commit 4e635f9d86165e47f5440196f2ebdb258efb8341 ] 
+ +txschq_alloc response have two different arrays to store continuous +and non-continuous schedulers of each level. Requested count should +be checked for each array separately. + +Fixes: 5d9b976d4480 ("octeontx2-af: Support fixed transmit scheduler topology") +Signed-off-by: Satha Rao +Signed-off-by: Sunil Kovvuri Goutham +Signed-off-by: Naveen Mamindlapalli +Reviewed-by: Sridhar Samudrala +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +index 84f2ba53b8b68..506c67dd6cd40 100644 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +@@ -1878,7 +1878,8 @@ static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc, + free_cnt = rvu_rsrc_free_count(&txsch->schq); + } + +- if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC) ++ if (free_cnt < req_schq || req->schq[lvl] > MAX_TXSCHQ_PER_FUNC || ++ req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC) + return NIX_AF_ERR_TLX_ALLOC_FAIL; + + /* If contiguous queues are needed, check for availability */ +-- +2.39.2 + diff --git a/queue-6.1/ping6-fix-send-to-link-local-addresses-with-vrf.patch b/queue-6.1/ping6-fix-send-to-link-local-addresses-with-vrf.patch new file mode 100644 index 00000000000..0e49fccdb8a --- /dev/null +++ b/queue-6.1/ping6-fix-send-to-link-local-addresses-with-vrf.patch @@ -0,0 +1,58 @@ +From c8147d2ed42489e8f11ef145721cd7b2ae7b6ad9 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 7 Jun 2023 18:05:02 +0200 +Subject: ping6: Fix send to link-local addresses with VRF. + +From: Guillaume Nault + +[ Upstream commit 91ffd1bae1dafbb9e34b46813f5b058581d9144d ] + +Ping sockets can't send packets when they're bound to a VRF master +device and the output interface is set to a slave device. + +For example, when net.ipv4.ping_group_range is properly set, so that +ping6 can use ping sockets, the following kind of commands fails: + $ ip vrf exec red ping6 fe80::854:e7ff:fe88:4bf1%eth1 + +What happens is that sk->sk_bound_dev_if is set to the VRF master +device, but 'oif' is set to the real output device. Since both are set +but different, ping_v6_sendmsg() sees their value as inconsistent and +fails. + +Fix this by allowing 'oif' to be a slave device of ->sk_bound_dev_if. + +This fixes the following kselftest failure: + $ ./fcnal-test.sh -t ipv6_ping + [...] 
+ TEST: ping out, vrf device+address bind - ns-B IPv6 LLA [FAIL] + +Reported-by: Mirsad Todorovac +Closes: https://lore.kernel.org/netdev/b6191f90-ffca-dbca-7d06-88a9788def9c@alu.unizg.hr/ +Tested-by: Mirsad Todorovac +Fixes: 5e457896986e ("net: ipv6: Fix ping to link-local addresses.") +Signed-off-by: Guillaume Nault +Reviewed-by: David Ahern +Link: https://lore.kernel.org/r/6c8b53108816a8d0d5705ae37bdc5a8322b5e3d9.1686153846.git.gnault@redhat.com +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + net/ipv6/ping.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c +index 808983bc2ec9f..4651aaf70db4f 100644 +--- a/net/ipv6/ping.c ++++ b/net/ipv6/ping.c +@@ -114,7 +114,8 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) + addr_type = ipv6_addr_type(daddr); + if ((__ipv6_addr_needs_scope_id(addr_type) && !oif) || + (addr_type & IPV6_ADDR_MAPPED) || +- (oif && sk->sk_bound_dev_if && oif != sk->sk_bound_dev_if)) ++ (oif && sk->sk_bound_dev_if && oif != sk->sk_bound_dev_if && ++ l3mdev_master_ifindex_by_index(sock_net(sk), oif) != sk->sk_bound_dev_if)) + return -EINVAL; + + ipcm6_init_sk(&ipc6, np); +-- +2.39.2 + diff --git a/queue-6.1/rdma-cma-always-set-static-rate-to-0-for-roce.patch b/queue-6.1/rdma-cma-always-set-static-rate-to-0-for-roce.patch new file mode 100644 index 00000000000..6d079bf0d87 --- /dev/null +++ b/queue-6.1/rdma-cma-always-set-static-rate-to-0-for-roce.patch @@ -0,0 +1,84 @@ +From 67b0c9428e9873337dbffe8f325a691e44220afb Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 5 Jun 2023 13:33:23 +0300 +Subject: RDMA/cma: Always set static rate to 0 for RoCE + +From: Mark Zhang + +[ Upstream commit 58030c76cce473b6cfd630bbecb97215def0dff8 ] + +Set static rate to 0 as it should be discovered by path query and +has no meaning for RoCE. +This also avoid of using the rtnl lock and ethtool API, which is +a bottleneck when try to setup many rdma-cm connections at the same +time, especially with multiple processes. 
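+
+For reference, IB_RATE_PORT_CURRENT is the zero value of enum ib_rate, so
+assigning it below is what "set static rate to 0" means; the rate is then
+left to path query instead of being derived from the netdev link speed
+via ethtool.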
+ +Fixes: 3c86aa70bf67 ("RDMA/cm: Add RDMA CM support for IBoE devices") +Signed-off-by: Mark Zhang +Link: https://lore.kernel.org/r/f72a4f8b667b803aee9fa794069f61afb5839ce4.1685960567.git.leon@kernel.org +Signed-off-by: Leon Romanovsky +Signed-off-by: Sasha Levin +--- + drivers/infiniband/core/cma.c | 4 ++-- + include/rdma/ib_addr.h | 23 ----------------------- + 2 files changed, 2 insertions(+), 25 deletions(-) + +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c +index c6a671edba5c8..4632b1833381a 100644 +--- a/drivers/infiniband/core/cma.c ++++ b/drivers/infiniband/core/cma.c +@@ -3293,7 +3293,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) + route->path_rec->traffic_class = tos; + route->path_rec->mtu = iboe_get_mtu(ndev->mtu); + route->path_rec->rate_selector = IB_SA_EQ; +- route->path_rec->rate = iboe_get_rate(ndev); ++ route->path_rec->rate = IB_RATE_PORT_CURRENT; + dev_put(ndev); + route->path_rec->packet_life_time_selector = IB_SA_EQ; + /* In case ACK timeout is set, use this value to calculate +@@ -4955,7 +4955,7 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv, + if (!ndev) + return -ENODEV; + +- ib.rec.rate = iboe_get_rate(ndev); ++ ib.rec.rate = IB_RATE_PORT_CURRENT; + ib.rec.hop_limit = 1; + ib.rec.mtu = iboe_get_mtu(ndev->mtu); + +diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h +index d808dc3d239e8..811a0f11d0dbe 100644 +--- a/include/rdma/ib_addr.h ++++ b/include/rdma/ib_addr.h +@@ -194,29 +194,6 @@ static inline enum ib_mtu iboe_get_mtu(int mtu) + return 0; + } + +-static inline int iboe_get_rate(struct net_device *dev) +-{ +- struct ethtool_link_ksettings cmd; +- int err; +- +- rtnl_lock(); +- err = __ethtool_get_link_ksettings(dev, &cmd); +- rtnl_unlock(); +- if (err) +- return IB_RATE_PORT_CURRENT; +- +- if (cmd.base.speed >= 40000) +- return IB_RATE_40_GBPS; +- else if (cmd.base.speed >= 30000) +- return IB_RATE_30_GBPS; +- else if (cmd.base.speed >= 20000) +- return IB_RATE_20_GBPS; +- else if (cmd.base.speed >= 10000) +- return IB_RATE_10_GBPS; +- else +- return IB_RATE_PORT_CURRENT; +-} +- + static inline int rdma_link_local_addr(struct in6_addr *addr) + { + if (addr->s6_addr32[0] == htonl(0xfe800000) && +-- +2.39.2 + diff --git a/queue-6.1/rdma-mlx5-create-an-indirect-flow-table-for-steering.patch b/queue-6.1/rdma-mlx5-create-an-indirect-flow-table-for-steering.patch new file mode 100644 index 00000000000..a9959f535b9 --- /dev/null +++ b/queue-6.1/rdma-mlx5-create-an-indirect-flow-table-for-steering.patch @@ -0,0 +1,438 @@ +From c41a77239de38a490bb4dfaed3a6a26998bf0d2d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 5 Jun 2023 13:33:18 +0300 +Subject: RDMA/mlx5: Create an indirect flow table for steering anchor + +From: Mark Bloch + +[ Upstream commit e1f4a52ac171dd863fe89055e749ef5e0a0bc5ce ] + +A misbehaved user can create a steering anchor that points to a kernel +flow table and then destroy the anchor without freeing the associated +STC. This creates a problem as the kernel can't destroy the flow +table since there is still a reference to it. As a result, this can +exhaust all available flow table resources, preventing other users from +using the RDMA device. + +To prevent this problem, a solution is implemented where a special flow +table with two steering rules is created when a user creates a steering +anchor for the first time. The rules include one that drops all traffic +and another that points to the kernel flow table. 
If the steering anchor +is destroyed, only the rule pointing to the kernel's flow table is removed. +Any traffic reaching the special flow table after that is dropped. + +Since the special flow table is not destroyed when the steering anchor is +destroyed, any issues are prevented from occurring. The remaining resources +are only destroyed when the RDMA device is destroyed, which happens after +all DEVX objects are freed, including the STCs, thus mitigating the issue. + +Fixes: 0c6ab0ca9a66 ("RDMA/mlx5: Expose steering anchor to userspace") +Signed-off-by: Mark Bloch +Reviewed-by: Maor Gottlieb +Link: https://lore.kernel.org/r/b4a88a871d651fa4e8f98d552553c1cfe9ba2cd6.1685960567.git.leon@kernel.org +Signed-off-by: Leon Romanovsky +Signed-off-by: Sasha Levin +--- + drivers/infiniband/hw/mlx5/fs.c | 276 ++++++++++++++++++++++++++- + drivers/infiniband/hw/mlx5/fs.h | 16 ++ + drivers/infiniband/hw/mlx5/mlx5_ib.h | 11 ++ + 3 files changed, 296 insertions(+), 7 deletions(-) + +diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c +index 490ec308e3098..5a13d902b0641 100644 +--- a/drivers/infiniband/hw/mlx5/fs.c ++++ b/drivers/infiniband/hw/mlx5/fs.c +@@ -696,8 +696,6 @@ static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_ib_dev *dev, + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_flow_table *ft; + +- if (mlx5_ib_shared_ft_allowed(&dev->ib_dev)) +- ft_attr.uid = MLX5_SHARED_RESOURCE_UID; + ft_attr.prio = priority; + ft_attr.max_fte = num_entries; + ft_attr.flags = flags; +@@ -2026,6 +2024,237 @@ static int flow_matcher_cleanup(struct ib_uobject *uobject, + return 0; + } + ++static int steering_anchor_create_ft(struct mlx5_ib_dev *dev, ++ struct mlx5_ib_flow_prio *ft_prio, ++ enum mlx5_flow_namespace_type ns_type) ++{ ++ struct mlx5_flow_table_attr ft_attr = {}; ++ struct mlx5_flow_namespace *ns; ++ struct mlx5_flow_table *ft; ++ ++ if (ft_prio->anchor.ft) ++ return 0; ++ ++ ns = mlx5_get_flow_namespace(dev->mdev, ns_type); ++ if (!ns) ++ return -EOPNOTSUPP; ++ ++ ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED; ++ ft_attr.uid = MLX5_SHARED_RESOURCE_UID; ++ ft_attr.prio = 0; ++ ft_attr.max_fte = 2; ++ ft_attr.level = 1; ++ ++ ft = mlx5_create_flow_table(ns, &ft_attr); ++ if (IS_ERR(ft)) ++ return PTR_ERR(ft); ++ ++ ft_prio->anchor.ft = ft; ++ ++ return 0; ++} ++ ++static void steering_anchor_destroy_ft(struct mlx5_ib_flow_prio *ft_prio) ++{ ++ if (ft_prio->anchor.ft) { ++ mlx5_destroy_flow_table(ft_prio->anchor.ft); ++ ft_prio->anchor.ft = NULL; ++ } ++} ++ ++static int ++steering_anchor_create_fg_drop(struct mlx5_ib_flow_prio *ft_prio) ++{ ++ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); ++ struct mlx5_flow_group *fg; ++ void *flow_group_in; ++ int err = 0; ++ ++ if (ft_prio->anchor.fg_drop) ++ return 0; ++ ++ flow_group_in = kvzalloc(inlen, GFP_KERNEL); ++ if (!flow_group_in) ++ return -ENOMEM; ++ ++ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1); ++ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); ++ ++ fg = mlx5_create_flow_group(ft_prio->anchor.ft, flow_group_in); ++ if (IS_ERR(fg)) { ++ err = PTR_ERR(fg); ++ goto out; ++ } ++ ++ ft_prio->anchor.fg_drop = fg; ++ ++out: ++ kvfree(flow_group_in); ++ ++ return err; ++} ++ ++static void ++steering_anchor_destroy_fg_drop(struct mlx5_ib_flow_prio *ft_prio) ++{ ++ if (ft_prio->anchor.fg_drop) { ++ mlx5_destroy_flow_group(ft_prio->anchor.fg_drop); ++ ft_prio->anchor.fg_drop = NULL; ++ } ++} ++ ++static int ++steering_anchor_create_fg_goto_table(struct mlx5_ib_flow_prio 
*ft_prio) ++{ ++ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); ++ struct mlx5_flow_group *fg; ++ void *flow_group_in; ++ int err = 0; ++ ++ if (ft_prio->anchor.fg_goto_table) ++ return 0; ++ ++ flow_group_in = kvzalloc(inlen, GFP_KERNEL); ++ if (!flow_group_in) ++ return -ENOMEM; ++ ++ fg = mlx5_create_flow_group(ft_prio->anchor.ft, flow_group_in); ++ if (IS_ERR(fg)) { ++ err = PTR_ERR(fg); ++ goto out; ++ } ++ ft_prio->anchor.fg_goto_table = fg; ++ ++out: ++ kvfree(flow_group_in); ++ ++ return err; ++} ++ ++static void ++steering_anchor_destroy_fg_goto_table(struct mlx5_ib_flow_prio *ft_prio) ++{ ++ if (ft_prio->anchor.fg_goto_table) { ++ mlx5_destroy_flow_group(ft_prio->anchor.fg_goto_table); ++ ft_prio->anchor.fg_goto_table = NULL; ++ } ++} ++ ++static int ++steering_anchor_create_rule_drop(struct mlx5_ib_flow_prio *ft_prio) ++{ ++ struct mlx5_flow_act flow_act = {}; ++ struct mlx5_flow_handle *handle; ++ ++ if (ft_prio->anchor.rule_drop) ++ return 0; ++ ++ flow_act.fg = ft_prio->anchor.fg_drop; ++ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; ++ ++ handle = mlx5_add_flow_rules(ft_prio->anchor.ft, NULL, &flow_act, ++ NULL, 0); ++ if (IS_ERR(handle)) ++ return PTR_ERR(handle); ++ ++ ft_prio->anchor.rule_drop = handle; ++ ++ return 0; ++} ++ ++static void steering_anchor_destroy_rule_drop(struct mlx5_ib_flow_prio *ft_prio) ++{ ++ if (ft_prio->anchor.rule_drop) { ++ mlx5_del_flow_rules(ft_prio->anchor.rule_drop); ++ ft_prio->anchor.rule_drop = NULL; ++ } ++} ++ ++static int ++steering_anchor_create_rule_goto_table(struct mlx5_ib_flow_prio *ft_prio) ++{ ++ struct mlx5_flow_destination dest = {}; ++ struct mlx5_flow_act flow_act = {}; ++ struct mlx5_flow_handle *handle; ++ ++ if (ft_prio->anchor.rule_goto_table) ++ return 0; ++ ++ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; ++ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; ++ flow_act.fg = ft_prio->anchor.fg_goto_table; ++ ++ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; ++ dest.ft = ft_prio->flow_table; ++ ++ handle = mlx5_add_flow_rules(ft_prio->anchor.ft, NULL, &flow_act, ++ &dest, 1); ++ if (IS_ERR(handle)) ++ return PTR_ERR(handle); ++ ++ ft_prio->anchor.rule_goto_table = handle; ++ ++ return 0; ++} ++ ++static void ++steering_anchor_destroy_rule_goto_table(struct mlx5_ib_flow_prio *ft_prio) ++{ ++ if (ft_prio->anchor.rule_goto_table) { ++ mlx5_del_flow_rules(ft_prio->anchor.rule_goto_table); ++ ft_prio->anchor.rule_goto_table = NULL; ++ } ++} ++ ++static int steering_anchor_create_res(struct mlx5_ib_dev *dev, ++ struct mlx5_ib_flow_prio *ft_prio, ++ enum mlx5_flow_namespace_type ns_type) ++{ ++ int err; ++ ++ err = steering_anchor_create_ft(dev, ft_prio, ns_type); ++ if (err) ++ return err; ++ ++ err = steering_anchor_create_fg_drop(ft_prio); ++ if (err) ++ goto destroy_ft; ++ ++ err = steering_anchor_create_fg_goto_table(ft_prio); ++ if (err) ++ goto destroy_fg_drop; ++ ++ err = steering_anchor_create_rule_drop(ft_prio); ++ if (err) ++ goto destroy_fg_goto_table; ++ ++ err = steering_anchor_create_rule_goto_table(ft_prio); ++ if (err) ++ goto destroy_rule_drop; ++ ++ return 0; ++ ++destroy_rule_drop: ++ steering_anchor_destroy_rule_drop(ft_prio); ++destroy_fg_goto_table: ++ steering_anchor_destroy_fg_goto_table(ft_prio); ++destroy_fg_drop: ++ steering_anchor_destroy_fg_drop(ft_prio); ++destroy_ft: ++ steering_anchor_destroy_ft(ft_prio); ++ ++ return err; ++} ++ ++static void mlx5_steering_anchor_destroy_res(struct mlx5_ib_flow_prio *ft_prio) ++{ ++ steering_anchor_destroy_rule_goto_table(ft_prio); ++ 
steering_anchor_destroy_rule_drop(ft_prio); ++ steering_anchor_destroy_fg_goto_table(ft_prio); ++ steering_anchor_destroy_fg_drop(ft_prio); ++ steering_anchor_destroy_ft(ft_prio); ++} ++ + static int steering_anchor_cleanup(struct ib_uobject *uobject, + enum rdma_remove_reason why, + struct uverbs_attr_bundle *attrs) +@@ -2036,6 +2265,9 @@ static int steering_anchor_cleanup(struct ib_uobject *uobject, + return -EBUSY; + + mutex_lock(&obj->dev->flow_db->lock); ++ if (!--obj->ft_prio->anchor.rule_goto_table_ref) ++ steering_anchor_destroy_rule_goto_table(obj->ft_prio); ++ + put_flow_table(obj->dev, obj->ft_prio, true); + mutex_unlock(&obj->dev->flow_db->lock); + +@@ -2043,6 +2275,24 @@ static int steering_anchor_cleanup(struct ib_uobject *uobject, + return 0; + } + ++static void fs_cleanup_anchor(struct mlx5_ib_flow_prio *prio, ++ int count) ++{ ++ while (count--) ++ mlx5_steering_anchor_destroy_res(&prio[count]); ++} ++ ++void mlx5_ib_fs_cleanup_anchor(struct mlx5_ib_dev *dev) ++{ ++ fs_cleanup_anchor(dev->flow_db->prios, MLX5_IB_NUM_FLOW_FT); ++ fs_cleanup_anchor(dev->flow_db->egress_prios, MLX5_IB_NUM_FLOW_FT); ++ fs_cleanup_anchor(dev->flow_db->sniffer, MLX5_IB_NUM_SNIFFER_FTS); ++ fs_cleanup_anchor(dev->flow_db->egress, MLX5_IB_NUM_EGRESS_FTS); ++ fs_cleanup_anchor(dev->flow_db->fdb, MLX5_IB_NUM_FDB_FTS); ++ fs_cleanup_anchor(dev->flow_db->rdma_rx, MLX5_IB_NUM_FLOW_FT); ++ fs_cleanup_anchor(dev->flow_db->rdma_tx, MLX5_IB_NUM_FLOW_FT); ++} ++ + static int mlx5_ib_matcher_ns(struct uverbs_attr_bundle *attrs, + struct mlx5_ib_flow_matcher *obj) + { +@@ -2183,21 +2433,31 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_STEERING_ANCHOR_CREATE)( + return -ENOMEM; + + mutex_lock(&dev->flow_db->lock); ++ + ft_prio = _get_flow_table(dev, priority, ns_type, 0); + if (IS_ERR(ft_prio)) { +- mutex_unlock(&dev->flow_db->lock); + err = PTR_ERR(ft_prio); + goto free_obj; + } + + ft_prio->refcount++; +- ft_id = mlx5_flow_table_id(ft_prio->flow_table); +- mutex_unlock(&dev->flow_db->lock); ++ ++ if (!ft_prio->anchor.rule_goto_table_ref) { ++ err = steering_anchor_create_res(dev, ft_prio, ns_type); ++ if (err) ++ goto put_flow_table; ++ } ++ ++ ft_prio->anchor.rule_goto_table_ref++; ++ ++ ft_id = mlx5_flow_table_id(ft_prio->anchor.ft); + + err = uverbs_copy_to(attrs, MLX5_IB_ATTR_STEERING_ANCHOR_FT_ID, + &ft_id, sizeof(ft_id)); + if (err) +- goto put_flow_table; ++ goto destroy_res; ++ ++ mutex_unlock(&dev->flow_db->lock); + + uobj->object = obj; + obj->dev = dev; +@@ -2206,8 +2466,10 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_STEERING_ANCHOR_CREATE)( + + return 0; + ++destroy_res: ++ --ft_prio->anchor.rule_goto_table_ref; ++ mlx5_steering_anchor_destroy_res(ft_prio); + put_flow_table: +- mutex_lock(&dev->flow_db->lock); + put_flow_table(dev, ft_prio, true); + mutex_unlock(&dev->flow_db->lock); + free_obj: +diff --git a/drivers/infiniband/hw/mlx5/fs.h b/drivers/infiniband/hw/mlx5/fs.h +index ad320adaf3217..b9734904f5f01 100644 +--- a/drivers/infiniband/hw/mlx5/fs.h ++++ b/drivers/infiniband/hw/mlx5/fs.h +@@ -10,6 +10,7 @@ + + #if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS) + int mlx5_ib_fs_init(struct mlx5_ib_dev *dev); ++void mlx5_ib_fs_cleanup_anchor(struct mlx5_ib_dev *dev); + #else + static inline int mlx5_ib_fs_init(struct mlx5_ib_dev *dev) + { +@@ -21,9 +22,24 @@ static inline int mlx5_ib_fs_init(struct mlx5_ib_dev *dev) + mutex_init(&dev->flow_db->lock); + return 0; + } ++ ++inline void mlx5_ib_fs_cleanup_anchor(struct mlx5_ib_dev *dev) {} + #endif ++ + static inline void mlx5_ib_fs_cleanup(struct 
mlx5_ib_dev *dev) + { ++ /* When a steering anchor is created, a special flow table is also ++ * created for the user to reference. Since the user can reference it, ++ * the kernel cannot trust that when the user destroys the steering ++ * anchor, they no longer reference the flow table. ++ * ++ * To address this issue, when a user destroys a steering anchor, only ++ * the flow steering rule in the table is destroyed, but the table ++ * itself is kept to deal with the above scenario. The remaining ++ * resources are only removed when the RDMA device is destroyed, which ++ * is a safe assumption that all references are gone. ++ */ ++ mlx5_ib_fs_cleanup_anchor(dev); + kfree(dev->flow_db); + } + #endif /* _MLX5_IB_FS_H */ +diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h +index 4a7f7064bd0eb..038522bb7113e 100644 +--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h ++++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h +@@ -233,8 +233,19 @@ enum { + #define MLX5_IB_NUM_SNIFFER_FTS 2 + #define MLX5_IB_NUM_EGRESS_FTS 1 + #define MLX5_IB_NUM_FDB_FTS MLX5_BY_PASS_NUM_REGULAR_PRIOS ++ ++struct mlx5_ib_anchor { ++ struct mlx5_flow_table *ft; ++ struct mlx5_flow_group *fg_goto_table; ++ struct mlx5_flow_group *fg_drop; ++ struct mlx5_flow_handle *rule_goto_table; ++ struct mlx5_flow_handle *rule_drop; ++ unsigned int rule_goto_table_ref; ++}; ++ + struct mlx5_ib_flow_prio { + struct mlx5_flow_table *flow_table; ++ struct mlx5_ib_anchor anchor; + unsigned int refcount; + }; + +-- +2.39.2 + diff --git a/queue-6.1/rdma-mlx5-fix-affinity-assignment.patch b/queue-6.1/rdma-mlx5-fix-affinity-assignment.patch new file mode 100644 index 00000000000..2db1d6bb29f --- /dev/null +++ b/queue-6.1/rdma-mlx5-fix-affinity-assignment.patch @@ -0,0 +1,125 @@ +From 8b80d06a3619c3c2918710fa2561a3a09184a676 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 5 Jun 2023 13:33:26 +0300 +Subject: RDMA/mlx5: Fix affinity assignment + +From: Mark Bloch + +[ Upstream commit 617f5db1a626f18d5cbb7c7faf7bf8f9ea12be78 ] + +The cited commit aimed to ensure that Virtual Functions (VFs) assign a +queue affinity to a Queue Pair (QP) to distribute traffic when +the LAG master creates a hardware LAG. If the affinity was set while +the hardware was not in LAG, the firmware would ignore the affinity value. + +However, this commit unintentionally assigned an affinity to QPs on the LAG +master's VPORT even if the RDMA device was not marked as LAG-enabled. +In most cases, this was not an issue because when the hardware entered +hardware LAG configuration, the RDMA device of the LAG master would be +destroyed and a new one would be created, marked as LAG-enabled. + +The problem arises when a user configures Equal-Cost Multipath (ECMP). +In ECMP mode, traffic can be directed to different physical ports based on +the queue affinity, which is intended for use by VPORTS other than the +E-Switch manager. ECMP mode is supported only if both E-Switch managers are +in switchdev mode and the appropriate route is configured via IP. In this +configuration, the RDMA device is not destroyed, and we retain the RDMA +device that is not marked as LAG-enabled. + +To ensure correct behavior, Send Queues (SQs) opened by the E-Switch +manager through verbs should be assigned strict affinity. This means they +will only be able to communicate through the native physical port +associated with the E-Switch manager. This will prevent the firmware from +assigning affinity and will not allow the SQs to be remapped in case of +failover. 
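+
+In the change below this is expressed as: when affinity assignment is
+skipped (mlx5_ib_lag_should_assign_affinity() returns false) but the
+device is the LACP owner, strict_lag_tx_port_affinity is set on the TIS
+so the SQ stays on its native physical port.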
+ +Fixes: 802dcc7fc5ec ("RDMA/mlx5: Support TX port affinity for VF drivers in LAG mode") +Reviewed-by: Maor Gottlieb +Signed-off-by: Mark Bloch +Link: https://lore.kernel.org/r/425b05f4da840bc684b0f7e8ebf61aeb5cef09b0.1685960567.git.leon@kernel.org +Signed-off-by: Leon Romanovsky +Signed-off-by: Sasha Levin +--- + drivers/infiniband/hw/mlx5/mlx5_ib.h | 3 +++ + drivers/infiniband/hw/mlx5/qp.c | 3 +++ + drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 12 ------------ + include/linux/mlx5/driver.h | 12 ++++++++++++ + 4 files changed, 18 insertions(+), 12 deletions(-) + +diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h +index 038522bb7113e..8d94e6834e01b 100644 +--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h ++++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h +@@ -1564,6 +1564,9 @@ static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev) + MLX5_CAP_PORT_SELECTION(dev->mdev, port_select_flow_table_bypass)) + return 0; + ++ if (mlx5_lag_is_lacp_owner(dev->mdev) && !dev->lag_active) ++ return 0; ++ + return dev->lag_active || + (MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 && + MLX5_CAP_GEN(dev->mdev, lag_tx_port_affinity)); +diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c +index f7d3643b08f50..ac53ed79ca64c 100644 +--- a/drivers/infiniband/hw/mlx5/qp.c ++++ b/drivers/infiniband/hw/mlx5/qp.c +@@ -1156,6 +1156,9 @@ static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev, + + MLX5_SET(create_tis_in, in, uid, to_mpd(pd)->uid); + MLX5_SET(tisc, tisc, transport_domain, tdn); ++ if (!mlx5_ib_lag_should_assign_affinity(dev) && ++ mlx5_lag_is_lacp_owner(dev->mdev)) ++ MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1); + if (qp->flags & IB_QP_CREATE_SOURCE_QPN) + MLX5_SET(tisc, tisc, underlay_qpn, qp->underlay_qpn); + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +index 1a35b3c2a3674..0b560e97a3563 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +@@ -275,18 +275,6 @@ static inline bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev) + return pci_num_vf(dev->pdev) ? true : false; + } + +-static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev) +-{ +- /* LACP owner conditions: +- * 1) Function is physical. +- * 2) LAG is supported by FW. +- * 3) LAG is managed by driver (currently the only option). +- */ +- return MLX5_CAP_GEN(dev, vport_group_manager) && +- (MLX5_CAP_GEN(dev, num_lag_ports) > 1) && +- MLX5_CAP_GEN(dev, lag_master); +-} +- + int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev); + static inline int mlx5_rescan_drivers(struct mlx5_core_dev *dev) + { +diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h +index fff61e6d6d4de..3660ce6a93496 100644 +--- a/include/linux/mlx5/driver.h ++++ b/include/linux/mlx5/driver.h +@@ -1230,6 +1230,18 @@ static inline u16 mlx5_core_max_vfs(const struct mlx5_core_dev *dev) + return dev->priv.sriov.max_vfs; + } + ++static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev) ++{ ++ /* LACP owner conditions: ++ * 1) Function is physical. ++ * 2) LAG is supported by FW. ++ * 3) LAG is managed by driver (currently the only option). 
++ */ ++ return MLX5_CAP_GEN(dev, vport_group_manager) && ++ (MLX5_CAP_GEN(dev, num_lag_ports) > 1) && ++ MLX5_CAP_GEN(dev, lag_master); ++} ++ + static inline int mlx5_get_gid_table_len(u16 param) + { + if (param > 4) { +-- +2.39.2 + diff --git a/queue-6.1/rdma-mlx5-initiate-dropless-rq-for-raw-ethernet-func.patch b/queue-6.1/rdma-mlx5-initiate-dropless-rq-for-raw-ethernet-func.patch new file mode 100644 index 00000000000..abdab564f2f --- /dev/null +++ b/queue-6.1/rdma-mlx5-initiate-dropless-rq-for-raw-ethernet-func.patch @@ -0,0 +1,46 @@ +From 5a186f5434514f9df76b4f943288afb1715df5bb Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 5 Jun 2023 13:33:17 +0300 +Subject: RDMA/mlx5: Initiate dropless RQ for RAW Ethernet functions + +From: Maher Sanalla + +[ Upstream commit ee4d269eccfea6c17b18281bef482700d898e86f ] + +Delay drop data is initiated for PFs that have the capability of +rq_delay_drop and are in roce profile. + +However, PFs with RAW ethernet profile do not initiate delay drop data +on function load, causing kernel panic if delay drop struct members are +accessed later on in case a dropless RQ is created. + +Thus, stage the delay drop initialization as part of RAW ethernet +PF loading process. + +Fixes: b5ca15ad7e61 ("IB/mlx5: Add proper representors support") +Signed-off-by: Maher Sanalla +Reviewed-by: Maor Gottlieb +Link: https://lore.kernel.org/r/2e9d386785043d48c38711826eb910315c1de141.1685960567.git.leon@kernel.org +Signed-off-by: Leon Romanovsky +Signed-off-by: Sasha Levin +--- + drivers/infiniband/hw/mlx5/main.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c +index eaa35e1df2a85..3178df55c4d85 100644 +--- a/drivers/infiniband/hw/mlx5/main.c ++++ b/drivers/infiniband/hw/mlx5/main.c +@@ -4250,6 +4250,9 @@ const struct mlx5_ib_profile raw_eth_profile = { + STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR, + mlx5_ib_stage_post_ib_reg_umr_init, + NULL), ++ STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP, ++ mlx5_ib_stage_delay_drop_init, ++ mlx5_ib_stage_delay_drop_cleanup), + STAGE_CREATE(MLX5_IB_STAGE_RESTRACK, + mlx5_ib_restrack_init, + NULL), +-- +2.39.2 + diff --git a/queue-6.1/rdma-rtrs-fix-rxe_dealloc_pd-warning.patch b/queue-6.1/rdma-rtrs-fix-rxe_dealloc_pd-warning.patch new file mode 100644 index 00000000000..59b3218c162 --- /dev/null +++ b/queue-6.1/rdma-rtrs-fix-rxe_dealloc_pd-warning.patch @@ -0,0 +1,190 @@ +From 53aa9ce13d50c58780de0b439c0539f830d20911 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 25 Apr 2023 01:02:43 +0000 +Subject: RDMA/rtrs: Fix rxe_dealloc_pd warning + +From: Li Zhijian + +[ Upstream commit 9c29c8c7df0688f358d2df5ddd16c97c2f7292b4 ] + +In current design: +1. PD and clt_path->s.dev are shared among connections. +2. every con[n]'s cleanup phase will call destroy_con_cq_qp() +3. clt_path->s.dev will be always decreased in destroy_con_cq_qp(), and + when clt_path->s.dev become zero, it will destroy PD. +4. when con[1] failed to create, con[1] will not take clt_path->s.dev, + but it try to decreased clt_path->s.dev + +So, in case create_cm(con[0]) succeeds but create_cm(con[1]) fails, +destroy_con_cq_qp(con[1]) will be called first which will destroy the PD +while this PD is still taken by con[0]. + +Here, we refactor the error path of create_cm() and init_conns(), so that +we do the cleanup in the order they are created. + +The warning occurs when destroying RXE PD whose reference count is not +zero. 
+ + rnbd_client L597: Mapping device /dev/nvme0n1 on session client, (access_mode: rw, nr_poll_queues: 0) + ------------[ cut here ]------------ + WARNING: CPU: 0 PID: 26407 at drivers/infiniband/sw/rxe/rxe_pool.c:256 __rxe_cleanup+0x13a/0x170 [rdma_rxe] + Modules linked in: rpcrdma rdma_ucm ib_iser rnbd_client libiscsi rtrs_client scsi_transport_iscsi rtrs_core rdma_cm iw_cm ib_cm crc32_generic rdma_rxe udp_tunnel ib_uverbs ib_core kmem device_dax nd_pmem dax_pmem nd_vme crc32c_intel fuse nvme_core nfit libnvdimm dm_multipath scsi_dh_rdac scsi_dh_emc scsi_dh_alua dm_mirror dm_region_hash dm_log dm_mod + CPU: 0 PID: 26407 Comm: rnbd-client.sh Kdump: loaded Not tainted 6.2.0-rc6-roce-flush+ #53 + Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.0-0-gd239552ce722-prebuilt.qemu.org 04/01/2014 + RIP: 0010:__rxe_cleanup+0x13a/0x170 [rdma_rxe] + Code: 45 84 e4 0f 84 5a ff ff ff 48 89 ef e8 5f 18 71 f9 84 c0 75 90 be c8 00 00 00 48 89 ef e8 be 89 1f fa 85 c0 0f 85 7b ff ff ff <0f> 0b 41 bc ea ff ff ff e9 71 ff ff ff e8 84 7f 1f fa e9 d0 fe ff + RSP: 0018:ffffb09880b6f5f0 EFLAGS: 00010246 + RAX: 0000000000000000 RBX: ffff99401f15d6a8 RCX: 0000000000000000 + RDX: 0000000000000001 RSI: ffffffffbac8234b RDI: 00000000ffffffff + RBP: ffff99401f15d6d0 R08: 0000000000000001 R09: 0000000000000001 + R10: 0000000000002d82 R11: 0000000000000000 R12: 0000000000000001 + R13: ffff994101eff208 R14: ffffb09880b6f6a0 R15: 00000000fffffe00 + FS: 00007fe113904740(0000) GS:ffff99413bc00000(0000) knlGS:0000000000000000 + CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 + CR2: 00007ff6cde656c8 CR3: 000000001f108004 CR4: 00000000001706f0 + DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 + DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 + Call Trace: + + rxe_dealloc_pd+0x16/0x20 [rdma_rxe] + ib_dealloc_pd_user+0x4b/0x80 [ib_core] + rtrs_ib_dev_put+0x79/0xd0 [rtrs_core] + destroy_con_cq_qp+0x8a/0xa0 [rtrs_client] + init_path+0x1e7/0x9a0 [rtrs_client] + ? __pfx_autoremove_wake_function+0x10/0x10 + ? lock_is_held_type+0xd7/0x130 + ? rcu_read_lock_sched_held+0x43/0x80 + ? pcpu_alloc+0x3dd/0x7d0 + ? rtrs_clt_init_stats+0x18/0x40 [rtrs_client] + rtrs_clt_open+0x24f/0x5a0 [rtrs_client] + ? 
__pfx_rnbd_clt_link_ev+0x10/0x10 [rnbd_client] + rnbd_clt_map_device+0x6a5/0xe10 [rnbd_client] + +Fixes: 6a98d71daea1 ("RDMA/rtrs: client: main functionality") +Link: https://lore.kernel.org/r/1682384563-2-4-git-send-email-lizhijian@fujitsu.com +Signed-off-by: Li Zhijian +Acked-by: Jack Wang +Tested-by: Jack Wang +Signed-off-by: Jason Gunthorpe +Signed-off-by: Sasha Levin +--- + drivers/infiniband/ulp/rtrs/rtrs-clt.c | 55 +++++++++++--------------- + 1 file changed, 23 insertions(+), 32 deletions(-) + +diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c +index 730f2f1e09bbd..a67f58359de9e 100644 +--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c ++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c +@@ -2042,6 +2042,7 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id, + return 0; + } + ++/* The caller should do the cleanup in case of error */ + static int create_cm(struct rtrs_clt_con *con) + { + struct rtrs_path *s = con->c.path; +@@ -2064,14 +2065,14 @@ static int create_cm(struct rtrs_clt_con *con) + err = rdma_set_reuseaddr(cm_id, 1); + if (err != 0) { + rtrs_err(s, "Set address reuse failed, err: %d\n", err); +- goto destroy_cm; ++ return err; + } + err = rdma_resolve_addr(cm_id, (struct sockaddr *)&clt_path->s.src_addr, + (struct sockaddr *)&clt_path->s.dst_addr, + RTRS_CONNECT_TIMEOUT_MS); + if (err) { + rtrs_err(s, "Failed to resolve address, err: %d\n", err); +- goto destroy_cm; ++ return err; + } + /* + * Combine connection status and session events. This is needed +@@ -2086,29 +2087,15 @@ static int create_cm(struct rtrs_clt_con *con) + if (err == 0) + err = -ETIMEDOUT; + /* Timedout or interrupted */ +- goto errr; +- } +- if (con->cm_err < 0) { +- err = con->cm_err; +- goto errr; ++ return err; + } +- if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTING) { ++ if (con->cm_err < 0) ++ return con->cm_err; ++ if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTING) + /* Device removal */ +- err = -ECONNABORTED; +- goto errr; +- } ++ return -ECONNABORTED; + + return 0; +- +-errr: +- stop_cm(con); +- mutex_lock(&con->con_mutex); +- destroy_con_cq_qp(con); +- mutex_unlock(&con->con_mutex); +-destroy_cm: +- destroy_cm(con); +- +- return err; + } + + static void rtrs_clt_path_up(struct rtrs_clt_path *clt_path) +@@ -2336,7 +2323,7 @@ static void rtrs_clt_close_work(struct work_struct *work) + static int init_conns(struct rtrs_clt_path *clt_path) + { + unsigned int cid; +- int err; ++ int err, i; + + /* + * On every new session connections increase reconnect counter +@@ -2352,10 +2339,8 @@ static int init_conns(struct rtrs_clt_path *clt_path) + goto destroy; + + err = create_cm(to_clt_con(clt_path->s.con[cid])); +- if (err) { +- destroy_con(to_clt_con(clt_path->s.con[cid])); ++ if (err) + goto destroy; +- } + } + err = alloc_path_reqs(clt_path); + if (err) +@@ -2366,15 +2351,21 @@ static int init_conns(struct rtrs_clt_path *clt_path) + return 0; + + destroy: +- while (cid--) { +- struct rtrs_clt_con *con = to_clt_con(clt_path->s.con[cid]); ++ /* Make sure we do the cleanup in the order they are created */ ++ for (i = 0; i <= cid; i++) { ++ struct rtrs_clt_con *con; + +- stop_cm(con); ++ if (!clt_path->s.con[i]) ++ break; + +- mutex_lock(&con->con_mutex); +- destroy_con_cq_qp(con); +- mutex_unlock(&con->con_mutex); +- destroy_cm(con); ++ con = to_clt_con(clt_path->s.con[i]); ++ if (con->c.cm_id) { ++ stop_cm(con); ++ mutex_lock(&con->con_mutex); ++ destroy_con_cq_qp(con); ++ mutex_unlock(&con->con_mutex); ++ destroy_cm(con); ++ } + destroy_con(con); 
+ } + /* +-- +2.39.2 + diff --git a/queue-6.1/rdma-rtrs-fix-the-last-iu-buf-leak-in-err-path.patch b/queue-6.1/rdma-rtrs-fix-the-last-iu-buf-leak-in-err-path.patch new file mode 100644 index 00000000000..eb1fc9de89e --- /dev/null +++ b/queue-6.1/rdma-rtrs-fix-the-last-iu-buf-leak-in-err-path.patch @@ -0,0 +1,41 @@ +From 413e0e9e19e741ab210aceab69e12417db1f5396 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 25 Apr 2023 01:02:42 +0000 +Subject: RDMA/rtrs: Fix the last iu->buf leak in err path + +From: Li Zhijian + +[ Upstream commit 3bf3a7c6985c625f64e73baefdaa36f1c2045a29 ] + +The last iu->buf will leak if ib_dma_mapping_error() fails. + +Fixes: c0894b3ea69d ("RDMA/rtrs: core: lib functions shared between client and server modules") +Link: https://lore.kernel.org/r/1682384563-2-3-git-send-email-lizhijian@fujitsu.com +Signed-off-by: Li Zhijian +Acked-by: Guoqing Jiang +Acked-by: Jack Wang +Signed-off-by: Jason Gunthorpe +Signed-off-by: Sasha Levin +--- + drivers/infiniband/ulp/rtrs/rtrs.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c +index ed324b47d93ae..716ec7baddefd 100644 +--- a/drivers/infiniband/ulp/rtrs/rtrs.c ++++ b/drivers/infiniband/ulp/rtrs/rtrs.c +@@ -37,8 +37,10 @@ struct rtrs_iu *rtrs_iu_alloc(u32 iu_num, size_t size, gfp_t gfp_mask, + goto err; + + iu->dma_addr = ib_dma_map_single(dma_dev, iu->buf, size, dir); +- if (ib_dma_mapping_error(dma_dev, iu->dma_addr)) ++ if (ib_dma_mapping_error(dma_dev, iu->dma_addr)) { ++ kfree(iu->buf); + goto err; ++ } + + iu->cqe.done = done; + iu->size = size; +-- +2.39.2 + diff --git a/queue-6.1/rdma-rxe-fix-packet-length-checks.patch b/queue-6.1/rdma-rxe-fix-packet-length-checks.patch new file mode 100644 index 00000000000..8224c8285b5 --- /dev/null +++ b/queue-6.1/rdma-rxe-fix-packet-length-checks.patch @@ -0,0 +1,56 @@ +From 378ae21ef6ff7965b346d41cf50da6c529cb9237 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 17 May 2023 12:22:42 -0500 +Subject: RDMA/rxe: Fix packet length checks + +From: Bob Pearson + +[ Upstream commit 9a3763e87379c97a78b7c6c6f40720b1e877174f ] + +In rxe_net.c a received packet, from udp or loopback, is passed to +rxe_rcv() in rxe_recv.c as a udp packet. I.e. skb->data is pointing at the +udp header. But rxe_rcv() makes length checks to verify the packet is long +enough to hold the roce headers as if it were a roce +packet. I.e. skb->data pointing at the bth header. A runt packet would +appear to have 8 more bytes than it actually does which may lead to +incorrect behavior. + +This patch calls skb_pull() to adjust the skb to point at the bth header +before calling rxe_rcv() which fixes this error. 
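+
+For context, a UDP header is 8 bytes, which is where the "8 more bytes"
+above comes from; pulling sizeof(struct udphdr) before rxe_rcv() leaves
+skb->data at the BTH, as the length checks assume.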
+ +Fixes: 8700e3e7c485 ("Soft RoCE driver") +Link: https://lore.kernel.org/r/20230517172242.1806340-1-rpearsonhpe@gmail.com +Signed-off-by: Bob Pearson +Signed-off-by: Jason Gunthorpe +Signed-off-by: Sasha Levin +--- + drivers/infiniband/sw/rxe/rxe_net.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c +index 35f327b9d4b8e..65d16024b3bf6 100644 +--- a/drivers/infiniband/sw/rxe/rxe_net.c ++++ b/drivers/infiniband/sw/rxe/rxe_net.c +@@ -156,6 +156,9 @@ static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb) + pkt->mask = RXE_GRH_MASK; + pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph); + ++ /* remove udp header */ ++ skb_pull(skb, sizeof(struct udphdr)); ++ + rxe_rcv(skb); + + return 0; +@@ -397,6 +400,9 @@ static int rxe_loopback(struct sk_buff *skb, struct rxe_pkt_info *pkt) + return -EIO; + } + ++ /* remove udp header */ ++ skb_pull(skb, sizeof(struct udphdr)); ++ + rxe_rcv(skb); + + return 0; +-- +2.39.2 + diff --git a/queue-6.1/rdma-rxe-fix-ref-count-error-in-check_rkey.patch b/queue-6.1/rdma-rxe-fix-ref-count-error-in-check_rkey.patch new file mode 100644 index 00000000000..8279f0b65b8 --- /dev/null +++ b/queue-6.1/rdma-rxe-fix-ref-count-error-in-check_rkey.patch @@ -0,0 +1,48 @@ +From a5e24014555d7b614d55cb93b8abed5f88bdb143 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 17 May 2023 16:15:10 -0500 +Subject: RDMA/rxe: Fix ref count error in check_rkey() + +From: Bob Pearson + +[ Upstream commit b00683422fd79dd07c9b75efdce1660e5e19150e ] + +There is a reference count error in error path code and a potential race +in check_rkey() in rxe_resp.c. When looking up the rkey for a memory +window the reference to the mw from rxe_lookup_mw() is dropped before a +reference is taken on the mr referenced by the mw. If the mr is destroyed +immediately after the call to rxe_put(mw) the mr pointer is unprotected +and may end up pointing at freed memory. The rxe_get(mr) call should take +place before the rxe_put(mw) call. + +All errors in check_rkey() call rxe_put(mw) if mw is not NULL but it was +already called after the above. The mw pointer should be set to NULL after +the rxe_put(mw) call to prevent this from happening. 
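+
+In short, the corrected order below is rxe_get(mr), then rxe_put(mw), then
+mw = NULL, so the shared error path cannot put the mw reference a second
+time.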
+ +Fixes: cdd0b85675ae ("RDMA/rxe: Implement memory access through MWs") +Link: https://lore.kernel.org/r/20230517211509.1819998-1-rpearsonhpe@gmail.com +Signed-off-by: Bob Pearson +Signed-off-by: Jason Gunthorpe +Signed-off-by: Sasha Levin +--- + drivers/infiniband/sw/rxe/rxe_resp.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c +index 693081e813ec0..9f65c346d8432 100644 +--- a/drivers/infiniband/sw/rxe/rxe_resp.c ++++ b/drivers/infiniband/sw/rxe/rxe_resp.c +@@ -466,8 +466,9 @@ static enum resp_states check_rkey(struct rxe_qp *qp, + if (mw->access & IB_ZERO_BASED) + qp->resp.offset = mw->addr; + +- rxe_put(mw); + rxe_get(mr); ++ rxe_put(mw); ++ mw = NULL; + } else { + mr = lookup_mr(qp->pd, access, rkey, RXE_LOOKUP_REMOTE); + if (!mr) { +-- +2.39.2 + diff --git a/queue-6.1/rdma-rxe-fix-the-use-before-initialization-error-of-.patch b/queue-6.1/rdma-rxe-fix-the-use-before-initialization-error-of-.patch new file mode 100644 index 00000000000..ea41d7c3672 --- /dev/null +++ b/queue-6.1/rdma-rxe-fix-the-use-before-initialization-error-of-.patch @@ -0,0 +1,82 @@ +From 77eee5c4907f7e98885ea3eb51275214d50807a7 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 2 Jun 2023 11:54:08 +0800 +Subject: RDMA/rxe: Fix the use-before-initialization error of resp_pkts + +From: Zhu Yanjun + +[ Upstream commit 2a62b6210ce876c596086ab8fd4c8a0c3d10611a ] + +In the following: + + Call Trace: + + __dump_stack lib/dump_stack.c:88 [inline] + dump_stack_lvl+0xd9/0x150 lib/dump_stack.c:106 + assign_lock_key kernel/locking/lockdep.c:982 [inline] + register_lock_class+0xdb6/0x1120 kernel/locking/lockdep.c:1295 + __lock_acquire+0x10a/0x5df0 kernel/locking/lockdep.c:4951 + lock_acquire kernel/locking/lockdep.c:5691 [inline] + lock_acquire+0x1b1/0x520 kernel/locking/lockdep.c:5656 + __raw_spin_lock_irqsave include/linux/spinlock_api_smp.h:110 [inline] + _raw_spin_lock_irqsave+0x3d/0x60 kernel/locking/spinlock.c:162 + skb_dequeue+0x20/0x180 net/core/skbuff.c:3639 + drain_resp_pkts drivers/infiniband/sw/rxe/rxe_comp.c:555 [inline] + rxe_completer+0x250d/0x3cc0 drivers/infiniband/sw/rxe/rxe_comp.c:652 + rxe_qp_do_cleanup+0x1be/0x820 drivers/infiniband/sw/rxe/rxe_qp.c:761 + execute_in_process_context+0x3b/0x150 kernel/workqueue.c:3473 + __rxe_cleanup+0x21e/0x370 drivers/infiniband/sw/rxe/rxe_pool.c:233 + rxe_create_qp+0x3f6/0x5f0 drivers/infiniband/sw/rxe/rxe_verbs.c:583 + +This is a use-before-initialization problem. + +It happens because rxe_qp_do_cleanup is called during error unwind before +the struct has been fully initialized. + +Move the initialization of the skb earlier. 
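+
+With this change the skb queue heads are set up in rxe_qp_init_misc(), the
+intent being that req_pkts/resp_pkts are valid before any error unwind can
+reach rxe_qp_do_cleanup() and its skb_dequeue() calls.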
+ +Fixes: 8700e3e7c485 ("Soft RoCE driver") +Link: https://lore.kernel.org/r/20230602035408.741534-1-yanjun.zhu@intel.com +Reported-by: syzbot+eba589d8f49c73d356da@syzkaller.appspotmail.com +Signed-off-by: Zhu Yanjun +Signed-off-by: Jason Gunthorpe +Signed-off-by: Sasha Levin +--- + drivers/infiniband/sw/rxe/rxe_qp.c | 7 +++---- + 1 file changed, 3 insertions(+), 4 deletions(-) + +diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c +index f48a624de493d..59b2024b34ef4 100644 +--- a/drivers/infiniband/sw/rxe/rxe_qp.c ++++ b/drivers/infiniband/sw/rxe/rxe_qp.c +@@ -180,6 +180,9 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp, + spin_lock_init(&qp->rq.producer_lock); + spin_lock_init(&qp->rq.consumer_lock); + ++ skb_queue_head_init(&qp->req_pkts); ++ skb_queue_head_init(&qp->resp_pkts); ++ + atomic_set(&qp->ssn, 0); + atomic_set(&qp->skb_out, 0); + } +@@ -240,8 +243,6 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp, + qp->req.opcode = -1; + qp->comp.opcode = -1; + +- skb_queue_head_init(&qp->req_pkts); +- + rxe_init_task(&qp->req.task, qp, rxe_requester); + rxe_init_task(&qp->comp.task, qp, rxe_completer); + +@@ -288,8 +289,6 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp, + } + } + +- skb_queue_head_init(&qp->resp_pkts); +- + rxe_init_task(&qp->resp.task, qp, rxe_responder); + + qp->resp.opcode = OPCODE_NONE; +-- +2.39.2 + diff --git a/queue-6.1/rdma-rxe-removed-unused-name-from-rxe_task-struct.patch b/queue-6.1/rdma-rxe-removed-unused-name-from-rxe_task-struct.patch new file mode 100644 index 00000000000..d2fdba8aa18 --- /dev/null +++ b/queue-6.1/rdma-rxe-removed-unused-name-from-rxe_task-struct.patch @@ -0,0 +1,93 @@ +From a957a7777c9dc80a3417f6b88b487b187aa40bd0 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 21 Oct 2022 15:01:04 -0500 +Subject: RDMA/rxe: Removed unused name from rxe_task struct + +From: Bob Pearson + +[ Upstream commit de669ae8af49ceed0eed44f5b3d51dc62affc5e4 ] + +The name field in struct rxe_task is never used. This patch removes it. 
+ +Link: https://lore.kernel.org/r/20221021200118.2163-4-rpearsonhpe@gmail.com +Signed-off-by: Ian Ziemba +Signed-off-by: Bob Pearson +Signed-off-by: Jason Gunthorpe +Stable-dep-of: 2a62b6210ce8 ("RDMA/rxe: Fix the use-before-initialization error of resp_pkts") +Signed-off-by: Sasha Levin +--- + drivers/infiniband/sw/rxe/rxe_qp.c | 9 +++------ + drivers/infiniband/sw/rxe/rxe_task.c | 4 +--- + drivers/infiniband/sw/rxe/rxe_task.h | 4 +--- + 3 files changed, 5 insertions(+), 12 deletions(-) + +diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c +index 1f6e006c51c4a..f48a624de493d 100644 +--- a/drivers/infiniband/sw/rxe/rxe_qp.c ++++ b/drivers/infiniband/sw/rxe/rxe_qp.c +@@ -242,10 +242,8 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp, + + skb_queue_head_init(&qp->req_pkts); + +- rxe_init_task(&qp->req.task, qp, +- rxe_requester, "req"); +- rxe_init_task(&qp->comp.task, qp, +- rxe_completer, "comp"); ++ rxe_init_task(&qp->req.task, qp, rxe_requester); ++ rxe_init_task(&qp->comp.task, qp, rxe_completer); + + qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */ + if (init->qp_type == IB_QPT_RC) { +@@ -292,8 +290,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp, + + skb_queue_head_init(&qp->resp_pkts); + +- rxe_init_task(&qp->resp.task, qp, +- rxe_responder, "resp"); ++ rxe_init_task(&qp->resp.task, qp, rxe_responder); + + qp->resp.opcode = OPCODE_NONE; + qp->resp.msn = 0; +diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c +index ec2b7de1c4972..182d0532a8ab9 100644 +--- a/drivers/infiniband/sw/rxe/rxe_task.c ++++ b/drivers/infiniband/sw/rxe/rxe_task.c +@@ -94,12 +94,10 @@ void rxe_do_task(struct tasklet_struct *t) + task->ret = ret; + } + +-int rxe_init_task(struct rxe_task *task, +- void *arg, int (*func)(void *), char *name) ++int rxe_init_task(struct rxe_task *task, void *arg, int (*func)(void *)) + { + task->arg = arg; + task->func = func; +- snprintf(task->name, sizeof(task->name), "%s", name); + task->destroyed = false; + + tasklet_setup(&task->tasklet, rxe_do_task); +diff --git a/drivers/infiniband/sw/rxe/rxe_task.h b/drivers/infiniband/sw/rxe/rxe_task.h +index 7f612a1c68a7b..b3dfd970d1dc6 100644 +--- a/drivers/infiniband/sw/rxe/rxe_task.h ++++ b/drivers/infiniband/sw/rxe/rxe_task.h +@@ -25,7 +25,6 @@ struct rxe_task { + void *arg; + int (*func)(void *arg); + int ret; +- char name[16]; + bool destroyed; + }; + +@@ -34,8 +33,7 @@ struct rxe_task { + * arg => parameter to pass to fcn + * func => function to call until it returns != 0 + */ +-int rxe_init_task(struct rxe_task *task, +- void *arg, int (*func)(void *), char *name); ++int rxe_init_task(struct rxe_task *task, void *arg, int (*func)(void *)); + + /* cleanup task */ + void rxe_cleanup_task(struct rxe_task *task); +-- +2.39.2 + diff --git a/queue-6.1/sched-add-new-attr-tca_ext_warn_msg-to-report-tc-ext.patch b/queue-6.1/sched-add-new-attr-tca_ext_warn_msg-to-report-tc-ext.patch new file mode 100644 index 00000000000..e6de3f9acc3 --- /dev/null +++ b/queue-6.1/sched-add-new-attr-tca_ext_warn_msg-to-report-tc-ext.patch @@ -0,0 +1,566 @@ +From 961935f09db4c902b4f4e5de7658cdc93d0ef05e Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 13 Jan 2023 11:43:53 +0800 +Subject: sched: add new attr TCA_EXT_WARN_MSG to report tc extact message + +From: Hangbin Liu + +[ Upstream commit 0349b8779cc949ad9e6aced32672ee48cf79b497 ] + +We will report extack message if there is an error via netlink_ack(). 
But +if the rule is not to be exclusively executed by the hardware, extack is not +passed along and offloading failures don't get logged. + +In commit 81c7288b170a ("sched: cls: enable verbose logging") Marcelo +made cls could log verbose info for offloading failures, which helps +improving Open vSwitch debuggability when using flower offloading. + +It would also be helpful if userspace monitor tools, like "tc monitor", +could log this kind of message, as it doesn't require vswitchd log level +adjusment. Let's add a new tc attributes to report the extack message so +the monitor program could receive the failures. e.g. + + # tc monitor + added chain dev enp3s0f1np1 parent ffff: chain 0 + added filter dev enp3s0f1np1 ingress protocol all pref 49152 flower chain 0 handle 0x1 + ct_state +trk+new + not_in_hw + action order 1: gact action drop + random type none pass val 0 + index 1 ref 1 bind 1 + + Warning: mlx5_core: matching on ct_state +new isn't supported. + +In this patch I only report the extack message on add/del operations. +It doesn't look like we need to report the extack message on get/dump +operations. + +Note this message not only reporte to multicast groups, it could also +be reported unicast, which may affect the current usersapce tool's behaivor. + +Suggested-by: Marcelo Ricardo Leitner +Signed-off-by: Hangbin Liu +Acked-by: Jakub Kicinski +Acked-by: Jamal Hadi Salim +Link: https://lore.kernel.org/r/20230113034353.2766735-1-liuhangbin@gmail.com +Signed-off-by: Paolo Abeni +Stable-dep-of: 84ad0af0bccd ("net/sched: qdisc_destroy() old ingress and clsact Qdiscs before grafting") +Signed-off-by: Sasha Levin +--- + include/uapi/linux/rtnetlink.h | 1 + + net/sched/act_api.c | 15 +++++--- + net/sched/cls_api.c | 62 +++++++++++++++++++++------------- + net/sched/sch_api.c | 55 ++++++++++++++++++------------ + 4 files changed, 84 insertions(+), 49 deletions(-) + +diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h +index eb2747d58a813..25a0af57dd5ed 100644 +--- a/include/uapi/linux/rtnetlink.h ++++ b/include/uapi/linux/rtnetlink.h +@@ -635,6 +635,7 @@ enum { + TCA_INGRESS_BLOCK, + TCA_EGRESS_BLOCK, + TCA_DUMP_FLAGS, ++ TCA_EXT_WARN_MSG, + __TCA_MAX + }; + +diff --git a/net/sched/act_api.c b/net/sched/act_api.c +index 9b31a10cc6399..cc6628a42e839 100644 +--- a/net/sched/act_api.c ++++ b/net/sched/act_api.c +@@ -1581,7 +1581,7 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p, + + static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[], + u32 portid, u32 seq, u16 flags, int event, int bind, +- int ref) ++ int ref, struct netlink_ext_ack *extack) + { + struct tcamsg *t; + struct nlmsghdr *nlh; +@@ -1605,7 +1605,12 @@ static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[], + + nla_nest_end(skb, nest); + ++ if (extack && extack->_msg && ++ nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg)) ++ goto out_nlmsg_trim; ++ + nlh->nlmsg_len = skb_tail_pointer(skb) - b; ++ + return skb->len; + + out_nlmsg_trim: +@@ -1624,7 +1629,7 @@ tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n, + if (!skb) + return -ENOBUFS; + if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event, +- 0, 1) <= 0) { ++ 0, 1, NULL) <= 0) { + NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action"); + kfree_skb(skb); + return -EINVAL; +@@ -1798,7 +1803,7 @@ tcf_reoffload_del_notify(struct net *net, struct tc_action *action) + if (!skb) + return -ENOBUFS; + +- if (tca_get_fill(skb, actions, 0, 0, 0, 
RTM_DELACTION, 0, 1) <= 0) { ++ if (tca_get_fill(skb, actions, 0, 0, 0, RTM_DELACTION, 0, 1, NULL) <= 0) { + kfree_skb(skb); + return -EINVAL; + } +@@ -1885,7 +1890,7 @@ tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[], + return -ENOBUFS; + + if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION, +- 0, 2) <= 0) { ++ 0, 2, extack) <= 0) { + NL_SET_ERR_MSG(extack, "Failed to fill netlink TC action attributes"); + kfree_skb(skb); + return -EINVAL; +@@ -1964,7 +1969,7 @@ tcf_add_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[], + return -ENOBUFS; + + if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags, +- RTM_NEWACTION, 0, 0) <= 0) { ++ RTM_NEWACTION, 0, 0, extack) <= 0) { + NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action"); + kfree_skb(skb); + return -EINVAL; +diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c +index abaf75300497d..0dbfc37d97991 100644 +--- a/net/sched/cls_api.c ++++ b/net/sched/cls_api.c +@@ -485,7 +485,8 @@ static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block, + #endif + + static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb, +- u32 seq, u16 flags, int event, bool unicast); ++ u32 seq, u16 flags, int event, bool unicast, ++ struct netlink_ext_ack *extack); + + static struct tcf_chain *__tcf_chain_get(struct tcf_block *block, + u32 chain_index, bool create, +@@ -518,7 +519,7 @@ static struct tcf_chain *__tcf_chain_get(struct tcf_block *block, + */ + if (is_first_reference && !by_act) + tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL, +- RTM_NEWCHAIN, false); ++ RTM_NEWCHAIN, false, NULL); + + return chain; + +@@ -1815,7 +1816,8 @@ static int tcf_fill_node(struct net *net, struct sk_buff *skb, + struct tcf_proto *tp, struct tcf_block *block, + struct Qdisc *q, u32 parent, void *fh, + u32 portid, u32 seq, u16 flags, int event, +- bool terse_dump, bool rtnl_held) ++ bool terse_dump, bool rtnl_held, ++ struct netlink_ext_ack *extack) + { + struct tcmsg *tcm; + struct nlmsghdr *nlh; +@@ -1855,7 +1857,13 @@ static int tcf_fill_node(struct net *net, struct sk_buff *skb, + tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0) + goto nla_put_failure; + } ++ ++ if (extack && extack->_msg && ++ nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg)) ++ goto nla_put_failure; ++ + nlh->nlmsg_len = skb_tail_pointer(skb) - b; ++ + return skb->len; + + out_nlmsg_trim: +@@ -1869,7 +1877,7 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb, + struct nlmsghdr *n, struct tcf_proto *tp, + struct tcf_block *block, struct Qdisc *q, + u32 parent, void *fh, int event, bool unicast, +- bool rtnl_held) ++ bool rtnl_held, struct netlink_ext_ack *extack) + { + struct sk_buff *skb; + u32 portid = oskb ? 
NETLINK_CB(oskb).portid : 0; +@@ -1881,7 +1889,7 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb, + + if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid, + n->nlmsg_seq, n->nlmsg_flags, event, +- false, rtnl_held) <= 0) { ++ false, rtnl_held, extack) <= 0) { + kfree_skb(skb); + return -EINVAL; + } +@@ -1910,7 +1918,7 @@ static int tfilter_del_notify(struct net *net, struct sk_buff *oskb, + + if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid, + n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER, +- false, rtnl_held) <= 0) { ++ false, rtnl_held, extack) <= 0) { + NL_SET_ERR_MSG(extack, "Failed to build del event notification"); + kfree_skb(skb); + return -EINVAL; +@@ -1936,14 +1944,15 @@ static int tfilter_del_notify(struct net *net, struct sk_buff *oskb, + static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb, + struct tcf_block *block, struct Qdisc *q, + u32 parent, struct nlmsghdr *n, +- struct tcf_chain *chain, int event) ++ struct tcf_chain *chain, int event, ++ struct netlink_ext_ack *extack) + { + struct tcf_proto *tp; + + for (tp = tcf_get_next_proto(chain, NULL); + tp; tp = tcf_get_next_proto(chain, tp)) +- tfilter_notify(net, oskb, n, tp, block, +- q, parent, NULL, event, false, true); ++ tfilter_notify(net, oskb, n, tp, block, q, parent, NULL, ++ event, false, true, extack); + } + + static void tfilter_put(struct tcf_proto *tp, void *fh) +@@ -2147,7 +2156,7 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n, + flags, extack); + if (err == 0) { + tfilter_notify(net, skb, n, tp, block, q, parent, fh, +- RTM_NEWTFILTER, false, rtnl_held); ++ RTM_NEWTFILTER, false, rtnl_held, extack); + tfilter_put(tp, fh); + /* q pointer is NULL for shared blocks */ + if (q) +@@ -2275,7 +2284,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n, + + if (prio == 0) { + tfilter_notify_chain(net, skb, block, q, parent, n, +- chain, RTM_DELTFILTER); ++ chain, RTM_DELTFILTER, extack); + tcf_chain_flush(chain, rtnl_held); + err = 0; + goto errout; +@@ -2299,7 +2308,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n, + + tcf_proto_put(tp, rtnl_held, NULL); + tfilter_notify(net, skb, n, tp, block, q, parent, fh, +- RTM_DELTFILTER, false, rtnl_held); ++ RTM_DELTFILTER, false, rtnl_held, extack); + err = 0; + goto errout; + } +@@ -2443,7 +2452,7 @@ static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n, + err = -ENOENT; + } else { + err = tfilter_notify(net, skb, n, tp, block, q, parent, +- fh, RTM_NEWTFILTER, true, rtnl_held); ++ fh, RTM_NEWTFILTER, true, rtnl_held, NULL); + if (err < 0) + NL_SET_ERR_MSG(extack, "Failed to send filter notify message"); + } +@@ -2481,7 +2490,7 @@ static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg) + return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent, + n, NETLINK_CB(a->cb->skb).portid, + a->cb->nlh->nlmsg_seq, NLM_F_MULTI, +- RTM_NEWTFILTER, a->terse_dump, true); ++ RTM_NEWTFILTER, a->terse_dump, true, NULL); + } + + static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent, +@@ -2515,7 +2524,7 @@ static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent, + if (tcf_fill_node(net, skb, tp, block, q, parent, NULL, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, NLM_F_MULTI, +- RTM_NEWTFILTER, false, true) <= 0) ++ RTM_NEWTFILTER, false, true, NULL) <= 0) + goto errout; + cb->args[1] = 1; + } +@@ -2658,7 +2667,8 @@ static int tc_chain_fill_node(const struct tcf_proto_ops 
*tmplt_ops, + void *tmplt_priv, u32 chain_index, + struct net *net, struct sk_buff *skb, + struct tcf_block *block, +- u32 portid, u32 seq, u16 flags, int event) ++ u32 portid, u32 seq, u16 flags, int event, ++ struct netlink_ext_ack *extack) + { + unsigned char *b = skb_tail_pointer(skb); + const struct tcf_proto_ops *ops; +@@ -2695,7 +2705,12 @@ static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops, + goto nla_put_failure; + } + ++ if (extack && extack->_msg && ++ nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg)) ++ goto out_nlmsg_trim; ++ + nlh->nlmsg_len = skb_tail_pointer(skb) - b; ++ + return skb->len; + + out_nlmsg_trim: +@@ -2705,7 +2720,8 @@ static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops, + } + + static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb, +- u32 seq, u16 flags, int event, bool unicast) ++ u32 seq, u16 flags, int event, bool unicast, ++ struct netlink_ext_ack *extack) + { + u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; + struct tcf_block *block = chain->block; +@@ -2719,7 +2735,7 @@ static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb, + + if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv, + chain->index, net, skb, block, portid, +- seq, flags, event) <= 0) { ++ seq, flags, event, extack) <= 0) { + kfree_skb(skb); + return -EINVAL; + } +@@ -2747,7 +2763,7 @@ static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops, + return -ENOBUFS; + + if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb, +- block, portid, seq, flags, RTM_DELCHAIN) <= 0) { ++ block, portid, seq, flags, RTM_DELCHAIN, NULL) <= 0) { + kfree_skb(skb); + return -EINVAL; + } +@@ -2900,11 +2916,11 @@ static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n, + } + + tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL, +- RTM_NEWCHAIN, false); ++ RTM_NEWCHAIN, false, extack); + break; + case RTM_DELCHAIN: + tfilter_notify_chain(net, skb, block, q, parent, n, +- chain, RTM_DELTFILTER); ++ chain, RTM_DELTFILTER, extack); + /* Flush the chain first as the user requested chain removal. 
*/ + tcf_chain_flush(chain, true); + /* In case the chain was successfully deleted, put a reference +@@ -2914,7 +2930,7 @@ static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n, + break; + case RTM_GETCHAIN: + err = tc_chain_notify(chain, skb, n->nlmsg_seq, +- n->nlmsg_flags, n->nlmsg_type, true); ++ n->nlmsg_flags, n->nlmsg_type, true, extack); + if (err < 0) + NL_SET_ERR_MSG(extack, "Failed to send chain notify message"); + break; +@@ -3014,7 +3030,7 @@ static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb) + chain->index, net, skb, block, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, NLM_F_MULTI, +- RTM_NEWCHAIN); ++ RTM_NEWCHAIN, NULL); + if (err <= 0) + break; + index++; +diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c +index 6fb345ec22641..3907483dae624 100644 +--- a/net/sched/sch_api.c ++++ b/net/sched/sch_api.c +@@ -907,7 +907,8 @@ static void qdisc_offload_graft_root(struct net_device *dev, + } + + static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid, +- u32 portid, u32 seq, u16 flags, int event) ++ u32 portid, u32 seq, u16 flags, int event, ++ struct netlink_ext_ack *extack) + { + struct gnet_stats_basic_sync __percpu *cpu_bstats = NULL; + struct gnet_stats_queue __percpu *cpu_qstats = NULL; +@@ -975,7 +976,12 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid, + if (gnet_stats_finish_copy(&d) < 0) + goto nla_put_failure; + ++ if (extack && extack->_msg && ++ nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg)) ++ goto out_nlmsg_trim; ++ + nlh->nlmsg_len = skb_tail_pointer(skb) - b; ++ + return skb->len; + + out_nlmsg_trim: +@@ -996,7 +1002,8 @@ static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible) + + static int qdisc_notify(struct net *net, struct sk_buff *oskb, + struct nlmsghdr *n, u32 clid, +- struct Qdisc *old, struct Qdisc *new) ++ struct Qdisc *old, struct Qdisc *new, ++ struct netlink_ext_ack *extack) + { + struct sk_buff *skb; + u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; +@@ -1007,12 +1014,12 @@ static int qdisc_notify(struct net *net, struct sk_buff *oskb, + + if (old && !tc_qdisc_dump_ignore(old, false)) { + if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq, +- 0, RTM_DELQDISC) < 0) ++ 0, RTM_DELQDISC, extack) < 0) + goto err_out; + } + if (new && !tc_qdisc_dump_ignore(new, false)) { + if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq, +- old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0) ++ old ? NLM_F_REPLACE : 0, RTM_NEWQDISC, extack) < 0) + goto err_out; + } + +@@ -1027,10 +1034,11 @@ static int qdisc_notify(struct net *net, struct sk_buff *oskb, + + static void notify_and_destroy(struct net *net, struct sk_buff *skb, + struct nlmsghdr *n, u32 clid, +- struct Qdisc *old, struct Qdisc *new) ++ struct Qdisc *old, struct Qdisc *new, ++ struct netlink_ext_ack *extack) + { + if (new || old) +- qdisc_notify(net, skb, n, clid, old, new); ++ qdisc_notify(net, skb, n, clid, old, new, extack); + + if (old) + qdisc_put(old); +@@ -1110,12 +1118,12 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, + qdisc_refcount_inc(new); + rcu_assign_pointer(dev->qdisc, new ? 
: &noop_qdisc); + +- notify_and_destroy(net, skb, n, classid, old, new); ++ notify_and_destroy(net, skb, n, classid, old, new, extack); + + if (new && new->ops->attach) + new->ops->attach(new); + } else { +- notify_and_destroy(net, skb, n, classid, old, new); ++ notify_and_destroy(net, skb, n, classid, old, new, extack); + } + + if (dev->flags & IFF_UP) +@@ -1146,7 +1154,7 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, + err = cops->graft(parent, cl, new, &old, extack); + if (err) + return err; +- notify_and_destroy(net, skb, n, classid, old, new); ++ notify_and_destroy(net, skb, n, classid, old, new, extack); + } + return 0; + } +@@ -1519,7 +1527,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, + if (err != 0) + return err; + } else { +- qdisc_notify(net, skb, n, clid, NULL, q); ++ qdisc_notify(net, skb, n, clid, NULL, q, NULL); + } + return 0; + } +@@ -1667,7 +1675,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, + } + err = qdisc_change(q, tca, extack); + if (err == 0) +- qdisc_notify(net, skb, n, clid, NULL, q); ++ qdisc_notify(net, skb, n, clid, NULL, q, extack); + return err; + + create_n_graft: +@@ -1734,7 +1742,7 @@ static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb, + if (!tc_qdisc_dump_ignore(q, dump_invisible) && + tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, NLM_F_MULTI, +- RTM_NEWQDISC) <= 0) ++ RTM_NEWQDISC, NULL) <= 0) + goto done; + q_idx++; + } +@@ -1756,7 +1764,7 @@ static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb, + if (!tc_qdisc_dump_ignore(q, dump_invisible) && + tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, NLM_F_MULTI, +- RTM_NEWQDISC) <= 0) ++ RTM_NEWQDISC, NULL) <= 0) + goto done; + q_idx++; + } +@@ -1829,8 +1837,8 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb) + ************************************************/ + + static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q, +- unsigned long cl, +- u32 portid, u32 seq, u16 flags, int event) ++ unsigned long cl, u32 portid, u32 seq, u16 flags, ++ int event, struct netlink_ext_ack *extack) + { + struct tcmsg *tcm; + struct nlmsghdr *nlh; +@@ -1865,7 +1873,12 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q, + if (gnet_stats_finish_copy(&d) < 0) + goto nla_put_failure; + ++ if (extack && extack->_msg && ++ nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg)) ++ goto out_nlmsg_trim; ++ + nlh->nlmsg_len = skb_tail_pointer(skb) - b; ++ + return skb->len; + + out_nlmsg_trim: +@@ -1876,7 +1889,7 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q, + + static int tclass_notify(struct net *net, struct sk_buff *oskb, + struct nlmsghdr *n, struct Qdisc *q, +- unsigned long cl, int event) ++ unsigned long cl, int event, struct netlink_ext_ack *extack) + { + struct sk_buff *skb; + u32 portid = oskb ? 
NETLINK_CB(oskb).portid : 0; +@@ -1885,7 +1898,7 @@ static int tclass_notify(struct net *net, struct sk_buff *oskb, + if (!skb) + return -ENOBUFS; + +- if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) { ++ if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event, extack) < 0) { + kfree_skb(skb); + return -EINVAL; + } +@@ -1912,7 +1925,7 @@ static int tclass_del_notify(struct net *net, + return -ENOBUFS; + + if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, +- RTM_DELTCLASS) < 0) { ++ RTM_DELTCLASS, extack) < 0) { + kfree_skb(skb); + return -EINVAL; + } +@@ -2119,7 +2132,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, + tc_bind_tclass(q, portid, clid, 0); + goto out; + case RTM_GETTCLASS: +- err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS); ++ err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS, extack); + goto out; + default: + err = -EINVAL; +@@ -2137,7 +2150,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, + if (cops->change) + err = cops->change(q, clid, portid, tca, &new_cl, extack); + if (err == 0) { +- tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS); ++ tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS, extack); + /* We just create a new class, need to do reverse binding. */ + if (cl != new_cl) + tc_bind_tclass(q, portid, clid, new_cl); +@@ -2159,7 +2172,7 @@ static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, + + return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid, + a->cb->nlh->nlmsg_seq, NLM_F_MULTI, +- RTM_NEWTCLASS); ++ RTM_NEWTCLASS, NULL); + } + + static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb, +-- +2.39.2 + diff --git a/queue-6.1/sctp-fix-an-error-code-in-sctp_sf_eat_auth.patch b/queue-6.1/sctp-fix-an-error-code-in-sctp_sf_eat_auth.patch new file mode 100644 index 00000000000..5cc5915d2e9 --- /dev/null +++ b/queue-6.1/sctp-fix-an-error-code-in-sctp_sf_eat_auth.patch @@ -0,0 +1,38 @@ +From 95d5abbe4541cb787c71308a7fc82a660d8d20e1 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 9 Jun 2023 14:05:19 +0300 +Subject: sctp: fix an error code in sctp_sf_eat_auth() + +From: Dan Carpenter + +[ Upstream commit 75e6def3b26736e7ff80639810098c9074229737 ] + +The sctp_sf_eat_auth() function is supposed to enum sctp_disposition +values and returning a kernel error code will cause issues in the +caller. Change -ENOMEM to SCTP_DISPOSITION_NOMEM. + +Fixes: 65b07e5d0d09 ("[SCTP]: API updates to suport SCTP-AUTH extensions.") +Signed-off-by: Dan Carpenter +Acked-by: Xin Long +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + net/sctp/sm_statefuns.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c +index f6ee7f4040c14..5383b6a9da61c 100644 +--- a/net/sctp/sm_statefuns.c ++++ b/net/sctp/sm_statefuns.c +@@ -4484,7 +4484,7 @@ enum sctp_disposition sctp_sf_eat_auth(struct net *net, + SCTP_AUTH_NEW_KEY, GFP_ATOMIC); + + if (!ev) +- return -ENOMEM; ++ return SCTP_DISPOSITION_NOMEM; + + sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, + SCTP_ULPEVENT(ev)); +-- +2.39.2 + diff --git a/queue-6.1/selftests-forwarding-hw_stats_l3-set-addrgenmode-in-.patch b/queue-6.1/selftests-forwarding-hw_stats_l3-set-addrgenmode-in-.patch new file mode 100644 index 00000000000..8590e8b7b6b --- /dev/null +++ b/queue-6.1/selftests-forwarding-hw_stats_l3-set-addrgenmode-in-.patch @@ -0,0 +1,71 @@ +From 5625805e408b4d4d5f70ff61f24e41ea27370e02 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 12 Jun 2023 16:34:58 +0200 +Subject: selftests: forwarding: hw_stats_l3: Set addrgenmode in a separate + step + +From: Danielle Ratson + +[ Upstream commit bef68e201e538eaa3a91f97aae8161eb2d0a8ed7 ] + +Setting the IPv6 address generation mode of a net device during its +creation never worked, but after commit b0ad3c179059 ("rtnetlink: call +validate_linkmsg in rtnl_create_link") it explicitly fails [1]. The +failure is caused by the fact that validate_linkmsg() is called before +the net device is registered, when it still does not have an 'inet6_dev'. + +Likewise, raising the net device before setting the address generation +mode is meaningless, because by the time the mode is set, the address +has already been generated. + +Therefore, fix the test to first create the net device, then set its +IPv6 address generation mode and finally bring it up. 
+ +[1] + # ip link add name mydev addrgenmode eui64 type dummy + RTNETLINK answers: Address family not supported by protocol + +Fixes: ba95e7930957 ("selftests: forwarding: hw_stats_l3: Add a new test") +Signed-off-by: Danielle Ratson +Reviewed-by: Ido Schimmel +Signed-off-by: Petr Machata +Link: https://lore.kernel.org/r/f3b05d85b2bc0c3d6168fe8f7207c6c8365703db.1686580046.git.petrm@nvidia.com +Signed-off-by: Paolo Abeni +Signed-off-by: Sasha Levin +--- + tools/testing/selftests/net/forwarding/hw_stats_l3.sh | 11 +++++++---- + 1 file changed, 7 insertions(+), 4 deletions(-) + +diff --git a/tools/testing/selftests/net/forwarding/hw_stats_l3.sh b/tools/testing/selftests/net/forwarding/hw_stats_l3.sh +index 9c1f76e108af1..1a936ffbacee7 100755 +--- a/tools/testing/selftests/net/forwarding/hw_stats_l3.sh ++++ b/tools/testing/selftests/net/forwarding/hw_stats_l3.sh +@@ -84,8 +84,9 @@ h2_destroy() + + router_rp1_200_create() + { +- ip link add name $rp1.200 up \ +- link $rp1 addrgenmode eui64 type vlan id 200 ++ ip link add name $rp1.200 link $rp1 type vlan id 200 ++ ip link set dev $rp1.200 addrgenmode eui64 ++ ip link set dev $rp1.200 up + ip address add dev $rp1.200 192.0.2.2/28 + ip address add dev $rp1.200 2001:db8:1::2/64 + ip stats set dev $rp1.200 l3_stats on +@@ -256,9 +257,11 @@ reapply_config() + + router_rp1_200_destroy + +- ip link add name $rp1.200 link $rp1 addrgenmode none type vlan id 200 ++ ip link add name $rp1.200 link $rp1 type vlan id 200 ++ ip link set dev $rp1.200 addrgenmode none + ip stats set dev $rp1.200 l3_stats on +- ip link set dev $rp1.200 up addrgenmode eui64 ++ ip link set dev $rp1.200 addrgenmode eui64 ++ ip link set dev $rp1.200 up + ip address add dev $rp1.200 192.0.2.2/28 + ip address add dev $rp1.200 2001:db8:1::2/64 + } +-- +2.39.2 + diff --git a/queue-6.1/selftests-ptp-fix-timestamp-printf-format-for-ptp_sy.patch b/queue-6.1/selftests-ptp-fix-timestamp-printf-format-for-ptp_sy.patch new file mode 100644 index 00000000000..339add5e9f1 --- /dev/null +++ b/queue-6.1/selftests-ptp-fix-timestamp-printf-format-for-ptp_sy.patch @@ -0,0 +1,50 @@ +From e9d77e45cd5aba0c7124e5063d13ab6cdd888ce6 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 15 Jun 2023 09:34:04 +0100 +Subject: selftests/ptp: Fix timestamp printf format for PTP_SYS_OFFSET + +From: Alex Maftei + +[ Upstream commit 76a4c8b82938bc5020b67663db41f451684bf327 ] + +Previously, timestamps were printed using "%lld.%u" which is incorrect +for nanosecond values lower than 100,000,000 as they're fractional +digits, therefore leading zeros are meaningful. + +This patch changes the format strings to "%lld.%09u" in order to add +leading zeros to the nanosecond value. + +Fixes: 568ebc5985f5 ("ptp: add the PTP_SYS_OFFSET ioctl to the testptp program") +Fixes: 4ec54f95736f ("ptp: Fix compiler warnings in the testptp utility") +Fixes: 6ab0e475f1f3 ("Documentation: fix misc. 
warnings") +Signed-off-by: Alex Maftei +Acked-by: Richard Cochran +Link: https://lore.kernel.org/r/20230615083404.57112-1-alex.maftei@amd.com +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + tools/testing/selftests/ptp/testptp.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/tools/testing/selftests/ptp/testptp.c b/tools/testing/selftests/ptp/testptp.c +index 198ad5f321878..cfa9562f3cd83 100644 +--- a/tools/testing/selftests/ptp/testptp.c ++++ b/tools/testing/selftests/ptp/testptp.c +@@ -502,11 +502,11 @@ int main(int argc, char *argv[]) + interval = t2 - t1; + offset = (t2 + t1) / 2 - tp; + +- printf("system time: %lld.%u\n", ++ printf("system time: %lld.%09u\n", + (pct+2*i)->sec, (pct+2*i)->nsec); +- printf("phc time: %lld.%u\n", ++ printf("phc time: %lld.%09u\n", + (pct+2*i+1)->sec, (pct+2*i+1)->nsec); +- printf("system time: %lld.%u\n", ++ printf("system time: %lld.%09u\n", + (pct+2*i+2)->sec, (pct+2*i+2)->nsec); + printf("system/phc clock time offset is %" PRId64 " ns\n" + "system clock time delay is %" PRId64 " ns\n", +-- +2.39.2 + diff --git a/queue-6.1/selftests-tc-testing-fix-error-failed-to-find-target.patch b/queue-6.1/selftests-tc-testing-fix-error-failed-to-find-target.patch new file mode 100644 index 00000000000..91cfa815675 --- /dev/null +++ b/queue-6.1/selftests-tc-testing-fix-error-failed-to-find-target.patch @@ -0,0 +1,85 @@ +From c47870dfabc7ed5ef2f707c9c8057f9887ab1194 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 12 Jun 2023 09:57:10 +0200 +Subject: selftests/tc-testing: Fix Error: failed to find target LOG + +From: Vlad Buslov + +[ Upstream commit b849c566ee9c6ed78288a522278dcaf419f8e239 ] + +Add missing netfilter config dependency. + +Fixes following example error when running tests via tdc.sh for all XT +tests: + + # $ sudo ./tdc.py -d eth2 -e 2029 + # Test 2029: Add xt action with log-prefix + # exit: 255 + # exit: 0 + # failed to find target LOG + # + # bad action parsing + # parse_action: bad value (7:xt)! + # Illegal "action" + # + # -----> teardown stage *** Could not execute: "$TC actions flush action xt" + # + # -----> teardown stage *** Error message: "Error: Cannot flush unknown TC action. + # We have an error flushing + # " + # returncode 1; expected [0] + # + # -----> teardown stage *** Aborting test run. + # + # <_io.BufferedReader name=3> *** stdout *** + # + # <_io.BufferedReader name=5> *** stderr *** + # "-----> teardown stage" did not complete successfully + # Exception ('teardown', ' failed to find target LOG\n\nbad action parsing\nparse_action: bad value (7:xt)!\nIllegal "action"\n', '"-----> teardown stage" did not complete successfully') (caught in test_runner, running test 2 2029 Add xt action with log-prefix stage teardown) + # --------------- + # traceback + # File "/images/src/linux/tools/testing/selftests/tc-testing/./tdc.py", line 495, in test_runner + # res = run_one_test(pm, args, index, tidx) + # File "/images/src/linux/tools/testing/selftests/tc-testing/./tdc.py", line 434, in run_one_test + # prepare_env(args, pm, 'teardown', '-----> teardown stage', tidx['teardown'], procout) + # File "/images/src/linux/tools/testing/selftests/tc-testing/./tdc.py", line 245, in prepare_env + # raise PluginMgrTestFail( + # --------------- + # accumulated output for this test: + # failed to find target LOG + # + # bad action parsing + # parse_action: bad value (7:xt)! 
+ # Illegal "action" + # + # --------------- + # + # All test results: + # + # 1..1 + # ok 1 2029 - Add xt action with log-prefix # skipped - "-----> teardown stage" did not complete successfully + +Fixes: 910d504bc187 ("selftests/tc-testings: add selftests for xt action") +Signed-off-by: Vlad Buslov +Reviewed-by: Pedro Tammela +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + tools/testing/selftests/tc-testing/config | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/tools/testing/selftests/tc-testing/config b/tools/testing/selftests/tc-testing/config +index 4638c63a339ff..aec4de8bea78b 100644 +--- a/tools/testing/selftests/tc-testing/config ++++ b/tools/testing/selftests/tc-testing/config +@@ -6,6 +6,7 @@ CONFIG_NF_CONNTRACK_MARK=y + CONFIG_NF_CONNTRACK_ZONES=y + CONFIG_NF_CONNTRACK_LABELS=y + CONFIG_NF_NAT=m ++CONFIG_NETFILTER_XT_TARGET_LOG=m + + CONFIG_NET_SCHED=y + +-- +2.39.2 + diff --git a/queue-6.1/selftests-tc-testing-fix-error-specified-qdisc-kind-.patch b/queue-6.1/selftests-tc-testing-fix-error-specified-qdisc-kind-.patch new file mode 100644 index 00000000000..a1e2ce0bdc6 --- /dev/null +++ b/queue-6.1/selftests-tc-testing-fix-error-specified-qdisc-kind-.patch @@ -0,0 +1,78 @@ +From e7bb853a7c7f209997c91715dc845d3be537fcff Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 12 Jun 2023 09:57:09 +0200 +Subject: selftests/tc-testing: Fix Error: Specified qdisc kind is unknown. + +From: Vlad Buslov + +[ Upstream commit aef6e908b54200d04f2d77dab31509fcff2e60ae ] + +All TEQL tests assume that sch_teql module is loaded. Load module in tdc.sh +before running qdisc tests. + +Fixes following example error when running tests via tdc.sh for all TEQL +tests: + + # $ sudo ./tdc.py -d eth2 -e 84a0 + # -- ns/SubPlugin.__init__ + # Test 84a0: Create TEQL with default setting + # exit: 2 + # exit: 0 + # Error: Specified qdisc kind is unknown. + # + # -----> teardown stage *** Could not execute: "$TC qdisc del dev $DUMMY handle 1: root" + # + # -----> teardown stage *** Error message: "Error: Invalid handle. + # " + # returncode 2; expected [0] + # + # -----> teardown stage *** Aborting test run. + # + # <_io.BufferedReader name=3> *** stdout *** + # + # <_io.BufferedReader name=5> *** stderr *** + # "-----> teardown stage" did not complete successfully + # Exception ('teardown', 'Error: Specified qdisc kind is unknown.\n', '"-----> teardown stage" did not complete successfully') (caught in test_runner, running test 2 84a0 Create TEQL with default setting stage teardown) + # --------------- + # traceback + # File "/images/src/linux/tools/testing/selftests/tc-testing/./tdc.py", line 495, in test_runner + # res = run_one_test(pm, args, index, tidx) + # File "/images/src/linux/tools/testing/selftests/tc-testing/./tdc.py", line 434, in run_one_test + # prepare_env(args, pm, 'teardown', '-----> teardown stage', tidx['teardown'], procout) + # File "/images/src/linux/tools/testing/selftests/tc-testing/./tdc.py", line 245, in prepare_env + # raise PluginMgrTestFail( + # --------------- + # accumulated output for this test: + # Error: Specified qdisc kind is unknown. 
+ # + # --------------- + # + # All test results: + # + # 1..1 + # ok 1 84a0 - Create TEQL with default setting # skipped - "-----> teardown stage" did not complete successfully + +Fixes: cc62fbe114c9 ("selftests/tc-testing: add selftests for teql qdisc") +Signed-off-by: Vlad Buslov +Reviewed-by: Victor Nogueira +Reviewed-by: Pedro Tammela +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + tools/testing/selftests/tc-testing/tdc.sh | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/tools/testing/selftests/tc-testing/tdc.sh b/tools/testing/selftests/tc-testing/tdc.sh +index afb0cd86fa3df..eb357bd7923c0 100755 +--- a/tools/testing/selftests/tc-testing/tdc.sh ++++ b/tools/testing/selftests/tc-testing/tdc.sh +@@ -2,5 +2,6 @@ + # SPDX-License-Identifier: GPL-2.0 + + modprobe netdevsim ++modprobe sch_teql + ./tdc.py -c actions --nobuildebpf + ./tdc.py -c qdisc +-- +2.39.2 + diff --git a/queue-6.1/selftests-tc-testing-fix-sfb-db-test.patch b/queue-6.1/selftests-tc-testing-fix-sfb-db-test.patch new file mode 100644 index 00000000000..d224992e1ad --- /dev/null +++ b/queue-6.1/selftests-tc-testing-fix-sfb-db-test.patch @@ -0,0 +1,56 @@ +From d67a68be93b476883accb04841369f8e4b530aa2 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 12 Jun 2023 09:57:11 +0200 +Subject: selftests/tc-testing: Fix SFB db test + +From: Vlad Buslov + +[ Upstream commit b39d8c41c7a8336ce85c376b5d4906089524a0ae ] + +Setting very small value of db like 10ms introduces rounding errors when +converting to/from jiffies on some kernel configs. For example, on 250hz +the actual value will be set to 12ms which causes the test to fail: + + # $ sudo ./tdc.py -d eth2 -e 3410 + # -- ns/SubPlugin.__init__ + # Test 3410: Create SFB with db setting + # + # All test results: + # + # 1..1 + # not ok 1 3410 - Create SFB with db setting + # Could not match regex pattern. Verify command output: + # qdisc sfb 1: root refcnt 2 rehash 600s db 12ms limit 1000p max 25p target 20p increment 0.000503548 decrement 4.57771e-05 penalty_rate 10pps penalty_burst 20p + +Set the value to 100ms instead which currently seem to work on 100hz, +250hz, 300hz and 1000hz kernel configs. 
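+
+A rough userspace model of the round trip, assuming the simple "round up on
+the way in, truncate on the way out" behaviour of msecs_to_jiffies() and
+jiffies_to_msecs() (the helper names and exact arithmetic here are only an
+approximation of the kernel implementation):
+
+	#include <stdio.h>
+
+	static unsigned int ms_to_jiffies(unsigned int ms, unsigned int hz)
+	{
+		return (ms * hz + 999) / 1000;	/* round up */
+	}
+
+	static unsigned int jiffies_to_ms(unsigned int j, unsigned int hz)
+	{
+		return j * 1000 / hz;
+	}
+
+	int main(void)
+	{
+		unsigned int hz[] = { 100, 250, 300, 1000 };
+
+		for (int i = 0; i < 4; i++)
+			printf("HZ=%4u: 10ms -> %ums, 100ms -> %ums\n", hz[i],
+			       jiffies_to_ms(ms_to_jiffies(10, hz[i]), hz[i]),
+			       jiffies_to_ms(ms_to_jiffies(100, hz[i]), hz[i]));
+		return 0;
+	}
+
+On HZ=250 one jiffy is 4 ms, so a 10 ms request rounds up to 3 jiffies and
+reads back as 12 ms, while 100 ms survives the round trip unchanged on all
+four HZ values, which is why the test now uses 100 ms.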
+ +Fixes: 6ad92dc56fca ("selftests/tc-testing: add selftests for sfb qdisc") +Signed-off-by: Vlad Buslov +Reviewed-by: Pedro Tammela +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfb.json | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfb.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfb.json +index ba2f5e79cdbfe..e21c7f22c6d4c 100644 +--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfb.json ++++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfb.json +@@ -58,10 +58,10 @@ + "setup": [ + "$IP link add dev $DUMMY type dummy || /bin/true" + ], +- "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root sfb db 10", ++ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root sfb db 100", + "expExitCode": "0", + "verifyCmd": "$TC qdisc show dev $DUMMY", +- "matchPattern": "qdisc sfb 1: root refcnt [0-9]+ rehash 600s db 10ms", ++ "matchPattern": "qdisc sfb 1: root refcnt [0-9]+ rehash 600s db 100ms", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DUMMY handle 1: root", +-- +2.39.2 + diff --git a/queue-6.1/series b/queue-6.1/series index f9665a6bf36..857abd278df 100644 --- a/queue-6.1/series +++ b/queue-6.1/series @@ -87,3 +87,68 @@ usb-gadget-udc-core-prevent-soft_connect_store-race.patch usb-dwc3-qcom-fix-null-deref-on-suspend.patch usb-dwc3-fix-use-after-free-on-core-driver-unbind.patch usb-dwc3-gadget-reset-num-trbs-before-giving-back-the-request.patch +rdma-rtrs-fix-the-last-iu-buf-leak-in-err-path.patch +rdma-rtrs-fix-rxe_dealloc_pd-warning.patch +rdma-rxe-fix-packet-length-checks.patch +rdma-rxe-fix-ref-count-error-in-check_rkey.patch +spi-cadence-quadspi-add-missing-check-for-dma_set_ma.patch +spi-fsl-dspi-avoid-sck-glitches-with-continuous-tran.patch +netfilter-nf_tables-integrate-pipapo-into-commit-pro.patch +netfilter-nfnetlink-skip-error-delivery-on-batch-in-.patch +ice-fix-xdp-memory-leak-when-nic-is-brought-up-and-d.patch +netfilter-nf_tables-incorrect-error-path-handling-wi.patch +net-enetc-correct-the-indexes-of-highest-and-2nd-hig.patch +ping6-fix-send-to-link-local-addresses-with-vrf.patch +igb-fix-extts-capture-value-format-for-82580-i354-i3.patch +net-sched-simplify-tcf_pedit_act.patch +net-sched-act_pedit-remove-extra-check-for-key-type.patch +net-sched-act_pedit-parse-l3-header-for-l4-offset.patch +octeontx2-af-fix-promiscuous-mode.patch +net-sched-cls_u32-fix-reference-counter-leak-leading.patch +wifi-mac80211-fix-link-activation-settings-order.patch +wifi-cfg80211-fix-link-del-callback-to-call-correct-.patch +wifi-mac80211-take-lock-before-setting-vif-links.patch +rdma-rxe-removed-unused-name-from-rxe_task-struct.patch +rdma-rxe-fix-the-use-before-initialization-error-of-.patch +iavf-remove-mask-from-iavf_irq_enable_queues.patch +octeontx2-af-fixed-resource-availability-check.patch +octeontx2-af-fix-lbk-link-credits-on-cn10k.patch +rdma-mlx5-initiate-dropless-rq-for-raw-ethernet-func.patch +rdma-mlx5-create-an-indirect-flow-table-for-steering.patch +rdma-cma-always-set-static-rate-to-0-for-roce.patch +ib-uverbs-fix-to-consider-event-queue-closing-also-u.patch +rdma-mlx5-fix-affinity-assignment.patch +ib-isert-fix-dead-lock-in-ib_isert.patch +ib-isert-fix-possible-list-corruption-in-cma-handler.patch +ib-isert-fix-incorrect-release-of-isert-connection.patch +net-ethtool-correct-max-attribute-value-for-stats.patch +ipvlan-fix-bound-dev-checking-for-ipv6-l3s-mode.patch 
+sctp-fix-an-error-code-in-sctp_sf_eat_auth.patch +igc-clean-the-tx-buffer-and-tx-descriptor-ring.patch +igc-fix-possible-system-crash-when-loading-module.patch +igb-fix-nvm.ops.read-error-handling.patch +net-phylink-report-correct-max-speed-for-qusgmii.patch +net-phylink-use-a-dedicated-helper-to-parse-usgmii-c.patch +drm-nouveau-don-t-detect-dsm-for-non-nvidia-device.patch +drm-bridge-ti-sn65dsi86-avoid-possible-buffer-overfl.patch +drm-nouveau-dp-check-for-null-nv_connector-native_mo.patch +drm-nouveau-add-nv_encoder-pointer-check-for-null.patch +selftests-tc-testing-fix-error-specified-qdisc-kind-.patch +selftests-tc-testing-fix-error-failed-to-find-target.patch +selftests-tc-testing-fix-sfb-db-test.patch +sched-add-new-attr-tca_ext_warn_msg-to-report-tc-ext.patch +net-sched-refactor-qdisc_graft-for-ingress-and-clsac.patch +net-sched-qdisc_destroy-old-ingress-and-clsact-qdisc.patch +selftests-forwarding-hw_stats_l3-set-addrgenmode-in-.patch +cifs-fix-lease-break-oops-in-xfstest-generic-098.patch +ext4-drop-the-call-to-ext4_error-from-ext4_get_group.patch +net-sched-cls_api-fix-lockup-on-flushing-explicitly-.patch +net-dsa-felix-fix-taprio-guard-band-overflow-at-10mb.patch +net-lapbether-only-support-ethernet-devices.patch +net-macsec-fix-double-free-of-percpu-stats.patch +sfc-fix-xdp-queues-mode-with-legacy-irq.patch +dm-don-t-lock-fs-when-the-map-is-null-during-suspend.patch +net-tipc-resize-nlattr-array-to-correct-size.patch +selftests-ptp-fix-timestamp-printf-format-for-ptp_sy.patch +octeon_ep-add-missing-check-for-ioremap.patch +afs-fix-vlserver-probe-rtt-handling.patch diff --git a/queue-6.1/sfc-fix-xdp-queues-mode-with-legacy-irq.patch b/queue-6.1/sfc-fix-xdp-queues-mode-with-legacy-irq.patch new file mode 100644 index 00000000000..46a2562f591 --- /dev/null +++ b/queue-6.1/sfc-fix-xdp-queues-mode-with-legacy-irq.patch @@ -0,0 +1,90 @@ +From 2e9356af4863bff560b16befe2cecdaab60bca72 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 13 Jun 2023 15:38:54 +0200 +Subject: sfc: fix XDP queues mode with legacy IRQ +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Íñigo Huguet + +[ Upstream commit e84a1e1e683f3558e30f437d7c99df35afb8b52c ] + +In systems without MSI-X capabilities, xdp_txq_queues_mode is calculated +in efx_allocate_msix_channels, but when enabling MSI-X fails, it was not +changed to a proper default value. This was leading to the driver +thinking that it has dedicated XDP queues, when it didn't. + +Fix it by setting xdp_txq_queues_mode to the correct value if the driver +fallbacks to MSI or legacy IRQ mode. The correct value is +EFX_XDP_TX_QUEUES_BORROWED because there are no XDP dedicated queues. + +The issue can be easily visible if the kernel is started with pci=nomsi, +then a call trace is shown. It is not shown only with sfc's modparam +interrupt_mode=2. Call trace example: + WARNING: CPU: 2 PID: 663 at drivers/net/ethernet/sfc/efx_channels.c:828 efx_set_xdp_channels+0x124/0x260 [sfc] + [...skip...] + Call Trace: + + efx_set_channels+0x5c/0xc0 [sfc] + efx_probe_nic+0x9b/0x15a [sfc] + efx_probe_all+0x10/0x1a2 [sfc] + efx_pci_probe_main+0x12/0x156 [sfc] + efx_pci_probe_post_io+0x18/0x103 [sfc] + efx_pci_probe.cold+0x154/0x257 [sfc] + local_pci_probe+0x42/0x80 + +Fixes: 6215b608a8c4 ("sfc: last resort fallback for lack of xdp tx queues") +Reported-by: Yanghang Liu +Signed-off-by: Íñigo Huguet +Acked-by: Martin Habets +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/sfc/efx_channels.c | 2 ++ + drivers/net/ethernet/sfc/siena/efx_channels.c | 2 ++ + 2 files changed, 4 insertions(+) + +diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c +index aaa381743bca3..27d00ffac68f4 100644 +--- a/drivers/net/ethernet/sfc/efx_channels.c ++++ b/drivers/net/ethernet/sfc/efx_channels.c +@@ -301,6 +301,7 @@ int efx_probe_interrupts(struct efx_nic *efx) + efx->tx_channel_offset = 0; + efx->n_xdp_channels = 0; + efx->xdp_channel_offset = efx->n_channels; ++ efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED; + rc = pci_enable_msi(efx->pci_dev); + if (rc == 0) { + efx_get_channel(efx, 0)->irq = efx->pci_dev->irq; +@@ -322,6 +323,7 @@ int efx_probe_interrupts(struct efx_nic *efx) + efx->tx_channel_offset = efx_separate_tx_channels ? 1 : 0; + efx->n_xdp_channels = 0; + efx->xdp_channel_offset = efx->n_channels; ++ efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED; + efx->legacy_irq = efx->pci_dev->irq; + } + +diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c +index 06ed74994e366..1776f7f8a7a90 100644 +--- a/drivers/net/ethernet/sfc/siena/efx_channels.c ++++ b/drivers/net/ethernet/sfc/siena/efx_channels.c +@@ -302,6 +302,7 @@ int efx_siena_probe_interrupts(struct efx_nic *efx) + efx->tx_channel_offset = 0; + efx->n_xdp_channels = 0; + efx->xdp_channel_offset = efx->n_channels; ++ efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED; + rc = pci_enable_msi(efx->pci_dev); + if (rc == 0) { + efx_get_channel(efx, 0)->irq = efx->pci_dev->irq; +@@ -323,6 +324,7 @@ int efx_siena_probe_interrupts(struct efx_nic *efx) + efx->tx_channel_offset = efx_siena_separate_tx_channels ? 1 : 0; + efx->n_xdp_channels = 0; + efx->xdp_channel_offset = efx->n_channels; ++ efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED; + efx->legacy_irq = efx->pci_dev->irq; + } + +-- +2.39.2 + diff --git a/queue-6.1/spi-cadence-quadspi-add-missing-check-for-dma_set_ma.patch b/queue-6.1/spi-cadence-quadspi-add-missing-check-for-dma_set_ma.patch new file mode 100644 index 00000000000..ef17ad72ef7 --- /dev/null +++ b/queue-6.1/spi-cadence-quadspi-add-missing-check-for-dma_set_ma.patch @@ -0,0 +1,41 @@ +From 2fcf78c70003317834896cb3ba7885d43f0567c8 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 6 Jun 2023 17:38:59 +0800 +Subject: spi: cadence-quadspi: Add missing check for dma_set_mask + +From: Jiasheng Jiang + +[ Upstream commit 947c70a213769f60e9d5aca2bc88b50a1cfaf5a6 ] + +Add check for dma_set_mask() and return the error if it fails. 
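+
+The shape of the check being added, as a stand-alone sketch (the function
+name and surrounding code are illustrative; the real change is inside
+cqspi_probe() below). dma_set_mask() returns an error when the platform
+cannot honour the requested mask, and that value now propagates out of
+probe instead of being ignored:
+
+	#include <linux/dma-mapping.h>
+
+	static int example_enable_dma(struct device *dev)
+	{
+		int ret;
+
+		ret = dma_set_mask(dev, DMA_BIT_MASK(64));
+		if (ret)
+			return ret;	/* don't continue with DMA silently broken */
+
+		return 0;
+	}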
+ +Fixes: 1a6f854f7daa ("spi: cadence-quadspi: Add Xilinx Versal external DMA support") +Signed-off-by: Jiasheng Jiang +Link: https://lore.kernel.org/r/20230606093859.27818-1-jiasheng@iscas.ac.cn +Signed-off-by: Mark Brown +Signed-off-by: Sasha Levin +--- + drivers/spi/spi-cadence-quadspi.c | 7 +++++-- + 1 file changed, 5 insertions(+), 2 deletions(-) + +diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c +index 30fd4bc90580e..b371e4eb41ec3 100644 +--- a/drivers/spi/spi-cadence-quadspi.c ++++ b/drivers/spi/spi-cadence-quadspi.c +@@ -1697,8 +1697,11 @@ static int cqspi_probe(struct platform_device *pdev) + cqspi->slow_sram = true; + + if (of_device_is_compatible(pdev->dev.of_node, +- "xlnx,versal-ospi-1.0")) +- dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); ++ "xlnx,versal-ospi-1.0")) { ++ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); ++ if (ret) ++ goto probe_reset_failed; ++ } + } + + ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0, +-- +2.39.2 + diff --git a/queue-6.1/spi-fsl-dspi-avoid-sck-glitches-with-continuous-tran.patch b/queue-6.1/spi-fsl-dspi-avoid-sck-glitches-with-continuous-tran.patch new file mode 100644 index 00000000000..f4bfea34fb1 --- /dev/null +++ b/queue-6.1/spi-fsl-dspi-avoid-sck-glitches-with-continuous-tran.patch @@ -0,0 +1,95 @@ +From c7b54c57270b95df8a01f4e904333acba7f26d13 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 30 May 2023 01:34:02 +0300 +Subject: spi: fsl-dspi: avoid SCK glitches with continuous transfers +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Vladimir Oltean + +[ Upstream commit c5c31fb71f16ba75bad4ade208abbae225305b65 ] + +The DSPI controller has configurable timing for + +(a) tCSC: the interval between the assertion of the chip select and the + first clock edge + +(b) tASC: the interval between the last clock edge and the deassertion + of the chip select + +What is a bit surprising, but is documented in the figure "Example of +continuous transfer (CPHA=1, CONT=1)" in the datasheet, is that when the +chip select stays asserted between multiple TX FIFO writes, the tCSC and +tASC times still apply. With CONT=1, chip select remains asserted, but +SCK takes a break and goes to the idle state for tASC + tCSC ns. + +In other words, the default values (of 0 and 0 ns) result in SCK +glitches where the SCK transition to the idle state, as well as the SCK +transition from the idle state, will have no delay in between, and it +may appear that a SCK cycle has simply gone missing. The resulting +timing violation might cause data corruption in many peripherals, as +their chip select is asserted. + +The driver has device tree bindings for tCSC ("fsl,spi-cs-sck-delay") +and tASC ("fsl,spi-sck-cs-delay"), but these are only specified to apply +when the chip select toggles in the first place, and this timing +characteristic depends on each peripheral. Many peripherals do not have +explicit timing requirements, so many device trees do not have these +properties present at all. + +Nonetheless, the lack of SCK glitches is a common sense requirement, and +since the SCK stays in the idle state during transfers for tCSC+tASC ns, +and that in itself should look like half a cycle, then let's ensure that +tCSC and tASC are at least a quarter of a SCK period, such that their +sum is at least half of one. 
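+
+A worked example of the clamp, assuming (purely for illustration) an SCK of
+10 MHz and no delays specified in the device tree:
+
+	period_ns         = NSEC_PER_SEC / 10 MHz = 100 ns
+	quarter_period_ns = 100 ns / 4            = 25 ns
+	tCSC = tASC       = max(0 ns, 25 ns)      = 25 ns
+	tCSC + tASC       = 50 ns                 = half an SCK period
+
+so the idle stretch that SCK takes between back-to-back FIFO words still
+looks like at least half a cycle instead of collapsing to zero.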
+ +Fixes: 95bf15f38641 ("spi: fsl-dspi: Add ~50ns delay between cs and sck") +Reported-by: Lisa Chen (陈敏捷) +Debugged-by: Lisa Chen (陈敏捷) +Tested-by: Lisa Chen (陈敏捷) +Signed-off-by: Vladimir Oltean +Link: https://lore.kernel.org/r/20230529223402.1199503-1-vladimir.oltean@nxp.com +Signed-off-by: Mark Brown +Signed-off-by: Sasha Levin +--- + drivers/spi/spi-fsl-dspi.c | 15 +++++++++++++++ + 1 file changed, 15 insertions(+) + +diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c +index a33e547b7d395..01930b52c4fb8 100644 +--- a/drivers/spi/spi-fsl-dspi.c ++++ b/drivers/spi/spi-fsl-dspi.c +@@ -975,7 +975,9 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, + static int dspi_setup(struct spi_device *spi) + { + struct fsl_dspi *dspi = spi_controller_get_devdata(spi->controller); ++ u32 period_ns = DIV_ROUND_UP(NSEC_PER_SEC, spi->max_speed_hz); + unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0; ++ u32 quarter_period_ns = DIV_ROUND_UP(period_ns, 4); + u32 cs_sck_delay = 0, sck_cs_delay = 0; + struct fsl_dspi_platform_data *pdata; + unsigned char pasc = 0, asc = 0; +@@ -1003,6 +1005,19 @@ static int dspi_setup(struct spi_device *spi) + sck_cs_delay = pdata->sck_cs_delay; + } + ++ /* Since tCSC and tASC apply to continuous transfers too, avoid SCK ++ * glitches of half a cycle by never allowing tCSC + tASC to go below ++ * half a SCK period. ++ */ ++ if (cs_sck_delay < quarter_period_ns) ++ cs_sck_delay = quarter_period_ns; ++ if (sck_cs_delay < quarter_period_ns) ++ sck_cs_delay = quarter_period_ns; ++ ++ dev_dbg(&spi->dev, ++ "DSPI controller timing params: CS-to-SCK delay %u ns, SCK-to-CS delay %u ns\n", ++ cs_sck_delay, sck_cs_delay); ++ + clkrate = clk_get_rate(dspi->clk); + hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate); + +-- +2.39.2 + diff --git a/queue-6.1/wifi-cfg80211-fix-link-del-callback-to-call-correct-.patch b/queue-6.1/wifi-cfg80211-fix-link-del-callback-to-call-correct-.patch new file mode 100644 index 00000000000..cfcfffb9bef --- /dev/null +++ b/queue-6.1/wifi-cfg80211-fix-link-del-callback-to-call-correct-.patch @@ -0,0 +1,50 @@ +From 003cec0d5f07b9c4a1c80e521e27c9f0dd4f970c Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 8 Jun 2023 16:36:01 +0300 +Subject: wifi: cfg80211: fix link del callback to call correct handler + +From: Benjamin Berg + +[ Upstream commit 1ff56684fa8682bdfbbce4e12cf67ab23cb1db05 ] + +The wrapper function was incorrectly calling the add handler instead of +the del handler. This had no negative side effect as the default +handlers are essentially identical. 
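+
+In short, the "del" wrapper was dispatching to the "add" op. A condensed
+before/after view, matching the hunk below:
+
+	/* before: copy/paste slip, the del wrapper tested and called add */
+	if (rdev->ops->add_intf_link)
+		rdev->ops->add_intf_link(&rdev->wiphy, wdev, link_id);
+
+	/* after: it calls the op it was named for */
+	if (rdev->ops->del_intf_link)
+		rdev->ops->del_intf_link(&rdev->wiphy, wdev, link_id);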
+ +Fixes: f2a0290b2df2 ("wifi: cfg80211: add optional link add/remove callbacks") +Signed-off-by: Benjamin Berg +Signed-off-by: Gregory Greenman +Link: https://lore.kernel.org/r/20230608163202.ebd00e000459.Iaff7dc8d1cdecf77f53ea47a0e5080caa36ea02a@changeid +Signed-off-by: Johannes Berg +Signed-off-by: Sasha Levin +--- + net/wireless/rdev-ops.h | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h +index 13b209a8db287..ee853a14a02de 100644 +--- a/net/wireless/rdev-ops.h ++++ b/net/wireless/rdev-ops.h +@@ -2,7 +2,7 @@ + /* + * Portions of this file + * Copyright(c) 2016-2017 Intel Deutschland GmbH +- * Copyright (C) 2018, 2021-2022 Intel Corporation ++ * Copyright (C) 2018, 2021-2023 Intel Corporation + */ + #ifndef __CFG80211_RDEV_OPS + #define __CFG80211_RDEV_OPS +@@ -1441,8 +1441,8 @@ rdev_del_intf_link(struct cfg80211_registered_device *rdev, + unsigned int link_id) + { + trace_rdev_del_intf_link(&rdev->wiphy, wdev, link_id); +- if (rdev->ops->add_intf_link) +- rdev->ops->add_intf_link(&rdev->wiphy, wdev, link_id); ++ if (rdev->ops->del_intf_link) ++ rdev->ops->del_intf_link(&rdev->wiphy, wdev, link_id); + trace_rdev_return_void(&rdev->wiphy); + } + +-- +2.39.2 + diff --git a/queue-6.1/wifi-mac80211-fix-link-activation-settings-order.patch b/queue-6.1/wifi-mac80211-fix-link-activation-settings-order.patch new file mode 100644 index 00000000000..df291e93dfc --- /dev/null +++ b/queue-6.1/wifi-mac80211-fix-link-activation-settings-order.patch @@ -0,0 +1,60 @@ +From 229b04b731054a195be371962e27afc7a3f7fd90 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 8 Jun 2023 16:35:59 +0300 +Subject: wifi: mac80211: fix link activation settings order + +From: Johannes Berg + +[ Upstream commit 01605ad6c3e8608d7e147c9b75d67eb8a3d27d88 ] + +In the normal MLME code we always call +ieee80211_mgd_set_link_qos_params() before +ieee80211_link_info_change_notify() and some drivers, +notably iwlwifi, rely on that as they don't do anything +(but store the data) in their conf_tx. + +Fix the order here to be the same as in the normal code +paths, so this isn't broken. 
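+
+The resulting order of operations, compressed into a sketch (the
+BSS_CHANGED_* flags are collapsed into a single "changed" placeholder here;
+the full list is in the hunk below):
+
+	/* stage the TX/QoS parameters in the driver first ... */
+	ieee80211_mgd_set_link_qos_params(link);
+
+	/* ... then send the notification, so drivers whose conf_tx hook only
+	 * stores the values (e.g. iwlwifi) already have them in place when
+	 * they react to the change
+	 */
+	ieee80211_link_info_change_notify(sdata, link, changed);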
+ +Fixes: 3d9011029227 ("wifi: mac80211: implement link switching") +Signed-off-by: Johannes Berg +Signed-off-by: Gregory Greenman +Link: https://lore.kernel.org/r/20230608163202.a2a86bba2f80.Iac97e04827966d22161e63bb6e201b4061e9651b@changeid +Signed-off-by: Johannes Berg +Signed-off-by: Sasha Levin +--- + net/mac80211/link.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/net/mac80211/link.c b/net/mac80211/link.c +index a1b3031fefce2..a85b44c1bc995 100644 +--- a/net/mac80211/link.c ++++ b/net/mac80211/link.c +@@ -2,7 +2,7 @@ + /* + * MLO link handling + * +- * Copyright (C) 2022 Intel Corporation ++ * Copyright (C) 2022-2023 Intel Corporation + */ + #include + #include +@@ -387,6 +387,7 @@ static int _ieee80211_set_active_links(struct ieee80211_sub_if_data *sdata, + IEEE80211_CHANCTX_SHARED); + WARN_ON_ONCE(ret); + ++ ieee80211_mgd_set_link_qos_params(link); + ieee80211_link_info_change_notify(sdata, link, + BSS_CHANGED_ERP_CTS_PROT | + BSS_CHANGED_ERP_PREAMBLE | +@@ -401,7 +402,6 @@ static int _ieee80211_set_active_links(struct ieee80211_sub_if_data *sdata, + BSS_CHANGED_TWT | + BSS_CHANGED_HE_OBSS_PD | + BSS_CHANGED_HE_BSS_COLOR); +- ieee80211_mgd_set_link_qos_params(link); + } + + old_active = sdata->vif.active_links; +-- +2.39.2 + diff --git a/queue-6.1/wifi-mac80211-take-lock-before-setting-vif-links.patch b/queue-6.1/wifi-mac80211-take-lock-before-setting-vif-links.patch new file mode 100644 index 00000000000..c7772ade3dd --- /dev/null +++ b/queue-6.1/wifi-mac80211-take-lock-before-setting-vif-links.patch @@ -0,0 +1,61 @@ +From b9464162c96d2113d9fc975fbcf643d97fbd5be7 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 8 Jun 2023 16:36:02 +0300 +Subject: wifi: mac80211: take lock before setting vif links + +From: Benjamin Berg + +[ Upstream commit 15846f95ab01b71fdb1cef8df73680aad41edf70 ] + +ieee80211_vif_set_links requires the sdata->local->mtx lock to be held. +Add the appropriate locking around the calls in both the link add and +remove handlers. + +This causes a warning when e.g. ieee80211_link_release_channel is called +via ieee80211_link_stop from ieee80211_vif_update_links. 
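+
+The fix itself is simply wrapping the call in the lock it requires; a
+condensed view of the add callback (error handling and the 4-address check
+omitted, see the hunk below):
+
+	mutex_lock(&sdata->local->mtx);
+	res = ieee80211_vif_set_links(sdata, wdev->valid_links);
+	mutex_unlock(&sdata->local->mtx);
+
+	return res;
+
+The del callback takes the same mutex around its ieee80211_vif_set_links()
+call but ignores the return value.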
+ +Fixes: 0d8c4a3c8688 ("wifi: mac80211: implement add/del interface link callbacks") +Signed-off-by: Benjamin Berg +Signed-off-by: Gregory Greenman +Link: https://lore.kernel.org/r/20230608163202.fa0c6597fdad.I83dd70359f6cda30f86df8418d929c2064cf4995@changeid +Signed-off-by: Johannes Berg +Signed-off-by: Sasha Levin +--- + net/mac80211/cfg.c | 9 ++++++++- + 1 file changed, 8 insertions(+), 1 deletion(-) + +diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c +index 06b9df2fbcd77..23a44edcb11f7 100644 +--- a/net/mac80211/cfg.c ++++ b/net/mac80211/cfg.c +@@ -4732,11 +4732,16 @@ static int ieee80211_add_intf_link(struct wiphy *wiphy, + unsigned int link_id) + { + struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); ++ int res; + + if (wdev->use_4addr) + return -EOPNOTSUPP; + +- return ieee80211_vif_set_links(sdata, wdev->valid_links); ++ mutex_lock(&sdata->local->mtx); ++ res = ieee80211_vif_set_links(sdata, wdev->valid_links); ++ mutex_unlock(&sdata->local->mtx); ++ ++ return res; + } + + static void ieee80211_del_intf_link(struct wiphy *wiphy, +@@ -4745,7 +4750,9 @@ static void ieee80211_del_intf_link(struct wiphy *wiphy, + { + struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); + ++ mutex_lock(&sdata->local->mtx); + ieee80211_vif_set_links(sdata, wdev->valid_links); ++ mutex_unlock(&sdata->local->mtx); + } + + static int sta_add_link_station(struct ieee80211_local *local, +-- +2.39.2 +