Fixes for 6.3
author     Sasha Levin <sashal@kernel.org>
           Sun, 18 Jun 2023 14:10:47 +0000 (10:10 -0400)
committer  Sasha Levin <sashal@kernel.org>
           Sun, 18 Jun 2023 14:10:47 +0000 (10:10 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
76 files changed:
queue-6.3/afs-fix-vlserver-probe-rtt-handling.patch [new file with mode: 0644]
queue-6.3/cifs-fix-lease-break-oops-in-xfstest-generic-098.patch [new file with mode: 0644]
queue-6.3/dm-don-t-lock-fs-when-the-map-is-null-during-suspend.patch [new file with mode: 0644]
queue-6.3/drm-bridge-ti-sn65dsi86-avoid-possible-buffer-overfl.patch [new file with mode: 0644]
queue-6.3/drm-nouveau-add-nv_encoder-pointer-check-for-null.patch [new file with mode: 0644]
queue-6.3/drm-nouveau-don-t-detect-dsm-for-non-nvidia-device.patch [new file with mode: 0644]
queue-6.3/drm-nouveau-dp-check-for-null-nv_connector-native_mo.patch [new file with mode: 0644]
queue-6.3/ext4-drop-the-call-to-ext4_error-from-ext4_get_group.patch [new file with mode: 0644]
queue-6.3/iavf-remove-mask-from-iavf_irq_enable_queues.patch [new file with mode: 0644]
queue-6.3/ib-isert-fix-dead-lock-in-ib_isert.patch [new file with mode: 0644]
queue-6.3/ib-isert-fix-incorrect-release-of-isert-connection.patch [new file with mode: 0644]
queue-6.3/ib-isert-fix-possible-list-corruption-in-cma-handler.patch [new file with mode: 0644]
queue-6.3/ib-uverbs-fix-to-consider-event-queue-closing-also-u.patch [new file with mode: 0644]
queue-6.3/ice-do-not-busy-wait-to-read-gnss-data.patch [new file with mode: 0644]
queue-6.3/ice-don-t-dereference-null-in-ice_gnss_read-error-pa.patch [new file with mode: 0644]
queue-6.3/ice-fix-ice-module-unload.patch [new file with mode: 0644]
queue-6.3/ice-fix-xdp-memory-leak-when-nic-is-brought-up-and-d.patch [new file with mode: 0644]
queue-6.3/igb-fix-extts-capture-value-format-for-82580-i354-i3.patch [new file with mode: 0644]
queue-6.3/igb-fix-nvm.ops.read-error-handling.patch [new file with mode: 0644]
queue-6.3/igc-clean-the-tx-buffer-and-tx-descriptor-ring.patch [new file with mode: 0644]
queue-6.3/igc-fix-possible-system-crash-when-loading-module.patch [new file with mode: 0644]
queue-6.3/ipvlan-fix-bound-dev-checking-for-ipv6-l3s-mode.patch [new file with mode: 0644]
queue-6.3/net-dsa-felix-fix-taprio-guard-band-overflow-at-10mb.patch [new file with mode: 0644]
queue-6.3/net-enetc-correct-the-indexes-of-highest-and-2nd-hig.patch [new file with mode: 0644]
queue-6.3/net-ethernet-ti-am65-cpsw-call-of_node_put-on-error-.patch [new file with mode: 0644]
queue-6.3/net-ethtool-correct-max-attribute-value-for-stats.patch [new file with mode: 0644]
queue-6.3/net-lapbether-only-support-ethernet-devices.patch [new file with mode: 0644]
queue-6.3/net-macsec-fix-double-free-of-percpu-stats.patch [new file with mode: 0644]
queue-6.3/net-phylink-report-correct-max-speed-for-qusgmii.patch [new file with mode: 0644]
queue-6.3/net-phylink-use-a-dedicated-helper-to-parse-usgmii-c.patch [new file with mode: 0644]
queue-6.3/net-renesas-rswitch-fix-timestamp-feature-after-all-.patch [new file with mode: 0644]
queue-6.3/net-sched-act_ct-fix-promotion-of-offloaded-unreplie.patch [new file with mode: 0644]
queue-6.3/net-sched-act_pedit-parse-l3-header-for-l4-offset.patch [new file with mode: 0644]
queue-6.3/net-sched-act_pedit-remove-extra-check-for-key-type.patch [new file with mode: 0644]
queue-6.3/net-sched-cls_api-fix-lockup-on-flushing-explicitly-.patch [new file with mode: 0644]
queue-6.3/net-sched-cls_u32-fix-reference-counter-leak-leading.patch [new file with mode: 0644]
queue-6.3/net-sched-qdisc_destroy-old-ingress-and-clsact-qdisc.patch [new file with mode: 0644]
queue-6.3/net-sched-refactor-qdisc_graft-for-ingress-and-clsac.patch [new file with mode: 0644]
queue-6.3/net-sched-taprio-fix-slab-out-of-bounds-read-in-tapr.patch [new file with mode: 0644]
queue-6.3/net-tipc-resize-nlattr-array-to-correct-size.patch [new file with mode: 0644]
queue-6.3/netfilter-nf_tables-incorrect-error-path-handling-wi.patch [new file with mode: 0644]
queue-6.3/netfilter-nf_tables-integrate-pipapo-into-commit-pro.patch [new file with mode: 0644]
queue-6.3/netfilter-nfnetlink-skip-error-delivery-on-batch-in-.patch [new file with mode: 0644]
queue-6.3/octeon_ep-add-missing-check-for-ioremap.patch [new file with mode: 0644]
queue-6.3/octeontx2-af-fix-lbk-link-credits-on-cn10k.patch [new file with mode: 0644]
queue-6.3/octeontx2-af-fix-promiscuous-mode.patch [new file with mode: 0644]
queue-6.3/octeontx2-af-fixed-resource-availability-check.patch [new file with mode: 0644]
queue-6.3/ping6-fix-send-to-link-local-addresses-with-vrf.patch [new file with mode: 0644]
queue-6.3/rdma-bnxt_re-fix-reporting-active_-speed-width-attri.patch [new file with mode: 0644]
queue-6.3/rdma-cma-always-set-static-rate-to-0-for-roce.patch [new file with mode: 0644]
queue-6.3/rdma-mlx5-create-an-indirect-flow-table-for-steering.patch [new file with mode: 0644]
queue-6.3/rdma-mlx5-fix-affinity-assignment.patch [new file with mode: 0644]
queue-6.3/rdma-mlx5-initiate-dropless-rq-for-raw-ethernet-func.patch [new file with mode: 0644]
queue-6.3/rdma-rtrs-fix-rxe_dealloc_pd-warning.patch [new file with mode: 0644]
queue-6.3/rdma-rtrs-fix-the-last-iu-buf-leak-in-err-path.patch [new file with mode: 0644]
queue-6.3/rdma-rxe-fix-packet-length-checks.patch [new file with mode: 0644]
queue-6.3/rdma-rxe-fix-ref-count-error-in-check_rkey.patch [new file with mode: 0644]
queue-6.3/rdma-rxe-fix-rxe_cq_post.patch [new file with mode: 0644]
queue-6.3/rdma-rxe-fix-the-use-before-initialization-error-of-.patch [new file with mode: 0644]
queue-6.3/regulator-qcom-rpmh-add-support-for-pmm8654au-regula.patch [new file with mode: 0644]
queue-6.3/regulator-qcom-rpmh-fix-regulators-for-pm8550.patch [new file with mode: 0644]
queue-6.3/revert-media-dvb-core-fix-use-after-free-on-race-con.patch [new file with mode: 0644]
queue-6.3/sctp-fix-an-error-code-in-sctp_sf_eat_auth.patch [new file with mode: 0644]
queue-6.3/selftests-forwarding-hw_stats_l3-set-addrgenmode-in-.patch [new file with mode: 0644]
queue-6.3/selftests-ptp-fix-timestamp-printf-format-for-ptp_sy.patch [new file with mode: 0644]
queue-6.3/selftests-tc-testing-fix-error-failed-to-find-target.patch [new file with mode: 0644]
queue-6.3/selftests-tc-testing-fix-error-specified-qdisc-kind-.patch [new file with mode: 0644]
queue-6.3/selftests-tc-testing-fix-sfb-db-test.patch [new file with mode: 0644]
queue-6.3/series
queue-6.3/sfc-fix-xdp-queues-mode-with-legacy-irq.patch [new file with mode: 0644]
queue-6.3/spi-cadence-quadspi-add-missing-check-for-dma_set_ma.patch [new file with mode: 0644]
queue-6.3/spi-fsl-dspi-avoid-sck-glitches-with-continuous-tran.patch [new file with mode: 0644]
queue-6.3/wifi-cfg80211-fix-link-del-callback-to-call-correct-.patch [new file with mode: 0644]
queue-6.3/wifi-mac80211-fix-link-activation-settings-order.patch [new file with mode: 0644]
queue-6.3/wifi-mac80211-fragment-per-sta-profile-correctly.patch [new file with mode: 0644]
queue-6.3/wifi-mac80211-take-lock-before-setting-vif-links.patch [new file with mode: 0644]

diff --git a/queue-6.3/afs-fix-vlserver-probe-rtt-handling.patch b/queue-6.3/afs-fix-vlserver-probe-rtt-handling.patch
new file mode 100644 (file)
index 0000000..2c2b985
--- /dev/null
@@ -0,0 +1,48 @@
+From bb5f85f56eb0ef94f3eb133fd11856b5406c1356 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 16 Jun 2023 22:39:39 +0100
+Subject: afs: Fix vlserver probe RTT handling
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit ba00b190670809c1a89326d80de96d714f6004f2 ]
+
+In the same spirit as commit ca57f02295f1 ("afs: Fix fileserver probe
+RTT handling"), don't rule out using a vlserver just because there
+haven't been enough packets yet to calculate a real rtt.  Always set the
+server's probe rtt from the estimate provided by rxrpc_kernel_get_srtt,
+which is capped at 1 second.
+
+This could lead to EDESTADDRREQ errors when accessing a cell for the
+first time, even though the vl servers are known and have responded to a
+probe.
+
+Fixes: 1d4adfaf6574 ("rxrpc: Make rxrpc_kernel_get_srtt() indicate validity")
+Signed-off-by: Marc Dionne <marc.dionne@auristor.com>
+Signed-off-by: David Howells <dhowells@redhat.com>
+cc: linux-afs@lists.infradead.org
+Link: http://lists.infradead.org/pipermail/linux-afs/2023-June/006746.html
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/afs/vl_probe.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/fs/afs/vl_probe.c b/fs/afs/vl_probe.c
+index d1c7068b4346f..58452b86e6727 100644
+--- a/fs/afs/vl_probe.c
++++ b/fs/afs/vl_probe.c
+@@ -115,8 +115,8 @@ void afs_vlserver_probe_result(struct afs_call *call)
+               }
+       }
+-      if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) &&
+-          rtt_us < server->probe.rtt) {
++      rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us);
++      if (rtt_us < server->probe.rtt) {
+               server->probe.rtt = rtt_us;
+               server->rtt = rtt_us;
+               alist->preferred = index;
+-- 
+2.39.2
+
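The change above amounts to: always take the smoothed-RTT estimate returned by rxrpc_kernel_get_srtt() (rxrpc caps it at 1 second while there are too few samples) and keep the lowest value seen, instead of refusing to consider the server at all. A minimal userspace sketch of that keep-the-minimum bookkeeping, using invented names rather than the afs structures:

#include <stdio.h>

/* Illustrative only: "probe" and record_probe() are stand-ins, not afs code. */
struct probe {
	unsigned int rtt_us;	/* best RTT seen so far */
};

static void record_probe(struct probe *p, unsigned int srtt_estimate_us)
{
	/* Always consider the estimate; keep the smallest value observed. */
	if (srtt_estimate_us < p->rtt_us)
		p->rtt_us = srtt_estimate_us;
}

int main(void)
{
	struct probe p = { .rtt_us = ~0U };

	record_probe(&p, 1000000);	/* capped 1 s estimate, few samples yet */
	record_probe(&p, 3200);		/* a real measurement arrives later */
	printf("preferred RTT: %u us\n", p.rtt_us);
	return 0;
}
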
diff --git a/queue-6.3/cifs-fix-lease-break-oops-in-xfstest-generic-098.patch b/queue-6.3/cifs-fix-lease-break-oops-in-xfstest-generic-098.patch
new file mode 100644 (file)
index 0000000..2be3d83
--- /dev/null
@@ -0,0 +1,45 @@
+From ab01e1a434bec62d5b3e056d12c39ed635e3aed1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 11 Jun 2023 11:23:32 -0500
+Subject: cifs: fix lease break oops in xfstest generic/098
+
+From: Steve French <stfrench@microsoft.com>
+
+[ Upstream commit c774e6779f38bf36f0cce65e30793704bab4b0d7 ]
+
+umount can race with a lease break, so we need to check that
+tcon->ses->server is still valid before sending the lease
+break response.
+
+Reviewed-by: Bharath SM <bharathsm@microsoft.com>
+Reviewed-by: Shyam Prasad N <sprasad@microsoft.com>
+Fixes: 59a556aebc43 ("SMB3: drop reference to cfile before sending oplock break")
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/cifs/file.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index df88b8c04d03d..051283386e229 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -4942,9 +4942,13 @@ void cifs_oplock_break(struct work_struct *work)
+        * disconnected since oplock already released by the server
+        */
+       if (!oplock_break_cancelled) {
+-              rc = tcon->ses->server->ops->oplock_response(tcon, persistent_fid,
++              /* check for server null since can race with kill_sb calling tree disconnect */
++              if (tcon->ses && tcon->ses->server) {
++                      rc = tcon->ses->server->ops->oplock_response(tcon, persistent_fid,
+                               volatile_fid, net_fid, cinode);
+-              cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
++                      cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
++              } else
++                      pr_warn_once("lease break not sent for unmounted share\n");
+       }
+       cifs_done_oplock_break(cinode);
+-- 
+2.39.2
+
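The guard added above is the usual defence when another path (here, umount/kill_sb) can tear down part of a pointer chain concurrently: re-check each link before dereferencing it, and fall back to a warning when the chain is gone. A small self-contained sketch of the idea with invented structure names (not the real cifs types):

#include <stdio.h>

struct server { const char *name; };
struct session { struct server *server; };
struct tcon { struct session *ses; };

static void oplock_break_response(struct tcon *tcon)
{
	/* Only send the response if the session and server still exist. */
	if (tcon->ses && tcon->ses->server)
		printf("lease break response sent via %s\n",
		       tcon->ses->server->name);
	else
		fprintf(stderr, "lease break not sent for unmounted share\n");
}

int main(void)
{
	struct server srv = { .name = "smb-server" };
	struct session ses = { .server = &srv };
	struct tcon mounted = { .ses = &ses };
	struct tcon unmounted = { .ses = NULL };	/* raced with umount */

	oplock_break_response(&mounted);
	oplock_break_response(&unmounted);
	return 0;
}
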
diff --git a/queue-6.3/dm-don-t-lock-fs-when-the-map-is-null-during-suspend.patch b/queue-6.3/dm-don-t-lock-fs-when-the-map-is-null-during-suspend.patch
new file mode 100644 (file)
index 0000000..1f7bb3c
--- /dev/null
@@ -0,0 +1,65 @@
+From 514e502e16b737ea99816066abc291911af330e7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 1 Jun 2023 14:14:23 +0800
+Subject: dm: don't lock fs when the map is NULL during suspend or resume
+
+From: Li Lingfeng <lilingfeng3@huawei.com>
+
+[ Upstream commit 2760904d895279f87196f0fa9ec570c79fe6a2e4 ]
+
+As described in commit 38d11da522aa ("dm: don't lock fs when the map is
+NULL in process of resume"), a deadlock may be triggered between
+do_resume() and do_mount().
+
+This commit preserves the fix from commit 38d11da522aa but moves it to
+where it also serves to fix a similar deadlock between do_suspend()
+and do_mount().  It does so, if the active map is NULL, by clearing
+DM_SUSPEND_LOCKFS_FLAG in dm_suspend() which is called by both
+do_suspend() and do_resume().
+
+Fixes: 38d11da522aa ("dm: don't lock fs when the map is NULL in process of resume")
+Signed-off-by: Li Lingfeng <lilingfeng3@huawei.com>
+Signed-off-by: Mike Snitzer <snitzer@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/dm-ioctl.c | 5 +----
+ drivers/md/dm.c       | 4 ++++
+ 2 files changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
+index cc77cf3d41092..7d5c9c582ed2d 100644
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -1168,13 +1168,10 @@ static int do_resume(struct dm_ioctl *param)
+       /* Do we need to load a new map ? */
+       if (new_map) {
+               sector_t old_size, new_size;
+-              int srcu_idx;
+               /* Suspend if it isn't already suspended */
+-              old_map = dm_get_live_table(md, &srcu_idx);
+-              if ((param->flags & DM_SKIP_LOCKFS_FLAG) || !old_map)
++              if (param->flags & DM_SKIP_LOCKFS_FLAG)
+                       suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
+-              dm_put_live_table(md, srcu_idx);
+               if (param->flags & DM_NOFLUSH_FLAG)
+                       suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;
+               if (!dm_suspended_md(md))
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index dfde0088147a1..0a10e94a6db17 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -2795,6 +2795,10 @@ int dm_suspend(struct mapped_device *md, unsigned int suspend_flags)
+       }
+       map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
++      if (!map) {
++              /* avoid deadlock with fs/namespace.c:do_mount() */
++              suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
++      }
+       r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
+       if (r)
+-- 
+2.39.2
+
diff --git a/queue-6.3/drm-bridge-ti-sn65dsi86-avoid-possible-buffer-overfl.patch b/queue-6.3/drm-bridge-ti-sn65dsi86-avoid-possible-buffer-overfl.patch
new file mode 100644 (file)
index 0000000..31a3994
--- /dev/null
@@ -0,0 +1,39 @@
+From 07ee2f8cf49fc6cd033e2f41aad868ce57633eae Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Jun 2023 09:24:43 +0800
+Subject: drm/bridge: ti-sn65dsi86: Avoid possible buffer overflow
+
+From: Su Hui <suhui@nfschina.com>
+
+[ Upstream commit 95011f267c44a4d1f9ca1769e8a29ab2c559e004 ]
+
+Smatch error: buffer overflow 'ti_sn_bridge_refclk_lut' 5 <= 5.
+
+Fixes: cea86c5bb442 ("drm/bridge: ti-sn65dsi86: Implement the pwm_chip")
+Signed-off-by: Su Hui <suhui@nfschina.com>
+Reviewed-by: Douglas Anderson <dianders@chromium.org>
+Signed-off-by: Douglas Anderson <dianders@chromium.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230608012443.839372-1-suhui@nfschina.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/bridge/ti-sn65dsi86.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+index 1e26fa63845a2..0ae8a52acf5e4 100644
+--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
++++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+@@ -298,6 +298,10 @@ static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata)
+               if (refclk_lut[i] == refclk_rate)
+                       break;
++      /* avoid buffer overflow and "1" is the default rate in the datasheet. */
++      if (i >= refclk_lut_size)
++              i = 1;
++
+       regmap_update_bits(pdata->regmap, SN_DPPLL_SRC_REG, REFCLK_FREQ_MASK,
+                          REFCLK_FREQ(i));
+-- 
+2.39.2
+
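The fix above is the standard guard for a "scan the lookup table, then index it" loop: when nothing matches, the loop counter equals the table size and must not be used as an index. A self-contained sketch of that pattern; the table contents and helper name below are illustrative, not copied from the driver:

#include <stdio.h>

/* Hypothetical reference-clock table, similar in shape to the driver's LUT. */
static const unsigned long refclk_lut[] = {
	12000000, 19200000, 26000000, 27000000, 38400000,
};
static const unsigned int refclk_lut_size =
	sizeof(refclk_lut) / sizeof(refclk_lut[0]);

static unsigned int pick_refclk_index(unsigned long refclk_rate)
{
	unsigned int i;

	for (i = 0; i < refclk_lut_size; i++)
		if (refclk_lut[i] == refclk_rate)
			break;

	/* No match: i == refclk_lut_size, one past the end.  Clamp to a
	 * known-good default index instead of reading past the table. */
	if (i >= refclk_lut_size)
		i = 1;

	return i;
}

int main(void)
{
	printf("index for 26 MHz: %u\n", pick_refclk_index(26000000));
	printf("index for unknown rate: %u\n", pick_refclk_index(99));
	return 0;
}
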
diff --git a/queue-6.3/drm-nouveau-add-nv_encoder-pointer-check-for-null.patch b/queue-6.3/drm-nouveau-add-nv_encoder-pointer-check-for-null.patch
new file mode 100644 (file)
index 0000000..09f15f3
--- /dev/null
@@ -0,0 +1,43 @@
+From 80345941c114e3d673d8182fd166654591206b34 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 May 2023 13:33:20 +0300
+Subject: drm/nouveau: add nv_encoder pointer check for NULL
+
+From: Natalia Petrova <n.petrova@fintech.ru>
+
+[ Upstream commit 55b94bb8c42464bad3d2217f6874aa1a85664eac ]
+
+The nv_encoder pointer in nouveau_connector.c can still be NULL when
+the code jumps to the goto label, and is then dereferenced.
+This patch adds a NULL check to avoid that.
+
+Found by Linux Verification Center (linuxtesting.org) with SVACE.
+
+Fixes: 3195c5f9784a ("drm/nouveau: set encoder for lvds")
+Signed-off-by: Natalia Petrova <n.petrova@fintech.ru>
+Reviewed-by: Lyude Paul <lyude@redhat.com>
+[Fixed patch title]
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230512103320.82234-1-n.petrova@fintech.ru
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/nouveau/nouveau_connector.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
+index 5dbf025e68737..f75c6f09dd2af 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -730,7 +730,8 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
+ #endif
+       nouveau_connector_set_edid(nv_connector, edid);
+-      nouveau_connector_set_encoder(connector, nv_encoder);
++      if (nv_encoder)
++              nouveau_connector_set_encoder(connector, nv_encoder);
+       return status;
+ }
+-- 
+2.39.2
+
diff --git a/queue-6.3/drm-nouveau-don-t-detect-dsm-for-non-nvidia-device.patch b/queue-6.3/drm-nouveau-don-t-detect-dsm-for-non-nvidia-device.patch
new file mode 100644 (file)
index 0000000..638b375
--- /dev/null
@@ -0,0 +1,62 @@
+From 2ddb78c7f9140e24e9a18440603dc7f5e1a1ef0f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 May 2023 04:11:56 +0700
+Subject: drm/nouveau: don't detect DSM for non-NVIDIA device
+
+From: Ratchanan Srirattanamet <peathot@hotmail.com>
+
+[ Upstream commit 11d24327c2d7ad7f24fcc44fb00e1fa91ebf6525 ]
+
+The call site of nouveau_dsm_pci_probe() uses a single set of output
+variables for all invocations. So, we must not write anything to them
+unless it's an NVIDIA device. Otherwise, if we are called with another
+device after the NVIDIA device, we'll clobber the result of the NVIDIA
+device.
+
+For example, if the other device doesn't have _PR3 resources, the
+detection later would miss the presence of power resource support, and
+the rest of the code will keep using Optimus DSM, breaking power
+management for that machine.
+
+Also, because we're detecting NVIDIA's DSM, it doesn't make sense to run
+this detection on a non-NVIDIA device anyway. Thus, check at the
+beginning of the detection code if this is an NVIDIA card, and just
+return if it isn't.
+
+This, together with commit d22915d22ded ("drm/nouveau/devinit/tu102-:
+wait for GFW_BOOT_PROGRESS == COMPLETED") developed independently and
+landed earlier, fixes runtime power management of the NVIDIA card in
+Lenovo Legion 5-15ARH05. Without this patch, the GPU resumption code
+will "timeout", sometimes hanging userspace.
+
+As a bonus, we'll also stop preventing _PR3 usage from the bridge for
+unrelated devices, which is always nice, I guess.
+
+Fixes: ccfc2d5cdb02 ("drm/nouveau: Use generic helper to check _PR3 presence")
+Signed-off-by: Ratchanan Srirattanamet <peathot@hotmail.com>
+Closes: https://gitlab.freedesktop.org/drm/nouveau/-/issues/79
+Reviewed-by: Karol Herbst <kherbst@redhat.com>
+Signed-off-by: Karol Herbst <kherbst@redhat.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/DM6PR19MB2780805D4BE1E3F9B3AC96D0BC409@DM6PR19MB2780.namprd19.prod.outlook.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/nouveau/nouveau_acpi.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
+index 8cf096f841a90..a2ae8c21e4dce 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
++++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
+@@ -220,6 +220,9 @@ static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out
+       int optimus_funcs;
+       struct pci_dev *parent_pdev;
++      if (pdev->vendor != PCI_VENDOR_ID_NVIDIA)
++              return;
++
+       *has_pr3 = false;
+       parent_pdev = pci_upstream_bridge(pdev);
+       if (parent_pdev) {
+-- 
+2.39.2
+
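The early return above matters because the caller reuses one set of output variables across every device it probes, so a helper must not write to them for devices it is not responsible for. A toy userspace model of that rule; the struct is invented and only the PCI vendor IDs are real:

#include <stdbool.h>
#include <stdio.h>

#define VENDOR_NVIDIA	0x10de
#define VENDOR_INTEL	0x8086

struct fake_pci_dev {
	unsigned short vendor;
	bool supports_pr3;
};

static void dsm_probe(const struct fake_pci_dev *dev, bool *has_pr3)
{
	/* Not our vendor: leave the caller's shared results untouched. */
	if (dev->vendor != VENDOR_NVIDIA)
		return;

	*has_pr3 = dev->supports_pr3;
}

int main(void)
{
	bool has_pr3 = false;
	struct fake_pci_dev gpu   = { .vendor = VENDOR_NVIDIA, .supports_pr3 = true };
	struct fake_pci_dev other = { .vendor = VENDOR_INTEL,  .supports_pr3 = false };

	dsm_probe(&gpu, &has_pr3);	/* records PR3 support */
	dsm_probe(&other, &has_pr3);	/* must not clobber the earlier result */
	printf("has_pr3 = %s\n", has_pr3 ? "true" : "false");
	return 0;
}
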
diff --git a/queue-6.3/drm-nouveau-dp-check-for-null-nv_connector-native_mo.patch b/queue-6.3/drm-nouveau-dp-check-for-null-nv_connector-native_mo.patch
new file mode 100644 (file)
index 0000000..e320f1f
--- /dev/null
@@ -0,0 +1,53 @@
+From 1b4d130de7295a5501d0cdb7f66bd407eacff842 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 May 2023 14:15:26 +0300
+Subject: drm/nouveau/dp: check for NULL nv_connector->native_mode
+
+From: Natalia Petrova <n.petrova@fintech.ru>
+
+[ Upstream commit 20a2ce87fbaf81e4c3dcb631d738e423959eb320 ]
+
+Add a NULL check before calling nouveau_connector_detect_depth() in
+nouveau_connector_get_modes(), because nv_connector->native_mode can be
+dereferenced there: the connector pointer is passed to
+nouveau_connector_detect_depth() and the same nv_connector->native_mode
+value is used inside it.
+
+Found by Linux Verification Center (linuxtesting.org) with SVACE.
+
+Fixes: d4c2c99bdc83 ("drm/nouveau/dp: remove broken display depth function, use the improved one")
+
+Signed-off-by: Natalia Petrova <n.petrova@fintech.ru>
+Reviewed-by: Lyude Paul <lyude@redhat.com>
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230512111526.82408-1-n.petrova@fintech.ru
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/nouveau/nouveau_connector.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
+index 086b66b60d918..5dbf025e68737 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -966,7 +966,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
+       /* Determine display colour depth for everything except LVDS now,
+        * DP requires this before mode_valid() is called.
+        */
+-      if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
++      if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && nv_connector->native_mode)
+               nouveau_connector_detect_depth(connector);
+       /* Find the native mode if this is a digital panel, if we didn't
+@@ -987,7 +987,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
+        * "native" mode as some VBIOS tables require us to use the
+        * pixel clock as part of the lookup...
+        */
+-      if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
++      if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS && nv_connector->native_mode)
+               nouveau_connector_detect_depth(connector);
+       if (nv_encoder->dcb->type == DCB_OUTPUT_TV)
+-- 
+2.39.2
+
diff --git a/queue-6.3/ext4-drop-the-call-to-ext4_error-from-ext4_get_group.patch b/queue-6.3/ext4-drop-the-call-to-ext4_error-from-ext4_get_group.patch
new file mode 100644 (file)
index 0000000..cbdfcd2
--- /dev/null
@@ -0,0 +1,64 @@
+From 5311501d73d303c1fbf779aaaf9406b0914ae4a1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Jun 2023 12:02:55 +0200
+Subject: ext4: drop the call to ext4_error() from ext4_get_group_info()
+
+From: Fabio M. De Francesco <fmdefrancesco@gmail.com>
+
+[ Upstream commit f451fd97dd2b78f286379203a47d9d295c467255 ]
+
+A recent patch added a call to ext4_error() which is problematic since
+some callers of the ext4_get_group_info() function may be holding a
+spinlock, whereas ext4_error() must never be called in atomic context.
+
+This triggered a report from Syzbot: "BUG: sleeping function called from
+invalid context in ext4_update_super" (see the link below).
+
+Therefore, drop the call to ext4_error() from ext4_get_group_info(). In
+the meantime, use eight-character tabs instead of nine-character ones.
+
+Reported-by: syzbot+4acc7d910e617b360859@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/00000000000070575805fdc6cdb2@google.com/
+Fixes: 5354b2af3406 ("ext4: allow ext4_get_group_info() to fail")
+Suggested-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Fabio M. De Francesco <fmdefrancesco@gmail.com>
+Link: https://lore.kernel.org/r/20230614100446.14337-1-fmdefrancesco@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/balloc.c | 20 +++++++++-----------
+ 1 file changed, 9 insertions(+), 11 deletions(-)
+
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index a38aa33af08ef..8e83b51e3c68a 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -322,17 +322,15 @@ static ext4_fsblk_t ext4_valid_block_bitmap_padding(struct super_block *sb,
+ struct ext4_group_info *ext4_get_group_info(struct super_block *sb,
+                                           ext4_group_t group)
+ {
+-       struct ext4_group_info **grp_info;
+-       long indexv, indexh;
+-
+-       if (unlikely(group >= EXT4_SB(sb)->s_groups_count)) {
+-               ext4_error(sb, "invalid group %u", group);
+-               return NULL;
+-       }
+-       indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb));
+-       indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1);
+-       grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv);
+-       return grp_info[indexh];
++      struct ext4_group_info **grp_info;
++      long indexv, indexh;
++
++      if (unlikely(group >= EXT4_SB(sb)->s_groups_count))
++              return NULL;
++      indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb));
++      indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1);
++      grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv);
++      return grp_info[indexh];
+ }
+ /*
+-- 
+2.39.2
+
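The underlying rule is that a helper which may run in atomic context (for example under a spinlock) must not call anything that can sleep, which is what ext4_error() may do; the helper keeps its range check but leaves the reporting to callers known to be in process context. A minimal sketch of that split, with invented names:

#include <stdio.h>

#define GROUP_COUNT 8

static int group_table[GROUP_COUNT];

static int *get_group_info(unsigned int group)
{
	/* Validate, but do no reporting here: this could be called with a
	 * spinlock held, where sleeping (e.g. for logging I/O) is forbidden. */
	if (group >= GROUP_COUNT)
		return NULL;
	return &group_table[group];
}

int main(void)
{
	unsigned int bad_group = 42;

	if (!get_group_info(bad_group))
		fprintf(stderr, "invalid group %u\n", bad_group);	/* caller reports */
	if (get_group_info(3))
		puts("group 3 looked up fine");
	return 0;
}
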
diff --git a/queue-6.3/iavf-remove-mask-from-iavf_irq_enable_queues.patch b/queue-6.3/iavf-remove-mask-from-iavf_irq_enable_queues.patch
new file mode 100644 (file)
index 0000000..0b3930b
--- /dev/null
@@ -0,0 +1,103 @@
+From 1f7df6ac97e73a0ec4ee0237c49e4050766b2620 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Jun 2023 13:02:26 -0700
+Subject: iavf: remove mask from iavf_irq_enable_queues()
+
+From: Ahmed Zaki <ahmed.zaki@intel.com>
+
+[ Upstream commit c37cf54c12cfaa51e7aaf88708167b0d3259e64e ]
+
+Enable more than 32 IRQs by removing the u32 bit mask in
+iavf_irq_enable_queues(). There is no need for the mask as there are no
+callers that select individual IRQs through the bitmask. Also, if the PF
+allocates more than 32 IRQs, this mask will prevent us from using all of
+them.
+
+Modify the comment in iavf_register.h to show that the maximum number
+allowed for the IRQ index is 63 as per the iAVF standard 1.0 [1].
+
+link: [1] https://www.intel.com/content/dam/www/public/us/en/documents/product-specifications/ethernet-adaptive-virtual-function-hardware-spec.pdf
+Fixes: 5eae00c57f5e ("i40evf: main driver core")
+Signed-off-by: Ahmed Zaki <ahmed.zaki@intel.com>
+Tested-by: Rafal Romanowski <rafal.romanowski@intel.com>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Reviewed-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Link: https://lore.kernel.org/r/20230608200226.451861-1-anthony.l.nguyen@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/iavf/iavf.h          |  2 +-
+ drivers/net/ethernet/intel/iavf/iavf_main.c     | 15 ++++++---------
+ drivers/net/ethernet/intel/iavf/iavf_register.h |  2 +-
+ 3 files changed, 8 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
+index 746ff76f2fb1e..0615bb2b3c33b 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf.h
++++ b/drivers/net/ethernet/intel/iavf/iavf.h
+@@ -526,7 +526,7 @@ void iavf_set_ethtool_ops(struct net_device *netdev);
+ void iavf_update_stats(struct iavf_adapter *adapter);
+ void iavf_reset_interrupt_capability(struct iavf_adapter *adapter);
+ int iavf_init_interrupt_scheme(struct iavf_adapter *adapter);
+-void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask);
++void iavf_irq_enable_queues(struct iavf_adapter *adapter);
+ void iavf_free_all_tx_resources(struct iavf_adapter *adapter);
+ void iavf_free_all_rx_resources(struct iavf_adapter *adapter);
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index 2de4baff4c205..4a66873882d12 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -359,21 +359,18 @@ static void iavf_irq_disable(struct iavf_adapter *adapter)
+ }
+ /**
+- * iavf_irq_enable_queues - Enable interrupt for specified queues
++ * iavf_irq_enable_queues - Enable interrupt for all queues
+  * @adapter: board private structure
+- * @mask: bitmap of queues to enable
+  **/
+-void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask)
++void iavf_irq_enable_queues(struct iavf_adapter *adapter)
+ {
+       struct iavf_hw *hw = &adapter->hw;
+       int i;
+       for (i = 1; i < adapter->num_msix_vectors; i++) {
+-              if (mask & BIT(i - 1)) {
+-                      wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
+-                           IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
+-                           IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
+-              }
++              wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
++                   IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
++                   IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
+       }
+ }
+@@ -387,7 +384,7 @@ void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
+       struct iavf_hw *hw = &adapter->hw;
+       iavf_misc_irq_enable(adapter);
+-      iavf_irq_enable_queues(adapter, ~0);
++      iavf_irq_enable_queues(adapter);
+       if (flush)
+               iavf_flush(hw);
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_register.h b/drivers/net/ethernet/intel/iavf/iavf_register.h
+index bf793332fc9d5..a19e88898a0bb 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_register.h
++++ b/drivers/net/ethernet/intel/iavf/iavf_register.h
+@@ -40,7 +40,7 @@
+ #define IAVF_VFINT_DYN_CTL01_INTENA_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTL01_INTENA_SHIFT)
+ #define IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
+ #define IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
+-#define IAVF_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */
++#define IAVF_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...63 */ /* Reset: VFR */
+ #define IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT 0
+ #define IAVF_VFINT_DYN_CTLN1_INTENA_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT)
+ #define IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
+-- 
+2.39.2
+
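The reason the u32 mask had to go is arithmetic: a 32-bit bitmap can only name vectors 0-31, so with more MSI-X vectors allocated the extra ones were silently skipped. A small standalone illustration (not the driver code; the vector count is hypothetical):

#include <stdint.h>
#include <stdio.h>

static int enabled_with_mask(int num_vectors, uint32_t mask)
{
	int enabled = 0;

	for (int i = 1; i < num_vectors; i++)
		/* bit (i - 1) simply does not exist in a u32 once i > 32 */
		if (i - 1 < 32 && (mask & (UINT32_C(1) << (i - 1))))
			enabled++;
	return enabled;
}

static int enabled_without_mask(int num_vectors)
{
	/* after the patch: every data vector is simply enabled */
	return num_vectors - 1;
}

int main(void)
{
	int vectors = 48;	/* hypothetical PF allocation above 32 */

	printf("mask-based enable: %d of %d data vectors\n",
	       enabled_with_mask(vectors, UINT32_MAX), vectors - 1);
	printf("maskless enable:   %d of %d data vectors\n",
	       enabled_without_mask(vectors), vectors - 1);
	return 0;
}
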
diff --git a/queue-6.3/ib-isert-fix-dead-lock-in-ib_isert.patch b/queue-6.3/ib-isert-fix-dead-lock-in-ib_isert.patch
new file mode 100644 (file)
index 0000000..07f0f64
--- /dev/null
@@ -0,0 +1,121 @@
+From 83cf9fb67937a6a0b1a25c533f1f2d176e6bee0a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jun 2023 03:25:29 -0700
+Subject: IB/isert: Fix dead lock in ib_isert
+
+From: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
+
+[ Upstream commit 691b0480933f0ce88a81ed1d1a0aff340ff6293a ]
+
+- When an iSER session is released, the ib_isert module takes a mutex
+  lock and releases all pending connections. As part of this, ib_isert
+  destroys the rdma cm_id. To destroy the cm_id, the rdma_cm module sends
+  CM events to the CMA handler of ib_isert. This handler takes the same
+  mutex lock, leading to a deadlock between the ib_isert and rdma_cm
+  modules.
+
+- To fix this, build a local list of the pending connections and release
+  them outside of the mutex lock.
+
+Calltrace:
+---------
+[ 1229.791410] INFO: task kworker/10:1:642 blocked for more than 120 seconds.
+[ 1229.791416]       Tainted: G           OE    --------- -  - 4.18.0-372.9.1.el8.x86_64 #1
+[ 1229.791418] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+[ 1229.791419] task:kworker/10:1    state:D stack:    0 pid:  642 ppid:     2 flags:0x80004000
+[ 1229.791424] Workqueue: ib_cm cm_work_handler [ib_cm]
+[ 1229.791436] Call Trace:
+[ 1229.791438]  __schedule+0x2d1/0x830
+[ 1229.791445]  ? select_idle_sibling+0x23/0x6f0
+[ 1229.791449]  schedule+0x35/0xa0
+[ 1229.791451]  schedule_preempt_disabled+0xa/0x10
+[ 1229.791453]  __mutex_lock.isra.7+0x310/0x420
+[ 1229.791456]  ? select_task_rq_fair+0x351/0x990
+[ 1229.791459]  isert_cma_handler+0x224/0x330 [ib_isert]
+[ 1229.791463]  ? ttwu_queue_wakelist+0x159/0x170
+[ 1229.791466]  cma_cm_event_handler+0x25/0xd0 [rdma_cm]
+[ 1229.791474]  cma_ib_handler+0xa7/0x2e0 [rdma_cm]
+[ 1229.791478]  cm_process_work+0x22/0xf0 [ib_cm]
+[ 1229.791483]  cm_work_handler+0xf4/0xf30 [ib_cm]
+[ 1229.791487]  ? move_linked_works+0x6e/0xa0
+[ 1229.791490]  process_one_work+0x1a7/0x360
+[ 1229.791491]  ? create_worker+0x1a0/0x1a0
+[ 1229.791493]  worker_thread+0x30/0x390
+[ 1229.791494]  ? create_worker+0x1a0/0x1a0
+[ 1229.791495]  kthread+0x10a/0x120
+[ 1229.791497]  ? set_kthread_struct+0x40/0x40
+[ 1229.791499]  ret_from_fork+0x1f/0x40
+
+[ 1229.791739] INFO: task targetcli:28666 blocked for more than 120 seconds.
+[ 1229.791740]       Tainted: G           OE    --------- -  - 4.18.0-372.9.1.el8.x86_64 #1
+[ 1229.791741] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+[ 1229.791742] task:targetcli       state:D stack:    0 pid:28666 ppid:  5510 flags:0x00004080
+[ 1229.791743] Call Trace:
+[ 1229.791744]  __schedule+0x2d1/0x830
+[ 1229.791746]  schedule+0x35/0xa0
+[ 1229.791748]  schedule_preempt_disabled+0xa/0x10
+[ 1229.791749]  __mutex_lock.isra.7+0x310/0x420
+[ 1229.791751]  rdma_destroy_id+0x15/0x20 [rdma_cm]
+[ 1229.791755]  isert_connect_release+0x115/0x130 [ib_isert]
+[ 1229.791757]  isert_free_np+0x87/0x140 [ib_isert]
+[ 1229.791761]  iscsit_del_np+0x74/0x120 [iscsi_target_mod]
+[ 1229.791776]  lio_target_np_driver_store+0xe9/0x140 [iscsi_target_mod]
+[ 1229.791784]  configfs_write_file+0xb2/0x110
+[ 1229.791788]  vfs_write+0xa5/0x1a0
+[ 1229.791792]  ksys_write+0x4f/0xb0
+[ 1229.791794]  do_syscall_64+0x5b/0x1a0
+[ 1229.791798]  entry_SYSCALL_64_after_hwframe+0x65/0xca
+
+Fixes: bd3792205aae ("iser-target: Fix pending connections handling in target stack shutdown sequnce")
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
+Signed-off-by: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
+Link: https://lore.kernel.org/r/20230606102531.162967-2-saravanan.vajravel@broadcom.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/ulp/isert/ib_isert.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index f290cd49698ea..b4809d2372506 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -2431,6 +2431,7 @@ isert_free_np(struct iscsi_np *np)
+ {
+       struct isert_np *isert_np = np->np_context;
+       struct isert_conn *isert_conn, *n;
++      LIST_HEAD(drop_conn_list);
+       if (isert_np->cm_id)
+               rdma_destroy_id(isert_np->cm_id);
+@@ -2450,7 +2451,7 @@ isert_free_np(struct iscsi_np *np)
+                                        node) {
+                       isert_info("cleaning isert_conn %p state (%d)\n",
+                                  isert_conn, isert_conn->state);
+-                      isert_connect_release(isert_conn);
++                      list_move_tail(&isert_conn->node, &drop_conn_list);
+               }
+       }
+@@ -2461,11 +2462,16 @@ isert_free_np(struct iscsi_np *np)
+                                        node) {
+                       isert_info("cleaning isert_conn %p state (%d)\n",
+                                  isert_conn, isert_conn->state);
+-                      isert_connect_release(isert_conn);
++                      list_move_tail(&isert_conn->node, &drop_conn_list);
+               }
+       }
+       mutex_unlock(&isert_np->mutex);
++      list_for_each_entry_safe(isert_conn, n, &drop_conn_list, node) {
++              list_del_init(&isert_conn->node);
++              isert_connect_release(isert_conn);
++      }
++
+       np->np_context = NULL;
+       kfree(isert_np);
+ }
+-- 
+2.39.2
+
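The fix is an instance of a standard deadlock-avoidance pattern: while holding the lock, only move the entries onto a private list, then perform the heavyweight release after dropping the lock, so the release path is free to take the same lock again. A hedged userspace sketch of the pattern, with invented types and helpers and a pthread mutex standing in for the kernel mutex:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct conn {
	int id;
	struct conn *next;
};

static pthread_mutex_t np_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct conn *pending;	/* list protected by np_mutex */

static void conn_release(struct conn *c)
{
	/* In the real driver this path can end up needing the np mutex again
	 * (via the CM event handler), so it must run with the lock dropped. */
	pthread_mutex_lock(&np_mutex);
	/* ... bookkeeping that needs the lock ... */
	pthread_mutex_unlock(&np_mutex);
	free(c);
}

static void free_all_pending(void)
{
	struct conn *drop_list, *c;

	pthread_mutex_lock(&np_mutex);
	/* Move everything to a local list instead of releasing in place. */
	drop_list = pending;
	pending = NULL;
	pthread_mutex_unlock(&np_mutex);

	while ((c = drop_list) != NULL) {
		drop_list = c->next;
		conn_release(c);	/* safe: np_mutex is not held here */
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct conn *c = malloc(sizeof(*c));

		if (!c)
			return 1;
		c->id = i;
		c->next = pending;
		pending = c;
	}
	free_all_pending();
	puts("released all pending connections without holding the lock");
	return 0;
}
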
diff --git a/queue-6.3/ib-isert-fix-incorrect-release-of-isert-connection.patch b/queue-6.3/ib-isert-fix-incorrect-release-of-isert-connection.patch
new file mode 100644 (file)
index 0000000..7981167
--- /dev/null
@@ -0,0 +1,45 @@
+From 60bf24ac40c60ba0ff2be41b7422f1b39e4df372 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jun 2023 03:25:31 -0700
+Subject: IB/isert: Fix incorrect release of isert connection
+
+From: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
+
+[ Upstream commit 699826f4e30ab76a62c238c86fbef7e826639c8d ]
+
+The ib_isert module releases the isert connection in both the
+isert_wait_conn() handler and the isert_free_conn() handler.
+The isert_wait_conn() handler is only expected to wait for the iSCSI
+session logout operation to complete; the isert connection should be
+freed only in the isert_free_conn() handler.
+
+When a bunch of iSER targets is cleared, this issue can lead to a
+use-after-free, as the isert connection is released twice.
+
+Fixes: b02efbfc9a05 ("iser-target: Fix implicit termination of connections")
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
+Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
+Link: https://lore.kernel.org/r/20230606102531.162967-4-saravanan.vajravel@broadcom.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/ulp/isert/ib_isert.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index 00a7303c8cc60..92e1e7587af8b 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -2570,8 +2570,6 @@ static void isert_wait_conn(struct iscsit_conn *conn)
+       isert_put_unsol_pending_cmds(conn);
+       isert_wait4cmds(conn);
+       isert_wait4logout(isert_conn);
+-
+-      queue_work(isert_release_wq, &isert_conn->release_work);
+ }
+ static void isert_free_conn(struct iscsit_conn *conn)
+-- 
+2.39.2
+
diff --git a/queue-6.3/ib-isert-fix-possible-list-corruption-in-cma-handler.patch b/queue-6.3/ib-isert-fix-possible-list-corruption-in-cma-handler.patch
new file mode 100644 (file)
index 0000000..93da815
--- /dev/null
@@ -0,0 +1,45 @@
+From 5e9d5adcb80f8854dfa854686de9408899726d9f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jun 2023 03:25:30 -0700
+Subject: IB/isert: Fix possible list corruption in CMA handler
+
+From: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
+
+[ Upstream commit 7651e2d6c5b359a28c2d4c904fec6608d1021ca8 ]
+
+When the ib_isert module receives a connection error event, it
+releases the isert session and removes the corresponding list
+node, but it doesn't take the appropriate mutex lock to do so.
+This can lead to linked list corruption.
+
+Fixes: bd3792205aae ("iser-target: Fix pending connections handling in target stack shutdown sequnce")
+Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
+Signed-off-by: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
+Link: https://lore.kernel.org/r/20230606102531.162967-3-saravanan.vajravel@broadcom.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/ulp/isert/ib_isert.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index b4809d2372506..00a7303c8cc60 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -657,9 +657,13 @@ static int
+ isert_connect_error(struct rdma_cm_id *cma_id)
+ {
+       struct isert_conn *isert_conn = cma_id->qp->qp_context;
++      struct isert_np *isert_np = cma_id->context;
+       ib_drain_qp(isert_conn->qp);
++
++      mutex_lock(&isert_np->mutex);
+       list_del_init(&isert_conn->node);
++      mutex_unlock(&isert_np->mutex);
+       isert_conn->cm_id = NULL;
+       isert_put_conn(isert_conn);
+-- 
+2.39.2
+
diff --git a/queue-6.3/ib-uverbs-fix-to-consider-event-queue-closing-also-u.patch b/queue-6.3/ib-uverbs-fix-to-consider-event-queue-closing-also-u.patch
new file mode 100644 (file)
index 0000000..7b05cde
--- /dev/null
@@ -0,0 +1,69 @@
+From e3c2f284679f6918bab32ad5075970c1b5b49247 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jun 2023 13:33:25 +0300
+Subject: IB/uverbs: Fix to consider event queue closing also upon non-blocking
+ mode
+
+From: Yishai Hadas <yishaih@nvidia.com>
+
+[ Upstream commit 62fab312fa1683e812e605db20d4f22de3e3fb2f ]
+
+Fix ib_uverbs_event_read() to consider event queue closing also upon
+non-blocking mode.
+
+Once the queue is closed (e.g. hot-plug flow) all the existing events
+are cleaned up as part of ib_uverbs_free_event_queue().
+
+An application that uses the non-blocking FD mode should get -EIO in
+that case to let it know that the device was removed already.
+
+Otherwise, it can lose the indication that the device was removed and
+won't recover.
+
+As part of that, refactor the code to have a single flow with regards to
+'is_closed' for both blocking and non-blocking modes.
+
+Fixes: 14e23bd6d221 ("RDMA/core: Fix locking in ib_uverbs_event_read")
+Reviewed-by: Maor Gottlieb <maorg@nvidia.com>
+Signed-off-by: Yishai Hadas <yishaih@nvidia.com>
+Link: https://lore.kernel.org/r/97b00116a1e1e13f8dc4ec38a5ea81cf8c030210.1685960567.git.leon@kernel.org
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/core/uverbs_main.c | 12 +++++-------
+ 1 file changed, 5 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
+index bdb179a09d77c..3cd4b195007b8 100644
+--- a/drivers/infiniband/core/uverbs_main.c
++++ b/drivers/infiniband/core/uverbs_main.c
+@@ -222,8 +222,12 @@ static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
+       spin_lock_irq(&ev_queue->lock);
+       while (list_empty(&ev_queue->event_list)) {
+-              spin_unlock_irq(&ev_queue->lock);
++              if (ev_queue->is_closed) {
++                      spin_unlock_irq(&ev_queue->lock);
++                      return -EIO;
++              }
++              spin_unlock_irq(&ev_queue->lock);
+               if (filp->f_flags & O_NONBLOCK)
+                       return -EAGAIN;
+@@ -233,12 +237,6 @@ static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
+                       return -ERESTARTSYS;
+               spin_lock_irq(&ev_queue->lock);
+-
+-              /* If device was disassociated and no event exists set an error */
+-              if (list_empty(&ev_queue->event_list) && ev_queue->is_closed) {
+-                      spin_unlock_irq(&ev_queue->lock);
+-                      return -EIO;
+-              }
+       }
+       event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list);
+-- 
+2.39.2
+
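The refactor makes the "queue has been closed" test part of the wait loop itself, so a reader sees -EIO once the queue is dead whether it is blocking or not, instead of a non-blocking reader spinning on -EAGAIN forever. A toy model of the resulting check order (not the uverbs code; the blocking branch is stubbed out):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct ev_queue {
	int n_events;
	bool is_closed;
};

static int event_read(struct ev_queue *q, bool nonblock)
{
	while (q->n_events == 0) {
		if (q->is_closed)
			return -EIO;	/* device gone: report it in both modes */
		if (nonblock)
			return -EAGAIN;	/* nothing yet, come back later */
		/* a blocking reader would sleep here until woken up */
		return -EINTR;		/* placeholder for this sketch */
	}
	q->n_events--;
	return 0;
}

int main(void)
{
	struct ev_queue dead = { .n_events = 0, .is_closed = true };
	struct ev_queue idle = { .n_events = 0, .is_closed = false };

	printf("closed queue, O_NONBLOCK: %d (expect %d)\n",
	       event_read(&dead, true), -EIO);
	printf("open queue, O_NONBLOCK:   %d (expect %d)\n",
	       event_read(&idle, true), -EAGAIN);
	return 0;
}
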
diff --git a/queue-6.3/ice-do-not-busy-wait-to-read-gnss-data.patch b/queue-6.3/ice-do-not-busy-wait-to-read-gnss-data.patch
new file mode 100644 (file)
index 0000000..43923f5
--- /dev/null
@@ -0,0 +1,153 @@
+From abc213afc1e7fd49c6b14d3a916b5221c4285e56 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 12 Apr 2023 10:19:24 +0200
+Subject: ice: do not busy-wait to read GNSS data
+
+From: Michal Schmidt <mschmidt@redhat.com>
+
+[ Upstream commit 2f8fdcb0a73a1831cc4f205f23493a17c0e5536f ]
+
+The ice-gnss-<dev_name> kernel thread, which reads data from the u-blox
+GNSS module, keeps a CPU core almost 100% busy. The main reason is that
+it busy-waits for data to become available.
+
+A simple improvement would be to replace the "mdelay(10);" in
+ice_gnss_read() with sleeping. A better fix is to not do any waiting
+directly in the function and just requeue this delayed work as needed.
+The advantage is that canceling the work from ice_gnss_exit() becomes
+immediate, rather than taking up to ~2.5 seconds (ICE_MAX_UBX_READ_TRIES
+* 10 ms).
+
+This lowers the CPU usage of the ice-gnss-<dev_name> thread on my system
+from ~90 % to ~8 %.
+
+I am not sure if the larger 0.1 s pause after inserting data into the
+gnss subsystem is really necessary, but I'm keeping that as it was.
+
+Of course, ideally the driver would not have to poll at all, but I don't
+know if the E810 can watch for GNSS data availability over the i2c bus
+by itself and notify the driver.
+
+Signed-off-by: Michal Schmidt <mschmidt@redhat.com>
+Reviewed-by: Arkadiusz Kubalewski <arkadiusz.kubalewski@intel.com>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Tested-by: Sunitha Mekala <sunithax.d.mekala@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Stable-dep-of: 05a1308a2e08 ("ice: Don't dereference NULL in ice_gnss_read error path")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_gnss.c | 42 ++++++++++-------------
+ drivers/net/ethernet/intel/ice/ice_gnss.h |  3 +-
+ 2 files changed, 20 insertions(+), 25 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c
+index 12086aafb42fb..bd0ed155e11b6 100644
+--- a/drivers/net/ethernet/intel/ice/ice_gnss.c
++++ b/drivers/net/ethernet/intel/ice/ice_gnss.c
+@@ -85,6 +85,7 @@ static void ice_gnss_read(struct kthread_work *work)
+ {
+       struct gnss_serial *gnss = container_of(work, struct gnss_serial,
+                                               read_work.work);
++      unsigned long delay = ICE_GNSS_POLL_DATA_DELAY_TIME;
+       unsigned int i, bytes_read, data_len, count;
+       struct ice_aqc_link_topo_addr link_topo;
+       struct ice_pf *pf;
+@@ -104,11 +105,6 @@ static void ice_gnss_read(struct kthread_work *work)
+               return;
+       hw = &pf->hw;
+-      buf = (char *)get_zeroed_page(GFP_KERNEL);
+-      if (!buf) {
+-              err = -ENOMEM;
+-              goto exit;
+-      }
+       memset(&link_topo, 0, sizeof(struct ice_aqc_link_topo_addr));
+       link_topo.topo_params.index = ICE_E810T_GNSS_I2C_BUS;
+@@ -119,25 +115,24 @@ static void ice_gnss_read(struct kthread_work *work)
+       i2c_params = ICE_GNSS_UBX_DATA_LEN_WIDTH |
+                    ICE_AQC_I2C_USE_REPEATED_START;
+-      /* Read data length in a loop, when it's not 0 the data is ready */
+-      for (i = 0; i < ICE_MAX_UBX_READ_TRIES; i++) {
+-              err = ice_aq_read_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR,
+-                                    cpu_to_le16(ICE_GNSS_UBX_DATA_LEN_H),
+-                                    i2c_params, (u8 *)&data_len_b, NULL);
+-              if (err)
+-                      goto exit_buf;
++      err = ice_aq_read_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR,
++                            cpu_to_le16(ICE_GNSS_UBX_DATA_LEN_H),
++                            i2c_params, (u8 *)&data_len_b, NULL);
++      if (err)
++              goto requeue;
+-              data_len = be16_to_cpu(data_len_b);
+-              if (data_len != 0 && data_len != U16_MAX)
+-                      break;
++      data_len = be16_to_cpu(data_len_b);
++      if (data_len == 0 || data_len == U16_MAX)
++              goto requeue;
+-              mdelay(10);
+-      }
++      /* The u-blox has data_len bytes for us to read */
+       data_len = min_t(typeof(data_len), data_len, PAGE_SIZE);
+-      if (!data_len) {
++
++      buf = (char *)get_zeroed_page(GFP_KERNEL);
++      if (!buf) {
+               err = -ENOMEM;
+-              goto exit_buf;
++              goto requeue;
+       }
+       /* Read received data */
+@@ -151,7 +146,7 @@ static void ice_gnss_read(struct kthread_work *work)
+                                     cpu_to_le16(ICE_GNSS_UBX_EMPTY_DATA),
+                                     bytes_read, &buf[i], NULL);
+               if (err)
+-                      goto exit_buf;
++                      goto free_buf;
+       }
+       count = gnss_insert_raw(pf->gnss_dev, buf, i);
+@@ -159,10 +154,11 @@ static void ice_gnss_read(struct kthread_work *work)
+               dev_warn(ice_pf_to_dev(pf),
+                        "gnss_insert_raw ret=%d size=%d\n",
+                        count, i);
+-exit_buf:
++      delay = ICE_GNSS_TIMER_DELAY_TIME;
++free_buf:
+       free_page((unsigned long)buf);
+-      kthread_queue_delayed_work(gnss->kworker, &gnss->read_work,
+-                                 ICE_GNSS_TIMER_DELAY_TIME);
++requeue:
++      kthread_queue_delayed_work(gnss->kworker, &gnss->read_work, delay);
+ exit:
+       if (err)
+               dev_dbg(ice_pf_to_dev(pf), "GNSS failed to read err=%d\n", err);
+diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.h b/drivers/net/ethernet/intel/ice/ice_gnss.h
+index d95ca3928b2ea..d206afe550a56 100644
+--- a/drivers/net/ethernet/intel/ice/ice_gnss.h
++++ b/drivers/net/ethernet/intel/ice/ice_gnss.h
+@@ -5,6 +5,7 @@
+ #define _ICE_GNSS_H_
+ #define ICE_E810T_GNSS_I2C_BUS                0x2
++#define ICE_GNSS_POLL_DATA_DELAY_TIME (HZ / 100) /* poll every 10 ms */
+ #define ICE_GNSS_TIMER_DELAY_TIME     (HZ / 10) /* 0.1 second per message */
+ #define ICE_GNSS_TTY_WRITE_BUF                250
+ #define ICE_MAX_I2C_DATA_SIZE         FIELD_MAX(ICE_AQC_I2C_DATA_SIZE_M)
+@@ -20,8 +21,6 @@
+  * passed as I2C addr parameter.
+  */
+ #define ICE_GNSS_UBX_WRITE_BYTES      (ICE_MAX_I2C_WRITE_BYTES + 1)
+-#define ICE_MAX_UBX_READ_TRIES                255
+-#define ICE_MAX_UBX_ACK_READ_TRIES    4095
+ /**
+  * struct gnss_serial - data used to initialize GNSS TTY port
+-- 
+2.39.2
+
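The heart of the change is that the work function never waits in place any more: it decides how soon it wants to run again and requeues itself with that delay (a short poll interval while there is no data, a longer one after data has been consumed). A minimal sketch of that "return the next delay instead of sleeping" shape, with invented constants and helpers:

#include <stdio.h>

#define POLL_DELAY_MS	10	/* no data yet: poll again soon, never spin */
#define DRAIN_DELAY_MS	100	/* data consumed: back off a little */

static int read_data_len(int call)
{
	/* pretend data shows up on the third poll */
	return call >= 3 ? 42 : 0;
}

static unsigned int gnss_read_once(int call)
{
	int len = read_data_len(call);

	if (len == 0)
		return POLL_DELAY_MS;

	/* ... read and hand off 'len' bytes here ... */
	return DRAIN_DELAY_MS;
}

int main(void)
{
	for (int call = 1; call <= 4; call++)
		printf("poll %d -> requeue in %u ms\n", call, gnss_read_once(call));
	return 0;
}
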
diff --git a/queue-6.3/ice-don-t-dereference-null-in-ice_gnss_read-error-pa.patch b/queue-6.3/ice-don-t-dereference-null-in-ice_gnss_read-error-pa.patch
new file mode 100644 (file)
index 0000000..a935831
--- /dev/null
@@ -0,0 +1,59 @@
+From 88d4bcc23a5f83547dc3a9593063e863d08e0e86 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 25 May 2023 12:52:58 +0200
+Subject: ice: Don't dereference NULL in ice_gnss_read error path
+
+From: Simon Horman <horms@kernel.org>
+
+[ Upstream commit 05a1308a2e08e4a375bf60eb4c6c057a201d81fc ]
+
+If pf is NULL in ice_gnss_read() then it will be dereferenced
+in the error path by a call to dev_dbg(ice_pf_to_dev(pf), ...).
+
+Avoid this by simply returning in this case.
+If logging is desired an alternate approach might be to
+use pr_err() before returning.
+
+Flagged by Smatch as:
+
+  .../ice_gnss.c:196 ice_gnss_read() error: we previously assumed 'pf' could be null (see line 131)
+
+Fixes: 43113ff73453 ("ice: add TTY for GNSS module for E810T device")
+Signed-off-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
+Tested-by: Sunitha Mekala <sunithax.d.mekala@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_gnss.c | 8 +-------
+ 1 file changed, 1 insertion(+), 7 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c
+index bd0ed155e11b6..75c9de675f202 100644
+--- a/drivers/net/ethernet/intel/ice/ice_gnss.c
++++ b/drivers/net/ethernet/intel/ice/ice_gnss.c
+@@ -96,12 +96,7 @@ static void ice_gnss_read(struct kthread_work *work)
+       int err = 0;
+       pf = gnss->back;
+-      if (!pf) {
+-              err = -EFAULT;
+-              goto exit;
+-      }
+-
+-      if (!test_bit(ICE_FLAG_GNSS, pf->flags))
++      if (!pf || !test_bit(ICE_FLAG_GNSS, pf->flags))
+               return;
+       hw = &pf->hw;
+@@ -159,7 +154,6 @@ static void ice_gnss_read(struct kthread_work *work)
+       free_page((unsigned long)buf);
+ requeue:
+       kthread_queue_delayed_work(gnss->kworker, &gnss->read_work, delay);
+-exit:
+       if (err)
+               dev_dbg(ice_pf_to_dev(pf), "GNSS failed to read err=%d\n", err);
+ }
+-- 
+2.39.2
+
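The rule being enforced is simple: once a pointer has been found to be NULL, the function must not jump to an error label whose logging dereferences that same pointer; returning immediately is the safe shape. A small illustrative sketch with invented names (not the ice driver code):

#include <stdio.h>

struct pf {
	const char *name;
};

static void do_read(struct pf *pf)
{
	int err = 0;

	if (!pf)
		return;		/* the logging below needs pf, so bail out now */

	/* ... work that may set err ... */
	err = -5;

	if (err)
		fprintf(stderr, "%s: read failed err=%d\n", pf->name, err);
}

int main(void)
{
	struct pf dev = { .name = "ice0" };

	do_read(NULL);	/* quietly bails out, no NULL dereference */
	do_read(&dev);
	return 0;
}
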
diff --git a/queue-6.3/ice-fix-ice-module-unload.patch b/queue-6.3/ice-fix-ice-module-unload.patch
new file mode 100644 (file)
index 0000000..80bc336
--- /dev/null
@@ -0,0 +1,121 @@
+From 4b22c615522cf78f2bbe1767a0101da24bd07972 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Jun 2023 10:14:21 -0700
+Subject: ice: Fix ice module unload
+
+From: Jakub Buchocki <jakubx.buchocki@intel.com>
+
+[ Upstream commit 24b454bc354ab7b1aa918a4fe3d7696516f592d4 ]
+
+Clearing the interrupt scheme before the PFR reset,
+during the removal routine, could cause hardware
+errors and possibly lead to a system reboot, as the PF
+reset can cause an interrupt to be generated.
+
+Place the call for PFR reset inside ice_deinit_dev(),
+wait until reset and all pending transactions are done,
+then call ice_clear_interrupt_scheme().
+
+This introduces a PFR reset to multiple error paths.
+
+Additionally, remove the call for the reset from
+ice_load() - it will be a part of ice_unload() now.
+
+Error example:
+[   75.229328] ice 0000:ca:00.1: Failed to read Tx Scheduler Tree - User Selection data from flash
+[   77.571315] {1}[Hardware Error]: Hardware error from APEI Generic Hardware Error Source: 1
+[   77.571418] {1}[Hardware Error]: event severity: recoverable
+[   77.571459] {1}[Hardware Error]:  Error 0, type: recoverable
+[   77.571500] {1}[Hardware Error]:   section_type: PCIe error
+[   77.571540] {1}[Hardware Error]:   port_type: 4, root port
+[   77.571580] {1}[Hardware Error]:   version: 3.0
+[   77.571615] {1}[Hardware Error]:   command: 0x0547, status: 0x4010
+[   77.571661] {1}[Hardware Error]:   device_id: 0000:c9:02.0
+[   77.571703] {1}[Hardware Error]:   slot: 25
+[   77.571736] {1}[Hardware Error]:   secondary_bus: 0xca
+[   77.571773] {1}[Hardware Error]:   vendor_id: 0x8086, device_id: 0x347a
+[   77.571821] {1}[Hardware Error]:   class_code: 060400
+[   77.571858] {1}[Hardware Error]:   bridge: secondary_status: 0x2800, control: 0x0013
+[   77.572490] pcieport 0000:c9:02.0: AER: aer_status: 0x00200000, aer_mask: 0x00100020
+[   77.572870] pcieport 0000:c9:02.0:    [21] ACSViol                (First)
+[   77.573222] pcieport 0000:c9:02.0: AER: aer_layer=Transaction Layer, aer_agent=Receiver ID
+[   77.573554] pcieport 0000:c9:02.0: AER: aer_uncor_severity: 0x00463010
+[   77.691273] {2}[Hardware Error]: Hardware error from APEI Generic Hardware Error Source: 1
+[   77.691738] {2}[Hardware Error]: event severity: recoverable
+[   77.691971] {2}[Hardware Error]:  Error 0, type: recoverable
+[   77.692192] {2}[Hardware Error]:   section_type: PCIe error
+[   77.692403] {2}[Hardware Error]:   port_type: 4, root port
+[   77.692616] {2}[Hardware Error]:   version: 3.0
+[   77.692825] {2}[Hardware Error]:   command: 0x0547, status: 0x4010
+[   77.693032] {2}[Hardware Error]:   device_id: 0000:c9:02.0
+[   77.693238] {2}[Hardware Error]:   slot: 25
+[   77.693440] {2}[Hardware Error]:   secondary_bus: 0xca
+[   77.693641] {2}[Hardware Error]:   vendor_id: 0x8086, device_id: 0x347a
+[   77.693853] {2}[Hardware Error]:   class_code: 060400
+[   77.694054] {2}[Hardware Error]:   bridge: secondary_status: 0x0800, control: 0x0013
+[   77.719115] pci 0000:ca:00.1: AER: can't recover (no error_detected callback)
+[   77.719140] pcieport 0000:c9:02.0: AER: device recovery failed
+[   77.719216] pcieport 0000:c9:02.0: AER: aer_status: 0x00200000, aer_mask: 0x00100020
+[   77.719390] pcieport 0000:c9:02.0:    [21] ACSViol                (First)
+[   77.719557] pcieport 0000:c9:02.0: AER: aer_layer=Transaction Layer, aer_agent=Receiver ID
+[   77.719723] pcieport 0000:c9:02.0: AER: aer_uncor_severity: 0x00463010
+
+Fixes: 5b246e533d01 ("ice: split probe into smaller functions")
+Signed-off-by: Jakub Buchocki <jakubx.buchocki@intel.com>
+Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
+Tested-by: Pucha Himasekhar Reddy <himasekharx.reddy.pucha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Link: https://lore.kernel.org/r/20230612171421.21570-1-anthony.l.nguyen@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_main.c | 16 +++++-----------
+ 1 file changed, 5 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 0c949ed22a313..98e8ce743fb2e 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -4794,9 +4794,13 @@ static int ice_init_dev(struct ice_pf *pf)
+ static void ice_deinit_dev(struct ice_pf *pf)
+ {
+       ice_free_irq_msix_misc(pf);
+-      ice_clear_interrupt_scheme(pf);
+       ice_deinit_pf(pf);
+       ice_deinit_hw(&pf->hw);
++
++      /* Service task is already stopped, so call reset directly. */
++      ice_reset(&pf->hw, ICE_RESET_PFR);
++      pci_wait_for_pending_transaction(pf->pdev);
++      ice_clear_interrupt_scheme(pf);
+ }
+ static void ice_init_features(struct ice_pf *pf)
+@@ -5086,10 +5090,6 @@ int ice_load(struct ice_pf *pf)
+       struct ice_vsi *vsi;
+       int err;
+-      err = ice_reset(&pf->hw, ICE_RESET_PFR);
+-      if (err)
+-              return err;
+-
+       err = ice_init_dev(pf);
+       if (err)
+               return err;
+@@ -5346,12 +5346,6 @@ static void ice_remove(struct pci_dev *pdev)
+       ice_setup_mc_magic_wake(pf);
+       ice_set_wake(pf);
+-      /* Issue a PFR as part of the prescribed driver unload flow.  Do not
+-       * do it via ice_schedule_reset() since there is no need to rebuild
+-       * and the service task is already stopped.
+-       */
+-      ice_reset(&pf->hw, ICE_RESET_PFR);
+-      pci_wait_for_pending_transaction(pdev);
+       pci_disable_device(pdev);
+ }
+-- 
+2.39.2
+
diff --git a/queue-6.3/ice-fix-xdp-memory-leak-when-nic-is-brought-up-and-d.patch b/queue-6.3/ice-fix-xdp-memory-leak-when-nic-is-brought-up-and-d.patch
new file mode 100644 (file)
index 0000000..bec2a71
--- /dev/null
@@ -0,0 +1,41 @@
+From 9affdfb4e46d5692cddedc494708bd2f0213fc50 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jun 2023 12:33:58 +0200
+Subject: ice: Fix XDP memory leak when NIC is brought up and down
+
+From: Kamil Maziarz <kamil.maziarz@intel.com>
+
+[ Upstream commit 78c50d6961fc05491ebbc71c35d87324b1a4f49a ]
+
+Fix the buffer leak that occurs while switching
+the port up and down with traffic and XDP by
+checking for an active XDP program and freeing all empty TX buffers.
+
+Fixes: efc2214b6047 ("ice: Add support for XDP")
+Signed-off-by: Kamil Maziarz <kamil.maziarz@intel.com>
+Tested-by: Chandan Kumar Rout <chandanx.rout@intel.com> (A Contingent Worker at Intel)
+Acked-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_main.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 0d8b8c6f9bd35..0c949ed22a313 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -7048,6 +7048,10 @@ int ice_down(struct ice_vsi *vsi)
+       ice_for_each_txq(vsi, i)
+               ice_clean_tx_ring(vsi->tx_rings[i]);
++      if (ice_is_xdp_ena_vsi(vsi))
++              ice_for_each_xdp_txq(vsi, i)
++                      ice_clean_tx_ring(vsi->xdp_rings[i]);
++
+       ice_for_each_rxq(vsi, i)
+               ice_clean_rx_ring(vsi->rx_rings[i]);
+-- 
+2.39.2
+
diff --git a/queue-6.3/igb-fix-extts-capture-value-format-for-82580-i354-i3.patch b/queue-6.3/igb-fix-extts-capture-value-format-for-82580-i354-i3.patch
new file mode 100644 (file)
index 0000000..392158b
--- /dev/null
@@ -0,0 +1,71 @@
+From 39fde3d0c96447977ecc469d1a6cdb32e47dacc6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Jun 2023 09:41:16 -0700
+Subject: igb: Fix extts capture value format for 82580/i354/i350
+
+From: Yuezhen Luan <eggcar.luan@gmail.com>
+
+[ Upstream commit 6292d7436cf2f0a2ea8800a1d2cbb155d237818a ]
+
+82580/i354/i350 feature cycle-counter-like timestamp registers
+that are different from the newer i210. The EXTTS capture value in
+AUXTSMPx should be converted by the driver from the raw cycle
+counter value to a timestamp value with a resolution of 1 nanosecond.
+
+This issue can be reproduced on i350 NICs by connecting a 1PPS
+signal to an SDP pin and running the 'ts2phc' command to read the
+external 1PPS timestamp value. On i210 this works fine, but on i350
+the extts value is not correctly converted.
+
+The SYSTIM and other timestamp registers of i350/i354/82580 are
+40-bit counters, covering a time range of 2^40 ns, which means these
+registers overflow about every 1099 s. Because of this, none of these
+registers can be used directly, in contrast to the newer i210/i211.
+
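+As a quick sanity check of the 1099 s figure above, a standalone C
+snippet (illustrative only, not driver code) that evaluates the wrap
+period of a 40-bit nanosecond counter:
+
+  #include <stdio.h>
+  #include <stdint.h>
+
+  int main(void)
+  {
+          /* a 40-bit nanosecond counter wraps after 2^40 ns */
+          uint64_t span_ns = 1ULL << 40;
+
+          printf("wrap period: %.1f s\n", span_ns / 1e9);  /* ~1099.5 s */
+          return 0;
+  }
+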
+The igb driver needs to convert these raw register values into a
+valid timestamp format using the kernel timecounter APIs for the
+i350 family. Here, igb_extts() just forgot to do that conversion.
+
+Fixes: 38970eac41db ("igb: support EXTTS on 82580/i354/i350")
+Signed-off-by: Yuezhen Luan <eggcar.luan@gmail.com>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Tested-by: Pucha Himasekhar Reddy <himasekharx.reddy.pucha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Link: https://lore.kernel.org/r/20230607164116.3768175-1-anthony.l.nguyen@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igb/igb_main.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 274c781b55473..f2f9f81c123b8 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -6948,6 +6948,7 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
+       struct e1000_hw *hw = &adapter->hw;
+       struct ptp_clock_event event;
+       struct timespec64 ts;
++      unsigned long flags;
+       if (pin < 0 || pin >= IGB_N_SDP)
+               return;
+@@ -6955,9 +6956,12 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
+       if (hw->mac.type == e1000_82580 ||
+           hw->mac.type == e1000_i354 ||
+           hw->mac.type == e1000_i350) {
+-              s64 ns = rd32(auxstmpl);
++              u64 ns = rd32(auxstmpl);
+-              ns += ((s64)(rd32(auxstmph) & 0xFF)) << 32;
++              ns += ((u64)(rd32(auxstmph) & 0xFF)) << 32;
++              spin_lock_irqsave(&adapter->tmreg_lock, flags);
++              ns = timecounter_cyc2time(&adapter->tc, ns);
++              spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+               ts = ns_to_timespec64(ns);
+       } else {
+               ts.tv_nsec = rd32(auxstmpl);
+-- 
+2.39.2
+
diff --git a/queue-6.3/igb-fix-nvm.ops.read-error-handling.patch b/queue-6.3/igb-fix-nvm.ops.read-error-handling.patch
new file mode 100644 (file)
index 0000000..d1fa1fb
--- /dev/null
@@ -0,0 +1,44 @@
+From 828453131a050996941285f8c77b184065638ffe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Apr 2023 17:44:14 +0200
+Subject: igb: fix nvm.ops.read() error handling
+
+From: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+
+[ Upstream commit 48a821fd58837800750ec1b3962f0f799630a844 ]
+
+Add error handling to the igb_set_eeprom() function: in case
+nvm.ops.read() fails, just quit with the error code asap.
+
+Fixes: 9d5c824399de ("igb: PCI-Express 82575 Gigabit Ethernet driver")
+Signed-off-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igb/igb_ethtool.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+index 7d60da1b7bf41..319ed601eaa1e 100644
+--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
++++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+@@ -822,6 +822,8 @@ static int igb_set_eeprom(struct net_device *netdev,
+                */
+               ret_val = hw->nvm.ops.read(hw, last_word, 1,
+                                  &eeprom_buff[last_word - first_word]);
++              if (ret_val)
++                      goto out;
+       }
+       /* Device's eeprom is always little-endian, word addressable */
+@@ -841,6 +843,7 @@ static int igb_set_eeprom(struct net_device *netdev,
+               hw->nvm.ops.update(hw);
+       igb_set_fw_version(adapter);
++out:
+       kfree(eeprom_buff);
+       return ret_val;
+ }
+-- 
+2.39.2
+
diff --git a/queue-6.3/igc-clean-the-tx-buffer-and-tx-descriptor-ring.patch b/queue-6.3/igc-clean-the-tx-buffer-and-tx-descriptor-ring.patch
new file mode 100644 (file)
index 0000000..dd97cc2
--- /dev/null
@@ -0,0 +1,160 @@
+From 57e418079c041eaf9eddae30e384ff32afe361af Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 May 2023 23:49:36 +0800
+Subject: igc: Clean the TX buffer and TX descriptor ring
+
+From: Muhammad Husaini Zulkifli <muhammad.husaini.zulkifli@intel.com>
+
+[ Upstream commit e43516f5978d11d36511ce63d31d1da4db916510 ]
+
+There could be a race condition during link down where an interrupt
+is generated and igc_clean_tx_irq() is called to perform the
+TX completion. Properly clear the TX buffer/descriptor ring and
+disable the TX queue ring in igc_free_tx_resources() to avoid that.
+
+Kernel trace:
+[  108.237177] Hardware name: Intel Corporation Tiger Lake Client Platform/TigerLake U DDR4 SODIMM RVP, BIOS TGLIFUI1.R00.4204.A00.2105270302 05/27/2021
+[  108.237178] RIP: 0010:refcount_warn_saturate+0x55/0x110
+[  108.242143] RSP: 0018:ffff9e7980003db0 EFLAGS: 00010286
+[  108.245555] Code: 84 bc 00 00 00 c3 cc cc cc cc 85 f6 74 46 80 3d 20 8c 4d 01 00 75 ee 48 c7 c7 88 f4 03 ab c6 05 10 8c 4d 01 01 e8 0b 10 96 ff <0f> 0b c3 cc cc cc cc 80 3d fc 8b 4d 01 00 75 cb 48 c7 c7 b0 f4 03
+[  108.250434]
+[  108.250434] RSP: 0018:ffff9e798125f910 EFLAGS: 00010286
+[  108.254358] RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000000
+[  108.259325]
+[  108.259325] RAX: 0000000000000000 RBX: ffff8ddb935b8000 RCX: 0000000000000027
+[  108.261868] RDX: ffff8de250a28800 RSI: ffff8de250a1c580 RDI: ffff8de250a1c580
+[  108.265538] RDX: 0000000000000027 RSI: 0000000000000002 RDI: ffff8de250a9c588
+[  108.265539] RBP: ffff8ddb935b8000 R08: ffffffffab2655a0 R09: ffff9e798125f898
+[  108.267914] RBP: ffff8ddb8a5b8d80 R08: 0000005648eba354 R09: 0000000000000000
+[  108.270196] R10: 0000000000000001 R11: 000000002d2d2d2d R12: ffff9e798125f948
+[  108.270197] R13: ffff9e798125fa1c R14: ffff8ddb8a5b8d80 R15: 7fffffffffffffff
+[  108.273001] R10: 000000002d2d2d2d R11: 000000002d2d2d2d R12: ffff8ddb8a5b8ed4
+[  108.276410] FS:  00007f605851b740(0000) GS:ffff8de250a80000(0000) knlGS:0000000000000000
+[  108.280597] R13: 00000000000002ac R14: 00000000ffffff99 R15: ffff8ddb92561b80
+[  108.282966] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[  108.282967] CR2: 00007f053c039248 CR3: 0000000185850003 CR4: 0000000000f70ee0
+[  108.286206] FS:  0000000000000000(0000) GS:ffff8de250a00000(0000) knlGS:0000000000000000
+[  108.289701] PKRU: 55555554
+[  108.289702] Call Trace:
+[  108.289704]  <TASK>
+[  108.293977] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[  108.297562]  sock_alloc_send_pskb+0x20c/0x240
+[  108.301494] CR2: 00007f053c03a168 CR3: 0000000184394002 CR4: 0000000000f70ef0
+[  108.301495] PKRU: 55555554
+[  108.306464]  __ip_append_data.isra.0+0x96f/0x1040
+[  108.309441] Call Trace:
+[  108.309443]  ? __pfx_ip_generic_getfrag+0x10/0x10
+[  108.314927]  <IRQ>
+[  108.314928]  sock_wfree+0x1c7/0x1d0
+[  108.318078]  ? __pfx_ip_generic_getfrag+0x10/0x10
+[  108.320276]  skb_release_head_state+0x32/0x90
+[  108.324812]  ip_make_skb+0xf6/0x130
+[  108.327188]  skb_release_all+0x16/0x40
+[  108.330775]  ? udp_sendmsg+0x9f3/0xcb0
+[  108.332626]  napi_consume_skb+0x48/0xf0
+[  108.334134]  ? xfrm_lookup_route+0x23/0xb0
+[  108.344285]  igc_poll+0x787/0x1620 [igc]
+[  108.346659]  udp_sendmsg+0x9f3/0xcb0
+[  108.360010]  ? ttwu_do_activate+0x40/0x220
+[  108.365237]  ? __pfx_ip_generic_getfrag+0x10/0x10
+[  108.366744]  ? try_to_wake_up+0x289/0x5e0
+[  108.376987]  ? sock_sendmsg+0x81/0x90
+[  108.395698]  ? __pfx_process_timeout+0x10/0x10
+[  108.395701]  sock_sendmsg+0x81/0x90
+[  108.409052]  __napi_poll+0x29/0x1c0
+[  108.414279]  ____sys_sendmsg+0x284/0x310
+[  108.419507]  net_rx_action+0x257/0x2d0
+[  108.438216]  ___sys_sendmsg+0x7c/0xc0
+[  108.439723]  __do_softirq+0xc1/0x2a8
+[  108.444950]  ? finish_task_switch+0xb4/0x2f0
+[  108.452077]  irq_exit_rcu+0xa9/0xd0
+[  108.453584]  ? __schedule+0x372/0xd00
+[  108.460713]  common_interrupt+0x84/0xa0
+[  108.467840]  ? clockevents_program_event+0x95/0x100
+[  108.474968]  </IRQ>
+[  108.482096]  ? do_nanosleep+0x88/0x130
+[  108.489224]  <TASK>
+[  108.489225]  asm_common_interrupt+0x26/0x40
+[  108.496353]  ? __rseq_handle_notify_resume+0xa9/0x4f0
+[  108.503478] RIP: 0010:cpu_idle_poll+0x2c/0x100
+[  108.510607]  __sys_sendmsg+0x5d/0xb0
+[  108.518687] Code: 05 e1 d9 c8 00 65 8b 15 de 64 85 55 85 c0 7f 57 e8 b9 ef ff ff fb 65 48 8b 1c 25 00 cc 02 00 48 8b 03 a8 08 74 0b eb 1c f3 90 <48> 8b 03 a8 08 75 13 8b 05 77 63 cd 00 85 c0 75 ed e8 ce ec ff ff
+[  108.525817]  do_syscall_64+0x44/0xa0
+[  108.531563] RSP: 0018:ffffffffab203e70 EFLAGS: 00000202
+[  108.538693]  entry_SYSCALL_64_after_hwframe+0x72/0xdc
+[  108.546775]
+[  108.546777] RIP: 0033:0x7f605862b7f7
+[  108.549495] RAX: 0000000000000001 RBX: ffffffffab20c940 RCX: 000000000000003b
+[  108.551955] Code: 0e 00 f7 d8 64 89 02 48 c7 c0 ff ff ff ff eb b9 0f 1f 00 f3 0f 1e fa 64 8b 04 25 18 00 00 00 85 c0 75 10 b8 2e 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 51 c3 48 83 ec 28 89 54 24 1c 48 89 74 24 10
+[  108.554068] RDX: 4000000000000000 RSI: 000000002da97f6a RDI: 00000000002b8ff4
+[  108.559816] RSP: 002b:00007ffc99264058 EFLAGS: 00000246
+[  108.564178] RBP: 0000000000000000 R08: 00000000002b8ff4 R09: ffff8ddb01554c80
+[  108.571302]  ORIG_RAX: 000000000000002e
+[  108.571303] RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f605862b7f7
+[  108.574023] R10: 000000000000015b R11: 000000000000000f R12: ffffffffab20c940
+[  108.574024] R13: 0000000000000000 R14: ffff8de26fbeef40 R15: ffffffffab20c940
+[  108.578727] RDX: 0000000000000000 RSI: 00007ffc992640a0 RDI: 0000000000000003
+[  108.578728] RBP: 00007ffc99264110 R08: 0000000000000000 R09: 175f48ad1c3a9c00
+[  108.581187]  do_idle+0x62/0x230
+[  108.585890] R10: 0000000000000000 R11: 0000000000000246 R12: 00007ffc992642d8
+[  108.585891] R13: 00005577814ab2ba R14: 00005577814addf0 R15: 00007f605876d000
+[  108.587920]  cpu_startup_entry+0x1d/0x20
+[  108.591422]  </TASK>
+[  108.596127]  rest_init+0xc5/0xd0
+[  108.600490] ---[ end trace 0000000000000000 ]---
+
+Test Setup:
+
+DUT:
+- Change the MAC address on the DUT side. Ensure the NICs do not have the same MAC address.
+- Run udp_tai on the DUT side. Keep udp_tai running throughout the test.
+
+Example:
+./udp_tai -i enp170s0 -P 100000 -p 90 -c 1 -t 0 -u 30004
+
+Host:
+- Perform link up/down every 5 seconds.
+
+Result:
+A kernel panic will happen on the DUT side.
+
+Fixes: 13b5b7fd6a4a ("igc: Add support for Tx/Rx rings")
+Signed-off-by: Muhammad Husaini Zulkifli <muhammad.husaini.zulkifli@intel.com>
+Tested-by: Naama Meir <naamax.meir@linux.intel.com>
+Reviewed-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igc/igc_main.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index a2d823e646095..f44d9b8414567 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -255,6 +255,13 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
+       /* reset BQL for queue */
+       netdev_tx_reset_queue(txring_txq(tx_ring));
++      /* Zero out the buffer ring */
++      memset(tx_ring->tx_buffer_info, 0,
++             sizeof(*tx_ring->tx_buffer_info) * tx_ring->count);
++
++      /* Zero out the descriptor ring */
++      memset(tx_ring->desc, 0, tx_ring->size);
++
+       /* reset next_to_use and next_to_clean */
+       tx_ring->next_to_use = 0;
+       tx_ring->next_to_clean = 0;
+@@ -268,7 +275,7 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
+  */
+ void igc_free_tx_resources(struct igc_ring *tx_ring)
+ {
+-      igc_clean_tx_ring(tx_ring);
++      igc_disable_tx_ring(tx_ring);
+       vfree(tx_ring->tx_buffer_info);
+       tx_ring->tx_buffer_info = NULL;
+-- 
+2.39.2
+
diff --git a/queue-6.3/igc-fix-possible-system-crash-when-loading-module.patch b/queue-6.3/igc-fix-possible-system-crash-when-loading-module.patch
new file mode 100644 (file)
index 0000000..48b888c
--- /dev/null
@@ -0,0 +1,48 @@
+From 18777da07f7267c0f010c0405260c286461d30a0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 Apr 2023 15:18:39 -0700
+Subject: igc: Fix possible system crash when loading module
+
+From: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+
+[ Upstream commit c080fe262f9e73a00934b70c16b1479cf40cd2bd ]
+
+Guarantee that when probe() is run again, PTM and PCI bus mastering will
+be in the same state as if the driver had never been loaded.
+
+Avoid an i225/i226 hardware issue where PTM requests can be made even
+though PCI bus mastering is not enabled. These unexpected PTM requests
+can crash some systems.
+
+So, "force" disable PTM and busmastering before removing the driver,
+so they can be re-enabled in the right order during probe(). This is
+more like a workaround and should be applicable for i225 and i226, in
+any platform.
+
+Fixes: 1b5d73fb8624 ("igc: Enable PCIe PTM")
+Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Reviewed-by: Muhammad Husaini Zulkifli <muhammad.husaini.zulkifli@intel.com>
+Tested-by: Naama Meir <naamax.meir@linux.intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igc/igc_main.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index f44d9b8414567..b35f5ff3536e5 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -6722,6 +6722,9 @@ static void igc_remove(struct pci_dev *pdev)
+       igc_ptp_stop(adapter);
++      pci_disable_ptm(pdev);
++      pci_clear_master(pdev);
++
+       set_bit(__IGC_DOWN, &adapter->state);
+       del_timer_sync(&adapter->watchdog_timer);
+-- 
+2.39.2
+
diff --git a/queue-6.3/ipvlan-fix-bound-dev-checking-for-ipv6-l3s-mode.patch b/queue-6.3/ipvlan-fix-bound-dev-checking-for-ipv6-l3s-mode.patch
new file mode 100644 (file)
index 0000000..963cdb1
--- /dev/null
@@ -0,0 +1,50 @@
+From dc9d7db01a36033a01a82c8bf575444f81d4c195 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 9 Jun 2023 17:15:02 +0800
+Subject: ipvlan: fix bound dev checking for IPv6 l3s mode
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit ce57adc222aba32431c42632b396e9213d0eb0b8 ]
+
+The commit 59a0b022aa24 ("ipvlan: Make skb->skb_iif track skb->dev for l3s
+mode") fixed ipvlan bonded dev checking by updating skb skb_iif. This fix
+works for IPv4, as in raw_v4_input() the dif is from inet_iif(skb), which
+is skb->skb_iif when there is no route.
+
+But for IPv6, the fix is not enough, because in ipv6_raw_deliver() ->
+raw_v6_match() the dif is inet6_iif(skb), which returns IP6CB(skb)->iif
+instead of skb->skb_iif if the device is not an l3_slave. To fix the
+IPv6 part of the issue, set IP6CB(skb)->iif to the correct ifindex.
+
+BTW, ipvlan handles NS/NA specifically. Since it works fine, I will not
+reset IP6CB(skb)->iif when addr->atype is IPVL_ICMPV6.
+
+Fixes: c675e06a98a4 ("ipvlan: decouple l3s mode dependencies from other modes")
+Link: https://bugzilla.redhat.com/show_bug.cgi?id=2196710
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Reviewed-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ipvlan/ipvlan_l3s.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/net/ipvlan/ipvlan_l3s.c b/drivers/net/ipvlan/ipvlan_l3s.c
+index 71712ea25403d..d5b05e8032199 100644
+--- a/drivers/net/ipvlan/ipvlan_l3s.c
++++ b/drivers/net/ipvlan/ipvlan_l3s.c
+@@ -102,6 +102,10 @@ static unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb,
+       skb->dev = addr->master->dev;
+       skb->skb_iif = skb->dev->ifindex;
++#if IS_ENABLED(CONFIG_IPV6)
++      if (addr->atype == IPVL_IPV6)
++              IP6CB(skb)->iif = skb->dev->ifindex;
++#endif
+       len = skb->len + ETH_HLEN;
+       ipvlan_count_rx(addr->master, len, true, false);
+ out:
+-- 
+2.39.2
+
diff --git a/queue-6.3/net-dsa-felix-fix-taprio-guard-band-overflow-at-10mb.patch b/queue-6.3/net-dsa-felix-fix-taprio-guard-band-overflow-at-10mb.patch
new file mode 100644 (file)
index 0000000..d05967e
--- /dev/null
@@ -0,0 +1,52 @@
+From 2b8e219ea5f5ec4f958a903e478cd59ae5f1ee64 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Jun 2023 20:09:07 +0300
+Subject: net: dsa: felix: fix taprio guard band overflow at 10Mbps with jumbo
+ frames
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit 6ac7a27a8b07588497ed53dfd885df9c72bc67e0 ]
+
+The DEV_MAC_MAXLEN_CFG register contains a 16-bit value - up to 65535.
+Plus 2 * VLAN_HLEN (4), that is up to 65543.
+
+The picos_per_byte variable is the largest when "speed" is lowest -
+SPEED_10 = 10. In that case it is (1000000L * 8) / 10 = 800000.
+
+Their product - 52434400000 - exceeds 32 bits, which is a problem,
+because apparently, a multiplication between two 32-bit factors is
+evaluated as 32-bit before being assigned to a 64-bit variable.
+In fact it's a problem for any MTU value larger than 5368.
+
+Cast one of the factors of the multiplication to u64 to force the
+multiplication to take place on 64 bits.
+
+Issue found by Coverity.
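+
+A minimal standalone C sketch of the same pitfall, using the values from
+the description above (illustrative only, not the driver code):
+
+  #include <stdio.h>
+  #include <stdint.h>
+
+  int main(void)
+  {
+          uint32_t maxlen = 65543;          /* 65535 + 2 * VLAN_HLEN */
+          uint32_t picos_per_byte = 800000; /* (1000000L * 8) / 10 at 10Mbps */
+          uint64_t wrapped, full;
+
+          /* the 32-bit product wraps before the 64-bit assignment */
+          wrapped = (maxlen + 24) * picos_per_byte;
+          /* casting one factor to 64 bits keeps the whole product */
+          full = (uint64_t)(maxlen + 24) * picos_per_byte;
+
+          printf("wrapped: %llu\n", (unsigned long long)wrapped);
+          printf("full:    %llu\n", (unsigned long long)full);
+          return 0;
+  }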
+
+Fixes: 55a515b1f5a9 ("net: dsa: felix: drop oversized frames with tc-taprio instead of hanging the port")
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Link: https://lore.kernel.org/r/20230613170907.2413559-1-vladimir.oltean@nxp.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/ocelot/felix_vsc9959.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
+index dddb28984bdfc..841c5ebc1afaa 100644
+--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
++++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
+@@ -1263,7 +1263,7 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
+       /* Consider the standard Ethernet overhead of 8 octets preamble+SFD,
+        * 4 octets FCS, 12 octets IFG.
+        */
+-      needed_bit_time_ps = (maxlen + 24) * picos_per_byte;
++      needed_bit_time_ps = (u64)(maxlen + 24) * picos_per_byte;
+       dev_dbg(ocelot->dev,
+               "port %d: max frame size %d needs %llu ps at speed %d\n",
+-- 
+2.39.2
+
diff --git a/queue-6.3/net-enetc-correct-the-indexes-of-highest-and-2nd-hig.patch b/queue-6.3/net-enetc-correct-the-indexes-of-highest-and-2nd-hig.patch
new file mode 100644 (file)
index 0000000..dae9cee
--- /dev/null
@@ -0,0 +1,62 @@
+From 5be1c073de28d73b4897128fd5021e71803ee184 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Jun 2023 17:10:48 +0800
+Subject: net: enetc: correct the indexes of highest and 2nd highest TCs
+
+From: Wei Fang <wei.fang@nxp.com>
+
+[ Upstream commit 21225873be1472b7c59ed3650396af0e40578112 ]
+
+For ENETC hardware, the TCs are numbered from 0 to N-1, where N
+is the number of TCs. A numerically higher TC has higher priority.
+Obviously, the highest priority TC index should be N-1 and
+the 2nd highest priority TC index should be N-2.
+
+However, the previous logic uses netdev_get_prio_tc_map() to get
+the indexes of the highest and 2nd highest priority TCs; it does
+not make sense and is incorrect to pass a TC index as the "prio"
+argument of netdev_get_prio_tc_map(). So the driver may get the
+wrong indexes of the two highest priority TCs, which would lead to
+failing to set the CBS for those TCs.
+
+e.g.
+$ tc qdisc add dev eno0 parent root handle 100: mqprio num_tc 6 \
+       map 0 0 1 1 2 3 4 5 queues 1@0 1@1 1@2 1@3 2@4 2@6 hw 1
+$ tc qdisc replace dev eno0 parent 100:6 cbs idleslope 100000 \
+       sendslope -900000 hicredit 12 locredit -113 offload 1
+$ Error: Specified device failed to setup cbs hardware offload.
+  ^^^^^
+
+In this example, the previous logic deems that the indexes of the two
+highest priority TCs should be 3 and 2. Actually, the indexes are
+5 and 4, because the number of TCs is 6. So it would fail to
+configure the CBS for the two highest priority TCs.
+
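+As a self-contained illustration of the wrong lookup (plain C, reusing
+the prio->tc map from the mqprio command above; not driver code):
+
+  #include <stdio.h>
+
+  /* prio -> tc map from "map 0 0 1 1 2 3 4 5" */
+  static const int prio_tc_map[8] = { 0, 0, 1, 1, 2, 3, 4, 5 };
+
+  int main(void)
+  {
+          int tc_nums = 6;
+
+          /* old logic: TC indexes fed into a priority->TC lookup */
+          printf("old: top=%d next=%d\n",
+                 prio_tc_map[tc_nums - 1], prio_tc_map[tc_nums - 2]); /* 3, 2 */
+
+          /* fixed logic: the two highest TCs are simply the last indexes */
+          printf("new: top=%d next=%d\n", tc_nums - 1, tc_nums - 2);  /* 5, 4 */
+          return 0;
+  }
+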
+Fixes: c431047c4efe ("enetc: add support Credit Based Shaper(CBS) for hardware offload")
+Signed-off-by: Wei Fang <wei.fang@nxp.com>
+Reviewed-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Reviewed-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/enetc/enetc_qos.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+index 83c27bbbc6edf..126007ab70f61 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+@@ -181,8 +181,8 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
+       int bw_sum = 0;
+       u8 bw;
+-      prio_top = netdev_get_prio_tc_map(ndev, tc_nums - 1);
+-      prio_next = netdev_get_prio_tc_map(ndev, tc_nums - 2);
++      prio_top = tc_nums - 1;
++      prio_next = tc_nums - 2;
+       /* Support highest prio and second prio tc in cbs mode */
+       if (tc != prio_top && tc != prio_next)
+-- 
+2.39.2
+
diff --git a/queue-6.3/net-ethernet-ti-am65-cpsw-call-of_node_put-on-error-.patch b/queue-6.3/net-ethernet-ti-am65-cpsw-call-of_node_put-on-error-.patch
new file mode 100644 (file)
index 0000000..5137615
--- /dev/null
@@ -0,0 +1,38 @@
+From 754b3488fd26642cbc7f13f1e1656c4c91dcca04 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Jun 2023 10:18:50 +0300
+Subject: net: ethernet: ti: am65-cpsw: Call of_node_put() on error path
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+[ Upstream commit 374283a1001277e4d07491387aac1fad5aa08d43 ]
+
+This code returns directly but it should instead call of_node_put()
+to drop some reference counts.
+
+Fixes: dab2b265dd23 ("net: ethernet: ti: am65-cpsw: Add support for SERDES configuration")
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Reviewed-by: Roger Quadros <rogerq@kernel.org>
+Link: https://lore.kernel.org/r/e3012f0c-1621-40e6-bf7d-03c276f6e07f@kili.mountain
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/ti/am65-cpsw-nuss.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index bcea87b7151c0..ad69c5d5761b3 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -2031,7 +2031,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
+               /* Initialize the Serdes PHY for the port */
+               ret = am65_cpsw_init_serdes_phy(dev, port_np, port);
+               if (ret)
+-                      return ret;
++                      goto of_node_put;
+               port->slave.mac_only =
+                               of_property_read_bool(port_np, "ti,mac-only");
+-- 
+2.39.2
+
diff --git a/queue-6.3/net-ethtool-correct-max-attribute-value-for-stats.patch b/queue-6.3/net-ethtool-correct-max-attribute-value-for-stats.patch
new file mode 100644 (file)
index 0000000..56407dd
--- /dev/null
@@ -0,0 +1,41 @@
+From 8cf9a16024e8ba590a6c127e4b67905930518a33 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Jun 2023 09:23:44 -0700
+Subject: net: ethtool: correct MAX attribute value for stats
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 52f79609c0c5b25fddb88e85f25ce08aa7e3fb42 ]
+
+When compiling YNL-generated code, the compiler complains about an
+array initializer being out of bounds. It turns out the MAX value
+for STATS_GRP uses the value for STATS.
+
+This may lead to random corruptions in user space (kernel
+itself doesn't use this value as it never parses stats).
+
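+A generic sketch of the enum pattern involved (illustrative names, not
+the real ethtool UAPI definitions):
+
+  enum {
+          ATTR_UNSPEC,
+          ATTR_FOO,
+          ATTR_BAR,
+
+          /* add new constants above here */
+          __ATTR_CNT,
+          ATTR_MAX = (__ATTR_CNT - 1)   /* must be derived from its own counter */
+  };
+
+  /* Tables are sized as MAX + 1; a MAX borrowed from a bigger enum makes
+   * designated initializers land past the end of the array.
+   */
+  static const int policy[ATTR_MAX + 1] = {
+          [ATTR_FOO] = 1,
+          [ATTR_BAR] = 2,
+  };
+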
+Fixes: f09ea6fb1272 ("ethtool: add a new command for reading standard stats")
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/uapi/linux/ethtool_netlink.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h
+index d39ce21381c5b..b8171e1ffd327 100644
+--- a/include/uapi/linux/ethtool_netlink.h
++++ b/include/uapi/linux/ethtool_netlink.h
+@@ -781,7 +781,7 @@ enum {
+       /* add new constants above here */
+       __ETHTOOL_A_STATS_GRP_CNT,
+-      ETHTOOL_A_STATS_GRP_MAX = (__ETHTOOL_A_STATS_CNT - 1)
++      ETHTOOL_A_STATS_GRP_MAX = (__ETHTOOL_A_STATS_GRP_CNT - 1)
+ };
+ enum {
+-- 
+2.39.2
+
diff --git a/queue-6.3/net-lapbether-only-support-ethernet-devices.patch b/queue-6.3/net-lapbether-only-support-ethernet-devices.patch
new file mode 100644 (file)
index 0000000..7aeefdc
--- /dev/null
@@ -0,0 +1,96 @@
+From 1fe77cf6af597406ad874858ce20f2ea53b5bd9f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Jun 2023 16:18:02 +0000
+Subject: net: lapbether: only support ethernet devices
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 9eed321cde22fc1afd76eac563ce19d899e0d6b2 ]
+
+It probably makes no sense to support arbitrary network devices
+for lapbether.
+
+syzbot reported:
+
+skbuff: skb_under_panic: text:ffff80008934c100 len:44 put:40 head:ffff0000d18dd200 data:ffff0000d18dd1ea tail:0x16 end:0x140 dev:bond1
+kernel BUG at net/core/skbuff.c:200 !
+Internal error: Oops - BUG: 00000000f2000800 [#1] PREEMPT SMP
+Modules linked in:
+CPU: 0 PID: 5643 Comm: dhcpcd Not tainted 6.4.0-rc5-syzkaller-g4641cff8e810 #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 05/25/2023
+pstate: 60400005 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+pc : skb_panic net/core/skbuff.c:196 [inline]
+pc : skb_under_panic+0x13c/0x140 net/core/skbuff.c:210
+lr : skb_panic net/core/skbuff.c:196 [inline]
+lr : skb_under_panic+0x13c/0x140 net/core/skbuff.c:210
+sp : ffff8000973b7260
+x29: ffff8000973b7270 x28: ffff8000973b7360 x27: dfff800000000000
+x26: ffff0000d85d8150 x25: 0000000000000016 x24: ffff0000d18dd1ea
+x23: ffff0000d18dd200 x22: 000000000000002c x21: 0000000000000140
+x20: 0000000000000028 x19: ffff80008934c100 x18: ffff8000973b68a0
+x17: 0000000000000000 x16: ffff80008a43bfbc x15: 0000000000000202
+x14: 0000000000000000 x13: 0000000000000001 x12: 0000000000000001
+x11: 0000000000000201 x10: 0000000000000000 x9 : f22f7eb937cced00
+x8 : f22f7eb937cced00 x7 : 0000000000000001 x6 : 0000000000000001
+x5 : ffff8000973b6b78 x4 : ffff80008df9ee80 x3 : ffff8000805974f4
+x2 : 0000000000000001 x1 : 0000000100000201 x0 : 0000000000000086
+Call trace:
+skb_panic net/core/skbuff.c:196 [inline]
+skb_under_panic+0x13c/0x140 net/core/skbuff.c:210
+skb_push+0xf0/0x108 net/core/skbuff.c:2409
+ip6gre_header+0xbc/0x738 net/ipv6/ip6_gre.c:1383
+dev_hard_header include/linux/netdevice.h:3137 [inline]
+lapbeth_data_transmit+0x1c4/0x298 drivers/net/wan/lapbether.c:257
+lapb_data_transmit+0x8c/0xb0 net/lapb/lapb_iface.c:447
+lapb_transmit_buffer+0x178/0x204 net/lapb/lapb_out.c:149
+lapb_send_control+0x220/0x320 net/lapb/lapb_subr.c:251
+lapb_establish_data_link+0x94/0xec
+lapb_device_event+0x348/0x4e0
+notifier_call_chain+0x1a4/0x510 kernel/notifier.c:93
+raw_notifier_call_chain+0x3c/0x50 kernel/notifier.c:461
+__dev_notify_flags+0x2bc/0x544
+dev_change_flags+0xd0/0x15c net/core/dev.c:8643
+devinet_ioctl+0x858/0x17e4 net/ipv4/devinet.c:1150
+inet_ioctl+0x2ac/0x4d8 net/ipv4/af_inet.c:979
+sock_do_ioctl+0x134/0x2dc net/socket.c:1201
+sock_ioctl+0x4ec/0x858 net/socket.c:1318
+vfs_ioctl fs/ioctl.c:51 [inline]
+__do_sys_ioctl fs/ioctl.c:870 [inline]
+__se_sys_ioctl fs/ioctl.c:856 [inline]
+__arm64_sys_ioctl+0x14c/0x1c8 fs/ioctl.c:856
+__invoke_syscall arch/arm64/kernel/syscall.c:38 [inline]
+invoke_syscall+0x98/0x2c0 arch/arm64/kernel/syscall.c:52
+el0_svc_common+0x138/0x244 arch/arm64/kernel/syscall.c:142
+do_el0_svc+0x64/0x198 arch/arm64/kernel/syscall.c:191
+el0_svc+0x4c/0x160 arch/arm64/kernel/entry-common.c:647
+el0t_64_sync_handler+0x84/0xfc arch/arm64/kernel/entry-common.c:665
+el0t_64_sync+0x190/0x194 arch/arm64/kernel/entry.S:591
+Code: aa1803e6 aa1903e7 a90023f5 947730f5 (d4210000)
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Martin Schiller <ms@dev.tdt.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wan/lapbether.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
+index d62a904d2e422..56326f38fe8a3 100644
+--- a/drivers/net/wan/lapbether.c
++++ b/drivers/net/wan/lapbether.c
+@@ -384,6 +384,9 @@ static int lapbeth_new_device(struct net_device *dev)
+       ASSERT_RTNL();
++      if (dev->type != ARPHRD_ETHER)
++              return -EINVAL;
++
+       ndev = alloc_netdev(sizeof(*lapbeth), "lapb%d", NET_NAME_UNKNOWN,
+                           lapbeth_setup);
+       if (!ndev)
+-- 
+2.39.2
+
diff --git a/queue-6.3/net-macsec-fix-double-free-of-percpu-stats.patch b/queue-6.3/net-macsec-fix-double-free-of-percpu-stats.patch
new file mode 100644 (file)
index 0000000..1664e84
--- /dev/null
@@ -0,0 +1,63 @@
+From 9ae6ab7fb80b3ceeb5746656a45e907e65773009 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Jun 2023 22:22:20 +0300
+Subject: net: macsec: fix double free of percpu stats
+
+From: Fedor Pchelkin <pchelkin@ispras.ru>
+
+[ Upstream commit 0c0cf3db83f8c7c9bb141c2771a34043bcf952ef ]
+
+Inside macsec_add_dev() we free the percpu macsec->secy.tx_sc.stats and
+macsec->stats on some of the memory allocation failure paths. However, the
+net_device is already registered by that moment: in macsec_newlink(), just
+before calling macsec_add_dev(). This means that during the unregister
+process its priv_destructor - macsec_free_netdev() - will be called and
+will free the stats again.
+
+Remove the freeing of percpu stats inside macsec_add_dev(), because
+macsec_free_netdev() will correctly free the already allocated ones. The
+pointers to unallocated stats stay NULL, and free_percpu() handles that
+correctly.
+
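+A plain-C sketch of the ownership rule being applied here (illustrative
+only, not the macsec code): once a destructor unconditionally frees the
+stats, the error paths must not free them too, and freeing a NULL
+pointer is a no-op, so partially allocated state is handled fine.
+
+  #include <stdlib.h>
+
+  struct priv { long *stats; long *tx_stats; };
+
+  static void destructor(struct priv *p)    /* always runs on teardown */
+  {
+          free(p->tx_stats);
+          free(p->stats);                   /* free(NULL) is a no-op */
+  }
+
+  static int setup(struct priv *p)
+  {
+          p->stats = calloc(1, sizeof(*p->stats));
+          if (!p->stats)
+                  return -1;
+
+          p->tx_stats = calloc(1, sizeof(*p->tx_stats));
+          if (!p->tx_stats)
+                  return -1;   /* do NOT free p->stats; destructor() will */
+
+          return 0;
+  }
+
+  int main(void)
+  {
+          struct priv p = { 0 };
+          int err = setup(&p);
+
+          destructor(&p);      /* runs regardless, like priv_destructor */
+          return err ? 1 : 0;
+  }
+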
+Found by Linux Verification Center (linuxtesting.org) with Syzkaller.
+
+Fixes: 0a28bfd4971f ("net/macsec: Add MACsec skb_metadata_dst Tx Data path support")
+Fixes: c09440f7dcb3 ("macsec: introduce IEEE 802.1AE driver")
+Signed-off-by: Fedor Pchelkin <pchelkin@ispras.ru>
+Reviewed-by: Sabrina Dubroca <sd@queasysnail.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/macsec.c | 12 +++++-------
+ 1 file changed, 5 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 25616247d7a56..e040c191659b9 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -3987,17 +3987,15 @@ static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
+               return -ENOMEM;
+       secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
+-      if (!secy->tx_sc.stats) {
+-              free_percpu(macsec->stats);
++      if (!secy->tx_sc.stats)
+               return -ENOMEM;
+-      }
+       secy->tx_sc.md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL);
+-      if (!secy->tx_sc.md_dst) {
+-              free_percpu(secy->tx_sc.stats);
+-              free_percpu(macsec->stats);
++      if (!secy->tx_sc.md_dst)
++              /* macsec and secy percpu stats will be freed when unregistering
++               * net_device in macsec_free_netdev()
++               */
+               return -ENOMEM;
+-      }
+       if (sci == MACSEC_UNDEF_SCI)
+               sci = dev_to_sci(dev, MACSEC_PORT_ES);
+-- 
+2.39.2
+
diff --git a/queue-6.3/net-phylink-report-correct-max-speed-for-qusgmii.patch b/queue-6.3/net-phylink-report-correct-max-speed-for-qusgmii.patch
new file mode 100644 (file)
index 0000000..ab87fda
--- /dev/null
@@ -0,0 +1,45 @@
+From e9bdc6651f363cf9be5e5735c377c17c42033c09 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 9 Jun 2023 10:03:04 +0200
+Subject: net: phylink: report correct max speed for QUSGMII
+
+From: Maxime Chevallier <maxime.chevallier@bootlin.com>
+
+[ Upstream commit b9dc1046edfeb7d9dbc2272c8d9ad5a8c47f3199 ]
+
+Q-USGMII is the quad-port version of USGMII, and supports a max speed of
+1Gbps on each line. Make it so that phylink_interface_max_speed() reports
+this information correctly.
+
+Fixes: ae0e4bb2a0e0 ("net: phylink: Adjust link settings based on rate matching")
+Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
+Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/phy/phylink.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
+index 30c166b334686..65ff118f22314 100644
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -188,6 +188,7 @@ static int phylink_interface_max_speed(phy_interface_t interface)
+       case PHY_INTERFACE_MODE_RGMII_ID:
+       case PHY_INTERFACE_MODE_RGMII:
+       case PHY_INTERFACE_MODE_QSGMII:
++      case PHY_INTERFACE_MODE_QUSGMII:
+       case PHY_INTERFACE_MODE_SGMII:
+       case PHY_INTERFACE_MODE_GMII:
+               return SPEED_1000;
+@@ -204,7 +205,6 @@ static int phylink_interface_max_speed(phy_interface_t interface)
+       case PHY_INTERFACE_MODE_10GBASER:
+       case PHY_INTERFACE_MODE_10GKR:
+       case PHY_INTERFACE_MODE_USXGMII:
+-      case PHY_INTERFACE_MODE_QUSGMII:
+               return SPEED_10000;
+       case PHY_INTERFACE_MODE_25GBASER:
+-- 
+2.39.2
+
diff --git a/queue-6.3/net-phylink-use-a-dedicated-helper-to-parse-usgmii-c.patch b/queue-6.3/net-phylink-use-a-dedicated-helper-to-parse-usgmii-c.patch
new file mode 100644 (file)
index 0000000..7262f20
--- /dev/null
@@ -0,0 +1,86 @@
+From 6f7b5f1ca8a2745f2645cefd483f00b0c47b6c14 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 9 Jun 2023 10:03:05 +0200
+Subject: net: phylink: use a dedicated helper to parse usgmii control word
+
+From: Maxime Chevallier <maxime.chevallier@bootlin.com>
+
+[ Upstream commit 923454c0368b8092e9d05c020f50abca577e7290 ]
+
+Q-USGMII is a derivative of USGMII that uses a specific format for
+the control word. The layout is close to the USXGMII control word, but
+doesn't support speeds over 1Gbps. Use dedicated decoding logic for
+the USGMII control word, re-using the USXGMII definitions but only
+considering 10/100/1000Mbps speeds.
+
+Fixes: 5e61fe157a27 ("net: phy: Introduce QUSGMII PHY mode")
+Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
+Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/phy/phylink.c | 39 ++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 38 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
+index 65ff118f22314..0598debb93dea 100644
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -3297,6 +3297,41 @@ void phylink_decode_usxgmii_word(struct phylink_link_state *state,
+ }
+ EXPORT_SYMBOL_GPL(phylink_decode_usxgmii_word);
++/**
++ * phylink_decode_usgmii_word() - decode the USGMII word from a MAC PCS
++ * @state: a pointer to a struct phylink_link_state.
++ * @lpa: a 16 bit value which stores the USGMII auto-negotiation word
++ *
++ * Helper for MAC PCS supporting the USGMII protocol and the auto-negotiation
++ * code word.  Decode the USGMII code word and populate the corresponding fields
++ * (speed, duplex) into the phylink_link_state structure. The structure for this
++ * word is the same as the USXGMII word, except it only supports speeds up to
++ * 1Gbps.
++ */
++static void phylink_decode_usgmii_word(struct phylink_link_state *state,
++                                     uint16_t lpa)
++{
++      switch (lpa & MDIO_USXGMII_SPD_MASK) {
++      case MDIO_USXGMII_10:
++              state->speed = SPEED_10;
++              break;
++      case MDIO_USXGMII_100:
++              state->speed = SPEED_100;
++              break;
++      case MDIO_USXGMII_1000:
++              state->speed = SPEED_1000;
++              break;
++      default:
++              state->link = false;
++              return;
++      }
++
++      if (lpa & MDIO_USXGMII_FULL_DUPLEX)
++              state->duplex = DUPLEX_FULL;
++      else
++              state->duplex = DUPLEX_HALF;
++}
++
+ /**
+  * phylink_mii_c22_pcs_decode_state() - Decode MAC PCS state from MII registers
+  * @state: a pointer to a &struct phylink_link_state.
+@@ -3333,9 +3368,11 @@ void phylink_mii_c22_pcs_decode_state(struct phylink_link_state *state,
+       case PHY_INTERFACE_MODE_SGMII:
+       case PHY_INTERFACE_MODE_QSGMII:
+-      case PHY_INTERFACE_MODE_QUSGMII:
+               phylink_decode_sgmii_word(state, lpa);
+               break;
++      case PHY_INTERFACE_MODE_QUSGMII:
++              phylink_decode_usgmii_word(state, lpa);
++              break;
+       default:
+               state->link = false;
+-- 
+2.39.2
+
diff --git a/queue-6.3/net-renesas-rswitch-fix-timestamp-feature-after-all-.patch b/queue-6.3/net-renesas-rswitch-fix-timestamp-feature-after-all-.patch
new file mode 100644 (file)
index 0000000..8d0600e
--- /dev/null
@@ -0,0 +1,91 @@
+From 59beb47594998d4a7481745ff90d041ac5d82169 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Jun 2023 10:57:27 +0900
+Subject: net: renesas: rswitch: Fix timestamp feature after all descriptors
+ are used
+
+From: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+
+[ Upstream commit 0ad4982c520ed87ea7ebfc9381ea1f617ed75364 ]
+
+The timestamp descriptors were intended to act cyclically. Descriptors
+from index 0 through gq->ring_size - 1 contain actual information, and
+the last index (gq->ring_size) should hold LINKFIX to point back to
+the first descriptor (index 0). However, the LINKFIX value is missing,
+causing the timestamp feature to stop after all descriptors are used.
+To resolve this issue, set LINKFIX in the timestamp descriptor ring.
+
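+A generic sketch of such a ring layout in plain C (type values and names
+are made up, not the rswitch definitions): N data slots plus one extra
+slot whose only job is to link back to slot 0.
+
+  #include <stdint.h>
+  #include <stdlib.h>
+
+  #define RING_SIZE  8
+  #define DT_FEMPTY  0x40     /* assumed "empty data descriptor" type */
+  #define DT_LINKFIX 0x80     /* assumed "link back to dptr" type */
+
+  struct ts_desc {
+          uint8_t  die_dt;    /* descriptor type */
+          uint64_t dptr;      /* address the descriptor points at */
+  };
+
+  static struct ts_desc *ts_ring_alloc(uint64_t ring_dma)
+  {
+          /* one extra slot for the terminating link descriptor */
+          struct ts_desc *ring = calloc(RING_SIZE + 1, sizeof(*ring));
+          int i;
+
+          if (!ring)
+                  return NULL;
+
+          for (i = 0; i < RING_SIZE; i++)
+                  ring[i].die_dt = DT_FEMPTY;
+
+          /* without this, processing stops after one pass over the ring */
+          ring[RING_SIZE].die_dt = DT_LINKFIX;
+          ring[RING_SIZE].dptr   = ring_dma;   /* back to slot 0 */
+          return ring;
+  }
+
+  int main(void)
+  {
+          free(ts_ring_alloc(0x1000));   /* 0x1000: fake DMA address */
+          return 0;
+  }
+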
+Reported-by: Phong Hoang <phong.hoang.wz@renesas.com>
+Fixes: 33f5d733b589 ("net: renesas: rswitch: Improve TX timestamp accuracy")
+Signed-off-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/renesas/rswitch.c | 36 ++++++++++++++++----------
+ 1 file changed, 22 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
+index 7855d9ef81eb1..66bce799471c1 100644
+--- a/drivers/net/ethernet/renesas/rswitch.c
++++ b/drivers/net/ethernet/renesas/rswitch.c
+@@ -347,17 +347,6 @@ static int rswitch_gwca_queue_alloc(struct net_device *ndev,
+       return -ENOMEM;
+ }
+-static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv)
+-{
+-      struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
+-
+-      gq->ring_size = TS_RING_SIZE;
+-      gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev,
+-                                       sizeof(struct rswitch_ts_desc) *
+-                                       (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
+-      return !gq->ts_ring ? -ENOMEM : 0;
+-}
+-
+ static void rswitch_desc_set_dptr(struct rswitch_desc *desc, dma_addr_t addr)
+ {
+       desc->dptrl = cpu_to_le32(lower_32_bits(addr));
+@@ -533,6 +522,28 @@ static void rswitch_gwca_linkfix_free(struct rswitch_private *priv)
+       gwca->linkfix_table = NULL;
+ }
++static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv)
++{
++      struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
++      struct rswitch_ts_desc *desc;
++
++      gq->ring_size = TS_RING_SIZE;
++      gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev,
++                                       sizeof(struct rswitch_ts_desc) *
++                                       (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
++
++      if (!gq->ts_ring)
++              return -ENOMEM;
++
++      rswitch_gwca_ts_queue_fill(priv, 0, TS_RING_SIZE);
++      desc = &gq->ts_ring[gq->ring_size];
++      desc->desc.die_dt = DT_LINKFIX;
++      rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
++      INIT_LIST_HEAD(&priv->gwca.ts_info_list);
++
++      return 0;
++}
++
+ static struct rswitch_gwca_queue *rswitch_gwca_get(struct rswitch_private *priv)
+ {
+       struct rswitch_gwca_queue *gq;
+@@ -1782,9 +1793,6 @@ static int rswitch_init(struct rswitch_private *priv)
+       if (err < 0)
+               goto err_ts_queue_alloc;
+-      rswitch_gwca_ts_queue_fill(priv, 0, TS_RING_SIZE);
+-      INIT_LIST_HEAD(&priv->gwca.ts_info_list);
+-
+       for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+               err = rswitch_device_alloc(priv, i);
+               if (err < 0) {
+-- 
+2.39.2
+
diff --git a/queue-6.3/net-sched-act_ct-fix-promotion-of-offloaded-unreplie.patch b/queue-6.3/net-sched-act_ct-fix-promotion-of-offloaded-unreplie.patch
new file mode 100644 (file)
index 0000000..70449df
--- /dev/null
@@ -0,0 +1,157 @@
+From a6b98969c96c3a3818a111792e3df36ed0914752 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 9 Jun 2023 15:22:59 +0300
+Subject: net/sched: act_ct: Fix promotion of offloaded unreplied tuple
+
+From: Paul Blakey <paulb@nvidia.com>
+
+[ Upstream commit 41f2c7c342d3adb1c4dd5f2e3dd831adff16a669 ]
+
+Currently UNREPLIED and UNASSURED connections are added to the nf flow
+table. This causes the following connection packets to be processed
+by the flow table, which then skips conntrack_in(), and thus such
+connections will remain UNREPLIED and UNASSURED even if reply traffic
+is then seen. Even so, the unoffloaded reply packets are the ones
+triggering the hardware update from new to established state, and if
+there aren't any to trigger an update and/or the previous update was
+missed, hardware can get out of sync with software and still mark
+packets as new.
+
+Fix the above by:
+1) Not skipping conntrack_in() for UNASSURED packets, but still
+   refreshing for hardware, as before the cited patch.
+2) Trying to force a refresh by reply-direction packets that update
+   the hardware rules from new to established state.
+3) Removing any bidirectional flows that failed to update in
+   hardware, so they can be re-inserted as bidirectional once any new
+   packet arrives.
+
+Fixes: 6a9bad0069cf ("net/sched: act_ct: offload UDP NEW connections")
+Co-developed-by: Vlad Buslov <vladbu@nvidia.com>
+Signed-off-by: Vlad Buslov <vladbu@nvidia.com>
+Signed-off-by: Paul Blakey <paulb@nvidia.com>
+Reviewed-by: Florian Westphal <fw@strlen.de>
+Link: https://lore.kernel.org/r/1686313379-117663-1-git-send-email-paulb@nvidia.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/netfilter/nf_flow_table.h |  2 +-
+ net/netfilter/nf_flow_table_core.c    | 13 ++++++++++---
+ net/netfilter/nf_flow_table_ip.c      |  4 ++--
+ net/sched/act_ct.c                    |  9 ++++++++-
+ 4 files changed, 21 insertions(+), 7 deletions(-)
+
+diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
+index ebb28ec5b6faf..f37f9f34430c1 100644
+--- a/include/net/netfilter/nf_flow_table.h
++++ b/include/net/netfilter/nf_flow_table.h
+@@ -268,7 +268,7 @@ int flow_offload_route_init(struct flow_offload *flow,
+ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow);
+ void flow_offload_refresh(struct nf_flowtable *flow_table,
+-                        struct flow_offload *flow);
++                        struct flow_offload *flow, bool force);
+ struct flow_offload_tuple_rhash *flow_offload_lookup(struct nf_flowtable *flow_table,
+                                                    struct flow_offload_tuple *tuple);
+diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
+index 04bd0ed4d2ae7..b0ef48b21dcb4 100644
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -317,12 +317,12 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
+ EXPORT_SYMBOL_GPL(flow_offload_add);
+ void flow_offload_refresh(struct nf_flowtable *flow_table,
+-                        struct flow_offload *flow)
++                        struct flow_offload *flow, bool force)
+ {
+       u32 timeout;
+       timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
+-      if (timeout - READ_ONCE(flow->timeout) > HZ)
++      if (force || timeout - READ_ONCE(flow->timeout) > HZ)
+               WRITE_ONCE(flow->timeout, timeout);
+       else
+               return;
+@@ -334,6 +334,12 @@ void flow_offload_refresh(struct nf_flowtable *flow_table,
+ }
+ EXPORT_SYMBOL_GPL(flow_offload_refresh);
++static bool nf_flow_is_outdated(const struct flow_offload *flow)
++{
++      return test_bit(IPS_SEEN_REPLY_BIT, &flow->ct->status) &&
++              !test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);
++}
++
+ static inline bool nf_flow_has_expired(const struct flow_offload *flow)
+ {
+       return nf_flow_timeout_delta(flow->timeout) <= 0;
+@@ -423,7 +429,8 @@ static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table,
+                                   struct flow_offload *flow, void *data)
+ {
+       if (nf_flow_has_expired(flow) ||
+-          nf_ct_is_dying(flow->ct))
++          nf_ct_is_dying(flow->ct) ||
++          nf_flow_is_outdated(flow))
+               flow_offload_teardown(flow);
+       if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
+diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
+index 19efba1e51ef9..3bbaf9c7ea46a 100644
+--- a/net/netfilter/nf_flow_table_ip.c
++++ b/net/netfilter/nf_flow_table_ip.c
+@@ -384,7 +384,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
+       if (skb_try_make_writable(skb, thoff + hdrsize))
+               return NF_DROP;
+-      flow_offload_refresh(flow_table, flow);
++      flow_offload_refresh(flow_table, flow, false);
+       nf_flow_encap_pop(skb, tuplehash);
+       thoff -= offset;
+@@ -650,7 +650,7 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
+       if (skb_try_make_writable(skb, thoff + hdrsize))
+               return NF_DROP;
+-      flow_offload_refresh(flow_table, flow);
++      flow_offload_refresh(flow_table, flow, false);
+       nf_flow_encap_pop(skb, tuplehash);
+diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
+index 9cc0bc7c71ed7..abc71a06d634a 100644
+--- a/net/sched/act_ct.c
++++ b/net/sched/act_ct.c
+@@ -610,6 +610,7 @@ static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
+       struct flow_offload_tuple tuple = {};
+       enum ip_conntrack_info ctinfo;
+       struct tcphdr *tcph = NULL;
++      bool force_refresh = false;
+       struct flow_offload *flow;
+       struct nf_conn *ct;
+       u8 dir;
+@@ -647,6 +648,7 @@ static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
+                        * established state, then don't refresh.
+                        */
+                       return false;
++              force_refresh = true;
+       }
+       if (tcph && (unlikely(tcph->fin || tcph->rst))) {
+@@ -660,7 +662,12 @@ static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
+       else
+               ctinfo = IP_CT_ESTABLISHED_REPLY;
+-      flow_offload_refresh(nf_ft, flow);
++      flow_offload_refresh(nf_ft, flow, force_refresh);
++      if (!test_bit(IPS_ASSURED_BIT, &ct->status)) {
++              /* Process this flow in SW to allow promoting to ASSURED */
++              return false;
++      }
++
+       nf_conntrack_get(&ct->ct_general);
+       nf_ct_set(skb, ct, ctinfo);
+       if (nf_ft->flags & NF_FLOWTABLE_COUNTER)
+-- 
+2.39.2
+
diff --git a/queue-6.3/net-sched-act_pedit-parse-l3-header-for-l4-offset.patch b/queue-6.3/net-sched-act_pedit-parse-l3-header-for-l4-offset.patch
new file mode 100644 (file)
index 0000000..a13bdab
--- /dev/null
@@ -0,0 +1,140 @@
+From eba5fc1c4484b3851c69fdde4dfb28bb109beff5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Jun 2023 12:23:54 -0400
+Subject: net/sched: act_pedit: Parse L3 Header for L4 offset
+
+From: Max Tottenham <mtottenh@akamai.com>
+
+[ Upstream commit 6c02568fd1ae53099b4ab86365c5be1ff15f586b ]
+
+Instead of relying on skb->transport_header being set correctly, parse
+the L3 header length out of the L3 headers for both IPv4/IPv6 when the
+extended layer op for tcp/udp is used. This fixes a bug when GRO is
+disabled: with GRO disabled, skb->transport_header is set by
+__netif_receive_skb_core() to point to the L3 header and is only fixed
+up later by the upper protocol layers, but act_pedit will receive the
+SKB before the fixups are completed. The existing behavior causes the
+following command to edit the L3 header instead of the UDP header when
+GRO is disabled:
+
+    tc filter add dev eth0 ingress protocol ip flower ip_proto udp \
+ dst_ip 192.168.1.3 action pedit ex munge udp set dport 18053
+
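+As a minimal illustration of the IPv4 offset computation now performed
+(standalone C with hypothetical values; the kernel code uses struct
+iphdr and skb_network_offset() as shown in the diff below):
+
+  #include <stdio.h>
+  #include <stdint.h>
+
+  int main(void)
+  {
+          uint8_t ihl = 5;   /* IPv4 IHL: header length in 32-bit words */
+          int noff = 14;     /* network header offset after an Ethernet header */
+
+          /* the L4 (TCP/UDP) header starts right after the IPv4 header */
+          printf("l4 offset = %d\n", noff + ihl * 4);   /* 14 + 20 = 34 */
+          return 0;
+  }
+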
+Also re-introduce a rate-limited warning if we were unable to extract
+the header offset when using the 'ex' interface.
+
+Fixes: 71d0ed7079df ("net/act_pedit: Support using offset relative to
+the conventional network headers")
+Signed-off-by: Max Tottenham <mtottenh@akamai.com>
+Reviewed-by: Josh Hunt <johunt@akamai.com>
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202305261541.N165u9TZ-lkp@intel.com/
+Reviewed-by: Pedro Tammela <pctammela@mojatatu.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/act_pedit.c | 48 ++++++++++++++++++++++++++++++++++++++-----
+ 1 file changed, 43 insertions(+), 5 deletions(-)
+
+diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
+index 1e7e959b90e48..300b5ae760dcc 100644
+--- a/net/sched/act_pedit.c
++++ b/net/sched/act_pedit.c
+@@ -13,7 +13,10 @@
+ #include <linux/rtnetlink.h>
+ #include <linux/module.h>
+ #include <linux/init.h>
++#include <linux/ip.h>
++#include <linux/ipv6.h>
+ #include <linux/slab.h>
++#include <net/ipv6.h>
+ #include <net/netlink.h>
+ #include <net/pkt_sched.h>
+ #include <linux/tc_act/tc_pedit.h>
+@@ -313,28 +316,58 @@ static bool offset_valid(struct sk_buff *skb, int offset)
+       return true;
+ }
+-static void pedit_skb_hdr_offset(struct sk_buff *skb,
++static int pedit_l4_skb_offset(struct sk_buff *skb, int *hoffset, const int header_type)
++{
++      const int noff = skb_network_offset(skb);
++      int ret = -EINVAL;
++      struct iphdr _iph;
++
++      switch (skb->protocol) {
++      case htons(ETH_P_IP): {
++              const struct iphdr *iph = skb_header_pointer(skb, noff, sizeof(_iph), &_iph);
++
++              if (!iph)
++                      goto out;
++              *hoffset = noff + iph->ihl * 4;
++              ret = 0;
++              break;
++      }
++      case htons(ETH_P_IPV6):
++              ret = ipv6_find_hdr(skb, hoffset, header_type, NULL, NULL) == header_type ? 0 : -EINVAL;
++              break;
++      }
++out:
++      return ret;
++}
++
++static int pedit_skb_hdr_offset(struct sk_buff *skb,
+                                enum pedit_header_type htype, int *hoffset)
+ {
++      int ret = -EINVAL;
+       /* 'htype' is validated in the netlink parsing */
+       switch (htype) {
+       case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
+-              if (skb_mac_header_was_set(skb))
++              if (skb_mac_header_was_set(skb)) {
+                       *hoffset = skb_mac_offset(skb);
++                      ret = 0;
++              }
+               break;
+       case TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK:
+       case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
+       case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
+               *hoffset = skb_network_offset(skb);
++              ret = 0;
+               break;
+       case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
++              ret = pedit_l4_skb_offset(skb, hoffset, IPPROTO_TCP);
++              break;
+       case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
+-              if (skb_transport_header_was_set(skb))
+-                      *hoffset = skb_transport_offset(skb);
++              ret = pedit_l4_skb_offset(skb, hoffset, IPPROTO_UDP);
+               break;
+       default:
+               break;
+       }
++      return ret;
+ }
+ TC_INDIRECT_SCOPE int tcf_pedit_act(struct sk_buff *skb,
+@@ -370,6 +403,7 @@ TC_INDIRECT_SCOPE int tcf_pedit_act(struct sk_buff *skb,
+               int hoffset = 0;
+               u32 *ptr, hdata;
+               u32 val;
++              int rc;
+               if (tkey_ex) {
+                       htype = tkey_ex->htype;
+@@ -378,7 +412,11 @@ TC_INDIRECT_SCOPE int tcf_pedit_act(struct sk_buff *skb,
+                       tkey_ex++;
+               }
+-              pedit_skb_hdr_offset(skb, htype, &hoffset);
++              rc = pedit_skb_hdr_offset(skb, htype, &hoffset);
++              if (rc) {
++                      pr_info_ratelimited("tc action pedit unable to extract header offset for header type (0x%x)\n", htype);
++                      goto bad;
++              }
+               if (tkey->offmask) {
+                       u8 *d, _d;
+-- 
+2.39.2
+
diff --git a/queue-6.3/net-sched-act_pedit-remove-extra-check-for-key-type.patch b/queue-6.3/net-sched-act_pedit-remove-extra-check-for-key-type.patch
new file mode 100644 (file)
index 0000000..e42e068
--- /dev/null
@@ -0,0 +1,98 @@
+From deb109fa618c4a6832edadc9eee9a6298560af87 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Apr 2023 18:25:16 -0300
+Subject: net/sched: act_pedit: remove extra check for key type
+
+From: Pedro Tammela <pctammela@mojatatu.com>
+
+[ Upstream commit 577140180ba28d0d37bc898c7bd6702c83aa106f ]
+
+The netlink parsing already validates the key 'htype'.
+Remove the datapath check as it's redundant.
+
+Reviewed-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Signed-off-by: Pedro Tammela <pctammela@mojatatu.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: 6c02568fd1ae ("net/sched: act_pedit: Parse L3 Header for L4 offset")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/act_pedit.c | 29 +++++++----------------------
+ 1 file changed, 7 insertions(+), 22 deletions(-)
+
+diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
+index 4559a1507ea5a..1e7e959b90e48 100644
+--- a/net/sched/act_pedit.c
++++ b/net/sched/act_pedit.c
+@@ -313,37 +313,28 @@ static bool offset_valid(struct sk_buff *skb, int offset)
+       return true;
+ }
+-static int pedit_skb_hdr_offset(struct sk_buff *skb,
+-                              enum pedit_header_type htype, int *hoffset)
++static void pedit_skb_hdr_offset(struct sk_buff *skb,
++                               enum pedit_header_type htype, int *hoffset)
+ {
+-      int ret = -EINVAL;
+-
++      /* 'htype' is validated in the netlink parsing */
+       switch (htype) {
+       case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
+-              if (skb_mac_header_was_set(skb)) {
++              if (skb_mac_header_was_set(skb))
+                       *hoffset = skb_mac_offset(skb);
+-                      ret = 0;
+-              }
+               break;
+       case TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK:
+       case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
+       case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
+               *hoffset = skb_network_offset(skb);
+-              ret = 0;
+               break;
+       case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
+       case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
+-              if (skb_transport_header_was_set(skb)) {
++              if (skb_transport_header_was_set(skb))
+                       *hoffset = skb_transport_offset(skb);
+-                      ret = 0;
+-              }
+               break;
+       default:
+-              ret = -EINVAL;
+               break;
+       }
+-
+-      return ret;
+ }
+ TC_INDIRECT_SCOPE int tcf_pedit_act(struct sk_buff *skb,
+@@ -376,10 +367,9 @@ TC_INDIRECT_SCOPE int tcf_pedit_act(struct sk_buff *skb,
+       for (i = parms->tcfp_nkeys; i > 0; i--, tkey++) {
+               int offset = tkey->off;
++              int hoffset = 0;
+               u32 *ptr, hdata;
+-              int hoffset;
+               u32 val;
+-              int rc;
+               if (tkey_ex) {
+                       htype = tkey_ex->htype;
+@@ -388,12 +378,7 @@ TC_INDIRECT_SCOPE int tcf_pedit_act(struct sk_buff *skb,
+                       tkey_ex++;
+               }
+-              rc = pedit_skb_hdr_offset(skb, htype, &hoffset);
+-              if (rc) {
+-                      pr_info("tc action pedit bad header type specified (0x%x)\n",
+-                              htype);
+-                      goto bad;
+-              }
++              pedit_skb_hdr_offset(skb, htype, &hoffset);
+               if (tkey->offmask) {
+                       u8 *d, _d;
+-- 
+2.39.2
+
diff --git a/queue-6.3/net-sched-cls_api-fix-lockup-on-flushing-explicitly-.patch b/queue-6.3/net-sched-cls_api-fix-lockup-on-flushing-explicitly-.patch
new file mode 100644 (file)
index 0000000..d1cc6c7
--- /dev/null
@@ -0,0 +1,71 @@
+From 27bc6162d71ccfbe9cb7494cb933bd4671195381 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Jun 2023 11:34:26 +0200
+Subject: net/sched: cls_api: Fix lockup on flushing explicitly created chain
+
+From: Vlad Buslov <vladbu@nvidia.com>
+
+[ Upstream commit c9a82bec02c339cdda99b37c5e62b3b71fc4209c ]
+
+Mingshuai Ren reports:
+
+When a new chain is added by using tc, one soft lockup alarm will be
+ generated after delete the prio 0 filter of the chain. To reproduce
+ the problem, perform the following steps:
+(1) tc qdisc add dev eth0 root handle 1: htb default 1
+(2) tc chain add dev eth0
+(3) tc filter del dev eth0 chain 0 parent 1: prio 0
+(4) tc filter add dev eth0 chain 0 parent 1:
+
+Fix the issue by accounting for additional reference to chains that are
+explicitly created by RTM_NEWCHAIN message as opposed to implicitly by
+RTM_NEWTFILTER message.
+
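+For illustration only (toy userspace model, not the kernel code): an
+explicitly created chain holds one extra non-action reference, so the
+delete notification is only issued once that last reference is gone:
+
+    #include <stdbool.h>
+    #include <stdio.h>
+
+    /* Hypothetical toy chain: refcnt includes one extra reference when
+     * the chain was explicitly created via RTM_NEWCHAIN.
+     */
+    struct toy_chain {
+            unsigned int refcnt;
+            unsigned int action_refcnt;
+            bool explicitly_created;
+            bool flushing;
+    };
+
+    static void toy_chain_put(struct toy_chain *c, bool by_act)
+    {
+            unsigned int non_act = --c->refcnt - c->action_refcnt;
+
+            if (non_act == (c->explicitly_created ? 1u : 0u) && !by_act) {
+                    if (non_act == 0)
+                            printf("notify chain delete\n");
+                    c->flushing = false; /* new filters may be added again */
+            }
+    }
+
+    int main(void)
+    {
+            struct toy_chain c = { .refcnt = 2, .explicitly_created = true,
+                                   .flushing = true };
+
+            toy_chain_put(&c, false); /* last filter ref: flush completes */
+            printf("flushing=%d refcnt=%u\n", c.flushing, c.refcnt);
+            return 0;
+    }
+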
+Fixes: 726d061286ce ("net: sched: prevent insertion of new classifiers during chain flush")
+Reported-by: Mingshuai Ren <renmingshuai@huawei.com>
+Closes: https://lore.kernel.org/lkml/87legswvi3.fsf@nvidia.com/T/
+Signed-off-by: Vlad Buslov <vladbu@nvidia.com>
+Link: https://lore.kernel.org/r/20230612093426.2867183-1-vladbu@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/cls_api.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index c877a6343fd47..a193cc7b32418 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -657,8 +657,8 @@ static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
+ {
+       struct tcf_block *block = chain->block;
+       const struct tcf_proto_ops *tmplt_ops;
++      unsigned int refcnt, non_act_refcnt;
+       bool free_block = false;
+-      unsigned int refcnt;
+       void *tmplt_priv;
+       mutex_lock(&block->lock);
+@@ -678,13 +678,15 @@ static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
+        * save these to temporary variables.
+        */
+       refcnt = --chain->refcnt;
++      non_act_refcnt = refcnt - chain->action_refcnt;
+       tmplt_ops = chain->tmplt_ops;
+       tmplt_priv = chain->tmplt_priv;
+-      /* The last dropped non-action reference will trigger notification. */
+-      if (refcnt - chain->action_refcnt == 0 && !by_act) {
+-              tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
+-                                     block, NULL, 0, 0, false);
++      if (non_act_refcnt == chain->explicitly_created && !by_act) {
++              if (non_act_refcnt == 0)
++                      tc_chain_notify_delete(tmplt_ops, tmplt_priv,
++                                             chain->index, block, NULL, 0, 0,
++                                             false);
+               /* Last reference to chain, no need to lock. */
+               chain->flushing = false;
+       }
+-- 
+2.39.2
+
diff --git a/queue-6.3/net-sched-cls_u32-fix-reference-counter-leak-leading.patch b/queue-6.3/net-sched-cls_u32-fix-reference-counter-leak-leading.patch
new file mode 100644 (file)
index 0000000..a75d817
--- /dev/null
@@ -0,0 +1,78 @@
+From bea5f097748796ca6ba3bb844f16c9a125fd10ff Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Jun 2023 08:29:03 +0100
+Subject: net/sched: cls_u32: Fix reference counter leak leading to overflow
+
+From: Lee Jones <lee@kernel.org>
+
+[ Upstream commit 04c55383fa5689357bcdd2c8036725a55ed632bc ]
+
+In the event of a failure in tcf_change_indev(), u32_set_parms() will
+immediately return without decrementing the recently incremented
+reference counter.  If this happens enough times, the counter will
+roll over and the reference will be freed, leading to a double free
+which can be used to do 'bad things'.
+
+In order to prevent this, move the point of possible failure above the
+point where the reference counter is incremented.  Also save any
+meaningful return values to be applied to the return data at the
+appropriate point in time.
+
+This issue was caught with KASAN.
+
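+As a generic illustration of the pattern applied here (userspace
+sketch, not the kernel code): resolve everything that can fail before
+any reference is taken, then apply the saved result once failure is no
+longer possible:
+
+    #include <stdio.h>
+
+    struct obj {
+            int refcnt;
+            int ifindex;
+    };
+
+    /* Stand-in for tcf_change_indev(): may fail. */
+    static int resolve_ifindex(int fail)
+    {
+            return fail ? -1 : 42;
+    }
+
+    static int obj_update(struct obj *o, int fail)
+    {
+            int ifindex = resolve_ifindex(fail); /* fallible step first */
+
+            if (ifindex < 0)
+                    return -1;  /* nothing was taken, nothing leaks */
+
+            o->refcnt++;        /* safe: no failure path remains below */
+            o->ifindex = ifindex;
+            return 0;
+    }
+
+    int main(void)
+    {
+            struct obj o = { 0 };
+
+            if (obj_update(&o, 1))
+                    printf("failed, refcnt still %d\n", o.refcnt);
+            obj_update(&o, 0);
+            printf("ok, refcnt %d ifindex %d\n", o.refcnt, o.ifindex);
+            return 0;
+    }
+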
+Fixes: 705c7091262d ("net: sched: cls_u32: no need to call tcf_exts_change for newly allocated struct")
+Suggested-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Lee Jones <lee@kernel.org>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/cls_u32.c | 18 ++++++++++--------
+ 1 file changed, 10 insertions(+), 8 deletions(-)
+
+diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
+index 4e2e269f121f8..d15d50de79802 100644
+--- a/net/sched/cls_u32.c
++++ b/net/sched/cls_u32.c
+@@ -718,13 +718,19 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp,
+                        struct nlattr *est, u32 flags, u32 fl_flags,
+                        struct netlink_ext_ack *extack)
+ {
+-      int err;
++      int err, ifindex = -1;
+       err = tcf_exts_validate_ex(net, tp, tb, est, &n->exts, flags,
+                                  fl_flags, extack);
+       if (err < 0)
+               return err;
++      if (tb[TCA_U32_INDEV]) {
++              ifindex = tcf_change_indev(net, tb[TCA_U32_INDEV], extack);
++              if (ifindex < 0)
++                      return -EINVAL;
++      }
++
+       if (tb[TCA_U32_LINK]) {
+               u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
+               struct tc_u_hnode *ht_down = NULL, *ht_old;
+@@ -759,13 +765,9 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp,
+               tcf_bind_filter(tp, &n->res, base);
+       }
+-      if (tb[TCA_U32_INDEV]) {
+-              int ret;
+-              ret = tcf_change_indev(net, tb[TCA_U32_INDEV], extack);
+-              if (ret < 0)
+-                      return -EINVAL;
+-              n->ifindex = ret;
+-      }
++      if (ifindex >= 0)
++              n->ifindex = ifindex;
++
+       return 0;
+ }
+-- 
+2.39.2
+
diff --git a/queue-6.3/net-sched-qdisc_destroy-old-ingress-and-clsact-qdisc.patch b/queue-6.3/net-sched-qdisc_destroy-old-ingress-and-clsact-qdisc.patch
new file mode 100644 (file)
index 0000000..c6ec305
--- /dev/null
@@ -0,0 +1,246 @@
+From 306013122daf961601cd0bd6e22881e89c5a42be Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 10 Jun 2023 20:30:25 -0700
+Subject: net/sched: qdisc_destroy() old ingress and clsact Qdiscs before
+ grafting
+
+From: Peilin Ye <peilin.ye@bytedance.com>
+
+[ Upstream commit 84ad0af0bccd3691cb951c2974c5cb2c10594d4a ]
+
+mini_Qdisc_pair::p_miniq is a double pointer to mini_Qdisc, initialized
+in ingress_init() to point to net_device::miniq_ingress.  ingress Qdiscs
+access this per-net_device pointer in mini_qdisc_pair_swap().  Similar
+for clsact Qdiscs and miniq_egress.
+
+Unfortunately, after introducing RTNL-unlocked RTM_{NEW,DEL,GET}TFILTER
+requests (thanks Hillf Danton for the hint), when replacing ingress or
+clsact Qdiscs, for example, the old Qdisc ("@old") could access the same
+miniq_{in,e}gress pointer(s) concurrently with the new Qdisc ("@new"),
+causing race conditions [1] including a use-after-free bug in
+mini_qdisc_pair_swap() reported by syzbot:
+
+ BUG: KASAN: slab-use-after-free in mini_qdisc_pair_swap+0x1c2/0x1f0 net/sched/sch_generic.c:1573
+ Write of size 8 at addr ffff888045b31308 by task syz-executor690/14901
+...
+ Call Trace:
+  <TASK>
+  __dump_stack lib/dump_stack.c:88 [inline]
+  dump_stack_lvl+0xd9/0x150 lib/dump_stack.c:106
+  print_address_description.constprop.0+0x2c/0x3c0 mm/kasan/report.c:319
+  print_report mm/kasan/report.c:430 [inline]
+  kasan_report+0x11c/0x130 mm/kasan/report.c:536
+  mini_qdisc_pair_swap+0x1c2/0x1f0 net/sched/sch_generic.c:1573
+  tcf_chain_head_change_item net/sched/cls_api.c:495 [inline]
+  tcf_chain0_head_change.isra.0+0xb9/0x120 net/sched/cls_api.c:509
+  tcf_chain_tp_insert net/sched/cls_api.c:1826 [inline]
+  tcf_chain_tp_insert_unique net/sched/cls_api.c:1875 [inline]
+  tc_new_tfilter+0x1de6/0x2290 net/sched/cls_api.c:2266
+...
+
+@old and @new should not affect each other.  In other words, @old should
+never modify miniq_{in,e}gress after @new, and @new should not update
+@old's RCU state.
+
+Fixing without changing sch_api.c turned out to be difficult (please
+refer to Closes: for discussions).  Instead, make sure @new's first call
+always happens after @old's last call (in {ingress,clsact}_destroy()) has
+finished:
+
+In qdisc_graft(), return -EBUSY if @old has any ongoing filter requests,
+and call qdisc_destroy() for @old before grafting @new.
+
+Introduce qdisc_refcount_dec_if_one() as the counterpart of
+qdisc_refcount_inc_nz() used for filter requests.  Introduce a
+non-static version of qdisc_destroy() that does a TCQ_F_BUILTIN check,
+just like qdisc_put() etc.
+
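+For illustration only, a userspace sketch of dec-if-one semantics (the
+helper below is hypothetical, not the kernel implementation): the count
+drops to zero only when no other user currently holds a reference:
+
+    #include <stdatomic.h>
+    #include <stdbool.h>
+    #include <stdio.h>
+
+    /* Succeed (and reach zero) only when the count is exactly 1. */
+    static bool dec_if_one(atomic_int *ref)
+    {
+            int expected = 1;
+
+            return atomic_compare_exchange_strong(ref, &expected, 0);
+    }
+
+    int main(void)
+    {
+            atomic_int ref;
+            bool ok;
+
+            atomic_init(&ref, 2);
+            ok = dec_if_one(&ref);     /* fails: another user holds a ref */
+            printf("first try: %d, ref=%d\n", ok, atomic_load(&ref));
+
+            atomic_fetch_sub(&ref, 1); /* the other user drops its ref */
+            ok = dec_if_one(&ref);     /* succeeds, count reaches zero */
+            printf("second try: %d, ref=%d\n", ok, atomic_load(&ref));
+            return 0;
+    }
+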
+Depends on patch "net/sched: Refactor qdisc_graft() for ingress and
+clsact Qdiscs".
+
+[1] To illustrate, the syzkaller reproducer adds ingress Qdiscs under
+TC_H_ROOT (no longer possible after commit c7cfbd115001 ("net/sched:
+sch_ingress: Only create under TC_H_INGRESS")) on eth0 that has 8
+transmission queues:
+
+  Thread 1 creates ingress Qdisc A (containing mini Qdisc a1 and a2),
+  then adds a flower filter X to A.
+
+  Thread 2 creates another ingress Qdisc B (containing mini Qdisc b1 and
+  b2) to replace A, then adds a flower filter Y to B.
+
+ Thread 1               A's refcnt   Thread 2
+  RTM_NEWQDISC (A, RTNL-locked)
+   qdisc_create(A)               1
+   qdisc_graft(A)                9
+
+  RTM_NEWTFILTER (X, RTNL-unlocked)
+   __tcf_qdisc_find(A)          10
+   tcf_chain0_head_change(A)
+   mini_qdisc_pair_swap(A) (1st)
+            |
+            |                         RTM_NEWQDISC (B, RTNL-locked)
+         RCU sync                2     qdisc_graft(B)
+            |                    1     notify_and_destroy(A)
+            |
+   tcf_block_release(A)          0    RTM_NEWTFILTER (Y, RTNL-unlocked)
+   qdisc_destroy(A)                    tcf_chain0_head_change(B)
+   tcf_chain0_head_change_cb_del(A)    mini_qdisc_pair_swap(B) (2nd)
+   mini_qdisc_pair_swap(A) (3rd)                |
+           ...                                 ...
+
+Here, B calls mini_qdisc_pair_swap(), pointing eth0->miniq_ingress to
+its mini Qdisc, b1.  Then, A calls mini_qdisc_pair_swap() again during
+ingress_destroy(), setting eth0->miniq_ingress to NULL, so ingress
+packets on eth0 will not find filter Y in sch_handle_ingress().
+
+This is just one of the possible consequences of concurrently accessing
+miniq_{in,e}gress pointers.
+
+Fixes: 7a096d579e8e ("net: sched: ingress: set 'unlocked' flag for Qdisc ops")
+Fixes: 87f373921c4e ("net: sched: ingress: set 'unlocked' flag for clsact Qdisc ops")
+Reported-by: syzbot+b53a9c0d1ea4ad62da8b@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/r/0000000000006cf87705f79acf1a@google.com/
+Cc: Hillf Danton <hdanton@sina.com>
+Cc: Vlad Buslov <vladbu@mellanox.com>
+Signed-off-by: Peilin Ye <peilin.ye@bytedance.com>
+Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/sch_generic.h |  8 ++++++++
+ net/sched/sch_api.c       | 28 +++++++++++++++++++++++-----
+ net/sched/sch_generic.c   | 14 +++++++++++---
+ 3 files changed, 42 insertions(+), 8 deletions(-)
+
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index 27271f2b37cb3..12eadecf8cd05 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -137,6 +137,13 @@ static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
+       refcount_inc(&qdisc->refcnt);
+ }
++static inline bool qdisc_refcount_dec_if_one(struct Qdisc *qdisc)
++{
++      if (qdisc->flags & TCQ_F_BUILTIN)
++              return true;
++      return refcount_dec_if_one(&qdisc->refcnt);
++}
++
+ /* Intended to be used by unlocked users, when concurrent qdisc release is
+  * possible.
+  */
+@@ -652,6 +659,7 @@ void dev_deactivate_many(struct list_head *head);
+ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
+                             struct Qdisc *qdisc);
+ void qdisc_reset(struct Qdisc *qdisc);
++void qdisc_destroy(struct Qdisc *qdisc);
+ void qdisc_put(struct Qdisc *qdisc);
+ void qdisc_put_unlocked(struct Qdisc *qdisc);
+ void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len);
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index 5fa9ddd629f95..3f7311529cc00 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -1084,10 +1084,22 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
+               if ((q && q->flags & TCQ_F_INGRESS) ||
+                   (new && new->flags & TCQ_F_INGRESS)) {
+                       ingress = 1;
+-                      if (!dev_ingress_queue(dev)) {
++                      dev_queue = dev_ingress_queue(dev);
++                      if (!dev_queue) {
+                               NL_SET_ERR_MSG(extack, "Device does not have an ingress queue");
+                               return -ENOENT;
+                       }
++
++                      q = rtnl_dereference(dev_queue->qdisc_sleeping);
++
++                      /* This is the counterpart of that qdisc_refcount_inc_nz() call in
++                       * __tcf_qdisc_find() for filter requests.
++                       */
++                      if (!qdisc_refcount_dec_if_one(q)) {
++                              NL_SET_ERR_MSG(extack,
++                                             "Current ingress or clsact Qdisc has ongoing filter requests");
++                              return -EBUSY;
++                      }
+               }
+               if (dev->flags & IFF_UP)
+@@ -1108,8 +1120,16 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
+                               qdisc_put(old);
+                       }
+               } else {
+-                      dev_queue = dev_ingress_queue(dev);
+-                      old = dev_graft_qdisc(dev_queue, new);
++                      old = dev_graft_qdisc(dev_queue, NULL);
++
++                      /* {ingress,clsact}_destroy() @old before grafting @new to avoid
++                       * unprotected concurrent accesses to net_device::miniq_{in,e}gress
++                       * pointer(s) in mini_qdisc_pair_swap().
++                       */
++                      qdisc_notify(net, skb, n, classid, old, new, extack);
++                      qdisc_destroy(old);
++
++                      dev_graft_qdisc(dev_queue, new);
+               }
+ skip:
+@@ -1123,8 +1143,6 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
+                       if (new && new->ops->attach)
+                               new->ops->attach(new);
+-              } else {
+-                      notify_and_destroy(net, skb, n, classid, old, new, extack);
+               }
+               if (dev->flags & IFF_UP)
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index ee43e8ac039ed..a5693e25b2482 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -1046,7 +1046,7 @@ static void qdisc_free_cb(struct rcu_head *head)
+       qdisc_free(q);
+ }
+-static void qdisc_destroy(struct Qdisc *qdisc)
++static void __qdisc_destroy(struct Qdisc *qdisc)
+ {
+       const struct Qdisc_ops  *ops = qdisc->ops;
+@@ -1070,6 +1070,14 @@ static void qdisc_destroy(struct Qdisc *qdisc)
+       call_rcu(&qdisc->rcu, qdisc_free_cb);
+ }
++void qdisc_destroy(struct Qdisc *qdisc)
++{
++      if (qdisc->flags & TCQ_F_BUILTIN)
++              return;
++
++      __qdisc_destroy(qdisc);
++}
++
+ void qdisc_put(struct Qdisc *qdisc)
+ {
+       if (!qdisc)
+@@ -1079,7 +1087,7 @@ void qdisc_put(struct Qdisc *qdisc)
+           !refcount_dec_and_test(&qdisc->refcnt))
+               return;
+-      qdisc_destroy(qdisc);
++      __qdisc_destroy(qdisc);
+ }
+ EXPORT_SYMBOL(qdisc_put);
+@@ -1094,7 +1102,7 @@ void qdisc_put_unlocked(struct Qdisc *qdisc)
+           !refcount_dec_and_rtnl_lock(&qdisc->refcnt))
+               return;
+-      qdisc_destroy(qdisc);
++      __qdisc_destroy(qdisc);
+       rtnl_unlock();
+ }
+ EXPORT_SYMBOL(qdisc_put_unlocked);
+-- 
+2.39.2
+
diff --git a/queue-6.3/net-sched-refactor-qdisc_graft-for-ingress-and-clsac.patch b/queue-6.3/net-sched-refactor-qdisc_graft-for-ingress-and-clsac.patch
new file mode 100644 (file)
index 0000000..be664c1
--- /dev/null
@@ -0,0 +1,73 @@
+From 957dd9dd1ca8618a46ba1cf91e5b29bb106d62b8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 10 Jun 2023 20:30:15 -0700
+Subject: net/sched: Refactor qdisc_graft() for ingress and clsact Qdiscs
+
+From: Peilin Ye <peilin.ye@bytedance.com>
+
+[ Upstream commit 2d5f6a8d7aef7852a9ecc555f88c673a1c91754f ]
+
+Grafting ingress and clsact Qdiscs does not need a for-loop in
+qdisc_graft().  Refactor it.  No functional changes intended.
+
+Tested-by: Pedro Tammela <pctammela@mojatatu.com>
+Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Reviewed-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Reviewed-by: Vlad Buslov <vladbu@nvidia.com>
+Signed-off-by: Peilin Ye <peilin.ye@bytedance.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Stable-dep-of: 84ad0af0bccd ("net/sched: qdisc_destroy() old ingress and clsact Qdiscs before grafting")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_api.c | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index b2a63d697a4aa..5fa9ddd629f95 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -1077,12 +1077,12 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
+       if (parent == NULL) {
+               unsigned int i, num_q, ingress;
++              struct netdev_queue *dev_queue;
+               ingress = 0;
+               num_q = dev->num_tx_queues;
+               if ((q && q->flags & TCQ_F_INGRESS) ||
+                   (new && new->flags & TCQ_F_INGRESS)) {
+-                      num_q = 1;
+                       ingress = 1;
+                       if (!dev_ingress_queue(dev)) {
+                               NL_SET_ERR_MSG(extack, "Device does not have an ingress queue");
+@@ -1098,18 +1098,18 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
+               if (new && new->ops->attach && !ingress)
+                       goto skip;
+-              for (i = 0; i < num_q; i++) {
+-                      struct netdev_queue *dev_queue = dev_ingress_queue(dev);
+-
+-                      if (!ingress)
++              if (!ingress) {
++                      for (i = 0; i < num_q; i++) {
+                               dev_queue = netdev_get_tx_queue(dev, i);
++                              old = dev_graft_qdisc(dev_queue, new);
+-                      old = dev_graft_qdisc(dev_queue, new);
+-                      if (new && i > 0)
+-                              qdisc_refcount_inc(new);
+-
+-                      if (!ingress)
++                              if (new && i > 0)
++                                      qdisc_refcount_inc(new);
+                               qdisc_put(old);
++                      }
++              } else {
++                      dev_queue = dev_ingress_queue(dev);
++                      old = dev_graft_qdisc(dev_queue, new);
+               }
+ skip:
+-- 
+2.39.2
+
diff --git a/queue-6.3/net-sched-taprio-fix-slab-out-of-bounds-read-in-tapr.patch b/queue-6.3/net-sched-taprio-fix-slab-out-of-bounds-read-in-tapr.patch
new file mode 100644 (file)
index 0000000..c751302
--- /dev/null
@@ -0,0 +1,50 @@
+From f6ee4d45cb95442ed7d9072a6be6ae8e23fdbe02 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Jun 2023 14:27:56 +0800
+Subject: net/sched: taprio: fix slab-out-of-bounds Read in
+ taprio_dequeue_from_txq
+
+From: Zhengchao Shao <shaozhengchao@huawei.com>
+
+[ Upstream commit be3618d9651002cd5ff190dbfc6cf78f03e34e27 ]
+
+As shown in [1], out-of-bounds access occurs in two cases:
+1) When a taprio qdisc is used to replace a previously configured
+taprio, count and offset in tc_to_txq can be set to 0. In this case,
+the value of *txq in taprio_next_tc_txq() keeps increasing. Once the
+number of accessed queues exceeds the number of queues on the device,
+out-of-bounds access occurs.
+2) When packets are dequeued, taprio can be deleted. In this case, the
+tc rule of the device is cleared and the count and offset values are
+also set to 0, which likewise leads to out-of-bounds access.
+
+Add a restriction on the queue number to fix this.
+
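+Illustrative sketch only (not the kernel code): the added restriction
+amounts to clamping the per-TC TX queue cursor back to the first queue
+of the traffic class whenever it would run past the device's queue
+count:
+
+    #include <stdio.h>
+
+    /* Hypothetical cursor update: wrap back to first_txq instead of
+     * walking past num_tx_queues.
+     */
+    static unsigned int next_txq(unsigned int cur, unsigned int first_txq,
+                                 unsigned int num_tx_queues)
+    {
+            cur++;
+            if (cur >= num_tx_queues)
+                    cur = first_txq;
+            return cur;
+    }
+
+    int main(void)
+    {
+            unsigned int txq = 6;
+
+            for (int i = 0; i < 4; i++) {
+                    txq = next_txq(txq, 4, 8); /* 8 queues, TC starts at 4 */
+                    printf("txq=%u\n", txq);
+            }
+            return 0;
+    }
+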
+[1] https://groups.google.com/g/syzkaller-bugs/c/_lYOKgkBVMg
+Fixes: 2f530df76c8c ("net/sched: taprio: give higher priority to higher TCs in software dequeue mode")
+Reported-by: syzbot+04afcb3d2c840447559a@syzkaller.appspotmail.com
+Signed-off-by: Zhengchao Shao <shaozhengchao@huawei.com>
+Tested-by: Pedro Tammela <pctammela@mojatatu.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_taprio.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
+index a6cf56a969421..7190482b52e05 100644
+--- a/net/sched/sch_taprio.c
++++ b/net/sched/sch_taprio.c
+@@ -795,6 +795,9 @@ static struct sk_buff *taprio_dequeue_tc_priority(struct Qdisc *sch,
+                       taprio_next_tc_txq(dev, tc, &q->cur_txq[tc]);
++                      if (q->cur_txq[tc] >= dev->num_tx_queues)
++                              q->cur_txq[tc] = first_txq;
++
+                       if (skb)
+                               return skb;
+               } while (q->cur_txq[tc] != first_txq);
+-- 
+2.39.2
+
diff --git a/queue-6.3/net-tipc-resize-nlattr-array-to-correct-size.patch b/queue-6.3/net-tipc-resize-nlattr-array-to-correct-size.patch
new file mode 100644 (file)
index 0000000..fed802d
--- /dev/null
@@ -0,0 +1,51 @@
+From 79b4087dd0eeb78548d11da0a3843e0858551f93 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Jun 2023 20:06:04 +0800
+Subject: net: tipc: resize nlattr array to correct size
+
+From: Lin Ma <linma@zju.edu.cn>
+
+[ Upstream commit 44194cb1b6045dea33ae9a0d54fb7e7cd93a2e09 ]
+
+According to nla_parse_nested_deprecated(), tb[] is supposed to be the
+destination array with maxtype+1 elements. The current
+tipc_nl_media_get() and __tipc_nl_media_set() use a larger array than
+necessary. This patch resizes them to the proper size.
+
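+For illustration only (the enum below is made up, not the TIPC one):
+a destination array parsed up to attribute type MAX needs MAX + 1
+entries, because entries are indexed directly by attribute type,
+including type 0:
+
+    #include <stdio.h>
+
+    enum {
+            MEDIA_UNSPEC,
+            MEDIA_NAME,
+            MEDIA_PROP,
+            __MEDIA_MAX,
+    };
+    #define MEDIA_MAX (__MEDIA_MAX - 1)
+
+    int main(void)
+    {
+            /* indices 0..MEDIA_MAX, hence MEDIA_MAX + 1 slots */
+            const char *tb[MEDIA_MAX + 1] = { 0 };
+
+            tb[MEDIA_NAME] = "eth";
+            tb[MEDIA_PROP] = "mtu";
+            for (int type = 0; type <= MEDIA_MAX; type++)
+                    printf("tb[%d] = %s\n", type,
+                           tb[type] ? tb[type] : "(unset)");
+            return 0;
+    }
+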
+Fixes: 1e55417d8fc6 ("tipc: add media set to new netlink api")
+Fixes: 46f15c6794fb ("tipc: add media get/dump to new netlink api")
+Signed-off-by: Lin Ma <linma@zju.edu.cn>
+Reviewed-by: Florian Westphal <fw@strlen.de>
+Reviewed-by: Tung Nguyen <tung.q.nguyen@dektech.com.au>
+Link: https://lore.kernel.org/r/20230614120604.1196377-1-linma@zju.edu.cn
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/tipc/bearer.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
+index 53881406e2006..cdcd2731860ba 100644
+--- a/net/tipc/bearer.c
++++ b/net/tipc/bearer.c
+@@ -1258,7 +1258,7 @@ int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info)
+       struct tipc_nl_msg msg;
+       struct tipc_media *media;
+       struct sk_buff *rep;
+-      struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
++      struct nlattr *attrs[TIPC_NLA_MEDIA_MAX + 1];
+       if (!info->attrs[TIPC_NLA_MEDIA])
+               return -EINVAL;
+@@ -1307,7 +1307,7 @@ int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
+       int err;
+       char *name;
+       struct tipc_media *m;
+-      struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
++      struct nlattr *attrs[TIPC_NLA_MEDIA_MAX + 1];
+       if (!info->attrs[TIPC_NLA_MEDIA])
+               return -EINVAL;
+-- 
+2.39.2
+
diff --git a/queue-6.3/netfilter-nf_tables-incorrect-error-path-handling-wi.patch b/queue-6.3/netfilter-nf_tables-incorrect-error-path-handling-wi.patch
new file mode 100644 (file)
index 0000000..116f2aa
--- /dev/null
@@ -0,0 +1,76 @@
+From 5190dcedae3027758c489aa26ad1743522b77ee2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Jun 2023 02:32:02 +0200
+Subject: netfilter: nf_tables: incorrect error path handling with
+ NFT_MSG_NEWRULE
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit 1240eb93f0616b21c675416516ff3d74798fdc97 ]
+
+In case of error when adding a new rule that refers to an anonymous set,
+deactivate expressions via NFT_TRANS_PREPARE state, not NFT_TRANS_RELEASE.
+Thus, the lookup expression marks anonymous sets as inactive in the next
+generation to ensure they are no longer reachable in this transaction
+and decrements the set refcount as introduced by c1592a89942e ("netfilter:
+nf_tables: deactivate anonymous set from preparation phase"). The abort
+step takes care of undoing the anonymous set.
+
+This is also consistent with rule deletion, where NFT_TRANS_PREPARE is
+used. Note that this error path is exercised in the preparation step of
+the commit protocol. This patch replaces nf_tables_rule_release() by the
+deactivate and destroy calls, this time with NFT_TRANS_PREPARE.
+
+Due to this incorrect error handling, it is possible to access a
+dangling pointer to the anonymous set that remains in the transaction
+list.
+
+[1009.379054] BUG: KASAN: use-after-free in nft_set_lookup_global+0x147/0x1a0 [nf_tables]
+[1009.379106] Read of size 8 at addr ffff88816c4c8020 by task nft-rule-add/137110
+[1009.379116] CPU: 7 PID: 137110 Comm: nft-rule-add Not tainted 6.4.0-rc4+ #256
+[1009.379128] Call Trace:
+[1009.379132]  <TASK>
+[1009.379135]  dump_stack_lvl+0x33/0x50
+[1009.379146]  ? nft_set_lookup_global+0x147/0x1a0 [nf_tables]
+[1009.379191]  print_address_description.constprop.0+0x27/0x300
+[1009.379201]  kasan_report+0x107/0x120
+[1009.379210]  ? nft_set_lookup_global+0x147/0x1a0 [nf_tables]
+[1009.379255]  nft_set_lookup_global+0x147/0x1a0 [nf_tables]
+[1009.379302]  nft_lookup_init+0xa5/0x270 [nf_tables]
+[1009.379350]  nf_tables_newrule+0x698/0xe50 [nf_tables]
+[1009.379397]  ? nf_tables_rule_release+0xe0/0xe0 [nf_tables]
+[1009.379441]  ? kasan_unpoison+0x23/0x50
+[1009.379450]  nfnetlink_rcv_batch+0x97c/0xd90 [nfnetlink]
+[1009.379470]  ? nfnetlink_rcv_msg+0x480/0x480 [nfnetlink]
+[1009.379485]  ? __alloc_skb+0xb8/0x1e0
+[1009.379493]  ? __alloc_skb+0xb8/0x1e0
+[1009.379502]  ? entry_SYSCALL_64_after_hwframe+0x46/0xb0
+[1009.379509]  ? unwind_get_return_address+0x2a/0x40
+[1009.379517]  ? write_profile+0xc0/0xc0
+[1009.379524]  ? avc_lookup+0x8f/0xc0
+[1009.379532]  ? __rcu_read_unlock+0x43/0x60
+
+Fixes: 958bee14d071 ("netfilter: nf_tables: use new transaction infrastructure to handle sets")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_tables_api.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index f90b1113e5ecc..8f63514656a17 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -3781,7 +3781,8 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
+       if (flow)
+               nft_flow_rule_destroy(flow);
+ err_release_rule:
+-      nf_tables_rule_release(&ctx, rule);
++      nft_rule_expr_deactivate(&ctx, rule, NFT_TRANS_PREPARE);
++      nf_tables_rule_destroy(&ctx, rule);
+ err_release_expr:
+       for (i = 0; i < n; i++) {
+               if (expr_info[i].ops) {
+-- 
+2.39.2
+
diff --git a/queue-6.3/netfilter-nf_tables-integrate-pipapo-into-commit-pro.patch b/queue-6.3/netfilter-nf_tables-integrate-pipapo-into-commit-pro.patch
new file mode 100644 (file)
index 0000000..de96340
--- /dev/null
@@ -0,0 +1,316 @@
+From 9f316fd6f71c732a6f33e96b5ad4cf35f6c0d31f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Jun 2023 11:55:42 +0200
+Subject: netfilter: nf_tables: integrate pipapo into commit protocol
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit 212ed75dc5fb9d1423b3942c8f872a868cda3466 ]
+
+The pipapo set backend follows a copy-on-update approach, maintaining
+one clone of the existing data structure that is being updated. The
+clone and the current data structure are swapped via RCU from the
+commit step.
+
+The existing integration with the commit protocol is flawed because
+there is no operation to clean up the clone if the transaction is
+aborted. Moreover, the datastructure swap happens on set element
+activation.
+
+This patch adds two new operations for sets, commit and abort. These new
+operations are invoked from the commit and abort steps, after the
+transactions have been digested, and the pipapo set backend is updated
+to use them.
+
+This patch also adds a new ->pending_update field to sets to maintain a
+list of the sets that require these new commit and abort operations.
+
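+An illustrative userspace sketch of the copy-on-update idea (this is
+not the nft_set_pipapo code; names are made up): writers stage changes
+in a clone, commit() publishes the clone, and abort() discards it by
+re-cloning the published copy:
+
+    #include <stdio.h>
+    #include <string.h>
+
+    #define N 2
+
+    /* Two buffers: 'cur' indexes the published copy, the other one is
+     * the working clone that insertions/deletions modify.
+     */
+    struct cow_set {
+            int buf[2][N];
+            int cur;    /* index of the published copy */
+            int dirty;
+    };
+
+    static int *clone_of(struct cow_set *s)
+    {
+            return s->buf[!s->cur];
+    }
+
+    static void cow_commit(struct cow_set *s)
+    {
+            if (!s->dirty)
+                    return;
+            s->cur = !s->cur;                  /* publish the clone */
+            memcpy(clone_of(s), s->buf[s->cur], sizeof(s->buf[0]));
+            s->dirty = 0;                      /* fresh working copy */
+    }
+
+    static void cow_abort(struct cow_set *s)
+    {
+            if (!s->dirty)
+                    return;
+            memcpy(clone_of(s), s->buf[s->cur], sizeof(s->buf[0]));
+            s->dirty = 0;                      /* discard staged changes */
+    }
+
+    int main(void)
+    {
+            struct cow_set s = { .buf = { { 1, 2 }, { 1, 2 } } };
+
+            clone_of(&s)[0] = 99; s.dirty = 1;
+            cow_abort(&s);                     /* 99 never published */
+            printf("after abort:  %d\n", s.buf[s.cur][0]);
+
+            clone_of(&s)[0] = 7; s.dirty = 1;
+            cow_commit(&s);
+            printf("after commit: %d\n", s.buf[s.cur][0]);
+            return 0;
+    }
+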
+Fixes: 3c4287f62044 ("nf_tables: Add set type for arbitrary concatenation of ranges")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/netfilter/nf_tables.h |  4 ++-
+ net/netfilter/nf_tables_api.c     | 56 +++++++++++++++++++++++++++++++
+ net/netfilter/nft_set_pipapo.c    | 55 +++++++++++++++++++++---------
+ 3 files changed, 99 insertions(+), 16 deletions(-)
+
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index 3eb7d20ddfc97..300bce4d67cec 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -462,7 +462,8 @@ struct nft_set_ops {
+                                              const struct nft_set *set,
+                                              const struct nft_set_elem *elem,
+                                              unsigned int flags);
+-
++      void                            (*commit)(const struct nft_set *set);
++      void                            (*abort)(const struct nft_set *set);
+       u64                             (*privsize)(const struct nlattr * const nla[],
+                                                   const struct nft_set_desc *desc);
+       bool                            (*estimate)(const struct nft_set_desc *desc,
+@@ -557,6 +558,7 @@ struct nft_set {
+       u16                             policy;
+       u16                             udlen;
+       unsigned char                   *udata;
++      struct list_head                pending_update;
+       /* runtime data below here */
+       const struct nft_set_ops        *ops ____cacheline_aligned;
+       u16                             flags:14,
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 368aeabd8f8f1..f90b1113e5ecc 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -4850,6 +4850,7 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
+       set->num_exprs = num_exprs;
+       set->handle = nf_tables_alloc_handle(table);
++      INIT_LIST_HEAD(&set->pending_update);
+       err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
+       if (err < 0)
+@@ -9190,10 +9191,25 @@ static void nf_tables_commit_audit_log(struct list_head *adl, u32 generation)
+       }
+ }
++static void nft_set_commit_update(struct list_head *set_update_list)
++{
++      struct nft_set *set, *next;
++
++      list_for_each_entry_safe(set, next, set_update_list, pending_update) {
++              list_del_init(&set->pending_update);
++
++              if (!set->ops->commit)
++                      continue;
++
++              set->ops->commit(set);
++      }
++}
++
+ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+ {
+       struct nftables_pernet *nft_net = nft_pernet(net);
+       struct nft_trans *trans, *next;
++      LIST_HEAD(set_update_list);
+       struct nft_trans_elem *te;
+       struct nft_chain *chain;
+       struct nft_table *table;
+@@ -9359,6 +9375,11 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+                       nf_tables_setelem_notify(&trans->ctx, te->set,
+                                                &te->elem,
+                                                NFT_MSG_NEWSETELEM);
++                      if (te->set->ops->commit &&
++                          list_empty(&te->set->pending_update)) {
++                              list_add_tail(&te->set->pending_update,
++                                            &set_update_list);
++                      }
+                       nft_trans_destroy(trans);
+                       break;
+               case NFT_MSG_DELSETELEM:
+@@ -9373,6 +9394,11 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+                               atomic_dec(&te->set->nelems);
+                               te->set->ndeact--;
+                       }
++                      if (te->set->ops->commit &&
++                          list_empty(&te->set->pending_update)) {
++                              list_add_tail(&te->set->pending_update,
++                                            &set_update_list);
++                      }
+                       break;
+               case NFT_MSG_NEWOBJ:
+                       if (nft_trans_obj_update(trans)) {
+@@ -9435,6 +9461,8 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+               }
+       }
++      nft_set_commit_update(&set_update_list);
++
+       nft_commit_notify(net, NETLINK_CB(skb).portid);
+       nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN);
+       nf_tables_commit_audit_log(&adl, nft_net->base_seq);
+@@ -9494,10 +9522,25 @@ static void nf_tables_abort_release(struct nft_trans *trans)
+       kfree(trans);
+ }
++static void nft_set_abort_update(struct list_head *set_update_list)
++{
++      struct nft_set *set, *next;
++
++      list_for_each_entry_safe(set, next, set_update_list, pending_update) {
++              list_del_init(&set->pending_update);
++
++              if (!set->ops->abort)
++                      continue;
++
++              set->ops->abort(set);
++      }
++}
++
+ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ {
+       struct nftables_pernet *nft_net = nft_pernet(net);
+       struct nft_trans *trans, *next;
++      LIST_HEAD(set_update_list);
+       struct nft_trans_elem *te;
+       if (action == NFNL_ABORT_VALIDATE &&
+@@ -9602,6 +9645,12 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+                       nft_setelem_remove(net, te->set, &te->elem);
+                       if (!nft_setelem_is_catchall(te->set, &te->elem))
+                               atomic_dec(&te->set->nelems);
++
++                      if (te->set->ops->abort &&
++                          list_empty(&te->set->pending_update)) {
++                              list_add_tail(&te->set->pending_update,
++                                            &set_update_list);
++                      }
+                       break;
+               case NFT_MSG_DELSETELEM:
+               case NFT_MSG_DESTROYSETELEM:
+@@ -9612,6 +9661,11 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+                       if (!nft_setelem_is_catchall(te->set, &te->elem))
+                               te->set->ndeact--;
++                      if (te->set->ops->abort &&
++                          list_empty(&te->set->pending_update)) {
++                              list_add_tail(&te->set->pending_update,
++                                            &set_update_list);
++                      }
+                       nft_trans_destroy(trans);
+                       break;
+               case NFT_MSG_NEWOBJ:
+@@ -9654,6 +9708,8 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+               }
+       }
++      nft_set_abort_update(&set_update_list);
++
+       synchronize_rcu();
+       list_for_each_entry_safe_reverse(trans, next,
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index 06d46d1826347..15e451dc3fc46 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -1600,17 +1600,10 @@ static void pipapo_free_fields(struct nft_pipapo_match *m)
+       }
+ }
+-/**
+- * pipapo_reclaim_match - RCU callback to free fields from old matching data
+- * @rcu:      RCU head
+- */
+-static void pipapo_reclaim_match(struct rcu_head *rcu)
++static void pipapo_free_match(struct nft_pipapo_match *m)
+ {
+-      struct nft_pipapo_match *m;
+       int i;
+-      m = container_of(rcu, struct nft_pipapo_match, rcu);
+-
+       for_each_possible_cpu(i)
+               kfree(*per_cpu_ptr(m->scratch, i));
+@@ -1625,7 +1618,19 @@ static void pipapo_reclaim_match(struct rcu_head *rcu)
+ }
+ /**
+- * pipapo_commit() - Replace lookup data with current working copy
++ * pipapo_reclaim_match - RCU callback to free fields from old matching data
++ * @rcu:      RCU head
++ */
++static void pipapo_reclaim_match(struct rcu_head *rcu)
++{
++      struct nft_pipapo_match *m;
++
++      m = container_of(rcu, struct nft_pipapo_match, rcu);
++      pipapo_free_match(m);
++}
++
++/**
++ * nft_pipapo_commit() - Replace lookup data with current working copy
+  * @set:      nftables API set representation
+  *
+  * While at it, check if we should perform garbage collection on the working
+@@ -1635,7 +1640,7 @@ static void pipapo_reclaim_match(struct rcu_head *rcu)
+  * We also need to create a new working copy for subsequent insertions and
+  * deletions.
+  */
+-static void pipapo_commit(const struct nft_set *set)
++static void nft_pipapo_commit(const struct nft_set *set)
+ {
+       struct nft_pipapo *priv = nft_set_priv(set);
+       struct nft_pipapo_match *new_clone, *old;
+@@ -1660,6 +1665,26 @@ static void pipapo_commit(const struct nft_set *set)
+       priv->clone = new_clone;
+ }
++static void nft_pipapo_abort(const struct nft_set *set)
++{
++      struct nft_pipapo *priv = nft_set_priv(set);
++      struct nft_pipapo_match *new_clone, *m;
++
++      if (!priv->dirty)
++              return;
++
++      m = rcu_dereference(priv->match);
++
++      new_clone = pipapo_clone(m);
++      if (IS_ERR(new_clone))
++              return;
++
++      priv->dirty = false;
++
++      pipapo_free_match(priv->clone);
++      priv->clone = new_clone;
++}
++
+ /**
+  * nft_pipapo_activate() - Mark element reference as active given key, commit
+  * @net:      Network namespace
+@@ -1667,8 +1692,7 @@ static void pipapo_commit(const struct nft_set *set)
+  * @elem:     nftables API element representation containing key data
+  *
+  * On insertion, elements are added to a copy of the matching data currently
+- * in use for lookups, and not directly inserted into current lookup data, so
+- * we'll take care of that by calling pipapo_commit() here. Both
++ * in use for lookups, and not directly inserted into current lookup data. Both
+  * nft_pipapo_insert() and nft_pipapo_activate() are called once for each
+  * element, hence we can't purpose either one as a real commit operation.
+  */
+@@ -1684,8 +1708,6 @@ static void nft_pipapo_activate(const struct net *net,
+       nft_set_elem_change_active(net, set, &e->ext);
+       nft_set_elem_clear_busy(&e->ext);
+-
+-      pipapo_commit(set);
+ }
+ /**
+@@ -1931,7 +1953,6 @@ static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
+               if (i == m->field_count) {
+                       priv->dirty = true;
+                       pipapo_drop(m, rulemap);
+-                      pipapo_commit(set);
+                       return;
+               }
+@@ -2230,6 +2251,8 @@ const struct nft_set_type nft_set_pipapo_type = {
+               .init           = nft_pipapo_init,
+               .destroy        = nft_pipapo_destroy,
+               .gc_init        = nft_pipapo_gc_init,
++              .commit         = nft_pipapo_commit,
++              .abort          = nft_pipapo_abort,
+               .elemsize       = offsetof(struct nft_pipapo_elem, ext),
+       },
+ };
+@@ -2252,6 +2275,8 @@ const struct nft_set_type nft_set_pipapo_avx2_type = {
+               .init           = nft_pipapo_init,
+               .destroy        = nft_pipapo_destroy,
+               .gc_init        = nft_pipapo_gc_init,
++              .commit         = nft_pipapo_commit,
++              .abort          = nft_pipapo_abort,
+               .elemsize       = offsetof(struct nft_pipapo_elem, ext),
+       },
+ };
+-- 
+2.39.2
+
diff --git a/queue-6.3/netfilter-nfnetlink-skip-error-delivery-on-batch-in-.patch b/queue-6.3/netfilter-nfnetlink-skip-error-delivery-on-batch-in-.patch
new file mode 100644 (file)
index 0000000..8eb4e8b
--- /dev/null
@@ -0,0 +1,36 @@
+From 2d6235e468adef348a0ef6ef2693ebdc08b766a6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Jun 2023 00:19:12 +0200
+Subject: netfilter: nfnetlink: skip error delivery on batch in case of ENOMEM
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit a1a64a151dae8ac3581c1cbde44b672045cb658b ]
+
+If the caller reports ENOMEM, stop iterating over the batch and send a
+single netlink message to userspace to report OOM.
+
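+Illustrative sketch only (not the nfnetlink code): the effect is to
+break out of the batch loop on ENOMEM and report a single OOM error
+instead of queueing one error per remaining message:
+
+    #include <errno.h>
+    #include <stdio.h>
+
+    /* Stand-in for processing one message of a batch. */
+    static int process_msg(int i)
+    {
+            return i == 2 ? -ENOMEM : 0; /* pretend message 2 hits OOM */
+    }
+
+    int main(void)
+    {
+            int queued_errors = 0;
+
+            for (int i = 0; i < 5; i++) {
+                    int err = process_msg(i);
+
+                    if (err == -ENOMEM) {
+                            queued_errors = 1; /* single OOM report */
+                            printf("OOM at message %d, stop replaying\n", i);
+                            break;
+                    }
+                    if (err)
+                            queued_errors++;
+            }
+            printf("errors reported: %d\n", queued_errors);
+            return 0;
+    }
+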
+Fixes: cbb8125eb40b ("netfilter: nfnetlink: deliver netlink errors on batch completion")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nfnetlink.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
+index ae7146475d17a..c9fbe0f707b5f 100644
+--- a/net/netfilter/nfnetlink.c
++++ b/net/netfilter/nfnetlink.c
+@@ -533,7 +533,8 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
+                        * processed, this avoids that the same error is
+                        * reported several times when replaying the batch.
+                        */
+-                      if (nfnl_err_add(&err_list, nlh, err, &extack) < 0) {
++                      if (err == -ENOMEM ||
++                          nfnl_err_add(&err_list, nlh, err, &extack) < 0) {
+                               /* We failed to enqueue an error, reset the
+                                * list of errors and send OOM to userspace
+                                * pointing to the batch header.
+-- 
+2.39.2
+
diff --git a/queue-6.3/octeon_ep-add-missing-check-for-ioremap.patch b/queue-6.3/octeon_ep-add-missing-check-for-ioremap.patch
new file mode 100644 (file)
index 0000000..b031f64
--- /dev/null
@@ -0,0 +1,50 @@
+From 2acd45e0aba6e9bdcc6c588e76cba1a9ecfdef96 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 15 Jun 2023 11:34:00 +0800
+Subject: octeon_ep: Add missing check for ioremap
+
+From: Jiasheng Jiang <jiasheng@iscas.ac.cn>
+
+[ Upstream commit 9a36e2d44d122fe73a2a76ba73f1d50a65cf8210 ]
+
+Add a check for the return value of ioremap() and bail out of the
+device setup if it fails, unmapping the regions that were already
+mapped.
+
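+For illustration only (userspace stand-ins for ioremap()/iounmap()):
+the usual pattern is to check each mapping and, on failure, unwind only
+the regions that were already mapped:
+
+    #include <stdio.h>
+    #include <stdlib.h>
+
+    #define NREGIONS 4
+
+    static void *map_region(int i)
+    {
+            return i == 2 ? NULL : malloc(16); /* pretend region 2 fails */
+    }
+
+    static void unmap_region(void *p)
+    {
+            free(p);
+    }
+
+    int main(void)
+    {
+            void *mmio[NREGIONS] = { 0 };
+            int i;
+
+            for (i = 0; i < NREGIONS; i++) {
+                    mmio[i] = map_region(i);
+                    if (!mmio[i])
+                            goto unmap_prev;
+            }
+            printf("all regions mapped\n");
+            return 0;
+
+    unmap_prev:
+            printf("mapping region %d failed, unwinding\n", i);
+            while (i--)             /* unmap only what was mapped */
+                    unmap_region(mmio[i]);
+            return 1;
+    }
+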
+Fixes: 862cd659a6fb ("octeon_ep: Add driver framework and device initialization")
+Signed-off-by: Jiasheng Jiang <jiasheng@iscas.ac.cn>
+Reviewed-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+Link: https://lore.kernel.org/r/20230615033400.2971-1-jiasheng@iscas.ac.cn
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/marvell/octeon_ep/octep_main.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+index 5a898fb88e375..c70d2500a3634 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+@@ -941,6 +941,9 @@ int octep_device_setup(struct octep_device *oct)
+               oct->mmio[i].hw_addr =
+                       ioremap(pci_resource_start(oct->pdev, i * 2),
+                               pci_resource_len(oct->pdev, i * 2));
++              if (!oct->mmio[i].hw_addr)
++                      goto unmap_prev;
++
+               oct->mmio[i].mapped = 1;
+       }
+@@ -980,7 +983,9 @@ int octep_device_setup(struct octep_device *oct)
+       return 0;
+ unsupported_dev:
+-      for (i = 0; i < OCTEP_MMIO_REGIONS; i++)
++      i = OCTEP_MMIO_REGIONS;
++unmap_prev:
++      while (i--)
+               iounmap(oct->mmio[i].hw_addr);
+       kfree(oct->conf);
+-- 
+2.39.2
+
diff --git a/queue-6.3/octeontx2-af-fix-lbk-link-credits-on-cn10k.patch b/queue-6.3/octeontx2-af-fix-lbk-link-credits-on-cn10k.patch
new file mode 100644 (file)
index 0000000..c389cc5
--- /dev/null
@@ -0,0 +1,41 @@
+From ede85f4f83c61a78dca09fd2b2fbc8e4f63f6e29 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Jun 2023 17:12:01 +0530
+Subject: octeontx2-af: fix lbk link credits on cn10k
+
+From: Nithin Dabilpuram <ndabilpuram@marvell.com>
+
+[ Upstream commit 87e12a17eef476bbf768dc3a74419ad461f36fbc ]
+
+Fix the LBK link credits on CN10K to be the same as on CN9K, i.e.
+16 * MAX_LBK_DATA_RATE, instead of the current scheme of calculating
+them from the LBK buffer length / FIFO size.
+
+Fixes: 6e54e1c5399a ("octeontx2-af: cn10K: Add MTU configuration")
+Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
+Signed-off-by: Naveen Mamindlapalli <naveenm@marvell.com>
+Reviewed-by: Sridhar Samudrala <sridhar.samudrala@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index 1e058b96cbe27..f01d057ad025a 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -4081,10 +4081,6 @@ int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
+ static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
+ {
+-      /* CN10k supports 72KB FIFO size and max packet size of 64k */
+-      if (rvu->hw->lbk_bufsize == 0x12000)
+-              return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16;
+-
+       return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
+ }
+-- 
+2.39.2
+
diff --git a/queue-6.3/octeontx2-af-fix-promiscuous-mode.patch b/queue-6.3/octeontx2-af-fix-promiscuous-mode.patch
new file mode 100644 (file)
index 0000000..28f0112
--- /dev/null
@@ -0,0 +1,112 @@
+From d9d8890d191e8cf22bb71ea45fad9c7b93867af3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Jun 2023 10:46:25 +0530
+Subject: octeontx2-af: Fix promiscuous mode
+
+From: Ratheesh Kannoth <rkannoth@marvell.com>
+
+[ Upstream commit c0e489372a294044feea650b38f38c888eff57a4 ]
+
+CN10KB silicon introduced a new exact match feature,
+which is used for DMAC filtering. The state of installed
+DMAC filters in this exact match table is getting corrupted
+when promiscuous mode is toggled. Fix this by not touching
+Exact match related config when promiscuous mode is toggled.
+
+Fixes: 2dba9459d2c9 ("octeontx2-af: Wrapper functions for MAC addr add/del/update/reset")
+Signed-off-by: Ratheesh Kannoth <rkannoth@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../marvell/octeontx2/af/rvu_npc_hash.c       | 29 ++-----------------
+ 1 file changed, 2 insertions(+), 27 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
+index 51209119f0f2f..9f11c1e407373 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
+@@ -1164,10 +1164,8 @@ static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_i
+ {
+       struct npc_exact_table *table;
+       u16 *cnt, old_cnt;
+-      bool promisc;
+       table = rvu->hw->table;
+-      promisc = table->promisc_mode[drop_mcam_idx];
+       cnt = &table->cnt_cmd_rules[drop_mcam_idx];
+       old_cnt = *cnt;
+@@ -1179,16 +1177,13 @@ static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_i
+       *enable_or_disable_cam = false;
+-      if (promisc)
+-              goto done;
+-
+-      /* If all rules are deleted and not already in promisc mode; disable cam */
++      /* If all rules are deleted, disable cam */
+       if (!*cnt && val < 0) {
+               *enable_or_disable_cam = true;
+               goto done;
+       }
+-      /* If rule got added and not already in promisc mode; enable cam */
++      /* If rule got added, enable cam */
+       if (!old_cnt && val > 0) {
+               *enable_or_disable_cam = true;
+               goto done;
+@@ -1443,7 +1438,6 @@ int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
+       u32 drop_mcam_idx;
+       bool *promisc;
+       bool rc;
+-      u32 cnt;
+       table = rvu->hw->table;
+@@ -1466,17 +1460,8 @@ int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
+               return LMAC_AF_ERR_INVALID_PARAM;
+       }
+       *promisc = false;
+-      cnt = __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 0, NULL);
+       mutex_unlock(&table->lock);
+-      /* If no dmac filter entries configured, disable drop rule */
+-      if (!cnt)
+-              rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
+-      else
+-              rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, !*promisc);
+-
+-      dev_dbg(rvu->dev, "%s: disabled  promisc mode (cgx=%d lmac=%d, cnt=%d)\n",
+-              __func__, cgx_id, lmac_id, cnt);
+       return 0;
+ }
+@@ -1494,7 +1479,6 @@ int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
+       u32 drop_mcam_idx;
+       bool *promisc;
+       bool rc;
+-      u32 cnt;
+       table = rvu->hw->table;
+@@ -1517,17 +1501,8 @@ int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
+               return LMAC_AF_ERR_INVALID_PARAM;
+       }
+       *promisc = true;
+-      cnt = __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 0, NULL);
+       mutex_unlock(&table->lock);
+-      /* If no dmac filter entries configured, disable drop rule */
+-      if (!cnt)
+-              rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
+-      else
+-              rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, !*promisc);
+-
+-      dev_dbg(rvu->dev, "%s: Enabled promisc mode (cgx=%d lmac=%d cnt=%d)\n",
+-              __func__, cgx_id, lmac_id, cnt);
+       return 0;
+ }
+-- 
+2.39.2
+
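The crux of the fix above is that the exact match rule counter update no
longer consults the promiscuous flag at all. A minimal, self-contained C
sketch of the simplified decision (illustrative only; the function name and
types are placeholders, not the kernel's):

    #include <stdbool.h>
    #include <stdint.h>

    /* Decide whether the drop CAM entry must be toggled after a DMAC
     * rule count change: disable it when the last rule is removed and
     * enable it when the first rule is added.  The promiscuous state is
     * deliberately no longer part of this decision.
     */
    static bool cam_needs_toggle(uint16_t old_cnt, uint16_t new_cnt)
    {
            if (old_cnt > 0 && new_cnt == 0)
                    return true;    /* last rule deleted: disable CAM */
            if (old_cnt == 0 && new_cnt > 0)
                    return true;    /* first rule added: enable CAM */
            return false;
    }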
diff --git a/queue-6.3/octeontx2-af-fixed-resource-availability-check.patch b/queue-6.3/octeontx2-af-fixed-resource-availability-check.patch
new file mode 100644 (file)
index 0000000..612d8a8
--- /dev/null
@@ -0,0 +1,41 @@
+From 932ad40a1719cce97c97700f2089895640493625 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Jun 2023 17:12:00 +0530
+Subject: octeontx2-af: fixed resource availability check
+
+From: Satha Rao <skoteshwar@marvell.com>
+
+[ Upstream commit 4e635f9d86165e47f5440196f2ebdb258efb8341 ]
+
+The txschq_alloc response has two different arrays to store contiguous
+and non-contiguous schedulers for each level. The requested count should
+be checked against each array separately.
+
+Fixes: 5d9b976d4480 ("octeontx2-af: Support fixed transmit scheduler topology")
+Signed-off-by: Satha Rao <skoteshwar@marvell.com>
+Signed-off-by: Sunil Kovvuri Goutham <sgoutham@marvell.com>
+Signed-off-by: Naveen Mamindlapalli <naveenm@marvell.com>
+Reviewed-by: Sridhar Samudrala <sridhar.samudrala@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index 4ad707e758b9f..1e058b96cbe27 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -1878,7 +1878,8 @@ static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
+               free_cnt = rvu_rsrc_free_count(&txsch->schq);
+       }
+-      if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
++      if (free_cnt < req_schq || req->schq[lvl] > MAX_TXSCHQ_PER_FUNC ||
++          req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC)
+               return NIX_AF_ERR_TLX_ALLOC_FAIL;
+       /* If contiguous queues are needed, check for availability */
+-- 
+2.39.2
+
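A small, self-contained C model of the strengthened validation above
(illustrative; MAX_PER_FUNC and the request layout are placeholders, not
the kernel's MAX_TXSCHQ_PER_FUNC or mailbox structures):

    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_PER_FUNC 128        /* placeholder limit per function */

    struct alloc_req {
            uint16_t schq;          /* non-contiguous queues requested */
            uint16_t schq_contig;   /* contiguous queues requested     */
    };

    /* Both requested counts are bounded individually; the free-count
     * check alone is not enough because either array can exceed the
     * per-function limit on its own.
     */
    static bool alloc_req_ok(const struct alloc_req *req, uint16_t free_cnt,
                             uint16_t req_schq)
    {
            if (req->schq > MAX_PER_FUNC || req->schq_contig > MAX_PER_FUNC)
                    return false;
            return free_cnt >= req_schq;
    }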
diff --git a/queue-6.3/ping6-fix-send-to-link-local-addresses-with-vrf.patch b/queue-6.3/ping6-fix-send-to-link-local-addresses-with-vrf.patch
new file mode 100644 (file)
index 0000000..9ead339
--- /dev/null
@@ -0,0 +1,58 @@
+From c0ab286a1ce355c13f1b940549236d21619ce0e7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Jun 2023 18:05:02 +0200
+Subject: ping6: Fix send to link-local addresses with VRF.
+
+From: Guillaume Nault <gnault@redhat.com>
+
+[ Upstream commit 91ffd1bae1dafbb9e34b46813f5b058581d9144d ]
+
+Ping sockets can't send packets when they're bound to a VRF master
+device and the output interface is set to a slave device.
+
+For example, when net.ipv4.ping_group_range is properly set, so that
+ping6 can use ping sockets, the following kind of command fails:
+  $ ip vrf exec red ping6 fe80::854:e7ff:fe88:4bf1%eth1
+
+What happens is that sk->sk_bound_dev_if is set to the VRF master
+device, but 'oif' is set to the real output device. Since both are set
+but different, ping_v6_sendmsg() sees their value as inconsistent and
+fails.
+
+Fix this by allowing 'oif' to be a slave device of ->sk_bound_dev_if.
+
+This fixes the following kselftest failure:
+  $ ./fcnal-test.sh -t ipv6_ping
+  [...]
+  TEST: ping out, vrf device+address bind - ns-B IPv6 LLA        [FAIL]
+
+Reported-by: Mirsad Todorovac <mirsad.todorovac@alu.unizg.hr>
+Closes: https://lore.kernel.org/netdev/b6191f90-ffca-dbca-7d06-88a9788def9c@alu.unizg.hr/
+Tested-by: Mirsad Todorovac <mirsad.todorovac@alu.unizg.hr>
+Fixes: 5e457896986e ("net: ipv6: Fix ping to link-local addresses.")
+Signed-off-by: Guillaume Nault <gnault@redhat.com>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Link: https://lore.kernel.org/r/6c8b53108816a8d0d5705ae37bdc5a8322b5e3d9.1686153846.git.gnault@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/ping.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
+index 808983bc2ec9f..4651aaf70db4f 100644
+--- a/net/ipv6/ping.c
++++ b/net/ipv6/ping.c
+@@ -114,7 +114,8 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+       addr_type = ipv6_addr_type(daddr);
+       if ((__ipv6_addr_needs_scope_id(addr_type) && !oif) ||
+           (addr_type & IPV6_ADDR_MAPPED) ||
+-          (oif && sk->sk_bound_dev_if && oif != sk->sk_bound_dev_if))
++          (oif && sk->sk_bound_dev_if && oif != sk->sk_bound_dev_if &&
++           l3mdev_master_ifindex_by_index(sock_net(sk), oif) != sk->sk_bound_dev_if))
+               return -EINVAL;
+       ipcm6_init_sk(&ipc6, np);
+-- 
+2.39.2
+
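A compact C model of the relaxed condition above (illustrative only;
l3_master_of() is a stub standing in for l3mdev_master_ifindex_by_index()):

    #include <stdbool.h>

    /* Stub for the l3mdev master lookup: returns the master ifindex of
     * 'oif', or 0 if 'oif' is not enslaved to an L3 master device.
     */
    static int l3_master_of(int oif)
    {
            (void)oif;
            return 0;
    }

    /* Sending is allowed when there is no binding conflict: either index
     * is unset, both match, or oif is a slave of the bound VRF master.
     */
    static bool oif_allowed(int oif, int bound_dev_if)
    {
            if (!oif || !bound_dev_if || oif == bound_dev_if)
                    return true;
            return l3_master_of(oif) == bound_dev_if;
    }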
diff --git a/queue-6.3/rdma-bnxt_re-fix-reporting-active_-speed-width-attri.patch b/queue-6.3/rdma-bnxt_re-fix-reporting-active_-speed-width-attri.patch
new file mode 100644 (file)
index 0000000..d1eb48e
--- /dev/null
@@ -0,0 +1,82 @@
+From a1a06418d46efe33b8cfe8529336d257eec7b5f3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 29 May 2023 11:35:26 -0400
+Subject: RDMA/bnxt_re: Fix reporting active_{speed,width} attributes
+
+From: Kamal Heib <kheib@redhat.com>
+
+[ Upstream commit 18e7e3e4217083a682e2c7282011c70c8a1ba070 ]
+
+After commit 6d758147c7b8 ("RDMA/bnxt_re: Use auxiliary driver interface")
+the active_{speed, width} attributes are reported incorrectly. This
+happens because ib_get_eth_speed() is called only once, from
+bnxt_re_ib_init(). Fix this issue by calling ib_get_eth_speed() from
+bnxt_re_query_port().
+
+Fixes: 6d758147c7b8 ("RDMA/bnxt_re: Use auxiliary driver interface")
+Link: https://lore.kernel.org/r/20230529153525.87254-1-kheib@redhat.com
+Signed-off-by: Kamal Heib <kheib@redhat.com>
+Acked-by: Selvin Xavier <selvin.xavier@broadcom.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/bnxt_re/bnxt_re.h  | 2 --
+ drivers/infiniband/hw/bnxt_re/ib_verbs.c | 7 ++++---
+ drivers/infiniband/hw/bnxt_re/main.c     | 2 --
+ 3 files changed, 4 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+index 5a2baf49ecaa4..2c95e6f3d47ac 100644
+--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
++++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+@@ -135,8 +135,6 @@ struct bnxt_re_dev {
+       struct delayed_work             worker;
+       u8                              cur_prio_map;
+-      u16                             active_speed;
+-      u8                              active_width;
+       /* FP Notification Queue (CQ & SRQ) */
+       struct tasklet_struct           nq_task;
+diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+index 94222de1d3719..584d6e64ca708 100644
+--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+@@ -199,6 +199,7 @@ int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
+ {
+       struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+       struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
++      int rc;
+       memset(port_attr, 0, sizeof(*port_attr));
+@@ -228,10 +229,10 @@ int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
+       port_attr->sm_sl = 0;
+       port_attr->subnet_timeout = 0;
+       port_attr->init_type_reply = 0;
+-      port_attr->active_speed = rdev->active_speed;
+-      port_attr->active_width = rdev->active_width;
++      rc = ib_get_eth_speed(&rdev->ibdev, port_num, &port_attr->active_speed,
++                            &port_attr->active_width);
+-      return 0;
++      return rc;
+ }
+ int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num,
+diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
+index c5867e78f2319..85e36c9f8e797 100644
+--- a/drivers/infiniband/hw/bnxt_re/main.c
++++ b/drivers/infiniband/hw/bnxt_re/main.c
+@@ -1152,8 +1152,6 @@ static int bnxt_re_ib_init(struct bnxt_re_dev *rdev)
+               return rc;
+       }
+       dev_info(rdev_to_dev(rdev), "Device registered with IB successfully");
+-      ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
+-                       &rdev->active_width);
+       set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
+       event = netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev) ?
+-- 
+2.39.2
+
diff --git a/queue-6.3/rdma-cma-always-set-static-rate-to-0-for-roce.patch b/queue-6.3/rdma-cma-always-set-static-rate-to-0-for-roce.patch
new file mode 100644 (file)
index 0000000..49ad3c7
--- /dev/null
@@ -0,0 +1,84 @@
+From 3e71d247df5cb5a4166ef454bb4ec64c40f7042f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jun 2023 13:33:23 +0300
+Subject: RDMA/cma: Always set static rate to 0 for RoCE
+
+From: Mark Zhang <markzhang@nvidia.com>
+
+[ Upstream commit 58030c76cce473b6cfd630bbecb97215def0dff8 ]
+
+Set the static rate to 0, as it should be discovered by path query and
+has no meaning for RoCE.
+This also avoids using the rtnl lock and the ethtool API, which become
+a bottleneck when trying to set up many rdma-cm connections at the same
+time, especially with multiple processes.
+
+Fixes: 3c86aa70bf67 ("RDMA/cm: Add RDMA CM support for IBoE devices")
+Signed-off-by: Mark Zhang <markzhang@nvidia.com>
+Link: https://lore.kernel.org/r/f72a4f8b667b803aee9fa794069f61afb5839ce4.1685960567.git.leon@kernel.org
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/core/cma.c |  4 ++--
+ include/rdma/ib_addr.h        | 23 -----------------------
+ 2 files changed, 2 insertions(+), 25 deletions(-)
+
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 6b9563d4f23c9..033df909b92e1 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -3297,7 +3297,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
+       route->path_rec->traffic_class = tos;
+       route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
+       route->path_rec->rate_selector = IB_SA_EQ;
+-      route->path_rec->rate = iboe_get_rate(ndev);
++      route->path_rec->rate = IB_RATE_PORT_CURRENT;
+       dev_put(ndev);
+       route->path_rec->packet_life_time_selector = IB_SA_EQ;
+       /* In case ACK timeout is set, use this value to calculate
+@@ -4966,7 +4966,7 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
+       if (!ndev)
+               return -ENODEV;
+-      ib.rec.rate = iboe_get_rate(ndev);
++      ib.rec.rate = IB_RATE_PORT_CURRENT;
+       ib.rec.hop_limit = 1;
+       ib.rec.mtu = iboe_get_mtu(ndev->mtu);
+diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
+index d808dc3d239e8..811a0f11d0dbe 100644
+--- a/include/rdma/ib_addr.h
++++ b/include/rdma/ib_addr.h
+@@ -194,29 +194,6 @@ static inline enum ib_mtu iboe_get_mtu(int mtu)
+               return 0;
+ }
+-static inline int iboe_get_rate(struct net_device *dev)
+-{
+-      struct ethtool_link_ksettings cmd;
+-      int err;
+-
+-      rtnl_lock();
+-      err = __ethtool_get_link_ksettings(dev, &cmd);
+-      rtnl_unlock();
+-      if (err)
+-              return IB_RATE_PORT_CURRENT;
+-
+-      if (cmd.base.speed >= 40000)
+-              return IB_RATE_40_GBPS;
+-      else if (cmd.base.speed >= 30000)
+-              return IB_RATE_30_GBPS;
+-      else if (cmd.base.speed >= 20000)
+-              return IB_RATE_20_GBPS;
+-      else if (cmd.base.speed >= 10000)
+-              return IB_RATE_10_GBPS;
+-      else
+-              return IB_RATE_PORT_CURRENT;
+-}
+-
+ static inline int rdma_link_local_addr(struct in6_addr *addr)
+ {
+       if (addr->s6_addr32[0] == htonl(0xfe800000) &&
+-- 
+2.39.2
+
diff --git a/queue-6.3/rdma-mlx5-create-an-indirect-flow-table-for-steering.patch b/queue-6.3/rdma-mlx5-create-an-indirect-flow-table-for-steering.patch
new file mode 100644 (file)
index 0000000..e844481
--- /dev/null
@@ -0,0 +1,438 @@
+From c5bf47e8957f04b5c90926bd51bdf041e32bee8a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jun 2023 13:33:18 +0300
+Subject: RDMA/mlx5: Create an indirect flow table for steering anchor
+
+From: Mark Bloch <mbloch@nvidia.com>
+
+[ Upstream commit e1f4a52ac171dd863fe89055e749ef5e0a0bc5ce ]
+
+A misbehaved user can create a steering anchor that points to a kernel
+flow table and then destroy the anchor without freeing the associated
+STC. This creates a problem as the kernel can't destroy the flow
+table since there is still a reference to it. As a result, this can
+exhaust all available flow table resources, preventing other users from
+using the RDMA device.
+
+To prevent this problem, a solution is implemented where a special flow
+table with two steering rules is created when a user creates a steering
+anchor for the first time. The rules include one that drops all traffic
+and another that points to the kernel flow table. If the steering anchor
+is destroyed, only the rule pointing to the kernel's flow table is removed.
+Any traffic reaching the special flow table after that is dropped.
+
+Since the special flow table is not destroyed when the steering anchor is
+destroyed, this problem cannot occur. The remaining resources are only
+destroyed when the RDMA device is destroyed, which happens after all DEVX
+objects, including the STCs, are freed, thus mitigating the issue.
+
+Fixes: 0c6ab0ca9a66 ("RDMA/mlx5: Expose steering anchor to userspace")
+Signed-off-by: Mark Bloch <mbloch@nvidia.com>
+Reviewed-by: Maor Gottlieb <maorg@nvidia.com>
+Link: https://lore.kernel.org/r/b4a88a871d651fa4e8f98d552553c1cfe9ba2cd6.1685960567.git.leon@kernel.org
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/mlx5/fs.c      | 276 ++++++++++++++++++++++++++-
+ drivers/infiniband/hw/mlx5/fs.h      |  16 ++
+ drivers/infiniband/hw/mlx5/mlx5_ib.h |  11 ++
+ 3 files changed, 296 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
+index 3008632a6c206..1e419e080b535 100644
+--- a/drivers/infiniband/hw/mlx5/fs.c
++++ b/drivers/infiniband/hw/mlx5/fs.c
+@@ -695,8 +695,6 @@ static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_ib_dev *dev,
+       struct mlx5_flow_table_attr ft_attr = {};
+       struct mlx5_flow_table *ft;
+-      if (mlx5_ib_shared_ft_allowed(&dev->ib_dev))
+-              ft_attr.uid = MLX5_SHARED_RESOURCE_UID;
+       ft_attr.prio = priority;
+       ft_attr.max_fte = num_entries;
+       ft_attr.flags = flags;
+@@ -2025,6 +2023,237 @@ static int flow_matcher_cleanup(struct ib_uobject *uobject,
+       return 0;
+ }
++static int steering_anchor_create_ft(struct mlx5_ib_dev *dev,
++                                   struct mlx5_ib_flow_prio *ft_prio,
++                                   enum mlx5_flow_namespace_type ns_type)
++{
++      struct mlx5_flow_table_attr ft_attr = {};
++      struct mlx5_flow_namespace *ns;
++      struct mlx5_flow_table *ft;
++
++      if (ft_prio->anchor.ft)
++              return 0;
++
++      ns = mlx5_get_flow_namespace(dev->mdev, ns_type);
++      if (!ns)
++              return -EOPNOTSUPP;
++
++      ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED;
++      ft_attr.uid = MLX5_SHARED_RESOURCE_UID;
++      ft_attr.prio = 0;
++      ft_attr.max_fte = 2;
++      ft_attr.level = 1;
++
++      ft = mlx5_create_flow_table(ns, &ft_attr);
++      if (IS_ERR(ft))
++              return PTR_ERR(ft);
++
++      ft_prio->anchor.ft = ft;
++
++      return 0;
++}
++
++static void steering_anchor_destroy_ft(struct mlx5_ib_flow_prio *ft_prio)
++{
++      if (ft_prio->anchor.ft) {
++              mlx5_destroy_flow_table(ft_prio->anchor.ft);
++              ft_prio->anchor.ft = NULL;
++      }
++}
++
++static int
++steering_anchor_create_fg_drop(struct mlx5_ib_flow_prio *ft_prio)
++{
++      int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
++      struct mlx5_flow_group *fg;
++      void *flow_group_in;
++      int err = 0;
++
++      if (ft_prio->anchor.fg_drop)
++              return 0;
++
++      flow_group_in = kvzalloc(inlen, GFP_KERNEL);
++      if (!flow_group_in)
++              return -ENOMEM;
++
++      MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
++      MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
++
++      fg = mlx5_create_flow_group(ft_prio->anchor.ft, flow_group_in);
++      if (IS_ERR(fg)) {
++              err = PTR_ERR(fg);
++              goto out;
++      }
++
++      ft_prio->anchor.fg_drop = fg;
++
++out:
++      kvfree(flow_group_in);
++
++      return err;
++}
++
++static void
++steering_anchor_destroy_fg_drop(struct mlx5_ib_flow_prio *ft_prio)
++{
++      if (ft_prio->anchor.fg_drop) {
++              mlx5_destroy_flow_group(ft_prio->anchor.fg_drop);
++              ft_prio->anchor.fg_drop = NULL;
++      }
++}
++
++static int
++steering_anchor_create_fg_goto_table(struct mlx5_ib_flow_prio *ft_prio)
++{
++      int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
++      struct mlx5_flow_group *fg;
++      void *flow_group_in;
++      int err = 0;
++
++      if (ft_prio->anchor.fg_goto_table)
++              return 0;
++
++      flow_group_in = kvzalloc(inlen, GFP_KERNEL);
++      if (!flow_group_in)
++              return -ENOMEM;
++
++      fg = mlx5_create_flow_group(ft_prio->anchor.ft, flow_group_in);
++      if (IS_ERR(fg)) {
++              err = PTR_ERR(fg);
++              goto out;
++      }
++      ft_prio->anchor.fg_goto_table = fg;
++
++out:
++      kvfree(flow_group_in);
++
++      return err;
++}
++
++static void
++steering_anchor_destroy_fg_goto_table(struct mlx5_ib_flow_prio *ft_prio)
++{
++      if (ft_prio->anchor.fg_goto_table) {
++              mlx5_destroy_flow_group(ft_prio->anchor.fg_goto_table);
++              ft_prio->anchor.fg_goto_table = NULL;
++      }
++}
++
++static int
++steering_anchor_create_rule_drop(struct mlx5_ib_flow_prio *ft_prio)
++{
++      struct mlx5_flow_act flow_act = {};
++      struct mlx5_flow_handle *handle;
++
++      if (ft_prio->anchor.rule_drop)
++              return 0;
++
++      flow_act.fg = ft_prio->anchor.fg_drop;
++      flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
++
++      handle = mlx5_add_flow_rules(ft_prio->anchor.ft, NULL, &flow_act,
++                                   NULL, 0);
++      if (IS_ERR(handle))
++              return PTR_ERR(handle);
++
++      ft_prio->anchor.rule_drop = handle;
++
++      return 0;
++}
++
++static void steering_anchor_destroy_rule_drop(struct mlx5_ib_flow_prio *ft_prio)
++{
++      if (ft_prio->anchor.rule_drop) {
++              mlx5_del_flow_rules(ft_prio->anchor.rule_drop);
++              ft_prio->anchor.rule_drop = NULL;
++      }
++}
++
++static int
++steering_anchor_create_rule_goto_table(struct mlx5_ib_flow_prio *ft_prio)
++{
++      struct mlx5_flow_destination dest = {};
++      struct mlx5_flow_act flow_act = {};
++      struct mlx5_flow_handle *handle;
++
++      if (ft_prio->anchor.rule_goto_table)
++              return 0;
++
++      flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
++      flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
++      flow_act.fg = ft_prio->anchor.fg_goto_table;
++
++      dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
++      dest.ft = ft_prio->flow_table;
++
++      handle = mlx5_add_flow_rules(ft_prio->anchor.ft, NULL, &flow_act,
++                                   &dest, 1);
++      if (IS_ERR(handle))
++              return PTR_ERR(handle);
++
++      ft_prio->anchor.rule_goto_table = handle;
++
++      return 0;
++}
++
++static void
++steering_anchor_destroy_rule_goto_table(struct mlx5_ib_flow_prio *ft_prio)
++{
++      if (ft_prio->anchor.rule_goto_table) {
++              mlx5_del_flow_rules(ft_prio->anchor.rule_goto_table);
++              ft_prio->anchor.rule_goto_table = NULL;
++      }
++}
++
++static int steering_anchor_create_res(struct mlx5_ib_dev *dev,
++                                    struct mlx5_ib_flow_prio *ft_prio,
++                                    enum mlx5_flow_namespace_type ns_type)
++{
++      int err;
++
++      err = steering_anchor_create_ft(dev, ft_prio, ns_type);
++      if (err)
++              return err;
++
++      err = steering_anchor_create_fg_drop(ft_prio);
++      if (err)
++              goto destroy_ft;
++
++      err = steering_anchor_create_fg_goto_table(ft_prio);
++      if (err)
++              goto destroy_fg_drop;
++
++      err = steering_anchor_create_rule_drop(ft_prio);
++      if (err)
++              goto destroy_fg_goto_table;
++
++      err = steering_anchor_create_rule_goto_table(ft_prio);
++      if (err)
++              goto destroy_rule_drop;
++
++      return 0;
++
++destroy_rule_drop:
++      steering_anchor_destroy_rule_drop(ft_prio);
++destroy_fg_goto_table:
++      steering_anchor_destroy_fg_goto_table(ft_prio);
++destroy_fg_drop:
++      steering_anchor_destroy_fg_drop(ft_prio);
++destroy_ft:
++      steering_anchor_destroy_ft(ft_prio);
++
++      return err;
++}
++
++static void mlx5_steering_anchor_destroy_res(struct mlx5_ib_flow_prio *ft_prio)
++{
++      steering_anchor_destroy_rule_goto_table(ft_prio);
++      steering_anchor_destroy_rule_drop(ft_prio);
++      steering_anchor_destroy_fg_goto_table(ft_prio);
++      steering_anchor_destroy_fg_drop(ft_prio);
++      steering_anchor_destroy_ft(ft_prio);
++}
++
+ static int steering_anchor_cleanup(struct ib_uobject *uobject,
+                                  enum rdma_remove_reason why,
+                                  struct uverbs_attr_bundle *attrs)
+@@ -2035,6 +2264,9 @@ static int steering_anchor_cleanup(struct ib_uobject *uobject,
+               return -EBUSY;
+       mutex_lock(&obj->dev->flow_db->lock);
++      if (!--obj->ft_prio->anchor.rule_goto_table_ref)
++              steering_anchor_destroy_rule_goto_table(obj->ft_prio);
++
+       put_flow_table(obj->dev, obj->ft_prio, true);
+       mutex_unlock(&obj->dev->flow_db->lock);
+@@ -2042,6 +2274,24 @@ static int steering_anchor_cleanup(struct ib_uobject *uobject,
+       return 0;
+ }
++static void fs_cleanup_anchor(struct mlx5_ib_flow_prio *prio,
++                            int count)
++{
++      while (count--)
++              mlx5_steering_anchor_destroy_res(&prio[count]);
++}
++
++void mlx5_ib_fs_cleanup_anchor(struct mlx5_ib_dev *dev)
++{
++      fs_cleanup_anchor(dev->flow_db->prios, MLX5_IB_NUM_FLOW_FT);
++      fs_cleanup_anchor(dev->flow_db->egress_prios, MLX5_IB_NUM_FLOW_FT);
++      fs_cleanup_anchor(dev->flow_db->sniffer, MLX5_IB_NUM_SNIFFER_FTS);
++      fs_cleanup_anchor(dev->flow_db->egress, MLX5_IB_NUM_EGRESS_FTS);
++      fs_cleanup_anchor(dev->flow_db->fdb, MLX5_IB_NUM_FDB_FTS);
++      fs_cleanup_anchor(dev->flow_db->rdma_rx, MLX5_IB_NUM_FLOW_FT);
++      fs_cleanup_anchor(dev->flow_db->rdma_tx, MLX5_IB_NUM_FLOW_FT);
++}
++
+ static int mlx5_ib_matcher_ns(struct uverbs_attr_bundle *attrs,
+                             struct mlx5_ib_flow_matcher *obj)
+ {
+@@ -2182,21 +2432,31 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_STEERING_ANCHOR_CREATE)(
+               return -ENOMEM;
+       mutex_lock(&dev->flow_db->lock);
++
+       ft_prio = _get_flow_table(dev, priority, ns_type, 0);
+       if (IS_ERR(ft_prio)) {
+-              mutex_unlock(&dev->flow_db->lock);
+               err = PTR_ERR(ft_prio);
+               goto free_obj;
+       }
+       ft_prio->refcount++;
+-      ft_id = mlx5_flow_table_id(ft_prio->flow_table);
+-      mutex_unlock(&dev->flow_db->lock);
++
++      if (!ft_prio->anchor.rule_goto_table_ref) {
++              err = steering_anchor_create_res(dev, ft_prio, ns_type);
++              if (err)
++                      goto put_flow_table;
++      }
++
++      ft_prio->anchor.rule_goto_table_ref++;
++
++      ft_id = mlx5_flow_table_id(ft_prio->anchor.ft);
+       err = uverbs_copy_to(attrs, MLX5_IB_ATTR_STEERING_ANCHOR_FT_ID,
+                            &ft_id, sizeof(ft_id));
+       if (err)
+-              goto put_flow_table;
++              goto destroy_res;
++
++      mutex_unlock(&dev->flow_db->lock);
+       uobj->object = obj;
+       obj->dev = dev;
+@@ -2205,8 +2465,10 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_STEERING_ANCHOR_CREATE)(
+       return 0;
++destroy_res:
++      --ft_prio->anchor.rule_goto_table_ref;
++      mlx5_steering_anchor_destroy_res(ft_prio);
+ put_flow_table:
+-      mutex_lock(&dev->flow_db->lock);
+       put_flow_table(dev, ft_prio, true);
+       mutex_unlock(&dev->flow_db->lock);
+ free_obj:
+diff --git a/drivers/infiniband/hw/mlx5/fs.h b/drivers/infiniband/hw/mlx5/fs.h
+index ad320adaf3217..b9734904f5f01 100644
+--- a/drivers/infiniband/hw/mlx5/fs.h
++++ b/drivers/infiniband/hw/mlx5/fs.h
+@@ -10,6 +10,7 @@
+ #if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
+ int mlx5_ib_fs_init(struct mlx5_ib_dev *dev);
++void mlx5_ib_fs_cleanup_anchor(struct mlx5_ib_dev *dev);
+ #else
+ static inline int mlx5_ib_fs_init(struct mlx5_ib_dev *dev)
+ {
+@@ -21,9 +22,24 @@ static inline int mlx5_ib_fs_init(struct mlx5_ib_dev *dev)
+       mutex_init(&dev->flow_db->lock);
+       return 0;
+ }
++
++inline void mlx5_ib_fs_cleanup_anchor(struct mlx5_ib_dev *dev) {}
+ #endif
++
+ static inline void mlx5_ib_fs_cleanup(struct mlx5_ib_dev *dev)
+ {
++      /* When a steering anchor is created, a special flow table is also
++       * created for the user to reference. Since the user can reference it,
++       * the kernel cannot trust that when the user destroys the steering
++       * anchor, they no longer reference the flow table.
++       *
++       * To address this issue, when a user destroys a steering anchor, only
++       * the flow steering rule in the table is destroyed, but the table
++       * itself is kept to deal with the above scenario. The remaining
++       * resources are only removed when the RDMA device is destroyed, which
++       * is a safe assumption that all references are gone.
++       */
++      mlx5_ib_fs_cleanup_anchor(dev);
+       kfree(dev->flow_db);
+ }
+ #endif /* _MLX5_IB_FS_H */
+diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+index efa4dc6e7dee1..91fc0cdf377d1 100644
+--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
++++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+@@ -237,8 +237,19 @@ enum {
+ #define MLX5_IB_NUM_SNIFFER_FTS               2
+ #define MLX5_IB_NUM_EGRESS_FTS                1
+ #define MLX5_IB_NUM_FDB_FTS           MLX5_BY_PASS_NUM_REGULAR_PRIOS
++
++struct mlx5_ib_anchor {
++      struct mlx5_flow_table *ft;
++      struct mlx5_flow_group *fg_goto_table;
++      struct mlx5_flow_group *fg_drop;
++      struct mlx5_flow_handle *rule_goto_table;
++      struct mlx5_flow_handle *rule_drop;
++      unsigned int rule_goto_table_ref;
++};
++
+ struct mlx5_ib_flow_prio {
+       struct mlx5_flow_table          *flow_table;
++      struct mlx5_ib_anchor           anchor;
+       unsigned int                    refcount;
+ };
+-- 
+2.39.2
+
diff --git a/queue-6.3/rdma-mlx5-fix-affinity-assignment.patch b/queue-6.3/rdma-mlx5-fix-affinity-assignment.patch
new file mode 100644 (file)
index 0000000..5e5778b
--- /dev/null
@@ -0,0 +1,125 @@
+From 4baa20b2d4db327941a1bbfa87a786ac92ac4bf0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jun 2023 13:33:26 +0300
+Subject: RDMA/mlx5: Fix affinity assignment
+
+From: Mark Bloch <mbloch@nvidia.com>
+
+[ Upstream commit 617f5db1a626f18d5cbb7c7faf7bf8f9ea12be78 ]
+
+The cited commit aimed to ensure that Virtual Functions (VFs) assign a
+queue affinity to a Queue Pair (QP) to distribute traffic when
+the LAG master creates a hardware LAG. If the affinity was set while
+the hardware was not in LAG, the firmware would ignore the affinity value.
+
+However, this commit unintentionally assigned an affinity to QPs on the LAG
+master's VPORT even if the RDMA device was not marked as LAG-enabled.
+In most cases, this was not an issue because when the hardware entered
+hardware LAG configuration, the RDMA device of the LAG master would be
+destroyed and a new one would be created, marked as LAG-enabled.
+
+The problem arises when a user configures Equal-Cost Multipath (ECMP).
+In ECMP mode, traffic can be directed to different physical ports based on
+the queue affinity, which is intended for use by VPORTS other than the
+E-Switch manager. ECMP mode is supported only if both E-Switch managers are
+in switchdev mode and the appropriate route is configured via IP. In this
+configuration, the RDMA device is not destroyed, and we retain the RDMA
+device that is not marked as LAG-enabled.
+
+To ensure correct behavior, Send Queues (SQs) opened by the E-Switch
+manager through verbs should be assigned strict affinity. This means they
+will only be able to communicate through the native physical port
+associated with the E-Switch manager. This will prevent the firmware from
+assigning affinity and will not allow the SQs to be remapped in case of
+failover.
+
+Fixes: 802dcc7fc5ec ("RDMA/mlx5: Support TX port affinity for VF drivers in LAG mode")
+Reviewed-by: Maor Gottlieb <maorg@nvidia.com>
+Signed-off-by: Mark Bloch <mbloch@nvidia.com>
+Link: https://lore.kernel.org/r/425b05f4da840bc684b0f7e8ebf61aeb5cef09b0.1685960567.git.leon@kernel.org
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/mlx5/mlx5_ib.h                |  3 +++
+ drivers/infiniband/hw/mlx5/qp.c                     |  3 +++
+ drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 12 ------------
+ include/linux/mlx5/driver.h                         | 12 ++++++++++++
+ 4 files changed, 18 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+index 91fc0cdf377d1..2dfa6f49a6f48 100644
+--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
++++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+@@ -1598,6 +1598,9 @@ static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev)
+           MLX5_CAP_PORT_SELECTION(dev->mdev, port_select_flow_table_bypass))
+               return 0;
++      if (mlx5_lag_is_lacp_owner(dev->mdev) && !dev->lag_active)
++              return 0;
++
+       return dev->lag_active ||
+               (MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 &&
+                MLX5_CAP_GEN(dev->mdev, lag_tx_port_affinity));
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index 86284aba3470d..5bc63020b766d 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -1233,6 +1233,9 @@ static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
+       MLX5_SET(create_tis_in, in, uid, to_mpd(pd)->uid);
+       MLX5_SET(tisc, tisc, transport_domain, tdn);
++      if (!mlx5_ib_lag_should_assign_affinity(dev) &&
++          mlx5_lag_is_lacp_owner(dev->mdev))
++              MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
+       if (qp->flags & IB_QP_CREATE_SOURCE_QPN)
+               MLX5_SET(tisc, tisc, underlay_qpn, qp->underlay_qpn);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+index a3c5c2dab5fd7..d7ef853702b79 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+@@ -275,18 +275,6 @@ static inline bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
+       return pci_num_vf(dev->pdev) ? true : false;
+ }
+-static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
+-{
+-      /* LACP owner conditions:
+-       * 1) Function is physical.
+-       * 2) LAG is supported by FW.
+-       * 3) LAG is managed by driver (currently the only option).
+-       */
+-      return  MLX5_CAP_GEN(dev, vport_group_manager) &&
+-                 (MLX5_CAP_GEN(dev, num_lag_ports) > 1) &&
+-                  MLX5_CAP_GEN(dev, lag_master);
+-}
+-
+ int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev);
+ static inline int mlx5_rescan_drivers(struct mlx5_core_dev *dev)
+ {
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index 68a3183d5d589..33dbe941d070c 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -1233,6 +1233,18 @@ static inline u16 mlx5_core_max_vfs(const struct mlx5_core_dev *dev)
+       return dev->priv.sriov.max_vfs;
+ }
++static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
++{
++      /* LACP owner conditions:
++       * 1) Function is physical.
++       * 2) LAG is supported by FW.
++       * 3) LAG is managed by driver (currently the only option).
++       */
++      return  MLX5_CAP_GEN(dev, vport_group_manager) &&
++                 (MLX5_CAP_GEN(dev, num_lag_ports) > 1) &&
++                  MLX5_CAP_GEN(dev, lag_master);
++}
++
+ static inline int mlx5_get_gid_table_len(u16 param)
+ {
+       if (param > 4) {
+-- 
+2.39.2
+
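A simplified boolean model of the affinity decision described above
(illustrative; the real code checks device capabilities and LAG state on
the mlx5 core device, not plain flags):

    #include <stdbool.h>

    /* An E-Switch manager (LACP owner) whose RDMA device is not marked
     * LAG-active must not let the firmware pick a tx port; its SQs get
     * strict affinity instead of a queue affinity assignment.
     */
    static bool should_assign_tx_affinity(bool lacp_owner, bool lag_active,
                                          bool hw_lag_affinity_cap)
    {
            if (lacp_owner && !lag_active)
                    return false;
            return lag_active || hw_lag_affinity_cap;
    }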
diff --git a/queue-6.3/rdma-mlx5-initiate-dropless-rq-for-raw-ethernet-func.patch b/queue-6.3/rdma-mlx5-initiate-dropless-rq-for-raw-ethernet-func.patch
new file mode 100644 (file)
index 0000000..6bff55a
--- /dev/null
@@ -0,0 +1,46 @@
+From 2d4a8ea008460c30ee0c6a0fa3a8785a9260464d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jun 2023 13:33:17 +0300
+Subject: RDMA/mlx5: Initiate dropless RQ for RAW Ethernet functions
+
+From: Maher Sanalla <msanalla@nvidia.com>
+
+[ Upstream commit ee4d269eccfea6c17b18281bef482700d898e86f ]
+
+Delay drop data is initiated for PFs that have the capability of
+rq_delay_drop and are in roce profile.
+
+However, PFs with RAW ethernet profile do not initiate delay drop data
+on function load, causing kernel panic if delay drop struct members are
+accessed later on in case a dropless RQ is created.
+
+Thus, stage the delay drop initialization as part of RAW ethernet
+PF loading process.
+
+Fixes: b5ca15ad7e61 ("IB/mlx5: Add proper representors support")
+Signed-off-by: Maher Sanalla <msanalla@nvidia.com>
+Reviewed-by: Maor Gottlieb <maorg@nvidia.com>
+Link: https://lore.kernel.org/r/2e9d386785043d48c38711826eb910315c1de141.1685960567.git.leon@kernel.org
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/mlx5/main.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 5d45de223c43a..f0b394ed7452a 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -4275,6 +4275,9 @@ const struct mlx5_ib_profile raw_eth_profile = {
+       STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
+                    mlx5_ib_stage_post_ib_reg_umr_init,
+                    NULL),
++      STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
++                   mlx5_ib_stage_delay_drop_init,
++                   mlx5_ib_stage_delay_drop_cleanup),
+       STAGE_CREATE(MLX5_IB_STAGE_RESTRACK,
+                    mlx5_ib_restrack_init,
+                    NULL),
+-- 
+2.39.2
+
diff --git a/queue-6.3/rdma-rtrs-fix-rxe_dealloc_pd-warning.patch b/queue-6.3/rdma-rtrs-fix-rxe_dealloc_pd-warning.patch
new file mode 100644 (file)
index 0000000..58d0856
--- /dev/null
@@ -0,0 +1,190 @@
+From b20402cfbe4edc47f52f46a2bb5037cf1e60c75c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Apr 2023 01:02:43 +0000
+Subject: RDMA/rtrs: Fix rxe_dealloc_pd warning
+
+From: Li Zhijian <lizhijian@fujitsu.com>
+
+[ Upstream commit 9c29c8c7df0688f358d2df5ddd16c97c2f7292b4 ]
+
+In the current design:
+1. The PD and clt_path->s.dev are shared among connections.
+2. Every con[n]'s cleanup phase calls destroy_con_cq_qp().
+3. clt_path->s.dev is always decreased in destroy_con_cq_qp(), and
+   when clt_path->s.dev reaches zero, the PD is destroyed.
+4. When con[1] fails to create, con[1] does not take clt_path->s.dev,
+   but it still tries to decrease clt_path->s.dev.
+
+So, in case create_cm(con[0]) succeeds but create_cm(con[1]) fails,
+destroy_con_cq_qp(con[1]) will be called first, which will destroy the PD
+while it is still taken by con[0].
+
+Here, we refactor the error path of create_cm() and init_conns(), so that
+we do the cleanup in the order they are created.
+
+The warning occurs when destroying RXE PD whose reference count is not
+zero.
+
+ rnbd_client L597: Mapping device /dev/nvme0n1 on session client, (access_mode: rw, nr_poll_queues: 0)
+ ------------[ cut here ]------------
+ WARNING: CPU: 0 PID: 26407 at drivers/infiniband/sw/rxe/rxe_pool.c:256 __rxe_cleanup+0x13a/0x170 [rdma_rxe]
+ Modules linked in: rpcrdma rdma_ucm ib_iser rnbd_client libiscsi rtrs_client scsi_transport_iscsi rtrs_core rdma_cm iw_cm ib_cm crc32_generic rdma_rxe udp_tunnel ib_uverbs ib_core kmem device_dax nd_pmem dax_pmem nd_vme crc32c_intel fuse nvme_core nfit libnvdimm dm_multipath scsi_dh_rdac scsi_dh_emc scsi_dh_alua dm_mirror dm_region_hash dm_log dm_mod
+ CPU: 0 PID: 26407 Comm: rnbd-client.sh Kdump: loaded Not tainted 6.2.0-rc6-roce-flush+ #53
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.0-0-gd239552ce722-prebuilt.qemu.org 04/01/2014
+ RIP: 0010:__rxe_cleanup+0x13a/0x170 [rdma_rxe]
+ Code: 45 84 e4 0f 84 5a ff ff ff 48 89 ef e8 5f 18 71 f9 84 c0 75 90 be c8 00 00 00 48 89 ef e8 be 89 1f fa 85 c0 0f 85 7b ff ff ff <0f> 0b 41 bc ea ff ff ff e9 71 ff ff ff e8 84 7f 1f fa e9 d0 fe ff
+ RSP: 0018:ffffb09880b6f5f0 EFLAGS: 00010246
+ RAX: 0000000000000000 RBX: ffff99401f15d6a8 RCX: 0000000000000000
+ RDX: 0000000000000001 RSI: ffffffffbac8234b RDI: 00000000ffffffff
+ RBP: ffff99401f15d6d0 R08: 0000000000000001 R09: 0000000000000001
+ R10: 0000000000002d82 R11: 0000000000000000 R12: 0000000000000001
+ R13: ffff994101eff208 R14: ffffb09880b6f6a0 R15: 00000000fffffe00
+ FS:  00007fe113904740(0000) GS:ffff99413bc00000(0000) knlGS:0000000000000000
+ CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 00007ff6cde656c8 CR3: 000000001f108004 CR4: 00000000001706f0
+ DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+ DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+ Call Trace:
+  <TASK>
+  rxe_dealloc_pd+0x16/0x20 [rdma_rxe]
+  ib_dealloc_pd_user+0x4b/0x80 [ib_core]
+  rtrs_ib_dev_put+0x79/0xd0 [rtrs_core]
+  destroy_con_cq_qp+0x8a/0xa0 [rtrs_client]
+  init_path+0x1e7/0x9a0 [rtrs_client]
+  ? __pfx_autoremove_wake_function+0x10/0x10
+  ? lock_is_held_type+0xd7/0x130
+  ? rcu_read_lock_sched_held+0x43/0x80
+  ? pcpu_alloc+0x3dd/0x7d0
+  ? rtrs_clt_init_stats+0x18/0x40 [rtrs_client]
+  rtrs_clt_open+0x24f/0x5a0 [rtrs_client]
+  ? __pfx_rnbd_clt_link_ev+0x10/0x10 [rnbd_client]
+  rnbd_clt_map_device+0x6a5/0xe10 [rnbd_client]
+
+Fixes: 6a98d71daea1 ("RDMA/rtrs: client: main functionality")
+Link: https://lore.kernel.org/r/1682384563-2-4-git-send-email-lizhijian@fujitsu.com
+Signed-off-by: Li Zhijian <lizhijian@fujitsu.com>
+Acked-by: Jack Wang <jinpu.wang@ionos.com>
+Tested-by: Jack Wang <jinpu.wang@ionos.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/ulp/rtrs/rtrs-clt.c | 55 +++++++++++---------------
+ 1 file changed, 23 insertions(+), 32 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+index 80abf45a197ac..047bbfb96e2cb 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+@@ -2040,6 +2040,7 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
+       return 0;
+ }
++/* The caller should do the cleanup in case of error */
+ static int create_cm(struct rtrs_clt_con *con)
+ {
+       struct rtrs_path *s = con->c.path;
+@@ -2062,14 +2063,14 @@ static int create_cm(struct rtrs_clt_con *con)
+       err = rdma_set_reuseaddr(cm_id, 1);
+       if (err != 0) {
+               rtrs_err(s, "Set address reuse failed, err: %d\n", err);
+-              goto destroy_cm;
++              return err;
+       }
+       err = rdma_resolve_addr(cm_id, (struct sockaddr *)&clt_path->s.src_addr,
+                               (struct sockaddr *)&clt_path->s.dst_addr,
+                               RTRS_CONNECT_TIMEOUT_MS);
+       if (err) {
+               rtrs_err(s, "Failed to resolve address, err: %d\n", err);
+-              goto destroy_cm;
++              return err;
+       }
+       /*
+        * Combine connection status and session events. This is needed
+@@ -2084,29 +2085,15 @@ static int create_cm(struct rtrs_clt_con *con)
+               if (err == 0)
+                       err = -ETIMEDOUT;
+               /* Timedout or interrupted */
+-              goto errr;
+-      }
+-      if (con->cm_err < 0) {
+-              err = con->cm_err;
+-              goto errr;
++              return err;
+       }
+-      if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTING) {
++      if (con->cm_err < 0)
++              return con->cm_err;
++      if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTING)
+               /* Device removal */
+-              err = -ECONNABORTED;
+-              goto errr;
+-      }
++              return -ECONNABORTED;
+       return 0;
+-
+-errr:
+-      stop_cm(con);
+-      mutex_lock(&con->con_mutex);
+-      destroy_con_cq_qp(con);
+-      mutex_unlock(&con->con_mutex);
+-destroy_cm:
+-      destroy_cm(con);
+-
+-      return err;
+ }
+ static void rtrs_clt_path_up(struct rtrs_clt_path *clt_path)
+@@ -2334,7 +2321,7 @@ static void rtrs_clt_close_work(struct work_struct *work)
+ static int init_conns(struct rtrs_clt_path *clt_path)
+ {
+       unsigned int cid;
+-      int err;
++      int err, i;
+       /*
+        * On every new session connections increase reconnect counter
+@@ -2350,10 +2337,8 @@ static int init_conns(struct rtrs_clt_path *clt_path)
+                       goto destroy;
+               err = create_cm(to_clt_con(clt_path->s.con[cid]));
+-              if (err) {
+-                      destroy_con(to_clt_con(clt_path->s.con[cid]));
++              if (err)
+                       goto destroy;
+-              }
+       }
+       err = alloc_path_reqs(clt_path);
+       if (err)
+@@ -2364,15 +2349,21 @@ static int init_conns(struct rtrs_clt_path *clt_path)
+       return 0;
+ destroy:
+-      while (cid--) {
+-              struct rtrs_clt_con *con = to_clt_con(clt_path->s.con[cid]);
++      /* Make sure we do the cleanup in the order they are created */
++      for (i = 0; i <= cid; i++) {
++              struct rtrs_clt_con *con;
+-              stop_cm(con);
++              if (!clt_path->s.con[i])
++                      break;
+-              mutex_lock(&con->con_mutex);
+-              destroy_con_cq_qp(con);
+-              mutex_unlock(&con->con_mutex);
+-              destroy_cm(con);
++              con = to_clt_con(clt_path->s.con[i]);
++              if (con->c.cm_id) {
++                      stop_cm(con);
++                      mutex_lock(&con->con_mutex);
++                      destroy_con_cq_qp(con);
++                      mutex_unlock(&con->con_mutex);
++                      destroy_cm(con);
++              }
+               destroy_con(con);
+       }
+       /*
+-- 
+2.39.2
+
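The shape of the reworked unwind above, as a self-contained C sketch
(illustrative; the struct and helpers are placeholders for the rtrs
connection objects):

    #include <stdbool.h>
    #include <stddef.h>

    struct conn {
            bool cm_created;        /* set once the CM id exists */
    };

    static void teardown_cm(struct conn *c)  { c->cm_created = false; }
    static void destroy_conn(struct conn *c) { (void)c; }

    /* Unwind in creation order and stop at the first slot that was never
     * allocated, so the shared device/PD reference is dropped exactly
     * once per connection that actually took it.
     */
    static void unwind_conns(struct conn **conns, size_t max)
    {
            for (size_t i = 0; i < max; i++) {
                    if (!conns[i])
                            break;
                    if (conns[i]->cm_created)
                            teardown_cm(conns[i]);
                    destroy_conn(conns[i]);
            }
    }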
diff --git a/queue-6.3/rdma-rtrs-fix-the-last-iu-buf-leak-in-err-path.patch b/queue-6.3/rdma-rtrs-fix-the-last-iu-buf-leak-in-err-path.patch
new file mode 100644 (file)
index 0000000..942e866
--- /dev/null
@@ -0,0 +1,41 @@
+From db37bad860e5aba85b4c62f528f624b37a63ccbf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Apr 2023 01:02:42 +0000
+Subject: RDMA/rtrs: Fix the last iu->buf leak in err path
+
+From: Li Zhijian <lizhijian@fujitsu.com>
+
+[ Upstream commit 3bf3a7c6985c625f64e73baefdaa36f1c2045a29 ]
+
+The last iu->buf will leak if ib_dma_mapping_error() fails.
+
+Fixes: c0894b3ea69d ("RDMA/rtrs: core: lib functions shared between client and server modules")
+Link: https://lore.kernel.org/r/1682384563-2-3-git-send-email-lizhijian@fujitsu.com
+Signed-off-by: Li Zhijian <lizhijian@fujitsu.com>
+Acked-by: Guoqing Jiang <guoqing.jiang@linux.dev>
+Acked-by: Jack Wang <jinpu.wang@ionos.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/ulp/rtrs/rtrs.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
+index 4bf9d868cc522..3696f367ff515 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs.c
+@@ -37,8 +37,10 @@ struct rtrs_iu *rtrs_iu_alloc(u32 iu_num, size_t size, gfp_t gfp_mask,
+                       goto err;
+               iu->dma_addr = ib_dma_map_single(dma_dev, iu->buf, size, dir);
+-              if (ib_dma_mapping_error(dma_dev, iu->dma_addr))
++              if (ib_dma_mapping_error(dma_dev, iu->dma_addr)) {
++                      kfree(iu->buf);
+                       goto err;
++              }
+               iu->cqe.done  = done;
+               iu->size      = size;
+-- 
+2.39.2
+
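The leak pattern and its fix, reduced to a self-contained C sketch
(illustrative; map_ok() is a stub standing in for ib_dma_map_single() plus
ib_dma_mapping_error()):

    #include <stdbool.h>
    #include <stdlib.h>

    static bool map_ok(void *buf)   /* stub for the DMA-mapping step */
    {
            return buf != NULL;
    }

    /* If mapping fails, the buffer allocated in the same iteration must
     * be freed before bailing out; the error loop below only frees
     * buffers from earlier, fully set-up iterations.
     */
    static int alloc_bufs(void **bufs, int n, size_t size)
    {
            int i;

            for (i = 0; i < n; i++) {
                    bufs[i] = malloc(size);
                    if (!bufs[i])
                            goto err;
                    if (!map_ok(bufs[i])) {
                            free(bufs[i]);          /* the fix */
                            goto err;
                    }
            }
            return 0;
    err:
            while (i--)
                    free(bufs[i]);
            return -1;
    }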
diff --git a/queue-6.3/rdma-rxe-fix-packet-length-checks.patch b/queue-6.3/rdma-rxe-fix-packet-length-checks.patch
new file mode 100644 (file)
index 0000000..eb3f3ab
--- /dev/null
@@ -0,0 +1,56 @@
+From 4446f7c1a5b8a9a420436062d9241d4236f67933 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 May 2023 12:22:42 -0500
+Subject: RDMA/rxe: Fix packet length checks
+
+From: Bob Pearson <rpearsonhpe@gmail.com>
+
+[ Upstream commit 9a3763e87379c97a78b7c6c6f40720b1e877174f ]
+
+In rxe_net.c a received packet, from udp or loopback, is passed to
+rxe_rcv() in rxe_recv.c as a udp packet, i.e. with skb->data pointing at
+the udp header. But rxe_rcv() performs length checks to verify the packet
+is long enough to hold the roce headers, as if it were a roce packet,
+i.e. with skb->data pointing at the bth header. A runt packet would
+appear to have 8 more bytes than it actually does, which may lead to
+incorrect behavior.
+This patch calls skb_pull() to adjust the skb to point at the bth header
+before calling rxe_rcv() which fixes this error.
+
+Fixes: 8700e3e7c485 ("Soft RoCE driver")
+Link: https://lore.kernel.org/r/20230517172242.1806340-1-rpearsonhpe@gmail.com
+Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/sw/rxe/rxe_net.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
+index a2ace42e95366..e8403a70c6b74 100644
+--- a/drivers/infiniband/sw/rxe/rxe_net.c
++++ b/drivers/infiniband/sw/rxe/rxe_net.c
+@@ -159,6 +159,9 @@ static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+       pkt->mask = RXE_GRH_MASK;
+       pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph);
++      /* remove udp header */
++      skb_pull(skb, sizeof(struct udphdr));
++
+       rxe_rcv(skb);
+       return 0;
+@@ -401,6 +404,9 @@ static int rxe_loopback(struct sk_buff *skb, struct rxe_pkt_info *pkt)
+               return -EIO;
+       }
++      /* remove udp header */
++      skb_pull(skb, sizeof(struct udphdr));
++
+       rxe_rcv(skb);
+       return 0;
+-- 
+2.39.2
+
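A minimal C illustration of the pointer adjustment above (not kernel code;
a raw buffer and length stand in for the skb):

    #include <stddef.h>

    #define UDP_HDR_LEN 8   /* a UDP header is always 8 bytes */

    /* Advance past the UDP header before handing the packet to the RoCE
     * receive path, so that subsequent length checks start at the BTH
     * instead of overcounting by 8 bytes of UDP header.
     */
    static void deliver_to_roce(const unsigned char **data, size_t *len)
    {
            *data += UDP_HDR_LEN;
            *len  -= UDP_HDR_LEN;
            /* ... rxe_rcv()-style checks now see the true RoCE length ... */
    }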
diff --git a/queue-6.3/rdma-rxe-fix-ref-count-error-in-check_rkey.patch b/queue-6.3/rdma-rxe-fix-ref-count-error-in-check_rkey.patch
new file mode 100644 (file)
index 0000000..c100ea6
--- /dev/null
@@ -0,0 +1,48 @@
+From e8804b690adef0ef5a98efd0f5feabedda4b0d9a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 May 2023 16:15:10 -0500
+Subject: RDMA/rxe: Fix ref count error in check_rkey()
+
+From: Bob Pearson <rpearsonhpe@gmail.com>
+
+[ Upstream commit b00683422fd79dd07c9b75efdce1660e5e19150e ]
+
+There is a reference count error in error path code and a potential race
+in check_rkey() in rxe_resp.c. When looking up the rkey for a memory
+window the reference to the mw from rxe_lookup_mw() is dropped before a
+reference is taken on the mr referenced by the mw. If the mr is destroyed
+immediately after the call to rxe_put(mw) the mr pointer is unprotected
+and may end up pointing at freed memory. The rxe_get(mr) call should take
+place before the rxe_put(mw) call.
+
+All error paths in check_rkey() call rxe_put(mw) if mw is not NULL, but
+with the above change rxe_put(mw) has already been called. The mw pointer
+should be set to NULL after the rxe_put(mw) call to prevent a double put.
+
+Fixes: cdd0b85675ae ("RDMA/rxe: Implement memory access through MWs")
+Link: https://lore.kernel.org/r/20230517211509.1819998-1-rpearsonhpe@gmail.com
+Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/sw/rxe/rxe_resp.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
+index 8c68340502769..4e8d8bec0010b 100644
+--- a/drivers/infiniband/sw/rxe/rxe_resp.c
++++ b/drivers/infiniband/sw/rxe/rxe_resp.c
+@@ -519,8 +519,9 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
+               if (mw->access & IB_ZERO_BASED)
+                       qp->resp.offset = mw->addr;
+-              rxe_put(mw);
+               rxe_get(mr);
++              rxe_put(mw);
++              mw = NULL;
+       } else {
+               mr = lookup_mr(qp->pd, access, rkey, RXE_LOOKUP_REMOTE);
+               if (!mr) {
+-- 
+2.39.2
+
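The reference ordering described above, as a tiny self-contained C sketch
(illustrative; the refcounted objects are placeholders for the rxe MR and
MW):

    #include <stddef.h>

    struct obj { int refcnt; };

    static void obj_get(struct obj *o) { o->refcnt++; }
    static void obj_put(struct obj *o) { o->refcnt--; }

    /* Pin the MR before releasing the MW that references it, and clear
     * the caller's MW pointer so shared error paths cannot drop the MW
     * reference a second time.
     */
    static struct obj *mw_to_mr(struct obj *mw, struct obj *mr,
                                struct obj **mw_slot)
    {
            obj_get(mr);
            obj_put(mw);
            *mw_slot = NULL;
            return mr;
    }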
diff --git a/queue-6.3/rdma-rxe-fix-rxe_cq_post.patch b/queue-6.3/rdma-rxe-fix-rxe_cq_post.patch
new file mode 100644 (file)
index 0000000..ab37210
--- /dev/null
@@ -0,0 +1,51 @@
+From b8cfed029580ae1ff0175b0082dc5d024e631e9e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Jun 2023 10:50:33 -0500
+Subject: RDMA/rxe: Fix rxe_cq_post
+
+From: Bob Pearson <rpearsonhpe@gmail.com>
+
+[ Upstream commit 0c7e314a6352664e12ec465f576cf039e95f8369 ]
+
+A recent patch replaced a tasklet execution of cq->comp_handler by a
+direct call. While this made sense, it left changes to the cq->notify state
+unprotected and assumed that the cq completion machinery and the ulp done
+callbacks were reentrant. The result is that in some cases completion
+events can be lost. This patch moves the cq->comp_handler call inside
+the spinlock in rxe_cq_post(), which solves both issues. This is compatible
+with the matching code in the request notify verb.
+
+Fixes: 78b26a335310 ("RDMA/rxe: Remove tasklet call from rxe_cq.c")
+Link: https://lore.kernel.org/r/20230612155032.17036-1-rpearsonhpe@gmail.com
+Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/sw/rxe/rxe_cq.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
+index 519ddec29b4ba..d6113329fee61 100644
+--- a/drivers/infiniband/sw/rxe/rxe_cq.c
++++ b/drivers/infiniband/sw/rxe/rxe_cq.c
+@@ -112,8 +112,6 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
+       queue_advance_producer(cq->queue, QUEUE_TYPE_TO_CLIENT);
+-      spin_unlock_irqrestore(&cq->cq_lock, flags);
+-
+       if ((cq->notify == IB_CQ_NEXT_COMP) ||
+           (cq->notify == IB_CQ_SOLICITED && solicited)) {
+               cq->notify = 0;
+@@ -121,6 +119,8 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
+               cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+       }
++      spin_unlock_irqrestore(&cq->cq_lock, flags);
++
+       return 0;
+ }
+-- 
+2.39.2
+
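A userspace C model of the locking change above (illustrative; a pthread
spinlock and plain integers stand in for the rxe CQ lock and the IB notify
states):

    #include <pthread.h>

    #define NOTIFY_NONE      0
    #define NOTIFY_NEXT_COMP 1
    #define NOTIFY_SOLICITED 2

    struct cq {
            pthread_spinlock_t lock;
            int notify;
            void (*comp_handler)(void *ctx);
            void *ctx;
    };

    /* Checking the armed state, clearing it and invoking the handler all
     * happen under the same lock, so a handler that immediately re-arms
     * the CQ cannot race with this update and lose a completion event.
     */
    static void cq_post(struct cq *cq, int solicited)
    {
            pthread_spin_lock(&cq->lock);
            /* ... queue the CQE here ... */
            if (cq->notify == NOTIFY_NEXT_COMP ||
                (cq->notify == NOTIFY_SOLICITED && solicited)) {
                    cq->notify = NOTIFY_NONE;
                    cq->comp_handler(cq->ctx);
            }
            pthread_spin_unlock(&cq->lock);
    }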
diff --git a/queue-6.3/rdma-rxe-fix-the-use-before-initialization-error-of-.patch b/queue-6.3/rdma-rxe-fix-the-use-before-initialization-error-of-.patch
new file mode 100644 (file)
index 0000000..01c3891
--- /dev/null
@@ -0,0 +1,82 @@
+From 695334c14e87bd8faaa9561e42b49f0fd8180518 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 2 Jun 2023 11:54:08 +0800
+Subject: RDMA/rxe: Fix the use-before-initialization error of resp_pkts
+
+From: Zhu Yanjun <yanjun.zhu@linux.dev>
+
+[ Upstream commit 2a62b6210ce876c596086ab8fd4c8a0c3d10611a ]
+
+In the following:
+
+  Call Trace:
+   <TASK>
+   __dump_stack lib/dump_stack.c:88 [inline]
+   dump_stack_lvl+0xd9/0x150 lib/dump_stack.c:106
+   assign_lock_key kernel/locking/lockdep.c:982 [inline]
+   register_lock_class+0xdb6/0x1120 kernel/locking/lockdep.c:1295
+   __lock_acquire+0x10a/0x5df0 kernel/locking/lockdep.c:4951
+   lock_acquire kernel/locking/lockdep.c:5691 [inline]
+   lock_acquire+0x1b1/0x520 kernel/locking/lockdep.c:5656
+   __raw_spin_lock_irqsave include/linux/spinlock_api_smp.h:110 [inline]
+   _raw_spin_lock_irqsave+0x3d/0x60 kernel/locking/spinlock.c:162
+   skb_dequeue+0x20/0x180 net/core/skbuff.c:3639
+   drain_resp_pkts drivers/infiniband/sw/rxe/rxe_comp.c:555 [inline]
+   rxe_completer+0x250d/0x3cc0 drivers/infiniband/sw/rxe/rxe_comp.c:652
+   rxe_qp_do_cleanup+0x1be/0x820 drivers/infiniband/sw/rxe/rxe_qp.c:761
+   execute_in_process_context+0x3b/0x150 kernel/workqueue.c:3473
+   __rxe_cleanup+0x21e/0x370 drivers/infiniband/sw/rxe/rxe_pool.c:233
+   rxe_create_qp+0x3f6/0x5f0 drivers/infiniband/sw/rxe/rxe_verbs.c:583
+
+This is a use-before-initialization problem.
+
+It happens because rxe_qp_do_cleanup is called during error unwind before
+the struct has been fully initialized.
+
+Move the initialization of the skb queues earlier.
+
+Fixes: 8700e3e7c485 ("Soft RoCE driver")
+Link: https://lore.kernel.org/r/20230602035408.741534-1-yanjun.zhu@intel.com
+Reported-by: syzbot+eba589d8f49c73d356da@syzkaller.appspotmail.com
+Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/sw/rxe/rxe_qp.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
+index d5de5ba6940f1..94a7f5ebc6292 100644
+--- a/drivers/infiniband/sw/rxe/rxe_qp.c
++++ b/drivers/infiniband/sw/rxe/rxe_qp.c
+@@ -176,6 +176,9 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
+       spin_lock_init(&qp->rq.producer_lock);
+       spin_lock_init(&qp->rq.consumer_lock);
++      skb_queue_head_init(&qp->req_pkts);
++      skb_queue_head_init(&qp->resp_pkts);
++
+       atomic_set(&qp->ssn, 0);
+       atomic_set(&qp->skb_out, 0);
+ }
+@@ -236,8 +239,6 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
+       qp->req.opcode          = -1;
+       qp->comp.opcode         = -1;
+-      skb_queue_head_init(&qp->req_pkts);
+-
+       rxe_init_task(&qp->req.task, qp, rxe_requester);
+       rxe_init_task(&qp->comp.task, qp, rxe_completer);
+@@ -281,8 +282,6 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
+               }
+       }
+-      skb_queue_head_init(&qp->resp_pkts);
+-
+       rxe_init_task(&qp->resp.task, qp, rxe_responder);
+       qp->resp.opcode         = OPCODE_NONE;
+-- 
+2.39.2
+
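The ordering rule behind the fix above, as a self-contained C sketch
(illustrative; a doubly linked list head stands in for the skb queues):

    struct pkt_list { struct pkt_list *next, *prev; };

    struct qp {
            struct pkt_list req_pkts;
            struct pkt_list resp_pkts;
            /* ... members set up by later, failure-prone init steps ... */
    };

    static void pkt_list_init(struct pkt_list *l) { l->next = l->prev = l; }

    /* Anything the error-unwind cleanup touches (here, the two packet
     * queues) is initialized in the unconditional early step, before any
     * step that can fail, so cleanup never walks uninitialized heads.
     */
    static void qp_init_misc(struct qp *qp)
    {
            pkt_list_init(&qp->req_pkts);
            pkt_list_init(&qp->resp_pkts);
    }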
diff --git a/queue-6.3/regulator-qcom-rpmh-add-support-for-pmm8654au-regula.patch b/queue-6.3/regulator-qcom-rpmh-add-support-for-pmm8654au-regula.patch
new file mode 100644 (file)
index 0000000..f4aa70f
--- /dev/null
@@ -0,0 +1,118 @@
+From 7aacbe4507470894dfe081efdb0c907035e024ca Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 Apr 2023 21:28:10 +0200
+Subject: regulator: qcom-rpmh: add support for pmm8654au regulators
+
+From: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
+
+[ Upstream commit 65f1b1dc0cc90236ed9be3970f4a763e853f3aab ]
+
+Add the RPMH regulators exposed by the PMM8654au PMIC and its variants.
+
+Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
+Reviewed-by: Konrad Dybcio <konrad.dybcio@linaro.org>
+Link: https://lore.kernel.org/r/20230406192811.460888-3-brgl@bgdev.pl
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Stable-dep-of: b00de0000a69 ("regulator: qcom-rpmh: Fix regulators for PM8550")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/regulator/qcom-rpmh-regulator.c | 55 +++++++++++++++++++++++++
+ 1 file changed, 55 insertions(+)
+
+diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
+index ae6021390143c..4c07ec15aff20 100644
+--- a/drivers/regulator/qcom-rpmh-regulator.c
++++ b/drivers/regulator/qcom-rpmh-regulator.c
+@@ -694,6 +694,16 @@ static const struct rpmh_vreg_hw_data pmic5_pldo_lv = {
+       .of_map_mode = rpmh_regulator_pmic4_ldo_of_map_mode,
+ };
++static const struct rpmh_vreg_hw_data pmic5_pldo515_mv = {
++      .regulator_type = VRM,
++      .ops = &rpmh_regulator_vrm_drms_ops,
++      .voltage_range = REGULATOR_LINEAR_RANGE(1800000, 0, 187, 8000),
++      .n_voltages = 188,
++      .hpm_min_load_uA = 10000,
++      .pmic_mode_map = pmic_mode_map_pmic5_ldo,
++      .of_map_mode = rpmh_regulator_pmic4_ldo_of_map_mode,
++};
++
+ static const struct rpmh_vreg_hw_data pmic5_nldo = {
+       .regulator_type = VRM,
+       .ops = &rpmh_regulator_vrm_drms_ops,
+@@ -704,6 +714,16 @@ static const struct rpmh_vreg_hw_data pmic5_nldo = {
+       .of_map_mode = rpmh_regulator_pmic4_ldo_of_map_mode,
+ };
++static const struct rpmh_vreg_hw_data pmic5_nldo515 = {
++      .regulator_type = VRM,
++      .ops = &rpmh_regulator_vrm_drms_ops,
++      .voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 210, 8000),
++      .n_voltages = 211,
++      .hpm_min_load_uA = 30000,
++      .pmic_mode_map = pmic_mode_map_pmic5_ldo,
++      .of_map_mode = rpmh_regulator_pmic4_ldo_of_map_mode,
++};
++
+ static const struct rpmh_vreg_hw_data pmic5_hfsmps510 = {
+       .regulator_type = VRM,
+       .ops = &rpmh_regulator_vrm_ops,
+@@ -749,6 +769,15 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps525_mv = {
+       .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
+ };
++static const struct rpmh_vreg_hw_data pmic5_ftsmps527 = {
++      .regulator_type = VRM,
++      .ops = &rpmh_regulator_vrm_ops,
++      .voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 215, 8000),
++      .n_voltages = 215,
++      .pmic_mode_map = pmic_mode_map_pmic5_smps,
++      .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
++};
++
+ static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = {
+       .regulator_type = VRM,
+       .ops = &rpmh_regulator_vrm_ops,
+@@ -937,6 +966,28 @@ static const struct rpmh_vreg_init_data pmm8155au_vreg_data[] = {
+       {}
+ };
++static const struct rpmh_vreg_init_data pmm8654au_vreg_data[] = {
++      RPMH_VREG("smps1",  "smp%s1",  &pmic5_ftsmps527,  "vdd-s1"),
++      RPMH_VREG("smps2",  "smp%s2",  &pmic5_ftsmps527,  "vdd-s2"),
++      RPMH_VREG("smps3",  "smp%s3",  &pmic5_ftsmps527,  "vdd-s3"),
++      RPMH_VREG("smps4",  "smp%s4",  &pmic5_ftsmps527,  "vdd-s4"),
++      RPMH_VREG("smps5",  "smp%s5",  &pmic5_ftsmps527,  "vdd-s5"),
++      RPMH_VREG("smps6",  "smp%s6",  &pmic5_ftsmps527,  "vdd-s6"),
++      RPMH_VREG("smps7",  "smp%s7",  &pmic5_ftsmps527,  "vdd-s7"),
++      RPMH_VREG("smps8",  "smp%s8",  &pmic5_ftsmps527,  "vdd-s8"),
++      RPMH_VREG("smps9",  "smp%s9",  &pmic5_ftsmps527,  "vdd-s9"),
++      RPMH_VREG("ldo1",   "ldo%s1",  &pmic5_nldo515,    "vdd-s9"),
++      RPMH_VREG("ldo2",   "ldo%s2",  &pmic5_nldo515,    "vdd-l2-l3"),
++      RPMH_VREG("ldo3",   "ldo%s3",  &pmic5_nldo515,    "vdd-l2-l3"),
++      RPMH_VREG("ldo4",   "ldo%s4",  &pmic5_nldo515,    "vdd-s9"),
++      RPMH_VREG("ldo5",   "ldo%s5",  &pmic5_nldo515,    "vdd-s9"),
++      RPMH_VREG("ldo6",   "ldo%s6",  &pmic5_nldo515,    "vdd-l6-l7"),
++      RPMH_VREG("ldo7",   "ldo%s7",  &pmic5_nldo515,    "vdd-l6-l7"),
++      RPMH_VREG("ldo8",   "ldo%s8",  &pmic5_pldo515_mv, "vdd-l8-l9"),
++      RPMH_VREG("ldo9",   "ldo%s9",  &pmic5_pldo,       "vdd-l8-l9"),
++      {}
++};
++
+ static const struct rpmh_vreg_init_data pm8350_vreg_data[] = {
+       RPMH_VREG("smps1",  "smp%s1",  &pmic5_ftsmps510, "vdd-s1"),
+       RPMH_VREG("smps2",  "smp%s2",  &pmic5_ftsmps510, "vdd-s2"),
+@@ -1431,6 +1482,10 @@ static const struct of_device_id __maybe_unused rpmh_regulator_match_table[] = {
+               .compatible = "qcom,pmm8155au-rpmh-regulators",
+               .data = pmm8155au_vreg_data,
+       },
++      {
++              .compatible = "qcom,pmm8654au-rpmh-regulators",
++              .data = pmm8654au_vreg_data,
++      },
+       {
+               .compatible = "qcom,pmx55-rpmh-regulators",
+               .data = pmx55_vreg_data,
+-- 
+2.39.2
+
diff --git a/queue-6.3/regulator-qcom-rpmh-fix-regulators-for-pm8550.patch b/queue-6.3/regulator-qcom-rpmh-fix-regulators-for-pm8550.patch
new file mode 100644 (file)
index 0000000..7e86148
--- /dev/null
@@ -0,0 +1,85 @@
+From 02717f64db0c14b667278efcbffaae522a56461c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jun 2023 14:56:07 +0300
+Subject: regulator: qcom-rpmh: Fix regulators for PM8550
+
+From: Abel Vesa <abel.vesa@linaro.org>
+
+[ Upstream commit b00de0000a69579f4d730077fe3ea8ca31404255 ]
+
+The PM8550 uses only the 515-type NLDOs, and LDOs 6 through 8 are of the
+low-voltage type, so fix the regulator definitions accordingly.
+
+Fixes: e6e3776d682d ("regulator: qcom-rpmh: Add support for PM8550 regulators")
+Signed-off-by: Abel Vesa <abel.vesa@linaro.org>
+Link: https://lore.kernel.org/r/20230605115607.921308-1-abel.vesa@linaro.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/regulator/qcom-rpmh-regulator.c | 30 ++++++++++++-------------
+ 1 file changed, 15 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
+index 4c07ec15aff20..1e2455fc1967b 100644
+--- a/drivers/regulator/qcom-rpmh-regulator.c
++++ b/drivers/regulator/qcom-rpmh-regulator.c
+@@ -1057,21 +1057,21 @@ static const struct rpmh_vreg_init_data pm8450_vreg_data[] = {
+ };
+ static const struct rpmh_vreg_init_data pm8550_vreg_data[] = {
+-      RPMH_VREG("ldo1",   "ldo%s1",  &pmic5_pldo,    "vdd-l1-l4-l10"),
++      RPMH_VREG("ldo1",   "ldo%s1",  &pmic5_nldo515,    "vdd-l1-l4-l10"),
+       RPMH_VREG("ldo2",   "ldo%s2",  &pmic5_pldo,    "vdd-l2-l13-l14"),
+-      RPMH_VREG("ldo3",   "ldo%s3",  &pmic5_nldo,    "vdd-l3"),
+-      RPMH_VREG("ldo4",   "ldo%s4",  &pmic5_nldo,    "vdd-l1-l4-l10"),
++      RPMH_VREG("ldo3",   "ldo%s3",  &pmic5_nldo515,    "vdd-l3"),
++      RPMH_VREG("ldo4",   "ldo%s4",  &pmic5_nldo515,    "vdd-l1-l4-l10"),
+       RPMH_VREG("ldo5",   "ldo%s5",  &pmic5_pldo,    "vdd-l5-l16"),
+-      RPMH_VREG("ldo6",   "ldo%s6",  &pmic5_pldo_lv, "vdd-l6-l7"),
+-      RPMH_VREG("ldo7",   "ldo%s7",  &pmic5_pldo_lv, "vdd-l6-l7"),
+-      RPMH_VREG("ldo8",   "ldo%s8",  &pmic5_pldo_lv, "vdd-l8-l9"),
++      RPMH_VREG("ldo6",   "ldo%s6",  &pmic5_pldo, "vdd-l6-l7"),
++      RPMH_VREG("ldo7",   "ldo%s7",  &pmic5_pldo, "vdd-l6-l7"),
++      RPMH_VREG("ldo8",   "ldo%s8",  &pmic5_pldo, "vdd-l8-l9"),
+       RPMH_VREG("ldo9",   "ldo%s9",  &pmic5_pldo,    "vdd-l8-l9"),
+-      RPMH_VREG("ldo10",  "ldo%s10", &pmic5_nldo,    "vdd-l1-l4-l10"),
+-      RPMH_VREG("ldo11",  "ldo%s11", &pmic5_nldo,    "vdd-l11"),
++      RPMH_VREG("ldo10",  "ldo%s10", &pmic5_nldo515,    "vdd-l1-l4-l10"),
++      RPMH_VREG("ldo11",  "ldo%s11", &pmic5_nldo515,    "vdd-l11"),
+       RPMH_VREG("ldo12",  "ldo%s12", &pmic5_pldo,    "vdd-l12"),
+       RPMH_VREG("ldo13",  "ldo%s13", &pmic5_pldo,    "vdd-l2-l13-l14"),
+       RPMH_VREG("ldo14",  "ldo%s14", &pmic5_pldo,    "vdd-l2-l13-l14"),
+-      RPMH_VREG("ldo15",  "ldo%s15", &pmic5_pldo,    "vdd-l15"),
++      RPMH_VREG("ldo15",  "ldo%s15", &pmic5_nldo515,    "vdd-l15"),
+       RPMH_VREG("ldo16",  "ldo%s16", &pmic5_pldo,    "vdd-l5-l16"),
+       RPMH_VREG("ldo17",  "ldo%s17", &pmic5_pldo,    "vdd-l17"),
+       RPMH_VREG("bob1",   "bob%s1",  &pmic5_bob,     "vdd-bob1"),
+@@ -1086,9 +1086,9 @@ static const struct rpmh_vreg_init_data pm8550vs_vreg_data[] = {
+       RPMH_VREG("smps4",  "smp%s4",  &pmic5_ftsmps525_lv, "vdd-s4"),
+       RPMH_VREG("smps5",  "smp%s5",  &pmic5_ftsmps525_lv, "vdd-s5"),
+       RPMH_VREG("smps6",  "smp%s6",  &pmic5_ftsmps525_mv, "vdd-s6"),
+-      RPMH_VREG("ldo1",   "ldo%s1",  &pmic5_nldo,   "vdd-l1"),
+-      RPMH_VREG("ldo2",   "ldo%s2",  &pmic5_nldo,   "vdd-l2"),
+-      RPMH_VREG("ldo3",   "ldo%s3",  &pmic5_nldo,   "vdd-l3"),
++      RPMH_VREG("ldo1",   "ldo%s1",  &pmic5_nldo515,   "vdd-l1"),
++      RPMH_VREG("ldo2",   "ldo%s2",  &pmic5_nldo515,   "vdd-l2"),
++      RPMH_VREG("ldo3",   "ldo%s3",  &pmic5_nldo515,   "vdd-l3"),
+       {}
+ };
+@@ -1101,9 +1101,9 @@ static const struct rpmh_vreg_init_data pm8550ve_vreg_data[] = {
+       RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525_lv, "vdd-s6"),
+       RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps525_lv, "vdd-s7"),
+       RPMH_VREG("smps8", "smp%s8", &pmic5_ftsmps525_lv, "vdd-s8"),
+-      RPMH_VREG("ldo1",  "ldo%s1", &pmic5_nldo,   "vdd-l1"),
+-      RPMH_VREG("ldo2",  "ldo%s2", &pmic5_nldo,   "vdd-l2"),
+-      RPMH_VREG("ldo3",  "ldo%s3", &pmic5_nldo,   "vdd-l3"),
++      RPMH_VREG("ldo1",  "ldo%s1", &pmic5_nldo515,   "vdd-l1"),
++      RPMH_VREG("ldo2",  "ldo%s2", &pmic5_nldo515,   "vdd-l2"),
++      RPMH_VREG("ldo3",  "ldo%s3", &pmic5_nldo515,   "vdd-l3"),
+       {}
+ };
+-- 
+2.39.2
+
diff --git a/queue-6.3/revert-media-dvb-core-fix-use-after-free-on-race-con.patch b/queue-6.3/revert-media-dvb-core-fix-use-after-free-on-race-con.patch
new file mode 100644 (file)
index 0000000..63bdc0b
--- /dev/null
@@ -0,0 +1,244 @@
+From 9144766fc049156e6e1e9add5126e4aff623e7a3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 9 Jun 2023 09:16:21 +0100
+Subject: Revert "media: dvb-core: Fix use-after-free on race condition at
+ dvb_frontend"
+
+From: Mauro Carvalho Chehab <mchehab@kernel.org>
+
+[ Upstream commit ec21a38df77a5aefbd2f70c48127003b6f259cf3 ]
+
+As reported by Thomas Voegtle <tv@lio96.de>, a DVB card sometimes does
+not initialize properly when booting Linux 6.4-rc4. This does not happen
+every time, maybe in 3 out of 4 attempts.
+
+After double-checking, the root cause seems to be related to the
+UAF fix, which is causing a race issue:
+
+[   26.332149] tda10071 7-0005: found a 'NXP TDA10071' in cold state, will try to load a firmware
+[   26.340779] tda10071 7-0005: downloading firmware from file 'dvb-fe-tda10071.fw'
+[  989.277402] INFO: task vdr:743 blocked for more than 491 seconds.
+[  989.283504]       Not tainted 6.4.0-rc5-i5 #249
+[  989.288036] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+[  989.295860] task:vdr             state:D stack:0     pid:743   ppid:711    flags:0x00004002
+[  989.295865] Call Trace:
+[  989.295867]  <TASK>
+[  989.295869]  __schedule+0x2ea/0x12d0
+[  989.295877]  ? asm_sysvec_apic_timer_interrupt+0x16/0x20
+[  989.295881]  schedule+0x57/0xc0
+[  989.295884]  schedule_preempt_disabled+0xc/0x20
+[  989.295887]  __mutex_lock.isra.16+0x237/0x480
+[  989.295891]  ? dvb_get_property.isra.10+0x1bc/0xa50
+[  989.295898]  ? dvb_frontend_stop+0x36/0x180
+[  989.338777]  dvb_frontend_stop+0x36/0x180
+[  989.338781]  dvb_frontend_open+0x2f1/0x470
+[  989.338784]  dvb_device_open+0x81/0xf0
+[  989.338804]  ? exact_lock+0x20/0x20
+[  989.338808]  chrdev_open+0x7f/0x1c0
+[  989.338811]  ? generic_permission+0x1a2/0x230
+[  989.338813]  ? link_path_walk.part.63+0x340/0x380
+[  989.338815]  ? exact_lock+0x20/0x20
+[  989.338817]  do_dentry_open+0x18e/0x450
+[  989.374030]  path_openat+0xca5/0xe00
+[  989.374031]  ? terminate_walk+0xec/0x100
+[  989.374034]  ? path_lookupat+0x93/0x140
+[  989.374036]  do_filp_open+0xc0/0x140
+[  989.374038]  ? __call_rcu_common.constprop.91+0x92/0x240
+[  989.374041]  ? __check_object_size+0x147/0x260
+[  989.374043]  ? __check_object_size+0x147/0x260
+[  989.374045]  ? alloc_fd+0xbb/0x180
+[  989.374048]  ? do_sys_openat2+0x243/0x310
+[  989.374050]  do_sys_openat2+0x243/0x310
+[  989.374052]  do_sys_open+0x52/0x80
+[  989.374055]  do_syscall_64+0x5b/0x80
+[  989.421335]  ? __task_pid_nr_ns+0x92/0xa0
+[  989.421337]  ? syscall_exit_to_user_mode+0x20/0x40
+[  989.421339]  ? do_syscall_64+0x67/0x80
+[  989.421341]  ? syscall_exit_to_user_mode+0x20/0x40
+[  989.421343]  ? do_syscall_64+0x67/0x80
+[  989.421345]  entry_SYSCALL_64_after_hwframe+0x63/0xcd
+[  989.421348] RIP: 0033:0x7fe895d067e3
+[  989.421349] RSP: 002b:00007fff933c2ba0 EFLAGS: 00000293 ORIG_RAX: 0000000000000101
+[  989.421351] RAX: ffffffffffffffda RBX: 00007fff933c2c10 RCX: 00007fe895d067e3
+[  989.421352] RDX: 0000000000000802 RSI: 00005594acdce160 RDI: 00000000ffffff9c
+[  989.421353] RBP: 0000000000000802 R08: 0000000000000000 R09: 0000000000000000
+[  989.421353] R10: 0000000000000000 R11: 0000000000000293 R12: 0000000000000001
+[  989.421354] R13: 00007fff933c2ca0 R14: 00000000ffffffff R15: 00007fff933c2c90
+[  989.421355]  </TASK>
+
+This reverts commit 6769a0b7ee0c3b31e1b22c3fadff2bfb642de23f.
+
+Fixes: 6769a0b7ee0c ("media: dvb-core: Fix use-after-free on race condition at dvb_frontend")
+Link: https://lore.kernel.org/all/da5382ad-09d6-20ac-0d53-611594b30861@lio96.de/
+Signed-off-by: Mauro Carvalho Chehab <mchehab@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/dvb-core/dvb_frontend.c | 53 +++++----------------------
+ include/media/dvb_frontend.h          |  6 +--
+ 2 files changed, 10 insertions(+), 49 deletions(-)
+
+diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
+index bc6950a5740f6..9293b058ab997 100644
+--- a/drivers/media/dvb-core/dvb_frontend.c
++++ b/drivers/media/dvb-core/dvb_frontend.c
+@@ -817,26 +817,15 @@ static void dvb_frontend_stop(struct dvb_frontend *fe)
+       dev_dbg(fe->dvb->device, "%s:\n", __func__);
+-      mutex_lock(&fe->remove_mutex);
+-
+       if (fe->exit != DVB_FE_DEVICE_REMOVED)
+               fe->exit = DVB_FE_NORMAL_EXIT;
+       mb();
+-      if (!fepriv->thread) {
+-              mutex_unlock(&fe->remove_mutex);
++      if (!fepriv->thread)
+               return;
+-      }
+       kthread_stop(fepriv->thread);
+-      mutex_unlock(&fe->remove_mutex);
+-
+-      if (fepriv->dvbdev->users < -1) {
+-              wait_event(fepriv->dvbdev->wait_queue,
+-                         fepriv->dvbdev->users == -1);
+-      }
+-
+       sema_init(&fepriv->sem, 1);
+       fepriv->state = FESTATE_IDLE;
+@@ -2780,13 +2769,9 @@ static int dvb_frontend_open(struct inode *inode, struct file *file)
+       struct dvb_adapter *adapter = fe->dvb;
+       int ret;
+-      mutex_lock(&fe->remove_mutex);
+-
+       dev_dbg(fe->dvb->device, "%s:\n", __func__);
+-      if (fe->exit == DVB_FE_DEVICE_REMOVED) {
+-              ret = -ENODEV;
+-              goto err_remove_mutex;
+-      }
++      if (fe->exit == DVB_FE_DEVICE_REMOVED)
++              return -ENODEV;
+       if (adapter->mfe_shared == 2) {
+               mutex_lock(&adapter->mfe_lock);
+@@ -2794,8 +2779,7 @@ static int dvb_frontend_open(struct inode *inode, struct file *file)
+                       if (adapter->mfe_dvbdev &&
+                           !adapter->mfe_dvbdev->writers) {
+                               mutex_unlock(&adapter->mfe_lock);
+-                              ret = -EBUSY;
+-                              goto err_remove_mutex;
++                              return -EBUSY;
+                       }
+                       adapter->mfe_dvbdev = dvbdev;
+               }
+@@ -2818,10 +2802,8 @@ static int dvb_frontend_open(struct inode *inode, struct file *file)
+                       while (mferetry-- && (mfedev->users != -1 ||
+                                             mfepriv->thread)) {
+                               if (msleep_interruptible(500)) {
+-                                      if (signal_pending(current)) {
+-                                              ret = -EINTR;
+-                                              goto err_remove_mutex;
+-                                      }
++                                      if (signal_pending(current))
++                                              return -EINTR;
+                               }
+                       }
+@@ -2833,8 +2815,7 @@ static int dvb_frontend_open(struct inode *inode, struct file *file)
+                               if (mfedev->users != -1 ||
+                                   mfepriv->thread) {
+                                       mutex_unlock(&adapter->mfe_lock);
+-                                      ret = -EBUSY;
+-                                      goto err_remove_mutex;
++                                      return -EBUSY;
+                               }
+                               adapter->mfe_dvbdev = dvbdev;
+                       }
+@@ -2893,8 +2874,6 @@ static int dvb_frontend_open(struct inode *inode, struct file *file)
+       if (adapter->mfe_shared)
+               mutex_unlock(&adapter->mfe_lock);
+-
+-      mutex_unlock(&fe->remove_mutex);
+       return ret;
+ err3:
+@@ -2916,9 +2895,6 @@ static int dvb_frontend_open(struct inode *inode, struct file *file)
+ err0:
+       if (adapter->mfe_shared)
+               mutex_unlock(&adapter->mfe_lock);
+-
+-err_remove_mutex:
+-      mutex_unlock(&fe->remove_mutex);
+       return ret;
+ }
+@@ -2929,8 +2905,6 @@ static int dvb_frontend_release(struct inode *inode, struct file *file)
+       struct dvb_frontend_private *fepriv = fe->frontend_priv;
+       int ret;
+-      mutex_lock(&fe->remove_mutex);
+-
+       dev_dbg(fe->dvb->device, "%s:\n", __func__);
+       if ((file->f_flags & O_ACCMODE) != O_RDONLY) {
+@@ -2952,18 +2926,10 @@ static int dvb_frontend_release(struct inode *inode, struct file *file)
+               }
+               mutex_unlock(&fe->dvb->mdev_lock);
+ #endif
++              if (fe->exit != DVB_FE_NO_EXIT)
++                      wake_up(&dvbdev->wait_queue);
+               if (fe->ops.ts_bus_ctrl)
+                       fe->ops.ts_bus_ctrl(fe, 0);
+-
+-              if (fe->exit != DVB_FE_NO_EXIT) {
+-                      mutex_unlock(&fe->remove_mutex);
+-                      wake_up(&dvbdev->wait_queue);
+-              } else {
+-                      mutex_unlock(&fe->remove_mutex);
+-              }
+-
+-      } else {
+-              mutex_unlock(&fe->remove_mutex);
+       }
+       dvb_frontend_put(fe);
+@@ -3064,7 +3030,6 @@ int dvb_register_frontend(struct dvb_adapter *dvb,
+       fepriv = fe->frontend_priv;
+       kref_init(&fe->refcount);
+-      mutex_init(&fe->remove_mutex);
+       /*
+        * After initialization, there need to be two references: one
+diff --git a/include/media/dvb_frontend.h b/include/media/dvb_frontend.h
+index 367d5381217b5..e7c44870f20de 100644
+--- a/include/media/dvb_frontend.h
++++ b/include/media/dvb_frontend.h
+@@ -686,10 +686,7 @@ struct dtv_frontend_properties {
+  * @id:                       Frontend ID
+  * @exit:             Used to inform the DVB core that the frontend
+  *                    thread should exit (usually, means that the hardware
+- *                    got disconnected).
+- * @remove_mutex:     mutex that avoids a race condition between a callback
+- *                    called when the hardware is disconnected and the
+- *                    file_operations of dvb_frontend.
++ *                    got disconnected.
+  */
+ struct dvb_frontend {
+@@ -707,7 +704,6 @@ struct dvb_frontend {
+       int (*callback)(void *adapter_priv, int component, int cmd, int arg);
+       int id;
+       unsigned int exit;
+-      struct mutex remove_mutex;
+ };
+ /**
+-- 
+2.39.2
+
diff --git a/queue-6.3/sctp-fix-an-error-code-in-sctp_sf_eat_auth.patch b/queue-6.3/sctp-fix-an-error-code-in-sctp_sf_eat_auth.patch
new file mode 100644 (file)
index 0000000..7ba7607
--- /dev/null
@@ -0,0 +1,38 @@
+From dbcf166a9539d01fe76e7b0c99a1fd325ec57469 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 9 Jun 2023 14:05:19 +0300
+Subject: sctp: fix an error code in sctp_sf_eat_auth()
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+[ Upstream commit 75e6def3b26736e7ff80639810098c9074229737 ]
+
+The sctp_sf_eat_auth() function is supposed to return enum
+sctp_disposition values, and returning a kernel error code will cause
+issues in the caller.  Change -ENOMEM to SCTP_DISPOSITION_NOMEM.
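+
+A minimal userspace sketch of why this matters (the enum and its values
+below are illustrative stand-ins, not the real SCTP definitions):
+
+  #include <stdio.h>
+  #include <errno.h>
+
+  enum disposition { DISP_CONSUME = 0, DISP_NOMEM = 6 };
+
+  /* Buggy variant: leaks a kernel errno through an enum return type. */
+  static enum disposition eat_chunk_buggy(int alloc_ok)
+  {
+          return alloc_ok ? DISP_CONSUME : -ENOMEM; /* -12 is no disposition */
+  }
+
+  /* Fixed variant, mirroring the patch: report the failure in-band. */
+  static enum disposition eat_chunk_fixed(int alloc_ok)
+  {
+          return alloc_ok ? DISP_CONSUME : DISP_NOMEM;
+  }
+
+  static void handle(enum disposition d)
+  {
+          switch (d) {
+          case DISP_CONSUME: printf("chunk consumed\n");        break;
+          case DISP_NOMEM:   printf("out of memory handled\n"); break;
+          default:           printf("unexpected disposition %d\n", d);
+          }
+  }
+
+  int main(void)
+  {
+          handle(eat_chunk_buggy(0)); /* prints "unexpected disposition -12" */
+          handle(eat_chunk_fixed(0)); /* prints "out of memory handled" */
+          return 0;
+  }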
+
+Fixes: 65b07e5d0d09 ("[SCTP]: API updates to suport SCTP-AUTH extensions.")
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Acked-by: Xin Long <lucien.xin@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sctp/sm_statefuns.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index ce54261712062..07edf56f7b8c6 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -4484,7 +4484,7 @@ enum sctp_disposition sctp_sf_eat_auth(struct net *net,
+                                   SCTP_AUTH_NEW_KEY, GFP_ATOMIC);
+               if (!ev)
+-                      return -ENOMEM;
++                      return SCTP_DISPOSITION_NOMEM;
+               sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
+                               SCTP_ULPEVENT(ev));
+-- 
+2.39.2
+
diff --git a/queue-6.3/selftests-forwarding-hw_stats_l3-set-addrgenmode-in-.patch b/queue-6.3/selftests-forwarding-hw_stats_l3-set-addrgenmode-in-.patch
new file mode 100644 (file)
index 0000000..0cf4683
--- /dev/null
@@ -0,0 +1,71 @@
+From c58df12331e1e1f3f3e4b7e6387842d89c52abec Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Jun 2023 16:34:58 +0200
+Subject: selftests: forwarding: hw_stats_l3: Set addrgenmode in a separate
+ step
+
+From: Danielle Ratson <danieller@nvidia.com>
+
+[ Upstream commit bef68e201e538eaa3a91f97aae8161eb2d0a8ed7 ]
+
+Setting the IPv6 address generation mode of a net device during its
+creation never worked, but after commit b0ad3c179059 ("rtnetlink: call
+validate_linkmsg in rtnl_create_link") it explicitly fails [1]. The
+failure is caused by the fact that validate_linkmsg() is called before
+the net device is registered, when it still does not have an 'inet6_dev'.
+
+Likewise, raising the net device before setting the address generation
+mode is meaningless, because by the time the mode is set, the address
+has already been generated.
+
+Therefore, fix the test to first create the net device, then set its
+IPv6 address generation mode and finally bring it up.
+
+[1]
+ # ip link add name mydev addrgenmode eui64 type dummy
+ RTNETLINK answers: Address family not supported by protocol
+
+Fixes: ba95e7930957 ("selftests: forwarding: hw_stats_l3: Add a new test")
+Signed-off-by: Danielle Ratson <danieller@nvidia.com>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Signed-off-by: Petr Machata <petrm@nvidia.com>
+Link: https://lore.kernel.org/r/f3b05d85b2bc0c3d6168fe8f7207c6c8365703db.1686580046.git.petrm@nvidia.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/net/forwarding/hw_stats_l3.sh | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+diff --git a/tools/testing/selftests/net/forwarding/hw_stats_l3.sh b/tools/testing/selftests/net/forwarding/hw_stats_l3.sh
+index 9c1f76e108af1..1a936ffbacee7 100755
+--- a/tools/testing/selftests/net/forwarding/hw_stats_l3.sh
++++ b/tools/testing/selftests/net/forwarding/hw_stats_l3.sh
+@@ -84,8 +84,9 @@ h2_destroy()
+ router_rp1_200_create()
+ {
+-      ip link add name $rp1.200 up \
+-              link $rp1 addrgenmode eui64 type vlan id 200
++      ip link add name $rp1.200 link $rp1 type vlan id 200
++      ip link set dev $rp1.200 addrgenmode eui64
++      ip link set dev $rp1.200 up
+       ip address add dev $rp1.200 192.0.2.2/28
+       ip address add dev $rp1.200 2001:db8:1::2/64
+       ip stats set dev $rp1.200 l3_stats on
+@@ -256,9 +257,11 @@ reapply_config()
+       router_rp1_200_destroy
+-      ip link add name $rp1.200 link $rp1 addrgenmode none type vlan id 200
++      ip link add name $rp1.200 link $rp1 type vlan id 200
++      ip link set dev $rp1.200 addrgenmode none
+       ip stats set dev $rp1.200 l3_stats on
+-      ip link set dev $rp1.200 up addrgenmode eui64
++      ip link set dev $rp1.200 addrgenmode eui64
++      ip link set dev $rp1.200 up
+       ip address add dev $rp1.200 192.0.2.2/28
+       ip address add dev $rp1.200 2001:db8:1::2/64
+ }
+-- 
+2.39.2
+
diff --git a/queue-6.3/selftests-ptp-fix-timestamp-printf-format-for-ptp_sy.patch b/queue-6.3/selftests-ptp-fix-timestamp-printf-format-for-ptp_sy.patch
new file mode 100644 (file)
index 0000000..0ef14a6
--- /dev/null
@@ -0,0 +1,50 @@
+From 72142ba7a19d45915f7f10fa0846ab9143f38050 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 15 Jun 2023 09:34:04 +0100
+Subject: selftests/ptp: Fix timestamp printf format for PTP_SYS_OFFSET
+
+From: Alex Maftei <alex.maftei@amd.com>
+
+[ Upstream commit 76a4c8b82938bc5020b67663db41f451684bf327 ]
+
+Previously, timestamps were printed using "%lld.%u", which is incorrect
+for nanosecond values lower than 100,000,000: the nanoseconds are
+fractional digits, so their leading zeros are meaningful.
+
+This patch changes the format strings to "%lld.%09u" in order to add
+leading zeros to the nanosecond value.
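+
+A small standalone C program shows the difference the zero padding makes
+(the timestamp values here are arbitrary):
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+          long long sec = 1686830000;
+          unsigned int nsec = 7000000;  /* 0.007 s */
+
+          /* Old format: leading zeros lost, reads as 0.7 s */
+          printf("%lld.%u\n", sec, nsec);    /* 1686830000.7000000 */
+          /* New format: padded to 9 digits, reads as 0.007 s */
+          printf("%lld.%09u\n", sec, nsec);  /* 1686830000.007000000 */
+          return 0;
+  }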
+
+Fixes: 568ebc5985f5 ("ptp: add the PTP_SYS_OFFSET ioctl to the testptp program")
+Fixes: 4ec54f95736f ("ptp: Fix compiler warnings in the testptp utility")
+Fixes: 6ab0e475f1f3 ("Documentation: fix misc. warnings")
+Signed-off-by: Alex Maftei <alex.maftei@amd.com>
+Acked-by: Richard Cochran <richardcochran@gmail.com>
+Link: https://lore.kernel.org/r/20230615083404.57112-1-alex.maftei@amd.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/ptp/testptp.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/tools/testing/selftests/ptp/testptp.c b/tools/testing/selftests/ptp/testptp.c
+index 198ad5f321878..cfa9562f3cd83 100644
+--- a/tools/testing/selftests/ptp/testptp.c
++++ b/tools/testing/selftests/ptp/testptp.c
+@@ -502,11 +502,11 @@ int main(int argc, char *argv[])
+                       interval = t2 - t1;
+                       offset = (t2 + t1) / 2 - tp;
+-                      printf("system time: %lld.%u\n",
++                      printf("system time: %lld.%09u\n",
+                               (pct+2*i)->sec, (pct+2*i)->nsec);
+-                      printf("phc    time: %lld.%u\n",
++                      printf("phc    time: %lld.%09u\n",
+                               (pct+2*i+1)->sec, (pct+2*i+1)->nsec);
+-                      printf("system time: %lld.%u\n",
++                      printf("system time: %lld.%09u\n",
+                               (pct+2*i+2)->sec, (pct+2*i+2)->nsec);
+                       printf("system/phc clock time offset is %" PRId64 " ns\n"
+                              "system     clock time delay  is %" PRId64 " ns\n",
+-- 
+2.39.2
+
diff --git a/queue-6.3/selftests-tc-testing-fix-error-failed-to-find-target.patch b/queue-6.3/selftests-tc-testing-fix-error-failed-to-find-target.patch
new file mode 100644 (file)
index 0000000..81aecc5
--- /dev/null
@@ -0,0 +1,85 @@
+From 1e70f9e3c758daa7a84622aac19bfb694f7dccdf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Jun 2023 09:57:10 +0200
+Subject: selftests/tc-testing: Fix Error: failed to find target LOG
+
+From: Vlad Buslov <vladbu@nvidia.com>
+
+[ Upstream commit b849c566ee9c6ed78288a522278dcaf419f8e239 ]
+
+Add missing netfilter config dependency.
+
+Fixes the following example error when running tests via tdc.sh for all
+XT tests:
+
+ # $ sudo ./tdc.py -d eth2 -e 2029
+ # Test 2029: Add xt action with log-prefix
+ # exit: 255
+ # exit: 0
+ #  failed to find target LOG
+ #
+ # bad action parsing
+ # parse_action: bad value (7:xt)!
+ # Illegal "action"
+ #
+ # -----> teardown stage *** Could not execute: "$TC actions flush action xt"
+ #
+ # -----> teardown stage *** Error message: "Error: Cannot flush unknown TC action.
+ # We have an error flushing
+ # "
+ # returncode 1; expected [0]
+ #
+ # -----> teardown stage *** Aborting test run.
+ #
+ # <_io.BufferedReader name=3> *** stdout ***
+ #
+ # <_io.BufferedReader name=5> *** stderr ***
+ # "-----> teardown stage" did not complete successfully
+ # Exception <class '__main__.PluginMgrTestFail'> ('teardown', ' failed to find target LOG\n\nbad action parsing\nparse_action: bad value (7:xt)!\nIllegal "action"\n', '"-----> teardown stage" did not complete successfully') (caught in test_runner, running test 2 2029 Add xt action with log-prefix stage teardown)
+ # ---------------
+ # traceback
+ #   File "/images/src/linux/tools/testing/selftests/tc-testing/./tdc.py", line 495, in test_runner
+ #     res = run_one_test(pm, args, index, tidx)
+ #   File "/images/src/linux/tools/testing/selftests/tc-testing/./tdc.py", line 434, in run_one_test
+ #     prepare_env(args, pm, 'teardown', '-----> teardown stage', tidx['teardown'], procout)
+ #   File "/images/src/linux/tools/testing/selftests/tc-testing/./tdc.py", line 245, in prepare_env
+ #     raise PluginMgrTestFail(
+ # ---------------
+ # accumulated output for this test:
+ #  failed to find target LOG
+ #
+ # bad action parsing
+ # parse_action: bad value (7:xt)!
+ # Illegal "action"
+ #
+ # ---------------
+ #
+ # All test results:
+ #
+ # 1..1
+ # ok 1 2029 - Add xt action with log-prefix # skipped - "-----> teardown stage" did not complete successfully
+
+Fixes: 910d504bc187 ("selftests/tc-testings: add selftests for xt action")
+Signed-off-by: Vlad Buslov <vladbu@nvidia.com>
+Reviewed-by: Pedro Tammela <pctammela@mojatatu.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/tc-testing/config | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/tools/testing/selftests/tc-testing/config b/tools/testing/selftests/tc-testing/config
+index 4638c63a339ff..aec4de8bea78b 100644
+--- a/tools/testing/selftests/tc-testing/config
++++ b/tools/testing/selftests/tc-testing/config
+@@ -6,6 +6,7 @@ CONFIG_NF_CONNTRACK_MARK=y
+ CONFIG_NF_CONNTRACK_ZONES=y
+ CONFIG_NF_CONNTRACK_LABELS=y
+ CONFIG_NF_NAT=m
++CONFIG_NETFILTER_XT_TARGET_LOG=m
+ CONFIG_NET_SCHED=y
+-- 
+2.39.2
+
diff --git a/queue-6.3/selftests-tc-testing-fix-error-specified-qdisc-kind-.patch b/queue-6.3/selftests-tc-testing-fix-error-specified-qdisc-kind-.patch
new file mode 100644 (file)
index 0000000..bd34c2b
--- /dev/null
@@ -0,0 +1,78 @@
+From ec81cd50e7b1846dbc74d73dd3a6f7381ec2d4d2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Jun 2023 09:57:09 +0200
+Subject: selftests/tc-testing: Fix Error: Specified qdisc kind is unknown.
+
+From: Vlad Buslov <vladbu@nvidia.com>
+
+[ Upstream commit aef6e908b54200d04f2d77dab31509fcff2e60ae ]
+
+All TEQL tests assume that the sch_teql module is loaded. Load the
+module in tdc.sh before running the qdisc tests.
+
+Fixes the following example error when running tests via tdc.sh for all
+TEQL tests:
+
+ # $ sudo ./tdc.py -d eth2 -e 84a0
+ #  -- ns/SubPlugin.__init__
+ # Test 84a0: Create TEQL with default setting
+ # exit: 2
+ # exit: 0
+ # Error: Specified qdisc kind is unknown.
+ #
+ # -----> teardown stage *** Could not execute: "$TC qdisc del dev $DUMMY handle 1: root"
+ #
+ # -----> teardown stage *** Error message: "Error: Invalid handle.
+ # "
+ # returncode 2; expected [0]
+ #
+ # -----> teardown stage *** Aborting test run.
+ #
+ # <_io.BufferedReader name=3> *** stdout ***
+ #
+ # <_io.BufferedReader name=5> *** stderr ***
+ # "-----> teardown stage" did not complete successfully
+ # Exception <class '__main__.PluginMgrTestFail'> ('teardown', 'Error: Specified qdisc kind is unknown.\n', '"-----> teardown stage" did not complete successfully') (caught in test_runner, running test 2 84a0 Create TEQL with default setting stage teardown)
+ # ---------------
+ # traceback
+ #   File "/images/src/linux/tools/testing/selftests/tc-testing/./tdc.py", line 495, in test_runner
+ #     res = run_one_test(pm, args, index, tidx)
+ #   File "/images/src/linux/tools/testing/selftests/tc-testing/./tdc.py", line 434, in run_one_test
+ #     prepare_env(args, pm, 'teardown', '-----> teardown stage', tidx['teardown'], procout)
+ #   File "/images/src/linux/tools/testing/selftests/tc-testing/./tdc.py", line 245, in prepare_env
+ #     raise PluginMgrTestFail(
+ # ---------------
+ # accumulated output for this test:
+ # Error: Specified qdisc kind is unknown.
+ #
+ # ---------------
+ #
+ # All test results:
+ #
+ # 1..1
+ # ok 1 84a0 - Create TEQL with default setting # skipped - "-----> teardown stage" did not complete successfully
+
+Fixes: cc62fbe114c9 ("selftests/tc-testing: add selftests for teql qdisc")
+Signed-off-by: Vlad Buslov <vladbu@nvidia.com>
+Reviewed-by: Victor Nogueira <victor@mojatatu.com>
+Reviewed-by: Pedro Tammela <pctammela@mojatatu.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/tc-testing/tdc.sh | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/tools/testing/selftests/tc-testing/tdc.sh b/tools/testing/selftests/tc-testing/tdc.sh
+index afb0cd86fa3df..eb357bd7923c0 100755
+--- a/tools/testing/selftests/tc-testing/tdc.sh
++++ b/tools/testing/selftests/tc-testing/tdc.sh
+@@ -2,5 +2,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ modprobe netdevsim
++modprobe sch_teql
+ ./tdc.py -c actions --nobuildebpf
+ ./tdc.py -c qdisc
+-- 
+2.39.2
+
diff --git a/queue-6.3/selftests-tc-testing-fix-sfb-db-test.patch b/queue-6.3/selftests-tc-testing-fix-sfb-db-test.patch
new file mode 100644 (file)
index 0000000..a3c39b1
--- /dev/null
@@ -0,0 +1,56 @@
+From 6ca82858f40b73b6cbc760f6f890417e07e2d66f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Jun 2023 09:57:11 +0200
+Subject: selftests/tc-testing: Fix SFB db test
+
+From: Vlad Buslov <vladbu@nvidia.com>
+
+[ Upstream commit b39d8c41c7a8336ce85c376b5d4906089524a0ae ]
+
+Setting a very small db value such as 10ms introduces rounding errors
+when converting to/from jiffies on some kernel configs. For example, on
+a 250Hz kernel the actual value will be set to 12ms, which causes the
+test to fail:
+
+ # $ sudo ./tdc.py  -d eth2 -e 3410
+ #  -- ns/SubPlugin.__init__
+ # Test 3410: Create SFB with db setting
+ #
+ # All test results:
+ #
+ # 1..1
+ # not ok 1 3410 - Create SFB with db setting
+ #         Could not match regex pattern. Verify command output:
+ # qdisc sfb 1: root refcnt 2 rehash 600s db 12ms limit 1000p max 25p target 20p increment 0.000503548 decrement 4.57771e-05 penalty_rate 10pps penalty_burst 20p
+
+Set the value to 100ms instead, which currently seems to work on 100Hz,
+250Hz, 300Hz and 1000Hz kernel configs.
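+
+A simplified userspace model of the conversion (it ignores the kernel's
+exact fixed-point helpers, but reproduces the 250Hz case) shows the
+round-trip error for 10ms and the stability of 100ms:
+
+  #include <stdio.h>
+
+  /* Round up, as msecs_to_jiffies() effectively does. */
+  static unsigned long ms_to_jiffies(unsigned int ms, unsigned int hz)
+  {
+          return ((unsigned long)ms * hz + 999) / 1000;
+  }
+
+  static unsigned int jiffies_to_ms(unsigned long j, unsigned int hz)
+  {
+          return (unsigned int)(j * 1000 / hz);
+  }
+
+  int main(void)
+  {
+          const unsigned int hz_list[] = { 100, 250, 300, 1000 };
+          unsigned int i;
+
+          for (i = 0; i < sizeof(hz_list) / sizeof(hz_list[0]); i++) {
+                  unsigned int hz = hz_list[i];
+
+                  printf("HZ=%4u: 10ms -> %2ums, 100ms -> %3ums\n", hz,
+                         jiffies_to_ms(ms_to_jiffies(10, hz), hz),
+                         jiffies_to_ms(ms_to_jiffies(100, hz), hz));
+          }
+          return 0; /* at HZ=250, 10ms comes back as 12ms */
+  }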
+
+Fixes: 6ad92dc56fca ("selftests/tc-testing: add selftests for sfb qdisc")
+Signed-off-by: Vlad Buslov <vladbu@nvidia.com>
+Reviewed-by: Pedro Tammela <pctammela@mojatatu.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfb.json | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfb.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfb.json
+index ba2f5e79cdbfe..e21c7f22c6d4c 100644
+--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfb.json
++++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfb.json
+@@ -58,10 +58,10 @@
+         "setup": [
+             "$IP link add dev $DUMMY type dummy || /bin/true"
+         ],
+-        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root sfb db 10",
++        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root sfb db 100",
+         "expExitCode": "0",
+         "verifyCmd": "$TC qdisc show dev $DUMMY",
+-        "matchPattern": "qdisc sfb 1: root refcnt [0-9]+ rehash 600s db 10ms",
++        "matchPattern": "qdisc sfb 1: root refcnt [0-9]+ rehash 600s db 100ms",
+         "matchCount": "1",
+         "teardown": [
+             "$TC qdisc del dev $DUMMY handle 1: root",
+-- 
+2.39.2
+
index 866cec5295aba75b32e241c792e80cd331370a1c..639da1a20fa660f8f4aa98b4029c5a9b05e84042 100644 (file)
@@ -105,3 +105,78 @@ usb-gadget-udc-renesas_usb3-fix-rz-v2m-modprobe-bind-error.patch
 usb-dwc3-qcom-fix-null-deref-on-suspend.patch
 usb-dwc3-fix-use-after-free-on-core-driver-unbind.patch
 usb-dwc3-gadget-reset-num-trbs-before-giving-back-the-request.patch
+rdma-rtrs-fix-the-last-iu-buf-leak-in-err-path.patch
+rdma-rtrs-fix-rxe_dealloc_pd-warning.patch
+rdma-rxe-fix-packet-length-checks.patch
+rdma-rxe-fix-ref-count-error-in-check_rkey.patch
+rdma-bnxt_re-fix-reporting-active_-speed-width-attri.patch
+spi-cadence-quadspi-add-missing-check-for-dma_set_ma.patch
+spi-fsl-dspi-avoid-sck-glitches-with-continuous-tran.patch
+regulator-qcom-rpmh-add-support-for-pmm8654au-regula.patch
+regulator-qcom-rpmh-fix-regulators-for-pm8550.patch
+netfilter-nf_tables-integrate-pipapo-into-commit-pro.patch
+netfilter-nfnetlink-skip-error-delivery-on-batch-in-.patch
+ice-do-not-busy-wait-to-read-gnss-data.patch
+ice-don-t-dereference-null-in-ice_gnss_read-error-pa.patch
+ice-fix-xdp-memory-leak-when-nic-is-brought-up-and-d.patch
+netfilter-nf_tables-incorrect-error-path-handling-wi.patch
+net-enetc-correct-the-indexes-of-highest-and-2nd-hig.patch
+ping6-fix-send-to-link-local-addresses-with-vrf.patch
+igb-fix-extts-capture-value-format-for-82580-i354-i3.patch
+net-sched-act_pedit-remove-extra-check-for-key-type.patch
+net-sched-act_pedit-parse-l3-header-for-l4-offset.patch
+net-renesas-rswitch-fix-timestamp-feature-after-all-.patch
+octeontx2-af-fix-promiscuous-mode.patch
+net-sched-taprio-fix-slab-out-of-bounds-read-in-tapr.patch
+net-sched-cls_u32-fix-reference-counter-leak-leading.patch
+wifi-mac80211-fix-link-activation-settings-order.patch
+wifi-cfg80211-fix-link-del-callback-to-call-correct-.patch
+wifi-mac80211-take-lock-before-setting-vif-links.patch
+rdma-rxe-fix-the-use-before-initialization-error-of-.patch
+iavf-remove-mask-from-iavf_irq_enable_queues.patch
+octeontx2-af-fixed-resource-availability-check.patch
+octeontx2-af-fix-lbk-link-credits-on-cn10k.patch
+rdma-mlx5-initiate-dropless-rq-for-raw-ethernet-func.patch
+rdma-mlx5-create-an-indirect-flow-table-for-steering.patch
+rdma-cma-always-set-static-rate-to-0-for-roce.patch
+ib-uverbs-fix-to-consider-event-queue-closing-also-u.patch
+rdma-mlx5-fix-affinity-assignment.patch
+ib-isert-fix-dead-lock-in-ib_isert.patch
+ib-isert-fix-possible-list-corruption-in-cma-handler.patch
+ib-isert-fix-incorrect-release-of-isert-connection.patch
+net-ethtool-correct-max-attribute-value-for-stats.patch
+wifi-mac80211-fragment-per-sta-profile-correctly.patch
+ipvlan-fix-bound-dev-checking-for-ipv6-l3s-mode.patch
+sctp-fix-an-error-code-in-sctp_sf_eat_auth.patch
+igc-clean-the-tx-buffer-and-tx-descriptor-ring.patch
+igc-fix-possible-system-crash-when-loading-module.patch
+igb-fix-nvm.ops.read-error-handling.patch
+net-phylink-report-correct-max-speed-for-qusgmii.patch
+net-phylink-use-a-dedicated-helper-to-parse-usgmii-c.patch
+drm-nouveau-don-t-detect-dsm-for-non-nvidia-device.patch
+drm-bridge-ti-sn65dsi86-avoid-possible-buffer-overfl.patch
+drm-nouveau-dp-check-for-null-nv_connector-native_mo.patch
+drm-nouveau-add-nv_encoder-pointer-check-for-null.patch
+net-ethernet-ti-am65-cpsw-call-of_node_put-on-error-.patch
+selftests-tc-testing-fix-error-specified-qdisc-kind-.patch
+selftests-tc-testing-fix-error-failed-to-find-target.patch
+selftests-tc-testing-fix-sfb-db-test.patch
+net-sched-act_ct-fix-promotion-of-offloaded-unreplie.patch
+net-sched-refactor-qdisc_graft-for-ingress-and-clsac.patch
+net-sched-qdisc_destroy-old-ingress-and-clsact-qdisc.patch
+selftests-forwarding-hw_stats_l3-set-addrgenmode-in-.patch
+cifs-fix-lease-break-oops-in-xfstest-generic-098.patch
+rdma-rxe-fix-rxe_cq_post.patch
+revert-media-dvb-core-fix-use-after-free-on-race-con.patch
+ext4-drop-the-call-to-ext4_error-from-ext4_get_group.patch
+ice-fix-ice-module-unload.patch
+net-sched-cls_api-fix-lockup-on-flushing-explicitly-.patch
+net-dsa-felix-fix-taprio-guard-band-overflow-at-10mb.patch
+net-lapbether-only-support-ethernet-devices.patch
+net-macsec-fix-double-free-of-percpu-stats.patch
+sfc-fix-xdp-queues-mode-with-legacy-irq.patch
+dm-don-t-lock-fs-when-the-map-is-null-during-suspend.patch
+net-tipc-resize-nlattr-array-to-correct-size.patch
+selftests-ptp-fix-timestamp-printf-format-for-ptp_sy.patch
+octeon_ep-add-missing-check-for-ioremap.patch
+afs-fix-vlserver-probe-rtt-handling.patch
diff --git a/queue-6.3/sfc-fix-xdp-queues-mode-with-legacy-irq.patch b/queue-6.3/sfc-fix-xdp-queues-mode-with-legacy-irq.patch
new file mode 100644 (file)
index 0000000..87e4dff
--- /dev/null
@@ -0,0 +1,90 @@
+From 543b2dee145d5fb9e49647b62bfebea178a443cf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Jun 2023 15:38:54 +0200
+Subject: sfc: fix XDP queues mode with legacy IRQ
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Íñigo Huguet <ihuguet@redhat.com>
+
+[ Upstream commit e84a1e1e683f3558e30f437d7c99df35afb8b52c ]
+
+In systems without MSI-X capabilities, xdp_txq_queues_mode is calculated
+in efx_allocate_msix_channels, but when enabling MSI-X fails, it was not
+changed to a proper default value. This led the driver to think that it
+had dedicated XDP queues when it didn't.
+
+Fix it by setting xdp_txq_queues_mode to the correct value if the driver
+falls back to MSI or legacy IRQ mode. The correct value is
+EFX_XDP_TX_QUEUES_BORROWED because there are no dedicated XDP queues.
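+
+The shape of the bug, as a short sketch (the enum names and values below
+are illustrative, not the actual sfc definitions): the fallback branches
+have to pick an explicit default just like the MSI-X path does.
+
+  #include <stdio.h>
+
+  enum xdp_queues_mode { XDP_DEDICATED = 0, XDP_SHARED, XDP_BORROWED };
+
+  struct nic {
+          int have_msix;
+          enum xdp_queues_mode xdp_txq_queues_mode; /* zeroed at alloc */
+  };
+
+  static void probe_interrupts(struct nic *nic, int fixed)
+  {
+          if (nic->have_msix) {
+                  nic->xdp_txq_queues_mode = XDP_DEDICATED;
+                  return;
+          }
+          /* MSI / legacy IRQ: no dedicated XDP queues exist. Without
+           * the fix the mode is left at its stale/zero value. */
+          if (fixed)
+                  nic->xdp_txq_queues_mode = XDP_BORROWED;
+  }
+
+  int main(void)
+  {
+          struct nic nic = { 0, XDP_DEDICATED };
+
+          probe_interrupts(&nic, 0);
+          printf("buggy fallback: mode %d (claims dedicated queues)\n",
+                 nic.xdp_txq_queues_mode);
+          probe_interrupts(&nic, 1);
+          printf("fixed fallback: mode %d (borrowed)\n",
+                 nic.xdp_txq_queues_mode);
+          return 0;
+  }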
+
+The issue is easily visible if the kernel is started with pci=nomsi:
+a call trace is then shown. It is not shown only with sfc's modparam
+interrupt_mode=2. Call trace example:
+ WARNING: CPU: 2 PID: 663 at drivers/net/ethernet/sfc/efx_channels.c:828 efx_set_xdp_channels+0x124/0x260 [sfc]
+ [...skip...]
+ Call Trace:
+  <TASK>
+  efx_set_channels+0x5c/0xc0 [sfc]
+  efx_probe_nic+0x9b/0x15a [sfc]
+  efx_probe_all+0x10/0x1a2 [sfc]
+  efx_pci_probe_main+0x12/0x156 [sfc]
+  efx_pci_probe_post_io+0x18/0x103 [sfc]
+  efx_pci_probe.cold+0x154/0x257 [sfc]
+  local_pci_probe+0x42/0x80
+
+Fixes: 6215b608a8c4 ("sfc: last resort fallback for lack of xdp tx queues")
+Reported-by: Yanghang Liu <yanghliu@redhat.com>
+Signed-off-by: Íñigo Huguet <ihuguet@redhat.com>
+Acked-by: Martin Habets <habetsm.xilinx@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/sfc/efx_channels.c       | 2 ++
+ drivers/net/ethernet/sfc/siena/efx_channels.c | 2 ++
+ 2 files changed, 4 insertions(+)
+
+diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
+index fcea3ea809d77..41b33a75333cd 100644
+--- a/drivers/net/ethernet/sfc/efx_channels.c
++++ b/drivers/net/ethernet/sfc/efx_channels.c
+@@ -301,6 +301,7 @@ int efx_probe_interrupts(struct efx_nic *efx)
+               efx->tx_channel_offset = 0;
+               efx->n_xdp_channels = 0;
+               efx->xdp_channel_offset = efx->n_channels;
++              efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
+               rc = pci_enable_msi(efx->pci_dev);
+               if (rc == 0) {
+                       efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
+@@ -322,6 +323,7 @@ int efx_probe_interrupts(struct efx_nic *efx)
+               efx->tx_channel_offset = efx_separate_tx_channels ? 1 : 0;
+               efx->n_xdp_channels = 0;
+               efx->xdp_channel_offset = efx->n_channels;
++              efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
+               efx->legacy_irq = efx->pci_dev->irq;
+       }
+diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c
+index 06ed74994e366..1776f7f8a7a90 100644
+--- a/drivers/net/ethernet/sfc/siena/efx_channels.c
++++ b/drivers/net/ethernet/sfc/siena/efx_channels.c
+@@ -302,6 +302,7 @@ int efx_siena_probe_interrupts(struct efx_nic *efx)
+               efx->tx_channel_offset = 0;
+               efx->n_xdp_channels = 0;
+               efx->xdp_channel_offset = efx->n_channels;
++              efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
+               rc = pci_enable_msi(efx->pci_dev);
+               if (rc == 0) {
+                       efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
+@@ -323,6 +324,7 @@ int efx_siena_probe_interrupts(struct efx_nic *efx)
+               efx->tx_channel_offset = efx_siena_separate_tx_channels ? 1 : 0;
+               efx->n_xdp_channels = 0;
+               efx->xdp_channel_offset = efx->n_channels;
++              efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
+               efx->legacy_irq = efx->pci_dev->irq;
+       }
+-- 
+2.39.2
+
diff --git a/queue-6.3/spi-cadence-quadspi-add-missing-check-for-dma_set_ma.patch b/queue-6.3/spi-cadence-quadspi-add-missing-check-for-dma_set_ma.patch
new file mode 100644 (file)
index 0000000..52808b8
--- /dev/null
@@ -0,0 +1,41 @@
+From 2812ecdd9efcbb5c4d7ca7502bb369033d9dd79c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jun 2023 17:38:59 +0800
+Subject: spi: cadence-quadspi: Add missing check for dma_set_mask
+
+From: Jiasheng Jiang <jiasheng@iscas.ac.cn>
+
+[ Upstream commit 947c70a213769f60e9d5aca2bc88b50a1cfaf5a6 ]
+
+Add check for dma_set_mask() and return the error if it fails.
+
+Fixes: 1a6f854f7daa ("spi: cadence-quadspi: Add Xilinx Versal external DMA support")
+Signed-off-by: Jiasheng Jiang <jiasheng@iscas.ac.cn>
+Link: https://lore.kernel.org/r/20230606093859.27818-1-jiasheng@iscas.ac.cn
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-cadence-quadspi.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
+index 8f36e1306e169..3fb85607fa4fa 100644
+--- a/drivers/spi/spi-cadence-quadspi.c
++++ b/drivers/spi/spi-cadence-quadspi.c
+@@ -1736,8 +1736,11 @@ static int cqspi_probe(struct platform_device *pdev)
+                       cqspi->slow_sram = true;
+               if (of_device_is_compatible(pdev->dev.of_node,
+-                                          "xlnx,versal-ospi-1.0"))
+-                      dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
++                                          "xlnx,versal-ospi-1.0")) {
++                      ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
++                      if (ret)
++                              goto probe_reset_failed;
++              }
+       }
+       ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
+-- 
+2.39.2
+
diff --git a/queue-6.3/spi-fsl-dspi-avoid-sck-glitches-with-continuous-tran.patch b/queue-6.3/spi-fsl-dspi-avoid-sck-glitches-with-continuous-tran.patch
new file mode 100644 (file)
index 0000000..07d990f
--- /dev/null
@@ -0,0 +1,95 @@
+From 622d2e4ebd0c9c2018988881d6ee17f7aaae2e7d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 May 2023 01:34:02 +0300
+Subject: spi: fsl-dspi: avoid SCK glitches with continuous transfers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit c5c31fb71f16ba75bad4ade208abbae225305b65 ]
+
+The DSPI controller has configurable timing for
+
+(a) tCSC: the interval between the assertion of the chip select and the
+    first clock edge
+
+(b) tASC: the interval between the last clock edge and the deassertion
+    of the chip select
+
+What is a bit surprising, but is documented in the figure "Example of
+continuous transfer (CPHA=1, CONT=1)" in the datasheet, is that when the
+chip select stays asserted between multiple TX FIFO writes, the tCSC and
+tASC times still apply. With CONT=1, chip select remains asserted, but
+SCK takes a break and goes to the idle state for tASC + tCSC ns.
+
+In other words, the default values (of 0 and 0 ns) result in SCK
+glitches where the SCK transition to the idle state, as well as the SCK
+transition from the idle state, will have no delay in between, and it
+may appear that a SCK cycle has simply gone missing. The resulting
+timing violation might cause data corruption in many peripherals, as
+their chip select is asserted.
+
+The driver has device tree bindings for tCSC ("fsl,spi-cs-sck-delay")
+and tASC ("fsl,spi-sck-cs-delay"), but these are only specified to apply
+when the chip select toggles in the first place, and this timing
+characteristic depends on each peripheral. Many peripherals do not have
+explicit timing requirements, so many device trees do not have these
+properties present at all.
+
+Nonetheless, the absence of SCK glitches is a common-sense requirement.
+Since the SCK stays in the idle state for tCSC + tASC ns during
+continuous transfers, and that gap should itself look like half a cycle,
+ensure that tCSC and tASC are each at least a quarter of a SCK period,
+so that their sum is at least half of one.
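+
+The clamping arithmetic, sketched as a standalone program with an
+assumed 10 MHz SCK rate (the macro names mirror the kernel ones but are
+redefined locally here):
+
+  #include <stdio.h>
+
+  #define NSEC_PER_SEC        1000000000U
+  #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
+
+  int main(void)
+  {
+          unsigned int max_speed_hz = 10000000; /* example SCK rate */
+          unsigned int cs_sck_delay = 0;        /* tCSC, often absent in DT */
+          unsigned int sck_cs_delay = 0;        /* tASC, often absent in DT */
+
+          unsigned int period_ns = DIV_ROUND_UP(NSEC_PER_SEC, max_speed_hz);
+          unsigned int quarter_period_ns = DIV_ROUND_UP(period_ns, 4);
+
+          /* Never let tCSC + tASC drop below half a SCK period. */
+          if (cs_sck_delay < quarter_period_ns)
+                  cs_sck_delay = quarter_period_ns;
+          if (sck_cs_delay < quarter_period_ns)
+                  sck_cs_delay = quarter_period_ns;
+
+          printf("period %u ns: tCSC >= %u ns, tASC >= %u ns, sum >= %u ns\n",
+                 period_ns, cs_sck_delay, sck_cs_delay,
+                 cs_sck_delay + sck_cs_delay);
+          return 0; /* 100 ns period -> 25 + 25 = 50 ns, half a period */
+  }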
+
+Fixes: 95bf15f38641 ("spi: fsl-dspi: Add ~50ns delay between cs and sck")
+Reported-by: Lisa Chen (陈敏捷) <minjie.chen@geekplus.com>
+Debugged-by: Lisa Chen (陈敏捷) <minjie.chen@geekplus.com>
+Tested-by: Lisa Chen (陈敏捷) <minjie.chen@geekplus.com>
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Link: https://lore.kernel.org/r/20230529223402.1199503-1-vladimir.oltean@nxp.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-fsl-dspi.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
+index e419642eb10e5..0da5c6ec46fb1 100644
+--- a/drivers/spi/spi-fsl-dspi.c
++++ b/drivers/spi/spi-fsl-dspi.c
+@@ -1002,7 +1002,9 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
+ static int dspi_setup(struct spi_device *spi)
+ {
+       struct fsl_dspi *dspi = spi_controller_get_devdata(spi->controller);
++      u32 period_ns = DIV_ROUND_UP(NSEC_PER_SEC, spi->max_speed_hz);
+       unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0;
++      u32 quarter_period_ns = DIV_ROUND_UP(period_ns, 4);
+       u32 cs_sck_delay = 0, sck_cs_delay = 0;
+       struct fsl_dspi_platform_data *pdata;
+       unsigned char pasc = 0, asc = 0;
+@@ -1031,6 +1033,19 @@ static int dspi_setup(struct spi_device *spi)
+               sck_cs_delay = pdata->sck_cs_delay;
+       }
++      /* Since tCSC and tASC apply to continuous transfers too, avoid SCK
++       * glitches of half a cycle by never allowing tCSC + tASC to go below
++       * half a SCK period.
++       */
++      if (cs_sck_delay < quarter_period_ns)
++              cs_sck_delay = quarter_period_ns;
++      if (sck_cs_delay < quarter_period_ns)
++              sck_cs_delay = quarter_period_ns;
++
++      dev_dbg(&spi->dev,
++              "DSPI controller timing params: CS-to-SCK delay %u ns, SCK-to-CS delay %u ns\n",
++              cs_sck_delay, sck_cs_delay);
++
+       clkrate = clk_get_rate(dspi->clk);
+       hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate);
+-- 
+2.39.2
+
diff --git a/queue-6.3/wifi-cfg80211-fix-link-del-callback-to-call-correct-.patch b/queue-6.3/wifi-cfg80211-fix-link-del-callback-to-call-correct-.patch
new file mode 100644 (file)
index 0000000..e34de39
--- /dev/null
@@ -0,0 +1,50 @@
+From 14da4423a93c57d13f593e506877ae2270f6b1d9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Jun 2023 16:36:01 +0300
+Subject: wifi: cfg80211: fix link del callback to call correct handler
+
+From: Benjamin Berg <benjamin.berg@intel.com>
+
+[ Upstream commit 1ff56684fa8682bdfbbce4e12cf67ab23cb1db05 ]
+
+The wrapper function was incorrectly calling the add handler instead of
+the del handler. This had no negative side effect as the default
+handlers are essentially identical.
+
+Fixes: f2a0290b2df2 ("wifi: cfg80211: add optional link add/remove callbacks")
+Signed-off-by: Benjamin Berg <benjamin.berg@intel.com>
+Signed-off-by: Gregory Greenman <gregory.greenman@intel.com>
+Link: https://lore.kernel.org/r/20230608163202.ebd00e000459.Iaff7dc8d1cdecf77f53ea47a0e5080caa36ea02a@changeid
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/wireless/rdev-ops.h | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
+index 13b209a8db287..ee853a14a02de 100644
+--- a/net/wireless/rdev-ops.h
++++ b/net/wireless/rdev-ops.h
+@@ -2,7 +2,7 @@
+ /*
+  * Portions of this file
+  * Copyright(c) 2016-2017 Intel Deutschland GmbH
+- * Copyright (C) 2018, 2021-2022 Intel Corporation
++ * Copyright (C) 2018, 2021-2023 Intel Corporation
+  */
+ #ifndef __CFG80211_RDEV_OPS
+ #define __CFG80211_RDEV_OPS
+@@ -1441,8 +1441,8 @@ rdev_del_intf_link(struct cfg80211_registered_device *rdev,
+                  unsigned int link_id)
+ {
+       trace_rdev_del_intf_link(&rdev->wiphy, wdev, link_id);
+-      if (rdev->ops->add_intf_link)
+-              rdev->ops->add_intf_link(&rdev->wiphy, wdev, link_id);
++      if (rdev->ops->del_intf_link)
++              rdev->ops->del_intf_link(&rdev->wiphy, wdev, link_id);
+       trace_rdev_return_void(&rdev->wiphy);
+ }
+-- 
+2.39.2
+
diff --git a/queue-6.3/wifi-mac80211-fix-link-activation-settings-order.patch b/queue-6.3/wifi-mac80211-fix-link-activation-settings-order.patch
new file mode 100644 (file)
index 0000000..f4dccf4
--- /dev/null
@@ -0,0 +1,60 @@
+From 21f3734d745d9af926d6ff71c2470df59447cdb2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Jun 2023 16:35:59 +0300
+Subject: wifi: mac80211: fix link activation settings order
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+[ Upstream commit 01605ad6c3e8608d7e147c9b75d67eb8a3d27d88 ]
+
+In the normal MLME code we always call
+ieee80211_mgd_set_link_qos_params() before
+ieee80211_link_info_change_notify(), and some drivers,
+notably iwlwifi, rely on that as they don't do anything
+(other than store the data) in their conf_tx.
+
+Fix the order here to be the same as in the normal code
+paths, so this isn't broken.
+
+Fixes: 3d9011029227 ("wifi: mac80211: implement link switching")
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Gregory Greenman <gregory.greenman@intel.com>
+Link: https://lore.kernel.org/r/20230608163202.a2a86bba2f80.Iac97e04827966d22161e63bb6e201b4061e9651b@changeid
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mac80211/link.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/net/mac80211/link.c b/net/mac80211/link.c
+index 8c8869cc1fb4c..aab4d7b4def24 100644
+--- a/net/mac80211/link.c
++++ b/net/mac80211/link.c
+@@ -2,7 +2,7 @@
+ /*
+  * MLO link handling
+  *
+- * Copyright (C) 2022 Intel Corporation
++ * Copyright (C) 2022-2023 Intel Corporation
+  */
+ #include <linux/slab.h>
+ #include <linux/kernel.h>
+@@ -404,6 +404,7 @@ static int _ieee80211_set_active_links(struct ieee80211_sub_if_data *sdata,
+                                                IEEE80211_CHANCTX_SHARED);
+               WARN_ON_ONCE(ret);
++              ieee80211_mgd_set_link_qos_params(link);
+               ieee80211_link_info_change_notify(sdata, link,
+                                                 BSS_CHANGED_ERP_CTS_PROT |
+                                                 BSS_CHANGED_ERP_PREAMBLE |
+@@ -418,7 +419,6 @@ static int _ieee80211_set_active_links(struct ieee80211_sub_if_data *sdata,
+                                                 BSS_CHANGED_TWT |
+                                                 BSS_CHANGED_HE_OBSS_PD |
+                                                 BSS_CHANGED_HE_BSS_COLOR);
+-              ieee80211_mgd_set_link_qos_params(link);
+       }
+       old_active = sdata->vif.active_links;
+-- 
+2.39.2
+
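The patch above moves ieee80211_mgd_set_link_qos_params() ahead of ieee80211_link_info_change_notify() during link activation. The standalone sketch below illustrates the ordering dependency under the stated assumption that a driver's conf_tx hook only stores the values and the change notification applies them; the toy_* names and the single aifs field are invented for illustration, and this is not kernel code.

/* A driver whose conf_tx only stores parameters applies them when it
 * receives the "link info changed" notification, so the parameters must
 * be set before the notification is sent. */
#include <stdio.h>

struct toy_driver {
	int stored_aifs;	/* conf_tx only stores... */
	int applied_aifs;	/* ...the change notification applies */
};

static void toy_conf_tx(struct toy_driver *drv, int aifs)
{
	drv->stored_aifs = aifs;		/* store only */
}

static void toy_link_info_change(struct toy_driver *drv)
{
	drv->applied_aifs = drv->stored_aifs;	/* apply whatever was stored */
}

int main(void)
{
	struct toy_driver drv = { .stored_aifs = 0, .applied_aifs = 0 };

	/* Fixed order: push the QoS parameters first, then notify. */
	toy_conf_tx(&drv, 2);
	toy_link_info_change(&drv);
	printf("applied aifs = %d\n", drv.applied_aifs);	/* 2, as intended */

	return 0;
}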
diff --git a/queue-6.3/wifi-mac80211-fragment-per-sta-profile-correctly.patch b/queue-6.3/wifi-mac80211-fragment-per-sta-profile-correctly.patch
new file mode 100644 (file)
index 0000000..06e3675
--- /dev/null
@@ -0,0 +1,83 @@
+From e7af0ceeace59df51e07cc9b9d7c006491a1db02 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 11 Jun 2023 12:14:29 +0300
+Subject: wifi: mac80211: fragment per STA profile correctly
+
+From: Benjamin Berg <benjamin.berg@intel.com>
+
+[ Upstream commit d094482c9974a543851a18a1c587a7d132a81659 ]
+
+When fragmenting the ML per STA profile, the fragment ID should be
+IEEE80211_MLE_SUBELEM_FRAGMENT rather than WLAN_EID_FRAGMENT.
+
+Change the helper function to take the element ID to be used and pass
+the appropriate value for each of the two fragmentation levels (see
+the sketch after this patch).
+
+Fixes: 81151ce462e5 ("wifi: mac80211: support MLO authentication/association with one link")
+Signed-off-by: Benjamin Berg <benjamin.berg@intel.com>
+Signed-off-by: Gregory Greenman <gregory.greenman@intel.com>
+Link: https://lore.kernel.org/r/20230611121219.9b5c793d904b.I7dad952bea8e555e2f3139fbd415d0cd2b3a08c3@changeid
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mac80211/ieee80211_i.h | 2 +-
+ net/mac80211/mlme.c        | 5 +++--
+ net/mac80211/util.c        | 4 ++--
+ 3 files changed, 6 insertions(+), 5 deletions(-)
+
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index eba7ae63fac45..347030b9bb9e3 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -2272,7 +2272,7 @@ ieee802_11_parse_elems(const u8 *start, size_t len, bool action,
+       return ieee802_11_parse_elems_crc(start, len, action, 0, 0, bss);
+ }
+-void ieee80211_fragment_element(struct sk_buff *skb, u8 *len_pos);
++void ieee80211_fragment_element(struct sk_buff *skb, u8 *len_pos, u8 frag_id);
+ extern const int ieee802_1d_to_ac[8];
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 7a970b6dda640..d28a35c538bac 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -1372,10 +1372,11 @@ static void ieee80211_assoc_add_ml_elem(struct ieee80211_sub_if_data *sdata,
+               ieee80211_add_non_inheritance_elem(skb, outer_present_elems,
+                                                  link_present_elems);
+-              ieee80211_fragment_element(skb, subelem_len);
++              ieee80211_fragment_element(skb, subelem_len,
++                                         IEEE80211_MLE_SUBELEM_FRAGMENT);
+       }
+-      ieee80211_fragment_element(skb, ml_elem_len);
++      ieee80211_fragment_element(skb, ml_elem_len, WLAN_EID_FRAGMENT);
+ }
+ static int ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index d7b382866b260..1a0d38cd46337 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -4955,7 +4955,7 @@ u8 *ieee80211_ie_build_eht_cap(u8 *pos,
+       return pos;
+ }
+-void ieee80211_fragment_element(struct sk_buff *skb, u8 *len_pos)
++void ieee80211_fragment_element(struct sk_buff *skb, u8 *len_pos, u8 frag_id)
+ {
+       unsigned int elem_len;
+@@ -4975,7 +4975,7 @@ void ieee80211_fragment_element(struct sk_buff *skb, u8 *len_pos)
+               memmove(len_pos + 255 + 3, len_pos + 255 + 1, elem_len);
+               /* place the fragment ID */
+               len_pos += 255 + 1;
+-              *len_pos = WLAN_EID_FRAGMENT;
++              *len_pos = frag_id;
+               /* and point to fragment length to update later */
+               len_pos++;
+       }
+-- 
+2.39.2
+
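The patch above lets the caller choose the fragment ID, so fragments inside the ML element's per-STA profile are tagged with the sub-element fragment ID instead of the element-level WLAN_EID_FRAGMENT. The standalone sketch below shows the general idea of emitting a long body as a first chunk plus continuation fragments tagged with a caller-supplied ID; the toy_put_fragmented() helper, the buffer layout, and the numeric ID values are assumptions for illustration only and do not reproduce the kernel's in-place memmove approach.

/* Emit "id | len | body" into out, splitting bodies longer than 255
 * bytes into a first chunk plus continuation fragments tagged frag_id. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOY_EID_FRAGMENT	242	/* assumed element-level fragment ID */
#define TOY_SUBELEM_FRAGMENT	254	/* assumed sub-element fragment ID   */

static size_t toy_put_fragmented(uint8_t *out, uint8_t id, uint8_t frag_id,
				 const uint8_t *body, size_t body_len)
{
	size_t off = 0, pos = 0;

	while (pos < body_len) {
		size_t chunk = body_len - pos > 255 ? 255 : body_len - pos;

		out[off++] = pos == 0 ? id : frag_id;	/* real ID, then fragment ID */
		out[off++] = (uint8_t)chunk;
		memcpy(&out[off], &body[pos], chunk);
		off += chunk;
		pos += chunk;
	}
	return off;
}

int main(void)
{
	uint8_t body[600], out[1024];
	size_t n;

	memset(body, 0xab, sizeof(body));

	/* Inside an ML element, a long per-STA profile sub-element is
	 * continued with the sub-element fragment ID, not with the
	 * element-level WLAN_EID_FRAGMENT value. */
	n = toy_put_fragmented(out, /* per-STA profile sub-element id (assumed 0) */ 0,
			       TOY_SUBELEM_FRAGMENT, body, sizeof(body));
	printf("emitted %zu bytes in 3 chunks\n", n);
	return 0;
}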
diff --git a/queue-6.3/wifi-mac80211-take-lock-before-setting-vif-links.patch b/queue-6.3/wifi-mac80211-take-lock-before-setting-vif-links.patch
new file mode 100644 (file)
index 0000000..fc37c92
--- /dev/null
@@ -0,0 +1,61 @@
+From 48ca02f3d07efa7c9cccfecbc04e2b517a0ac81c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Jun 2023 16:36:02 +0300
+Subject: wifi: mac80211: take lock before setting vif links
+
+From: Benjamin Berg <benjamin.berg@intel.com>
+
+[ Upstream commit 15846f95ab01b71fdb1cef8df73680aad41edf70 ]
+
+ieee80211_vif_set_links() requires sdata->local->mtx to be held; without
+it, a warning is triggered when e.g. ieee80211_link_release_channel() is
+called via ieee80211_link_stop() from ieee80211_vif_update_links().
+
+Add the appropriate locking around the calls in both the link add and
+remove handlers (see the sketch after this patch).
+
+Fixes: 0d8c4a3c8688 ("wifi: mac80211: implement add/del interface link callbacks")
+Signed-off-by: Benjamin Berg <benjamin.berg@intel.com>
+Signed-off-by: Gregory Greenman <gregory.greenman@intel.com>
+Link: https://lore.kernel.org/r/20230608163202.fa0c6597fdad.I83dd70359f6cda30f86df8418d929c2064cf4995@changeid
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mac80211/cfg.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index 5ddbe0c8cfaa1..2c4689a1bf064 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -4778,11 +4778,16 @@ static int ieee80211_add_intf_link(struct wiphy *wiphy,
+                                  unsigned int link_id)
+ {
+       struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
++      int res;
+       if (wdev->use_4addr)
+               return -EOPNOTSUPP;
+-      return ieee80211_vif_set_links(sdata, wdev->valid_links);
++      mutex_lock(&sdata->local->mtx);
++      res = ieee80211_vif_set_links(sdata, wdev->valid_links);
++      mutex_unlock(&sdata->local->mtx);
++
++      return res;
+ }
+ static void ieee80211_del_intf_link(struct wiphy *wiphy,
+@@ -4791,7 +4796,9 @@ static void ieee80211_del_intf_link(struct wiphy *wiphy,
+ {
+       struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
++      mutex_lock(&sdata->local->mtx);
+       ieee80211_vif_set_links(sdata, wdev->valid_links);
++      mutex_unlock(&sdata->local->mtx);
+ }
+ static int sta_add_link_station(struct ieee80211_local *local,
+-- 
+2.39.2
+
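The patch above wraps the ieee80211_vif_set_links() calls in the add/del interface link handlers with sdata->local->mtx. The standalone sketch below mirrors that pattern with a pthread mutex standing in for the kernel mutex; all toy_* names are invented for illustration, and this is not kernel code.

/* Take the lock the callee requires around the call; the add path also
 * propagates the return value, as in the fixed handler. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t toy_mtx = PTHREAD_MUTEX_INITIALIZER;

/* Callee that, like ieee80211_vif_set_links(), must run with the lock held. */
static int toy_set_links(unsigned int valid_links)
{
	/* ...would warn/assert here if toy_mtx were not held... */
	printf("set links 0x%x\n", valid_links);
	return 0;
}

static int toy_add_intf_link(unsigned int valid_links)
{
	int res;

	pthread_mutex_lock(&toy_mtx);
	res = toy_set_links(valid_links);
	pthread_mutex_unlock(&toy_mtx);

	return res;
}

static void toy_del_intf_link(unsigned int valid_links)
{
	pthread_mutex_lock(&toy_mtx);
	toy_set_links(valid_links);
	pthread_mutex_unlock(&toy_mtx);
}

int main(void)
{
	if (toy_add_intf_link(0x3) == 0)
		toy_del_intf_link(0x1);
	return 0;
}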