--- /dev/null
+From 8db5a9426016df5713600f4aee28c1fa9920042c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 22 Aug 2022 16:45:04 -0700
+Subject: arm64: dts: rockchip: Pull up wlan wake# on Gru-Bob
+
+From: Brian Norris <briannorris@chromium.org>
+
+[ Upstream commit e5467359a725de90b6b8d0dd865500f6373828ca ]
+
+The Gru-Bob board does not have a pull-up resistor on its
+WLAN_HOST_WAKE# pin, but Kevin does. The production/vendor kernel
+specified the pin configuration correctly as a pull-up, but this didn't
+get ported correctly to upstream.
+
+This means Bob's WLAN_HOST_WAKE# pin is floating, causing inconsistent
+wakeup behavior.
+
+Note that bt_host_wake_l has a similar dynamic, but apparently the
+upstream choice was to redundantly configure both internal and external
+pull-up on Kevin (see the "Kevin has an external pull up" comment in
+rk3399-gru.dtsi). This doesn't cause any functional problem, although
+it's perhaps wasteful.
+
+Fixes: 8559bbeeb849 ("arm64: dts: rockchip: add Google Bob")
+Signed-off-by: Brian Norris <briannorris@chromium.org>
+Reviewed-by: Douglas Anderson <dianders@chromium.org>
+Link: https://lore.kernel.org/r/20220822164453.1.I75c57b48b0873766ec993bdfb7bc1e63da5a1637@changeid
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts | 5 +++++
+ arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi | 1 +
+ 2 files changed, 6 insertions(+)
+
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
+index e6c1c94c8d69..07737b65d7a3 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
+@@ -87,3 +87,8 @@ h1_int_od_l: h1-int-od-l {
+ };
+ };
+ };
++
++&wlan_host_wake_l {
++ /* Kevin has an external pull up, but Bob does not. */
++ rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>;
++};
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
+index 1384dabbdf40..0d8458d55626 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
+@@ -395,6 +395,7 @@ wifi_perst_l: wifi-perst-l {
+ };
+
+ wlan_host_wake_l: wlan-host-wake-l {
++ /* Kevin has an external pull up, but Bob does not */
+ rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+--
+2.35.1
+
--- /dev/null
+From 4e841dfa1ccfc252f344d4fb419ee29b81cc442a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 27 Aug 2022 14:51:39 -0300
+Subject: arm64: dts: rockchip: Remove 'enable-active-low' from rk3399-puma
+
+From: Fabio Estevam <festevam@denx.de>
+
+[ Upstream commit a994b34b9abb9c08ee09e835b4027ff2147f9d94 ]
+
+The 'enable-active-low' property is not a valid one.
+
+Only 'enable-active-high' is valid, and when this property is absent
+the gpio regulator will act as active low by default.
+
+Remove the invalid 'enable-active-low' property.
+
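+A sketch of the binding logic (not the regulator core's exact code),
+showing why the property's absence means active-low:
+
+```c
+/* only "enable-active-high" is recognized by the fixed-regulator binding */
+bool active_high = of_property_read_bool(np, "enable-active-high");
+
+/* property absent -> active_high == false -> enable pin treated as active-low */
+```
+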
+Fixes: 2c66fc34e945 ("arm64: dts: rockchip: add RK3399-Q7 (Puma) SoM")
+Signed-off-by: Fabio Estevam <festevam@denx.de>
+Link: https://lore.kernel.org/r/20220827175140.1696699-1-festevam@denx.de
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+index 544110aaffc5..95bc7a5f61dd 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+@@ -102,7 +102,6 @@ vcc3v3_sys: vcc3v3-sys {
+ vcc5v0_host: vcc5v0-host-regulator {
+ compatible = "regulator-fixed";
+ gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_LOW>;
+- enable-active-low;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc5v0_host_en>;
+ regulator-name = "vcc5v0_host";
+--
+2.35.1
+
--- /dev/null
+From 1106d0e6636a2194d3c6e661bc3f718dbcb1f9e8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Aug 2022 13:16:17 -0700
+Subject: arm64: dts: rockchip: Set RK3399-Gru PCLK_EDP to 24 MHz
+
+From: zain wang <wzz@rock-chips.com>
+
+[ Upstream commit 8123437cf46ea5a0f6ca5cb3c528d8b6db97b9c2 ]
+
+We've found the AUX channel to be less reliable with PCLK_EDP at a
+higher rate (typically 25 MHz). This is especially important on systems
+with PSR-enabled panels (like Gru-Kevin), since we make heavy, constant
+use of AUX.
+
+According to Rockchip, using any rate other than 24 MHz can cause
+"problems between syncing the PHY and PCLK", which leads to all sorts
+of unreliability around register operations.
+
+Fixes: d67a38c5a623 ("arm64: dts: rockchip: move core edp from rk3399-kevin to shared chromebook")
+Reviewed-by: Douglas Anderson <dianders@chromium.org>
+Signed-off-by: zain wang <wzz@rock-chips.com>
+Signed-off-by: Brian Norris <briannorris@chromium.org>
+Link: https://lore.kernel.org/r/20220830131212.v2.1.I98d30623f13b785ca77094d0c0fd4339550553b6@changeid
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
+index 0d8458d55626..739937f70f8d 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
+@@ -237,6 +237,14 @@ &cdn_dp {
+ &edp {
+ status = "okay";
+
++ /*
++ * eDP PHY/clk don't sync reliably at anything other than 24 MHz. Only
++ * set this here, because rk3399-gru.dtsi ensures we can generate this
++ * off GPLL=600MHz, whereas some other RK3399 boards may not.
++ */
++ assigned-clocks = <&cru PCLK_EDP>;
++ assigned-clock-rates = <24000000>;
++
+ ports {
+ edp_out: port@1 {
+ reg = <1>;
+--
+2.35.1
+
--- /dev/null
+From 68d555970a7c9b74f1aa7572e678b544284a2aa9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 20 Sep 2022 11:40:56 +0200
+Subject: can: gs_usb: gs_can_open(): fix race dev->can.state condition
+
+From: Marc Kleine-Budde <mkl@pengutronix.de>
+
+[ Upstream commit 5440428b3da65408dba0241985acb7a05258b85e ]
+
+The dev->can.state is set to CAN_STATE_ERROR_ACTIVE only after the
+device has been started. On busy networks the CAN controller might
+receive CAN frames in between and go into an error state before
+dev->can.state is assigned.
+
+Assign dev->can.state before starting the controller to close the race
+window.
+
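+A minimal sketch of the resulting ordering (start_controller() is a
+hypothetical stand-in for the GS_CAN_MODE_START control transfer):
+
+```c
+/* set the state first: frames may arrive as soon as the controller runs */
+dev->can.state = CAN_STATE_ERROR_ACTIVE;
+
+rc = start_controller(dev);
+if (rc < 0)
+        dev->can.state = CAN_STATE_STOPPED;     /* roll back on failure */
+```
+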
+Fixes: d08e973a77d1 ("can: gs_usb: Added support for the GS_USB CAN devices")
+Link: https://lore.kernel.org/all/20220920195216.232481-1-mkl@pengutronix.de
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/can/usb/gs_usb.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
+index 1bfc497da9ac..a879200eaab0 100644
+--- a/drivers/net/can/usb/gs_usb.c
++++ b/drivers/net/can/usb/gs_usb.c
+@@ -678,6 +678,7 @@ static int gs_can_open(struct net_device *netdev)
+ flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
+
+ /* finally start device */
++ dev->can.state = CAN_STATE_ERROR_ACTIVE;
+ dm->mode = cpu_to_le32(GS_CAN_MODE_START);
+ dm->flags = cpu_to_le32(flags);
+ rc = usb_control_msg(interface_to_usbdev(dev->iface),
+@@ -694,13 +695,12 @@ static int gs_can_open(struct net_device *netdev)
+ if (rc < 0) {
+ netdev_err(netdev, "Couldn't start device (err=%d)\n", rc);
+ kfree(dm);
++ dev->can.state = CAN_STATE_STOPPED;
+ return rc;
+ }
+
+ kfree(dm);
+
+- dev->can.state = CAN_STATE_ERROR_ACTIVE;
+-
+ parent->active_channels++;
+ if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
+ netif_start_queue(netdev);
+--
+2.35.1
+
--- /dev/null
+From 9c335f7f237f4dd9ded51f10fdba82bf1ecb285c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Jul 2022 15:32:34 +0800
+Subject: dmaengine: ti: k3-udma-private: Fix refcount leak bug in
+ of_xudma_dev_get()
+
+From: Liang He <windhl@126.com>
+
+[ Upstream commit f9fdb0b86f087c2b7f6c6168dd0985a3c1eda87e ]
+
+We should call of_node_put() for the reference returned by
+of_parse_phandle() in the failure path, or once the reference is no
+longer used. Here it is enough to move the of_node_put() before the
+check.
+
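+A sketch of the general pattern (the property name is hypothetical; the
+phandle is only needed long enough to look up the platform device):
+
+```c
+struct device_node *node = of_parse_phandle(np, "some-prop", 0);
+struct platform_device *pdev;
+
+if (!node)
+        return ERR_PTR(-ENODEV);
+
+pdev = of_find_device_by_node(node);    /* takes its own reference */
+of_node_put(node);      /* drop the phandle reference on all paths */
+if (!pdev)
+        return ERR_PTR(-EPROBE_DEFER);
+```
+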
+Fixes: d70241913413 ("dmaengine: ti: k3-udma: Add glue layer for non DMAengine users")
+Signed-off-by: Liang He <windhl@126.com>
+Acked-by: Peter Ujfalusi <peter.ujfalusi@gmail.com>
+Link: https://lore.kernel.org/r/20220720073234.1255474-1-windhl@126.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/ti/k3-udma-private.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c
+index 8563a392f30b..dadab2feca08 100644
+--- a/drivers/dma/ti/k3-udma-private.c
++++ b/drivers/dma/ti/k3-udma-private.c
+@@ -31,14 +31,14 @@ struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property)
+ }
+
+ pdev = of_find_device_by_node(udma_node);
++ if (np != udma_node)
++ of_node_put(udma_node);
++
+ if (!pdev) {
+ pr_debug("UDMA device not found\n");
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+- if (np != udma_node)
+- of_node_put(udma_node);
+-
+ ud = platform_get_drvdata(pdev);
+ if (!ud) {
+ pr_debug("UDMA has not been probed\n");
+--
+2.35.1
+
--- /dev/null
+From 08acb1bfe38285defda1b82985b17285ad00ea3d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 30 May 2022 19:55:57 -0700
+Subject: drm/hisilicon: Add depends on MMU
+
+From: Randy Dunlap <rdunlap@infradead.org>
+
+[ Upstream commit d8a79c03054911c375a2252627a429c9bc4615b6 ]
+
+The Kconfig symbol's dependency on MMU was dropped by commit
+acad3fe650a5 ("drm/hisilicon: Removed the dependency on the mmu")
+because the symbol already depended on ARM64, which selects MMU.
+
+Later, commit a0f25a6bb319 ("drm/hisilicon/hibmc: Allow to be built
+if COMPILE_TEST is enabled") allowed the driver to be built for
+non-ARM64 when COMPILE_TEST is set, but that can lead to unmet direct
+dependencies and linking errors.
+
+Prevent a kconfig warning when MMU is not enabled by making
+DRM_HISI_HIBMC depend on MMU.
+
+WARNING: unmet direct dependencies detected for DRM_TTM
+ Depends on [n]: HAS_IOMEM [=y] && DRM [=m] && MMU [=n]
+ Selected by [m]:
+ - DRM_TTM_HELPER [=m] && HAS_IOMEM [=y] && DRM [=m]
+ - DRM_HISI_HIBMC [=m] && HAS_IOMEM [=y] && DRM [=m] && PCI [=y] && (ARM64 || COMPILE_TEST [=y])
+
+Fixes: acad3fe650a5 ("drm/hisilicon: Removed the dependency on the mmu")
+Signed-off-by: Randy Dunlap <rdunlap@infradead.org>
+Cc: Gerd Hoffmann <kraxel@redhat.com>
+Cc: Thomas Zimmermann <tzimmermann@suse.de>
+Cc: Xinliang Liu <xinliang.liu@linaro.org>
+Cc: Tian Tao <tiantao6@hisilicon.com>
+Cc: John Stultz <jstultz@google.com>
+Cc: Xinwei Kong <kong.kongxinwei@hisilicon.com>
+Cc: Chen Feng <puck.chen@hisilicon.com>
+Cc: Christian Koenig <christian.koenig@amd.com>
+Cc: Huang Rui <ray.huang@amd.com>
+Cc: David Airlie <airlied@linux.ie>
+Cc: Daniel Vetter <daniel@ffwll.ch>
+Reviewed-by: Javier Martinez Canillas <javierm@redhat.com>
+Signed-off-by: Javier Martinez Canillas <javierm@redhat.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220531025557.29593-1-rdunlap@infradead.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/hisilicon/hibmc/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
+index 073adfe438dd..4e41c144a290 100644
+--- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig
++++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
+@@ -2,6 +2,7 @@
+ config DRM_HISI_HIBMC
+ tristate "DRM Support for Hisilicon Hibmc"
+ depends on DRM && PCI && (ARM64 || COMPILE_TEST)
++ depends on MMU
+ select DRM_KMS_HELPER
+ select DRM_VRAM_HELPER
+ select DRM_TTM
+--
+2.35.1
+
--- /dev/null
+From 7a4424afb662b731ff4cf0af938c44f89f944f57 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Dec 2021 22:09:36 +0100
+Subject: drm/hisilicon/hibmc: Allow to be built if COMPILE_TEST is enabled
+
+From: Javier Martinez Canillas <javierm@redhat.com>
+
+[ Upstream commit a0f25a6bb319aa05e04dcf51707c97c2881b4f47 ]
+
+Commit feeb07d0ca5a ("drm/hisilicon/hibmc: Make CONFIG_DRM_HISI_HIBMC
+depend on ARM64") made the driver's Kconfig symbol depend on ARM64,
+since the driver only supports that architecture and loading the module
+on others would lead to incorrect video modes being used.
+
+But it also prevented the driver from being built on other
+architectures, which is useful for compile-test coverage when doing
+subsystem-wide changes.
+
+Make the dependency (ARM64 || COMPILE_TEST) instead, so the driver is
+buildable when the CONFIG_COMPILE_TEST option is enabled.
+
+Signed-off-by: Javier Martinez Canillas <javierm@redhat.com>
+Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
+Link: https://patchwork.freedesktop.org/patch/msgid/20211216210936.3329977-1-javierm@redhat.com
+Stable-dep-of: d8a79c030549 ("drm/hisilicon: Add depends on MMU")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/hisilicon/hibmc/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
+index 43943e980203..073adfe438dd 100644
+--- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig
++++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ config DRM_HISI_HIBMC
+ tristate "DRM Support for Hisilicon Hibmc"
+- depends on DRM && PCI && ARM64
++ depends on DRM && PCI && (ARM64 || COMPILE_TEST)
+ select DRM_KMS_HELPER
+ select DRM_VRAM_HELPER
+ select DRM_TTM
+--
+2.35.1
+
--- /dev/null
+From bb881b829535b99b816e0db81779c2457713d06c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 Jul 2022 19:27:27 +0200
+Subject: drm/mediatek: dsi: Add atomic {destroy,duplicate}_state, reset
+ callbacks
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+
+[ Upstream commit eeda05b5e92f51d9a09646ecb493f0a1e872a6ef ]
+
+Add callbacks for atomic_destroy_state, atomic_duplicate_state and
+atomic_reset to restore functionality of the DSI driver: this solves
+vblank timeouts when another bridge is present in the chain.
+
+Tested bridge chain: DSI <=> ANX7625 => aux-bus panel
+
+Fixes: 7f6335c6a258 ("drm/mediatek: Modify dsi funcs to atomic operations")
+Signed-off-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Tested-by: Chen-Yu Tsai <wenst@chromium.org>
+Reviewed-by: Nícolas F. R. A. Prado <nfraprado@collabora.com>
+Tested-by: Nícolas F. R. A. Prado <nfraprado@collabora.com>
+Link: https://patchwork.kernel.org/project/linux-mediatek/patch/20220721172727.14624-1-angelogioacchino.delregno@collabora.com/
+Signed-off-by: Chun-Kuang Hu <chunkuang.hu@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/mediatek/mtk_dsi.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
+index 7d37d2a01e3c..b8c1a3c1c517 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
++++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
+@@ -791,10 +791,13 @@ static void mtk_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge,
+
+ static const struct drm_bridge_funcs mtk_dsi_bridge_funcs = {
+ .attach = mtk_dsi_bridge_attach,
++ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_disable = mtk_dsi_bridge_atomic_disable,
++ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_enable = mtk_dsi_bridge_atomic_enable,
+ .atomic_pre_enable = mtk_dsi_bridge_atomic_pre_enable,
+ .atomic_post_disable = mtk_dsi_bridge_atomic_post_disable,
++ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .mode_set = mtk_dsi_bridge_mode_set,
+ };
+
+--
+2.35.1
+
--- /dev/null
+From 5d9df7c67c1030dac01fc634141558626bae7741 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Aug 2022 15:43:25 -0400
+Subject: drm/mediatek: dsi: Move mtk_dsi_stop() call back to
+ mtk_dsi_poweroff()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Nícolas F. R. A. Prado <nfraprado@collabora.com>
+
+[ Upstream commit 90144dd8b0d137d9e78ef34b3c418e51a49299ad ]
+
+As the comment right before the mtk_dsi_stop() call advises,
+mtk_dsi_stop() should only be called after
+mtk_drm_crtc_atomic_disable(). That's because that function calls
+drm_crtc_wait_one_vblank(), which requires the vblank irq to be enabled.
+
+Previously mtk_dsi_stop(), being in mtk_dsi_poweroff() and guarded by a
+refcount, would only be called at the end of
+mtk_drm_crtc_atomic_disable(), through the call to mtk_crtc_ddp_hw_fini().
+Commit cde7e2e35c28 ("drm/mediatek: Separate poweron/poweroff from
+enable/disable and define new funcs") moved the mtk_dsi_stop() call to
+mtk_output_dsi_disable(), causing it to be called before
+mtk_drm_crtc_atomic_disable(), and consequently generating vblank
+timeout warnings during suspend.
+
+Move the mtk_dsi_stop() call back to mtk_dsi_poweroff() so that we have
+a working vblank irq during mtk_drm_crtc_atomic_disable() and stop
+getting vblank timeout warnings.
+
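+Sketch of the resulting teardown order (per the description above, not
+a verbatim call graph):
+
+```c
+/*
+ * mtk_output_dsi_disable()          - no longer calls mtk_dsi_stop()
+ * mtk_drm_crtc_atomic_disable()     - may wait for vblank; irq still on
+ *   mtk_crtc_ddp_hw_fini()
+ *     mtk_dsi_poweroff()            - refcounted
+ *       mtk_dsi_stop()              - irq is only disabled here
+ */
+```
+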
+Fixes: cde7e2e35c28 ("drm/mediatek: Separate poweron/poweroff from enable/disable and define new funcs")
+Signed-off-by: Nícolas F. R. A. Prado <nfraprado@collabora.com>
+Tested-by: Hsin-Yi Wang <hsinyi@chromium.org>
+Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Tested-by: Allen-KH Cheng <allen-kh.cheng@mediatek.com>
+Link: http://lists.infradead.org/pipermail/linux-mediatek/2022-August/046713.html
+Signed-off-by: Chun-Kuang Hu <chunkuang.hu@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/mediatek/mtk_dsi.c | 21 ++++++++++-----------
+ 1 file changed, 10 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
+index b8c1a3c1c517..146c4d04f572 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
++++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
+@@ -668,6 +668,16 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
+ if (--dsi->refcount != 0)
+ return;
+
++ /*
++ * mtk_dsi_stop() and mtk_dsi_start() is asymmetric, since
++ * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(),
++ * which needs irq for vblank, and mtk_dsi_stop() will disable irq.
++ * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
++ * after dsi is fully set.
++ */
++ mtk_dsi_stop(dsi);
++
++ mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
+ mtk_dsi_reset_engine(dsi);
+ mtk_dsi_lane0_ulp_mode_enter(dsi);
+ mtk_dsi_clk_ulp_mode_enter(dsi);
+@@ -718,17 +728,6 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
+ if (!dsi->enabled)
+ return;
+
+- /*
+- * mtk_dsi_stop() and mtk_dsi_start() is asymmetric, since
+- * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(),
+- * which needs irq for vblank, and mtk_dsi_stop() will disable irq.
+- * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
+- * after dsi is fully set.
+- */
+- mtk_dsi_stop(dsi);
+-
+- mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
+-
+ dsi->enabled = false;
+ }
+
+--
+2.35.1
+
--- /dev/null
+From c9118d5ddae5399059f0379f85d2725a6aa4fc36 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Aug 2022 13:50:21 -0300
+Subject: drm/panel: simple: Fix innolux_g121i1_l01 bus_format
+
+From: Heiko Schocher <hs@denx.de>
+
+[ Upstream commit a7c48a0ab87ae52c087d663e83e56b8225ac4cce ]
+
+innolux_g121i1_l01 sets bpc to 6, so use the corresponding bus format:
+MEDIA_BUS_FMT_RGB666_1X7X3_SPWG. (An 18-bit, 6 bpc LVDS panel uses
+three data pairs per channel, whereas RGB888_1X7X4_SPWG is the
+four-pair, 8 bpc variant.)
+
+Fixes: 4ae13e486866 ("drm/panel: simple: Add more properties to Innolux G121I1-L01")
+Signed-off-by: Heiko Schocher <hs@denx.de>
+Signed-off-by: Fabio Estevam <festevam@denx.de>
+Signed-off-by: Marek Vasut <marex@denx.de>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220826165021.1592532-1-festevam@denx.de
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/panel/panel-simple.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index bf2c845ef3a2..b7b37082a9d7 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -2201,7 +2201,7 @@ static const struct panel_desc innolux_g121i1_l01 = {
+ .enable = 200,
+ .disable = 20,
+ },
+- .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
++ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+ };
+
+--
+2.35.1
+
--- /dev/null
+From d362e26786df94c5516295a1a9d53198fd7d6785 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 1 Sep 2022 09:49:33 +0200
+Subject: i40e: Fix set max_tx_rate when it is lower than 1 Mbps
+
+From: Michal Jaron <michalx.jaron@intel.com>
+
+[ Upstream commit 198eb7e1b81d8ba676d0f4f120c092032ae69a8e ]
+
+While converting max_tx_rate from bytes to Mbps, the value was set to 0
+if the original value was lower than 125000 bytes (1 Mbps), which
+disabled transmission rate limiting entirely. This happened because
+max_tx_rate was never checked against the 1 Mbps floor before the
+division by 125000. Fix this by adding a helper,
+i40e_bw_bytes_to_mbits(), which sets max_tx_rate to the minimum usable
+value of 50 Mbps if it is less than 1 Mbps, and otherwise does the
+required conversion by dividing by 125000.
+
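+To illustrate the truncation (a sketch; 125000 is the bytes-per-Mbps
+divisor used by the driver):
+
+```c
+u64 rate = 60000;       /* requested rate in bytes: below 1 Mbps */
+
+do_div(rate, 125000);   /* rate is now 0 -> rate limiting silently disabled */
+```
+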
+Fixes: 5ecae4120a6b ("i40e: Refactor VF BW rate limiting")
+Signed-off-by: Michal Jaron <michalx.jaron@intel.com>
+Signed-off-by: Andrii Staikov <andrii.staikov@intel.com>
+Tested-by: Bharathi Sreenivas <bharathi.sreenivas@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_main.c | 32 +++++++++++++++++----
+ 1 file changed, 26 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 97009cbea779..c7f243ddbcf7 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -5733,6 +5733,26 @@ static int i40e_get_link_speed(struct i40e_vsi *vsi)
+ }
+ }
+
++/**
++ * i40e_bw_bytes_to_mbits - Convert max_tx_rate from bytes to mbits
++ * @vsi: Pointer to vsi structure
++ * @max_tx_rate: max TX rate in bytes to be converted into Mbits
++ *
++ * Helper function to convert units before send to set BW limit
++ **/
++static u64 i40e_bw_bytes_to_mbits(struct i40e_vsi *vsi, u64 max_tx_rate)
++{
++ if (max_tx_rate < I40E_BW_MBPS_DIVISOR) {
++ dev_warn(&vsi->back->pdev->dev,
++ "Setting max tx rate to minimum usable value of 50Mbps.\n");
++ max_tx_rate = I40E_BW_CREDIT_DIVISOR;
++ } else {
++ do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
++ }
++
++ return max_tx_rate;
++}
++
+ /**
+ * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
+ * @vsi: VSI to be configured
+@@ -5755,10 +5775,10 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
+ max_tx_rate, seid);
+ return -EINVAL;
+ }
+- if (max_tx_rate && max_tx_rate < 50) {
++ if (max_tx_rate && max_tx_rate < I40E_BW_CREDIT_DIVISOR) {
+ dev_warn(&pf->pdev->dev,
+ "Setting max tx rate to minimum usable value of 50Mbps.\n");
+- max_tx_rate = 50;
++ max_tx_rate = I40E_BW_CREDIT_DIVISOR;
+ }
+
+ /* Tx rate credits are in values of 50Mbps, 0 is disabled */
+@@ -7719,9 +7739,9 @@ static int i40e_setup_tc(struct net_device *netdev, void *type_data)
+
+ if (pf->flags & I40E_FLAG_TC_MQPRIO) {
+ if (vsi->mqprio_qopt.max_rate[0]) {
+- u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
++ u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
++ vsi->mqprio_qopt.max_rate[0]);
+
+- do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
+ ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
+ if (!ret) {
+ u64 credits = max_tx_rate;
+@@ -10366,10 +10386,10 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+ }
+
+ if (vsi->mqprio_qopt.max_rate[0]) {
+- u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
++ u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
++ vsi->mqprio_qopt.max_rate[0]);
+ u64 credits = 0;
+
+- do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
+ ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
+ if (ret)
+ goto end_unlock;
+--
+2.35.1
+
--- /dev/null
+From bc6ca8f0c1acfc1db98a0fa1d8cea52b4f78cf0f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Sep 2022 15:38:36 +0200
+Subject: i40e: Fix VF set max MTU size
+
+From: Michal Jaron <michalx.jaron@intel.com>
+
+[ Upstream commit 372539def2824c43b6afe2403045b140f65c5acc ]
+
+The max MTU sent to the VF is left at 0 from the memory allocation.
+This causes the VF's max MTU to fall back to IAVF_MAX_RXBUFFER instead
+of depending on data from the HW.
+
+Set the max_mtu field in the virtchnl_vf_resource struct to tell the
+VF, in the GET_VF_RESOURCES msg, what the maximum frame size should be.
+
+Fixes: dab86afdbbd1 ("i40e/i40evf: Change the way we limit the maximum frame size for Rx")
+Signed-off-by: Michal Jaron <michalx.jaron@intel.com>
+Signed-off-by: Mateusz Palczewski <mateusz.palczewski@intel.com>
+Tested-by: Konrad Jankowski <konrad0.jankowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/intel/i40e/i40e_virtchnl_pf.c | 20 +++++++++++++++++++
+ 1 file changed, 20 insertions(+)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+index 1947c5a77550..ffff7de801af 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -1985,6 +1985,25 @@ static void i40e_del_qch(struct i40e_vf *vf)
+ }
+ }
+
++/**
++ * i40e_vc_get_max_frame_size
++ * @vf: pointer to the VF
++ *
++ * Max frame size is determined based on the current port's max frame size and
++ * whether a port VLAN is configured on this VF. The VF is not aware whether
++ * it's in a port VLAN so the PF needs to account for this in max frame size
++ * checks and sending the max frame size to the VF.
++ **/
++static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf)
++{
++ u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size;
++
++ if (vf->port_vlan_id)
++ max_frame_size -= VLAN_HLEN;
++
++ return max_frame_size;
++}
++
+ /**
+ * i40e_vc_get_vf_resources_msg
+ * @vf: pointer to the VF info
+@@ -2085,6 +2104,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
+ vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
+ vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
+ vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
++ vfres->max_mtu = i40e_vc_get_max_frame_size(vf);
+
+ if (vf->lan_vsi_idx) {
+ vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
+--
+2.35.1
+
--- /dev/null
+From ee6f4f9c42d54ea6ebc4178f2a8b74a50b85db57 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Sep 2022 15:39:13 +0200
+Subject: iavf: Fix bad page state
+
+From: Norbert Zulinski <norbertx.zulinski@intel.com>
+
+[ Upstream commit 66039eb9015eee4f7ff0c99b83c65c7ecb3c8190 ]
+
+Fix a bad page state caused by freeing the wrong page while handling a
+dummy descriptor. iavf_build_skb() now has to check not only whether
+rx_buffer is NULL but also whether size is zero; the same applies to
+iavf_clean_rx_irq(). Without this patch the driver would free a page
+that is still going to be used by napi_build_skb().
+
+Fixes: a9f49e006030 ("iavf: Fix handling of dummy receive descriptors")
+Signed-off-by: Norbert Zulinski <norbertx.zulinski@intel.com>
+Signed-off-by: Mateusz Palczewski <mateusz.palczewski@intel.com>
+Tested-by: Konrad Jankowski <konrad0.jankowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/iavf/iavf_txrx.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+index 8f6269e9f6a7..d481a922f018 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+@@ -1371,7 +1371,7 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
+ #endif
+ struct sk_buff *skb;
+
+- if (!rx_buffer)
++ if (!rx_buffer || !size)
+ return NULL;
+ /* prefetch first cache line of first page */
+ va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+@@ -1529,7 +1529,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
+ /* exit if we failed to retrieve a buffer */
+ if (!skb) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+- if (rx_buffer)
++ if (rx_buffer && size)
+ rx_buffer->pagecnt_bias++;
+ break;
+ }
+--
+2.35.1
+
--- /dev/null
+From a61e71c73f123db9398eeffae2c9749f801e6122 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 1 Sep 2022 16:34:40 +0200
+Subject: iavf: Fix cached head and tail value for iavf_get_tx_pending
+
+From: Brett Creeley <brett.creeley@intel.com>
+
+[ Upstream commit 809f23c0423a43266e47a7dc67e95b5cb4d1cbfc ]
+
+The underlying hardware may or may not allow reading of the head or
+tail registers, and it really makes no difference if we use the
+software-cached values. So, always use the software-cached values.
+
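+For reference, the pending count is then derived from the cached
+indices with the usual ring wrap-around (sketch of the logic in the
+hunk below):
+
+```c
+/* head = ring->next_to_clean, tail = ring->next_to_use */
+if (head != tail)
+        pending = (head < tail) ? tail - head
+                                : tail + ring->count - head;
+```
+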
+Fixes: 9c6c12595b73 ("i40e: Detection and recovery of TX queue hung logic moved to service_task from tx_timeout")
+Signed-off-by: Brett Creeley <brett.creeley@intel.com>
+Co-developed-by: Norbert Zulinski <norbertx.zulinski@intel.com>
+Signed-off-by: Norbert Zulinski <norbertx.zulinski@intel.com>
+Signed-off-by: Mateusz Palczewski <mateusz.palczewski@intel.com>
+Tested-by: Konrad Jankowski <konrad0.jankowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/iavf/iavf_txrx.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+index 99983f7a0ce0..8f6269e9f6a7 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+@@ -114,8 +114,11 @@ u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
+ {
+ u32 head, tail;
+
++ /* underlying hardware might not allow access and/or always return
++ * 0 for the head/tail registers so just use the cached values
++ */
+ head = ring->next_to_clean;
+- tail = readl(ring->tail);
++ tail = ring->next_to_use;
+
+ if (head != tail)
+ return (head < tail) ?
+--
+2.35.1
+
--- /dev/null
+From e34cf6104d1f2c8629d27697c6d97f96c0323cb6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Sep 2022 15:38:35 +0200
+Subject: iavf: Fix set max MTU size with port VLAN and jumbo frames
+
+From: Michal Jaron <michalx.jaron@intel.com>
+
+[ Upstream commit 399c98c4dc50b7eb7e9f24da7ffdda6f025676ef ]
+
+After setting a port VLAN and an MTU of 9000 on a VF with the ice
+driver, there was an iavf error:
+"PF returned error -5 (IAVF_ERR_PARAM) to our request 6".
+
+During queue configuration, the VF's max packet size was set to
+IAVF_MAX_RXBUFFER, but on ice the max frame size was smaller by
+VLAN_HLEN to make some room for the port VLAN, as the VF is not aware
+whether it's in a port VLAN. This size mismatch caused ice to reject
+the queue configuration with an ERR_PARAM error. The proper max_mtu is
+sent from the ice PF to the VF in the GET_VF_RESOURCES msg, but the VF
+does not look at it.
+
+In iavf, change max_frame from IAVF_MAX_RXBUFFER to the max_mtu
+received from the PF in the GET_VF_RESOURCES msg, making the VF's
+max_frame_size depend on the PF. Add a check: if the received max_mtu
+is not in the eligible range, set it to IAVF_MAX_RXBUFFER.
+
+Fixes: dab86afdbbd1 ("i40e/i40evf: Change the way we limit the maximum frame size for Rx")
+Signed-off-by: Michal Jaron <michalx.jaron@intel.com>
+Signed-off-by: Mateusz Palczewski <mateusz.palczewski@intel.com>
+Tested-by: Konrad Jankowski <konrad0.jankowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+index ff479bf72144..5deee75bc436 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+@@ -241,11 +241,14 @@ int iavf_get_vf_config(struct iavf_adapter *adapter)
+ void iavf_configure_queues(struct iavf_adapter *adapter)
+ {
+ struct virtchnl_vsi_queue_config_info *vqci;
+- struct virtchnl_queue_pair_info *vqpi;
++ int i, max_frame = adapter->vf_res->max_mtu;
+ int pairs = adapter->num_active_queues;
+- int i, max_frame = IAVF_MAX_RXBUFFER;
++ struct virtchnl_queue_pair_info *vqpi;
+ size_t len;
+
++ if (max_frame > IAVF_MAX_RXBUFFER || !max_frame)
++ max_frame = IAVF_MAX_RXBUFFER;
++
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
+--
+2.35.1
+
--- /dev/null
+From 7e8235142a0f1c4e7cc1f711edddd4e67a1ccddc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Sep 2022 18:12:04 +0800
+Subject: ipvlan: Fix out-of-bound bugs caused by unset skb->mac_header
+
+From: Lu Wei <luwei32@huawei.com>
+
+[ Upstream commit 81225b2ea161af48e093f58e8dfee6d705b16af4 ]
+
+If an AF_PACKET socket is used to send packets through ipvlan and the
+default xmit function of the AF_PACKET socket is changed from
+dev_queue_xmit() to packet_direct_xmit() via setsockopt() with the option
+name of PACKET_QDISC_BYPASS, the skb->mac_header may not be reset and
+remains as the initial value of 65535, this may trigger slab-out-of-bounds
+bugs as following:
+
+=================================================================
+BUG: KASAN: slab-out-of-bounds in ipvlan_xmit_mode_l2+0xdb/0x330 [ipvlan]
+CPU: 2 PID: 1768 Comm: raw_send Kdump: loaded Not tainted 6.0.0-rc4+ #6
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-1.fc33
+Call Trace:
+print_address_description.constprop.0+0x1d/0x160
+print_report.cold+0x4f/0x112
+kasan_report+0xa3/0x130
+ipvlan_xmit_mode_l2+0xdb/0x330 [ipvlan]
+ipvlan_start_xmit+0x29/0xa0 [ipvlan]
+__dev_direct_xmit+0x2e2/0x380
+packet_direct_xmit+0x22/0x60
+packet_snd+0x7c9/0xc40
+sock_sendmsg+0x9a/0xa0
+__sys_sendto+0x18a/0x230
+__x64_sys_sendto+0x74/0x90
+do_syscall_64+0x3b/0x90
+entry_SYSCALL_64_after_hwframe+0x63/0xcd
+
+The root cause is:
+ 1. packet_snd() only resets skb->mac_header when sock->type is SOCK_RAW
+    and skb->protocol is not specified, as in packet_parse_headers()
+
+ 2. packet_direct_xmit() doesn't reset skb->mac_header the way
+    dev_queue_xmit() does
+
+In this case, skb->mac_header is 65535 when ipvlan_xmit_mode_l2() is
+called. So when ipvlan_xmit_mode_l2() gets the mac header with
+eth_hdr(), which uses "skb->head + skb->mac_header", an out-of-bounds
+access occurs.
+
+This patch replaces eth_hdr() with skb_eth_hdr() in ipvlan_xmit_mode_l2()
+and resets the mac header in the multicast path to solve this
+out-of-bounds bug.
+
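+For reference, the difference between the two accessors (simplified
+from include/linux/if_ether.h; treat as a sketch):
+
+```c
+/* eth_hdr() trusts skb->mac_header, which may still be the unset 65535 */
+static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
+{
+        return (struct ethhdr *)skb_mac_header(skb); /* skb->head + skb->mac_header */
+}
+
+/* skb_eth_hdr() reads from skb->data, which is valid at xmit time */
+static inline struct ethhdr *skb_eth_hdr(const struct sk_buff *skb)
+{
+        return (struct ethhdr *)skb->data;
+}
+```
+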
+Fixes: 2ad7bf363841 ("ipvlan: Initial check-in of the IPVLAN driver.")
+Signed-off-by: Lu Wei <luwei32@huawei.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ipvlan/ipvlan_core.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
+index 8801d093135c..a33149ee0ddc 100644
+--- a/drivers/net/ipvlan/ipvlan_core.c
++++ b/drivers/net/ipvlan/ipvlan_core.c
+@@ -496,7 +496,6 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
+
+ static int ipvlan_process_outbound(struct sk_buff *skb)
+ {
+- struct ethhdr *ethh = eth_hdr(skb);
+ int ret = NET_XMIT_DROP;
+
+ /* The ipvlan is a pseudo-L2 device, so the packets that we receive
+@@ -506,6 +505,8 @@ static int ipvlan_process_outbound(struct sk_buff *skb)
+ if (skb_mac_header_was_set(skb)) {
+ /* In this mode we dont care about
+ * multicast and broadcast traffic */
++ struct ethhdr *ethh = eth_hdr(skb);
++
+ if (is_multicast_ether_addr(ethh->h_dest)) {
+ pr_debug_ratelimited(
+ "Dropped {multi|broad}cast of type=[%x]\n",
+@@ -590,7 +591,7 @@ static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
+ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
+ {
+ const struct ipvl_dev *ipvlan = netdev_priv(dev);
+- struct ethhdr *eth = eth_hdr(skb);
++ struct ethhdr *eth = skb_eth_hdr(skb);
+ struct ipvl_addr *addr;
+ void *lyr3h;
+ int addr_type;
+@@ -620,6 +621,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
+ return dev_forward_skb(ipvlan->phy_dev, skb);
+
+ } else if (is_multicast_ether_addr(eth->h_dest)) {
++ skb_reset_mac_header(skb);
+ ipvlan_skb_crossing_ns(skb, NULL);
+ ipvlan_multicast_enqueue(ipvlan->port, skb, true);
+ return NET_XMIT_SUCCESS;
+--
+2.35.1
+
--- /dev/null
+From 4efee3ff8e734d235c96997f217aaee29136801b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 17 Sep 2022 16:25:40 -0700
+Subject: MIPS: lantiq: export clk_get_io() for lantiq_wdt.ko
+
+From: Randy Dunlap <rdunlap@infradead.org>
+
+[ Upstream commit 502550123bee6a2ffa438409b5b9aad4d6db3a8c ]
+
+The lantiq WDT driver uses clk_get_io(), which is not exported,
+so export it to fix a build error:
+
+ERROR: modpost: "clk_get_io" [drivers/watchdog/lantiq_wdt.ko] undefined!
+
+Fixes: 287e3f3f4e68 ("MIPS: lantiq: implement support for clkdev api")
+Signed-off-by: Randy Dunlap <rdunlap@infradead.org>
+Reported-by: kernel test robot <lkp@intel.com>
+Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Cc: John Crispin <john@phrozen.org>
+Cc: linux-mips@vger.kernel.org
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/lantiq/clk.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c
+index 7a623684d9b5..2d5a0bcb0cec 100644
+--- a/arch/mips/lantiq/clk.c
++++ b/arch/mips/lantiq/clk.c
+@@ -50,6 +50,7 @@ struct clk *clk_get_io(void)
+ {
+ return &cpu_clk_generic[2];
+ }
++EXPORT_SYMBOL_GPL(clk_get_io);
+
+ struct clk *clk_get_ppe(void)
+ {
+--
+2.35.1
+
--- /dev/null
+From b1bd8598712566902316050d33bf3f9197ac4daa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Sep 2022 00:10:09 +0800
+Subject: MIPS: Loongson32: Fix PHY-mode being left unspecified
+
+From: Serge Semin <Sergey.Semin@baikalelectronics.ru>
+
+[ Upstream commit e9f3f8f488005f6da3cfb66070706770ecaef747 ]
+
+Commit 0060c8783330 ("net: stmmac: implement support for passive mode
+converters via dt") changed the semantics of the plat->interface field
+from containing the PHY-mode to specifying the MAC-PCS interface mode.
+Because of that, the loongson32 platform code leaves the phylink
+interface uninitialized instead of carrying the PHY-mode intended by
+the actual platform setup. The commit author most likely just missed
+this arch-specific code. Mend the Loongson32 platform code by assigning
+the PHY-mode to the phy_interface field of the STMMAC platform data.
+
+Fixes: 0060c8783330 ("net: stmmac: implement support for passive mode converters via dt")
+Signed-off-by: Serge Semin <Sergey.Semin@baikalelectronics.ru>
+Signed-off-by: Keguang Zhang <keguang.zhang@gmail.com>
+Tested-by: Keguang Zhang <keguang.zhang@gmail.com>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/loongson32/common/platform.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/arch/mips/loongson32/common/platform.c b/arch/mips/loongson32/common/platform.c
+index 794c96c2a4cd..311dc1580bbd 100644
+--- a/arch/mips/loongson32/common/platform.c
++++ b/arch/mips/loongson32/common/platform.c
+@@ -98,7 +98,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
+ if (plat_dat->bus_id) {
+ __raw_writel(__raw_readl(LS1X_MUX_CTRL0) | GMAC1_USE_UART1 |
+ GMAC1_USE_UART0, LS1X_MUX_CTRL0);
+- switch (plat_dat->interface) {
++ switch (plat_dat->phy_interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ val &= ~(GMAC1_USE_TXCLK | GMAC1_USE_PWM23);
+ break;
+@@ -107,12 +107,12 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
+ break;
+ default:
+ pr_err("unsupported mii mode %d\n",
+- plat_dat->interface);
++ plat_dat->phy_interface);
+ return -ENOTSUPP;
+ }
+ val &= ~GMAC1_SHUT;
+ } else {
+- switch (plat_dat->interface) {
++ switch (plat_dat->phy_interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ val &= ~(GMAC0_USE_TXCLK | GMAC0_USE_PWM01);
+ break;
+@@ -121,7 +121,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
+ break;
+ default:
+ pr_err("unsupported mii mode %d\n",
+- plat_dat->interface);
++ plat_dat->phy_interface);
+ return -ENOTSUPP;
+ }
+ val &= ~GMAC0_SHUT;
+@@ -131,7 +131,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
+ plat_dat = dev_get_platdata(&pdev->dev);
+
+ val &= ~PHY_INTF_SELI;
+- if (plat_dat->interface == PHY_INTERFACE_MODE_RMII)
++ if (plat_dat->phy_interface == PHY_INTERFACE_MODE_RMII)
+ val |= 0x4 << PHY_INTF_SELI_SHIFT;
+ __raw_writel(val, LS1X_MUX_CTRL1);
+
+@@ -146,9 +146,9 @@ static struct plat_stmmacenet_data ls1x_eth0_pdata = {
+ .bus_id = 0,
+ .phy_addr = -1,
+ #if defined(CONFIG_LOONGSON1_LS1B)
+- .interface = PHY_INTERFACE_MODE_MII,
++ .phy_interface = PHY_INTERFACE_MODE_MII,
+ #elif defined(CONFIG_LOONGSON1_LS1C)
+- .interface = PHY_INTERFACE_MODE_RMII,
++ .phy_interface = PHY_INTERFACE_MODE_RMII,
+ #endif
+ .mdio_bus_data = &ls1x_mdio_bus_data,
+ .dma_cfg = &ls1x_eth_dma_cfg,
+@@ -186,7 +186,7 @@ struct platform_device ls1x_eth0_pdev = {
+ static struct plat_stmmacenet_data ls1x_eth1_pdata = {
+ .bus_id = 1,
+ .phy_addr = -1,
+- .interface = PHY_INTERFACE_MODE_MII,
++ .phy_interface = PHY_INTERFACE_MODE_MII,
+ .mdio_bus_data = &ls1x_mdio_bus_data,
+ .dma_cfg = &ls1x_eth_dma_cfg,
+ .has_gmac = 1,
+--
+2.35.1
+
--- /dev/null
+From d3772f673d3339e31d11f35a63ed2a1be7f1475b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Sep 2022 16:56:39 +0900
+Subject: net: bonding: Share lacpdu_mcast_addr definition
+
+From: Benjamin Poirier <bpoirier@nvidia.com>
+
+[ Upstream commit 1d9a143ee3408349700f44a9197b7ae0e4faae5d ]
+
+There are already a few definitions of arrays containing
+MULTICAST_LACPDU_ADDR and the next patch will add one more use. These all
+contain the same constant data so define one common instance for all
+bonding code.
+
+Signed-off-by: Benjamin Poirier <bpoirier@nvidia.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: 86247aba599e ("net: bonding: Unsync device addresses on ndo_stop")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/bonding/bond_3ad.c | 5 +++--
+ drivers/net/bonding/bond_main.c | 16 ++++------------
+ include/net/bond_3ad.h | 2 --
+ include/net/bonding.h | 3 +++
+ 4 files changed, 10 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
+index b0f8d551b61d..acb6ff0be5ff 100644
+--- a/drivers/net/bonding/bond_3ad.c
++++ b/drivers/net/bonding/bond_3ad.c
+@@ -85,8 +85,9 @@ static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
+ static u16 ad_ticks_per_sec;
+ static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
+
+-static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned =
+- MULTICAST_LACPDU_ADDR;
++const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned = {
++ 0x01, 0x80, 0xC2, 0x00, 0x00, 0x02
++};
+
+ /* ================= main 802.3ad protocol functions ================== */
+ static int ad_lacpdu_send(struct port *port);
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 9c4b45341fd2..be1fd4ef4531 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -827,12 +827,8 @@ static void bond_hw_addr_flush(struct net_device *bond_dev,
+ dev_uc_unsync(slave_dev, bond_dev);
+ dev_mc_unsync(slave_dev, bond_dev);
+
+- if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+- /* del lacpdu mc addr from mc list */
+- u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
+-
+- dev_mc_del(slave_dev, lacpdu_multicast);
+- }
++ if (BOND_MODE(bond) == BOND_MODE_8023AD)
++ dev_mc_del(slave_dev, lacpdu_mcast_addr);
+ }
+
+ /*--------------------------- Active slave change ---------------------------*/
+@@ -2078,12 +2074,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
+ dev_uc_sync_multiple(slave_dev, bond_dev);
+ netif_addr_unlock_bh(bond_dev);
+
+- if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+- /* add lacpdu mc addr to mc list */
+- u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
+-
+- dev_mc_add(slave_dev, lacpdu_multicast);
+- }
++ if (BOND_MODE(bond) == BOND_MODE_8023AD)
++ dev_mc_add(slave_dev, lacpdu_mcast_addr);
+ }
+
+ bond->slave_cnt++;
+diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
+index 1a28f299a4c6..895eae18271f 100644
+--- a/include/net/bond_3ad.h
++++ b/include/net/bond_3ad.h
+@@ -15,8 +15,6 @@
+ #define PKT_TYPE_LACPDU cpu_to_be16(ETH_P_SLOW)
+ #define AD_TIMER_INTERVAL 100 /*msec*/
+
+-#define MULTICAST_LACPDU_ADDR {0x01, 0x80, 0xC2, 0x00, 0x00, 0x02}
+-
+ #define AD_LACP_SLOW 0
+ #define AD_LACP_FAST 1
+
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index 67d676059aa0..d9cc3f5602fb 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -763,6 +763,9 @@ extern struct rtnl_link_ops bond_link_ops;
+ /* exported from bond_sysfs_slave.c */
+ extern const struct sysfs_ops slave_sysfs_ops;
+
++/* exported from bond_3ad.c */
++extern const u8 lacpdu_mcast_addr[];
++
+ static inline netdev_tx_t bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
+ {
+ atomic_long_inc(&dev->tx_dropped);
+--
+2.35.1
+
--- /dev/null
+From 9c1516f2582bd65f0eca20545c31353060c9657b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Sep 2022 16:56:40 +0900
+Subject: net: bonding: Unsync device addresses on ndo_stop
+
+From: Benjamin Poirier <bpoirier@nvidia.com>
+
+[ Upstream commit 86247aba599e5b07d7e828e6edaaebb0ef2b1158 ]
+
+Netdev drivers are expected to call dev_{uc,mc}_sync() in their
+ndo_set_rx_mode method and dev_{uc,mc}_unsync() in their ndo_stop method.
+This is mentioned in the kerneldoc for those dev_* functions.
+
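+A sketch of that expected pattern for a driver stacked on a lower
+device (the foo_* names are hypothetical):
+
+```c
+static void foo_set_rx_mode(struct net_device *dev)
+{
+        struct net_device *lower = foo_get_lower(dev);  /* hypothetical */
+
+        dev_uc_sync(lower, dev);
+        dev_mc_sync(lower, dev);
+}
+
+static int foo_stop(struct net_device *dev)
+{
+        struct net_device *lower = foo_get_lower(dev);  /* hypothetical */
+
+        dev_uc_unsync(lower, dev);
+        dev_mc_unsync(lower, dev);
+        return 0;
+}
+```
+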
+The bonding driver calls dev_{uc,mc}_unsync() during ndo_uninit instead of
+ndo_stop. This is ineffective because address lists (dev->{uc,mc}) have
+already been emptied in unregister_netdevice_many() before ndo_uninit is
+called. This mistake can result in addresses being leftover on former bond
+slaves after a bond has been deleted; see test_LAG_cleanup() in the last
+patch in this series.
+
+Add unsync calls, via bond_hw_addr_flush(), at their expected location,
+bond_close().
+Add a dev_mc_add() call to bond_open() to match the above change.
+
+v3:
+* When adding or deleting a slave, only sync/unsync, add/del addresses if
+ the bond is up. In other cases, it is taken care of at the right time by
+ ndo_open/ndo_set_rx_mode/ndo_stop.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Benjamin Poirier <bpoirier@nvidia.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/bonding/bond_main.c | 47 ++++++++++++++++++++++++---------
+ 1 file changed, 35 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index be1fd4ef4531..f38a6ce5749b 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -848,7 +848,8 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
+ if (bond->dev->flags & IFF_ALLMULTI)
+ dev_set_allmulti(old_active->dev, -1);
+
+- bond_hw_addr_flush(bond->dev, old_active->dev);
++ if (bond->dev->flags & IFF_UP)
++ bond_hw_addr_flush(bond->dev, old_active->dev);
+ }
+
+ if (new_active) {
+@@ -859,10 +860,12 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
+ if (bond->dev->flags & IFF_ALLMULTI)
+ dev_set_allmulti(new_active->dev, 1);
+
+- netif_addr_lock_bh(bond->dev);
+- dev_uc_sync(new_active->dev, bond->dev);
+- dev_mc_sync(new_active->dev, bond->dev);
+- netif_addr_unlock_bh(bond->dev);
++ if (bond->dev->flags & IFF_UP) {
++ netif_addr_lock_bh(bond->dev);
++ dev_uc_sync(new_active->dev, bond->dev);
++ dev_mc_sync(new_active->dev, bond->dev);
++ netif_addr_unlock_bh(bond->dev);
++ }
+ }
+ }
+
+@@ -2069,13 +2072,15 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
+ }
+ }
+
+- netif_addr_lock_bh(bond_dev);
+- dev_mc_sync_multiple(slave_dev, bond_dev);
+- dev_uc_sync_multiple(slave_dev, bond_dev);
+- netif_addr_unlock_bh(bond_dev);
++ if (bond_dev->flags & IFF_UP) {
++ netif_addr_lock_bh(bond_dev);
++ dev_mc_sync_multiple(slave_dev, bond_dev);
++ dev_uc_sync_multiple(slave_dev, bond_dev);
++ netif_addr_unlock_bh(bond_dev);
+
+- if (BOND_MODE(bond) == BOND_MODE_8023AD)
+- dev_mc_add(slave_dev, lacpdu_mcast_addr);
++ if (BOND_MODE(bond) == BOND_MODE_8023AD)
++ dev_mc_add(slave_dev, lacpdu_mcast_addr);
++ }
+ }
+
+ bond->slave_cnt++;
+@@ -2302,7 +2307,8 @@ static int __bond_release_one(struct net_device *bond_dev,
+ if (old_flags & IFF_ALLMULTI)
+ dev_set_allmulti(slave_dev, -1);
+
+- bond_hw_addr_flush(bond_dev, slave_dev);
++ if (old_flags & IFF_UP)
++ bond_hw_addr_flush(bond_dev, slave_dev);
+ }
+
+ slave_disable_netpoll(slave);
+@@ -3764,6 +3770,9 @@ static int bond_open(struct net_device *bond_dev)
+ /* register to receive LACPDUs */
+ bond->recv_probe = bond_3ad_lacpdu_recv;
+ bond_3ad_initiate_agg_selection(bond, 1);
++
++ bond_for_each_slave(bond, slave, iter)
++ dev_mc_add(slave->dev, lacpdu_mcast_addr);
+ }
+
+ if (bond_mode_can_use_xmit_hash(bond))
+@@ -3775,6 +3784,7 @@ static int bond_open(struct net_device *bond_dev)
+ static int bond_close(struct net_device *bond_dev)
+ {
+ struct bonding *bond = netdev_priv(bond_dev);
++ struct slave *slave;
+
+ bond_work_cancel_all(bond);
+ bond->send_peer_notif = 0;
+@@ -3782,6 +3792,19 @@ static int bond_close(struct net_device *bond_dev)
+ bond_alb_deinitialize(bond);
+ bond->recv_probe = NULL;
+
++ if (bond_uses_primary(bond)) {
++ rcu_read_lock();
++ slave = rcu_dereference(bond->curr_active_slave);
++ if (slave)
++ bond_hw_addr_flush(bond_dev, slave->dev);
++ rcu_read_unlock();
++ } else {
++ struct list_head *iter;
++
++ bond_for_each_slave(bond, slave, iter)
++ bond_hw_addr_flush(bond_dev, slave->dev);
++ }
++
+ return 0;
+ }
+
+--
+2.35.1
+
--- /dev/null
+From 5a7dd19f9242fcb5c8a29b39985c19b0ad3bd6e2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Sep 2022 12:08:13 +0200
+Subject: net: core: fix flow symmetric hash
+
+From: Ludovic Cintrat <ludovic.cintrat@gatewatcher.com>
+
+[ Upstream commit 64ae13ed478428135cddc2f1113dff162d8112d4 ]
+
+__flow_hash_consistentify() wrongly swaps IPv4 addresses in a few
+cases. This function is indirectly used by __skb_get_hash_symmetric(),
+which is used to fan out packets in AF_PACKET.
+Intrusion detection systems may be impacted by this issue.
+
+__flow_hash_consistentify() computes the address difference, then swaps
+the addresses if the difference is negative. In a few cases src - dst
+and dst - src are both negative.
+
+The following snippet mimics __flow_hash_consistentify():
+
+```
+ #include <stdio.h>
+ #include <stdint.h>
+
+ int main(int argc, char** argv) {
+
+ int diffs_d, diffd_s;
+ uint32_t dst = 0xb225a8c0; /* 178.37.168.192 --> 192.168.37.178 */
+ uint32_t src = 0x3225a8c0; /* 50.37.168.192 --> 192.168.37.50 */
+ uint32_t dst2 = 0x3325a8c0; /* 51.37.168.192 --> 192.168.37.51 */
+
+ diffs_d = src - dst;
+ diffd_s = dst - src;
+
+ printf("src:%08x dst:%08x, diff(s-d)=%d(0x%x) diff(d-s)=%d(0x%x)\n",
+ src, dst, diffs_d, diffs_d, diffd_s, diffd_s);
+
+ diffs_d = src - dst2;
+ diffd_s = dst2 - src;
+
+ printf("src:%08x dst:%08x, diff(s-d)=%d(0x%x) diff(d-s)=%d(0x%x)\n",
+ src, dst2, diffs_d, diffs_d, diffd_s, diffd_s);
+
+ return 0;
+ }
+```
+
+Results:
+
+src:3225a8c0 dst:b225a8c0, \
+ diff(s-d)=-2147483648(0x80000000) \
+ diff(d-s)=-2147483648(0x80000000)
+
+src:3225a8c0 dst:3325a8c0, \
+ diff(s-d)=-16777216(0xff000000) \
+ diff(d-s)=16777216(0x1000000)
+
+In the first case both address differences are < 0: the difference is
+exactly 0x80000000, the one 32-bit value that equals its own negation.
+Therefore __flow_hash_consistentify() swaps for both directions, and
+dst->src and src->dst packets get different hashes.
+
+Fixes: c3f8324188fa8 ("net: Add full IPv6 addresses to flow_keys")
+Signed-off-by: Ludovic Cintrat <ludovic.cintrat@gatewatcher.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/flow_dissector.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
+index aad311c73810..ed120828c7e2 100644
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -1494,9 +1494,8 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys)
+
+ switch (keys->control.addr_type) {
+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+- addr_diff = (__force u32)keys->addrs.v4addrs.dst -
+- (__force u32)keys->addrs.v4addrs.src;
+- if (addr_diff < 0)
++ if ((__force u32)keys->addrs.v4addrs.dst <
++ (__force u32)keys->addrs.v4addrs.src)
+ swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
+
+ if ((__force u16)keys->ports.dst <
+--
+2.35.1
+
--- /dev/null
+From 6dd22faff242cd4d5cced3a451e5955f77857ca2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 16 Sep 2022 16:32:08 +0300
+Subject: net: enetc: move enetc_set_psfp() out of the common
+ enetc_set_features()
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit fed38e64d9b99d65a36c0dbadc3d3f8ddd9ea030 ]
+
+The VF netdev driver shouldn't respond to changes in the NETIF_F_HW_TC
+flag; only PFs should. Moreover, TSN-specific code should go to
+enetc_qos.c, which should not be included in the VF driver.
+
+Fixes: 79e499829f3f ("net: enetc: add hw tc hw offload features for PSPF capability")
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Link: https://lore.kernel.org/r/20220916133209.3351399-1-vladimir.oltean@nxp.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/enetc/enetc.c | 32 +------------------
+ drivers/net/ethernet/freescale/enetc/enetc.h | 9 ++++--
+ .../net/ethernet/freescale/enetc/enetc_pf.c | 11 ++++++-
+ .../net/ethernet/freescale/enetc/enetc_qos.c | 23 +++++++++++++
+ .../net/ethernet/freescale/enetc/enetc_vf.c | 4 ++-
+ 5 files changed, 44 insertions(+), 35 deletions(-)
+
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
+index 15aa3b3c0089..4af253825957 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc.c
+@@ -1671,29 +1671,6 @@ static int enetc_set_rss(struct net_device *ndev, int en)
+ return 0;
+ }
+
+-static int enetc_set_psfp(struct net_device *ndev, int en)
+-{
+- struct enetc_ndev_priv *priv = netdev_priv(ndev);
+- int err;
+-
+- if (en) {
+- err = enetc_psfp_enable(priv);
+- if (err)
+- return err;
+-
+- priv->active_offloads |= ENETC_F_QCI;
+- return 0;
+- }
+-
+- err = enetc_psfp_disable(priv);
+- if (err)
+- return err;
+-
+- priv->active_offloads &= ~ENETC_F_QCI;
+-
+- return 0;
+-}
+-
+ static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
+ {
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+@@ -1712,11 +1689,9 @@ static void enetc_enable_txvlan(struct net_device *ndev, bool en)
+ enetc_bdr_enable_txvlan(&priv->si->hw, i, en);
+ }
+
+-int enetc_set_features(struct net_device *ndev,
+- netdev_features_t features)
++void enetc_set_features(struct net_device *ndev, netdev_features_t features)
+ {
+ netdev_features_t changed = ndev->features ^ features;
+- int err = 0;
+
+ if (changed & NETIF_F_RXHASH)
+ enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
+@@ -1728,11 +1703,6 @@ int enetc_set_features(struct net_device *ndev,
+ if (changed & NETIF_F_HW_VLAN_CTAG_TX)
+ enetc_enable_txvlan(ndev,
+ !!(features & NETIF_F_HW_VLAN_CTAG_TX));
+-
+- if (changed & NETIF_F_HW_TC)
+- err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
+-
+- return err;
+ }
+
+ #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
+index 15d19cbd5a95..00386c5d3cde 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc.h
++++ b/drivers/net/ethernet/freescale/enetc/enetc.h
+@@ -301,8 +301,7 @@ void enetc_start(struct net_device *ndev);
+ void enetc_stop(struct net_device *ndev);
+ netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev);
+ struct net_device_stats *enetc_get_stats(struct net_device *ndev);
+-int enetc_set_features(struct net_device *ndev,
+- netdev_features_t features);
++void enetc_set_features(struct net_device *ndev, netdev_features_t features);
+ int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd);
+ int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data);
+@@ -335,6 +334,7 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data);
+ int enetc_psfp_init(struct enetc_ndev_priv *priv);
+ int enetc_psfp_clean(struct enetc_ndev_priv *priv);
++int enetc_set_psfp(struct net_device *ndev, bool en);
+
+ static inline void enetc_get_max_cap(struct enetc_ndev_priv *priv)
+ {
+@@ -410,4 +410,9 @@ static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
+ {
+ return 0;
+ }
++
++static inline int enetc_set_psfp(struct net_device *ndev, bool en)
++{
++ return 0;
++}
+ #endif
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+index 716b396bf094..6904e10dd46b 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+@@ -671,6 +671,13 @@ static int enetc_pf_set_features(struct net_device *ndev,
+ {
+ netdev_features_t changed = ndev->features ^ features;
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
++ int err;
++
++ if (changed & NETIF_F_HW_TC) {
++ err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
++ if (err)
++ return err;
++ }
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+@@ -684,7 +691,9 @@ static int enetc_pf_set_features(struct net_device *ndev,
+ if (changed & NETIF_F_LOOPBACK)
+ enetc_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK));
+
+- return enetc_set_features(ndev, features);
++ enetc_set_features(ndev, features);
++
++ return 0;
+ }
+
+ static const struct net_device_ops enetc_ndev_ops = {
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+index 9e6988fd3787..62efe1aebf86 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+@@ -1525,6 +1525,29 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ }
+ }
+
++int enetc_set_psfp(struct net_device *ndev, bool en)
++{
++ struct enetc_ndev_priv *priv = netdev_priv(ndev);
++ int err;
++
++ if (en) {
++ err = enetc_psfp_enable(priv);
++ if (err)
++ return err;
++
++ priv->active_offloads |= ENETC_F_QCI;
++ return 0;
++ }
++
++ err = enetc_psfp_disable(priv);
++ if (err)
++ return err;
++
++ priv->active_offloads &= ~ENETC_F_QCI;
++
++ return 0;
++}
++
+ int enetc_psfp_init(struct enetc_ndev_priv *priv)
+ {
+ if (epsfp.psfp_sfi_bitmap)
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+index 33c125735db7..5ce3e2593bdd 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+@@ -88,7 +88,9 @@ static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr)
+ static int enetc_vf_set_features(struct net_device *ndev,
+ netdev_features_t features)
+ {
+- return enetc_set_features(ndev, features);
++ enetc_set_features(ndev, features);
++
++ return 0;
+ }
+
+ /* Probing/ Init */
+--
+2.35.1
+
--- /dev/null
+From 8b5ff5ce40741e9917658be1a354270767d7f247 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 22 Mar 2021 20:05:05 -0500
+Subject: net: ipa: avoid 64-bit modulus
+
+From: Alex Elder <elder@linaro.org>
+
+[ Upstream commit 437c78f976f5b39fc4b2a1c65903a229f55912dd ]
+
+It is possible for a 32 bit x86 build to use a 64 bit DMA address.
+
+There are two remaining spots where the IPA driver does a modulo
+operation to check alignment of a DMA address, and under certain
+conditions this can lead to a build error on i386 (at least).
+
+The alignment checks we're doing are for power-of-2 values, and this
+means checking only the lower 32 bits of the DMA address is sufficient.
+This ensures both operands to the modulo operator are 32 bits wide.
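+
+A short sketch of the property relied on here (illustrative values, not
+the driver code): for a power-of-2 size, addr % size equals
+addr & (size - 1), which never involves anything above the low 32 bits
+when size itself fits in 32 bits:
+
+    #include <assert.h>
+    #include <stdint.h>
+
+    static uint32_t lower_32_bits(uint64_t v)   /* kernel-style helper */
+    {
+            return (uint32_t)v;
+    }
+
+    int main(void)
+    {
+            uint64_t addr = 0x123456789abcd000ULL; /* 64-bit DMA address */
+            uint32_t size = 0x1000;                /* power of 2 */
+
+            assert((addr % size) == (lower_32_bits(addr) % size));
+            return 0;
+    }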
+
+Reported-by: Randy Dunlap <rdunlap@infradead.org>
+Signed-off-by: Alex Elder <elder@linaro.org>
+Acked-by: Randy Dunlap <rdunlap@infradead.org> # build-tested
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: cf412ec33325 ("net: ipa: properly limit modem routing table use")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ipa/gsi.c | 11 +++++++----
+ drivers/net/ipa/ipa_table.c | 9 ++++++---
+ 2 files changed, 13 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
+index fe91b72eca36..e46d3622f9eb 100644
+--- a/drivers/net/ipa/gsi.c
++++ b/drivers/net/ipa/gsi.c
+@@ -1251,15 +1251,18 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
+ /* Initialize a ring, including allocating DMA memory for its entries */
+ static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
+ {
+- size_t size = count * GSI_RING_ELEMENT_SIZE;
++ u32 size = count * GSI_RING_ELEMENT_SIZE;
+ struct device *dev = gsi->dev;
+ dma_addr_t addr;
+
+- /* Hardware requires a 2^n ring size, with alignment equal to size */
++ /* Hardware requires a 2^n ring size, with alignment equal to size.
++ * The size is a power of 2, so we can check alignment using just
++ * the bottom 32 bits for a DMA address of any size.
++ */
+ ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
+- if (ring->virt && addr % size) {
++ if (ring->virt && lower_32_bits(addr) % size) {
+ dma_free_coherent(dev, size, ring->virt, addr);
+- dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n",
++ dev_err(dev, "unable to alloc 0x%x-aligned ring buffer\n",
+ size);
+ return -EINVAL; /* Not a good error value, but distinct */
+ } else if (!ring->virt) {
+diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
+index 45e1d68b4694..4f15391aad5f 100644
+--- a/drivers/net/ipa/ipa_table.c
++++ b/drivers/net/ipa/ipa_table.c
+@@ -662,10 +662,13 @@ int ipa_table_init(struct ipa *ipa)
+ return -ENOMEM;
+
+ /* We put the "zero rule" at the base of our table area. The IPA
+- * hardware requires rules to be aligned on a 128-byte boundary.
+- * Make sure the allocation satisfies this constraint.
++ * hardware requires route and filter table rules to be aligned
++ * on a 128-byte boundary. As long as the alignment constraint
++ * is a power of 2, we can check alignment using just the bottom
++ * 32 bits for a DMA address of any size.
+ */
+- if (addr % IPA_TABLE_ALIGN) {
++ BUILD_BUG_ON(!is_power_of_2(IPA_TABLE_ALIGN));
++ if (lower_32_bits(addr) % IPA_TABLE_ALIGN) {
+ dev_err(dev, "table address %pad not %u-byte aligned\n",
+ &addr, IPA_TABLE_ALIGN);
+ dma_free_coherent(dev, size, virt, addr);
+--
+2.35.1
+
--- /dev/null
+From 18e8ca542d2c0eb2dbe63a7fab447dfb430c57d0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 28 Mar 2021 12:31:10 -0500
+Subject: net: ipa: DMA addresses are nicely aligned
+
+From: Alex Elder <elder@linaro.org>
+
+[ Upstream commit 19aaf72c0c7a26ab7ffc655a6d84da6a379f899b ]
+
+A recent patch avoided doing 64-bit modulo operations by checking
+the alignment of some DMA allocations using only the lower 32 bits
+of the address.
+
+David Laight pointed out (after the fix was committed) that DMA
+allocations might already satisfy the alignment requirements. And
+he was right.
+
+Remove the alignment checks that occur after DMA allocation requests,
+and update comments to explain why the constraint is satisfied. The
+only place IPA_TABLE_ALIGN was used was to check the alignment; it is
+therefore no longer needed, so get rid of it.
+
+Add comments where GSI_RING_ELEMENT_SIZE and the tre_count and
+event_count channel data fields are defined to make explicit they
+are required to be powers of 2.
+
+Revise a comment in gsi_trans_pool_init_dma(), taking into account
+that dma_alloc_coherent() guarantees its result is aligned to a page
+size (or order thereof).
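+
+The arithmetic behind that, as a sketch with hypothetical sizes: an
+address aligned to one power of 2 is automatically aligned to every
+smaller power of 2, so a page-aligned (or larger) DMA allocation
+satisfies any smaller power-of-2 ring or rule alignment:
+
+    #include <assert.h>
+    #include <stdint.h>
+
+    static int is_aligned(uint64_t addr, uint64_t align) /* align: 2^n */
+    {
+            return (addr & (align - 1)) == 0;
+    }
+
+    int main(void)
+    {
+            uint64_t page = 4096, ring = 1024; /* both powers of 2 */
+            uint64_t addr = 7 * page;          /* any page-aligned address */
+
+            assert(is_aligned(addr, page));
+            assert(is_aligned(addr, ring));    /* implied by the above */
+            return 0;
+    }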
+
+Don't bother printing an error if a DMA allocation fails.
+
+Suggested-by: David Laight <David.Laight@ACULAB.COM>
+Signed-off-by: Alex Elder <elder@linaro.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: cf412ec33325 ("net: ipa: properly limit modem routing table use")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ipa/gsi.c | 13 ++++---------
+ drivers/net/ipa/gsi_private.h | 2 +-
+ drivers/net/ipa/gsi_trans.c | 9 ++++-----
+ drivers/net/ipa/ipa_data.h | 4 ++--
+ drivers/net/ipa/ipa_table.c | 24 ++++++------------------
+ 5 files changed, 17 insertions(+), 35 deletions(-)
+
+diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
+index e46d3622f9eb..64b12e462765 100644
+--- a/drivers/net/ipa/gsi.c
++++ b/drivers/net/ipa/gsi.c
+@@ -1256,18 +1256,13 @@ static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
+ dma_addr_t addr;
+
+ /* Hardware requires a 2^n ring size, with alignment equal to size.
+- * The size is a power of 2, so we can check alignment using just
+- * the bottom 32 bits for a DMA address of any size.
++ * The DMA address returned by dma_alloc_coherent() is guaranteed to
++ * be a power-of-2 number of pages, which satisfies the requirement.
+ */
+ ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
+- if (ring->virt && lower_32_bits(addr) % size) {
+- dma_free_coherent(dev, size, ring->virt, addr);
+- dev_err(dev, "unable to alloc 0x%x-aligned ring buffer\n",
+- size);
+- return -EINVAL; /* Not a good error value, but distinct */
+- } else if (!ring->virt) {
++ if (!ring->virt)
+ return -ENOMEM;
+- }
++
+ ring->addr = addr;
+ ring->count = count;
+
+diff --git a/drivers/net/ipa/gsi_private.h b/drivers/net/ipa/gsi_private.h
+index 1785c9d3344d..d58dce46e061 100644
+--- a/drivers/net/ipa/gsi_private.h
++++ b/drivers/net/ipa/gsi_private.h
+@@ -14,7 +14,7 @@ struct gsi_trans;
+ struct gsi_ring;
+ struct gsi_channel;
+
+-#define GSI_RING_ELEMENT_SIZE 16 /* bytes */
++#define GSI_RING_ELEMENT_SIZE 16 /* bytes; must be a power of 2 */
+
+ /* Return the entry that follows one provided in a transaction pool */
+ void *gsi_trans_pool_next(struct gsi_trans_pool *pool, void *element);
+diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
+index 6c3ed5b17b80..70c2b585f98d 100644
+--- a/drivers/net/ipa/gsi_trans.c
++++ b/drivers/net/ipa/gsi_trans.c
+@@ -153,11 +153,10 @@ int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
+ size = __roundup_pow_of_two(size);
+ total_size = (count + max_alloc - 1) * size;
+
+- /* The allocator will give us a power-of-2 number of pages. But we
+- * can't guarantee that, so request it. That way we won't waste any
+- * memory that would be available beyond the required space.
+- *
+- * Note that gsi_trans_pool_exit_dma() assumes the total allocated
++ /* The allocator will give us a power-of-2 number of pages
++ * sufficient to satisfy our request. Round up our requested
++ * size to avoid any unused space in the allocation. This way
++ * gsi_trans_pool_exit_dma() can assume the total allocated
+ * size is exactly (count * size).
+ */
+ total_size = get_order(total_size) << PAGE_SHIFT;
+diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
+index 7fc1058a5ca9..ba05e26c3c60 100644
+--- a/drivers/net/ipa/ipa_data.h
++++ b/drivers/net/ipa/ipa_data.h
+@@ -72,8 +72,8 @@
+ * that can be included in a single transaction.
+ */
+ struct gsi_channel_data {
+- u16 tre_count;
+- u16 event_count;
++ u16 tre_count; /* must be a power of 2 */
++ u16 event_count; /* must be a power of 2 */
+ u8 tlv_count;
+ };
+
+diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
+index 4f15391aad5f..087bcae29cc7 100644
+--- a/drivers/net/ipa/ipa_table.c
++++ b/drivers/net/ipa/ipa_table.c
+@@ -96,9 +96,6 @@
+ * ----------------------
+ */
+
+-/* IPA hardware constrains filter and route tables alignment */
+-#define IPA_TABLE_ALIGN 128 /* Minimum table alignment */
+-
+ /* Assignment of route table entries to the modem and AP */
+ #define IPA_ROUTE_MODEM_MIN 0
+ #define IPA_ROUTE_MODEM_COUNT 8
+@@ -656,26 +653,17 @@ int ipa_table_init(struct ipa *ipa)
+
+ ipa_table_validate_build();
+
++ /* The IPA hardware requires route and filter table rules to be
++ * aligned on a 128-byte boundary. We put the "zero rule" at the
++ * base of the table area allocated here. The DMA address returned
++ * by dma_alloc_coherent() is guaranteed to be a power-of-2 number
++ * of pages, which satisfies the rule alignment requirement.
++ */
+ size = IPA_ZERO_RULE_SIZE + (1 + count) * IPA_TABLE_ENTRY_SIZE;
+ virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
+ if (!virt)
+ return -ENOMEM;
+
+- /* We put the "zero rule" at the base of our table area. The IPA
+- * hardware requires route and filter table rules to be aligned
+- * on a 128-byte boundary. As long as the alignment constraint
+- * is a power of 2, we can check alignment using just the bottom
+- * 32 bits for a DMA address of any size.
+- */
+- BUILD_BUG_ON(!is_power_of_2(IPA_TABLE_ALIGN));
+- if (lower_32_bits(addr) % IPA_TABLE_ALIGN) {
+- dev_err(dev, "table address %pad not %u-byte aligned\n",
+- &addr, IPA_TABLE_ALIGN);
+- dma_free_coherent(dev, size, virt, addr);
+-
+- return -ERANGE;
+- }
+-
+ ipa->table_virt = virt;
+ ipa->table_addr = addr;
+
+--
+2.35.1
+
--- /dev/null
+From 6ca9d847aa1e418f45b8b76728d2db179f5717b3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 18 Mar 2021 13:59:27 -0500
+Subject: net: ipa: fix assumptions about DMA address size
+
+From: Alex Elder <elder@linaro.org>
+
+[ Upstream commit d2fd2311de909a7f4e99b4bd11a19e6b671d6a6b ]
+
+Some build time checks in ipa_table_validate_build() assume that a
+DMA address is 64 bits wide. That is more restrictive than it has
+to be. A route or filter table is 64 bits wide no matter what the
+size of a DMA address is on the AP. The code actually uses a
+pointer to __le64 to access table entries, and a fixed constant
+IPA_TABLE_ENTRY_SIZE to describe the size of those entries.
+
+Loosen up two checks so they still verify some requirements, but
+such that they do not assume the size of a DMA address is 64 bits.
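+
+The loosened constraints, sketched as user-space static asserts (the
+kernel uses BUILD_BUG_ON(); the 32-bit dma_addr_t below is just an
+example of a non-64-bit AP):
+
+    #include <stdint.h>
+
+    typedef uint64_t table_entry;  /* stands in for __le64 */
+    typedef uint32_t dma_addr_t;   /* e.g. 32-bit DMA addresses */
+
+    /* a DMA address must fit in an entry, but need not fill it... */
+    _Static_assert(sizeof(dma_addr_t) <= sizeof(table_entry), "fits");
+    /* ...while an entry is always exactly 64 bits wide */
+    _Static_assert(sizeof(table_entry) == sizeof(uint64_t), "64-bit");
+
+    int main(void) { return 0; }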
+
+Signed-off-by: Alex Elder <elder@linaro.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: cf412ec33325 ("net: ipa: properly limit modem routing table use")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ipa/ipa_table.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
+index 0747866d60ab..f26cb9d706da 100644
+--- a/drivers/net/ipa/ipa_table.c
++++ b/drivers/net/ipa/ipa_table.c
+@@ -126,13 +126,15 @@ static void ipa_table_validate_build(void)
+ */
+ BUILD_BUG_ON(ARCH_DMA_MINALIGN % IPA_TABLE_ALIGN);
+
+- /* Filter and route tables contain DMA addresses that refer to
+- * filter or route rules. We use a fixed constant to represent
+- * the size of either type of table entry. Code in ipa_table_init()
+- * uses a pointer to __le64 to initialize table entriews.
++ /* Filter and route tables contain DMA addresses that refer
++ * to filter or route rules. But the size of a table entry
++ * is 64 bits regardless of what the size of an AP DMA address
++ * is. A fixed constant defines the size of an entry, and
++ * code in ipa_table_init() uses a pointer to __le64 to
++ * initialize tables.
+ */
+- BUILD_BUG_ON(IPA_TABLE_ENTRY_SIZE != sizeof(dma_addr_t));
+- BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(__le64));
++ BUILD_BUG_ON(sizeof(dma_addr_t) > IPA_TABLE_ENTRY_SIZE);
++ BUILD_BUG_ON(sizeof(__le64) != IPA_TABLE_ENTRY_SIZE);
+
+ /* A "zero rule" is used to represent no filtering or no routing.
+ * It is a 64-bit block of zeroed memory. Code in ipa_table_init()
+--
+2.35.1
+
--- /dev/null
+From 33e2fc4a1b5ba3ab1686201d0c60be50f7f1d590 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 18 Mar 2021 13:59:29 -0500
+Subject: net: ipa: fix table alignment requirement
+
+From: Alex Elder <elder@linaro.org>
+
+[ Upstream commit e5d4e96b44cf20330c970c3e30ea0a8c3a23feca ]
+
+We currently have a build-time check to ensure that the minimum DMA
+allocation alignment satisfies the constraint that IPA filter and
+route tables must point to rules that are 128-byte aligned.
+
+But what's really important is that the actual allocated DMA memory
+has that alignment, even if the minimum is smaller than that.
+
+Remove the BUILD_BUG_ON() call checking against minimum DMA alignment
+and instead verify at runtime that the allocated memory is properly
+aligned.
+
+Signed-off-by: Alex Elder <elder@linaro.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: cf412ec33325 ("net: ipa: properly limit modem routing table use")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ipa/ipa_table.c | 20 ++++++++++++--------
+ 1 file changed, 12 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
+index f26cb9d706da..45e1d68b4694 100644
+--- a/drivers/net/ipa/ipa_table.c
++++ b/drivers/net/ipa/ipa_table.c
+@@ -118,14 +118,6 @@
+ /* Check things that can be validated at build time. */
+ static void ipa_table_validate_build(void)
+ {
+- /* IPA hardware accesses memory 128 bytes at a time. Addresses
+- * referred to by entries in filter and route tables must be
+- * aligned on 128-byte byte boundaries. The only rule address
+- * ever use is the "zero rule", and it's aligned at the base
+- * of a coherent DMA allocation.
+- */
+- BUILD_BUG_ON(ARCH_DMA_MINALIGN % IPA_TABLE_ALIGN);
+-
+ /* Filter and route tables contain DMA addresses that refer
+ * to filter or route rules. But the size of a table entry
+ * is 64 bits regardless of what the size of an AP DMA address
+@@ -669,6 +661,18 @@ int ipa_table_init(struct ipa *ipa)
+ if (!virt)
+ return -ENOMEM;
+
++ /* We put the "zero rule" at the base of our table area. The IPA
++ * hardware requires rules to be aligned on a 128-byte boundary.
++ * Make sure the allocation satisfies this constraint.
++ */
++ if (addr % IPA_TABLE_ALIGN) {
++ dev_err(dev, "table address %pad not %u-byte aligned\n",
++ &addr, IPA_TABLE_ALIGN);
++ dma_free_coherent(dev, size, virt, addr);
++
++ return -ERANGE;
++ }
++
+ ipa->table_virt = virt;
+ ipa->table_addr = addr;
+
+--
+2.35.1
+
--- /dev/null
+From 29a8095d3111367ab299d3ed423558f0b77196d4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 28 Mar 2021 12:31:11 -0500
+Subject: net: ipa: kill IPA_TABLE_ENTRY_SIZE
+
+From: Alex Elder <elder@linaro.org>
+
+[ Upstream commit 4ea29143ebe6c453f5fddc80ffe4ed046f44aa3a ]
+
+Entries in an IPA route or filter table are 64-bit little-endian
+addresses, each of which refers to a routing or filtering rule.
+
+The format of these table slots is fixed, but IPA_TABLE_ENTRY_SIZE
+is used to define their size. This symbol doesn't really add value,
+and I think it unnecessarily obscures what a table entry *is*.
+
+So get rid of IPA_TABLE_ENTRY_SIZE, and just use sizeof(__le64) in
+its place throughout the code.
+
+Update the comments in "ipa_table.c" to provide a little better
+explanation of these table slots.
+
+Signed-off-by: Alex Elder <elder@linaro.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: cf412ec33325 ("net: ipa: properly limit modem routing table use")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ipa/ipa_cmd.c | 2 +-
+ drivers/net/ipa/ipa_qmi.c | 10 +++----
+ drivers/net/ipa/ipa_table.c | 59 +++++++++++++++++++++----------------
+ drivers/net/ipa/ipa_table.h | 3 --
+ 4 files changed, 39 insertions(+), 35 deletions(-)
+
+diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
+index a47378b7d9b2..dc94ce035655 100644
+--- a/drivers/net/ipa/ipa_cmd.c
++++ b/drivers/net/ipa/ipa_cmd.c
+@@ -154,7 +154,7 @@ static void ipa_cmd_validate_build(void)
+ * of entries, as IPv4 and IPv6 route tables have the same number
+ * of entries.
+ */
+-#define TABLE_SIZE (TABLE_COUNT_MAX * IPA_TABLE_ENTRY_SIZE)
++#define TABLE_SIZE (TABLE_COUNT_MAX * sizeof(__le64))
+ #define TABLE_COUNT_MAX max_t(u32, IPA_ROUTE_COUNT_MAX, IPA_FILTER_COUNT_MAX)
+ BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK));
+ BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
+diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
+index 1a87a49538c5..fea61657867e 100644
+--- a/drivers/net/ipa/ipa_qmi.c
++++ b/drivers/net/ipa/ipa_qmi.c
+@@ -308,12 +308,12 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
+ mem = &ipa->mem[IPA_MEM_V4_ROUTE];
+ req.v4_route_tbl_info_valid = 1;
+ req.v4_route_tbl_info.start = ipa->mem_offset + mem->offset;
+- req.v4_route_tbl_info.count = mem->size / IPA_TABLE_ENTRY_SIZE;
++ req.v4_route_tbl_info.count = mem->size / sizeof(__le64);
+
+ mem = &ipa->mem[IPA_MEM_V6_ROUTE];
+ req.v6_route_tbl_info_valid = 1;
+ req.v6_route_tbl_info.start = ipa->mem_offset + mem->offset;
+- req.v6_route_tbl_info.count = mem->size / IPA_TABLE_ENTRY_SIZE;
++ req.v6_route_tbl_info.count = mem->size / sizeof(__le64);
+
+ mem = &ipa->mem[IPA_MEM_V4_FILTER];
+ req.v4_filter_tbl_start_valid = 1;
+@@ -352,8 +352,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
+ req.v4_hash_route_tbl_info_valid = 1;
+ req.v4_hash_route_tbl_info.start =
+ ipa->mem_offset + mem->offset;
+- req.v4_hash_route_tbl_info.count =
+- mem->size / IPA_TABLE_ENTRY_SIZE;
++ req.v4_hash_route_tbl_info.count = mem->size / sizeof(__le64);
+ }
+
+ mem = &ipa->mem[IPA_MEM_V6_ROUTE_HASHED];
+@@ -361,8 +360,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
+ req.v6_hash_route_tbl_info_valid = 1;
+ req.v6_hash_route_tbl_info.start =
+ ipa->mem_offset + mem->offset;
+- req.v6_hash_route_tbl_info.count =
+- mem->size / IPA_TABLE_ENTRY_SIZE;
++ req.v6_hash_route_tbl_info.count = mem->size / sizeof(__le64);
+ }
+
+ mem = &ipa->mem[IPA_MEM_V4_FILTER_HASHED];
+diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
+index 087bcae29cc7..bada98d7360c 100644
+--- a/drivers/net/ipa/ipa_table.c
++++ b/drivers/net/ipa/ipa_table.c
+@@ -27,28 +27,38 @@
+ /**
+ * DOC: IPA Filter and Route Tables
+ *
+- * The IPA has tables defined in its local shared memory that define filter
+- * and routing rules. Each entry in these tables contains a 64-bit DMA
+- * address that refers to DRAM (system memory) containing a rule definition.
++ * The IPA has tables defined in its local (IPA-resident) memory that define
++ * filter and routing rules. An entry in either of these tables is a little
++ * endian 64-bit "slot" that holds the address of a rule definition. (The
++ * size of these slots is 64 bits regardless of the host DMA address size.)
++ *
++ * Separate tables (both filter and route) are used for IPv4 and IPv6.
++ * There is normally another set of "hashed" filter and route tables, which
++ * are used with a hash of message metadata. Hashed operation is not
++ * supported by all IPA hardware (IPA v4.2 doesn't support hashed tables).
++ *
++ * Rules can be in local memory or in DRAM (system memory). The offset of
++ * an object (such as a route or filter table) in IPA-resident memory must
++ * be 128-byte aligned. An object in system memory (such as a route or filter
++ * rule) must be at an 8-byte aligned address. We currently only place
++ * route or filter rules in system memory.
++ *
+ * A rule consists of a contiguous block of 32-bit values terminated with
+ * 32 zero bits. A special "zero entry" rule consisting of 64 zero bits
+ * represents "no filtering" or "no routing," and is the reset value for
+- * filter or route table rules. Separate tables (both filter and route)
+- * used for IPv4 and IPv6. Additionally, there can be hashed filter or
+- * route tables, which are used when a hash of message metadata matches.
+- * Hashed operation is not supported by all IPA hardware.
++ * filter or route table rules.
+ *
+ * Each filter rule is associated with an AP or modem TX endpoint, though
+- * not all TX endpoints support filtering. The first 64-bit entry in a
++ * not all TX endpoints support filtering. The first 64-bit slot in a
+ * filter table is a bitmap indicating which endpoints have entries in
+ * the table. The low-order bit (bit 0) in this bitmap represents a
+ * special global filter, which applies to all traffic. This is not
+ * used in the current code. Bit 1, if set, indicates that there is an
+- * entry (i.e. a DMA address referring to a rule) for endpoint 0 in the
+- * table. Bit 2, if set, indicates there is an entry for endpoint 1,
+- * and so on. Space is set aside in IPA local memory to hold as many
+- * filter table entries as might be required, but typically they are not
+- * all used.
++ * entry (i.e. slot containing a system address referring to a rule) for
++ * endpoint 0 in the table. Bit 3, if set, indicates there is an entry
++ * for endpoint 2, and so on. Space is set aside in IPA local memory to
++ * hold as many filter table entries as might be required, but typically
++ * they are not all used.
+ *
+ * The AP initializes all entries in a filter table to refer to a "zero"
+ * entry. Once initialized the modem and AP update the entries for
+@@ -122,8 +132,7 @@ static void ipa_table_validate_build(void)
+ * code in ipa_table_init() uses a pointer to __le64 to
+ * initialize tables.
+ */
+- BUILD_BUG_ON(sizeof(dma_addr_t) > IPA_TABLE_ENTRY_SIZE);
+- BUILD_BUG_ON(sizeof(__le64) != IPA_TABLE_ENTRY_SIZE);
++ BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(__le64));
+
+ /* A "zero rule" is used to represent no filtering or no routing.
+ * It is a 64-bit block of zeroed memory. Code in ipa_table_init()
+@@ -154,7 +163,7 @@ ipa_table_valid_one(struct ipa *ipa, bool route, bool ipv6, bool hashed)
+ else
+ mem = hashed ? &ipa->mem[IPA_MEM_V4_ROUTE_HASHED]
+ : &ipa->mem[IPA_MEM_V4_ROUTE];
+- size = IPA_ROUTE_COUNT_MAX * IPA_TABLE_ENTRY_SIZE;
++ size = IPA_ROUTE_COUNT_MAX * sizeof(__le64);
+ } else {
+ if (ipv6)
+ mem = hashed ? &ipa->mem[IPA_MEM_V6_FILTER_HASHED]
+@@ -162,7 +171,7 @@ ipa_table_valid_one(struct ipa *ipa, bool route, bool ipv6, bool hashed)
+ else
+ mem = hashed ? &ipa->mem[IPA_MEM_V4_FILTER_HASHED]
+ : &ipa->mem[IPA_MEM_V4_FILTER];
+- size = (1 + IPA_FILTER_COUNT_MAX) * IPA_TABLE_ENTRY_SIZE;
++ size = (1 + IPA_FILTER_COUNT_MAX) * sizeof(__le64);
+ }
+
+ if (!ipa_cmd_table_valid(ipa, mem, route, ipv6, hashed))
+@@ -261,8 +270,8 @@ static void ipa_table_reset_add(struct gsi_trans *trans, bool filter,
+ if (filter)
+ first++; /* skip over bitmap */
+
+- offset = mem->offset + first * IPA_TABLE_ENTRY_SIZE;
+- size = count * IPA_TABLE_ENTRY_SIZE;
++ offset = mem->offset + first * sizeof(__le64);
++ size = count * sizeof(__le64);
+ addr = ipa_table_addr(ipa, false, count);
+
+ ipa_cmd_dma_shared_mem_add(trans, offset, size, addr, true);
+@@ -446,11 +455,11 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
+ count = 1 + hweight32(ipa->filter_map);
+ hash_count = hash_mem->size ? count : 0;
+ } else {
+- count = mem->size / IPA_TABLE_ENTRY_SIZE;
+- hash_count = hash_mem->size / IPA_TABLE_ENTRY_SIZE;
++ count = mem->size / sizeof(__le64);
++ hash_count = hash_mem->size / sizeof(__le64);
+ }
+- size = count * IPA_TABLE_ENTRY_SIZE;
+- hash_size = hash_count * IPA_TABLE_ENTRY_SIZE;
++ size = count * sizeof(__le64);
++ hash_size = hash_count * sizeof(__le64);
+
+ addr = ipa_table_addr(ipa, filter, count);
+ hash_addr = ipa_table_addr(ipa, filter, hash_count);
+@@ -659,7 +668,7 @@ int ipa_table_init(struct ipa *ipa)
+ * by dma_alloc_coherent() is guaranteed to be a power-of-2 number
+ * of pages, which satisfies the rule alignment requirement.
+ */
+- size = IPA_ZERO_RULE_SIZE + (1 + count) * IPA_TABLE_ENTRY_SIZE;
++ size = IPA_ZERO_RULE_SIZE + (1 + count) * sizeof(__le64);
+ virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
+ if (!virt)
+ return -ENOMEM;
+@@ -691,7 +700,7 @@ void ipa_table_exit(struct ipa *ipa)
+ struct device *dev = &ipa->pdev->dev;
+ size_t size;
+
+- size = IPA_ZERO_RULE_SIZE + (1 + count) * IPA_TABLE_ENTRY_SIZE;
++ size = IPA_ZERO_RULE_SIZE + (1 + count) * sizeof(__le64);
+
+ dma_free_coherent(dev, size, ipa->table_virt, ipa->table_addr);
+ ipa->table_addr = 0;
+diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h
+index 78038d14fcea..dc9ff21dbdfb 100644
+--- a/drivers/net/ipa/ipa_table.h
++++ b/drivers/net/ipa/ipa_table.h
+@@ -10,9 +10,6 @@
+
+ struct ipa;
+
+-/* The size of a filter or route table entry */
+-#define IPA_TABLE_ENTRY_SIZE sizeof(__le64) /* Holds a physical address */
+-
+ /* The maximum number of filter table entries (IPv4, IPv6; hashed or not) */
+ #define IPA_FILTER_COUNT_MAX 14
+
+--
+2.35.1
+
--- /dev/null
+From ab2de346cc22be384ef1776fed86e5918128380a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Sep 2022 15:46:02 -0500
+Subject: net: ipa: properly limit modem routing table use
+
+From: Alex Elder <elder@linaro.org>
+
+[ Upstream commit cf412ec333250cb82bafe57169204e14a9f1c2ac ]
+
+IPA can route packets between IPA-connected entities. The AP and
+modem are currently the only such entities supported, and no routing
+is required to transfer packets between them.
+
+The number of entries in each routing table is fixed, and defined at
+initialization time. Some of these entries are designated for use
+by the modem, and the rest are available for the AP to use. The AP
+sends a QMI message to the modem which describes (among other
+things) information about routing table memory available for the
+modem to use.
+
+Currently the QMI initialization packet gives wrong information in
+its description of routing tables. What *should* be supplied is the
+maximum index that the modem can use for the routing table memory
+located at a given location. The current code instead supplies the
+total *number* of routing table entries. Furthermore, the modem is
+granted the entire table, not just the subset it's supposed to use.
+
+This patch fixes this. First, the ipa_mem_bounds structure is
+generalized so its "end" field can be interpreted either as a final
+byte offset, or a final array index. Second, the IPv4 and IPv6
+(non-hashed and hashed) table information fields in the QMI
+ipa_init_modem_driver_req structure are changed to be ipa_mem_bounds
+rather than ipa_mem_array structures. Third, we set the "end" value
+for each routing table to be the last index, rather than setting the
+"count" to be the number of indices. Finally, instead of allowing
+the modem to use all of a routing table's memory, it is limited to
+just the portion meant to be used by the modem. In all versions of
+IPA currently supported, that is IPA_ROUTE_MODEM_COUNT (8) entries.
+
+Update a few comments for clarity.
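+
+As a sketch of the difference (illustrative numbers, with the two QMI
+encodings reduced to bare structs): for a 15-entry routing table, the
+old request told the modem "count = 15", while the fixed request tells
+it "end = 7", the highest index it may touch:
+
+    #include <assert.h>
+    #include <stdint.h>
+
+    #define IPA_ROUTE_MODEM_COUNT   8
+
+    struct mem_array  { uint32_t start; uint32_t count; }; /* old */
+    struct mem_bounds { uint32_t start; uint32_t end; };   /* new */
+
+    int main(void)
+    {
+            struct mem_array  before = { 0, 15 };
+            struct mem_bounds after  = { 0, IPA_ROUTE_MODEM_COUNT - 1 };
+
+            assert(before.count == 15); /* modem offered the whole table */
+            assert(after.end == 7);     /* modem limited to entries 0..7 */
+            return 0;
+    }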
+
+Fixes: 530f9216a9537 ("soc: qcom: ipa: AP/modem communications")
+Signed-off-by: Alex Elder <elder@linaro.org>
+Link: https://lore.kernel.org/r/20220913204602.1803004-1-elder@linaro.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ipa/ipa_qmi.c | 8 ++++----
+ drivers/net/ipa/ipa_qmi_msg.c | 8 ++++----
+ drivers/net/ipa/ipa_qmi_msg.h | 37 ++++++++++++++++++++---------------
+ drivers/net/ipa/ipa_table.c | 2 --
+ drivers/net/ipa/ipa_table.h | 3 +++
+ 5 files changed, 32 insertions(+), 26 deletions(-)
+
+diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
+index fea61657867e..880ec353f958 100644
+--- a/drivers/net/ipa/ipa_qmi.c
++++ b/drivers/net/ipa/ipa_qmi.c
+@@ -308,12 +308,12 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
+ mem = &ipa->mem[IPA_MEM_V4_ROUTE];
+ req.v4_route_tbl_info_valid = 1;
+ req.v4_route_tbl_info.start = ipa->mem_offset + mem->offset;
+- req.v4_route_tbl_info.count = mem->size / sizeof(__le64);
++ req.v4_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
+
+ mem = &ipa->mem[IPA_MEM_V6_ROUTE];
+ req.v6_route_tbl_info_valid = 1;
+ req.v6_route_tbl_info.start = ipa->mem_offset + mem->offset;
+- req.v6_route_tbl_info.count = mem->size / sizeof(__le64);
++ req.v6_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
+
+ mem = &ipa->mem[IPA_MEM_V4_FILTER];
+ req.v4_filter_tbl_start_valid = 1;
+@@ -352,7 +352,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
+ req.v4_hash_route_tbl_info_valid = 1;
+ req.v4_hash_route_tbl_info.start =
+ ipa->mem_offset + mem->offset;
+- req.v4_hash_route_tbl_info.count = mem->size / sizeof(__le64);
++ req.v4_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
+ }
+
+ mem = &ipa->mem[IPA_MEM_V6_ROUTE_HASHED];
+@@ -360,7 +360,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
+ req.v6_hash_route_tbl_info_valid = 1;
+ req.v6_hash_route_tbl_info.start =
+ ipa->mem_offset + mem->offset;
+- req.v6_hash_route_tbl_info.count = mem->size / sizeof(__le64);
++ req.v6_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
+ }
+
+ mem = &ipa->mem[IPA_MEM_V4_FILTER_HASHED];
+diff --git a/drivers/net/ipa/ipa_qmi_msg.c b/drivers/net/ipa/ipa_qmi_msg.c
+index 73413371e3d3..ecf9f863c842 100644
+--- a/drivers/net/ipa/ipa_qmi_msg.c
++++ b/drivers/net/ipa/ipa_qmi_msg.c
+@@ -271,7 +271,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
+ .tlv_type = 0x12,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v4_route_tbl_info),
+- .ei_array = ipa_mem_array_ei,
++ .ei_array = ipa_mem_bounds_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+@@ -292,7 +292,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
+ .tlv_type = 0x13,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v6_route_tbl_info),
+- .ei_array = ipa_mem_array_ei,
++ .ei_array = ipa_mem_bounds_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+@@ -456,7 +456,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
+ .tlv_type = 0x1b,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v4_hash_route_tbl_info),
+- .ei_array = ipa_mem_array_ei,
++ .ei_array = ipa_mem_bounds_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+@@ -477,7 +477,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
+ .tlv_type = 0x1c,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v6_hash_route_tbl_info),
+- .ei_array = ipa_mem_array_ei,
++ .ei_array = ipa_mem_bounds_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h
+index cfac456cea0c..58de425bb8e6 100644
+--- a/drivers/net/ipa/ipa_qmi_msg.h
++++ b/drivers/net/ipa/ipa_qmi_msg.h
+@@ -82,9 +82,11 @@ enum ipa_platform_type {
+ IPA_QMI_PLATFORM_TYPE_MSM_QNX_V01 = 5, /* QNX MSM */
+ };
+
+-/* This defines the start and end offset of a range of memory. Both
+- * fields are offsets relative to the start of IPA shared memory.
+- * The end value is the last addressable byte *within* the range.
++/* This defines the start and end offset of a range of memory. The start
++ * value is a byte offset relative to the start of IPA shared memory. The
++ * end value is the last addressable unit *within* the range. Typically
++ * the end value is in units of bytes; however, it can also be a maximum
++ * array index value.
+ */
+ struct ipa_mem_bounds {
+ u32 start;
+@@ -125,18 +127,19 @@ struct ipa_init_modem_driver_req {
+ u8 hdr_tbl_info_valid;
+ struct ipa_mem_bounds hdr_tbl_info;
+
+- /* Routing table information. These define the location and size of
+- * non-hashable IPv4 and IPv6 filter tables. The start values are
+- * offsets relative to the start of IPA shared memory.
++ /* Routing table information. These define the location and maximum
++ * *index* (not byte) for the modem portion of non-hashable IPv4 and
++ * IPv6 routing tables. The start values are byte offsets relative
++ * to the start of IPA shared memory.
+ */
+ u8 v4_route_tbl_info_valid;
+- struct ipa_mem_array v4_route_tbl_info;
++ struct ipa_mem_bounds v4_route_tbl_info;
+ u8 v6_route_tbl_info_valid;
+- struct ipa_mem_array v6_route_tbl_info;
++ struct ipa_mem_bounds v6_route_tbl_info;
+
+ /* Filter table information. These define the location of the
+ * non-hashable IPv4 and IPv6 filter tables. The start values are
+- * offsets relative to the start of IPA shared memory.
++ * byte offsets relative to the start of IPA shared memory.
+ */
+ u8 v4_filter_tbl_start_valid;
+ u32 v4_filter_tbl_start;
+@@ -177,18 +180,20 @@ struct ipa_init_modem_driver_req {
+ u8 zip_tbl_info_valid;
+ struct ipa_mem_bounds zip_tbl_info;
+
+- /* Routing table information. These define the location and size
+- * of hashable IPv4 and IPv6 filter tables. The start values are
+- * offsets relative to the start of IPA shared memory.
++ /* Routing table information. These define the location and maximum
++ * *index* (not byte) for the modem portion of hashable IPv4 and IPv6
++ * routing tables (if supported by hardware). The start values are
++ * byte offsets relative to the start of IPA shared memory.
+ */
+ u8 v4_hash_route_tbl_info_valid;
+- struct ipa_mem_array v4_hash_route_tbl_info;
++ struct ipa_mem_bounds v4_hash_route_tbl_info;
+ u8 v6_hash_route_tbl_info_valid;
+- struct ipa_mem_array v6_hash_route_tbl_info;
++ struct ipa_mem_bounds v6_hash_route_tbl_info;
+
+ /* Filter table information. These define the location and size
+- * of hashable IPv4 and IPv6 filter tables. The start values are
+- * offsets relative to the start of IPA shared memory.
++ * of hashable IPv4 and IPv6 filter tables (if supported by hardware).
++ * The start values are byte offsets relative to the start of IPA
++ * shared memory.
+ */
+ u8 v4_hash_filter_tbl_start_valid;
+ u32 v4_hash_filter_tbl_start;
+diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
+index bada98d7360c..02c192837414 100644
+--- a/drivers/net/ipa/ipa_table.c
++++ b/drivers/net/ipa/ipa_table.c
+@@ -108,8 +108,6 @@
+
+ /* Assignment of route table entries to the modem and AP */
+ #define IPA_ROUTE_MODEM_MIN 0
+-#define IPA_ROUTE_MODEM_COUNT 8
+-
+ #define IPA_ROUTE_AP_MIN IPA_ROUTE_MODEM_COUNT
+ #define IPA_ROUTE_AP_COUNT \
+ (IPA_ROUTE_COUNT_MAX - IPA_ROUTE_MODEM_COUNT)
+diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h
+index dc9ff21dbdfb..35e519cef25d 100644
+--- a/drivers/net/ipa/ipa_table.h
++++ b/drivers/net/ipa/ipa_table.h
+@@ -13,6 +13,9 @@ struct ipa;
+ /* The maximum number of filter table entries (IPv4, IPv6; hashed or not) */
+ #define IPA_FILTER_COUNT_MAX 14
+
++/* The number of route table entries allotted to the modem */
++#define IPA_ROUTE_MODEM_COUNT 8
++
+ /* The maximum number of route table entries (IPv4, IPv6; hashed or not) */
+ #define IPA_ROUTE_COUNT_MAX 15
+
+--
+2.35.1
+
--- /dev/null
+From 6c0aef207a4fd4abb0d4eaea8606a4c6b28b475d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 Jul 2021 18:54:18 +0800
+Subject: net: let flow have same hash in two directions
+
+From: zhang kai <zhangkaiheb@126.com>
+
+[ Upstream commit 1e60cebf82948cfdc9497ea4553bab125587593c ]
+
+Use the same source and destination IP/port for the flow hash
+calculation in both directions.
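+
+The intended canonical ordering, as a sketch (simplified types; the
+plain unsigned comparisons used here are the form the fix earlier in
+this series settles on):
+
+    #include <assert.h>
+    #include <stdint.h>
+
+    static void consistentify(uint32_t *saddr, uint32_t *daddr,
+                              uint16_t *sport, uint16_t *dport)
+    {
+            uint32_t a;
+            uint16_t p;
+
+            if (*daddr < *saddr) {  /* sort the addresses... */
+                    a = *saddr; *saddr = *daddr; *daddr = a;
+            }
+            if (*dport < *sport) {  /* ...and the ports, independently */
+                    p = *sport; *sport = *dport; *dport = p;
+            }
+    }
+
+    int main(void)
+    {
+            /* one flow, seen from its two directions */
+            uint32_t s1 = 3, d1 = 9; uint16_t sp1 = 80, dp1 = 5000;
+            uint32_t s2 = 9, d2 = 3; uint16_t sp2 = 5000, dp2 = 80;
+
+            consistentify(&s1, &d1, &sp1, &dp1);
+            consistentify(&s2, &d2, &sp2, &dp2);
+
+            /* both directions now feed the hash the same tuple */
+            assert(s1 == s2 && d1 == d2 && sp1 == sp2 && dp1 == dp2);
+            return 0;
+    }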
+
+Signed-off-by: zhang kai <zhangkaiheb@126.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: 64ae13ed4784 ("net: core: fix flow symmetric hash")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/flow_dissector.c | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
+index f9baa9b1c77f..aad311c73810 100644
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -1485,7 +1485,7 @@ __be32 flow_get_u32_dst(const struct flow_keys *flow)
+ }
+ EXPORT_SYMBOL(flow_get_u32_dst);
+
+-/* Sort the source and destination IP (and the ports if the IP are the same),
++/* Sort the source and destination IP and the ports,
+ * to have consistent hash within the two directions
+ */
+ static inline void __flow_hash_consistentify(struct flow_keys *keys)
+@@ -1496,11 +1496,11 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys)
+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+ addr_diff = (__force u32)keys->addrs.v4addrs.dst -
+ (__force u32)keys->addrs.v4addrs.src;
+- if ((addr_diff < 0) ||
+- (addr_diff == 0 &&
+- ((__force u16)keys->ports.dst <
+- (__force u16)keys->ports.src))) {
++ if (addr_diff < 0)
+ swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
++
++ if ((__force u16)keys->ports.dst <
++ (__force u16)keys->ports.src) {
+ swap(keys->ports.src, keys->ports.dst);
+ }
+ break;
+@@ -1508,13 +1508,13 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys)
+ addr_diff = memcmp(&keys->addrs.v6addrs.dst,
+ &keys->addrs.v6addrs.src,
+ sizeof(keys->addrs.v6addrs.dst));
+- if ((addr_diff < 0) ||
+- (addr_diff == 0 &&
+- ((__force u16)keys->ports.dst <
+- (__force u16)keys->ports.src))) {
++ if (addr_diff < 0) {
+ for (i = 0; i < 4; i++)
+ swap(keys->addrs.v6addrs.src.s6_addr32[i],
+ keys->addrs.v6addrs.dst.s6_addr32[i]);
++ }
++ if ((__force u16)keys->ports.dst <
++ (__force u16)keys->ports.src) {
+ swap(keys->ports.src, keys->ports.dst);
+ }
+ break;
+--
+2.35.1
+
--- /dev/null
+From 06306d6c8bfe592736385aad8e7848c4a191f3ff Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Sep 2022 16:04:51 +0300
+Subject: net: phy: aquantia: wait for the suspend/resume operations to finish
+
+From: Ioana Ciornei <ioana.ciornei@nxp.com>
+
+[ Upstream commit ca2dccdeeb49a7e408112d681bf447984c845292 ]
+
+The Aquantia datasheet notes that after issuing a Processor-Intensive
+MDIO operation, like changing the low-power state of the device, the
+driver should wait for the operation to finish before issuing a new MDIO
+command.
+
+The new aqr107_wait_processor_intensive_op() function is added which can
+be used after these kind of MDIO operations. At the moment, we are only
+adding it at the end of the suspend/resume calls.
+
+The issue was identified on a board featuring the AQR113C PHY, on
+which commands like 'ip link (..) up / down' issued without any delays
+between them would cause the link on the PHY to remain down.
+The issue was easy to reproduce with a one-liner:
+ $ ip link set dev ethX down; ip link set dev ethX up; \
+ ip link set dev ethX down; ip link set dev ethX up;
+
+Fixes: ac9e81c230eb ("net: phy: aquantia: add suspend / resume callbacks for AQR107 family")
+Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Link: https://lore.kernel.org/r/20220906130451.1483448-1-ioana.ciornei@nxp.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/phy/aquantia_main.c | 53 ++++++++++++++++++++++++++++++---
+ 1 file changed, 49 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
+index 75a62d1cc737..7045595f8d7d 100644
+--- a/drivers/net/phy/aquantia_main.c
++++ b/drivers/net/phy/aquantia_main.c
+@@ -89,6 +89,9 @@
+ #define VEND1_GLOBAL_FW_ID_MAJOR GENMASK(15, 8)
+ #define VEND1_GLOBAL_FW_ID_MINOR GENMASK(7, 0)
+
++#define VEND1_GLOBAL_GEN_STAT2 0xc831
++#define VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG BIT(15)
++
+ #define VEND1_GLOBAL_RSVD_STAT1 0xc885
+ #define VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID GENMASK(7, 4)
+ #define VEND1_GLOBAL_RSVD_STAT1_PROV_ID GENMASK(3, 0)
+@@ -123,6 +126,12 @@
+ #define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2 BIT(1)
+ #define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 BIT(0)
+
++/* Sleep and timeout for checking if the Processor-Intensive
++ * MDIO operation is finished
++ */
++#define AQR107_OP_IN_PROG_SLEEP 1000
++#define AQR107_OP_IN_PROG_TIMEOUT 100000
++
+ struct aqr107_hw_stat {
+ const char *name;
+ int reg;
+@@ -569,16 +578,52 @@ static void aqr107_link_change_notify(struct phy_device *phydev)
+ phydev_info(phydev, "Aquantia 1000Base-T2 mode active\n");
+ }
+
++static int aqr107_wait_processor_intensive_op(struct phy_device *phydev)
++{
++ int val, err;
++
++ /* The datasheet notes to wait at least 1ms after issuing a
++ * processor intensive operation before checking.
++ * We cannot use the 'sleep_before_read' parameter of read_poll_timeout
++ * because that just determines the maximum time slept, not the minimum.
++ */
++ usleep_range(1000, 5000);
++
++ err = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
++ VEND1_GLOBAL_GEN_STAT2, val,
++ !(val & VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG),
++ AQR107_OP_IN_PROG_SLEEP,
++ AQR107_OP_IN_PROG_TIMEOUT, false);
++ if (err) {
++ phydev_err(phydev, "timeout: processor-intensive MDIO operation\n");
++ return err;
++ }
++
++ return 0;
++}
++
+ static int aqr107_suspend(struct phy_device *phydev)
+ {
+- return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
+- MDIO_CTRL1_LPOWER);
++ int err;
++
++ err = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
++ MDIO_CTRL1_LPOWER);
++ if (err)
++ return err;
++
++ return aqr107_wait_processor_intensive_op(phydev);
+ }
+
+ static int aqr107_resume(struct phy_device *phydev)
+ {
+- return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
+- MDIO_CTRL1_LPOWER);
++ int err;
++
++ err = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
++ MDIO_CTRL1_LPOWER);
++ if (err)
++ return err;
++
++ return aqr107_wait_processor_intensive_op(phydev);
+ }
+
+ static int aqr107_probe(struct phy_device *phydev)
+--
+2.35.1
+
--- /dev/null
+From 53d9e5c151df75101b150b346f7c7249a6a3f5e8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Sep 2022 17:27:34 +0800
+Subject: net: sched: fix possible refcount leak in tc_new_tfilter()
+
+From: Hangyu Hua <hbh25y@gmail.com>
+
+[ Upstream commit c2e1cfefcac35e0eea229e148c8284088ce437b5 ]
+
+tfilter_put() needs to be called to put the refcount taken by tp->ops->get
+to avoid a possible refcount leak when chain->tmplt_ops != NULL and
+chain->tmplt_ops != tp->ops.
+
+Fixes: 7d5509fa0d3d ("net: sched: extend proto ops with 'put' callback")
+Signed-off-by: Hangyu Hua <hbh25y@gmail.com>
+Reviewed-by: Vlad Buslov <vladbu@nvidia.com>
+Link: https://lore.kernel.org/r/20220921092734.31700-1-hbh25y@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/cls_api.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index b8ffb7e4f696..c410a736301b 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -2124,6 +2124,7 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
+ }
+
+ if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
++ tfilter_put(tp, fh);
+ NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
+ err = -EINVAL;
+ goto errout;
+--
+2.35.1
+
--- /dev/null
+From 19210faea0e6dd691d21545a385b73772e977b4d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 15 Sep 2022 13:08:01 +0300
+Subject: net/sched: taprio: avoid disabling offload when it was never enabled
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit db46e3a88a09c5cf7e505664d01da7238cd56c92 ]
+
+In an incredibly strange API design decision, qdisc->destroy() gets
+called even if qdisc->init() never succeeded, not exclusively since
+commit 87b60cfacf9f ("net_sched: fix error recovery at qdisc creation"),
+but apparently also earlier (in the case of qdisc_create_dflt()).
+
+The taprio qdisc does not fully acknowledge this when it attempts full
+offload, because it starts off with q->flags = TAPRIO_FLAGS_INVALID in
+taprio_init(), then it replaces q->flags with TCA_TAPRIO_ATTR_FLAGS
+parsed from netlink (in taprio_change(), tail called from taprio_init()).
+
+But in taprio_destroy(), we call taprio_disable_offload(), and this
+determines what to do based on FULL_OFFLOAD_IS_ENABLED(q->flags).
+
+But looking at the implementation of FULL_OFFLOAD_IS_ENABLED()
+(a bitwise check of bit 1 in q->flags), it is invalid to call this macro
+on q->flags when it contains TAPRIO_FLAGS_INVALID, because that is set
+to U32_MAX, and therefore FULL_OFFLOAD_IS_ENABLED() will return true on
+an invalid set of flags.
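+
+A user-space sketch of that misfire (macros simplified from the kernel
+definitions):
+
+    #include <assert.h>
+    #include <stdint.h>
+
+    #define TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD  (1U << 1)
+    #define FULL_OFFLOAD_IS_ENABLED(flags) \
+            (!!((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
+    #define TAPRIO_FLAGS_INVALID    UINT32_MAX
+
+    int main(void)
+    {
+            uint32_t flags = TAPRIO_FLAGS_INVALID; /* init() bailed early */
+
+            /* every bit is set, so bit 1 reads as "full offload on" */
+            assert(FULL_OFFLOAD_IS_ENABLED(flags));
+            return 0;
+    }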
+
+As a result, it is possible to crash the kernel if user space forces an
+error between setting q->flags = TAPRIO_FLAGS_INVALID and the call
+to taprio_enable_offload(). This is because drivers do not expect the
+offload to be disabled when it was never enabled.
+
+The error that we force here is to attach taprio as a non-root qdisc,
+but instead as child of an mqprio root qdisc:
+
+$ tc qdisc add dev swp0 root handle 1: \
+ mqprio num_tc 8 map 0 1 2 3 4 5 6 7 \
+ queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 hw 0
+$ tc qdisc replace dev swp0 parent 1:1 \
+ taprio num_tc 8 map 0 1 2 3 4 5 6 7 \
+ queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 base-time 0 \
+ sched-entry S 0x7f 990000 sched-entry S 0x80 100000 \
+ flags 0x0 clockid CLOCK_TAI
+Unable to handle kernel paging request at virtual address fffffffffffffff8
+[fffffffffffffff8] pgd=0000000000000000, p4d=0000000000000000
+Internal error: Oops: 96000004 [#1] PREEMPT SMP
+Call trace:
+ taprio_dump+0x27c/0x310
+ vsc9959_port_setup_tc+0x1f4/0x460
+ felix_port_setup_tc+0x24/0x3c
+ dsa_slave_setup_tc+0x54/0x27c
+ taprio_disable_offload.isra.0+0x58/0xe0
+ taprio_destroy+0x80/0x104
+ qdisc_create+0x240/0x470
+ tc_modify_qdisc+0x1fc/0x6b0
+ rtnetlink_rcv_msg+0x12c/0x390
+ netlink_rcv_skb+0x5c/0x130
+ rtnetlink_rcv+0x1c/0x2c
+
+Fix this by keeping track of the operations we made, and undo the
+offload only if we actually did it.
+
+I've added "bool offloaded" inside a 4 byte hole between "int clockid"
+and "atomic64_t picos_per_byte". Now the first cache line looks like
+below:
+
+$ pahole -C taprio_sched net/sched/sch_taprio.o
+struct taprio_sched {
+ struct Qdisc * * qdiscs; /* 0 8 */
+ struct Qdisc * root; /* 8 8 */
+ u32 flags; /* 16 4 */
+ enum tk_offsets tk_offset; /* 20 4 */
+ int clockid; /* 24 4 */
+ bool offloaded; /* 28 1 */
+
+ /* XXX 3 bytes hole, try to pack */
+
+ atomic64_t picos_per_byte; /* 32 0 */
+
+ /* XXX 8 bytes hole, try to pack */
+
+ spinlock_t current_entry_lock; /* 40 0 */
+
+ /* XXX 8 bytes hole, try to pack */
+
+ struct sched_entry * current_entry; /* 48 8 */
+ struct sched_gate_list * oper_sched; /* 56 8 */
+ /* --- cacheline 1 boundary (64 bytes) --- */
+
+Fixes: 9c66d1564676 ("taprio: Add support for hardware offloading")
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Reviewed-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_taprio.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
+index eca525791013..384316c11e98 100644
+--- a/net/sched/sch_taprio.c
++++ b/net/sched/sch_taprio.c
+@@ -65,6 +65,7 @@ struct taprio_sched {
+ u32 flags;
+ enum tk_offsets tk_offset;
+ int clockid;
++ bool offloaded;
+ atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
+ * speeds it's sub-nanoseconds per byte
+ */
+@@ -1267,6 +1268,8 @@ static int taprio_enable_offload(struct net_device *dev,
+ goto done;
+ }
+
++ q->offloaded = true;
++
+ done:
+ taprio_offload_free(offload);
+
+@@ -1281,12 +1284,9 @@ static int taprio_disable_offload(struct net_device *dev,
+ struct tc_taprio_qopt_offload *offload;
+ int err;
+
+- if (!FULL_OFFLOAD_IS_ENABLED(q->flags))
++ if (!q->offloaded)
+ return 0;
+
+- if (!ops->ndo_setup_tc)
+- return -EOPNOTSUPP;
+-
+ offload = taprio_offload_alloc(0);
+ if (!offload) {
+ NL_SET_ERR_MSG(extack,
+@@ -1302,6 +1302,8 @@ static int taprio_disable_offload(struct net_device *dev,
+ goto out;
+ }
+
++ q->offloaded = false;
++
+ out:
+ taprio_offload_free(offload);
+
+--
+2.35.1
+
--- /dev/null
+From 92a3e258c02af02e880ffd41aadea0494d175927 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 15 Sep 2022 13:08:02 +0300
+Subject: net/sched: taprio: make qdisc_leaf() see the per-netdev-queue pfifo
+ child qdiscs
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit 1461d212ab277d8bba1a753d33e9afe03d81f9d4 ]
+
+taprio can only operate as root qdisc, and to that end, there exists the
+following check in taprio_init(), just as in mqprio:
+
+ if (sch->parent != TC_H_ROOT)
+ return -EOPNOTSUPP;
+
+And indeed, when we try to attach taprio to an mqprio child, it fails as
+expected:
+
+$ tc qdisc add dev swp0 root handle 1: mqprio num_tc 8 \
+ map 0 1 2 3 4 5 6 7 \
+ queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 hw 0
+$ tc qdisc replace dev swp0 parent 1:2 taprio num_tc 8 \
+ map 0 1 2 3 4 5 6 7 \
+ queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 \
+ base-time 0 sched-entry S 0x7f 990000 sched-entry S 0x80 100000 \
+ flags 0x0 clockid CLOCK_TAI
+Error: sch_taprio: Can only be attached as root qdisc.
+
+(extack message added by me)
+
+But when we try to attach a taprio child to a taprio root qdisc,
+surprisingly it doesn't fail:
+
+$ tc qdisc replace dev swp0 root handle 1: taprio num_tc 8 \
+ map 0 1 2 3 4 5 6 7 queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 \
+ base-time 0 sched-entry S 0x7f 990000 sched-entry S 0x80 100000 \
+ flags 0x0 clockid CLOCK_TAI
+$ tc qdisc replace dev swp0 parent 1:2 taprio num_tc 8 \
+ map 0 1 2 3 4 5 6 7 \
+ queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 \
+ base-time 0 sched-entry S 0x7f 990000 sched-entry S 0x80 100000 \
+ flags 0x0 clockid CLOCK_TAI
+
+This is because tc_modify_qdisc() behaves differently when mqprio is
+root, vs when taprio is root.
+
+In the mqprio case, it finds the parent qdisc through
+p = qdisc_lookup(dev, TC_H_MAJ(clid)), and then the child qdisc through
+q = qdisc_leaf(p, clid). This leaf qdisc q has handle 0, so it is
+ignored according to the comment right below ("It may be default qdisc,
+ignore it"). As a result, tc_modify_qdisc() goes through the
+qdisc_create() code path, and this gives taprio_init() a chance to check
+for sch_parent != TC_H_ROOT and error out.
+
+Whereas in the taprio case, the returned q = qdisc_leaf(p, clid) is
+different. It is not the default qdisc created for each netdev queue
+(both taprio and mqprio call qdisc_create_dflt() and keep them in
+a private q->qdiscs[], or priv->qdiscs[], respectively). Instead, taprio
+makes qdisc_leaf() return the _root_ qdisc, aka itself.
+
+When taprio does that, tc_modify_qdisc() goes through the qdisc_change()
+code path, because the qdisc layer never finds out about the child qdisc
+of the root. And through the ->change() ops, taprio has no reason to
+check whether its parent is root or not, just through ->init(), which is
+not called.
+
+The problem is the taprio_leaf() implementation. Even though code wise,
+it does the exact same thing as mqprio_leaf() which it is copied from,
+it works with different input data. This is because mqprio does not
+attach itself (the root) to each device TX queue, but one of the default
+qdiscs from its private array.
+
+In fact, since commit 13511704f8d7 ("net: taprio offload: enforce qdisc
+to netdev queue mapping"), taprio does this too, but just for the full
+offload case. So if we tried to attach a taprio child to a fully
+offloaded taprio root qdisc, it would properly fail too; just not to a
+software root taprio.
+
+To fix the problem, stop looking at the Qdisc that's attached to the TX
+queue, and instead, always return the default qdiscs that we've
+allocated (and to which we privately enqueue and dequeue, in software
+scheduling mode).
+
+Since Qdisc_class_ops :: leaf is only called from tc_modify_qdisc(),
+the risk of unforeseen side effects introduced by this change is
+minimal.
+
+Fixes: 5a781ccbd19e ("tc: Add support for configuring the taprio scheduler")
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Reviewed-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_taprio.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
+index 384316c11e98..ab8835a72cee 100644
+--- a/net/sched/sch_taprio.c
++++ b/net/sched/sch_taprio.c
+@@ -1906,12 +1906,14 @@ static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
+
+ static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
+ {
+- struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
++ struct taprio_sched *q = qdisc_priv(sch);
++ struct net_device *dev = qdisc_dev(sch);
++ unsigned int ntx = cl - 1;
+
+- if (!dev_queue)
++ if (ntx >= dev->num_tx_queues)
+ return NULL;
+
+- return dev_queue->qdisc_sleeping;
++ return q->qdiscs[ntx];
+ }
+
+ static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
+--
+2.35.1
+
--- /dev/null
+From fa34c40dc1fd0aefe9e9715f035ecd8e3b252348 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 20 Sep 2022 14:43:09 +0800
+Subject: net/smc: Stop the CLC flow if no link to map buffers on
+
+From: Wen Gu <guwen@linux.alibaba.com>
+
+[ Upstream commit e738455b2c6dcdab03e45d97de36476f93f557d2 ]
+
+There might be a potential race between SMC-R buffer map and
+link group termination.
+
+smc_smcr_terminate_all() | smc_connect_rdma()
+--------------------------------------------------------------
+ | smc_conn_create()
+for links in smcibdev |
+ schedule links down |
+ | smc_buf_create()
+ | \- smcr_buf_map_usable_links()
+ | \- no usable links found,
+ | (rmb->mr = NULL)
+ |
+ | smc_clc_send_confirm()
+ | \- access conn->rmb_desc->mr[]->rkey
+ | (panic)
+
+During reboot and IB device module remove, all links will be set
+down and no usable links remain in link groups. In such a situation,
+smcr_buf_map_usable_links() should return an error and stop the
+CLC flow from accessing the uninitialized mr.
+
+Fixes: b9247544c1bc ("net/smc: convert static link ID instances to support multiple links")
+Signed-off-by: Wen Gu <guwen@linux.alibaba.com>
+Link: https://lore.kernel.org/r/1663656189-32090-1-git-send-email-guwen@linux.alibaba.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/smc/smc_core.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
+index ef2fd28999ba..bf485a2017a4 100644
+--- a/net/smc/smc_core.c
++++ b/net/smc/smc_core.c
+@@ -1584,7 +1584,7 @@ static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
+ static int smcr_buf_map_usable_links(struct smc_link_group *lgr,
+ struct smc_buf_desc *buf_desc, bool is_rmb)
+ {
+- int i, rc = 0;
++ int i, rc = 0, cnt = 0;
+
+ /* protect against parallel link reconfiguration */
+ mutex_lock(&lgr->llc_conf_mutex);
+@@ -1597,9 +1597,12 @@ static int smcr_buf_map_usable_links(struct smc_link_group *lgr,
+ rc = -ENOMEM;
+ goto out;
+ }
++ cnt++;
+ }
+ out:
+ mutex_unlock(&lgr->llc_conf_mutex);
++ if (!rc && !cnt)
++ rc = -EINVAL;
+ return rc;
+ }
+
+--
+2.35.1
+
--- /dev/null
+From 65ccf2817d85972ee9dd9a56fffdb569ba205046 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Jul 2021 16:29:01 +0200
+Subject: net: socket: remove register_gifconf
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+[ Upstream commit b0e99d03778b2418aec20db99d97d19d25d198b6 ]
+
+Since dynamic registration of the gifconf() helper is only used for
+IPv4, and this cannot be in a loadable module, this can be simplified
+noticeably by turning it into a direct function call as a preparation
+for cleaning up the compat handling.
+
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: 5641c751fe2f ("net: enetc: deny offload of tc-based TSN features on VF interfaces")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/inetdevice.h | 9 ++++++++
+ include/linux/netdevice.h | 8 -------
+ net/core/dev_ioctl.c | 43 +++++++++-----------------------------
+ net/ipv4/devinet.c | 4 +---
+ 4 files changed, 20 insertions(+), 44 deletions(-)
+
+diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
+index b68fca08be27..3088d94684c1 100644
+--- a/include/linux/inetdevice.h
++++ b/include/linux/inetdevice.h
+@@ -178,6 +178,15 @@ static inline struct net_device *ip_dev_find(struct net *net, __be32 addr)
+
+ int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b);
+ int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *);
++#ifdef CONFIG_INET
++int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size);
++#else
++static inline int inet_gifconf(struct net_device *dev, char __user *buf,
++ int len, int size)
++{
++ return 0;
++}
++#endif
+ void devinet_init(void);
+ struct in_device *inetdev_by_index(struct net *, int);
+ __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope);
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 6564fb4ac49e..ef75567efd27 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -3201,14 +3201,6 @@ static inline bool dev_has_header(const struct net_device *dev)
+ return dev->header_ops && dev->header_ops->create;
+ }
+
+-typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr,
+- int len, int size);
+-int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
+-static inline int unregister_gifconf(unsigned int family)
+-{
+- return register_gifconf(family, NULL);
+-}
+-
+ #ifdef CONFIG_NET_FLOW_LIMIT
+ #define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */
+ struct sd_flow_limit {
+diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
+index 54fb18b4f55e..48afea19d3e1 100644
+--- a/net/core/dev_ioctl.c
++++ b/net/core/dev_ioctl.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include <linux/kmod.h>
+ #include <linux/netdevice.h>
++#include <linux/inetdevice.h>
+ #include <linux/etherdevice.h>
+ #include <linux/rtnetlink.h>
+ #include <linux/net_tstamp.h>
+@@ -25,26 +26,6 @@ static int dev_ifname(struct net *net, struct ifreq *ifr)
+ return netdev_get_name(net, ifr->ifr_name, ifr->ifr_ifindex);
+ }
+
+-static gifconf_func_t *gifconf_list[NPROTO];
+-
+-/**
+- * register_gifconf - register a SIOCGIF handler
+- * @family: Address family
+- * @gifconf: Function handler
+- *
+- * Register protocol dependent address dumping routines. The handler
+- * that is passed must not be freed or reused until it has been replaced
+- * by another handler.
+- */
+-int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
+-{
+- if (family >= NPROTO)
+- return -EINVAL;
+- gifconf_list[family] = gifconf;
+- return 0;
+-}
+-EXPORT_SYMBOL(register_gifconf);
+-
+ /*
+ * Perform a SIOCGIFCONF call. This structure will change
+ * size eventually, and there is nothing I can do about it.
+@@ -72,19 +53,15 @@ int dev_ifconf(struct net *net, struct ifconf *ifc, int size)
+
+ total = 0;
+ for_each_netdev(net, dev) {
+- for (i = 0; i < NPROTO; i++) {
+- if (gifconf_list[i]) {
+- int done;
+- if (!pos)
+- done = gifconf_list[i](dev, NULL, 0, size);
+- else
+- done = gifconf_list[i](dev, pos + total,
+- len - total, size);
+- if (done < 0)
+- return -EFAULT;
+- total += done;
+- }
+- }
++ int done;
++ if (!pos)
++ done = inet_gifconf(dev, NULL, 0, size);
++ else
++ done = inet_gifconf(dev, pos + total,
++ len - total, size);
++ if (done < 0)
++ return -EFAULT;
++ total += done;
+ }
+
+ /*
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index 8f1753875550..88b6120878cd 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -1244,7 +1244,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr)
+ return ret;
+ }
+
+-static int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size)
++int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size)
+ {
+ struct in_device *in_dev = __in_dev_get_rtnl(dev);
+ const struct in_ifaddr *ifa;
+@@ -2766,8 +2766,6 @@ void __init devinet_init(void)
+ INIT_HLIST_HEAD(&inet_addr_lst[i]);
+
+ register_pernet_subsys(&devinet_ops);
+-
+- register_gifconf(PF_INET, inet_gifconf);
+ register_netdevice_notifier(&ip_netdev_notifier);
+
+ queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);
+--
+2.35.1
+
--- /dev/null
+From e988da96c1518ae6bf5c58057707c34dd5651d2e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 20 Sep 2022 19:50:18 -0400
+Subject: net: sunhme: Fix packet reception for len < RX_COPY_THRESHOLD
+
+From: Sean Anderson <seanga2@gmail.com>
+
+[ Upstream commit 878e2405710aacfeeb19364c300f38b7a9abfe8f ]
+
+There is a separate receive path for small packets (under 256 bytes).
+Instead of allocating a new dma-capable skb to be used for the next packet,
+this path allocates a skb and copies the data into it (reusing the existing
+skb for the next packet). There are two bytes of junk data at the beginning
+of every packet. I believe these are inserted in order to allow aligned DMA
+and IP headers. We skip over them using skb_reserve. Before copying over
+the data, we must use a barrier to ensure we see the whole packet. The
+current code only synchronizes len bytes, starting from the beginning of
+the packet, including the junk bytes. However, this leaves off the final
+two bytes in the packet. Synchronize the whole packet.
+
+To reproduce this problem, ping a HME with a payload size between 17 and
+214,
+
+ $ ping -s 17 <hme_address>
+
+which will complain rather loudly about the data mismatch. Small packets
+(below 60 bytes on the wire) do not have this issue. I suspect this is
+related to the padding added to increase the minimum packet size.
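+
+Schematically (the two leading bytes are the alignment padding described
+above; "len" is the packet length from the RX descriptor):
+
+  buffer at dma_addr:  [pad][pad][packet data ......... len bytes]
+  old sync length:     len      -> misses the final 2 bytes of the packet
+  new sync length:     len + 2  -> covers the padding plus the whole packet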
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Sean Anderson <seanga2@gmail.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Link: https://lore.kernel.org/r/20220920235018.1675956-1-seanga2@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/sun/sunhme.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
+index 69fc47089e62..940db4ec5714 100644
+--- a/drivers/net/ethernet/sun/sunhme.c
++++ b/drivers/net/ethernet/sun/sunhme.c
+@@ -2063,9 +2063,9 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
+
+ skb_reserve(copy_skb, 2);
+ skb_put(copy_skb, len);
+- dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
++ dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
+ skb_copy_from_linear_data(skb, copy_skb->data, len);
+- dma_sync_single_for_device(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
++ dma_sync_single_for_device(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
+ /* Reuse original ring buffer. */
+ hme_write_rxd(hp, this,
+ (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
+--
+2.35.1
+
--- /dev/null
+From 096c88ab01ce03ce56f49e1430522f8b02ed0ed4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Sep 2022 16:56:41 +0900
+Subject: net: team: Unsync device addresses on ndo_stop
+
+From: Benjamin Poirier <bpoirier@nvidia.com>
+
+[ Upstream commit bd60234222b2fd5573526da7bcd422801f271f5f ]
+
+Netdev drivers are expected to call dev_{uc,mc}_sync() in their
+ndo_set_rx_mode method and dev_{uc,mc}_unsync() in their ndo_stop method.
+This is mentioned in the kerneldoc for those dev_* functions.
+
+The team driver calls dev_{uc,mc}_unsync() during ndo_uninit instead of
+ndo_stop. This is ineffective because address lists (dev->{uc,mc}) have
+already been emptied in unregister_netdevice_many() before ndo_uninit is
+called. This mistake can result in addresses being leftover on former team
+ports after a team device has been deleted; see test_LAG_cleanup() in the
+last patch in this series.
+
+Add unsync calls at their expected location, team_close().
+
+v3:
+* When adding or deleting a port, only sync/unsync addresses if the team
+ device is up. In other cases, it is taken care of at the right time by
+ ndo_open/ndo_set_rx_mode/ndo_stop.
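+
+A rough way to observe the stale addresses this fixes (untested sketch;
+"eth0" is a placeholder port name):
+
+  ip link add team0 type team
+  ip link set eth0 down
+  ip link set eth0 master team0
+  ip link set team0 up
+  ip maddress add 01:00:5e:00:00:42 dev team0  # synced to eth0
+  ip link del team0
+  ip maddress show dev eth0                    # address incorrectly remains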
+
+Fixes: 3d249d4ca7d0 ("net: introduce ethernet teaming device")
+Signed-off-by: Benjamin Poirier <bpoirier@nvidia.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/team/team.c | 24 ++++++++++++++++++------
+ 1 file changed, 18 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 615f3776b4be..7117d559a32e 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -1270,10 +1270,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
+ }
+ }
+
+- netif_addr_lock_bh(dev);
+- dev_uc_sync_multiple(port_dev, dev);
+- dev_mc_sync_multiple(port_dev, dev);
+- netif_addr_unlock_bh(dev);
++ if (dev->flags & IFF_UP) {
++ netif_addr_lock_bh(dev);
++ dev_uc_sync_multiple(port_dev, dev);
++ dev_mc_sync_multiple(port_dev, dev);
++ netif_addr_unlock_bh(dev);
++ }
+
+ port->index = -1;
+ list_add_tail_rcu(&port->list, &team->port_list);
+@@ -1344,8 +1346,10 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
+ netdev_rx_handler_unregister(port_dev);
+ team_port_disable_netpoll(port);
+ vlan_vids_del_by_dev(port_dev, dev);
+- dev_uc_unsync(port_dev, dev);
+- dev_mc_unsync(port_dev, dev);
++ if (dev->flags & IFF_UP) {
++ dev_uc_unsync(port_dev, dev);
++ dev_mc_unsync(port_dev, dev);
++ }
+ dev_close(port_dev);
+ team_port_leave(team, port);
+
+@@ -1695,6 +1699,14 @@ static int team_open(struct net_device *dev)
+
+ static int team_close(struct net_device *dev)
+ {
++ struct team *team = netdev_priv(dev);
++ struct team_port *port;
++
++ list_for_each_entry(port, &team->port_list, list) {
++ dev_uc_unsync(port->dev, dev);
++ dev_mc_unsync(port->dev, dev);
++ }
++
+ return 0;
+ }
+
+--
+2.35.1
+
--- /dev/null
+From 766539ae2e06fb1436312b6b4cd619ca429a705a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 20 Sep 2022 14:20:17 +0200
+Subject: netfilter: ebtables: fix memory leak when blob is malformed
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 62ce44c4fff947eebdf10bb582267e686e6835c9 ]
+
+The bug fix was incomplete; it "replaced" the crash with a memory leak.
+The old code had an assignment to "ret" embedded into the conditional,
+restore this.
+
+Fixes: 7997eff82828 ("netfilter: ebtables: reject blobs that don't provide all entry points")
+Reported-and-tested-by: syzbot+a24c5252f3e3ab733464@syzkaller.appspotmail.com
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bridge/netfilter/ebtables.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
+index 310740cc684a..06b80b584381 100644
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -999,8 +999,10 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl,
+ goto free_iterate;
+ }
+
+- if (repl->valid_hooks != t->valid_hooks)
++ if (repl->valid_hooks != t->valid_hooks) {
++ ret = -EINVAL;
+ goto free_unlock;
++ }
+
+ if (repl->num_counters && repl->num_counters != t->private->nentries) {
+ ret = -EINVAL;
+--
+2.35.1
+
--- /dev/null
+From a0d91f832ab0ce24739fb19c312478df916f0821 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Aug 2022 14:56:57 +1000
+Subject: netfilter: nf_conntrack_irc: Tighten matching on DCC message
+
+From: David Leadbeater <dgl@dgl.cx>
+
+[ Upstream commit e8d5dfd1d8747b56077d02664a8838c71ced948e ]
+
+CTCP messages should only be at the start of an IRC message, not
+anywhere within it.
+
+While the helper only decodes packets in the ORIGINAL direction, it's
+possible to make a client send a CTCP message back by embedding one into
+a PING request. As-is, that's enough to make the helper believe that it
+saw a CTCP message.
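+
+For illustration (address and port made up), a genuine DCC request
+arrives as its own PRIVMSG:
+
+  PRIVMSG buddy :\1DCC SEND file 3232235521 2000 12345\1
+
+whereas the same CTCP blob reflected back through a PING/PONG exchange,
+e.g.
+
+  PONG :\1DCC SEND file 3232235521 2000 12345\1
+
+must no longer match, since the "\1DCC " is not part of a PRIVMSG.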
+
+Fixes: 869f37d8e48f ("[NETFILTER]: nf_conntrack/nf_nat: add IRC helper port")
+Signed-off-by: David Leadbeater <dgl@dgl.cx>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_conntrack_irc.c | 34 ++++++++++++++++++++++++++------
+ 1 file changed, 28 insertions(+), 6 deletions(-)
+
+diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
+index 26245419ef4a..65b5b05fe38d 100644
+--- a/net/netfilter/nf_conntrack_irc.c
++++ b/net/netfilter/nf_conntrack_irc.c
+@@ -148,15 +148,37 @@ static int help(struct sk_buff *skb, unsigned int protoff,
+ data = ib_ptr;
+ data_limit = ib_ptr + skb->len - dataoff;
+
+- /* strlen("\1DCC SENT t AAAAAAAA P\1\n")=24
+- * 5+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=14 */
+- while (data < data_limit - (19 + MINMATCHLEN)) {
+- if (memcmp(data, "\1DCC ", 5)) {
++ /* Skip any whitespace */
++ while (data < data_limit - 10) {
++ if (*data == ' ' || *data == '\r' || *data == '\n')
++ data++;
++ else
++ break;
++ }
++
++ /* strlen("PRIVMSG x ")=10 */
++ if (data < data_limit - 10) {
++ if (strncasecmp("PRIVMSG ", data, 8))
++ goto out;
++ data += 8;
++ }
++
++ /* strlen(" :\1DCC SENT t AAAAAAAA P\1\n")=26
++ * 7+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=26
++ */
++ while (data < data_limit - (21 + MINMATCHLEN)) {
++ /* Find first " :", the start of message */
++ if (memcmp(data, " :", 2)) {
+ data++;
+ continue;
+ }
++ data += 2;
++
++ /* then check that place only for the DCC command */
++ if (memcmp(data, "\1DCC ", 5))
++ goto out;
+ data += 5;
+- /* we have at least (19+MINMATCHLEN)-5 bytes valid data left */
++ /* we have at least (21+MINMATCHLEN)-(2+5) bytes valid data left */
+
+ iph = ip_hdr(skb);
+ pr_debug("DCC found in master %pI4:%u %pI4:%u\n",
+@@ -172,7 +194,7 @@ static int help(struct sk_buff *skb, unsigned int protoff,
+ pr_debug("DCC %s detected\n", dccprotos[i]);
+
+ /* we have at least
+- * (19+MINMATCHLEN)-5-dccprotos[i].matchlen bytes valid
++ * (21+MINMATCHLEN)-7-dccprotos[i].matchlen bytes valid
+ * data left (== 14/13 bytes) */
+ if (parse_dcc(data, data_limit, &dcc_ip,
+ &dcc_port, &addr_beg_p, &addr_end_p)) {
+--
+2.35.1
+
--- /dev/null
+From a37634703dac64c315b0493222d68523fdeaa6e3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 Jun 2019 12:32:40 +0300
+Subject: netfilter: nf_conntrack_sip: fix ct_sip_walk_headers
+
+From: Igor Ryzhov <iryzhov@nfware.com>
+
+[ Upstream commit 39aebedeaaa95757f5c1f2ddb5f43fdddbf478ca ]
+
+ct_sip_next_header and ct_sip_get_header return an absolute
+value of matchoff, not a shift from current dataoff.
+So dataoff should be assigned matchoff, not incremented by it.
+
+This issue can be seen in the scenario when there are multiple
+Contact headers and the first one is using a hostname and other headers
+use IP addresses. In this case, ct_sip_walk_headers will work as follows:
+
+The first ct_sip_get_header() call will find the first Contact header
+but will return -1 as the header uses a hostname. But matchoff will
+be changed to the offset of this header. After that, dataoff should be
+set to matchoff, so that the next ct_sip_get_header() call finds the next
+Contact header. But instead of assigning dataoff to matchoff, it is
+incremented by it, which is not correct, as matchoff is an absolute
+value of the offset. So on the next call to the ct_sip_get_header,
+dataoff will be incorrect, and the next Contact header may not be
+found at all.
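+
+A small worked example (offsets made up): with dataoff = 100, suppose
+ct_sip_get_header() finds a Contact header at absolute offset 150 and
+sets *matchoff = 150. The next search must start at dataoff = 150; the
+old code instead continued at 100 + 150 = 250, skipping past any
+remaining Contact headers in between.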
+
+Fixes: 05e3ced297fe ("[NETFILTER]: nf_conntrack_sip: introduce SIP-URI parsing helper")
+Signed-off-by: Igor Ryzhov <iryzhov@nfware.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_conntrack_sip.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
+index b83dc9bf0a5d..78fd9122b70c 100644
+--- a/net/netfilter/nf_conntrack_sip.c
++++ b/net/netfilter/nf_conntrack_sip.c
+@@ -477,7 +477,7 @@ static int ct_sip_walk_headers(const struct nf_conn *ct, const char *dptr,
+ return ret;
+ if (ret == 0)
+ break;
+- dataoff += *matchoff;
++ dataoff = *matchoff;
+ }
+ *in_header = 0;
+ }
+@@ -489,7 +489,7 @@ static int ct_sip_walk_headers(const struct nf_conn *ct, const char *dptr,
+ break;
+ if (ret == 0)
+ return ret;
+- dataoff += *matchoff;
++ dataoff = *matchoff;
+ }
+
+ if (in_header)
+--
+2.35.1
+
--- /dev/null
+From 4bddeb6e5516bfbcef613ccbf494405ab1fdfa02 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Sep 2022 21:41:00 +0900
+Subject: netfilter: nf_tables: fix nft_counters_enabled underflow at
+ nf_tables_addchain()
+
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+
+[ Upstream commit 921ebde3c0d22c8cba74ce8eb3cc4626abff1ccd ]
+
+syzbot is reporting an underflow of the nft_counters_enabled counter at
+nf_tables_addchain() [1]. Commit 43eb8949cfdffa76 ("netfilter:
+nf_tables: do not leave chain stats enabled on error") missed that, in
+the error path of nf_tables_addchain(), nf_tables_chain_destroy() called
+after nft_basechain_init() decrements the counter, because
+nft_basechain_init() makes nft_is_base_chain() return true by setting
+the NFT_CHAIN_BASE flag.
+
+Increment the counter immediately after returning from
+nft_basechain_init().
+
+Link: https://syzkaller.appspot.com/bug?extid=b5d82a651b71cd8a75ab [1]
+Reported-by: syzbot <syzbot+b5d82a651b71cd8a75ab@syzkaller.appspotmail.com>
+Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Tested-by: syzbot <syzbot+b5d82a651b71cd8a75ab@syzkaller.appspotmail.com>
+Fixes: 43eb8949cfdffa76 ("netfilter: nf_tables: do not leave chain stats enabled on error")
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_tables_api.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index b8e7e1c5c08a..d65c47bcbfc9 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -2001,7 +2001,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
+ u8 policy, u32 flags)
+ {
+ const struct nlattr * const *nla = ctx->nla;
+- struct nft_stats __percpu *stats = NULL;
+ struct nft_table *table = ctx->table;
+ struct nft_base_chain *basechain;
+ struct net *net = ctx->net;
+@@ -2015,6 +2014,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
+ return -EOVERFLOW;
+
+ if (nla[NFTA_CHAIN_HOOK]) {
++ struct nft_stats __percpu *stats = NULL;
+ struct nft_chain_hook hook;
+
+ if (flags & NFT_CHAIN_BINDING)
+@@ -2047,6 +2047,8 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
+ kfree(basechain);
+ return err;
+ }
++ if (stats)
++ static_branch_inc(&nft_counters_enabled);
+ } else {
+ if (flags & NFT_CHAIN_BASE)
+ return -EINVAL;
+@@ -2121,9 +2123,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
+ goto err_unregister_hook;
+ }
+
+- if (stats)
+- static_branch_inc(&nft_counters_enabled);
+-
+ table->use++;
+
+ return 0;
+--
+2.35.1
+
--- /dev/null
+From 5e636cd72de7808b247e76e0bec87be3f9a6077e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Sep 2022 22:58:51 +0900
+Subject: netfilter: nf_tables: fix percpu memory leak at nf_tables_addchain()
+
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+
+[ Upstream commit 9a4d6dd554b86e65581ef6b6638a39ae079b17ac ]
+
+It seems to me that percpu memory for chain stats started leaking since
+commit 3bc158f8d0330f0a ("netfilter: nf_tables: map basechain priority to
+hardware priority") when nft_chain_offload_priority() returned an error.
+
+Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Fixes: 3bc158f8d0330f0a ("netfilter: nf_tables: map basechain priority to hardware priority")
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_tables_api.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index d65c47bcbfc9..810995d712ac 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -2045,6 +2045,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
+ if (err < 0) {
+ nft_chain_release_hook(&hook);
+ kfree(basechain);
++ free_percpu(stats);
+ return err;
+ }
+ if (stats)
+--
+2.35.1
+
--- /dev/null
+From b6136b771f18581ae2a4d2fe3bdce9bf51e61e31 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Sep 2022 10:26:18 +0200
+Subject: netfilter: nfnetlink_osf: fix possible bogus match in nf_osf_find()
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit 559c36c5a8d730c49ef805a72b213d3bba155cc8 ]
+
+nf_osf_find() incorrectly returns true on mismatch; this leads to
+copying an uninitialized memory area in nft_osf, which can be used to leak
+stale kernel stack data to userspace.
+
+Fixes: 22c7652cdaa8 ("netfilter: nft_osf: Add version option support")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nfnetlink_osf.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
+index 79fbf37291f3..51e3953b414c 100644
+--- a/net/netfilter/nfnetlink_osf.c
++++ b/net/netfilter/nfnetlink_osf.c
+@@ -269,6 +269,7 @@ bool nf_osf_find(const struct sk_buff *skb,
+ struct nf_osf_hdr_ctx ctx;
+ const struct tcphdr *tcp;
+ struct tcphdr _tcph;
++ bool found = false;
+
+ memset(&ctx, 0, sizeof(ctx));
+
+@@ -283,10 +284,11 @@ bool nf_osf_find(const struct sk_buff *skb,
+
+ data->genre = f->genre;
+ data->version = f->version;
++ found = true;
+ break;
+ }
+
+- return true;
++ return found;
+ }
+ EXPORT_SYMBOL_GPL(nf_osf_find);
+
+--
+2.35.1
+
--- /dev/null
+From 331c758cc39875e72c728d08769aa4172131655a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Sep 2022 20:56:59 +0800
+Subject: of: mdio: Add of_node_put() when breaking out of for_each_xx
+
+From: Liang He <windhl@126.com>
+
+[ Upstream commit 1c48709e6d9d353acaaac1d8e33474756b121d78 ]
+
+In of_mdiobus_register(), we should call of_node_put() on 'child' when
+breaking out of for_each_available_child_of_node() early.
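+
+The underlying pattern (a generic sketch, not the driver's exact code):
+the iterator takes a reference on each 'child' it yields and drops it on
+the next iteration, so leaving the loop early leaves one reference held:
+
+  for_each_available_child_of_node(np, child) {
+          rc = of_mdiobus_register_phy(mdio, child, addr);
+          if (rc)
+                  goto unregister;  /* 'child' reference still held */
+  }
+  return 0;
+
+  unregister:
+          of_node_put(child);       /* drop it before unwinding */
+          mdiobus_unregister(mdio);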
+
+Fixes: 66bdede495c7 ("of_mdio: Fix broken PHY IRQ in case of probe deferral")
+Co-developed-by: Miaoqian Lin <linmq006@gmail.com>
+Signed-off-by: Miaoqian Lin <linmq006@gmail.com>
+Signed-off-by: Liang He <windhl@126.com>
+Link: https://lore.kernel.org/r/20220913125659.3331969-1-windhl@126.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/mdio/of_mdio.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c
+index ea0bf13e8ac3..5bae47f3da40 100644
+--- a/drivers/net/mdio/of_mdio.c
++++ b/drivers/net/mdio/of_mdio.c
+@@ -332,6 +332,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
+ return 0;
+
+ unregister:
++ of_node_put(child);
+ mdiobus_unregister(mdio);
+ return rc;
+ }
+--
+2.35.1
+
--- /dev/null
+From 60dbdad7dc5ee8cef25e06a1125569962ba9cd35 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 15 Sep 2022 11:29:10 +0200
+Subject: perf jit: Include program header in ELF files
+
+From: Lieven Hey <lieven.hey@kdab.com>
+
+[ Upstream commit babd04386b1df8c364cdaa39ac0e54349502e1e5 ]
+
+The missing header makes it hard for programs like elfutils to open
+these files.
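+
+With the program header in place, a jitted DSO shows a loadable segment
+(file name and sizes illustrative):
+
+  $ readelf -lW jitted-1234-56.so
+  Program Headers:
+    Type Offset   VirtAddr PhysAddr FileSiz  MemSiz   Flg Align
+    LOAD 0x000000 0x000000 0x000000 <csize>  <csize>  R E 0x8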
+
+Fixes: 2d86612aacb7805f ("perf symbol: Correct address for bss symbols")
+Reviewed-by: Leo Yan <leo.yan@linaro.org>
+Signed-off-by: Lieven Hey <lieven.hey@kdab.com>
+Tested-by: Leo Yan <leo.yan@linaro.org>
+Cc: Leo Yan <leo.yan@linaro.org>
+Link: https://lore.kernel.org/r/20220915092910.711036-1-lieven.hey@kdab.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/util/genelf.c | 14 ++++++++++++++
+ tools/perf/util/genelf.h | 4 ++++
+ 2 files changed, 18 insertions(+)
+
+diff --git a/tools/perf/util/genelf.c b/tools/perf/util/genelf.c
+index 953338b9e887..02cd9f75e3d2 100644
+--- a/tools/perf/util/genelf.c
++++ b/tools/perf/util/genelf.c
+@@ -251,6 +251,7 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
+ Elf_Data *d;
+ Elf_Scn *scn;
+ Elf_Ehdr *ehdr;
++ Elf_Phdr *phdr;
+ Elf_Shdr *shdr;
+ uint64_t eh_frame_base_offset;
+ char *strsym = NULL;
+@@ -285,6 +286,19 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
+ ehdr->e_version = EV_CURRENT;
+ ehdr->e_shstrndx= unwinding ? 4 : 2; /* shdr index for section name */
+
++ /*
++ * setup program header
++ */
++ phdr = elf_newphdr(e, 1);
++ phdr[0].p_type = PT_LOAD;
++ phdr[0].p_offset = 0;
++ phdr[0].p_vaddr = 0;
++ phdr[0].p_paddr = 0;
++ phdr[0].p_filesz = csize;
++ phdr[0].p_memsz = csize;
++ phdr[0].p_flags = PF_X | PF_R;
++ phdr[0].p_align = 8;
++
+ /*
+ * setup text section
+ */
+diff --git a/tools/perf/util/genelf.h b/tools/perf/util/genelf.h
+index d4137559be05..ac638945b4cb 100644
+--- a/tools/perf/util/genelf.h
++++ b/tools/perf/util/genelf.h
+@@ -50,8 +50,10 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent
+
+ #if GEN_ELF_CLASS == ELFCLASS64
+ #define elf_newehdr elf64_newehdr
++#define elf_newphdr elf64_newphdr
+ #define elf_getshdr elf64_getshdr
+ #define Elf_Ehdr Elf64_Ehdr
++#define Elf_Phdr Elf64_Phdr
+ #define Elf_Shdr Elf64_Shdr
+ #define Elf_Sym Elf64_Sym
+ #define ELF_ST_TYPE(a) ELF64_ST_TYPE(a)
+@@ -59,8 +61,10 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent
+ #define ELF_ST_VIS(a) ELF64_ST_VISIBILITY(a)
+ #else
+ #define elf_newehdr elf32_newehdr
++#define elf_newphdr elf32_newphdr
+ #define elf_getshdr elf32_getshdr
+ #define Elf_Ehdr Elf32_Ehdr
++#define Elf_Phdr Elf32_Phdr
+ #define Elf_Shdr Elf32_Shdr
+ #define Elf_Sym Elf32_Sym
+ #define ELF_ST_TYPE(a) ELF32_ST_TYPE(a)
+--
+2.35.1
+
--- /dev/null
+From 6423ee43d0a6b111d29cc086a793bfd0768971d9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Sep 2022 15:24:29 +0300
+Subject: perf kcore_copy: Do not check /proc/modules is unchanged
+
+From: Adrian Hunter <adrian.hunter@intel.com>
+
+[ Upstream commit 5b427df27b94aec1312cace48a746782a0925c53 ]
+
+/proc/kallsyms and /proc/modules are compared before and after the copy
+in order to ensure no changes during the copy.
+
+However, /proc/modules might also change merely because module reference
+counts changed, even though that makes no functional difference.
+
+Any modules loaded or unloaded should be visible in changes to kallsyms,
+so it is not necessary to also check /proc/modules.
+
+Remove the comparison checking that /proc/modules is unchanged.
+
+Fixes: fc1b691d7651d949 ("perf buildid-cache: Add ability to add kcore to the cache")
+Reported-by: Daniel Dao <dqminh@cloudflare.com>
+Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
+Tested-by: Daniel Dao <dqminh@cloudflare.com>
+Acked-by: Namhyung Kim <namhyung@kernel.org>
+Cc: Ian Rogers <irogers@google.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Link: https://lore.kernel.org/r/20220914122429.8770-1-adrian.hunter@intel.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/util/symbol-elf.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
+index d8d79a9ec775..3e423a920015 100644
+--- a/tools/perf/util/symbol-elf.c
++++ b/tools/perf/util/symbol-elf.c
+@@ -2002,8 +2002,8 @@ static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
+ * unusual. One significant peculiarity is that the mapping (start -> pgoff)
+ * is not the same for the kernel map and the modules map. That happens because
+ * the data is copied adjacently whereas the original kcore has gaps. Finally,
+- * kallsyms and modules files are compared with their copies to check that
+- * modules have not been loaded or unloaded while the copies were taking place.
++ * kallsyms file is compared with its copy to check that modules have not been
++ * loaded or unloaded while the copies were taking place.
+ *
+ * Return: %0 on success, %-1 on failure.
+ */
+@@ -2066,9 +2066,6 @@ int kcore_copy(const char *from_dir, const char *to_dir)
+ goto out_extract_close;
+ }
+
+- if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
+- goto out_extract_close;
+-
+ if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
+ goto out_extract_close;
+
+--
+2.35.1
+
--- /dev/null
+From 7d4cd53b67b9658afb868fd8500ffa4598ff513f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Sep 2022 17:35:38 +0530
+Subject: scsi: mpt3sas: Fix return value check of dma_get_required_mask()
+
+From: Sreekanth Reddy <sreekanth.reddy@broadcom.com>
+
+[ Upstream commit e0e0747de0ea3dd87cdbb0393311e17471a9baf1 ]
+
+Fix the incorrect return value check of dma_get_required_mask(). Due to
+this incorrect check, the driver was always setting the DMA mask to 63 bits.
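+
+dma_get_required_mask() returns a bit mask, not a bit count: on a system
+with memory above 4 GB it returns something like 0x7fffffffff, so
+comparing it against the integer 32 is effectively always false and the
+32-bit branch was never taken. The correct comparison is against
+DMA_BIT_MASK(32), i.e. 0xffffffff.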
+
+Link: https://lore.kernel.org/r/20220913120538.18759-2-sreekanth.reddy@broadcom.com
+Fixes: ba27c5cf286d ("scsi: mpt3sas: Don't change the DMA coherent mask after allocations")
+Signed-off-by: Sreekanth Reddy <sreekanth.reddy@broadcom.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/mpt3sas/mpt3sas_base.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
+index 18f85c963944..c1b76cda60db 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
+@@ -2825,7 +2825,7 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
+
+ if (ioc->is_mcpu_endpoint ||
+ sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma ||
+- dma_get_required_mask(&pdev->dev) <= 32)
++ dma_get_required_mask(&pdev->dev) <= DMA_BIT_MASK(32))
+ ioc->dma_mask = 32;
+ /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
+ else if (ioc->hba_mpi_version_belonged > MPI2_VERSION)
+--
+2.35.1
+
--- /dev/null
+From 0450dca01073803b710142665b4516d769295509 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Mar 2021 15:58:58 +0530
+Subject: scsi: mpt3sas: Force PCIe scatterlist allocations to be within same 4
+ GB region
+
+From: Suganath Prabu S <suganath-prabu.subramani@broadcom.com>
+
+[ Upstream commit d6adc251dd2fede6aaaf6c39f7e4ad799eda3758 ]
+
+According to the MPI specification, PCIe SGL buffers cannot cross a 4 GB
+boundary.
+
+While allocating, if any buffer crosses the 4 GB boundary, then:
+
+ - Release the already allocated memory pools; and
+
+ - Reallocate them by changing the DMA coherent mask to 32-bit
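+
+The "same 4 GB region" test reduces to comparing the upper 32 bits of
+the first and last byte of the buffer, roughly (a sketch, not the exact
+driver code):
+
+  static bool same_4gb_region(dma_addr_t start, u32 sz)
+  {
+          return (start >> 32) == ((start + sz - 1) >> 32);
+  }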
+
+Link: https://lore.kernel.org/r/20210305102904.7560-2-suganath-prabu.subramani@broadcom.com
+Signed-off-by: Suganath Prabu S <suganath-prabu.subramani@broadcom.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Stable-dep-of: e0e0747de0ea ("scsi: mpt3sas: Fix return value check of dma_get_required_mask()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/mpt3sas/mpt3sas_base.c | 159 ++++++++++++++++++++--------
+ drivers/scsi/mpt3sas/mpt3sas_base.h | 1 +
+ 2 files changed, 113 insertions(+), 47 deletions(-)
+
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
+index 3153f164554a..18f85c963944 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
+@@ -2822,23 +2822,22 @@ static int
+ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
+ {
+ struct sysinfo s;
+- int dma_mask;
+
+ if (ioc->is_mcpu_endpoint ||
+ sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma ||
+ dma_get_required_mask(&pdev->dev) <= 32)
+- dma_mask = 32;
++ ioc->dma_mask = 32;
+ /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
+ else if (ioc->hba_mpi_version_belonged > MPI2_VERSION)
+- dma_mask = 63;
++ ioc->dma_mask = 63;
+ else
+- dma_mask = 64;
++ ioc->dma_mask = 64;
+
+- if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) ||
+- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)))
++ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask)) ||
++ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask)))
+ return -ENODEV;
+
+- if (dma_mask > 32) {
++ if (ioc->dma_mask > 32) {
+ ioc->base_add_sg_single = &_base_add_sg_single_64;
+ ioc->sge_size = sizeof(Mpi2SGESimple64_t);
+ } else {
+@@ -2848,7 +2847,7 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
+
+ si_meminfo(&s);
+ ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
+- dma_mask, convert_to_kb(s.totalram));
++ ioc->dma_mask, convert_to_kb(s.totalram));
+
+ return 0;
+ }
+@@ -4902,10 +4901,10 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
+ dma_pool_free(ioc->pcie_sgl_dma_pool,
+ ioc->pcie_sg_lookup[i].pcie_sgl,
+ ioc->pcie_sg_lookup[i].pcie_sgl_dma);
++ ioc->pcie_sg_lookup[i].pcie_sgl = NULL;
+ }
+ dma_pool_destroy(ioc->pcie_sgl_dma_pool);
+ }
+-
+ if (ioc->config_page) {
+ dexitprintk(ioc,
+ ioc_info(ioc, "config_page(0x%p): free\n",
+@@ -4960,6 +4959,89 @@ mpt3sas_check_same_4gb_region(long reply_pool_start_address, u32 pool_sz)
+ return 0;
+ }
+
++/**
++ * _base_reduce_hba_queue_depth- Retry with reduced queue depth
++ * @ioc: Adapter object
++ *
++ * Return: 0 for success, non-zero for failure.
++ **/
++static inline int
++_base_reduce_hba_queue_depth(struct MPT3SAS_ADAPTER *ioc)
++{
++ int reduce_sz = 64;
++
++ if ((ioc->hba_queue_depth - reduce_sz) >
++ (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) {
++ ioc->hba_queue_depth -= reduce_sz;
++ return 0;
++ } else
++ return -ENOMEM;
++}
++
++/**
++ * _base_allocate_pcie_sgl_pool - Allocating DMA'able memory
++ * for pcie sgl pools.
++ * @ioc: Adapter object
++ * @sz: DMA Pool size
++ * @ct: Chain tracker
++ * Return: 0 for success, non-zero for failure.
++ */
++
++static int
++_base_allocate_pcie_sgl_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
++{
++ int i = 0, j = 0;
++ struct chain_tracker *ct;
++
++ ioc->pcie_sgl_dma_pool =
++ dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz,
++ ioc->page_size, 0);
++ if (!ioc->pcie_sgl_dma_pool) {
++ ioc_err(ioc, "PCIe SGL pool: dma_pool_create failed\n");
++ return -ENOMEM;
++ }
++
++ ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
++ ioc->chains_per_prp_buffer =
++ min(ioc->chains_per_prp_buffer, ioc->chains_needed_per_io);
++ for (i = 0; i < ioc->scsiio_depth; i++) {
++ ioc->pcie_sg_lookup[i].pcie_sgl =
++ dma_pool_alloc(ioc->pcie_sgl_dma_pool, GFP_KERNEL,
++ &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
++ if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
++ ioc_err(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
++ return -EAGAIN;
++ }
++
++ if (!mpt3sas_check_same_4gb_region(
++ (long)ioc->pcie_sg_lookup[i].pcie_sgl, sz)) {
++ ioc_err(ioc, "PCIE SGLs are not in same 4G !! pcie sgl (0x%p) dma = (0x%llx)\n",
++ ioc->pcie_sg_lookup[i].pcie_sgl,
++ (unsigned long long)
++ ioc->pcie_sg_lookup[i].pcie_sgl_dma);
++ ioc->use_32bit_dma = true;
++ return -EAGAIN;
++ }
++
++ for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
++ ct = &ioc->chain_lookup[i].chains_per_smid[j];
++ ct->chain_buffer =
++ ioc->pcie_sg_lookup[i].pcie_sgl +
++ (j * ioc->chain_segment_sz);
++ ct->chain_buffer_dma =
++ ioc->pcie_sg_lookup[i].pcie_sgl_dma +
++ (j * ioc->chain_segment_sz);
++ }
++ }
++ dinitprintk(ioc, ioc_info(ioc,
++ "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
++ ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
++ dinitprintk(ioc, ioc_info(ioc,
++ "Number of chains can fit in a PRP page(%d)\n",
++ ioc->chains_per_prp_buffer));
++ return 0;
++}
++
+ /**
+ * base_alloc_rdpq_dma_pool - Allocating DMA'able memory
+ * for reply queues.
+@@ -5058,7 +5140,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
+ unsigned short sg_tablesize;
+ u16 sge_size;
+ int i, j;
+- int ret = 0;
++ int ret = 0, rc = 0;
+ struct chain_tracker *ct;
+
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
+@@ -5357,6 +5439,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
+ * be required for NVMe PRP's, only each set of NVMe blocks will be
+ * contiguous, so a new set is allocated for each possible I/O.
+ */
++
+ ioc->chains_per_prp_buffer = 0;
+ if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
+ nvme_blocks_needed =
+@@ -5371,43 +5454,11 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
+ goto out;
+ }
+ sz = nvme_blocks_needed * ioc->page_size;
+- ioc->pcie_sgl_dma_pool =
+- dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0);
+- if (!ioc->pcie_sgl_dma_pool) {
+- ioc_info(ioc, "PCIe SGL pool: dma_pool_create failed\n");
+- goto out;
+- }
+-
+- ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
+- ioc->chains_per_prp_buffer = min(ioc->chains_per_prp_buffer,
+- ioc->chains_needed_per_io);
+-
+- for (i = 0; i < ioc->scsiio_depth; i++) {
+- ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc(
+- ioc->pcie_sgl_dma_pool, GFP_KERNEL,
+- &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
+- if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
+- ioc_info(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
+- goto out;
+- }
+- for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
+- ct = &ioc->chain_lookup[i].chains_per_smid[j];
+- ct->chain_buffer =
+- ioc->pcie_sg_lookup[i].pcie_sgl +
+- (j * ioc->chain_segment_sz);
+- ct->chain_buffer_dma =
+- ioc->pcie_sg_lookup[i].pcie_sgl_dma +
+- (j * ioc->chain_segment_sz);
+- }
+- }
+-
+- dinitprintk(ioc,
+- ioc_info(ioc, "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
+- ioc->scsiio_depth, sz,
+- (sz * ioc->scsiio_depth) / 1024));
+- dinitprintk(ioc,
+- ioc_info(ioc, "Number of chains can fit in a PRP page(%d)\n",
+- ioc->chains_per_prp_buffer));
++ rc = _base_allocate_pcie_sgl_pool(ioc, sz);
++ if (rc == -ENOMEM)
++ return -ENOMEM;
++ else if (rc == -EAGAIN)
++ goto try_32bit_dma;
+ total_sz += sz * ioc->scsiio_depth;
+ }
+
+@@ -5577,6 +5628,19 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
+ ioc->shost->sg_tablesize);
+ return 0;
+
++try_32bit_dma:
++ _base_release_memory_pools(ioc);
++ if (ioc->use_32bit_dma && (ioc->dma_mask > 32)) {
++ /* Change dma coherent mask to 32 bit and reallocate */
++ if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
++ pr_err("Setting 32 bit coherent DMA mask Failed %s\n",
++ pci_name(ioc->pdev));
++ return -ENODEV;
++ }
++ } else if (_base_reduce_hba_queue_depth(ioc) != 0)
++ return -ENOMEM;
++ goto retry_allocation;
++
+ out:
+ return -ENOMEM;
+ }
+@@ -7239,6 +7303,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
+
+ ioc->rdpq_array_enable_assigned = 0;
+ ioc->use_32bit_dma = false;
++ ioc->dma_mask = 64;
+ if (ioc->is_aero_ioc)
+ ioc->base_readl = &_base_readl_aero;
+ else
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
+index bc8beb10f3fc..823bbe64a477 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
+@@ -1257,6 +1257,7 @@ struct MPT3SAS_ADAPTER {
+ u16 thresh_hold;
+ u8 high_iops_queues;
+ u32 drv_support_bitmap;
++ u32 dma_mask;
+ bool enable_sdev_max_qd;
+ bool use_32bit_dma;
+
+--
+2.35.1
+
--- /dev/null
+From 092cf337af52e988ef054ff267fcdab3a79a17cc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Sep 2022 10:44:53 +0800
+Subject: selftests: forwarding: add shebang for sch_red.sh
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit 83e4b196838d90799a8879e5054a3beecf9ed256 ]
+
+RHEL/Fedora RPM build checks are stricter, and complain when executable
+files don't have a shebang line, e.g.
+
+*** WARNING: ./kselftests/net/forwarding/sch_red.sh is executable but has no shebang, removing executable bit
+
+Fix it by adding a shebang line.
+
+Fixes: 6cf0291f9517 ("selftests: forwarding: Add a RED test for SW datapath")
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Reviewed-by: Petr Machata <petrm@nvidia.com>
+Link: https://lore.kernel.org/r/20220922024453.437757-1-liuhangbin@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/net/forwarding/sch_red.sh | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/tools/testing/selftests/net/forwarding/sch_red.sh b/tools/testing/selftests/net/forwarding/sch_red.sh
+index e714bae473fb..81f31179ac88 100755
+--- a/tools/testing/selftests/net/forwarding/sch_red.sh
++++ b/tools/testing/selftests/net/forwarding/sch_red.sh
+@@ -1,3 +1,4 @@
++#!/bin/bash
+ # SPDX-License-Identifier: GPL-2.0
+
+ # This test sends one stream of traffic from H1 through a TBF shaper, to a RED
+--
+2.35.1
+
xfs-fix-up-non-directory-creation-in-sgid-directories.patch
xfs-reorder-iunlink-remove-operation-in-xfs_ifree.patch
xfs-validate-inode-fork-size-against-fork-format.patch
+arm64-dts-rockchip-pull-up-wlan-wake-on-gru-bob.patch
+drm-mediatek-dsi-add-atomic-destroy-duplicate-_state.patch
+arm64-dts-rockchip-set-rk3399-gru-pclk_edp-to-24-mhz.patch
+dmaengine-ti-k3-udma-private-fix-refcount-leak-bug-i.patch
+arm64-dts-rockchip-remove-enable-active-low-from-rk3.patch
+netfilter-nf_conntrack_sip-fix-ct_sip_walk_headers.patch
+netfilter-nf_conntrack_irc-tighten-matching-on-dcc-m.patch
+netfilter-nfnetlink_osf-fix-possible-bogus-match-in-.patch
+iavf-fix-cached-head-and-tail-value-for-iavf_get_tx_.patch
+ipvlan-fix-out-of-bound-bugs-caused-by-unset-skb-mac.patch
+net-let-flow-have-same-hash-in-two-directions.patch
+net-core-fix-flow-symmetric-hash.patch
+net-phy-aquantia-wait-for-the-suspend-resume-operati.patch
+scsi-mpt3sas-force-pcie-scatterlist-allocations-to-b.patch
+scsi-mpt3sas-fix-return-value-check-of-dma_get_requi.patch
+net-bonding-share-lacpdu_mcast_addr-definition.patch
+net-bonding-unsync-device-addresses-on-ndo_stop.patch
+net-team-unsync-device-addresses-on-ndo_stop.patch
+drm-panel-simple-fix-innolux_g121i1_l01-bus_format.patch
+mips-lantiq-export-clk_get_io-for-lantiq_wdt.ko.patch
+mips-loongson32-fix-phy-mode-being-left-unspecified.patch
+iavf-fix-bad-page-state.patch
+iavf-fix-set-max-mtu-size-with-port-vlan-and-jumbo-f.patch
+i40e-fix-vf-set-max-mtu-size.patch
+i40e-fix-set-max_tx_rate-when-it-is-lower-than-1-mbp.patch
+sfc-fix-tx-channel-offset-when-using-legacy-interrup.patch
+sfc-fix-null-pointer-dereference-in-efx_hard_start_x.patch
+drm-hisilicon-hibmc-allow-to-be-built-if-compile_tes.patch
+drm-hisilicon-add-depends-on-mmu.patch
+of-mdio-add-of_node_put-when-breaking-out-of-for_eac.patch
+net-ipa-fix-assumptions-about-dma-address-size.patch
+net-ipa-fix-table-alignment-requirement.patch
+net-ipa-avoid-64-bit-modulus.patch
+net-ipa-dma-addresses-are-nicely-aligned.patch
+net-ipa-kill-ipa_table_entry_size.patch
+net-ipa-properly-limit-modem-routing-table-use.patch
+wireguard-ratelimiter-disable-timings-test-by-defaul.patch
+wireguard-netlink-avoid-variable-sized-memcpy-on-soc.patch
+net-enetc-move-enetc_set_psfp-out-of-the-common-enet.patch
+net-socket-remove-register_gifconf.patch
+net-sched-taprio-avoid-disabling-offload-when-it-was.patch
+net-sched-taprio-make-qdisc_leaf-see-the-per-netdev-.patch
+netfilter-nf_tables-fix-nft_counters_enabled-underfl.patch
+netfilter-nf_tables-fix-percpu-memory-leak-at-nf_tab.patch
+netfilter-ebtables-fix-memory-leak-when-blob-is-malf.patch
+can-gs_usb-gs_can_open-fix-race-dev-can.state-condit.patch
+perf-jit-include-program-header-in-elf-files.patch
+perf-kcore_copy-do-not-check-proc-modules-is-unchang.patch
+drm-mediatek-dsi-move-mtk_dsi_stop-call-back-to-mtk_.patch
+net-smc-stop-the-clc-flow-if-no-link-to-map-buffers-.patch
+net-sunhme-fix-packet-reception-for-len-rx_copy_thre.patch
+net-sched-fix-possible-refcount-leak-in-tc_new_tfilt.patch
+selftests-forwarding-add-shebang-for-sch_red.sh.patch
--- /dev/null
+From 9edb49a818940566c077c09e57d166af97642fbe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Sep 2022 13:11:35 +0200
+Subject: sfc: fix null pointer dereference in efx_hard_start_xmit
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Íñigo Huguet <ihuguet@redhat.com>
+
+[ Upstream commit 0a242eb2913a4aa3d6fbdb86559f27628e9466f3 ]
+
+Trying to get the channel from the tx_queue variable here is wrong
+because we can only be here if tx_queue is NULL, so we shouldn't
+dereference it. As the above comment in the code says, this is very
+unlikely to happen, but it's wrong anyway so let's fix it.
+
+I hit this issue because of a different bug that caused tx_queue to be
+NULL. If that happens, this is the error message that we get here:
+ BUG: unable to handle kernel NULL pointer dereference at 0000000000000020
+ [...]
+ RIP: 0010:efx_hard_start_xmit+0x153/0x170 [sfc]
+
+Fixes: 12804793b17c ("sfc: decouple TXQ type from label")
+Reported-by: Tianhao Zhao <tizhao@redhat.com>
+Signed-off-by: Íñigo Huguet <ihuguet@redhat.com>
+Acked-by: Edward Cree <ecree.xilinx@gmail.com>
+Link: https://lore.kernel.org/r/20220914111135.21038-1-ihuguet@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/sfc/tx.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
+index 1665529a7271..fcc7de8ae2bf 100644
+--- a/drivers/net/ethernet/sfc/tx.c
++++ b/drivers/net/ethernet/sfc/tx.c
+@@ -545,7 +545,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
+ * previous packets out.
+ */
+ if (!netdev_xmit_more())
+- efx_tx_send_pending(tx_queue->channel);
++ efx_tx_send_pending(efx_get_tx_channel(efx, index));
+ return NETDEV_TX_OK;
+ }
+
+--
+2.35.1
+
--- /dev/null
+From 1f0be45d8d7d72fa772fc2dbfc958bb3061771ba Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Sep 2022 12:36:48 +0200
+Subject: sfc: fix TX channel offset when using legacy interrupts
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Íñigo Huguet <ihuguet@redhat.com>
+
+[ Upstream commit f232af4295653afa4ade3230462b3be15ad16419 ]
+
+In legacy interrupt mode the tx_channel_offset was hardcoded to 1, but
+that's not correct if efx_separate_tx_channels is false. In that case,
+the offset is 0 because the tx queues are in the single existing channel
+at index 0, together with the rx queue.
+
+Without this fix, as soon as you try to send any traffic, it tries to
+get the tx queues from an uninitialized channel, producing these errors:
+ WARNING: CPU: 1 PID: 0 at drivers/net/ethernet/sfc/tx.c:540 efx_hard_start_xmit+0x12e/0x170 [sfc]
+ [...]
+ RIP: 0010:efx_hard_start_xmit+0x12e/0x170 [sfc]
+ [...]
+ Call Trace:
+ <IRQ>
+ dev_hard_start_xmit+0xd7/0x230
+ sch_direct_xmit+0x9f/0x360
+ __dev_queue_xmit+0x890/0xa40
+ [...]
+ BUG: unable to handle kernel NULL pointer dereference at 0000000000000020
+ [...]
+ RIP: 0010:efx_hard_start_xmit+0x153/0x170 [sfc]
+ [...]
+ Call Trace:
+ <IRQ>
+ dev_hard_start_xmit+0xd7/0x230
+ sch_direct_xmit+0x9f/0x360
+ __dev_queue_xmit+0x890/0xa40
+ [...]
+
+Fixes: c308dfd1b43e ("sfc: fix wrong tx channel offset with efx_separate_tx_channels")
+Reported-by: Tianhao Zhao <tizhao@redhat.com>
+Signed-off-by: Íñigo Huguet <ihuguet@redhat.com>
+Acked-by: Edward Cree <ecree.xilinx@gmail.com>
+Link: https://lore.kernel.org/r/20220914103648.16902-1-ihuguet@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/sfc/efx_channels.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
+index d0f1b2dc7dff..c49168ba7a4d 100644
+--- a/drivers/net/ethernet/sfc/efx_channels.c
++++ b/drivers/net/ethernet/sfc/efx_channels.c
+@@ -308,7 +308,7 @@ int efx_probe_interrupts(struct efx_nic *efx)
+ efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
+ efx->n_rx_channels = 1;
+ efx->n_tx_channels = 1;
+- efx->tx_channel_offset = 1;
++ efx->tx_channel_offset = efx_separate_tx_channels ? 1 : 0;
+ efx->n_xdp_channels = 0;
+ efx->xdp_channel_offset = efx->n_channels;
+ efx->legacy_irq = efx->pci_dev->irq;
+--
+2.35.1
+
--- /dev/null
+From 840e8e4e92e78bbb168dadbbcd724f29aecc1853 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 16 Sep 2022 15:37:40 +0100
+Subject: wireguard: netlink: avoid variable-sized memcpy on sockaddr
+
+From: Jason A. Donenfeld <Jason@zx2c4.com>
+
+[ Upstream commit 26c013108c12b94bc023bf19198a4300596c98b1 ]
+
+Doing a variable-sized memcpy is slower, and the compiler isn't smart
+enough to turn this into a constant-size assignment.
+
+Further, Kees' latest fortified memcpy will actually bark, because the
+destination pointer is type sockaddr, not explicitly sockaddr_in or
+sockaddr_in6, so it thinks there's an overflow:
+
+ memcpy: detected field-spanning write (size 28) of single field
+ "&endpoint.addr" at drivers/net/wireguard/netlink.c:446 (size 16)
+
+Fix this by just assigning directly, using explicit casts for each checked
+case.
+
+Fixes: e7096c131e51 ("net: WireGuard secure network tunnel")
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Reviewed-by: Kees Cook <keescook@chromium.org>
+Reported-by: syzbot+a448cda4dba2dac50de5@syzkaller.appspotmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireguard/netlink.c | 13 ++++++-------
+ 1 file changed, 6 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c
+index d0f3b6d7f408..5c804bcabfe6 100644
+--- a/drivers/net/wireguard/netlink.c
++++ b/drivers/net/wireguard/netlink.c
+@@ -436,14 +436,13 @@ static int set_peer(struct wg_device *wg, struct nlattr **attrs)
+ if (attrs[WGPEER_A_ENDPOINT]) {
+ struct sockaddr *addr = nla_data(attrs[WGPEER_A_ENDPOINT]);
+ size_t len = nla_len(attrs[WGPEER_A_ENDPOINT]);
++ struct endpoint endpoint = { { { 0 } } };
+
+- if ((len == sizeof(struct sockaddr_in) &&
+- addr->sa_family == AF_INET) ||
+- (len == sizeof(struct sockaddr_in6) &&
+- addr->sa_family == AF_INET6)) {
+- struct endpoint endpoint = { { { 0 } } };
+-
+- memcpy(&endpoint.addr, addr, len);
++ if (len == sizeof(struct sockaddr_in) && addr->sa_family == AF_INET) {
++ endpoint.addr4 = *(struct sockaddr_in *)addr;
++ wg_socket_set_peer_endpoint(peer, &endpoint);
++ } else if (len == sizeof(struct sockaddr_in6) && addr->sa_family == AF_INET6) {
++ endpoint.addr6 = *(struct sockaddr_in6 *)addr;
+ wg_socket_set_peer_endpoint(peer, &endpoint);
+ }
+ }
+--
+2.35.1
+
--- /dev/null
+From 01909334194ca9e5e6152024ce8a8f1c09963367 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 16 Sep 2022 15:37:38 +0100
+Subject: wireguard: ratelimiter: disable timings test by default
+
+From: Jason A. Donenfeld <Jason@zx2c4.com>
+
+[ Upstream commit 684dec3cf45da2b0848298efae4adf3b2aeafeda ]
+
+A previous commit tried to make the ratelimiter timings test more
+reliable but in the process made it less reliable on other
+configurations. This is an impossible problem to solve without
+increasingly ridiculous heuristics. And it's not even a problem that
+actually needs to be solved in any comprehensive way, since this is only
+ever used during development. So just cordon this off with a DEBUG_
+ifdef, just like we do for the trie's randomized tests, so it can be
+enabled while hacking on the code, and otherwise disabled in CI. In the
+process we also revert 151c8e499f47.
+
+Fixes: 151c8e499f47 ("wireguard: ratelimiter: use hrtimer in selftest")
+Fixes: e7096c131e51 ("net: WireGuard secure network tunnel")
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireguard/selftest/ratelimiter.c | 25 ++++++++------------
+ 1 file changed, 10 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/net/wireguard/selftest/ratelimiter.c b/drivers/net/wireguard/selftest/ratelimiter.c
+index ba87d294604f..d4bb40a695ab 100644
+--- a/drivers/net/wireguard/selftest/ratelimiter.c
++++ b/drivers/net/wireguard/selftest/ratelimiter.c
+@@ -6,29 +6,28 @@
+ #ifdef DEBUG
+
+ #include <linux/jiffies.h>
+-#include <linux/hrtimer.h>
+
+ static const struct {
+ bool result;
+- u64 nsec_to_sleep_before;
++ unsigned int msec_to_sleep_before;
+ } expected_results[] __initconst = {
+ [0 ... PACKETS_BURSTABLE - 1] = { true, 0 },
+ [PACKETS_BURSTABLE] = { false, 0 },
+- [PACKETS_BURSTABLE + 1] = { true, NSEC_PER_SEC / PACKETS_PER_SECOND },
++ [PACKETS_BURSTABLE + 1] = { true, MSEC_PER_SEC / PACKETS_PER_SECOND },
+ [PACKETS_BURSTABLE + 2] = { false, 0 },
+- [PACKETS_BURSTABLE + 3] = { true, (NSEC_PER_SEC / PACKETS_PER_SECOND) * 2 },
++ [PACKETS_BURSTABLE + 3] = { true, (MSEC_PER_SEC / PACKETS_PER_SECOND) * 2 },
+ [PACKETS_BURSTABLE + 4] = { true, 0 },
+ [PACKETS_BURSTABLE + 5] = { false, 0 }
+ };
+
+ static __init unsigned int maximum_jiffies_at_index(int index)
+ {
+- u64 total_nsecs = 2 * NSEC_PER_SEC / PACKETS_PER_SECOND / 3;
++ unsigned int total_msecs = 2 * MSEC_PER_SEC / PACKETS_PER_SECOND / 3;
+ int i;
+
+ for (i = 0; i <= index; ++i)
+- total_nsecs += expected_results[i].nsec_to_sleep_before;
+- return nsecs_to_jiffies(total_nsecs);
++ total_msecs += expected_results[i].msec_to_sleep_before;
++ return msecs_to_jiffies(total_msecs);
+ }
+
+ static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4,
+@@ -43,12 +42,8 @@ static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4,
+ loop_start_time = jiffies;
+
+ for (i = 0; i < ARRAY_SIZE(expected_results); ++i) {
+- if (expected_results[i].nsec_to_sleep_before) {
+- ktime_t timeout = ktime_add(ktime_add_ns(ktime_get_coarse_boottime(), TICK_NSEC * 4 / 3),
+- ns_to_ktime(expected_results[i].nsec_to_sleep_before));
+- set_current_state(TASK_UNINTERRUPTIBLE);
+- schedule_hrtimeout_range_clock(&timeout, 0, HRTIMER_MODE_ABS, CLOCK_BOOTTIME);
+- }
++ if (expected_results[i].msec_to_sleep_before)
++ msleep(expected_results[i].msec_to_sleep_before);
+
+ if (time_is_before_jiffies(loop_start_time +
+ maximum_jiffies_at_index(i)))
+@@ -132,7 +127,7 @@ bool __init wg_ratelimiter_selftest(void)
+ if (IS_ENABLED(CONFIG_KASAN) || IS_ENABLED(CONFIG_UBSAN))
+ return true;
+
+- BUILD_BUG_ON(NSEC_PER_SEC % PACKETS_PER_SECOND != 0);
++ BUILD_BUG_ON(MSEC_PER_SEC % PACKETS_PER_SECOND != 0);
+
+ if (wg_ratelimiter_init())
+ goto out;
+@@ -172,7 +167,7 @@ bool __init wg_ratelimiter_selftest(void)
+ ++test;
+ #endif
+
+- for (trials = TRIALS_BEFORE_GIVING_UP;;) {
++ for (trials = TRIALS_BEFORE_GIVING_UP; IS_ENABLED(DEBUG_RATELIMITER_TIMINGS);) {
+ int test_count = 0, ret;
+
+ ret = timings_test(skb4, hdr4, skb6, hdr6, &test_count);
+--
+2.35.1
+