git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 5.15
author Sasha Levin <sashal@kernel.org>
Mon, 6 May 2024 19:23:44 +0000 (15:23 -0400)
committer Sasha Levin <sashal@kernel.org>
Mon, 6 May 2024 19:23:44 +0000 (15:23 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
41 files changed:
queue-5.15/alsa-hda-intel-sdw-acpi-fix-usage-of-device_get_name.patch [new file with mode: 0644]
queue-5.15/asoc-meson-axg-card-make-links-nonatomic.patch [new file with mode: 0644]
queue-5.15/asoc-meson-axg-fifo-use-field-helpers.patch [new file with mode: 0644]
queue-5.15/asoc-meson-axg-fifo-use-threaded-irq-to-check-period.patch [new file with mode: 0644]
queue-5.15/asoc-meson-axg-tdm-interface-manage-formatters-in-tr.patch [new file with mode: 0644]
queue-5.15/asoc-meson-cards-select-snd_dynamic_minors.patch [new file with mode: 0644]
queue-5.15/bna-ensure-the-copied-buf-is-nul-terminated.patch [new file with mode: 0644]
queue-5.15/bpf-fix-a-verifier-verbose-message.patch [new file with mode: 0644]
queue-5.15/bpf-kconfig-fix-debug_info_btf_modules-kconfig-defin.patch [new file with mode: 0644]
queue-5.15/bpf-skmsg-fix-null-pointer-dereference-in-sk_psock_s.patch [new file with mode: 0644]
queue-5.15/bpf-sockmap-avoid-potential-null-dereference-in-sk_p.patch [new file with mode: 0644]
queue-5.15/bpf-sockmap-fix-null-pointer-dereference-in-sk_psock.patch [new file with mode: 0644]
queue-5.15/bpf-sockmap-wake-up-polling-after-data-copy.patch [new file with mode: 0644]
queue-5.15/cxgb4-properly-lock-tx-queue-for-the-selftest.patch [new file with mode: 0644]
queue-5.15/drm-panel-ili9341-respect-deferred-probe.patch [new file with mode: 0644]
queue-5.15/drm-panel-ili9341-use-predefined-error-codes.patch [new file with mode: 0644]
queue-5.15/net-bridge-fix-multicast-to-unicast-with-fraglist-gs.patch [new file with mode: 0644]
queue-5.15/net-core-reject-skb_copy-_expand-for-fraglist-gso-sk.patch [new file with mode: 0644]
queue-5.15/net-dsa-mv88e6xxx-fix-number-of-databases-for-88e614.patch [new file with mode: 0644]
queue-5.15/net-gro-add-flush-check-in-udp_gro_receive_segment.patch [new file with mode: 0644]
queue-5.15/net-introduce-a-new-proto_ops-read_skb.patch [new file with mode: 0644]
queue-5.15/net-l2tp-drop-flow-hash-on-forward.patch [new file with mode: 0644]
queue-5.15/net-qede-sanitize-rc-in-qede_add_tc_flower_fltr.patch [new file with mode: 0644]
queue-5.15/net-qede-use-return-from-qede_parse_actions.patch [new file with mode: 0644]
queue-5.15/net-qede-use-return-from-qede_parse_flow_attr-for-fl.patch [new file with mode: 0644]
queue-5.15/net-qede-use-return-from-qede_parse_flow_attr-for-fl.patch-8226 [new file with mode: 0644]
queue-5.15/nsh-restore-skb-protocol-data-mac_header-for-outer-h.patch [new file with mode: 0644]
queue-5.15/octeontx2-af-avoid-off-by-one-read-from-userspace.patch [new file with mode: 0644]
queue-5.15/s390-cio-ensure-the-copied-buf-is-nul-terminated.patch [new file with mode: 0644]
queue-5.15/s390-mm-fix-clearing-storage-keys-for-huge-pages.patch [new file with mode: 0644]
queue-5.15/s390-mm-fix-storage-key-clearing-for-guest-huge-page.patch [new file with mode: 0644]
queue-5.15/s390-qeth-don-t-keep-track-of-input-queue-count.patch [new file with mode: 0644]
queue-5.15/s390-qeth-fix-kernel-panic-after-setting-hsuid.patch [new file with mode: 0644]
queue-5.15/s390-vdso-add-cfi-for-ra-register-to-asm-macro-vdso_.patch [new file with mode: 0644]
queue-5.15/series
queue-5.15/spi-hisi-kunpeng-delete-the-dump-interface-of-data-r.patch [new file with mode: 0644]
queue-5.15/tcp-introduce-tcp_read_skb.patch [new file with mode: 0644]
queue-5.15/tipc-fix-a-possible-memleak-in-tipc_buf_append.patch [new file with mode: 0644]
queue-5.15/xdp-add-xdp_do_redirect_frame-for-pre-computed-xdp_f.patch [new file with mode: 0644]
queue-5.15/xdp-move-conversion-to-xdp_frame-out-of-map-function.patch [new file with mode: 0644]
queue-5.15/xdp-use-flags-field-to-disambiguate-broadcast-redire.patch [new file with mode: 0644]

diff --git a/queue-5.15/alsa-hda-intel-sdw-acpi-fix-usage-of-device_get_name.patch b/queue-5.15/alsa-hda-intel-sdw-acpi-fix-usage-of-device_get_name.patch
new file mode 100644 (file)
index 0000000..f027337
--- /dev/null
@@ -0,0 +1,44 @@
+From 6095d07ceee333329121cd5598ff9e6334d22b47 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Apr 2024 10:27:31 -0500
+Subject: ALSA: hda: intel-sdw-acpi: fix usage of device_get_named_child_node()
+
+From: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+
+[ Upstream commit c158cf914713efc3bcdc25680c7156c48c12ef6a ]
+
+The documentation for device_get_named_child_node() mentions this
+important point:
+
+"
+The caller is responsible for calling fwnode_handle_put() on the
+returned fwnode pointer.
+"
+
+Add fwnode_handle_put() to avoid a leaked reference.
+
+Signed-off-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+Fixes: 08c2a4bc9f2a ("ALSA: hda: move Intel SoundWire ACPI scan to dedicated module")
+Message-ID: <20240426152731.38420-1-pierre-louis.bossart@linux.intel.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/hda/intel-sdw-acpi.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/sound/hda/intel-sdw-acpi.c b/sound/hda/intel-sdw-acpi.c
+index b7758dbe23714..7c1e47aa4e7a7 100644
+--- a/sound/hda/intel-sdw-acpi.c
++++ b/sound/hda/intel-sdw-acpi.c
+@@ -41,6 +41,8 @@ static bool is_link_enabled(struct fwnode_handle *fw_node, int i)
+                                "intel-quirk-mask",
+                                &quirk_mask);
++      fwnode_handle_put(link);
++
+       if (quirk_mask & SDW_INTEL_QUIRK_MASK_BUS_DISABLE)
+               return false;
+-- 
+2.43.0
+
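For reference, a minimal sketch of the ownership rule the fix above follows: device_get_named_child_node() returns a counted fwnode reference that must be released with fwnode_handle_put() on every path. The child node name, property name and helper below are placeholders, not taken from the driver.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/property.h>
#include <linux/types.h>

/* Hypothetical helper: read a u32 property from a named child node. */
static int example_read_child_prop(struct device *dev, u32 *val)
{
	struct fwnode_handle *child;
	int ret;

	/* Takes a reference on the child fwnode (or returns NULL). */
	child = device_get_named_child_node(dev, "example-child");
	if (!child)
		return -ENODEV;

	ret = fwnode_property_read_u32(child, "example-prop", val);

	/* Balance the reference taken above, as the fix does. */
	fwnode_handle_put(child);

	return ret;
}
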
diff --git a/queue-5.15/asoc-meson-axg-card-make-links-nonatomic.patch b/queue-5.15/asoc-meson-axg-card-make-links-nonatomic.patch
new file mode 100644 (file)
index 0000000..b2eef5e
--- /dev/null
@@ -0,0 +1,37 @@
+From 11ded335f56fd7dc63ffaebfe96c31b675de87bd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Oct 2021 13:42:16 +0200
+Subject: ASoC: meson: axg-card: make links nonatomic
+
+From: Jerome Brunet <jbrunet@baylibre.com>
+
+[ Upstream commit e138233e56e9829e65b6293887063a1a3ccb2d68 ]
+
+Non-atomic operations need to be performed in the trigger callback
+of the TDM interfaces. Those are BEs but what matters is the nonatomic
+flag of the FE in the DPCM context. Just set nonatomic for everything so,
+at least, it is clear.
+
+Signed-off-by: Jerome Brunet <jbrunet@baylibre.com>
+Link: https://lore.kernel.org/r/20211020114217.133153-2-jbrunet@baylibre.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/meson/axg-card.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/sound/soc/meson/axg-card.c b/sound/soc/meson/axg-card.c
+index 2b77010c2c5ce..cbbaa55d92a66 100644
+--- a/sound/soc/meson/axg-card.c
++++ b/sound/soc/meson/axg-card.c
+@@ -320,6 +320,7 @@ static int axg_card_add_link(struct snd_soc_card *card, struct device_node *np,
+       dai_link->cpus = cpu;
+       dai_link->num_cpus = 1;
++      dai_link->nonatomic = true;
+       ret = meson_card_parse_dai(card, np, &dai_link->cpus->of_node,
+                                  &dai_link->cpus->dai_name);
+-- 
+2.43.0
+
diff --git a/queue-5.15/asoc-meson-axg-fifo-use-field-helpers.patch b/queue-5.15/asoc-meson-axg-fifo-use-field-helpers.patch
new file mode 100644 (file)
index 0000000..4595b1d
--- /dev/null
@@ -0,0 +1,208 @@
+From 69678f88be06a69eeef72596a7418249ec03fd7d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 27 Feb 2024 16:08:25 +0100
+Subject: ASoC: meson: axg-fifo: use FIELD helpers
+
+From: Jerome Brunet <jbrunet@baylibre.com>
+
+[ Upstream commit 9e6f39535c794adea6ba802a52c722d193c28124 ]
+
+Use FIELD_GET() and FIELD_PREP() helpers instead of doing it manually.
+
+Signed-off-by: Jerome Brunet <jbrunet@baylibre.com>
+Link: https://msgid.link/r/20240227150826.573581-1-jbrunet@baylibre.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Stable-dep-of: b11d26660dff ("ASoC: meson: axg-fifo: use threaded irq to check periods")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/meson/axg-fifo.c  | 25 +++++++++++++------------
+ sound/soc/meson/axg-fifo.h  | 12 +++++-------
+ sound/soc/meson/axg-frddr.c |  5 +++--
+ sound/soc/meson/axg-toddr.c | 22 ++++++++++------------
+ 4 files changed, 31 insertions(+), 33 deletions(-)
+
+diff --git a/sound/soc/meson/axg-fifo.c b/sound/soc/meson/axg-fifo.c
+index bccfb770b3391..bde7598750064 100644
+--- a/sound/soc/meson/axg-fifo.c
++++ b/sound/soc/meson/axg-fifo.c
+@@ -3,6 +3,7 @@
+ // Copyright (c) 2018 BayLibre, SAS.
+ // Author: Jerome Brunet <jbrunet@baylibre.com>
++#include <linux/bitfield.h>
+ #include <linux/clk.h>
+ #include <linux/of_irq.h>
+ #include <linux/of_platform.h>
+@@ -145,8 +146,8 @@ int axg_fifo_pcm_hw_params(struct snd_soc_component *component,
+       /* Enable irq if necessary  */
+       irq_en = runtime->no_period_wakeup ? 0 : FIFO_INT_COUNT_REPEAT;
+       regmap_update_bits(fifo->map, FIFO_CTRL0,
+-                         CTRL0_INT_EN(FIFO_INT_COUNT_REPEAT),
+-                         CTRL0_INT_EN(irq_en));
++                         CTRL0_INT_EN,
++                         FIELD_PREP(CTRL0_INT_EN, irq_en));
+       return 0;
+ }
+@@ -176,9 +177,9 @@ int axg_fifo_pcm_hw_free(struct snd_soc_component *component,
+ {
+       struct axg_fifo *fifo = axg_fifo_data(ss);
+-      /* Disable the block count irq */
++      /* Disable irqs */
+       regmap_update_bits(fifo->map, FIFO_CTRL0,
+-                         CTRL0_INT_EN(FIFO_INT_COUNT_REPEAT), 0);
++                         CTRL0_INT_EN, 0);
+       return 0;
+ }
+@@ -187,13 +188,13 @@ EXPORT_SYMBOL_GPL(axg_fifo_pcm_hw_free);
+ static void axg_fifo_ack_irq(struct axg_fifo *fifo, u8 mask)
+ {
+       regmap_update_bits(fifo->map, FIFO_CTRL1,
+-                         CTRL1_INT_CLR(FIFO_INT_MASK),
+-                         CTRL1_INT_CLR(mask));
++                         CTRL1_INT_CLR,
++                         FIELD_PREP(CTRL1_INT_CLR, mask));
+       /* Clear must also be cleared */
+       regmap_update_bits(fifo->map, FIFO_CTRL1,
+-                         CTRL1_INT_CLR(FIFO_INT_MASK),
+-                         0);
++                         CTRL1_INT_CLR,
++                         FIELD_PREP(CTRL1_INT_CLR, 0));
+ }
+ static irqreturn_t axg_fifo_pcm_irq_block(int irq, void *dev_id)
+@@ -204,7 +205,7 @@ static irqreturn_t axg_fifo_pcm_irq_block(int irq, void *dev_id)
+       regmap_read(fifo->map, FIFO_STATUS1, &status);
+-      status = STATUS1_INT_STS(status) & FIFO_INT_MASK;
++      status = FIELD_GET(STATUS1_INT_STS, status);
+       if (status & FIFO_INT_COUNT_REPEAT)
+               snd_pcm_period_elapsed(ss);
+       else
+@@ -254,15 +255,15 @@ int axg_fifo_pcm_open(struct snd_soc_component *component,
+       /* Setup status2 so it reports the memory pointer */
+       regmap_update_bits(fifo->map, FIFO_CTRL1,
+-                         CTRL1_STATUS2_SEL_MASK,
+-                         CTRL1_STATUS2_SEL(STATUS2_SEL_DDR_READ));
++                         CTRL1_STATUS2_SEL,
++                         FIELD_PREP(CTRL1_STATUS2_SEL, STATUS2_SEL_DDR_READ));
+       /* Make sure the dma is initially disabled */
+       __dma_enable(fifo, false);
+       /* Disable irqs until params are ready */
+       regmap_update_bits(fifo->map, FIFO_CTRL0,
+-                         CTRL0_INT_EN(FIFO_INT_MASK), 0);
++                         CTRL0_INT_EN, 0);
+       /* Clear any pending interrupt */
+       axg_fifo_ack_irq(fifo, FIFO_INT_MASK);
+diff --git a/sound/soc/meson/axg-fifo.h b/sound/soc/meson/axg-fifo.h
+index b63acd723c870..5b7d32c37991b 100644
+--- a/sound/soc/meson/axg-fifo.h
++++ b/sound/soc/meson/axg-fifo.h
+@@ -42,21 +42,19 @@ struct snd_soc_pcm_runtime;
+ #define FIFO_CTRL0                    0x00
+ #define  CTRL0_DMA_EN                 BIT(31)
+-#define  CTRL0_INT_EN(x)              ((x) << 16)
++#define  CTRL0_INT_EN                 GENMASK(23, 16)
+ #define  CTRL0_SEL_MASK                       GENMASK(2, 0)
+ #define  CTRL0_SEL_SHIFT              0
+ #define FIFO_CTRL1                    0x04
+-#define  CTRL1_INT_CLR(x)             ((x) << 0)
+-#define  CTRL1_STATUS2_SEL_MASK               GENMASK(11, 8)
+-#define  CTRL1_STATUS2_SEL(x)         ((x) << 8)
++#define  CTRL1_INT_CLR                        GENMASK(7, 0)
++#define  CTRL1_STATUS2_SEL            GENMASK(11, 8)
+ #define   STATUS2_SEL_DDR_READ                0
+-#define  CTRL1_FRDDR_DEPTH_MASK               GENMASK(31, 24)
+-#define  CTRL1_FRDDR_DEPTH(x)         ((x) << 24)
++#define  CTRL1_FRDDR_DEPTH            GENMASK(31, 24)
+ #define FIFO_START_ADDR                       0x08
+ #define FIFO_FINISH_ADDR              0x0c
+ #define FIFO_INT_ADDR                 0x10
+ #define FIFO_STATUS1                  0x14
+-#define  STATUS1_INT_STS(x)           ((x) << 0)
++#define  STATUS1_INT_STS              GENMASK(7, 0)
+ #define FIFO_STATUS2                  0x18
+ #define FIFO_INIT_ADDR                        0x24
+ #define FIFO_CTRL2                    0x28
+diff --git a/sound/soc/meson/axg-frddr.c b/sound/soc/meson/axg-frddr.c
+index 37f4bb3469b5c..38c731ad40706 100644
+--- a/sound/soc/meson/axg-frddr.c
++++ b/sound/soc/meson/axg-frddr.c
+@@ -7,6 +7,7 @@
+  * This driver implements the frontend playback DAI of AXG and G12A based SoCs
+  */
++#include <linux/bitfield.h>
+ #include <linux/clk.h>
+ #include <linux/regmap.h>
+ #include <linux/module.h>
+@@ -59,8 +60,8 @@ static int axg_frddr_dai_hw_params(struct snd_pcm_substream *substream,
+       /* Trim the FIFO depth if the period is small to improve latency */
+       depth = min(period, fifo->depth);
+       val = (depth / AXG_FIFO_BURST) - 1;
+-      regmap_update_bits(fifo->map, FIFO_CTRL1, CTRL1_FRDDR_DEPTH_MASK,
+-                         CTRL1_FRDDR_DEPTH(val));
++      regmap_update_bits(fifo->map, FIFO_CTRL1, CTRL1_FRDDR_DEPTH,
++                         FIELD_PREP(CTRL1_FRDDR_DEPTH, val));
+       return 0;
+ }
+diff --git a/sound/soc/meson/axg-toddr.c b/sound/soc/meson/axg-toddr.c
+index d6adf7edea41f..85a17d8861f26 100644
+--- a/sound/soc/meson/axg-toddr.c
++++ b/sound/soc/meson/axg-toddr.c
+@@ -5,6 +5,7 @@
+ /* This driver implements the frontend capture DAI of AXG based SoCs */
++#include <linux/bitfield.h>
+ #include <linux/clk.h>
+ #include <linux/regmap.h>
+ #include <linux/module.h>
+@@ -19,12 +20,9 @@
+ #define CTRL0_TODDR_EXT_SIGNED                BIT(29)
+ #define CTRL0_TODDR_PP_MODE           BIT(28)
+ #define CTRL0_TODDR_SYNC_CH           BIT(27)
+-#define CTRL0_TODDR_TYPE_MASK         GENMASK(15, 13)
+-#define CTRL0_TODDR_TYPE(x)           ((x) << 13)
+-#define CTRL0_TODDR_MSB_POS_MASK      GENMASK(12, 8)
+-#define CTRL0_TODDR_MSB_POS(x)                ((x) << 8)
+-#define CTRL0_TODDR_LSB_POS_MASK      GENMASK(7, 3)
+-#define CTRL0_TODDR_LSB_POS(x)                ((x) << 3)
++#define CTRL0_TODDR_TYPE              GENMASK(15, 13)
++#define CTRL0_TODDR_MSB_POS           GENMASK(12, 8)
++#define CTRL0_TODDR_LSB_POS           GENMASK(7, 3)
+ #define CTRL1_TODDR_FORCE_FINISH      BIT(25)
+ #define CTRL1_SEL_SHIFT                       28
+@@ -76,12 +74,12 @@ static int axg_toddr_dai_hw_params(struct snd_pcm_substream *substream,
+       width = params_width(params);
+       regmap_update_bits(fifo->map, FIFO_CTRL0,
+-                         CTRL0_TODDR_TYPE_MASK |
+-                         CTRL0_TODDR_MSB_POS_MASK |
+-                         CTRL0_TODDR_LSB_POS_MASK,
+-                         CTRL0_TODDR_TYPE(type) |
+-                         CTRL0_TODDR_MSB_POS(TODDR_MSB_POS) |
+-                         CTRL0_TODDR_LSB_POS(TODDR_MSB_POS - (width - 1)));
++                         CTRL0_TODDR_TYPE |
++                         CTRL0_TODDR_MSB_POS |
++                         CTRL0_TODDR_LSB_POS,
++                         FIELD_PREP(CTRL0_TODDR_TYPE, type) |
++                         FIELD_PREP(CTRL0_TODDR_MSB_POS, TODDR_MSB_POS) |
++                         FIELD_PREP(CTRL0_TODDR_LSB_POS, TODDR_MSB_POS - (width - 1)));
+       return 0;
+ }
+-- 
+2.43.0
+
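As background for the conversion above, a small self-contained illustration (using a made-up register field, not one from the driver) of how FIELD_PREP()/FIELD_GET() replace hand-written shift macros: the GENMASK() definition carries both the mask and the shift, and the helpers derive the shift at compile time.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_CTRL_INT_EN	GENMASK(23, 16)	/* 8-bit field at bits 23:16 */

static u32 example_set_int_en(u32 reg, u8 irq_mask)
{
	/* Clear the field, then insert irq_mask at the right position. */
	reg &= ~EXAMPLE_CTRL_INT_EN;
	reg |= FIELD_PREP(EXAMPLE_CTRL_INT_EN, irq_mask);
	return reg;
}

static u8 example_get_int_en(u32 reg)
{
	/* Extract the field and shift it down to bit 0. */
	return FIELD_GET(EXAMPLE_CTRL_INT_EN, reg);
}
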
diff --git a/queue-5.15/asoc-meson-axg-fifo-use-threaded-irq-to-check-period.patch b/queue-5.15/asoc-meson-axg-fifo-use-threaded-irq-to-check-period.patch
new file mode 100644 (file)
index 0000000..873e2d5
--- /dev/null
@@ -0,0 +1,89 @@
+From 68d6fbb68d76ba219216e400680dee3ab1f79f89 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Apr 2024 17:29:38 +0200
+Subject: ASoC: meson: axg-fifo: use threaded irq to check periods
+
+From: Jerome Brunet <jbrunet@baylibre.com>
+
+[ Upstream commit b11d26660dff8d7430892008616452dc8e5fb0f3 ]
+
+With the AXG audio subsystem, there is a possible random channel shift on
+TDM capture when the slot number per lane is more than 2 and more than one
+lane is used.
+
+The problem has been there since the introduction of the axg audio support,
+but such a scenario is fairly uncommon, which is why there have been no loud
+complaints about the problem.
+
+Solving the problem requires making the links non-atomic and using the
+trigger() callback to start FEs and BEs in the appropriate order.
+
+This was tried in the past and reverted because it caused the block irq to
+sleep while atomic. However, instead of reverting, the solution is to call
+snd_pcm_period_elapsed() in a non-atomic context.
+
+Use the bottom half of a threaded IRQ to do so.
+
+Fixes: 6dc4fa179fb8 ("ASoC: meson: add axg fifo base driver")
+Signed-off-by: Jerome Brunet <jbrunet@baylibre.com>
+Link: https://lore.kernel.org/r/20240426152946.3078805-2-jbrunet@baylibre.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/meson/axg-fifo.c | 29 +++++++++++++++++++----------
+ 1 file changed, 19 insertions(+), 10 deletions(-)
+
+diff --git a/sound/soc/meson/axg-fifo.c b/sound/soc/meson/axg-fifo.c
+index bde7598750064..94b169a5493b5 100644
+--- a/sound/soc/meson/axg-fifo.c
++++ b/sound/soc/meson/axg-fifo.c
+@@ -204,18 +204,26 @@ static irqreturn_t axg_fifo_pcm_irq_block(int irq, void *dev_id)
+       unsigned int status;
+       regmap_read(fifo->map, FIFO_STATUS1, &status);
+-
+       status = FIELD_GET(STATUS1_INT_STS, status);
++      axg_fifo_ack_irq(fifo, status);
++
++      /* Use the thread to call period elapsed on nonatomic links */
+       if (status & FIFO_INT_COUNT_REPEAT)
+-              snd_pcm_period_elapsed(ss);
+-      else
+-              dev_dbg(axg_fifo_dev(ss), "unexpected irq - STS 0x%02x\n",
+-                      status);
++              return IRQ_WAKE_THREAD;
+-      /* Ack irqs */
+-      axg_fifo_ack_irq(fifo, status);
++      dev_dbg(axg_fifo_dev(ss), "unexpected irq - STS 0x%02x\n",
++              status);
++
++      return IRQ_NONE;
++}
++
++static irqreturn_t axg_fifo_pcm_irq_block_thread(int irq, void *dev_id)
++{
++      struct snd_pcm_substream *ss = dev_id;
++
++      snd_pcm_period_elapsed(ss);
+-      return IRQ_RETVAL(status);
++      return IRQ_HANDLED;
+ }
+ int axg_fifo_pcm_open(struct snd_soc_component *component,
+@@ -243,8 +251,9 @@ int axg_fifo_pcm_open(struct snd_soc_component *component,
+       if (ret)
+               return ret;
+-      ret = request_irq(fifo->irq, axg_fifo_pcm_irq_block, 0,
+-                        dev_name(dev), ss);
++      ret = request_threaded_irq(fifo->irq, axg_fifo_pcm_irq_block,
++                                 axg_fifo_pcm_irq_block_thread,
++                                 IRQF_ONESHOT, dev_name(dev), ss);
+       if (ret)
+               return ret;
+-- 
+2.43.0
+
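A compact, hypothetical sketch of the hard-IRQ/thread split used by the fix above: the primary handler runs in atomic context, acknowledges the hardware and returns IRQ_WAKE_THREAD, while the threaded handler runs in process context where work such as snd_pcm_period_elapsed() on nonatomic links may sleep. The names below are placeholders.

#include <linux/interrupt.h>

static irqreturn_t example_hard_handler(int irq, void *dev_id)
{
	/* Atomic context: ack the hardware, defer the sleepable work. */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t example_thread_handler(int irq, void *dev_id)
{
	/* Process context: sleeping (e.g. taking a mutex) is allowed here. */
	return IRQ_HANDLED;
}

static int example_setup_irq(int irq, void *dev_id, const char *name)
{
	return request_threaded_irq(irq, example_hard_handler,
				    example_thread_handler,
				    IRQF_ONESHOT, name, dev_id);
}
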
diff --git a/queue-5.15/asoc-meson-axg-tdm-interface-manage-formatters-in-tr.patch b/queue-5.15/asoc-meson-axg-tdm-interface-manage-formatters-in-tr.patch
new file mode 100644 (file)
index 0000000..56600e0
--- /dev/null
@@ -0,0 +1,83 @@
+From f2d8691a97d7fa6370cf3299db087c0de99f5c0f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Oct 2021 13:42:17 +0200
+Subject: ASoC: meson: axg-tdm-interface: manage formatters in trigger
+
+From: Jerome Brunet <jbrunet@baylibre.com>
+
+[ Upstream commit bf5e4887eeddb48480568466536aa08ec7f179a5 ]
+
+So far, the formatters have been reset/enabled using the .prepare()
+callback. This was done in this callback because walking the formatters uses
+a mutex, so it could not be done in .trigger(), which is atomic by default.
+
+It turns out there is a problem on the capture path of the AXG series.
+The FIFO may get out of sync with the TDM decoder if the IPs are not enabled
+in a specific order. The FIFO must be enabled before the formatter starts
+producing data. IOW, we must deal with the FE before the BE. The .prepare()
+callback is called on the BEs before the FE, so it is not OK for the AXG.
+
+The .trigger() callback order can be configured, and it deals with the FE
+before the BEs by default. To solve our problem, we just need to start and
+stop the formatters from the .trigger() callback. It is OK to do so now that
+the links have been made 'nonatomic' in the card driver.
+
+Signed-off-by: Jerome Brunet <jbrunet@baylibre.com>
+Link: https://lore.kernel.org/r/20211020114217.133153-3-jbrunet@baylibre.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/meson/axg-tdm-interface.c | 26 +++++++++++++++++++++-----
+ 1 file changed, 21 insertions(+), 5 deletions(-)
+
+diff --git a/sound/soc/meson/axg-tdm-interface.c b/sound/soc/meson/axg-tdm-interface.c
+index 60d132ab1ab78..f5145902360de 100644
+--- a/sound/soc/meson/axg-tdm-interface.c
++++ b/sound/soc/meson/axg-tdm-interface.c
+@@ -362,13 +362,29 @@ static int axg_tdm_iface_hw_free(struct snd_pcm_substream *substream,
+       return 0;
+ }
+-static int axg_tdm_iface_prepare(struct snd_pcm_substream *substream,
++static int axg_tdm_iface_trigger(struct snd_pcm_substream *substream,
++                               int cmd,
+                                struct snd_soc_dai *dai)
+ {
+-      struct axg_tdm_stream *ts = snd_soc_dai_get_dma_data(dai, substream);
++      struct axg_tdm_stream *ts =
++              snd_soc_dai_get_dma_data(dai, substream);
++
++      switch (cmd) {
++      case SNDRV_PCM_TRIGGER_START:
++      case SNDRV_PCM_TRIGGER_RESUME:
++      case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
++              axg_tdm_stream_start(ts);
++              break;
++      case SNDRV_PCM_TRIGGER_SUSPEND:
++      case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
++      case SNDRV_PCM_TRIGGER_STOP:
++              axg_tdm_stream_stop(ts);
++              break;
++      default:
++              return -EINVAL;
++      }
+-      /* Force all attached formatters to update */
+-      return axg_tdm_stream_reset(ts);
++      return 0;
+ }
+ static int axg_tdm_iface_remove_dai(struct snd_soc_dai *dai)
+@@ -408,8 +424,8 @@ static const struct snd_soc_dai_ops axg_tdm_iface_ops = {
+       .set_fmt        = axg_tdm_iface_set_fmt,
+       .startup        = axg_tdm_iface_startup,
+       .hw_params      = axg_tdm_iface_hw_params,
+-      .prepare        = axg_tdm_iface_prepare,
+       .hw_free        = axg_tdm_iface_hw_free,
++      .trigger        = axg_tdm_iface_trigger,
+ };
+ /* TDM Backend DAIs */
+-- 
+2.43.0
+
diff --git a/queue-5.15/asoc-meson-cards-select-snd_dynamic_minors.patch b/queue-5.15/asoc-meson-cards-select-snd_dynamic_minors.patch
new file mode 100644 (file)
index 0000000..07b8414
--- /dev/null
@@ -0,0 +1,43 @@
+From c0ec7edbd2338b23cac7a3634ab7f341c363ce5f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Apr 2024 15:41:47 +0200
+Subject: ASoC: meson: cards: select SND_DYNAMIC_MINORS
+
+From: Jerome Brunet <jbrunet@baylibre.com>
+
+[ Upstream commit 6db26f9ea4edd8a17d39ab3c20111e3ccd704aef ]
+
+Amlogic sound cards do create a lot of pcm interfaces, possibly more than
+8. Some pcm interfaces are internal (like DPCM backends and c2c) and not
+exposed to userspace.
+
+Those interfaces still increase the number passed to snd_find_free_minor(),
+which eventually exceeds 8, causing an -EBUSY error on card registration if
+CONFIG_SND_DYNAMIC_MINORS=n and the interface is exposed to userspace.
+
+select CONFIG_SND_DYNAMIC_MINORS for Amlogic cards to avoid the problem.
+
+Fixes: 7864a79f37b5 ("ASoC: meson: add axg sound card support")
+Signed-off-by: Jerome Brunet <jbrunet@baylibre.com>
+Link: https://lore.kernel.org/r/20240426134150.3053741-1-jbrunet@baylibre.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/meson/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/sound/soc/meson/Kconfig b/sound/soc/meson/Kconfig
+index b93ea33739f29..6458d5dc4902f 100644
+--- a/sound/soc/meson/Kconfig
++++ b/sound/soc/meson/Kconfig
+@@ -99,6 +99,7 @@ config SND_MESON_AXG_PDM
+ config SND_MESON_CARD_UTILS
+       tristate
++      select SND_DYNAMIC_MINORS
+ config SND_MESON_CODEC_GLUE
+       tristate
+-- 
+2.43.0
+
diff --git a/queue-5.15/bna-ensure-the-copied-buf-is-nul-terminated.patch b/queue-5.15/bna-ensure-the-copied-buf-is-nul-terminated.patch
new file mode 100644 (file)
index 0000000..0424488
--- /dev/null
@@ -0,0 +1,49 @@
+From 8371549a271baef479dc343b0e4692af2bd5e6dc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Apr 2024 21:44:19 +0700
+Subject: bna: ensure the copied buf is NUL terminated
+
+From: Bui Quang Minh <minhquangbui99@gmail.com>
+
+[ Upstream commit 8c34096c7fdf272fd4c0c37fe411cd2e3ed0ee9f ]
+
+Currently, we allocate an nbytes-sized kernel buffer and copy nbytes from
+userspace to that buffer. Later, we use sscanf on this buffer but we don't
+ensure that the string is terminated inside the buffer; this can lead to an
+OOB read when using sscanf. Fix this issue by using memdup_user_nul
+instead of memdup_user.
+
+Fixes: 7afc5dbde091 ("bna: Add debugfs interface.")
+Signed-off-by: Bui Quang Minh <minhquangbui99@gmail.com>
+Link: https://lore.kernel.org/r/20240424-fix-oob-read-v2-2-f1f1b53a10f4@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/brocade/bna/bnad_debugfs.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+index 04ad0f2b9677e..777f0d7e48192 100644
+--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
++++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+@@ -312,7 +312,7 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
+       void *kern_buf;
+       /* Copy the user space buf */
+-      kern_buf = memdup_user(buf, nbytes);
++      kern_buf = memdup_user_nul(buf, nbytes);
+       if (IS_ERR(kern_buf))
+               return PTR_ERR(kern_buf);
+@@ -372,7 +372,7 @@ bnad_debugfs_write_regwr(struct file *file, const char __user *buf,
+       void *kern_buf;
+       /* Copy the user space buf */
+-      kern_buf = memdup_user(buf, nbytes);
++      kern_buf = memdup_user_nul(buf, nbytes);
+       if (IS_ERR(kern_buf))
+               return PTR_ERR(kern_buf);
+-- 
+2.43.0
+
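For context, a minimal, hypothetical debugfs-style parser showing why the fix matters: memdup_user_nul() copies the user buffer and appends a terminating NUL, so sscanf() cannot run past the end of the allocation the way it could after memdup_user(). The function name and format string are illustrative only.

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

static ssize_t example_parse_user_buf(const char __user *buf, size_t nbytes)
{
	char *kern_buf;
	u32 addr, len;
	int rc;

	/* Copies nbytes and adds a trailing '\0', unlike memdup_user(). */
	kern_buf = memdup_user_nul(buf, nbytes);
	if (IS_ERR(kern_buf))
		return PTR_ERR(kern_buf);

	rc = sscanf(kern_buf, "%x:%x", &addr, &len);
	kfree(kern_buf);

	return rc == 2 ? nbytes : -EINVAL;
}
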
diff --git a/queue-5.15/bpf-fix-a-verifier-verbose-message.patch b/queue-5.15/bpf-fix-a-verifier-verbose-message.patch
new file mode 100644 (file)
index 0000000..69dd51a
--- /dev/null
@@ -0,0 +1,46 @@
+From 4901a9cd76763806fedc35f8c5821cf397195aab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Apr 2024 16:11:00 +0200
+Subject: bpf: Fix a verifier verbose message
+
+From: Anton Protopopov <aspsk@isovalent.com>
+
+[ Upstream commit 37eacb9f6e89fb399a79e952bc9c78eb3e16290e ]
+
+Long ago a map file descriptor in a pseudo ldimm64 instruction could
+only be present as an immediate value insn[0].imm, and thus this value
+was used in a verbose verifier message printed when the file descriptor
+wasn't valid. Since the addition of BPF_PSEUDO_MAP_IDX_VALUE/BPF_PSEUDO_MAP_IDX
+the insn[0].imm field can also contain an index pointing to the file
+descriptor in the attr.fd_array array. However, if the file descriptor
+is invalid, the verifier still prints the verbose message containing
+value of insn[0].imm. Patch the verifier message to always print the
+actual file descriptor value.
+
+Fixes: 387544bfa291 ("bpf: Introduce fd_idx")
+Signed-off-by: Anton Protopopov <aspsk@isovalent.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/bpf/20240412141100.3562942-1-aspsk@isovalent.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/verifier.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 67b3254270221..94d952967fbf9 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -11875,8 +11875,7 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
+                       f = fdget(fd);
+                       map = __bpf_map_get(f);
+                       if (IS_ERR(map)) {
+-                              verbose(env, "fd %d is not pointing to valid bpf_map\n",
+-                                      insn[0].imm);
++                              verbose(env, "fd %d is not pointing to valid bpf_map\n", fd);
+                               return PTR_ERR(map);
+                       }
+-- 
+2.43.0
+
diff --git a/queue-5.15/bpf-kconfig-fix-debug_info_btf_modules-kconfig-defin.patch b/queue-5.15/bpf-kconfig-fix-debug_info_btf_modules-kconfig-defin.patch
new file mode 100644 (file)
index 0000000..8960ae0
--- /dev/null
@@ -0,0 +1,57 @@
+From 0ded0798a7c1c90d604fe02efd4715bc9d3e9b97 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Apr 2024 15:03:44 -0700
+Subject: bpf, kconfig: Fix DEBUG_INFO_BTF_MODULES Kconfig definition
+
+From: Andrii Nakryiko <andrii@kernel.org>
+
+[ Upstream commit 229087f6f1dc2d0c38feba805770f28529980ec0 ]
+
+Turns out that due to CONFIG_DEBUG_INFO_BTF_MODULES not having an
+explicitly specified "menu item name" in Kconfig, it's basically
+impossible to turn it off (see [0]).
+
+This patch fixes the issue by defining a menu name for
+CONFIG_DEBUG_INFO_BTF_MODULES, which makes it actually adjustable
+and independent of CONFIG_DEBUG_INFO_BTF, in the sense that one can
+have DEBUG_INFO_BTF=y and DEBUG_INFO_BTF_MODULES=n.
+
+We still keep it as defaulting to Y, of course.
+
+Fixes: 5f9ae91f7c0d ("kbuild: Build kernel module BTFs if BTF is enabled and pahole supports it")
+Reported-by: Vincent Li <vincent.mc.li@gmail.com>
+Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/bpf/CAK3+h2xiFfzQ9UXf56nrRRP=p1+iUxGoEP5B+aq9MDT5jLXDSg@mail.gmail.com [0]
+Link: https://lore.kernel.org/bpf/20240404220344.3879270-1-andrii@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ lib/Kconfig.debug | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index 28faea9b5da62..2025b624fbb67 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -319,7 +319,7 @@ config DEBUG_INFO_DWARF5
+ endchoice # "DWARF version"
+ config DEBUG_INFO_BTF
+-      bool "Generate BTF typeinfo"
++      bool "Generate BTF type information"
+       depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED
+       depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST
+       help
+@@ -331,7 +331,8 @@ config PAHOLE_HAS_SPLIT_BTF
+       def_bool PAHOLE_VERSION >= 119
+ config DEBUG_INFO_BTF_MODULES
+-      def_bool y
++      bool "Generate BTF type information for kernel modules"
++      default y
+       depends on DEBUG_INFO_BTF && MODULES && PAHOLE_HAS_SPLIT_BTF
+       help
+         Generate compact split BTF type information for kernel modules.
+-- 
+2.43.0
+
diff --git a/queue-5.15/bpf-skmsg-fix-null-pointer-dereference-in-sk_psock_s.patch b/queue-5.15/bpf-skmsg-fix-null-pointer-dereference-in-sk_psock_s.patch
new file mode 100644 (file)
index 0000000..c8ae895
--- /dev/null
@@ -0,0 +1,120 @@
+From ecd5512a0cf9438649aabe191526c7d821c05899 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Apr 2024 10:10:01 +0800
+Subject: bpf, skmsg: Fix NULL pointer dereference in
+ sk_psock_skb_ingress_enqueue
+
+From: Jason Xing <kernelxing@tencent.com>
+
+[ Upstream commit 6648e613226e18897231ab5e42ffc29e63fa3365 ]
+
+Fix NULL pointer data-races in sk_psock_skb_ingress_enqueue() which
+syzbot reported [1].
+
+[1]
+BUG: KCSAN: data-race in sk_psock_drop / sk_psock_skb_ingress_enqueue
+
+write to 0xffff88814b3278b8 of 8 bytes by task 10724 on cpu 1:
+ sk_psock_stop_verdict net/core/skmsg.c:1257 [inline]
+ sk_psock_drop+0x13e/0x1f0 net/core/skmsg.c:843
+ sk_psock_put include/linux/skmsg.h:459 [inline]
+ sock_map_close+0x1a7/0x260 net/core/sock_map.c:1648
+ unix_release+0x4b/0x80 net/unix/af_unix.c:1048
+ __sock_release net/socket.c:659 [inline]
+ sock_close+0x68/0x150 net/socket.c:1421
+ __fput+0x2c1/0x660 fs/file_table.c:422
+ __fput_sync+0x44/0x60 fs/file_table.c:507
+ __do_sys_close fs/open.c:1556 [inline]
+ __se_sys_close+0x101/0x1b0 fs/open.c:1541
+ __x64_sys_close+0x1f/0x30 fs/open.c:1541
+ do_syscall_64+0xd3/0x1d0
+ entry_SYSCALL_64_after_hwframe+0x6d/0x75
+
+read to 0xffff88814b3278b8 of 8 bytes by task 10713 on cpu 0:
+ sk_psock_data_ready include/linux/skmsg.h:464 [inline]
+ sk_psock_skb_ingress_enqueue+0x32d/0x390 net/core/skmsg.c:555
+ sk_psock_skb_ingress_self+0x185/0x1e0 net/core/skmsg.c:606
+ sk_psock_verdict_apply net/core/skmsg.c:1008 [inline]
+ sk_psock_verdict_recv+0x3e4/0x4a0 net/core/skmsg.c:1202
+ unix_read_skb net/unix/af_unix.c:2546 [inline]
+ unix_stream_read_skb+0x9e/0xf0 net/unix/af_unix.c:2682
+ sk_psock_verdict_data_ready+0x77/0x220 net/core/skmsg.c:1223
+ unix_stream_sendmsg+0x527/0x860 net/unix/af_unix.c:2339
+ sock_sendmsg_nosec net/socket.c:730 [inline]
+ __sock_sendmsg+0x140/0x180 net/socket.c:745
+ ____sys_sendmsg+0x312/0x410 net/socket.c:2584
+ ___sys_sendmsg net/socket.c:2638 [inline]
+ __sys_sendmsg+0x1e9/0x280 net/socket.c:2667
+ __do_sys_sendmsg net/socket.c:2676 [inline]
+ __se_sys_sendmsg net/socket.c:2674 [inline]
+ __x64_sys_sendmsg+0x46/0x50 net/socket.c:2674
+ do_syscall_64+0xd3/0x1d0
+ entry_SYSCALL_64_after_hwframe+0x6d/0x75
+
+value changed: 0xffffffff83d7feb0 -> 0x0000000000000000
+
+Reported by Kernel Concurrency Sanitizer on:
+CPU: 0 PID: 10713 Comm: syz-executor.4 Tainted: G        W          6.8.0-syzkaller-08951-gfe46a7dd189e #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 02/29/2024
+
+Prior to this, commit 4cd12c6065df ("bpf, sockmap: Fix NULL pointer
+dereference in sk_psock_verdict_data_ready()") fixed a similar NULL pointer
+dereference caused by missing protection of saved_data_ready. Here, another
+caller triggers the same issue for the same reason, so we should protect the
+access with the sk_callback_lock read lock, because the writer side in
+sk_psock_drop() uses "write_lock_bh(&sk->sk_callback_lock);".
+
+To avoid errors that could happen in the future, move those two pairs of
+lock/unlock calls into sk_psock_data_ready(), as suggested by John Fastabend.
+
+Fixes: 604326b41a6f ("bpf, sockmap: convert to generic sk_msg interface")
+Reported-by: syzbot+aa8c8ec2538929f18f2d@syzkaller.appspotmail.com
+Signed-off-by: Jason Xing <kernelxing@tencent.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Reviewed-by: John Fastabend <john.fastabend@gmail.com>
+Closes: https://syzkaller.appspot.com/bug?extid=aa8c8ec2538929f18f2d
+Link: https://lore.kernel.org/all/20240329134037.92124-1-kerneljasonxing@gmail.com
+Link: https://lore.kernel.org/bpf/20240404021001.94815-1-kerneljasonxing@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/skmsg.h | 2 ++
+ net/core/skmsg.c      | 5 +----
+ 2 files changed, 3 insertions(+), 4 deletions(-)
+
+diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
+index 4273505d309a7..f18eb6a6f7631 100644
+--- a/include/linux/skmsg.h
++++ b/include/linux/skmsg.h
+@@ -462,10 +462,12 @@ static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
+ static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock)
+ {
++      read_lock_bh(&sk->sk_callback_lock);
+       if (psock->saved_data_ready)
+               psock->saved_data_ready(sk);
+       else
+               sk->sk_data_ready(sk);
++      read_unlock_bh(&sk->sk_callback_lock);
+ }
+ static inline void psock_set_prog(struct bpf_prog **pprog,
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index 349a1d055a064..6bdb15b05a78d 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -1223,11 +1223,8 @@ static void sk_psock_verdict_data_ready(struct sock *sk)
+               rcu_read_lock();
+               psock = sk_psock(sk);
+-              if (psock) {
+-                      read_lock_bh(&sk->sk_callback_lock);
++              if (psock)
+                       sk_psock_data_ready(sk, psock);
+-                      read_unlock_bh(&sk->sk_callback_lock);
+-              }
+               rcu_read_unlock();
+       }
+ }
+-- 
+2.43.0
+
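A simplified, hypothetical sketch of the locking scheme this fix settles on: readers take sk->sk_callback_lock as a BH-disabling read lock before dereferencing the saved callback, pairing with the write_lock_bh() that sk_psock_drop() holds while clearing it. The helper names are placeholders, not the kernel's own.

#include <net/sock.h>

/* Reader side: call the saved callback under the read lock. */
static void example_data_ready(struct sock *sk, void (*saved)(struct sock *))
{
	read_lock_bh(&sk->sk_callback_lock);
	if (saved)
		saved(sk);
	else
		sk->sk_data_ready(sk);
	read_unlock_bh(&sk->sk_callback_lock);
}

/* Writer side: clear the saved callback under the write lock. */
static void example_clear_callback(struct sock *sk,
				   void (**saved)(struct sock *))
{
	write_lock_bh(&sk->sk_callback_lock);
	*saved = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}
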
diff --git a/queue-5.15/bpf-sockmap-avoid-potential-null-dereference-in-sk_p.patch b/queue-5.15/bpf-sockmap-avoid-potential-null-dereference-in-sk_p.patch
new file mode 100644 (file)
index 0000000..660e4e5
--- /dev/null
@@ -0,0 +1,93 @@
+From b8beca67eaa0d62d309047f1e0474ac189cf61f5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 May 2023 19:51:49 +0000
+Subject: bpf, sockmap: Avoid potential NULL dereference in
+ sk_psock_verdict_data_ready()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit b320a45638296b63be8d9a901ca8bc43716b1ae1 ]
+
+syzbot found sk_psock(sk) could return NULL when called
+from sk_psock_verdict_data_ready().
+
+Just make sure to handle this case.
+
+[1]
+general protection fault, probably for non-canonical address 0xdffffc000000005c: 0000 [#1] PREEMPT SMP KASAN
+KASAN: null-ptr-deref in range [0x00000000000002e0-0x00000000000002e7]
+CPU: 0 PID: 15 Comm: ksoftirqd/0 Not tainted 6.4.0-rc3-syzkaller-00588-g4781e965e655 #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 05/16/2023
+RIP: 0010:sk_psock_verdict_data_ready+0x19f/0x3c0 net/core/skmsg.c:1213
+Code: 4c 89 e6 e8 63 70 5e f9 4d 85 e4 75 75 e8 19 74 5e f9 48 8d bb e0 02 00 00 48 b8 00 00 00 00 00 fc ff df 48 89 fa 48 c1 ea 03 <80> 3c 02 00 0f 85 07 02 00 00 48 89 ef ff 93 e0 02 00 00 e8 29 fd
+RSP: 0018:ffffc90000147688 EFLAGS: 00010206
+RAX: dffffc0000000000 RBX: 0000000000000000 RCX: 0000000000000100
+RDX: 000000000000005c RSI: ffffffff8825ceb7 RDI: 00000000000002e0
+RBP: ffff888076518c40 R08: 0000000000000007 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000001 R12: 0000000000000000
+R13: 0000000000000000 R14: 0000000000008000 R15: ffff888076518c40
+FS: 0000000000000000(0000) GS:ffff8880b9800000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007f901375bab0 CR3: 000000004bf26000 CR4: 00000000003506f0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+<TASK>
+tcp_data_ready+0x10a/0x520 net/ipv4/tcp_input.c:5006
+tcp_data_queue+0x25d3/0x4c50 net/ipv4/tcp_input.c:5080
+tcp_rcv_established+0x829/0x1f90 net/ipv4/tcp_input.c:6019
+tcp_v4_do_rcv+0x65a/0x9c0 net/ipv4/tcp_ipv4.c:1726
+tcp_v4_rcv+0x2cbf/0x3340 net/ipv4/tcp_ipv4.c:2148
+ip_protocol_deliver_rcu+0x9f/0x480 net/ipv4/ip_input.c:205
+ip_local_deliver_finish+0x2ec/0x520 net/ipv4/ip_input.c:233
+NF_HOOK include/linux/netfilter.h:303 [inline]
+NF_HOOK include/linux/netfilter.h:297 [inline]
+ip_local_deliver+0x1ae/0x200 net/ipv4/ip_input.c:254
+dst_input include/net/dst.h:468 [inline]
+ip_rcv_finish+0x1cf/0x2f0 net/ipv4/ip_input.c:449
+NF_HOOK include/linux/netfilter.h:303 [inline]
+NF_HOOK include/linux/netfilter.h:297 [inline]
+ip_rcv+0xae/0xd0 net/ipv4/ip_input.c:569
+__netif_receive_skb_one_core+0x114/0x180 net/core/dev.c:5491
+__netif_receive_skb+0x1f/0x1c0 net/core/dev.c:5605
+process_backlog+0x101/0x670 net/core/dev.c:5933
+__napi_poll+0xb7/0x6f0 net/core/dev.c:6499
+napi_poll net/core/dev.c:6566 [inline]
+net_rx_action+0x8a9/0xcb0 net/core/dev.c:6699
+__do_softirq+0x1d4/0x905 kernel/softirq.c:571
+run_ksoftirqd kernel/softirq.c:939 [inline]
+run_ksoftirqd+0x31/0x60 kernel/softirq.c:931
+smpboot_thread_fn+0x659/0x9e0 kernel/smpboot.c:164
+kthread+0x344/0x440 kernel/kthread.c:379
+ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:308
+</TASK>
+
+Fixes: 6df7f764cd3c ("bpf, sockmap: Wake up polling after data copy")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Reviewed-by: John Fastabend <john.fastabend@gmail.com>
+Link: https://lore.kernel.org/bpf/20230530195149.68145-1-edumazet@google.com
+Stable-dep-of: 6648e613226e ("bpf, skmsg: Fix NULL pointer dereference in sk_psock_skb_ingress_enqueue")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/skmsg.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index 75554adef5df9..4b851a43cb0b7 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -1223,7 +1223,8 @@ static void sk_psock_verdict_data_ready(struct sock *sk)
+               rcu_read_lock();
+               psock = sk_psock(sk);
+-              psock->saved_data_ready(sk);
++              if (psock)
++                      psock->saved_data_ready(sk);
+               rcu_read_unlock();
+       }
+ }
+-- 
+2.43.0
+
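A condensed, hypothetical sketch of the lookup pattern these sockmap fixes converge on: sk_psock() may return NULL once the psock has been dropped, so the result must be checked under rcu_read_lock() before the saved callback is used (the companion skmsg fix in this queue additionally wraps the call in sk->sk_callback_lock). The wrapper name is illustrative only.

#include <linux/rcupdate.h>
#include <linux/skmsg.h>
#include <net/sock.h>

/* Hypothetical data-ready wrapper guarding against a vanished psock. */
static void example_verdict_data_ready(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);	/* may be NULL if the psock was dropped */
	if (psock && psock->saved_data_ready)
		psock->saved_data_ready(sk);
	rcu_read_unlock();
}
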
diff --git a/queue-5.15/bpf-sockmap-fix-null-pointer-dereference-in-sk_psock.patch b/queue-5.15/bpf-sockmap-fix-null-pointer-dereference-in-sk_psock.patch
new file mode 100644 (file)
index 0000000..ffd54ac
--- /dev/null
@@ -0,0 +1,70 @@
+From b7955538434060b99101a56673962f0695f0da85 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 19 Feb 2024 00:09:33 +0900
+Subject: bpf, sockmap: Fix NULL pointer dereference in
+ sk_psock_verdict_data_ready()
+
+From: Shigeru Yoshida <syoshida@redhat.com>
+
+[ Upstream commit 4cd12c6065dfcdeba10f49949bffcf383b3952d8 ]
+
+syzbot reported the following NULL pointer dereference issue [1]:
+
+  BUG: kernel NULL pointer dereference, address: 0000000000000000
+  [...]
+  RIP: 0010:0x0
+  [...]
+  Call Trace:
+   <TASK>
+   sk_psock_verdict_data_ready+0x232/0x340 net/core/skmsg.c:1230
+   unix_stream_sendmsg+0x9b4/0x1230 net/unix/af_unix.c:2293
+   sock_sendmsg_nosec net/socket.c:730 [inline]
+   __sock_sendmsg+0x221/0x270 net/socket.c:745
+   ____sys_sendmsg+0x525/0x7d0 net/socket.c:2584
+   ___sys_sendmsg net/socket.c:2638 [inline]
+   __sys_sendmsg+0x2b0/0x3a0 net/socket.c:2667
+   do_syscall_64+0xf9/0x240
+   entry_SYSCALL_64_after_hwframe+0x6f/0x77
+
+If sk_psock_verdict_data_ready() and sk_psock_stop_verdict() are called
+concurrently, psock->saved_data_ready can be NULL, causing the above issue.
+
+This patch fixes this issue by calling the appropriate data ready function
+using the sk_psock_data_ready() helper and protecting it from concurrency
+with sk->sk_callback_lock.
+
+Fixes: 6df7f764cd3c ("bpf, sockmap: Wake up polling after data copy")
+Reported-by: syzbot+fd7b34375c1c8ce29c93@syzkaller.appspotmail.com
+Signed-off-by: Shigeru Yoshida <syoshida@redhat.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Tested-by: syzbot+fd7b34375c1c8ce29c93@syzkaller.appspotmail.com
+Acked-by: John Fastabend <john.fastabend@gmail.com>
+Closes: https://syzkaller.appspot.com/bug?extid=fd7b34375c1c8ce29c93 [1]
+Link: https://lore.kernel.org/bpf/20240218150933.6004-1-syoshida@redhat.com
+Stable-dep-of: 6648e613226e ("bpf, skmsg: Fix NULL pointer dereference in sk_psock_skb_ingress_enqueue")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/skmsg.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index 4b851a43cb0b7..349a1d055a064 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -1223,8 +1223,11 @@ static void sk_psock_verdict_data_ready(struct sock *sk)
+               rcu_read_lock();
+               psock = sk_psock(sk);
+-              if (psock)
+-                      psock->saved_data_ready(sk);
++              if (psock) {
++                      read_lock_bh(&sk->sk_callback_lock);
++                      sk_psock_data_ready(sk, psock);
++                      read_unlock_bh(&sk->sk_callback_lock);
++              }
+               rcu_read_unlock();
+       }
+ }
+-- 
+2.43.0
+
diff --git a/queue-5.15/bpf-sockmap-wake-up-polling-after-data-copy.patch b/queue-5.15/bpf-sockmap-wake-up-polling-after-data-copy.patch
new file mode 100644 (file)
index 0000000..b00ac6a
--- /dev/null
@@ -0,0 +1,61 @@
+From 345ad8e80d4dd3f71799f527f3eabb77d9429e2a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 22 May 2023 19:56:11 -0700
+Subject: bpf, sockmap: Wake up polling after data copy
+
+From: John Fastabend <john.fastabend@gmail.com>
+
+[ Upstream commit 6df7f764cd3cf5a03a4a47b23be47e57e41fcd85 ]
+
+When the TCP stack has data ready to read, sk_data_ready() is called. Sockmap
+overwrites this with its own handler to call into BPF verdict program.
+But, the original TCP socket had sock_def_readable that would additionally
+wake up any user space waiters with sk_wake_async().
+
+Sockmap saved the callback when the socket was created so call the saved
+data ready callback and then we can wake up any epoll() logic waiting
+on the read.
+
+Note we do this on 'copied >= 0' to account for returning 0 when a FIN is
+received, because we need to wake up the user for this as well so they
+can do the recvmsg() -> 0 and detect the shutdown.
+
+Fixes: 04919bed948dc ("tcp: Introduce tcp_read_skb()")
+Signed-off-by: John Fastabend <john.fastabend@gmail.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Reviewed-by: Jakub Sitnicki <jakub@cloudflare.com>
+Link: https://lore.kernel.org/bpf/20230523025618.113937-8-john.fastabend@gmail.com
+Stable-dep-of: 6648e613226e ("bpf, skmsg: Fix NULL pointer dereference in sk_psock_skb_ingress_enqueue")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/skmsg.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index 68418954ac492..75554adef5df9 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -1213,10 +1213,19 @@ static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb)
+ static void sk_psock_verdict_data_ready(struct sock *sk)
+ {
+       struct socket *sock = sk->sk_socket;
++      int copied;
+       if (unlikely(!sock || !sock->ops || !sock->ops->read_skb))
+               return;
+-      sock->ops->read_skb(sk, sk_psock_verdict_recv);
++      copied = sock->ops->read_skb(sk, sk_psock_verdict_recv);
++      if (copied >= 0) {
++              struct sk_psock *psock;
++
++              rcu_read_lock();
++              psock = sk_psock(sk);
++              psock->saved_data_ready(sk);
++              rcu_read_unlock();
++      }
+ }
+ void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
+-- 
+2.43.0
+
diff --git a/queue-5.15/cxgb4-properly-lock-tx-queue-for-the-selftest.patch b/queue-5.15/cxgb4-properly-lock-tx-queue-for-the-selftest.patch
new file mode 100644 (file)
index 0000000..52bca85
--- /dev/null
@@ -0,0 +1,62 @@
+From f86a6d2c1fadfc4771cc75ab393e52ded1b18e68 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 29 Apr 2024 11:11:47 +0200
+Subject: cxgb4: Properly lock TX queue for the selftest.
+
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+[ Upstream commit 9067eccdd7849dd120d5495dbd5a686fa6ed2c1a ]
+
+The selftest for the driver sends a dummy packet and checks if the
+packet will be received properly as it should be. The regular TX path
+and the selftest can use the same network queue so locking is required
+and was missing in the selftest path. This was addressed in the commit
+cited below.
+Unfortunately, locking the TX queue requires BH to be disabled, which is
+not the case in the selftest path, which is invoked in process context.
+Lockdep should be complaining about this.
+
+Use __netif_tx_lock_bh() for TX queue locking.
+
+Fixes: c650e04898072 ("cxgb4: Fix race between loopback and normal Tx path")
+Reported-by: "John B. Wyatt IV" <jwyatt@redhat.com>
+Closes: https://lore.kernel.org/all/Zic0ot5aGgR-V4Ks@thinkpad2021/
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/20240429091147.YWAaal4v@linutronix.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/chelsio/cxgb4/sge.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+index fa5b596ff23a1..a074e9d44277f 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+@@ -2682,12 +2682,12 @@ int cxgb4_selftest_lb_pkt(struct net_device *netdev)
+       lb->loopback = 1;
+       q = &adap->sge.ethtxq[pi->first_qset];
+-      __netif_tx_lock(q->txq, smp_processor_id());
++      __netif_tx_lock_bh(q->txq);
+       reclaim_completed_tx(adap, &q->q, -1, true);
+       credits = txq_avail(&q->q) - ndesc;
+       if (unlikely(credits < 0)) {
+-              __netif_tx_unlock(q->txq);
++              __netif_tx_unlock_bh(q->txq);
+               return -ENOMEM;
+       }
+@@ -2722,7 +2722,7 @@ int cxgb4_selftest_lb_pkt(struct net_device *netdev)
+       init_completion(&lb->completion);
+       txq_advance(&q->q, ndesc);
+       cxgb4_ring_tx_db(adap, &q->q, ndesc);
+-      __netif_tx_unlock(q->txq);
++      __netif_tx_unlock_bh(q->txq);
+       /* wait for the pkt to return */
+       ret = wait_for_completion_timeout(&lb->completion, 10 * HZ);
+-- 
+2.43.0
+
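A minimal, hypothetical sketch of the queue-locking variant the fix switches to: __netif_tx_lock_bh() disables bottom halves and then takes the per-queue xmit lock, which is the right choice when the caller runs in process context rather than in the TX softirq path. The helper below is a placeholder, not driver code.

#include <linux/netdevice.h>

/* Hypothetical process-context helper that touches a TX queue directly. */
static void example_touch_txq(struct net_device *dev, unsigned int qidx)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, qidx);

	/* Disables BH, then takes the per-queue xmit lock. */
	__netif_tx_lock_bh(txq);

	/* ... reclaim descriptors / post a loopback packet here ... */

	__netif_tx_unlock_bh(txq);
}
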
diff --git a/queue-5.15/drm-panel-ili9341-respect-deferred-probe.patch b/queue-5.15/drm-panel-ili9341-respect-deferred-probe.patch
new file mode 100644 (file)
index 0000000..abcef3f
--- /dev/null
@@ -0,0 +1,50 @@
+From 9f9038074e9dd1bb9f860dc77cedb0f6c6150373 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 25 Apr 2024 17:26:18 +0300
+Subject: drm/panel: ili9341: Respect deferred probe
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+[ Upstream commit 740fc1e0509be3f7e2207e89125b06119ed62943 ]
+
+The GPIO controller might not be available when the driver is being probed.
+There are plenty of reasons why, one of which is deferred probe.
+
+Since GPIOs are optional, return any error code we got to the upper
+layer, including deferred probe. With that in mind, use dev_err_probe()
+in order to avoid spamming the logs.
+
+Fixes: 5a04227326b0 ("drm/panel: Add ilitek ili9341 panel driver")
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Reviewed-by: Neil Armstrong <neil.armstrong@linaro.org>
+Reviewed-by: Sui Jingfeng <sui.jingfeng@linux.dev>
+Link: https://lore.kernel.org/r/20240425142706.2440113-3-andriy.shevchenko@linux.intel.com
+Signed-off-by: Neil Armstrong <neil.armstrong@linaro.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240425142706.2440113-3-andriy.shevchenko@linux.intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/panel/panel-ilitek-ili9341.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
+index e1542451ef9d0..f8afa922fe9ca 100644
+--- a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
++++ b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
+@@ -716,11 +716,11 @@ static int ili9341_probe(struct spi_device *spi)
+       reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+       if (IS_ERR(reset))
+-              dev_err(dev, "Failed to get gpio 'reset'\n");
++              return dev_err_probe(dev, PTR_ERR(reset), "Failed to get gpio 'reset'\n");
+       dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW);
+       if (IS_ERR(dc))
+-              dev_err(dev, "Failed to get gpio 'dc'\n");
++              return dev_err_probe(dev, PTR_ERR(dc), "Failed to get gpio 'dc'\n");
+       if (!strcmp(id->name, "sf-tc240t-9370-t"))
+               return ili9341_dpi_probe(spi, dc, reset);
+-- 
+2.43.0
+
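A short, hypothetical sketch of the pattern the fix adopts: the optional GPIO lookup returns NULL when the line is absent and an ERR_PTR (possibly -EPROBE_DEFER) on failure, and dev_err_probe() both propagates that code and keeps deferred-probe cases out of the log. The helper name is illustrative only.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_get_reset_gpio(struct device *dev,
				  struct gpio_desc **reset)
{
	/* Optional GPIO: NULL if absent, ERR_PTR on failure (incl. -EPROBE_DEFER). */
	*reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(*reset))
		return dev_err_probe(dev, PTR_ERR(*reset),
				     "Failed to get gpio 'reset'\n");

	return 0;
}
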
diff --git a/queue-5.15/drm-panel-ili9341-use-predefined-error-codes.patch b/queue-5.15/drm-panel-ili9341-use-predefined-error-codes.patch
new file mode 100644 (file)
index 0000000..8ab5fb3
--- /dev/null
@@ -0,0 +1,53 @@
+From 0672d22a7adb33bf9149c9c5c10d0b05e9115927 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 25 Apr 2024 17:26:19 +0300
+Subject: drm/panel: ili9341: Use predefined error codes
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+[ Upstream commit da85f0aaa9f21999753b01d45c0343f885a8f905 ]
+
+In one case -1 is returned for a wrong device ID, which is quite
+confusing; in another, ret is returned instead of a plain 0, which is
+also confusing, as the reader may wonder about the possible meaning of
+positive codes, which never occur there. Convert both to use explicit
+predefined error codes to make it clear what's going on there.
+
+Fixes: 5a04227326b0 ("drm/panel: Add ilitek ili9341 panel driver")
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Reviewed-by: Neil Armstrong <neil.armstrong@linaro.org>
+Reviewed-by: Sui Jingfeng <sui.jingfeng@linux.dev>
+Link: https://lore.kernel.org/r/20240425142706.2440113-4-andriy.shevchenko@linux.intel.com
+Signed-off-by: Neil Armstrong <neil.armstrong@linaro.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240425142706.2440113-4-andriy.shevchenko@linux.intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/panel/panel-ilitek-ili9341.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
+index f8afa922fe9ca..0d89779de22b0 100644
+--- a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
++++ b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
+@@ -420,7 +420,7 @@ static int ili9341_dpi_prepare(struct drm_panel *panel)
+       ili9341_dpi_init(ili);
+-      return ret;
++      return 0;
+ }
+ static int ili9341_dpi_enable(struct drm_panel *panel)
+@@ -727,7 +727,7 @@ static int ili9341_probe(struct spi_device *spi)
+       else if (!strcmp(id->name, "yx240qv29"))
+               return ili9341_dbi_probe(spi, dc, reset);
+-      return -1;
++      return -ENODEV;
+ }
+ static int ili9341_remove(struct spi_device *spi)
+-- 
+2.43.0
+
diff --git a/queue-5.15/net-bridge-fix-multicast-to-unicast-with-fraglist-gs.patch b/queue-5.15/net-bridge-fix-multicast-to-unicast-with-fraglist-gs.patch
new file mode 100644 (file)
index 0000000..ef1acac
--- /dev/null
@@ -0,0 +1,39 @@
+From 49cc327e933948c194b23f2754bcfc2c27acc166 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 27 Apr 2024 20:24:18 +0200
+Subject: net: bridge: fix multicast-to-unicast with fraglist GSO
+
+From: Felix Fietkau <nbd@nbd.name>
+
+[ Upstream commit 59c878cbcdd80ed39315573b3511d0acfd3501b5 ]
+
+Calling skb_copy on a SKB_GSO_FRAGLIST skb is not valid, since it returns
+an invalid linearized skb. This code only needs to change the ethernet
+header, so pskb_copy is the right function to call here.
+
+Fixes: 6db6f0eae605 ("bridge: multicast to unicast")
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Acked-by: Paolo Abeni <pabeni@redhat.com>
+Acked-by: Nikolay Aleksandrov <razor@blackwall.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bridge/br_forward.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
+index 011bd3c59da19..0bdd2892646db 100644
+--- a/net/bridge/br_forward.c
++++ b/net/bridge/br_forward.c
+@@ -261,7 +261,7 @@ static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb,
+       if (skb->dev == p->dev && ether_addr_equal(src, addr))
+               return;
+-      skb = skb_copy(skb, GFP_ATOMIC);
++      skb = pskb_copy(skb, GFP_ATOMIC);
+       if (!skb) {
+               DEV_STATS_INC(dev, tx_dropped);
+               return;
+-- 
+2.43.0
+
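To illustrate the distinction the fix relies on, a tiny hypothetical helper: pskb_copy() copies only the skb header portion and shares the paged/fraglist data, which is enough when just the ethernet header is rewritten, whereas skb_copy() would linearize a SKB_GSO_FRAGLIST skb into an invalid form. It assumes the mac header is already set, as it is in the bridge forwarding path.

#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>

/* Hypothetical: clone an skb and rewrite only its destination MAC. */
static struct sk_buff *example_copy_for_new_dst(struct sk_buff *skb,
						const u8 *new_dst)
{
	struct sk_buff *n;

	/* Header-only copy; frags/frag_list stay shared, GSO state is kept. */
	n = pskb_copy(skb, GFP_ATOMIC);
	if (!n)
		return NULL;

	ether_addr_copy(eth_hdr(n)->h_dest, new_dst);
	return n;
}
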
diff --git a/queue-5.15/net-core-reject-skb_copy-_expand-for-fraglist-gso-sk.patch b/queue-5.15/net-core-reject-skb_copy-_expand-for-fraglist-gso-sk.patch
new file mode 100644 (file)
index 0000000..b6608e2
--- /dev/null
@@ -0,0 +1,73 @@
+From 93cd48b65ec207b619520f459fad403010def6f1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 27 Apr 2024 20:24:19 +0200
+Subject: net: core: reject skb_copy(_expand) for fraglist GSO skbs
+
+From: Felix Fietkau <nbd@nbd.name>
+
+[ Upstream commit d091e579b864fa790dd6a0cd537a22c383126681 ]
+
+SKB_GSO_FRAGLIST skbs must not be linearized, otherwise they become
+invalid. Return NULL if such an skb is passed to skb_copy or
+skb_copy_expand, in order to prevent a crash on a potential later
+call to skb_gso_segment.
+
+Fixes: 3a1296a38d0c ("net: Support GRO/GSO fraglist chaining.")
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/skbuff.c | 27 +++++++++++++++++++--------
+ 1 file changed, 19 insertions(+), 8 deletions(-)
+
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index a42431860af9a..4ec8cfd357eba 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -1583,11 +1583,17 @@ static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
+ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
+ {
+-      int headerlen = skb_headroom(skb);
+-      unsigned int size = skb_end_offset(skb) + skb->data_len;
+-      struct sk_buff *n = __alloc_skb(size, gfp_mask,
+-                                      skb_alloc_rx_flag(skb), NUMA_NO_NODE);
++      struct sk_buff *n;
++      unsigned int size;
++      int headerlen;
++
++      if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
++              return NULL;
++      headerlen = skb_headroom(skb);
++      size = skb_end_offset(skb) + skb->data_len;
++      n = __alloc_skb(size, gfp_mask,
++                      skb_alloc_rx_flag(skb), NUMA_NO_NODE);
+       if (!n)
+               return NULL;
+@@ -1899,12 +1905,17 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
+       /*
+        *      Allocate the copy buffer
+        */
+-      struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
+-                                      gfp_mask, skb_alloc_rx_flag(skb),
+-                                      NUMA_NO_NODE);
+-      int oldheadroom = skb_headroom(skb);
+       int head_copy_len, head_copy_off;
++      struct sk_buff *n;
++      int oldheadroom;
++
++      if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
++              return NULL;
++      oldheadroom = skb_headroom(skb);
++      n = __alloc_skb(newheadroom + skb->len + newtailroom,
++                      gfp_mask, skb_alloc_rx_flag(skb),
++                      NUMA_NO_NODE);
+       if (!n)
+               return NULL;
+-- 
+2.43.0
+
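The guard added above follows a common pattern; a minimal sketch with assumed
semantics, not the patch itself: refuse unsupported input early with
WARN_ON_ONCE() and return NULL, so the callers' existing allocation-failure
handling also covers the new error case.

#include <linux/bug.h>
#include <linux/skbuff.h>

static struct sk_buff *copy_if_supported(const struct sk_buff *skb, gfp_t gfp)
{
        /* Linearizing a fraglist GSO skb would produce a broken copy. */
        if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
                return NULL;

        return skb_copy(skb, gfp);
}
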
diff --git a/queue-5.15/net-dsa-mv88e6xxx-fix-number-of-databases-for-88e614.patch b/queue-5.15/net-dsa-mv88e6xxx-fix-number-of-databases-for-88e614.patch
new file mode 100644 (file)
index 0000000..2c60544
--- /dev/null
@@ -0,0 +1,52 @@
+From 0b6b7d6d3aec176795e1a2a2f31e8139eb7f7e06 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 29 Apr 2024 15:38:32 +0200
+Subject: net: dsa: mv88e6xxx: Fix number of databases for 88E6141 / 88E6341
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Marek Behún <kabel@kernel.org>
+
+[ Upstream commit b9a61c20179fda7bdfe2c1210aa72451991ab81a ]
+
+The Topaz family (88E6141 and 88E6341) only supports 256 Forwarding
+Information Tables.
+
+Fixes: a75961d0ebfd ("net: dsa: mv88e6xxx: Add support for ethernet switch 88E6341")
+Fixes: 1558727a1c1b ("net: dsa: mv88e6xxx: Add support for ethernet switch 88E6141")
+Signed-off-by: Marek Behún <kabel@kernel.org>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Link: https://lore.kernel.org/r/20240429133832.9547-1-kabel@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/mv88e6xxx/chip.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 30fba1ea933e3..3fc120802883a 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -5116,7 +5116,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+               .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6141,
+               .family = MV88E6XXX_FAMILY_6341,
+               .name = "Marvell 88E6141",
+-              .num_databases = 4096,
++              .num_databases = 256,
+               .num_macs = 2048,
+               .num_ports = 6,
+               .num_internal_phys = 5,
+@@ -5559,7 +5559,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+               .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
+               .family = MV88E6XXX_FAMILY_6341,
+               .name = "Marvell 88E6341",
+-              .num_databases = 4096,
++              .num_databases = 256,
+               .num_macs = 2048,
+               .num_internal_phys = 5,
+               .num_ports = 6,
+-- 
+2.43.0
+
diff --git a/queue-5.15/net-gro-add-flush-check-in-udp_gro_receive_segment.patch b/queue-5.15/net-gro-add-flush-check-in-udp_gro_receive_segment.patch
new file mode 100644 (file)
index 0000000..27842a0
--- /dev/null
@@ -0,0 +1,61 @@
+From 59583f01ecf388ca3a7d0a01f58ca97c1377b6c5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Apr 2024 16:35:55 +0200
+Subject: net: gro: add flush check in udp_gro_receive_segment
+
+From: Richard Gobert <richardbgobert@gmail.com>
+
+[ Upstream commit 5babae777c61aa8a8679d59d3cdc54165ad96d42 ]
+
+The GRO-GSO path is supposed to be transparent and as such L3 flush checks
+are relevant to all UDP flows merging in GRO. This patch uses the same logic
+and code as tcp_gro_receive, terminating the merge if flush is non-zero.
+
+Fixes: e20cf8d3f1f7 ("udp: implement GRO for plain UDP sockets.")
+Signed-off-by: Richard Gobert <richardbgobert@gmail.com>
+Reviewed-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/udp_offload.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
+index 7c6ac47b0bb18..c61268849948a 100644
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -434,6 +434,7 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
+       struct sk_buff *p;
+       unsigned int ulen;
+       int ret = 0;
++      int flush;
+       /* requires non zero csum, for symmetry with GSO */
+       if (!uh->check) {
+@@ -467,13 +468,22 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
+                       return p;
+               }
++              flush = NAPI_GRO_CB(p)->flush;
++
++              if (NAPI_GRO_CB(p)->flush_id != 1 ||
++                  NAPI_GRO_CB(p)->count != 1 ||
++                  !NAPI_GRO_CB(p)->is_atomic)
++                      flush |= NAPI_GRO_CB(p)->flush_id;
++              else
++                      NAPI_GRO_CB(p)->is_atomic = false;
++
+               /* Terminate the flow on len mismatch or if it grow "too much".
+                * Under small packet flood GRO count could elsewhere grow a lot
+                * leading to excessive truesize values.
+                * On len mismatch merge the first packet shorter than gso_size,
+                * otherwise complete the GRO packet.
+                */
+-              if (ulen > ntohs(uh2->len)) {
++              if (ulen > ntohs(uh2->len) || flush) {
+                       pp = p;
+               } else {
+                       if (NAPI_GRO_CB(skb)->is_flist) {
+-- 
+2.43.0
+
diff --git a/queue-5.15/net-introduce-a-new-proto_ops-read_skb.patch b/queue-5.15/net-introduce-a-new-proto_ops-read_skb.patch
new file mode 100644 (file)
index 0000000..7c7a4b1
--- /dev/null
@@ -0,0 +1,333 @@
+From 2e55a8ad6cdcf880e5bae3a674eafefb0ac63458 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Jun 2022 09:20:12 -0700
+Subject: net: Introduce a new proto_ops ->read_skb()
+
+From: Cong Wang <cong.wang@bytedance.com>
+
+[ Upstream commit 965b57b469a589d64d81b1688b38dcb537011bb0 ]
+
+Currently both splice() and sockmap use ->read_sock() to read skbs
+from the receive queue, but sockmap only reads one entire skb at a
+time, so ->read_sock() is too conservative to use. Introduce a new
+proto_ops ->read_skb() which supports this semantic; with this we can
+finally pass ownership of the skb to the recv actors.
+
+For non-TCP protocols, all ->read_sock() implementations can simply
+be converted to ->read_skb().
+
+Signed-off-by: Cong Wang <cong.wang@bytedance.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Reviewed-by: John Fastabend <john.fastabend@gmail.com>
+Link: https://lore.kernel.org/bpf/20220615162014.89193-3-xiyou.wangcong@gmail.com
+Stable-dep-of: 6648e613226e ("bpf, skmsg: Fix NULL pointer dereference in sk_psock_skb_ingress_enqueue")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/net.h |  4 ++++
+ include/net/tcp.h   |  3 +--
+ include/net/udp.h   |  3 +--
+ net/core/skmsg.c    | 20 +++++---------------
+ net/ipv4/af_inet.c  |  3 ++-
+ net/ipv4/tcp.c      |  9 +++------
+ net/ipv4/udp.c      | 10 ++++------
+ net/ipv6/af_inet6.c |  3 ++-
+ net/unix/af_unix.c  | 23 +++++++++--------------
+ 9 files changed, 31 insertions(+), 47 deletions(-)
+
+diff --git a/include/linux/net.h b/include/linux/net.h
+index ba736b457a068..3e000dadeb8f3 100644
+--- a/include/linux/net.h
++++ b/include/linux/net.h
+@@ -133,6 +133,8 @@ struct module;
+ struct sk_buff;
+ typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
+                              unsigned int, size_t);
++typedef int (*skb_read_actor_t)(struct sock *, struct sk_buff *);
++
+ struct proto_ops {
+       int             family;
+@@ -195,6 +197,8 @@ struct proto_ops {
+        */
+       int             (*read_sock)(struct sock *sk, read_descriptor_t *desc,
+                                    sk_read_actor_t recv_actor);
++      /* This is different from read_sock(), it reads an entire skb at a time. */
++      int             (*read_skb)(struct sock *sk, skb_read_actor_t recv_actor);
+       int             (*sendpage_locked)(struct sock *sk, struct page *page,
+                                          int offset, size_t size, int flags);
+       int             (*sendmsg_locked)(struct sock *sk, struct msghdr *msg,
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 3047a8b3dbd1c..bdc5a16af8190 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -663,8 +663,7 @@ void tcp_get_info(struct sock *, struct tcp_info *);
+ /* Read 'sendfile()'-style from a TCP socket */
+ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
+                 sk_read_actor_t recv_actor);
+-int tcp_read_skb(struct sock *sk, read_descriptor_t *desc,
+-               sk_read_actor_t recv_actor);
++int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
+ void tcp_initialize_rcv_mss(struct sock *sk);
+diff --git a/include/net/udp.h b/include/net/udp.h
+index 10508c66e7a19..20ae344bc1082 100644
+--- a/include/net/udp.h
++++ b/include/net/udp.h
+@@ -329,8 +329,7 @@ struct sock *__udp6_lib_lookup(struct net *net,
+                              struct sk_buff *skb);
+ struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
+                                __be16 sport, __be16 dport);
+-int udp_read_sock(struct sock *sk, read_descriptor_t *desc,
+-                sk_read_actor_t recv_actor);
++int udp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
+ /* UDP uses skb->dev_scratch to cache as much information as possible and avoid
+  * possibly multiple cache miss on dequeue()
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index 9cd14212dcd0b..68418954ac492 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -1173,21 +1173,17 @@ static void sk_psock_done_strp(struct sk_psock *psock)
+ }
+ #endif /* CONFIG_BPF_STREAM_PARSER */
+-static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
+-                               unsigned int offset, size_t orig_len)
++static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb)
+ {
+-      struct sock *sk = (struct sock *)desc->arg.data;
+       struct sk_psock *psock;
+       struct bpf_prog *prog;
+       int ret = __SK_DROP;
+-      int len = orig_len;
++      int len = skb->len;
+       /* clone here so sk_eat_skb() in tcp_read_sock does not drop our data */
+       skb = skb_clone(skb, GFP_ATOMIC);
+-      if (!skb) {
+-              desc->error = -ENOMEM;
++      if (!skb)
+               return 0;
+-      }
+       rcu_read_lock();
+       psock = sk_psock(sk);
+@@ -1217,16 +1213,10 @@ static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
+ static void sk_psock_verdict_data_ready(struct sock *sk)
+ {
+       struct socket *sock = sk->sk_socket;
+-      read_descriptor_t desc;
+-      if (unlikely(!sock || !sock->ops || !sock->ops->read_sock))
++      if (unlikely(!sock || !sock->ops || !sock->ops->read_skb))
+               return;
+-
+-      desc.arg.data = sk;
+-      desc.error = 0;
+-      desc.count = 1;
+-
+-      sock->ops->read_sock(sk, &desc, sk_psock_verdict_recv);
++      sock->ops->read_skb(sk, sk_psock_verdict_recv);
+ }
+ void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index 487f75993bf4f..f931d2534ab42 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -1050,6 +1050,7 @@ const struct proto_ops inet_stream_ops = {
+       .sendpage          = inet_sendpage,
+       .splice_read       = tcp_splice_read,
+       .read_sock         = tcp_read_sock,
++      .read_skb          = tcp_read_skb,
+       .sendmsg_locked    = tcp_sendmsg_locked,
+       .sendpage_locked   = tcp_sendpage_locked,
+       .peek_len          = tcp_peek_len,
+@@ -1077,7 +1078,7 @@ const struct proto_ops inet_dgram_ops = {
+       .setsockopt        = sock_common_setsockopt,
+       .getsockopt        = sock_common_getsockopt,
+       .sendmsg           = inet_sendmsg,
+-      .read_sock         = udp_read_sock,
++      .read_skb          = udp_read_skb,
+       .recvmsg           = inet_recvmsg,
+       .mmap              = sock_no_mmap,
+       .sendpage          = inet_sendpage,
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 3647a5f08c22d..3fd4de1961a62 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1705,8 +1705,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
+ }
+ EXPORT_SYMBOL(tcp_read_sock);
+-int tcp_read_skb(struct sock *sk, read_descriptor_t *desc,
+-               sk_read_actor_t recv_actor)
++int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
+ {
+       struct tcp_sock *tp = tcp_sk(sk);
+       u32 seq = tp->copied_seq;
+@@ -1721,7 +1720,7 @@ int tcp_read_skb(struct sock *sk, read_descriptor_t *desc,
+               int used;
+               __skb_unlink(skb, &sk->sk_receive_queue);
+-              used = recv_actor(desc, skb, 0, skb->len);
++              used = recv_actor(sk, skb);
+               if (used <= 0) {
+                       if (!copied)
+                               copied = used;
+@@ -1736,9 +1735,7 @@ int tcp_read_skb(struct sock *sk, read_descriptor_t *desc,
+                       break;
+               }
+               consume_skb(skb);
+-              if (!desc->count)
+-                      break;
+-              WRITE_ONCE(tp->copied_seq, seq);
++              break;
+       }
+       WRITE_ONCE(tp->copied_seq, seq);
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index d0387e5eee5b5..6a054bfb17850 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1819,8 +1819,7 @@ struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
+ }
+ EXPORT_SYMBOL(__skb_recv_udp);
+-int udp_read_sock(struct sock *sk, read_descriptor_t *desc,
+-                sk_read_actor_t recv_actor)
++int udp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
+ {
+       int copied = 0;
+@@ -1842,7 +1841,7 @@ int udp_read_sock(struct sock *sk, read_descriptor_t *desc,
+                       continue;
+               }
+-              used = recv_actor(desc, skb, 0, skb->len);
++              used = recv_actor(sk, skb);
+               if (used <= 0) {
+                       if (!copied)
+                               copied = used;
+@@ -1853,13 +1852,12 @@ int udp_read_sock(struct sock *sk, read_descriptor_t *desc,
+               }
+               kfree_skb(skb);
+-              if (!desc->count)
+-                      break;
++              break;
+       }
+       return copied;
+ }
+-EXPORT_SYMBOL(udp_read_sock);
++EXPORT_SYMBOL(udp_read_skb);
+ /*
+  *    This should be easy, if there is something there we
+diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
+index 8e0c33b683010..1b7749882c7ab 100644
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -707,6 +707,7 @@ const struct proto_ops inet6_stream_ops = {
+       .sendpage_locked   = tcp_sendpage_locked,
+       .splice_read       = tcp_splice_read,
+       .read_sock         = tcp_read_sock,
++      .read_skb          = tcp_read_skb,
+       .peek_len          = tcp_peek_len,
+ #ifdef CONFIG_COMPAT
+       .compat_ioctl      = inet6_compat_ioctl,
+@@ -732,7 +733,7 @@ const struct proto_ops inet6_dgram_ops = {
+       .getsockopt        = sock_common_getsockopt,    /* ok           */
+       .sendmsg           = inet6_sendmsg,             /* retpoline's sake */
+       .recvmsg           = inet6_recvmsg,             /* retpoline's sake */
+-      .read_sock         = udp_read_sock,
++      .read_skb          = udp_read_skb,
+       .mmap              = sock_no_mmap,
+       .sendpage          = sock_no_sendpage,
+       .set_peek_off      = sk_set_peek_off,
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index f66f867049015..bf610fad8775d 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -700,10 +700,8 @@ static ssize_t unix_stream_splice_read(struct socket *,  loff_t *ppos,
+                                      unsigned int flags);
+ static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
+ static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
+-static int unix_read_sock(struct sock *sk, read_descriptor_t *desc,
+-                        sk_read_actor_t recv_actor);
+-static int unix_stream_read_sock(struct sock *sk, read_descriptor_t *desc,
+-                               sk_read_actor_t recv_actor);
++static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
++static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
+ static int unix_dgram_connect(struct socket *, struct sockaddr *,
+                             int, int);
+ static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
+@@ -757,7 +755,7 @@ static const struct proto_ops unix_stream_ops = {
+       .shutdown =     unix_shutdown,
+       .sendmsg =      unix_stream_sendmsg,
+       .recvmsg =      unix_stream_recvmsg,
+-      .read_sock =    unix_stream_read_sock,
++      .read_skb =     unix_stream_read_skb,
+       .mmap =         sock_no_mmap,
+       .sendpage =     unix_stream_sendpage,
+       .splice_read =  unix_stream_splice_read,
+@@ -782,7 +780,7 @@ static const struct proto_ops unix_dgram_ops = {
+       .listen =       sock_no_listen,
+       .shutdown =     unix_shutdown,
+       .sendmsg =      unix_dgram_sendmsg,
+-      .read_sock =    unix_read_sock,
++      .read_skb =     unix_read_skb,
+       .recvmsg =      unix_dgram_recvmsg,
+       .mmap =         sock_no_mmap,
+       .sendpage =     sock_no_sendpage,
+@@ -2412,8 +2410,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t si
+       return __unix_dgram_recvmsg(sk, msg, size, flags);
+ }
+-static int unix_read_sock(struct sock *sk, read_descriptor_t *desc,
+-                        sk_read_actor_t recv_actor)
++static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
+ {
+       int copied = 0;
+@@ -2428,7 +2425,7 @@ static int unix_read_sock(struct sock *sk, read_descriptor_t *desc,
+               if (!skb)
+                       return err;
+-              used = recv_actor(desc, skb, 0, skb->len);
++              used = recv_actor(sk, skb);
+               if (used <= 0) {
+                       if (!copied)
+                               copied = used;
+@@ -2439,8 +2436,7 @@ static int unix_read_sock(struct sock *sk, read_descriptor_t *desc,
+               }
+               kfree_skb(skb);
+-              if (!desc->count)
+-                      break;
++              break;
+       }
+       return copied;
+@@ -2580,13 +2576,12 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
+ }
+ #endif
+-static int unix_stream_read_sock(struct sock *sk, read_descriptor_t *desc,
+-                               sk_read_actor_t recv_actor)
++static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
+ {
+       if (unlikely(sk->sk_state != TCP_ESTABLISHED))
+               return -ENOTCONN;
+-      return unix_read_sock(sk, desc, recv_actor);
++      return unix_read_skb(sk, recv_actor);
+ }
+ static int unix_stream_read_generic(struct unix_stream_read_state *state,
+-- 
+2.43.0
+
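To show the shape of the new hook (callback and hook names below are purely
hypothetical, not from the patch): a ->read_skb() user supplies one
skb_read_actor_t callback and no longer builds a read_descriptor_t; the
callback returns the number of bytes it consumed, or a value <= 0 to stop.

#include <linux/net.h>
#include <linux/printk.h>
#include <linux/skbuff.h>
#include <net/sock.h>

static int example_recv_actor(struct sock *sk, struct sk_buff *skb)
{
        /* e.g. hand the skb to a verdict program or a local queue */
        pr_debug("received %u bytes\n", skb->len);
        return skb->len;        /* consumed the whole skb */
}

static void example_data_ready(struct sock *sk)
{
        struct socket *sock = sk->sk_socket;

        if (sock && sock->ops && sock->ops->read_skb)
                sock->ops->read_skb(sk, example_recv_actor);
}
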
diff --git a/queue-5.15/net-l2tp-drop-flow-hash-on-forward.patch b/queue-5.15/net-l2tp-drop-flow-hash-on-forward.patch
new file mode 100644 (file)
index 0000000..4684acc
--- /dev/null
@@ -0,0 +1,49 @@
+From d07c935da8b1e864459b6058789a3325162f2318 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Apr 2024 19:11:10 +0200
+Subject: net l2tp: drop flow hash on forward
+
+From: David Bauer <mail@david-bauer.net>
+
+[ Upstream commit 42f853b42899d9b445763b55c3c8adc72be0f0e1 ]
+
+Drop the flow-hash of the skb when forwarding to the L2TP netdev.
+
+This prevents the L2TP qdisc from using the flow-hash of the outer
+packet, which is identical for every flow within the tunnel.
+
+This does not affect every platform; it is specific to the ethernet
+driver and depends on the platform including L4 information in the
+flow-hash.
+
+One such example is the Mediatek Filogic MT798x family of networking
+processors.
+
+Fixes: d9e31d17ceba ("l2tp: Add L2TP ethernet pseudowire support")
+Acked-by: James Chapman <jchapman@katalix.com>
+Signed-off-by: David Bauer <mail@david-bauer.net>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://lore.kernel.org/r/20240424171110.13701-1-mail@david-bauer.net
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/l2tp/l2tp_eth.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
+index 6cd97c75445c8..9a36e174984cf 100644
+--- a/net/l2tp/l2tp_eth.c
++++ b/net/l2tp/l2tp_eth.c
+@@ -136,6 +136,9 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
+       /* checksums verified by L2TP */
+       skb->ip_summed = CHECKSUM_NONE;
++      /* drop outer flow-hash */
++      skb_clear_hash(skb);
++
+       skb_dst_drop(skb);
+       nf_reset_ct(skb);
+-- 
+2.43.0
+
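A hedged sketch of the broader pattern the fix follows (helper name is
hypothetical): when an skb is handed to a different flow domain, scrub
per-flow state that was computed for the outer packet so the stack recomputes
it for the inner one. skb_dst_drop() and nf_reset_ct() were already done in
this path per the patch context; skb_clear_hash() is what the patch adds.

#include <linux/skbuff.h>

static void scrub_outer_flow_state(struct sk_buff *skb)
{
        skb_clear_hash(skb);    /* outer flow-hash is meaningless for the inner flow */
        skb_dst_drop(skb);      /* outer route does not apply to the new netdev */
        nf_reset_ct(skb);       /* forget outer conntrack state */
}
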
diff --git a/queue-5.15/net-qede-sanitize-rc-in-qede_add_tc_flower_fltr.patch b/queue-5.15/net-qede-sanitize-rc-in-qede_add_tc_flower_fltr.patch
new file mode 100644 (file)
index 0000000..7322a1d
--- /dev/null
@@ -0,0 +1,76 @@
+From 0aac863a84e29c423064cc7f49b226e9cfb0e369 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Apr 2024 09:12:23 +0000
+Subject: net: qede: sanitize 'rc' in qede_add_tc_flower_fltr()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Asbjørn Sloth Tønnesen <ast@fiberby.net>
+
+[ Upstream commit e25714466abd9d96901b15efddf82c60a38abd86 ]
+
+Explicitly set 'rc' (the return code) before jumping to the
+unlock-and-return path.
+
+With no code depending on 'rc' remaining at its initial value of
+-EINVAL, we can reuse 'rc' for the return code of function calls in
+subsequent patches.
+
+Only compile tested.
+
+Signed-off-by: Asbjørn Sloth Tønnesen <ast@fiberby.net>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: fcee2065a178 ("net: qede: use return from qede_parse_flow_attr() for flower")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/qlogic/qede/qede_filter.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
+index 3010833ddde33..76aa5934e985b 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
+@@ -1868,8 +1868,8 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
+                           struct flow_cls_offload *f)
+ {
+       struct qede_arfs_fltr_node *n;
+-      int min_hlen, rc = -EINVAL;
+       struct qede_arfs_tuple t;
++      int min_hlen, rc;
+       __qede_lock(edev);
+@@ -1879,8 +1879,10 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
+       }
+       /* parse flower attribute and prepare filter */
+-      if (qede_parse_flow_attr(edev, proto, f->rule, &t))
++      if (qede_parse_flow_attr(edev, proto, f->rule, &t)) {
++              rc = -EINVAL;
+               goto unlock;
++      }
+       /* Validate profile mode and number of filters */
+       if ((edev->arfs->filter_count && edev->arfs->mode != t.mode) ||
+@@ -1888,12 +1890,15 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
+               DP_NOTICE(edev,
+                         "Filter configuration invalidated, filter mode=0x%x, configured mode=0x%x, filter count=0x%x\n",
+                         t.mode, edev->arfs->mode, edev->arfs->filter_count);
++              rc = -EINVAL;
+               goto unlock;
+       }
+       /* parse tc actions and get the vf_id */
+-      if (qede_parse_actions(edev, &f->rule->action, f->common.extack))
++      if (qede_parse_actions(edev, &f->rule->action, f->common.extack)) {
++              rc = -EINVAL;
+               goto unlock;
++      }
+       if (qede_flow_find_fltr(edev, &t)) {
+               rc = -EEXIST;
+-- 
+2.43.0
+
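A minimal sketch of the error-path convention this patch and its follow-ups
move toward (all foo_* names and types are hypothetical): set rc explicitly
on every failure path, or take it straight from the callee, rather than
relying on an initial rc = -EINVAL.

#include <linux/errno.h>
#include <linux/mutex.h>

struct foo_ctx {
        struct mutex lock;
};

static int foo_parse(struct foo_ctx *ctx);      /* hypothetical, returns 0 or -errno */
static int foo_install(struct foo_ctx *ctx);    /* hypothetical, returns 0 or -errno */

static int foo_add_filter(struct foo_ctx *ctx)
{
        int rc;

        mutex_lock(&ctx->lock);

        rc = foo_parse(ctx);
        if (rc)
                goto unlock;    /* propagate the callee's errno unchanged */

        rc = foo_install(ctx);
unlock:
        mutex_unlock(&ctx->lock);
        return rc;
}
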
diff --git a/queue-5.15/net-qede-use-return-from-qede_parse_actions.patch b/queue-5.15/net-qede-use-return-from-qede_parse_actions.patch
new file mode 100644 (file)
index 0000000..b8b500c
--- /dev/null
@@ -0,0 +1,57 @@
+From 767a9bbaf73fbd7c5fb59ac29cf394ff101d0228 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Apr 2024 09:12:26 +0000
+Subject: net: qede: use return from qede_parse_actions()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Asbjørn Sloth Tønnesen <ast@fiberby.net>
+
+[ Upstream commit f26f719a36e56381a1f4230e5364e7ad4d485888 ]
+
+When calling qede_parse_actions(), the return code was only
+used for a non-zero check, and then -EINVAL was returned.
+
+qede_parse_actions() can currently fail with:
+* -EINVAL
+* -EOPNOTSUPP
+
+This patch changes the code to use the actual
+return code, not just return -EINVAL.
+
+The blamed commit broke the implicit assumption
+that only -EINVAL would ever be returned.
+
+Only compile tested.
+
+Fixes: 319a1d19471e ("flow_offload: check for basic action hw stats type")
+Signed-off-by: Asbjørn Sloth Tønnesen <ast@fiberby.net>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/qlogic/qede/qede_filter.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
+index aeff091cdfaee..8871099b99d8a 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
+@@ -1894,10 +1894,9 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
+       }
+       /* parse tc actions and get the vf_id */
+-      if (qede_parse_actions(edev, &f->rule->action, f->common.extack)) {
+-              rc = -EINVAL;
++      rc = qede_parse_actions(edev, &f->rule->action, f->common.extack);
++      if (rc)
+               goto unlock;
+-      }
+       if (qede_flow_find_fltr(edev, &t)) {
+               rc = -EEXIST;
+-- 
+2.43.0
+
diff --git a/queue-5.15/net-qede-use-return-from-qede_parse_flow_attr-for-fl.patch b/queue-5.15/net-qede-use-return-from-qede_parse_flow_attr-for-fl.patch
new file mode 100644 (file)
index 0000000..df19aae
--- /dev/null
@@ -0,0 +1,58 @@
+From 9a628f044dfc74118e43ce81d5946a181e1b63bc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Apr 2024 09:12:24 +0000
+Subject: net: qede: use return from qede_parse_flow_attr() for flower
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Asbjørn Sloth Tønnesen <ast@fiberby.net>
+
+[ Upstream commit fcee2065a178f78be6fd516302830378b17dba3d ]
+
+In qede_add_tc_flower_fltr(), when calling qede_parse_flow_attr(),
+the return code was only used for a non-zero check, and then
+-EINVAL was returned.
+
+qede_parse_flow_attr() can currently fail with:
+* -EINVAL
+* -EOPNOTSUPP
+* -EPROTONOSUPPORT
+
+This patch changes the code to use the actual
+return code, not just return -EINVAL.
+
+The blamed commit introduced these functions.
+
+Only compile tested.
+
+Fixes: 2ce9c93eaca6 ("qede: Ingress tc flower offload (drop action) support.")
+Signed-off-by: Asbjørn Sloth Tønnesen <ast@fiberby.net>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/qlogic/qede/qede_filter.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
+index 76aa5934e985b..aedb98713bbf2 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
+@@ -1879,10 +1879,9 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
+       }
+       /* parse flower attribute and prepare filter */
+-      if (qede_parse_flow_attr(edev, proto, f->rule, &t)) {
+-              rc = -EINVAL;
++      rc = qede_parse_flow_attr(edev, proto, f->rule, &t);
++      if (rc)
+               goto unlock;
+-      }
+       /* Validate profile mode and number of filters */
+       if ((edev->arfs->filter_count && edev->arfs->mode != t.mode) ||
+-- 
+2.43.0
+
diff --git a/queue-5.15/net-qede-use-return-from-qede_parse_flow_attr-for-fl.patch-8226 b/queue-5.15/net-qede-use-return-from-qede_parse_flow_attr-for-fl.patch-8226
new file mode 100644 (file)
index 0000000..14af999
--- /dev/null
@@ -0,0 +1,60 @@
+From 9405b69fabdadf048978f36330f868642ef0a4d0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Apr 2024 09:12:25 +0000
+Subject: net: qede: use return from qede_parse_flow_attr() for flow_spec
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Asbjørn Sloth Tønnesen <ast@fiberby.net>
+
+[ Upstream commit 27b44414a34b108c5a37cd5b4894f606061d86e7 ]
+
+In qede_flow_spec_to_rule(), when calling qede_parse_flow_attr(),
+the return code was only used for a non-zero check, and then
+-EINVAL was returned.
+
+qede_parse_flow_attr() can currently fail with:
+* -EINVAL
+* -EOPNOTSUPP
+* -EPROTONOSUPPORT
+
+This patch changes the code to use the actual
+return code, not just return -EINVAL.
+
+The blamed commit introduced qede_flow_spec_to_rule() and this call
+to qede_parse_flow_attr(); it looks like it just duplicated how it
+was already used.
+
+Only compile tested.
+
+Fixes: 37c5d3efd7f8 ("qede: use ethtool_rx_flow_rule() to remove duplicated parser code")
+Signed-off-by: Asbjørn Sloth Tønnesen <ast@fiberby.net>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/qlogic/qede/qede_filter.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
+index aedb98713bbf2..aeff091cdfaee 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
+@@ -2002,10 +2002,9 @@ static int qede_flow_spec_to_rule(struct qede_dev *edev,
+       if (IS_ERR(flow))
+               return PTR_ERR(flow);
+-      if (qede_parse_flow_attr(edev, proto, flow->rule, t)) {
+-              err = -EINVAL;
++      err = qede_parse_flow_attr(edev, proto, flow->rule, t);
++      if (err)
+               goto err_out;
+-      }
+       /* Make sure location is valid and filter isn't already set */
+       err = qede_flow_spec_validate(edev, &flow->rule->action, t,
+-- 
+2.43.0
+
diff --git a/queue-5.15/nsh-restore-skb-protocol-data-mac_header-for-outer-h.patch b/queue-5.15/nsh-restore-skb-protocol-data-mac_header-for-outer-h.patch
new file mode 100644 (file)
index 0000000..35caf0e
--- /dev/null
@@ -0,0 +1,181 @@
+From 0dc3e514172f75bae705db2b59fa1edcccf75320 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Apr 2024 19:35:49 -0700
+Subject: nsh: Restore skb->{protocol,data,mac_header} for outer header in
+ nsh_gso_segment().
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit 4b911a9690d72641879ea6d13cce1de31d346d79 ]
+
+syzbot triggered various splats (see [0] and links) by a crafted GSO
+packet of VIRTIO_NET_HDR_GSO_UDP layering the following protocols:
+
+  ETH_P_8021AD + ETH_P_NSH + ETH_P_IPV6 + IPPROTO_UDP
+
+NSH can encapsulate IPv4, IPv6, Ethernet, NSH, and MPLS.  As the inner
+protocol can be Ethernet, NSH GSO handler, nsh_gso_segment(), calls
+skb_mac_gso_segment() to invoke inner protocol GSO handlers.
+
+nsh_gso_segment() does the following for the original skb before
+calling skb_mac_gso_segment()
+
+  1. reset skb->network_header
+  2. save the original skb->{mac_header,mac_len} in a local variable
+  3. pull the NSH header
+  4. reset skb->mac_header
+  5. set up skb->mac_len and skb->protocol for the inner protocol.
+
+and does the following for the segmented skb
+
+  6. set ntohs(ETH_P_NSH) to skb->protocol
+  7. push the NSH header
+  8. restore skb->mac_header
+  9. set skb->mac_header + mac_len to skb->network_header
+ 10. restore skb->mac_len
+
+There are two problems in 6-7 and 8-9.
+
+  (a)
+  After 6 & 7, skb->data points to the NSH header, so the outer header
+  (ETH_P_8021AD in this case) is stripped when skb is sent out of netdev.
+
+  Also, if NSH is encapsulated by NSH + Ethernet (so NSH-Ethernet-NSH),
+  skb_pull() in the first nsh_gso_segment() will make skb->data point
+  to the middle of the outer NSH or Ethernet header because the Ethernet
+  header is not pulled by the second nsh_gso_segment().
+
+  (b)
+  While restoring skb->{mac_header,network_header} in 8 & 9,
+  nsh_gso_segment() does not assume that the data in the linear
+  buffer is shifted.
+
+  However, udp6_ufo_fragment() could shift the data and change
+  skb->mac_header accordingly as demonstrated by syzbot.
+
+  If this happens, even the restored skb->mac_header points to
+  the middle of the outer header.
+
+It seems nsh_gso_segment() has never worked with outer headers so far.
+
+At the end of nsh_gso_segment(), the outer header must be restored for
+the segmented skb, instead of the NSH header.
+
+To do that, let's calculate the outer header position relatively from
+the inner header and set skb->{data,mac_header,protocol} properly.
+
+[0]:
+BUG: KMSAN: uninit-value in ipvlan_process_outbound drivers/net/ipvlan/ipvlan_core.c:524 [inline]
+BUG: KMSAN: uninit-value in ipvlan_xmit_mode_l3 drivers/net/ipvlan/ipvlan_core.c:602 [inline]
+BUG: KMSAN: uninit-value in ipvlan_queue_xmit+0xf44/0x16b0 drivers/net/ipvlan/ipvlan_core.c:668
+ ipvlan_process_outbound drivers/net/ipvlan/ipvlan_core.c:524 [inline]
+ ipvlan_xmit_mode_l3 drivers/net/ipvlan/ipvlan_core.c:602 [inline]
+ ipvlan_queue_xmit+0xf44/0x16b0 drivers/net/ipvlan/ipvlan_core.c:668
+ ipvlan_start_xmit+0x5c/0x1a0 drivers/net/ipvlan/ipvlan_main.c:222
+ __netdev_start_xmit include/linux/netdevice.h:4989 [inline]
+ netdev_start_xmit include/linux/netdevice.h:5003 [inline]
+ xmit_one net/core/dev.c:3547 [inline]
+ dev_hard_start_xmit+0x244/0xa10 net/core/dev.c:3563
+ __dev_queue_xmit+0x33ed/0x51c0 net/core/dev.c:4351
+ dev_queue_xmit include/linux/netdevice.h:3171 [inline]
+ packet_xmit+0x9c/0x6b0 net/packet/af_packet.c:276
+ packet_snd net/packet/af_packet.c:3081 [inline]
+ packet_sendmsg+0x8aef/0x9f10 net/packet/af_packet.c:3113
+ sock_sendmsg_nosec net/socket.c:730 [inline]
+ __sock_sendmsg net/socket.c:745 [inline]
+ __sys_sendto+0x735/0xa10 net/socket.c:2191
+ __do_sys_sendto net/socket.c:2203 [inline]
+ __se_sys_sendto net/socket.c:2199 [inline]
+ __x64_sys_sendto+0x125/0x1c0 net/socket.c:2199
+ do_syscall_x64 arch/x86/entry/common.c:52 [inline]
+ do_syscall_64+0xcf/0x1e0 arch/x86/entry/common.c:83
+ entry_SYSCALL_64_after_hwframe+0x63/0x6b
+
+Uninit was created at:
+ slab_post_alloc_hook mm/slub.c:3819 [inline]
+ slab_alloc_node mm/slub.c:3860 [inline]
+ __do_kmalloc_node mm/slub.c:3980 [inline]
+ __kmalloc_node_track_caller+0x705/0x1000 mm/slub.c:4001
+ kmalloc_reserve+0x249/0x4a0 net/core/skbuff.c:582
+ __alloc_skb+0x352/0x790 net/core/skbuff.c:651
+ skb_segment+0x20aa/0x7080 net/core/skbuff.c:4647
+ udp6_ufo_fragment+0xcab/0x1150 net/ipv6/udp_offload.c:109
+ ipv6_gso_segment+0x14be/0x2ca0 net/ipv6/ip6_offload.c:152
+ skb_mac_gso_segment+0x3e8/0x760 net/core/gso.c:53
+ nsh_gso_segment+0x6f4/0xf70 net/nsh/nsh.c:108
+ skb_mac_gso_segment+0x3e8/0x760 net/core/gso.c:53
+ __skb_gso_segment+0x4b0/0x730 net/core/gso.c:124
+ skb_gso_segment include/net/gso.h:83 [inline]
+ validate_xmit_skb+0x107f/0x1930 net/core/dev.c:3628
+ __dev_queue_xmit+0x1f28/0x51c0 net/core/dev.c:4343
+ dev_queue_xmit include/linux/netdevice.h:3171 [inline]
+ packet_xmit+0x9c/0x6b0 net/packet/af_packet.c:276
+ packet_snd net/packet/af_packet.c:3081 [inline]
+ packet_sendmsg+0x8aef/0x9f10 net/packet/af_packet.c:3113
+ sock_sendmsg_nosec net/socket.c:730 [inline]
+ __sock_sendmsg net/socket.c:745 [inline]
+ __sys_sendto+0x735/0xa10 net/socket.c:2191
+ __do_sys_sendto net/socket.c:2203 [inline]
+ __se_sys_sendto net/socket.c:2199 [inline]
+ __x64_sys_sendto+0x125/0x1c0 net/socket.c:2199
+ do_syscall_x64 arch/x86/entry/common.c:52 [inline]
+ do_syscall_64+0xcf/0x1e0 arch/x86/entry/common.c:83
+ entry_SYSCALL_64_after_hwframe+0x63/0x6b
+
+CPU: 1 PID: 5101 Comm: syz-executor421 Not tainted 6.8.0-rc5-syzkaller-00297-gf2e367d6ad3b #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/25/2024
+
+Fixes: c411ed854584 ("nsh: add GSO support")
+Reported-and-tested-by: syzbot+42a0dc856239de4de60e@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=42a0dc856239de4de60e
+Reported-and-tested-by: syzbot+c298c9f0e46a3c86332b@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=c298c9f0e46a3c86332b
+Link: https://lore.kernel.org/netdev/20240415222041.18537-1-kuniyu@amazon.com/
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Link: https://lore.kernel.org/r/20240424023549.21862-1-kuniyu@amazon.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/nsh/nsh.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+diff --git a/net/nsh/nsh.c b/net/nsh/nsh.c
+index 0f23e5e8e03eb..3e0fc71d95a14 100644
+--- a/net/nsh/nsh.c
++++ b/net/nsh/nsh.c
+@@ -76,13 +76,15 @@ EXPORT_SYMBOL_GPL(nsh_pop);
+ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
+                                      netdev_features_t features)
+ {
++      unsigned int outer_hlen, mac_len, nsh_len;
+       struct sk_buff *segs = ERR_PTR(-EINVAL);
+       u16 mac_offset = skb->mac_header;
+-      unsigned int nsh_len, mac_len;
+-      __be16 proto;
++      __be16 outer_proto, proto;
+       skb_reset_network_header(skb);
++      outer_proto = skb->protocol;
++      outer_hlen = skb_mac_header_len(skb);
+       mac_len = skb->mac_len;
+       if (unlikely(!pskb_may_pull(skb, NSH_BASE_HDR_LEN)))
+@@ -112,10 +114,10 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
+       }
+       for (skb = segs; skb; skb = skb->next) {
+-              skb->protocol = htons(ETH_P_NSH);
+-              __skb_push(skb, nsh_len);
+-              skb->mac_header = mac_offset;
+-              skb->network_header = skb->mac_header + mac_len;
++              skb->protocol = outer_proto;
++              __skb_push(skb, nsh_len + outer_hlen);
++              skb_reset_mac_header(skb);
++              skb_set_network_header(skb, outer_hlen);
+               skb->mac_len = mac_len;
+       }
+-- 
+2.43.0
+
diff --git a/queue-5.15/octeontx2-af-avoid-off-by-one-read-from-userspace.patch b/queue-5.15/octeontx2-af-avoid-off-by-one-read-from-userspace.patch
new file mode 100644 (file)
index 0000000..f927ecf
--- /dev/null
@@ -0,0 +1,44 @@
+From d073e68f5a04c4a91775031e485b2a2a531bca76 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Apr 2024 21:44:23 +0700
+Subject: octeontx2-af: avoid off-by-one read from userspace
+
+From: Bui Quang Minh <minhquangbui99@gmail.com>
+
+[ Upstream commit f299ee709fb45036454ca11e90cb2810fe771878 ]
+
+We try to access count + 1 bytes from userspace with memdup_user(buffer,
+count + 1). However, userspace only provides a buffer of count bytes, and
+only these count bytes are verified to be okay to access. To ensure the
+copied buffer is NUL terminated, we use memdup_user_nul instead.
+
+Fixes: 3a2eb515d136 ("octeontx2-af: Fix an off by one in rvu_dbg_qsize_write()")
+Signed-off-by: Bui Quang Minh <minhquangbui99@gmail.com>
+Link: https://lore.kernel.org/r/20240424-fix-oob-read-v2-6-f1f1b53a10f4@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+index 4dddf6ec3be87..e201827529513 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+@@ -559,12 +559,10 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
+       u16 pcifunc;
+       int ret, lf;
+-      cmd_buf = memdup_user(buffer, count + 1);
++      cmd_buf = memdup_user_nul(buffer, count);
+       if (IS_ERR(cmd_buf))
+               return -ENOMEM;
+-      cmd_buf[count] = '\0';
+-
+       cmd_buf_tmp = strchr(cmd_buf, '\n');
+       if (cmd_buf_tmp) {
+               *cmd_buf_tmp = '\0';
+-- 
+2.43.0
+
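For reference, a hypothetical debugfs write handler using the same idiom as
the fix above: copy exactly count bytes and let memdup_user_nul() append the
terminating NUL, instead of over-reading count + 1 bytes and terminating by
hand.

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>

static ssize_t example_dbg_write(struct file *filp, const char __user *buf,
                                 size_t count, loff_t *ppos)
{
        char *cmd = memdup_user_nul(buf, count);

        if (IS_ERR(cmd))
                return PTR_ERR(cmd);

        /* ... parse cmd as a NUL-terminated string ... */

        kfree(cmd);
        return count;
}
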
diff --git a/queue-5.15/s390-cio-ensure-the-copied-buf-is-nul-terminated.patch b/queue-5.15/s390-cio-ensure-the-copied-buf-is-nul-terminated.patch
new file mode 100644 (file)
index 0000000..803d779
--- /dev/null
@@ -0,0 +1,40 @@
+From 8353b5b46f0cf4144d1895c5e36f55cd4fc7342a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Apr 2024 21:44:22 +0700
+Subject: s390/cio: Ensure the copied buf is NUL terminated
+
+From: Bui Quang Minh <minhquangbui99@gmail.com>
+
+[ Upstream commit da7c622cddd4fe36be69ca61e8c42e43cde94784 ]
+
+Currently, we allocate an lbuf-sized kernel buffer and copy lbuf bytes
+from userspace to that buffer. Later, we use scanf on this buffer but we
+don't ensure that the string is terminated inside the buffer, which can
+lead to an OOB read when using scanf. Fix this issue by using
+memdup_user_nul instead.
+
+Fixes: a4f17cc72671 ("s390/cio: add CRW inject functionality")
+Signed-off-by: Bui Quang Minh <minhquangbui99@gmail.com>
+Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
+Link: https://lore.kernel.org/r/20240424-fix-oob-read-v2-5-f1f1b53a10f4@gmail.com
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/s390/cio/cio_inject.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/s390/cio/cio_inject.c b/drivers/s390/cio/cio_inject.c
+index 8613fa937237b..a2e771ebae8eb 100644
+--- a/drivers/s390/cio/cio_inject.c
++++ b/drivers/s390/cio/cio_inject.c
+@@ -95,7 +95,7 @@ static ssize_t crw_inject_write(struct file *file, const char __user *buf,
+               return -EINVAL;
+       }
+-      buffer = vmemdup_user(buf, lbuf);
++      buffer = memdup_user_nul(buf, lbuf);
+       if (IS_ERR(buffer))
+               return -ENOMEM;
+-- 
+2.43.0
+
diff --git a/queue-5.15/s390-mm-fix-clearing-storage-keys-for-huge-pages.patch b/queue-5.15/s390-mm-fix-clearing-storage-keys-for-huge-pages.patch
new file mode 100644 (file)
index 0000000..f872b7c
--- /dev/null
@@ -0,0 +1,42 @@
+From 6709ad79e125ec8f69d1f649abe88a0c08ad4c0a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 16 Apr 2024 13:42:20 +0200
+Subject: s390/mm: Fix clearing storage keys for huge pages
+
+From: Claudio Imbrenda <imbrenda@linux.ibm.com>
+
+[ Upstream commit 412050af2ea39407fe43324b0be4ab641530ce88 ]
+
+The function __storage_key_init_range() expects the end address to be
+the first byte outside the range to be initialized. I.e. end - start
+should be the size of the area to be initialized.
+
+The current code works because __storage_key_init_range() will still loop
+over every page in the range, but it is slower than using sske_frame().
+
+Fixes: 3afdfca69870 ("s390/mm: Clear skeys for newly mapped huge guest pmds")
+Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
+Link: https://lore.kernel.org/r/20240416114220.28489-3-imbrenda@linux.ibm.com
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/mm/hugetlbpage.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
+index da36d13ffc162..8631307d3defc 100644
+--- a/arch/s390/mm/hugetlbpage.c
++++ b/arch/s390/mm/hugetlbpage.c
+@@ -146,7 +146,7 @@ static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
+       }
+       if (!test_and_set_bit(PG_arch_1, &page->flags))
+-              __storage_key_init_range(paddr, paddr + size - 1);
++              __storage_key_init_range(paddr, paddr + size);
+ }
+ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+-- 
+2.43.0
+
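A hedged illustration of the interval convention behind the fix above (s390
kernel context; the wrapper name is hypothetical): __storage_key_init_range()
takes a half-open range [start, end), so end - start must equal the size
being initialized.

#include <asm/page.h>

static void init_keys_for_huge_page(unsigned long paddr)
{
        /* correct: end is the first byte after the range */
        __storage_key_init_range(paddr, paddr + HPAGE_SIZE);

        /*
         * Passing paddr + HPAGE_SIZE - 1 still touches every page because
         * the helper loops per page, but, per the commit message above, it
         * misses the faster sske_frame() path.
         */
}
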
diff --git a/queue-5.15/s390-mm-fix-storage-key-clearing-for-guest-huge-page.patch b/queue-5.15/s390-mm-fix-storage-key-clearing-for-guest-huge-page.patch
new file mode 100644 (file)
index 0000000..957826a
--- /dev/null
@@ -0,0 +1,42 @@
+From 882c3dc127d29e2706008c9be2e7f395e9f8750e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 16 Apr 2024 13:42:19 +0200
+Subject: s390/mm: Fix storage key clearing for guest huge pages
+
+From: Claudio Imbrenda <imbrenda@linux.ibm.com>
+
+[ Upstream commit 843c3280686fc1a83d89ee1e0b5599c9f6b09d0c ]
+
+The function __storage_key_init_range() expects the end address to be
+the first byte outside the range to be initialized. I.e. end - start
+should be the size of the area to be initialized.
+
+The current code works because __storage_key_init_range() will still loop
+over every page in the range, but it is slower than using sske_frame().
+
+Fixes: 964c2c05c9f3 ("s390/mm: Clear huge page storage keys on enable_skey")
+Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
+Link: https://lore.kernel.org/r/20240416114220.28489-2-imbrenda@linux.ibm.com
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/mm/gmap.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
+index a2c872de29a66..32d9db5e6f53c 100644
+--- a/arch/s390/mm/gmap.c
++++ b/arch/s390/mm/gmap.c
+@@ -2632,7 +2632,7 @@ static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
+               return 0;
+       start = pmd_val(*pmd) & HPAGE_MASK;
+-      end = start + HPAGE_SIZE - 1;
++      end = start + HPAGE_SIZE;
+       __storage_key_init_range(start, end);
+       set_bit(PG_arch_1, &page->flags);
+       cond_resched();
+-- 
+2.43.0
+
diff --git a/queue-5.15/s390-qeth-don-t-keep-track-of-input-queue-count.patch b/queue-5.15/s390-qeth-don-t-keep-track-of-input-queue-count.patch
new file mode 100644 (file)
index 0000000..df64b71
--- /dev/null
@@ -0,0 +1,113 @@
+From bce7b0f8387db6b603f8ad1c187520f86bde5d44 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 25 Oct 2021 11:56:54 +0200
+Subject: s390/qeth: don't keep track of Input Queue count
+
+From: Julian Wiedmann <jwi@linux.ibm.com>
+
+[ Upstream commit dc15012bb083c70502b625cf56fbf32b6cf17fe4 ]
+
+The only actual user of qdio.no_input_queues is qeth_qdio_establish(),
+and there we already have full awareness of the current Input Queue
+configuration (1 RX queue, plus potentially 1 TX Completion queue).
+
+So avoid this state tracking, and the ambiguity it brings with it.
+
+Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: 8a2e4d37afb8 ("s390/qeth: Fix kernel panic after setting hsuid")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/s390/net/qeth_core.h      |  1 -
+ drivers/s390/net/qeth_core_main.c | 17 +++++++----------
+ 2 files changed, 7 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
+index e8c360879883b..71464e9ad4f82 100644
+--- a/drivers/s390/net/qeth_core.h
++++ b/drivers/s390/net/qeth_core.h
+@@ -545,7 +545,6 @@ static inline bool qeth_out_queue_is_empty(struct qeth_qdio_out_q *queue)
+ struct qeth_qdio_info {
+       atomic_t state;
+       /* input */
+-      int no_in_queues;
+       struct qeth_qdio_q *in_q;
+       struct qeth_qdio_q *c_q;
+       struct qeth_qdio_buffer_pool in_buf_pool;
+diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
+index c1346c4e2242d..9b7f518395e16 100644
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -354,8 +354,8 @@ static int qeth_cq_init(struct qeth_card *card)
+               qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
+                                  QDIO_MAX_BUFFERS_PER_Q);
+               card->qdio.c_q->next_buf_to_init = 127;
+-              rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
+-                           card->qdio.no_in_queues - 1, 0, 127, NULL);
++              rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 1, 0, 127,
++                           NULL);
+               if (rc) {
+                       QETH_CARD_TEXT_(card, 2, "1err%d", rc);
+                       goto out;
+@@ -375,21 +375,16 @@ static int qeth_alloc_cq(struct qeth_card *card)
+                       dev_err(&card->gdev->dev, "Failed to create completion queue\n");
+                       return -ENOMEM;
+               }
+-
+-              card->qdio.no_in_queues = 2;
+       } else {
+               QETH_CARD_TEXT(card, 2, "nocq");
+               card->qdio.c_q = NULL;
+-              card->qdio.no_in_queues = 1;
+       }
+-      QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues);
+       return 0;
+ }
+ static void qeth_free_cq(struct qeth_card *card)
+ {
+       if (card->qdio.c_q) {
+-              --card->qdio.no_in_queues;
+               qeth_free_qdio_queue(card->qdio.c_q);
+               card->qdio.c_q = NULL;
+       }
+@@ -1492,7 +1487,6 @@ static void qeth_init_qdio_info(struct qeth_card *card)
+       card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
+       /* inbound */
+-      card->qdio.no_in_queues = 1;
+       card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
+       if (IS_IQD(card))
+               card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
+@@ -5173,6 +5167,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
+       struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES];
+       struct qeth_qib_parms *qib_parms = NULL;
+       struct qdio_initialize init_data;
++      unsigned int no_input_qs = 1;
+       unsigned int i;
+       int rc = 0;
+@@ -5187,8 +5182,10 @@ static int qeth_qdio_establish(struct qeth_card *card)
+       }
+       in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs;
+-      if (card->options.cq == QETH_CQ_ENABLED)
++      if (card->options.cq == QETH_CQ_ENABLED) {
+               in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs;
++              no_input_qs++;
++      }
+       for (i = 0; i < card->qdio.no_out_queues; i++)
+               out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs;
+@@ -5198,7 +5195,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
+                                                         QDIO_QETH_QFMT;
+       init_data.qib_param_field_format = 0;
+       init_data.qib_param_field        = (void *)qib_parms;
+-      init_data.no_input_qs            = card->qdio.no_in_queues;
++      init_data.no_input_qs            = no_input_qs;
+       init_data.no_output_qs           = card->qdio.no_out_queues;
+       init_data.input_handler          = qeth_qdio_input_handler;
+       init_data.output_handler         = qeth_qdio_output_handler;
+-- 
+2.43.0
+
diff --git a/queue-5.15/s390-qeth-fix-kernel-panic-after-setting-hsuid.patch b/queue-5.15/s390-qeth-fix-kernel-panic-after-setting-hsuid.patch
new file mode 100644 (file)
index 0000000..266008d
--- /dev/null
@@ -0,0 +1,230 @@
+From 4f96a5f1ea00168ad6a7652e1bfc6a2fdec98624 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Apr 2024 11:10:04 +0200
+Subject: s390/qeth: Fix kernel panic after setting hsuid
+
+From: Alexandra Winter <wintera@linux.ibm.com>
+
+[ Upstream commit 8a2e4d37afb8500b276e5ee903dee06f50ab0494 ]
+
+Symptom:
+When the hsuid attribute is set for the first time on an IQD Layer3
+device while the corresponding network interface is already UP,
+the kernel will try to execute a napi function pointer that is NULL.
+
+Example:
+---------------------------------------------------------------------------
+[ 2057.572696] illegal operation: 0001 ilc:1 [#1] SMP
+[ 2057.572702] Modules linked in: af_iucv qeth_l3 zfcp scsi_transport_fc sunrpc nft_fib_inet nft_fib_ipv4 nft_fib_ipv6 nft_fib nft_reject_inet nf_reject_ipv4 nf_reject_ipv6
+nft_reject nft_ct nf_tables_set nft_chain_nat nf_nat nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 ip_set nf_tables libcrc32c nfnetlink ghash_s390 prng xts aes_s390 des_s390 de
+s_generic sha3_512_s390 sha3_256_s390 sha512_s390 vfio_ccw vfio_mdev mdev vfio_iommu_type1 eadm_sch vfio ext4 mbcache jbd2 qeth_l2 bridge stp llc dasd_eckd_mod qeth dasd_mod
+ qdio ccwgroup pkey zcrypt
+[ 2057.572739] CPU: 6 PID: 60182 Comm: stress_client Kdump: loaded Not tainted 4.18.0-541.el8.s390x #1
+[ 2057.572742] Hardware name: IBM 3931 A01 704 (LPAR)
+[ 2057.572744] Krnl PSW : 0704f00180000000 0000000000000002 (0x2)
+[ 2057.572748]            R:0 T:1 IO:1 EX:1 Key:0 M:1 W:0 P:0 AS:3 CC:3 PM:0 RI:0 EA:3
+[ 2057.572751] Krnl GPRS: 0000000000000004 0000000000000000 00000000a3b008d8 0000000000000000
+[ 2057.572754]            00000000a3b008d8 cb923a29c779abc5 0000000000000000 00000000814cfd80
+[ 2057.572756]            000000000000012c 0000000000000000 00000000a3b008d8 00000000a3b008d8
+[ 2057.572758]            00000000bab6d500 00000000814cfd80 0000000091317e46 00000000814cfc68
+[ 2057.572762] Krnl Code:#0000000000000000: 0000                illegal
+                         >0000000000000002: 0000                illegal
+                          0000000000000004: 0000                illegal
+                          0000000000000006: 0000                illegal
+                          0000000000000008: 0000                illegal
+                          000000000000000a: 0000                illegal
+                          000000000000000c: 0000                illegal
+                          000000000000000e: 0000                illegal
+[ 2057.572800] Call Trace:
+[ 2057.572801] ([<00000000ec639700>] 0xec639700)
+[ 2057.572803]  [<00000000913183e2>] net_rx_action+0x2ba/0x398
+[ 2057.572809]  [<0000000091515f76>] __do_softirq+0x11e/0x3a0
+[ 2057.572813]  [<0000000090ce160c>] do_softirq_own_stack+0x3c/0x58
+[ 2057.572817] ([<0000000090d2cbd6>] do_softirq.part.1+0x56/0x60)
+[ 2057.572822]  [<0000000090d2cc60>] __local_bh_enable_ip+0x80/0x98
+[ 2057.572825]  [<0000000091314706>] __dev_queue_xmit+0x2be/0xd70
+[ 2057.572827]  [<000003ff803dd6d6>] afiucv_hs_send+0x24e/0x300 [af_iucv]
+[ 2057.572830]  [<000003ff803dd88a>] iucv_send_ctrl+0x102/0x138 [af_iucv]
+[ 2057.572833]  [<000003ff803de72a>] iucv_sock_connect+0x37a/0x468 [af_iucv]
+[ 2057.572835]  [<00000000912e7e90>] __sys_connect+0xa0/0xd8
+[ 2057.572839]  [<00000000912e9580>] sys_socketcall+0x228/0x348
+[ 2057.572841]  [<0000000091514e1a>] system_call+0x2a6/0x2c8
+[ 2057.572843] Last Breaking-Event-Address:
+[ 2057.572844]  [<0000000091317e44>] __napi_poll+0x4c/0x1d8
+[ 2057.572846]
+[ 2057.572847] Kernel panic - not syncing: Fatal exception in interrupt
+-------------------------------------------------------------------------------------------
+
+Analysis:
+There is one napi structure per out_q: card->qdio.out_qs[i].napi
+The napi.poll functions are set during qeth_open().
+
+Since
+commit 1cfef80d4c2b ("s390/qeth: Don't call dev_close/dev_open (DOWN/UP)")
+qeth_set_offline()/qeth_set_online() no longer call dev_close()/
+dev_open(). So if qeth_free_qdio_queues() cleared
+card->qdio.out_qs[i].napi.poll while the network interface was UP and the
+card was offline, the poll functions are not set again.
+
+Reproduction:
+chzdev -e $devno layer2=0
+ip link set dev $network_interface up
+echo 0 > /sys/bus/ccwgroup/devices/0.0.$devno/online
+echo foo > /sys/bus/ccwgroup/devices/0.0.$devno/hsuid
+echo 1 > /sys/bus/ccwgroup/devices/0.0.$devno/online
+-> Crash (can be enforced e.g. by af_iucv connect(), ip link down/up, ...)
+
+Note that a Completion Queue (CQ) is only enabled or disabled when hsuid
+is set for the first time or when it is removed.
+
+Workarounds:
+- Set hsuid before setting the device online for the first time
+or
+- Use chzdev -d $devno; chzdev $devno hsuid=xxx; chzdev -e $devno;
+to set hsuid on an existing device. (this will remove and recreate the
+network interface)
+
+Fix:
+There is no need to free the output queues when a completion queue is
+added or removed.
+card->qdio.state now indicates whether the inbound buffer pool and the
+outbound queues are allocated.
+card->qdio.c_q indicates whether a CQ is allocated.
+
+Fixes: 1cfef80d4c2b ("s390/qeth: Don't call dev_close/dev_open (DOWN/UP)")
+Signed-off-by: Alexandra Winter <wintera@linux.ibm.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://lore.kernel.org/r/20240430091004.2265683-1-wintera@linux.ibm.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/s390/net/qeth_core_main.c | 61 ++++++++++++++-----------------
+ 1 file changed, 27 insertions(+), 34 deletions(-)
+
+diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
+index 9b7f518395e16..5c69cba6459f2 100644
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -366,30 +366,33 @@ static int qeth_cq_init(struct qeth_card *card)
+       return rc;
+ }
++static void qeth_free_cq(struct qeth_card *card)
++{
++      if (card->qdio.c_q) {
++              qeth_free_qdio_queue(card->qdio.c_q);
++              card->qdio.c_q = NULL;
++      }
++}
++
+ static int qeth_alloc_cq(struct qeth_card *card)
+ {
+       if (card->options.cq == QETH_CQ_ENABLED) {
+               QETH_CARD_TEXT(card, 2, "cqon");
+-              card->qdio.c_q = qeth_alloc_qdio_queue();
+               if (!card->qdio.c_q) {
+-                      dev_err(&card->gdev->dev, "Failed to create completion queue\n");
+-                      return -ENOMEM;
++                      card->qdio.c_q = qeth_alloc_qdio_queue();
++                      if (!card->qdio.c_q) {
++                              dev_err(&card->gdev->dev,
++                                      "Failed to create completion queue\n");
++                              return -ENOMEM;
++                      }
+               }
+       } else {
+               QETH_CARD_TEXT(card, 2, "nocq");
+-              card->qdio.c_q = NULL;
++              qeth_free_cq(card);
+       }
+       return 0;
+ }
+-static void qeth_free_cq(struct qeth_card *card)
+-{
+-      if (card->qdio.c_q) {
+-              qeth_free_qdio_queue(card->qdio.c_q);
+-              card->qdio.c_q = NULL;
+-      }
+-}
+-
+ static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
+                                                       int delayed)
+ {
+@@ -2586,6 +2589,10 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
+       QETH_CARD_TEXT(card, 2, "allcqdbf");
++      /* completion */
++      if (qeth_alloc_cq(card))
++              goto out_err;
++
+       if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
+               QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
+               return 0;
+@@ -2626,10 +2633,6 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
+               queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;
+       }
+-      /* completion */
+-      if (qeth_alloc_cq(card))
+-              goto out_freeoutq;
+-
+       return 0;
+ out_freeoutq:
+@@ -2643,6 +2646,8 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
+       card->qdio.in_q = NULL;
+ out_nomem:
+       atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
++      qeth_free_cq(card);
++out_err:
+       return -ENOMEM;
+ }
+@@ -2650,11 +2655,12 @@ static void qeth_free_qdio_queues(struct qeth_card *card)
+ {
+       int i, j;
++      qeth_free_cq(card);
++
+       if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
+               QETH_QDIO_UNINITIALIZED)
+               return;
+-      qeth_free_cq(card);
+       for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
+               if (card->qdio.in_q->bufs[j].rx_skb)
+                       dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
+@@ -3707,24 +3713,11 @@ static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
+ int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
+ {
+-      int rc;
+-
+-      if (card->options.cq ==  QETH_CQ_NOTAVAILABLE) {
+-              rc = -1;
+-              goto out;
+-      } else {
+-              if (card->options.cq == cq) {
+-                      rc = 0;
+-                      goto out;
+-              }
+-
+-              qeth_free_qdio_queues(card);
+-              card->options.cq = cq;
+-              rc = 0;
+-      }
+-out:
+-      return rc;
++      if (card->options.cq == QETH_CQ_NOTAVAILABLE)
++              return -1;
++      card->options.cq = cq;
++      return 0;
+ }
+ EXPORT_SYMBOL_GPL(qeth_configure_cq);
+-- 
+2.43.0
+
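The fix above boils down to making the completion-queue lifecycle independent of the
output queues. As a rough, hypothetical sketch of the resulting flow (only
qeth_configure_cq(), qeth_alloc_qdio_queues() and the QETH_CQ_* values are real driver
symbols; demo_request_cq() and its error code are invented for illustration): setting
hsuid now merely records the CQ option, and the CQ itself is allocated or freed on the
next qeth_alloc_qdio_queues() run, leaving the out_qs[i].napi state installed by
qeth_open() untouched.

    /* Illustration only, assuming the qeth driver headers. */
    #include "qeth_core.h"

    static int demo_request_cq(struct qeth_card *card, bool want_cq)
    {
            /* Just record the option; -1 means CQs are unavailable here. */
            if (qeth_configure_cq(card, want_cq ? QETH_CQ_ENABLED
                                                : QETH_CQ_DISABLED))
                    return -EOPNOTSUPP;

            /*
             * No qeth_free_qdio_queues() call any more: qeth_alloc_cq() is
             * now invoked from qeth_alloc_qdio_queues() and is idempotent,
             * so the existing queue set -- and its napi.poll pointers --
             * survives the option change.
             */
            return 0;
    }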
diff --git a/queue-5.15/s390-vdso-add-cfi-for-ra-register-to-asm-macro-vdso_.patch b/queue-5.15/s390-vdso-add-cfi-for-ra-register-to-asm-macro-vdso_.patch
new file mode 100644 (file)
index 0000000..e5ba740
--- /dev/null
@@ -0,0 +1,56 @@
+From 341fee87a0836c8e7abe724431173491f5711fe5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Apr 2024 17:35:52 +0200
+Subject: s390/vdso: Add CFI for RA register to asm macro vdso_func
+
+From: Jens Remus <jremus@linux.ibm.com>
+
+[ Upstream commit b961ec10b9f9719987470236feb50c967db5a652 ]
+
+The return-address (RA) register r14 is specified as volatile in the
+s390x ELF ABI [1]. Nevertheless proper CFI directives must be provided
+for an unwinder to restore the return address, if the RA register
+value is changed from its value at function entry, as it is the case.
+
+[1]: s390x ELF ABI, https://github.com/IBM/s390x-abi/releases
+
+Fixes: 4bff8cb54502 ("s390: convert to GENERIC_VDSO")
+Signed-off-by: Jens Remus <jremus@linux.ibm.com>
+Acked-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/include/asm/dwarf.h               | 1 +
+ arch/s390/kernel/vdso64/vdso_user_wrapper.S | 2 ++
+ 2 files changed, 3 insertions(+)
+
+diff --git a/arch/s390/include/asm/dwarf.h b/arch/s390/include/asm/dwarf.h
+index 4f21ae561e4dd..390906b8e386e 100644
+--- a/arch/s390/include/asm/dwarf.h
++++ b/arch/s390/include/asm/dwarf.h
+@@ -9,6 +9,7 @@
+ #define CFI_DEF_CFA_OFFSET    .cfi_def_cfa_offset
+ #define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
+ #define CFI_RESTORE           .cfi_restore
++#define CFI_REL_OFFSET                .cfi_rel_offset
+ #ifdef CONFIG_AS_CFI_VAL_OFFSET
+ #define CFI_VAL_OFFSET                .cfi_val_offset
+diff --git a/arch/s390/kernel/vdso64/vdso_user_wrapper.S b/arch/s390/kernel/vdso64/vdso_user_wrapper.S
+index 97f0c0a669a59..0625381359df4 100644
+--- a/arch/s390/kernel/vdso64/vdso_user_wrapper.S
++++ b/arch/s390/kernel/vdso64/vdso_user_wrapper.S
+@@ -23,8 +23,10 @@ __kernel_\func:
+       CFI_DEF_CFA_OFFSET (STACK_FRAME_OVERHEAD + WRAPPER_FRAME_SIZE)
+       CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
+       stg     %r14,STACK_FRAME_OVERHEAD(%r15)
++      CFI_REL_OFFSET 14, STACK_FRAME_OVERHEAD
+       brasl   %r14,__s390_vdso_\func
+       lg      %r14,STACK_FRAME_OVERHEAD(%r15)
++      CFI_RESTORE 14
+       aghi    %r15,WRAPPER_FRAME_SIZE
+       CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
+       CFI_RESTORE 15
+-- 
+2.43.0
+
diff --git a/queue-5.15/series b/queue-5.15/series
index a5cf59da7a36dd6a4ca6e59d67bef336ec1d77f7..3d916f806688f94217cee8cb611d9dd125a6884a 100644 (file)
@@ -21,3 +21,43 @@ power-rt9455-hide-unused-rt9455_boost_voltage_values.patch
 power-supply-mt6360_charger-fix-of_match-for-usb-otg.patch
 pinctrl-devicetree-fix-refcount-leak-in-pinctrl_dt_t.patch
 regulator-mt6360-de-capitalize-devicetree-regulator-.patch
+bpf-kconfig-fix-debug_info_btf_modules-kconfig-defin.patch
+tcp-introduce-tcp_read_skb.patch
+net-introduce-a-new-proto_ops-read_skb.patch
+bpf-sockmap-wake-up-polling-after-data-copy.patch
+bpf-sockmap-avoid-potential-null-dereference-in-sk_p.patch
+bpf-sockmap-fix-null-pointer-dereference-in-sk_psock.patch
+bpf-skmsg-fix-null-pointer-dereference-in-sk_psock_s.patch
+bpf-fix-a-verifier-verbose-message.patch
+spi-hisi-kunpeng-delete-the-dump-interface-of-data-r.patch
+s390-mm-fix-storage-key-clearing-for-guest-huge-page.patch
+s390-mm-fix-clearing-storage-keys-for-huge-pages.patch
+xdp-move-conversion-to-xdp_frame-out-of-map-function.patch
+xdp-add-xdp_do_redirect_frame-for-pre-computed-xdp_f.patch
+xdp-use-flags-field-to-disambiguate-broadcast-redire.patch
+bna-ensure-the-copied-buf-is-nul-terminated.patch
+octeontx2-af-avoid-off-by-one-read-from-userspace.patch
+nsh-restore-skb-protocol-data-mac_header-for-outer-h.patch
+net-l2tp-drop-flow-hash-on-forward.patch
+s390-vdso-add-cfi-for-ra-register-to-asm-macro-vdso_.patch
+net-qede-sanitize-rc-in-qede_add_tc_flower_fltr.patch
+net-qede-use-return-from-qede_parse_flow_attr-for-fl.patch
+net-qede-use-return-from-qede_parse_flow_attr-for-fl.patch-8226
+net-qede-use-return-from-qede_parse_actions.patch
+asoc-meson-axg-fifo-use-field-helpers.patch
+asoc-meson-axg-fifo-use-threaded-irq-to-check-period.patch
+asoc-meson-axg-card-make-links-nonatomic.patch
+asoc-meson-axg-tdm-interface-manage-formatters-in-tr.patch
+asoc-meson-cards-select-snd_dynamic_minors.patch
+alsa-hda-intel-sdw-acpi-fix-usage-of-device_get_name.patch
+s390-cio-ensure-the-copied-buf-is-nul-terminated.patch
+cxgb4-properly-lock-tx-queue-for-the-selftest.patch
+net-dsa-mv88e6xxx-fix-number-of-databases-for-88e614.patch
+net-bridge-fix-multicast-to-unicast-with-fraglist-gs.patch
+net-core-reject-skb_copy-_expand-for-fraglist-gso-sk.patch
+tipc-fix-a-possible-memleak-in-tipc_buf_append.patch
+s390-qeth-don-t-keep-track-of-input-queue-count.patch
+s390-qeth-fix-kernel-panic-after-setting-hsuid.patch
+drm-panel-ili9341-respect-deferred-probe.patch
+drm-panel-ili9341-use-predefined-error-codes.patch
+net-gro-add-flush-check-in-udp_gro_receive_segment.patch
diff --git a/queue-5.15/spi-hisi-kunpeng-delete-the-dump-interface-of-data-r.patch b/queue-5.15/spi-hisi-kunpeng-delete-the-dump-interface-of-data-r.patch
new file mode 100644 (file)
index 0000000..0ac7cd6
--- /dev/null
@@ -0,0 +1,41 @@
+From 92e0341d1100d1aafba7d26e4ba6fad2226103c9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 16 Apr 2024 09:58:39 +0800
+Subject: spi: hisi-kunpeng: Delete the dump interface of data registers in
+ debugfs
+
+From: Devyn Liu <liudingyuan@huawei.com>
+
+[ Upstream commit 7430764f5a85d30314aeef2d5438dff1fb0b1d68 ]
+
+Dumping the data registers in debugfs reads the FIFO, so an SPI
+transmission that is in progress can be disturbed and may fail.
+Therefore, the dump interface for the data registers is removed
+from debugfs.
+
+Fixes: 2b2142f247eb ("spi: hisi-kunpeng: Add debugfs support")
+Signed-off-by: Devyn Liu <liudingyuan@huawei.com>
+Reviewed-by: Jay Fang <f.fangjian@huawei.com>
+Link: https://lore.kernel.org/r/20240416015839.3323398-1-liudingyuan@huawei.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-hisi-kunpeng.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/drivers/spi/spi-hisi-kunpeng.c b/drivers/spi/spi-hisi-kunpeng.c
+index 525cc0143a305..54730e93fba45 100644
+--- a/drivers/spi/spi-hisi-kunpeng.c
++++ b/drivers/spi/spi-hisi-kunpeng.c
+@@ -151,8 +151,6 @@ static const struct debugfs_reg32 hisi_spi_regs[] = {
+       HISI_SPI_DBGFS_REG("ENR", HISI_SPI_ENR),
+       HISI_SPI_DBGFS_REG("FIFOC", HISI_SPI_FIFOC),
+       HISI_SPI_DBGFS_REG("IMR", HISI_SPI_IMR),
+-      HISI_SPI_DBGFS_REG("DIN", HISI_SPI_DIN),
+-      HISI_SPI_DBGFS_REG("DOUT", HISI_SPI_DOUT),
+       HISI_SPI_DBGFS_REG("SR", HISI_SPI_SR),
+       HISI_SPI_DBGFS_REG("RISR", HISI_SPI_RISR),
+       HISI_SPI_DBGFS_REG("ISR", HISI_SPI_ISR),
+-- 
+2.43.0
+
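The struct debugfs_reg32 array trimmed above is the kind of table consumed by
debugfs_create_regset32(), whose read path does a plain readl() on every listed offset.
A hedged sketch of the rule the fix enforces (the DEMO_* names and offsets are
placeholders, not the real HISI_SPI_* definitions):

    #include <linux/debugfs.h>

    /* Placeholder offsets, for illustration only. */
    #define DEMO_SPI_SR   0x24    /* status register: side-effect free  */
    #define DEMO_SPI_DOUT 0x30    /* RX FIFO data: a read pops the FIFO */

    static const struct debugfs_reg32 demo_spi_regs[] = {
            { .name = "SR", .offset = DEMO_SPI_SR },
            /*
             * FIFO data registers such as DEMO_SPI_DOUT must not be listed:
             * every read of the debugfs file would readl() them and silently
             * consume words from an in-flight transfer -- the failure mode
             * the patch above eliminates.
             */
    };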
diff --git a/queue-5.15/tcp-introduce-tcp_read_skb.patch b/queue-5.15/tcp-introduce-tcp_read_skb.patch
new file mode 100644 (file)
index 0000000..34e02a2
--- /dev/null
@@ -0,0 +1,103 @@
+From a6e53a6990372e5fcf04b80c77bcf3eb99f7dd5f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Jun 2022 09:20:11 -0700
+Subject: tcp: Introduce tcp_read_skb()
+
+From: Cong Wang <cong.wang@bytedance.com>
+
+[ Upstream commit 04919bed948dc22a0032a9da867b7dcb8aece4ca ]
+
+This patch introduces tcp_read_skb() based on tcp_read_sock(),
+a preparation for the next patch which actually introduces
+a new sock ops.
+
+TCP is special here, because it has tcp_read_sock() which is
+mainly used by splice(). tcp_read_sock() supports partial read
+and arbitrary offset, neither of them is needed for sockmap.
+
+Signed-off-by: Cong Wang <cong.wang@bytedance.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: John Fastabend <john.fastabend@gmail.com>
+Link: https://lore.kernel.org/bpf/20220615162014.89193-2-xiyou.wangcong@gmail.com
+Stable-dep-of: 6648e613226e ("bpf, skmsg: Fix NULL pointer dereference in sk_psock_skb_ingress_enqueue")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/tcp.h |  2 ++
+ net/ipv4/tcp.c    | 47 +++++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 49 insertions(+)
+
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 08923ed4278f0..3047a8b3dbd1c 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -663,6 +663,8 @@ void tcp_get_info(struct sock *, struct tcp_info *);
+ /* Read 'sendfile()'-style from a TCP socket */
+ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
+                 sk_read_actor_t recv_actor);
++int tcp_read_skb(struct sock *sk, read_descriptor_t *desc,
++               sk_read_actor_t recv_actor);
+ void tcp_initialize_rcv_mss(struct sock *sk);
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 16fd3da68e9f6..3647a5f08c22d 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1705,6 +1705,53 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
+ }
+ EXPORT_SYMBOL(tcp_read_sock);
++int tcp_read_skb(struct sock *sk, read_descriptor_t *desc,
++               sk_read_actor_t recv_actor)
++{
++      struct tcp_sock *tp = tcp_sk(sk);
++      u32 seq = tp->copied_seq;
++      struct sk_buff *skb;
++      int copied = 0;
++      u32 offset;
++
++      if (sk->sk_state == TCP_LISTEN)
++              return -ENOTCONN;
++
++      while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
++              int used;
++
++              __skb_unlink(skb, &sk->sk_receive_queue);
++              used = recv_actor(desc, skb, 0, skb->len);
++              if (used <= 0) {
++                      if (!copied)
++                              copied = used;
++                      break;
++              }
++              seq += used;
++              copied += used;
++
++              if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
++                      consume_skb(skb);
++                      ++seq;
++                      break;
++              }
++              consume_skb(skb);
++              if (!desc->count)
++                      break;
++              WRITE_ONCE(tp->copied_seq, seq);
++      }
++      WRITE_ONCE(tp->copied_seq, seq);
++
++      tcp_rcv_space_adjust(sk);
++
++      /* Clean up data we have read: This will do ACK frames. */
++      if (copied > 0)
++              tcp_cleanup_rbuf(sk, copied);
++
++      return copied;
++}
++EXPORT_SYMBOL(tcp_read_skb);
++
+ int tcp_peek_len(struct socket *sock)
+ {
+       return tcp_inq(sock->sk);
+-- 
+2.43.0
+
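A minimal usage sketch of the helper introduced above (demo_count_actor() and
demo_drain() are invented for illustration; the real consumer is the sockmap read_skb
path wired up by the follow-up patches, and the caller is assumed to hold the socket
lock as with tcp_read_sock()). The actor always sees a whole skb at offset 0, which is
exactly the simplification the message above describes:

    #include <net/tcp.h>

    /*
     * Toy actor: report the whole skb as consumed. It must not free the
     * skb -- in this version tcp_read_skb() consume_skb()s it afterwards.
     */
    static int demo_count_actor(read_descriptor_t *desc, struct sk_buff *skb,
                                unsigned int offset, size_t len)
    {
            return skb->len;        /* offset is always 0, len == skb->len */
    }

    static int demo_drain(struct sock *sk)
    {
            read_descriptor_t desc = { .count = 1 };  /* keep nonzero to drain */

            return tcp_read_skb(sk, &desc, demo_count_actor);
    }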
diff --git a/queue-5.15/tipc-fix-a-possible-memleak-in-tipc_buf_append.patch b/queue-5.15/tipc-fix-a-possible-memleak-in-tipc_buf_append.patch
new file mode 100644 (file)
index 0000000..e67fb1f
--- /dev/null
@@ -0,0 +1,43 @@
+From e4053dc72107c56e3e8440d6f78ed04cfd64e35b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Apr 2024 10:03:38 -0400
+Subject: tipc: fix a possible memleak in tipc_buf_append
+
+From: Xin Long <lucien.xin@gmail.com>
+
+[ Upstream commit 97bf6f81b29a8efaf5d0983251a7450e5794370d ]
+
+__skb_linearize() doesn't free the skb when it fails, so move
+'*buf = NULL' after __skb_linearize(), so that the skb can be
+freed on the err path.
+
+Fixes: b7df21cf1b79 ("tipc: skb_linearize the head skb when reassembling msgs")
+Reported-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Tung Nguyen <tung.q.nguyen@dektech.com.au>
+Link: https://lore.kernel.org/r/90710748c29a1521efac4f75ea01b3b7e61414cf.1714485818.git.lucien.xin@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/tipc/msg.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/tipc/msg.c b/net/tipc/msg.c
+index 5c9fd4791c4ba..c52ab423082cd 100644
+--- a/net/tipc/msg.c
++++ b/net/tipc/msg.c
+@@ -142,9 +142,9 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
+       if (fragid == FIRST_FRAGMENT) {
+               if (unlikely(head))
+                       goto err;
+-              *buf = NULL;
+               if (skb_has_frag_list(frag) && __skb_linearize(frag))
+                       goto err;
++              *buf = NULL;
+               frag = skb_unshare(frag, GFP_ATOMIC);
+               if (unlikely(!frag))
+                       goto err;
+-- 
+2.43.0
+
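The one-liner above encodes an ownership rule that is easy to get wrong, so here is a
generic sketch of the same pattern (demo_take() is illustrative and mirrors the shape of
the fix, not the tipc code): clear the caller's pointer only once every remaining
failure path is guaranteed to free the buffer.

    #include <linux/skbuff.h>

    static int demo_take(struct sk_buff **buf)
    {
            struct sk_buff *frag = *buf;

            /* __skb_linearize() does not free the skb on failure ... */
            if (skb_has_frag_list(frag) && __skb_linearize(frag))
                    goto err;       /* ... so *buf must still point at it */

            *buf = NULL;            /* from here on this function owns it */
            /* ... continue with reassembly ... */
            return 0;

    err:
            kfree_skb(*buf);        /* the shared error path can still free it */
            *buf = NULL;
            return -ENOMEM;
    }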
diff --git a/queue-5.15/xdp-add-xdp_do_redirect_frame-for-pre-computed-xdp_f.patch b/queue-5.15/xdp-add-xdp_do_redirect_frame-for-pre-computed-xdp_f.patch
new file mode 100644 (file)
index 0000000..783dece
--- /dev/null
@@ -0,0 +1,147 @@
+From 5f9dee9f4bf21f4a0c98541e73e03cedd5b40b9d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 3 Jan 2022 16:08:10 +0100
+Subject: xdp: Add xdp_do_redirect_frame() for pre-computed xdp_frames
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Toke Høiland-Jørgensen <toke@redhat.com>
+
+[ Upstream commit 1372d34ccf6dd480332b2bcb2fd59a2b9a0df415 ]
+
+Add an xdp_do_redirect_frame() variant which supports pre-computed
+xdp_frame structures. This will be used in bpf_prog_run() to avoid having
+to write to the xdp_frame structure when the XDP program doesn't modify the
+frame boundaries.
+
+Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Link: https://lore.kernel.org/bpf/20220103150812.87914-6-toke@redhat.com
+Stable-dep-of: 5bcf0dcbf906 ("xdp: use flags field to disambiguate broadcast redirect")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/filter.h |  4 +++
+ net/core/filter.c      | 65 +++++++++++++++++++++++++++++++++++-------
+ 2 files changed, 58 insertions(+), 11 deletions(-)
+
+diff --git a/include/linux/filter.h b/include/linux/filter.h
+index ddaeb2afc022f..af0103bebb7bf 100644
+--- a/include/linux/filter.h
++++ b/include/linux/filter.h
+@@ -1020,6 +1020,10 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
+ int xdp_do_redirect(struct net_device *dev,
+                   struct xdp_buff *xdp,
+                   struct bpf_prog *prog);
++int xdp_do_redirect_frame(struct net_device *dev,
++                        struct xdp_buff *xdp,
++                        struct xdp_frame *xdpf,
++                        struct bpf_prog *prog);
+ void xdp_do_flush(void);
+ /* The xdp_do_flush_map() helper has been renamed to drop the _map suffix, as
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 96441da61fca8..b756951c92494 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -3987,26 +3987,44 @@ u32 xdp_master_redirect(struct xdp_buff *xdp)
+ }
+ EXPORT_SYMBOL_GPL(xdp_master_redirect);
+-int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
+-                  struct bpf_prog *xdp_prog)
++static inline int __xdp_do_redirect_xsk(struct bpf_redirect_info *ri,
++                                      struct net_device *dev,
++                                      struct xdp_buff *xdp,
++                                      struct bpf_prog *xdp_prog)
+ {
+-      struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+       enum bpf_map_type map_type = ri->map_type;
+       void *fwd = ri->tgt_value;
+       u32 map_id = ri->map_id;
+-      struct xdp_frame *xdpf;
+-      struct bpf_map *map;
+       int err;
+       ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
+       ri->map_type = BPF_MAP_TYPE_UNSPEC;
+-      if (map_type == BPF_MAP_TYPE_XSKMAP) {
+-              err = __xsk_map_redirect(fwd, xdp);
+-              goto out;
+-      }
++      err = __xsk_map_redirect(fwd, xdp);
++      if (unlikely(err))
++              goto err;
++
++      _trace_xdp_redirect_map(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index);
++      return 0;
++err:
++      _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index, err);
++      return err;
++}
++
++static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri,
++                                                 struct net_device *dev,
++                                                 struct xdp_frame *xdpf,
++                                                 struct bpf_prog *xdp_prog)
++{
++      enum bpf_map_type map_type = ri->map_type;
++      void *fwd = ri->tgt_value;
++      u32 map_id = ri->map_id;
++      struct bpf_map *map;
++      int err;
++
++      ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
++      ri->map_type = BPF_MAP_TYPE_UNSPEC;
+-      xdpf = xdp_convert_buff_to_frame(xdp);
+       if (unlikely(!xdpf)) {
+               err = -EOVERFLOW;
+               goto err;
+@@ -4043,7 +4061,6 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
+               err = -EBADRQC;
+       }
+-out:
+       if (unlikely(err))
+               goto err;
+@@ -4053,8 +4070,34 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
+       _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index, err);
+       return err;
+ }
++
++int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
++                  struct bpf_prog *xdp_prog)
++{
++      struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
++      enum bpf_map_type map_type = ri->map_type;
++
++      if (map_type == BPF_MAP_TYPE_XSKMAP)
++              return __xdp_do_redirect_xsk(ri, dev, xdp, xdp_prog);
++
++      return __xdp_do_redirect_frame(ri, dev, xdp_convert_buff_to_frame(xdp),
++                                     xdp_prog);
++}
+ EXPORT_SYMBOL_GPL(xdp_do_redirect);
++int xdp_do_redirect_frame(struct net_device *dev, struct xdp_buff *xdp,
++                        struct xdp_frame *xdpf, struct bpf_prog *xdp_prog)
++{
++      struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
++      enum bpf_map_type map_type = ri->map_type;
++
++      if (map_type == BPF_MAP_TYPE_XSKMAP)
++              return __xdp_do_redirect_xsk(ri, dev, xdp, xdp_prog);
++
++      return __xdp_do_redirect_frame(ri, dev, xdpf, xdp_prog);
++}
++EXPORT_SYMBOL_GPL(xdp_do_redirect_frame);
++
+ static int xdp_do_generic_redirect_map(struct net_device *dev,
+                                      struct sk_buff *skb,
+                                      struct xdp_buff *xdp,
+-- 
+2.43.0
+
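A hedged sketch of the call-site choice the new export gives (demo_redirect() is
hypothetical; the in-tree user this series is building towards is bpf_prog_run(), as the
message above states): a caller that already holds a pre-computed xdp_frame can skip the
buff-to-frame conversion entirely.

    #include <linux/filter.h>
    #include <linux/netdevice.h>

    /* Hypothetical helper, for illustration only. */
    static int demo_redirect(struct net_device *dev, struct xdp_buff *xdp,
                             struct xdp_frame *xdpf, struct bpf_prog *prog)
    {
            if (xdpf)
                    /* frame boundaries untouched: reuse the prepared frame */
                    return xdp_do_redirect_frame(dev, xdp, xdpf, prog);

            /* otherwise the classic path converts the buff internally */
            return xdp_do_redirect(dev, xdp, prog);
    }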
diff --git a/queue-5.15/xdp-move-conversion-to-xdp_frame-out-of-map-function.patch b/queue-5.15/xdp-move-conversion-to-xdp_frame-out-of-map-function.patch
new file mode 100644 (file)
index 0000000..6570927
--- /dev/null
@@ -0,0 +1,303 @@
+From 2f5e90c2b40535c78d8ed76fb5d76b808af83cb7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 3 Jan 2022 16:08:09 +0100
+Subject: xdp: Move conversion to xdp_frame out of map functions
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Toke Høiland-Jørgensen <toke@redhat.com>
+
+[ Upstream commit d53ad5d8b218a885e95080d4d3d556b16b91b1b9 ]
+
+All map redirect functions except XSK maps convert xdp_buff to xdp_frame
+before enqueueing it. So move this conversion of out the map functions
+and into xdp_do_redirect(). This removes a bit of duplicated code, but more
+importantly it makes it possible to support caller-allocated xdp_frame
+structures, which will be added in a subsequent commit.
+
+Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Link: https://lore.kernel.org/bpf/20220103150812.87914-5-toke@redhat.com
+Stable-dep-of: 5bcf0dcbf906 ("xdp: use flags field to disambiguate broadcast redirect")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/bpf.h | 20 ++++++++++----------
+ kernel/bpf/cpumap.c |  8 +-------
+ kernel/bpf/devmap.c | 32 +++++++++++---------------------
+ net/core/filter.c   | 24 +++++++++++++++++-------
+ 4 files changed, 39 insertions(+), 45 deletions(-)
+
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 74a26cabc084e..4236de05a8e70 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -1651,17 +1651,17 @@ void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
+ struct btf *bpf_get_btf_vmlinux(void);
+ /* Map specifics */
+-struct xdp_buff;
++struct xdp_frame;
+ struct sk_buff;
+ struct bpf_dtab_netdev;
+ struct bpf_cpu_map_entry;
+ void __dev_flush(void);
+-int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
++int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
+                   struct net_device *dev_rx);
+-int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
++int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
+                   struct net_device *dev_rx);
+-int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
++int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
+                         struct bpf_map *map, bool exclude_ingress);
+ int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
+                            struct bpf_prog *xdp_prog);
+@@ -1670,7 +1670,7 @@ int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
+                          bool exclude_ingress);
+ void __cpu_map_flush(void);
+-int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
++int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
+                   struct net_device *dev_rx);
+ int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
+                            struct sk_buff *skb);
+@@ -1823,26 +1823,26 @@ static inline void __dev_flush(void)
+ {
+ }
+-struct xdp_buff;
++struct xdp_frame;
+ struct bpf_dtab_netdev;
+ struct bpf_cpu_map_entry;
+ static inline
+-int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
++int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
+                   struct net_device *dev_rx)
+ {
+       return 0;
+ }
+ static inline
+-int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
++int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
+                   struct net_device *dev_rx)
+ {
+       return 0;
+ }
+ static inline
+-int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
++int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
+                         struct bpf_map *map, bool exclude_ingress)
+ {
+       return 0;
+@@ -1870,7 +1870,7 @@ static inline void __cpu_map_flush(void)
+ }
+ static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
+-                                struct xdp_buff *xdp,
++                                struct xdp_frame *xdpf,
+                                 struct net_device *dev_rx)
+ {
+       return 0;
+diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
+index a8429cfb4ae8c..0848d5691fd15 100644
+--- a/kernel/bpf/cpumap.c
++++ b/kernel/bpf/cpumap.c
+@@ -764,15 +764,9 @@ static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
+               list_add(&bq->flush_node, flush_list);
+ }
+-int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
++int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
+                   struct net_device *dev_rx)
+ {
+-      struct xdp_frame *xdpf;
+-
+-      xdpf = xdp_convert_buff_to_frame(xdp);
+-      if (unlikely(!xdpf))
+-              return -EOVERFLOW;
+-
+       /* Info needed when constructing SKB on remote CPU */
+       xdpf->dev_rx = dev_rx;
+diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
+index b591073c5f83d..bbf3ec03aa591 100644
+--- a/kernel/bpf/devmap.c
++++ b/kernel/bpf/devmap.c
+@@ -468,24 +468,19 @@ static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
+       bq->q[bq->count++] = xdpf;
+ }
+-static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
++static inline int __xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
+                               struct net_device *dev_rx,
+                               struct bpf_prog *xdp_prog)
+ {
+-      struct xdp_frame *xdpf;
+       int err;
+       if (!dev->netdev_ops->ndo_xdp_xmit)
+               return -EOPNOTSUPP;
+-      err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
++      err = xdp_ok_fwd_dev(dev, xdpf->len);
+       if (unlikely(err))
+               return err;
+-      xdpf = xdp_convert_buff_to_frame(xdp);
+-      if (unlikely(!xdpf))
+-              return -EOVERFLOW;
+-
+       bq_enqueue(dev, xdpf, dev_rx, xdp_prog);
+       return 0;
+ }
+@@ -521,27 +516,27 @@ static u32 dev_map_bpf_prog_run_skb(struct sk_buff *skb, struct bpf_dtab_netdev
+       return act;
+ }
+-int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
++int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
+                   struct net_device *dev_rx)
+ {
+-      return __xdp_enqueue(dev, xdp, dev_rx, NULL);
++      return __xdp_enqueue(dev, xdpf, dev_rx, NULL);
+ }
+-int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
++int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
+                   struct net_device *dev_rx)
+ {
+       struct net_device *dev = dst->dev;
+-      return __xdp_enqueue(dev, xdp, dev_rx, dst->xdp_prog);
++      return __xdp_enqueue(dev, xdpf, dev_rx, dst->xdp_prog);
+ }
+-static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_buff *xdp)
++static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf)
+ {
+       if (!obj ||
+           !obj->dev->netdev_ops->ndo_xdp_xmit)
+               return false;
+-      if (xdp_ok_fwd_dev(obj->dev, xdp->data_end - xdp->data))
++      if (xdp_ok_fwd_dev(obj->dev, xdpf->len))
+               return false;
+       return true;
+@@ -587,14 +582,13 @@ static int get_upper_ifindexes(struct net_device *dev, int *indexes)
+       return n;
+ }
+-int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
++int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
+                         struct bpf_map *map, bool exclude_ingress)
+ {
+       struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
+       struct bpf_dtab_netdev *dst, *last_dst = NULL;
+       int excluded_devices[1+MAX_NEST_DEV];
+       struct hlist_head *head;
+-      struct xdp_frame *xdpf;
+       int num_excluded = 0;
+       unsigned int i;
+       int err;
+@@ -604,15 +598,11 @@ int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
+               excluded_devices[num_excluded++] = dev_rx->ifindex;
+       }
+-      xdpf = xdp_convert_buff_to_frame(xdp);
+-      if (unlikely(!xdpf))
+-              return -EOVERFLOW;
+-
+       if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
+               for (i = 0; i < map->max_entries; i++) {
+                       dst = rcu_dereference_check(dtab->netdev_map[i],
+                                                   rcu_read_lock_bh_held());
+-                      if (!is_valid_dst(dst, xdp))
++                      if (!is_valid_dst(dst, xdpf))
+                               continue;
+                       if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
+@@ -635,7 +625,7 @@ int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
+                       head = dev_map_index_hash(dtab, i);
+                       hlist_for_each_entry_rcu(dst, head, index_hlist,
+                                                lockdep_is_held(&dtab->index_lock)) {
+-                              if (!is_valid_dst(dst, xdp))
++                              if (!is_valid_dst(dst, xdpf))
+                                       continue;
+                               if (is_ifindex_excluded(excluded_devices, num_excluded,
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 457d1a164ad5d..96441da61fca8 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -3994,12 +3994,24 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
+       enum bpf_map_type map_type = ri->map_type;
+       void *fwd = ri->tgt_value;
+       u32 map_id = ri->map_id;
++      struct xdp_frame *xdpf;
+       struct bpf_map *map;
+       int err;
+       ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
+       ri->map_type = BPF_MAP_TYPE_UNSPEC;
++      if (map_type == BPF_MAP_TYPE_XSKMAP) {
++              err = __xsk_map_redirect(fwd, xdp);
++              goto out;
++      }
++
++      xdpf = xdp_convert_buff_to_frame(xdp);
++      if (unlikely(!xdpf)) {
++              err = -EOVERFLOW;
++              goto err;
++      }
++
+       switch (map_type) {
+       case BPF_MAP_TYPE_DEVMAP:
+               fallthrough;
+@@ -4007,17 +4019,14 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
+               map = READ_ONCE(ri->map);
+               if (unlikely(map)) {
+                       WRITE_ONCE(ri->map, NULL);
+-                      err = dev_map_enqueue_multi(xdp, dev, map,
++                      err = dev_map_enqueue_multi(xdpf, dev, map,
+                                                   ri->flags & BPF_F_EXCLUDE_INGRESS);
+               } else {
+-                      err = dev_map_enqueue(fwd, xdp, dev);
++                      err = dev_map_enqueue(fwd, xdpf, dev);
+               }
+               break;
+       case BPF_MAP_TYPE_CPUMAP:
+-              err = cpu_map_enqueue(fwd, xdp, dev);
+-              break;
+-      case BPF_MAP_TYPE_XSKMAP:
+-              err = __xsk_map_redirect(fwd, xdp);
++              err = cpu_map_enqueue(fwd, xdpf, dev);
+               break;
+       case BPF_MAP_TYPE_UNSPEC:
+               if (map_id == INT_MAX) {
+@@ -4026,7 +4035,7 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
+                               err = -EINVAL;
+                               break;
+                       }
+-                      err = dev_xdp_enqueue(fwd, xdp, dev);
++                      err = dev_xdp_enqueue(fwd, xdpf, dev);
+                       break;
+               }
+               fallthrough;
+@@ -4034,6 +4043,7 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
+               err = -EBADRQC;
+       }
++out:
+       if (unlikely(err))
+               goto err;
+-- 
+2.43.0
+
diff --git a/queue-5.15/xdp-use-flags-field-to-disambiguate-broadcast-redire.patch b/queue-5.15/xdp-use-flags-field-to-disambiguate-broadcast-redire.patch
new file mode 100644 (file)
index 0000000..015087a
--- /dev/null
@@ -0,0 +1,153 @@
+From 3c0d4d22d8e70a377d9ea75d19191a7a7edbb46f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 18 Apr 2024 09:18:39 +0200
+Subject: xdp: use flags field to disambiguate broadcast redirect
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Toke Høiland-Jørgensen <toke@redhat.com>
+
+[ Upstream commit 5bcf0dcbf9066348058b88a510c57f70f384c92c ]
+
+When redirecting a packet using XDP, the bpf_redirect_map() helper will set
+up the redirect destination information in struct bpf_redirect_info (using
+the __bpf_xdp_redirect_map() helper function), and the xdp_do_redirect()
+function will read this information after the XDP program returns and pass
+the frame on to the right redirect destination.
+
+When using the BPF_F_BROADCAST flag to do multicast redirect to a whole
+map, __bpf_xdp_redirect_map() sets the 'map' pointer in struct
+bpf_redirect_info to point to the destination map to be broadcast. And
+xdp_do_redirect() reacts to the value of this map pointer to decide whether
+it's dealing with a broadcast or a single-value redirect. However, if the
+destination map is being destroyed before xdp_do_redirect() is called, the
+map pointer will be cleared out (by bpf_clear_redirect_map()) without
+waiting for any XDP programs to stop running. This causes xdp_do_redirect()
+to think that the redirect was to a single target, but the target pointer
+is also NULL (since broadcast redirects don't have a single target), so
+this causes a crash when a NULL pointer is passed to dev_map_enqueue().
+
+To fix this, change xdp_do_redirect() to react directly to the presence of
+the BPF_F_BROADCAST flag in the 'flags' value in struct bpf_redirect_info
+to disambiguate between a single-target and a broadcast redirect. And only
+read the 'map' pointer if the broadcast flag is set, aborting if that has
+been cleared out in the meantime. This prevents the crash, while keeping
+the atomic (cmpxchg-based) clearing of the map pointer itself, and without
+adding any more checks in the non-broadcast fast path.
+
+Fixes: e624d4ed4aa8 ("xdp: Extend xdp_redirect_map with broadcast support")
+Reported-and-tested-by: syzbot+af9492708df9797198d6@syzkaller.appspotmail.com
+Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Acked-by: Stanislav Fomichev <sdf@google.com>
+Reviewed-by: Hangbin Liu <liuhangbin@gmail.com>
+Acked-by: Jesper Dangaard Brouer <hawk@kernel.org>
+Link: https://lore.kernel.org/r/20240418071840.156411-1-toke@redhat.com
+Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/filter.c | 42 ++++++++++++++++++++++++++++++++----------
+ 1 file changed, 32 insertions(+), 10 deletions(-)
+
+diff --git a/net/core/filter.c b/net/core/filter.c
+index b756951c92494..47eb1bd47aa6e 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -4019,10 +4019,12 @@ static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri,
+       enum bpf_map_type map_type = ri->map_type;
+       void *fwd = ri->tgt_value;
+       u32 map_id = ri->map_id;
++      u32 flags = ri->flags;
+       struct bpf_map *map;
+       int err;
+       ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
++      ri->flags = 0;
+       ri->map_type = BPF_MAP_TYPE_UNSPEC;
+       if (unlikely(!xdpf)) {
+@@ -4034,11 +4036,20 @@ static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri,
+       case BPF_MAP_TYPE_DEVMAP:
+               fallthrough;
+       case BPF_MAP_TYPE_DEVMAP_HASH:
+-              map = READ_ONCE(ri->map);
+-              if (unlikely(map)) {
++              if (unlikely(flags & BPF_F_BROADCAST)) {
++                      map = READ_ONCE(ri->map);
++
++                      /* The map pointer is cleared when the map is being torn
++                       * down by bpf_clear_redirect_map()
++                       */
++                      if (unlikely(!map)) {
++                              err = -ENOENT;
++                              break;
++                      }
++
+                       WRITE_ONCE(ri->map, NULL);
+                       err = dev_map_enqueue_multi(xdpf, dev, map,
+-                                                  ri->flags & BPF_F_EXCLUDE_INGRESS);
++                                                  flags & BPF_F_EXCLUDE_INGRESS);
+               } else {
+                       err = dev_map_enqueue(fwd, xdpf, dev);
+               }
+@@ -4101,9 +4112,9 @@ EXPORT_SYMBOL_GPL(xdp_do_redirect_frame);
+ static int xdp_do_generic_redirect_map(struct net_device *dev,
+                                      struct sk_buff *skb,
+                                      struct xdp_buff *xdp,
+-                                     struct bpf_prog *xdp_prog,
+-                                     void *fwd,
+-                                     enum bpf_map_type map_type, u32 map_id)
++                                     struct bpf_prog *xdp_prog, void *fwd,
++                                     enum bpf_map_type map_type, u32 map_id,
++                                     u32 flags)
+ {
+       struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+       struct bpf_map *map;
+@@ -4113,11 +4124,20 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
+       case BPF_MAP_TYPE_DEVMAP:
+               fallthrough;
+       case BPF_MAP_TYPE_DEVMAP_HASH:
+-              map = READ_ONCE(ri->map);
+-              if (unlikely(map)) {
++              if (unlikely(flags & BPF_F_BROADCAST)) {
++                      map = READ_ONCE(ri->map);
++
++                      /* The map pointer is cleared when the map is being torn
++                       * down by bpf_clear_redirect_map()
++                       */
++                      if (unlikely(!map)) {
++                              err = -ENOENT;
++                              break;
++                      }
++
+                       WRITE_ONCE(ri->map, NULL);
+                       err = dev_map_redirect_multi(dev, skb, xdp_prog, map,
+-                                                   ri->flags & BPF_F_EXCLUDE_INGRESS);
++                                                   flags & BPF_F_EXCLUDE_INGRESS);
+               } else {
+                       err = dev_map_generic_redirect(fwd, skb, xdp_prog);
+               }
+@@ -4154,9 +4174,11 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
+       enum bpf_map_type map_type = ri->map_type;
+       void *fwd = ri->tgt_value;
+       u32 map_id = ri->map_id;
++      u32 flags = ri->flags;
+       int err;
+       ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
++      ri->flags = 0;
+       ri->map_type = BPF_MAP_TYPE_UNSPEC;
+       if (map_type == BPF_MAP_TYPE_UNSPEC && map_id == INT_MAX) {
+@@ -4176,7 +4198,7 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
+               return 0;
+       }
+-      return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog, fwd, map_type, map_id);
++      return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog, fwd, map_type, map_id, flags);
+ err:
+       _trace_xdp_redirect_err(dev, xdp_prog, ri->tgt_index, err);
+       return err;
+-- 
+2.43.0
+
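For context, a sketch of the program side that reaches this code path (the map name and
sizing are illustrative; bpf_redirect_map(), BPF_F_BROADCAST and BPF_F_EXCLUDE_INGRESS
are the real UAPI): it is this flags value, now latched into and cleared from ri->flags,
that xdp_do_redirect() keys on before it ever looks at the broadcast map pointer.

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
            __uint(type, BPF_MAP_TYPE_DEVMAP);
            __uint(max_entries, 8);
            __type(key, __u32);
            __type(value, __u32);
    } demo_devmap SEC(".maps");

    SEC("xdp")
    int demo_broadcast(struct xdp_md *ctx)
    {
            /* the key (0) is ignored when BPF_F_BROADCAST is set */
            return bpf_redirect_map(&demo_devmap, 0,
                                    BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
    }

    char LICENSE[] SEC("license") = "GPL";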