--- /dev/null
+From 3e62282e9645465b6b3e5842ee671b911491027c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 Feb 2025 19:22:47 +0000
+Subject: afs: Fix the server_list to unuse a displaced server rather than
+ putting it
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit add117e48df4788a86a21bd0515833c0a6db1ad1 ]
+
+When allocating and building an afs_server_list struct object from a VLDB
+record, we look up each server address to get the server record for it -
+but a server may have more than one entry in the record and we discard the
+duplicate pointers. Currently, however, when we discard, we only put a
+server record, not unuse it - but the lookup took an active-user count.
+
+The active-user count on an afs_server_list object determines its lifetime
+whereas the refcount keeps the memory backing it around. Failing to reduce
+the active-user counter prevents the record from being cleaned up and can
+lead to multiple copies being seen - and pointing to deleted afs_cell
+objects and other such things.
+
+Fix this by switching the incorrect 'put' to an 'unuse' instead.
+
+Without this, occasionally, a dead server record can be seen in
+/proc/net/afs/servers and list corruption may be observed:
+
+ list_del corruption. prev->next should be ffff888102423e40, but was 0000000000000000. (prev=ffff88810140cd38)
+
+Fixes: 977e5f8ed0ab ("afs: Split the usage count on struct afs_server")
+Signed-off-by: David Howells <dhowells@redhat.com>
+cc: Marc Dionne <marc.dionne@auristor.com>
+cc: Simon Horman <horms@kernel.org>
+cc: linux-afs@lists.infradead.org
+Link: https://patch.msgid.link/20250218192250.296870-5-dhowells@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/afs/server_list.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c
+index 7e7e567a7f8a2..d20cd902ef949 100644
+--- a/fs/afs/server_list.c
++++ b/fs/afs/server_list.c
+@@ -97,8 +97,8 @@ struct afs_server_list *afs_alloc_server_list(struct afs_volume *volume,
+ break;
+ if (j < slist->nr_servers) {
+ if (slist->servers[j].server == server) {
+- afs_put_server(volume->cell->net, server,
+- afs_server_trace_put_slist_isort);
++ afs_unuse_server(volume->cell->net, server,
++ afs_server_trace_put_slist_isort);
+ continue;
+ }
+
+--
+2.39.5
+
--- /dev/null
+From fbdd6a2fbad77eac744a3896498541e9d8b049b7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 Feb 2025 19:22:48 +0000
+Subject: afs: Give an afs_server object a ref on the afs_cell object it points
+ to
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 1f0fc3374f3345ff1d150c5c56ac5016e5d3826a ]
+
+Give an afs_server object a ref on the afs_cell object it points to so that
+the cell doesn't get deleted before the server record.
+
+Whilst this is circular (cell -> vol -> server_list -> server -> cell), the
+ref only pins the memory, not the lifetime as that's controlled by the
+activity counter. When the volume's activity counter reaches 0, it
+detaches from the cell and discards its server list; when a cell's activity
+counter reaches 0, it discards its root volume. At that point, the
+circularity is cut.
+
+Fixes: d2ddc776a458 ("afs: Overhaul volume and server record caching and fileserver rotation")
+Signed-off-by: David Howells <dhowells@redhat.com>
+cc: Marc Dionne <marc.dionne@auristor.com>
+cc: Simon Horman <horms@kernel.org>
+cc: linux-afs@lists.infradead.org
+Link: https://patch.msgid.link/20250218192250.296870-6-dhowells@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/afs/server.c | 3 +++
+ include/trace/events/afs.h | 2 ++
+ 2 files changed, 5 insertions(+)
+
+diff --git a/fs/afs/server.c b/fs/afs/server.c
+index 038f9d0ae3af8..4504e16b458cc 100644
+--- a/fs/afs/server.c
++++ b/fs/afs/server.c
+@@ -163,6 +163,8 @@ static struct afs_server *afs_install_server(struct afs_cell *cell,
+ rb_insert_color(&server->uuid_rb, &net->fs_servers);
+ hlist_add_head_rcu(&server->proc_link, &net->fs_proc);
+
++ afs_get_cell(cell, afs_cell_trace_get_server);
++
+ added_dup:
+ write_seqlock(&net->fs_addr_lock);
+ estate = rcu_dereference_protected(server->endpoint_state,
+@@ -442,6 +444,7 @@ static void afs_server_rcu(struct rcu_head *rcu)
+ atomic_read(&server->active), afs_server_trace_free);
+ afs_put_endpoint_state(rcu_access_pointer(server->endpoint_state),
+ afs_estate_trace_put_server);
++ afs_put_cell(server->cell, afs_cell_trace_put_server);
+ kfree(server);
+ }
+
+diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
+index 9a75590227f26..3dddfc6abf0ee 100644
+--- a/include/trace/events/afs.h
++++ b/include/trace/events/afs.h
+@@ -173,6 +173,7 @@ enum yfs_cm_operation {
+ EM(afs_cell_trace_get_queue_dns, "GET q-dns ") \
+ EM(afs_cell_trace_get_queue_manage, "GET q-mng ") \
+ EM(afs_cell_trace_get_queue_new, "GET q-new ") \
++ EM(afs_cell_trace_get_server, "GET server") \
+ EM(afs_cell_trace_get_vol, "GET vol ") \
+ EM(afs_cell_trace_insert, "INSERT ") \
+ EM(afs_cell_trace_manage, "MANAGE ") \
+@@ -180,6 +181,7 @@ enum yfs_cm_operation {
+ EM(afs_cell_trace_put_destroy, "PUT destry") \
+ EM(afs_cell_trace_put_queue_work, "PUT q-work") \
+ EM(afs_cell_trace_put_queue_fail, "PUT q-fail") \
++ EM(afs_cell_trace_put_server, "PUT server") \
+ EM(afs_cell_trace_put_vol, "PUT vol ") \
+ EM(afs_cell_trace_see_source, "SEE source") \
+ EM(afs_cell_trace_see_ws, "SEE ws ") \
+--
+2.39.5
+
--- /dev/null
+From d24a2f771a876ba9e70efe720b20147a9b8aa8c6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Feb 2025 16:45:32 +0100
+Subject: ALSA: hda/realtek: Fix wrong mic setup for ASUS VivoBook 15
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 9e7c6779e3530bbdd465214afcd13f19c33e51a2 ]
+
+ASUS VivoBook 15 with SSID 1043:1460 took an incorrect quirk via the
+pin pattern matching for ASUS (ALC256_FIXUP_ASUS_MIC), resulting in
+two built-in mic pins (0x13 and 0x1b) being set up. This happened to
+work in the past because the right pin (0x1b) was picked up as the
+primary device. But since we fixed the pin enumeration for other bugs,
+the bogus one (0x13) is now picked up as the primary device, hence the
+bug has surfaced.
+
+To address the regression, this patch explicitly specifies the quirk
+entry with ALC256_FIXUP_ASUS_MIC_NO_PRESENCE, which sets up only the
+headset mic pin.
+
+Fixes: 3b4309546b48 ("ALSA: hda: Fix headset detection failure due to unstable sort")
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=219807
+Link: https://patch.msgid.link/20250225154540.13543-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/patch_realtek.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 9bf99fe6cd34d..63e22f5845f82 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -10564,6 +10564,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
+ SND_PCI_QUIRK(0x1043, 0x1433, "ASUS GX650PY/PZ/PV/PU/PYV/PZV/PIV/PVV", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC),
++ SND_PCI_QUIRK(0x1043, 0x1460, "Asus VivoBook 15", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1043, 0x1463, "Asus GA402X/GA402N", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1473, "ASUS GU604VI/VC/VE/VG/VJ/VQ/VU/VV/VY/VZ", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1483, "ASUS GU603VQ/VU/VV/VJ/VI", ALC285_FIXUP_ASUS_HEADSET_MIC),
+--
+2.39.5
+
--- /dev/null
+From e9149e47a721d7c0fa14e2ba78f3939496f3a54c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 Feb 2025 12:40:24 +0100
+Subject: ALSA: usb-audio: Avoid dropping MIDI events at closing multiple ports
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit a3bdd8f5c2217e1cb35db02c2eed36ea20fb50f5 ]
+
+We fixed the UAF issue in USB MIDI code by canceling the pending work
+at closing each MIDI output device in the commit below. However, this
+assumed that it was the only device tied to the endpoint, and it
+resulted in unexpected data truncation when multiple devices are
+assigned to a single endpoint and opened simultaneously.
+
+To address the unexpected MIDI message drops, simply replace
+cancel_work_sync() with flush_work(). The drain callback should
+already have been invoked before the close callback, hence the
+port->active flag must already be cleared. So this just ensures that
+the pending work is finished before freeing the resources.
+
+Fixes: 0125de38122f ("ALSA: usb-audio: Cancel pending work at closing a MIDI substream")
+Reported-and-tested-by: John Keeping <jkeeping@inmusicbrands.com>
+Closes: https://lore.kernel.org/20250217111647.3368132-1-jkeeping@inmusicbrands.com
+Link: https://patch.msgid.link/20250218114024.23125-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/midi.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/usb/midi.c b/sound/usb/midi.c
+index 737dd00e97b14..779d97d31f170 100644
+--- a/sound/usb/midi.c
++++ b/sound/usb/midi.c
+@@ -1145,7 +1145,7 @@ static int snd_usbmidi_output_close(struct snd_rawmidi_substream *substream)
+ {
+ struct usbmidi_out_port *port = substream->runtime->private_data;
+
+- cancel_work_sync(&port->ep->work);
++ flush_work(&port->ep->work);
+ return substream_open(substream, 0, 0);
+ }
+
+--
+2.39.5
+
--- /dev/null
+From 0bd104ea95a6682bd30acda294e163c2ab22f388 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Feb 2025 13:18:43 +0000
+Subject: ASoC: cs35l56: Prevent races when soft-resetting using SPI control
+
+From: Richard Fitzgerald <rf@opensource.cirrus.com>
+
+[ Upstream commit 769c1b79295c38d60fde4c0a8f5f31e01360c54f ]
+
+When SPI is used for control, the driver must hold the SPI bus lock
+while issuing the sequence of writes to perform a soft reset.
+
+From the time the driver writes the SYSTEM_RESET command until the
+driver does a write to terminate the reset, there must not be any
+activity on the SPI bus lines. If there is any SPI activity during the
+soft-reset, another soft-reset will be triggered. The state of the SPI
+chip select is irrelevant.
+
+A repeated soft-reset does not in itself cause any problems, and it is
+not an infinite loop. The problem is a race between these resets and
+the driver polling for boot completion. There is a time window between
+soft resets where the driver could read HALO_STATE as 2 (fully booted)
+while the chip is actually soft-resetting. Although this window is
+small, it is long enough that it is possible to hit it in normal
+operation.
+
+To prevent this race and ensure the chip really is fully booted, the
+driver calls spi_bus_lock() to prevent other activity while resetting.
+It then issues the SYSTEM_RESET mailbox command. After allowing
+sufficient time for reset to take effect, the driver issues a PING
+mailbox command, which will force completion of the full soft-reset
+sequence. The SPI bus lock can then be released. The mailbox is
+checked for any boot or wakeup response from the firmware, before the
+value in HALO_STATE will be trusted.
+
+This does not affect SoundWire or I2C control.
+
+Fixes: 8a731fd37f8b ("ASoC: cs35l56: Move utility functions to shared file")
+Signed-off-by: Richard Fitzgerald <rf@opensource.cirrus.com>
+Link: https://patch.msgid.link/20250225131843.113752-3-rf@opensource.cirrus.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/sound/cs35l56.h | 31 ++++++++++++
+ sound/pci/hda/cs35l56_hda_spi.c | 3 ++
+ sound/soc/codecs/cs35l56-shared.c | 80 +++++++++++++++++++++++++++++++
+ sound/soc/codecs/cs35l56-spi.c | 3 ++
+ 4 files changed, 117 insertions(+)
+
+diff --git a/include/sound/cs35l56.h b/include/sound/cs35l56.h
+index 3dc7a1551ac35..5d653a3491d07 100644
+--- a/include/sound/cs35l56.h
++++ b/include/sound/cs35l56.h
+@@ -12,6 +12,7 @@
+ #include <linux/firmware/cirrus/cs_dsp.h>
+ #include <linux/regulator/consumer.h>
+ #include <linux/regmap.h>
++#include <linux/spi/spi.h>
+ #include <sound/cs-amp-lib.h>
+
+ #define CS35L56_DEVID 0x0000000
+@@ -61,6 +62,7 @@
+ #define CS35L56_IRQ1_MASK_8 0x000E0AC
+ #define CS35L56_IRQ1_MASK_18 0x000E0D4
+ #define CS35L56_IRQ1_MASK_20 0x000E0DC
++#define CS35L56_DSP_MBOX_1_RAW 0x0011000
+ #define CS35L56_DSP_VIRTUAL1_MBOX_1 0x0011020
+ #define CS35L56_DSP_VIRTUAL1_MBOX_2 0x0011024
+ #define CS35L56_DSP_VIRTUAL1_MBOX_3 0x0011028
+@@ -224,6 +226,7 @@
+ #define CS35L56_HALO_STATE_SHUTDOWN 1
+ #define CS35L56_HALO_STATE_BOOT_DONE 2
+
++#define CS35L56_MBOX_CMD_PING 0x0A000000
+ #define CS35L56_MBOX_CMD_AUDIO_PLAY 0x0B000001
+ #define CS35L56_MBOX_CMD_AUDIO_PAUSE 0x0B000002
+ #define CS35L56_MBOX_CMD_AUDIO_REINIT 0x0B000003
+@@ -254,6 +257,16 @@
+ #define CS35L56_NUM_BULK_SUPPLIES 3
+ #define CS35L56_NUM_DSP_REGIONS 5
+
++/* Additional margin for SYSTEM_RESET to control port ready on SPI */
++#define CS35L56_SPI_RESET_TO_PORT_READY_US (CS35L56_CONTROL_PORT_READY_US + 2500)
++
++struct cs35l56_spi_payload {
++ __be32 addr;
++ __be16 pad;
++ __be32 value;
++} __packed;
++static_assert(sizeof(struct cs35l56_spi_payload) == 10);
++
+ struct cs35l56_base {
+ struct device *dev;
+ struct regmap *regmap;
+@@ -269,6 +282,7 @@ struct cs35l56_base {
+ s8 cal_index;
+ struct cirrus_amp_cal_data cal_data;
+ struct gpio_desc *reset_gpio;
++ struct cs35l56_spi_payload *spi_payload_buf;
+ };
+
+ static inline bool cs35l56_is_otp_register(unsigned int reg)
+@@ -276,6 +290,23 @@ static inline bool cs35l56_is_otp_register(unsigned int reg)
+ return (reg >> 16) == 3;
+ }
+
++static inline int cs35l56_init_config_for_spi(struct cs35l56_base *cs35l56,
++ struct spi_device *spi)
++{
++ cs35l56->spi_payload_buf = devm_kzalloc(&spi->dev,
++ sizeof(*cs35l56->spi_payload_buf),
++ GFP_KERNEL | GFP_DMA);
++ if (!cs35l56->spi_payload_buf)
++ return -ENOMEM;
++
++ return 0;
++}
++
++static inline bool cs35l56_is_spi(struct cs35l56_base *cs35l56)
++{
++ return IS_ENABLED(CONFIG_SPI_MASTER) && !!cs35l56->spi_payload_buf;
++}
++
+ extern const struct regmap_config cs35l56_regmap_i2c;
+ extern const struct regmap_config cs35l56_regmap_spi;
+ extern const struct regmap_config cs35l56_regmap_sdw;
+diff --git a/sound/pci/hda/cs35l56_hda_spi.c b/sound/pci/hda/cs35l56_hda_spi.c
+index 7f02155fe61e3..7c94110b6272a 100644
+--- a/sound/pci/hda/cs35l56_hda_spi.c
++++ b/sound/pci/hda/cs35l56_hda_spi.c
+@@ -22,6 +22,9 @@ static int cs35l56_hda_spi_probe(struct spi_device *spi)
+ return -ENOMEM;
+
+ cs35l56->base.dev = &spi->dev;
++ ret = cs35l56_init_config_for_spi(&cs35l56->base, spi);
++ if (ret)
++ return ret;
+
+ #ifdef CS35L56_WAKE_HOLD_TIME_US
+ cs35l56->base.can_hibernate = true;
+diff --git a/sound/soc/codecs/cs35l56-shared.c b/sound/soc/codecs/cs35l56-shared.c
+index e45e9ae01bc66..195841a567c3d 100644
+--- a/sound/soc/codecs/cs35l56-shared.c
++++ b/sound/soc/codecs/cs35l56-shared.c
+@@ -10,6 +10,7 @@
+ #include <linux/gpio/consumer.h>
+ #include <linux/regmap.h>
+ #include <linux/regulator/consumer.h>
++#include <linux/spi/spi.h>
+ #include <linux/types.h>
+ #include <sound/cs-amp-lib.h>
+
+@@ -303,6 +304,79 @@ void cs35l56_wait_min_reset_pulse(void)
+ }
+ EXPORT_SYMBOL_NS_GPL(cs35l56_wait_min_reset_pulse, SND_SOC_CS35L56_SHARED);
+
++static const struct {
++ u32 addr;
++ u32 value;
++} cs35l56_spi_system_reset_stages[] = {
++ { .addr = CS35L56_DSP_VIRTUAL1_MBOX_1, .value = CS35L56_MBOX_CMD_SYSTEM_RESET },
++ /* The next write is necessary to delimit the soft reset */
++ { .addr = CS35L56_DSP_MBOX_1_RAW, .value = CS35L56_MBOX_CMD_PING },
++};
++
++static void cs35l56_spi_issue_bus_locked_reset(struct cs35l56_base *cs35l56_base,
++ struct spi_device *spi)
++{
++ struct cs35l56_spi_payload *buf = cs35l56_base->spi_payload_buf;
++ struct spi_transfer t = {
++ .tx_buf = buf,
++ .len = sizeof(*buf),
++ };
++ struct spi_message m;
++ int i, ret;
++
++ for (i = 0; i < ARRAY_SIZE(cs35l56_spi_system_reset_stages); i++) {
++ buf->addr = cpu_to_be32(cs35l56_spi_system_reset_stages[i].addr);
++ buf->value = cpu_to_be32(cs35l56_spi_system_reset_stages[i].value);
++ spi_message_init_with_transfers(&m, &t, 1);
++ ret = spi_sync_locked(spi, &m);
++ if (ret)
++ dev_warn(cs35l56_base->dev, "spi_sync failed: %d\n", ret);
++
++ usleep_range(CS35L56_SPI_RESET_TO_PORT_READY_US,
++ 2 * CS35L56_SPI_RESET_TO_PORT_READY_US);
++ }
++}
++
++static void cs35l56_spi_system_reset(struct cs35l56_base *cs35l56_base)
++{
++ struct spi_device *spi = to_spi_device(cs35l56_base->dev);
++ unsigned int val;
++ int read_ret, ret;
++
++ /*
++ * There must not be any other SPI bus activity while the amp is
++ * soft-resetting.
++ */
++ ret = spi_bus_lock(spi->controller);
++ if (ret) {
++ dev_warn(cs35l56_base->dev, "spi_bus_lock failed: %d\n", ret);
++ return;
++ }
++
++ cs35l56_spi_issue_bus_locked_reset(cs35l56_base, spi);
++ spi_bus_unlock(spi->controller);
++
++ /*
++ * Check firmware boot by testing for a response in MBOX_2.
++ * HALO_STATE cannot be trusted yet because the reset sequence
++ * can leave it with stale state. But MBOX is reset.
++ * The regmap must remain in cache-only until the chip has
++ * booted, so use a bypassed read.
++ */
++ ret = read_poll_timeout(regmap_read_bypassed, read_ret,
++ (val > 0) && (val < 0xffffffff),
++ CS35L56_HALO_STATE_POLL_US,
++ CS35L56_HALO_STATE_TIMEOUT_US,
++ false,
++ cs35l56_base->regmap,
++ CS35L56_DSP_VIRTUAL1_MBOX_2,
++ &val);
++ if (ret) {
++ dev_err(cs35l56_base->dev, "SPI reboot timed out(%d): MBOX2=%#x\n",
++ read_ret, val);
++ }
++}
++
+ static const struct reg_sequence cs35l56_system_reset_seq[] = {
+ REG_SEQ0(CS35L56_DSP1_HALO_STATE, 0),
+ REG_SEQ0(CS35L56_DSP_VIRTUAL1_MBOX_1, CS35L56_MBOX_CMD_SYSTEM_RESET),
+@@ -315,6 +389,12 @@ void cs35l56_system_reset(struct cs35l56_base *cs35l56_base, bool is_soundwire)
+ * accesses other than the controlled system reset sequence below.
+ */
+ regcache_cache_only(cs35l56_base->regmap, true);
++
++ if (cs35l56_is_spi(cs35l56_base)) {
++ cs35l56_spi_system_reset(cs35l56_base);
++ return;
++ }
++
+ regmap_multi_reg_write_bypassed(cs35l56_base->regmap,
+ cs35l56_system_reset_seq,
+ ARRAY_SIZE(cs35l56_system_reset_seq));
+diff --git a/sound/soc/codecs/cs35l56-spi.c b/sound/soc/codecs/cs35l56-spi.c
+index b07b798b0b45d..568f554a8638b 100644
+--- a/sound/soc/codecs/cs35l56-spi.c
++++ b/sound/soc/codecs/cs35l56-spi.c
+@@ -33,6 +33,9 @@ static int cs35l56_spi_probe(struct spi_device *spi)
+
+ cs35l56->base.dev = &spi->dev;
+ cs35l56->base.can_hibernate = true;
++ ret = cs35l56_init_config_for_spi(&cs35l56->base, spi);
++ if (ret)
++ return ret;
+
+ ret = cs35l56_common_probe(cs35l56);
+ if (ret != 0)
+--
+2.39.5
+
--- /dev/null
+From 4dfacc9da3ad6265c90f5f00423c99b20f669246 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 22 Feb 2025 20:39:57 +0100
+Subject: ASoC: es8328: fix route from DAC to output
+
+From: Nicolas Frattaroli <nicolas.frattaroli@collabora.com>
+
+[ Upstream commit 5b0c02f9b8acf2a791e531bbc09acae2d51f4f9b ]
+
+The ES8328 codec driver, which is also used for the ES8388 chip that
+appears to have an identical register map, claims that the output can
+either take the route from DAC->Mixer->Output or through DAC->Output
+directly. To the best of what I could find, this is not true, and
+creates problems.
+
+Without DACCONTROL17 bit index 7 set for the left channel, as well as
+DACCONTROL20 bit index 7 set for the right channel, I cannot get any
+analog audio out on Left Out 2 and Right Out 2 respectively, despite the
+DAPM routes claiming that this should be possible. Furthermore, the same
+is the case for Left Out 1 and Right Out 1, showing that those two don't
+have a direct route from DAC to output bypassing the mixer either.
+
+Those control bits toggle whether the DACs are fed (stale bread?) into
+their respective mixers. If one "unmutes" the mixer controls in
+alsamixer, then sure, the audio output works, but if it doesn't work
+without the mixer being fed the DAC input then evidently it's not a
+direct output from the DAC.
+
+ES8328/ES8388 are seemingly not alone in this. ES8323, which uses a
+separate driver for what appears to be a very similar register map,
+simply flips those two bits on in its probe function, and then pretends
+there is no power management whatsoever for the individual controls.
+Fair enough.
+
+My theory as to why nobody has noticed this up to this point is that
+everyone just assumes it's their fault when they have to unmute an
+additional control in ALSA.
+
+Fix this in the es8328 driver by removing the erroneous direct route,
+then get rid of the playback switch controls and have those bits tied to
+the mixer's widget instead, which until now had no register to play
+with.
+
+Fixes: 567e4f98922c ("ASoC: add es8328 codec driver")
+Signed-off-by: Nicolas Frattaroli <nicolas.frattaroli@collabora.com>
+Link: https://patch.msgid.link/20250222-es8328-route-bludgeoning-v1-1-99bfb7fb22d9@collabora.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/codecs/es8328.c | 15 ++++-----------
+ 1 file changed, 4 insertions(+), 11 deletions(-)
+
+diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c
+index f3c97da798dc8..76159c45e6b52 100644
+--- a/sound/soc/codecs/es8328.c
++++ b/sound/soc/codecs/es8328.c
+@@ -233,7 +233,6 @@ static const struct snd_kcontrol_new es8328_right_line_controls =
+
+ /* Left Mixer */
+ static const struct snd_kcontrol_new es8328_left_mixer_controls[] = {
+- SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL17, 7, 1, 0),
+ SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL17, 6, 1, 0),
+ SOC_DAPM_SINGLE("Right Playback Switch", ES8328_DACCONTROL18, 7, 1, 0),
+ SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL18, 6, 1, 0),
+@@ -243,7 +242,6 @@ static const struct snd_kcontrol_new es8328_left_mixer_controls[] = {
+ static const struct snd_kcontrol_new es8328_right_mixer_controls[] = {
+ SOC_DAPM_SINGLE("Left Playback Switch", ES8328_DACCONTROL19, 7, 1, 0),
+ SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL19, 6, 1, 0),
+- SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL20, 7, 1, 0),
+ SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL20, 6, 1, 0),
+ };
+
+@@ -336,10 +334,10 @@ static const struct snd_soc_dapm_widget es8328_dapm_widgets[] = {
+ SND_SOC_DAPM_DAC("Left DAC", "Left Playback", ES8328_DACPOWER,
+ ES8328_DACPOWER_LDAC_OFF, 1),
+
+- SND_SOC_DAPM_MIXER("Left Mixer", SND_SOC_NOPM, 0, 0,
++ SND_SOC_DAPM_MIXER("Left Mixer", ES8328_DACCONTROL17, 7, 0,
+ &es8328_left_mixer_controls[0],
+ ARRAY_SIZE(es8328_left_mixer_controls)),
+- SND_SOC_DAPM_MIXER("Right Mixer", SND_SOC_NOPM, 0, 0,
++ SND_SOC_DAPM_MIXER("Right Mixer", ES8328_DACCONTROL20, 7, 0,
+ &es8328_right_mixer_controls[0],
+ ARRAY_SIZE(es8328_right_mixer_controls)),
+
+@@ -418,19 +416,14 @@ static const struct snd_soc_dapm_route es8328_dapm_routes[] = {
+ { "Right Line Mux", "PGA", "Right PGA Mux" },
+ { "Right Line Mux", "Differential", "Differential Mux" },
+
+- { "Left Out 1", NULL, "Left DAC" },
+- { "Right Out 1", NULL, "Right DAC" },
+- { "Left Out 2", NULL, "Left DAC" },
+- { "Right Out 2", NULL, "Right DAC" },
+-
+- { "Left Mixer", "Playback Switch", "Left DAC" },
++ { "Left Mixer", NULL, "Left DAC" },
+ { "Left Mixer", "Left Bypass Switch", "Left Line Mux" },
+ { "Left Mixer", "Right Playback Switch", "Right DAC" },
+ { "Left Mixer", "Right Bypass Switch", "Right Line Mux" },
+
+ { "Right Mixer", "Left Playback Switch", "Left DAC" },
+ { "Right Mixer", "Left Bypass Switch", "Left Line Mux" },
+- { "Right Mixer", "Playback Switch", "Right DAC" },
++ { "Right Mixer", NULL, "Right DAC" },
+ { "Right Mixer", "Right Bypass Switch", "Right Line Mux" },
+
+ { "DAC DIG", NULL, "DAC STM" },
+--
+2.39.5
+
--- /dev/null
+From 0931f27ea7b43bf0e9dc880f0cbe1c0e23b75905 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 Feb 2025 10:04:37 +0900
+Subject: ASoC: fsl: Rename stream name of SAI DAI driver
+
+From: Chancel Liu <chancel.liu@nxp.com>
+
+[ Upstream commit 0da83ab025bc45e9742e87c2cce19bff423377c8 ]
+
+If the stream names of a DAI driver are duplicated, there will be
+warnings when the machine driver tries to add widgets on a route:
+
+[ 8.831335] fsl-asoc-card sound-wm8960: ASoC: sink widget CPU-Playback overwritten
+[ 8.839917] fsl-asoc-card sound-wm8960: ASoC: source widget CPU-Capture overwritten
+
+Use different stream names to avoid such warnings.
+DAI names in AUDMIX are also updated accordingly.
+
+Fixes: 15c958390460 ("ASoC: fsl_sai: Add separate DAI for transmitter and receiver")
+Signed-off-by: Chancel Liu <chancel.liu@nxp.com>
+Acked-by: Shengjiu Wang <shengjiu.wang@gmail.com>
+Link: https://patch.msgid.link/20250217010437.258621-1-chancel.liu@nxp.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/fsl/fsl_sai.c | 6 +++---
+ sound/soc/fsl/imx-audmix.c | 4 ++--
+ 2 files changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
+index 634168d2bb6e5..c5efbceb06d1f 100644
+--- a/sound/soc/fsl/fsl_sai.c
++++ b/sound/soc/fsl/fsl_sai.c
+@@ -994,10 +994,10 @@ static struct snd_soc_dai_driver fsl_sai_dai_template[] = {
+ {
+ .name = "sai-tx",
+ .playback = {
+- .stream_name = "CPU-Playback",
++ .stream_name = "SAI-Playback",
+ .channels_min = 1,
+ .channels_max = 32,
+- .rate_min = 8000,
++ .rate_min = 8000,
+ .rate_max = 2822400,
+ .rates = SNDRV_PCM_RATE_KNOT,
+ .formats = FSL_SAI_FORMATS,
+@@ -1007,7 +1007,7 @@ static struct snd_soc_dai_driver fsl_sai_dai_template[] = {
+ {
+ .name = "sai-rx",
+ .capture = {
+- .stream_name = "CPU-Capture",
++ .stream_name = "SAI-Capture",
+ .channels_min = 1,
+ .channels_max = 32,
+ .rate_min = 8000,
+diff --git a/sound/soc/fsl/imx-audmix.c b/sound/soc/fsl/imx-audmix.c
+index ff3671226306b..ca33ecad07521 100644
+--- a/sound/soc/fsl/imx-audmix.c
++++ b/sound/soc/fsl/imx-audmix.c
+@@ -119,8 +119,8 @@ static const struct snd_soc_ops imx_audmix_be_ops = {
+ static const char *name[][3] = {
+ {"HiFi-AUDMIX-FE-0", "HiFi-AUDMIX-FE-1", "HiFi-AUDMIX-FE-2"},
+ {"sai-tx", "sai-tx", "sai-rx"},
+- {"AUDMIX-Playback-0", "AUDMIX-Playback-1", "CPU-Capture"},
+- {"CPU-Playback", "CPU-Playback", "AUDMIX-Capture-0"},
++ {"AUDMIX-Playback-0", "AUDMIX-Playback-1", "SAI-Capture"},
++ {"SAI-Playback", "SAI-Playback", "AUDMIX-Capture-0"},
+ };
+
+ static int imx_audmix_probe(struct platform_device *pdev)
+--
+2.39.5
+
--- /dev/null
+From a6f5a2e72e3f33ba8649cee113c42e3bc983d6ab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Feb 2025 10:30:25 -0500
+Subject: Bluetooth: L2CAP: Fix L2CAP_ECRED_CONN_RSP response
+
+From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+
+[ Upstream commit b25120e1d5f2ebb3db00af557709041f47f7f3d0 ]
+
+L2CAP_ECRED_CONN_RSP needs to respond with the DCIDs in the same order
+as the SCIDs were received, but the order is reversed due to the use of
+list_add, which actually prepends channels to the list, so the response
+is reversed:
+
+> ACL Data RX: Handle 16 flags 0x02 dlen 26
+ LE L2CAP: Enhanced Credit Connection Request (0x17) ident 2 len 18
+ PSM: 39 (0x0027)
+ MTU: 256
+ MPS: 251
+ Credits: 65535
+ Source CID: 116
+ Source CID: 117
+ Source CID: 118
+ Source CID: 119
+ Source CID: 120
+< ACL Data TX: Handle 16 flags 0x00 dlen 26
+ LE L2CAP: Enhanced Credit Connection Response (0x18) ident 2 len 18
+ MTU: 517
+ MPS: 247
+ Credits: 3
+ Result: Connection successful (0x0000)
+ Destination CID: 68
+ Destination CID: 67
+ Destination CID: 66
+ Destination CID: 65
+ Destination CID: 64
+
+Also make sure the response doesn't include channels that are not in
+the BT_CONNECT2 state, since chan->ident can be set to the same value,
+as in the following trace:
+
+< ACL Data TX: Handle 16 flags 0x00 dlen 12
+ LE L2CAP: LE Flow Control Credit (0x16) ident 6 len 4
+ Source CID: 64
+ Credits: 1
+...
+> ACL Data RX: Handle 16 flags 0x02 dlen 18
+ LE L2CAP: Enhanced Credit Connection Request (0x17) ident 6 len 10
+ PSM: 39 (0x0027)
+ MTU: 517
+ MPS: 251
+ Credits: 255
+ Source CID: 70
+< ACL Data TX: Handle 16 flags 0x00 dlen 20
+ LE L2CAP: Enhanced Credit Connection Response (0x18) ident 6 len 12
+ MTU: 517
+ MPS: 247
+ Credits: 3
+ Result: Connection successful (0x0000)
+ Destination CID: 64
+ Destination CID: 68
+
+Closes: https://github.com/bluez/bluez/issues/1094
+Fixes: 9aa9d9473f15 ("Bluetooth: L2CAP: Fix responding with wrong PDU type")
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/l2cap_core.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 27b4c4a2ba1fd..728a5ce9b5058 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -636,7 +636,8 @@ void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
+ test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
+ hci_conn_hold(conn->hcon);
+
+- list_add(&chan->list, &conn->chan_l);
++ /* Append to the list since the order matters for ECRED */
++ list_add_tail(&chan->list, &conn->chan_l);
+ }
+
+ void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
+@@ -3776,7 +3777,11 @@ static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
+ struct l2cap_ecred_conn_rsp *rsp_flex =
+ container_of(&rsp->pdu.rsp, struct l2cap_ecred_conn_rsp, hdr);
+
+- if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
++ /* Check if channel for outgoing connection or if it wasn't deferred
++ * since in those cases it must be skipped.
++ */
++ if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags) ||
++ !test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
+ return;
+
+ /* Reset ident so only one response is sent */
+--
+2.39.5
+
--- /dev/null
+From 4bffe64c241308d2e8fc77450f4818c04bfdd097 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Feb 2025 10:27:54 +0530
+Subject: drm/xe: cancel pending job timer before freeing scheduler
+
+From: Tejas Upadhyay <tejas.upadhyay@intel.com>
+
+[ Upstream commit 12c2f962fe71f390951d9242725bc7e608f55927 ]
+
+The async call to __guc_exec_queue_fini_async frees the scheduler
+while a submission may time out and restart. To prevent this race
+condition, the pending job timer should be canceled before freeing
+the scheduler.
+
+V3(MattB):
+ - Adjust position of cancel pending job
+ - Remove gitlab issue# from commit message
+V2(MattB):
+ - Cancel pending jobs before scheduler finish
+
+Fixes: a20c75dba192 ("drm/xe: Call __guc_exec_queue_fini_async direct for KERNEL exec_queues")
+Reviewed-by: Matthew Brost <matthew.brost@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20250225045754.600905-1-tejas.upadhyay@intel.com
+Signed-off-by: Tejas Upadhyay <tejas.upadhyay@intel.com>
+(cherry picked from commit 18fbd567e75f9b97b699b2ab4f1fa76b7cf268f6)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_guc_submit.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
+index fed23304e4da5..3fd2b28b91ab9 100644
+--- a/drivers/gpu/drm/xe/xe_guc_submit.c
++++ b/drivers/gpu/drm/xe/xe_guc_submit.c
+@@ -1215,6 +1215,8 @@ static void __guc_exec_queue_fini_async(struct work_struct *w)
+
+ if (xe_exec_queue_is_lr(q))
+ cancel_work_sync(&ge->lr_tdr);
++ /* Confirm no work left behind accessing device structures */
++ cancel_delayed_work_sync(&ge->sched.base.work_tdr);
+ release_guc_id(guc, q);
+ xe_sched_entity_fini(&ge->entity);
+ xe_sched_fini(&ge->sched);
+--
+2.39.5
+
--- /dev/null
+From 710e9fc82047bc62266558094ce4c06871a4e241 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Oct 2024 13:03:51 -0700
+Subject: drm/xe/oa: Add syncs support to OA config ioctl
+
+From: Ashutosh Dixit <ashutosh.dixit@intel.com>
+
+[ Upstream commit 9920c8b88c5cf2e44f4ff508dd3c0c96e4364db0 ]
+
+In addition to stream open, add xe_sync support to the OA config ioctl,
+where it is even more useful. This allows e.g. Mesa to replay a workload
+repeatedly on the GPU, each time with a different OA configuration, while
+precisely controlling (at batch buffer granularity) the workload segment
+for which a particular OA configuration is active, without introducing
+stalls in the userspace pipeline.
+
+v2: Emit OA config even when config id is same as previous, to ensure
+ consistent sync behavior (Jose)
+
+Reviewed-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
+Signed-off-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20241022200352.1192560-7-ashutosh.dixit@intel.com
+Stable-dep-of: 5bd566703e16 ("drm/xe/oa: Allow oa_exponent value of 0")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_oa.c | 41 ++++++++++++++++++--------------
+ drivers/gpu/drm/xe/xe_oa_types.h | 3 +++
+ 2 files changed, 26 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
+index dd541b62942f8..78f662fd197c4 100644
+--- a/drivers/gpu/drm/xe/xe_oa.c
++++ b/drivers/gpu/drm/xe/xe_oa.c
+@@ -844,6 +844,7 @@ static void xe_oa_stream_destroy(struct xe_oa_stream *stream)
+ 	xe_gt_WARN_ON(gt, xe_guc_pc_unset_gucrc_mode(&gt->uc.guc.pc));
+
+ xe_oa_free_configs(stream);
++ xe_file_put(stream->xef);
+ }
+
+ static int xe_oa_alloc_oa_buffer(struct xe_oa_stream *stream)
+@@ -1413,36 +1414,38 @@ static int xe_oa_disable_locked(struct xe_oa_stream *stream)
+
+ static long xe_oa_config_locked(struct xe_oa_stream *stream, u64 arg)
+ {
+- struct drm_xe_ext_set_property ext;
++ struct xe_oa_open_param param = {};
+ long ret = stream->oa_config->id;
+ struct xe_oa_config *config;
+ int err;
+
+- err = __copy_from_user(&ext, u64_to_user_ptr(arg), sizeof(ext));
+- if (XE_IOCTL_DBG(stream->oa->xe, err))
+- return -EFAULT;
+-
+- if (XE_IOCTL_DBG(stream->oa->xe, ext.pad) ||
+- XE_IOCTL_DBG(stream->oa->xe, ext.base.name != DRM_XE_OA_EXTENSION_SET_PROPERTY) ||
+- XE_IOCTL_DBG(stream->oa->xe, ext.base.next_extension) ||
+- XE_IOCTL_DBG(stream->oa->xe, ext.property != DRM_XE_OA_PROPERTY_OA_METRIC_SET))
+- return -EINVAL;
++	err = xe_oa_user_extensions(stream->oa, arg, 0, &param);
++ if (err)
++ return err;
+
+- config = xe_oa_get_oa_config(stream->oa, ext.value);
++ config = xe_oa_get_oa_config(stream->oa, param.metric_set);
+ if (!config)
+ return -ENODEV;
+
+- if (config != stream->oa_config) {
+- err = xe_oa_emit_oa_config(stream, config);
+- if (!err)
+- config = xchg(&stream->oa_config, config);
+- else
+- ret = err;
++ param.xef = stream->xef;
++	err = xe_oa_parse_syncs(stream->oa, &param);
++ if (err)
++ goto err_config_put;
++
++ stream->num_syncs = param.num_syncs;
++ stream->syncs = param.syncs;
++
++ err = xe_oa_emit_oa_config(stream, config);
++ if (!err) {
++ config = xchg(&stream->oa_config, config);
++ drm_dbg(&stream->oa->xe->drm, "changed to oa config uuid=%s\n",
++ stream->oa_config->uuid);
+ }
+
++err_config_put:
+ xe_oa_config_put(config);
+
+- return ret;
++ return err ?: ret;
+ }
+
+ static long xe_oa_status_locked(struct xe_oa_stream *stream, unsigned long arg)
+@@ -1685,6 +1688,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
+ stream->period_exponent = param->period_exponent;
+ stream->no_preempt = param->no_preempt;
+
++ stream->xef = xe_file_get(param->xef);
+ stream->num_syncs = param->num_syncs;
+ stream->syncs = param->syncs;
+
+@@ -1784,6 +1788,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
+ err_free_configs:
+ xe_oa_free_configs(stream);
+ exit:
++ xe_file_put(stream->xef);
+ return ret;
+ }
+
+diff --git a/drivers/gpu/drm/xe/xe_oa_types.h b/drivers/gpu/drm/xe/xe_oa_types.h
+index c8e0df13faf83..fea9d981e414f 100644
+--- a/drivers/gpu/drm/xe/xe_oa_types.h
++++ b/drivers/gpu/drm/xe/xe_oa_types.h
+@@ -239,6 +239,9 @@ struct xe_oa_stream {
+ /** @no_preempt: Whether preemption and timeslicing is disabled for stream exec_q */
+ u32 no_preempt;
+
++ /** @xef: xe_file with which the stream was opened */
++ struct xe_file *xef;
++
+ /** @last_fence: fence to use in stream destroy when needed */
+ struct dma_fence *last_fence;
+
+--
+2.39.5
+
--- /dev/null
+From 936408b90628e028f93898da712e5aa022dea6a8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Feb 2025 13:33:52 -0800
+Subject: drm/xe/oa: Allow oa_exponent value of 0
+
+From: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
+
+[ Upstream commit 5bd566703e16b17d17f4fb648440d54f8967462c ]
+
+An OA exponent value of 0 is a valid value for periodic reports. Allow
+the user to pass 0 for the OA sampling interval since it gets converted
+to 2 gt clock ticks.
+
+v2: Update the check in xe_oa_stream_init as well (Ashutosh)
+v3: Fix mi-rpc failure by setting default exponent to -1 (CI)
+v4: Add the Fixes tag
+
+Fixes: b6fd51c62119 ("drm/xe/oa/uapi: Define and parse OA stream properties")
+Signed-off-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
+Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20250221213352.1712932-1-umesh.nerlige.ramappa@intel.com
+(cherry picked from commit 30341f0b8ea71725cc4ab2c43e3a3b749892fc92)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_oa.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
+index 210b8bae59102..448766033690c 100644
+--- a/drivers/gpu/drm/xe/xe_oa.c
++++ b/drivers/gpu/drm/xe/xe_oa.c
+@@ -1716,7 +1716,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
+ stream->oa_buffer.format = &stream->oa->oa_formats[param->oa_format];
+
+ stream->sample = param->sample;
+- stream->periodic = param->period_exponent > 0;
++ stream->periodic = param->period_exponent >= 0;
+ stream->period_exponent = param->period_exponent;
+ stream->no_preempt = param->no_preempt;
+
+@@ -2002,6 +2002,7 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
+ }
+
+ param.xef = xef;
++ param.period_exponent = -1;
+ 	ret = xe_oa_user_extensions(oa, XE_OA_USER_EXTN_FROM_OPEN, data, 0, &param);
+ if (ret)
+ return ret;
+@@ -2056,7 +2057,7 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
+ goto err_exec_q;
+ }
+
+- if (param.period_exponent > 0) {
++ if (param.period_exponent >= 0) {
+ u64 oa_period, oa_freq_hz;
+
+ /* Requesting samples from OAG buffer is a privileged operation */
+--
+2.39.5
+
--- /dev/null
+From 5244ca209dfa52405ab68f2d75567fe59233732d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Oct 2024 13:03:52 -0700
+Subject: drm/xe/oa: Allow only certain property changes from config
+
+From: Ashutosh Dixit <ashutosh.dixit@intel.com>
+
+[ Upstream commit 85d3f9e84e0628c412b69aa99b63654dfa08ad68 ]
+
+Whereas all properties can be specified during OA stream open, when the OA
+stream is reconfigured only the config_id and syncs can be specified.
+
+v2: Use separate function table for reconfig case (Jonathan)
+ Change bool function args to enum (Matt B)
+v3: s/xe_oa_set_property_funcs/xe_oa_set_property_funcs_open/ (Jonathan)
+
+Reviewed-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
+Suggested-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
+Signed-off-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20241022200352.1192560-8-ashutosh.dixit@intel.com
+Stable-dep-of: 5bd566703e16 ("drm/xe/oa: Allow oa_exponent value of 0")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_oa.c | 60 +++++++++++++++++++++++++++++---------
+ 1 file changed, 46 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
+index 78f662fd197c4..210b8bae59102 100644
+--- a/drivers/gpu/drm/xe/xe_oa.c
++++ b/drivers/gpu/drm/xe/xe_oa.c
+@@ -47,6 +47,11 @@ enum xe_oa_submit_deps {
+ XE_OA_SUBMIT_ADD_DEPS,
+ };
+
++enum xe_oa_user_extn_from {
++ XE_OA_USER_EXTN_FROM_OPEN,
++ XE_OA_USER_EXTN_FROM_CONFIG,
++};
++
+ struct xe_oa_reg {
+ struct xe_reg addr;
+ u32 value;
+@@ -1205,9 +1210,15 @@ static int xe_oa_set_prop_syncs_user(struct xe_oa *oa, u64 value,
+ return 0;
+ }
+
++static int xe_oa_set_prop_ret_inval(struct xe_oa *oa, u64 value,
++ struct xe_oa_open_param *param)
++{
++ return -EINVAL;
++}
++
+ typedef int (*xe_oa_set_property_fn)(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param);
+-static const xe_oa_set_property_fn xe_oa_set_property_funcs[] = {
++static const xe_oa_set_property_fn xe_oa_set_property_funcs_open[] = {
+ [DRM_XE_OA_PROPERTY_OA_UNIT_ID] = xe_oa_set_prop_oa_unit_id,
+ [DRM_XE_OA_PROPERTY_SAMPLE_OA] = xe_oa_set_prop_sample_oa,
+ [DRM_XE_OA_PROPERTY_OA_METRIC_SET] = xe_oa_set_prop_metric_set,
+@@ -1221,8 +1232,22 @@ static const xe_oa_set_property_fn xe_oa_set_property_funcs[] = {
+ [DRM_XE_OA_PROPERTY_SYNCS] = xe_oa_set_prop_syncs_user,
+ };
+
+-static int xe_oa_user_ext_set_property(struct xe_oa *oa, u64 extension,
+- struct xe_oa_open_param *param)
++static const xe_oa_set_property_fn xe_oa_set_property_funcs_config[] = {
++ [DRM_XE_OA_PROPERTY_OA_UNIT_ID] = xe_oa_set_prop_ret_inval,
++ [DRM_XE_OA_PROPERTY_SAMPLE_OA] = xe_oa_set_prop_ret_inval,
++ [DRM_XE_OA_PROPERTY_OA_METRIC_SET] = xe_oa_set_prop_metric_set,
++ [DRM_XE_OA_PROPERTY_OA_FORMAT] = xe_oa_set_prop_ret_inval,
++ [DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT] = xe_oa_set_prop_ret_inval,
++ [DRM_XE_OA_PROPERTY_OA_DISABLED] = xe_oa_set_prop_ret_inval,
++ [DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID] = xe_oa_set_prop_ret_inval,
++ [DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE] = xe_oa_set_prop_ret_inval,
++ [DRM_XE_OA_PROPERTY_NO_PREEMPT] = xe_oa_set_prop_ret_inval,
++ [DRM_XE_OA_PROPERTY_NUM_SYNCS] = xe_oa_set_prop_num_syncs,
++ [DRM_XE_OA_PROPERTY_SYNCS] = xe_oa_set_prop_syncs_user,
++};
++
++static int xe_oa_user_ext_set_property(struct xe_oa *oa, enum xe_oa_user_extn_from from,
++ u64 extension, struct xe_oa_open_param *param)
+ {
+ u64 __user *address = u64_to_user_ptr(extension);
+ struct drm_xe_ext_set_property ext;
+@@ -1233,23 +1258,30 @@ static int xe_oa_user_ext_set_property(struct xe_oa *oa, u64 extension,
+ if (XE_IOCTL_DBG(oa->xe, err))
+ return -EFAULT;
+
+- if (XE_IOCTL_DBG(oa->xe, ext.property >= ARRAY_SIZE(xe_oa_set_property_funcs)) ||
++ BUILD_BUG_ON(ARRAY_SIZE(xe_oa_set_property_funcs_open) !=
++ ARRAY_SIZE(xe_oa_set_property_funcs_config));
++
++ if (XE_IOCTL_DBG(oa->xe, ext.property >= ARRAY_SIZE(xe_oa_set_property_funcs_open)) ||
+ XE_IOCTL_DBG(oa->xe, ext.pad))
+ return -EINVAL;
+
+- idx = array_index_nospec(ext.property, ARRAY_SIZE(xe_oa_set_property_funcs));
+- return xe_oa_set_property_funcs[idx](oa, ext.value, param);
++ idx = array_index_nospec(ext.property, ARRAY_SIZE(xe_oa_set_property_funcs_open));
++
++ if (from == XE_OA_USER_EXTN_FROM_CONFIG)
++ return xe_oa_set_property_funcs_config[idx](oa, ext.value, param);
++ else
++ return xe_oa_set_property_funcs_open[idx](oa, ext.value, param);
+ }
+
+-typedef int (*xe_oa_user_extension_fn)(struct xe_oa *oa, u64 extension,
+- struct xe_oa_open_param *param);
++typedef int (*xe_oa_user_extension_fn)(struct xe_oa *oa, enum xe_oa_user_extn_from from,
++ u64 extension, struct xe_oa_open_param *param);
+ static const xe_oa_user_extension_fn xe_oa_user_extension_funcs[] = {
+ [DRM_XE_OA_EXTENSION_SET_PROPERTY] = xe_oa_user_ext_set_property,
+ };
+
+ #define MAX_USER_EXTENSIONS 16
+-static int xe_oa_user_extensions(struct xe_oa *oa, u64 extension, int ext_number,
+- struct xe_oa_open_param *param)
++static int xe_oa_user_extensions(struct xe_oa *oa, enum xe_oa_user_extn_from from, u64 extension,
++ int ext_number, struct xe_oa_open_param *param)
+ {
+ u64 __user *address = u64_to_user_ptr(extension);
+ struct drm_xe_user_extension ext;
+@@ -1268,12 +1300,12 @@ static int xe_oa_user_extensions(struct xe_oa *oa, u64 extension, int ext_number
+ return -EINVAL;
+
+ idx = array_index_nospec(ext.name, ARRAY_SIZE(xe_oa_user_extension_funcs));
+- err = xe_oa_user_extension_funcs[idx](oa, extension, param);
++ err = xe_oa_user_extension_funcs[idx](oa, from, extension, param);
+ if (XE_IOCTL_DBG(oa->xe, err))
+ return err;
+
+ if (ext.next_extension)
+- return xe_oa_user_extensions(oa, ext.next_extension, ++ext_number, param);
++ return xe_oa_user_extensions(oa, from, ext.next_extension, ++ext_number, param);
+
+ return 0;
+ }
+@@ -1419,7 +1451,7 @@ static long xe_oa_config_locked(struct xe_oa_stream *stream, u64 arg)
+ struct xe_oa_config *config;
+ int err;
+
+-	err = xe_oa_user_extensions(stream->oa, arg, 0, &param);
++	err = xe_oa_user_extensions(stream->oa, XE_OA_USER_EXTN_FROM_CONFIG, arg, 0, &param);
+ if (err)
+ return err;
+
+@@ -1970,7 +2002,7 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
+ }
+
+ param.xef = xef;
+-	ret = xe_oa_user_extensions(oa, data, 0, &param);
++	ret = xe_oa_user_extensions(oa, XE_OA_USER_EXTN_FROM_OPEN, data, 0, &param);
+ if (ret)
+ return ret;
+
+--
+2.39.5
+
--- /dev/null
+From 9e6c3ff06e5a097382bb1c8af1ad6900b607823c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Oct 2024 13:03:50 -0700
+Subject: drm/xe/oa: Move functions up so they can be reused for config ioctl
+
+From: Ashutosh Dixit <ashutosh.dixit@intel.com>
+
+[ Upstream commit cc4e6994d5a237ef38363e459ac83cf8ef7626ff ]
+
+No code changes, only code movement so that functions used during stream
+open can be reused for the stream reconfiguration
+ioctl (DRM_XE_OBSERVATION_IOCTL_CONFIG).
+
+Reviewed-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
+Signed-off-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20241022200352.1192560-6-ashutosh.dixit@intel.com
+Stable-dep-of: 5bd566703e16 ("drm/xe/oa: Allow oa_exponent value of 0")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_oa.c | 458 ++++++++++++++++++-------------------
+ 1 file changed, 229 insertions(+), 229 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
+index a54098c1a944a..dd541b62942f8 100644
+--- a/drivers/gpu/drm/xe/xe_oa.c
++++ b/drivers/gpu/drm/xe/xe_oa.c
+@@ -1091,6 +1091,235 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
+ return xe_oa_emit_oa_config(stream, stream->oa_config);
+ }
+
++static int decode_oa_format(struct xe_oa *oa, u64 fmt, enum xe_oa_format_name *name)
++{
++ u32 counter_size = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SIZE, fmt);
++ u32 counter_sel = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SEL, fmt);
++ u32 bc_report = FIELD_GET(DRM_XE_OA_FORMAT_MASK_BC_REPORT, fmt);
++ u32 type = FIELD_GET(DRM_XE_OA_FORMAT_MASK_FMT_TYPE, fmt);
++ int idx;
++
++ for_each_set_bit(idx, oa->format_mask, __XE_OA_FORMAT_MAX) {
++ const struct xe_oa_format *f = &oa->oa_formats[idx];
++
++ if (counter_size == f->counter_size && bc_report == f->bc_report &&
++ type == f->type && counter_sel == f->counter_select) {
++ *name = idx;
++ return 0;
++ }
++ }
++
++ return -EINVAL;
++}
++
++static int xe_oa_set_prop_oa_unit_id(struct xe_oa *oa, u64 value,
++ struct xe_oa_open_param *param)
++{
++ if (value >= oa->oa_unit_ids) {
++ drm_dbg(&oa->xe->drm, "OA unit ID out of range %lld\n", value);
++ return -EINVAL;
++ }
++ param->oa_unit_id = value;
++ return 0;
++}
++
++static int xe_oa_set_prop_sample_oa(struct xe_oa *oa, u64 value,
++ struct xe_oa_open_param *param)
++{
++ param->sample = value;
++ return 0;
++}
++
++static int xe_oa_set_prop_metric_set(struct xe_oa *oa, u64 value,
++ struct xe_oa_open_param *param)
++{
++ param->metric_set = value;
++ return 0;
++}
++
++static int xe_oa_set_prop_oa_format(struct xe_oa *oa, u64 value,
++ struct xe_oa_open_param *param)
++{
++	int ret = decode_oa_format(oa, value, &param->oa_format);
++
++ if (ret) {
++ drm_dbg(&oa->xe->drm, "Unsupported OA report format %#llx\n", value);
++ return ret;
++ }
++ return 0;
++}
++
++static int xe_oa_set_prop_oa_exponent(struct xe_oa *oa, u64 value,
++ struct xe_oa_open_param *param)
++{
++#define OA_EXPONENT_MAX 31
++
++ if (value > OA_EXPONENT_MAX) {
++ drm_dbg(&oa->xe->drm, "OA timer exponent too high (> %u)\n", OA_EXPONENT_MAX);
++ return -EINVAL;
++ }
++ param->period_exponent = value;
++ return 0;
++}
++
++static int xe_oa_set_prop_disabled(struct xe_oa *oa, u64 value,
++ struct xe_oa_open_param *param)
++{
++ param->disabled = value;
++ return 0;
++}
++
++static int xe_oa_set_prop_exec_queue_id(struct xe_oa *oa, u64 value,
++ struct xe_oa_open_param *param)
++{
++ param->exec_queue_id = value;
++ return 0;
++}
++
++static int xe_oa_set_prop_engine_instance(struct xe_oa *oa, u64 value,
++ struct xe_oa_open_param *param)
++{
++ param->engine_instance = value;
++ return 0;
++}
++
++static int xe_oa_set_no_preempt(struct xe_oa *oa, u64 value,
++ struct xe_oa_open_param *param)
++{
++ param->no_preempt = value;
++ return 0;
++}
++
++static int xe_oa_set_prop_num_syncs(struct xe_oa *oa, u64 value,
++ struct xe_oa_open_param *param)
++{
++ param->num_syncs = value;
++ return 0;
++}
++
++static int xe_oa_set_prop_syncs_user(struct xe_oa *oa, u64 value,
++ struct xe_oa_open_param *param)
++{
++ param->syncs_user = u64_to_user_ptr(value);
++ return 0;
++}
++
++typedef int (*xe_oa_set_property_fn)(struct xe_oa *oa, u64 value,
++ struct xe_oa_open_param *param);
++static const xe_oa_set_property_fn xe_oa_set_property_funcs[] = {
++ [DRM_XE_OA_PROPERTY_OA_UNIT_ID] = xe_oa_set_prop_oa_unit_id,
++ [DRM_XE_OA_PROPERTY_SAMPLE_OA] = xe_oa_set_prop_sample_oa,
++ [DRM_XE_OA_PROPERTY_OA_METRIC_SET] = xe_oa_set_prop_metric_set,
++ [DRM_XE_OA_PROPERTY_OA_FORMAT] = xe_oa_set_prop_oa_format,
++ [DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT] = xe_oa_set_prop_oa_exponent,
++ [DRM_XE_OA_PROPERTY_OA_DISABLED] = xe_oa_set_prop_disabled,
++ [DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID] = xe_oa_set_prop_exec_queue_id,
++ [DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE] = xe_oa_set_prop_engine_instance,
++ [DRM_XE_OA_PROPERTY_NO_PREEMPT] = xe_oa_set_no_preempt,
++ [DRM_XE_OA_PROPERTY_NUM_SYNCS] = xe_oa_set_prop_num_syncs,
++ [DRM_XE_OA_PROPERTY_SYNCS] = xe_oa_set_prop_syncs_user,
++};
++
++static int xe_oa_user_ext_set_property(struct xe_oa *oa, u64 extension,
++ struct xe_oa_open_param *param)
++{
++ u64 __user *address = u64_to_user_ptr(extension);
++ struct drm_xe_ext_set_property ext;
++ int err;
++ u32 idx;
++
++ err = __copy_from_user(&ext, address, sizeof(ext));
++ if (XE_IOCTL_DBG(oa->xe, err))
++ return -EFAULT;
++
++ if (XE_IOCTL_DBG(oa->xe, ext.property >= ARRAY_SIZE(xe_oa_set_property_funcs)) ||
++ XE_IOCTL_DBG(oa->xe, ext.pad))
++ return -EINVAL;
++
++ idx = array_index_nospec(ext.property, ARRAY_SIZE(xe_oa_set_property_funcs));
++ return xe_oa_set_property_funcs[idx](oa, ext.value, param);
++}
++
++typedef int (*xe_oa_user_extension_fn)(struct xe_oa *oa, u64 extension,
++ struct xe_oa_open_param *param);
++static const xe_oa_user_extension_fn xe_oa_user_extension_funcs[] = {
++ [DRM_XE_OA_EXTENSION_SET_PROPERTY] = xe_oa_user_ext_set_property,
++};
++
++#define MAX_USER_EXTENSIONS 16
++static int xe_oa_user_extensions(struct xe_oa *oa, u64 extension, int ext_number,
++ struct xe_oa_open_param *param)
++{
++ u64 __user *address = u64_to_user_ptr(extension);
++ struct drm_xe_user_extension ext;
++ int err;
++ u32 idx;
++
++ if (XE_IOCTL_DBG(oa->xe, ext_number >= MAX_USER_EXTENSIONS))
++ return -E2BIG;
++
++ err = __copy_from_user(&ext, address, sizeof(ext));
++ if (XE_IOCTL_DBG(oa->xe, err))
++ return -EFAULT;
++
++ if (XE_IOCTL_DBG(oa->xe, ext.pad) ||
++ XE_IOCTL_DBG(oa->xe, ext.name >= ARRAY_SIZE(xe_oa_user_extension_funcs)))
++ return -EINVAL;
++
++ idx = array_index_nospec(ext.name, ARRAY_SIZE(xe_oa_user_extension_funcs));
++ err = xe_oa_user_extension_funcs[idx](oa, extension, param);
++ if (XE_IOCTL_DBG(oa->xe, err))
++ return err;
++
++ if (ext.next_extension)
++ return xe_oa_user_extensions(oa, ext.next_extension, ++ext_number, param);
++
++ return 0;
++}
++
++static int xe_oa_parse_syncs(struct xe_oa *oa, struct xe_oa_open_param *param)
++{
++ int ret, num_syncs, num_ufence = 0;
++
++ if (param->num_syncs && !param->syncs_user) {
++ drm_dbg(&oa->xe->drm, "num_syncs specified without sync array\n");
++ ret = -EINVAL;
++ goto exit;
++ }
++
++ if (param->num_syncs) {
++ param->syncs = kcalloc(param->num_syncs, sizeof(*param->syncs), GFP_KERNEL);
++ if (!param->syncs) {
++ ret = -ENOMEM;
++ goto exit;
++ }
++ }
++
++ for (num_syncs = 0; num_syncs < param->num_syncs; num_syncs++) {
++		ret = xe_sync_entry_parse(oa->xe, param->xef, &param->syncs[num_syncs],
++					  &param->syncs_user[num_syncs], 0);
++ if (ret)
++ goto err_syncs;
++
++		if (xe_sync_is_ufence(&param->syncs[num_syncs]))
++ num_ufence++;
++ }
++
++ if (XE_IOCTL_DBG(oa->xe, num_ufence > 1)) {
++ ret = -EINVAL;
++ goto err_syncs;
++ }
++
++ return 0;
++
++err_syncs:
++ while (num_syncs--)
++		xe_sync_entry_cleanup(&param->syncs[num_syncs]);
++ kfree(param->syncs);
++exit:
++ return ret;
++}
++
+ static void xe_oa_stream_enable(struct xe_oa_stream *stream)
+ {
+ stream->pollin = false;
+@@ -1664,27 +1893,6 @@ static bool engine_supports_oa_format(const struct xe_hw_engine *hwe, int type)
+ }
+ }
+
+-static int decode_oa_format(struct xe_oa *oa, u64 fmt, enum xe_oa_format_name *name)
+-{
+- u32 counter_size = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SIZE, fmt);
+- u32 counter_sel = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SEL, fmt);
+- u32 bc_report = FIELD_GET(DRM_XE_OA_FORMAT_MASK_BC_REPORT, fmt);
+- u32 type = FIELD_GET(DRM_XE_OA_FORMAT_MASK_FMT_TYPE, fmt);
+- int idx;
+-
+- for_each_set_bit(idx, oa->format_mask, __XE_OA_FORMAT_MAX) {
+- const struct xe_oa_format *f = &oa->oa_formats[idx];
+-
+- if (counter_size == f->counter_size && bc_report == f->bc_report &&
+- type == f->type && counter_sel == f->counter_select) {
+- *name = idx;
+- return 0;
+- }
+- }
+-
+- return -EINVAL;
+-}
+-
+ /**
+ * xe_oa_unit_id - Return OA unit ID for a hardware engine
+ * @hwe: @xe_hw_engine
+@@ -1731,214 +1939,6 @@ static int xe_oa_assign_hwe(struct xe_oa *oa, struct xe_oa_open_param *param)
+ return ret;
+ }
+
+-static int xe_oa_set_prop_oa_unit_id(struct xe_oa *oa, u64 value,
+- struct xe_oa_open_param *param)
+-{
+- if (value >= oa->oa_unit_ids) {
+- drm_dbg(&oa->xe->drm, "OA unit ID out of range %lld\n", value);
+- return -EINVAL;
+- }
+- param->oa_unit_id = value;
+- return 0;
+-}
+-
+-static int xe_oa_set_prop_sample_oa(struct xe_oa *oa, u64 value,
+- struct xe_oa_open_param *param)
+-{
+- param->sample = value;
+- return 0;
+-}
+-
+-static int xe_oa_set_prop_metric_set(struct xe_oa *oa, u64 value,
+- struct xe_oa_open_param *param)
+-{
+- param->metric_set = value;
+- return 0;
+-}
+-
+-static int xe_oa_set_prop_oa_format(struct xe_oa *oa, u64 value,
+- struct xe_oa_open_param *param)
+-{
+-	int ret = decode_oa_format(oa, value, &param->oa_format);
+-
+- if (ret) {
+- drm_dbg(&oa->xe->drm, "Unsupported OA report format %#llx\n", value);
+- return ret;
+- }
+- return 0;
+-}
+-
+-static int xe_oa_set_prop_oa_exponent(struct xe_oa *oa, u64 value,
+- struct xe_oa_open_param *param)
+-{
+-#define OA_EXPONENT_MAX 31
+-
+- if (value > OA_EXPONENT_MAX) {
+- drm_dbg(&oa->xe->drm, "OA timer exponent too high (> %u)\n", OA_EXPONENT_MAX);
+- return -EINVAL;
+- }
+- param->period_exponent = value;
+- return 0;
+-}
+-
+-static int xe_oa_set_prop_disabled(struct xe_oa *oa, u64 value,
+- struct xe_oa_open_param *param)
+-{
+- param->disabled = value;
+- return 0;
+-}
+-
+-static int xe_oa_set_prop_exec_queue_id(struct xe_oa *oa, u64 value,
+- struct xe_oa_open_param *param)
+-{
+- param->exec_queue_id = value;
+- return 0;
+-}
+-
+-static int xe_oa_set_prop_engine_instance(struct xe_oa *oa, u64 value,
+- struct xe_oa_open_param *param)
+-{
+- param->engine_instance = value;
+- return 0;
+-}
+-
+-static int xe_oa_set_no_preempt(struct xe_oa *oa, u64 value,
+- struct xe_oa_open_param *param)
+-{
+- param->no_preempt = value;
+- return 0;
+-}
+-
+-static int xe_oa_set_prop_num_syncs(struct xe_oa *oa, u64 value,
+- struct xe_oa_open_param *param)
+-{
+- param->num_syncs = value;
+- return 0;
+-}
+-
+-static int xe_oa_set_prop_syncs_user(struct xe_oa *oa, u64 value,
+- struct xe_oa_open_param *param)
+-{
+- param->syncs_user = u64_to_user_ptr(value);
+- return 0;
+-}
+-
+-typedef int (*xe_oa_set_property_fn)(struct xe_oa *oa, u64 value,
+- struct xe_oa_open_param *param);
+-static const xe_oa_set_property_fn xe_oa_set_property_funcs[] = {
+- [DRM_XE_OA_PROPERTY_OA_UNIT_ID] = xe_oa_set_prop_oa_unit_id,
+- [DRM_XE_OA_PROPERTY_SAMPLE_OA] = xe_oa_set_prop_sample_oa,
+- [DRM_XE_OA_PROPERTY_OA_METRIC_SET] = xe_oa_set_prop_metric_set,
+- [DRM_XE_OA_PROPERTY_OA_FORMAT] = xe_oa_set_prop_oa_format,
+- [DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT] = xe_oa_set_prop_oa_exponent,
+- [DRM_XE_OA_PROPERTY_OA_DISABLED] = xe_oa_set_prop_disabled,
+- [DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID] = xe_oa_set_prop_exec_queue_id,
+- [DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE] = xe_oa_set_prop_engine_instance,
+- [DRM_XE_OA_PROPERTY_NO_PREEMPT] = xe_oa_set_no_preempt,
+- [DRM_XE_OA_PROPERTY_NUM_SYNCS] = xe_oa_set_prop_num_syncs,
+- [DRM_XE_OA_PROPERTY_SYNCS] = xe_oa_set_prop_syncs_user,
+-};
+-
+-static int xe_oa_user_ext_set_property(struct xe_oa *oa, u64 extension,
+- struct xe_oa_open_param *param)
+-{
+- u64 __user *address = u64_to_user_ptr(extension);
+- struct drm_xe_ext_set_property ext;
+- int err;
+- u32 idx;
+-
+- err = __copy_from_user(&ext, address, sizeof(ext));
+- if (XE_IOCTL_DBG(oa->xe, err))
+- return -EFAULT;
+-
+- if (XE_IOCTL_DBG(oa->xe, ext.property >= ARRAY_SIZE(xe_oa_set_property_funcs)) ||
+- XE_IOCTL_DBG(oa->xe, ext.pad))
+- return -EINVAL;
+-
+- idx = array_index_nospec(ext.property, ARRAY_SIZE(xe_oa_set_property_funcs));
+- return xe_oa_set_property_funcs[idx](oa, ext.value, param);
+-}
+-
+-typedef int (*xe_oa_user_extension_fn)(struct xe_oa *oa, u64 extension,
+- struct xe_oa_open_param *param);
+-static const xe_oa_user_extension_fn xe_oa_user_extension_funcs[] = {
+- [DRM_XE_OA_EXTENSION_SET_PROPERTY] = xe_oa_user_ext_set_property,
+-};
+-
+-#define MAX_USER_EXTENSIONS 16
+-static int xe_oa_user_extensions(struct xe_oa *oa, u64 extension, int ext_number,
+- struct xe_oa_open_param *param)
+-{
+- u64 __user *address = u64_to_user_ptr(extension);
+- struct drm_xe_user_extension ext;
+- int err;
+- u32 idx;
+-
+- if (XE_IOCTL_DBG(oa->xe, ext_number >= MAX_USER_EXTENSIONS))
+- return -E2BIG;
+-
+- err = __copy_from_user(&ext, address, sizeof(ext));
+- if (XE_IOCTL_DBG(oa->xe, err))
+- return -EFAULT;
+-
+- if (XE_IOCTL_DBG(oa->xe, ext.pad) ||
+- XE_IOCTL_DBG(oa->xe, ext.name >= ARRAY_SIZE(xe_oa_user_extension_funcs)))
+- return -EINVAL;
+-
+- idx = array_index_nospec(ext.name, ARRAY_SIZE(xe_oa_user_extension_funcs));
+- err = xe_oa_user_extension_funcs[idx](oa, extension, param);
+- if (XE_IOCTL_DBG(oa->xe, err))
+- return err;
+-
+- if (ext.next_extension)
+- return xe_oa_user_extensions(oa, ext.next_extension, ++ext_number, param);
+-
+- return 0;
+-}
+-
+-static int xe_oa_parse_syncs(struct xe_oa *oa, struct xe_oa_open_param *param)
+-{
+- int ret, num_syncs, num_ufence = 0;
+-
+- if (param->num_syncs && !param->syncs_user) {
+- drm_dbg(&oa->xe->drm, "num_syncs specified without sync array\n");
+- ret = -EINVAL;
+- goto exit;
+- }
+-
+- if (param->num_syncs) {
+- param->syncs = kcalloc(param->num_syncs, sizeof(*param->syncs), GFP_KERNEL);
+- if (!param->syncs) {
+- ret = -ENOMEM;
+- goto exit;
+- }
+- }
+-
+- for (num_syncs = 0; num_syncs < param->num_syncs; num_syncs++) {
+- ret = xe_sync_entry_parse(oa->xe, param->xef, &param->syncs[num_syncs],
+- &param->syncs_user[num_syncs], 0);
+- if (ret)
+- goto err_syncs;
+-
+- if (xe_sync_is_ufence(&param->syncs[num_syncs]))
+- num_ufence++;
+- }
+-
+- if (XE_IOCTL_DBG(oa->xe, num_ufence > 1)) {
+- ret = -EINVAL;
+- goto err_syncs;
+- }
+-
+- return 0;
+-
+-err_syncs:
+- while (num_syncs--)
+- xe_sync_entry_cleanup(&param->syncs[num_syncs]);
+- kfree(param->syncs);
+-exit:
+- return ret;
+-}
+-
+ /**
+ * xe_oa_stream_open_ioctl - Opens an OA stream
+ * @dev: @drm_device
+--
+2.39.5
+
--- /dev/null
+From 9fc5efff1f727827dbbf8b08625dc7c3b0f54fcb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Oct 2024 13:03:49 -0700
+Subject: drm/xe/oa: Signal output fences
+
+From: Ashutosh Dixit <ashutosh.dixit@intel.com>
+
+[ Upstream commit 343dd246fd9b58e67b395153e8e7298bd250f943 ]
+
+Introduce 'struct xe_oa_fence' which includes the dma_fence used to signal
+output fences in the xe_sync array. The fences are signaled
+asynchronously. When there are no output fences to signal, the OA
+configuration wait is synchronously re-introduced into the ioctl.
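+
+A condensed sketch of the signaling path this patch adds (kernel context
+assumed; all names are taken from the hunks below, trimmed to the core
+calls):
+
+	/* point of no return: initialize the fence and attach the syncs */
+	spin_lock_init(&ofence->lock);
+	dma_fence_init(&ofence->base, &xe_oa_fence_ops, &ofence->lock, 0, 0);
+
+	/* schedule xe_oa_config_cb() once the OA config job fence signals */
+	err = dma_fence_add_callback(fence, &ofence->cb, xe_oa_config_cb);
+	if (err == -ENOENT)	/* job already completed */
+		xe_oa_config_cb(fence, &ofence->cb);
+
+	/* in xe_oa_config_cb(): let the NOA programming settle, then signal */
+	INIT_DELAYED_WORK(&ofence->work, xe_oa_fence_work_fn);
+	queue_delayed_work(system_unbound_wq, &ofence->work,
+			   usecs_to_jiffies(NOA_PROGRAM_ADDITIONAL_DELAY_US));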
+
+v2: Don't wait in the work, use callback + delayed work (Matt B)
+ Use a single, not a per-fence spinlock (Matt Brost)
+v3: Move ofence alloc before job submission (Matt)
+ Assert, don't fail, from dma_fence_add_callback (Matt)
+ Additional dma_fence_get for dma_fence_wait (Matt)
+ Change dma_fence_wait to non-interruptible (Matt)
+v4: Introduce last_fence to prevent uaf if stream is closed with
+ pending OA config jobs
+v5: Remove oa_fence_lock, move spinlock back into xe_oa_fence to
+ prevent uaf
+
+Suggested-by: Matthew Brost <matthew.brost@intel.com>
+Reviewed-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
+Signed-off-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
+Reviewed-by: Matthew Brost <matthew.brost@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20241022200352.1192560-5-ashutosh.dixit@intel.com
+Stable-dep-of: 5bd566703e16 ("drm/xe/oa: Allow oa_exponent value of 0")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_oa.c | 119 ++++++++++++++++++++++++++-----
+ drivers/gpu/drm/xe/xe_oa_types.h | 3 +
+ 2 files changed, 105 insertions(+), 17 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
+index e6744422dee49..a54098c1a944a 100644
+--- a/drivers/gpu/drm/xe/xe_oa.c
++++ b/drivers/gpu/drm/xe/xe_oa.c
+@@ -94,6 +94,17 @@ struct xe_oa_config_bo {
+ struct xe_bb *bb;
+ };
+
++struct xe_oa_fence {
++ /* @base: dma fence base */
++ struct dma_fence base;
++ /* @lock: lock for the fence */
++ spinlock_t lock;
++ /* @work: work to signal @base */
++ struct delayed_work work;
++ /* @cb: callback to schedule @work */
++ struct dma_fence_cb cb;
++};
++
+ #define DRM_FMT(x) DRM_XE_OA_FMT_TYPE_##x
+
+ static const struct xe_oa_format oa_formats[] = {
+@@ -166,10 +177,10 @@ static struct xe_oa_config *xe_oa_get_oa_config(struct xe_oa *oa, int metrics_se
+ return oa_config;
+ }
+
+-static void free_oa_config_bo(struct xe_oa_config_bo *oa_bo)
++static void free_oa_config_bo(struct xe_oa_config_bo *oa_bo, struct dma_fence *last_fence)
+ {
+ xe_oa_config_put(oa_bo->oa_config);
+- xe_bb_free(oa_bo->bb, NULL);
++ xe_bb_free(oa_bo->bb, last_fence);
+ kfree(oa_bo);
+ }
+
+@@ -668,7 +679,8 @@ static void xe_oa_free_configs(struct xe_oa_stream *stream)
+
+ xe_oa_config_put(stream->oa_config);
+ llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node)
+- free_oa_config_bo(oa_bo);
++ free_oa_config_bo(oa_bo, stream->last_fence);
++ dma_fence_put(stream->last_fence);
+ }
+
+ static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *reg_lri, u32 count)
+@@ -902,40 +914,113 @@ xe_oa_alloc_config_buffer(struct xe_oa_stream *stream, struct xe_oa_config *oa_c
+ return oa_bo;
+ }
+
++static void xe_oa_update_last_fence(struct xe_oa_stream *stream, struct dma_fence *fence)
++{
++ dma_fence_put(stream->last_fence);
++ stream->last_fence = dma_fence_get(fence);
++}
++
++static void xe_oa_fence_work_fn(struct work_struct *w)
++{
++ struct xe_oa_fence *ofence = container_of(w, typeof(*ofence), work.work);
++
++ /* Signal fence to indicate new OA configuration is active */
++ dma_fence_signal(&ofence->base);
++ dma_fence_put(&ofence->base);
++}
++
++static void xe_oa_config_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
++{
++ /* Additional empirical delay needed for NOA programming after registers are written */
++#define NOA_PROGRAM_ADDITIONAL_DELAY_US 500
++
++ struct xe_oa_fence *ofence = container_of(cb, typeof(*ofence), cb);
++
++ INIT_DELAYED_WORK(&ofence->work, xe_oa_fence_work_fn);
++ queue_delayed_work(system_unbound_wq, &ofence->work,
++ usecs_to_jiffies(NOA_PROGRAM_ADDITIONAL_DELAY_US));
++ dma_fence_put(fence);
++}
++
++static const char *xe_oa_get_driver_name(struct dma_fence *fence)
++{
++ return "xe_oa";
++}
++
++static const char *xe_oa_get_timeline_name(struct dma_fence *fence)
++{
++ return "unbound";
++}
++
++static const struct dma_fence_ops xe_oa_fence_ops = {
++ .get_driver_name = xe_oa_get_driver_name,
++ .get_timeline_name = xe_oa_get_timeline_name,
++};
++
+ static int xe_oa_emit_oa_config(struct xe_oa_stream *stream, struct xe_oa_config *config)
+ {
+ #define NOA_PROGRAM_ADDITIONAL_DELAY_US 500
+ struct xe_oa_config_bo *oa_bo;
+- int err = 0, us = NOA_PROGRAM_ADDITIONAL_DELAY_US;
++ struct xe_oa_fence *ofence;
++ int i, err, num_signal = 0;
+ struct dma_fence *fence;
+- long timeout;
+
+- /* Emit OA configuration batch */
++ ofence = kzalloc(sizeof(*ofence), GFP_KERNEL);
++ if (!ofence) {
++ err = -ENOMEM;
++ goto exit;
++ }
++
+ oa_bo = xe_oa_alloc_config_buffer(stream, config);
+ if (IS_ERR(oa_bo)) {
+ err = PTR_ERR(oa_bo);
+ goto exit;
+ }
+
++ /* Emit OA configuration batch */
+ fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_ADD_DEPS, oa_bo->bb);
+ if (IS_ERR(fence)) {
+ err = PTR_ERR(fence);
+ goto exit;
+ }
+
+- /* Wait till all previous batches have executed */
+- timeout = dma_fence_wait_timeout(fence, false, 5 * HZ);
+- dma_fence_put(fence);
+- if (timeout < 0)
+- err = timeout;
+- else if (!timeout)
+- err = -ETIME;
+- if (err)
+- drm_dbg(&stream->oa->xe->drm, "dma_fence_wait_timeout err %d\n", err);
++ /* Point of no return: initialize and set fence to signal */
++ spin_lock_init(&ofence->lock);
++ dma_fence_init(&ofence->base, &xe_oa_fence_ops, &ofence->lock, 0, 0);
+
+- /* Additional empirical delay needed for NOA programming after registers are written */
+- usleep_range(us, 2 * us);
++ for (i = 0; i < stream->num_syncs; i++) {
++ if (stream->syncs[i].flags & DRM_XE_SYNC_FLAG_SIGNAL)
++ num_signal++;
++ xe_sync_entry_signal(&stream->syncs[i], &ofence->base);
++ }
++
++ /* Additional dma_fence_get in case we dma_fence_wait */
++ if (!num_signal)
++ dma_fence_get(&ofence->base);
++
++ /* Update last fence too before adding callback */
++ xe_oa_update_last_fence(stream, fence);
++
++ /* Add job fence callback to schedule work to signal ofence->base */
++ err = dma_fence_add_callback(fence, &ofence->cb, xe_oa_config_cb);
++ xe_gt_assert(stream->gt, !err || err == -ENOENT);
++ if (err == -ENOENT)
++ xe_oa_config_cb(fence, &ofence->cb);
++
++ /* If nothing needs to be signaled we wait synchronously */
++ if (!num_signal) {
++ dma_fence_wait(&ofence->base, false);
++ dma_fence_put(&ofence->base);
++ }
++
++ /* Done with syncs */
++ for (i = 0; i < stream->num_syncs; i++)
++ xe_sync_entry_cleanup(&stream->syncs[i]);
++ kfree(stream->syncs);
++
++ return 0;
+ exit:
++ kfree(ofence);
+ return err;
+ }
+
+diff --git a/drivers/gpu/drm/xe/xe_oa_types.h b/drivers/gpu/drm/xe/xe_oa_types.h
+index 99f4b2d4bdcf6..c8e0df13faf83 100644
+--- a/drivers/gpu/drm/xe/xe_oa_types.h
++++ b/drivers/gpu/drm/xe/xe_oa_types.h
+@@ -239,6 +239,9 @@ struct xe_oa_stream {
+ /** @no_preempt: Whether preemption and timeslicing is disabled for stream exec_q */
+ u32 no_preempt;
+
++ /** @last_fence: fence to use in stream destroy when needed */
++ struct dma_fence *last_fence;
++
+ /** @num_syncs: size of @syncs array */
+ u32 num_syncs;
+
+--
+2.39.5
+
--- /dev/null
+From a1617d5453b126f4995c81b1b79d2a02fc099486 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Feb 2025 13:18:42 +0000
+Subject: firmware: cs_dsp: Remove async regmap writes
+
+From: Richard Fitzgerald <rf@opensource.cirrus.com>
+
+[ Upstream commit fe08b7d5085a9774abc30c26d5aebc5b9cdd6091 ]
+
+Change calls to async regmap write functions to use the normal
+blocking writes so that the cs35l56 driver can use spi_bus_lock() to
+gain exclusive access to the SPI bus.
+
+As this is part of a fix, it makes only the minimal change to swap the
+functions to the blocking equivalents. There's no need to risk
+reworking the buffer allocation logic that is now partially redundant.
+
+The async writes are a 12-year-old workaround for inefficiency of
+synchronous writes in the SPI subsystem. The SPI subsystem has since
+been changed to avoid the overheads, so this workaround should not be
+necessary.
+
+The cs35l56 driver needs to use spi_bus_lock() to prevent bus activity
+while it is soft-resetting the cs35l56. But spi_bus_lock() is
+incompatible with spi_async() calls, which will fail with -EBUSY.
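+
+A minimal sketch of the pattern this enables on the cs35l56 side (the
+cs35l56_soft_reset() helper is an illustrative assumption, not part of
+this patch; spi_bus_lock()/spi_bus_unlock() and regmap_raw_write() are
+the real APIs being relied on):
+
+	/* hold the bus across the soft reset; any spi_async() issued by
+	 * cs_dsp in this window would fail with -EBUSY, hence the switch
+	 * to blocking regmap writes in this patch
+	 */
+	spi_bus_lock(spi->controller);
+	cs35l56_soft_reset(cs35l56);		/* hypothetical helper */
+	spi_bus_unlock(spi->controller);
+
+	/* cs_dsp download path after this patch: plain blocking write */
+	ret = regmap_raw_write(regmap, reg, buf->buf, le32_to_cpu(region->len));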
+
+Fixes: 8a731fd37f8b ("ASoC: cs35l56: Move utility functions to shared file")
+Signed-off-by: Richard Fitzgerald <rf@opensource.cirrus.com>
+Link: https://patch.msgid.link/20250225131843.113752-2-rf@opensource.cirrus.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/firmware/cirrus/cs_dsp.c | 24 ++++++------------------
+ 1 file changed, 6 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
+index 419220fa42fd7..bd1ea99c3b475 100644
+--- a/drivers/firmware/cirrus/cs_dsp.c
++++ b/drivers/firmware/cirrus/cs_dsp.c
+@@ -1609,8 +1609,8 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
+ goto out_fw;
+ }
+
+- ret = regmap_raw_write_async(regmap, reg, buf->buf,
+- le32_to_cpu(region->len));
++ ret = regmap_raw_write(regmap, reg, buf->buf,
++ le32_to_cpu(region->len));
+ if (ret != 0) {
+ cs_dsp_err(dsp,
+ "%s.%d: Failed to write %d bytes at %d in %s: %d\n",
+@@ -1625,12 +1625,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
+ regions++;
+ }
+
+- ret = regmap_async_complete(regmap);
+- if (ret != 0) {
+- cs_dsp_err(dsp, "Failed to complete async write: %d\n", ret);
+- goto out_fw;
+- }
+-
+ if (pos > firmware->size)
+ cs_dsp_warn(dsp, "%s.%d: %zu bytes at end of file\n",
+ file, regions, pos - firmware->size);
+@@ -1638,7 +1632,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
+ cs_dsp_debugfs_save_wmfwname(dsp, file);
+
+ out_fw:
+- regmap_async_complete(regmap);
+ cs_dsp_buf_free(&buf_list);
+
+ if (ret == -EOVERFLOW)
+@@ -2326,8 +2319,8 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
+ cs_dsp_dbg(dsp, "%s.%d: Writing %d bytes at %x\n",
+ file, blocks, le32_to_cpu(blk->len),
+ reg);
+- ret = regmap_raw_write_async(regmap, reg, buf->buf,
+- le32_to_cpu(blk->len));
++ ret = regmap_raw_write(regmap, reg, buf->buf,
++ le32_to_cpu(blk->len));
+ if (ret != 0) {
+ cs_dsp_err(dsp,
+ "%s.%d: Failed to write to %x in %s: %d\n",
+@@ -2339,10 +2332,6 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
+ blocks++;
+ }
+
+- ret = regmap_async_complete(regmap);
+- if (ret != 0)
+- cs_dsp_err(dsp, "Failed to complete async write: %d\n", ret);
+-
+ if (pos > firmware->size)
+ cs_dsp_warn(dsp, "%s.%d: %zu bytes at end of file\n",
+ file, blocks, pos - firmware->size);
+@@ -2350,7 +2339,6 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
+ cs_dsp_debugfs_save_binname(dsp, file);
+
+ out_fw:
+- regmap_async_complete(regmap);
+ cs_dsp_buf_free(&buf_list);
+
+ if (ret == -EOVERFLOW)
+@@ -2561,8 +2549,8 @@ static int cs_dsp_adsp2_enable_core(struct cs_dsp *dsp)
+ {
+ int ret;
+
+- ret = regmap_update_bits_async(dsp->regmap, dsp->base + ADSP2_CONTROL,
+- ADSP2_SYS_ENA, ADSP2_SYS_ENA);
++ ret = regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
++ ADSP2_SYS_ENA, ADSP2_SYS_ENA);
+ if (ret != 0)
+ return ret;
+
+--
+2.39.5
+
--- /dev/null
+From c93a525248a32df5229fc55071e6c1771bf17b13 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 20 Aug 2024 17:26:16 -0400
+Subject: ice: add E830 HW VF mailbox message limit support
+
+From: Paul Greenwalt <paul.greenwalt@intel.com>
+
+[ Upstream commit 59f4d59b25aec39a015c0949f4ec235c7a839c44 ]
+
+E830 adds hardware support to prevent the VF from overflowing the PF
+mailbox with VIRTCHNL messages. E830 will use the hardware feature
+(ICE_F_MBX_LIMIT) instead of the software solution ice_is_malicious_vf().
+
+To prevent a VF from overflowing the PF, the PF sets the number of
+messages per VF that can be in the PF's mailbox queue
+(ICE_MBX_OVERFLOW_WATERMARK). When the PF processes a message from a VF,
+the PF decrements the per VF message count using the E830_MBX_VF_DEC_TRIG
+register.
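+
+Condensed from the diff, the two register accesses involved (wr32() and
+the register macros are taken from the patch; surrounding context is
+omitted):
+
+	/* PF init: program the per-VF in-flight message watermark */
+	wr32(&pf->hw, E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH,
+	     ICE_MBX_OVERFLOW_WATERMARK);
+
+	/* after processing a VF message: release one slot for that VF */
+	wr32(hw, E830_MBX_VF_DEC_TRIG(vfid), 1);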
+
+Signed-off-by: Paul Greenwalt <paul.greenwalt@intel.com>
+Reviewed-by: Alexander Lobakin <aleksander.lobakin@intel.com>
+Tested-by: Rafal Romanowski <rafal.romanowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Stable-dep-of: 79990cf5e7ad ("ice: Fix deinitializing VF in error path")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice.h | 1 +
+ .../net/ethernet/intel/ice/ice_hw_autogen.h | 3 ++
+ drivers/net/ethernet/intel/ice/ice_lib.c | 3 ++
+ drivers/net/ethernet/intel/ice/ice_main.c | 24 ++++++++++----
+ drivers/net/ethernet/intel/ice/ice_sriov.c | 3 +-
+ drivers/net/ethernet/intel/ice/ice_vf_lib.c | 26 +++++++++++++--
+ drivers/net/ethernet/intel/ice/ice_vf_mbx.c | 32 +++++++++++++++++++
+ drivers/net/ethernet/intel/ice/ice_vf_mbx.h | 9 ++++++
+ drivers/net/ethernet/intel/ice/ice_virtchnl.c | 8 +++--
+ 9 files changed, 96 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
+index 558cda577191d..2960709f6b62c 100644
+--- a/drivers/net/ethernet/intel/ice/ice.h
++++ b/drivers/net/ethernet/intel/ice/ice.h
+@@ -207,6 +207,7 @@ enum ice_feature {
+ ICE_F_GNSS,
+ ICE_F_ROCE_LAG,
+ ICE_F_SRIOV_LAG,
++ ICE_F_MBX_LIMIT,
+ ICE_F_MAX
+ };
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+index 91cbae1eec89a..8d31bfe28cc88 100644
+--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
++++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+@@ -539,5 +539,8 @@
+ #define E830_PRTMAC_CL01_QNT_THR_CL0_M GENMASK(15, 0)
+ #define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
+ #define VFINT_DYN_CTLN_CLEARPBA_M BIT(1)
++#define E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH 0x00234000
++#define E830_MBX_VF_DEC_TRIG(_VF) (0x00233800 + (_VF) * 4)
++#define E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT(_VF) (0x00233000 + (_VF) * 4)
+
+ #endif /* _ICE_HW_AUTOGEN_H_ */
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index 06e712cdc3d9e..d4e74f96a8ad5 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -3880,6 +3880,9 @@ void ice_init_feature_support(struct ice_pf *pf)
+ default:
+ break;
+ }
++
++ if (pf->hw.mac_type == ICE_MAC_E830)
++ ice_set_feature_support(pf, ICE_F_MBX_LIMIT);
+ }
+
+ /**
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 45eefe22fb5b7..ca707dfcb286e 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -1546,12 +1546,20 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
+ ice_vf_lan_overflow_event(pf, &event);
+ break;
+ case ice_mbx_opc_send_msg_to_pf:
+- data.num_msg_proc = i;
+- data.num_pending_arq = pending;
+- data.max_num_msgs_mbx = hw->mailboxq.num_rq_entries;
+- data.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;
++ if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) {
++ ice_vc_process_vf_msg(pf, &event, NULL);
++ ice_mbx_vf_dec_trig_e830(hw, &event);
++ } else {
++ u16 val = hw->mailboxq.num_rq_entries;
++
++ data.max_num_msgs_mbx = val;
++ val = ICE_MBX_OVERFLOW_WATERMARK;
++ data.async_watermark_val = val;
++ data.num_msg_proc = i;
++ data.num_pending_arq = pending;
+
+- ice_vc_process_vf_msg(pf, &event, &data);
++ ice_vc_process_vf_msg(pf, &event, &data);
++ }
+ break;
+ case ice_aqc_opc_fw_logs_event:
+ ice_get_fwlog_data(pf, &event);
+@@ -4082,7 +4090,11 @@ static int ice_init_pf(struct ice_pf *pf)
+
+ mutex_init(&pf->vfs.table_lock);
+ hash_init(pf->vfs.table);
+- ice_mbx_init_snapshot(&pf->hw);
++ if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
++ wr32(&pf->hw, E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH,
++ ICE_MBX_OVERFLOW_WATERMARK);
++ else
++ ice_mbx_init_snapshot(&pf->hw);
+
+ xa_init(&pf->dyn_ports);
+ xa_init(&pf->sf_nums);
+diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
+index 91cb393f616f2..b83f99c01d91b 100644
+--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
++++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
+@@ -194,7 +194,8 @@ void ice_free_vfs(struct ice_pf *pf)
+ }
+
+ /* clear malicious info since the VF is getting released */
+- list_del(&vf->mbx_info.list_entry);
++ if (!ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
++ list_del(&vf->mbx_info.list_entry);
+
+ mutex_unlock(&vf->cfg_lock);
+ }
+diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+index 8c434689e3f78..c7c0c2f50c265 100644
+--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+@@ -716,6 +716,23 @@ ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
+ return 0;
+ }
+
++/**
++ * ice_reset_vf_mbx_cnt - reset VF mailbox message count
++ * @vf: pointer to the VF structure
++ *
++ * This function clears the VF mailbox message count, and should be called on
++ * VF reset.
++ */
++static void ice_reset_vf_mbx_cnt(struct ice_vf *vf)
++{
++ struct ice_pf *pf = vf->pf;
++
++ if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
++ ice_mbx_vf_clear_cnt_e830(&pf->hw, vf->vf_id);
++ else
++ ice_mbx_clear_malvf(&vf->mbx_info);
++}
++
+ /**
+ * ice_reset_all_vfs - reset all allocated VFs in one go
+ * @pf: pointer to the PF structure
+@@ -742,7 +759,7 @@ void ice_reset_all_vfs(struct ice_pf *pf)
+
+ /* clear all malicious info if the VFs are getting reset */
+ ice_for_each_vf(pf, bkt, vf)
+- ice_mbx_clear_malvf(&vf->mbx_info);
++ ice_reset_vf_mbx_cnt(vf);
+
+ /* If VFs have been disabled, there is no need to reset */
+ if (test_and_set_bit(ICE_VF_DIS, pf->state)) {
+@@ -958,7 +975,7 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
+ ice_eswitch_update_repr(&vf->repr_id, vsi);
+
+ /* if the VF has been reset allow it to come up again */
+- ice_mbx_clear_malvf(&vf->mbx_info);
++ ice_reset_vf_mbx_cnt(vf);
+
+ out_unlock:
+ if (lag && lag->bonded && lag->primary &&
+@@ -1011,7 +1028,10 @@ void ice_initialize_vf_entry(struct ice_vf *vf)
+ ice_vf_fdir_init(vf);
+
+ /* Initialize mailbox info for this VF */
+- ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info);
++ if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
++ ice_mbx_vf_clear_cnt_e830(&pf->hw, vf->vf_id);
++ else
++ ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info);
+
+ mutex_init(&vf->cfg_lock);
+ }
+diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
+index 40cb4ba0789ce..75c8113e58ee9 100644
+--- a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
++++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
+@@ -210,6 +210,38 @@ ice_mbx_detect_malvf(struct ice_hw *hw, struct ice_mbx_vf_info *vf_info,
+ return 0;
+ }
+
++/**
++ * ice_mbx_vf_dec_trig_e830 - Decrements the VF mailbox queue counter
++ * @hw: pointer to the HW struct
++ * @event: pointer to the control queue receive event
++ *
++ * This function triggers to decrement the counter
++ * MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT when the driver replenishes
++ * the buffers at the PF mailbox queue.
++ */
++void ice_mbx_vf_dec_trig_e830(const struct ice_hw *hw,
++ const struct ice_rq_event_info *event)
++{
++ u16 vfid = le16_to_cpu(event->desc.retval);
++
++ wr32(hw, E830_MBX_VF_DEC_TRIG(vfid), 1);
++}
++
++/**
++ * ice_mbx_vf_clear_cnt_e830 - Clear the VF mailbox queue count
++ * @hw: pointer to the HW struct
++ * @vf_id: VF ID in the PF space
++ *
++ * This function clears the counter MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT, and should
++ * be called when a VF is created and on VF reset.
++ */
++void ice_mbx_vf_clear_cnt_e830(const struct ice_hw *hw, u16 vf_id)
++{
++ u32 reg = rd32(hw, E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT(vf_id));
++
++ wr32(hw, E830_MBX_VF_DEC_TRIG(vf_id), reg);
++}
++
+ /**
+ * ice_mbx_vf_state_handler - Handle states of the overflow algorithm
+ * @hw: pointer to the HW struct
+diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.h b/drivers/net/ethernet/intel/ice/ice_vf_mbx.h
+index 44bc030d17e07..684de89e5c5ed 100644
+--- a/drivers/net/ethernet/intel/ice/ice_vf_mbx.h
++++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.h
+@@ -19,6 +19,9 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
+ u8 *msg, u16 msglen, struct ice_sq_cd *cd);
+
+ u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed);
++void ice_mbx_vf_dec_trig_e830(const struct ice_hw *hw,
++ const struct ice_rq_event_info *event);
++void ice_mbx_vf_clear_cnt_e830(const struct ice_hw *hw, u16 vf_id);
+ int
+ ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data,
+ struct ice_mbx_vf_info *vf_info, bool *report_malvf);
+@@ -47,5 +50,11 @@ static inline void ice_mbx_init_snapshot(struct ice_hw *hw)
+ {
+ }
+
++static inline void
++ice_mbx_vf_dec_trig_e830(const struct ice_hw *hw,
++ const struct ice_rq_event_info *event)
++{
++}
++
+ #endif /* CONFIG_PCI_IOV */
+ #endif /* _ICE_VF_MBX_H_ */
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+index b6ec01f6fa73e..c8c1d48ff793d 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+@@ -4008,8 +4008,10 @@ ice_is_malicious_vf(struct ice_vf *vf, struct ice_mbx_data *mbxdata)
+ * @event: pointer to the AQ event
+ * @mbxdata: information used to detect VF attempting mailbox overflow
+ *
+- * called from the common asq/arq handler to
+- * process request from VF
++ * Called from the common asq/arq handler to process request from VF. When this
++ * flow is used for devices with hardware VF to PF message queue overflow
++ * support (ICE_F_MBX_LIMIT) mbxdata is set to NULL and ice_is_malicious_vf
++ * check is skipped.
+ */
+ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
+ struct ice_mbx_data *mbxdata)
+@@ -4035,7 +4037,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
+ mutex_lock(&vf->cfg_lock);
+
+ /* Check if the VF is trying to overflow the mailbox */
+- if (ice_is_malicious_vf(vf, mbxdata))
++ if (mbxdata && ice_is_malicious_vf(vf, mbxdata))
+ goto finish;
+
+ /* Check if VF is disabled. */
+--
+2.39.5
+
--- /dev/null
+From d88f43134f274d1b9ce7ab0c9c62e4d270816a8e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Feb 2025 11:06:42 -0800
+Subject: ice: Avoid setting default Rx VSI twice in switchdev setup
+
+From: Marcin Szycik <marcin.szycik@linux.intel.com>
+
+[ Upstream commit 5c07be96d8b3f8447e980f29b967bf2e1d7ac732 ]
+
+As part of switchdev environment setup, uplink VSI is configured as
+default for both Tx and Rx. Default Rx VSI is also used by promiscuous
+mode. If promisc mode is enabled and an attempt to enter switchdev mode
+is made, the setup will fail because Rx VSI is already configured as
+default (rule exists).
+
+Reproducer:
+ devlink dev eswitch set $PF1_PCI mode switchdev
+ ip l s $PF1 up
+ ip l s $PF1 promisc on
+ echo 1 > /sys/class/net/$PF1/device/sriov_numvfs
+
+In switchdev setup, use ice_set_dflt_vsi() instead of plain
+ice_cfg_dflt_vsi(), which avoids setting the default VSI for Rx again if
+it's already configured.
+
+Fixes: 50d62022f455 ("ice: default Tx rule instead of to queue")
+Reported-by: Sujai Buvaneswaran <sujai.buvaneswaran@intel.com>
+Closes: https://lore.kernel.org/intel-wired-lan/PH0PR11MB50138B635F2E5CEB7075325D961F2@PH0PR11MB5013.namprd11.prod.outlook.com
+Reviewed-by: Martyna Szapar-Mudlaw <martyna.szapar-mudlaw@linux.intel.com>
+Signed-off-by: Marcin Szycik <marcin.szycik@linux.intel.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Tested-by: Sujai Buvaneswaran <sujai.buvaneswaran@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Link: https://patch.msgid.link/20250224190647.3601930-3-anthony.l.nguyen@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_eswitch.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
+index fb527434b58b1..d649c197cf673 100644
+--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
++++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
+@@ -38,8 +38,7 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
+ if (ice_vsi_add_vlan_zero(uplink_vsi))
+ goto err_vlan_zero;
+
+- if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true,
+- ICE_FLTR_RX))
++ if (ice_set_dflt_vsi(uplink_vsi))
+ goto err_def_rx;
+
+ if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true,
+--
+2.39.5
+
--- /dev/null
+From 329d08396ebe0abbb88fea6b2825f69fae657495 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Feb 2025 11:06:41 -0800
+Subject: ice: Fix deinitializing VF in error path
+
+From: Marcin Szycik <marcin.szycik@linux.intel.com>
+
+[ Upstream commit 79990cf5e7aded76d0c092c9f5ed31eb1c75e02c ]
+
+If ice_ena_vfs() fails after calling ice_create_vf_entries(), it frees
+all VFs without removing them from the snapshot PF-VF mailbox list, leading
+to list corruption.
+
+Reproducer:
+ devlink dev eswitch set $PF1_PCI mode switchdev
+ ip l s $PF1 up
+ ip l s $PF1 promisc on
+ sleep 1
+ echo 1 > /sys/class/net/$PF1/device/sriov_numvfs
+ sleep 1
+ echo 1 > /sys/class/net/$PF1/device/sriov_numvfs
+
+Trace (minimized):
+ list_add corruption. next->prev should be prev (ffff8882e241c6f0), but was 0000000000000000. (next=ffff888455da1330).
+ kernel BUG at lib/list_debug.c:29!
+ RIP: 0010:__list_add_valid_or_report+0xa6/0x100
+ ice_mbx_init_vf_info+0xa7/0x180 [ice]
+ ice_initialize_vf_entry+0x1fa/0x250 [ice]
+ ice_sriov_configure+0x8d7/0x1520 [ice]
+ ? __percpu_ref_switch_mode+0x1b1/0x5d0
+ ? __pfx_ice_sriov_configure+0x10/0x10 [ice]
+
+Sometimes a KASAN report can be seen instead with a similar stack trace:
+ BUG: KASAN: use-after-free in __list_add_valid_or_report+0xf1/0x100
+
+VFs are added to this list in ice_mbx_init_vf_info(), but only removed
+in ice_free_vfs(). Move the removal to ice_free_vf_entries(), which is
+also being called in other places where VFs are being removed (including
+ice_free_vfs() itself).
+
+Fixes: 8cd8a6b17d27 ("ice: move VF overflow message count into struct ice_mbx_vf_info")
+Reported-by: Sujai Buvaneswaran <sujai.buvaneswaran@intel.com>
+Closes: https://lore.kernel.org/intel-wired-lan/PH0PR11MB50138B635F2E5CEB7075325D961F2@PH0PR11MB5013.namprd11.prod.outlook.com
+Reviewed-by: Martyna Szapar-Mudlaw <martyna.szapar-mudlaw@linux.intel.com>
+Signed-off-by: Marcin Szycik <marcin.szycik@linux.intel.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Tested-by: Sujai Buvaneswaran <sujai.buvaneswaran@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Link: https://patch.msgid.link/20250224190647.3601930-2-anthony.l.nguyen@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_sriov.c | 5 +----
+ drivers/net/ethernet/intel/ice/ice_vf_lib.c | 8 ++++++++
+ drivers/net/ethernet/intel/ice/ice_vf_lib_private.h | 1 +
+ 3 files changed, 10 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
+index b83f99c01d91b..8aabf7749aa5e 100644
+--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
++++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
+@@ -36,6 +36,7 @@ static void ice_free_vf_entries(struct ice_pf *pf)
+
+ hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) {
+ hash_del_rcu(&vf->entry);
++ ice_deinitialize_vf_entry(vf);
+ ice_put_vf(vf);
+ }
+ }
+@@ -193,10 +194,6 @@ void ice_free_vfs(struct ice_pf *pf)
+ wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+ }
+
+- /* clear malicious info since the VF is getting released */
+- if (!ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
+- list_del(&vf->mbx_info.list_entry);
+-
+ mutex_unlock(&vf->cfg_lock);
+ }
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+index c7c0c2f50c265..815ad0bfe8326 100644
+--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+@@ -1036,6 +1036,14 @@ void ice_initialize_vf_entry(struct ice_vf *vf)
+ mutex_init(&vf->cfg_lock);
+ }
+
++void ice_deinitialize_vf_entry(struct ice_vf *vf)
++{
++ struct ice_pf *pf = vf->pf;
++
++ if (!ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
++ list_del(&vf->mbx_info.list_entry);
++}
++
+ /**
+ * ice_dis_vf_qs - Disable the VF queues
+ * @vf: pointer to the VF structure
+diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
+index 0c7e77c0a09fa..5392b04049862 100644
+--- a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
++++ b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
+@@ -24,6 +24,7 @@
+ #endif
+
+ void ice_initialize_vf_entry(struct ice_vf *vf);
++void ice_deinitialize_vf_entry(struct ice_vf *vf);
+ void ice_dis_vf_qs(struct ice_vf *vf);
+ int ice_check_vf_init(struct ice_vf *vf);
+ enum virtchnl_status_code ice_err_to_virt_err(int err);
+--
+2.39.5
+
--- /dev/null
+From ae33d0cf2eecf5c3b93e51d01b62c79745351e25 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Feb 2025 22:12:52 +0000
+Subject: idpf: fix checksums set in idpf_rx_rsc()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 674fcb4f4a7e3e277417a01788cc6daae47c3804 ]
+
+idpf_rx_rsc() uses skb_transport_offset(skb) while the transport header
+is not set yet.
+
+This triggers the following warning for CONFIG_DEBUG_NET=y builds.
+
+DEBUG_NET_WARN_ON_ONCE(!skb_transport_header_was_set(skb))
+
+[ 69.261620] WARNING: CPU: 7 PID: 0 at ./include/linux/skbuff.h:3020 idpf_vport_splitq_napi_poll (include/linux/skbuff.h:3020) idpf
+[ 69.261629] Modules linked in: vfat fat dummy bridge intel_uncore_frequency_tpmi intel_uncore_frequency_common intel_vsec_tpmi idpf intel_vsec cdc_ncm cdc_eem cdc_ether usbnet mii xhci_pci xhci_hcd ehci_pci ehci_hcd libeth
+[ 69.261644] CPU: 7 UID: 0 PID: 0 Comm: swapper/7 Tainted: G S W 6.14.0-smp-DEV #1697
+[ 69.261648] Tainted: [S]=CPU_OUT_OF_SPEC, [W]=WARN
+[ 69.261650] RIP: 0010:idpf_vport_splitq_napi_poll (include/linux/skbuff.h:3020) idpf
+[ 69.261677] ? __warn (kernel/panic.c:242 kernel/panic.c:748)
+[ 69.261682] ? idpf_vport_splitq_napi_poll (include/linux/skbuff.h:3020) idpf
+[ 69.261687] ? report_bug (lib/bug.c:?)
+[ 69.261690] ? handle_bug (arch/x86/kernel/traps.c:285)
+[ 69.261694] ? exc_invalid_op (arch/x86/kernel/traps.c:309)
+[ 69.261697] ? asm_exc_invalid_op (arch/x86/include/asm/idtentry.h:621)
+[ 69.261700] ? __pfx_idpf_vport_splitq_napi_poll (drivers/net/ethernet/intel/idpf/idpf_txrx.c:4011) idpf
+[ 69.261704] ? idpf_vport_splitq_napi_poll (include/linux/skbuff.h:3020) idpf
+[ 69.261708] ? idpf_vport_splitq_napi_poll (drivers/net/ethernet/intel/idpf/idpf_txrx.c:3072) idpf
+[ 69.261712] __napi_poll (net/core/dev.c:7194)
+[ 69.261716] net_rx_action (net/core/dev.c:7265)
+[ 69.261718] ? __qdisc_run (net/sched/sch_generic.c:293)
+[ 69.261721] ? sched_clock (arch/x86/include/asm/preempt.h:84 arch/x86/kernel/tsc.c:288)
+[ 69.261726] handle_softirqs (kernel/softirq.c:561)
+
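+The fix is to compute the length only after the transport header has been
+set, e.g. for the IPv4 branch (names taken from the diff below):
+
+	skb_reset_network_header(skb);
+	skb_set_transport_header(skb, sizeof(struct iphdr));
+	len = skb->len - skb_transport_offset(skb);	/* now well defined */
+	tcp_hdr(skb)->check =
+		~tcp_v4_check(len, ipv4h->saddr, ipv4h->daddr, 0);
+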
+Fixes: 3a8845af66edb ("idpf: add RX splitq napi poll support")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Alan Brady <alan.brady@intel.com>
+Cc: Joshua Hay <joshua.a.hay@intel.com>
+Cc: Willem de Bruijn <willemb@google.com>
+Acked-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
+Link: https://patch.msgid.link/20250226221253.1927782-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/idpf/idpf_txrx.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+index 1e0d1f9b07fbc..afc902ae4763e 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+@@ -3013,7 +3013,6 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
+ skb_shinfo(skb)->gso_size = rsc_seg_len;
+
+ skb_reset_network_header(skb);
+- len = skb->len - skb_transport_offset(skb);
+
+ if (ipv4) {
+ struct iphdr *ipv4h = ip_hdr(skb);
+@@ -3022,6 +3021,7 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
+
+ /* Reset and set transport header offset in skb */
+ skb_set_transport_header(skb, sizeof(struct iphdr));
++ len = skb->len - skb_transport_offset(skb);
+
+ /* Compute the TCP pseudo header checksum*/
+ tcp_hdr(skb)->check =
+@@ -3031,6 +3031,7 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
+
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ skb_set_transport_header(skb, sizeof(struct ipv6hdr));
++ len = skb->len - skb_transport_offset(skb);
+ tcp_hdr(skb)->check =
+ ~tcp_v6_check(len, &ipv6h->saddr, &ipv6h->daddr, 0);
+ }
+--
+2.39.5
+
--- /dev/null
+From 475f84adbb5b34d7ce851b40f6324329dd97555a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 1 Oct 2024 21:28:37 +0200
+Subject: ipv4: Convert icmp_route_lookup() to dscp_t.
+
+From: Guillaume Nault <gnault@redhat.com>
+
+[ Upstream commit 913c83a610bb7dd8e5952a2b4663e1feec0b5de6 ]
+
+Pass a dscp_t variable to icmp_route_lookup(), instead of a plain u8,
+to prevent accidental setting of ECN bits in ->flowi4_tos. Rename that
+variable ("tos" -> "dscp") to make the intent clear.
+
+While there, reorganise the function parameters to fill up horizontal
+space.
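+
+The conversion helpers involved, as used in the diff below (both are
+declared in include/net/inet_dscp.h):
+
+	dscp_t dscp = inet_dsfield_to_dscp(tos);	/* masks out the ECN bits */
+	fl4->flowi4_tos = inet_dscp_to_dsfield(dscp);	/* back to a dsfield u8 */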
+
+Signed-off-by: Guillaume Nault <gnault@redhat.com>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Link: https://patch.msgid.link/294fead85c6035bcdc5fcf9a6bb4ce8798c45ba1.1727807926.git.gnault@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 27843ce6ba3d ("ipvlan: ensure network headers are in skb linear part")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/icmp.c | 19 +++++++++----------
+ 1 file changed, 9 insertions(+), 10 deletions(-)
+
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+index f45bc187a92a7..0a2f988c4c24e 100644
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -477,13 +477,11 @@ static struct net_device *icmp_get_route_lookup_dev(struct sk_buff *skb)
+ return route_lookup_dev;
+ }
+
+-static struct rtable *icmp_route_lookup(struct net *net,
+- struct flowi4 *fl4,
++static struct rtable *icmp_route_lookup(struct net *net, struct flowi4 *fl4,
+ struct sk_buff *skb_in,
+- const struct iphdr *iph,
+- __be32 saddr, u8 tos, u32 mark,
+- int type, int code,
+- struct icmp_bxm *param)
++ const struct iphdr *iph, __be32 saddr,
++ dscp_t dscp, u32 mark, int type,
++ int code, struct icmp_bxm *param)
+ {
+ struct net_device *route_lookup_dev;
+ struct dst_entry *dst, *dst2;
+@@ -497,7 +495,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
+ fl4->saddr = saddr;
+ fl4->flowi4_mark = mark;
+ fl4->flowi4_uid = sock_net_uid(net, NULL);
+- fl4->flowi4_tos = tos & INET_DSCP_MASK;
++ fl4->flowi4_tos = inet_dscp_to_dsfield(dscp);
+ fl4->flowi4_proto = IPPROTO_ICMP;
+ fl4->fl4_icmp_type = type;
+ fl4->fl4_icmp_code = code;
+@@ -549,7 +547,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
+ orefdst = skb_in->_skb_refdst; /* save old refdst */
+ skb_dst_set(skb_in, NULL);
+ err = ip_route_input(skb_in, fl4_dec.daddr, fl4_dec.saddr,
+- tos, rt2->dst.dev);
++ inet_dscp_to_dsfield(dscp), rt2->dst.dev);
+
+ dst_release(&rt2->dst);
+ rt2 = skb_rtable(skb_in);
+@@ -745,8 +743,9 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
+ ipc.opt = &icmp_param.replyopts.opt;
+ ipc.sockc.mark = mark;
+
+- rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark,
+- type, code, &icmp_param);
++ rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr,
++ inet_dsfield_to_dscp(tos), mark, type, code,
++ &icmp_param);
+ if (IS_ERR(rt))
+ goto out_unlock;
+
+--
+2.39.5
+
--- /dev/null
+From ab264ea3629c4ca1e50fc85b447979e45571f188 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 1 Oct 2024 21:28:43 +0200
+Subject: ipv4: Convert ip_route_input() to dscp_t.
+
+From: Guillaume Nault <gnault@redhat.com>
+
+[ Upstream commit 7e863e5db6185b1add0df4cb01b31a4ed1c4b738 ]
+
+Pass a dscp_t variable to ip_route_input(), instead of a plain u8, to
+prevent accidental setting of ECN bits in ->flowi4_tos.
+
+Callers of ip_route_input() to consider are:
+
+ * input_action_end_dx4_finish() and input_action_end_dt4() in
+ net/ipv6/seg6_local.c. These functions set the tos parameter to 0,
+ which is already a valid dscp_t value, so they don't need to be
+ adjusted for the new prototype.
+
+ * icmp_route_lookup(), which already has a dscp_t variable to pass as
+ parameter. We just need to remove the inet_dscp_to_dsfield()
+ conversion.
+
+ * br_nf_pre_routing_finish(), ip_options_rcv_srr() and ip4ip6_err(),
+ which get the DSCP directly from IPv4 headers. Define a helper to
+ read the .tos field of struct iphdr as dscp_t, so that these
+ functions don't have to do the conversion manually.
+
+While there, declare *iph as const in br_nf_pre_routing_finish(),
+declare its local variables in reverse-christmas-tree order and move
+the "err = ip_route_input()" assignment out of the conditional to avoid
+checkpatch warning.
+
+Signed-off-by: Guillaume Nault <gnault@redhat.com>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Link: https://patch.msgid.link/e9d40781d64d3d69f4c79ac8a008b8d67a033e8d.1727807926.git.gnault@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 27843ce6ba3d ("ipvlan: ensure network headers are in skb linear part")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/ip.h | 5 +++++
+ include/net/route.h | 5 +++--
+ net/bridge/br_netfilter_hooks.c | 8 +++++---
+ net/ipv4/icmp.c | 2 +-
+ net/ipv4/ip_options.c | 3 ++-
+ net/ipv6/ip6_tunnel.c | 4 ++--
+ 6 files changed, 18 insertions(+), 9 deletions(-)
+
+diff --git a/include/net/ip.h b/include/net/ip.h
+index fe4f854381143..bd201278c55a5 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -424,6 +424,11 @@ int ip_decrease_ttl(struct iphdr *iph)
+ return --iph->ttl;
+ }
+
++static inline dscp_t ip4h_dscp(const struct iphdr *ip4h)
++{
++ return inet_dsfield_to_dscp(ip4h->tos);
++}
++
+ static inline int ip_mtu_locked(const struct dst_entry *dst)
+ {
+ const struct rtable *rt = dst_rtable(dst);
+diff --git a/include/net/route.h b/include/net/route.h
+index da34b6fa9862d..8a11d19f897bb 100644
+--- a/include/net/route.h
++++ b/include/net/route.h
+@@ -208,12 +208,13 @@ int ip_route_use_hint(struct sk_buff *skb, __be32 dst, __be32 src,
+ const struct sk_buff *hint);
+
+ static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src,
+- u8 tos, struct net_device *devin)
++ dscp_t dscp, struct net_device *devin)
+ {
+ int err;
+
+ rcu_read_lock();
+- err = ip_route_input_noref(skb, dst, src, tos, devin);
++ err = ip_route_input_noref(skb, dst, src, inet_dscp_to_dsfield(dscp),
++ devin);
+ if (!err) {
+ skb_dst_force(skb);
+ if (!skb_dst(skb))
+diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
+index 1d458e9da660c..17a5f5923d615 100644
+--- a/net/bridge/br_netfilter_hooks.c
++++ b/net/bridge/br_netfilter_hooks.c
+@@ -370,9 +370,9 @@ br_nf_ipv4_daddr_was_changed(const struct sk_buff *skb,
+ */
+ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+ {
+- struct net_device *dev = skb->dev, *br_indev;
+- struct iphdr *iph = ip_hdr(skb);
+ struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
++ struct net_device *dev = skb->dev, *br_indev;
++ const struct iphdr *iph = ip_hdr(skb);
+ struct rtable *rt;
+ int err;
+
+@@ -390,7 +390,9 @@ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_
+ }
+ nf_bridge->in_prerouting = 0;
+ if (br_nf_ipv4_daddr_was_changed(skb, nf_bridge)) {
+- if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
++ err = ip_route_input(skb, iph->daddr, iph->saddr,
++ ip4h_dscp(iph), dev);
++ if (err) {
+ struct in_device *in_dev = __in_dev_get_rcu(dev);
+
+ /* If err equals -EHOSTUNREACH the error is due to a
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+index 0a2f988c4c24e..b8111ec651b54 100644
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -547,7 +547,7 @@ static struct rtable *icmp_route_lookup(struct net *net, struct flowi4 *fl4,
+ orefdst = skb_in->_skb_refdst; /* save old refdst */
+ skb_dst_set(skb_in, NULL);
+ err = ip_route_input(skb_in, fl4_dec.daddr, fl4_dec.saddr,
+- inet_dscp_to_dsfield(dscp), rt2->dst.dev);
++ dscp, rt2->dst.dev);
+
+ dst_release(&rt2->dst);
+ rt2 = skb_rtable(skb_in);
+diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
+index 68aedb8877b9f..81e86e5defee6 100644
+--- a/net/ipv4/ip_options.c
++++ b/net/ipv4/ip_options.c
+@@ -617,7 +617,8 @@ int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev)
+
+ orefdst = skb->_skb_refdst;
+ skb_dst_set(skb, NULL);
+- err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, dev);
++ err = ip_route_input(skb, nexthop, iph->saddr, ip4h_dscp(iph),
++ dev);
+ rt2 = skb_rtable(skb);
+ if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) {
+ skb_dst_drop(skb);
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index b60e13c42bcac..48fd53b989726 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -630,8 +630,8 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ }
+ skb_dst_set(skb2, &rt->dst);
+ } else {
+- if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
+- skb2->dev) ||
++ if (ip_route_input(skb2, eiph->daddr, eiph->saddr,
++ ip4h_dscp(eiph), skb2->dev) ||
+ skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6)
+ goto out;
+ }
+--
+2.39.5
+
--- /dev/null
+From fbafe8738427929aa778aec8e584aace574024b9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Feb 2025 15:53:36 +0000
+Subject: ipvlan: ensure network headers are in skb linear part
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 27843ce6ba3d3122b65066550fe33fb8839f8aef ]
+
+syzbot found that ipvlan_process_v6_outbound() was assuming
+the IPv6 network header is present in skb->head [1]
+
+Add the needed pskb_network_may_pull() calls for both
+IPv4 and IPv6 handlers.
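+
+The guard added for the IPv4 handler (taken from the diff below; the IPv6
+side is analogous with sizeof(struct ipv6hdr)):
+
+	if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
+		goto err;
+
+	ip4h = ip_hdr(skb);	/* safe now: the header is in the linear area */
+	fl4.daddr = ip4h->daddr;
+	fl4.saddr = ip4h->saddr;
+	fl4.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip4h));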
+
+[1]
+BUG: KMSAN: uninit-value in __ipv6_addr_type+0xa2/0x490 net/ipv6/addrconf_core.c:47
+ __ipv6_addr_type+0xa2/0x490 net/ipv6/addrconf_core.c:47
+ ipv6_addr_type include/net/ipv6.h:555 [inline]
+ ip6_route_output_flags_noref net/ipv6/route.c:2616 [inline]
+ ip6_route_output_flags+0x51/0x720 net/ipv6/route.c:2651
+ ip6_route_output include/net/ip6_route.h:93 [inline]
+ ipvlan_route_v6_outbound+0x24e/0x520 drivers/net/ipvlan/ipvlan_core.c:476
+ ipvlan_process_v6_outbound drivers/net/ipvlan/ipvlan_core.c:491 [inline]
+ ipvlan_process_outbound drivers/net/ipvlan/ipvlan_core.c:541 [inline]
+ ipvlan_xmit_mode_l3 drivers/net/ipvlan/ipvlan_core.c:605 [inline]
+ ipvlan_queue_xmit+0xd72/0x1780 drivers/net/ipvlan/ipvlan_core.c:671
+ ipvlan_start_xmit+0x5b/0x210 drivers/net/ipvlan/ipvlan_main.c:223
+ __netdev_start_xmit include/linux/netdevice.h:5150 [inline]
+ netdev_start_xmit include/linux/netdevice.h:5159 [inline]
+ xmit_one net/core/dev.c:3735 [inline]
+ dev_hard_start_xmit+0x247/0xa20 net/core/dev.c:3751
+ sch_direct_xmit+0x399/0xd40 net/sched/sch_generic.c:343
+ qdisc_restart net/sched/sch_generic.c:408 [inline]
+ __qdisc_run+0x14da/0x35d0 net/sched/sch_generic.c:416
+ qdisc_run+0x141/0x4d0 include/net/pkt_sched.h:127
+ net_tx_action+0x78b/0x940 net/core/dev.c:5484
+ handle_softirqs+0x1a0/0x7c0 kernel/softirq.c:561
+ __do_softirq+0x14/0x1a kernel/softirq.c:595
+ do_softirq+0x9a/0x100 kernel/softirq.c:462
+ __local_bh_enable_ip+0x9f/0xb0 kernel/softirq.c:389
+ local_bh_enable include/linux/bottom_half.h:33 [inline]
+ rcu_read_unlock_bh include/linux/rcupdate.h:919 [inline]
+ __dev_queue_xmit+0x2758/0x57d0 net/core/dev.c:4611
+ dev_queue_xmit include/linux/netdevice.h:3311 [inline]
+ packet_xmit+0x9c/0x6c0 net/packet/af_packet.c:276
+ packet_snd net/packet/af_packet.c:3132 [inline]
+ packet_sendmsg+0x93e0/0xa7e0 net/packet/af_packet.c:3164
+ sock_sendmsg_nosec net/socket.c:718 [inline]
+
+Fixes: 2ad7bf363841 ("ipvlan: Initial check-in of the IPVLAN driver.")
+Reported-by: syzbot+93ab4a777bafb9d9f960@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/netdev/67b74f01.050a0220.14d86d.02d8.GAE@google.com/T/#u
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Mahesh Bandewar <maheshb@google.com>
+Link: https://patch.msgid.link/20250220155336.61884-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ipvlan/ipvlan_core.c | 21 ++++++++++++++++-----
+ 1 file changed, 16 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
+index fd591ddb3884d..ca62188a317ad 100644
+--- a/drivers/net/ipvlan/ipvlan_core.c
++++ b/drivers/net/ipvlan/ipvlan_core.c
+@@ -416,20 +416,25 @@ struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
+
+ static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
+ {
+- const struct iphdr *ip4h = ip_hdr(skb);
+ struct net_device *dev = skb->dev;
+ struct net *net = dev_net(dev);
+- struct rtable *rt;
+ int err, ret = NET_XMIT_DROP;
++ const struct iphdr *ip4h;
++ struct rtable *rt;
+ struct flowi4 fl4 = {
+ .flowi4_oif = dev->ifindex,
+- .flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip4h)),
+ .flowi4_flags = FLOWI_FLAG_ANYSRC,
+ .flowi4_mark = skb->mark,
+- .daddr = ip4h->daddr,
+- .saddr = ip4h->saddr,
+ };
+
++ if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
++ goto err;
++
++ ip4h = ip_hdr(skb);
++ fl4.daddr = ip4h->daddr;
++ fl4.saddr = ip4h->saddr;
++ fl4.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip4h));
++
+ rt = ip_route_output_flow(net, &fl4, NULL);
+ if (IS_ERR(rt))
+ goto err;
+@@ -488,6 +493,12 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
+ struct net_device *dev = skb->dev;
+ int err, ret = NET_XMIT_DROP;
+
++ if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr))) {
++ DEV_STATS_INC(dev, tx_errors);
++ kfree_skb(skb);
++ return ret;
++ }
++
+ err = ipvlan_route_v6_outbound(dev, skb);
+ if (unlikely(err)) {
+ DEV_STATS_INC(dev, tx_errors);
+--
+2.39.5
+
--- /dev/null
+From 1c7239f4800525c0afea679b69332f4dabf3b0de Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Oct 2024 13:43:11 +0100
+Subject: ipvlan: Prepare ipvlan_process_v4_outbound() to future .flowi4_tos
+ conversion.
+
+From: Guillaume Nault <gnault@redhat.com>
+
+[ Upstream commit 0c30d6eedd1ec0c1382bcab9576d26413cd278a3 ]
+
+Use ip4h_dscp() to get the DSCP from the IPv4 header, then convert the
+dscp_t value to __u8 with inet_dscp_to_dsfield().
+
+Then, when we convert .flowi4_tos to dscp_t, we'll just have to drop
+the inet_dscp_to_dsfield() call.
+
+Signed-off-by: Guillaume Nault <gnault@redhat.com>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Link: https://patch.msgid.link/f48335504a05b3587e0081a9b4511e0761571ca5.1730292157.git.gnault@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 27843ce6ba3d ("ipvlan: ensure network headers are in skb linear part")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ipvlan/ipvlan_core.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
+index b1afcb8740de1..fd591ddb3884d 100644
+--- a/drivers/net/ipvlan/ipvlan_core.c
++++ b/drivers/net/ipvlan/ipvlan_core.c
+@@ -3,6 +3,7 @@
+ */
+
+ #include <net/inet_dscp.h>
++#include <net/ip.h>
+
+ #include "ipvlan.h"
+
+@@ -422,7 +423,7 @@ static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
+ int err, ret = NET_XMIT_DROP;
+ struct flowi4 fl4 = {
+ .flowi4_oif = dev->ifindex,
+- .flowi4_tos = ip4h->tos & INET_DSCP_MASK,
++ .flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip4h)),
+ .flowi4_flags = FLOWI_FLAG_ANYSRC,
+ .flowi4_mark = skb->mark,
+ .daddr = ip4h->daddr,
+--
+2.39.5
+
--- /dev/null
+From bc14aec9dfc50ab3f02bfb36ecbbd27ae4b80cf8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 22 Feb 2025 11:35:18 +0800
+Subject: ipvs: Always clear ipvs_property flag in skb_scrub_packet()
+
+From: Philo Lu <lulie@linux.alibaba.com>
+
+[ Upstream commit de2c211868b9424f9aa9b3432c4430825bafb41b ]
+
+We found an issue when using bpf_redirect with ipvs NAT mode after
+commit ff70202b2d1a ("dev_forward_skb: do not scrub skb mark within
+the same name space"). Particularly, we use bpf_redirect to return
+the skb directly back to the netif it comes from, i.e., xnet is
+false in skb_scrub_packet(), and then ipvs_property is preserved
+and SNAT is skipped in the rx path.
+
+ipvs_property has already been cleared when the netns is changed, since
+commit 2b5ec1a5f973 ("netfilter/ipvs: clear ipvs_property flag when
+SKB net namespace changed"). This patch clears it regardless of the
+netns.
+
+Fixes: 2b5ec1a5f973 ("netfilter/ipvs: clear ipvs_property flag when SKB net namespace changed")
+Signed-off-by: Philo Lu <lulie@linux.alibaba.com>
+Acked-by: Julian Anastasov <ja@ssi.bg>
+Link: https://patch.msgid.link/20250222033518.126087-1-lulie@linux.alibaba.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/skbuff.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 61a950f13a91c..f220306731dac 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -6127,11 +6127,11 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
+ skb->offload_fwd_mark = 0;
+ skb->offload_l3_fwd_mark = 0;
+ #endif
++ ipvs_reset(skb);
+
+ if (!xnet)
+ return;
+
+- ipvs_reset(skb);
+ skb->mark = 0;
+ skb_clear_tstamp(skb);
+ }
+--
+2.39.5
+
--- /dev/null
+From 6201022cf86090a9e347d437de38cc9e79345c5a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Feb 2025 11:29:50 -0500
+Subject: net: cadence: macb: Synchronize stats calculations
+
+From: Sean Anderson <sean.anderson@linux.dev>
+
+[ Upstream commit fa52f15c745ce55261b92873676f64f7348cfe82 ]
+
+Stats calculations involve an RMW to add the stat update to the existing
+value. This is currently not protected by any synchronization mechanism,
+so data races are possible. Add a spinlock to protect the update. The
+reader side could be protected using u64_stats, but we would still need
+a spinlock for the update side anyway, and we always do an update
+immediately before reading the stats.
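+
+A sketch of the resulting locking, condensed from the diff below (plain
+spin_lock() in the interrupt handler, spin_lock_irq() around the
+process-context update and read):
+
+	/* macb_interrupt(): accumulate an overrun under the lock */
+	spin_lock(&bp->stats_lock);
+	bp->hw_stats.gem.rx_overruns++;
+	spin_unlock(&bp->stats_lock);
+
+	/* gem_get_stats() / gem_get_ethtool_stats(): refresh, then copy out */
+	spin_lock_irq(&bp->stats_lock);
+	gem_update_stats(bp);
+	/* ... read hwstat / bp->ethtool_stats while still holding the lock */
+	spin_unlock_irq(&bp->stats_lock);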
+
+Fixes: 89e5785fc8a6 ("[PATCH] Atmel MACB ethernet driver")
+Signed-off-by: Sean Anderson <sean.anderson@linux.dev>
+Link: https://patch.msgid.link/20250220162950.95941-1-sean.anderson@linux.dev
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/cadence/macb.h | 2 ++
+ drivers/net/ethernet/cadence/macb_main.c | 12 ++++++++++--
+ 2 files changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
+index 5740c98d8c9f0..2847278d9cd48 100644
+--- a/drivers/net/ethernet/cadence/macb.h
++++ b/drivers/net/ethernet/cadence/macb.h
+@@ -1279,6 +1279,8 @@ struct macb {
+ struct clk *rx_clk;
+ struct clk *tsu_clk;
+ struct net_device *dev;
++ /* Protects hw_stats and ethtool_stats */
++ spinlock_t stats_lock;
+ union {
+ struct macb_stats macb;
+ struct gem_stats gem;
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index 56901280ba047..60847cdb516ee 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -1992,10 +1992,12 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
+
+ if (status & MACB_BIT(ISR_ROVR)) {
+ /* We missed at least one packet */
++ spin_lock(&bp->stats_lock);
+ if (macb_is_gem(bp))
+ bp->hw_stats.gem.rx_overruns++;
+ else
+ bp->hw_stats.macb.rx_overruns++;
++ spin_unlock(&bp->stats_lock);
+
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
+@@ -3116,6 +3118,7 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
+ if (!netif_running(bp->dev))
+ return nstat;
+
++ spin_lock_irq(&bp->stats_lock);
+ gem_update_stats(bp);
+
+ nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
+@@ -3145,6 +3148,7 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
+ nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
+ nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
+ nstat->tx_fifo_errors = hwstat->tx_underrun;
++ spin_unlock_irq(&bp->stats_lock);
+
+ return nstat;
+ }
+@@ -3152,12 +3156,13 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
+ static void gem_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+ {
+- struct macb *bp;
++ struct macb *bp = netdev_priv(dev);
+
+- bp = netdev_priv(dev);
++ spin_lock_irq(&bp->stats_lock);
+ gem_update_stats(bp);
+ memcpy(data, &bp->ethtool_stats, sizeof(u64)
+ * (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
++ spin_unlock_irq(&bp->stats_lock);
+ }
+
+ static int gem_get_sset_count(struct net_device *dev, int sset)
+@@ -3207,6 +3212,7 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
+ return gem_get_stats(bp);
+
+ /* read stats from hardware */
++ spin_lock_irq(&bp->stats_lock);
+ macb_update_stats(bp);
+
+ /* Convert HW stats into netdevice stats */
+@@ -3240,6 +3246,7 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
+ nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
+ nstat->tx_fifo_errors = hwstat->tx_underruns;
+ /* Don't know about heartbeat or window errors... */
++ spin_unlock_irq(&bp->stats_lock);
+
+ return nstat;
+ }
+@@ -5110,6 +5117,7 @@ static int macb_probe(struct platform_device *pdev)
+ }
+ }
+ spin_lock_init(&bp->lock);
++ spin_lock_init(&bp->stats_lock);
+
+ /* setup capabilities */
+ macb_configure_caps(bp, macb_config);
+--
+2.39.5
+
--- /dev/null
+From b761861d7796cc7102387c80d0750c5490e35916 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Feb 2025 13:28:52 +0200
+Subject: net: Clear old fragment checksum value in napi_reuse_skb
+
+From: Mohammad Heib <mheib@redhat.com>
+
+[ Upstream commit 49806fe6e61b045b5be8610e08b5a3083c109aa0 ]
+
+In certain cases, napi_get_frags() returns an skb that points to an old
+received fragment. This skb may have its skb->ip_summed, csum, and other
+fields set from previous fragment handling.
+
+Some network drivers set skb->ip_summed to either CHECKSUM_COMPLETE or
+CHECKSUM_UNNECESSARY when getting the skb from napi_get_frags(), while
+others only set skb->ip_summed when RX checksum offload is enabled on
+the device and do not set any value for skb->ip_summed when hardware
+checksum offload is disabled, assuming that skb->ip_summed was
+initialized to zero by napi_reuse_skb(). The ionic driver, for example,
+ignores/unsets any value for the ip_summed field if HW checksum offload
+is disabled. If the user disables checksum offload while traffic is
+flowing, this can lead to the following errors shown in the kernel logs:
+<IRQ>
+dump_stack_lvl+0x34/0x48
+ __skb_gro_checksum_complete+0x7e/0x90
+tcp6_gro_receive+0xc6/0x190
+ipv6_gro_receive+0x1ec/0x430
+dev_gro_receive+0x188/0x360
+? ionic_rx_clean+0x25a/0x460 [ionic]
+napi_gro_frags+0x13c/0x300
+? __pfx_ionic_rx_service+0x10/0x10 [ionic]
+ionic_rx_service+0x67/0x80 [ionic]
+ionic_cq_service+0x58/0x90 [ionic]
+ionic_txrx_napi+0x64/0x1b0 [ionic]
+ __napi_poll+0x27/0x170
+net_rx_action+0x29c/0x370
+handle_softirqs+0xce/0x270
+__irq_exit_rcu+0xa3/0xc0
+common_interrupt+0x80/0xa0
+</IRQ>
+
+This inconsistency sometimes leads to checksum validation issues in the
+upper layers of the network stack.
+
+To resolve this, this patch clears the skb->ip_summed value for each
+skb reused by napi_reuse_skb(), ensuring that the caller is responsible
+for setting the correct checksum status. This eliminates potential
+checksum validation issues caused by improper handling of
+skb->ip_summed.
+
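+A minimal sketch of the driver RX pattern this protects (illustrative
+only; not taken from any particular driver, and the offload flag name
+below is hypothetical):
+
+	skb = napi_get_frags(napi);
+	if (!skb)
+		return;
+	/* ... fill in frags and length from the RX descriptor ... */
+	if (rx_csum_offload_enabled)	/* hypothetical per-device flag */
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	/* else: rely on napi_reuse_skb() having reset ip_summed to
+	 * CHECKSUM_NONE for a recycled skb, which is what this patch
+	 * guarantees.
+	 */
+	napi_gro_frags(napi);
+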
+Fixes: 76620aafd66f ("gro: New frags interface to avoid copying shinfo")
+Signed-off-by: Mohammad Heib <mheib@redhat.com>
+Reviewed-by: Shannon Nelson <shannon.nelson@amd.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Link: https://patch.msgid.link/20250225112852.2507709-1-mheib@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/gro.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/core/gro.c b/net/core/gro.c
+index 78b320b631744..0ad549b07e039 100644
+--- a/net/core/gro.c
++++ b/net/core/gro.c
+@@ -653,6 +653,7 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
+ skb->pkt_type = PACKET_HOST;
+
+ skb->encapsulation = 0;
++ skb->ip_summed = CHECKSUM_NONE;
+ skb_shinfo(skb)->gso_type = 0;
+ skb_shinfo(skb)->gso_size = 0;
+ if (unlikely(skb->slow_gro)) {
+--
+2.39.5
+
--- /dev/null
+From 3061e88f67001e1402d284f301864999d1ee92aa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Feb 2025 19:48:15 +0100
+Subject: net: dsa: rtl8366rb: Fix compilation problem
+
+From: Linus Walleij <linus.walleij@linaro.org>
+
+[ Upstream commit f15176b8b6e72ac30e14fd273282d2b72562d26b ]
+
+When the kernel is compiled without LED framework support, the
+rtl8366rb driver fails to build like this:
+
+rtl8366rb.o: in function `rtl8366rb_setup_led':
+rtl8366rb.c:953:(.text.unlikely.rtl8366rb_setup_led+0xe8):
+ undefined reference to `led_init_default_state_get'
+rtl8366rb.c:980:(.text.unlikely.rtl8366rb_setup_led+0x240):
+ undefined reference to `devm_led_classdev_register_ext'
+
+As this is constantly coming up in different randconfig builds,
+bite the bullet and create a separate file for the offending
+code, splitting out a header with everything needed by both the
+core driver and the LEDs code.
+
+Add a new bool Kconfig option for the LED compile target, such
+that it depends on LEDS_CLASS=y || LEDS_CLASS=NET_DSA_REALTEK_RTL8366RB.
+This makes LED support always available when LEDS_CLASS is
+compiled into the kernel, and enforces that if LEDS_CLASS is a
+module, then the RTL8366RB driver must be a module as well so
+that modprobe can resolve the dependencies.
+
+Fixes: 32d617005475 ("net: dsa: realtek: add LED drivers for rtl8366rb")
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202502070525.xMUImayb-lkp@intel.com/
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/realtek/Kconfig | 6 +
+ drivers/net/dsa/realtek/Makefile | 3 +
+ drivers/net/dsa/realtek/rtl8366rb-leds.c | 177 ++++++++++++++++
+ drivers/net/dsa/realtek/rtl8366rb.c | 258 +----------------------
+ drivers/net/dsa/realtek/rtl8366rb.h | 107 ++++++++++
+ 5 files changed, 299 insertions(+), 252 deletions(-)
+ create mode 100644 drivers/net/dsa/realtek/rtl8366rb-leds.c
+ create mode 100644 drivers/net/dsa/realtek/rtl8366rb.h
+
+diff --git a/drivers/net/dsa/realtek/Kconfig b/drivers/net/dsa/realtek/Kconfig
+index 6989972eebc30..10687722d14c0 100644
+--- a/drivers/net/dsa/realtek/Kconfig
++++ b/drivers/net/dsa/realtek/Kconfig
+@@ -43,4 +43,10 @@ config NET_DSA_REALTEK_RTL8366RB
+ help
+ Select to enable support for Realtek RTL8366RB.
+
++config NET_DSA_REALTEK_RTL8366RB_LEDS
++ bool "Support RTL8366RB LED control"
++ depends on (LEDS_CLASS=y || LEDS_CLASS=NET_DSA_REALTEK_RTL8366RB)
++ depends on NET_DSA_REALTEK_RTL8366RB
++ default NET_DSA_REALTEK_RTL8366RB
++
+ endif
+diff --git a/drivers/net/dsa/realtek/Makefile b/drivers/net/dsa/realtek/Makefile
+index 35491dc20d6d6..17367bcba496c 100644
+--- a/drivers/net/dsa/realtek/Makefile
++++ b/drivers/net/dsa/realtek/Makefile
+@@ -12,4 +12,7 @@ endif
+
+ obj-$(CONFIG_NET_DSA_REALTEK_RTL8366RB) += rtl8366.o
+ rtl8366-objs := rtl8366-core.o rtl8366rb.o
++ifdef CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS
++rtl8366-objs += rtl8366rb-leds.o
++endif
+ obj-$(CONFIG_NET_DSA_REALTEK_RTL8365MB) += rtl8365mb.o
+diff --git a/drivers/net/dsa/realtek/rtl8366rb-leds.c b/drivers/net/dsa/realtek/rtl8366rb-leds.c
+new file mode 100644
+index 0000000000000..99c890681ae60
+--- /dev/null
++++ b/drivers/net/dsa/realtek/rtl8366rb-leds.c
+@@ -0,0 +1,177 @@
++// SPDX-License-Identifier: GPL-2.0
++
++#include <linux/bitops.h>
++#include <linux/regmap.h>
++#include <net/dsa.h>
++#include "rtl83xx.h"
++#include "rtl8366rb.h"
++
++static inline u32 rtl8366rb_led_group_port_mask(u8 led_group, u8 port)
++{
++ switch (led_group) {
++ case 0:
++ return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
++ case 1:
++ return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
++ case 2:
++ return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
++ case 3:
++ return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
++ default:
++ return 0;
++ }
++}
++
++static int rb8366rb_get_port_led(struct rtl8366rb_led *led)
++{
++ struct realtek_priv *priv = led->priv;
++ u8 led_group = led->led_group;
++ u8 port_num = led->port_num;
++ int ret;
++ u32 val;
++
++ ret = regmap_read(priv->map, RTL8366RB_LED_X_X_CTRL_REG(led_group),
++ &val);
++ if (ret) {
++ dev_err(priv->dev, "error reading LED on port %d group %d\n",
++ led_group, port_num);
++ return ret;
++ }
++
++ return !!(val & rtl8366rb_led_group_port_mask(led_group, port_num));
++}
++
++static int rb8366rb_set_port_led(struct rtl8366rb_led *led, bool enable)
++{
++ struct realtek_priv *priv = led->priv;
++ u8 led_group = led->led_group;
++ u8 port_num = led->port_num;
++ int ret;
++
++ ret = regmap_update_bits(priv->map,
++ RTL8366RB_LED_X_X_CTRL_REG(led_group),
++ rtl8366rb_led_group_port_mask(led_group,
++ port_num),
++ enable ? 0xffff : 0);
++ if (ret) {
++ dev_err(priv->dev, "error updating LED on port %d group %d\n",
++ led_group, port_num);
++ return ret;
++ }
++
++ /* Change the LED group to manual controlled LEDs if required */
++ ret = rb8366rb_set_ledgroup_mode(priv, led_group,
++ RTL8366RB_LEDGROUP_FORCE);
++
++ if (ret) {
++ dev_err(priv->dev, "error updating LED GROUP group %d\n",
++ led_group);
++ return ret;
++ }
++
++ return 0;
++}
++
++static int
++rtl8366rb_cled_brightness_set_blocking(struct led_classdev *ldev,
++ enum led_brightness brightness)
++{
++ struct rtl8366rb_led *led = container_of(ldev, struct rtl8366rb_led,
++ cdev);
++
++ return rb8366rb_set_port_led(led, brightness == LED_ON);
++}
++
++static int rtl8366rb_setup_led(struct realtek_priv *priv, struct dsa_port *dp,
++ struct fwnode_handle *led_fwnode)
++{
++ struct rtl8366rb *rb = priv->chip_data;
++ struct led_init_data init_data = { };
++ enum led_default_state state;
++ struct rtl8366rb_led *led;
++ u32 led_group;
++ int ret;
++
++ ret = fwnode_property_read_u32(led_fwnode, "reg", &led_group);
++ if (ret)
++ return ret;
++
++ if (led_group >= RTL8366RB_NUM_LEDGROUPS) {
++ dev_warn(priv->dev, "Invalid LED reg %d defined for port %d",
++ led_group, dp->index);
++ return -EINVAL;
++ }
++
++ led = &rb->leds[dp->index][led_group];
++ led->port_num = dp->index;
++ led->led_group = led_group;
++ led->priv = priv;
++
++ state = led_init_default_state_get(led_fwnode);
++ switch (state) {
++ case LEDS_DEFSTATE_ON:
++ led->cdev.brightness = 1;
++ rb8366rb_set_port_led(led, 1);
++ break;
++ case LEDS_DEFSTATE_KEEP:
++ led->cdev.brightness =
++ rb8366rb_get_port_led(led);
++ break;
++ case LEDS_DEFSTATE_OFF:
++ default:
++ led->cdev.brightness = 0;
++ rb8366rb_set_port_led(led, 0);
++ }
++
++ led->cdev.max_brightness = 1;
++ led->cdev.brightness_set_blocking =
++ rtl8366rb_cled_brightness_set_blocking;
++ init_data.fwnode = led_fwnode;
++ init_data.devname_mandatory = true;
++
++ init_data.devicename = kasprintf(GFP_KERNEL, "Realtek-%d:0%d:%d",
++ dp->ds->index, dp->index, led_group);
++ if (!init_data.devicename)
++ return -ENOMEM;
++
++ ret = devm_led_classdev_register_ext(priv->dev, &led->cdev, &init_data);
++ if (ret) {
++ dev_warn(priv->dev, "Failed to init LED %d for port %d",
++ led_group, dp->index);
++ return ret;
++ }
++
++ return 0;
++}
++
++int rtl8366rb_setup_leds(struct realtek_priv *priv)
++{
++ struct dsa_switch *ds = &priv->ds;
++ struct device_node *leds_np;
++ struct dsa_port *dp;
++ int ret = 0;
++
++ dsa_switch_for_each_port(dp, ds) {
++ if (!dp->dn)
++ continue;
++
++ leds_np = of_get_child_by_name(dp->dn, "leds");
++ if (!leds_np) {
++ dev_dbg(priv->dev, "No leds defined for port %d",
++ dp->index);
++ continue;
++ }
++
++ for_each_child_of_node_scoped(leds_np, led_np) {
++ ret = rtl8366rb_setup_led(priv, dp,
++ of_fwnode_handle(led_np));
++ if (ret)
++ break;
++ }
++
++ of_node_put(leds_np);
++ if (ret)
++ return ret;
++ }
++ return 0;
++}
+diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c
+index c7a8cd0605878..ae3d49fc22b80 100644
+--- a/drivers/net/dsa/realtek/rtl8366rb.c
++++ b/drivers/net/dsa/realtek/rtl8366rb.c
+@@ -26,11 +26,7 @@
+ #include "realtek-smi.h"
+ #include "realtek-mdio.h"
+ #include "rtl83xx.h"
+-
+-#define RTL8366RB_PORT_NUM_CPU 5
+-#define RTL8366RB_NUM_PORTS 6
+-#define RTL8366RB_PHY_NO_MAX 4
+-#define RTL8366RB_PHY_ADDR_MAX 31
++#include "rtl8366rb.h"
+
+ /* Switch Global Configuration register */
+ #define RTL8366RB_SGCR 0x0000
+@@ -175,39 +171,6 @@
+ */
+ #define RTL8366RB_VLAN_INGRESS_CTRL2_REG 0x037f
+
+-/* LED control registers */
+-/* The LED blink rate is global; it is used by all triggers in all groups. */
+-#define RTL8366RB_LED_BLINKRATE_REG 0x0430
+-#define RTL8366RB_LED_BLINKRATE_MASK 0x0007
+-#define RTL8366RB_LED_BLINKRATE_28MS 0x0000
+-#define RTL8366RB_LED_BLINKRATE_56MS 0x0001
+-#define RTL8366RB_LED_BLINKRATE_84MS 0x0002
+-#define RTL8366RB_LED_BLINKRATE_111MS 0x0003
+-#define RTL8366RB_LED_BLINKRATE_222MS 0x0004
+-#define RTL8366RB_LED_BLINKRATE_446MS 0x0005
+-
+-/* LED trigger event for each group */
+-#define RTL8366RB_LED_CTRL_REG 0x0431
+-#define RTL8366RB_LED_CTRL_OFFSET(led_group) \
+- (4 * (led_group))
+-#define RTL8366RB_LED_CTRL_MASK(led_group) \
+- (0xf << RTL8366RB_LED_CTRL_OFFSET(led_group))
+-
+-/* The RTL8366RB_LED_X_X registers are used to manually set the LED state only
+- * when the corresponding LED group in RTL8366RB_LED_CTRL_REG is
+- * RTL8366RB_LEDGROUP_FORCE. Otherwise, it is ignored.
+- */
+-#define RTL8366RB_LED_0_1_CTRL_REG 0x0432
+-#define RTL8366RB_LED_2_3_CTRL_REG 0x0433
+-#define RTL8366RB_LED_X_X_CTRL_REG(led_group) \
+- ((led_group) <= 1 ? \
+- RTL8366RB_LED_0_1_CTRL_REG : \
+- RTL8366RB_LED_2_3_CTRL_REG)
+-#define RTL8366RB_LED_0_X_CTRL_MASK GENMASK(5, 0)
+-#define RTL8366RB_LED_X_1_CTRL_MASK GENMASK(11, 6)
+-#define RTL8366RB_LED_2_X_CTRL_MASK GENMASK(5, 0)
+-#define RTL8366RB_LED_X_3_CTRL_MASK GENMASK(11, 6)
+-
+ #define RTL8366RB_MIB_COUNT 33
+ #define RTL8366RB_GLOBAL_MIB_COUNT 1
+ #define RTL8366RB_MIB_COUNTER_PORT_OFFSET 0x0050
+@@ -243,7 +206,6 @@
+ #define RTL8366RB_PORT_STATUS_AN_MASK 0x0080
+
+ #define RTL8366RB_NUM_VLANS 16
+-#define RTL8366RB_NUM_LEDGROUPS 4
+ #define RTL8366RB_NUM_VIDS 4096
+ #define RTL8366RB_PRIORITYMAX 7
+ #define RTL8366RB_NUM_FIDS 8
+@@ -350,46 +312,6 @@
+ #define RTL8366RB_GREEN_FEATURE_TX BIT(0)
+ #define RTL8366RB_GREEN_FEATURE_RX BIT(2)
+
+-enum rtl8366_ledgroup_mode {
+- RTL8366RB_LEDGROUP_OFF = 0x0,
+- RTL8366RB_LEDGROUP_DUP_COL = 0x1,
+- RTL8366RB_LEDGROUP_LINK_ACT = 0x2,
+- RTL8366RB_LEDGROUP_SPD1000 = 0x3,
+- RTL8366RB_LEDGROUP_SPD100 = 0x4,
+- RTL8366RB_LEDGROUP_SPD10 = 0x5,
+- RTL8366RB_LEDGROUP_SPD1000_ACT = 0x6,
+- RTL8366RB_LEDGROUP_SPD100_ACT = 0x7,
+- RTL8366RB_LEDGROUP_SPD10_ACT = 0x8,
+- RTL8366RB_LEDGROUP_SPD100_10_ACT = 0x9,
+- RTL8366RB_LEDGROUP_FIBER = 0xa,
+- RTL8366RB_LEDGROUP_AN_FAULT = 0xb,
+- RTL8366RB_LEDGROUP_LINK_RX = 0xc,
+- RTL8366RB_LEDGROUP_LINK_TX = 0xd,
+- RTL8366RB_LEDGROUP_MASTER = 0xe,
+- RTL8366RB_LEDGROUP_FORCE = 0xf,
+-
+- __RTL8366RB_LEDGROUP_MODE_MAX
+-};
+-
+-struct rtl8366rb_led {
+- u8 port_num;
+- u8 led_group;
+- struct realtek_priv *priv;
+- struct led_classdev cdev;
+-};
+-
+-/**
+- * struct rtl8366rb - RTL8366RB-specific data
+- * @max_mtu: per-port max MTU setting
+- * @pvid_enabled: if PVID is set for respective port
+- * @leds: per-port and per-ledgroup led info
+- */
+-struct rtl8366rb {
+- unsigned int max_mtu[RTL8366RB_NUM_PORTS];
+- bool pvid_enabled[RTL8366RB_NUM_PORTS];
+- struct rtl8366rb_led leds[RTL8366RB_NUM_PORTS][RTL8366RB_NUM_LEDGROUPS];
+-};
+-
+ static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = {
+ { 0, 0, 4, "IfInOctets" },
+ { 0, 4, 4, "EtherStatsOctets" },
+@@ -830,9 +752,10 @@ static int rtl8366rb_jam_table(const struct rtl8366rb_jam_tbl_entry *jam_table,
+ return 0;
+ }
+
+-static int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
+- u8 led_group,
+- enum rtl8366_ledgroup_mode mode)
++/* This code is used also with LEDs disabled */
++int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
++ u8 led_group,
++ enum rtl8366_ledgroup_mode mode)
+ {
+ int ret;
+ u32 val;
+@@ -849,144 +772,7 @@ static int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
+ return 0;
+ }
+
+-static inline u32 rtl8366rb_led_group_port_mask(u8 led_group, u8 port)
+-{
+- switch (led_group) {
+- case 0:
+- return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+- case 1:
+- return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+- case 2:
+- return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+- case 3:
+- return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+- default:
+- return 0;
+- }
+-}
+-
+-static int rb8366rb_get_port_led(struct rtl8366rb_led *led)
+-{
+- struct realtek_priv *priv = led->priv;
+- u8 led_group = led->led_group;
+- u8 port_num = led->port_num;
+- int ret;
+- u32 val;
+-
+- ret = regmap_read(priv->map, RTL8366RB_LED_X_X_CTRL_REG(led_group),
+- &val);
+- if (ret) {
+- dev_err(priv->dev, "error reading LED on port %d group %d\n",
+- led_group, port_num);
+- return ret;
+- }
+-
+- return !!(val & rtl8366rb_led_group_port_mask(led_group, port_num));
+-}
+-
+-static int rb8366rb_set_port_led(struct rtl8366rb_led *led, bool enable)
+-{
+- struct realtek_priv *priv = led->priv;
+- u8 led_group = led->led_group;
+- u8 port_num = led->port_num;
+- int ret;
+-
+- ret = regmap_update_bits(priv->map,
+- RTL8366RB_LED_X_X_CTRL_REG(led_group),
+- rtl8366rb_led_group_port_mask(led_group,
+- port_num),
+- enable ? 0xffff : 0);
+- if (ret) {
+- dev_err(priv->dev, "error updating LED on port %d group %d\n",
+- led_group, port_num);
+- return ret;
+- }
+-
+- /* Change the LED group to manual controlled LEDs if required */
+- ret = rb8366rb_set_ledgroup_mode(priv, led_group,
+- RTL8366RB_LEDGROUP_FORCE);
+-
+- if (ret) {
+- dev_err(priv->dev, "error updating LED GROUP group %d\n",
+- led_group);
+- return ret;
+- }
+-
+- return 0;
+-}
+-
+-static int
+-rtl8366rb_cled_brightness_set_blocking(struct led_classdev *ldev,
+- enum led_brightness brightness)
+-{
+- struct rtl8366rb_led *led = container_of(ldev, struct rtl8366rb_led,
+- cdev);
+-
+- return rb8366rb_set_port_led(led, brightness == LED_ON);
+-}
+-
+-static int rtl8366rb_setup_led(struct realtek_priv *priv, struct dsa_port *dp,
+- struct fwnode_handle *led_fwnode)
+-{
+- struct rtl8366rb *rb = priv->chip_data;
+- struct led_init_data init_data = { };
+- enum led_default_state state;
+- struct rtl8366rb_led *led;
+- u32 led_group;
+- int ret;
+-
+- ret = fwnode_property_read_u32(led_fwnode, "reg", &led_group);
+- if (ret)
+- return ret;
+-
+- if (led_group >= RTL8366RB_NUM_LEDGROUPS) {
+- dev_warn(priv->dev, "Invalid LED reg %d defined for port %d",
+- led_group, dp->index);
+- return -EINVAL;
+- }
+-
+- led = &rb->leds[dp->index][led_group];
+- led->port_num = dp->index;
+- led->led_group = led_group;
+- led->priv = priv;
+-
+- state = led_init_default_state_get(led_fwnode);
+- switch (state) {
+- case LEDS_DEFSTATE_ON:
+- led->cdev.brightness = 1;
+- rb8366rb_set_port_led(led, 1);
+- break;
+- case LEDS_DEFSTATE_KEEP:
+- led->cdev.brightness =
+- rb8366rb_get_port_led(led);
+- break;
+- case LEDS_DEFSTATE_OFF:
+- default:
+- led->cdev.brightness = 0;
+- rb8366rb_set_port_led(led, 0);
+- }
+-
+- led->cdev.max_brightness = 1;
+- led->cdev.brightness_set_blocking =
+- rtl8366rb_cled_brightness_set_blocking;
+- init_data.fwnode = led_fwnode;
+- init_data.devname_mandatory = true;
+-
+- init_data.devicename = kasprintf(GFP_KERNEL, "Realtek-%d:0%d:%d",
+- dp->ds->index, dp->index, led_group);
+- if (!init_data.devicename)
+- return -ENOMEM;
+-
+- ret = devm_led_classdev_register_ext(priv->dev, &led->cdev, &init_data);
+- if (ret) {
+- dev_warn(priv->dev, "Failed to init LED %d for port %d",
+- led_group, dp->index);
+- return ret;
+- }
+-
+- return 0;
+-}
+-
++/* This code is used also with LEDs disabled */
+ static int rtl8366rb_setup_all_leds_off(struct realtek_priv *priv)
+ {
+ int ret = 0;
+@@ -1007,38 +793,6 @@ static int rtl8366rb_setup_all_leds_off(struct realtek_priv *priv)
+ return ret;
+ }
+
+-static int rtl8366rb_setup_leds(struct realtek_priv *priv)
+-{
+- struct dsa_switch *ds = &priv->ds;
+- struct device_node *leds_np;
+- struct dsa_port *dp;
+- int ret = 0;
+-
+- dsa_switch_for_each_port(dp, ds) {
+- if (!dp->dn)
+- continue;
+-
+- leds_np = of_get_child_by_name(dp->dn, "leds");
+- if (!leds_np) {
+- dev_dbg(priv->dev, "No leds defined for port %d",
+- dp->index);
+- continue;
+- }
+-
+- for_each_child_of_node_scoped(leds_np, led_np) {
+- ret = rtl8366rb_setup_led(priv, dp,
+- of_fwnode_handle(led_np));
+- if (ret)
+- break;
+- }
+-
+- of_node_put(leds_np);
+- if (ret)
+- return ret;
+- }
+- return 0;
+-}
+-
+ static int rtl8366rb_setup(struct dsa_switch *ds)
+ {
+ struct realtek_priv *priv = ds->priv;
+diff --git a/drivers/net/dsa/realtek/rtl8366rb.h b/drivers/net/dsa/realtek/rtl8366rb.h
+new file mode 100644
+index 0000000000000..685ff3275faa1
+--- /dev/null
++++ b/drivers/net/dsa/realtek/rtl8366rb.h
+@@ -0,0 +1,107 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++
++#ifndef _RTL8366RB_H
++#define _RTL8366RB_H
++
++#include "realtek.h"
++
++#define RTL8366RB_PORT_NUM_CPU 5
++#define RTL8366RB_NUM_PORTS 6
++#define RTL8366RB_PHY_NO_MAX 4
++#define RTL8366RB_NUM_LEDGROUPS 4
++#define RTL8366RB_PHY_ADDR_MAX 31
++
++/* LED control registers */
++/* The LED blink rate is global; it is used by all triggers in all groups. */
++#define RTL8366RB_LED_BLINKRATE_REG 0x0430
++#define RTL8366RB_LED_BLINKRATE_MASK 0x0007
++#define RTL8366RB_LED_BLINKRATE_28MS 0x0000
++#define RTL8366RB_LED_BLINKRATE_56MS 0x0001
++#define RTL8366RB_LED_BLINKRATE_84MS 0x0002
++#define RTL8366RB_LED_BLINKRATE_111MS 0x0003
++#define RTL8366RB_LED_BLINKRATE_222MS 0x0004
++#define RTL8366RB_LED_BLINKRATE_446MS 0x0005
++
++/* LED trigger event for each group */
++#define RTL8366RB_LED_CTRL_REG 0x0431
++#define RTL8366RB_LED_CTRL_OFFSET(led_group) \
++ (4 * (led_group))
++#define RTL8366RB_LED_CTRL_MASK(led_group) \
++ (0xf << RTL8366RB_LED_CTRL_OFFSET(led_group))
++
++/* The RTL8366RB_LED_X_X registers are used to manually set the LED state only
++ * when the corresponding LED group in RTL8366RB_LED_CTRL_REG is
++ * RTL8366RB_LEDGROUP_FORCE. Otherwise, it is ignored.
++ */
++#define RTL8366RB_LED_0_1_CTRL_REG 0x0432
++#define RTL8366RB_LED_2_3_CTRL_REG 0x0433
++#define RTL8366RB_LED_X_X_CTRL_REG(led_group) \
++ ((led_group) <= 1 ? \
++ RTL8366RB_LED_0_1_CTRL_REG : \
++ RTL8366RB_LED_2_3_CTRL_REG)
++#define RTL8366RB_LED_0_X_CTRL_MASK GENMASK(5, 0)
++#define RTL8366RB_LED_X_1_CTRL_MASK GENMASK(11, 6)
++#define RTL8366RB_LED_2_X_CTRL_MASK GENMASK(5, 0)
++#define RTL8366RB_LED_X_3_CTRL_MASK GENMASK(11, 6)
++
++enum rtl8366_ledgroup_mode {
++ RTL8366RB_LEDGROUP_OFF = 0x0,
++ RTL8366RB_LEDGROUP_DUP_COL = 0x1,
++ RTL8366RB_LEDGROUP_LINK_ACT = 0x2,
++ RTL8366RB_LEDGROUP_SPD1000 = 0x3,
++ RTL8366RB_LEDGROUP_SPD100 = 0x4,
++ RTL8366RB_LEDGROUP_SPD10 = 0x5,
++ RTL8366RB_LEDGROUP_SPD1000_ACT = 0x6,
++ RTL8366RB_LEDGROUP_SPD100_ACT = 0x7,
++ RTL8366RB_LEDGROUP_SPD10_ACT = 0x8,
++ RTL8366RB_LEDGROUP_SPD100_10_ACT = 0x9,
++ RTL8366RB_LEDGROUP_FIBER = 0xa,
++ RTL8366RB_LEDGROUP_AN_FAULT = 0xb,
++ RTL8366RB_LEDGROUP_LINK_RX = 0xc,
++ RTL8366RB_LEDGROUP_LINK_TX = 0xd,
++ RTL8366RB_LEDGROUP_MASTER = 0xe,
++ RTL8366RB_LEDGROUP_FORCE = 0xf,
++
++ __RTL8366RB_LEDGROUP_MODE_MAX
++};
++
++#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS)
++
++struct rtl8366rb_led {
++ u8 port_num;
++ u8 led_group;
++ struct realtek_priv *priv;
++ struct led_classdev cdev;
++};
++
++int rtl8366rb_setup_leds(struct realtek_priv *priv);
++
++#else
++
++static inline int rtl8366rb_setup_leds(struct realtek_priv *priv)
++{
++ return 0;
++}
++
++#endif /* IS_ENABLED(CONFIG_LEDS_CLASS) */
++
++/**
++ * struct rtl8366rb - RTL8366RB-specific data
++ * @max_mtu: per-port max MTU setting
++ * @pvid_enabled: if PVID is set for respective port
++ * @leds: per-port and per-ledgroup led info
++ */
++struct rtl8366rb {
++ unsigned int max_mtu[RTL8366RB_NUM_PORTS];
++ bool pvid_enabled[RTL8366RB_NUM_PORTS];
++#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS)
++ struct rtl8366rb_led leds[RTL8366RB_NUM_PORTS][RTL8366RB_NUM_LEDGROUPS];
++#endif
++};
++
++/* This code is used also with LEDs disabled */
++int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
++ u8 led_group,
++ enum rtl8366_ledgroup_mode mode);
++
++#endif /* _RTL8366RB_H */
+--
+2.39.5
+
--- /dev/null
+From 3b5ca842467ca3c3205907e77b6907551ea44e48 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Feb 2025 06:17:16 +0100
+Subject: net: ethernet: ti: am65-cpsw: select PAGE_POOL
+
+From: Sascha Hauer <s.hauer@pengutronix.de>
+
+[ Upstream commit bab3a6e9ffd600f9db0ebaf8f45e1c6111cf314c ]
+
+am65-cpsw uses page_pool_dev_alloc_pages(), so it needs PAGE_POOL
+selected to avoid linker errors. This has been missing since the driver
+started to use page_pool helpers in commit 8acacc40f733 ("net: ethernet:
+ti: am65-cpsw: Add minimal XDP support").
+
+Fixes: 8acacc40f733 ("net: ethernet: ti: am65-cpsw: Add minimal XDP support")
+Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
+Reviewed-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
+Link: https://patch.msgid.link/20250224-net-am654-nuss-kconfig-v2-1-c124f4915c92@pengutronix.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/ti/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
+index 0d5a862cd78a6..3a13d60a947a8 100644
+--- a/drivers/net/ethernet/ti/Kconfig
++++ b/drivers/net/ethernet/ti/Kconfig
+@@ -99,6 +99,7 @@ config TI_K3_AM65_CPSW_NUSS
+ select NET_DEVLINK
+ select TI_DAVINCI_MDIO
+ select PHYLINK
++ select PAGE_POOL
+ select TI_K3_CPPI_DESC_POOL
+ imply PHY_TI_GMII_SEL
+ depends on TI_K3_AM65_CPTS || !TI_K3_AM65_CPTS
+--
+2.39.5
+
--- /dev/null
+From b9d32e0f89964e1dd3b85c865764b065068ece6c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Feb 2025 18:51:39 +0100
+Subject: net: ipv6: fix dst ref loop on input in rpl lwt
+
+From: Justin Iurman <justin.iurman@uliege.be>
+
+[ Upstream commit 13e55fbaec176119cff68a7e1693b251c8883c5f ]
+
+Prevent a dst ref loop on input in rpl_iptunnel.
+
+Fixes: a7a29f9c361f ("net: ipv6: add rpl sr tunnel")
+Cc: Alexander Aring <alex.aring@gmail.com>
+Cc: Ido Schimmel <idosch@nvidia.com>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Signed-off-by: Justin Iurman <justin.iurman@uliege.be>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/rpl_iptunnel.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c
+index 0ac4283acdf20..7c05ac846646f 100644
+--- a/net/ipv6/rpl_iptunnel.c
++++ b/net/ipv6/rpl_iptunnel.c
+@@ -262,10 +262,18 @@ static int rpl_input(struct sk_buff *skb)
+ {
+ struct dst_entry *orig_dst = skb_dst(skb);
+ struct dst_entry *dst = NULL;
++ struct lwtunnel_state *lwtst;
+ struct rpl_lwt *rlwt;
+ int err;
+
+- rlwt = rpl_lwt_lwtunnel(orig_dst->lwtstate);
++ /* We cannot dereference "orig_dst" once ip6_route_input() or
++ * skb_dst_drop() is called. However, in order to detect a dst loop, we
++ * need the address of its lwtstate. So, save the address of lwtstate
++ * now and use it later as a comparison.
++ */
++ lwtst = orig_dst->lwtstate;
++
++ rlwt = rpl_lwt_lwtunnel(lwtst);
+
+ local_bh_disable();
+ dst = dst_cache_get(&rlwt->cache);
+@@ -280,7 +288,9 @@ static int rpl_input(struct sk_buff *skb)
+ if (!dst) {
+ ip6_route_input(skb);
+ dst = skb_dst(skb);
+- if (!dst->error) {
++
++ /* cache only if we don't create a dst reference loop */
++ if (!dst->error && lwtst != dst->lwtstate) {
+ local_bh_disable();
+ dst_cache_set_ip6(&rlwt->cache, dst,
+ &ipv6_hdr(skb)->saddr);
+--
+2.39.5
+
--- /dev/null
+From 9c72e2295ca0dc002a0dfac70a360fa9e46ea429 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Feb 2025 18:51:38 +0100
+Subject: net: ipv6: fix dst ref loop on input in seg6 lwt
+
+From: Justin Iurman <justin.iurman@uliege.be>
+
+[ Upstream commit c64a0727f9b1cbc63a5538c8c0014e9a175ad864 ]
+
+Prevent a dst ref loop on input in seg6_iptunnel.
+
+Fixes: af4a2209b134 ("ipv6: sr: use dst_cache in seg6_input")
+Cc: David Lebrun <dlebrun@google.com>
+Cc: Ido Schimmel <idosch@nvidia.com>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Signed-off-by: Justin Iurman <justin.iurman@uliege.be>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/seg6_iptunnel.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
+index 33833b2064c07..51583461ae29b 100644
+--- a/net/ipv6/seg6_iptunnel.c
++++ b/net/ipv6/seg6_iptunnel.c
+@@ -472,10 +472,18 @@ static int seg6_input_core(struct net *net, struct sock *sk,
+ {
+ struct dst_entry *orig_dst = skb_dst(skb);
+ struct dst_entry *dst = NULL;
++ struct lwtunnel_state *lwtst;
+ struct seg6_lwt *slwt;
+ int err;
+
+- slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
++ /* We cannot dereference "orig_dst" once ip6_route_input() or
++ * skb_dst_drop() is called. However, in order to detect a dst loop, we
++ * need the address of its lwtstate. So, save the address of lwtstate
++ * now and use it later as a comparison.
++ */
++ lwtst = orig_dst->lwtstate;
++
++ slwt = seg6_lwt_lwtunnel(lwtst);
+
+ local_bh_disable();
+ dst = dst_cache_get(&slwt->cache);
+@@ -490,7 +498,9 @@ static int seg6_input_core(struct net *net, struct sock *sk,
+ if (!dst) {
+ ip6_route_input(skb);
+ dst = skb_dst(skb);
+- if (!dst->error) {
++
++ /* cache only if we don't create a dst reference loop */
++ if (!dst->error && lwtst != dst->lwtstate) {
+ local_bh_disable();
+ dst_cache_set_ip6(&slwt->cache, dst,
+ &ipv6_hdr(skb)->saddr);
+--
+2.39.5
+
--- /dev/null
+From 6f4d56c277ea72df84b32e147688cb93ddd7fcf0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Feb 2025 09:25:59 +0200
+Subject: net: loopback: Avoid sending IP packets without an Ethernet header
+
+From: Ido Schimmel <idosch@nvidia.com>
+
+[ Upstream commit 0e4427f8f587c4b603475468bb3aee9418574893 ]
+
+After commit 22600596b675 ("ipv4: give an IPv4 dev to blackhole_netdev")
+IPv4 neighbors can be constructed on the blackhole net device, but they
+are constructed with an output function (neigh_direct_output()) that
+simply calls dev_queue_xmit(). The latter will transmit packets via
+'skb->dev' which might not be the blackhole net device if dst_dev_put()
+switched 'dst->dev' to the blackhole net device while another CPU was
+using the dst entry in ip_output(), but after it already initialized
+'skb->dev' from 'dst->dev'.
+
+Specifically, the following can happen:
+
+ CPU1 CPU2
+
+udp_sendmsg(sk1) udp_sendmsg(sk2)
+udp_send_skb() [...]
+ip_output()
+ skb->dev = skb_dst(skb)->dev
+ dst_dev_put()
+ dst->dev = blackhole_netdev
+ip_finish_output2()
+ resolves neigh on dst->dev
+neigh_output()
+neigh_direct_output()
+dev_queue_xmit()
+
+This will result in IPv4 packets being sent without an Ethernet header
+via a valid net device:
+
+tcpdump: verbose output suppressed, use -v[v]... for full protocol decode
+listening on enp9s0, link-type EN10MB (Ethernet), snapshot length 262144 bytes
+22:07:02.329668 20:00:40:11:18:fb > 45:00:00:44:f4:94, ethertype Unknown
+(0x58c6), length 68:
+ 0x0000: 8dda 74ca f1ae ca6c ca6c 0098 969c 0400 ..t....l.l......
+ 0x0010: 0000 4730 3f18 6800 0000 0000 0000 9971 ..G0?.h........q
+ 0x0020: c4c9 9055 a157 0a70 9ead bf83 38ca ab38 ...U.W.p....8..8
+ 0x0030: 8add ab96 e052 .....R
+
+Fix by making sure that neighbors are constructed on top of the
+blackhole net device with an output function that simply consumes the
+packets, in a similar fashion to dst_discard_out() and
+blackhole_netdev_xmit().
+
+Fixes: 8d7017fd621d ("blackhole_netdev: use blackhole_netdev to invalidate dst entries")
+Fixes: 22600596b675 ("ipv4: give an IPv4 dev to blackhole_netdev")
+Reported-by: Florian Meister <fmei@sfs.com>
+Closes: https://lore.kernel.org/netdev/20250210084931.23a5c2e4@hermes.local/
+Signed-off-by: Ido Schimmel <idosch@nvidia.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Link: https://patch.msgid.link/20250220072559.782296-1-idosch@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/loopback.c | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
+index 1993b90b1a5f9..491e56b3263fd 100644
+--- a/drivers/net/loopback.c
++++ b/drivers/net/loopback.c
+@@ -244,8 +244,22 @@ static netdev_tx_t blackhole_netdev_xmit(struct sk_buff *skb,
+ return NETDEV_TX_OK;
+ }
+
++static int blackhole_neigh_output(struct neighbour *n, struct sk_buff *skb)
++{
++ kfree_skb(skb);
++ return 0;
++}
++
++static int blackhole_neigh_construct(struct net_device *dev,
++ struct neighbour *n)
++{
++ n->output = blackhole_neigh_output;
++ return 0;
++}
++
+ static const struct net_device_ops blackhole_netdev_ops = {
+ .ndo_start_xmit = blackhole_netdev_xmit,
++ .ndo_neigh_construct = blackhole_neigh_construct,
+ };
+
+ /* This is a dst-dummy device used specifically for invalidated
+--
+2.39.5
+
--- /dev/null
+From bf0b498f0c65a5cfc5da605ae9e4d1cf233864ea Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Feb 2025 09:26:08 +0200
+Subject: net/mlx5: IRQ, Fix null string in debug print
+
+From: Shay Drory <shayd@nvidia.com>
+
+[ Upstream commit 2f5a6014eb168a97b24153adccfa663d3b282767 ]
+
+The irq_pool_alloc() debug print can print a null string.
+Fix this by providing a default string to print.
+
+Fixes: 71e084e26414 ("net/mlx5: Allocating a pool of MSI-X vectors for SFs")
+Signed-off-by: Shay Drory <shayd@nvidia.com>
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202501141055.SwfIphN0-lkp@intel.com/
+Reviewed-by: Moshe Shemesh <moshe@nvidia.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Reviewed-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+Link: https://patch.msgid.link/20250225072608.526866-4-tariqt@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+index 7db9cab9bedf6..d9362eabc6a1c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+@@ -572,7 +572,7 @@ irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
+ pool->min_threshold = min_threshold * MLX5_EQ_REFS_PER_IRQ;
+ pool->max_threshold = max_threshold * MLX5_EQ_REFS_PER_IRQ;
+ mlx5_core_dbg(dev, "pool->name = %s, pool->size = %d, pool->start = %d",
+- name, size, start);
++ name ? name : "mlx5_pcif_pool", size, start);
+ return pool;
+ }
+
+--
+2.39.5
+
--- /dev/null
+From 28abd967e8208733b02b90c900fa54db59acae50 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Feb 2025 20:20:58 -0800
+Subject: net: mvpp2: cls: Fixed Non IP flow, with vlan tag flow definition.
+
+From: Harshal Chaudhari <hchaudhari@marvell.com>
+
+[ Upstream commit 2d253726ff7106b39a44483b6864398bba8a2f74 ]
+
+The non-IP flow with a VLAN tag was not working as expected while
+running the command below for vlan-priority. Fix that.
+
+ethtool -N eth1 flow-type ether vlan 0x8000 vlan-mask 0x1fff action 0 loc 0
+
+Fixes: 1274daede3ef ("net: mvpp2: cls: Add steering based on vlan Id and priority.")
+Signed-off-by: Harshal Chaudhari <hchaudhari@marvell.com>
+Reviewed-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
+Link: https://patch.msgid.link/20250225042058.2643838-1-hchaudhari@marvell.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+index 1641791a2d5b4..8ed83fb988624 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+@@ -324,7 +324,7 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
+ MVPP2_PRS_RI_VLAN_MASK),
+ /* Non IP flow, with vlan tag */
+ MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_TAG,
+- MVPP22_CLS_HEK_OPT_VLAN,
++ MVPP22_CLS_HEK_TAGGED,
+ 0, 0),
+ };
+
+--
+2.39.5
+
--- /dev/null
+From 62486f1fe10ccf2ce53af3d2dc0bba289b8d8dab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Feb 2025 12:07:52 +0100
+Subject: net: set the minimum for net_hotdata.netdev_budget_usecs
+
+From: Jiri Slaby (SUSE) <jirislaby@kernel.org>
+
+[ Upstream commit c180188ec02281126045414e90d08422a80f75b4 ]
+
+Commit 7acf8a1e8a28 ("Replace 2 jiffies with sysctl netdev_budget_usecs
+to enable softirq tuning") added a possibility to set
+net_hotdata.netdev_budget_usecs, but added no lower bound checking.
+
+Commit a4837980fd9f ("net: revert default NAPI poll timeout to 2 jiffies")
+made the *initial* value HZ-dependent, so the initial value is at least
+2 jiffies even for lower HZ values (2 ms for 1000 Hz, 8 ms for 250 Hz,
+20 ms for 100 Hz).
+
+But a user can still set improper values via the sysctl. Set .extra1
+(the lower bound) for net_hotdata.netdev_budget_usecs to the same value
+as in the latter commit, that is, to 2 jiffies.
+
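+For illustration only (the arithmetic is implied by the change, not part
+of it), the resulting floor in microseconds for common HZ values is:
+
+	static int netdev_budget_usecs_min = 2 * USEC_PER_SEC / HZ;
+	/* HZ=1000 -> 2000 us, HZ=250 -> 8000 us, HZ=100 -> 20000 us,
+	 * matching the 2-jiffy initial value mentioned above.
+	 */
+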
+Fixes: a4837980fd9f ("net: revert default NAPI poll timeout to 2 jiffies")
+Fixes: 7acf8a1e8a28 ("Replace 2 jiffies with sysctl netdev_budget_usecs to enable softirq tuning")
+Signed-off-by: Jiri Slaby (SUSE) <jirislaby@kernel.org>
+Cc: Dmitry Yakunin <zeil@yandex-team.ru>
+Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
+Link: https://patch.msgid.link/20250220110752.137639-1-jirislaby@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/sysctl_net_core.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
+index 5dd54a8133980..47e2743ffe228 100644
+--- a/net/core/sysctl_net_core.c
++++ b/net/core/sysctl_net_core.c
+@@ -34,6 +34,7 @@ static int min_sndbuf = SOCK_MIN_SNDBUF;
+ static int min_rcvbuf = SOCK_MIN_RCVBUF;
+ static int max_skb_frags = MAX_SKB_FRAGS;
+ static int min_mem_pcpu_rsv = SK_MEMORY_PCPU_RESERVE;
++static int netdev_budget_usecs_min = 2 * USEC_PER_SEC / HZ;
+
+ static int net_msg_warn; /* Unused, but still a sysctl */
+
+@@ -580,7 +581,7 @@ static struct ctl_table net_core_table[] = {
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+- .extra1 = SYSCTL_ZERO,
++ .extra1 = &netdev_budget_usecs_min,
+ },
+ {
+ .procname = "fb_tunnels_only_for_init_net",
+--
+2.39.5
+
--- /dev/null
+From 4b1e93e07c12bb6381ea91652687b4590f6ab780 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 27 Feb 2025 14:54:41 +0530
+Subject: net: ti: icss-iep: Reject perout generation request
+
+From: Meghana Malladi <m-malladi@ti.com>
+
+[ Upstream commit 54e1b4becf5e220be03db4e1be773c1310e8cbbd ]
+
+The IEP driver supports both perout and pps signal generation,
+but the perout feature is faulty, with half-baked support
+due to some missing configuration. Remove perout
+support from the driver and reject perout requests with
+a "not supported" error code.
+
+Fixes: c1e0230eeaab2 ("net: ti: icss-iep: Add IEP driver")
+Signed-off-by: Meghana Malladi <m-malladi@ti.com>
+Reviewed-by: Vadim Fedorenko <vadim.fedorenko@linux.dev>
+Link: https://patch.msgid.link/20250227092441.1848419-1-m-malladi@ti.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/ti/icssg/icss_iep.c | 21 +--------------------
+ 1 file changed, 1 insertion(+), 20 deletions(-)
+
+diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
+index 768578c0d9587..d59c1744840af 100644
+--- a/drivers/net/ethernet/ti/icssg/icss_iep.c
++++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
+@@ -474,26 +474,7 @@ static int icss_iep_perout_enable_hw(struct icss_iep *iep,
+ static int icss_iep_perout_enable(struct icss_iep *iep,
+ struct ptp_perout_request *req, int on)
+ {
+- int ret = 0;
+-
+- mutex_lock(&iep->ptp_clk_mutex);
+-
+- if (iep->pps_enabled) {
+- ret = -EBUSY;
+- goto exit;
+- }
+-
+- if (iep->perout_enabled == !!on)
+- goto exit;
+-
+- ret = icss_iep_perout_enable_hw(iep, req, on);
+- if (!ret)
+- iep->perout_enabled = !!on;
+-
+-exit:
+- mutex_unlock(&iep->ptp_clk_mutex);
+-
+- return ret;
++ return -EOPNOTSUPP;
+ }
+
+ static void icss_iep_cap_cmp_work(struct work_struct *work)
+--
+2.39.5
+
--- /dev/null
+From 732ea30fa4d4fbca103f6217872eadc0164437cf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 Feb 2025 19:22:44 +0000
+Subject: rxrpc: rxperf: Fix missing decoding of terminal magic cookie
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit c34d999ca3145d9fe858258cc3342ec493f47d2e ]
+
+The rxperf RPCs seem to have a magic cookie at the end of the request
+that the unmarshalling of the request was failing to take into account.
+Fix the rxperf code to expect this.
+
+Fixes: 75bfdbf2fca3 ("rxrpc: Implement an in-kernel rxperf server for testing purposes")
+Signed-off-by: David Howells <dhowells@redhat.com>
+cc: Marc Dionne <marc.dionne@auristor.com>
+cc: Simon Horman <horms@kernel.org>
+cc: linux-afs@lists.infradead.org
+Link: https://patch.msgid.link/20250218192250.296870-2-dhowells@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/rxrpc/rxperf.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/net/rxrpc/rxperf.c b/net/rxrpc/rxperf.c
+index 085e7892d3104..b1536da2246b8 100644
+--- a/net/rxrpc/rxperf.c
++++ b/net/rxrpc/rxperf.c
+@@ -478,6 +478,18 @@ static int rxperf_deliver_request(struct rxperf_call *call)
+ call->unmarshal++;
+ fallthrough;
+ case 2:
++ ret = rxperf_extract_data(call, true);
++ if (ret < 0)
++ return ret;
++
++ /* Deal with the terminal magic cookie. */
++ call->iov_len = 4;
++ call->kvec[0].iov_len = call->iov_len;
++ call->kvec[0].iov_base = call->tmp;
++ iov_iter_kvec(&call->iter, READ, call->kvec, 1, call->iov_len);
++ call->unmarshal++;
++ fallthrough;
++ case 3:
+ ret = rxperf_extract_data(call, false);
+ if (ret < 0)
+ return ret;
+--
+2.39.5
+
--- /dev/null
+From aa9b0cf023b3ae0a866adddaa983481c381107bc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Feb 2025 18:19:57 +0000
+Subject: selftests: drv-net: Check if combined-count exists
+
+From: Joe Damato <jdamato@fastly.com>
+
+[ Upstream commit 1cbddbddee68d17feb6467fc556c144777af91ef ]
+
+Some drivers, like tg3, do not set combined-count:
+
+$ ethtool -l enp4s0f1
+Channel parameters for enp4s0f1:
+Pre-set maximums:
+RX: 4
+TX: 4
+Other: n/a
+Combined: n/a
+Current hardware settings:
+RX: 4
+TX: 1
+Other: n/a
+Combined: n/a
+
+In the case where combined-count is not set, the ethtool netlink code
+in the kernel elides the value. The call in the test:
+
+ netnl.channels_get(...)
+
+with a tg3 device then returns a dictionary that looks like:
+
+{'header': {'dev-index': 3, 'dev-name': 'enp4s0f1'},
+ 'rx-max': 4,
+ 'rx-count': 4,
+ 'tx-max': 4,
+ 'tx-count': 1}
+
+Note that the key 'combined-count' is missing. As a result of this
+missing key the test raises an exception:
+
+ # Exception| if channels['combined-count'] == 0:
+ # Exception| ~~~~~~~~^^^^^^^^^^^^^^^^^^
+ # Exception| KeyError: 'combined-count'
+
+Change the test to check if 'combined-count' is a key in the dictionary
+first and, if not, assume that this means the driver has separate RX and
+TX queues.
+
+With this change, the test now passes successfully on tg3 and mlx5
+(which does have a 'combined-count').
+
+Fixes: 1cf270424218 ("net: selftest: add test for netdev netlink queue-get API")
+Signed-off-by: Joe Damato <jdamato@fastly.com>
+Reviewed-by: David Wei <dw@davidwei.uk>
+Link: https://patch.msgid.link/20250226181957.212189-1-jdamato@fastly.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/drivers/net/queues.py | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+diff --git a/tools/testing/selftests/drivers/net/queues.py b/tools/testing/selftests/drivers/net/queues.py
+index 30f29096e27c2..4868b514ae78d 100755
+--- a/tools/testing/selftests/drivers/net/queues.py
++++ b/tools/testing/selftests/drivers/net/queues.py
+@@ -40,10 +40,9 @@ def addremove_queues(cfg, nl) -> None:
+
+ netnl = EthtoolFamily()
+ channels = netnl.channels_get({'header': {'dev-index': cfg.ifindex}})
+- if channels['combined-count'] == 0:
+- rx_type = 'rx'
+- else:
+- rx_type = 'combined'
++ rx_type = 'rx'
++ if channels.get('combined-count', 0) > 0:
++ rx_type = 'combined'
+
+ expected = curr_queues - 1
+ cmd(f"ethtool -L {cfg.dev['ifname']} {rx_type} {expected}", timeout=10)
+--
+2.39.5
+
rdma-mlx5-fix-bind-qp-error-cleanup-flow.patch
rdma-bnxt_re-fix-the-page-details-for-the-srq-create.patch
sunrpc-suppress-warnings-for-unused-procfs-functions.patch
+alsa-usb-audio-avoid-dropping-midi-events-at-closing.patch
+bluetooth-l2cap-fix-l2cap_ecred_conn_rsp-response.patch
+rxrpc-rxperf-fix-missing-decoding-of-terminal-magic-.patch
+afs-fix-the-server_list-to-unuse-a-displaced-server-.patch
+afs-give-an-afs_server-object-a-ref-on-the-afs_cell-.patch
+net-loopback-avoid-sending-ip-packets-without-an-eth.patch
+net-set-the-minimum-for-net_hotdata.netdev_budget_us.patch
+ipv4-convert-icmp_route_lookup-to-dscp_t.patch
+ipv4-convert-ip_route_input-to-dscp_t.patch
+ipvlan-prepare-ipvlan_process_v4_outbound-to-future-.patch
+ipvlan-ensure-network-headers-are-in-skb-linear-part.patch
+net-cadence-macb-synchronize-stats-calculations.patch
+net-dsa-rtl8366rb-fix-compilation-problem.patch
+asoc-es8328-fix-route-from-dac-to-output.patch
+asoc-fsl-rename-stream-name-of-sai-dai-driver.patch
+ipvs-always-clear-ipvs_property-flag-in-skb_scrub_pa.patch
+drm-xe-oa-signal-output-fences.patch
+drm-xe-oa-move-functions-up-so-they-can-be-reused-fo.patch
+drm-xe-oa-add-syncs-support-to-oa-config-ioctl.patch
+drm-xe-oa-allow-only-certain-property-changes-from-c.patch
+drm-xe-oa-allow-oa_exponent-value-of-0.patch
+firmware-cs_dsp-remove-async-regmap-writes.patch
+asoc-cs35l56-prevent-races-when-soft-resetting-using.patch
+alsa-hda-realtek-fix-wrong-mic-setup-for-asus-vivobo.patch
+net-ethernet-ti-am65-cpsw-select-page_pool.patch
+tcp-devmem-don-t-write-truncated-dmabuf-cmsgs-to-use.patch
+ice-add-e830-hw-vf-mailbox-message-limit-support.patch
+ice-fix-deinitializing-vf-in-error-path.patch
+ice-avoid-setting-default-rx-vsi-twice-in-switchdev-.patch
+tcp-defer-ts_recent-changes-until-req-is-owned.patch
+drm-xe-cancel-pending-job-timer-before-freeing-sched.patch
+net-clear-old-fragment-checksum-value-in-napi_reuse_.patch
+net-mvpp2-cls-fixed-non-ip-flow-with-vlan-tag-flow-d.patch
+net-mlx5-irq-fix-null-string-in-debug-print.patch
+net-ipv6-fix-dst-ref-loop-on-input-in-seg6-lwt.patch
+net-ipv6-fix-dst-ref-loop-on-input-in-rpl-lwt.patch
+selftests-drv-net-check-if-combined-count-exists.patch
+idpf-fix-checksums-set-in-idpf_rx_rsc.patch
+net-ti-icss-iep-reject-perout-generation-request.patch
--- /dev/null
+From aedefdf20189f92f09ce5354fa2eb23ea07c0d2a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Feb 2025 17:00:47 +0800
+Subject: tcp: Defer ts_recent changes until req is owned
+
+From: Wang Hai <wanghai38@huawei.com>
+
+[ Upstream commit 8d52da23b6c68a0f6bad83959ebb61a2cf623c4e ]
+
+Recently a bug was discovered where the server had entered TCP_ESTABLISHED
+state, but the upper layers were not notified.
+
+Packets for the same 5-tuple may be processed by different CPUs, so two
+CPUs may receive different ack packets at the same time when the
+state is TCP_NEW_SYN_RECV.
+
+In that case, req->ts_recent in tcp_check_req may be changed concurrently,
+which will probably cause the newsk's ts_recent to be incorrectly large,
+so that tcp_validate_incoming will fail. At this point, newsk will not be
+able to enter the TCP_ESTABLISHED state.
+
+cpu1 cpu2
+tcp_check_req
+ tcp_check_req
+ req->ts_recent = rcv_tsval = t1
+ req->ts_recent = rcv_tsval = t2
+
+ syn_recv_sock
+ tcp_sk(child)->rx_opt.ts_recent = req->ts_recent = t2 // t1 < t2
+tcp_child_process
+ tcp_rcv_state_process
+ tcp_validate_incoming
+ tcp_paws_check
+ if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
+ // t2 - t1 > paws_win, failed
+ tcp_v4_do_rcv
+ tcp_rcv_state_process
+ // TCP_ESTABLISHED
+
+The cpu2 skb, or a newly received skb, will go through tcp_v4_do_rcv to
+get the newsk into the TCP_ESTABLISHED state, but at this point it is no
+longer possible to notify the upper layer application. A notification
+mechanism could be added here, but that fix would be more complex, so the
+current fix is used.
+
+In tcp_check_req, req->ts_recent is used to assign a value to
+tcp_sk(child)->rx_opt.ts_recent, so removing the change in req->ts_recent
+and changing tcp_sk(child)->rx_opt.ts_recent directly after owning the
+req fixes this bug.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Wang Hai <wanghai38@huawei.com>
+Reviewed-by: Jason Xing <kerneljasonxing@gmail.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/tcp_minisocks.c | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
+index bb1fe1ba867ac..f3e4fc9572196 100644
+--- a/net/ipv4/tcp_minisocks.c
++++ b/net/ipv4/tcp_minisocks.c
+@@ -806,12 +806,6 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
+
+ /* In sequence, PAWS is OK. */
+
+- /* TODO: We probably should defer ts_recent change once
+- * we take ownership of @req.
+- */
+- if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
+- WRITE_ONCE(req->ts_recent, tmp_opt.rcv_tsval);
+-
+ if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
+ /* Truncate SYN, it is out of window starting
+ at tcp_rsk(req)->rcv_isn + 1. */
+@@ -860,6 +854,10 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
+ if (!child)
+ goto listen_overflow;
+
++ if (own_req && tmp_opt.saw_tstamp &&
++ !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
++ tcp_sk(child)->rx_opt.ts_recent = tmp_opt.rcv_tsval;
++
+ if (own_req && rsk_drop_req(req)) {
+ reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
+ inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
+--
+2.39.5
+
--- /dev/null
+From 85ecace8e68dd45d97db411896921dbbc856bd7a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Feb 2025 09:44:01 -0800
+Subject: tcp: devmem: don't write truncated dmabuf CMSGs to userspace
+
+From: Stanislav Fomichev <sdf@fomichev.me>
+
+[ Upstream commit 18912c520674ec4d920fe3826e7e4fefeecdf5ae ]
+
+Currently, we report -ETOOSMALL (err) only on the first iteration
+(!sent). When we get a put_cmsg error after a bunch of successful
+put_cmsg calls, we don't signal the error at all. This might be
+confusing on the userspace side, which will see truncated CMSGs
+but no MSG_CTRUNC signal.
+
+Consider the following case:
+- sizeof(struct cmsghdr) = 16
+- sizeof(struct dmabuf_cmsg) = 24
+- total cmsg size (CMSG_LEN) = 40 (16+24)
+
+When calling recvmsg with msg_controllen=60, the userspace
+will receive two(!) dmabuf_cmsg(s), the first one will
+be a valid one and the second one will be silently truncated. There is no
+easy way to discover the truncation besides doing something like
+"cm->cmsg_len != CMSG_LEN(sizeof(dmabuf_cmsg))".
+
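+For reference, a userspace sketch of that check (illustrative only, not
+part of the patch; it assumes the standard CMSG macros and the
+struct dmabuf_cmsg layout from the devmem TCP UAPI):
+
+	struct cmsghdr *cm;
+
+	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
+		if (cm->cmsg_level != SOL_SOCKET ||
+		    (cm->cmsg_type != SO_DEVMEM_LINEAR &&
+		     cm->cmsg_type != SO_DEVMEM_DMABUF))
+			continue;
+		if (cm->cmsg_len != CMSG_LEN(sizeof(struct dmabuf_cmsg)))
+			continue;	/* a silently truncated entry */
+		/* process the dmabuf_cmsg payload here */
+	}
+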
+Introduce a new put_cmsg_notrunc() wrapper that reports an error instead
+of performing the truncation. Mina suggests that this is the intended way
+this API should work.
+
+Note that we might now report MSG_CTRUNC when the users (incorrectly)
+call us with msg_control == NULL.
+
+Fixes: 8f0b3cc9a4c1 ("tcp: RX path for devmem TCP")
+Reviewed-by: Mina Almasry <almasrymina@google.com>
+Signed-off-by: Stanislav Fomichev <sdf@fomichev.me>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Link: https://patch.msgid.link/20250224174401.3582695-1-sdf@fomichev.me
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/socket.h | 2 ++
+ net/core/scm.c | 10 ++++++++++
+ net/ipv4/tcp.c | 26 ++++++++++----------------
+ 3 files changed, 22 insertions(+), 16 deletions(-)
+
+diff --git a/include/linux/socket.h b/include/linux/socket.h
+index d18cc47e89bd0..c3322eb3d6865 100644
+--- a/include/linux/socket.h
++++ b/include/linux/socket.h
+@@ -392,6 +392,8 @@ struct ucred {
+
+ extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
+ extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
++extern int put_cmsg_notrunc(struct msghdr *msg, int level, int type, int len,
++ void *data);
+
+ struct timespec64;
+ struct __kernel_timespec;
+diff --git a/net/core/scm.c b/net/core/scm.c
+index 4f6a14babe5ae..733c0cbd393d2 100644
+--- a/net/core/scm.c
++++ b/net/core/scm.c
+@@ -282,6 +282,16 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
+ }
+ EXPORT_SYMBOL(put_cmsg);
+
++int put_cmsg_notrunc(struct msghdr *msg, int level, int type, int len,
++ void *data)
++{
++ /* Don't produce truncated CMSGs */
++ if (!msg->msg_control || msg->msg_controllen < CMSG_LEN(len))
++ return -ETOOSMALL;
++
++ return put_cmsg(msg, level, type, len, data);
++}
++
+ void put_cmsg_scm_timestamping64(struct msghdr *msg, struct scm_timestamping_internal *tss_internal)
+ {
+ struct scm_timestamping64 tss;
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 68cb6a966b18b..b731a4a8f2b0d 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2456,14 +2456,12 @@ static int tcp_recvmsg_dmabuf(struct sock *sk, const struct sk_buff *skb,
+ */
+ memset(&dmabuf_cmsg, 0, sizeof(dmabuf_cmsg));
+ dmabuf_cmsg.frag_size = copy;
+- err = put_cmsg(msg, SOL_SOCKET, SO_DEVMEM_LINEAR,
+- sizeof(dmabuf_cmsg), &dmabuf_cmsg);
+- if (err || msg->msg_flags & MSG_CTRUNC) {
+- msg->msg_flags &= ~MSG_CTRUNC;
+- if (!err)
+- err = -ETOOSMALL;
++ err = put_cmsg_notrunc(msg, SOL_SOCKET,
++ SO_DEVMEM_LINEAR,
++ sizeof(dmabuf_cmsg),
++ &dmabuf_cmsg);
++ if (err)
+ goto out;
+- }
+
+ sent += copy;
+
+@@ -2517,16 +2515,12 @@ static int tcp_recvmsg_dmabuf(struct sock *sk, const struct sk_buff *skb,
+ offset += copy;
+ remaining_len -= copy;
+
+- err = put_cmsg(msg, SOL_SOCKET,
+- SO_DEVMEM_DMABUF,
+- sizeof(dmabuf_cmsg),
+- &dmabuf_cmsg);
+- if (err || msg->msg_flags & MSG_CTRUNC) {
+- msg->msg_flags &= ~MSG_CTRUNC;
+- if (!err)
+- err = -ETOOSMALL;
++ err = put_cmsg_notrunc(msg, SOL_SOCKET,
++ SO_DEVMEM_DMABUF,
++ sizeof(dmabuf_cmsg),
++ &dmabuf_cmsg);
++ if (err)
+ goto out;
+- }
+
+ atomic_long_inc(&niov->pp_ref_count);
+ tcp_xa_pool.netmems[tcp_xa_pool.idx++] = skb_frag_netmem(frag);
+--
+2.39.5
+