--- /dev/null
+From 5568c5266a4576458d6a472372a0a13975e46981 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 Jul 2024 19:05:15 +0200
+Subject: ALSA: hda: Conditionally use snooping for AMD HDMI
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 478689b5990deb626a0b3f1ebf165979914d6be4 ]
+
+A recent regression report revealed that the use of WC pages for the
+AMD HDMI device together with the AMD IOMMU leads to unexpected
+truncation or noise. The issue appears to be triggered by the change in
+the kernel core memory allocation that lets the IOMMU driver always use
+S/G buffers. Meanwhile, the use of WC pages has been a workaround for a
+similar issue with standard pages in the past. So now we need to apply
+the workaround conditionally, namely only when no IOMMU is in place.
+
+This patch modifies the workaround code to check the DMA ops first and
+apply the snoop-off only when needed.
+
+Fixes: f5ff79fddf0e ("dma-mapping: remove CONFIG_DMA_REMAP")
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=219087
+Link: https://patch.msgid.link/20240731170521.31714-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/hda_controller.h | 2 +-
+ sound/pci/hda/hda_intel.c | 10 +++++++++-
+ 2 files changed, 10 insertions(+), 2 deletions(-)
+
+diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
+index c2d0109866e62..68c883f202ca5 100644
+--- a/sound/pci/hda/hda_controller.h
++++ b/sound/pci/hda/hda_controller.h
+@@ -28,7 +28,7 @@
+ #else
+ #define AZX_DCAPS_I915_COMPONENT 0 /* NOP */
+ #endif
+-/* 14 unused */
++#define AZX_DCAPS_AMD_ALLOC_FIX (1 << 14) /* AMD allocation workaround */
+ #define AZX_DCAPS_CTX_WORKAROUND (1 << 15) /* X-Fi workaround */
+ #define AZX_DCAPS_POSFIX_LPIB (1 << 16) /* Use LPIB as default */
+ #define AZX_DCAPS_AMD_WORKAROUND (1 << 17) /* AMD-specific workaround */
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 3500108f6ba37..87203b819dd47 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -40,6 +40,7 @@
+
+ #ifdef CONFIG_X86
+ /* for snoop control */
++#include <linux/dma-map-ops.h>
+ #include <asm/set_memory.h>
+ #include <asm/cpufeature.h>
+ #endif
+@@ -306,7 +307,7 @@ enum {
+
+ /* quirks for ATI HDMI with snoop off */
+ #define AZX_DCAPS_PRESET_ATI_HDMI_NS \
+- (AZX_DCAPS_PRESET_ATI_HDMI | AZX_DCAPS_SNOOP_OFF)
++ (AZX_DCAPS_PRESET_ATI_HDMI | AZX_DCAPS_AMD_ALLOC_FIX)
+
+ /* quirks for AMD SB */
+ #define AZX_DCAPS_PRESET_AMD_SB \
+@@ -1702,6 +1703,13 @@ static void azx_check_snoop_available(struct azx *chip)
+ if (chip->driver_caps & AZX_DCAPS_SNOOP_OFF)
+ snoop = false;
+
++#ifdef CONFIG_X86
++ /* check the presence of DMA ops (i.e. IOMMU), disable snoop conditionally */
++ if ((chip->driver_caps & AZX_DCAPS_AMD_ALLOC_FIX) &&
++ !get_dma_ops(chip->card->dev))
++ snoop = false;
++#endif
++
+ chip->snoop = snoop;
+ if (!snoop) {
+ dev_info(chip->card->dev, "Force to non-snoop mode\n");
+--
+2.43.0
+
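For context, a minimal sketch of the decision the above patch implements; the
helper name is illustrative only and not part of the patch, and it assumes the
kernel context of the diff. On x86, get_dma_ops() returning NULL means
dma-direct is in use (no IOMMU translation), which is the only case where the
WC-page/snoop-off workaround is still wanted for AMD HDMI.

    #include <linux/dma-map-ops.h>

    /* Illustrative only: mirrors the check added in azx_check_snoop_available() */
    static bool azx_amd_hdmi_wants_snoop_off(struct device *dev)
    {
            return IS_ENABLED(CONFIG_X86) && !get_dma_ops(dev);
    }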
--- /dev/null
+From a91c810ae48ba676a8af5cf401837a2052983d4a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Jul 2024 18:07:26 +0800
+Subject: ALSA: hda: conexant: Fix headset auto detect fail in the polling mode
+
+From: songxiebing <songxiebing@kylinos.cn>
+
+[ Upstream commit e60dc98122110594d0290845160f12916192fc6d ]
+
+The previous fix (7aeb25908648) only handles the unsol_event reporting
+during interrupts and does not cover the polling mode used when
+jackpoll_ms is set, so replace it with
+snd_hda_jack_detect_enable_callback().
+
+Fixes: 7aeb25908648 ("ALSA: hda/conexant: Fix headset auto detect fail in cx8070 and SN6140")
+Co-developed-by: bo liu <bo.liu@senarytech.com>
+Signed-off-by: bo liu <bo.liu@senarytech.com>
+Signed-off-by: songxiebing <songxiebing@kylinos.cn>
+Link: https://patch.msgid.link/20240726100726.50824-1-soxiebing@163.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/patch_conexant.c | 54 ++++++----------------------------
+ 1 file changed, 9 insertions(+), 45 deletions(-)
+
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 17389a3801bd1..4472923ba694b 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -21,12 +21,6 @@
+ #include "hda_jack.h"
+ #include "hda_generic.h"
+
+-enum {
+- CX_HEADSET_NOPRESENT = 0,
+- CX_HEADSET_PARTPRESENT,
+- CX_HEADSET_ALLPRESENT,
+-};
+-
+ struct conexant_spec {
+ struct hda_gen_spec gen;
+
+@@ -48,7 +42,6 @@ struct conexant_spec {
+ unsigned int gpio_led;
+ unsigned int gpio_mute_led_mask;
+ unsigned int gpio_mic_led_mask;
+- unsigned int headset_present_flag;
+ bool is_cx8070_sn6140;
+ };
+
+@@ -250,48 +243,19 @@ static void cx_process_headset_plugin(struct hda_codec *codec)
+ }
+ }
+
+-static void cx_update_headset_mic_vref(struct hda_codec *codec, unsigned int res)
++static void cx_update_headset_mic_vref(struct hda_codec *codec, struct hda_jack_callback *event)
+ {
+- unsigned int phone_present, mic_persent, phone_tag, mic_tag;
+- struct conexant_spec *spec = codec->spec;
++ unsigned int mic_present;
+
+ /* In cx8070 and sn6140, the node 16 can only be config to headphone or disabled,
+ * the node 19 can only be config to microphone or disabled.
+ * Check hp&mic tag to process headset pulgin&plugout.
+ */
+- phone_tag = snd_hda_codec_read(codec, 0x16, 0, AC_VERB_GET_UNSOLICITED_RESPONSE, 0x0);
+- mic_tag = snd_hda_codec_read(codec, 0x19, 0, AC_VERB_GET_UNSOLICITED_RESPONSE, 0x0);
+- if ((phone_tag & (res >> AC_UNSOL_RES_TAG_SHIFT)) ||
+- (mic_tag & (res >> AC_UNSOL_RES_TAG_SHIFT))) {
+- phone_present = snd_hda_codec_read(codec, 0x16, 0, AC_VERB_GET_PIN_SENSE, 0x0);
+- if (!(phone_present & AC_PINSENSE_PRESENCE)) {/* headphone plugout */
+- spec->headset_present_flag = CX_HEADSET_NOPRESENT;
+- snd_hda_codec_write(codec, 0x19, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20);
+- return;
+- }
+- if (spec->headset_present_flag == CX_HEADSET_NOPRESENT) {
+- spec->headset_present_flag = CX_HEADSET_PARTPRESENT;
+- } else if (spec->headset_present_flag == CX_HEADSET_PARTPRESENT) {
+- mic_persent = snd_hda_codec_read(codec, 0x19, 0,
+- AC_VERB_GET_PIN_SENSE, 0x0);
+- /* headset is present */
+- if ((phone_present & AC_PINSENSE_PRESENCE) &&
+- (mic_persent & AC_PINSENSE_PRESENCE)) {
+- cx_process_headset_plugin(codec);
+- spec->headset_present_flag = CX_HEADSET_ALLPRESENT;
+- }
+- }
+- }
+-}
+-
+-static void cx_jack_unsol_event(struct hda_codec *codec, unsigned int res)
+-{
+- struct conexant_spec *spec = codec->spec;
+-
+- if (spec->is_cx8070_sn6140)
+- cx_update_headset_mic_vref(codec, res);
+-
+- snd_hda_jack_unsol_event(codec, res);
++ mic_present = snd_hda_codec_read(codec, 0x19, 0, AC_VERB_GET_PIN_SENSE, 0x0);
++ if (!(mic_present & AC_PINSENSE_PRESENCE)) /* mic plugout */
++ snd_hda_codec_write(codec, 0x19, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20);
++ else
++ cx_process_headset_plugin(codec);
+ }
+
+ static int cx_auto_suspend(struct hda_codec *codec)
+@@ -305,7 +269,7 @@ static const struct hda_codec_ops cx_auto_patch_ops = {
+ .build_pcms = snd_hda_gen_build_pcms,
+ .init = cx_auto_init,
+ .free = cx_auto_free,
+- .unsol_event = cx_jack_unsol_event,
++ .unsol_event = snd_hda_jack_unsol_event,
+ .suspend = cx_auto_suspend,
+ .check_power_status = snd_hda_gen_check_power_status,
+ };
+@@ -1163,7 +1127,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
+ case 0x14f11f86:
+ case 0x14f11f87:
+ spec->is_cx8070_sn6140 = true;
+- spec->headset_present_flag = CX_HEADSET_NOPRESENT;
++ snd_hda_jack_detect_enable_callback(codec, 0x19, cx_update_headset_mic_vref);
+ break;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From ac35e4c7c25df3644a4d3dab7ad3520fff0bb99f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 27 Jun 2024 08:29:59 +0100
+Subject: ARM: 9406/1: Fix callchain_trace() return value
+
+From: Jinjie Ruan <ruanjinjie@huawei.com>
+
+[ Upstream commit 4e7b4ff2dcaed228cb2fb7bfe720262c98ec1bb9 ]
+
+perf_callchain_store() returns 0 on success and -1 otherwise; fix
+callchain_trace() to return the correct bool value so that
+walk_stackframe() has a chance to stop walking the stack early.
+
+Fixes: 70ccc7c0667b ("ARM: 9258/1: stacktrace: Make stack walk callback consistent with generic code")
+Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/kernel/perf_callchain.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/arch/arm/kernel/perf_callchain.c b/arch/arm/kernel/perf_callchain.c
+index 7147edbe56c67..1d230ac9d0eb5 100644
+--- a/arch/arm/kernel/perf_callchain.c
++++ b/arch/arm/kernel/perf_callchain.c
+@@ -85,8 +85,7 @@ static bool
+ callchain_trace(void *data, unsigned long pc)
+ {
+ struct perf_callchain_entry_ctx *entry = data;
+- perf_callchain_store(entry, pc);
+- return true;
++ return perf_callchain_store(entry, pc) == 0;
+ }
+
+ void
+--
+2.43.0
+
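For context, a minimal user-space model (not kernel code, names are
illustrative) of the contract this fix restores: the stack walker stops as
soon as the callback returns false, and the callback now returns false once
perf_callchain_store() reports a full buffer.

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_ENTRIES 4

    struct entry_ctx { unsigned long buf[MAX_ENTRIES]; int nr; };

    /* behaves like perf_callchain_store(): 0 on success, -1 when full */
    static int store(struct entry_ctx *e, unsigned long pc)
    {
            if (e->nr >= MAX_ENTRIES)
                    return -1;
            e->buf[e->nr++] = pc;
            return 0;
    }

    /* the fixed callchain_trace(): propagate the store status as a bool */
    static bool trace_cb(void *data, unsigned long pc)
    {
            return store(data, pc) == 0;
    }

    int main(void)
    {
            struct entry_ctx e = { .nr = 0 };
            unsigned long pc;

            /* a walk_stackframe()-style loop: stops when the callback says so */
            for (pc = 0x1000; trace_cb(&e, pc); pc += 4)
                    ;
            printf("stored %d entries\n", e.nr);    /* prints 4, not forever */
            return 0;
    }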
--- /dev/null
+From 6dd8b995e238507a927e666e2858ff5810f7f354 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 27 Jun 2024 09:22:09 +0100
+Subject: ARM: 9408/1: mm: CFI: Fix some erroneous reset prototypes
+
+From: Linus Walleij <linus.walleij@linaro.org>
+
+[ Upstream commit 657a292d679ae3a6c733ab0e939e24ae44b20faf ]
+
+I somehow got a few cpu_nn_reset() signatures wrong in my
+patch. Fix them up.
+
+Closes: https://lore.kernel.org/oe-kbuild-all/202406260432.6WGV2jCk-lkp@intel.com/
+
+Fixes: 393999fa9627 ("ARM: 9389/2: mm: Define prototypes for all per-processor calls")
+Reported-by: kernel test robot <lkp@intel.com>
+Reported-by: Nathan Chancellor <nathan@kernel.org>
+Reviewed-by: Sami Tolvanen <samitolvanen@google.com>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/mm/proc.c | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+diff --git a/arch/arm/mm/proc.c b/arch/arm/mm/proc.c
+index bdbbf65d1b366..2027845efefb6 100644
+--- a/arch/arm/mm/proc.c
++++ b/arch/arm/mm/proc.c
+@@ -17,7 +17,7 @@ void cpu_arm7tdmi_proc_init(void);
+ __ADDRESSABLE(cpu_arm7tdmi_proc_init);
+ void cpu_arm7tdmi_proc_fin(void);
+ __ADDRESSABLE(cpu_arm7tdmi_proc_fin);
+-void cpu_arm7tdmi_reset(void);
++void cpu_arm7tdmi_reset(unsigned long addr, bool hvc);
+ __ADDRESSABLE(cpu_arm7tdmi_reset);
+ int cpu_arm7tdmi_do_idle(void);
+ __ADDRESSABLE(cpu_arm7tdmi_do_idle);
+@@ -32,7 +32,7 @@ void cpu_arm720_proc_init(void);
+ __ADDRESSABLE(cpu_arm720_proc_init);
+ void cpu_arm720_proc_fin(void);
+ __ADDRESSABLE(cpu_arm720_proc_fin);
+-void cpu_arm720_reset(void);
++void cpu_arm720_reset(unsigned long addr, bool hvc);
+ __ADDRESSABLE(cpu_arm720_reset);
+ int cpu_arm720_do_idle(void);
+ __ADDRESSABLE(cpu_arm720_do_idle);
+@@ -49,7 +49,7 @@ void cpu_arm740_proc_init(void);
+ __ADDRESSABLE(cpu_arm740_proc_init);
+ void cpu_arm740_proc_fin(void);
+ __ADDRESSABLE(cpu_arm740_proc_fin);
+-void cpu_arm740_reset(void);
++void cpu_arm740_reset(unsigned long addr, bool hvc);
+ __ADDRESSABLE(cpu_arm740_reset);
+ int cpu_arm740_do_idle(void);
+ __ADDRESSABLE(cpu_arm740_do_idle);
+@@ -64,7 +64,7 @@ void cpu_arm9tdmi_proc_init(void);
+ __ADDRESSABLE(cpu_arm9tdmi_proc_init);
+ void cpu_arm9tdmi_proc_fin(void);
+ __ADDRESSABLE(cpu_arm9tdmi_proc_fin);
+-void cpu_arm9tdmi_reset(void);
++void cpu_arm9tdmi_reset(unsigned long addr, bool hvc);
+ __ADDRESSABLE(cpu_arm9tdmi_reset);
+ int cpu_arm9tdmi_do_idle(void);
+ __ADDRESSABLE(cpu_arm9tdmi_do_idle);
+@@ -79,7 +79,7 @@ void cpu_arm920_proc_init(void);
+ __ADDRESSABLE(cpu_arm920_proc_init);
+ void cpu_arm920_proc_fin(void);
+ __ADDRESSABLE(cpu_arm920_proc_fin);
+-void cpu_arm920_reset(void);
++void cpu_arm920_reset(unsigned long addr, bool hvc);
+ __ADDRESSABLE(cpu_arm920_reset);
+ int cpu_arm920_do_idle(void);
+ __ADDRESSABLE(cpu_arm920_do_idle);
+@@ -102,7 +102,7 @@ void cpu_arm922_proc_init(void);
+ __ADDRESSABLE(cpu_arm922_proc_init);
+ void cpu_arm922_proc_fin(void);
+ __ADDRESSABLE(cpu_arm922_proc_fin);
+-void cpu_arm922_reset(void);
++void cpu_arm922_reset(unsigned long addr, bool hvc);
+ __ADDRESSABLE(cpu_arm922_reset);
+ int cpu_arm922_do_idle(void);
+ __ADDRESSABLE(cpu_arm922_do_idle);
+@@ -119,7 +119,7 @@ void cpu_arm925_proc_init(void);
+ __ADDRESSABLE(cpu_arm925_proc_init);
+ void cpu_arm925_proc_fin(void);
+ __ADDRESSABLE(cpu_arm925_proc_fin);
+-void cpu_arm925_reset(void);
++void cpu_arm925_reset(unsigned long addr, bool hvc);
+ __ADDRESSABLE(cpu_arm925_reset);
+ int cpu_arm925_do_idle(void);
+ __ADDRESSABLE(cpu_arm925_do_idle);
+@@ -159,7 +159,7 @@ void cpu_arm940_proc_init(void);
+ __ADDRESSABLE(cpu_arm940_proc_init);
+ void cpu_arm940_proc_fin(void);
+ __ADDRESSABLE(cpu_arm940_proc_fin);
+-void cpu_arm940_reset(void);
++void cpu_arm940_reset(unsigned long addr, bool hvc);
+ __ADDRESSABLE(cpu_arm940_reset);
+ int cpu_arm940_do_idle(void);
+ __ADDRESSABLE(cpu_arm940_do_idle);
+@@ -174,7 +174,7 @@ void cpu_arm946_proc_init(void);
+ __ADDRESSABLE(cpu_arm946_proc_init);
+ void cpu_arm946_proc_fin(void);
+ __ADDRESSABLE(cpu_arm946_proc_fin);
+-void cpu_arm946_reset(void);
++void cpu_arm946_reset(unsigned long addr, bool hvc);
+ __ADDRESSABLE(cpu_arm946_reset);
+ int cpu_arm946_do_idle(void);
+ __ADDRESSABLE(cpu_arm946_do_idle);
+@@ -429,7 +429,7 @@ void cpu_v7_proc_init(void);
+ __ADDRESSABLE(cpu_v7_proc_init);
+ void cpu_v7_proc_fin(void);
+ __ADDRESSABLE(cpu_v7_proc_fin);
+-void cpu_v7_reset(void);
++void cpu_v7_reset(unsigned long addr, bool hvc);
+ __ADDRESSABLE(cpu_v7_reset);
+ int cpu_v7_do_idle(void);
+ __ADDRESSABLE(cpu_v7_do_idle);
+--
+2.43.0
+
--- /dev/null
+From c029e35a7e313134147bd4b8063aa4feeee2fca4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 Jul 2024 14:36:01 +0100
+Subject: arm64: jump_label: Ensure patched jump_labels are visible to all CPUs
+
+From: Will Deacon <will@kernel.org>
+
+[ Upstream commit cfb00a35786414e7c0e6226b277d9f09657eae74 ]
+
+Although the Arm architecture permits concurrent modification and
+execution of NOP and branch instructions, it still requires some
+synchronisation to ensure that other CPUs consistently execute the newly
+written instruction:
+
+ > When the modified instructions are observable, each PE that is
+ > executing the modified instructions must execute an ISB or perform a
+ > context synchronizing event to ensure execution of the modified
+ > instructions
+
+Prior to commit f6cc0c501649 ("arm64: Avoid calling stop_machine() when
+patching jump labels"), the arm64 jump_label patching machinery
+performed synchronisation using stop_machine() after each modification,
+however this was problematic when flipping static keys from atomic
+contexts (namely, the arm_arch_timer CPU hotplug startup notifier) and
+so we switched to the _nosync() patching routines to avoid "scheduling
+while atomic" BUG()s during boot.
+
+In hindsight, the analysis of the issue in f6cc0c501649 isn't quite
+right: it cites the use of IPIs in the default patching routines as the
+cause of the lockup, whereas stop_machine() does not rely on IPIs and
+the I-cache invalidation is performed using __flush_icache_range(),
+which elides the call to kick_all_cpus_sync(). In fact, the blocking
+wait for other CPUs is what triggers the BUG() and the problem remains
+even after f6cc0c501649, for example because we could block on the
+jump_label_mutex. Eventually, the arm_arch_timer driver was fixed to
+avoid the static key entirely in commit a862fc2254bd
+("clocksource/arm_arch_timer: Remove use of workaround static key").
+
+This all leaves the jump_label patching code in a funny situation on
+arm64 as we do not synchronise with other CPUs to reduce the likelihood
+of a bug which no longer exists. Consequently, toggling a static key on
+one CPU cannot be assumed to take effect on other CPUs, leading to
+potential issues, for example with missing preempt notifiers.
+
+Rather than revert f6cc0c501649 and go back to stop_machine() for each
+patch site, implement arch_jump_label_transform_apply() and kick all
+the other CPUs with an IPI at the end of patching.
+
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Marc Zyngier <maz@kernel.org>
+Fixes: f6cc0c501649 ("arm64: Avoid calling stop_machine() when patching jump labels")
+Signed-off-by: Will Deacon <will@kernel.org>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Reviewed-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/20240731133601.3073-1-will@kernel.org
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/include/asm/jump_label.h | 1 +
+ arch/arm64/kernel/jump_label.c | 11 +++++++++--
+ 2 files changed, 10 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
+index 4e753908b8018..a0a5bbae7229e 100644
+--- a/arch/arm64/include/asm/jump_label.h
++++ b/arch/arm64/include/asm/jump_label.h
+@@ -13,6 +13,7 @@
+ #include <linux/types.h>
+ #include <asm/insn.h>
+
++#define HAVE_JUMP_LABEL_BATCH
+ #define JUMP_LABEL_NOP_SIZE AARCH64_INSN_SIZE
+
+ #define JUMP_TABLE_ENTRY(key, label) \
+diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c
+index faf88ec9c48e8..f63ea915d6ad2 100644
+--- a/arch/arm64/kernel/jump_label.c
++++ b/arch/arm64/kernel/jump_label.c
+@@ -7,11 +7,12 @@
+ */
+ #include <linux/kernel.h>
+ #include <linux/jump_label.h>
++#include <linux/smp.h>
+ #include <asm/insn.h>
+ #include <asm/patching.h>
+
+-void arch_jump_label_transform(struct jump_entry *entry,
+- enum jump_label_type type)
++bool arch_jump_label_transform_queue(struct jump_entry *entry,
++ enum jump_label_type type)
+ {
+ void *addr = (void *)jump_entry_code(entry);
+ u32 insn;
+@@ -25,4 +26,10 @@ void arch_jump_label_transform(struct jump_entry *entry,
+ }
+
+ aarch64_insn_patch_text_nosync(addr, insn);
++ return true;
++}
++
++void arch_jump_label_transform_apply(void)
++{
++ kick_all_cpus_sync();
+ }
+--
+2.43.0
+
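For context, a rough sketch (simplified from kernel/jump_label.c, not part of
this patch) of how the generic code drives the two new hooks once
HAVE_JUMP_LABEL_BATCH is defined: every entry is queued, and a single apply
step at the end lets arm64 issue one kick_all_cpus_sync() IPI broadcast
instead of synchronising per patch site.

    /* Simplified sketch of the batch-mode __jump_label_update() caller */
    static void __jump_label_update(struct static_key *key,
                                    struct jump_entry *entry,
                                    struct jump_entry *stop, bool init)
    {
            for (; entry < stop && jump_entry_key(entry) == key; entry++) {
                    if (!jump_label_can_update(entry, init))
                            continue;
                    if (!arch_jump_label_transform_queue(entry,
                                                         jump_label_type(entry))) {
                            /* queue full: flush what is pending, then retry */
                            arch_jump_label_transform_apply();
                            BUG_ON(!arch_jump_label_transform_queue(entry,
                                                         jump_label_type(entry)));
                    }
            }
            /* on arm64 this now ends in kick_all_cpus_sync() */
            arch_jump_label_transform_apply();
    }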
--- /dev/null
+From 174590120ec02a9131bde9486bf35d44411c880b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 Jul 2024 14:22:42 +0530
+Subject: Bluetooth: btintel: Fail setup on error
+
+From: Kiran K <kiran.k@intel.com>
+
+[ Upstream commit e22a3a9d4134d7e6351a2998771522e74bcc58da ]
+
+Do not attempt to send any hci command to controller if *setup* function
+fails.
+
+Fixes: af395330abed ("Bluetooth: btintel: Add Intel devcoredump support")
+Signed-off-by: Kiran K <kiran.k@intel.com>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/bluetooth/btintel.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
+index 7ecc67deecb09..93900c37349c1 100644
+--- a/drivers/bluetooth/btintel.c
++++ b/drivers/bluetooth/btintel.c
+@@ -3012,6 +3012,9 @@ static int btintel_setup_combined(struct hci_dev *hdev)
+ btintel_set_dsm_reset_method(hdev, &ver_tlv);
+
+ err = btintel_bootloader_setup_tlv(hdev, &ver_tlv);
++ if (err)
++ goto exit_error;
++
+ btintel_register_devcoredump_support(hdev);
+ btintel_print_fseq_info(hdev);
+ break;
+--
+2.43.0
+
--- /dev/null
+From 334c74173cd926bf480c701f8fdd968ed37e8617 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Jul 2024 10:40:03 -0400
+Subject: Bluetooth: hci_sync: Fix suspending with wrong filter policy
+
+From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+
+[ Upstream commit 96b82af36efaa1787946e021aa3dc5410c05beeb ]
+
+When suspending, the scan filter policy cannot be 0x00 (no acceptlist),
+since that means the host has to process every advertisement report,
+waking up the system. So check whether hdev is marked as suspended and
+the resulting filter policy would be 0x00 (no acceptlist): if there are
+no devices in the acceptlist, skip passive scanning altogether;
+otherwise reset the filter policy to 0x01 so the acceptlist is used,
+since the devices programmed there can still wake up the system.
+
+Fixes: 182ee45da083 ("Bluetooth: hci_sync: Rework hci_suspend_notifier")
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/hci_sync.c | 21 +++++++++++++++++++++
+ 1 file changed, 21 insertions(+)
+
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index bb704088559fb..2f26147fdf3c9 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -2929,6 +2929,27 @@ static int hci_passive_scan_sync(struct hci_dev *hdev)
+ */
+ filter_policy = hci_update_accept_list_sync(hdev);
+
++ /* If suspended and filter_policy set to 0x00 (no acceptlist) then
++ * passive scanning cannot be started since that would require the host
++ * to be woken up to process the reports.
++ */
++ if (hdev->suspended && !filter_policy) {
++ /* Check if accept list is empty then there is no need to scan
++ * while suspended.
++ */
++ if (list_empty(&hdev->le_accept_list))
++ return 0;
++
++ /* If there are devices is the accept_list that means some
++ * devices could not be programmed which in non-suspended case
++ * means filter_policy needs to be set to 0x00 so the host needs
++ * to filter, but since this is treating suspended case we
++ * can ignore device needing host to filter to allow devices in
++ * the acceptlist to be able to wakeup the system.
++ */
++ filter_policy = 0x01;
++ }
++
+ /* When the controller is using random resolvable addresses and
+ * with that having LE privacy enabled, then controllers with
+ * Extended Scanner Filter Policies support can now enable support
+--
+2.43.0
+
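For context, a small pure-logic model (user-space, illustrative names, not
net/bluetooth code) of the suspend rule this patch adds to
hci_passive_scan_sync(): with nothing in the accept list there is nothing
that may wake the host, so scanning is skipped; with a populated accept list
the policy is forced to 0x01 so only those devices are reported.

    #include <stdbool.h>
    #include <stdio.h>

    /* Returns the filter policy to program, or -1 to skip passive scanning. */
    static int suspended_scan_policy(bool suspended, bool accept_list_empty,
                                     int filter_policy)
    {
            if (!suspended || filter_policy != 0x00)
                    return filter_policy;
            if (accept_list_empty)
                    return -1;      /* nothing can wake us up: don't scan */
            return 0x01;            /* scan, but only for accept-list devices */
    }

    int main(void)
    {
            printf("%d\n", suspended_scan_policy(true, true, 0x00));   /* -1 */
            printf("%d\n", suspended_scan_policy(true, false, 0x00));  /*  1 */
            printf("%d\n", suspended_scan_policy(false, false, 0x00)); /*  0 */
            return 0;
    }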
--- /dev/null
+From c457d41f16e1eb6b1c2d1ec61d6be28b26e2c592 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Jul 2024 15:21:06 -0700
+Subject: bnxt_en: Fix RSS logic in __bnxt_reserve_rings()
+
+From: Pavan Chebbi <pavan.chebbi@broadcom.com>
+
+[ Upstream commit 98ba1d931f611e8f8f519c0405fa0a1a76554bfa ]
+
+In __bnxt_reserve_rings(), the existing code unconditionally sets the
+default RSS indirection table to default if netif_is_rxfh_configured()
+returns false. This used to be correct before we added RSS contexts
+support. For example, if the user is changing the number of ethtool
+channels, we will enter this path to reserve the new number of rings.
+We will then set the RSS indirection table to default to cover the new
+number of rings if netif_is_rxfh_configured() is false.
+
+Now, with RSS contexts support, if the user has added or deleted RSS
+contexts, we may now enter this path to reserve the new number of VNICs.
+However, netif_is_rxfh_configured() will not return the correct state if
+we are still in the middle of set_rxfh(). So the existing code may
+set the indirection table of the default RSS context to default by
+mistake.
+
+Fix it to check if the reservation of the RX rings is changing. Only
+check netif_is_rxfh_configured() if it is changing. RX rings will not
+change in the middle of set_rxfh() and this will fix the issue.
+
+Fixes: b3d0083caf9a ("bnxt_en: Support RSS contexts in ethtool .{get|set}_rxfh()")
+Reported-and-tested-by: Jakub Kicinski <kuba@kernel.org>
+Link: https://lore.kernel.org/20240625010210.2002310-1-kuba@kernel.org
+Reviewed-by: Andy Gospodarek <andrew.gospodarek@broadcom.com>
+Signed-off-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Link: https://patch.msgid.link/20240724222106.147744-1-michael.chan@broadcom.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 43952689bfb0c..23627c973e40f 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -7491,8 +7491,8 @@ static int bnxt_get_avail_msix(struct bnxt *bp, int num);
+ static int __bnxt_reserve_rings(struct bnxt *bp)
+ {
+ struct bnxt_hw_rings hwr = {0};
++ int rx_rings, old_rx_rings, rc;
+ int cp = bp->cp_nr_rings;
+- int rx_rings, rc;
+ int ulp_msix = 0;
+ bool sh = false;
+ int tx_cp;
+@@ -7526,6 +7526,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
+ hwr.grp = bp->rx_nr_rings;
+ hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
+ hwr.stat = bnxt_get_func_stat_ctxs(bp);
++ old_rx_rings = bp->hw_resc.resv_rx_rings;
+
+ rc = bnxt_hwrm_reserve_rings(bp, &hwr);
+ if (rc)
+@@ -7580,7 +7581,8 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
+ if (!bnxt_rings_ok(bp, &hwr))
+ return -ENOMEM;
+
+- if (!netif_is_rxfh_configured(bp->dev))
++ if (old_rx_rings != bp->hw_resc.resv_rx_rings &&
++ !netif_is_rxfh_configured(bp->dev))
+ bnxt_set_dflt_rss_indir_tbl(bp, NULL);
+
+ if (!bnxt_ulp_registered(bp->edev) && BNXT_NEW_RM(bp)) {
+--
+2.43.0
+
--- /dev/null
+From ef7519924cd0d3c67cfb7714e46a901e4feed9cd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 2 Jul 2024 18:22:15 -0300
+Subject: drm/atomic: Allow userspace to use damage clips with async flips
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: André Almeida <andrealmeid@igalia.com>
+
+[ Upstream commit f85de245c6a8e2654e1e9158588bcf78e38cd5a5 ]
+
+Allow userspace to use damage clips with atomic async flips. Damage
+clips are useful for partial plane updates, which can be helpful for
+clients that want to do flips asynchronously.
+
+Fixes: 0e26cc72c71c ("drm: Refuse to async flip with atomic prop changes")
+Signed-off-by: André Almeida <andrealmeid@igalia.com>
+Reviewed-by: Simon Ser <contact@emersion.fr>
+Signed-off-by: Simon Ser <contact@emersion.fr>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240702212215.109696-2-andrealmeid@igalia.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/drm_atomic_uapi.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
+index fef4849a4ec21..02b1235c6d619 100644
+--- a/drivers/gpu/drm/drm_atomic_uapi.c
++++ b/drivers/gpu/drm/drm_atomic_uapi.c
+@@ -1068,7 +1068,8 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
+
+ if (async_flip &&
+ prop != config->prop_fb_id &&
+- prop != config->prop_in_fence_fd) {
++ prop != config->prop_in_fence_fd &&
++ prop != config->prop_fb_damage_clips) {
+ ret = drm_atomic_plane_get_property(plane, plane_state,
+ prop, &old_val);
+ ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
+--
+2.43.0
+
--- /dev/null
+From ccab28682bd44fde65557a1d5836e91e5374940b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 2 Jul 2024 18:22:14 -0300
+Subject: drm/atomic: Allow userspace to use explicit sync with atomic async
+ flips
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: André Almeida <andrealmeid@igalia.com>
+
+[ Upstream commit e0fa4132bfae725a60c50d53bac80ec31fc20d89 ]
+
+Allow userspace to use explicit synchronization with atomic async flips.
+That means that the flip will wait for some hardware fence, and then
+will flip as soon as possible (async) with respect to the vblank.
+
+Fixes: 0e26cc72c71c ("drm: Refuse to async flip with atomic prop changes")
+Signed-off-by: André Almeida <andrealmeid@igalia.com>
+Reviewed-by: Simon Ser <contact@emersion.fr>
+Signed-off-by: Simon Ser <contact@emersion.fr>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240702212215.109696-1-andrealmeid@igalia.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/drm_atomic_uapi.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
+index fc16fddee5c59..fef4849a4ec21 100644
+--- a/drivers/gpu/drm/drm_atomic_uapi.c
++++ b/drivers/gpu/drm/drm_atomic_uapi.c
+@@ -1066,7 +1066,9 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
+ break;
+ }
+
+- if (async_flip && prop != config->prop_fb_id) {
++ if (async_flip &&
++ prop != config->prop_fb_id &&
++ prop != config->prop_in_fence_fd) {
+ ret = drm_atomic_plane_get_property(plane, plane_state,
+ prop, &old_val);
+ ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
+--
+2.43.0
+
--- /dev/null
+From 3ea428fbfc96303e51b441d3b152cd5e4cfdfc23 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Jul 2024 11:09:54 -0500
+Subject: drm/client: Fix error code in drm_client_buffer_vmap_local()
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+[ Upstream commit b5fbf924f125ba3638cfdc21c0515eb7e76264ca ]
+
+This function accidentally returns zero/success on the failure path.
+It leads to locking issues and an uninitialized *map_copy in the
+caller.
+
+Fixes: b4b0193e83cb ("drm/fbdev-generic: Fix locking with drm_client_buffer_vmap_local()")
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Reviewed-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+Reviewed-by: Thomas Zimmermann <tzimmermann@suse.de>
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Link: https://patchwork.freedesktop.org/patch/msgid/89d13df3-747c-4c5d-b122-d081aef5110a@stanley.mountain
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/drm_client.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
+index 2803ac111bbd8..bfedcbf516dbe 100644
+--- a/drivers/gpu/drm/drm_client.c
++++ b/drivers/gpu/drm/drm_client.c
+@@ -355,7 +355,7 @@ int drm_client_buffer_vmap_local(struct drm_client_buffer *buffer,
+
+ err_drm_gem_vmap_unlocked:
+ drm_gem_unlock(gem);
+- return 0;
++ return ret;
+ }
+ EXPORT_SYMBOL(drm_client_buffer_vmap_local);
+
+--
+2.43.0
+
--- /dev/null
+From d7652d7f12057ac890e3944a5db0f9f94d20df84 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Jul 2024 15:51:33 +0200
+Subject: drm/gpuvm: fix missing dependency to DRM_EXEC
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Danilo Krummrich <dakr@redhat.com>
+
+[ Upstream commit eeb1f825b5dc68047a0556e5ae86d1467920db41 ]
+
+In commit 50c1a36f594b ("drm/gpuvm: track/lock/validate external/evicted
+objects") we started using drm_exec, but did not select DRM_EXEC in the
+Kconfig for DRM_GPUVM; fix this.
+
+Cc: Christian König <christian.koenig@amd.com>
+Cc: Boris Brezillon <boris.brezillon@collabora.com>
+Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Fixes: 50c1a36f594b ("drm/gpuvm: track/lock/validate external/evicted objects")
+Signed-off-by: Danilo Krummrich <dakr@redhat.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240715135158.133287-1-dakr@redhat.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
+index 359b68adafc1b..79628ff837e6f 100644
+--- a/drivers/gpu/drm/Kconfig
++++ b/drivers/gpu/drm/Kconfig
+@@ -253,6 +253,7 @@ config DRM_EXEC
+ config DRM_GPUVM
+ tristate
+ depends on DRM
++ select DRM_EXEC
+ help
+ GPU-VM representation providing helpers to manage a GPUs virtual
+ address space
+--
+2.43.0
+
--- /dev/null
+From 89989ba7021a8cbfa12694eb56f914560da2c97a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Jul 2024 09:25:05 +0530
+Subject: drm/i915/hdcp: Fix HDCP2_STREAM_STATUS macro
+
+From: Suraj Kandpal <suraj.kandpal@intel.com>
+
+[ Upstream commit 555069117390a5d581863bc797fb546bb4417c31 ]
+
+Fix the HDCP2_STREAM_STATUS macro: it referenced pipe instead of port,
+and never threw a compile error because no one used it.
+
+--v2
+-Add Fixes [Jani]
+
+Fixes: d631b984cc90 ("drm/i915/hdcp: Add HDCP 2.2 stream register")
+Signed-off-by: Suraj Kandpal <suraj.kandpal@intel.com>
+Reviewed-by: Jani Nikula <jani.nikula@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240730035505.3759899-1-suraj.kandpal@intel.com
+(cherry picked from commit 73d7cd542bbd0a7c6881ea0df5255f190a1e7236)
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/display/intel_hdcp_regs.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_regs.h b/drivers/gpu/drm/i915/display/intel_hdcp_regs.h
+index a568a457e5326..f590d7f48ba74 100644
+--- a/drivers/gpu/drm/i915/display/intel_hdcp_regs.h
++++ b/drivers/gpu/drm/i915/display/intel_hdcp_regs.h
+@@ -251,7 +251,7 @@
+ #define HDCP2_STREAM_STATUS(dev_priv, trans, port) \
+ (TRANS_HDCP(dev_priv) ? \
+ TRANS_HDCP2_STREAM_STATUS(trans) : \
+- PIPE_HDCP2_STREAM_STATUS(pipe))
++ PIPE_HDCP2_STREAM_STATUS(port))
+
+ #define _PORTA_HDCP2_AUTH_STREAM 0x66F00
+ #define _PORTB_HDCP2_AUTH_STREAM 0x66F04
+--
+2.43.0
+
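For context, a small stand-alone demonstration (illustrative names, not i915
code) of why this kind of bug can sit unnoticed: a function-like macro is
only type-checked when it is expanded, so a stray identifier such as the old
'pipe' reference never produces a build error until somebody uses the macro.

    #include <stdio.h>

    static int status_reg[4] = { 10, 11, 12, 13 };

    /* Buggy variant in the style of the old macro: it refers to 'pipe', which
     * is not a parameter, yet it compiles fine as long as nobody expands it. */
    #define STREAM_STATUS_BUGGY(port)  (status_reg[pipe])
    #define STREAM_STATUS_FIXED(port)  (status_reg[port])

    int main(void)
    {
            printf("%d\n", STREAM_STATUS_FIXED(1));         /* prints 11 */
            /* Uncommenting the next line reproduces the latent build error:
             * printf("%d\n", STREAM_STATUS_BUGGY(1));  // error: 'pipe' undeclared
             */
            return 0;
    }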
--- /dev/null
+From 334c9a66a4f22c4fb6cdd99093195a41ee711e6a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 18 Jul 2024 18:58:46 +0200
+Subject: drm/nouveau: prime: fix refcount underflow
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Danilo Krummrich <dakr@kernel.org>
+
+[ Upstream commit a9bf3efc33f1fbf88787a277f7349459283c9b95 ]
+
+Calling nouveau_bo_ref() on a nouveau_bo without initializing it (and
+hence the backing ttm_bo) leads to a refcount underflow.
+
+Instead of calling nouveau_bo_ref() in the unwind path of
+drm_gem_object_init(), clean things up manually.
+
+Fixes: ab9ccb96a6e6 ("drm/nouveau: use prime helpers")
+Reviewed-by: Ben Skeggs <bskeggs@nvidia.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240718165959.3983-2-dakr@kernel.org
+(cherry picked from commit 1b93f3e89d03cfc576636e195466a0d728ad8de5)
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/nouveau/nouveau_prime.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
+index b58ab595faf82..cd95446d68511 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
++++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
+@@ -64,7 +64,8 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
+ * to the caller, instead of a normal nouveau_bo ttm reference. */
+ ret = drm_gem_object_init(dev, &nvbo->bo.base, size);
+ if (ret) {
+- nouveau_bo_ref(NULL, &nvbo);
++ drm_gem_object_release(&nvbo->bo.base);
++ kfree(nvbo);
+ obj = ERR_PTR(-ENOMEM);
+ goto unlock;
+ }
+--
+2.43.0
+
--- /dev/null
+From f86f40657606ee26a17bb854df304c074d5df2bd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 19 Jul 2024 11:36:27 -0500
+Subject: drm/vmwgfx: Fix overlay when using Screen Targets
+
+From: Ian Forbes <ian.forbes@broadcom.com>
+
+[ Upstream commit cb372a505a994cb39aa75acfb8b3bcf94787cf94 ]
+
+This code was never updated to support Screen Targets.
+Fixes a bug where Xv playback displays a green screen instead of actual
+video contents when 3D acceleration is disabled in the guest.
+
+Fixes: c8261a961ece ("vmwgfx: Major KMS refactoring / cleanup in preparation of screen targets")
+Reported-by: Doug Brown <doug@schmorgal.com>
+Closes: https://lore.kernel.org/all/bd9cb3c7-90e8-435d-bc28-0e38fee58977@schmorgal.com
+Signed-off-by: Ian Forbes <ian.forbes@broadcom.com>
+Tested-by: Doug Brown <doug@schmorgal.com>
+Signed-off-by: Zack Rusin <zack.rusin@broadcom.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240719163627.20888-1-ian.forbes@broadcom.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+index c45b4724e4141..e20f64b67b266 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+@@ -92,7 +92,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv,
+ {
+ struct vmw_escape_video_flush *flush;
+ size_t fifo_size;
+- bool have_so = (dev_priv->active_display_unit == vmw_du_screen_object);
++ bool have_so = (dev_priv->active_display_unit != vmw_du_legacy);
+ int i, num_items;
+ SVGAGuestPtr ptr;
+
+--
+2.43.0
+
--- /dev/null
+From 93d2e989d74624d7c748780d25075b996d98356e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 22 Jul 2024 14:41:14 -0400
+Subject: drm/vmwgfx: Make sure the screen surface is ref counted
+
+From: Zack Rusin <zack.rusin@broadcom.com>
+
+[ Upstream commit 09f34a00272d2311f6e5d64ed8ad824ef78f7487 ]
+
+Fix races issues in virtual crc generation by making sure the surface
+the code uses for crc computation is properly ref counted.
+
+CRC generation was trying to be too clever by allowing the surfaces
+to go in and out of scope, in the hope of always having some kind
+of screen present. That's not always the case, in particular during
+atomic disable, so to make sure the surface, when present, is not
+being actively destroyed at the same time, hold a reference to it.
+
+Signed-off-by: Zack Rusin <zack.rusin@broadcom.com>
+Fixes: 7b0062036c3b ("drm/vmwgfx: Implement virtual crc generation")
+Cc: Zack Rusin <zack.rusin@broadcom.com>
+Cc: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
+Cc: dri-devel@lists.freedesktop.org
+Reviewed-by: Maaz Mombasawala <maaz.mombasawala@broadcom.com>
+Reviewed-by: Martin Krastev <martin.krastev@broadcom.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240722184313.181318-3-zack.rusin@broadcom.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c | 40 +++++++++++++++-------------
+ 1 file changed, 22 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c
+index 7e93a45948f79..ac002048d8e5e 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c
+@@ -76,7 +76,7 @@ vmw_surface_sync(struct vmw_private *vmw,
+ return ret;
+ }
+
+-static int
++static void
+ compute_crc(struct drm_crtc *crtc,
+ struct vmw_surface *surf,
+ u32 *crc)
+@@ -102,8 +102,6 @@ compute_crc(struct drm_crtc *crtc,
+ }
+
+ vmw_bo_unmap(bo);
+-
+- return 0;
+ }
+
+ static void
+@@ -117,7 +115,6 @@ crc_generate_worker(struct work_struct *work)
+ u64 frame_start, frame_end;
+ u32 crc32 = 0;
+ struct vmw_surface *surf = 0;
+- int ret;
+
+ spin_lock_irq(&du->vkms.crc_state_lock);
+ crc_pending = du->vkms.crc_pending;
+@@ -131,22 +128,24 @@ crc_generate_worker(struct work_struct *work)
+ return;
+
+ spin_lock_irq(&du->vkms.crc_state_lock);
+- surf = du->vkms.surface;
++ surf = vmw_surface_reference(du->vkms.surface);
+ spin_unlock_irq(&du->vkms.crc_state_lock);
+
+- if (vmw_surface_sync(vmw, surf)) {
+- drm_warn(crtc->dev, "CRC worker wasn't able to sync the crc surface!\n");
+- return;
+- }
++ if (surf) {
++ if (vmw_surface_sync(vmw, surf)) {
++ drm_warn(
++ crtc->dev,
++ "CRC worker wasn't able to sync the crc surface!\n");
++ return;
++ }
+
+- ret = compute_crc(crtc, surf, &crc32);
+- if (ret)
+- return;
++ compute_crc(crtc, surf, &crc32);
++ vmw_surface_unreference(&surf);
++ }
+
+ spin_lock_irq(&du->vkms.crc_state_lock);
+ frame_start = du->vkms.frame_start;
+ frame_end = du->vkms.frame_end;
+- crc_pending = du->vkms.crc_pending;
+ du->vkms.frame_start = 0;
+ du->vkms.frame_end = 0;
+ du->vkms.crc_pending = false;
+@@ -165,7 +164,7 @@ vmw_vkms_vblank_simulate(struct hrtimer *timer)
+ struct vmw_display_unit *du = container_of(timer, struct vmw_display_unit, vkms.timer);
+ struct drm_crtc *crtc = &du->crtc;
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
+- struct vmw_surface *surf = NULL;
++ bool has_surface = false;
+ u64 ret_overrun;
+ bool locked, ret;
+
+@@ -180,10 +179,10 @@ vmw_vkms_vblank_simulate(struct hrtimer *timer)
+ WARN_ON(!ret);
+ if (!locked)
+ return HRTIMER_RESTART;
+- surf = du->vkms.surface;
++ has_surface = du->vkms.surface != NULL;
+ vmw_vkms_unlock(crtc);
+
+- if (du->vkms.crc_enabled && surf) {
++ if (du->vkms.crc_enabled && has_surface) {
+ u64 frame = drm_crtc_accurate_vblank_count(crtc);
+
+ spin_lock(&du->vkms.crc_state_lock);
+@@ -337,6 +336,8 @@ vmw_vkms_crtc_cleanup(struct drm_crtc *crtc)
+ {
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+
++ if (du->vkms.surface)
++ vmw_surface_unreference(&du->vkms.surface);
+ WARN_ON(work_pending(&du->vkms.crc_generator_work));
+ hrtimer_cancel(&du->vkms.timer);
+ }
+@@ -498,9 +499,12 @@ vmw_vkms_set_crc_surface(struct drm_crtc *crtc,
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
+
+- if (vmw->vkms_enabled) {
++ if (vmw->vkms_enabled && du->vkms.surface != surf) {
+ WARN_ON(atomic_read(&du->vkms.atomic_lock) != VMW_VKMS_LOCK_MODESET);
+- du->vkms.surface = surf;
++ if (du->vkms.surface)
++ vmw_surface_unreference(&du->vkms.surface);
++ if (surf)
++ du->vkms.surface = vmw_surface_reference(surf);
+ }
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 9f2d4fa98a51569a2b0e04893562620e5b3149bb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Jun 2024 15:59:51 -0500
+Subject: drm/vmwgfx: Trigger a modeset when the screen moves
+
+From: Ian Forbes <ian.forbes@broadcom.com>
+
+[ Upstream commit 75c3e8a26a35d4f3eee299b3cc7e465f166f4e2d ]
+
+When multi-monitor is cycled the X,Y position of the Screen Target will
+likely change but the resolution will not. We need to trigger a modeset
+when this occurs in order to recreate the Screen Target with the correct
+X,Y position.
+
+Fixes a bug where multiple displays are shown in a single scrollable
+host window rather than in 2+ windows on separate host displays.
+
+Fixes: 426826933109 ("drm/vmwgfx: Filter modes which exceed graphics memory")
+Signed-off-by: Ian Forbes <ian.forbes@broadcom.com>
+Signed-off-by: Zack Rusin <zack.rusin@broadcom.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240624205951.23343-1-ian.forbes@broadcom.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 29 +++++++++++++++++++++++++++-
+ 1 file changed, 28 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+index a04e0736318da..9becd71bc93bc 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+@@ -877,6 +877,32 @@ vmw_stdu_connector_mode_valid(struct drm_connector *connector,
+ return MODE_OK;
+ }
+
++/*
++ * Trigger a modeset if the X,Y position of the Screen Target changes.
++ * This is needed when multi-mon is cycled. The original Screen Target will have
++ * the same mode but its relative X,Y position in the topology will change.
++ */
++static int vmw_stdu_connector_atomic_check(struct drm_connector *conn,
++ struct drm_atomic_state *state)
++{
++ struct drm_connector_state *conn_state;
++ struct vmw_screen_target_display_unit *du;
++ struct drm_crtc_state *new_crtc_state;
++
++ conn_state = drm_atomic_get_connector_state(state, conn);
++ du = vmw_connector_to_stdu(conn);
++
++ if (!conn_state->crtc)
++ return 0;
++
++ new_crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
++ if (du->base.gui_x != du->base.set_gui_x ||
++ du->base.gui_y != du->base.set_gui_y)
++ new_crtc_state->mode_changed = true;
++
++ return 0;
++}
++
+ static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
+ .dpms = vmw_du_connector_dpms,
+ .detect = vmw_du_connector_detect,
+@@ -891,7 +917,8 @@ static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
+ static const struct
+ drm_connector_helper_funcs vmw_stdu_connector_helper_funcs = {
+ .get_modes = vmw_connector_get_modes,
+- .mode_valid = vmw_stdu_connector_mode_valid
++ .mode_valid = vmw_stdu_connector_mode_valid,
++ .atomic_check = vmw_stdu_connector_atomic_check,
+ };
+
+
+--
+2.43.0
+
--- /dev/null
+From c368fc75e177fb226dae0ced44095671614f89cb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 25 Jul 2024 15:23:51 -0700
+Subject: ethtool: fix setting key and resetting indir at once
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 7195f0ef7f5b8c678cf28de7c9b619cb908b482c ]
+
+The indirection table and the key follow struct ethtool_rxfh
+in user memory.
+
+To reset the indirection table, user space calls SET_RXFH with
+a table of size 0 (OTOH to say "no change" it should use -1 / ~0).
+The logic for calculating the offset where the key sits is
+incorrect in this case, as the kernel would still offset by the full
+table length, while for the reset there is no indir table and the
+key is immediately after the struct.
+
+ $ ethtool -X eth0 default hkey 01:02:03...
+ $ ethtool -x eth0
+ [...]
+ RSS hash key:
+00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00
+ [...]
+
+Fixes: 3de0b592394d ("ethtool: Support for configurable RSS hash key")
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ethtool/ioctl.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
+index d9c8a6a16cb2a..f3ae4c56279c1 100644
+--- a/net/ethtool/ioctl.c
++++ b/net/ethtool/ioctl.c
+@@ -1280,13 +1280,13 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
+ u32 rss_cfg_offset = offsetof(struct ethtool_rxfh, rss_config[0]);
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ u32 dev_indir_size = 0, dev_key_size = 0, i;
++ u32 user_indir_len = 0, indir_bytes = 0;
+ struct ethtool_rxfh_param rxfh_dev = {};
+ struct ethtool_rxfh_context *ctx = NULL;
+ struct netlink_ext_ack *extack = NULL;
+ struct ethtool_rxnfc rx_rings;
+ struct ethtool_rxfh rxfh;
+ bool locked = false; /* dev->ethtool->rss_lock taken */
+- u32 indir_bytes = 0;
+ bool create = false;
+ u8 *rss_config;
+ int ret;
+@@ -1349,6 +1349,7 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
+ */
+ if (rxfh.indir_size &&
+ rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE) {
++ user_indir_len = indir_bytes;
+ rxfh_dev.indir = (u32 *)rss_config;
+ rxfh_dev.indir_size = dev_indir_size;
+ ret = ethtool_copy_validate_indir(rxfh_dev.indir,
+@@ -1375,7 +1376,7 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
+ rxfh_dev.key_size = dev_key_size;
+ rxfh_dev.key = rss_config + indir_bytes;
+ if (copy_from_user(rxfh_dev.key,
+- useraddr + rss_cfg_offset + indir_bytes,
++ useraddr + rss_cfg_offset + user_indir_len,
+ rxfh.key_size)) {
+ ret = -EFAULT;
+ goto out;
+--
+2.43.0
+
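For context, a minimal user-space model of the layout the fix corrects;
struct ethtool_rxfh and ETH_RXFH_INDIR_NO_CHANGE come from the UAPI header,
while the helper itself is illustrative. The key in user memory starts right
after however many indirection-table words the user actually passed, which is
zero both for a reset (indir_size == 0) and for "no change" (~0), not after
the device-sized table the old code assumed.

    #include <linux/ethtool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Offset of the hash key inside the user buffer passed to SET_RXFH. */
    static size_t key_offset(__u32 user_indir_size)
    {
            size_t user_indir_len = 0;

            if (user_indir_size && user_indir_size != ETH_RXFH_INDIR_NO_CHANGE)
                    user_indir_len = user_indir_size * sizeof(__u32);

            return offsetof(struct ethtool_rxfh, rss_config) + user_indir_len;
    }

    int main(void)
    {
            printf("reset (0):        key at %zu\n", key_offset(0));
            printf("no change (~0):   key at %zu\n", key_offset(ETH_RXFH_INDIR_NO_CHANGE));
            printf("128-entry table:  key at %zu\n", key_offset(128));
            return 0;
    }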
--- /dev/null
+From 0ec1853d400ddf2e5c97572bb42814a2c8508d9d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Jul 2024 16:42:49 -0700
+Subject: ethtool: rss: echo the context number back
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit f96aae91b0d260f682e630e092ef70a05a718a43 ]
+
+The response to a GET request in Netlink should fully identify
+the queried object. RSS_GET accepts context id as an input,
+so it must echo that attribute back to the response.
+
+After (assuming context 1 has been created):
+
+ $ ./cli.py --spec netlink/specs/ethtool.yaml \
+ --do rss-get \
+ --json '{"header": {"dev-index": 2}, "context": 1}'
+ {'context': 1,
+ 'header': {'dev-index': 2, 'dev-name': 'eth0'},
+ [...]
+
+Fixes: 7112a04664bf ("ethtool: add netlink based get rss support")
+Acked-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Joe Damato <jdamato@fastly.com>
+Link: https://patch.msgid.link/20240724234249.2621109-3-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Documentation/netlink/specs/ethtool.yaml | 1 +
+ Documentation/networking/ethtool-netlink.rst | 1 +
+ net/ethtool/rss.c | 8 +++++++-
+ 3 files changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/Documentation/netlink/specs/ethtool.yaml b/Documentation/netlink/specs/ethtool.yaml
+index 3632c1c891e94..238145c31835e 100644
+--- a/Documentation/netlink/specs/ethtool.yaml
++++ b/Documentation/netlink/specs/ethtool.yaml
+@@ -1638,6 +1638,7 @@ operations:
+ reply:
+ attributes:
+ - header
++ - context
+ - hfunc
+ - indir
+ - hkey
+diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst
+index 160bfb0ae8bae..0d8c487be3993 100644
+--- a/Documentation/networking/ethtool-netlink.rst
++++ b/Documentation/networking/ethtool-netlink.rst
+@@ -1800,6 +1800,7 @@ Kernel response contents:
+
+ ===================================== ====== ==========================
+ ``ETHTOOL_A_RSS_HEADER`` nested reply header
++ ``ETHTOOL_A_RSS_CONTEXT`` u32 context number
+ ``ETHTOOL_A_RSS_HFUNC`` u32 RSS hash func
+ ``ETHTOOL_A_RSS_INDIR`` binary Indir table bytes
+ ``ETHTOOL_A_RSS_HKEY`` binary Hash key bytes
+diff --git a/net/ethtool/rss.c b/net/ethtool/rss.c
+index 71679137eff21..5c4c4505ab9a4 100644
+--- a/net/ethtool/rss.c
++++ b/net/ethtool/rss.c
+@@ -111,7 +111,8 @@ rss_reply_size(const struct ethnl_req_info *req_base,
+ const struct rss_reply_data *data = RSS_REPDATA(reply_base);
+ int len;
+
+- len = nla_total_size(sizeof(u32)) + /* _RSS_HFUNC */
++ len = nla_total_size(sizeof(u32)) + /* _RSS_CONTEXT */
++ nla_total_size(sizeof(u32)) + /* _RSS_HFUNC */
+ nla_total_size(sizeof(u32)) + /* _RSS_INPUT_XFRM */
+ nla_total_size(sizeof(u32) * data->indir_size) + /* _RSS_INDIR */
+ nla_total_size(data->hkey_size); /* _RSS_HKEY */
+@@ -124,6 +125,11 @@ rss_fill_reply(struct sk_buff *skb, const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+ {
+ const struct rss_reply_data *data = RSS_REPDATA(reply_base);
++ struct rss_req_info *request = RSS_REQINFO(req_base);
++
++ if (request->rss_context &&
++ nla_put_u32(skb, ETHTOOL_A_RSS_CONTEXT, request->rss_context))
++ return -EMSGSIZE;
+
+ if ((data->hfunc &&
+ nla_put_u32(skb, ETHTOOL_A_RSS_HFUNC, data->hfunc)) ||
+--
+2.43.0
+
--- /dev/null
+From e27bfbafce976f8887b83a0c835641cc58e8df38 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 27 Jun 2024 17:08:52 +0300
+Subject: ethtool: Veto some operations during firmware flashing process
+
+From: Danielle Ratson <danieller@nvidia.com>
+
+[ Upstream commit 31e0aa99dc02b2b038a270b0670fc8201b69ec8a ]
+
+Some operations cannot be performed during the firmware flashing
+process.
+
+For example:
+
+- Port must be down during the whole flashing process to avoid packet loss
+ while committing reset for example.
+
+- Writing to EEPROM interrupts the flashing process, so operations like
+ ethtool dump, module reset, get and set power mode should be vetoed.
+
+- Split port firmware flashing should be vetoed.
+
+In order to veto those scenarios, add a flag in 'struct net_device' that
+indicates when a firmware flash is taking place on the module and use it
+to prevent interruptions during the process.
+
+Signed-off-by: Danielle Ratson <danieller@nvidia.com>
+Reviewed-by: Petr Machata <petrm@nvidia.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: 7195f0ef7f5b ("ethtool: fix setting key and resetting indir at once")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/netdevice.h | 4 +++-
+ net/ethtool/eeprom.c | 6 ++++++
+ net/ethtool/ioctl.c | 12 ++++++++++++
+ net/ethtool/netlink.c | 12 ++++++++++++
+ 4 files changed, 33 insertions(+), 1 deletion(-)
+
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index d20c6c99eb887..ccba9f145edaa 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1989,6 +1989,8 @@ enum netdev_reg_state {
+ *
+ * @threaded: napi threaded mode is enabled
+ *
++ * @module_fw_flash_in_progress: Module firmware flashing is in progress.
++ *
+ * @net_notifier_list: List of per-net netdev notifier block
+ * that follow this device when it is moved
+ * to another network namespace.
+@@ -2373,7 +2375,7 @@ struct net_device {
+ bool proto_down;
+ bool threaded;
+ unsigned wol_enabled:1;
+-
++ unsigned module_fw_flash_in_progress:1;
+ struct list_head net_notifier_list;
+
+ #if IS_ENABLED(CONFIG_MACSEC)
+diff --git a/net/ethtool/eeprom.c b/net/ethtool/eeprom.c
+index 6209c3a9c8f72..f36811b3ecf16 100644
+--- a/net/ethtool/eeprom.c
++++ b/net/ethtool/eeprom.c
+@@ -91,6 +91,12 @@ static int get_module_eeprom_by_page(struct net_device *dev,
+ {
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+
++ if (dev->module_fw_flash_in_progress) {
++ NL_SET_ERR_MSG(extack,
++ "Module firmware flashing is in progress");
++ return -EBUSY;
++ }
++
+ if (dev->sfp_bus)
+ return sfp_get_module_eeprom_by_page(dev->sfp_bus, page_data, extack);
+
+diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
+index 223dcd25d88a2..3c8821adc4891 100644
+--- a/net/ethtool/ioctl.c
++++ b/net/ethtool/ioctl.c
+@@ -658,6 +658,9 @@ static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
+ if (!dev->ethtool_ops->get_link_ksettings)
+ return -EOPNOTSUPP;
+
++ if (dev->module_fw_flash_in_progress)
++ return -EBUSY;
++
+ memset(&link_ksettings, 0, sizeof(link_ksettings));
+ err = dev->ethtool_ops->get_link_ksettings(dev, &link_ksettings);
+ if (err < 0)
+@@ -1450,6 +1453,9 @@ static int ethtool_reset(struct net_device *dev, char __user *useraddr)
+ if (!dev->ethtool_ops->reset)
+ return -EOPNOTSUPP;
+
++ if (dev->module_fw_flash_in_progress)
++ return -EBUSY;
++
+ if (copy_from_user(&reset, useraddr, sizeof(reset)))
+ return -EFAULT;
+
+@@ -2463,6 +2469,9 @@ int ethtool_get_module_info_call(struct net_device *dev,
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct phy_device *phydev = dev->phydev;
+
++ if (dev->module_fw_flash_in_progress)
++ return -EBUSY;
++
+ if (dev->sfp_bus)
+ return sfp_get_module_info(dev->sfp_bus, modinfo);
+
+@@ -2500,6 +2509,9 @@ int ethtool_get_module_eeprom_call(struct net_device *dev,
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct phy_device *phydev = dev->phydev;
+
++ if (dev->module_fw_flash_in_progress)
++ return -EBUSY;
++
+ if (dev->sfp_bus)
+ return sfp_get_module_eeprom(dev->sfp_bus, ee, data);
+
+diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
+index bd04f28d5cf4b..5b42c736d7254 100644
+--- a/net/ethtool/netlink.c
++++ b/net/ethtool/netlink.c
+@@ -760,10 +760,22 @@ static void ethnl_notify_features(struct netdev_notifier_info *info)
+ static int ethnl_netdev_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+ {
++ struct netdev_notifier_info *info = ptr;
++ struct netlink_ext_ack *extack;
++ struct net_device *dev;
++
++ dev = netdev_notifier_info_to_dev(info);
++ extack = netdev_notifier_info_to_extack(info);
++
+ switch (event) {
+ case NETDEV_FEAT_CHANGE:
+ ethnl_notify_features(ptr);
+ break;
++ case NETDEV_PRE_UP:
++ if (dev->module_fw_flash_in_progress) {
++ NL_SET_ERR_MSG(extack, "Can't set port up while flashing module firmware");
++ return NOTIFY_BAD;
++ }
+ }
+
+ return NOTIFY_DONE;
+--
+2.43.0
+
--- /dev/null
+From 5874eea8d3dc92bbc77ef0ccdbe31bbda7a355d7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 18 Jul 2024 16:46:16 +0530
+Subject: HID: amd_sfh: Move sensor discovery before HID device initialization
+
+From: Basavaraj Natikar <Basavaraj.Natikar@amd.com>
+
+[ Upstream commit 8031b001da700474c11d28629581480b12a0d8d4 ]
+
+Sensor discovery is independent of HID device initialization. If sensor
+discovery fails after HID initialization, then the HID device needs to be
+deinitialized. Therefore, sensor discovery should be moved before HID
+device initialization.
+
+Fixes: 7bcfdab3f0c6 ("HID: amd_sfh: if no sensors are enabled, clean up")
+Tested-by: Aurinko <petrvelicka@tuta.io>
+Signed-off-by: Basavaraj Natikar <Basavaraj.Natikar@amd.com>
+Link: https://patch.msgid.link/20240718111616.3012155-1-Basavaraj.Natikar@amd.com
+Signed-off-by: Benjamin Tissoires <bentiss@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hid/amd-sfh-hid/amd_sfh_client.c | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+index bdb578e0899f5..4b59687ff5d82 100644
+--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
++++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+@@ -288,12 +288,22 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
+ mp2_ops->start(privdata, info);
+ cl_data->sensor_sts[i] = amd_sfh_wait_for_response
+ (privdata, cl_data->sensor_idx[i], SENSOR_ENABLED);
++
++ if (cl_data->sensor_sts[i] == SENSOR_ENABLED)
++ cl_data->is_any_sensor_enabled = true;
++ }
++
++ if (!cl_data->is_any_sensor_enabled ||
++ (mp2_ops->discovery_status && mp2_ops->discovery_status(privdata) == 0)) {
++ dev_warn(dev, "Failed to discover, sensors not enabled is %d\n",
++ cl_data->is_any_sensor_enabled);
++ rc = -EOPNOTSUPP;
++ goto cleanup;
+ }
+
+ for (i = 0; i < cl_data->num_hid_devices; i++) {
+ cl_data->cur_hid_dev = i;
+ if (cl_data->sensor_sts[i] == SENSOR_ENABLED) {
+- cl_data->is_any_sensor_enabled = true;
+ rc = amdtp_hid_probe(i, cl_data);
+ if (rc)
+ goto cleanup;
+@@ -305,12 +315,6 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
+ cl_data->sensor_sts[i]);
+ }
+
+- if (!cl_data->is_any_sensor_enabled ||
+- (mp2_ops->discovery_status && mp2_ops->discovery_status(privdata) == 0)) {
+- dev_warn(dev, "Failed to discover, sensors not enabled is %d\n", cl_data->is_any_sensor_enabled);
+- rc = -EOPNOTSUPP;
+- goto cleanup;
+- }
+ schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
+ return 0;
+
+--
+2.43.0
+
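The reordering above follows a general rule: run the cheap validation step before the expensive construction step, so that a validation failure leaves nothing to unwind. Below is a minimal userspace sketch of that ordering; the helper names are placeholders, not amd_sfh functions.

/* Minimal "check before you create" ordering, as in the patch above.
 * Build: cc -o order order.c
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool any_sensor_enabled(void)
{
    return false;                 /* pretend discovery came up empty */
}

static int create_hid_devices(void)
{
    puts("HID devices created");  /* the expensive step */
    return 0;
}

static int client_init(void)
{
    /* Old order: create devices first, discover second, and tear the
     * devices down again on failure.  New order: the cheap discovery
     * check runs first, so a failure has nothing to unwind. */
    if (!any_sensor_enabled())
        return -EOPNOTSUPP;

    return create_hid_devices();
}

int main(void)
{
    printf("client_init() = %d\n", client_init());
    return 0;
}
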
--- /dev/null
+From b4bc5c2f5906f7be676c60b3215470be7185c04d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 28 Jun 2024 17:56:43 -0700
+Subject: i915/perf: Remove code to update PWR_CLK_STATE for gen12
+
+From: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
+
+[ Upstream commit 4bc14b9cfaa2149d41baef2f2620e9f82d9847d7 ]
+
+PWR_CLK_STATE only needs to be modified up until gen11; for gen12 this
+code is not applicable. Remove the code that updates the context image
+with PWR_CLK_STATE for gen12.
+
+Fixes: 00a7f0d7155c ("drm/i915/tgl: Add perf support on TGL")
+Signed-off-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
+Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240629005643.3050678-1-umesh.nerlige.ramappa@intel.com
+(cherry picked from commit 7b5bdae7740eb6a3d09f9cd4e4b07362a15b86b3)
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/i915_perf.c | 33 --------------------------------
+ 1 file changed, 33 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
+index 0b1cd4c7a525f..025a79fe5920e 100644
+--- a/drivers/gpu/drm/i915/i915_perf.c
++++ b/drivers/gpu/drm/i915/i915_perf.c
+@@ -2748,26 +2748,6 @@ oa_configure_all_contexts(struct i915_perf_stream *stream,
+ return 0;
+ }
+
+-static int
+-gen12_configure_all_contexts(struct i915_perf_stream *stream,
+- const struct i915_oa_config *oa_config,
+- struct i915_active *active)
+-{
+- struct flex regs[] = {
+- {
+- GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE),
+- CTX_R_PWR_CLK_STATE,
+- },
+- };
+-
+- if (stream->engine->class != RENDER_CLASS)
+- return 0;
+-
+- return oa_configure_all_contexts(stream,
+- regs, ARRAY_SIZE(regs),
+- active);
+-}
+-
+ static int
+ lrc_configure_all_contexts(struct i915_perf_stream *stream,
+ const struct i915_oa_config *oa_config,
+@@ -2874,7 +2854,6 @@ gen12_enable_metric_set(struct i915_perf_stream *stream,
+ {
+ struct drm_i915_private *i915 = stream->perf->i915;
+ struct intel_uncore *uncore = stream->uncore;
+- struct i915_oa_config *oa_config = stream->oa_config;
+ bool periodic = stream->periodic;
+ u32 period_exponent = stream->period_exponent;
+ u32 sqcnt1;
+@@ -2918,15 +2897,6 @@ gen12_enable_metric_set(struct i915_perf_stream *stream,
+
+ intel_uncore_rmw(uncore, GEN12_SQCNT1, 0, sqcnt1);
+
+- /*
+- * Update all contexts prior writing the mux configurations as we need
+- * to make sure all slices/subslices are ON before writing to NOA
+- * registers.
+- */
+- ret = gen12_configure_all_contexts(stream, oa_config, active);
+- if (ret)
+- return ret;
+-
+ /*
+ * For Gen12, performance counters are context
+ * saved/restored. Only enable it for the context that
+@@ -2980,9 +2950,6 @@ static void gen12_disable_metric_set(struct i915_perf_stream *stream)
+ _MASKED_BIT_DISABLE(GEN12_DISABLE_DOP_GATING));
+ }
+
+- /* Reset all contexts' slices/subslices configurations. */
+- gen12_configure_all_contexts(stream, NULL, NULL);
+-
+ /* disable the context save/restore or OAR counters */
+ if (stream->ctx)
+ gen12_configure_oar_context(stream, NULL);
+--
+2.43.0
+
--- /dev/null
+From 5e16b6bf5bc16bf63a97e37543b24bf439b514d3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Jul 2024 20:17:15 +0200
+Subject: ice: add missing WRITE_ONCE when clearing ice_rx_ring::xdp_prog
+
+From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+
+[ Upstream commit 6044ca26210ba72b3dcc649fae1cbedd9e6ab018 ]
+
+It is read by the data path and modified from process context on a
+remote CPU, so WRITE_ONCE is needed to clear the pointer.
+
+Fixes: efc2214b6047 ("ice: Add support for XDP")
+Reviewed-by: Shannon Nelson <shannon.nelson@amd.com>
+Tested-by: Chandan Kumar Rout <chandanx.rout@intel.com> (A Contingent Worker at Intel)
+Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_txrx.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
+index 0f91e91674277..8d25b69812698 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
+@@ -456,7 +456,7 @@ void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
+ if (rx_ring->vsi->type == ICE_VSI_PF)
+ if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+- rx_ring->xdp_prog = NULL;
++ WRITE_ONCE(rx_ring->xdp_prog, NULL);
+ if (rx_ring->xsk_pool) {
+ kfree(rx_ring->xdp_buf);
+ rx_ring->xdp_buf = NULL;
+--
+2.43.0
+
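The one-line change above clears the pointer with WRITE_ONCE() so the store cannot be torn or re-issued by the compiler, matching READ_ONCE()-style loads on the data path. Below is a minimal userspace model of that pairing, using C11 relaxed atomics in place of the kernel's volatile-based macros; the struct and field names are invented and object lifetime (handled separately in the driver) is ignored.

/* Userspace model of the WRITE_ONCE/READ_ONCE pairing used above.
 * Build: cc -std=c11 -pthread once_model.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct prog { int id; };

struct ring {
    _Atomic(struct prog *) prog;    /* plays the role of rx_ring->xdp_prog */
};

static struct prog the_prog = { .id = 42 };
static struct ring the_ring = { .prog = &the_prog };

static void *data_path(void *arg)
{
    (void)arg;
    for (int i = 0; i < 100000; i++) {
        /* READ_ONCE(): one non-torn load the compiler cannot re-read */
        struct prog *p = atomic_load_explicit(&the_ring.prog,
                                              memory_order_relaxed);
        if (p)
            (void)p->id;    /* safe: p cannot change underneath us */
    }
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, data_path, NULL);
    /* WRITE_ONCE(): publish NULL as one single store, never torn */
    atomic_store_explicit(&the_ring.prog, NULL, memory_order_relaxed);
    pthread_join(t, NULL);
    puts("pointer cleared while the reader was running");
    return 0;
}
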
--- /dev/null
+From 73103ba53643fecc08373a220ddbf1deb85e5001 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Jul 2024 20:17:10 +0200
+Subject: ice: don't busy wait for Rx queue disable in ice_qp_dis()
+
+From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+
+[ Upstream commit 1ff72a2f67791cd4ddad19ed830445f57b30e992 ]
+
+When the ice driver is spammed with multiple xdpsock instances and flow
+control is enabled, there are cases when the Rx queue gets stuck and is
+unable to reflect the disable state in the QRX_CTRL register. A similar
+issue has previously been addressed in commit 13a6233b033f ("ice: Add
+support to enable/disable all Rx queues before waiting").
+
+To work around this, simply do not wait for a disabled state, as a later
+patch will make sure that, regardless of any error encountered while
+disabling a queue pair, the Rx queue will be enabled.
+
+Fixes: 2d4238f55697 ("ice: Add support for AF_XDP")
+Reviewed-by: Shannon Nelson <shannon.nelson@amd.com>
+Tested-by: Chandan Kumar Rout <chandanx.rout@intel.com> (A Contingent Worker at Intel)
+Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_xsk.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
+index 72738b8b8a68e..3104a5657b837 100644
+--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
+@@ -199,10 +199,8 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
+ if (err)
+ return err;
+ }
+- err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
+- if (err)
+- return err;
+
++ ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, false);
+ ice_qp_clean_rings(vsi, q_idx);
+ ice_qp_reset_stats(vsi, q_idx);
+
+--
+2.43.0
+
--- /dev/null
+From 92bf20390fea965d8e5b2c219ec0d6919e567774 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Jul 2024 20:17:14 +0200
+Subject: ice: improve updating ice_{t,r}x_ring::xsk_pool
+
+From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+
+[ Upstream commit ebc33a3f8d0aeddf19fd5827add24b82ae171829 ]
+
+The xsk_buff_pool pointers held by the ice ring structs are updated via
+ndo_bpf, which is executed in process context, while they can be read by
+a remote CPU at the same time within NAPI poll. Use synchronize_net()
+after the pointer update and {READ,WRITE}_ONCE() when working with the
+pointer.
+
+Fixes: 2d4238f55697 ("ice: Add support for AF_XDP")
+Reviewed-by: Shannon Nelson <shannon.nelson@amd.com>
+Tested-by: Chandan Kumar Rout <chandanx.rout@intel.com> (A Contingent Worker at Intel)
+Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice.h | 11 ++-
+ drivers/net/ethernet/intel/ice/ice_base.c | 4 +-
+ drivers/net/ethernet/intel/ice/ice_main.c | 2 +-
+ drivers/net/ethernet/intel/ice/ice_txrx.c | 8 +-
+ drivers/net/ethernet/intel/ice/ice_xsk.c | 103 ++++++++++++++--------
+ drivers/net/ethernet/intel/ice/ice_xsk.h | 14 ++-
+ 6 files changed, 87 insertions(+), 55 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
+index 99a75a59078ef..caaa10157909e 100644
+--- a/drivers/net/ethernet/intel/ice/ice.h
++++ b/drivers/net/ethernet/intel/ice/ice.h
+@@ -765,18 +765,17 @@ static inline struct xsk_buff_pool *ice_get_xp_from_qid(struct ice_vsi *vsi,
+ }
+
+ /**
+- * ice_xsk_pool - get XSK buffer pool bound to a ring
++ * ice_rx_xsk_pool - assign XSK buff pool to Rx ring
+ * @ring: Rx ring to use
+ *
+- * Returns a pointer to xsk_buff_pool structure if there is a buffer pool
+- * present, NULL otherwise.
++ * Sets XSK buff pool pointer on Rx ring.
+ */
+-static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
++static inline void ice_rx_xsk_pool(struct ice_rx_ring *ring)
+ {
+ struct ice_vsi *vsi = ring->vsi;
+ u16 qid = ring->q_index;
+
+- return ice_get_xp_from_qid(vsi, qid);
++ WRITE_ONCE(ring->xsk_pool, ice_get_xp_from_qid(vsi, qid));
+ }
+
+ /**
+@@ -801,7 +800,7 @@ static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
+ if (!ring)
+ return;
+
+- ring->xsk_pool = ice_get_xp_from_qid(vsi, qid);
++ WRITE_ONCE(ring->xsk_pool, ice_get_xp_from_qid(vsi, qid));
+ }
+
+ /**
+diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
+index 5d396c1a77314..1facf179a96fd 100644
+--- a/drivers/net/ethernet/intel/ice/ice_base.c
++++ b/drivers/net/ethernet/intel/ice/ice_base.c
+@@ -536,7 +536,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
+ return err;
+ }
+
+- ring->xsk_pool = ice_xsk_pool(ring);
++ ice_rx_xsk_pool(ring);
+ if (ring->xsk_pool) {
+ xdp_rxq_info_unreg(&ring->xdp_rxq);
+
+@@ -597,7 +597,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
+ return 0;
+ }
+
+- ok = ice_alloc_rx_bufs_zc(ring, num_bufs);
++ ok = ice_alloc_rx_bufs_zc(ring, ring->xsk_pool, num_bufs);
+ if (!ok) {
+ u16 pf_q = ring->vsi->rxq_map[ring->q_index];
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 55a42aad92a51..9b075dd48889e 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -2949,7 +2949,7 @@ static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
+ ice_for_each_rxq(vsi, i) {
+ struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
+
+- if (rx_ring->xsk_pool)
++ if (READ_ONCE(rx_ring->xsk_pool))
+ napi_schedule(&rx_ring->q_vector->napi);
+ }
+ }
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
+index 8bb743f78fcb4..0f91e91674277 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
+@@ -1521,10 +1521,11 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
+ * budget and be more aggressive about cleaning up the Tx descriptors.
+ */
+ ice_for_each_tx_ring(tx_ring, q_vector->tx) {
++ struct xsk_buff_pool *xsk_pool = READ_ONCE(tx_ring->xsk_pool);
+ bool wd;
+
+- if (tx_ring->xsk_pool)
+- wd = ice_xmit_zc(tx_ring);
++ if (xsk_pool)
++ wd = ice_xmit_zc(tx_ring, xsk_pool);
+ else if (ice_ring_is_xdp(tx_ring))
+ wd = true;
+ else
+@@ -1550,6 +1551,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
+ budget_per_ring = budget;
+
+ ice_for_each_rx_ring(rx_ring, q_vector->rx) {
++ struct xsk_buff_pool *xsk_pool = READ_ONCE(rx_ring->xsk_pool);
+ int cleaned;
+
+ /* A dedicated path for zero-copy allows making a single
+@@ -1557,7 +1559,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
+ * ice_clean_rx_irq function and makes the codebase cleaner.
+ */
+ cleaned = rx_ring->xsk_pool ?
+- ice_clean_rx_irq_zc(rx_ring, budget_per_ring) :
++ ice_clean_rx_irq_zc(rx_ring, xsk_pool, budget_per_ring) :
+ ice_clean_rx_irq(rx_ring, budget_per_ring);
+ work_done += cleaned;
+ /* if we clean as many as budgeted, we must not be done */
+diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
+index 3fbe4cfadfbfa..ee084ad80a613 100644
+--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
+@@ -250,6 +250,8 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
+ ice_qvec_toggle_napi(vsi, q_vector, true);
+ ice_qvec_ena_irq(vsi, q_vector);
+
++ /* make sure NAPI sees updated ice_{t,x}_ring::xsk_pool */
++ synchronize_net();
+ ice_get_link_status(vsi->port_info, &link_up);
+ if (link_up) {
+ netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+@@ -464,6 +466,7 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
+ /**
+ * __ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
+ * @rx_ring: Rx ring
++ * @xsk_pool: XSK buffer pool to pick buffers to be filled by HW
+ * @count: The number of buffers to allocate
+ *
+ * Place the @count of descriptors onto Rx ring. Handle the ring wrap
+@@ -472,7 +475,8 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
+ *
+ * Returns true if all allocations were successful, false if any fail.
+ */
+-static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
++static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring,
++ struct xsk_buff_pool *xsk_pool, u16 count)
+ {
+ u32 nb_buffs_extra = 0, nb_buffs = 0;
+ union ice_32b_rx_flex_desc *rx_desc;
+@@ -484,8 +488,7 @@ static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
+ xdp = ice_xdp_buf(rx_ring, ntu);
+
+ if (ntu + count >= rx_ring->count) {
+- nb_buffs_extra = ice_fill_rx_descs(rx_ring->xsk_pool, xdp,
+- rx_desc,
++ nb_buffs_extra = ice_fill_rx_descs(xsk_pool, xdp, rx_desc,
+ rx_ring->count - ntu);
+ if (nb_buffs_extra != rx_ring->count - ntu) {
+ ntu += nb_buffs_extra;
+@@ -498,7 +501,7 @@ static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
+ ice_release_rx_desc(rx_ring, 0);
+ }
+
+- nb_buffs = ice_fill_rx_descs(rx_ring->xsk_pool, xdp, rx_desc, count);
++ nb_buffs = ice_fill_rx_descs(xsk_pool, xdp, rx_desc, count);
+
+ ntu += nb_buffs;
+ if (ntu == rx_ring->count)
+@@ -514,6 +517,7 @@ static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
+ /**
+ * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
+ * @rx_ring: Rx ring
++ * @xsk_pool: XSK buffer pool to pick buffers to be filled by HW
+ * @count: The number of buffers to allocate
+ *
+ * Wrapper for internal allocation routine; figure out how many tail
+@@ -521,7 +525,8 @@ static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
+ *
+ * Returns true if all calls to internal alloc routine succeeded
+ */
+-bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
++bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring,
++ struct xsk_buff_pool *xsk_pool, u16 count)
+ {
+ u16 rx_thresh = ICE_RING_QUARTER(rx_ring);
+ u16 leftover, i, tail_bumps;
+@@ -530,9 +535,9 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
+ leftover = count - (tail_bumps * rx_thresh);
+
+ for (i = 0; i < tail_bumps; i++)
+- if (!__ice_alloc_rx_bufs_zc(rx_ring, rx_thresh))
++ if (!__ice_alloc_rx_bufs_zc(rx_ring, xsk_pool, rx_thresh))
+ return false;
+- return __ice_alloc_rx_bufs_zc(rx_ring, leftover);
++ return __ice_alloc_rx_bufs_zc(rx_ring, xsk_pool, leftover);
+ }
+
+ /**
+@@ -601,8 +606,10 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
+ /**
+ * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
+ * @xdp_ring: XDP Tx ring
++ * @xsk_pool: AF_XDP buffer pool pointer
+ */
+-static u32 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
++static u32 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring,
++ struct xsk_buff_pool *xsk_pool)
+ {
+ u16 ntc = xdp_ring->next_to_clean;
+ struct ice_tx_desc *tx_desc;
+@@ -653,7 +660,7 @@ static u32 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
+ if (xdp_ring->next_to_clean >= cnt)
+ xdp_ring->next_to_clean -= cnt;
+ if (xsk_frames)
+- xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
++ xsk_tx_completed(xsk_pool, xsk_frames);
+
+ return completed_frames;
+ }
+@@ -662,6 +669,7 @@ static u32 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
+ * ice_xmit_xdp_tx_zc - AF_XDP ZC handler for XDP_TX
+ * @xdp: XDP buffer to xmit
+ * @xdp_ring: XDP ring to produce descriptor onto
++ * @xsk_pool: AF_XDP buffer pool pointer
+ *
+ * note that this function works directly on xdp_buff, no need to convert
+ * it to xdp_frame. xdp_buff pointer is stored to ice_tx_buf so that cleaning
+@@ -671,7 +679,8 @@ static u32 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
+ * was not enough space on XDP ring
+ */
+ static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
+- struct ice_tx_ring *xdp_ring)
++ struct ice_tx_ring *xdp_ring,
++ struct xsk_buff_pool *xsk_pool)
+ {
+ struct skb_shared_info *sinfo = NULL;
+ u32 size = xdp->data_end - xdp->data;
+@@ -685,7 +694,7 @@ static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
+
+ free_space = ICE_DESC_UNUSED(xdp_ring);
+ if (free_space < ICE_RING_QUARTER(xdp_ring))
+- free_space += ice_clean_xdp_irq_zc(xdp_ring);
++ free_space += ice_clean_xdp_irq_zc(xdp_ring, xsk_pool);
+
+ if (unlikely(!free_space))
+ goto busy;
+@@ -705,7 +714,7 @@ static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
+ dma_addr_t dma;
+
+ dma = xsk_buff_xdp_get_dma(xdp);
+- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, size);
++ xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, size);
+
+ tx_buf->xdp = xdp;
+ tx_buf->type = ICE_TX_BUF_XSK_TX;
+@@ -747,12 +756,14 @@ static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
+ * @xdp: xdp_buff used as input to the XDP program
+ * @xdp_prog: XDP program to run
+ * @xdp_ring: ring to be used for XDP_TX action
++ * @xsk_pool: AF_XDP buffer pool pointer
+ *
+ * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
+ */
+ static int
+ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+- struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
++ struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
++ struct xsk_buff_pool *xsk_pool)
+ {
+ int err, result = ICE_XDP_PASS;
+ u32 act;
+@@ -763,7 +774,7 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+ if (!err)
+ return ICE_XDP_REDIR;
+- if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
++ if (xsk_uses_need_wakeup(xsk_pool) && err == -ENOBUFS)
+ result = ICE_XDP_EXIT;
+ else
+ result = ICE_XDP_CONSUMED;
+@@ -774,7 +785,7 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+- result = ice_xmit_xdp_tx_zc(xdp, xdp_ring);
++ result = ice_xmit_xdp_tx_zc(xdp, xdp_ring, xsk_pool);
+ if (result == ICE_XDP_CONSUMED)
+ goto out_failure;
+ break;
+@@ -826,14 +837,16 @@ ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first,
+ /**
+ * ice_clean_rx_irq_zc - consumes packets from the hardware ring
+ * @rx_ring: AF_XDP Rx ring
++ * @xsk_pool: AF_XDP buffer pool pointer
+ * @budget: NAPI budget
+ *
+ * Returns number of processed packets on success, remaining budget on failure.
+ */
+-int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
++int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
++ struct xsk_buff_pool *xsk_pool,
++ int budget)
+ {
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+- struct xsk_buff_pool *xsk_pool = rx_ring->xsk_pool;
+ u32 ntc = rx_ring->next_to_clean;
+ u32 ntu = rx_ring->next_to_use;
+ struct xdp_buff *first = NULL;
+@@ -896,7 +909,8 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
+ if (ice_is_non_eop(rx_ring, rx_desc))
+ continue;
+
+- xdp_res = ice_run_xdp_zc(rx_ring, first, xdp_prog, xdp_ring);
++ xdp_res = ice_run_xdp_zc(rx_ring, first, xdp_prog, xdp_ring,
++ xsk_pool);
+ if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) {
+ xdp_xmit |= xdp_res;
+ } else if (xdp_res == ICE_XDP_EXIT) {
+@@ -945,7 +959,8 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
+ rx_ring->next_to_clean = ntc;
+ entries_to_alloc = ICE_RX_DESC_UNUSED(rx_ring);
+ if (entries_to_alloc > ICE_RING_QUARTER(rx_ring))
+- failure |= !ice_alloc_rx_bufs_zc(rx_ring, entries_to_alloc);
++ failure |= !ice_alloc_rx_bufs_zc(rx_ring, xsk_pool,
++ entries_to_alloc);
+
+ ice_finalize_xdp_rx(xdp_ring, xdp_xmit, 0);
+ ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
+@@ -968,17 +983,19 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
+ /**
+ * ice_xmit_pkt - produce a single HW Tx descriptor out of AF_XDP descriptor
+ * @xdp_ring: XDP ring to produce the HW Tx descriptor on
++ * @xsk_pool: XSK buffer pool to pick buffers to be consumed by HW
+ * @desc: AF_XDP descriptor to pull the DMA address and length from
+ * @total_bytes: bytes accumulator that will be used for stats update
+ */
+-static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc,
++static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring,
++ struct xsk_buff_pool *xsk_pool, struct xdp_desc *desc,
+ unsigned int *total_bytes)
+ {
+ struct ice_tx_desc *tx_desc;
+ dma_addr_t dma;
+
+- dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
+- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);
++ dma = xsk_buff_raw_get_dma(xsk_pool, desc->addr);
++ xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, desc->len);
+
+ tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
+ tx_desc->buf_addr = cpu_to_le64(dma);
+@@ -991,10 +1008,13 @@ static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc,
+ /**
+ * ice_xmit_pkt_batch - produce a batch of HW Tx descriptors out of AF_XDP descriptors
+ * @xdp_ring: XDP ring to produce the HW Tx descriptors on
++ * @xsk_pool: XSK buffer pool to pick buffers to be consumed by HW
+ * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
+ * @total_bytes: bytes accumulator that will be used for stats update
+ */
+-static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
++static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring,
++ struct xsk_buff_pool *xsk_pool,
++ struct xdp_desc *descs,
+ unsigned int *total_bytes)
+ {
+ u16 ntu = xdp_ring->next_to_use;
+@@ -1004,8 +1024,8 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *de
+ loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
+ dma_addr_t dma;
+
+- dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, descs[i].addr);
+- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, descs[i].len);
++ dma = xsk_buff_raw_get_dma(xsk_pool, descs[i].addr);
++ xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, descs[i].len);
+
+ tx_desc = ICE_TX_DESC(xdp_ring, ntu++);
+ tx_desc->buf_addr = cpu_to_le64(dma);
+@@ -1021,37 +1041,41 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *de
+ /**
+ * ice_fill_tx_hw_ring - produce the number of Tx descriptors onto ring
+ * @xdp_ring: XDP ring to produce the HW Tx descriptors on
++ * @xsk_pool: XSK buffer pool to pick buffers to be consumed by HW
+ * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
+ * @nb_pkts: count of packets to be send
+ * @total_bytes: bytes accumulator that will be used for stats update
+ */
+-static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
+- u32 nb_pkts, unsigned int *total_bytes)
++static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring,
++ struct xsk_buff_pool *xsk_pool,
++ struct xdp_desc *descs, u32 nb_pkts,
++ unsigned int *total_bytes)
+ {
+ u32 batched, leftover, i;
+
+ batched = ALIGN_DOWN(nb_pkts, PKTS_PER_BATCH);
+ leftover = nb_pkts & (PKTS_PER_BATCH - 1);
+ for (i = 0; i < batched; i += PKTS_PER_BATCH)
+- ice_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
++ ice_xmit_pkt_batch(xdp_ring, xsk_pool, &descs[i], total_bytes);
+ for (; i < batched + leftover; i++)
+- ice_xmit_pkt(xdp_ring, &descs[i], total_bytes);
++ ice_xmit_pkt(xdp_ring, xsk_pool, &descs[i], total_bytes);
+ }
+
+ /**
+ * ice_xmit_zc - take entries from XSK Tx ring and place them onto HW Tx ring
+ * @xdp_ring: XDP ring to produce the HW Tx descriptors on
++ * @xsk_pool: AF_XDP buffer pool pointer
+ *
+ * Returns true if there is no more work that needs to be done, false otherwise
+ */
+-bool ice_xmit_zc(struct ice_tx_ring *xdp_ring)
++bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, struct xsk_buff_pool *xsk_pool)
+ {
+- struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
++ struct xdp_desc *descs = xsk_pool->tx_descs;
+ u32 nb_pkts, nb_processed = 0;
+ unsigned int total_bytes = 0;
+ int budget;
+
+- ice_clean_xdp_irq_zc(xdp_ring);
++ ice_clean_xdp_irq_zc(xdp_ring, xsk_pool);
+
+ if (!netif_carrier_ok(xdp_ring->vsi->netdev) ||
+ !netif_running(xdp_ring->vsi->netdev))
+@@ -1060,25 +1084,26 @@ bool ice_xmit_zc(struct ice_tx_ring *xdp_ring)
+ budget = ICE_DESC_UNUSED(xdp_ring);
+ budget = min_t(u16, budget, ICE_RING_QUARTER(xdp_ring));
+
+- nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
++ nb_pkts = xsk_tx_peek_release_desc_batch(xsk_pool, budget);
+ if (!nb_pkts)
+ return true;
+
+ if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
+ nb_processed = xdp_ring->count - xdp_ring->next_to_use;
+- ice_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
++ ice_fill_tx_hw_ring(xdp_ring, xsk_pool, descs, nb_processed,
++ &total_bytes);
+ xdp_ring->next_to_use = 0;
+ }
+
+- ice_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
+- &total_bytes);
++ ice_fill_tx_hw_ring(xdp_ring, xsk_pool, &descs[nb_processed],
++ nb_pkts - nb_processed, &total_bytes);
+
+ ice_set_rs_bit(xdp_ring);
+ ice_xdp_ring_update_tail(xdp_ring);
+ ice_update_tx_ring_stats(xdp_ring, nb_pkts, total_bytes);
+
+- if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
+- xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);
++ if (xsk_uses_need_wakeup(xsk_pool))
++ xsk_set_tx_need_wakeup(xsk_pool);
+
+ return nb_pkts < budget;
+ }
+@@ -1111,7 +1136,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
+
+ ring = vsi->rx_rings[queue_id]->xdp_ring;
+
+- if (!ring->xsk_pool)
++ if (!READ_ONCE(ring->xsk_pool))
+ return -EINVAL;
+
+ /* The idea here is that if NAPI is running, mark a miss, so
+diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
+index 6fa181f080ef1..45adeb513253a 100644
+--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
++++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
+@@ -20,16 +20,20 @@ struct ice_vsi;
+ #ifdef CONFIG_XDP_SOCKETS
+ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool,
+ u16 qid);
+-int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget);
++int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
++ struct xsk_buff_pool *xsk_pool,
++ int budget);
+ int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
+-bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count);
++bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring,
++ struct xsk_buff_pool *xsk_pool, u16 count);
+ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
+ void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
+ void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
+-bool ice_xmit_zc(struct ice_tx_ring *xdp_ring);
++bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, struct xsk_buff_pool *xsk_pool);
+ int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc);
+ #else
+-static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring)
++static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring,
++ struct xsk_buff_pool __always_unused *xsk_pool)
+ {
+ return false;
+ }
+@@ -44,6 +48,7 @@ ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
+
+ static inline int
+ ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring,
++ struct xsk_buff_pool __always_unused *xsk_pool,
+ int __always_unused budget)
+ {
+ return 0;
+@@ -51,6 +56,7 @@ ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring,
+
+ static inline bool
+ ice_alloc_rx_bufs_zc(struct ice_rx_ring __always_unused *rx_ring,
++ struct xsk_buff_pool __always_unused *xsk_pool,
+ u16 __always_unused count)
+ {
+ return false;
+--
+2.43.0
+
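The series above converges on one pattern: snapshot ring->xsk_pool once with READ_ONCE() at the top of the NAPI poll and pass that local pointer into every helper, so a concurrent update from ndo_bpf cannot change the pool mid-poll. A stripped-down sketch of that "read once, pass down" shape follows; the types are invented and trivial helpers stand in for the real cleaning routines.

/* "Snapshot once, pass down" shape from the patch above.
 * Build: cc -std=c11 pool_snapshot.c
 */
#include <stdatomic.h>
#include <stdio.h>

struct pool { int id; };

struct rx_ring {
    _Atomic(struct pool *) xsk_pool;    /* republished from ndo_bpf */
};

static int clean_rx_zc(struct rx_ring *r, struct pool *pool, int budget)
{
    /* every use in here sees the same pool, even if the control path
     * republishes r->xsk_pool while we run */
    (void)r; (void)pool;
    return budget;
}

static int clean_rx_copy(struct rx_ring *r, int budget)
{
    (void)r;
    return budget;
}

static int napi_poll(struct rx_ring *r, int budget)
{
    /* one READ_ONCE-style load per poll invocation */
    struct pool *pool = atomic_load_explicit(&r->xsk_pool,
                                             memory_order_relaxed);

    return pool ? clean_rx_zc(r, pool, budget) : clean_rx_copy(r, budget);
}

int main(void)
{
    struct pool p = { .id = 1 };
    struct rx_ring r;

    atomic_init(&r.xsk_pool, &p);
    printf("zero-copy poll cleaned %d\n", napi_poll(&r, 64));
    atomic_store_explicit(&r.xsk_pool, NULL, memory_order_relaxed);
    printf("copy-mode poll cleaned %d\n", napi_poll(&r, 64));
    return 0;
}
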
--- /dev/null
+From c1f58799c6f47e2e54b0f36bdab464645a523573 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Jul 2024 20:17:12 +0200
+Subject: ice: modify error handling when setting XSK pool in ndo_bpf
+
+From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+
+[ Upstream commit d5922717994911e8f0eab736f3ba0d968c158823 ]
+
+Don't bail out as soon as an error is spotted within ice_qp_{dis,ena}();
+rather, track the error and go through the whole flow of disabling and
+enabling the queue pair.
+
+Fixes: 2d4238f55697 ("ice: Add support for AF_XDP")
+Reviewed-by: Shannon Nelson <shannon.nelson@amd.com>
+Tested-by: Chandan Kumar Rout <chandanx.rout@intel.com> (A Contingent Worker at Intel)
+Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_xsk.c | 30 +++++++++++++-----------
+ 1 file changed, 16 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
+index ba50af9a59293..902096b000f5a 100644
+--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
+@@ -162,6 +162,7 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
+ int timeout = 50;
++ int fail = 0;
+ int err;
+
+ if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
+@@ -186,8 +187,8 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
+
+ ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
+ err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
+- if (err)
+- return err;
++ if (!fail)
++ fail = err;
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
+
+@@ -195,15 +196,15 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
+ ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
+ err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
+ &txq_meta);
+- if (err)
+- return err;
++ if (!fail)
++ fail = err;
+ }
+
+ ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, false);
+ ice_qp_clean_rings(vsi, q_idx);
+ ice_qp_reset_stats(vsi, q_idx);
+
+- return 0;
++ return fail;
+ }
+
+ /**
+@@ -216,32 +217,33 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
+ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
+ {
+ struct ice_q_vector *q_vector;
++ int fail = 0;
+ int err;
+
+ err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
+- if (err)
+- return err;
++ if (!fail)
++ fail = err;
+
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
+
+ err = ice_vsi_cfg_single_txq(vsi, vsi->xdp_rings, q_idx);
+- if (err)
+- return err;
++ if (!fail)
++ fail = err;
+ ice_set_ring_xdp(xdp_ring);
+ ice_tx_xsk_pool(vsi, q_idx);
+ }
+
+ err = ice_vsi_cfg_single_rxq(vsi, q_idx);
+- if (err)
+- return err;
++ if (!fail)
++ fail = err;
+
+ q_vector = vsi->rx_rings[q_idx]->q_vector;
+ ice_qvec_cfg_msix(vsi, q_vector);
+
+ err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
+- if (err)
+- return err;
++ if (!fail)
++ fail = err;
+
+ ice_qvec_toggle_napi(vsi, q_vector, true);
+ ice_qvec_ena_irq(vsi, q_vector);
+@@ -249,7 +251,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
+ netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+ clear_bit(ICE_CFG_BUSY, vsi->state);
+
+- return 0;
++ return fail;
+ }
+
+ /**
+--
+2.43.0
+
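The patch above replaces early returns with a "remember the first error, but finish the whole flow" pattern, so every disable/enable step still runs. A generic, self-contained sketch of that pattern with placeholder step functions:

/* Record the first failure but finish the whole flow, mirroring the
 * fail/err handling introduced above.
 */
#include <errno.h>
#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return -EIO; }    /* pretend this step fails */
static int step_c(void) { return 0; }       /* must still run */

static int bring_up(void)
{
    int fail = 0;    /* first error seen, if any */
    int err;

    err = step_a();
    if (!fail)
        fail = err;

    err = step_b();
    if (!fail)
        fail = err;    /* remembered, but we keep going */

    err = step_c();
    if (!fail)
        fail = err;

    return fail;       /* caller sees the first failure */
}

int main(void)
{
    printf("bring_up() = %d (all steps executed)\n", bring_up());
    return 0;
}
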
--- /dev/null
+From 6089a3eb308dd7e76602c79759a8696b9d15f67e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Jul 2024 20:17:11 +0200
+Subject: ice: replace synchronize_rcu with synchronize_net
+
+From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+
+[ Upstream commit 405d9999aa0b4ae467ef391d1d9c7e0d30ad0841 ]
+
+Given that ice_qp_dis() is called under rtnl_lock, synchronize_net() can
+be called instead of synchronize_rcu() so that XDP rings can finish their
+job faster. Also do this earlier in the XSK queue disable flow.
+
+Additionally, turn off the regular Tx queue before disabling irqs and
+NAPI.
+
+Fixes: 2d4238f55697 ("ice: Add support for AF_XDP")
+Reviewed-by: Shannon Nelson <shannon.nelson@amd.com>
+Tested-by: Chandan Kumar Rout <chandanx.rout@intel.com> (A Contingent Worker at Intel)
+Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_xsk.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
+index 3104a5657b837..ba50af9a59293 100644
+--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
+@@ -52,10 +52,8 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
+ static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
+ {
+ ice_clean_tx_ring(vsi->tx_rings[q_idx]);
+- if (ice_is_xdp_ena_vsi(vsi)) {
+- synchronize_rcu();
++ if (ice_is_xdp_ena_vsi(vsi))
+ ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
+- }
+ ice_clean_rx_ring(vsi->rx_rings[q_idx]);
+ }
+
+@@ -180,11 +178,12 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
+ usleep_range(1000, 2000);
+ }
+
++ synchronize_net();
++ netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
++
+ ice_qvec_dis_irq(vsi, rx_ring, q_vector);
+ ice_qvec_toggle_napi(vsi, q_vector, false);
+
+- netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+-
+ ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
+ err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
+ if (err)
+--
+2.43.0
+
--- /dev/null
+From 57b6cfe5e73c5d62d3dd2ae88163e426a5421a01 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Jul 2024 20:17:09 +0200
+Subject: ice: respect netif readiness in AF_XDP ZC related ndo's
+
+From: Michal Kubiak <michal.kubiak@intel.com>
+
+[ Upstream commit ec145a18687fec8dd97eeb4f30057fa4debef577 ]
+
+Address a scenario in which XSK ZC Tx produces descriptors to the XDP Tx
+ring when the link is either not yet fully initialized or the process of
+stopping the netdev has already started. To avoid this, add checks
+against carrier readiness in ice_xsk_wakeup() and in ice_xmit_zc().
+One could argue that bailing out early in ice_xsk_wakeup() would be
+sufficient, but given that we produce Tx descriptors on behalf of NAPI
+that is triggered for Rx traffic, the latter is also needed.
+
+Bringing the link up is an asynchronous event executed within
+ice_service_task, so even though the interface has been brought up there
+is still a time frame where the link is not yet ok.
+
+Without this patch, when AF_XDP ZC Tx is used simultaneously with stack
+Tx, Tx timeouts occur after going through a link flap (admin brings the
+interface down, then up again). HW seems to be unable to transmit a
+descriptor to the wire after the HW tail register bump, which in turn
+causes the __QUEUE_STATE_STACK_XOFF bit to be set forever, as
+netdev_tx_completed_queue() sees no cleaned bytes on the input.
+
+Fixes: 126cdfe1007a ("ice: xsk: Improve AF_XDP ZC Tx and use batching API")
+Fixes: 2d4238f55697 ("ice: Add support for AF_XDP")
+Reviewed-by: Shannon Nelson <shannon.nelson@amd.com>
+Tested-by: Chandan Kumar Rout <chandanx.rout@intel.com> (A Contingent Worker at Intel)
+Signed-off-by: Michal Kubiak <michal.kubiak@intel.com>
+Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_xsk.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
+index a65955eb23c0b..72738b8b8a68e 100644
+--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
+@@ -1048,6 +1048,10 @@ bool ice_xmit_zc(struct ice_tx_ring *xdp_ring)
+
+ ice_clean_xdp_irq_zc(xdp_ring);
+
++ if (!netif_carrier_ok(xdp_ring->vsi->netdev) ||
++ !netif_running(xdp_ring->vsi->netdev))
++ return true;
++
+ budget = ICE_DESC_UNUSED(xdp_ring);
+ budget = min_t(u16, budget, ICE_RING_QUARTER(xdp_ring));
+
+@@ -1091,7 +1095,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_tx_ring *ring;
+
+- if (test_bit(ICE_VSI_DOWN, vsi->state))
++ if (test_bit(ICE_VSI_DOWN, vsi->state) || !netif_carrier_ok(netdev))
+ return -ENETDOWN;
+
+ if (!ice_is_xdp_ena_vsi(vsi))
+--
+2.43.0
+
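The two hunks above guard both the wakeup entry point and the NAPI-driven transmit path on link state. A tiny userspace model of those guards follows; all names and return conventions are illustrative, not the ice driver's.

/* Both entry points bail out unless the link is actually usable. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool carrier_ok;           /* link still coming up */
static bool dev_running = true;

static bool xmit_zc(void)
{
    if (!carrier_ok || !dev_running)
        return true;              /* produce nothing, report "done" */
    puts("descriptors produced");
    return true;
}

static int xsk_wakeup(void)
{
    if (!carrier_ok)
        return -ENETDOWN;         /* caller retries once the link is up */
    return xmit_zc() ? 0 : -EBUSY;
}

int main(void)
{
    printf("wakeup while link is down: %d\n", xsk_wakeup());
    carrier_ok = true;
    printf("wakeup with link up:       %d\n", xsk_wakeup());
    return 0;
}
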
--- /dev/null
+From 92a1c7f22acc52225f8576d8a92209b7a1dc554a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Jul 2024 20:17:13 +0200
+Subject: ice: toggle netif_carrier when setting up XSK pool
+
+From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+
+[ Upstream commit 9da75a511c5558fa3da56759984fd1fa859186f0 ]
+
+This is so we prevent Tx timeout issues. One of the conditions checked
+by the periodically running dev_watchdog() is netif_carrier_ok(), so
+turn the carrier off when we disable the queues that belong to a
+q_vector where an XSK pool is being configured. Turn the carrier on in
+ice_qp_ena() only when ice_get_link_status() tells us that the physical
+link is up.
+
+Fixes: 2d4238f55697 ("ice: Add support for AF_XDP")
+Reviewed-by: Shannon Nelson <shannon.nelson@amd.com>
+Tested-by: Chandan Kumar Rout <chandanx.rout@intel.com> (A Contingent Worker at Intel)
+Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_xsk.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
+index 902096b000f5a..3fbe4cfadfbfa 100644
+--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
+@@ -180,6 +180,7 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
+ }
+
+ synchronize_net();
++ netif_carrier_off(vsi->netdev);
+ netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+
+ ice_qvec_dis_irq(vsi, rx_ring, q_vector);
+@@ -218,6 +219,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
+ {
+ struct ice_q_vector *q_vector;
+ int fail = 0;
++ bool link_up;
+ int err;
+
+ err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
+@@ -248,7 +250,11 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
+ ice_qvec_toggle_napi(vsi, q_vector, true);
+ ice_qvec_ena_irq(vsi, q_vector);
+
+- netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
++ ice_get_link_status(vsi->port_info, &link_up);
++ if (link_up) {
++ netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
++ netif_carrier_on(vsi->netdev);
++ }
+ clear_bit(ICE_CFG_BUSY, vsi->state);
+
+ return fail;
+--
+2.43.0
+
--- /dev/null
+From b77637a67efbf158a189d4d0815636418b56c55f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Jul 2024 20:17:16 +0200
+Subject: ice: xsk: fix txq interrupt mapping
+
+From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+
+[ Upstream commit 963fb4612295a5c35b1b89c8bff3bdd4f9127af6 ]
+
+ice_cfg_txq_interrupt() internally handles the XDP Tx ring. Do not use
+ice_for_each_tx_ring() in ice_qvec_cfg_msix(), as this causes us to
+treat the XDP ring that belongs to the queue vector as a Tx ring and
+therefore misconfigure the interrupts.
+
+Fixes: 2d4238f55697 ("ice: Add support for AF_XDP")
+Reviewed-by: Shannon Nelson <shannon.nelson@amd.com>
+Tested-by: Chandan Kumar Rout <chandanx.rout@intel.com> (A Contingent Worker at Intel)
+Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_xsk.c | 24 ++++++++++++++----------
+ 1 file changed, 14 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
+index ee084ad80a613..240a7bec242be 100644
+--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
+@@ -110,25 +110,29 @@ ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
+ * ice_qvec_cfg_msix - Enable IRQ for given queue vector
+ * @vsi: the VSI that contains queue vector
+ * @q_vector: queue vector
++ * @qid: queue index
+ */
+ static void
+-ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
++ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector, u16 qid)
+ {
+ u16 reg_idx = q_vector->reg_idx;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+- struct ice_tx_ring *tx_ring;
+- struct ice_rx_ring *rx_ring;
++ int q, _qid = qid;
+
+ ice_cfg_itr(hw, q_vector);
+
+- ice_for_each_tx_ring(tx_ring, q_vector->tx)
+- ice_cfg_txq_interrupt(vsi, tx_ring->reg_idx, reg_idx,
+- q_vector->tx.itr_idx);
++ for (q = 0; q < q_vector->num_ring_tx; q++) {
++ ice_cfg_txq_interrupt(vsi, _qid, reg_idx, q_vector->tx.itr_idx);
++ _qid++;
++ }
+
+- ice_for_each_rx_ring(rx_ring, q_vector->rx)
+- ice_cfg_rxq_interrupt(vsi, rx_ring->reg_idx, reg_idx,
+- q_vector->rx.itr_idx);
++ _qid = qid;
++
++ for (q = 0; q < q_vector->num_ring_rx; q++) {
++ ice_cfg_rxq_interrupt(vsi, _qid, reg_idx, q_vector->rx.itr_idx);
++ _qid++;
++ }
+
+ ice_flush(hw);
+ }
+@@ -241,7 +245,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
+ fail = err;
+
+ q_vector = vsi->rx_rings[q_idx]->q_vector;
+- ice_qvec_cfg_msix(vsi, q_vector);
++ ice_qvec_cfg_msix(vsi, q_vector, q_idx);
+
+ err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
+ if (!fail)
+--
+2.43.0
+
--- /dev/null
+From 02cff4c7c33822ad4fa80e39287e7cb724e5dc8b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Jul 2024 10:33:02 -0700
+Subject: igc: Fix double reset adapter triggered from a single taprio cmd
+
+From: Faizal Rahim <faizal.abdul.rahim@linux.intel.com>
+
+[ Upstream commit b9e7fc0aeda79031a101610b2fcb12bf031056e9 ]
+
+Following the implementation of the "igc: Add TransmissionOverrun
+counter" patch, when a taprio command is triggered by the user, igc
+processes two commands: TAPRIO_CMD_REPLACE followed by TAPRIO_CMD_STATS.
+However, both commands unconditionally pass through
+igc_tsn_offload_apply(), which evaluates and triggers an adapter reset.
+The double reset causes issues in the calculation of adapter->qbv_count
+in igc.
+
+The TAPRIO_CMD_REPLACE command is expected to reset the adapter since it
+activates qbv. It's unexpected for TAPRIO_CMD_STATS to do the same
+because it doesn't configure any driver-specific TSN settings. So, the
+evaluation in igc_tsn_offload_apply() isn't needed for TAPRIO_CMD_STATS.
+
+To address this, command parsing is relocated to
+igc_tsn_enable_qbv_scheduling(). Commands that don't require an adapter
+reset will exit after processing, thus avoiding igc_tsn_offload_apply().
+
+Fixes: d3750076d464 ("igc: Add TransmissionOverrun counter")
+Signed-off-by: Faizal Rahim <faizal.abdul.rahim@linux.intel.com>
+Acked-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Reviewed-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Tested-by: Mor Bar-Gabay <morx.bar.gabay@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Link: https://patch.msgid.link/20240730173304.865479-1-anthony.l.nguyen@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igc/igc_main.c | 33 ++++++++++++-----------
+ 1 file changed, 17 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index 87b655b839c1c..33069880c86c0 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -6310,21 +6310,6 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
+ size_t n;
+ int i;
+
+- switch (qopt->cmd) {
+- case TAPRIO_CMD_REPLACE:
+- break;
+- case TAPRIO_CMD_DESTROY:
+- return igc_tsn_clear_schedule(adapter);
+- case TAPRIO_CMD_STATS:
+- igc_taprio_stats(adapter->netdev, &qopt->stats);
+- return 0;
+- case TAPRIO_CMD_QUEUE_STATS:
+- igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats);
+- return 0;
+- default:
+- return -EOPNOTSUPP;
+- }
+-
+ if (qopt->base_time < 0)
+ return -ERANGE;
+
+@@ -6433,7 +6418,23 @@ static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter,
+ if (hw->mac.type != igc_i225)
+ return -EOPNOTSUPP;
+
+- err = igc_save_qbv_schedule(adapter, qopt);
++ switch (qopt->cmd) {
++ case TAPRIO_CMD_REPLACE:
++ err = igc_save_qbv_schedule(adapter, qopt);
++ break;
++ case TAPRIO_CMD_DESTROY:
++ err = igc_tsn_clear_schedule(adapter);
++ break;
++ case TAPRIO_CMD_STATS:
++ igc_taprio_stats(adapter->netdev, &qopt->stats);
++ return 0;
++ case TAPRIO_CMD_QUEUE_STATS:
++ igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats);
++ return 0;
++ default:
++ return -EOPNOTSUPP;
++ }
++
+ if (err)
+ return err;
+
+--
+2.43.0
+
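The fix above works by dispatching the taprio command in the caller so that pure query commands return before the reset-apply step is ever reached. A condensed, runnable sketch of that dispatch shape; the helper functions are stand-ins for the igc ones.

/* Query commands return early; only configuring commands fall through
 * to the apply/reset step.
 */
#include <errno.h>
#include <stdio.h>

enum cmd { CMD_REPLACE, CMD_DESTROY, CMD_STATS };

static int save_schedule(void)  { puts("schedule saved");   return 0; }
static int clear_schedule(void) { puts("schedule cleared"); return 0; }
static void fill_stats(void)    { puts("stats filled"); }
static int apply_offload(void)  { puts("adapter reset");    return 0; }

static int enable_qbv_scheduling(enum cmd cmd)
{
    int err;

    switch (cmd) {
    case CMD_REPLACE:
        err = save_schedule();
        break;
    case CMD_DESTROY:
        err = clear_schedule();
        break;
    case CMD_STATS:
        fill_stats();
        return 0;            /* pure query: no reset */
    default:
        return -EOPNOTSUPP;
    }

    if (err)
        return err;
    return apply_offload();  /* reached only by REPLACE/DESTROY */
}

int main(void)
{
    enable_qbv_scheduling(CMD_STATS);    /* prints no "adapter reset" */
    enable_qbv_scheduling(CMD_REPLACE);  /* reset follows the save */
    return 0;
}
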
--- /dev/null
+From 1d0e1d7c370b90ea44ba832c1dddb3f35ae90c1f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 29 Jul 2024 17:17:48 -0700
+Subject: ipv6: fix ndisc_is_useropt() handling for PIO
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Maciej Żenczykowski <maze@google.com>
+
+[ Upstream commit a46c68debf3be3a477a69ccbf0a1d050df841676 ]
+
+The current logic only works if the PIO is between two
+other ND user options. This fixes it so that the PIO
+can also be either before or after other ND user options
+(for example the first or last option in the RA).
+
+side note: there are actually Android tests verifying
+a portion of the old broken behaviour, so:
+ https://android-review.googlesource.com/c/kernel/tests/+/3196704
+fixes those up.
+
+Cc: Jen Linkova <furry@google.com>
+Cc: Lorenzo Colitti <lorenzo@google.com>
+Cc: Patrick Rohr <prohr@google.com>
+Cc: David Ahern <dsahern@kernel.org>
+Cc: YOSHIFUJI Hideaki / 吉藤英明 <yoshfuji@linux-ipv6.org>
+Cc: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Maciej Żenczykowski <maze@google.com>
+Fixes: 048c796beb6e ("ipv6: adjust ndisc_is_useropt() to also return true for PIO")
+Link: https://patch.msgid.link/20240730001748.147636-1-maze@google.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/ndisc.c | 34 ++++++++++++++++++----------------
+ 1 file changed, 18 insertions(+), 16 deletions(-)
+
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index d914b23256ce6..0282d15725095 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -227,6 +227,7 @@ struct ndisc_options *ndisc_parse_options(const struct net_device *dev,
+ return NULL;
+ memset(ndopts, 0, sizeof(*ndopts));
+ while (opt_len) {
++ bool unknown = false;
+ int l;
+ if (opt_len < sizeof(struct nd_opt_hdr))
+ return NULL;
+@@ -262,22 +263,23 @@ struct ndisc_options *ndisc_parse_options(const struct net_device *dev,
+ break;
+ #endif
+ default:
+- if (ndisc_is_useropt(dev, nd_opt)) {
+- ndopts->nd_useropts_end = nd_opt;
+- if (!ndopts->nd_useropts)
+- ndopts->nd_useropts = nd_opt;
+- } else {
+- /*
+- * Unknown options must be silently ignored,
+- * to accommodate future extension to the
+- * protocol.
+- */
+- ND_PRINTK(2, notice,
+- "%s: ignored unsupported option; type=%d, len=%d\n",
+- __func__,
+- nd_opt->nd_opt_type,
+- nd_opt->nd_opt_len);
+- }
++ unknown = true;
++ }
++ if (ndisc_is_useropt(dev, nd_opt)) {
++ ndopts->nd_useropts_end = nd_opt;
++ if (!ndopts->nd_useropts)
++ ndopts->nd_useropts = nd_opt;
++ } else if (unknown) {
++ /*
++ * Unknown options must be silently ignored,
++ * to accommodate future extension to the
++ * protocol.
++ */
++ ND_PRINTK(2, notice,
++ "%s: ignored unsupported option; type=%d, len=%d\n",
++ __func__,
++ nd_opt->nd_opt_type,
++ nd_opt->nd_opt_len);
+ }
+ next_opt:
+ opt_len -= l;
+--
+2.43.0
+
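The restructuring above makes the "is this a user option?" test run for every option, with the switch merely flagging unrecognized types, instead of performing the test only inside the default branch. A stripped-down model of that loop; the option codes and the useropt rule below are made up for the example.

/* The useropt test now runs after the switch, for every option. */
#include <stdbool.h>
#include <stdio.h>

#define OPT_SRC_LLADDR 1
#define OPT_PIO        3    /* has its own case below... */
#define OPT_ROUTE_INFO 24

static bool is_useropt(int type)
{
    return type == OPT_PIO || type == OPT_ROUTE_INFO;    /* example rule */
}

int main(void)
{
    int opts[] = { OPT_PIO, OPT_SRC_LLADDR, OPT_ROUTE_INFO };

    for (unsigned int i = 0; i < sizeof(opts) / sizeof(opts[0]); i++) {
        bool unknown = false;

        switch (opts[i]) {
        case OPT_SRC_LLADDR:
        case OPT_PIO:
            /* type-specific handling would go here */
            break;
        default:
            unknown = true;
        }

        /* ...yet it is still offered to the useropt list here, even
         * when a case above already consumed it */
        if (is_useropt(opts[i]))
            printf("option %d queued as useropt\n", opts[i]);
        else if (unknown)
            printf("option %d silently ignored\n", opts[i]);
    }
    return 0;
}
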
--- /dev/null
+From b4158184ff735a79bf7b876c91db8a0614ebbd10 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Jul 2024 15:06:50 +0800
+Subject: net: axienet: start napi before enabling Rx/Tx
+
+From: Andy Chiu <andy.chiu@sifive.com>
+
+[ Upstream commit 799a829507506924add8a7620493adc1c3cfda30 ]
+
+A softirq may get lost if an Rx interrupt comes before we call
+napi_enable(). Move napi_enable() in front of axienet_setoptions(),
+which turns on the device, to address the issue.
+
+Link: https://lists.gnu.org/archive/html/qemu-devel/2024-07/msg06160.html
+Fixes: cc37610caaf8 ("net: axienet: implement NAPI and GRO receive")
+Signed-off-by: Andy Chiu <andy.chiu@sifive.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index c29809cd92015..fa510f4e26008 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -2219,9 +2219,9 @@ static void axienet_dma_err_handler(struct work_struct *work)
+ ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
+ axienet_set_mac_address(ndev, NULL);
+ axienet_set_multicast_list(ndev);
+- axienet_setoptions(ndev, lp->options);
+ napi_enable(&lp->napi_rx);
+ napi_enable(&lp->napi_tx);
++ axienet_setoptions(ndev, lp->options);
+ }
+
+ /**
+--
+2.43.0
+
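The one-line move above is an ordering fix: the consumer (NAPI) must be enabled before the device is allowed to raise Rx/Tx events, otherwise an early interrupt schedules work that nothing is polling for. A tiny model of why the order matters; everything here is a stand-in.

/* Events that arrive before the consumer is enabled are simply lost. */
#include <stdbool.h>
#include <stdio.h>

static bool napi_enabled;
static int dropped, handled;

static void rx_interrupt(void)
{
    if (napi_enabled)
        handled++;
    else
        dropped++;    /* the "lost softirq" from the bug report */
}

static void bring_up(bool napi_first)
{
    napi_enabled = false;
    dropped = handled = 0;

    if (napi_first)
        napi_enabled = true;    /* fixed order: consumer ready first */
    rx_interrupt();             /* device on -> an irq may fire now */
    if (!napi_first)
        napi_enabled = true;    /* old order: too late for that irq */
}

int main(void)
{
    bring_up(false);
    printf("old order: handled=%d dropped=%d\n", handled, dropped);
    bring_up(true);
    printf("new order: handled=%d dropped=%d\n", handled, dropped);
    return 0;
}
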
--- /dev/null
+From 01764eaa588997eb987eb407719bfc56bdf3907c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 27 Jun 2024 16:33:51 +0100
+Subject: net: ethtool: add a mutex protecting RSS contexts
+
+From: Edward Cree <ecree.xilinx@gmail.com>
+
+[ Upstream commit 87925151191b64d9623e63ccf11e517eacc99d7d ]
+
+While this is not needed to serialise the ethtool entry points (which
+ are all under RTNL), drivers may have cause to asynchronously access
+ dev->ethtool->rss_ctx; taking dev->ethtool->rss_lock allows them to
+ do this safely without needing to take the RTNL.
+
+Signed-off-by: Edward Cree <ecree.xilinx@gmail.com>
+Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
+Link: https://patch.msgid.link/7f9c15eb7525bf87af62c275dde3a8570ee8bf0a.1719502240.git.ecree.xilinx@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 7195f0ef7f5b ("ethtool: fix setting key and resetting indir at once")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/ethtool.h | 3 +++
+ net/core/dev.c | 5 +++++
+ net/ethtool/ioctl.c | 7 +++++++
+ 3 files changed, 15 insertions(+)
+
+diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
+index 8fa2f8bd474b6..787dd63c3acb3 100644
+--- a/include/linux/ethtool.h
++++ b/include/linux/ethtool.h
+@@ -1055,10 +1055,13 @@ int ethtool_virtdev_set_link_ksettings(struct net_device *dev,
+ /**
+ * struct ethtool_netdev_state - per-netdevice state for ethtool features
+ * @rss_ctx: XArray of custom RSS contexts
++ * @rss_lock: Protects entries in @rss_ctx. May be taken from
++ * within RTNL.
+ * @wol_enabled: Wake-on-LAN is enabled
+ */
+ struct ethtool_netdev_state {
+ struct xarray rss_ctx;
++ struct mutex rss_lock;
+ unsigned wol_enabled:1;
+ };
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index b390b4cd9098f..8a9ab7b12c618 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -10287,6 +10287,7 @@ int register_netdevice(struct net_device *dev)
+
+ /* rss ctx ID 0 is reserved for the default context, start from 1 */
+ xa_init_flags(&dev->ethtool->rss_ctx, XA_FLAGS_ALLOC1);
++ mutex_init(&dev->ethtool->rss_lock);
+
+ spin_lock_init(&dev->addr_list_lock);
+ netdev_set_addr_lockdep_class(dev);
+@@ -11142,6 +11143,7 @@ static void netdev_rss_contexts_free(struct net_device *dev)
+ struct ethtool_rxfh_context *ctx;
+ unsigned long context;
+
++ mutex_lock(&dev->ethtool->rss_lock);
+ xa_for_each(&dev->ethtool->rss_ctx, context, ctx) {
+ struct ethtool_rxfh_param rxfh;
+
+@@ -11157,6 +11159,7 @@ static void netdev_rss_contexts_free(struct net_device *dev)
+ kfree(ctx);
+ }
+ xa_destroy(&dev->ethtool->rss_ctx);
++ mutex_unlock(&dev->ethtool->rss_lock);
+ }
+
+ /**
+@@ -11269,6 +11272,8 @@ void unregister_netdevice_many_notify(struct list_head *head,
+ if (dev->netdev_ops->ndo_uninit)
+ dev->netdev_ops->ndo_uninit(dev);
+
++ mutex_destroy(&dev->ethtool->rss_lock);
++
+ if (skb)
+ rtmsg_ifinfo_send(skb, dev, GFP_KERNEL, portid, nlh);
+
+diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
+index 1e87131f2caf4..d9c8a6a16cb2a 100644
+--- a/net/ethtool/ioctl.c
++++ b/net/ethtool/ioctl.c
+@@ -1285,6 +1285,7 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
+ struct netlink_ext_ack *extack = NULL;
+ struct ethtool_rxnfc rx_rings;
+ struct ethtool_rxfh rxfh;
++ bool locked = false; /* dev->ethtool->rss_lock taken */
+ u32 indir_bytes = 0;
+ bool create = false;
+ u8 *rss_config;
+@@ -1381,6 +1382,10 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
+ }
+ }
+
++ if (rxfh.rss_context) {
++ mutex_lock(&dev->ethtool->rss_lock);
++ locked = true;
++ }
+ if (create) {
+ if (rxfh_dev.rss_delete) {
+ ret = -EINVAL;
+@@ -1471,6 +1476,8 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
+ }
+
+ out:
++ if (locked)
++ mutex_unlock(&dev->ethtool->rss_lock);
+ kfree(rss_config);
+ return ret;
+ }
+--
+2.43.0
+
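The lock added above lets asynchronous (non-RTNL) paths walk and modify the RSS context table safely; the teardown path in particular now iterates and erases under the mutex. A small userspace analogue using a pthread mutex and a fixed array in place of the kernel mutex and XArray:

/* Give the context table its own lock so it can be used outside the
 * big lock.  Build: cc -pthread rss_lock_model.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_CTX 8

struct state {
    pthread_mutex_t lock;    /* plays the role of rss_lock */
    void *ctx[MAX_CTX];      /* plays the role of the rss_ctx XArray */
};

static void contexts_free(struct state *st)
{
    pthread_mutex_lock(&st->lock);
    for (int i = 0; i < MAX_CTX; i++) {
        free(st->ctx[i]);    /* per-context driver cleanup would go here */
        st->ctx[i] = NULL;
    }
    pthread_mutex_unlock(&st->lock);
}

int main(void)
{
    struct state st = { .lock = PTHREAD_MUTEX_INITIALIZER };

    st.ctx[1] = malloc(32);  /* pretend context ID 1 was created */
    contexts_free(&st);      /* safe even against concurrent users */
    pthread_mutex_destroy(&st.lock);
    puts("contexts freed under the lock");
    return 0;
}
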
--- /dev/null
+From 536a780f27abae5daf9d71e564a75679740290da Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 27 Jun 2024 16:33:47 +0100
+Subject: net: ethtool: attach an XArray of custom RSS contexts to a netdevice
+
+From: Edward Cree <ecree.xilinx@gmail.com>
+
+[ Upstream commit 6ad2962f8adfd53fca52dce7f830783e95d99ce7 ]
+
+Each context stores the RXFH settings (indir, key, and hfunc) as well
+ as optionally some driver private data.
+Delete any still-existing contexts at netdev unregister time.
+
+Signed-off-by: Edward Cree <ecree.xilinx@gmail.com>
+Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
+Link: https://patch.msgid.link/cbd1c402cec38f2e03124f2ab65b4ae4e08bd90d.1719502240.git.ecree.xilinx@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 7195f0ef7f5b ("ethtool: fix setting key and resetting indir at once")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/ethtool.h | 42 +++++++++++++++++++++++++++++++++++++++++
+ net/core/dev.c | 27 ++++++++++++++++++++++++++
+ 2 files changed, 69 insertions(+)
+
+diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
+index 8cd6b3c993f17..13c9c819de580 100644
+--- a/include/linux/ethtool.h
++++ b/include/linux/ethtool.h
+@@ -159,6 +159,46 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
+ return index % n_rx_rings;
+ }
+
++/**
++ * struct ethtool_rxfh_context - a custom RSS context configuration
++ * @indir_size: Number of u32 entries in indirection table
++ * @key_size: Size of hash key, in bytes
++ * @priv_size: Size of driver private data, in bytes
++ * @hfunc: RSS hash function identifier. One of the %ETH_RSS_HASH_*
++ * @input_xfrm: Defines how the input data is transformed. Valid values are one
++ * of %RXH_XFRM_*.
++ * @indir_configured: indir has been specified (at create time or subsequently)
++ * @key_configured: hkey has been specified (at create time or subsequently)
++ */
++struct ethtool_rxfh_context {
++ u32 indir_size;
++ u32 key_size;
++ u16 priv_size;
++ u8 hfunc;
++ u8 input_xfrm;
++ u8 indir_configured:1;
++ u8 key_configured:1;
++ /* private: driver private data, indirection table, and hash key are
++ * stored sequentially in @data area. Use below helpers to access.
++ */
++ u8 data[] __aligned(sizeof(void *));
++};
++
++static inline void *ethtool_rxfh_context_priv(struct ethtool_rxfh_context *ctx)
++{
++ return ctx->data;
++}
++
++static inline u32 *ethtool_rxfh_context_indir(struct ethtool_rxfh_context *ctx)
++{
++ return (u32 *)(ctx->data + ALIGN(ctx->priv_size, sizeof(u32)));
++}
++
++static inline u8 *ethtool_rxfh_context_key(struct ethtool_rxfh_context *ctx)
++{
++ return (u8 *)(ethtool_rxfh_context_indir(ctx) + ctx->indir_size);
++}
++
+ /* declare a link mode bitmap */
+ #define __ETHTOOL_DECLARE_LINK_MODE_MASK(name) \
+ DECLARE_BITMAP(name, __ETHTOOL_LINK_MODE_MASK_NBITS)
+@@ -1000,9 +1040,11 @@ int ethtool_virtdev_set_link_ksettings(struct net_device *dev,
+
+ /**
+ * struct ethtool_netdev_state - per-netdevice state for ethtool features
++ * @rss_ctx: XArray of custom RSS contexts
+ * @wol_enabled: Wake-on-LAN is enabled
+ */
+ struct ethtool_netdev_state {
++ struct xarray rss_ctx;
+ unsigned wol_enabled:1;
+ };
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 250752751811c..b390b4cd9098f 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -10285,6 +10285,9 @@ int register_netdevice(struct net_device *dev)
+ if (ret)
+ return ret;
+
++ /* rss ctx ID 0 is reserved for the default context, start from 1 */
++ xa_init_flags(&dev->ethtool->rss_ctx, XA_FLAGS_ALLOC1);
++
+ spin_lock_init(&dev->addr_list_lock);
+ netdev_set_addr_lockdep_class(dev);
+
+@@ -11134,6 +11137,28 @@ void synchronize_net(void)
+ }
+ EXPORT_SYMBOL(synchronize_net);
+
++static void netdev_rss_contexts_free(struct net_device *dev)
++{
++ struct ethtool_rxfh_context *ctx;
++ unsigned long context;
++
++ xa_for_each(&dev->ethtool->rss_ctx, context, ctx) {
++ struct ethtool_rxfh_param rxfh;
++
++ rxfh.indir = ethtool_rxfh_context_indir(ctx);
++ rxfh.key = ethtool_rxfh_context_key(ctx);
++ rxfh.hfunc = ctx->hfunc;
++ rxfh.input_xfrm = ctx->input_xfrm;
++ rxfh.rss_context = context;
++ rxfh.rss_delete = true;
++
++ xa_erase(&dev->ethtool->rss_ctx, context);
++ dev->ethtool_ops->set_rxfh(dev, &rxfh, NULL);
++ kfree(ctx);
++ }
++ xa_destroy(&dev->ethtool->rss_ctx);
++}
++
+ /**
+ * unregister_netdevice_queue - remove device from the kernel
+ * @dev: device
+@@ -11237,6 +11262,8 @@ void unregister_netdevice_many_notify(struct list_head *head,
+ netdev_name_node_alt_flush(dev);
+ netdev_name_node_free(dev->name_node);
+
++ netdev_rss_contexts_free(dev);
++
+ call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev);
+
+ if (dev->netdev_ops->ndo_uninit)
+--
+2.43.0
+
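The accessor helpers added above carve three regions out of the single data[] flexible array: driver private data first, then the indirection table at the next u32-aligned offset, then the hash key right after the table. The following stand-alone user-space sketch reproduces the same pointer arithmetic; the names (rxfh_ctx, ctx_indir, ...) are invented and only the layout idea is taken from the patch:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

/* Same layout idea as struct ethtool_rxfh_context: priv data, then the
 * indirection table, then the hash key, packed into one trailing array.
 * data[] is pointer-aligned, mirroring the kernel's __aligned() annotation. */
struct rxfh_ctx {
	uint32_t indir_size;	/* number of u32 entries in the indir table */
	uint32_t key_size;	/* hash key bytes */
	uint16_t priv_size;	/* driver private bytes */
	unsigned char data[] __attribute__((aligned(sizeof(void *))));
};

static void *ctx_priv(struct rxfh_ctx *c)
{
	return c->data;
}

static uint32_t *ctx_indir(struct rxfh_ctx *c)
{
	return (uint32_t *)(c->data + ALIGN_UP(c->priv_size, sizeof(uint32_t)));
}

static uint8_t *ctx_key(struct rxfh_ctx *c)
{
	return (uint8_t *)(ctx_indir(c) + c->indir_size);
}

int main(void)
{
	uint32_t indir_size = 128, key_size = 40;
	uint16_t priv_size = 10;
	size_t flex = ALIGN_UP(priv_size, sizeof(uint32_t)) +
		      indir_size * sizeof(uint32_t) + key_size;
	struct rxfh_ctx *c = calloc(1, sizeof(*c) + flex);

	if (!c)
		return 1;
	c->indir_size = indir_size;
	c->key_size = key_size;
	c->priv_size = priv_size;

	printf("priv  at data+%td\n", (char *)ctx_priv(c) - (char *)c->data);
	printf("indir at data+%td\n", (char *)ctx_indir(c) - (char *)c->data);
	printf("key   at data+%td\n", (char *)ctx_key(c) - (char *)c->data);
	free(c);
	return 0;
}
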
--- /dev/null
+From 756a1c7a95d270503dade14dc9786da1168d1f4d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 27 Jun 2024 16:33:48 +0100
+Subject: net: ethtool: record custom RSS contexts in the XArray
+
+From: Edward Cree <ecree.xilinx@gmail.com>
+
+[ Upstream commit eac9122f0c41b832065e01977c34946ec8e76c24 ]
+
+Since drivers are still choosing the context IDs, we have to force the
+ XArray to use the ID they've chosen rather than picking one ourselves,
+ and handle the case where they give us an ID that's already in use.
+
+Signed-off-by: Edward Cree <ecree.xilinx@gmail.com>
+Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
+Link: https://patch.msgid.link/801f5faa4cec87c65b2c6e27fb220c944bce593a.1719502240.git.ecree.xilinx@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 7195f0ef7f5b ("ethtool: fix setting key and resetting indir at once")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/ethtool.h | 14 ++++++++
+ net/ethtool/ioctl.c | 74 ++++++++++++++++++++++++++++++++++++++++-
+ 2 files changed, 87 insertions(+), 1 deletion(-)
+
+diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
+index 13c9c819de580..8fa2f8bd474b6 100644
+--- a/include/linux/ethtool.h
++++ b/include/linux/ethtool.h
+@@ -199,6 +199,17 @@ static inline u8 *ethtool_rxfh_context_key(struct ethtool_rxfh_context *ctx)
+ return (u8 *)(ethtool_rxfh_context_indir(ctx) + ctx->indir_size);
+ }
+
++static inline size_t ethtool_rxfh_context_size(u32 indir_size, u32 key_size,
++ u16 priv_size)
++{
++ size_t indir_bytes = array_size(indir_size, sizeof(u32));
++ size_t flex_len;
++
++ flex_len = size_add(size_add(indir_bytes, key_size),
++ ALIGN(priv_size, sizeof(u32)));
++ return struct_size_t(struct ethtool_rxfh_context, data, flex_len);
++}
++
+ /* declare a link mode bitmap */
+ #define __ETHTOOL_DECLARE_LINK_MODE_MASK(name) \
+ DECLARE_BITMAP(name, __ETHTOOL_LINK_MODE_MASK_NBITS)
+@@ -709,6 +720,8 @@ struct ethtool_rxfh_param {
+ * contexts.
+ * @cap_rss_sym_xor_supported: indicates if the driver supports symmetric-xor
+ * RSS.
++ * @rxfh_priv_size: size of the driver private data area the core should
++ * allocate for an RSS context (in &struct ethtool_rxfh_context).
+ * @supported_coalesce_params: supported types of interrupt coalescing.
+ * @supported_ring_params: supported ring params.
+ * @get_drvinfo: Report driver/device information. Modern drivers no
+@@ -892,6 +905,7 @@ struct ethtool_ops {
+ u32 cap_link_lanes_supported:1;
+ u32 cap_rss_ctx_supported:1;
+ u32 cap_rss_sym_xor_supported:1;
++ u16 rxfh_priv_size;
+ u32 supported_coalesce_params;
+ u32 supported_ring_params;
+ void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
+diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
+index 8d85a9900538b..1e87131f2caf4 100644
+--- a/net/ethtool/ioctl.c
++++ b/net/ethtool/ioctl.c
+@@ -1281,10 +1281,12 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ u32 dev_indir_size = 0, dev_key_size = 0, i;
+ struct ethtool_rxfh_param rxfh_dev = {};
++ struct ethtool_rxfh_context *ctx = NULL;
+ struct netlink_ext_ack *extack = NULL;
+ struct ethtool_rxnfc rx_rings;
+ struct ethtool_rxfh rxfh;
+ u32 indir_bytes = 0;
++ bool create = false;
+ u8 *rss_config;
+ int ret;
+
+@@ -1313,6 +1315,7 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
+ (rxfh.input_xfrm & RXH_XFRM_SYM_XOR) &&
+ !ops->cap_rss_sym_xor_supported)
+ return -EOPNOTSUPP;
++ create = rxfh.rss_context == ETH_RXFH_CONTEXT_ALLOC;
+
+ /* If either indir, hash key or function is valid, proceed further.
+ * Must request at least one change: indir size, hash key, function
+@@ -1378,13 +1381,42 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
+ }
+ }
+
++ if (create) {
++ if (rxfh_dev.rss_delete) {
++ ret = -EINVAL;
++ goto out;
++ }
++ ctx = kzalloc(ethtool_rxfh_context_size(dev_indir_size,
++ dev_key_size,
++ ops->rxfh_priv_size),
++ GFP_KERNEL_ACCOUNT);
++ if (!ctx) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ ctx->indir_size = dev_indir_size;
++ ctx->key_size = dev_key_size;
++ ctx->hfunc = rxfh.hfunc;
++ ctx->input_xfrm = rxfh.input_xfrm;
++ ctx->priv_size = ops->rxfh_priv_size;
++ } else if (rxfh.rss_context) {
++ ctx = xa_load(&dev->ethtool->rss_ctx, rxfh.rss_context);
++ if (!ctx) {
++ ret = -ENOENT;
++ goto out;
++ }
++ }
+ rxfh_dev.hfunc = rxfh.hfunc;
+ rxfh_dev.rss_context = rxfh.rss_context;
+ rxfh_dev.input_xfrm = rxfh.input_xfrm;
+
+ ret = ops->set_rxfh(dev, &rxfh_dev, extack);
+- if (ret)
++ if (ret) {
++ if (create)
++ /* failed to create, free our new tracking entry */
++ kfree(ctx);
+ goto out;
++ }
+
+ if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh, rss_context),
+ &rxfh_dev.rss_context, sizeof(rxfh_dev.rss_context)))
+@@ -1397,6 +1429,46 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
+ else if (rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE)
+ dev->priv_flags |= IFF_RXFH_CONFIGURED;
+ }
++ /* Update rss_ctx tracking */
++ if (create) {
++ /* Ideally this should happen before calling the driver,
++ * so that we can fail more cleanly; but we don't have the
++ * context ID until the driver picks it, so we have to
++ * wait until after.
++ */
++ if (WARN_ON(xa_load(&dev->ethtool->rss_ctx, rxfh.rss_context))) {
++ /* context ID reused, our tracking is screwed */
++ kfree(ctx);
++ goto out;
++ }
++ /* Allocate the exact ID the driver gave us */
++ if (xa_is_err(xa_store(&dev->ethtool->rss_ctx, rxfh.rss_context,
++ ctx, GFP_KERNEL))) {
++ kfree(ctx);
++ goto out;
++ }
++ ctx->indir_configured = rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE;
++ ctx->key_configured = !!rxfh.key_size;
++ }
++ if (rxfh_dev.rss_delete) {
++ WARN_ON(xa_erase(&dev->ethtool->rss_ctx, rxfh.rss_context) != ctx);
++ kfree(ctx);
++ } else if (ctx) {
++ if (rxfh_dev.indir) {
++ for (i = 0; i < dev_indir_size; i++)
++ ethtool_rxfh_context_indir(ctx)[i] = rxfh_dev.indir[i];
++ ctx->indir_configured = 1;
++ }
++ if (rxfh_dev.key) {
++ memcpy(ethtool_rxfh_context_key(ctx), rxfh_dev.key,
++ dev_key_size);
++ ctx->key_configured = 1;
++ }
++ if (rxfh_dev.hfunc != ETH_RSS_HASH_NO_CHANGE)
++ ctx->hfunc = rxfh_dev.hfunc;
++ if (rxfh_dev.input_xfrm != RXH_XFRM_NO_CHANGE)
++ ctx->input_xfrm = rxfh_dev.input_xfrm;
++ }
+
+ out:
+ kfree(rss_config);
+--
+2.43.0
+
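Because the driver, not the core, still picks the context ID, the core has to allocate its tracking entry first, call the driver, and only then learn which slot the entry belongs in; every failure along the way must free that entry again, exactly as the added error paths above do. A compressed user-space sketch of that ordering, where driver_create_ctx() and the fixed table stand in for the real driver callback and XArray:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_CTX 8

struct ctx { int id; };

static struct ctx *table[MAX_CTX];	/* stand-in for the rss_ctx XArray */

/* Stand-in for ops->set_rxfh(): the "driver" picks the ID on success. */
static int driver_create_ctx(int *id_out)
{
	*id_out = 3;	/* pretend the hardware handed us slot 3 */
	return 0;	/* return -EINVAL here to exercise the error path */
}

static int create_context(void)
{
	struct ctx *c = calloc(1, sizeof(*c));
	int id, err;

	if (!c)
		return -ENOMEM;

	err = driver_create_ctx(&id);
	if (err) {
		free(c);	/* driver failed: drop our tracking entry */
		return err;
	}

	if (id < 0 || id >= MAX_CTX || table[id]) {
		free(c);	/* ID unusable or already tracked: undo as well */
		return -EBUSY;
	}

	c->id = id;
	table[id] = c;		/* only now does the entry become visible */
	return 0;
}

int main(void)
{
	printf("create_context() = %d\n", create_context());
	return 0;
}
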
--- /dev/null
+From 894b4da5fb95bbad1c7208a5cf8fd7180c449706 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 29 Jul 2024 14:28:16 +0200
+Subject: net/iucv: fix use after free in iucv_sock_close()
+
+From: Alexandra Winter <wintera@linux.ibm.com>
+
+[ Upstream commit f558120cd709682b739207b48cf7479fd9568431 ]
+
+iucv_sever_path() is called from process context and from bh context.
+iucv->path is used as indicator whether somebody else is taking care of
+severing the path (or it is already removed / never existed).
+This needs to be done with atomic compare and swap, otherwise there is a
+small window where iucv_sock_close() will try to work with a path that has
+already been severed and freed by iucv_callback_connrej() called by
+iucv_tasklet_fn().
+
+Example:
+[452744.123844] Call Trace:
+[452744.123845] ([<0000001e87f03880>] 0x1e87f03880)
+[452744.123966] [<00000000d593001e>] iucv_path_sever+0x96/0x138
+[452744.124330] [<000003ff801ddbca>] iucv_sever_path+0xc2/0xd0 [af_iucv]
+[452744.124336] [<000003ff801e01b6>] iucv_sock_close+0xa6/0x310 [af_iucv]
+[452744.124341] [<000003ff801e08cc>] iucv_sock_release+0x3c/0xd0 [af_iucv]
+[452744.124345] [<00000000d574794e>] __sock_release+0x5e/0xe8
+[452744.124815] [<00000000d5747a0c>] sock_close+0x34/0x48
+[452744.124820] [<00000000d5421642>] __fput+0xba/0x268
+[452744.124826] [<00000000d51b382c>] task_work_run+0xbc/0xf0
+[452744.124832] [<00000000d5145710>] do_notify_resume+0x88/0x90
+[452744.124841] [<00000000d5978096>] system_call+0xe2/0x2c8
+[452744.125319] Last Breaking-Event-Address:
+[452744.125321] [<00000000d5930018>] iucv_path_sever+0x90/0x138
+[452744.125324]
+[452744.125325] Kernel panic - not syncing: Fatal exception in interrupt
+
+Note that bh_lock_sock() is not serializing the tasklet context against
+process context, because the check for sock_owned_by_user() and
+corresponding handling is missing.
+
+Ideas for a future clean-up patch:
+A) Correct usage of bh_lock_sock() in tasklet context, as described in
+Link: https://lore.kernel.org/netdev/1280155406.2899.407.camel@edumazet-laptop/
+Re-enqueue, if needed. This may require adding return values to the
+tasklet functions and thus changes to all users of iucv.
+
+B) Change iucv tasklet into worker and use only lock_sock() in af_iucv.
+
+Fixes: 7d316b945352 ("af_iucv: remove IUCV-pathes completely")
+Reviewed-by: Halil Pasic <pasic@linux.ibm.com>
+Signed-off-by: Alexandra Winter <wintera@linux.ibm.com>
+Link: https://patch.msgid.link/20240729122818.947756-1-wintera@linux.ibm.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/iucv/af_iucv.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
+index c3b0b610b0aa3..c00323fa9eb66 100644
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -335,8 +335,8 @@ static void iucv_sever_path(struct sock *sk, int with_user_data)
+ struct iucv_sock *iucv = iucv_sk(sk);
+ struct iucv_path *path = iucv->path;
+
+- if (iucv->path) {
+- iucv->path = NULL;
++ /* Whoever resets the path pointer, must sever and free it. */
++ if (xchg(&iucv->path, NULL)) {
+ if (with_user_data) {
+ low_nmcpy(user_data, iucv->src_name);
+ high_nmcpy(user_data, iucv->dst_name);
+--
+2.43.0
+
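The xchg() above collapses "read the pointer, then clear it" into one atomic step, so exactly one of the two racing paths (process context closing the socket, or the tasklet reacting to a connection-reject callback) sees the non-NULL value and becomes responsible for severing and freeing the path. A user-space C11 rendering of the same ownership handoff, with invented names and plain threads in place of process and tasklet context (build with -pthread):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct path { int id; };

static _Atomic(struct path *) sock_path;	/* plays the role of iucv->path */

/* Both the "close" path and the "callback" path funnel through this:
 * whoever swaps the pointer to NULL owns severing and freeing it. */
static void sever_path(const char *who)
{
	struct path *p = atomic_exchange(&sock_path, NULL);

	if (p) {
		printf("%s severs and frees path %d\n", who, p->id);
		free(p);
	}	/* else: someone else already took care of it */
}

static void *tasklet(void *arg)
{
	(void)arg;
	sever_path("tasklet");
	return NULL;
}

int main(void)
{
	struct path *p = malloc(sizeof(*p));
	pthread_t t;

	if (!p)
		return 1;
	p->id = 1;
	atomic_store(&sock_path, p);

	pthread_create(&t, NULL, tasklet, NULL);
	sever_path("close");	/* may race with the tasklet; only one frees */
	pthread_join(t, NULL);
	return 0;
}
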
--- /dev/null
+From 3d8d31da5e3cec6406ed8a567a7e5a44836d2a91 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Jul 2024 09:16:30 +0300
+Subject: net/mlx5: Always drain health in shutdown callback
+
+From: Shay Drory <shayd@nvidia.com>
+
+[ Upstream commit 1b75da22ed1e6171e261bc9265370162553d5393 ]
+
+There is no point in recovery during device shutdown. If health work
+has already started, we need to wait for it to finish to avoid races
+and NULL pointer access.
+
+Hence, drain health WQ on shutdown callback.
+
+Fixes: 1958fc2f0712 ("net/mlx5: SF, Add auxiliary device driver")
+Fixes: d2aa060d40fa ("net/mlx5: Cancel health poll before sending panic teardown command")
+Signed-off-by: Shay Drory <shayd@nvidia.com>
+Reviewed-by: Moshe Shemesh <moshe@nvidia.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>
+Link: https://patch.msgid.link/20240730061638.1831002-2-tariqt@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/main.c | 2 +-
+ drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c | 1 +
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 459a836a5d9c1..3e55a6c6a7c9b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -2140,7 +2140,6 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
+ /* Panic tear down fw command will stop the PCI bus communication
+ * with the HCA, so the health poll is no longer needed.
+ */
+- mlx5_drain_health_wq(dev);
+ mlx5_stop_health_poll(dev, false);
+
+ ret = mlx5_cmd_fast_teardown_hca(dev);
+@@ -2175,6 +2174,7 @@ static void shutdown(struct pci_dev *pdev)
+
+ mlx5_core_info(dev, "Shutdown was called\n");
+ set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
++ mlx5_drain_health_wq(dev);
+ err = mlx5_try_fast_unload(dev);
+ if (err)
+ mlx5_unload_one(dev, false);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+index b2986175d9afe..b706f1486504a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+@@ -112,6 +112,7 @@ static void mlx5_sf_dev_shutdown(struct auxiliary_device *adev)
+ struct mlx5_core_dev *mdev = sf_dev->mdev;
+
+ set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state);
++ mlx5_drain_health_wq(mdev);
+ mlx5_unload_one(mdev, false);
+ }
+
+--
+2.43.0
+
--- /dev/null
+From a8e734eb35daa2a2ba1ae9a83ddbaae23d0b1e29 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Jul 2024 09:16:31 +0300
+Subject: net/mlx5: Fix error handling in irq_pool_request_irq
+
+From: Shay Drory <shayd@nvidia.com>
+
+[ Upstream commit a4557b0b57c40871ff00da4f623cf79211e052f3 ]
+
+In case mlx5_irq_alloc fails, the previously allocated index remains
+in the XArray, which could lead to inconsistencies.
+
+Fix it by adding error handling that erases the allocated index
+from the XArray if mlx5_irq_alloc returns an error.
+
+Fixes: c36326d38d93 ("net/mlx5: Round-Robin EQs over IRQs")
+Signed-off-by: Shay Drory <shayd@nvidia.com>
+Reviewed-by: Maher Sanalla <msanalla@nvidia.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>
+Link: https://patch.msgid.link/20240730061638.1831002-3-tariqt@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
+index 612e666ec2635..e2230c8f18152 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
+@@ -48,6 +48,7 @@ static struct mlx5_irq *
+ irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
+ {
+ struct irq_affinity_desc auto_desc = {};
++ struct mlx5_irq *irq;
+ u32 irq_index;
+ int err;
+
+@@ -64,9 +65,12 @@ irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_de
+ else
+ cpu_get(pool, cpumask_first(&af_desc->mask));
+ }
+- return mlx5_irq_alloc(pool, irq_index,
+- cpumask_empty(&auto_desc.mask) ? af_desc : &auto_desc,
+- NULL);
++ irq = mlx5_irq_alloc(pool, irq_index,
++ cpumask_empty(&auto_desc.mask) ? af_desc : &auto_desc,
++ NULL);
++ if (IS_ERR(irq))
++ xa_erase(&pool->irqs, irq_index);
++ return irq;
+ }
+
+ /* Looking for the IRQ with the smallest refcount that fits req_mask.
+--
+2.43.0
+
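The fix follows a general rule for two-step setups: if the first step reserves a slot (here the IRQ index in the pool's XArray) and the second step (mlx5_irq_alloc()) fails, the reservation must be released on the error path, otherwise later lookups find a stale entry. A minimal user-space version of the same reserve/commit/undo shape, where the slot table and try_alloc() are invented for the example:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define SLOTS 4

static bool reserved[SLOTS];	/* stand-in for the pool's XArray of IRQs */

static int reserve_slot(unsigned int *idx)
{
	for (unsigned int i = 0; i < SLOTS; i++) {
		if (!reserved[i]) {
			reserved[i] = true;
			*idx = i;
			return 0;
		}
	}
	return -ENOSPC;
}

/* Second step of the setup; fails in this example to show the unwind. */
static void *try_alloc(unsigned int idx)
{
	(void)idx;
	return NULL;
}

static void *request_irq_like(void)
{
	unsigned int idx;
	void *obj;

	if (reserve_slot(&idx))
		return NULL;

	obj = try_alloc(idx);
	if (!obj)
		reserved[idx] = false;	/* the fix: drop the reservation on failure */
	return obj;
}

int main(void)
{
	request_irq_like();
	printf("slot 0 still reserved after failed alloc: %s\n",
	       reserved[0] ? "yes (leak)" : "no");
	return 0;
}
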
--- /dev/null
+From 0ae207084d6946d27b995ec72ff7b204938f2f69 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Jul 2024 09:16:34 +0300
+Subject: net/mlx5: Fix missing lock on sync reset reload
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Moshe Shemesh <moshe@nvidia.com>
+
+[ Upstream commit 572f9caa9e7295f8c8822e4122c7ae8f1c412ff9 ]
+
+In the sync reset reload work, when the remote host updates devlink on
+reload actions performed on that host, it misses taking the devlink lock
+before calling devlink_remote_reload_actions_performed(), which results
+in triggering a lock assert like the following:
+
+WARNING: CPU: 4 PID: 1164 at net/devlink/core.c:261 devl_assert_locked+0x3e/0x50
+…
+ CPU: 4 PID: 1164 Comm: kworker/u96:6 Tainted: G S W 6.10.0-rc2+ #116
+ Hardware name: Supermicro SYS-2028TP-DECTR/X10DRT-PT, BIOS 2.0 12/18/2015
+ Workqueue: mlx5_fw_reset_events mlx5_sync_reset_reload_work [mlx5_core]
+ RIP: 0010:devl_assert_locked+0x3e/0x50
+…
+ Call Trace:
+ <TASK>
+ ? __warn+0xa4/0x210
+ ? devl_assert_locked+0x3e/0x50
+ ? report_bug+0x160/0x280
+ ? handle_bug+0x3f/0x80
+ ? exc_invalid_op+0x17/0x40
+ ? asm_exc_invalid_op+0x1a/0x20
+ ? devl_assert_locked+0x3e/0x50
+ devlink_notify+0x88/0x2b0
+ ? mlx5_attach_device+0x20c/0x230 [mlx5_core]
+ ? __pfx_devlink_notify+0x10/0x10
+ ? process_one_work+0x4b6/0xbb0
+ process_one_work+0x4b6/0xbb0
+[…]
+
+Fixes: 84a433a40d0e ("net/mlx5: Lock mlx5 devlink reload callbacks")
+Signed-off-by: Moshe Shemesh <moshe@nvidia.com>
+Reviewed-by: Maor Gottlieb <maorg@nvidia.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>
+Link: https://patch.msgid.link/20240730061638.1831002-6-tariqt@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+index 979c49ae6b5cc..b43ca0b762c30 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+@@ -207,6 +207,7 @@ int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev)
+ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unloaded)
+ {
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
++ struct devlink *devlink = priv_to_devlink(dev);
+
+ /* if this is the driver that initiated the fw reset, devlink completed the reload */
+ if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) {
+@@ -218,9 +219,11 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unload
+ mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
+ else
+ mlx5_load_one(dev, true);
+- devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0,
++ devl_lock(devlink);
++ devlink_remote_reload_actions_performed(devlink, 0,
+ BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
+ BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
++ devl_unlock(devlink);
+ }
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 9ca8bcdd06fdd514da8f3408dd56678bd38d7c8f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Jul 2024 09:16:33 +0300
+Subject: net/mlx5: Lag, don't use the hardcoded value of the first port
+
+From: Mark Bloch <mbloch@nvidia.com>
+
+[ Upstream commit 3fda84dc090390573cfbd0b1d70372663315de21 ]
+
+The cited commit didn't change the body of the loop as it should.
+It shouldn't be using MLX5_LAG_P1.
+
+Fixes: 7e978e7714d6 ("net/mlx5: Lag, use actual number of lag ports")
+Signed-off-by: Mark Bloch <mbloch@nvidia.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>
+Link: https://patch.msgid.link/20240730061638.1831002-5-tariqt@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+index d0871c46b8c54..cf8045b926892 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+@@ -1538,7 +1538,7 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
+ goto unlock;
+
+ for (i = 0; i < ldev->ports; i++) {
+- if (ldev->pf[MLX5_LAG_P1].netdev == slave) {
++ if (ldev->pf[i].netdev == slave) {
+ port = i;
+ break;
+ }
+--
+2.43.0
+
--- /dev/null
+From fc0f89d2922d2bf5759f4959e4a75cfb2a22f2b6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Jul 2024 09:16:37 +0300
+Subject: net/mlx5e: Add a check for the return value from
+ mlx5_port_set_eth_ptys
+
+From: Shahar Shitrit <shshitrit@nvidia.com>
+
+[ Upstream commit 3f8e82a020a5c22f9b791f4ac499b8e18007fbda ]
+
+Since the documentation for mlx5_toggle_port_link states that it should
+only be used after setting the port register, we add a check for the
+return value from mlx5_port_set_eth_ptys to ensure the register was
+successfully set before calling it.
+
+Fixes: 667daedaecd1 ("net/mlx5e: Toggle link only after modifying port parameters")
+Signed-off-by: Shahar Shitrit <shshitrit@nvidia.com>
+Reviewed-by: Carolina Jubran <cjubran@nvidia.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>
+Link: https://patch.msgid.link/20240730061638.1831002-9-tariqt@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+index 3320f12ba2dbd..58eb96a688533 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -1409,7 +1409,12 @@ static int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
+ if (!an_changes && link_modes == eproto.admin)
+ goto out;
+
+- mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext);
++ err = mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext);
++ if (err) {
++ netdev_err(priv->netdev, "%s: failed to set ptys reg: %d\n", __func__, err);
++ goto out;
++ }
++
+ mlx5_toggle_port_link(mdev);
+
+ out:
+--
+2.43.0
+
--- /dev/null
+From b27c1af78f69dab93d5c2fc92e4f75b010bc91ac Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Jul 2024 09:16:36 +0300
+Subject: net/mlx5e: Fix CT entry update leaks of modify header context
+
+From: Chris Mi <cmi@nvidia.com>
+
+[ Upstream commit 025f2b85a5e5a46df14ecf162c3c80a957a36d0b ]
+
+The cited commit allocates a new modify header to replace the old
+one when updating a CT entry. But if allocating a new one fails, e.g.
+because the maximum number the firmware can support is exceeded, the
+modify header will be an error pointer that triggers a panic when it
+is deallocated. In addition, the old modify header pointer is copied
+to the old attr. When the old attr is freed, the old modify header is
+lost.
+
+Fix it by restoring the old attr into attr when allocating a new
+modify header context fails. That way, when the CT entry is freed,
+the right modify header context is freed with it, and the panic from
+accessing the error pointer is also fixed.
+
+Fixes: 94ceffb48eac ("net/mlx5e: Implement CT entry update")
+Signed-off-by: Chris Mi <cmi@nvidia.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>
+Link: https://patch.msgid.link/20240730061638.1831002-8-tariqt@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+index fadfa8b50bebe..8c4e3ecef5901 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+@@ -920,6 +920,7 @@ mlx5_tc_ct_entry_replace_rule(struct mlx5_tc_ct_priv *ct_priv,
+ mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, mh);
+ mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
+ err_mod_hdr:
++ *attr = *old_attr;
+ kfree(old_attr);
+ err_attr:
+ kvfree(spec);
+--
+2.43.0
+
--- /dev/null
+From f62959158290f97eb5cfd64028fce5d4b4030b11 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Jul 2024 09:16:35 +0300
+Subject: net/mlx5e: Require mlx5 tc classifier action support for IPsec prio
+ capability
+
+From: Rahul Rameshbabu <rrameshbabu@nvidia.com>
+
+[ Upstream commit 06827e27fdcd197557be72b2229dbd362303794f ]
+
+Require mlx5 classifier action support when creating IPsec chains in
+the offload path. MLX5_IPSEC_CAP_PRIO should only be set if
+CONFIG_MLX5_CLS_ACT is enabled. If CONFIG_MLX5_CLS_ACT=n and
+MLX5_IPSEC_CAP_PRIO is set, configuring IPsec offload will fail because
+the mlx5 IPsec chain rules cannot be created without classifier action
+support.
+
+Fixes: fa5aa2f89073 ("net/mlx5e: Use chains for IPsec policy priority offload")
+Signed-off-by: Rahul Rameshbabu <rrameshbabu@nvidia.com>
+Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>
+Link: https://patch.msgid.link/20240730061638.1831002-7-tariqt@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+index 6e00afe4671b7..797db853de363 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+@@ -51,9 +51,10 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap))
+ caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;
+
+- if ((MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
+- MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level)) ||
+- MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level))
++ if (IS_ENABLED(CONFIG_MLX5_CLS_ACT) &&
++ ((MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
++ MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level)) ||
++ MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level)))
+ caps |= MLX5_IPSEC_CAP_PRIO;
+
+ if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
+--
+2.43.0
+
--- /dev/null
+From e331e73ff4c5c89a7f51a465ae40a7ad9fcd7a28 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 27 Jun 2024 16:33:46 +0100
+Subject: net: move ethtool-related netdev state into its own struct
+
+From: Edward Cree <ecree.xilinx@gmail.com>
+
+[ Upstream commit 3ebbd9f6de7ec6d538639ebb657246f629ace81e ]
+
+net_dev->ethtool is a pointer to the new struct ethtool_netdev_state,
+which currently contains only the wol_enabled field.
+
+Suggested-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Edward Cree <ecree.xilinx@gmail.com>
+Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
+Link: https://patch.msgid.link/293a562278371de7534ed1eb17531838ca090633.1719502239.git.ecree.xilinx@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 7195f0ef7f5b ("ethtool: fix setting key and resetting indir at once")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/realtek/r8169_main.c | 4 ++--
+ drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c | 4 ++--
+ drivers/net/ethernet/wangxun/ngbe/ngbe_main.c | 2 +-
+ drivers/net/phy/phy.c | 2 +-
+ drivers/net/phy/phy_device.c | 5 +++--
+ drivers/net/phy/phylink.c | 2 +-
+ include/linux/ethtool.h | 8 ++++++++
+ include/linux/netdevice.h | 8 +++++---
+ net/core/dev.c | 4 ++++
+ net/ethtool/ioctl.c | 2 +-
+ net/ethtool/wol.c | 2 +-
+ 11 files changed, 29 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 7b9e04884575e..a06ac1a7a1822 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -1608,7 +1608,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
+
+ if (!tp->dash_enabled) {
+ rtl_set_d3_pll_down(tp, !wolopts);
+- tp->dev->wol_enabled = wolopts ? 1 : 0;
++ tp->dev->ethtool->wol_enabled = wolopts ? 1 : 0;
+ }
+ }
+
+@@ -5478,7 +5478,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ rtl_set_d3_pll_down(tp, true);
+ } else {
+ rtl_set_d3_pll_down(tp, false);
+- dev->wol_enabled = 1;
++ dev->ethtool->wol_enabled = 1;
+ }
+
+ jumbo_max = rtl_jumbo_max(tp);
+diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
+index 46a5a3e952021..e868f7ef49203 100644
+--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
++++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
+@@ -37,9 +37,9 @@ static int ngbe_set_wol(struct net_device *netdev,
+ wx->wol = 0;
+ if (wol->wolopts & WAKE_MAGIC)
+ wx->wol = WX_PSR_WKUP_CTL_MAG;
+- netdev->wol_enabled = !!(wx->wol);
++ netdev->ethtool->wol_enabled = !!(wx->wol);
+ wr32(wx, WX_PSR_WKUP_CTL, wx->wol);
+- device_set_wakeup_enable(&pdev->dev, netdev->wol_enabled);
++ device_set_wakeup_enable(&pdev->dev, netdev->ethtool->wol_enabled);
+
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+index af30ca0312b81..53aeae2f884b0 100644
+--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
++++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+@@ -652,7 +652,7 @@ static int ngbe_probe(struct pci_dev *pdev,
+ if (wx->wol_hw_supported)
+ wx->wol = NGBE_PSR_WKUP_CTL_MAG;
+
+- netdev->wol_enabled = !!(wx->wol);
++ netdev->ethtool->wol_enabled = !!(wx->wol);
+ wr32(wx, NGBE_PSR_WKUP_CTL, wx->wol);
+ device_set_wakeup_enable(&pdev->dev, wx->wol);
+
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index c4236564c1cd0..785182fa5fe01 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -1309,7 +1309,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
+ if (netdev) {
+ struct device *parent = netdev->dev.parent;
+
+- if (netdev->wol_enabled)
++ if (netdev->ethtool->wol_enabled)
+ pm_system_wakeup();
+ else if (device_may_wakeup(&netdev->dev))
+ pm_wakeup_dev_event(&netdev->dev, 0, true);
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index 6c6ec94757092..473cbc1d497b3 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -296,7 +296,7 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
+ if (!netdev)
+ goto out;
+
+- if (netdev->wol_enabled)
++ if (netdev->ethtool->wol_enabled)
+ return false;
+
+ /* As long as not all affected network drivers support the
+@@ -1984,7 +1984,8 @@ int phy_suspend(struct phy_device *phydev)
+ return 0;
+
+ phy_ethtool_get_wol(phydev, &wol);
+- phydev->wol_enabled = wol.wolopts || (netdev && netdev->wol_enabled);
++ phydev->wol_enabled = wol.wolopts ||
++ (netdev && netdev->ethtool->wol_enabled);
+ /* If the device has WOL enabled, we cannot suspend the PHY */
+ if (phydev->wol_enabled && !(phydrv->flags & PHY_ALWAYS_CALL_SUSPEND))
+ return -EBUSY;
+diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
+index 994471fad833f..d73a7bfb355ed 100644
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -2270,7 +2270,7 @@ void phylink_suspend(struct phylink *pl, bool mac_wol)
+ {
+ ASSERT_RTNL();
+
+- if (mac_wol && (!pl->netdev || pl->netdev->wol_enabled)) {
++ if (mac_wol && (!pl->netdev || pl->netdev->ethtool->wol_enabled)) {
+ /* Wake-on-Lan enabled, MAC handling */
+ mutex_lock(&pl->state_mutex);
+
+diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
+index 6fd9107d3cc01..8cd6b3c993f17 100644
+--- a/include/linux/ethtool.h
++++ b/include/linux/ethtool.h
+@@ -998,6 +998,14 @@ int ethtool_virtdev_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd,
+ u32 *dev_speed, u8 *dev_duplex);
+
++/**
++ * struct ethtool_netdev_state - per-netdevice state for ethtool features
++ * @wol_enabled: Wake-on-LAN is enabled
++ */
++struct ethtool_netdev_state {
++ unsigned wol_enabled:1;
++};
++
+ struct phy_device;
+ struct phy_tdr_config;
+ struct phy_plca_cfg;
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index ccba9f145edaa..e1ef352f592fa 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -79,6 +79,7 @@ struct xdp_buff;
+ struct xdp_frame;
+ struct xdp_metadata_ops;
+ struct xdp_md;
++struct ethtool_netdev_state;
+
+ typedef u32 xdp_features_t;
+
+@@ -1985,8 +1986,6 @@ enum netdev_reg_state {
+ * switch driver and used to set the phys state of the
+ * switch port.
+ *
+- * @wol_enabled: Wake-on-LAN is enabled
+- *
+ * @threaded: napi threaded mode is enabled
+ *
+ * @module_fw_flash_in_progress: Module firmware flashing is in progress.
+@@ -2000,6 +1999,7 @@ enum netdev_reg_state {
+ * @udp_tunnel_nic_info: static structure describing the UDP tunnel
+ * offload capabilities of the device
+ * @udp_tunnel_nic: UDP tunnel offload state
++ * @ethtool: ethtool related state
+ * @xdp_state: stores info on attached XDP BPF programs
+ *
+ * @nested_level: Used as a parameter of spin_lock_nested() of
+@@ -2374,7 +2374,7 @@ struct net_device {
+ struct lock_class_key *qdisc_tx_busylock;
+ bool proto_down;
+ bool threaded;
+- unsigned wol_enabled:1;
++
+ unsigned module_fw_flash_in_progress:1;
+ struct list_head net_notifier_list;
+
+@@ -2385,6 +2385,8 @@ struct net_device {
+ const struct udp_tunnel_nic_info *udp_tunnel_nic_info;
+ struct udp_tunnel_nic *udp_tunnel_nic;
+
++ struct ethtool_netdev_state *ethtool;
++
+ /* protected by rtnl_lock */
+ struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE];
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 2b4819b610b8a..250752751811c 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -11015,6 +11015,9 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
+ dev->real_num_rx_queues = rxqs;
+ if (netif_alloc_rx_queues(dev))
+ goto free_all;
++ dev->ethtool = kzalloc(sizeof(*dev->ethtool), GFP_KERNEL_ACCOUNT);
++ if (!dev->ethtool)
++ goto free_all;
+
+ strcpy(dev->name, name);
+ dev->name_assign_type = name_assign_type;
+@@ -11065,6 +11068,7 @@ void free_netdev(struct net_device *dev)
+ return;
+ }
+
++ kfree(dev->ethtool);
+ netif_free_tx_queues(dev);
+ netif_free_rx_queues(dev);
+
+diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
+index 3c8821adc4891..8d85a9900538b 100644
+--- a/net/ethtool/ioctl.c
++++ b/net/ethtool/ioctl.c
+@@ -1510,7 +1510,7 @@ static int ethtool_set_wol(struct net_device *dev, char __user *useraddr)
+ if (ret)
+ return ret;
+
+- dev->wol_enabled = !!wol.wolopts;
++ dev->ethtool->wol_enabled = !!wol.wolopts;
+ ethtool_notify(dev, ETHTOOL_MSG_WOL_NTF, NULL);
+
+ return 0;
+diff --git a/net/ethtool/wol.c b/net/ethtool/wol.c
+index 0ed56c9ac1bc4..a39d8000d808a 100644
+--- a/net/ethtool/wol.c
++++ b/net/ethtool/wol.c
+@@ -137,7 +137,7 @@ ethnl_set_wol(struct ethnl_req_info *req_info, struct genl_info *info)
+ ret = dev->ethtool_ops->set_wol(dev, &wol);
+ if (ret)
+ return ret;
+- dev->wol_enabled = !!wol.wolopts;
++ dev->ethtool->wol_enabled = !!wol.wolopts;
+ return 1;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 0914b0195977616b6a4f79d2c6e2efafa2ab169c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Jul 2024 11:06:56 -0500
+Subject: net: mvpp2: Don't re-use loop iterator
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+[ Upstream commit 0aa3ca956c46d849775eae1816cef8fe4bc8b50e ]
+
+This function has a nested loop. The problem is that both the inner
+and outer loops use the same variable as an iterator. I found this
+via static analysis so I'm not sure of the impact. It could be that
+it loops forever or, more likely, that the loop exits early.
+Fixes: 3a616b92a9d1 ("net: mvpp2: Add TX flow control support for jumbo frames")
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/eaa8f403-7779-4d81-973d-a9ecddc0bf6f@stanley.mountain
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+index 9adf4301c9b1d..a40b631188866 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+@@ -953,13 +953,13 @@ static void mvpp2_bm_pool_update_fc(struct mvpp2_port *port,
+ static void mvpp2_bm_pool_update_priv_fc(struct mvpp2 *priv, bool en)
+ {
+ struct mvpp2_port *port;
+- int i;
++ int i, j;
+
+ for (i = 0; i < priv->port_count; i++) {
+ port = priv->port_list[i];
+ if (port->priv->percpu_pools) {
+- for (i = 0; i < port->nrxqs; i++)
+- mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[i],
++ for (j = 0; j < port->nrxqs; j++)
++ mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[j],
+ port->tx_fc & en);
+ } else {
+ mvpp2_bm_pool_update_fc(port, port->pool_long, port->tx_fc & en);
+--
+2.43.0
+
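The effect of reusing the outer iterator is easy to reproduce outside the driver: once the inner loop runs, the shared variable is already past the outer bound, so only the first port is ever visited. A tiny self-contained C demo of the before and after behaviour; the port and queue counts are made up and nothing here is taken from mvpp2:

#include <stdio.h>

#define PORTS 3
#define RXQS  4

int main(void)
{
	int visited_buggy = 0, visited_fixed = 0;
	int i, j;

	/* Buggy shape: the inner loop reuses 'i', so after the first port
	 * 'i' is already RXQS and the outer loop terminates early. */
	for (i = 0; i < PORTS; i++) {
		visited_buggy++;
		for (i = 0; i < RXQS; i++)
			;	/* per-queue work would go here */
	}

	/* Fixed shape: a separate iterator for the inner loop. */
	for (i = 0; i < PORTS; i++) {
		visited_fixed++;
		for (j = 0; j < RXQS; j++)
			;
	}

	printf("ports visited: buggy=%d fixed=%d\n", visited_buggy, visited_fixed);
	return 0;
}
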
--- /dev/null
+From abf213f6afe6e5f3a4485a7172d958fc6e76f83a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 25 Jul 2024 12:41:25 +0530
+Subject: net: phy: micrel: Fix the KSZ9131 MDI-X status issue
+
+From: Raju Lakkaraju <Raju.Lakkaraju@microchip.com>
+
+[ Upstream commit 84383b5ef4cd21b4a67de92afdc05a03b5247db9 ]
+
+The MDIX status does not accurately reflect the current state after the link
+partner has manually altered its MDIX configuration while operating in forced
+mode.
+
+Read the auto MDI/MDI-X completion and pair-selection information from the
+KSZ9131's Auto/MDI/MDI-X status register instead.
+
+Fixes: b64e6a8794d9 ("net: phy: micrel: Add PHY Auto/MDI/MDI-X set driver for KSZ9131")
+Signed-off-by: Raju Lakkaraju <Raju.Lakkaraju@microchip.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Link: https://patch.msgid.link/20240725071125.13960-1-Raju.Lakkaraju@microchip.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/phy/micrel.c | 34 +++++++++++++++++++---------------
+ 1 file changed, 19 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index ebafedde0ab74..0803b6e83cf74 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -1389,6 +1389,8 @@ static int ksz9131_config_init(struct phy_device *phydev)
+ const struct device *dev_walker;
+ int ret;
+
++ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
++
+ dev_walker = &phydev->mdio.dev;
+ do {
+ of_node = dev_walker->of_node;
+@@ -1438,28 +1440,30 @@ static int ksz9131_config_init(struct phy_device *phydev)
+ #define MII_KSZ9131_AUTO_MDIX 0x1C
+ #define MII_KSZ9131_AUTO_MDI_SET BIT(7)
+ #define MII_KSZ9131_AUTO_MDIX_SWAP_OFF BIT(6)
++#define MII_KSZ9131_DIG_AXAN_STS 0x14
++#define MII_KSZ9131_DIG_AXAN_STS_LINK_DET BIT(14)
++#define MII_KSZ9131_DIG_AXAN_STS_A_SELECT BIT(12)
+
+ static int ksz9131_mdix_update(struct phy_device *phydev)
+ {
+ int ret;
+
+- ret = phy_read(phydev, MII_KSZ9131_AUTO_MDIX);
+- if (ret < 0)
+- return ret;
+-
+- if (ret & MII_KSZ9131_AUTO_MDIX_SWAP_OFF) {
+- if (ret & MII_KSZ9131_AUTO_MDI_SET)
+- phydev->mdix_ctrl = ETH_TP_MDI;
+- else
+- phydev->mdix_ctrl = ETH_TP_MDI_X;
++ if (phydev->mdix_ctrl != ETH_TP_MDI_AUTO) {
++ phydev->mdix = phydev->mdix_ctrl;
+ } else {
+- phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+- }
++ ret = phy_read(phydev, MII_KSZ9131_DIG_AXAN_STS);
++ if (ret < 0)
++ return ret;
+
+- if (ret & MII_KSZ9131_AUTO_MDI_SET)
+- phydev->mdix = ETH_TP_MDI;
+- else
+- phydev->mdix = ETH_TP_MDI_X;
++ if (ret & MII_KSZ9131_DIG_AXAN_STS_LINK_DET) {
++ if (ret & MII_KSZ9131_DIG_AXAN_STS_A_SELECT)
++ phydev->mdix = ETH_TP_MDI;
++ else
++ phydev->mdix = ETH_TP_MDI_X;
++ } else {
++ phydev->mdix = ETH_TP_MDI_INVALID;
++ }
++ }
+
+ return 0;
+ }
+--
+2.43.0
+
--- /dev/null
+From 79edb334953c197ae471c716aa011aad7aa724c9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 25 Jul 2024 16:41:44 -0400
+Subject: net: phy: realtek: add support for RTL8366S Gigabit PHY
+
+From: Mark Mentovai <mark@mentovai.com>
+
+[ Upstream commit 225990c487c1023e7b3aa89beb6a68011fbc0461 ]
+
+The PHY built in to the Realtek RTL8366S switch controller was
+previously supported by genphy_driver. This PHY does not implement MMD
+operations. Since commit 9b01c885be36 ("net: phy: c22: migrate to
+genphy_c45_write_eee_adv()"), MMD register reads have been made during
+phy_probe to determine EEE support. For genphy_driver, these reads are
+transformed into 802.3 annex 22D clause 45-over-clause 22
+mmd_phy_indirect operations that perform MII register writes to
+MII_MMD_CTRL and MII_MMD_DATA. This overwrites those two MII registers,
+which on this PHY are reserved and have another function, rendering the
+PHY unusable while so configured.
+
+Proper support for this PHY is restored by providing a phy_driver that
+declares MMD operations as unsupported by using the helper functions
+provided for that purpose, while remaining otherwise identical to
+genphy_driver.
+
+Fixes: 9b01c885be36 ("net: phy: c22: migrate to genphy_c45_write_eee_adv()")
+Reported-by: Russell Senior <russell@personaltelco.net>
+Closes: https://github.com/openwrt/openwrt/issues/15981
+Link: https://github.com/openwrt/openwrt/issues/15739
+Signed-off-by: Mark Mentovai <mark@mentovai.com>
+Reviewed-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/phy/realtek.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
+index 7ab41f95dae5f..ffa07c3f04c26 100644
+--- a/drivers/net/phy/realtek.c
++++ b/drivers/net/phy/realtek.c
+@@ -1351,6 +1351,13 @@ static struct phy_driver realtek_drvs[] = {
+ .handle_interrupt = genphy_handle_interrupt_no_ack,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
++ }, {
++ PHY_ID_MATCH_EXACT(0x001cc960),
++ .name = "RTL8366S Gigabit Ethernet",
++ .suspend = genphy_suspend,
++ .resume = genphy_resume,
++ .read_mmd = genphy_read_mmd_unsupported,
++ .write_mmd = genphy_write_mmd_unsupported,
+ },
+ };
+
+--
+2.43.0
+
--- /dev/null
+From cad98201418045c082a71219349d61822eed3925 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 25 Jul 2024 12:28:20 -0700
+Subject: netfilter: iptables: Fix null-ptr-deref in iptable_nat_table_init().
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit 5830aa863981d43560748aa93589c0695191d95d ]
+
+We had a report that iptables-restore sometimes triggered null-ptr-deref
+at boot time. [0]
+
+The problem is that iptable_nat_table_init() is exposed to user space
+before the kernel fully initialises netns.
+
+In the small race window, a user could call iptable_nat_table_init()
+that accesses net_generic(net, iptable_nat_net_id), which is available
+only after registering iptable_nat_net_ops.
+
+Let's call register_pernet_subsys() before xt_register_template().
+
+[0]:
+bpfilter: Loaded bpfilter_umh pid 11702
+Started bpfilter
+BUG: kernel NULL pointer dereference, address: 0000000000000013
+ #PF: supervisor write access in kernel mode
+ #PF: error_code(0x0002) - not-present page
+PGD 0 P4D 0
+PREEMPT SMP NOPTI
+CPU: 2 PID: 11879 Comm: iptables-restor Not tainted 6.1.92-99.174.amzn2023.x86_64 #1
+Hardware name: Amazon EC2 c6i.4xlarge/, BIOS 1.0 10/16/2017
+RIP: 0010:iptable_nat_table_init (net/ipv4/netfilter/iptable_nat.c:87 net/ipv4/netfilter/iptable_nat.c:121) iptable_nat
+Code: 10 4c 89 f6 48 89 ef e8 0b 19 bb ff 41 89 c4 85 c0 75 38 41 83 c7 01 49 83 c6 28 41 83 ff 04 75 dc 48 8b 44 24 08 48 8b 0c 24 <48> 89 08 4c 89 ef e8 a2 3b a2 cf 48 83 c4 10 44 89 e0 5b 5d 41 5c
+RSP: 0018:ffffbef902843cd0 EFLAGS: 00010246
+RAX: 0000000000000013 RBX: ffff9f4b052caa20 RCX: ffff9f4b20988d80
+RDX: 0000000000000000 RSI: 0000000000000064 RDI: ffffffffc04201c0
+RBP: ffff9f4b29394000 R08: ffff9f4b07f77258 R09: ffff9f4b07f77240
+R10: 0000000000000000 R11: ffff9f4b09635388 R12: 0000000000000000
+R13: ffff9f4b1a3c6c00 R14: ffff9f4b20988e20 R15: 0000000000000004
+FS: 00007f6284340000(0000) GS:ffff9f51fe280000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 0000000000000013 CR3: 00000001d10a6005 CR4: 00000000007706e0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+PKRU: 55555554
+Call Trace:
+ <TASK>
+ ? show_trace_log_lvl (arch/x86/kernel/dumpstack.c:259)
+ ? show_trace_log_lvl (arch/x86/kernel/dumpstack.c:259)
+ ? xt_find_table_lock (net/netfilter/x_tables.c:1259)
+ ? __die_body.cold (arch/x86/kernel/dumpstack.c:478 arch/x86/kernel/dumpstack.c:420)
+ ? page_fault_oops (arch/x86/mm/fault.c:727)
+ ? exc_page_fault (./arch/x86/include/asm/irqflags.h:40 ./arch/x86/include/asm/irqflags.h:75 arch/x86/mm/fault.c:1470 arch/x86/mm/fault.c:1518)
+ ? asm_exc_page_fault (./arch/x86/include/asm/idtentry.h:570)
+ ? iptable_nat_table_init (net/ipv4/netfilter/iptable_nat.c:87 net/ipv4/netfilter/iptable_nat.c:121) iptable_nat
+ xt_find_table_lock (net/netfilter/x_tables.c:1259)
+ xt_request_find_table_lock (net/netfilter/x_tables.c:1287)
+ get_info (net/ipv4/netfilter/ip_tables.c:965)
+ ? security_capable (security/security.c:809 (discriminator 13))
+ ? ns_capable (kernel/capability.c:376 kernel/capability.c:397)
+ ? do_ipt_get_ctl (net/ipv4/netfilter/ip_tables.c:1656)
+ ? bpfilter_send_req (net/bpfilter/bpfilter_kern.c:52) bpfilter
+ nf_getsockopt (net/netfilter/nf_sockopt.c:116)
+ ip_getsockopt (net/ipv4/ip_sockglue.c:1827)
+ __sys_getsockopt (net/socket.c:2327)
+ __x64_sys_getsockopt (net/socket.c:2342 net/socket.c:2339 net/socket.c:2339)
+ do_syscall_64 (arch/x86/entry/common.c:51 arch/x86/entry/common.c:81)
+ entry_SYSCALL_64_after_hwframe (arch/x86/entry/entry_64.S:121)
+RIP: 0033:0x7f62844685ee
+Code: 48 8b 0d 45 28 0f 00 f7 d8 64 89 01 48 83 c8 ff c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa 49 89 ca b8 37 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 0a c3 66 0f 1f 84 00 00 00 00 00 48 8b 15 09
+RSP: 002b:00007ffd1f83d638 EFLAGS: 00000246 ORIG_RAX: 0000000000000037
+RAX: ffffffffffffffda RBX: 00007ffd1f83d680 RCX: 00007f62844685ee
+RDX: 0000000000000040 RSI: 0000000000000000 RDI: 0000000000000004
+RBP: 0000000000000004 R08: 00007ffd1f83d670 R09: 0000558798ffa2a0
+R10: 00007ffd1f83d680 R11: 0000000000000246 R12: 00007ffd1f83e3b2
+R13: 00007f628455baa0 R14: 00007ffd1f83d7b0 R15: 00007f628457a008
+ </TASK>
+Modules linked in: iptable_nat(+) bpfilter rpcsec_gss_krb5 auth_rpcgss nfsv4 dns_resolver nfs lockd grace fscache veth xt_state xt_connmark xt_nat xt_statistic xt_MASQUERADE xt_mark xt_addrtype ipt_REJECT nf_reject_ipv4 nft_chain_nat nf_nat xt_conntrack nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 xt_comment nft_compat nf_tables nfnetlink overlay nls_ascii nls_cp437 vfat fat ghash_clmulni_intel aesni_intel ena crypto_simd ptp cryptd i8042 pps_core serio button sunrpc sch_fq_codel configfs loop dm_mod fuse dax dmi_sysfs crc32_pclmul crc32c_intel efivarfs
+CR2: 0000000000000013
+
+Fixes: fdacd57c79b7 ("netfilter: x_tables: never register tables by default")
+Reported-by: Takahiro Kawahara <takawaha@amazon.co.jp>
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Reviewed-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/netfilter/iptable_nat.c | 18 ++++++++++--------
+ 1 file changed, 10 insertions(+), 8 deletions(-)
+
+diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c
+index 4d42d0756fd70..a5db7c67d61be 100644
+--- a/net/ipv4/netfilter/iptable_nat.c
++++ b/net/ipv4/netfilter/iptable_nat.c
+@@ -145,25 +145,27 @@ static struct pernet_operations iptable_nat_net_ops = {
+
+ static int __init iptable_nat_init(void)
+ {
+- int ret = xt_register_template(&nf_nat_ipv4_table,
+- iptable_nat_table_init);
++ int ret;
+
++ /* net->gen->ptr[iptable_nat_net_id] must be allocated
++ * before calling iptable_nat_table_init().
++ */
++ ret = register_pernet_subsys(&iptable_nat_net_ops);
+ if (ret < 0)
+ return ret;
+
+- ret = register_pernet_subsys(&iptable_nat_net_ops);
+- if (ret < 0) {
+- xt_unregister_template(&nf_nat_ipv4_table);
+- return ret;
+- }
++ ret = xt_register_template(&nf_nat_ipv4_table,
++ iptable_nat_table_init);
++ if (ret < 0)
++ unregister_pernet_subsys(&iptable_nat_net_ops);
+
+ return ret;
+ }
+
+ static void __exit iptable_nat_exit(void)
+ {
+- unregister_pernet_subsys(&iptable_nat_net_ops);
+ xt_unregister_template(&nf_nat_ipv4_table);
++ unregister_pernet_subsys(&iptable_nat_net_ops);
+ }
+
+ module_init(iptable_nat_init);
+--
+2.43.0
+
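The underlying rule is about initialisation order: anything a callback can reach once it is published (here the per-netns area behind net_generic()) must exist before the callback is registered, and teardown runs in the reverse order. A generic user-space sketch of that pattern, where publish-by-function-pointer and the backing allocation stand in for xt_register_template() and register_pernet_subsys():

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int *backing;			/* stand-in for the per-netns area      */
static void (*published_hook)(void);	/* stand-in for the registered template */

static void hook(void)
{
	/* Would crash if the hook became callable before 'backing' existed. */
	(*backing)++;
}

static int module_init_like(void)
{
	backing = calloc(1, sizeof(*backing));	/* register_pernet_subsys() */
	if (!backing)
		return -ENOMEM;

	published_hook = hook;			/* xt_register_template(): now visible */
	return 0;
}

static void module_exit_like(void)
{
	published_hook = NULL;			/* unregister in reverse order */
	free(backing);
	backing = NULL;
}

int main(void)
{
	if (module_init_like())
		return 1;
	published_hook();			/* safe: backing exists already */
	printf("backing = %d\n", *backing);
	module_exit_like();
	return 0;
}
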
--- /dev/null
+From 4df67c25251a829f5482c093ffb6da7407f59564 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 25 Jul 2024 12:28:21 -0700
+Subject: netfilter: iptables: Fix potential null-ptr-deref in
+ ip6table_nat_table_init().
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit c22921df777de5606f1047b1345b8d22ef1c0b34 ]
+
+ip6table_nat_table_init() accesses net->gen->ptr[ip6table_nat_net_ops.id],
+but the function is exposed to user space before the entry is allocated
+via register_pernet_subsys().
+
+Let's call register_pernet_subsys() before xt_register_template().
+
+Fixes: fdacd57c79b7 ("netfilter: x_tables: never register tables by default")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Reviewed-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/netfilter/ip6table_nat.c | 14 +++++++++-----
+ 1 file changed, 9 insertions(+), 5 deletions(-)
+
+diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c
+index 52cf104e34788..e119d4f090cc8 100644
+--- a/net/ipv6/netfilter/ip6table_nat.c
++++ b/net/ipv6/netfilter/ip6table_nat.c
+@@ -147,23 +147,27 @@ static struct pernet_operations ip6table_nat_net_ops = {
+
+ static int __init ip6table_nat_init(void)
+ {
+- int ret = xt_register_template(&nf_nat_ipv6_table,
+- ip6table_nat_table_init);
++ int ret;
+
++ /* net->gen->ptr[ip6table_nat_net_id] must be allocated
++ * before calling ip6t_nat_register_lookups().
++ */
++ ret = register_pernet_subsys(&ip6table_nat_net_ops);
+ if (ret < 0)
+ return ret;
+
+- ret = register_pernet_subsys(&ip6table_nat_net_ops);
++ ret = xt_register_template(&nf_nat_ipv6_table,
++ ip6table_nat_table_init);
+ if (ret)
+- xt_unregister_template(&nf_nat_ipv6_table);
++ unregister_pernet_subsys(&ip6table_nat_net_ops);
+
+ return ret;
+ }
+
+ static void __exit ip6table_nat_exit(void)
+ {
+- unregister_pernet_subsys(&ip6table_nat_net_ops);
+ xt_unregister_template(&nf_nat_ipv6_table);
++ unregister_pernet_subsys(&ip6table_nat_net_ops);
+ }
+
+ module_init(ip6table_nat_init);
+--
+2.43.0
+
--- /dev/null
+From 9ace36fc10c20a48839d524d45daa828cbb52f62 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Jul 2024 16:42:48 -0700
+Subject: netlink: specs: correct the spec of ethtool
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit a40c7a24f97edda025f53cfe8f0bc6a6e3c12fa6 ]
+
+The spec for Ethtool is a bit inaccurate. We don't currently
+support dump. Context is only accepted as input and not echoed
+to output (which is a separate bug).
+
+Fixes: a353318ebf24 ("tools: ynl: populate most of the ethtool spec")
+Acked-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Joe Damato <jdamato@fastly.com>
+Link: https://patch.msgid.link/20240724234249.2621109-2-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Documentation/netlink/specs/ethtool.yaml | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/Documentation/netlink/specs/ethtool.yaml b/Documentation/netlink/specs/ethtool.yaml
+index 4510e8d1adcb8..3632c1c891e94 100644
+--- a/Documentation/netlink/specs/ethtool.yaml
++++ b/Documentation/netlink/specs/ethtool.yaml
+@@ -1634,15 +1634,14 @@ operations:
+ request:
+ attributes:
+ - header
++ - context
+ reply:
+ attributes:
+ - header
+- - context
+ - hfunc
+ - indir
+ - hkey
+ - input_xfrm
+- dump: *rss-get-op
+ -
+ name: plca-get-cfg
+ doc: Get PLCA params.
+--
+2.43.0
+
--- /dev/null
+From 6d5340ef35c57640a7c1a59b51e13cb31c7c8f86 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 19 Jul 2024 19:50:18 +0800
+Subject: perf arch events: Fix duplicate RISC-V SBI firmware event name
+
+From: Eric Lin <eric.lin@sifive.com>
+
+[ Upstream commit 63ba5b0fb4f54db256ec43b3062b2606b383055d ]
+
+Currently, the RISC-V firmware JSON file has duplicate event name
+"FW_SFENCE_VMA_RECEIVED". According to the RISC-V SBI PMU extension[1],
+the event name should be "FW_SFENCE_VMA_ASID_SENT".
+
+Before this patch:
+$ perf list
+
+firmware:
+ fw_access_load
+ [Load access trap event. Unit: cpu]
+ fw_access_store
+ [Store access trap event. Unit: cpu]
+....
+ fw_set_timer
+ [Set timer event. Unit: cpu]
+ fw_sfence_vma_asid_received
+ [Received SFENCE.VMA with ASID request from other HART event. Unit: cpu]
+ fw_sfence_vma_received
+ [Sent SFENCE.VMA with ASID request to other HART event. Unit: cpu]
+
+After this patch:
+$ perf list
+
+firmware:
+ fw_access_load
+ [Load access trap event. Unit: cpu]
+ fw_access_store
+ [Store access trap event. Unit: cpu]
+.....
+ fw_set_timer
+ [Set timer event. Unit: cpu]
+ fw_sfence_vma_asid_received
+ [Received SFENCE.VMA with ASID request from other HART event. Unit: cpu]
+ fw_sfence_vma_asid_sent
+ [Sent SFENCE.VMA with ASID request to other HART event. Unit: cpu]
+ fw_sfence_vma_received
+ [Received SFENCE.VMA request from other HART event. Unit: cpu]
+
+Link: https://github.com/riscv-non-isa/riscv-sbi-doc/blob/master/src/ext-pmu.adoc#event-firmware-events-type-15 [1]
+Fixes: 8f0dcb4e7364 ("perf arch events: riscv sbi firmware std event files")
+Fixes: c4f769d4093d ("perf vendor events riscv: add Sifive U74 JSON file")
+Fixes: acbf6de674ef ("perf vendor events riscv: Add StarFive Dubhe-80 JSON file")
+Fixes: 7340c6df49df ("perf vendor events riscv: add T-HEAD C9xx JSON file")
+Fixes: f5102e31c209 ("riscv: andes: Support specifying symbolic firmware and hardware raw event")
+Signed-off-by: Eric Lin <eric.lin@sifive.com>
+Reviewed-by: Samuel Holland <samuel.holland@sifive.com>
+Reviewed-by: Nikita Shubin <n.shubin@yadro.com>
+Reviewed-by: Inochi Amaoto <inochiama@outlook.com>
+Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
+Reviewed-by: Atish Patra <atishp@rivosinc.com>
+Link: https://lore.kernel.org/r/20240719115018.27356-1-eric.lin@sifive.com
+Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/pmu-events/arch/riscv/andes/ax45/firmware.json | 2 +-
+ tools/perf/pmu-events/arch/riscv/riscv-sbi-firmware.json | 2 +-
+ tools/perf/pmu-events/arch/riscv/sifive/u74/firmware.json | 2 +-
+ .../perf/pmu-events/arch/riscv/starfive/dubhe-80/firmware.json | 2 +-
+ .../perf/pmu-events/arch/riscv/thead/c900-legacy/firmware.json | 2 +-
+ 5 files changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/tools/perf/pmu-events/arch/riscv/andes/ax45/firmware.json b/tools/perf/pmu-events/arch/riscv/andes/ax45/firmware.json
+index 9b4a032186a7b..7149caec4f80e 100644
+--- a/tools/perf/pmu-events/arch/riscv/andes/ax45/firmware.json
++++ b/tools/perf/pmu-events/arch/riscv/andes/ax45/firmware.json
+@@ -36,7 +36,7 @@
+ "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
+ },
+ {
+- "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
++ "ArchStdEvent": "FW_SFENCE_VMA_ASID_SENT"
+ },
+ {
+ "ArchStdEvent": "FW_SFENCE_VMA_ASID_RECEIVED"
+diff --git a/tools/perf/pmu-events/arch/riscv/riscv-sbi-firmware.json b/tools/perf/pmu-events/arch/riscv/riscv-sbi-firmware.json
+index a9939823b14b5..0c9b9a2d2958a 100644
+--- a/tools/perf/pmu-events/arch/riscv/riscv-sbi-firmware.json
++++ b/tools/perf/pmu-events/arch/riscv/riscv-sbi-firmware.json
+@@ -74,7 +74,7 @@
+ {
+ "PublicDescription": "Sent SFENCE.VMA with ASID request to other HART event",
+ "ConfigCode": "0x800000000000000c",
+- "EventName": "FW_SFENCE_VMA_RECEIVED",
++ "EventName": "FW_SFENCE_VMA_ASID_SENT",
+ "BriefDescription": "Sent SFENCE.VMA with ASID request to other HART event"
+ },
+ {
+diff --git a/tools/perf/pmu-events/arch/riscv/sifive/u74/firmware.json b/tools/perf/pmu-events/arch/riscv/sifive/u74/firmware.json
+index 9b4a032186a7b..7149caec4f80e 100644
+--- a/tools/perf/pmu-events/arch/riscv/sifive/u74/firmware.json
++++ b/tools/perf/pmu-events/arch/riscv/sifive/u74/firmware.json
+@@ -36,7 +36,7 @@
+ "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
+ },
+ {
+- "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
++ "ArchStdEvent": "FW_SFENCE_VMA_ASID_SENT"
+ },
+ {
+ "ArchStdEvent": "FW_SFENCE_VMA_ASID_RECEIVED"
+diff --git a/tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/firmware.json b/tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/firmware.json
+index 9b4a032186a7b..7149caec4f80e 100644
+--- a/tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/firmware.json
++++ b/tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/firmware.json
+@@ -36,7 +36,7 @@
+ "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
+ },
+ {
+- "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
++ "ArchStdEvent": "FW_SFENCE_VMA_ASID_SENT"
+ },
+ {
+ "ArchStdEvent": "FW_SFENCE_VMA_ASID_RECEIVED"
+diff --git a/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/firmware.json b/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/firmware.json
+index 9b4a032186a7b..7149caec4f80e 100644
+--- a/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/firmware.json
++++ b/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/firmware.json
+@@ -36,7 +36,7 @@
+ "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
+ },
+ {
+- "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
++ "ArchStdEvent": "FW_SFENCE_VMA_ASID_SENT"
+ },
+ {
+ "ArchStdEvent": "FW_SFENCE_VMA_ASID_RECEIVED"
+--
+2.43.0
+
--- /dev/null
+From 72e58a06e9c824aafd73b0746288c8ee5322c1e9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 29 Jul 2024 15:58:58 +0300
+Subject: perf: riscv: Fix selecting counters in legacy mode
+
+From: Shifrin Dmitry <dmitry.shifrin@syntacore.com>
+
+[ Upstream commit 941a8e9b7a86763ac52d5bf6ccc9986d37fde628 ]
+
+The event type must be checked before the event config, because events
+with different types can have the same config. This check is missing
+from the legacy mode code.
+
+For such perf usage:
+ sysctl -w kernel.perf_user_access=2
+ perf stat -e cycles,L1-dcache-loads --
+the driver will try to force both events onto the CYCLE counter.
+
+This commit adds an event type check before forcing events onto the
+special counters.
+
+Signed-off-by: Shifrin Dmitry <dmitry.shifrin@syntacore.com>
+Reviewed-by: Atish Patra <atishp@rivosinc.com>
+Fixes: cc4c07c89aad ("drivers: perf: Implement perf event mmap support in the SBI backend")
+Link: https://lore.kernel.org/r/20240729125858.630653-1-dmitry.shifrin@syntacore.com
+Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/perf/riscv_pmu_sbi.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
+index 4e842dcedfbaa..11c7c85047ed4 100644
+--- a/drivers/perf/riscv_pmu_sbi.c
++++ b/drivers/perf/riscv_pmu_sbi.c
+@@ -412,7 +412,7 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
+ * but not in the user access mode as we want to use the other counters
+ * that support sampling/filtering.
+ */
+- if (hwc->flags & PERF_EVENT_FLAG_LEGACY) {
++ if ((hwc->flags & PERF_EVENT_FLAG_LEGACY) && (event->attr.type == PERF_TYPE_HARDWARE)) {
+ if (event->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
+ cflags |= SBI_PMU_CFG_FLAG_SKIP_MATCH;
+ cmask = 1;
+--
+2.43.0
+
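To see why the type check in the patch above matters, here is a minimal
user-space sketch (not the kernel driver itself): both events from the
reproducer, cycles and L1-dcache-loads, end up with attr.config == 0, so
matching on the config alone cannot tell them apart.

#include <stdio.h>
#include <linux/perf_event.h>

int main(void)
{
    /* cycles: type PERF_TYPE_HARDWARE, config PERF_COUNT_HW_CPU_CYCLES (0) */
    unsigned long long cycles = PERF_COUNT_HW_CPU_CYCLES;

    /* L1-dcache-loads: type PERF_TYPE_HW_CACHE, config built from
     * (cache id) | (op id << 8) | (result id << 16), which is also 0.
     */
    unsigned long long l1d_loads = PERF_COUNT_HW_CACHE_L1D |
                                   (PERF_COUNT_HW_CACHE_OP_READ << 8) |
                                   (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16);

    /* Both lines print 0: same config, different event types. */
    printf("cycles config          = %llu\n", cycles);
    printf("L1-dcache-loads config = %llu\n", l1d_loads);
    return 0;
}
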
--- /dev/null
+From e68b1c335fbc635cc52467b09a5c8d3e1f8ce8bb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 22 Jul 2024 15:15:48 -0600
+Subject: perf tool: fix dereferencing NULL al->maps
+
+From: Casey Chen <cachen@purestorage.com>
+
+[ Upstream commit 4c17736689ccfc44ec7dcc472577f25c34cf8724 ]
+
+With 0dd5041c9a0e ("perf addr_location: Add init/exit/copy functions"),
+when cpumode is 3 (macro PERF_RECORD_MISC_HYPERVISOR),
+thread__find_map() could return with al->maps being NULL.
+
+The path below could add a callchain_cursor_node with NULL ms.maps.
+
+add_callchain_ip()
+ thread__find_symbol(.., &al)
+ thread__find_map(.., &al) // al->maps becomes NULL
+ ms.maps = maps__get(al.maps)
+ callchain_cursor_append(..., &ms, ...)
+ node->ms.maps = maps__get(ms->maps)
+
+Then the path below would dereference NULL maps and get segfault.
+
+fill_callchain_info()
+ maps__machine(node->ms.maps);
+
+Fix it by checking if maps is NULL in fill_callchain_info().
+
+Fixes: 0dd5041c9a0e ("perf addr_location: Add init/exit/copy functions")
+Signed-off-by: Casey Chen <cachen@purestorage.com>
+Reviewed-by: Ian Rogers <irogers@google.com>
+Reviewed-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Acked-by: Namhyung Kim <namhyung@kernel.org>
+Cc: yzhong@purestorage.com
+Link: https://lore.kernel.org/r/20240722211548.61455-1-cachen@purestorage.com
+Signed-off-by: Namhyung Kim <namhyung@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/util/callchain.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
+index 1730b852a9474..6d075648d2ccf 100644
+--- a/tools/perf/util/callchain.c
++++ b/tools/perf/util/callchain.c
+@@ -1141,7 +1141,7 @@ int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *samp
+ int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node,
+ bool hide_unresolved)
+ {
+- struct machine *machine = maps__machine(node->ms.maps);
++ struct machine *machine = node->ms.maps ? maps__machine(node->ms.maps) : NULL;
+
+ maps__put(al->maps);
+ al->maps = maps__get(node->ms.maps);
+--
+2.43.0
+
--- /dev/null
+From e907da5eb64bb779cc2bb2adf572e8860b2c436a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Jul 2024 11:17:14 +0800
+Subject: RISC-V: Enable the IPI before workqueue_online_cpu()
+
+From: Nick Hu <nick.hu@sifive.com>
+
+[ Upstream commit 3908ba2e0b2476e2ec13e15967bf6a37e449f2af ]
+
+Sometimes the hotplugged CPU stalls in arch_cpu_idle() for a while after
+workqueue_online_cpu(). When the CPU stalls in the idle loop, the reschedule
+IPI is pending; however, the IPI enable bit is not set yet, so the CPU stalls
+at WFI until the watchdog times out. Therefore, enable the IPI before
+workqueue_online_cpu() to fix the issue.
+
+Fixes: 63c5484e7495 ("workqueue: Add multiple affinity scopes and interface to select them")
+Signed-off-by: Nick Hu <nick.hu@sifive.com>
+Reviewed-by: Anup Patel <anup@brainfault.org>
+Link: https://lore.kernel.org/r/20240717031714.1946036-1-nick.hu@sifive.com
+Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/kernel/sbi-ipi.c | 2 +-
+ include/linux/cpuhotplug.h | 1 +
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/riscv/kernel/sbi-ipi.c b/arch/riscv/kernel/sbi-ipi.c
+index 1026e22955ccc..0cc5559c08d8f 100644
+--- a/arch/riscv/kernel/sbi-ipi.c
++++ b/arch/riscv/kernel/sbi-ipi.c
+@@ -71,7 +71,7 @@ void __init sbi_ipi_init(void)
+ * the masking/unmasking of virtual IPIs is done
+ * via generic IPI-Mux
+ */
+- cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
++ cpuhp_setup_state(CPUHP_AP_IRQ_RISCV_SBI_IPI_STARTING,
+ "irqchip/sbi-ipi:starting",
+ sbi_ipi_starting_cpu, NULL);
+
+diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
+index 7a5785f405b62..0a8fd4a3d04c9 100644
+--- a/include/linux/cpuhotplug.h
++++ b/include/linux/cpuhotplug.h
+@@ -147,6 +147,7 @@ enum cpuhp_state {
+ CPUHP_AP_IRQ_LOONGARCH_STARTING,
+ CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
+ CPUHP_AP_IRQ_RISCV_IMSIC_STARTING,
++ CPUHP_AP_IRQ_RISCV_SBI_IPI_STARTING,
+ CPUHP_AP_ARM_MVEBU_COHERENCY,
+ CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
+ CPUHP_AP_PERF_X86_STARTING,
+--
+2.43.0
+
--- /dev/null
+From a1fe6965e3ced4bba781448a8828080d29e8afa4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 22 Jun 2024 12:42:16 +0100
+Subject: riscv: Fix linear mapping checks for non-contiguous memory regions
+
+From: Stuart Menefy <stuart.menefy@codasip.com>
+
+[ Upstream commit 3b6564427aea83b7a35a15ca278291d50a1edcfc ]
+
+The RISC-V kernel already has checks to ensure that memory which would
+lie outside of the linear mapping is not used. However those checks
+use memory_limit, which is used to implement the mem= kernel command
+line option (to limit the total amount of memory, not its address
+range). When memory is made up of two or more non-contiguous memory
+banks this check is incorrect.
+
+Two changes are made here:
+ - add a call in setup_bootmem() to memblock_cap_memory_range() which
+ will cause any memory which falls outside the linear mapping to be
+ removed from the memory regions.
+ - remove the check in create_linear_mapping_page_table() which was
+ intended to remove memory which is outside the linear mapping based
+ on memory_limit, as it is no longer needed. Note a check for
+ mapping more memory than memory_limit (to implement mem=) is
+ unnecessary because of the existing call to
+ memblock_enforce_memory_limit().
+
+This issue was seen when booting on a SV39 platform with two memory
+banks:
+ 0x00,80000000 1GiB
+ 0x20,00000000 32GiB
+This memory range is 158GiB from top to bottom, but the linear mapping
+is limited to 128GiB, so the lower block of RAM will be mapped at
+PAGE_OFFSET, and the upper block straddles the top of the linear
+mapping.
+
+This causes the following Oops:
+[ 0.000000] Linux version 6.10.0-rc2-gd3b8dd5b51dd-dirty (stuart.menefy@codasip.com) (riscv64-codasip-linux-gcc (GCC) 13.2.0, GNU ld (GNU Binutils) 2.41.0.20231213) #20 SMP Sat Jun 22 11:34:22 BST 2024
+[ 0.000000] memblock_add: [0x0000000080000000-0x00000000bfffffff] early_init_dt_add_memory_arch+0x4a/0x52
+[ 0.000000] memblock_add: [0x0000002000000000-0x00000027ffffffff] early_init_dt_add_memory_arch+0x4a/0x52
+...
+[ 0.000000] memblock_alloc_try_nid: 23724 bytes align=0x8 nid=-1 from=0x0000000000000000 max_addr=0x0000000000000000 early_init_dt_alloc_memory_arch+0x1e/0x48
+[ 0.000000] memblock_reserve: [0x00000027ffff5350-0x00000027ffffaffb] memblock_alloc_range_nid+0xb8/0x132
+[ 0.000000] Unable to handle kernel paging request at virtual address fffffffe7fff5350
+[ 0.000000] Oops [#1]
+[ 0.000000] Modules linked in:
+[ 0.000000] CPU: 0 PID: 0 Comm: swapper Not tainted 6.10.0-rc2-gd3b8dd5b51dd-dirty #20
+[ 0.000000] Hardware name: codasip,a70x (DT)
+[ 0.000000] epc : __memset+0x8c/0x104
+[ 0.000000] ra : memblock_alloc_try_nid+0x74/0x84
+[ 0.000000] epc : ffffffff805e88c8 ra : ffffffff806148f6 sp : ffffffff80e03d50
+[ 0.000000] gp : ffffffff80ec4158 tp : ffffffff80e0bec0 t0 : fffffffe7fff52f8
+[ 0.000000] t1 : 00000027ffffb000 t2 : 5f6b636f6c626d65 s0 : ffffffff80e03d90
+[ 0.000000] s1 : 0000000000005cac a0 : fffffffe7fff5350 a1 : 0000000000000000
+[ 0.000000] a2 : 0000000000005cac a3 : fffffffe7fffaff8 a4 : 000000000000002c
+[ 0.000000] a5 : ffffffff805e88c8 a6 : 0000000000005cac a7 : 0000000000000030
+[ 0.000000] s2 : fffffffe7fff5350 s3 : ffffffffffffffff s4 : 0000000000000000
+[ 0.000000] s5 : ffffffff8062347e s6 : 0000000000000000 s7 : 0000000000000001
+[ 0.000000] s8 : 0000000000002000 s9 : 00000000800226d0 s10: 0000000000000000
+[ 0.000000] s11: 0000000000000000 t3 : ffffffff8080a928 t4 : ffffffff8080a928
+[ 0.000000] t5 : ffffffff8080a928 t6 : ffffffff8080a940
+[ 0.000000] status: 0000000200000100 badaddr: fffffffe7fff5350 cause: 000000000000000f
+[ 0.000000] [<ffffffff805e88c8>] __memset+0x8c/0x104
+[ 0.000000] [<ffffffff8062349c>] early_init_dt_alloc_memory_arch+0x1e/0x48
+[ 0.000000] [<ffffffff8043e892>] __unflatten_device_tree+0x52/0x114
+[ 0.000000] [<ffffffff8062441e>] unflatten_device_tree+0x9e/0xb8
+[ 0.000000] [<ffffffff806046fe>] setup_arch+0xd4/0x5bc
+[ 0.000000] [<ffffffff806007aa>] start_kernel+0x76/0x81a
+[ 0.000000] Code: b823 02b2 bc23 02b2 b023 04b2 b423 04b2 b823 04b2 (bc23) 04b2
+[ 0.000000] ---[ end trace 0000000000000000 ]---
+[ 0.000000] Kernel panic - not syncing: Attempted to kill the idle task!
+[ 0.000000] ---[ end Kernel panic - not syncing: Attempted to kill the idle task! ]---
+
+The problem is that memblock (unaware that some physical memory cannot
+be used) has allocated memory from the top of memory but which is
+outside the linear mapping region.
+
+Signed-off-by: Stuart Menefy <stuart.menefy@codasip.com>
+Fixes: c99127c45248 ("riscv: Make sure the linear mapping does not use the kernel mapping")
+Reviewed-by: David McKay <david.mckay@codasip.com>
+Reviewed-by: Alexandre Ghiti <alexghiti@rivosinc.com>
+Link: https://lore.kernel.org/r/20240622114217.2158495-1-stuart.menefy@codasip.com
+Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/mm/init.c | 15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
+index e3405e4b99af5..7e25606f858aa 100644
+--- a/arch/riscv/mm/init.c
++++ b/arch/riscv/mm/init.c
+@@ -233,8 +233,6 @@ static void __init setup_bootmem(void)
+ */
+ memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
+
+- phys_ram_end = memblock_end_of_DRAM();
+-
+ /*
+ * Make sure we align the start of the memory on a PMD boundary so that
+ * at worst, we map the linear mapping with PMD mappings.
+@@ -249,6 +247,16 @@ static void __init setup_bootmem(void)
+ if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_MMU))
+ kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base;
+
++ /*
++ * The size of the linear page mapping may restrict the amount of
++ * usable RAM.
++ */
++ if (IS_ENABLED(CONFIG_64BIT)) {
++ max_mapped_addr = __pa(PAGE_OFFSET) + KERN_VIRT_SIZE;
++ memblock_cap_memory_range(phys_ram_base,
++ max_mapped_addr - phys_ram_base);
++ }
++
+ /*
+ * Reserve physical address space that would be mapped to virtual
+ * addresses greater than (void *)(-PAGE_SIZE) because:
+@@ -265,6 +273,7 @@ static void __init setup_bootmem(void)
+ memblock_reserve(max_mapped_addr, (phys_addr_t)-max_mapped_addr);
+ }
+
++ phys_ram_end = memblock_end_of_DRAM();
+ min_low_pfn = PFN_UP(phys_ram_base);
+ max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);
+ high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
+@@ -1289,8 +1298,6 @@ static void __init create_linear_mapping_page_table(void)
+ if (start <= __pa(PAGE_OFFSET) &&
+ __pa(PAGE_OFFSET) < end)
+ start = __pa(PAGE_OFFSET);
+- if (end >= __pa(PAGE_OFFSET) + memory_limit)
+- end = __pa(PAGE_OFFSET) + memory_limit;
+
+ create_linear_mapping_range(start, end, 0);
+ }
+--
+2.43.0
+
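A small arithmetic sketch of the SV39 example above. The 128 GiB
linear-mapping size is taken from the commit message; treat it as an
assumption here rather than the exact KERN_VIRT_SIZE value.

#include <stdio.h>

int main(void)
{
    /* Values from the two memory banks described above. */
    unsigned long long phys_ram_base   = 0x80000000ULL;    /* 1 GiB bank  */
    unsigned long long upper_bank_end  = 0x2800000000ULL;  /* 32 GiB bank */
    unsigned long long linear_map_size = 128ULL << 30;     /* assumed     */

    unsigned long long max_mapped_addr = phys_ram_base + linear_map_size;

    /* The upper bank ends beyond the linear mapping, so memblock could
     * allocate from memory that has no virtual mapping at all.
     */
    printf("linear mapping covers physical addresses up to 0x%llx\n",
           max_mapped_addr);                               /* 0x2080000000 */
    printf("upper bank ends at 0x%llx (%llu GiB beyond the limit)\n",
           upper_bank_end,
           (upper_bank_end - max_mapped_addr) >> 30);      /* 30 GiB */
    return 0;
}
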
--- /dev/null
+From 51afd80d204d945567d8d79ffb9e97a76f8e3f50 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 Jul 2024 16:45:47 +0800
+Subject: riscv/mm: Add handling for VM_FAULT_SIGSEGV in mm_fault_error()
+
+From: Zhe Qiao <qiaozhe@iscas.ac.cn>
+
+[ Upstream commit 0c710050c47d45eb77b28c271cddefc5c785cb40 ]
+
+Handle VM_FAULT_SIGSEGV in the page fault path so that we correctly
+kill the process and we don't BUG() the kernel.
+
+Fixes: 07037db5d479 ("RISC-V: Paging and MMU")
+Signed-off-by: Zhe Qiao <qiaozhe@iscas.ac.cn>
+Reviewed-by: Alexandre Ghiti <alexghiti@rivosinc.com>
+Link: https://lore.kernel.org/r/20240731084547.85380-1-qiaozhe@iscas.ac.cn
+Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/mm/fault.c | 17 +++++++++--------
+ 1 file changed, 9 insertions(+), 8 deletions(-)
+
+diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
+index 5224f37338022..a9f2b4af8f3f1 100644
+--- a/arch/riscv/mm/fault.c
++++ b/arch/riscv/mm/fault.c
+@@ -61,26 +61,27 @@ static inline void no_context(struct pt_regs *regs, unsigned long addr)
+
+ static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
+ {
++ if (!user_mode(regs)) {
++ no_context(regs, addr);
++ return;
++ }
++
+ if (fault & VM_FAULT_OOM) {
+ /*
+ * We ran out of memory, call the OOM killer, and return the userspace
+ * (which will retry the fault, or kill us if we got oom-killed).
+ */
+- if (!user_mode(regs)) {
+- no_context(regs, addr);
+- return;
+- }
+ pagefault_out_of_memory();
+ return;
+ } else if (fault & (VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) {
+ /* Kernel mode? Handle exceptions or die */
+- if (!user_mode(regs)) {
+- no_context(regs, addr);
+- return;
+- }
+ do_trap(regs, SIGBUS, BUS_ADRERR, addr);
+ return;
++ } else if (fault & VM_FAULT_SIGSEGV) {
++ do_trap(regs, SIGSEGV, SEGV_MAPERR, addr);
++ return;
+ }
++
+ BUG();
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 388756fbbcf99ba0ceffe1144cd238caa50c4a3a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 19 Jul 2024 19:04:37 +0200
+Subject: riscv/purgatory: align riscv_kernel_entry
+
+From: Daniel Maslowski <cyrevolt@googlemail.com>
+
+[ Upstream commit fb197c5d2fd24b9af3d4697d0cf778645846d6d5 ]
+
+When alignment handling is delegated to the kernel, everything must be
+word-aligned in purgatory, since the trap handler is then set to the
+kexec one. Without the alignment, hitting the exception would
+ultimately crash. On other occasions, the kernel's handler would take
+care of exceptions.
+This has been tested on a JH7110 SoC with oreboot and its SBI delegating
+unaligned access exceptions and the kernel configured to handle them.
+
+Fixes: 736e30af583fb ("RISC-V: Add purgatory")
+Signed-off-by: Daniel Maslowski <cyrevolt@gmail.com>
+Reviewed-by: Alexandre Ghiti <alexghiti@rivosinc.com>
+Link: https://lore.kernel.org/r/20240719170437.247457-1-cyrevolt@gmail.com
+Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/purgatory/entry.S | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/arch/riscv/purgatory/entry.S b/arch/riscv/purgatory/entry.S
+index 5bcf3af903daa..0e6ca6d5ae4b4 100644
+--- a/arch/riscv/purgatory/entry.S
++++ b/arch/riscv/purgatory/entry.S
+@@ -7,6 +7,7 @@
+ * Author: Li Zhengyu (lizhengyu3@huawei.com)
+ *
+ */
++#include <asm/asm.h>
+ #include <linux/linkage.h>
+
+ .text
+@@ -34,6 +35,7 @@ SYM_CODE_END(purgatory_start)
+
+ .data
+
++.align LGREG
+ SYM_DATA(riscv_kernel_entry, .quad 0)
+
+ .end
+--
+2.43.0
+
--- /dev/null
+From c312816df5bfb150fdd133cf8021d514e1582d41 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Jul 2024 17:19:53 -0700
+Subject: rtnetlink: Don't ignore IFLA_TARGET_NETNSID when ifname is specified
+ in rtnl_dellink().
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit 9415d375d8520e0ed55f0c0b058928da9a5b5b3d ]
+
+The cited commit accidentally replaced tgt_net with net in rtnl_dellink().
+
+As a result, IFLA_TARGET_NETNSID is ignored if the interface is specified
+with IFLA_IFNAME or IFLA_ALT_IFNAME.
+
+Let's pass tgt_net to rtnl_dev_get().
+
+Fixes: cc6090e985d7 ("net: rtnetlink: introduce helper to get net_device instance by ifname")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Reviewed-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/rtnetlink.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 4668d67180407..5e589f0a62bc5 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -3288,7 +3288,7 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
+ if (ifm->ifi_index > 0)
+ dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
+ else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
+- dev = rtnl_dev_get(net, tb);
++ dev = rtnl_dev_get(tgt_net, tb);
+ else if (tb[IFLA_GROUP])
+ err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
+ else
+--
+2.43.0
+
--- /dev/null
+From a35c69795e5c3068cdfce8993963bdf3dea67281 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Jul 2024 20:49:53 +0200
+Subject: s390/mm/ptdump: Fix handling of identity mapping area
+
+From: Heiko Carstens <hca@linux.ibm.com>
+
+[ Upstream commit 373953444ce542db43535861fb8ebf3a1e05669c ]
+
+Since virtual and real addresses are no longer the same, the assumption
+that the kernel image is contained within the identity mapping no
+longer holds.
+
+Fix this by adding two explicit areas at the correct locations: one for
+the 8KB lowcore area, and one for the identity mapping.
+
+Fixes: c98d2ecae08f ("s390/mm: Uncouple physical vs virtual address spaces")
+Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/mm/dump_pagetables.c | 21 ++++++++++++---------
+ 1 file changed, 12 insertions(+), 9 deletions(-)
+
+diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
+index ffd07ed7b4af8..9d0805d6dc1b2 100644
+--- a/arch/s390/mm/dump_pagetables.c
++++ b/arch/s390/mm/dump_pagetables.c
+@@ -20,8 +20,8 @@ struct addr_marker {
+ };
+
+ enum address_markers_idx {
+- IDENTITY_BEFORE_NR = 0,
+- IDENTITY_BEFORE_END_NR,
++ LOWCORE_START_NR = 0,
++ LOWCORE_END_NR,
+ AMODE31_START_NR,
+ AMODE31_END_NR,
+ KERNEL_START_NR,
+@@ -30,8 +30,8 @@ enum address_markers_idx {
+ KFENCE_START_NR,
+ KFENCE_END_NR,
+ #endif
+- IDENTITY_AFTER_NR,
+- IDENTITY_AFTER_END_NR,
++ IDENTITY_START_NR,
++ IDENTITY_END_NR,
+ VMEMMAP_NR,
+ VMEMMAP_END_NR,
+ VMALLOC_NR,
+@@ -49,8 +49,10 @@ enum address_markers_idx {
+ };
+
+ static struct addr_marker address_markers[] = {
+- [IDENTITY_BEFORE_NR] = {0, "Identity Mapping Start"},
+- [IDENTITY_BEFORE_END_NR] = {(unsigned long)_stext, "Identity Mapping End"},
++ [LOWCORE_START_NR] = {0, "Lowcore Start"},
++ [LOWCORE_END_NR] = {0, "Lowcore End"},
++ [IDENTITY_START_NR] = {0, "Identity Mapping Start"},
++ [IDENTITY_END_NR] = {0, "Identity Mapping End"},
+ [AMODE31_START_NR] = {0, "Amode31 Area Start"},
+ [AMODE31_END_NR] = {0, "Amode31 Area End"},
+ [KERNEL_START_NR] = {(unsigned long)_stext, "Kernel Image Start"},
+@@ -59,8 +61,6 @@ static struct addr_marker address_markers[] = {
+ [KFENCE_START_NR] = {0, "KFence Pool Start"},
+ [KFENCE_END_NR] = {0, "KFence Pool End"},
+ #endif
+- [IDENTITY_AFTER_NR] = {(unsigned long)_end, "Identity Mapping Start"},
+- [IDENTITY_AFTER_END_NR] = {0, "Identity Mapping End"},
+ [VMEMMAP_NR] = {0, "vmemmap Area Start"},
+ [VMEMMAP_END_NR] = {0, "vmemmap Area End"},
+ [VMALLOC_NR] = {0, "vmalloc Area Start"},
+@@ -290,7 +290,10 @@ static int pt_dump_init(void)
+ */
+ max_addr = (S390_lowcore.kernel_asce.val & _REGION_ENTRY_TYPE_MASK) >> 2;
+ max_addr = 1UL << (max_addr * 11 + 31);
+- address_markers[IDENTITY_AFTER_END_NR].start_address = ident_map_size;
++ address_markers[LOWCORE_START_NR].start_address = 0;
++ address_markers[LOWCORE_END_NR].start_address = sizeof(struct lowcore);
++ address_markers[IDENTITY_START_NR].start_address = __identity_base;
++ address_markers[IDENTITY_END_NR].start_address = __identity_base + ident_map_size;
+ address_markers[AMODE31_START_NR].start_address = (unsigned long)__samode31;
+ address_markers[AMODE31_END_NR].start_address = (unsigned long)__eamode31;
+ address_markers[MODULES_NR].start_address = MODULES_VADDR;
+--
+2.43.0
+
--- /dev/null
+From b4fa7c688f020dd620accebc5fafbfd0f6a3f887 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 25 Jul 2024 09:27:45 +0000
+Subject: sched: act_ct: take care of padding in struct zones_ht_key
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 2191a54f63225b548fd8346be3611c3219a24738 ]
+
+The blamed commit increased the lookup key size from 2 bytes to 16 bytes,
+because zones_ht_key gained a struct net pointer.
+
+Make sure rhashtable_lookup() is not using the padding bytes
+which are not initialized.
+
+ BUG: KMSAN: uninit-value in rht_ptr_rcu include/linux/rhashtable.h:376 [inline]
+ BUG: KMSAN: uninit-value in __rhashtable_lookup include/linux/rhashtable.h:607 [inline]
+ BUG: KMSAN: uninit-value in rhashtable_lookup include/linux/rhashtable.h:646 [inline]
+ BUG: KMSAN: uninit-value in rhashtable_lookup_fast include/linux/rhashtable.h:672 [inline]
+ BUG: KMSAN: uninit-value in tcf_ct_flow_table_get+0x611/0x2260 net/sched/act_ct.c:329
+ rht_ptr_rcu include/linux/rhashtable.h:376 [inline]
+ __rhashtable_lookup include/linux/rhashtable.h:607 [inline]
+ rhashtable_lookup include/linux/rhashtable.h:646 [inline]
+ rhashtable_lookup_fast include/linux/rhashtable.h:672 [inline]
+ tcf_ct_flow_table_get+0x611/0x2260 net/sched/act_ct.c:329
+ tcf_ct_init+0xa67/0x2890 net/sched/act_ct.c:1408
+ tcf_action_init_1+0x6cc/0xb30 net/sched/act_api.c:1425
+ tcf_action_init+0x458/0xf00 net/sched/act_api.c:1488
+ tcf_action_add net/sched/act_api.c:2061 [inline]
+ tc_ctl_action+0x4be/0x19d0 net/sched/act_api.c:2118
+ rtnetlink_rcv_msg+0x12fc/0x1410 net/core/rtnetlink.c:6647
+ netlink_rcv_skb+0x375/0x650 net/netlink/af_netlink.c:2550
+ rtnetlink_rcv+0x34/0x40 net/core/rtnetlink.c:6665
+ netlink_unicast_kernel net/netlink/af_netlink.c:1331 [inline]
+ netlink_unicast+0xf52/0x1260 net/netlink/af_netlink.c:1357
+ netlink_sendmsg+0x10da/0x11e0 net/netlink/af_netlink.c:1901
+ sock_sendmsg_nosec net/socket.c:730 [inline]
+ __sock_sendmsg+0x30f/0x380 net/socket.c:745
+ ____sys_sendmsg+0x877/0xb60 net/socket.c:2597
+ ___sys_sendmsg+0x28d/0x3c0 net/socket.c:2651
+ __sys_sendmsg net/socket.c:2680 [inline]
+ __do_sys_sendmsg net/socket.c:2689 [inline]
+ __se_sys_sendmsg net/socket.c:2687 [inline]
+ __x64_sys_sendmsg+0x307/0x4a0 net/socket.c:2687
+ x64_sys_call+0x2dd6/0x3c10 arch/x86/include/generated/asm/syscalls_64.h:47
+ do_syscall_x64 arch/x86/entry/common.c:52 [inline]
+ do_syscall_64+0xcd/0x1e0 arch/x86/entry/common.c:83
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+
+Local variable key created at:
+ tcf_ct_flow_table_get+0x4a/0x2260 net/sched/act_ct.c:324
+ tcf_ct_init+0xa67/0x2890 net/sched/act_ct.c:1408
+
+Fixes: 88c67aeb1407 ("sched: act_ct: add netns into the key of tcf_ct_flow_table")
+Reported-by: syzbot+1b5e4e187cc586d05ea0@syzkaller.appspotmail.com
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Xin Long <lucien.xin@gmail.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Xin Long <lucien.xin@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/act_ct.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
+index 6fa3cca87d346..9d451d77d54e2 100644
+--- a/net/sched/act_ct.c
++++ b/net/sched/act_ct.c
+@@ -44,6 +44,8 @@ static DEFINE_MUTEX(zones_mutex);
+ struct zones_ht_key {
+ struct net *net;
+ u16 zone;
++ /* Note : pad[] must be the last field. */
++ u8 pad[];
+ };
+
+ struct tcf_ct_flow_table {
+@@ -60,7 +62,7 @@ struct tcf_ct_flow_table {
+ static const struct rhashtable_params zones_params = {
+ .head_offset = offsetof(struct tcf_ct_flow_table, node),
+ .key_offset = offsetof(struct tcf_ct_flow_table, key),
+- .key_len = sizeof_field(struct tcf_ct_flow_table, key),
++ .key_len = offsetof(struct zones_ht_key, pad),
+ .automatic_shrinking = true,
+ };
+
+--
+2.43.0
+
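A user-space sketch of the padding issue fixed above. The struct members
mirror zones_ht_key, with a plain void pointer standing in for struct
net *: on an LP64 build the struct occupies 16 bytes but only the first
10 carry data, so hashing sizeof() bytes would read 6 uninitialized
padding bytes, while offsetof() of a trailing flexible member covers
only the initialized part.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct key_like {
    void    *net;   /* stand-in for struct net * */
    uint16_t zone;
    uint8_t  pad[]; /* must stay the last field  */
};

int main(void)
{
    /* Typical LP64 output: sizeof = 16, key length via offsetof = 10. */
    printf("sizeof(struct key_like)        = %zu\n", sizeof(struct key_like));
    printf("offsetof(struct key_like, pad) = %zu\n",
           offsetof(struct key_like, pad));
    return 0;
}
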
mips-loongson64-dts-fix-pcie-port-nodes-for-ls7a.patch
mips-dts-loongson-fix-liointc-irq-polarity.patch
mips-dts-loongson-fix-ls2k1000-rtc-interrupt.patch
+arm-9406-1-fix-callchain_trace-return-value.patch
+arm-9408-1-mm-cfi-fix-some-erroneous-reset-prototype.patch
+hid-amd_sfh-move-sensor-discovery-before-hid-device-.patch
+perf-tool-fix-dereferencing-null-al-maps.patch
+drm-gpuvm-fix-missing-dependency-to-drm_exec.patch
+drm-nouveau-prime-fix-refcount-underflow.patch
+drm-vmwgfx-make-sure-the-screen-surface-is-ref-count.patch
+drm-vmwgfx-fix-overlay-when-using-screen-targets.patch
+bnxt_en-fix-rss-logic-in-__bnxt_reserve_rings.patch
+netlink-specs-correct-the-spec-of-ethtool.patch
+ethtool-rss-echo-the-context-number-back.patch
+drm-vmwgfx-trigger-a-modeset-when-the-screen-moves.patch
+sched-act_ct-take-care-of-padding-in-struct-zones_ht.patch
+wifi-cfg80211-fix-reporting-failed-mlo-links-status-.patch
+wifi-cfg80211-correct-s1g-beacon-length-calculation.patch
+net-phy-realtek-add-support-for-rtl8366s-gigabit-phy.patch
+alsa-hda-conexant-fix-headset-auto-detect-fail-in-th.patch
+bluetooth-btintel-fail-setup-on-error.patch
+bluetooth-hci_sync-fix-suspending-with-wrong-filter-.patch
+drm-client-fix-error-code-in-drm_client_buffer_vmap_.patch
+ethtool-veto-some-operations-during-firmware-flashin.patch
+net-move-ethtool-related-netdev-state-into-its-own-s.patch
+net-ethtool-attach-an-xarray-of-custom-rss-contexts-.patch
+net-ethtool-record-custom-rss-contexts-in-the-xarray.patch
+net-ethtool-add-a-mutex-protecting-rss-contexts.patch
+ethtool-fix-setting-key-and-resetting-indir-at-once.patch
+tcp-adjust-clamping-window-for-applications-specifyi.patch
+net-axienet-start-napi-before-enabling-rx-tx.patch
+rtnetlink-don-t-ignore-ifla_target_netnsid-when-ifna.patch
+i915-perf-remove-code-to-update-pwr_clk_state-for-ge.patch
+ice-respect-netif-readiness-in-af_xdp-zc-related-ndo.patch
+ice-don-t-busy-wait-for-rx-queue-disable-in-ice_qp_d.patch
+ice-replace-synchronize_rcu-with-synchronize_net.patch
+ice-modify-error-handling-when-setting-xsk-pool-in-n.patch
+ice-toggle-netif_carrier-when-setting-up-xsk-pool.patch
+ice-improve-updating-ice_-t-r-x_ring-xsk_pool.patch
+ice-add-missing-write_once-when-clearing-ice_rx_ring.patch
+ice-xsk-fix-txq-interrupt-mapping.patch
+net-iucv-fix-use-after-free-in-iucv_sock_close.patch
+drm-i915-hdcp-fix-hdcp2_stream_status-macro.patch
+net-mvpp2-don-t-re-use-loop-iterator.patch
+net-phy-micrel-fix-the-ksz9131-mdi-x-status-issue.patch
+s390-mm-ptdump-fix-handling-of-identity-mapping-area.patch
+alsa-hda-conditionally-use-snooping-for-amd-hdmi.patch
+drm-atomic-allow-userspace-to-use-explicit-sync-with.patch
+drm-atomic-allow-userspace-to-use-damage-clips-with-.patch
+netfilter-iptables-fix-null-ptr-deref-in-iptable_nat.patch
+netfilter-iptables-fix-potential-null-ptr-deref-in-i.patch
+net-mlx5-always-drain-health-in-shutdown-callback.patch
+net-mlx5-fix-error-handling-in-irq_pool_request_irq.patch
+net-mlx5-lag-don-t-use-the-hardcoded-value-of-the-fi.patch
+net-mlx5-fix-missing-lock-on-sync-reset-reload.patch
+net-mlx5e-require-mlx5-tc-classifier-action-support-.patch
+net-mlx5e-fix-ct-entry-update-leaks-of-modify-header.patch
+net-mlx5e-add-a-check-for-the-return-value-from-mlx5.patch
+igc-fix-double-reset-adapter-triggered-from-a-single.patch
+ipv6-fix-ndisc_is_useropt-handling-for-pio.patch
+riscv-purgatory-align-riscv_kernel_entry.patch
+perf-arch-events-fix-duplicate-risc-v-sbi-firmware-e.patch
+perf-riscv-fix-selecting-counters-in-legacy-mode.patch
+riscv-mm-add-handling-for-vm_fault_sigsegv-in-mm_fau.patch
+risc-v-enable-the-ipi-before-workqueue_online_cpu.patch
+riscv-fix-linear-mapping-checks-for-non-contiguous-m.patch
+arm64-jump_label-ensure-patched-jump_labels-are-visi.patch
--- /dev/null
+From 5eb845ea86956a3c670dd86e0a30f79e90dd6b56 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Jul 2024 13:41:05 -0700
+Subject: tcp: Adjust clamping window for applications specifying SO_RCVBUF
+
+From: Subash Abhinov Kasiviswanathan <quic_subashab@quicinc.com>
+
+[ Upstream commit 05f76b2d634e65ab34472802d9b142ea9e03f74e ]
+
+tp->scaling_ratio is not updated based on skb->len/skb->truesize once
+SO_RCVBUF is set, leading to the maximum window scaling being 25% of
+rcvbuf after
+commit dfa2f0483360 ("tcp: get rid of sysctl_tcp_adv_win_scale")
+and 50% of rcvbuf after
+commit 697a6c8cec03 ("tcp: increase the default TCP scaling ratio").
+The 50% value tries to emulate the behavior of older kernels using
+sysctl_tcp_adv_win_scale with its default value.
+
+Systems which were using different values of sysctl_tcp_adv_win_scale
+in older kernels ended up seeing reduced download speeds in certain
+cases, as covered in https://lists.openwall.net/netdev/2024/05/15/13
+While the sysctl scheme is no longer acceptable, the value of 50% is
+a bit conservative when the skb->len/skb->truesize ratio is later
+determined to be ~0.66.
+
+Applications not specifying SO_RCVBUF update the window scaling and
+the receiver buffer every time data is copied to userspace. This
+computation is now used for applications setting SO_RCVBUF to update
+the maximum window scaling while ensuring that the receive buffer
+is within the application-specified limit.
+
+Fixes: dfa2f0483360 ("tcp: get rid of sysctl_tcp_adv_win_scale")
+Signed-off-by: Sean Tranchetti <quic_stranche@quicinc.com>
+Signed-off-by: Subash Abhinov Kasiviswanathan <quic_subashab@quicinc.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/tcp_input.c | 23 ++++++++++++++++-------
+ 1 file changed, 16 insertions(+), 7 deletions(-)
+
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 570e87ad9a56e..ecd521108559f 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -754,8 +754,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
+ * <prev RTT . ><current RTT .. ><next RTT .... >
+ */
+
+- if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) &&
+- !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
++ if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf)) {
+ u64 rcvwin, grow;
+ int rcvbuf;
+
+@@ -771,12 +770,22 @@ void tcp_rcv_space_adjust(struct sock *sk)
+
+ rcvbuf = min_t(u64, tcp_space_from_win(sk, rcvwin),
+ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
+- if (rcvbuf > sk->sk_rcvbuf) {
+- WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
++ if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
++ if (rcvbuf > sk->sk_rcvbuf) {
++ WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
+
+- /* Make the window clamp follow along. */
+- WRITE_ONCE(tp->window_clamp,
+- tcp_win_from_space(sk, rcvbuf));
++ /* Make the window clamp follow along. */
++ WRITE_ONCE(tp->window_clamp,
++ tcp_win_from_space(sk, rcvbuf));
++ }
++ } else {
++ /* Make the window clamp follow along while being bounded
++ * by SO_RCVBUF.
++ */
++ int clamp = tcp_win_from_space(sk, min(rcvbuf, sk->sk_rcvbuf));
++
++ if (clamp > tp->window_clamp)
++ WRITE_ONCE(tp->window_clamp, clamp);
+ }
+ }
+ tp->rcvq_space.space = copied;
+--
+2.43.0
+
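A rough illustration of the effect described above: treating the usable
window as the buffer size times the payload/truesize ratio is a
simplification of tcp_win_from_space(), not the kernel math, but it
shows why a fixed SO_RCVBUF yields a larger clamp once the measured
ratio (~0.66) is used instead of the static 50% assumption.

#include <stdio.h>

int main(void)
{
    long rcvbuf = 1L << 20;         /* application-set SO_RCVBUF: 1 MiB   */
    double static_ratio = 0.50;     /* pre-fix default scaling assumption */
    double measured_ratio = 0.66;   /* observed skb->len / skb->truesize  */

    /* window ~= buffer * ratio is a simplification, not the kernel math. */
    printf("clamp with static 50%% ratio: %.0f bytes\n", rcvbuf * static_ratio);
    printf("clamp with measured ratio  : %.0f bytes\n", rcvbuf * measured_ratio);
    return 0;
}
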
--- /dev/null
+From e2b7377bc538a83fe87a83514f8a2582233b2b7e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Jul 2024 13:29:12 +0200
+Subject: wifi: cfg80211: correct S1G beacon length calculation
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+[ Upstream commit 6873cc4416078202882691b424fcca5b5fb1a94d ]
+
+The minimum header length calculation (equivalent to the start
+of the elements) for the S1G long beacon erroneously required
+only up to the start of u.s1g_beacon rather than the start of
+u.s1g_beacon.variable. Fix that, and also shuffle the branches
+around a bit to not assign useless values that are overwritten
+later.
+
+Reported-by: syzbot+0f3afa93b91202f21939@syzkaller.appspotmail.com
+Fixes: 9eaffe5078ca ("cfg80211: convert S1G beacon to scan results")
+Link: https://patch.msgid.link/20240724132912.9662972db7c1.I8779675b5bbda4994cc66f876b6b87a2361c3c0b@changeid
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/wireless/scan.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index 0222ede0feb60..292b530a6dd31 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -3136,8 +3136,7 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ gfp_t gfp)
+ {
+- size_t min_hdr_len = offsetof(struct ieee80211_mgmt,
+- u.probe_resp.variable);
++ size_t min_hdr_len;
+ struct ieee80211_ext *ext = NULL;
+ enum cfg80211_bss_frame_type ftype;
+ u16 beacon_interval;
+@@ -3160,10 +3159,16 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
+
+ if (ieee80211_is_s1g_beacon(mgmt->frame_control)) {
+ ext = (void *) mgmt;
+- min_hdr_len = offsetof(struct ieee80211_ext, u.s1g_beacon);
+ if (ieee80211_is_s1g_short_beacon(mgmt->frame_control))
+ min_hdr_len = offsetof(struct ieee80211_ext,
+ u.s1g_short_beacon.variable);
++ else
++ min_hdr_len = offsetof(struct ieee80211_ext,
++ u.s1g_beacon.variable);
++ } else {
++ /* same for beacons */
++ min_hdr_len = offsetof(struct ieee80211_mgmt,
++ u.probe_resp.variable);
+ }
+
+ if (WARN_ON(len < min_hdr_len))
+--
+2.43.0
+
--- /dev/null
+From cb7a1b80d99ad39cfe64d5f52f0a8de2908790bf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Jul 2024 18:23:27 +0530
+Subject: wifi: cfg80211: fix reporting failed MLO links status with
+ cfg80211_connect_done
+
+From: Veerendranath Jakkam <quic_vjakkam@quicinc.com>
+
+[ Upstream commit baeaabf970b9a90999f62ae27edf63f6cb86c023 ]
+
+The individual MLO links' connection status is not copied to the
+EVENT_CONNECT_RESULT data while processing the connect response
+information in cfg80211_connect_done(). Due to this, failed links
+are wrongly reported with a success status in EVENT_CONNECT_RESULT.
+
+To fix this, copy the individual MLO links' status to the
+EVENT_CONNECT_RESULT data.
+
+Fixes: 53ad07e9823b ("wifi: cfg80211: support reporting failed links")
+Signed-off-by: Veerendranath Jakkam <quic_vjakkam@quicinc.com>
+Reviewed-by: Carlos Llamas <cmllamas@google.com>
+Link: https://patch.msgid.link/20240724125327.3495874-1-quic_vjakkam@quicinc.com
+[commit message editorial changes]
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/wireless/sme.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/wireless/sme.c b/net/wireless/sme.c
+index a8ad55f11133b..1cfe673bc52f3 100644
+--- a/net/wireless/sme.c
++++ b/net/wireless/sme.c
+@@ -1045,6 +1045,7 @@ void cfg80211_connect_done(struct net_device *dev,
+ cfg80211_hold_bss(
+ bss_from_pub(params->links[link].bss));
+ ev->cr.links[link].bss = params->links[link].bss;
++ ev->cr.links[link].status = params->links[link].status;
+
+ if (params->links[link].addr) {
+ ev->cr.links[link].addr = next;
+--
+2.43.0
+