--- /dev/null
+From 96409eeab8cdd394e03ec494ea9547edc27f7ab4 Mon Sep 17 00:00:00 2001
+From: Maksym Glubokiy <maxgl.kernel@gmail.com>
+Date: Tue, 12 Nov 2024 17:48:15 +0200
+Subject: ALSA: hda/realtek: fix mute/micmute LEDs for a HP EliteBook 645 G10
+
+From: Maksym Glubokiy <maxgl.kernel@gmail.com>
+
+commit 96409eeab8cdd394e03ec494ea9547edc27f7ab4 upstream.
+
+HP EliteBook 645 G10 uses ALC236 codec and need the
+ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF quirk to make mute LED and
+micmute LED work.
+
+Signed-off-by: Maksym Glubokiy <maxgl.kernel@gmail.com>
+Cc: <stable@vger.kernel.org>
+Link: https://patch.msgid.link/20241112154815.10888-1-maxgl.kernel@gmail.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/pci/hda/patch_realtek.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -10483,6 +10483,7 @@ static const struct snd_pci_quirk alc269
+ SND_PCI_QUIRK(0x103c, 0x8b59, "HP Elite mt645 G7 Mobile Thin Client U89", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8b5d, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8b5e, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++ SND_PCI_QUIRK(0x103c, 0x8b5f, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8b63, "HP Elite Dragonfly 13.5 inch G4", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8b65, "HP ProBook 455 15.6 inch G10 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8b66, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
--- /dev/null
+From 42ee87df8530150d637aa48363b72b22a9bbd78f Mon Sep 17 00:00:00 2001
+From: Kailang Yang <kailang@realtek.com>
+Date: Fri, 25 Oct 2024 16:37:57 +0800
+Subject: ALSA: hda/realtek - Fixed Clevo platform headset Mic issue
+
+From: Kailang Yang <kailang@realtek.com>
+
+commit 42ee87df8530150d637aa48363b72b22a9bbd78f upstream.
+
+Clevo platform with ALC255 Headset Mic was disable by default.
+Assigned verb table for Mic pin will enable it.
+
+Signed-off-by: Kailang Yang <kailang@realtek.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/b2dcac3e09ef4f82b36d6712194e1ea4@realtek.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/pci/hda/patch_realtek.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -11664,6 +11664,8 @@ static const struct snd_hda_pin_quirk al
+ {0x1a, 0x40000000}),
+ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC2XX_FIXUP_HEADSET_MIC,
+ {0x19, 0x40000000}),
++ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1558, "Clevo", ALC2XX_FIXUP_HEADSET_MIC,
++ {0x19, 0x40000000}),
+ {}
+ };
+
--- /dev/null
+From 2143c8ae423dbc3f036cae8d18a5a3c272df3deb Mon Sep 17 00:00:00 2001
+From: Kailang Yang <kailang@realtek.com>
+Date: Tue, 12 Nov 2024 14:03:53 +0800
+Subject: ALSA: hda/realtek - update set GPIO3 to default for Thinkpad with ALC1318
+
+From: Kailang Yang <kailang@realtek.com>
+
+commit 2143c8ae423dbc3f036cae8d18a5a3c272df3deb upstream.
+
+If user no update BIOS, the speaker will no sound.
+This patch support old BIOS to have sound from speaker.
+
+Fixes: 1e707769df07 ("ALSA: hda/realtek - Set GPIO3 to default at S4 state for Thinkpad with ALC1318")
+Signed-off-by: Kailang Yang <kailang@realtek.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/pci/hda/patch_realtek.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -7436,7 +7436,6 @@ static void alc287_alc1318_playback_pcm_
+ struct snd_pcm_substream *substream,
+ int action)
+ {
+- alc_write_coef_idx(codec, 0x10, 0x8806); /* Change MLK to GPIO3 */
+ switch (action) {
+ case HDA_GEN_PCM_ACT_OPEN:
+ alc_write_coefex_idx(codec, 0x5a, 0x00, 0x954f); /* write gpio3 to high */
+@@ -7450,7 +7449,6 @@ static void alc287_alc1318_playback_pcm_
+ static void alc287_s4_power_gpio3_default(struct hda_codec *codec)
+ {
+ if (is_s4_suspend(codec)) {
+- alc_write_coef_idx(codec, 0x10, 0x8806); /* Change MLK to GPIO3 */
+ alc_write_coefex_idx(codec, 0x5a, 0x00, 0x554f); /* write gpio3 as default value */
+ }
+ }
+@@ -7459,9 +7457,17 @@ static void alc287_fixup_lenovo_thinkpad
+ const struct hda_fixup *fix, int action)
+ {
+ struct alc_spec *spec = codec->spec;
++ static const struct coef_fw coefs[] = {
++ WRITE_COEF(0x24, 0x0013), WRITE_COEF(0x25, 0x0000), WRITE_COEF(0x26, 0xC300),
++ WRITE_COEF(0x28, 0x0001), WRITE_COEF(0x29, 0xb023),
++ WRITE_COEF(0x24, 0x0013), WRITE_COEF(0x25, 0x0000), WRITE_COEF(0x26, 0xC301),
++ WRITE_COEF(0x28, 0x0001), WRITE_COEF(0x29, 0xb023),
++ };
+
+ if (action != HDA_FIXUP_ACT_PRE_PROBE)
+ return;
++ alc_update_coef_idx(codec, 0x10, 1<<11, 1<<11);
++ alc_process_coef_fw(codec, coefs);
+ spec->power_hook = alc287_s4_power_gpio3_default;
+ spec->gen.pcm_playback_hook = alc287_alc1318_playback_pcm_hook;
+ }
--- /dev/null
+From 7d493a5ecc26f861421af6e64427d5f697ddd395 Mon Sep 17 00:00:00 2001
+From: Josef Bacik <josef@toxicpanda.com>
+Date: Wed, 13 Nov 2024 11:05:13 -0500
+Subject: btrfs: fix incorrect comparison for delayed refs
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+commit 7d493a5ecc26f861421af6e64427d5f697ddd395 upstream.
+
+When I reworked delayed ref comparison in cf4f04325b2b ("btrfs: move
+->parent and ->ref_root into btrfs_delayed_ref_node"), I made a mistake
+and returned -1 for the case where ref1->ref_root was > than
+ref2->ref_root. This is a subtle bug that can result in improper
+delayed ref running order, which can result in transaction aborts.
+
+Fixes: cf4f04325b2b ("btrfs: move ->parent and ->ref_root into btrfs_delayed_ref_node")
+CC: stable@vger.kernel.org # 6.10+
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/delayed-ref.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
+index 65d841d7142c..cab94d141f66 100644
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -298,7 +298,7 @@ static int comp_refs(struct btrfs_delayed_ref_node *ref1,
+ if (ref1->ref_root < ref2->ref_root)
+ return -1;
+ if (ref1->ref_root > ref2->ref_root)
+- return -1;
++ return 1;
+ if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY)
+ ret = comp_data_refs(ref1, ref2);
+ }
+--
+2.47.0
+
--- /dev/null
+From 31daa34315d45d3fe77f2158d889d523d78852ea Mon Sep 17 00:00:00 2001
+From: Dave Vasilevsky <dave@vasilevsky.ca>
+Date: Tue, 17 Sep 2024 12:37:20 -0400
+Subject: crash, powerpc: default to CRASH_DUMP=n on PPC_BOOK3S_32
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Dave Vasilevsky <dave@vasilevsky.ca>
+
+commit 31daa34315d45d3fe77f2158d889d523d78852ea upstream.
+
+Fixes boot failures on 6.9 on PPC_BOOK3S_32 machines using Open Firmware.
+On these machines, the kernel refuses to boot from non-zero
+PHYSICAL_START, which occurs when CRASH_DUMP is on.
+
+Since most PPC_BOOK3S_32 machines boot via Open Firmware, it should
+default to off for them. Users booting via some other mechanism can still
+turn it on explicitly.
+
+Does not change the default on any other architectures for the
+time being.
+
+Link: https://lkml.kernel.org/r/20240917163720.1644584-1-dave@vasilevsky.ca
+Fixes: 75bc255a7444 ("crash: clean up kdump related config items")
+Signed-off-by: Dave Vasilevsky <dave@vasilevsky.ca>
+Reported-by: Reimar Döffinger <Reimar.Doeffinger@gmx.de>
+Closes: https://lists.debian.org/debian-powerpc/2024/07/msg00001.html
+Acked-by: Michael Ellerman <mpe@ellerman.id.au> [powerpc]
+Acked-by: Baoquan He <bhe@redhat.com>
+Cc: "Eric W. Biederman" <ebiederm@xmission.com>
+Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
+Cc: Reimar Döffinger <Reimar.Doeffinger@gmx.de>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/Kconfig | 3 +++
+ arch/arm64/Kconfig | 3 +++
+ arch/loongarch/Kconfig | 3 +++
+ arch/mips/Kconfig | 3 +++
+ arch/powerpc/Kconfig | 4 ++++
+ arch/riscv/Kconfig | 3 +++
+ arch/s390/Kconfig | 3 +++
+ arch/sh/Kconfig | 3 +++
+ arch/x86/Kconfig | 3 +++
+ kernel/Kconfig.kexec | 2 +-
+ 10 files changed, 29 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -1597,6 +1597,9 @@ config ATAGS_PROC
+ config ARCH_SUPPORTS_CRASH_DUMP
+ def_bool y
+
++config ARCH_DEFAULT_CRASH_DUMP
++ def_bool y
++
+ config AUTO_ZRELADDR
+ bool "Auto calculation of the decompressed kernel image address" if !ARCH_MULTIPLATFORM
+ default !(ARCH_FOOTBRIDGE || ARCH_RPC || ARCH_SA1100)
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -1559,6 +1559,9 @@ config ARCH_DEFAULT_KEXEC_IMAGE_VERIFY_S
+ config ARCH_SUPPORTS_CRASH_DUMP
+ def_bool y
+
++config ARCH_DEFAULT_CRASH_DUMP
++ def_bool y
++
+ config ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION
+ def_bool CRASH_RESERVE
+
+--- a/arch/loongarch/Kconfig
++++ b/arch/loongarch/Kconfig
+@@ -599,6 +599,9 @@ config ARCH_SUPPORTS_KEXEC
+ config ARCH_SUPPORTS_CRASH_DUMP
+ def_bool y
+
++config ARCH_DEFAULT_CRASH_DUMP
++ def_bool y
++
+ config ARCH_SELECTS_CRASH_DUMP
+ def_bool y
+ depends on CRASH_DUMP
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -2881,6 +2881,9 @@ config ARCH_SUPPORTS_KEXEC
+ config ARCH_SUPPORTS_CRASH_DUMP
+ def_bool y
+
++config ARCH_DEFAULT_CRASH_DUMP
++ def_bool y
++
+ config PHYSICAL_START
+ hex "Physical address where the kernel is loaded"
+ default "0xffffffff84000000"
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -682,6 +682,10 @@ config RELOCATABLE_TEST
+ config ARCH_SUPPORTS_CRASH_DUMP
+ def_bool PPC64 || PPC_BOOK3S_32 || PPC_85xx || (44x && !SMP)
+
++config ARCH_DEFAULT_CRASH_DUMP
++ bool
++ default y if !PPC_BOOK3S_32
++
+ config ARCH_SELECTS_CRASH_DUMP
+ def_bool y
+ depends on CRASH_DUMP
+--- a/arch/riscv/Kconfig
++++ b/arch/riscv/Kconfig
+@@ -884,6 +884,9 @@ config ARCH_SUPPORTS_KEXEC_PURGATORY
+ config ARCH_SUPPORTS_CRASH_DUMP
+ def_bool y
+
++config ARCH_DEFAULT_CRASH_DUMP
++ def_bool y
++
+ config ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION
+ def_bool CRASH_RESERVE
+
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -275,6 +275,9 @@ config ARCH_SUPPORTS_CRASH_DUMP
+ This option also enables s390 zfcpdump.
+ See also <file:Documentation/arch/s390/zfcpdump.rst>
+
++config ARCH_DEFAULT_CRASH_DUMP
++ def_bool y
++
+ menu "Processor type and features"
+
+ config HAVE_MARCH_Z10_FEATURES
+--- a/arch/sh/Kconfig
++++ b/arch/sh/Kconfig
+@@ -549,6 +549,9 @@ config ARCH_SUPPORTS_KEXEC
+ config ARCH_SUPPORTS_CRASH_DUMP
+ def_bool BROKEN_ON_SMP
+
++config ARCH_DEFAULT_CRASH_DUMP
++ def_bool y
++
+ config ARCH_SUPPORTS_KEXEC_JUMP
+ def_bool y
+
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -2087,6 +2087,9 @@ config ARCH_SUPPORTS_KEXEC_JUMP
+ config ARCH_SUPPORTS_CRASH_DUMP
+ def_bool X86_64 || (X86_32 && HIGHMEM)
+
++config ARCH_DEFAULT_CRASH_DUMP
++ def_bool y
++
+ config ARCH_SUPPORTS_CRASH_HOTPLUG
+ def_bool y
+
+--- a/kernel/Kconfig.kexec
++++ b/kernel/Kconfig.kexec
+@@ -97,7 +97,7 @@ config KEXEC_JUMP
+
+ config CRASH_DUMP
+ bool "kernel crash dumps"
+- default y
++ default ARCH_DEFAULT_CRASH_DUMP
+ depends on ARCH_SUPPORTS_CRASH_DUMP
+ depends on KEXEC_CORE
+ select VMCORE_INFO
--- /dev/null
+From b8d9d5fef4915a383b4ce4d0f418352aa4701a87 Mon Sep 17 00:00:00 2001
+From: Tom Chung <chiahsuan.chung@amd.com>
+Date: Tue, 29 Oct 2024 15:38:16 +0800
+Subject: drm/amd/display: Change some variable name of psr
+
+From: Tom Chung <chiahsuan.chung@amd.com>
+
+commit b8d9d5fef4915a383b4ce4d0f418352aa4701a87 upstream.
+
+Panel Replay feature may also use the same variable with PSR.
+Change the variable name and make it not specify for PSR.
+
+Reviewed-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Tom Chung <chiahsuan.chung@amd.com>
+Signed-off-by: Hamza Mahfooz <hamza.mahfooz@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit c7fafb7a46b38a11a19342d153f505749bf56f3e)
+Cc: stable@vger.kernel.org # 6.11+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 22 +++++------
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 2 -
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c | 2 -
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h | 2 -
+ 4 files changed, 14 insertions(+), 14 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -6740,7 +6740,7 @@ create_stream_for_sink(struct drm_connec
+ if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22)
+ tf = TRANSFER_FUNC_GAMMA_22;
+ mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
+- aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
++ aconnector->sr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
+
+ }
+ finish:
+@@ -8983,7 +8983,7 @@ static void amdgpu_dm_commit_planes(stru
+ * during the PSR-SU was disabled.
+ */
+ if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
+- acrtc_attach->dm_irq_params.allow_psr_entry &&
++ acrtc_attach->dm_irq_params.allow_sr_entry &&
+ #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+ !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
+ #endif
+@@ -9218,27 +9218,27 @@ static void amdgpu_dm_commit_planes(stru
+ }
+ }
+
+- /* Decrement skip count when PSR is enabled and we're doing fast updates. */
++ /* Decrement skip count when SR is enabled and we're doing fast updates. */
+ if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
+ acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
+ struct amdgpu_dm_connector *aconn =
+ (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
+
+- if (aconn->psr_skip_count > 0)
+- aconn->psr_skip_count--;
++ if (aconn->sr_skip_count > 0)
++ aconn->sr_skip_count--;
+
+- /* Allow PSR when skip count is 0. */
+- acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
++ /* Allow SR when skip count is 0. */
++ acrtc_attach->dm_irq_params.allow_sr_entry = !aconn->sr_skip_count;
+
+ /*
+- * If sink supports PSR SU, there is no need to rely on
+- * a vblank event disable request to enable PSR. PSR SU
++ * If sink supports PSR SU/Panel Replay, there is no need to rely on
++ * a vblank event disable request to enable PSR/RP. PSR SU/RP
+ * can be enabled immediately once OS demonstrates an
+ * adequate number of fast atomic commits to notify KMD
+ * of update events. See `vblank_control_worker()`.
+ */
+ if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
+- acrtc_attach->dm_irq_params.allow_psr_entry &&
++ acrtc_attach->dm_irq_params.allow_sr_entry &&
+ #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+ !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
+ #endif
+@@ -9249,7 +9249,7 @@ static void amdgpu_dm_commit_planes(stru
+ 500000000)
+ amdgpu_dm_psr_enable(acrtc_state->stream);
+ } else {
+- acrtc_attach->dm_irq_params.allow_psr_entry = false;
++ acrtc_attach->dm_irq_params.allow_sr_entry = false;
+ }
+
+ mutex_unlock(&dm->dc_lock);
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+@@ -727,7 +727,7 @@ struct amdgpu_dm_connector {
+ /* Cached display modes */
+ struct drm_display_mode freesync_vid_base;
+
+- int psr_skip_count;
++ int sr_skip_count;
+ bool disallow_edp_enter_psr;
+
+ /* Record progress status of mst*/
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+@@ -269,7 +269,7 @@ static void amdgpu_dm_crtc_vblank_contro
+ if (vblank_work->stream && vblank_work->stream->link) {
+ amdgpu_dm_crtc_set_panel_sr_feature(
+ vblank_work, vblank_work->enable,
+- vblank_work->acrtc->dm_irq_params.allow_psr_entry ||
++ vblank_work->acrtc->dm_irq_params.allow_sr_entry ||
+ vblank_work->stream->link->replay_settings.replay_feature_enabled);
+ }
+
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
+@@ -33,7 +33,7 @@ struct dm_irq_params {
+ struct mod_vrr_params vrr_params;
+ struct dc_stream_state *stream;
+ int active_planes;
+- bool allow_psr_entry;
++ bool allow_sr_entry;
+ struct mod_freesync_config freesync_config;
+
+ #ifdef CONFIG_DEBUG_FS
--- /dev/null
+From bd8a9576617439bdc907c9ce0875909aea4221cb Mon Sep 17 00:00:00 2001
+From: Tom Chung <chiahsuan.chung@amd.com>
+Date: Tue, 29 Oct 2024 17:28:23 +0800
+Subject: drm/amd/display: Fix Panel Replay not update screen correctly
+
+From: Tom Chung <chiahsuan.chung@amd.com>
+
+commit bd8a9576617439bdc907c9ce0875909aea4221cb upstream.
+
+[Why]
+In certain use case such as KDE login screen, there will be no atomic
+commit while do the frame update.
+If the Panel Replay enabled, it will cause the screen not updated and
+looks like system hang.
+
+[How]
+Delay few atomic commits before enabled the Panel Replay just like PSR.
+
+Fixes: be64336307a6c ("drm/amd/display: Re-enable panel replay feature")
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/3686
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/3682
+Tested-By: Corey Hickey <bugfood-c@fatooh.org>
+Tested-By: James Courtier-Dutton <james.dutton@gmail.com>
+Reviewed-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Tom Chung <chiahsuan.chung@amd.com>
+Signed-off-by: Hamza Mahfooz <hamza.mahfooz@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit ca628f0eddd73adfccfcc06b2a55d915bca4a342)
+Cc: stable@vger.kernel.org # 6.11+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 111 ++++++++---------
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c | 5
+ 2 files changed, 59 insertions(+), 57 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -8830,6 +8830,56 @@ static void amdgpu_dm_update_cursor(stru
+ }
+ }
+
++static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach,
++ const struct dm_crtc_state *acrtc_state,
++ const u64 current_ts)
++{
++ struct psr_settings *psr = &acrtc_state->stream->link->psr_settings;
++ struct replay_settings *pr = &acrtc_state->stream->link->replay_settings;
++ struct amdgpu_dm_connector *aconn =
++ (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
++
++ if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
++ if (pr->config.replay_supported && !pr->replay_feature_enabled)
++ amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn);
++ else if (psr->psr_version != DC_PSR_VERSION_UNSUPPORTED &&
++ !psr->psr_feature_enabled)
++ if (!aconn->disallow_edp_enter_psr)
++ amdgpu_dm_link_setup_psr(acrtc_state->stream);
++ }
++
++ /* Decrement skip count when SR is enabled and we're doing fast updates. */
++ if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
++ (psr->psr_feature_enabled || pr->config.replay_supported)) {
++ if (aconn->sr_skip_count > 0)
++ aconn->sr_skip_count--;
++
++ /* Allow SR when skip count is 0. */
++ acrtc_attach->dm_irq_params.allow_sr_entry = !aconn->sr_skip_count;
++
++ /*
++ * If sink supports PSR SU/Panel Replay, there is no need to rely on
++ * a vblank event disable request to enable PSR/RP. PSR SU/RP
++ * can be enabled immediately once OS demonstrates an
++ * adequate number of fast atomic commits to notify KMD
++ * of update events. See `vblank_control_worker()`.
++ */
++ if (acrtc_attach->dm_irq_params.allow_sr_entry &&
++#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
++ !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
++#endif
++ (current_ts - psr->psr_dirty_rects_change_timestamp_ns) > 500000000) {
++ if (pr->replay_feature_enabled && !pr->replay_allow_active)
++ amdgpu_dm_replay_enable(acrtc_state->stream, true);
++ if (psr->psr_version >= DC_PSR_VERSION_SU_1 &&
++ !psr->psr_allow_active && !aconn->disallow_edp_enter_psr)
++ amdgpu_dm_psr_enable(acrtc_state->stream);
++ }
++ } else {
++ acrtc_attach->dm_irq_params.allow_sr_entry = false;
++ }
++}
++
+ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ struct drm_device *dev,
+ struct amdgpu_display_manager *dm,
+@@ -9158,9 +9208,12 @@ static void amdgpu_dm_commit_planes(stru
+ bundle->stream_update.abm_level = &acrtc_state->abm_level;
+
+ mutex_lock(&dm->dc_lock);
+- if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
+- acrtc_state->stream->link->psr_settings.psr_allow_active)
+- amdgpu_dm_psr_disable(acrtc_state->stream);
++ if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
++ if (acrtc_state->stream->link->replay_settings.replay_allow_active)
++ amdgpu_dm_replay_disable(acrtc_state->stream);
++ if (acrtc_state->stream->link->psr_settings.psr_allow_active)
++ amdgpu_dm_psr_disable(acrtc_state->stream);
++ }
+ mutex_unlock(&dm->dc_lock);
+
+ /*
+@@ -9201,57 +9254,7 @@ static void amdgpu_dm_commit_planes(stru
+ dm_update_pflip_irq_state(drm_to_adev(dev),
+ acrtc_attach);
+
+- if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
+- if (acrtc_state->stream->link->replay_settings.config.replay_supported &&
+- !acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
+- struct amdgpu_dm_connector *aconn =
+- (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
+- amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn);
+- } else if (acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
+- !acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
+-
+- struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *)
+- acrtc_state->stream->dm_stream_context;
+-
+- if (!aconn->disallow_edp_enter_psr)
+- amdgpu_dm_link_setup_psr(acrtc_state->stream);
+- }
+- }
+-
+- /* Decrement skip count when SR is enabled and we're doing fast updates. */
+- if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
+- acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
+- struct amdgpu_dm_connector *aconn =
+- (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
+-
+- if (aconn->sr_skip_count > 0)
+- aconn->sr_skip_count--;
+-
+- /* Allow SR when skip count is 0. */
+- acrtc_attach->dm_irq_params.allow_sr_entry = !aconn->sr_skip_count;
+-
+- /*
+- * If sink supports PSR SU/Panel Replay, there is no need to rely on
+- * a vblank event disable request to enable PSR/RP. PSR SU/RP
+- * can be enabled immediately once OS demonstrates an
+- * adequate number of fast atomic commits to notify KMD
+- * of update events. See `vblank_control_worker()`.
+- */
+- if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
+- acrtc_attach->dm_irq_params.allow_sr_entry &&
+-#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+- !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
+-#endif
+- !acrtc_state->stream->link->psr_settings.psr_allow_active &&
+- !aconn->disallow_edp_enter_psr &&
+- (timestamp_ns -
+- acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) >
+- 500000000)
+- amdgpu_dm_psr_enable(acrtc_state->stream);
+- } else {
+- acrtc_attach->dm_irq_params.allow_sr_entry = false;
+- }
+-
++ amdgpu_dm_enable_self_refresh(acrtc_attach, acrtc_state, timestamp_ns);
+ mutex_unlock(&dm->dc_lock);
+ }
+
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+@@ -266,11 +266,10 @@ static void amdgpu_dm_crtc_vblank_contro
+ * where the SU region is the full hactive*vactive region. See
+ * fill_dc_dirty_rects().
+ */
+- if (vblank_work->stream && vblank_work->stream->link) {
++ if (vblank_work->stream && vblank_work->stream->link && vblank_work->acrtc) {
+ amdgpu_dm_crtc_set_panel_sr_feature(
+ vblank_work, vblank_work->enable,
+- vblank_work->acrtc->dm_irq_params.allow_sr_entry ||
+- vblank_work->stream->link->replay_settings.replay_feature_enabled);
++ vblank_work->acrtc->dm_irq_params.allow_sr_entry);
+ }
+
+ if (dm->active_vblank_irq_count == 0) {
--- /dev/null
+From 17e68f89132b9ee4b144358b49e5df404b314181 Mon Sep 17 00:00:00 2001
+From: Leo Li <sunpeng.li@amd.com>
+Date: Thu, 11 Jul 2024 14:38:11 -0400
+Subject: drm/amd/display: Run idle optimizations at end of vblank handler
+
+From: Leo Li <sunpeng.li@amd.com>
+
+commit 17e68f89132b9ee4b144358b49e5df404b314181 upstream.
+
+[Why & How]
+1. After allowing idle optimizations, hw programming is disallowed.
+2. Before hw programming, we need to disallow idle optimizations.
+
+Otherwise, in scenario 1, we will immediately kick hw out of idle
+optimizations with register access.
+
+Scenario 2 is less of a concern, since any register access will kick
+hw out of idle optimizations. But we'll do it early for correctness.
+
+Signed-off-by: Leo Li <sunpeng.li@amd.com>
+Reviewed-by: Aurabindo Pillai <aurabindo.pillai@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Rodrigo Siqueira <rodrigo.siqueira@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+@@ -251,9 +251,10 @@ static void amdgpu_dm_crtc_vblank_contro
+ else if (dm->active_vblank_irq_count)
+ dm->active_vblank_irq_count--;
+
+- dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
+-
+- DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
++ if (dm->active_vblank_irq_count > 0) {
++ DRM_DEBUG_KMS("Allow idle optimizations (MALL): false\n");
++ dc_allow_idle_optimizations(dm->dc, false);
++ }
+
+ /*
+ * Control PSR based on vblank requirements from OS
+@@ -272,6 +273,11 @@ static void amdgpu_dm_crtc_vblank_contro
+ vblank_work->stream->link->replay_settings.replay_feature_enabled);
+ }
+
++ if (dm->active_vblank_irq_count == 0) {
++ DRM_DEBUG_KMS("Allow idle optimizations (MALL): true\n");
++ dc_allow_idle_optimizations(dm->dc, true);
++ }
++
+ mutex_unlock(&dm->dc_lock);
+
+ dc_stream_release(vblank_work->stream);
--- /dev/null
+From 32c4514455b2b8fde506f8c0962f15c7e4c26f1d Mon Sep 17 00:00:00 2001
+From: Francesco Dolcini <francesco.dolcini@toradex.com>
+Date: Thu, 26 Sep 2024 16:12:46 +0200
+Subject: drm/bridge: tc358768: Fix DSI command tx
+
+From: Francesco Dolcini <francesco.dolcini@toradex.com>
+
+commit 32c4514455b2b8fde506f8c0962f15c7e4c26f1d upstream.
+
+Wait for the command transmission to be completed in the DSI transfer
+function polling for the dc_start bit to go back to idle state after the
+transmission is started.
+
+This is documented in the datasheet and failures to do so lead to
+commands corruption.
+
+Fixes: ff1ca6397b1d ("drm/bridge: Add tc358768 driver")
+Cc: stable@vger.kernel.org
+Signed-off-by: Francesco Dolcini <francesco.dolcini@toradex.com>
+Reviewed-by: Neil Armstrong <neil.armstrong@linaro.org>
+Link: https://lore.kernel.org/r/20240926141246.48282-1-francesco@dolcini.it
+Signed-off-by: Neil Armstrong <neil.armstrong@linaro.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240926141246.48282-1-francesco@dolcini.it
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/bridge/tc358768.c | 21 +++++++++++++++++++--
+ 1 file changed, 19 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/bridge/tc358768.c
++++ b/drivers/gpu/drm/bridge/tc358768.c
+@@ -125,6 +125,9 @@
+ #define TC358768_DSI_CONFW_MODE_CLR (6 << 29)
+ #define TC358768_DSI_CONFW_ADDR_DSI_CONTROL (0x3 << 24)
+
++/* TC358768_DSICMD_TX (0x0600) register */
++#define TC358768_DSI_CMDTX_DC_START BIT(0)
++
+ static const char * const tc358768_supplies[] = {
+ "vddc", "vddmipi", "vddio"
+ };
+@@ -229,6 +232,21 @@ static void tc358768_update_bits(struct
+ tc358768_write(priv, reg, tmp);
+ }
+
++static void tc358768_dsicmd_tx(struct tc358768_priv *priv)
++{
++ u32 val;
++
++ /* start transfer */
++ tc358768_write(priv, TC358768_DSICMD_TX, TC358768_DSI_CMDTX_DC_START);
++ if (priv->error)
++ return;
++
++ /* wait transfer completion */
++ priv->error = regmap_read_poll_timeout(priv->regmap, TC358768_DSICMD_TX, val,
++ (val & TC358768_DSI_CMDTX_DC_START) == 0,
++ 100, 100000);
++}
++
+ static int tc358768_sw_reset(struct tc358768_priv *priv)
+ {
+ /* Assert Reset */
+@@ -516,8 +534,7 @@ static ssize_t tc358768_dsi_host_transfe
+ }
+ }
+
+- /* start transfer */
+- tc358768_write(priv, TC358768_DSICMD_TX, 1);
++ tc358768_dsicmd_tx(priv);
+
+ ret = tc358768_clear_error(priv);
+ if (ret)
--- /dev/null
+From be7eeaba2a11d7c16a9dc034a25f224f1343f303 Mon Sep 17 00:00:00 2001
+From: Matthew Auld <matthew.auld@intel.com>
+Date: Tue, 12 Nov 2024 16:28:28 +0000
+Subject: drm/xe: handle flat ccs during hibernation on igpu
+
+From: Matthew Auld <matthew.auld@intel.com>
+
+commit be7eeaba2a11d7c16a9dc034a25f224f1343f303 upstream.
+
+Starting from LNL, CCS has moved over to flat CCS model where there is
+now dedicated memory reserved for storing compression state. On
+platforms like LNL this reserved memory lives inside graphics stolen
+memory, which is not treated like normal RAM and is therefore skipped by
+the core kernel when creating the hibernation image. Currently if
+something was compressed and we enter hibernation all the corresponding
+CCS state is lost on such HW, resulting in corrupted memory. To fix this
+evict user buffers from TT -> SYSTEM to ensure we take a snapshot of the
+raw CCS state when entering hibernation, where upon resuming we can
+restore the raw CCS state back when next validating the buffer. This has
+been confirmed to fix display corruption on LNL when coming back from
+hibernation.
+
+Fixes: cbdc52c11c9b ("drm/xe/xe2: Support flat ccs")
+Link: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/3409
+Signed-off-by: Matthew Auld <matthew.auld@intel.com>
+Cc: Matthew Brost <matthew.brost@intel.com>
+Cc: <stable@vger.kernel.org> # v6.8+
+Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20241112162827.116523-2-matthew.auld@intel.com
+(cherry picked from commit c8b3c6db941299d7cc31bd9befed3518fdebaf68)
+Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_bo_evict.c | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/xe/xe_bo_evict.c
++++ b/drivers/gpu/drm/xe/xe_bo_evict.c
+@@ -38,10 +38,21 @@ int xe_bo_evict_all(struct xe_device *xe
+ return 0;
+
+ /* User memory */
+- for (mem_type = XE_PL_VRAM0; mem_type <= XE_PL_VRAM1; ++mem_type) {
++ for (mem_type = XE_PL_TT; mem_type <= XE_PL_VRAM1; ++mem_type) {
+ struct ttm_resource_manager *man =
+ ttm_manager_type(bdev, mem_type);
+
++ /*
++ * On igpu platforms with flat CCS we need to ensure we save and restore any CCS
++ * state since this state lives inside graphics stolen memory which doesn't survive
++ * hibernation.
++ *
++ * This can be further improved by only evicting objects that we know have actually
++ * used a compression enabled PAT index.
++ */
++ if (mem_type == XE_PL_TT && (IS_DGFX(xe) || !xe_device_has_flat_ccs(xe)))
++ continue;
++
+ if (man) {
+ ret = ttm_resource_manager_evict_all(bdev, man);
+ if (ret)
--- /dev/null
+From c0403e4ceecaefbeaf78263dffcd3e3f06a19f6b Mon Sep 17 00:00:00 2001
+From: Ashutosh Dixit <ashutosh.dixit@intel.com>
+Date: Fri, 8 Nov 2024 19:20:03 -0800
+Subject: drm/xe/oa: Fix "Missing outer runtime PM protection" warning
+
+From: Ashutosh Dixit <ashutosh.dixit@intel.com>
+
+commit c0403e4ceecaefbeaf78263dffcd3e3f06a19f6b upstream.
+
+Fix the following drm_WARN:
+
+[953.586396] xe 0000:00:02.0: [drm] Missing outer runtime PM protection
+...
+<4> [953.587090] ? xe_pm_runtime_get_noresume+0x8d/0xa0 [xe]
+<4> [953.587208] guc_exec_queue_add_msg+0x28/0x130 [xe]
+<4> [953.587319] guc_exec_queue_fini+0x3a/0x40 [xe]
+<4> [953.587425] xe_exec_queue_destroy+0xb3/0xf0 [xe]
+<4> [953.587515] xe_oa_release+0x9c/0xc0 [xe]
+
+Suggested-by: John Harrison <john.c.harrison@intel.com>
+Suggested-by: Matthew Brost <matthew.brost@intel.com>
+Fixes: e936f885f1e9 ("drm/xe/oa/uapi: Expose OA stream fd")
+Cc: stable@vger.kernel.org
+Signed-off-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
+Reviewed-by: Matthew Brost <matthew.brost@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20241109032003.3093811-1-ashutosh.dixit@intel.com
+(cherry picked from commit b107c63d2953907908fd0cafb0e543b3c3167b75)
+Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_oa.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
+index 2804f14f8f29..78823f53d290 100644
+--- a/drivers/gpu/drm/xe/xe_oa.c
++++ b/drivers/gpu/drm/xe/xe_oa.c
+@@ -1206,9 +1206,11 @@ static int xe_oa_release(struct inode *inode, struct file *file)
+ struct xe_oa_stream *stream = file->private_data;
+ struct xe_gt *gt = stream->gt;
+
++ xe_pm_runtime_get(gt_to_xe(gt));
+ mutex_lock(>->oa.gt_lock);
+ xe_oa_destroy_locked(stream);
+ mutex_unlock(>->oa.gt_lock);
++ xe_pm_runtime_put(gt_to_xe(gt));
+
+ /* Release the reference the OA stream kept on the driver */
+ drm_dev_put(>_to_xe(gt)->drm);
+--
+2.47.0
+
--- /dev/null
+From 699ae6241920b0fa837fa57e61f7d5b0e2e65b58 Mon Sep 17 00:00:00 2001
+From: Mateusz Guzik <mjguzik@gmail.com>
+Date: Tue, 6 Aug 2024 15:36:07 +0200
+Subject: evm: stop avoidably reading i_writecount in evm_file_release
+
+From: Mateusz Guzik <mjguzik@gmail.com>
+
+commit 699ae6241920b0fa837fa57e61f7d5b0e2e65b58 upstream.
+
+The EVM_NEW_FILE flag is unset if the file already existed at the time
+of open and this can be checked without looking at i_writecount.
+
+Not accessing it reduces traffic on the cacheline during parallel open
+of the same file and drops the evm_file_release routine from second place
+to bottom of the profile.
+
+Fixes: 75a323e604fc ("evm: Make it independent from 'integrity' LSM")
+Signed-off-by: Mateusz Guzik <mjguzik@gmail.com>
+Reviewed-by: Roberto Sassu <roberto.sassu@huawei.com>
+Cc: stable@vger.kernel.org # 6.9+
+Signed-off-by: Mimi Zohar <zohar@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ security/integrity/evm/evm_main.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/security/integrity/evm/evm_main.c
++++ b/security/integrity/evm/evm_main.c
+@@ -1084,7 +1084,8 @@ static void evm_file_release(struct file
+ if (!S_ISREG(inode->i_mode) || !(mode & FMODE_WRITE))
+ return;
+
+- if (iint && atomic_read(&inode->i_writecount) == 1)
++ if (iint && iint->flags & EVM_NEW_FILE &&
++ atomic_read(&inode->i_writecount) == 1)
+ iint->flags &= ~EVM_NEW_FILE;
+ }
+
--- /dev/null
+From e2261bb81e0db86c3c866734cf93232a58464ecd Mon Sep 17 00:00:00 2001
+From: Sibi Sankar <quic_sibis@quicinc.com>
+Date: Wed, 30 Oct 2024 18:25:09 +0530
+Subject: firmware: arm_scmi: Report duplicate opps as firmware bugs
+
+From: Sibi Sankar <quic_sibis@quicinc.com>
+
+commit e2261bb81e0db86c3c866734cf93232a58464ecd upstream.
+
+Duplicate opps reported by buggy SCP firmware currently show up
+as warnings even though the only functional impact is that the
+level/index remain inaccessible. Make it less scary for the end
+user by using dev_info instead, along with FW_BUG tag.
+
+Suggested-by: Johan Hovold <johan+linaro@kernel.org>
+Signed-off-by: Sibi Sankar <quic_sibis@quicinc.com>
+Reviewed-by: Cristian Marussi <cristian.marussi@arm.com>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Reviewed-by: Sudeep Holla <sudeep.holla@arm.com>
+Cc: stable@vger.kernel.org
+Message-ID: <20241030125512.2884761-4-quic_sibis@quicinc.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/firmware/arm_scmi/perf.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/firmware/arm_scmi/perf.c
++++ b/drivers/firmware/arm_scmi/perf.c
+@@ -387,7 +387,7 @@ process_response_opp(struct device *dev,
+
+ ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
+ if (ret) {
+- dev_warn(dev, "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
++ dev_info(dev, FW_BUG "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
+ opp->perf, dom->info.name, ret);
+ return ret;
+ }
+@@ -409,7 +409,7 @@ process_response_opp_v4(struct device *d
+
+ ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
+ if (ret) {
+- dev_warn(dev, "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
++ dev_info(dev, FW_BUG "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
+ opp->perf, dom->info.name, ret);
+ return ret;
+ }
--- /dev/null
+From 5d8a766226587d111620df520dd9239c009cb154 Mon Sep 17 00:00:00 2001
+From: Cristian Marussi <cristian.marussi@arm.com>
+Date: Wed, 30 Oct 2024 18:25:08 +0530
+Subject: firmware: arm_scmi: Skip opp duplicates
+
+From: Cristian Marussi <cristian.marussi@arm.com>
+
+commit 5d8a766226587d111620df520dd9239c009cb154 upstream.
+
+Buggy firmware can reply with duplicated PERF opps descriptors.
+
+Ensure that the bad duplicates reported by the platform firmware don't
+get added to the opp-tables.
+
+Reported-by: Johan Hovold <johan+linaro@kernel.org>
+Closes: https://lore.kernel.org/lkml/ZoQjAWse2YxwyRJv@hovoldconsulting.com/
+Signed-off-by: Cristian Marussi <cristian.marussi@arm.com>
+Tested-by: Johan Hovold <johan+linaro@kernel.org>
+Reviewed-by: Sudeep Holla <sudeep.holla@arm.com>
+Cc: stable@vger.kernel.org
+Message-ID: <20241030125512.2884761-3-quic_sibis@quicinc.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/firmware/arm_scmi/perf.c | 40 +++++++++++++++++++++++++++++----------
+ 1 file changed, 30 insertions(+), 10 deletions(-)
+
+--- a/drivers/firmware/arm_scmi/perf.c
++++ b/drivers/firmware/arm_scmi/perf.c
+@@ -373,7 +373,7 @@ static int iter_perf_levels_update_state
+ return 0;
+ }
+
+-static inline void
++static inline int
+ process_response_opp(struct device *dev, struct perf_dom_info *dom,
+ struct scmi_opp *opp, unsigned int loop_idx,
+ const struct scmi_msg_resp_perf_describe_levels *r)
+@@ -386,12 +386,16 @@ process_response_opp(struct device *dev,
+ le16_to_cpu(r->opp[loop_idx].transition_latency_us);
+
+ ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
+- if (ret)
++ if (ret) {
+ dev_warn(dev, "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
+ opp->perf, dom->info.name, ret);
++ return ret;
++ }
++
++ return 0;
+ }
+
+-static inline void
++static inline int
+ process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
+ struct scmi_opp *opp, unsigned int loop_idx,
+ const struct scmi_msg_resp_perf_describe_levels_v4 *r)
+@@ -404,9 +408,11 @@ process_response_opp_v4(struct device *d
+ le16_to_cpu(r->opp[loop_idx].transition_latency_us);
+
+ ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
+- if (ret)
++ if (ret) {
+ dev_warn(dev, "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
+ opp->perf, dom->info.name, ret);
++ return ret;
++ }
+
+ /* Note that PERF v4 reports always five 32-bit words */
+ opp->indicative_freq = le32_to_cpu(r->opp[loop_idx].indicative_freq);
+@@ -415,13 +421,21 @@ process_response_opp_v4(struct device *d
+
+ ret = xa_insert(&dom->opps_by_idx, opp->level_index, opp,
+ GFP_KERNEL);
+- if (ret)
++ if (ret) {
+ dev_warn(dev,
+ "Failed to add opps_by_idx at %d for %s - ret:%d\n",
+ opp->level_index, dom->info.name, ret);
+
++ /* Cleanup by_lvl too */
++ xa_erase(&dom->opps_by_lvl, opp->perf);
++
++ return ret;
++ }
++
+ hash_add(dom->opps_by_freq, &opp->hash, opp->indicative_freq);
+ }
++
++ return 0;
+ }
+
+ static int
+@@ -429,16 +443,22 @@ iter_perf_levels_process_response(const
+ const void *response,
+ struct scmi_iterator_state *st, void *priv)
+ {
++ int ret;
+ struct scmi_opp *opp;
+ struct scmi_perf_ipriv *p = priv;
+
+- opp = &p->perf_dom->opp[st->desc_index + st->loop_idx];
++ opp = &p->perf_dom->opp[p->perf_dom->opp_count];
+ if (PROTOCOL_REV_MAJOR(p->version) <= 0x3)
+- process_response_opp(ph->dev, p->perf_dom, opp, st->loop_idx,
+- response);
++ ret = process_response_opp(ph->dev, p->perf_dom, opp,
++ st->loop_idx, response);
+ else
+- process_response_opp_v4(ph->dev, p->perf_dom, opp, st->loop_idx,
+- response);
++ ret = process_response_opp_v4(ph->dev, p->perf_dom, opp,
++ st->loop_idx, response);
++
++ /* Skip BAD duplicates received from firmware */
++ if (ret)
++ return ret == -EBUSY ? 0 : ret;
++
+ p->perf_dom->opp_count++;
+
+ dev_dbg(ph->dev, "Level %d Power %d Latency %dus Ifreq %d Index %d\n",
--- /dev/null
+From 669b0cb81e4e4e78cff77a5b367c7f70c0c6c05e Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@linaro.org>
+Date: Thu, 14 Nov 2024 11:59:32 +0300
+Subject: fs/proc/task_mmu: prevent integer overflow in pagemap_scan_get_args()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+commit 669b0cb81e4e4e78cff77a5b367c7f70c0c6c05e upstream.
+
+The "arg->vec_len" variable is a u64 that comes from the user at the start
+of the function. The "arg->vec_len * sizeof(struct page_region))"
+multiplication can lead to integer wrapping. Use size_mul() to avoid
+that.
+
+Also the size_add/mul() functions work on unsigned long so for 32bit
+systems we need to ensure that "arg->vec_len" fits in an unsigned long.
+
+Link: https://lkml.kernel.org/r/39d41335-dd4d-48ed-8a7f-402c57d8ea84@stanley.mountain
+Fixes: 52526ca7fdb9 ("fs/proc/task_mmu: implement IOCTL to get and optionally clear info about PTEs")
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Cc: Andrei Vagin <avagin@google.com>
+Cc: Andrii Nakryiko <andrii@kernel.org>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Michał Mirosław <mirq-linux@rere.qmqm.pl>
+Cc: Muhammad Usama Anjum <usama.anjum@collabora.com>
+Cc: Oscar Salvador <osalvador@suse.de>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Ryan Roberts <ryan.roberts@arm.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/proc/task_mmu.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -2672,8 +2672,10 @@ static int pagemap_scan_get_args(struct
+ return -EFAULT;
+ if (!arg->vec && arg->vec_len)
+ return -EINVAL;
++ if (UINT_MAX == SIZE_MAX && arg->vec_len > SIZE_MAX)
++ return -EINVAL;
+ if (arg->vec && !access_ok((void __user *)(long)arg->vec,
+- arg->vec_len * sizeof(struct page_region)))
++ size_mul(arg->vec_len, sizeof(struct page_region))))
+ return -EFAULT;
+
+ /* Fixup default values */
--- /dev/null
+From 923168a0631bc42fffd55087b337b1b6c54dcff5 Mon Sep 17 00:00:00 2001
+From: Samasth Norway Ananda <samasth.norway.ananda@oracle.com>
+Date: Wed, 7 Aug 2024 10:27:13 -0700
+Subject: ima: fix buffer overrun in ima_eventdigest_init_common
+
+From: Samasth Norway Ananda <samasth.norway.ananda@oracle.com>
+
+commit 923168a0631bc42fffd55087b337b1b6c54dcff5 upstream.
+
+Function ima_eventdigest_init() calls ima_eventdigest_init_common()
+with HASH_ALGO__LAST which is then used to access the array
+hash_digest_size[] leading to buffer overrun. Have a conditional
+statement to handle this.
+
+Fixes: 9fab303a2cb3 ("ima: fix violation measurement list record")
+Signed-off-by: Samasth Norway Ananda <samasth.norway.ananda@oracle.com>
+Tested-by: Enrico Bravi (PhD at polito.it) <enrico.bravi@huawei.com>
+Cc: stable@vger.kernel.org # 5.19+
+Signed-off-by: Mimi Zohar <zohar@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ security/integrity/ima/ima_template_lib.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+--- a/security/integrity/ima/ima_template_lib.c
++++ b/security/integrity/ima/ima_template_lib.c
+@@ -318,15 +318,21 @@ static int ima_eventdigest_init_common(c
+ hash_algo_name[hash_algo]);
+ }
+
+- if (digest)
++ if (digest) {
+ memcpy(buffer + offset, digest, digestsize);
+- else
++ } else {
+ /*
+ * If digest is NULL, the event being recorded is a violation.
+ * Make room for the digest by increasing the offset by the
+- * hash algorithm digest size.
++ * hash algorithm digest size. If the hash algorithm is not
++ * specified increase the offset by IMA_DIGEST_SIZE which
++ * fits SHA1 or MD5
+ */
+- offset += hash_digest_size[hash_algo];
++ if (hash_algo < HASH_ALGO__LAST)
++ offset += hash_digest_size[hash_algo];
++ else
++ offset += IMA_DIGEST_SIZE;
++ }
+
+ return ima_write_template_field_data(buffer, offset + digestsize,
+ fmt, field_data);
--- /dev/null
+From 2657b82a78f18528bef56dc1b017158490970873 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Thu, 31 Oct 2024 13:20:11 -0700
+Subject: KVM: nVMX: Treat vpid01 as current if L2 is active, but with VPID disabled
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit 2657b82a78f18528bef56dc1b017158490970873 upstream.
+
+When getting the current VPID, e.g. to emulate a guest TLB flush, return
+vpid01 if L2 is running but with VPID disabled, i.e. if VPID is disabled
+in vmcs12. Architecturally, if VPID is disabled, then the guest and host
+effectively share VPID=0. KVM emulates this behavior by using vpid01 when
+running an L2 with VPID disabled (see prepare_vmcs02_early_rare()), and so
+KVM must also treat vpid01 as the current VPID while L2 is active.
+
+Unconditionally treating vpid02 as the current VPID when L2 is active
+causes KVM to flush TLB entries for vpid02 instead of vpid01, which
+results in TLB entries from L1 being incorrectly preserved across nested
+VM-Enter to L2 (L2=>L1 isn't problematic, because the TLB flush after
+nested VM-Exit flushes vpid01).
+
+The bug manifests as failures in the vmx_apicv_test KVM-Unit-Test, as KVM
+incorrectly retains TLB entries for the APIC-access page across a nested
+VM-Enter.
+
+Opportunistically add comments at various touchpoints to explain the
+architectural requirements, and also why KVM uses vpid01 instead of vpid02.
+
+All credit goes to Chao, who root caused the issue and identified the fix.
+
+Link: https://lore.kernel.org/all/ZwzczkIlYGX+QXJz@intel.com
+Fixes: 2b4a5a5d5688 ("KVM: nVMX: Flush current VPID (L1 vs. L2) for KVM_REQ_TLB_FLUSH_GUEST")
+Cc: stable@vger.kernel.org
+Cc: Like Xu <like.xu.linux@gmail.com>
+Debugged-by: Chao Gao <chao.gao@intel.com>
+Reviewed-by: Chao Gao <chao.gao@intel.com>
+Tested-by: Chao Gao <chao.gao@intel.com>
+Link: https://lore.kernel.org/r/20241031202011.1580522-1-seanjc@google.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx/nested.c | 30 +++++++++++++++++++++++++-----
+ arch/x86/kvm/vmx/vmx.c | 2 +-
+ 2 files changed, 26 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -1197,11 +1197,14 @@ static void nested_vmx_transition_tlb_fl
+ kvm_hv_nested_transtion_tlb_flush(vcpu, enable_ept);
+
+ /*
+- * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings
+- * for *all* contexts to be flushed on VM-Enter/VM-Exit, i.e. it's a
+- * full TLB flush from the guest's perspective. This is required even
+- * if VPID is disabled in the host as KVM may need to synchronize the
+- * MMU in response to the guest TLB flush.
++ * If VPID is disabled, then guest TLB accesses use VPID=0, i.e. the
++ * same VPID as the host, and so architecturally, linear and combined
++ * mappings for VPID=0 must be flushed at VM-Enter and VM-Exit. KVM
++ * emulates L2 sharing L1's VPID=0 by using vpid01 while running L2,
++ * and so KVM must also emulate TLB flush of VPID=0, i.e. vpid01. This
++ * is required if VPID is disabled in KVM, as a TLB flush (there are no
++ * VPIDs) still occurs from L1's perspective, and KVM may need to
++ * synchronize the MMU in response to the guest TLB flush.
+ *
+ * Note, using TLB_FLUSH_GUEST is correct even if nested EPT is in use.
+ * EPT is a special snowflake, as guest-physical mappings aren't
+@@ -2291,6 +2294,17 @@ static void prepare_vmcs02_early_rare(st
+
+ vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA);
+
++ /*
++ * If VPID is disabled, then guest TLB accesses use VPID=0, i.e. the
++ * same VPID as the host. Emulate this behavior by using vpid01 for L2
++ * if VPID is disabled in vmcs12. Note, if VPID is disabled, VM-Enter
++ * and VM-Exit are architecturally required to flush VPID=0, but *only*
++ * VPID=0. I.e. using vpid02 would be ok (so long as KVM emulates the
++ * required flushes), but doing so would cause KVM to over-flush. E.g.
++ * if L1 runs L2 X with VPID12=1, then runs L2 Y with VPID12 disabled,
++ * and then runs L2 X again, then KVM can and should retain TLB entries
++ * for VPID12=1.
++ */
+ if (enable_vpid) {
+ if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
+ vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
+@@ -5890,6 +5904,12 @@ static int handle_invvpid(struct kvm_vcp
+ return nested_vmx_fail(vcpu,
+ VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
+
++ /*
++ * Always flush the effective vpid02, i.e. never flush the current VPID
++ * and never explicitly flush vpid01. INVVPID targets a VPID, not a
++ * VMCS, and so whether or not the current vmcs12 has VPID enabled is
++ * irrelevant (and there may not be a loaded vmcs12).
++ */
+ vpid02 = nested_get_vpid02(vcpu);
+ switch (type) {
+ case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -3220,7 +3220,7 @@ void vmx_flush_tlb_all(struct kvm_vcpu *
+
+ static inline int vmx_get_current_vpid(struct kvm_vcpu *vcpu)
+ {
+- if (is_guest_mode(vcpu))
++ if (is_guest_mode(vcpu) && nested_cpu_has_vpid(get_vmcs12(vcpu)))
+ return nested_get_vpid02(vcpu);
+ return to_vmx(vcpu)->vpid;
+ }
--- /dev/null
+From 5b188cc4866aaf712e896f92ac42c7802135e507 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Wed, 9 Oct 2024 08:49:41 -0700
+Subject: KVM: selftests: Disable strict aliasing
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit 5b188cc4866aaf712e896f92ac42c7802135e507 upstream.
+
+Disable strict aliasing, as has been done in the kernel proper for decades
+(literally since before git history) to fix issues where gcc will optimize
+away loads in code that looks 100% correct, but is _technically_ undefined
+behavior, and thus can be thrown away by the compiler.
+
+E.g. arm64's vPMU counter access test casts a uint64_t (unsigned long)
+pointer to a u64 (unsigned long long) pointer when setting PMCR.N via
+u64p_replace_bits(), which gcc-13 detects and optimizes away, i.e. ignores
+the result and uses the original PMCR.
+
+The issue is most easily observed by making set_pmcr_n() noinline and
+wrapping the call with printf(), e.g. sans comments, for this code:
+
+ printf("orig = %lx, next = %lx, want = %lu\n", pmcr_orig, pmcr, pmcr_n);
+ set_pmcr_n(&pmcr, pmcr_n);
+ printf("orig = %lx, next = %lx, want = %lu\n", pmcr_orig, pmcr, pmcr_n);
+
+gcc-13 generates:
+
+ 0000000000401c90 <set_pmcr_n>:
+ 401c90: f9400002 ldr x2, [x0]
+ 401c94: b3751022 bfi x2, x1, #11, #5
+ 401c98: f9000002 str x2, [x0]
+ 401c9c: d65f03c0 ret
+
+ 0000000000402660 <test_create_vpmu_vm_with_pmcr_n>:
+ 402724: aa1403e3 mov x3, x20
+ 402728: aa1503e2 mov x2, x21
+ 40272c: aa1603e0 mov x0, x22
+ 402730: aa1503e1 mov x1, x21
+ 402734: 940060ff bl 41ab30 <_IO_printf>
+ 402738: aa1403e1 mov x1, x20
+ 40273c: 910183e0 add x0, sp, #0x60
+ 402740: 97fffd54 bl 401c90 <set_pmcr_n>
+ 402744: aa1403e3 mov x3, x20
+ 402748: aa1503e2 mov x2, x21
+ 40274c: aa1503e1 mov x1, x21
+ 402750: aa1603e0 mov x0, x22
+ 402754: 940060f7 bl 41ab30 <_IO_printf>
+
+with the value stored in [sp + 0x60] ignored by both printf() above and
+in the test proper, resulting in a false failure due to vcpu_set_reg()
+simply storing the original value, not the intended value.
+
+ $ ./vpmu_counter_access
+ Random seed: 0x6b8b4567
+ orig = 3040, next = 3040, want = 0
+ orig = 3040, next = 3040, want = 0
+ ==== Test Assertion Failure ====
+ aarch64/vpmu_counter_access.c:505: pmcr_n == get_pmcr_n(pmcr)
+ pid=71578 tid=71578 errno=9 - Bad file descriptor
+ 1 0x400673: run_access_test at vpmu_counter_access.c:522
+ 2 (inlined by) main at vpmu_counter_access.c:643
+ 3 0x4132d7: __libc_start_call_main at libc-start.o:0
+ 4 0x413653: __libc_start_main at ??:0
+ 5 0x40106f: _start at ??:0
+ Failed to update PMCR.N to 0 (received: 6)
+
+Somewhat bizarrely, gcc-11 also exhibits the same behavior, but only if
+set_pmcr_n() is marked noinline, whereas gcc-13 fails even if set_pmcr_n()
+is inlined in its sole caller.
+
+Cc: stable@vger.kernel.org
+Link: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=116912
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/kvm/Makefile | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/tools/testing/selftests/kvm/Makefile
++++ b/tools/testing/selftests/kvm/Makefile
+@@ -235,10 +235,10 @@ CFLAGS += -Wall -Wstrict-prototypes -Wun
+ -Wno-gnu-variable-sized-type-not-at-end -MD -MP -DCONFIG_64BIT \
+ -fno-builtin-memcmp -fno-builtin-memcpy \
+ -fno-builtin-memset -fno-builtin-strnlen \
+- -fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \
+- -I$(LINUX_TOOL_ARCH_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude \
+- -I$(<D) -Iinclude/$(ARCH_DIR) -I ../rseq -I.. $(EXTRA_CFLAGS) \
+- $(KHDR_INCLUDES)
++ -fno-stack-protector -fno-PIE -fno-strict-aliasing \
++ -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_TOOL_ARCH_INCLUDE) \
++ -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(ARCH_DIR) \
++ -I ../rseq -I.. $(EXTRA_CFLAGS) $(KHDR_INCLUDES)
+ ifeq ($(ARCH),s390)
+ CFLAGS += -march=z10
+ endif
--- /dev/null
+From aa0d42cacf093a6fcca872edc954f6f812926a17 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Fri, 1 Nov 2024 11:50:30 -0700
+Subject: KVM: VMX: Bury Intel PT virtualization (guest/host mode) behind CONFIG_BROKEN
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit aa0d42cacf093a6fcca872edc954f6f812926a17 upstream.
+
+Hide KVM's pt_mode module param behind CONFIG_BROKEN, i.e. disable support
+for virtualizing Intel PT via guest/host mode unless BROKEN=y. There are
+myriad bugs in the implementation, some of which are fatal to the guest,
+and others which put the stability and health of the host at risk.
+
+For guest fatalities, the most glaring issue is that KVM fails to ensure
+tracing is disabled, and *stays* disabled prior to VM-Enter, which is
+necessary as hardware disallows loading (the guest's) RTIT_CTL if tracing
+is enabled (enforced via a VMX consistency check). Per the SDM:
+
+ If the logical processor is operating with Intel PT enabled (if
+ IA32_RTIT_CTL.TraceEn = 1) at the time of VM entry, the "load
+ IA32_RTIT_CTL" VM-entry control must be 0.
+
+On the host side, KVM doesn't validate the guest CPUID configuration
+provided by userspace, and even worse, uses the guest configuration to
+decide what MSRs to save/load at VM-Enter and VM-Exit. E.g. configuring
+guest CPUID to enumerate more address ranges than are supported in hardware
+will result in KVM trying to passthrough, save, and load non-existent MSRs,
+which generates a variety of WARNs, ToPA ERRORs in the host, a potential
+deadlock, etc.
+
+Fixes: f99e3daf94ff ("KVM: x86: Add Intel PT virtualization work mode")
+Cc: stable@vger.kernel.org
+Cc: Adrian Hunter <adrian.hunter@intel.com>
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Reviewed-by: Xiaoyao Li <xiaoyao.li@intel.com>
+Tested-by: Adrian Hunter <adrian.hunter@intel.com>
+Message-ID: <20241101185031.1799556-2-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx/vmx.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -217,9 +217,11 @@ module_param(ple_window_shrink, uint, 04
+ static unsigned int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
+ module_param(ple_window_max, uint, 0444);
+
+-/* Default is SYSTEM mode, 1 for host-guest mode */
++/* Default is SYSTEM mode, 1 for host-guest mode (which is BROKEN) */
+ int __read_mostly pt_mode = PT_MODE_SYSTEM;
++#ifdef CONFIG_BROKEN
+ module_param(pt_mode, int, S_IRUGO);
++#endif
+
+ struct x86_pmu_lbr __ro_after_init vmx_lbr_caps;
+
--- /dev/null
+From d3ddef46f22e8c3124e0df1f325bc6a18dadff39 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Tue, 5 Nov 2024 17:51:35 -0800
+Subject: KVM: x86: Unconditionally set irr_pending when updating APICv state
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit d3ddef46f22e8c3124e0df1f325bc6a18dadff39 upstream.
+
+Always set irr_pending (to true) when updating APICv status to fix a bug
+where KVM fails to set irr_pending when userspace sets APIC state and
+APICv is disabled, which ultimate results in KVM failing to inject the
+pending interrupt(s) that userspace stuffed into the vIRR, until another
+interrupt happens to be emulated by KVM.
+
+Only the APICv-disabled case is flawed, as KVM forces apic->irr_pending to
+be true if APICv is enabled, because not all vIRR updates will be visible
+to KVM.
+
+Hit the bug with a big hammer, even though strictly speaking KVM can scan
+the vIRR and set/clear irr_pending as appropriate for this specific case.
+The bug was introduced by commit 755c2bf87860 ("KVM: x86: lapic: don't
+touch irr_pending in kvm_apic_update_apicv when inhibiting it"), which as
+the shortlog suggests, deleted code that updated irr_pending.
+
+Before that commit, kvm_apic_update_apicv() did indeed scan the vIRR,
+with the crucial difference that kvm_apic_update_apicv() did the scan even
+when APICv was being *disabled*, e.g. due to an AVIC inhibition.
+
+ struct kvm_lapic *apic = vcpu->arch.apic;
+
+ if (vcpu->arch.apicv_active) {
+ /* irr_pending is always true when apicv is activated. */
+ apic->irr_pending = true;
+ apic->isr_count = 1;
+ } else {
+ apic->irr_pending = (apic_search_irr(apic) != -1);
+ apic->isr_count = count_vectors(apic->regs + APIC_ISR);
+ }
+
+And _that_ bug (clearing irr_pending) was introduced by commit b26a695a1d78
+("kvm: lapic: Introduce APICv update helper function"), prior to which KVM
+unconditionally set irr_pending to true in kvm_apic_set_state(), i.e.
+assumed that the new virtual APIC state could have a pending IRQ.
+
+Furthermore, in addition to introducing this issue, commit 755c2bf87860
+also papered over the underlying bug: KVM doesn't ensure CPUs and devices
+see APICv as disabled prior to searching the IRR. Waiting until KVM
+emulates an EOI to update irr_pending "works", but only because KVM won't
+emulate EOI until after refresh_apicv_exec_ctrl(), and there are plenty of
+memory barriers in between. I.e. leaving irr_pending set is basically
+hacking around bad ordering.
+
+So, effectively revert to the pre-b26a695a1d78 behavior for state restore,
+even though it's sub-optimal if no IRQs are pending, in order to provide a
+minimal fix, but leave behind a FIXME to document the ugliness. With luck,
+the ordering issue will be fixed and the mess will be cleaned up in the
+not-too-distant future.
+
+Fixes: 755c2bf87860 ("KVM: x86: lapic: don't touch irr_pending in kvm_apic_update_apicv when inhibiting it")
+Cc: stable@vger.kernel.org
+Cc: Maxim Levitsky <mlevitsk@redhat.com>
+Reported-by: Yong He <zhuangel570@gmail.com>
+Closes: https://lkml.kernel.org/r/20241023124527.1092810-1-alexyonghe%40tencent.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-ID: <20241106015135.2462147-1-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/lapic.c | 29 ++++++++++++++++++-----------
+ 1 file changed, 18 insertions(+), 11 deletions(-)
+
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -2629,19 +2629,26 @@ void kvm_apic_update_apicv(struct kvm_vc
+ {
+ struct kvm_lapic *apic = vcpu->arch.apic;
+
+- if (apic->apicv_active) {
+- /* irr_pending is always true when apicv is activated. */
+- apic->irr_pending = true;
++ /*
++ * When APICv is enabled, KVM must always search the IRR for a pending
++ * IRQ, as other vCPUs and devices can set IRR bits even if the vCPU
++ * isn't running. If APICv is disabled, KVM _should_ search the IRR
++ * for a pending IRQ. But KVM currently doesn't ensure *all* hardware,
++ * e.g. CPUs and IOMMUs, has seen the change in state, i.e. searching
++ * the IRR at this time could race with IRQ delivery from hardware that
++ * still sees APICv as being enabled.
++ *
++ * FIXME: Ensure other vCPUs and devices observe the change in APICv
++ * state prior to updating KVM's metadata caches, so that KVM
++ * can safely search the IRR and set irr_pending accordingly.
++ */
++ apic->irr_pending = true;
++
++ if (apic->apicv_active)
+ apic->isr_count = 1;
+- } else {
+- /*
+- * Don't clear irr_pending, searching the IRR can race with
+- * updates from the CPU as APICv is still active from hardware's
+- * perspective. The flag will be cleared as appropriate when
+- * KVM injects the interrupt.
+- */
++ else
+ apic->isr_count = count_vectors(apic->regs + APIC_ISR);
+- }
++
+ apic->highest_isr_cache = -1;
+ }
+
--- /dev/null
+From 139d42ca51018c1d43ab5f35829179f060d1ab31 Mon Sep 17 00:00:00 2001
+From: Kanglong Wang <wangkanglong@loongson.cn>
+Date: Tue, 12 Nov 2024 16:35:39 +0800
+Subject: LoongArch: Add WriteCombine shadow mapping in KASAN
+
+From: Kanglong Wang <wangkanglong@loongson.cn>
+
+commit 139d42ca51018c1d43ab5f35829179f060d1ab31 upstream.
+
+Currently, the kernel couldn't boot when ARCH_IOREMAP, ARCH_WRITECOMBINE
+and KASAN are enabled together. Because DMW2 is used by kernel now which
+is configured as 0xa000000000000000 for WriteCombine, but KASAN has no
+segment mapping for it. This patch fixes this issue.
+
+Solution: Add the relevant definitions for WriteCombine (DMW2) in KASAN.
+
+Cc: stable@vger.kernel.org
+Fixes: 8e02c3b782ec ("LoongArch: Add writecombine support for DMW-based ioremap()")
+Signed-off-by: Kanglong Wang <wangkanglong@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/include/asm/kasan.h | 11 ++++++++++-
+ arch/loongarch/mm/kasan_init.c | 5 +++++
+ 2 files changed, 15 insertions(+), 1 deletion(-)
+
+--- a/arch/loongarch/include/asm/kasan.h
++++ b/arch/loongarch/include/asm/kasan.h
+@@ -25,6 +25,7 @@
+ /* 64-bit segment value. */
+ #define XKPRANGE_UC_SEG (0x8000)
+ #define XKPRANGE_CC_SEG (0x9000)
++#define XKPRANGE_WC_SEG (0xa000)
+ #define XKVRANGE_VC_SEG (0xffff)
+
+ /* Cached */
+@@ -41,10 +42,17 @@
+ #define XKPRANGE_UC_SHADOW_SIZE (XKPRANGE_UC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
+ #define XKPRANGE_UC_SHADOW_END (XKPRANGE_UC_KASAN_OFFSET + XKPRANGE_UC_SHADOW_SIZE)
+
++/* WriteCombine */
++#define XKPRANGE_WC_START WRITECOMBINE_BASE
++#define XKPRANGE_WC_SIZE XRANGE_SIZE
++#define XKPRANGE_WC_KASAN_OFFSET XKPRANGE_UC_SHADOW_END
++#define XKPRANGE_WC_SHADOW_SIZE (XKPRANGE_WC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
++#define XKPRANGE_WC_SHADOW_END (XKPRANGE_WC_KASAN_OFFSET + XKPRANGE_WC_SHADOW_SIZE)
++
+ /* VMALLOC (Cached or UnCached) */
+ #define XKVRANGE_VC_START MODULES_VADDR
+ #define XKVRANGE_VC_SIZE round_up(KFENCE_AREA_END - MODULES_VADDR + 1, PGDIR_SIZE)
+-#define XKVRANGE_VC_KASAN_OFFSET XKPRANGE_UC_SHADOW_END
++#define XKVRANGE_VC_KASAN_OFFSET XKPRANGE_WC_SHADOW_END
+ #define XKVRANGE_VC_SHADOW_SIZE (XKVRANGE_VC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
+ #define XKVRANGE_VC_SHADOW_END (XKVRANGE_VC_KASAN_OFFSET + XKVRANGE_VC_SHADOW_SIZE)
+
+@@ -55,6 +63,7 @@
+
+ #define XKPRANGE_CC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_CC_KASAN_OFFSET)
+ #define XKPRANGE_UC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_UC_KASAN_OFFSET)
++#define XKPRANGE_WC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_WC_KASAN_OFFSET)
+ #define XKVRANGE_VC_SHADOW_OFFSET (KASAN_SHADOW_START + XKVRANGE_VC_KASAN_OFFSET)
+
+ extern bool kasan_early_stage;
+--- a/arch/loongarch/mm/kasan_init.c
++++ b/arch/loongarch/mm/kasan_init.c
+@@ -55,6 +55,9 @@ void *kasan_mem_to_shadow(const void *ad
+ case XKPRANGE_UC_SEG:
+ offset = XKPRANGE_UC_SHADOW_OFFSET;
+ break;
++ case XKPRANGE_WC_SEG:
++ offset = XKPRANGE_WC_SHADOW_OFFSET;
++ break;
+ case XKVRANGE_VC_SEG:
+ offset = XKVRANGE_VC_SHADOW_OFFSET;
+ break;
+@@ -79,6 +82,8 @@ const void *kasan_shadow_to_mem(const vo
+
+ if (addr >= XKVRANGE_VC_SHADOW_OFFSET)
+ return (void *)(((addr - XKVRANGE_VC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_START);
++ else if (addr >= XKPRANGE_WC_SHADOW_OFFSET)
++ return (void *)(((addr - XKPRANGE_WC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_WC_START);
+ else if (addr >= XKPRANGE_UC_SHADOW_OFFSET)
+ return (void *)(((addr - XKPRANGE_UC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_START);
+ else if (addr >= XKPRANGE_CC_SHADOW_OFFSET)
--- /dev/null
+From 227ca9f6f6aeb8aa8f0c10430b955f1fe2aeab91 Mon Sep 17 00:00:00 2001
+From: Huacai Chen <chenhuacai@loongson.cn>
+Date: Tue, 12 Nov 2024 16:35:39 +0800
+Subject: LoongArch: Disable KASAN if PGDIR_SIZE is too large for cpu_vabits
+
+From: Huacai Chen <chenhuacai@loongson.cn>
+
+commit 227ca9f6f6aeb8aa8f0c10430b955f1fe2aeab91 upstream.
+
+If PGDIR_SIZE is too large for cpu_vabits, KASAN_SHADOW_END will
+overflow UINTPTR_MAX because KASAN_SHADOW_START/KASAN_SHADOW_END are
+aligned up by PGDIR_SIZE. And then the overflowed KASAN_SHADOW_END looks
+like a user space address.
+
+For example, PGDIR_SIZE of CONFIG_4KB_4LEVEL is 2^39, which is too large
+for Loongson-2K series whose cpu_vabits = 39.
+
+Since CONFIG_4KB_4LEVEL is completely legal for CPUs with cpu_vabits <=
+39, we just disable KASAN via early return in kasan_init(). Otherwise we
+get a boot failure.
+
+Moreover, we change KASAN_SHADOW_END from the first address after KASAN
+shadow area to the last address in KASAN shadow area, in order to avoid
+the end address exactly overflow to 0 (which is a legal case). We don't
+need to worry about alignment because pgd_addr_end() can handle it.
+
+Cc: stable@vger.kernel.org
+Reviewed-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/include/asm/kasan.h | 2 +-
+ arch/loongarch/mm/kasan_init.c | 15 +++++++++++++--
+ 2 files changed, 14 insertions(+), 3 deletions(-)
+
+--- a/arch/loongarch/include/asm/kasan.h
++++ b/arch/loongarch/include/asm/kasan.h
+@@ -51,7 +51,7 @@
+ /* KAsan shadow memory start right after vmalloc. */
+ #define KASAN_SHADOW_START round_up(KFENCE_AREA_END, PGDIR_SIZE)
+ #define KASAN_SHADOW_SIZE (XKVRANGE_VC_SHADOW_END - XKPRANGE_CC_KASAN_OFFSET)
+-#define KASAN_SHADOW_END round_up(KASAN_SHADOW_START + KASAN_SHADOW_SIZE, PGDIR_SIZE)
++#define KASAN_SHADOW_END (round_up(KASAN_SHADOW_START + KASAN_SHADOW_SIZE, PGDIR_SIZE) - 1)
+
+ #define XKPRANGE_CC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_CC_KASAN_OFFSET)
+ #define XKPRANGE_UC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_UC_KASAN_OFFSET)
+--- a/arch/loongarch/mm/kasan_init.c
++++ b/arch/loongarch/mm/kasan_init.c
+@@ -218,7 +218,7 @@ static void __init kasan_map_populate(un
+ asmlinkage void __init kasan_early_init(void)
+ {
+ BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
+- BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
++ BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END + 1, PGDIR_SIZE));
+ }
+
+ static inline void kasan_set_pgd(pgd_t *pgdp, pgd_t pgdval)
+@@ -233,7 +233,7 @@ static void __init clear_pgds(unsigned l
+ * swapper_pg_dir. pgd_clear() can't be used
+ * here because it's nop on 2,3-level pagetable setups
+ */
+- for (; start < end; start += PGDIR_SIZE)
++ for (; start < end; start = pgd_addr_end(start, end))
+ kasan_set_pgd((pgd_t *)pgd_offset_k(start), __pgd(0));
+ }
+
+@@ -243,6 +243,17 @@ void __init kasan_init(void)
+ phys_addr_t pa_start, pa_end;
+
+ /*
++ * If PGDIR_SIZE is too large for cpu_vabits, KASAN_SHADOW_END will
++ * overflow UINTPTR_MAX and then looks like a user space address.
++ * For example, PGDIR_SIZE of CONFIG_4KB_4LEVEL is 2^39, which is too
++ * large for Loongson-2K series whose cpu_vabits = 39.
++ */
++ if (KASAN_SHADOW_END < vm_map_base) {
++ pr_warn("PGDIR_SIZE too large for cpu_vabits, KernelAddressSanitizer disabled.\n");
++ return;
++ }
++
++ /*
+ * PGD was populated as invalid_pmd_table or invalid_pud_table
+ * in pagetable_init() which depends on how many levels of page
+ * table you are using, but we had to clean the gpd of kasan
--- /dev/null
+From 6ce031e5d6f475d476bab55ab7d8ea168fedc4c1 Mon Sep 17 00:00:00 2001
+From: Bibo Mao <maobibo@loongson.cn>
+Date: Tue, 12 Nov 2024 16:35:39 +0800
+Subject: LoongArch: Fix AP booting issue in VM mode
+
+From: Bibo Mao <maobibo@loongson.cn>
+
+commit 6ce031e5d6f475d476bab55ab7d8ea168fedc4c1 upstream.
+
+Native IPI is used for AP booting, because it is the booting interface
+between OS and BIOS firmware. The paravirt IPI is only used inside OS,
+and native IPI is necessary to boot AP.
+
+When booting AP, we write the kernel entry address in the HW mailbox of
+AP and send IPI interrupt to it. AP executes idle instruction and waits
+for interrupts or SW events, then clears IPI interrupt and jumps to the
+kernel entry from HW mailbox.
+
+Between writing HW mailbox and sending IPI, AP can be woken up by SW
+events and jumps to the kernel entry, so ACTION_BOOT_CPU IPI interrupt
+will keep pending during AP booting. And native IPI interrupt handler
+needs be registered so that it can clear pending native IPI, else there
+will be endless interrupts during AP booting stage.
+
+Here native IPI interrupt is initialized even if paravirt IPI is used.
+
+Cc: stable@vger.kernel.org
+Fixes: 74c16b2e2b0c ("LoongArch: KVM: Add PV IPI support on guest side")
+Signed-off-by: Bibo Mao <maobibo@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/kernel/paravirt.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+--- a/arch/loongarch/kernel/paravirt.c
++++ b/arch/loongarch/kernel/paravirt.c
+@@ -50,11 +50,18 @@ static u64 paravt_steal_clock(int cpu)
+ }
+
+ #ifdef CONFIG_SMP
++static struct smp_ops native_ops;
++
+ static void pv_send_ipi_single(int cpu, unsigned int action)
+ {
+ int min, old;
+ irq_cpustat_t *info = &per_cpu(irq_stat, cpu);
+
++ if (unlikely(action == ACTION_BOOT_CPU)) {
++ native_ops.send_ipi_single(cpu, action);
++ return;
++ }
++
+ old = atomic_fetch_or(BIT(action), &info->message);
+ if (old)
+ return;
+@@ -74,6 +81,11 @@ static void pv_send_ipi_mask(const struc
+ if (cpumask_empty(mask))
+ return;
+
++ if (unlikely(action == ACTION_BOOT_CPU)) {
++ native_ops.send_ipi_mask(mask, action);
++ return;
++ }
++
+ action = BIT(action);
+ for_each_cpu(i, mask) {
+ info = &per_cpu(irq_stat, i);
+@@ -141,6 +153,8 @@ static void pv_init_ipi(void)
+ {
+ int r, swi;
+
++ /* Init native ipi irq for ACTION_BOOT_CPU */
++ native_ops.init_ipi();
+ swi = get_percpu_irq(INT_SWI0);
+ if (swi < 0)
+ panic("SWI0 IRQ mapping failed\n");
+@@ -179,6 +193,7 @@ int __init pv_ipi_init(void)
+ return 0;
+
+ #ifdef CONFIG_SMP
++ native_ops = mp_ops;
+ mp_ops.init_ipi = pv_init_ipi;
+ mp_ops.send_ipi_single = pv_send_ipi_single;
+ mp_ops.send_ipi_mask = pv_send_ipi_mask;
--- /dev/null
+From 30cec747d6bf2c3e915c075d76d9712e54cde0a6 Mon Sep 17 00:00:00 2001
+From: Huacai Chen <chenhuacai@loongson.cn>
+Date: Tue, 12 Nov 2024 16:35:36 +0800
+Subject: LoongArch: Fix early_numa_add_cpu() usage for FDT systems
+
+From: Huacai Chen <chenhuacai@loongson.cn>
+
+commit 30cec747d6bf2c3e915c075d76d9712e54cde0a6 upstream.
+
+early_numa_add_cpu() applies on physical CPU id rather than logical CPU
+id, so use cpuid instead of cpu.
+
+Cc: stable@vger.kernel.org
+Fixes: 3de9c42d02a79a5 ("LoongArch: Add all CPUs enabled by fdt to NUMA node 0")
+Reported-by: Bibo Mao <maobibo@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/kernel/smp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/loongarch/kernel/smp.c
++++ b/arch/loongarch/kernel/smp.c
+@@ -296,7 +296,7 @@ static void __init fdt_smp_setup(void)
+ __cpu_number_map[cpuid] = cpu;
+ __cpu_logical_map[cpu] = cpuid;
+
+- early_numa_add_cpu(cpu, 0);
++ early_numa_add_cpu(cpuid, 0);
+ set_cpuid_to_node(cpuid, 0);
+ }
+
--- /dev/null
+From a410656643ce4844ba9875aa4e87a7779308259b Mon Sep 17 00:00:00 2001
+From: Huacai Chen <chenhuacai@loongson.cn>
+Date: Tue, 12 Nov 2024 16:35:39 +0800
+Subject: LoongArch: Make KASAN work with 5-level page-tables
+
+From: Huacai Chen <chenhuacai@loongson.cn>
+
+commit a410656643ce4844ba9875aa4e87a7779308259b upstream.
+
+Make KASAN work with 5-level page-tables, including:
+1. Implement and use __pgd_none() and kasan_p4d_offset().
+2. As done in kasan_pmd_populate() and kasan_pte_populate(), restrict
+ the loop conditions of kasan_p4d_populate() and kasan_pud_populate()
+ to avoid unnecessary population.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/mm/kasan_init.c | 26 +++++++++++++++++++++++---
+ 1 file changed, 23 insertions(+), 3 deletions(-)
+
+--- a/arch/loongarch/mm/kasan_init.c
++++ b/arch/loongarch/mm/kasan_init.c
+@@ -13,6 +13,13 @@
+
+ static pgd_t kasan_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
+
++#ifdef __PAGETABLE_P4D_FOLDED
++#define __pgd_none(early, pgd) (0)
++#else
++#define __pgd_none(early, pgd) (early ? (pgd_val(pgd) == 0) : \
++(__pa(pgd_val(pgd)) == (unsigned long)__pa(kasan_early_shadow_p4d)))
++#endif
++
+ #ifdef __PAGETABLE_PUD_FOLDED
+ #define __p4d_none(early, p4d) (0)
+ #else
+@@ -147,6 +154,19 @@ static pud_t *__init kasan_pud_offset(p4
+ return pud_offset(p4dp, addr);
+ }
+
++static p4d_t *__init kasan_p4d_offset(pgd_t *pgdp, unsigned long addr, int node, bool early)
++{
++ if (__pgd_none(early, pgdp_get(pgdp))) {
++ phys_addr_t p4d_phys = early ?
++ __pa_symbol(kasan_early_shadow_p4d) : kasan_alloc_zeroed_page(node);
++ if (!early)
++ memcpy(__va(p4d_phys), kasan_early_shadow_p4d, sizeof(kasan_early_shadow_p4d));
++ pgd_populate(&init_mm, pgdp, (p4d_t *)__va(p4d_phys));
++ }
++
++ return p4d_offset(pgdp, addr);
++}
++
+ static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
+ unsigned long end, int node, bool early)
+ {
+@@ -183,19 +203,19 @@ static void __init kasan_pud_populate(p4
+ do {
+ next = pud_addr_end(addr, end);
+ kasan_pmd_populate(pudp, addr, next, node, early);
+- } while (pudp++, addr = next, addr != end);
++ } while (pudp++, addr = next, addr != end && __pud_none(early, READ_ONCE(*pudp)));
+ }
+
+ static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
+ unsigned long end, int node, bool early)
+ {
+ unsigned long next;
+- p4d_t *p4dp = p4d_offset(pgdp, addr);
++ p4d_t *p4dp = kasan_p4d_offset(pgdp, addr, node, early);
+
+ do {
+ next = p4d_addr_end(addr, end);
+ kasan_pud_populate(p4dp, addr, next, node, early);
+- } while (p4dp++, addr = next, addr != end);
++ } while (p4dp++, addr = next, addr != end && __p4d_none(early, READ_ONCE(*p4dp)));
+ }
+
+ static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
--- /dev/null
+From d2fab3fc27cbca7ba65c539a2c5fc7f941231983 Mon Sep 17 00:00:00 2001
+From: Sibi Sankar <quic_sibis@quicinc.com>
+Date: Wed, 30 Oct 2024 18:25:12 +0530
+Subject: mailbox: qcom-cpucp: Mark the irq with IRQF_NO_SUSPEND flag
+
+From: Sibi Sankar <quic_sibis@quicinc.com>
+
+commit d2fab3fc27cbca7ba65c539a2c5fc7f941231983 upstream.
+
+The qcom-cpucp mailbox irq is expected to function during suspend-resume
+cycle particularly when the scmi cpufreq driver can query the current
+frequency using the get_level message after the cpus are brought up during
+resume. Hence mark the irq with IRQF_NO_SUSPEND flag to fix the do_xfer
+failures we see during resume.
+
+Err Logs:
+arm-scmi firmware:scmi: timed out in resp(caller:do_xfer+0x164/0x568)
+cpufreq: cpufreq_online: ->get() failed
+
+Reported-by: Johan Hovold <johan+linaro@kernel.org>
+Closes: https://lore.kernel.org/lkml/ZtgFj1y5ggipgEOS@hovoldconsulting.com/
+Fixes: 0e2a9a03106c ("mailbox: Add support for QTI CPUCP mailbox controller")
+Signed-off-by: Sibi Sankar <quic_sibis@quicinc.com>
+Reviewed-by: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
+Tested-by: Johan Hovold <johan+linaro@kernel.org>
+Cc: stable@vger.kernel.org
+Message-ID: <20241030125512.2884761-7-quic_sibis@quicinc.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mailbox/qcom-cpucp-mbox.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/mailbox/qcom-cpucp-mbox.c b/drivers/mailbox/qcom-cpucp-mbox.c
+index e5437c294803..44f4ed15f818 100644
+--- a/drivers/mailbox/qcom-cpucp-mbox.c
++++ b/drivers/mailbox/qcom-cpucp-mbox.c
+@@ -138,7 +138,7 @@ static int qcom_cpucp_mbox_probe(struct platform_device *pdev)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, qcom_cpucp_mbox_irq_fn,
+- IRQF_TRIGGER_HIGH, "apss_cpucp_mbox", cpucp);
++ IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND, "apss_cpucp_mbox", cpucp);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to register irq: %d\n", irq);
+
+--
+2.47.0
+
--- /dev/null
+From 8ce41b0f9d77cca074df25afd39b86e2ee3aa68e Mon Sep 17 00:00:00 2001
+From: Jinjiang Tu <tujinjiang@huawei.com>
+Date: Wed, 13 Nov 2024 16:32:35 +0800
+Subject: mm: fix NULL pointer dereference in alloc_pages_bulk_noprof
+
+From: Jinjiang Tu <tujinjiang@huawei.com>
+
+commit 8ce41b0f9d77cca074df25afd39b86e2ee3aa68e upstream.
+
+We triggered a NULL pointer dereference for ac.preferred_zoneref->zone in
+alloc_pages_bulk_noprof() when the task is migrated between cpusets.
+
+When cpuset is enabled, in prepare_alloc_pages(), ac->nodemask may be
+&current->mems_allowed. When first_zones_zonelist() is called to find
+preferred_zoneref, the ac->nodemask may be modified concurrently if the
+task is migrated between different cpusets. Assuming we have 2 NUMA Node,
+when traversing Node1 in ac->zonelist, the nodemask is 2, and when
+traversing Node2 in ac->zonelist, the nodemask is 1. As a result, the
+ac->preferred_zoneref points to NULL zone.
+
+In alloc_pages_bulk_noprof(), for_each_zone_zonelist_nodemask() finds a
+allowable zone and calls zonelist_node_idx(ac.preferred_zoneref), leading
+to NULL pointer dereference.
+
+__alloc_pages_noprof() fixes this issue by checking NULL pointer in commit
+ea57485af8f4 ("mm, page_alloc: fix check for NULL preferred_zone") and
+commit df76cee6bbeb ("mm, page_alloc: remove redundant checks from alloc
+fastpath").
+
+To fix it, check NULL pointer for preferred_zoneref->zone.
+
+Link: https://lkml.kernel.org/r/20241113083235.166798-1-tujinjiang@huawei.com
+Fixes: 387ba26fb1cb ("mm/page_alloc: add a bulk page allocator")
+Signed-off-by: Jinjiang Tu <tujinjiang@huawei.com>
+Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: Alexander Lobakin <alobakin@pm.me>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Cc: Nanyong Sun <sunnanyong@huawei.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/page_alloc.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -4569,7 +4569,8 @@ unsigned long alloc_pages_bulk_noprof(gf
+ gfp = alloc_gfp;
+
+ /* Find an allowed local zone that meets the low watermark. */
+- for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) {
++ z = ac.preferred_zoneref;
++ for_next_zone_zonelist_nodemask(zone, z, ac.highest_zoneidx, ac.nodemask) {
+ unsigned long mark;
+
+ if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
--- /dev/null
+From a4a282daf1a190f03790bf163458ea3c8d28d217 Mon Sep 17 00:00:00 2001
+From: Jann Horn <jannh@google.com>
+Date: Mon, 11 Nov 2024 20:34:30 +0100
+Subject: mm/mremap: fix address wraparound in move_page_tables()
+
+From: Jann Horn <jannh@google.com>
+
+commit a4a282daf1a190f03790bf163458ea3c8d28d217 upstream.
+
+On 32-bit platforms, it is possible for the expression `len + old_addr <
+old_end` to be false-positive if `len + old_addr` wraps around.
+`old_addr` is the cursor in the old range up to which page table entries
+have been moved; so if the operation succeeded, `old_addr` is the *end* of
+the old region, and adding `len` to it can wrap.
+
+The overflow causes mremap() to mistakenly believe that PTEs have been
+copied; the consequence is that mremap() bails out, but doesn't move the
+PTEs back before the new VMA is unmapped, causing anonymous pages in the
+region to be lost. So basically if userspace tries to mremap() a
+private-anon region and hits this bug, mremap() will return an error and
+the private-anon region's contents appear to have been zeroed.
+
+The idea of this check is that `old_end - len` is the original start
+address, and writing the check that way also makes it easier to read; so
+fix the check by rearranging the comparison accordingly.
+
+(An alternate fix would be to refactor this function by introducing an
+"orig_old_start" variable or such.)
+
+
+Tested in a VM with a 32-bit X86 kernel; without the patch:
+
+```
+user@horn:~/big_mremap$ cat test.c
+#define _GNU_SOURCE
+#include <stdlib.h>
+#include <stdio.h>
+#include <err.h>
+#include <sys/mman.h>
+
+#define ADDR1 ((void*)0x60000000)
+#define ADDR2 ((void*)0x10000000)
+#define SIZE 0x50000000uL
+
+int main(void) {
+ unsigned char *p1 = mmap(ADDR1, SIZE, PROT_READ|PROT_WRITE,
+ MAP_ANONYMOUS|MAP_PRIVATE|MAP_FIXED_NOREPLACE, -1, 0);
+ if (p1 == MAP_FAILED)
+ err(1, "mmap 1");
+ unsigned char *p2 = mmap(ADDR2, SIZE, PROT_NONE,
+ MAP_ANONYMOUS|MAP_PRIVATE|MAP_FIXED_NOREPLACE, -1, 0);
+ if (p2 == MAP_FAILED)
+ err(1, "mmap 2");
+ *p1 = 0x41;
+ printf("first char is 0x%02hhx\n", *p1);
+ unsigned char *p3 = mremap(p1, SIZE, SIZE,
+ MREMAP_MAYMOVE|MREMAP_FIXED, p2);
+ if (p3 == MAP_FAILED) {
+ printf("mremap() failed; first char is 0x%02hhx\n", *p1);
+ } else {
+ printf("mremap() succeeded; first char is 0x%02hhx\n", *p3);
+ }
+}
+user@horn:~/big_mremap$ gcc -static -o test test.c
+user@horn:~/big_mremap$ setarch -R ./test
+first char is 0x41
+mremap() failed; first char is 0x00
+```
+
+With the patch:
+
+```
+user@horn:~/big_mremap$ setarch -R ./test
+first char is 0x41
+mremap() succeeded; first char is 0x41
+```
+
+Link: https://lkml.kernel.org/r/20241111-fix-mremap-32bit-wrap-v1-1-61d6be73b722@google.com
+Fixes: af8ca1c14906 ("mm/mremap: optimize the start addresses in move_page_tables()")
+Signed-off-by: Jann Horn <jannh@google.com>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Acked-by: Qi Zheng <zhengqi.arch@bytedance.com>
+Reviewed-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
+Cc: Joel Fernandes (Google) <joel@joelfernandes.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/mremap.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/mremap.c
++++ b/mm/mremap.c
+@@ -648,7 +648,7 @@ again:
+ * Prevent negative return values when {old,new}_addr was realigned
+ * but we broke out of the above loop for the first PMD itself.
+ */
+- if (len + old_addr < old_end)
++ if (old_addr < old_end - len)
+ return 0;
+
+ return len + old_addr - old_end; /* how much done */
--- /dev/null
+From 66edc3a5894c74f8887c8af23b97593a0dd0df4d Mon Sep 17 00:00:00 2001
+From: Roman Gushchin <roman.gushchin@linux.dev>
+Date: Wed, 6 Nov 2024 19:53:54 +0000
+Subject: mm: page_alloc: move mlocked flag clearance into free_pages_prepare()
+
+From: Roman Gushchin <roman.gushchin@linux.dev>
+
+commit 66edc3a5894c74f8887c8af23b97593a0dd0df4d upstream.
+
+Syzbot reported a bad page state problem caused by a page being freed
+using free_page() still having a mlocked flag at free_pages_prepare()
+stage:
+
+ BUG: Bad page state in process syz.5.504 pfn:61f45
+ page: refcount:0 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x61f45
+ flags: 0xfff00000080204(referenced|workingset|mlocked|node=0|zone=1|lastcpupid=0x7ff)
+ raw: 00fff00000080204 0000000000000000 dead000000000122 0000000000000000
+ raw: 0000000000000000 0000000000000000 00000000ffffffff 0000000000000000
+ page dumped because: PAGE_FLAGS_CHECK_AT_FREE flag(s) set
+ page_owner tracks the page as allocated
+ page last allocated via order 0, migratetype Unmovable, gfp_mask 0x400dc0(GFP_KERNEL_ACCOUNT|__GFP_ZERO), pid 8443, tgid 8442 (syz.5.504), ts 201884660643, free_ts 201499827394
+ set_page_owner include/linux/page_owner.h:32 [inline]
+ post_alloc_hook+0x1f3/0x230 mm/page_alloc.c:1537
+ prep_new_page mm/page_alloc.c:1545 [inline]
+ get_page_from_freelist+0x303f/0x3190 mm/page_alloc.c:3457
+ __alloc_pages_noprof+0x292/0x710 mm/page_alloc.c:4733
+ alloc_pages_mpol_noprof+0x3e8/0x680 mm/mempolicy.c:2265
+ kvm_coalesced_mmio_init+0x1f/0xf0 virt/kvm/coalesced_mmio.c:99
+ kvm_create_vm virt/kvm/kvm_main.c:1235 [inline]
+ kvm_dev_ioctl_create_vm virt/kvm/kvm_main.c:5488 [inline]
+ kvm_dev_ioctl+0x12dc/0x2240 virt/kvm/kvm_main.c:5530
+ __do_compat_sys_ioctl fs/ioctl.c:1007 [inline]
+ __se_compat_sys_ioctl+0x510/0xc90 fs/ioctl.c:950
+ do_syscall_32_irqs_on arch/x86/entry/common.c:165 [inline]
+ __do_fast_syscall_32+0xb4/0x110 arch/x86/entry/common.c:386
+ do_fast_syscall_32+0x34/0x80 arch/x86/entry/common.c:411
+ entry_SYSENTER_compat_after_hwframe+0x84/0x8e
+ page last free pid 8399 tgid 8399 stack trace:
+ reset_page_owner include/linux/page_owner.h:25 [inline]
+ free_pages_prepare mm/page_alloc.c:1108 [inline]
+ free_unref_folios+0xf12/0x18d0 mm/page_alloc.c:2686
+ folios_put_refs+0x76c/0x860 mm/swap.c:1007
+ free_pages_and_swap_cache+0x5c8/0x690 mm/swap_state.c:335
+ __tlb_batch_free_encoded_pages mm/mmu_gather.c:136 [inline]
+ tlb_batch_pages_flush mm/mmu_gather.c:149 [inline]
+ tlb_flush_mmu_free mm/mmu_gather.c:366 [inline]
+ tlb_flush_mmu+0x3a3/0x680 mm/mmu_gather.c:373
+ tlb_finish_mmu+0xd4/0x200 mm/mmu_gather.c:465
+ exit_mmap+0x496/0xc40 mm/mmap.c:1926
+ __mmput+0x115/0x390 kernel/fork.c:1348
+ exit_mm+0x220/0x310 kernel/exit.c:571
+ do_exit+0x9b2/0x28e0 kernel/exit.c:926
+ do_group_exit+0x207/0x2c0 kernel/exit.c:1088
+ __do_sys_exit_group kernel/exit.c:1099 [inline]
+ __se_sys_exit_group kernel/exit.c:1097 [inline]
+ __x64_sys_exit_group+0x3f/0x40 kernel/exit.c:1097
+ x64_sys_call+0x2634/0x2640 arch/x86/include/generated/asm/syscalls_64.h:232
+ do_syscall_x64 arch/x86/entry/common.c:52 [inline]
+ do_syscall_64+0xf3/0x230 arch/x86/entry/common.c:83
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+ Modules linked in:
+ CPU: 0 UID: 0 PID: 8442 Comm: syz.5.504 Not tainted 6.12.0-rc6-syzkaller #0
+ Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 09/13/2024
+ Call Trace:
+ <TASK>
+ __dump_stack lib/dump_stack.c:94 [inline]
+ dump_stack_lvl+0x241/0x360 lib/dump_stack.c:120
+ bad_page+0x176/0x1d0 mm/page_alloc.c:501
+ free_page_is_bad mm/page_alloc.c:918 [inline]
+ free_pages_prepare mm/page_alloc.c:1100 [inline]
+ free_unref_page+0xed0/0xf20 mm/page_alloc.c:2638
+ kvm_destroy_vm virt/kvm/kvm_main.c:1327 [inline]
+ kvm_put_kvm+0xc75/0x1350 virt/kvm/kvm_main.c:1386
+ kvm_vcpu_release+0x54/0x60 virt/kvm/kvm_main.c:4143
+ __fput+0x23f/0x880 fs/file_table.c:431
+ task_work_run+0x24f/0x310 kernel/task_work.c:239
+ exit_task_work include/linux/task_work.h:43 [inline]
+ do_exit+0xa2f/0x28e0 kernel/exit.c:939
+ do_group_exit+0x207/0x2c0 kernel/exit.c:1088
+ __do_sys_exit_group kernel/exit.c:1099 [inline]
+ __se_sys_exit_group kernel/exit.c:1097 [inline]
+ __ia32_sys_exit_group+0x3f/0x40 kernel/exit.c:1097
+ ia32_sys_call+0x2624/0x2630 arch/x86/include/generated/asm/syscalls_32.h:253
+ do_syscall_32_irqs_on arch/x86/entry/common.c:165 [inline]
+ __do_fast_syscall_32+0xb4/0x110 arch/x86/entry/common.c:386
+ do_fast_syscall_32+0x34/0x80 arch/x86/entry/common.c:411
+ entry_SYSENTER_compat_after_hwframe+0x84/0x8e
+ RIP: 0023:0xf745d579
+ Code: Unable to access opcode bytes at 0xf745d54f.
+ RSP: 002b:00000000f75afd6c EFLAGS: 00000206 ORIG_RAX: 00000000000000fc
+ RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 0000000000000000
+ RDX: 0000000000000000 RSI: 00000000ffffff9c RDI: 00000000f744cff4
+ RBP: 00000000f717ae61 R08: 0000000000000000 R09: 0000000000000000
+ R10: 0000000000000000 R11: 0000000000000206 R12: 0000000000000000
+ R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000
+ </TASK>
+
+The problem was originally introduced by commit b109b87050df ("mm/munlock:
+replace clear_page_mlock() by final clearance"): it was focused on
+handling pagecache and anonymous memory and wasn't suitable for lower
+level get_page()/free_page() API's used for example by KVM, as with this
+reproducer.
+
+Fix it by moving the mlocked flag clearance down to free_pages_prepare().
+
+The bug itself is fairly old and harmless (aside from generating these
+warnings), aside from a small memory leak - "bad" pages are stopped from
+being allocated again.
+
+Link: https://lkml.kernel.org/r/20241106195354.270757-1-roman.gushchin@linux.dev
+Fixes: b109b87050df ("mm/munlock: replace clear_page_mlock() by final clearance")
+Signed-off-by: Roman Gushchin <roman.gushchin@linux.dev>
+Reported-by: syzbot+e985d3026c4fd041578e@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/6729f475.050a0220.701a.0019.GAE@google.com
+Acked-by: Hugh Dickins <hughd@google.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Sean Christopherson <seanjc@google.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/page_alloc.c | 15 +++++++++++++++
+ mm/swap.c | 14 --------------
+ 2 files changed, 15 insertions(+), 14 deletions(-)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1040,6 +1040,7 @@ __always_inline bool free_pages_prepare(
+ bool skip_kasan_poison = should_skip_kasan_poison(page);
+ bool init = want_init_on_free();
+ bool compound = PageCompound(page);
++ struct folio *folio = page_folio(page);
+
+ VM_BUG_ON_PAGE(PageTail(page), page);
+
+@@ -1049,6 +1050,20 @@ __always_inline bool free_pages_prepare(
+ if (memcg_kmem_online() && PageMemcgKmem(page))
+ __memcg_kmem_uncharge_page(page, order);
+
++ /*
++ * In rare cases, when truncation or holepunching raced with
++ * munlock after VM_LOCKED was cleared, Mlocked may still be
++ * found set here. This does not indicate a problem, unless
++ * "unevictable_pgs_cleared" appears worryingly large.
++ */
++ if (unlikely(folio_test_mlocked(folio))) {
++ long nr_pages = folio_nr_pages(folio);
++
++ __folio_clear_mlocked(folio);
++ zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages);
++ count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
++ }
++
+ if (unlikely(PageHWPoison(page)) && !order) {
+ /* Do not let hwpoison pages hit pcplists/buddy */
+ reset_page_owner(page, order);
+--- a/mm/swap.c
++++ b/mm/swap.c
+@@ -82,20 +82,6 @@ static void __page_cache_release(struct
+ lruvec_del_folio(*lruvecp, folio);
+ __folio_clear_lru_flags(folio);
+ }
+-
+- /*
+- * In rare cases, when truncation or holepunching raced with
+- * munlock after VM_LOCKED was cleared, Mlocked may still be
+- * found set here. This does not indicate a problem, unless
+- * "unevictable_pgs_cleared" appears worryingly large.
+- */
+- if (unlikely(folio_test_mlocked(folio))) {
+- long nr_pages = folio_nr_pages(folio);
+-
+- __folio_clear_mlocked(folio);
+- zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages);
+- count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
+- }
+ }
+
+ /*
--- /dev/null
+From d1aa0c04294e29883d65eac6c2f72fe95cc7c049 Mon Sep 17 00:00:00 2001
+From: Andrew Morton <akpm@linux-foundation.org>
+Date: Fri, 15 Nov 2024 16:57:24 -0800
+Subject: mm: revert "mm: shmem: fix data-race in shmem_getattr()"
+
+From: Andrew Morton <akpm@linux-foundation.org>
+
+commit d1aa0c04294e29883d65eac6c2f72fe95cc7c049 upstream.
+
+Revert d949d1d14fa2 ("mm: shmem: fix data-race in shmem_getattr()") as
+suggested by Chuck [1]. It is causing deadlocks when accessing tmpfs over
+NFS.
+
+As Hugh commented, "added just to silence a syzbot sanitizer splat: added
+where there has never been any practical problem".
+
+Link: https://lkml.kernel.org/r/ZzdxKF39VEmXSSyN@tissot.1015granger.net [1]
+Fixes: d949d1d14fa2 ("mm: shmem: fix data-race in shmem_getattr()")
+Acked-by: Hugh Dickins <hughd@google.com>
+Cc: Chuck Lever <chuck.lever@oracle.com>
+Cc: Jeongjun Park <aha310510@gmail.com>
+Cc: Yu Zhao <yuzhao@google.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/shmem.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -1163,9 +1163,7 @@ static int shmem_getattr(struct mnt_idma
+ stat->attributes_mask |= (STATX_ATTR_APPEND |
+ STATX_ATTR_IMMUTABLE |
+ STATX_ATTR_NODUMP);
+- inode_lock_shared(inode);
+ generic_fillattr(idmap, request_mask, inode, stat);
+- inode_unlock_shared(inode);
+
+ if (shmem_huge_global_enabled(inode, 0, false, NULL, 0))
+ stat->blksize = HPAGE_PMD_SIZE;
--- /dev/null
+From 85b580afc2c215394e08974bf033de9face94955 Mon Sep 17 00:00:00 2001
+From: Andre Przywara <andre.przywara@arm.com>
+Date: Thu, 7 Nov 2024 01:42:40 +0000
+Subject: mmc: sunxi-mmc: Fix A100 compatible description
+
+From: Andre Przywara <andre.przywara@arm.com>
+
+commit 85b580afc2c215394e08974bf033de9face94955 upstream.
+
+It turns out that the Allwinner A100/A133 SoC only supports 8K DMA
+blocks (13 bits wide), for both the SD/SDIO and eMMC instances.
+And while this alone would make a trivial fix, the H616 falls back to
+the A100 compatible string, so we have to now match the H616 compatible
+string explicitly against the description advertising 64K DMA blocks.
+
+As the A100 is now compatible with the D1 description, let the A100
+compatible string point to that block instead, and introduce an explicit
+match against the H616 string, pointing to the old description.
+Also remove the redundant setting of clk_delays to NULL on the way.
+
+Fixes: 3536b82e5853 ("mmc: sunxi: add support for A100 mmc controller")
+Cc: stable@vger.kernel.org
+Signed-off-by: Andre Przywara <andre.przywara@arm.com>
+Tested-by: Parthiban Nallathambi <parthiban@linumiz.com>
+Reviewed-by: Chen-Yu Tsai <wens@csie.org>
+Message-ID: <20241107014240.24669-1-andre.przywara@arm.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/host/sunxi-mmc.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/mmc/host/sunxi-mmc.c
++++ b/drivers/mmc/host/sunxi-mmc.c
+@@ -1191,10 +1191,9 @@ static const struct sunxi_mmc_cfg sun50i
+ .needs_new_timings = true,
+ };
+
+-static const struct sunxi_mmc_cfg sun50i_a100_cfg = {
++static const struct sunxi_mmc_cfg sun50i_h616_cfg = {
+ .idma_des_size_bits = 16,
+ .idma_des_shift = 2,
+- .clk_delays = NULL,
+ .can_calibrate = true,
+ .mask_data0 = true,
+ .needs_new_timings = true,
+@@ -1217,8 +1216,9 @@ static const struct of_device_id sunxi_m
+ { .compatible = "allwinner,sun20i-d1-mmc", .data = &sun20i_d1_cfg },
+ { .compatible = "allwinner,sun50i-a64-mmc", .data = &sun50i_a64_cfg },
+ { .compatible = "allwinner,sun50i-a64-emmc", .data = &sun50i_a64_emmc_cfg },
+- { .compatible = "allwinner,sun50i-a100-mmc", .data = &sun50i_a100_cfg },
++ { .compatible = "allwinner,sun50i-a100-mmc", .data = &sun20i_d1_cfg },
+ { .compatible = "allwinner,sun50i-a100-emmc", .data = &sun50i_a100_emmc_cfg },
++ { .compatible = "allwinner,sun50i-h616-mmc", .data = &sun50i_h616_cfg },
+ { /* sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match);
--- /dev/null
+From f642c5c4d528d11bd78b6c6f84f541cd3c0bea86 Mon Sep 17 00:00:00 2001
+From: Geliang Tang <tanggeliang@kylinos.cn>
+Date: Tue, 12 Nov 2024 20:18:34 +0100
+Subject: mptcp: hold pm lock when deleting entry
+
+From: Geliang Tang <tanggeliang@kylinos.cn>
+
+commit f642c5c4d528d11bd78b6c6f84f541cd3c0bea86 upstream.
+
+When traversing userspace_pm_local_addr_list and deleting an entry from
+it in mptcp_pm_nl_remove_doit(), msk->pm.lock should be held.
+
+This patch holds this lock before mptcp_userspace_pm_lookup_addr_by_id()
+and releases it after list_move() in mptcp_pm_nl_remove_doit().
+
+Fixes: d9a4594edabf ("mptcp: netlink: Add MPTCP_PM_CMD_REMOVE")
+Cc: stable@vger.kernel.org
+Signed-off-by: Geliang Tang <tanggeliang@kylinos.cn>
+Reviewed-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20241112-net-mptcp-misc-6-12-pm-v1-2-b835580cefa8@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm_userspace.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/mptcp/pm_userspace.c
++++ b/net/mptcp/pm_userspace.c
+@@ -325,14 +325,17 @@ int mptcp_pm_nl_remove_doit(struct sk_bu
+
+ lock_sock(sk);
+
++ spin_lock_bh(&msk->pm.lock);
+ match = mptcp_userspace_pm_lookup_addr_by_id(msk, id_val);
+ if (!match) {
+ GENL_SET_ERR_MSG(info, "address with specified id not found");
++ spin_unlock_bh(&msk->pm.lock);
+ release_sock(sk);
+ goto out;
+ }
+
+ list_move(&match->list, &free_list);
++ spin_unlock_bh(&msk->pm.lock);
+
+ mptcp_pm_remove_addrs(msk, &free_list);
+
--- /dev/null
+From db3eab8110bc0520416101b6a5b52f44a43fb4cf Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Tue, 12 Nov 2024 20:18:35 +0100
+Subject: mptcp: pm: use _rcu variant under rcu_read_lock
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit db3eab8110bc0520416101b6a5b52f44a43fb4cf upstream.
+
+In mptcp_pm_create_subflow_or_signal_addr(), rcu_read_(un)lock() are
+used as expected to iterate over the list of local addresses, but
+list_for_each_entry() was used instead of list_for_each_entry_rcu() in
+__lookup_addr(). It is important to use this variant which adds the
+required READ_ONCE() (and diagnostic checks if enabled).
+
+Because __lookup_addr() is also used in mptcp_pm_nl_set_flags() where it
+is called under the pernet->lock and not rcu_read_lock(), an extra
+condition is then passed to help the diagnostic checks making sure
+either the associated spin lock or the RCU lock is held.
+
+Fixes: 86e39e04482b ("mptcp: keep track of local endpoint still available for each msk")
+Cc: stable@vger.kernel.org
+Reviewed-by: Geliang Tang <geliang@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20241112-net-mptcp-misc-6-12-pm-v1-3-b835580cefa8@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm_netlink.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -520,7 +520,8 @@ __lookup_addr(struct pm_nl_pernet *perne
+ {
+ struct mptcp_pm_addr_entry *entry;
+
+- list_for_each_entry(entry, &pernet->local_addr_list, list) {
++ list_for_each_entry_rcu(entry, &pernet->local_addr_list, list,
++ lockdep_is_held(&pernet->lock)) {
+ if (mptcp_addresses_equal(&entry->addr, info, entry->addr.port))
+ return entry;
+ }
--- /dev/null
+From e0266319413d5d687ba7b6df7ca99e4b9724a4f2 Mon Sep 17 00:00:00 2001
+From: Geliang Tang <tanggeliang@kylinos.cn>
+Date: Tue, 12 Nov 2024 20:18:33 +0100
+Subject: mptcp: update local address flags when setting it
+
+From: Geliang Tang <tanggeliang@kylinos.cn>
+
+commit e0266319413d5d687ba7b6df7ca99e4b9724a4f2 upstream.
+
+Just like in-kernel pm, when userspace pm does set_flags, it needs to send
+out MP_PRIO signal, and also modify the flags of the corresponding address
+entry in the local address list. This patch implements the missing logic.
+
+Traverse all address entries on userspace_pm_local_addr_list to find the
+local address entry, if bkup is true, set the flags of this entry with
+FLAG_BACKUP, otherwise, clear FLAG_BACKUP.
+
+Fixes: 892f396c8e68 ("mptcp: netlink: issue MP_PRIO signals from userspace PMs")
+Cc: stable@vger.kernel.org
+Signed-off-by: Geliang Tang <tanggeliang@kylinos.cn>
+Reviewed-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20241112-net-mptcp-misc-6-12-pm-v1-1-b835580cefa8@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm_userspace.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/net/mptcp/pm_userspace.c
++++ b/net/mptcp/pm_userspace.c
+@@ -574,6 +574,7 @@ int mptcp_userspace_pm_set_flags(struct
+ struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
+ struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
+ struct net *net = sock_net(skb->sk);
++ struct mptcp_pm_addr_entry *entry;
+ struct mptcp_sock *msk;
+ int ret = -EINVAL;
+ struct sock *sk;
+@@ -615,6 +616,17 @@ int mptcp_userspace_pm_set_flags(struct
+ if (loc.flags & MPTCP_PM_ADDR_FLAG_BACKUP)
+ bkup = 1;
+
++ spin_lock_bh(&msk->pm.lock);
++ list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) {
++ if (mptcp_addresses_equal(&entry->addr, &loc.addr, false)) {
++ if (bkup)
++ entry->flags |= MPTCP_PM_ADDR_FLAG_BACKUP;
++ else
++ entry->flags &= ~MPTCP_PM_ADDR_FLAG_BACKUP;
++ }
++ }
++ spin_unlock_bh(&msk->pm.lock);
++
+ lock_sock(sk);
+ ret = mptcp_pm_nl_mp_prio_send_ack(msk, &loc.addr, &rem.addr, bkup);
+ release_sock(sk);
--- /dev/null
+From 2026559a6c4ce34db117d2db8f710fe2a9420d5a Mon Sep 17 00:00:00 2001
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Date: Thu, 7 Nov 2024 01:07:33 +0900
+Subject: nilfs2: fix null-ptr-deref in block_dirty_buffer tracepoint
+
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+
+commit 2026559a6c4ce34db117d2db8f710fe2a9420d5a upstream.
+
+When using the "block:block_dirty_buffer" tracepoint, mark_buffer_dirty()
+may cause a NULL pointer dereference, or a general protection fault when
+KASAN is enabled.
+
+This happens because, since the tracepoint was added in
+mark_buffer_dirty(), it references the dev_t member bh->b_bdev->bd_dev
+regardless of whether the buffer head has a pointer to a block_device
+structure.
+
+In the current implementation, nilfs_grab_buffer(), which grabs a buffer
+to read (or create) a block of metadata, including b-tree node blocks,
+does not set the block device, but instead does so only if the buffer is
+not in the "uptodate" state for each of its caller block reading
+functions. However, if the uptodate flag is set on a folio/page, and the
+buffer heads are detached from it by try_to_free_buffers(), and new buffer
+heads are then attached by create_empty_buffers(), the uptodate flag may
+be restored to each buffer without the block device being set to
+bh->b_bdev, and mark_buffer_dirty() may be called later in that state,
+resulting in the bug mentioned above.
+
+Fix this issue by making nilfs_grab_buffer() always set the block device
+of the super block structure to the buffer head, regardless of the state
+of the buffer's uptodate flag.
+
+Link: https://lkml.kernel.org/r/20241106160811.3316-3-konishi.ryusuke@gmail.com
+Fixes: 5305cb830834 ("block: add block_{touch|dirty}_buffer tracepoint")
+Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: Ubisectech Sirius <bugreport@valiantsec.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nilfs2/btnode.c | 2 --
+ fs/nilfs2/gcinode.c | 4 +---
+ fs/nilfs2/mdt.c | 1 -
+ fs/nilfs2/page.c | 1 +
+ 4 files changed, 2 insertions(+), 6 deletions(-)
+
+--- a/fs/nilfs2/btnode.c
++++ b/fs/nilfs2/btnode.c
+@@ -68,7 +68,6 @@ nilfs_btnode_create_block(struct address
+ goto failed;
+ }
+ memset(bh->b_data, 0, i_blocksize(inode));
+- bh->b_bdev = inode->i_sb->s_bdev;
+ bh->b_blocknr = blocknr;
+ set_buffer_mapped(bh);
+ set_buffer_uptodate(bh);
+@@ -133,7 +132,6 @@ int nilfs_btnode_submit_block(struct add
+ goto found;
+ }
+ set_buffer_mapped(bh);
+- bh->b_bdev = inode->i_sb->s_bdev;
+ bh->b_blocknr = pblocknr; /* set block address for read */
+ bh->b_end_io = end_buffer_read_sync;
+ get_bh(bh);
+--- a/fs/nilfs2/gcinode.c
++++ b/fs/nilfs2/gcinode.c
+@@ -83,10 +83,8 @@ int nilfs_gccache_submit_read_data(struc
+ goto out;
+ }
+
+- if (!buffer_mapped(bh)) {
+- bh->b_bdev = inode->i_sb->s_bdev;
++ if (!buffer_mapped(bh))
+ set_buffer_mapped(bh);
+- }
+ bh->b_blocknr = pbn;
+ bh->b_end_io = end_buffer_read_sync;
+ get_bh(bh);
+--- a/fs/nilfs2/mdt.c
++++ b/fs/nilfs2/mdt.c
+@@ -89,7 +89,6 @@ static int nilfs_mdt_create_block(struct
+ if (buffer_uptodate(bh))
+ goto failed_bh;
+
+- bh->b_bdev = sb->s_bdev;
+ err = nilfs_mdt_insert_new_block(inode, block, bh, init_block);
+ if (likely(!err)) {
+ get_bh(bh);
+--- a/fs/nilfs2/page.c
++++ b/fs/nilfs2/page.c
+@@ -63,6 +63,7 @@ struct buffer_head *nilfs_grab_buffer(st
+ folio_put(folio);
+ return NULL;
+ }
++ bh->b_bdev = inode->i_sb->s_bdev;
+ return bh;
+ }
+
--- /dev/null
+From cd45e963e44b0f10d90b9e6c0e8b4f47f3c92471 Mon Sep 17 00:00:00 2001
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Date: Thu, 7 Nov 2024 01:07:32 +0900
+Subject: nilfs2: fix null-ptr-deref in block_touch_buffer tracepoint
+
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+
+commit cd45e963e44b0f10d90b9e6c0e8b4f47f3c92471 upstream.
+
+Patch series "nilfs2: fix null-ptr-deref bugs on block tracepoints".
+
+This series fixes null pointer dereference bugs that occur when using
+nilfs2 and two block-related tracepoints.
+
+
+This patch (of 2):
+
+It has been reported that when using "block:block_touch_buffer"
+tracepoint, touch_buffer() called from __nilfs_get_folio_block() causes a
+NULL pointer dereference, or a general protection fault when KASAN is
+enabled.
+
+This happens because since the tracepoint was added in touch_buffer(), it
+references the dev_t member bh->b_bdev->bd_dev regardless of whether the
+buffer head has a pointer to a block_device structure. In the current
+implementation, the block_device structure is set after the function
+returns to the caller.
+
+Here, touch_buffer() is used to mark the folio/page that owns the buffer
+head as accessed, but the common search helper for folio/page used by the
+caller function was optimized to mark the folio/page as accessed when it
+was reimplemented a long time ago, eliminating the need to call
+touch_buffer() here in the first place.
+
+So this solves the issue by eliminating the touch_buffer() call itself.
+
+Link: https://lkml.kernel.org/r/20241106160811.3316-1-konishi.ryusuke@gmail.com
+Link: https://lkml.kernel.org/r/20241106160811.3316-2-konishi.ryusuke@gmail.com
+Fixes: 5305cb830834 ("block: add block_{touch|dirty}_buffer tracepoint")
+Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Reported-by: Ubisectech Sirius <bugreport@valiantsec.com>
+Closes: https://lkml.kernel.org/r/86bd3013-887e-4e38-960f-ca45c657f032.bugreport@valiantsec.com
+Reported-by: syzbot+9982fb8d18eba905abe2@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=9982fb8d18eba905abe2
+Tested-by: syzbot+9982fb8d18eba905abe2@syzkaller.appspotmail.com
+Cc: Tejun Heo <tj@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nilfs2/page.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/fs/nilfs2/page.c
++++ b/fs/nilfs2/page.c
+@@ -39,7 +39,6 @@ static struct buffer_head *__nilfs_get_f
+ first_block = (unsigned long)index << (PAGE_SHIFT - blkbits);
+ bh = get_nth_bh(bh, block - first_block);
+
+- touch_buffer(bh);
+ wait_on_buffer(bh);
+ return bh;
+ }
--- /dev/null
+From 247d720b2c5d22f7281437fd6054a138256986ba Mon Sep 17 00:00:00 2001
+From: Hajime Tazaki <thehajime@gmail.com>
+Date: Sat, 9 Nov 2024 07:28:34 +0900
+Subject: nommu: pass NULL argument to vma_iter_prealloc()
+
+From: Hajime Tazaki <thehajime@gmail.com>
+
+commit 247d720b2c5d22f7281437fd6054a138256986ba upstream.
+
+When deleting a vma entry from a maple tree, it has to pass NULL to
+vma_iter_prealloc() in order to calculate internal state of the tree, but
+it passed a wrong argument. As a result, nommu kernels crashed upon
+accessing a vma iterator, such as acct_collect() reading the size of vma
+entries after do_munmap().
+
+This commit fixes this issue by passing a right argument to the
+preallocation call.
+
+Link: https://lkml.kernel.org/r/20241108222834.3625217-1-thehajime@gmail.com
+Fixes: b5df09226450 ("mm: set up vma iterator for vma_iter_prealloc() calls")
+Signed-off-by: Hajime Tazaki <thehajime@gmail.com>
+Reviewed-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/nommu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/nommu.c
++++ b/mm/nommu.c
+@@ -568,7 +568,7 @@ static int delete_vma_from_mm(struct vm_
+ VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_start);
+
+ vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
+- if (vma_iter_prealloc(&vmi, vma)) {
++ if (vma_iter_prealloc(&vmi, NULL)) {
+ pr_warn("Allocation of vma tree for process %d failed\n",
+ current->pid);
+ return -ENOMEM;
--- /dev/null
+From 23aab037106d46e6168ce1214a958ce9bf317f2e Mon Sep 17 00:00:00 2001
+From: Dmitry Antipov <dmantipov@yandex.ru>
+Date: Wed, 6 Nov 2024 12:21:00 +0300
+Subject: ocfs2: fix UBSAN warning in ocfs2_verify_volume()
+
+From: Dmitry Antipov <dmantipov@yandex.ru>
+
+commit 23aab037106d46e6168ce1214a958ce9bf317f2e upstream.
+
+Syzbot has reported the following splat triggered by UBSAN:
+
+UBSAN: shift-out-of-bounds in fs/ocfs2/super.c:2336:10
+shift exponent 32768 is too large for 32-bit type 'int'
+CPU: 2 UID: 0 PID: 5255 Comm: repro Not tainted 6.12.0-rc4-syzkaller-00047-gc2ee9f594da8 #0
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.3-3.fc41 04/01/2014
+Call Trace:
+ <TASK>
+ dump_stack_lvl+0x241/0x360
+ ? __pfx_dump_stack_lvl+0x10/0x10
+ ? __pfx__printk+0x10/0x10
+ ? __asan_memset+0x23/0x50
+ ? lockdep_init_map_type+0xa1/0x910
+ __ubsan_handle_shift_out_of_bounds+0x3c8/0x420
+ ocfs2_fill_super+0xf9c/0x5750
+ ? __pfx_ocfs2_fill_super+0x10/0x10
+ ? __pfx_validate_chain+0x10/0x10
+ ? __pfx_validate_chain+0x10/0x10
+ ? validate_chain+0x11e/0x5920
+ ? __lock_acquire+0x1384/0x2050
+ ? __pfx_validate_chain+0x10/0x10
+ ? string+0x26a/0x2b0
+ ? widen_string+0x3a/0x310
+ ? string+0x26a/0x2b0
+ ? bdev_name+0x2b1/0x3c0
+ ? pointer+0x703/0x1210
+ ? __pfx_pointer+0x10/0x10
+ ? __pfx_format_decode+0x10/0x10
+ ? __lock_acquire+0x1384/0x2050
+ ? vsnprintf+0x1ccd/0x1da0
+ ? snprintf+0xda/0x120
+ ? __pfx_lock_release+0x10/0x10
+ ? do_raw_spin_lock+0x14f/0x370
+ ? __pfx_snprintf+0x10/0x10
+ ? set_blocksize+0x1f9/0x360
+ ? sb_set_blocksize+0x98/0xf0
+ ? setup_bdev_super+0x4e6/0x5d0
+ mount_bdev+0x20c/0x2d0
+ ? __pfx_ocfs2_fill_super+0x10/0x10
+ ? __pfx_mount_bdev+0x10/0x10
+ ? vfs_parse_fs_string+0x190/0x230
+ ? __pfx_vfs_parse_fs_string+0x10/0x10
+ legacy_get_tree+0xf0/0x190
+ ? __pfx_ocfs2_mount+0x10/0x10
+ vfs_get_tree+0x92/0x2b0
+ do_new_mount+0x2be/0xb40
+ ? __pfx_do_new_mount+0x10/0x10
+ __se_sys_mount+0x2d6/0x3c0
+ ? __pfx___se_sys_mount+0x10/0x10
+ ? do_syscall_64+0x100/0x230
+ ? __x64_sys_mount+0x20/0xc0
+ do_syscall_64+0xf3/0x230
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+RIP: 0033:0x7f37cae96fda
+Code: 48 8b 0d 51 ce 0c 00 f7 d8 64 89 01 48 83 c8 ff c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 49 89 ca b8 a5 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 1e ce 0c 00 f7 d8 64 89 01 48
+RSP: 002b:00007fff6c1aa228 EFLAGS: 00000206 ORIG_RAX: 00000000000000a5
+RAX: ffffffffffffffda RBX: 00007fff6c1aa240 RCX: 00007f37cae96fda
+RDX: 00000000200002c0 RSI: 0000000020000040 RDI: 00007fff6c1aa240
+RBP: 0000000000000004 R08: 00007fff6c1aa280 R09: 0000000000000000
+R10: 00000000000008c0 R11: 0000000000000206 R12: 00000000000008c0
+R13: 00007fff6c1aa280 R14: 0000000000000003 R15: 0000000001000000
+ </TASK>
+
+For a really damaged superblock, the value of 'i_super.s_blocksize_bits'
+may exceed the maximum possible shift for an underlying 'int'. So add an
+extra check whether the aforementioned field represents the valid block
+size, which is 512 bytes, 1K, 2K, or 4K.
+
+Link: https://lkml.kernel.org/r/20241106092100.2661330-1-dmantipov@yandex.ru
+Fixes: ccd979bdbce9 ("[PATCH] OCFS2: The Second Oracle Cluster Filesystem")
+Signed-off-by: Dmitry Antipov <dmantipov@yandex.ru>
+Reported-by: syzbot+56f7cd1abe4b8e475180@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=56f7cd1abe4b8e475180
+Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com>
+Cc: Mark Fasheh <mark@fasheh.com>
+Cc: Joel Becker <jlbec@evilplan.org>
+Cc: Junxiao Bi <junxiao.bi@oracle.com>
+Cc: Changwei Ge <gechangwei@live.cn>
+Cc: Jun Piao <piaojun@huawei.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ocfs2/super.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+--- a/fs/ocfs2/super.c
++++ b/fs/ocfs2/super.c
+@@ -2321,6 +2321,7 @@ static int ocfs2_verify_volume(struct oc
+ struct ocfs2_blockcheck_stats *stats)
+ {
+ int status = -EAGAIN;
++ u32 blksz_bits;
+
+ if (memcmp(di->i_signature, OCFS2_SUPER_BLOCK_SIGNATURE,
+ strlen(OCFS2_SUPER_BLOCK_SIGNATURE)) == 0) {
+@@ -2335,11 +2336,15 @@ static int ocfs2_verify_volume(struct oc
+ goto out;
+ }
+ status = -EINVAL;
+- if ((1 << le32_to_cpu(di->id2.i_super.s_blocksize_bits)) != blksz) {
++ /* Acceptable block sizes are 512 bytes, 1K, 2K and 4K. */
++ blksz_bits = le32_to_cpu(di->id2.i_super.s_blocksize_bits);
++ if (blksz_bits < 9 || blksz_bits > 12) {
+ mlog(ML_ERROR, "found superblock with incorrect block "
+- "size: found %u, should be %u\n",
+- 1 << le32_to_cpu(di->id2.i_super.s_blocksize_bits),
+- blksz);
++ "size bits: found %u, should be 9, 10, 11, or 12\n",
++ blksz_bits);
++ } else if ((1 << le32_to_cpu(blksz_bits)) != blksz) {
++ mlog(ML_ERROR, "found superblock with incorrect block "
++ "size: found %u, should be %u\n", 1 << blksz_bits, blksz);
+ } else if (le16_to_cpu(di->id2.i_super.s_major_rev_level) !=
+ OCFS2_MAJOR_REV_LEVEL ||
+ le16_to_cpu(di->id2.i_super.s_minor_rev_level) !=
--- /dev/null
+From 737f34137844d6572ab7d473c998c7f977ff30eb Mon Sep 17 00:00:00 2001
+From: Dmitry Antipov <dmantipov@yandex.ru>
+Date: Thu, 14 Nov 2024 07:38:44 +0300
+Subject: ocfs2: uncache inode which has failed entering the group
+
+From: Dmitry Antipov <dmantipov@yandex.ru>
+
+commit 737f34137844d6572ab7d473c998c7f977ff30eb upstream.
+
+Syzbot has reported the following BUG:
+
+kernel BUG at fs/ocfs2/uptodate.c:509!
+...
+Call Trace:
+ <TASK>
+ ? __die_body+0x5f/0xb0
+ ? die+0x9e/0xc0
+ ? do_trap+0x15a/0x3a0
+ ? ocfs2_set_new_buffer_uptodate+0x145/0x160
+ ? do_error_trap+0x1dc/0x2c0
+ ? ocfs2_set_new_buffer_uptodate+0x145/0x160
+ ? __pfx_do_error_trap+0x10/0x10
+ ? handle_invalid_op+0x34/0x40
+ ? ocfs2_set_new_buffer_uptodate+0x145/0x160
+ ? exc_invalid_op+0x38/0x50
+ ? asm_exc_invalid_op+0x1a/0x20
+ ? ocfs2_set_new_buffer_uptodate+0x2e/0x160
+ ? ocfs2_set_new_buffer_uptodate+0x144/0x160
+ ? ocfs2_set_new_buffer_uptodate+0x145/0x160
+ ocfs2_group_add+0x39f/0x15a0
+ ? __pfx_ocfs2_group_add+0x10/0x10
+ ? __pfx_lock_acquire+0x10/0x10
+ ? mnt_get_write_access+0x68/0x2b0
+ ? __pfx_lock_release+0x10/0x10
+ ? rcu_read_lock_any_held+0xb7/0x160
+ ? __pfx_rcu_read_lock_any_held+0x10/0x10
+ ? smack_log+0x123/0x540
+ ? mnt_get_write_access+0x68/0x2b0
+ ? mnt_get_write_access+0x68/0x2b0
+ ? mnt_get_write_access+0x226/0x2b0
+ ocfs2_ioctl+0x65e/0x7d0
+ ? __pfx_ocfs2_ioctl+0x10/0x10
+ ? smack_file_ioctl+0x29e/0x3a0
+ ? __pfx_smack_file_ioctl+0x10/0x10
+ ? lockdep_hardirqs_on_prepare+0x43d/0x780
+ ? __pfx_lockdep_hardirqs_on_prepare+0x10/0x10
+ ? __pfx_ocfs2_ioctl+0x10/0x10
+ __se_sys_ioctl+0xfb/0x170
+ do_syscall_64+0xf3/0x230
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+...
+ </TASK>
+
+When 'ioctl(OCFS2_IOC_GROUP_ADD, ...)' has failed for the particular
+inode in 'ocfs2_verify_group_and_input()', corresponding buffer head
+remains cached and subsequent call to the same 'ioctl()' for the same
+inode issues the BUG() in 'ocfs2_set_new_buffer_uptodate()' (trying
+to cache the same buffer head of that inode). Fix this by uncaching
+the buffer head with 'ocfs2_remove_from_cache()' on error path in
+'ocfs2_group_add()'.
+
+Link: https://lkml.kernel.org/r/20241114043844.111847-1-dmantipov@yandex.ru
+Fixes: 7909f2bf8353 ("[PATCH 2/2] ocfs2: Implement group add for online resize")
+Signed-off-by: Dmitry Antipov <dmantipov@yandex.ru>
+Reported-by: syzbot+453873f1588c2d75b447@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=453873f1588c2d75b447
+Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com>
+Cc: Dmitry Antipov <dmantipov@yandex.ru>
+Cc: Joel Becker <jlbec@evilplan.org>
+Cc: Mark Fasheh <mark@fasheh.com>
+Cc: Junxiao Bi <junxiao.bi@oracle.com>
+Cc: Changwei Ge <gechangwei@live.cn>
+Cc: Jun Piao <piaojun@huawei.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ocfs2/resize.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/fs/ocfs2/resize.c
++++ b/fs/ocfs2/resize.c
+@@ -574,6 +574,8 @@ out_commit:
+ ocfs2_commit_trans(osb, handle);
+
+ out_free_group_bh:
++ if (ret < 0)
++ ocfs2_remove_from_cache(INODE_CACHE(inode), group_bh);
+ brelse(group_bh);
+
+ out_unlock:
--- /dev/null
+From 5f77ee21eb44e37e371bcea195ea9403b95d1399 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Fri, 8 Nov 2024 09:34:46 -0500
+Subject: Revert "drm/amd/display: parse umc_info or vram_info based on ASIC"
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 5f77ee21eb44e37e371bcea195ea9403b95d1399 upstream.
+
+This reverts commit 694c79769cb384bca8b1ec1d1e84156e726bd106.
+
+This was not the root cause. Revert.
+
+Link: https://gitlab.freedesktop.org/drm/amd/-/issues/3678
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: aurabindo.pillai@amd.com
+Cc: hamishclaxton@gmail.com
+(cherry picked from commit 3c2296b1eec55b50c64509ba15406142d4a958dc)
+Cc: stable@vger.kernel.org # 6.11.x
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+@@ -3127,9 +3127,7 @@ static enum bp_result bios_parser_get_vr
+ struct atom_data_revision revision;
+
+ // vram info moved to umc_info for DCN4x
+- if (dcb->ctx->dce_version >= DCN_VERSION_4_01 &&
+- dcb->ctx->dce_version < DCN_VERSION_MAX &&
+- info && DATA_TABLES(umc_info)) {
++ if (info && DATA_TABLES(umc_info)) {
+ header = GET_IMAGE(struct atom_common_table_header,
+ DATA_TABLES(umc_info));
+
--- /dev/null
+From 1635e407a4a64d08a8517ac59ca14ad4fc785e75 Mon Sep 17 00:00:00 2001
+From: Aurelien Jarno <aurelien@aurel32.net>
+Date: Sun, 10 Nov 2024 12:46:36 +0100
+Subject: Revert "mmc: dw_mmc: Fix IDMAC operation with pages bigger than 4K"
+
+From: Aurelien Jarno <aurelien@aurel32.net>
+
+commit 1635e407a4a64d08a8517ac59ca14ad4fc785e75 upstream.
+
+The commit 8396c793ffdf ("mmc: dw_mmc: Fix IDMAC operation with pages
+bigger than 4K") increased the max_req_size, even for 4K pages, causing
+various issues:
+- Panic booting the kernel/rootfs from an SD card on Rockchip RK3566
+- Panic booting the kernel/rootfs from an SD card on StarFive JH7100
+- "swiotlb buffer is full" and data corruption on StarFive JH7110
+
+At this stage no fix have been found, so it's probably better to just
+revert the change.
+
+This reverts commit 8396c793ffdf28bb8aee7cfe0891080f8cab7890.
+
+Cc: stable@vger.kernel.org
+Cc: Sam Protsenko <semen.protsenko@linaro.org>
+Fixes: 8396c793ffdf ("mmc: dw_mmc: Fix IDMAC operation with pages bigger than 4K")
+Closes: https://lore.kernel.org/linux-mmc/614692b4-1dbe-31b8-a34d-cb6db1909bb7@w6rz.net/
+Closes: https://lore.kernel.org/linux-mmc/CAC8uq=Ppnmv98mpa1CrWLawWoPnu5abtU69v-=G-P7ysATQ2Pw@mail.gmail.com/
+Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
+Message-ID: <20241110114700.622372-1-aurelien@aurel32.net>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/host/dw_mmc.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/mmc/host/dw_mmc.c
++++ b/drivers/mmc/host/dw_mmc.c
+@@ -2957,8 +2957,8 @@ static int dw_mci_init_slot(struct dw_mc
+ if (host->use_dma == TRANS_MODE_IDMAC) {
+ mmc->max_segs = host->ring_size;
+ mmc->max_blk_size = 65535;
+- mmc->max_req_size = DW_MCI_DESC_DATA_LENGTH * host->ring_size;
+- mmc->max_seg_size = mmc->max_req_size;
++ mmc->max_seg_size = 0x1000;
++ mmc->max_req_size = mmc->max_seg_size * host->ring_size;
+ mmc->max_blk_count = mmc->max_req_size / 512;
+ } else if (host->use_dma == TRANS_MODE_EDMAC) {
+ mmc->max_segs = 64;
--- /dev/null
+From fd7b4f9f46d46acbc7af3a439bb0d869efdc5c58 Mon Sep 17 00:00:00 2001
+From: Qun-Wei Lin <qun-wei.lin@mediatek.com>
+Date: Wed, 13 Nov 2024 12:25:43 +0800
+Subject: sched/task_stack: fix object_is_on_stack() for KASAN tagged pointers
+
+From: Qun-Wei Lin <qun-wei.lin@mediatek.com>
+
+commit fd7b4f9f46d46acbc7af3a439bb0d869efdc5c58 upstream.
+
+When CONFIG_KASAN_SW_TAGS and CONFIG_KASAN_STACK are enabled, the
+object_is_on_stack() function may produce incorrect results due to the
+presence of tags in the obj pointer, while the stack pointer does not have
+tags. This discrepancy can lead to incorrect stack object detection and
+subsequently trigger warnings if CONFIG_DEBUG_OBJECTS is also enabled.
+
+Example of the warning:
+
+ODEBUG: object 3eff800082ea7bb0 is NOT on stack ffff800082ea0000, but annotated.
+------------[ cut here ]------------
+WARNING: CPU: 0 PID: 1 at lib/debugobjects.c:557 __debug_object_init+0x330/0x364
+Modules linked in:
+CPU: 0 UID: 0 PID: 1 Comm: swapper/0 Not tainted 6.12.0-rc5 #4
+Hardware name: linux,dummy-virt (DT)
+pstate: 600000c5 (nZCv daIF -PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+pc : __debug_object_init+0x330/0x364
+lr : __debug_object_init+0x330/0x364
+sp : ffff800082ea7b40
+x29: ffff800082ea7b40 x28: 98ff0000c0164518 x27: 98ff0000c0164534
+x26: ffff800082d93ec8 x25: 0000000000000001 x24: 1cff0000c00172a0
+x23: 0000000000000000 x22: ffff800082d93ed0 x21: ffff800081a24418
+x20: 3eff800082ea7bb0 x19: efff800000000000 x18: 0000000000000000
+x17: 00000000000000ff x16: 0000000000000047 x15: 206b63617473206e
+x14: 0000000000000018 x13: ffff800082ea7780 x12: 0ffff800082ea78e
+x11: 0ffff800082ea790 x10: 0ffff800082ea79d x9 : 34d77febe173e800
+x8 : 34d77febe173e800 x7 : 0000000000000001 x6 : 0000000000000001
+x5 : feff800082ea74b8 x4 : ffff800082870a90 x3 : ffff80008018d3c4
+x2 : 0000000000000001 x1 : ffff800082858810 x0 : 0000000000000050
+Call trace:
+ __debug_object_init+0x330/0x364
+ debug_object_init_on_stack+0x30/0x3c
+ schedule_hrtimeout_range_clock+0xac/0x26c
+ schedule_hrtimeout+0x1c/0x30
+ wait_task_inactive+0x1d4/0x25c
+ kthread_bind_mask+0x28/0x98
+ init_rescuer+0x1e8/0x280
+ workqueue_init+0x1a0/0x3cc
+ kernel_init_freeable+0x118/0x200
+ kernel_init+0x28/0x1f0
+ ret_from_fork+0x10/0x20
+---[ end trace 0000000000000000 ]---
+ODEBUG: object 3eff800082ea7bb0 is NOT on stack ffff800082ea0000, but annotated.
+------------[ cut here ]------------
+
+Link: https://lkml.kernel.org/r/20241113042544.19095-1-qun-wei.lin@mediatek.com
+Signed-off-by: Qun-Wei Lin <qun-wei.lin@mediatek.com>
+Cc: Andrew Yang <andrew.yang@mediatek.com>
+Cc: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Cc: Casper Li <casper.li@mediatek.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Chinwen Chang <chinwen.chang@mediatek.com>
+Cc: Kent Overstreet <kent.overstreet@linux.dev>
+Cc: Matthias Brugger <matthias.bgg@gmail.com>
+Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
+Cc: Shakeel Butt <shakeel.butt@linux.dev>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/sched/task_stack.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/include/linux/sched/task_stack.h
++++ b/include/linux/sched/task_stack.h
+@@ -9,6 +9,7 @@
+ #include <linux/sched.h>
+ #include <linux/magic.h>
+ #include <linux/refcount.h>
++#include <linux/kasan.h>
+
+ #ifdef CONFIG_THREAD_INFO_IN_TASK
+
+@@ -89,6 +90,7 @@ static inline int object_is_on_stack(con
+ {
+ void *stack = task_stack_page(current);
+
++ obj = kasan_reset_tag(obj);
+ return (obj >= stack) && (obj < (stack + THREAD_SIZE));
+ }
+
--- /dev/null
+From fae1980347bfd23325099b69db6638b94149a94c Mon Sep 17 00:00:00 2001
+From: Donet Tom <donettom@linux.ibm.com>
+Date: Sun, 10 Nov 2024 00:49:03 -0600
+Subject: selftests: hugetlb_dio: fixup check for initial conditions to skip in the start
+
+From: Donet Tom <donettom@linux.ibm.com>
+
+commit fae1980347bfd23325099b69db6638b94149a94c upstream.
+
+This test verifies that a hugepage, used as a user buffer for DIO
+operations, is correctly freed upon unmapping. To test this, we read the
+count of free hugepages before and after the mmap, DIO, and munmap
+operations, then check if the free hugepage count is the same.
+
+Reading free hugepages before the test was removed by commit 0268d4579901
+('selftests: hugetlb_dio: check for initial conditions to skip at the
+start'), causing the test to always fail.
+
+This patch adds back reading the free hugepages before starting the test.
+With this patch, the tests are now passing.
+
+Test results without this patch:
+
+./tools/testing/selftests/mm/hugetlb_dio
+TAP version 13
+1..4
+ # No. Free pages before allocation : 0
+ # No. Free pages after munmap : 100
+not ok 1 : Huge pages not freed!
+ # No. Free pages before allocation : 0
+ # No. Free pages after munmap : 100
+not ok 2 : Huge pages not freed!
+ # No. Free pages before allocation : 0
+ # No. Free pages after munmap : 100
+not ok 3 : Huge pages not freed!
+ # No. Free pages before allocation : 0
+ # No. Free pages after munmap : 100
+not ok 4 : Huge pages not freed!
+ # Totals: pass:0 fail:4 xfail:0 xpass:0 skip:0 error:0
+
+Test results with this patch:
+
+/tools/testing/selftests/mm/hugetlb_dio
+TAP version 13
+1..4
+# No. Free pages before allocation : 100
+# No. Free pages after munmap : 100
+ok 1 : Huge pages freed successfully !
+# No. Free pages before allocation : 100
+# No. Free pages after munmap : 100
+ok 2 : Huge pages freed successfully !
+# No. Free pages before allocation : 100
+# No. Free pages after munmap : 100
+ok 3 : Huge pages freed successfully !
+# No. Free pages before allocation : 100
+# No. Free pages after munmap : 100
+ok 4 : Huge pages freed successfully !
+
+# Totals: pass:4 fail:0 xfail:0 xpass:0 skip:0 error:0
+
+Link: https://lkml.kernel.org/r/20241110064903.23626-1-donettom@linux.ibm.com
+Fixes: 0268d4579901 ("selftests: hugetlb_dio: check for initial conditions to skip in the start")
+Signed-off-by: Donet Tom <donettom@linux.ibm.com>
+Cc: Muhammad Usama Anjum <usama.anjum@collabora.com>
+Cc: Shuah Khan <shuah@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/mm/hugetlb_dio.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/tools/testing/selftests/mm/hugetlb_dio.c b/tools/testing/selftests/mm/hugetlb_dio.c
+index 60001c142ce9..432d5af15e66 100644
+--- a/tools/testing/selftests/mm/hugetlb_dio.c
++++ b/tools/testing/selftests/mm/hugetlb_dio.c
+@@ -44,6 +44,13 @@ void run_dio_using_hugetlb(unsigned int start_off, unsigned int end_off)
+ if (fd < 0)
+ ksft_exit_fail_perror("Error opening file\n");
+
++ /* Get the free huge pages before allocation */
++ free_hpage_b = get_free_hugepages();
++ if (free_hpage_b == 0) {
++ close(fd);
++ ksft_exit_skip("No free hugepage, exiting!\n");
++ }
++
+ /* Allocate a hugetlb page */
+ orig_buffer = mmap(NULL, h_pagesize, mmap_prot, mmap_flags, -1, 0);
+ if (orig_buffer == MAP_FAILED) {
+--
+2.47.0
+
arm-fix-cacheflush-with-pan.patch
tools-mm-fix-compile-error.patch
revert-drm-amd-pm-correct-the-workload-setting.patch
+drm-amd-display-run-idle-optimizations-at-end-of-vblank-handler.patch
+drm-amd-display-change-some-variable-name-of-psr.patch
+drm-amd-display-fix-panel-replay-not-update-screen-correctly.patch
+x86-cpu-amd-clear-virtualized-vmload-vmsave-on-zen4-client.patch
+x86-mm-fix-a-kdump-kernel-failure-on-sme-system-when-config_ima_kexec-y.patch
+x86-stackprotector-work-around-strict-clang-tls-symbol-requirements.patch
+mm-fix-null-pointer-dereference-in-alloc_pages_bulk_noprof.patch
+ocfs2-uncache-inode-which-has-failed-entering-the-group.patch
+crash-powerpc-default-to-crash_dump-n-on-ppc_book3s_32.patch
+sched-task_stack-fix-object_is_on_stack-for-kasan-tagged-pointers.patch
+fs-proc-task_mmu-prevent-integer-overflow-in-pagemap_scan_get_args.patch
+mm-mremap-fix-address-wraparound-in-move_page_tables.patch
+mm-revert-mm-shmem-fix-data-race-in-shmem_getattr.patch
+vdpa-solidrun-fix-ub-bug-with-devres.patch
+vdpa-mlx5-fix-pa-offset-with-unaligned-starting-iotlb-map.patch
+vp_vdpa-fix-id_table-array-not-null-terminated-error.patch
+ima-fix-buffer-overrun-in-ima_eventdigest_init_common.patch
+evm-stop-avoidably-reading-i_writecount-in-evm_file_release.patch
+kvm-selftests-disable-strict-aliasing.patch
+kvm-nvmx-treat-vpid01-as-current-if-l2-is-active-but-with-vpid-disabled.patch
+kvm-x86-unconditionally-set-irr_pending-when-updating-apicv-state.patch
+kvm-vmx-bury-intel-pt-virtualization-guest-host-mode-behind-config_broken.patch
+nilfs2-fix-null-ptr-deref-in-block_touch_buffer-tracepoint.patch
+nommu-pass-null-argument-to-vma_iter_prealloc.patch
+tpm-disable-tpm-on-tpm2_create_primary-failure.patch
+mm-page_alloc-move-mlocked-flag-clearance-into-free_pages_prepare.patch
+alsa-hda-realtek-fixed-clevo-platform-headset-mic-issue.patch
+alsa-hda-realtek-update-set-gpio3-to-default-for-thinkpad-with-alc1318.patch
+alsa-hda-realtek-fix-mute-micmute-leds-for-a-hp-elitebook-645-g10.patch
+mptcp-update-local-address-flags-when-setting-it.patch
+mptcp-hold-pm-lock-when-deleting-entry.patch
+mptcp-pm-use-_rcu-variant-under-rcu_read_lock.patch
+ocfs2-fix-ubsan-warning-in-ocfs2_verify_volume.patch
+nilfs2-fix-null-ptr-deref-in-block_dirty_buffer-tracepoint.patch
+loongarch-fix-early_numa_add_cpu-usage-for-fdt-systems.patch
+loongarch-disable-kasan-if-pgdir_size-is-too-large-for-cpu_vabits.patch
+loongarch-add-writecombine-shadow-mapping-in-kasan.patch
+loongarch-fix-ap-booting-issue-in-vm-mode.patch
+loongarch-make-kasan-work-with-5-level-page-tables.patch
+selftests-hugetlb_dio-fixup-check-for-initial-conditions-to-skip-in-the-start.patch
+revert-mmc-dw_mmc-fix-idmac-operation-with-pages-bigger-than-4k.patch
+revert-drm-amd-display-parse-umc_info-or-vram_info-based-on-asic.patch
+btrfs-fix-incorrect-comparison-for-delayed-refs.patch
+mailbox-qcom-cpucp-mark-the-irq-with-irqf_no_suspend-flag.patch
+firmware-arm_scmi-skip-opp-duplicates.patch
+firmware-arm_scmi-report-duplicate-opps-as-firmware-bugs.patch
+mmc-sunxi-mmc-fix-a100-compatible-description.patch
+drm-bridge-tc358768-fix-dsi-command-tx.patch
+drm-xe-handle-flat-ccs-during-hibernation-on-igpu.patch
+drm-xe-oa-fix-missing-outer-runtime-pm-protection-warning.patch
--- /dev/null
+From 423893fcbe7e9adc875bce4e55b9b25fc1424977 Mon Sep 17 00:00:00 2001
+From: Jarkko Sakkinen <jarkko@kernel.org>
+Date: Wed, 13 Nov 2024 20:35:39 +0200
+Subject: tpm: Disable TPM on tpm2_create_primary() failure
+
+From: Jarkko Sakkinen <jarkko@kernel.org>
+
+commit 423893fcbe7e9adc875bce4e55b9b25fc1424977 upstream.
+
+The earlier bug fix misplaced the error-label when dealing with the
+tpm2_create_primary() return value, which the original completely ignored.
+
+Cc: stable@vger.kernel.org
+Reported-by: Christoph Anton Mitterer <calestyo@scientia.org>
+Closes: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1087331
+Fixes: cc7d8594342a ("tpm: Rollback tpm2_load_null()")
+Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/char/tpm/tpm2-sessions.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/drivers/char/tpm/tpm2-sessions.c
++++ b/drivers/char/tpm/tpm2-sessions.c
+@@ -948,10 +948,13 @@ static int tpm2_load_null(struct tpm_chi
+ /* Deduce from the name change TPM interference: */
+ dev_err(&chip->dev, "null key integrity check failed\n");
+ tpm2_flush_context(chip, tmp_null_key);
+- chip->flags |= TPM_CHIP_FLAG_DISABLE;
+
+ err:
+- return rc ? -ENODEV : 0;
++ if (rc) {
++ chip->flags |= TPM_CHIP_FLAG_DISABLE;
++ rc = -ENODEV;
++ }
++ return rc;
+ }
+
+ /**
--- /dev/null
+From 29ce8b8a4fa74e841342c8b8f8941848a3c6f29f Mon Sep 17 00:00:00 2001
+From: Si-Wei Liu <si-wei.liu@oracle.com>
+Date: Mon, 21 Oct 2024 16:40:39 +0300
+Subject: vdpa/mlx5: Fix PA offset with unaligned starting iotlb map
+
+From: Si-Wei Liu <si-wei.liu@oracle.com>
+
+commit 29ce8b8a4fa74e841342c8b8f8941848a3c6f29f upstream.
+
+When calculating the physical address range based on the iotlb and mr
+[start,end) ranges, the offset of mr->start relative to map->start
+is not taken into account. This leads to some incorrect and duplicate
+mappings.
+
+For the case when mr->start < map->start the code is already correct:
+the range in [mr->start, map->start) was handled by a different
+iteration.
+
+Fixes: 94abbccdf291 ("vdpa/mlx5: Add shared memory registration code")
+Cc: stable@vger.kernel.org
+Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
+Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
+Message-Id: <20241021134040.975221-2-dtatulea@nvidia.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Acked-by: Jason Wang <jasowang@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/vdpa/mlx5/core/mr.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/drivers/vdpa/mlx5/core/mr.c
++++ b/drivers/vdpa/mlx5/core/mr.c
+@@ -232,7 +232,7 @@ static int map_direct_mr(struct mlx5_vdp
+ struct page *pg;
+ unsigned int nsg;
+ int sglen;
+- u64 pa;
++ u64 pa, offset;
+ u64 paend;
+ struct scatterlist *sg;
+ struct device *dma = mvdev->vdev.dma_dev;
+@@ -255,8 +255,10 @@ static int map_direct_mr(struct mlx5_vdp
+ sg = mr->sg_head.sgl;
+ for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
+ map; map = vhost_iotlb_itree_next(map, mr->start, mr->end - 1)) {
+- paend = map->addr + maplen(map, mr);
+- for (pa = map->addr; pa < paend; pa += sglen) {
++ offset = mr->start > map->start ? mr->start - map->start : 0;
++ pa = map->addr + offset;
++ paend = map->addr + offset + maplen(map, mr);
++ for (; pa < paend; pa += sglen) {
+ pg = pfn_to_page(__phys_to_pfn(pa));
+ if (!sg) {
+ mlx5_vdpa_warn(mvdev, "sg null. start 0x%llx, end 0x%llx\n",
--- /dev/null
+From 0b364cf53b20204e92bac7c6ebd1ee7d3ec62931 Mon Sep 17 00:00:00 2001
+From: Philipp Stanner <pstanner@redhat.com>
+Date: Mon, 28 Oct 2024 08:43:59 +0100
+Subject: vdpa: solidrun: Fix UB bug with devres
+
+From: Philipp Stanner <pstanner@redhat.com>
+
+commit 0b364cf53b20204e92bac7c6ebd1ee7d3ec62931 upstream.
+
+In psnet_open_pf_bar() and snet_open_vf_bar() a string later passed to
+pcim_iomap_regions() is placed on the stack. Neither
+pcim_iomap_regions() nor the functions it calls copy that string.
+
+Should the string later ever be used, this, consequently, causes
+undefined behavior since the stack frame will by then have disappeared.
+
+Fix the bug by allocating the strings on the heap through
+devm_kasprintf().
+
+Cc: stable@vger.kernel.org # v6.3
+Fixes: 51a8f9d7f587 ("virtio: vdpa: new SolidNET DPU driver.")
+Reported-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Closes: https://lore.kernel.org/all/74e9109a-ac59-49e2-9b1d-d825c9c9f891@wanadoo.fr/
+Suggested-by: Andy Shevchenko <andy@kernel.org>
+Signed-off-by: Philipp Stanner <pstanner@redhat.com>
+Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
+Message-Id: <20241028074357.9104-3-pstanner@redhat.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/vdpa/solidrun/snet_main.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+--- a/drivers/vdpa/solidrun/snet_main.c
++++ b/drivers/vdpa/solidrun/snet_main.c
+@@ -555,7 +555,7 @@ static const struct vdpa_config_ops snet
+
+ static int psnet_open_pf_bar(struct pci_dev *pdev, struct psnet *psnet)
+ {
+- char name[50];
++ char *name;
+ int ret, i, mask = 0;
+ /* We don't know which BAR will be used to communicate..
+ * We will map every bar with len > 0.
+@@ -573,7 +573,10 @@ static int psnet_open_pf_bar(struct pci_
+ return -ENODEV;
+ }
+
+- snprintf(name, sizeof(name), "psnet[%s]-bars", pci_name(pdev));
++ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "psnet[%s]-bars", pci_name(pdev));
++ if (!name)
++ return -ENOMEM;
++
+ ret = pcim_iomap_regions(pdev, mask, name);
+ if (ret) {
+ SNET_ERR(pdev, "Failed to request and map PCI BARs\n");
+@@ -590,10 +593,13 @@ static int psnet_open_pf_bar(struct pci_
+
+ static int snet_open_vf_bar(struct pci_dev *pdev, struct snet *snet)
+ {
+- char name[50];
++ char *name;
+ int ret;
+
+- snprintf(name, sizeof(name), "snet[%s]-bar", pci_name(pdev));
++ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "snet[%s]-bars", pci_name(pdev));
++ if (!name)
++ return -ENOMEM;
++
+ /* Request and map BAR */
+ ret = pcim_iomap_regions(pdev, BIT(snet->psnet->cfg.vf_bar), name);
+ if (ret) {
--- /dev/null
+From 4e39ecadf1d2a08187139619f1f314b64ba7d947 Mon Sep 17 00:00:00 2001
+From: Xiaoguang Wang <lege.wang@jaguarmicro.com>
+Date: Tue, 5 Nov 2024 21:35:18 +0800
+Subject: vp_vdpa: fix id_table array not null terminated error
+
+From: Xiaoguang Wang <lege.wang@jaguarmicro.com>
+
+commit 4e39ecadf1d2a08187139619f1f314b64ba7d947 upstream.
+
+Allocate one extra virtio_device_id as null terminator, otherwise
+vdpa_mgmtdev_get_classes() may iterate multiple times and visit
+undefined memory.
+
+Fixes: ffbda8e9df10 ("vdpa/vp_vdpa : add vdpa tool support in vp_vdpa")
+Cc: stable@vger.kernel.org
+Suggested-by: Parav Pandit <parav@nvidia.com>
+Signed-off-by: Angus Chen <angus.chen@jaguarmicro.com>
+Signed-off-by: Xiaoguang Wang <lege.wang@jaguarmicro.com>
+Message-Id: <20241105133518.1494-1-lege.wang@jaguarmicro.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Reviewed-by: Parav Pandit <parav@nvidia.com>
+Acked-by: Jason Wang <jasowang@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/vdpa/virtio_pci/vp_vdpa.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+--- a/drivers/vdpa/virtio_pci/vp_vdpa.c
++++ b/drivers/vdpa/virtio_pci/vp_vdpa.c
+@@ -612,7 +612,11 @@ static int vp_vdpa_probe(struct pci_dev
+ goto mdev_err;
+ }
+
+- mdev_id = kzalloc(sizeof(struct virtio_device_id), GFP_KERNEL);
++ /*
++ * id_table should be a null terminated array, so allocate one additional
++ * entry here, see vdpa_mgmtdev_get_classes().
++ */
++ mdev_id = kcalloc(2, sizeof(struct virtio_device_id), GFP_KERNEL);
+ if (!mdev_id) {
+ err = -ENOMEM;
+ goto mdev_id_err;
+@@ -632,8 +636,8 @@ static int vp_vdpa_probe(struct pci_dev
+ goto probe_err;
+ }
+
+- mdev_id->device = mdev->id.device;
+- mdev_id->vendor = mdev->id.vendor;
++ mdev_id[0].device = mdev->id.device;
++ mdev_id[0].vendor = mdev->id.vendor;
+ mgtdev->id_table = mdev_id;
+ mgtdev->max_supported_vqs = vp_modern_get_num_queues(mdev);
+ mgtdev->supported_features = vp_modern_get_features(mdev);
--- /dev/null
+From a5ca1dc46a6b610dd4627d8b633d6c84f9724ef0 Mon Sep 17 00:00:00 2001
+From: Mario Limonciello <mario.limonciello@amd.com>
+Date: Tue, 5 Nov 2024 10:02:34 -0600
+Subject: x86/CPU/AMD: Clear virtualized VMLOAD/VMSAVE on Zen4 client
+
+From: Mario Limonciello <mario.limonciello@amd.com>
+
+commit a5ca1dc46a6b610dd4627d8b633d6c84f9724ef0 upstream.
+
+A number of Zen4 client SoCs advertise the ability to use virtualized
+VMLOAD/VMSAVE, but using these instructions is reported to be a cause
+of a random host reboot.
+
+These instructions aren't intended to be advertised on Zen4 client
+so clear the capability.
+
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Cc: stable@vger.kernel.org
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=219009
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/amd.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -924,6 +924,17 @@ static void init_amd_zen4(struct cpuinfo
+ {
+ if (!cpu_has(c, X86_FEATURE_HYPERVISOR))
+ msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT);
++
++ /*
++ * These Zen4 SoCs advertise support for virtualized VMLOAD/VMSAVE
++ * in some BIOS versions but they can lead to random host reboots.
++ */
++ switch (c->x86_model) {
++ case 0x18 ... 0x1f:
++ case 0x60 ... 0x7f:
++ clear_cpu_cap(c, X86_FEATURE_V_VMSAVE_VMLOAD);
++ break;
++ }
+ }
+
+ static void init_amd_zen5(struct cpuinfo_x86 *c)
--- /dev/null
+From 8d9ffb2fe65a6c4ef114e8d4f947958a12751bbe Mon Sep 17 00:00:00 2001
+From: Baoquan He <bhe@redhat.com>
+Date: Wed, 11 Sep 2024 16:16:15 +0800
+Subject: x86/mm: Fix a kdump kernel failure on SME system when CONFIG_IMA_KEXEC=y
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Baoquan He <bhe@redhat.com>
+
+commit 8d9ffb2fe65a6c4ef114e8d4f947958a12751bbe upstream.
+
+The kdump kernel is broken on SME systems with CONFIG_IMA_KEXEC=y enabled.
+Debugging traced the issue back to
+
+ b69a2afd5afc ("x86/kexec: Carry forward IMA measurement log on kexec").
+
+Testing was previously not conducted on SME systems with CONFIG_IMA_KEXEC
+enabled, which led to the oversight, with the following incarnation:
+
+...
+ ima: No TPM chip found, activating TPM-bypass!
+ Loading compiled-in module X.509 certificates
+ Loaded X.509 cert 'Build time autogenerated kernel key: 18ae0bc7e79b64700122bb1d6a904b070fef2656'
+ ima: Allocated hash algorithm: sha256
+ Oops: general protection fault, probably for non-canonical address 0xcfacfdfe6660003e: 0000 [#1] PREEMPT SMP NOPTI
+ CPU: 0 UID: 0 PID: 1 Comm: swapper/0 Not tainted 6.11.0-rc2+ #14
+ Hardware name: Dell Inc. PowerEdge R7425/02MJ3T, BIOS 1.20.0 05/03/2023
+ RIP: 0010:ima_restore_measurement_list
+ Call Trace:
+ <TASK>
+ ? show_trace_log_lvl
+ ? show_trace_log_lvl
+ ? ima_load_kexec_buffer
+ ? __die_body.cold
+ ? die_addr
+ ? exc_general_protection
+ ? asm_exc_general_protection
+ ? ima_restore_measurement_list
+ ? vprintk_emit
+ ? ima_load_kexec_buffer
+ ima_load_kexec_buffer
+ ima_init
+ ? __pfx_init_ima
+ init_ima
+ ? __pfx_init_ima
+ do_one_initcall
+ do_initcalls
+ ? __pfx_kernel_init
+ kernel_init_freeable
+ kernel_init
+ ret_from_fork
+ ? __pfx_kernel_init
+ ret_from_fork_asm
+ </TASK>
+ Modules linked in:
+ ---[ end trace 0000000000000000 ]---
+ ...
+ Kernel panic - not syncing: Fatal exception
+ Kernel Offset: disabled
+ Rebooting in 10 seconds..
+
+Adding debug printks showed that the stored addr and size of ima_kexec buffer
+are not decrypted correctly like:
+
+ ima: ima_load_kexec_buffer, buffer:0xcfacfdfe6660003e, size:0xe48066052d5df359
+
+Three types of setup_data info
+
+ — SETUP_EFI,
+ - SETUP_IMA, and
+ - SETUP_RNG_SEED
+
+are passed to the kexec/kdump kernel. Only the ima_kexec buffer
+experienced incorrect decryption. Debugging identified a bug in
+early_memremap_is_setup_data(), where an incorrect range calculation
+occurred due to the len variable in struct setup_data ended up only
+representing the length of the data field, excluding the struct's size,
+and thus leading to miscalculation.
+
+Address a similar issue in memremap_is_setup_data() while at it.
+
+ [ bp: Heavily massage. ]
+
+Fixes: b3c72fc9a78e ("x86/boot: Introduce setup_indirect")
+Signed-off-by: Baoquan He <bhe@redhat.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Acked-by: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: <stable@kernel.org>
+Link: https://lore.kernel.org/r/20240911081615.262202-3-bhe@redhat.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/mm/ioremap.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/mm/ioremap.c
++++ b/arch/x86/mm/ioremap.c
+@@ -655,7 +655,8 @@ static bool memremap_is_setup_data(resou
+ paddr_next = data->next;
+ len = data->len;
+
+- if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
++ if ((phys_addr > paddr) &&
++ (phys_addr < (paddr + sizeof(struct setup_data) + len))) {
+ memunmap(data);
+ return true;
+ }
+@@ -717,7 +718,8 @@ static bool __init early_memremap_is_set
+ paddr_next = data->next;
+ len = data->len;
+
+- if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
++ if ((phys_addr > paddr) &&
++ (phys_addr < (paddr + sizeof(struct setup_data) + len))) {
+ early_memunmap(data, sizeof(*data));
+ return true;
+ }
--- /dev/null
+From 577c134d311b9b94598d7a0c86be1f431f823003 Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Tue, 5 Nov 2024 10:57:46 -0500
+Subject: x86/stackprotector: Work around strict Clang TLS symbol requirements
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+commit 577c134d311b9b94598d7a0c86be1f431f823003 upstream.
+
+GCC and Clang both implement stack protector support based on Thread Local
+Storage (TLS) variables, and this is used in the kernel to implement per-task
+stack cookies, by copying a task's stack cookie into a per-CPU variable every
+time it is scheduled in.
+
+Both now also implement -mstack-protector-guard-symbol=, which permits the TLS
+variable to be specified directly. This is useful because it will allow to
+move away from using a fixed offset of 40 bytes into the per-CPU area on
+x86_64, which requires a lot of special handling in the per-CPU code and the
+runtime relocation code.
+
+However, while GCC is rather lax in its implementation of this command line
+option, Clang actually requires that the provided symbol name refers to a TLS
+variable (i.e., one declared with __thread), although it also permits the
+variable to be undeclared entirely, in which case it will use an implicit
+declaration of the right type.
+
+The upshot of this is that Clang will emit the correct references to the stack
+cookie variable in most cases, e.g.,
+
+ 10d: 64 a1 00 00 00 00 mov %fs:0x0,%eax
+ 10f: R_386_32 __stack_chk_guard
+
+However, if a non-TLS definition of the symbol in question is visible in the
+same compilation unit (which amounts to the whole of vmlinux if LTO is
+enabled), it will drop the per-CPU prefix and emit a load from a bogus
+address.
+
+Work around this by using a symbol name that never occurs in C code, and emit
+it as an alias in the linker script.
+
+Fixes: 3fb0fdb3bbe7 ("x86/stackprotector/32: Make the canary into a regular percpu variable")
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Brian Gerst <brgerst@gmail.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Reviewed-by: Nathan Chancellor <nathan@kernel.org>
+Tested-by: Nathan Chancellor <nathan@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://github.com/ClangBuiltLinux/linux/issues/1854
+Link: https://lore.kernel.org/r/20241105155801.1779119-2-brgerst@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/Makefile | 5 +++--
+ arch/x86/entry/entry.S | 16 ++++++++++++++++
+ arch/x86/include/asm/asm-prototypes.h | 3 +++
+ arch/x86/kernel/cpu/common.c | 2 ++
+ arch/x86/kernel/vmlinux.lds.S | 3 +++
+ 5 files changed, 27 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -138,9 +138,10 @@ ifeq ($(CONFIG_X86_32),y)
+
+ ifeq ($(CONFIG_STACKPROTECTOR),y)
+ ifeq ($(CONFIG_SMP),y)
+- KBUILD_CFLAGS += -mstack-protector-guard-reg=fs -mstack-protector-guard-symbol=__stack_chk_guard
++ KBUILD_CFLAGS += -mstack-protector-guard-reg=fs \
++ -mstack-protector-guard-symbol=__ref_stack_chk_guard
+ else
+- KBUILD_CFLAGS += -mstack-protector-guard=global
++ KBUILD_CFLAGS += -mstack-protector-guard=global
+ endif
+ endif
+ else
+--- a/arch/x86/entry/entry.S
++++ b/arch/x86/entry/entry.S
+@@ -51,3 +51,19 @@ EXPORT_SYMBOL_GPL(mds_verw_sel);
+ .popsection
+
+ THUNK warn_thunk_thunk, __warn_thunk
++
++#ifndef CONFIG_X86_64
++/*
++ * Clang's implementation of TLS stack cookies requires the variable in
++ * question to be a TLS variable. If the variable happens to be defined as an
++ * ordinary variable with external linkage in the same compilation unit (which
++ * amounts to the whole of vmlinux with LTO enabled), Clang will drop the
++ * segment register prefix from the references, resulting in broken code. Work
++ * around this by avoiding the symbol used in -mstack-protector-guard-symbol=
++ * entirely in the C code, and use an alias emitted by the linker script
++ * instead.
++ */
++#ifdef CONFIG_STACKPROTECTOR
++EXPORT_SYMBOL(__ref_stack_chk_guard);
++#endif
++#endif
+--- a/arch/x86/include/asm/asm-prototypes.h
++++ b/arch/x86/include/asm/asm-prototypes.h
+@@ -20,3 +20,6 @@
+ extern void cmpxchg8b_emu(void);
+ #endif
+
++#if defined(__GENKSYMS__) && defined(CONFIG_STACKPROTECTOR)
++extern unsigned long __ref_stack_chk_guard;
++#endif
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -2084,8 +2084,10 @@ void syscall_init(void)
+
+ #ifdef CONFIG_STACKPROTECTOR
+ DEFINE_PER_CPU(unsigned long, __stack_chk_guard);
++#ifndef CONFIG_SMP
+ EXPORT_PER_CPU_SYMBOL(__stack_chk_guard);
+ #endif
++#endif
+
+ #endif /* CONFIG_X86_64 */
+
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -492,6 +492,9 @@ SECTIONS
+ . = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
+ "kernel image bigger than KERNEL_IMAGE_SIZE");
+
++/* needed for Clang - see arch/x86/entry/entry.S */
++PROVIDE(__ref_stack_chk_guard = __stack_chk_guard);
++
+ #ifdef CONFIG_X86_64
+ /*
+ * Per-cpu symbols which need to be offset from __per_cpu_load