--- /dev/null
+From stable+bounces-201182-greg=kroah.com@vger.kernel.org Tue Dec 16 12:02:23 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 16 Dec 2025 06:00:34 -0500
+Subject: ALSA: hda: cs35l41: Fix NULL pointer dereference in cs35l41_hda_read_acpi()
+To: stable@vger.kernel.org
+Cc: Denis Arefev <arefev@swemel.ru>, Richard Fitzgerald <rf@opensource.cirrus.com>, Takashi Iwai <tiwai@suse.de>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251216110034.2752519-1-sashal@kernel.org>
+
+From: Denis Arefev <arefev@swemel.ru>
+
+[ Upstream commit c34b04cc6178f33c08331568c7fd25c5b9a39f66 ]
+
+The acpi_get_first_physical_node() function can return NULL, in which
+case the get_device() function also returns NULL, but this value is
+then dereferenced without checking, so add a check to prevent a crash.
+
+Found by Linux Verification Center (linuxtesting.org) with SVACE.
+
+Fixes: 7b2f3eb492da ("ALSA: hda: cs35l41: Add support for CS35L41 in HDA systems")
+Cc: stable@vger.kernel.org
+Signed-off-by: Denis Arefev <arefev@swemel.ru>
+Reviewed-by: Richard Fitzgerald <rf@opensource.cirrus.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/20251202101338.11437-1-arefev@swemel.ru
+[ sound/hda/codecs/side-codecs/ -> sound/pci/hda/ ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/pci/hda/cs35l41_hda.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/sound/pci/hda/cs35l41_hda.c
++++ b/sound/pci/hda/cs35l41_hda.c
+@@ -1865,6 +1865,8 @@ static int cs35l41_hda_read_acpi(struct
+
+ cs35l41->dacpi = adev;
+ physdev = get_device(acpi_get_first_physical_node(adev));
++ if (!physdev)
++ return -ENODEV;
+
+ sub = acpi_get_subsystem_id(ACPI_HANDLE(physdev));
+ if (IS_ERR(sub))
--- /dev/null
+From stable+bounces-201181-greg=kroah.com@vger.kernel.org Tue Dec 16 12:01:31 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 16 Dec 2025 05:59:26 -0500
+Subject: ALSA: wavefront: Clear substream pointers on close
+To: stable@vger.kernel.org
+Cc: Junrui Luo <moonafterrain@outlook.com>, Yuhao Jiang <danisjiang@gmail.com>, Takashi Iwai <tiwai@suse.de>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251216105926.2751412-2-sashal@kernel.org>
+
+From: Junrui Luo <moonafterrain@outlook.com>
+
+[ Upstream commit e11c5c13ce0ab2325d38fe63500be1dd88b81e38 ]
+
+Clear substream pointers in close functions to avoid leaving dangling
+pointers, helping to improve code safety and prevent potential issues.
+
+Reported-by: Yuhao Jiang <danisjiang@gmail.com>
+Reported-by: Junrui Luo <moonafterrain@outlook.com>
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Cc: stable@vger.kernel.org
+Signed-off-by: Junrui Luo <moonafterrain@outlook.com>
+Link: https://patch.msgid.link/SYBPR01MB7881DF762CAB45EE42F6D812AFC2A@SYBPR01MB7881.ausprd01.prod.outlook.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/isa/wavefront/wavefront_midi.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/sound/isa/wavefront/wavefront_midi.c
++++ b/sound/isa/wavefront/wavefront_midi.c
+@@ -278,6 +278,7 @@ static int snd_wavefront_midi_input_clos
+ return -EIO;
+
+ guard(spinlock_irqsave)(&midi->open);
++ midi->substream_input[mpu] = NULL;
+ midi->mode[mpu] &= ~MPU401_MODE_INPUT;
+
+ return 0;
+@@ -300,6 +301,7 @@ static int snd_wavefront_midi_output_clo
+ return -EIO;
+
+ guard(spinlock_irqsave)(&midi->open);
++ midi->substream_output[mpu] = NULL;
+ midi->mode[mpu] &= ~MPU401_MODE_OUTPUT;
+ return 0;
+ }
--- /dev/null
+From stable+bounces-201179-greg=kroah.com@vger.kernel.org Tue Dec 16 12:01:21 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 16 Dec 2025 05:59:25 -0500
+Subject: ALSA: wavefront: Use guard() for spin locks
+To: stable@vger.kernel.org
+Cc: Takashi Iwai <tiwai@suse.de>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251216105926.2751412-1-sashal@kernel.org>
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 4b97f8e614ba46a50bd181d40b5a1424411a211a ]
+
+Clean up the code using guard() for spin locks.
+
+Merely code refactoring, and no behavior change.
+
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/20250829145300.5460-19-tiwai@suse.de
+Stable-dep-of: e11c5c13ce0a ("ALSA: wavefront: Clear substream pointers on close")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/isa/wavefront/wavefront_midi.c | 127 +++++++++++++---------------------
+ sound/isa/wavefront/wavefront_synth.c | 18 ++--
+ 2 files changed, 59 insertions(+), 86 deletions(-)
+
+--- a/sound/isa/wavefront/wavefront_midi.c
++++ b/sound/isa/wavefront/wavefront_midi.c
+@@ -113,7 +113,6 @@ static void snd_wavefront_midi_output_wr
+ {
+ snd_wavefront_midi_t *midi = &card->wavefront.midi;
+ snd_wavefront_mpu_id mpu;
+- unsigned long flags;
+ unsigned char midi_byte;
+ int max = 256, mask = 1;
+ int timeout;
+@@ -142,11 +141,9 @@ static void snd_wavefront_midi_output_wr
+ break;
+ }
+
+- spin_lock_irqsave (&midi->virtual, flags);
+- if ((midi->mode[midi->output_mpu] & MPU401_MODE_OUTPUT) == 0) {
+- spin_unlock_irqrestore (&midi->virtual, flags);
++ guard(spinlock_irqsave)(&midi->virtual);
++ if ((midi->mode[midi->output_mpu] & MPU401_MODE_OUTPUT) == 0)
+ goto __second;
+- }
+ if (output_ready (midi)) {
+ if (snd_rawmidi_transmit(midi->substream_output[midi->output_mpu], &midi_byte, 1) == 1) {
+ if (!midi->isvirtual ||
+@@ -160,14 +157,11 @@ static void snd_wavefront_midi_output_wr
+ del_timer(&midi->timer);
+ }
+ midi->mode[midi->output_mpu] &= ~MPU401_MODE_OUTPUT_TRIGGER;
+- spin_unlock_irqrestore (&midi->virtual, flags);
+ goto __second;
+ }
+ } else {
+- spin_unlock_irqrestore (&midi->virtual, flags);
+ return;
+ }
+- spin_unlock_irqrestore (&midi->virtual, flags);
+ }
+
+ __second:
+@@ -185,15 +179,13 @@ static void snd_wavefront_midi_output_wr
+ break;
+ }
+
+- spin_lock_irqsave (&midi->virtual, flags);
++ guard(spinlock_irqsave)(&midi->virtual);
+ if (!midi->isvirtual)
+ mask = 0;
+ mpu = midi->output_mpu ^ mask;
+ mask = 0; /* don't invert the value from now */
+- if ((midi->mode[mpu] & MPU401_MODE_OUTPUT) == 0) {
+- spin_unlock_irqrestore (&midi->virtual, flags);
++ if ((midi->mode[mpu] & MPU401_MODE_OUTPUT) == 0)
+ return;
+- }
+ if (snd_rawmidi_transmit_empty(midi->substream_output[mpu]))
+ goto __timer;
+ if (output_ready (midi)) {
+@@ -215,20 +207,16 @@ static void snd_wavefront_midi_output_wr
+ del_timer(&midi->timer);
+ }
+ midi->mode[mpu] &= ~MPU401_MODE_OUTPUT_TRIGGER;
+- spin_unlock_irqrestore (&midi->virtual, flags);
+ return;
+ }
+ } else {
+- spin_unlock_irqrestore (&midi->virtual, flags);
+ return;
+ }
+- spin_unlock_irqrestore (&midi->virtual, flags);
+ }
+ }
+
+ static int snd_wavefront_midi_input_open(struct snd_rawmidi_substream *substream)
+ {
+- unsigned long flags;
+ snd_wavefront_midi_t *midi;
+ snd_wavefront_mpu_id mpu;
+
+@@ -243,17 +231,15 @@ static int snd_wavefront_midi_input_open
+ if (!midi)
+ return -EIO;
+
+- spin_lock_irqsave (&midi->open, flags);
++ guard(spinlock_irqsave)(&midi->open);
+ midi->mode[mpu] |= MPU401_MODE_INPUT;
+ midi->substream_input[mpu] = substream;
+- spin_unlock_irqrestore (&midi->open, flags);
+
+ return 0;
+ }
+
+ static int snd_wavefront_midi_output_open(struct snd_rawmidi_substream *substream)
+ {
+- unsigned long flags;
+ snd_wavefront_midi_t *midi;
+ snd_wavefront_mpu_id mpu;
+
+@@ -268,17 +254,15 @@ static int snd_wavefront_midi_output_ope
+ if (!midi)
+ return -EIO;
+
+- spin_lock_irqsave (&midi->open, flags);
++ guard(spinlock_irqsave)(&midi->open);
+ midi->mode[mpu] |= MPU401_MODE_OUTPUT;
+ midi->substream_output[mpu] = substream;
+- spin_unlock_irqrestore (&midi->open, flags);
+
+ return 0;
+ }
+
+ static int snd_wavefront_midi_input_close(struct snd_rawmidi_substream *substream)
+ {
+- unsigned long flags;
+ snd_wavefront_midi_t *midi;
+ snd_wavefront_mpu_id mpu;
+
+@@ -293,16 +277,14 @@ static int snd_wavefront_midi_input_clos
+ if (!midi)
+ return -EIO;
+
+- spin_lock_irqsave (&midi->open, flags);
++ guard(spinlock_irqsave)(&midi->open);
+ midi->mode[mpu] &= ~MPU401_MODE_INPUT;
+- spin_unlock_irqrestore (&midi->open, flags);
+
+ return 0;
+ }
+
+ static int snd_wavefront_midi_output_close(struct snd_rawmidi_substream *substream)
+ {
+- unsigned long flags;
+ snd_wavefront_midi_t *midi;
+ snd_wavefront_mpu_id mpu;
+
+@@ -317,15 +299,13 @@ static int snd_wavefront_midi_output_clo
+ if (!midi)
+ return -EIO;
+
+- spin_lock_irqsave (&midi->open, flags);
++ guard(spinlock_irqsave)(&midi->open);
+ midi->mode[mpu] &= ~MPU401_MODE_OUTPUT;
+- spin_unlock_irqrestore (&midi->open, flags);
+ return 0;
+ }
+
+ static void snd_wavefront_midi_input_trigger(struct snd_rawmidi_substream *substream, int up)
+ {
+- unsigned long flags;
+ snd_wavefront_midi_t *midi;
+ snd_wavefront_mpu_id mpu;
+
+@@ -341,30 +321,27 @@ static void snd_wavefront_midi_input_tri
+ if (!midi)
+ return;
+
+- spin_lock_irqsave (&midi->virtual, flags);
++ guard(spinlock_irqsave)(&midi->virtual);
+ if (up) {
+ midi->mode[mpu] |= MPU401_MODE_INPUT_TRIGGER;
+ } else {
+ midi->mode[mpu] &= ~MPU401_MODE_INPUT_TRIGGER;
+ }
+- spin_unlock_irqrestore (&midi->virtual, flags);
+ }
+
+ static void snd_wavefront_midi_output_timer(struct timer_list *t)
+ {
+ snd_wavefront_midi_t *midi = from_timer(midi, t, timer);
+ snd_wavefront_card_t *card = midi->timer_card;
+- unsigned long flags;
+
+- spin_lock_irqsave (&midi->virtual, flags);
+- mod_timer(&midi->timer, 1 + jiffies);
+- spin_unlock_irqrestore (&midi->virtual, flags);
++ scoped_guard(spinlock_irqsave, &midi->virtual) {
++ mod_timer(&midi->timer, 1 + jiffies);
++ }
+ snd_wavefront_midi_output_write(card);
+ }
+
+ static void snd_wavefront_midi_output_trigger(struct snd_rawmidi_substream *substream, int up)
+ {
+- unsigned long flags;
+ snd_wavefront_midi_t *midi;
+ snd_wavefront_mpu_id mpu;
+
+@@ -380,22 +357,22 @@ static void snd_wavefront_midi_output_tr
+ if (!midi)
+ return;
+
+- spin_lock_irqsave (&midi->virtual, flags);
+- if (up) {
+- if ((midi->mode[mpu] & MPU401_MODE_OUTPUT_TRIGGER) == 0) {
+- if (!midi->istimer) {
+- timer_setup(&midi->timer,
+- snd_wavefront_midi_output_timer,
+- 0);
+- mod_timer(&midi->timer, 1 + jiffies);
++ scoped_guard(spinlock_irqsave, &midi->virtual) {
++ if (up) {
++ if ((midi->mode[mpu] & MPU401_MODE_OUTPUT_TRIGGER) == 0) {
++ if (!midi->istimer) {
++ timer_setup(&midi->timer,
++ snd_wavefront_midi_output_timer,
++ 0);
++ mod_timer(&midi->timer, 1 + jiffies);
++ }
++ midi->istimer++;
++ midi->mode[mpu] |= MPU401_MODE_OUTPUT_TRIGGER;
+ }
+- midi->istimer++;
+- midi->mode[mpu] |= MPU401_MODE_OUTPUT_TRIGGER;
++ } else {
++ midi->mode[mpu] &= ~MPU401_MODE_OUTPUT_TRIGGER;
+ }
+- } else {
+- midi->mode[mpu] &= ~MPU401_MODE_OUTPUT_TRIGGER;
+ }
+- spin_unlock_irqrestore (&midi->virtual, flags);
+
+ if (up)
+ snd_wavefront_midi_output_write((snd_wavefront_card_t *)substream->rmidi->card->private_data);
+@@ -405,7 +382,6 @@ void
+ snd_wavefront_midi_interrupt (snd_wavefront_card_t *card)
+
+ {
+- unsigned long flags;
+ snd_wavefront_midi_t *midi;
+ static struct snd_rawmidi_substream *substream = NULL;
+ static int mpu = external_mpu;
+@@ -419,37 +395,37 @@ snd_wavefront_midi_interrupt (snd_wavefr
+ return;
+ }
+
+- spin_lock_irqsave (&midi->virtual, flags);
+- while (--max) {
++ scoped_guard(spinlock_irqsave, &midi->virtual) {
++ while (--max) {
+
+- if (input_avail (midi)) {
+- byte = read_data (midi);
++ if (input_avail(midi)) {
++ byte = read_data(midi);
+
+- if (midi->isvirtual) {
+- if (byte == WF_EXTERNAL_SWITCH) {
+- substream = midi->substream_input[external_mpu];
+- mpu = external_mpu;
+- } else if (byte == WF_INTERNAL_SWITCH) {
+- substream = midi->substream_output[internal_mpu];
++ if (midi->isvirtual) {
++ if (byte == WF_EXTERNAL_SWITCH) {
++ substream = midi->substream_input[external_mpu];
++ mpu = external_mpu;
++ } else if (byte == WF_INTERNAL_SWITCH) {
++ substream = midi->substream_output[internal_mpu];
++ mpu = internal_mpu;
++ } /* else just leave it as it is */
++ } else {
++ substream = midi->substream_input[internal_mpu];
+ mpu = internal_mpu;
+- } /* else just leave it as it is */
+- } else {
+- substream = midi->substream_input[internal_mpu];
+- mpu = internal_mpu;
+- }
++ }
+
+- if (substream == NULL) {
+- continue;
+- }
++ if (substream == NULL) {
++ continue;
++ }
+
+- if (midi->mode[mpu] & MPU401_MODE_INPUT_TRIGGER) {
+- snd_rawmidi_receive(substream, &byte, 1);
++ if (midi->mode[mpu] & MPU401_MODE_INPUT_TRIGGER) {
++ snd_rawmidi_receive(substream, &byte, 1);
++ }
++ } else {
++ break;
+ }
+- } else {
+- break;
+ }
+- }
+- spin_unlock_irqrestore (&midi->virtual, flags);
++ }
+
+ snd_wavefront_midi_output_write(card);
+ }
+@@ -471,13 +447,10 @@ void
+ snd_wavefront_midi_disable_virtual (snd_wavefront_card_t *card)
+
+ {
+- unsigned long flags;
+-
+- spin_lock_irqsave (&card->wavefront.midi.virtual, flags);
++ guard(spinlock_irqsave)(&card->wavefront.midi.virtual);
+ // snd_wavefront_midi_input_close (card->ics2115_external_rmidi);
+ // snd_wavefront_midi_output_close (card->ics2115_external_rmidi);
+ card->wavefront.midi.isvirtual = 0;
+- spin_unlock_irqrestore (&card->wavefront.midi.virtual, flags);
+ }
+
+ int
+--- a/sound/isa/wavefront/wavefront_synth.c
++++ b/sound/isa/wavefront/wavefront_synth.c
+@@ -1741,10 +1741,10 @@ snd_wavefront_internal_interrupt (snd_wa
+ return;
+ }
+
+- spin_lock(&dev->irq_lock);
+- dev->irq_ok = 1;
+- dev->irq_cnt++;
+- spin_unlock(&dev->irq_lock);
++ scoped_guard(spinlock, &dev->irq_lock) {
++ dev->irq_ok = 1;
++ dev->irq_cnt++;
++ }
+ wake_up(&dev->interrupt_sleeper);
+ }
+
+@@ -1796,11 +1796,11 @@ wavefront_should_cause_interrupt (snd_wa
+ wait_queue_entry_t wait;
+
+ init_waitqueue_entry(&wait, current);
+- spin_lock_irq(&dev->irq_lock);
+- add_wait_queue(&dev->interrupt_sleeper, &wait);
+- dev->irq_ok = 0;
+- outb (val,port);
+- spin_unlock_irq(&dev->irq_lock);
++ scoped_guard(spinlock_irq, &dev->irq_lock) {
++ add_wait_queue(&dev->interrupt_sleeper, &wait);
++ dev->irq_ok = 0;
++ outb(val, port);
++ }
+ while (!dev->irq_ok && time_before(jiffies, timeout)) {
+ schedule_timeout_uninterruptible(1);
+ barrier();
--- /dev/null
+From stable+bounces-203065-greg=kroah.com@vger.kernel.org Fri Dec 19 11:32:13 2025
+From: Wei-Lin Chang <weilin.chang@arm.com>
+Date: Fri, 19 Dec 2025 10:21:23 +0000
+Subject: arm64: Revamp HCR_EL2.E2H RES1 detection
+To: stable@vger.kernel.org
+Cc: Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Wei-Lin Chang <weilin.chang@arm.com>
+Message-ID: <20251219102123.730823-4-weilin.chang@arm.com>
+
+From: Marc Zyngier <maz@kernel.org>
+
+[ Upstream commit ca88ecdce5f51874a7c151809bd2c936ee0d3805 ]
+
+We currently have two ways to identify CPUs that only implement FEAT_VHE
+and not FEAT_E2H0:
+
+- either they advertise it via ID_AA64MMFR4_EL1.E2H0,
+- or the HCR_EL2.E2H bit is RAO/WI
+
+However, there is a third category of "cpus" that fall between these
+two cases: on CPUs that do not implement FEAT_FGT, it is IMPDEF whether
+an access to ID_AA64MMFR4_EL1 can trap to EL2 when the register value
+is zero.
+
+A consequence of this is that on systems such as Neoverse V2, a NV
+guest cannot reliably detect that it is in a VHE-only configuration
+(E2H is writable, and ID_AA64MMFR4_EL1 is 0), despite the hypervisor's
+best effort to repaint the id register.
+
+Replace the RAO/WI test by a sequence that makes use of the VHE
+register remapping between EL1 and EL2 to detect this situation,
+and work out whether we get the VHE behaviour even after having
+set HCR_EL2.E2H to 0.
+
+This solves the NV problem, and provides a more reliable acid test
+for CPUs that do not completely follow the letter of the architecture
+while providing a RES1 behaviour for HCR_EL2.E2H.
+
+Suggested-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
+Tested-by: Jan Kotas <jank@cadence.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/15A85F2B-1A0C-4FA7-9FE4-EEC2203CC09E@global.cadence.com
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Wei-Lin Chang <weilin.chang@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/el2_setup.h | 38 +++++++++++++++++++++++++++++++------
+ 1 file changed, 32 insertions(+), 6 deletions(-)
+
+--- a/arch/arm64/include/asm/el2_setup.h
++++ b/arch/arm64/include/asm/el2_setup.h
+@@ -24,22 +24,48 @@
+ * ID_AA64MMFR4_EL1.E2H0 < 0. On such CPUs HCR_EL2.E2H is RES1, but it
+ * can reset into an UNKNOWN state and might not read as 1 until it has
+ * been initialized explicitly.
+- *
+- * Fruity CPUs seem to have HCR_EL2.E2H set to RAO/WI, but
+- * don't advertise it (they predate this relaxation).
+- *
+ * Initalize HCR_EL2.E2H so that later code can rely upon HCR_EL2.E2H
+ * indicating whether the CPU is running in E2H mode.
+ */
+ mrs_s x1, SYS_ID_AA64MMFR4_EL1
+ sbfx x1, x1, #ID_AA64MMFR4_EL1_E2H0_SHIFT, #ID_AA64MMFR4_EL1_E2H0_WIDTH
+ cmp x1, #0
+- b.ge .LnVHE_\@
++ b.lt .LnE2H0_\@
++
++ /*
++ * Unfortunately, HCR_EL2.E2H can be RES1 even if not advertised
++ * as such via ID_AA64MMFR4_EL1.E2H0:
++ *
++ * - Fruity CPUs predate the !FEAT_E2H0 relaxation, and seem to
++ * have HCR_EL2.E2H implemented as RAO/WI.
++ *
++ * - On CPUs that lack FEAT_FGT, a hypervisor can't trap guest
++ * reads of ID_AA64MMFR4_EL1 to advertise !FEAT_E2H0. NV
++ * guests on these hosts can write to HCR_EL2.E2H without
++ * trapping to the hypervisor, but these writes have no
++ * functional effect.
++ *
++ * Handle both cases by checking for an essential VHE property
++ * (system register remapping) to decide whether we're
++ * effectively VHE-only or not.
++ */
++ msr hcr_el2, x0 // Setup HCR_EL2 as nVHE
++ isb
++ mov x1, #1 // Write something to FAR_EL1
++ msr far_el1, x1
++ isb
++ mov x1, #2 // Try to overwrite it via FAR_EL2
++ msr far_el2, x1
++ isb
++ mrs x1, far_el1 // If we see the latest write in FAR_EL1,
++ cmp x1, #2 // we can safely assume we are VHE only.
++ b.ne .LnVHE_\@ // Otherwise, we know that nVHE works.
+
++.LnE2H0_\@:
+ orr x0, x0, #HCR_E2H
+-.LnVHE_\@:
+ msr hcr_el2, x0
+ isb
++.LnVHE_\@:
+ .endm
+
+ .macro __init_el2_sctlr
--- /dev/null
+From 69741d9ccc7222e6b6f138db67b012ecc0d72542 Mon Sep 17 00:00:00 2001
+From: Ray Wu <ray.wu@amd.com>
+Date: Fri, 28 Nov 2025 08:58:13 +0800
+Subject: drm/amd/display: Fix scratch registers offsets for DCN35
+
+From: Ray Wu <ray.wu@amd.com>
+
+commit 69741d9ccc7222e6b6f138db67b012ecc0d72542 upstream.
+
+[Why]
+Different platforms use different NBIO header files,
+causing display code to use different offsets and read
+wrong accelerated status.
+
+[How]
+- Unified NBIO offset header file across platforms.
+- Corrected scratch register offsets to proper locations.
+
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/4667
+Cc: Mario Limonciello <mario.limonciello@amd.com>
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Ray Wu <ray.wu@amd.com>
+Signed-off-by: Chenyu Chen <chen-yu.chen@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 49a63bc8eda0304ba307f5ba68305f936174f72d)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+@@ -203,12 +203,12 @@ enum dcn35_clk_src_array_id {
+ NBIO_BASE_INNER(seg)
+
+ #define NBIO_SR(reg_name)\
+- REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \
+- regBIF_BX2_ ## reg_name
++ REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX1_ ## reg_name ## _BASE_IDX) + \
++ regBIF_BX1_ ## reg_name
+
+ #define NBIO_SR_ARR(reg_name, id)\
+- REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \
+- regBIF_BX2_ ## reg_name
++ REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX1_ ## reg_name ## _BASE_IDX) + \
++ regBIF_BX1_ ## reg_name
+
+ #define bios_regs_init() \
+ ( \
--- /dev/null
+From fd62aa13d3ee0f21c756a40a7c2f900f98992d6a Mon Sep 17 00:00:00 2001
+From: Ray Wu <ray.wu@amd.com>
+Date: Fri, 28 Nov 2025 09:14:09 +0800
+Subject: drm/amd/display: Fix scratch registers offsets for DCN351
+
+From: Ray Wu <ray.wu@amd.com>
+
+commit fd62aa13d3ee0f21c756a40a7c2f900f98992d6a upstream.
+
+[Why]
+Different platforms use different NBIO header files,
+causing display code to use different offsets and read
+wrong accelerated status.
+
+[How]
+- Unified NBIO offset header file across platforms.
+- Corrected scratch register offsets to proper locations.
+
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/4667
+Cc: Mario Limonciello <mario.limonciello@amd.com>
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Ray Wu <ray.wu@amd.com>
+Signed-off-by: Chenyu Chen <chen-yu.chen@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 576e032e909c8a6bb3d907b4ef5f6abe0f644199)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+@@ -183,12 +183,12 @@ enum dcn351_clk_src_array_id {
+ NBIO_BASE_INNER(seg)
+
+ #define NBIO_SR(reg_name)\
+- REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \
+- regBIF_BX2_ ## reg_name
++ REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX1_ ## reg_name ## _BASE_IDX) + \
++ regBIF_BX1_ ## reg_name
+
+ #define NBIO_SR_ARR(reg_name, id)\
+- REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \
+- regBIF_BX2_ ## reg_name
++ REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX1_ ## reg_name ## _BASE_IDX) + \
++ regBIF_BX1_ ## reg_name
+
+ #define bios_regs_init() \
+ ( \
--- /dev/null
+From 3c41114dcdabb7b25f5bc33273c6db9c7af7f4a7 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 11 Nov 2025 11:17:22 -0500
+Subject: drm/amd/display: Use GFP_ATOMIC in dc_create_plane_state()
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 3c41114dcdabb7b25f5bc33273c6db9c7af7f4a7 upstream.
+
+This can get called from an atomic context.
+
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/4470
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 8acdad9344cc7b4e7bc01f0dfea80093eb3768db)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+@@ -93,7 +93,7 @@ void enable_surface_flip_reporting(struc
+ struct dc_plane_state *dc_create_plane_state(const struct dc *dc)
+ {
+ struct dc_plane_state *plane_state = kvzalloc(sizeof(*plane_state),
+- GFP_KERNEL);
++ GFP_ATOMIC);
+
+ if (NULL == plane_state)
+ return NULL;
--- /dev/null
+From 520f37c30992fd0c212a34fbe99c062b7a3dc52e Mon Sep 17 00:00:00 2001
+From: Jani Nikula <jani.nikula@intel.com>
+Date: Tue, 28 Oct 2025 22:07:25 +0200
+Subject: drm/displayid: pass iter to drm_find_displayid_extension()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jani Nikula <jani.nikula@intel.com>
+
+commit 520f37c30992fd0c212a34fbe99c062b7a3dc52e upstream.
+
+It's more convenient to pass iter than a handful of its members to
+drm_find_displayid_extension(), especially as we're about to add another
+member.
+
+Rename the function to find_next_displayid_extension() while at it, to be
+more descriptive.
+
+Cc: Tiago Martins Araújo <tiago.martins.araujo@gmail.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Tested-by: Tiago Martins Araújo <tiago.martins.araujo@gmail.com>
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/3837ae7f095e77a082ac2422ce2fac96c4f9373d.1761681968.git.jani.nikula@intel.com
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/drm_displayid.c | 19 +++++++------------
+ 1 file changed, 7 insertions(+), 12 deletions(-)
+
+--- a/drivers/gpu/drm/drm_displayid.c
++++ b/drivers/gpu/drm/drm_displayid.c
+@@ -48,26 +48,24 @@ validate_displayid(const u8 *displayid,
+ return base;
+ }
+
+-static const u8 *drm_find_displayid_extension(const struct drm_edid *drm_edid,
+- int *length, int *idx,
+- int *ext_index)
++static const u8 *find_next_displayid_extension(struct displayid_iter *iter)
+ {
+ const struct displayid_header *base;
+ const u8 *displayid;
+
+- displayid = drm_edid_find_extension(drm_edid, DISPLAYID_EXT, ext_index);
++ displayid = drm_edid_find_extension(iter->drm_edid, DISPLAYID_EXT, &iter->ext_index);
+ if (!displayid)
+ return NULL;
+
+ /* EDID extensions block checksum isn't for us */
+- *length = EDID_LENGTH - 1;
+- *idx = 1;
++ iter->length = EDID_LENGTH - 1;
++ iter->idx = 1;
+
+- base = validate_displayid(displayid, *length, *idx);
++ base = validate_displayid(displayid, iter->length, iter->idx);
+ if (IS_ERR(base))
+ return NULL;
+
+- *length = *idx + sizeof(*base) + base->bytes;
++ iter->length = iter->idx + sizeof(*base) + base->bytes;
+
+ return displayid;
+ }
+@@ -126,10 +124,7 @@ __displayid_iter_next(struct displayid_i
+ /* The first section we encounter is the base section */
+ bool base_section = !iter->section;
+
+- iter->section = drm_find_displayid_extension(iter->drm_edid,
+- &iter->length,
+- &iter->idx,
+- &iter->ext_index);
++ iter->section = find_next_displayid_extension(iter);
+ if (!iter->section) {
+ iter->drm_edid = NULL;
+ return NULL;
--- /dev/null
+From stable+bounces-203006-greg=kroah.com@vger.kernel.org Thu Dec 18 17:05:51 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 18 Dec 2025 10:37:36 -0500
+Subject: hsr: hold rcu and dev lock for hsr_get_port_ndev
+To: stable@vger.kernel.org
+Cc: Hangbin Liu <liuhangbin@gmail.com>, Paolo Abeni <pabeni@redhat.com>, Simon Horman <horms@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251218153736.3435271-1-sashal@kernel.org>
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit 847748fc66d08a89135a74e29362a66ba4e3ab15 ]
+
+hsr_get_port_ndev calls hsr_for_each_port, which needs to be called with
+the RCU lock held. On the other hand, before returning the port device, we
+need to hold the device reference to avoid a UaF in the caller function.
+
+Suggested-by: Paolo Abeni <pabeni@redhat.com>
+Fixes: 9c10dd8eed74 ("net: hsr: Create and export hsr_get_port_ndev()")
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20250905091533.377443-4-liuhangbin@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+[ Drop multicast filtering changes ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/hsr/hsr_device.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -682,9 +682,14 @@ struct net_device *hsr_get_port_ndev(str
+ struct hsr_priv *hsr = netdev_priv(ndev);
+ struct hsr_port *port;
+
++ rcu_read_lock();
+ hsr_for_each_port(hsr, port)
+- if (port->type == pt)
++ if (port->type == pt) {
++ dev_hold(port->dev);
++ rcu_read_unlock();
+ return port->dev;
++ }
++ rcu_read_unlock();
+ return NULL;
+ }
+ EXPORT_SYMBOL(hsr_get_port_ndev);
--- /dev/null
+From stable+bounces-203063-greg=kroah.com@vger.kernel.org Fri Dec 19 11:36:09 2025
+From: Wei-Lin Chang <weilin.chang@arm.com>
+Date: Fri, 19 Dec 2025 10:21:21 +0000
+Subject: KVM: arm64: Initialize HCR_EL2.E2H early
+To: stable@vger.kernel.org
+Cc: Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Wei-Lin Chang <weilin.chang@arm.com>
+Message-ID: <20251219102123.730823-2-weilin.chang@arm.com>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit 7a68b55ff39b0a1638acb1694c185d49f6077a0d ]
+
+On CPUs without FEAT_E2H0, HCR_EL2.E2H is RES1, but may reset to an
+UNKNOWN value out of reset and consequently may not read as 1 unless it
+has been explicitly initialized.
+
+We handled this for the head.S boot code in commits:
+
+ 3944382fa6f22b54 ("arm64: Treat HCR_EL2.E2H as RES1 when ID_AA64MMFR4_EL1.E2H0 is negative")
+ b3320142f3db9b3f ("arm64: Fix early handling of FEAT_E2H0 not being implemented")
+
+Unfortunately, we forgot to apply a similar fix to the KVM PSCI entry
+points used when relaying CPU_ON, CPU_SUSPEND, and SYSTEM_SUSPEND. When
+KVM is entered via these entry points, the value of HCR_EL2.E2H may be
+consumed before it has been initialized (e.g. by the 'init_el2_state'
+macro).
+
+Initialize HCR_EL2.E2H early in these paths such that it can be consumed
+reliably. The existing code in head.S is factored out into a new
+'init_el2_hcr' macro, and this is used in the __kvm_hyp_init_cpu()
+function common to all the relevant PSCI entry points.
+
+For clarity, I've tweaked the assembly used to check whether
+ID_AA64MMFR4_EL1.E2H0 is negative. The bitfield is extracted as a signed
+value, and this is checked with a signed-greater-or-equal (GE) comparison.
+
+As the hyp code will reconfigure HCR_EL2 later in ___kvm_hyp_init(), all
+bits other than E2H are initialized to zero in __kvm_hyp_init_cpu().
+
+Fixes: 3944382fa6f22b54 ("arm64: Treat HCR_EL2.E2H as RES1 when ID_AA64MMFR4_EL1.E2H0 is negative")
+Fixes: b3320142f3db9b3f ("arm64: Fix early handling of FEAT_E2H0 not being implemented")
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Ahmed Genidi <ahmed.genidi@arm.com>
+Cc: Ben Horgan <ben.horgan@arm.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Leo Yan <leo.yan@arm.com>
+Cc: Marc Zyngier <maz@kernel.org>
+Cc: Oliver Upton <oliver.upton@linux.dev>
+Cc: Will Deacon <will@kernel.org>
+Link: https://lore.kernel.org/r/20250227180526.1204723-2-mark.rutland@arm.com
+[maz: fixed LT->GE thinko]
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Wei-Lin Chang <weilin.chang@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/el2_setup.h | 26 ++++++++++++++++++++++++++
+ arch/arm64/kernel/head.S | 19 +------------------
+ arch/arm64/kvm/hyp/nvhe/hyp-init.S | 8 +++++++-
+ 3 files changed, 34 insertions(+), 19 deletions(-)
+
+--- a/arch/arm64/include/asm/el2_setup.h
++++ b/arch/arm64/include/asm/el2_setup.h
+@@ -16,6 +16,32 @@
+ #include <asm/sysreg.h>
+ #include <linux/irqchip/arm-gic-v3.h>
+
++.macro init_el2_hcr val
++ mov_q x0, \val
++
++ /*
++ * Compliant CPUs advertise their VHE-onlyness with
++ * ID_AA64MMFR4_EL1.E2H0 < 0. On such CPUs HCR_EL2.E2H is RES1, but it
++ * can reset into an UNKNOWN state and might not read as 1 until it has
++ * been initialized explicitly.
++ *
++ * Fruity CPUs seem to have HCR_EL2.E2H set to RAO/WI, but
++ * don't advertise it (they predate this relaxation).
++ *
++ * Initalize HCR_EL2.E2H so that later code can rely upon HCR_EL2.E2H
++ * indicating whether the CPU is running in E2H mode.
++ */
++ mrs_s x1, SYS_ID_AA64MMFR4_EL1
++ sbfx x1, x1, #ID_AA64MMFR4_EL1_E2H0_SHIFT, #ID_AA64MMFR4_EL1_E2H0_WIDTH
++ cmp x1, #0
++ b.ge .LnVHE_\@
++
++ orr x0, x0, #HCR_E2H
++.LnVHE_\@:
++ msr hcr_el2, x0
++ isb
++.endm
++
+ .macro __init_el2_sctlr
+ mov_q x0, INIT_SCTLR_EL2_MMU_OFF
+ msr sctlr_el2, x0
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -295,25 +295,8 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
+ msr sctlr_el2, x0
+ isb
+ 0:
+- mov_q x0, HCR_HOST_NVHE_FLAGS
+-
+- /*
+- * Compliant CPUs advertise their VHE-onlyness with
+- * ID_AA64MMFR4_EL1.E2H0 < 0. HCR_EL2.E2H can be
+- * RES1 in that case. Publish the E2H bit early so that
+- * it can be picked up by the init_el2_state macro.
+- *
+- * Fruity CPUs seem to have HCR_EL2.E2H set to RAO/WI, but
+- * don't advertise it (they predate this relaxation).
+- */
+- mrs_s x1, SYS_ID_AA64MMFR4_EL1
+- tbz x1, #(ID_AA64MMFR4_EL1_E2H0_SHIFT + ID_AA64MMFR4_EL1_E2H0_WIDTH - 1), 1f
+-
+- orr x0, x0, #HCR_E2H
+-1:
+- msr hcr_el2, x0
+- isb
+
++ init_el2_hcr HCR_HOST_NVHE_FLAGS
+ init_el2_state
+
+ /* Hypervisor stub */
+--- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S
++++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
+@@ -73,8 +73,12 @@ __do_hyp_init:
+ eret
+ SYM_CODE_END(__kvm_hyp_init)
+
++/*
++ * Initialize EL2 CPU state to sane values.
++ *
++ * HCR_EL2.E2H must have been initialized already.
++ */
+ SYM_CODE_START_LOCAL(__kvm_init_el2_state)
+- /* Initialize EL2 CPU state to sane values. */
+ init_el2_state // Clobbers x0..x2
+ finalise_el2_state
+ ret
+@@ -206,6 +210,8 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
+
+ 2: msr SPsel, #1 // We want to use SP_EL{1,2}
+
++ init_el2_hcr 0
++
+ bl __kvm_init_el2_state
+
+ __init_el2_nvhe_prepare_eret
--- /dev/null
+From stable+bounces-203064-greg=kroah.com@vger.kernel.org Fri Dec 19 11:31:49 2025
+From: Wei-Lin Chang <weilin.chang@arm.com>
+Date: Fri, 19 Dec 2025 10:21:22 +0000
+Subject: KVM: arm64: Initialize SCTLR_EL1 in __kvm_hyp_init_cpu()
+To: stable@vger.kernel.org
+Cc: Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Wei-Lin Chang <weilin.chang@arm.com>
+Message-ID: <20251219102123.730823-3-weilin.chang@arm.com>
+
+From: Ahmed Genidi <ahmed.genidi@arm.com>
+
+[ Upstream commit 3855a7b91d42ebf3513b7ccffc44807274978b3d ]
+
+When KVM is in protected mode, host calls to PSCI are proxied via EL2,
+and cold entries from CPU_ON, CPU_SUSPEND, and SYSTEM_SUSPEND bounce
+through __kvm_hyp_init_cpu() at EL2 before entering the host kernel's
+entry point at EL1. While __kvm_hyp_init_cpu() initializes SPSR_EL2 for
+the exception return to EL1, it does not initialize SCTLR_EL1.
+
+Due to this, it's possible to enter EL1 with SCTLR_EL1 in an UNKNOWN
+state. In practice this has been seen to result in kernel crashes after
+CPU_ON as a result of SCTLR_EL1.M being 1 in violation of the initial
+core configuration specified by PSCI.
+
+Fix this by initializing SCTLR_EL1 for cold entry to the host kernel.
+As it's necessary to write to SCTLR_EL12 in VHE mode, this
+initialization is moved into __kvm_host_psci_cpu_entry() where we can
+use write_sysreg_el1().
+
+The remnants of the '__init_el2_nvhe_prepare_eret' macro are folded into
+its only caller, as this is clearer than having the macro.
+
+Fixes: cdf367192766ad11 ("KVM: arm64: Intercept host's CPU_ON SMCs")
+Reported-by: Leo Yan <leo.yan@arm.com>
+Signed-off-by: Ahmed Genidi <ahmed.genidi@arm.com>
+[ Mark: clarify commit message, handle E2H, move to C, remove macro ]
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Ahmed Genidi <ahmed.genidi@arm.com>
+Cc: Ben Horgan <ben.horgan@arm.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Leo Yan <leo.yan@arm.com>
+Cc: Marc Zyngier <maz@kernel.org>
+Cc: Oliver Upton <oliver.upton@linux.dev>
+Cc: Will Deacon <will@kernel.org>
+Reviewed-by: Leo Yan <leo.yan@arm.com>
+Link: https://lore.kernel.org/r/20250227180526.1204723-3-mark.rutland@arm.com
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Wei-Lin Chang <weilin.chang@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/el2_setup.h | 5 -----
+ arch/arm64/kernel/head.S | 3 ++-
+ arch/arm64/kvm/hyp/nvhe/hyp-init.S | 2 --
+ arch/arm64/kvm/hyp/nvhe/psci-relay.c | 3 +++
+ 4 files changed, 5 insertions(+), 8 deletions(-)
+
+--- a/arch/arm64/include/asm/el2_setup.h
++++ b/arch/arm64/include/asm/el2_setup.h
+@@ -265,11 +265,6 @@
+ .Lskip_fgt2_\@:
+ .endm
+
+-.macro __init_el2_nvhe_prepare_eret
+- mov x0, #INIT_PSTATE_EL1
+- msr spsr_el2, x0
+-.endm
+-
+ /**
+ * Initialize EL2 registers to sane values. This should be called early on all
+ * cores that were booted in EL2. Note that everything gets initialised as
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -319,7 +319,8 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
+ msr sctlr_el1, x1
+ mov x2, xzr
+ 3:
+- __init_el2_nvhe_prepare_eret
++ mov x0, #INIT_PSTATE_EL1
++ msr spsr_el2, x0
+
+ mov w0, #BOOT_CPU_MODE_EL2
+ orr x0, x0, x2
+--- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S
++++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
+@@ -214,8 +214,6 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
+
+ bl __kvm_init_el2_state
+
+- __init_el2_nvhe_prepare_eret
+-
+ /* Enable MMU, set vectors and stack. */
+ mov x0, x28
+ bl ___kvm_hyp_init // Clobbers x0..x2
+--- a/arch/arm64/kvm/hyp/nvhe/psci-relay.c
++++ b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
+@@ -218,6 +218,9 @@ asmlinkage void __noreturn __kvm_host_ps
+ if (is_cpu_on)
+ release_boot_args(boot_args);
+
++ write_sysreg_el1(INIT_SCTLR_EL1_MMU_OFF, SYS_SCTLR);
++ write_sysreg(INIT_PSTATE_EL1, SPSR_EL2);
++
+ __host_enter(host_ctxt);
+ }
+
--- /dev/null
+From stable+bounces-202885-greg=kroah.com@vger.kernel.org Wed Dec 17 19:10:05 2025
+From: Claudiu <claudiu.beznea@tuxon.dev>
+Date: Wed, 17 Dec 2025 20:00:00 +0200
+Subject: pinctrl: renesas: rzg2l: Fix ISEL restore on resume
+To: stable@vger.kernel.org
+Cc: claudiu.beznea@tuxon.dev
+Message-ID: <20251217180000.721366-1-claudiu.beznea.uj@bp.renesas.com>
+
+From: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
+
+commit 44bf66122c12ef6d3382a9b84b9be1802e5f0e95 upstream.
+
+Commit 1d2da79708cb ("pinctrl: renesas: rzg2l: Avoid configuring ISEL in
+gpio_irq_{en,dis}able*()") dropped the configuration of ISEL from
+struct irq_chip::{irq_enable, irq_disable} APIs and moved it to
+struct gpio_chip::irq::{child_to_parent_hwirq,
+child_irq_domain_ops::free} APIs to fix spurious IRQs.
+
+After commit 1d2da79708cb ("pinctrl: renesas: rzg2l: Avoid configuring ISEL
+in gpio_irq_{en,dis}able*()"), ISEL was no longer configured properly on
+resume. This is because the pinctrl resume code used
+struct irq_chip::irq_enable (called from rzg2l_gpio_irq_restore()) to
+reconfigure the wakeup interrupts. Some drivers (e.g. Ethernet) may also
+reconfigure non-wakeup interrupts on resume through their own code,
+eventually calling struct irq_chip::irq_enable.
+
+Fix this by adding ISEL configuration back into the
+struct irq_chip::irq_enable API and on the resume path for wakeup interrupts.
+
+As struct irq_chip::irq_enable now needs to take the lock to update the ISEL,
+convert the struct rzg2l_pinctrl::lock to a raw spinlock and replace the
+locking API calls with the raw variants. Otherwise lockdep reports an
+invalid wait context when probing the adv7511 module on RZ/G2L:
+
+ [ BUG: Invalid wait context ]
+ 6.17.0-rc5-next-20250911-00001-gfcfac22533c9 #18 Not tainted
+ -----------------------------
+ (udev-worker)/165 is trying to lock:
+ ffff00000e3664a8 (&pctrl->lock){....}-{3:3}, at: rzg2l_gpio_irq_enable+0x38/0x78
+ other info that might help us debug this:
+ context-{5:5}
+ 3 locks held by (udev-worker)/165:
+ #0: ffff00000e890108 (&dev->mutex){....}-{4:4}, at: __driver_attach+0x90/0x1ac
+ #1: ffff000011c07240 (request_class){+.+.}-{4:4}, at: __setup_irq+0xb4/0x6dc
+ #2: ffff000011c070c8 (lock_class){....}-{2:2}, at: __setup_irq+0xdc/0x6dc
+ stack backtrace:
+ CPU: 1 UID: 0 PID: 165 Comm: (udev-worker) Not tainted 6.17.0-rc5-next-20250911-00001-gfcfac22533c9 #18 PREEMPT
+ Hardware name: Renesas SMARC EVK based on r9a07g044l2 (DT)
+ Call trace:
+ show_stack+0x18/0x24 (C)
+ dump_stack_lvl+0x90/0xd0
+ dump_stack+0x18/0x24
+ __lock_acquire+0xa14/0x20b4
+ lock_acquire+0x1c8/0x354
+ _raw_spin_lock_irqsave+0x60/0x88
+ rzg2l_gpio_irq_enable+0x38/0x78
+ irq_enable+0x40/0x8c
+ __irq_startup+0x78/0xa4
+ irq_startup+0x108/0x16c
+ __setup_irq+0x3c0/0x6dc
+ request_threaded_irq+0xec/0x1ac
+ devm_request_threaded_irq+0x80/0x134
+ adv7511_probe+0x928/0x9a4 [adv7511]
+ i2c_device_probe+0x22c/0x3dc
+ really_probe+0xbc/0x2a0
+ __driver_probe_device+0x78/0x12c
+ driver_probe_device+0x40/0x164
+ __driver_attach+0x9c/0x1ac
+ bus_for_each_dev+0x74/0xd0
+ driver_attach+0x24/0x30
+ bus_add_driver+0xe4/0x208
+ driver_register+0x60/0x128
+ i2c_register_driver+0x48/0xd0
+ adv7511_init+0x5c/0x1000 [adv7511]
+ do_one_initcall+0x64/0x30c
+ do_init_module+0x58/0x23c
+ load_module+0x1bcc/0x1d40
+ init_module_from_file+0x88/0xc4
+ idempotent_init_module+0x188/0x27c
+ __arm64_sys_finit_module+0x68/0xac
+ invoke_syscall+0x48/0x110
+ el0_svc_common.constprop.0+0xc0/0xe0
+ do_el0_svc+0x1c/0x28
+ el0_svc+0x4c/0x160
+ el0t_64_sync_handler+0xa0/0xe4
+ el0t_64_sync+0x198/0x19c
+
+Having the ISEL configuration back in the struct irq_chip::irq_enable API
+should be safe with respect to spurious IRQs, as in the probe case IRQs
+are enabled anyway in struct gpio_chip::irq::child_to_parent_hwirq. No
+spurious IRQs were detected in suspend/resume, boot, and Ethernet link
+insert/remove tests (executed on RZ/G3S). Boot and Ethernet link
+insert/remove tests were also executed successfully on RZ/G2L.
+
+Fixes: 1d2da79708cb ("pinctrl: renesas: rzg2l: Avoid configuring ISEL in gpio_irq_{en,dis}able*()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
+Reviewed-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Link: https://patch.msgid.link/20250912095308.3603704-1-claudiu.beznea.uj@bp.renesas.com
+Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
+[claudiu.beznea:
+ - in rzg2l_write_oen() kept v6.12 code and use
+ raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore()
+ - in rzg2l_gpio_set() kept v6.12 code and use raw_spin_unlock_irqrestore()
+ - in rzg2l_pinctrl_resume_noirq() kept v6.12 code
+ - manually adjust rzg3s_oen_write(), rzv2h_oen_write() to use
+ raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore()]
+Signed-off-by: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pinctrl/renesas/pinctrl-rzg2l.c | 75 +++++++++++++++++++-------------
+ 1 file changed, 46 insertions(+), 29 deletions(-)
+
+--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
++++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+@@ -349,7 +349,7 @@ struct rzg2l_pinctrl {
+ spinlock_t bitmap_lock; /* protect tint_slot bitmap */
+ unsigned int hwirq[RZG2L_TINT_MAX_INTERRUPT];
+
+- spinlock_t lock; /* lock read/write registers */
++ raw_spinlock_t lock; /* lock read/write registers */
+ struct mutex mutex; /* serialize adding groups and functions */
+
+ struct rzg2l_pinctrl_pin_settings *settings;
+@@ -454,7 +454,7 @@ static void rzg2l_pinctrl_set_pfc_mode(s
+ unsigned long flags;
+ u32 reg;
+
+- spin_lock_irqsave(&pctrl->lock, flags);
++ raw_spin_lock_irqsave(&pctrl->lock, flags);
+
+ /* Set pin to 'Non-use (Hi-Z input protection)' */
+ reg = readw(pctrl->base + PM(off));
+@@ -478,7 +478,7 @@ static void rzg2l_pinctrl_set_pfc_mode(s
+
+ pctrl->data->pwpr_pfc_lock_unlock(pctrl, true);
+
+- spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ };
+
+ static int rzg2l_pinctrl_set_mux(struct pinctrl_dev *pctldev,
+@@ -805,10 +805,10 @@ static void rzg2l_rmw_pin_config(struct
+ addr += 4;
+ }
+
+- spin_lock_irqsave(&pctrl->lock, flags);
++ raw_spin_lock_irqsave(&pctrl->lock, flags);
+ reg = readl(addr) & ~(mask << (bit * 8));
+ writel(reg | (val << (bit * 8)), addr);
+- spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ }
+
+ static int rzg2l_caps_to_pwr_reg(const struct rzg2l_register_offsets *regs, u32 caps)
+@@ -1036,14 +1036,14 @@ static int rzg2l_write_oen(struct rzg2l_
+ if (bit < 0)
+ return bit;
+
+- spin_lock_irqsave(&pctrl->lock, flags);
++ raw_spin_lock_irqsave(&pctrl->lock, flags);
+ val = readb(pctrl->base + ETH_MODE);
+ if (oen)
+ val &= ~BIT(bit);
+ else
+ val |= BIT(bit);
+ writeb(val, pctrl->base + ETH_MODE);
+- spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return 0;
+ }
+@@ -1089,14 +1089,14 @@ static int rzg3s_oen_write(struct rzg2l_
+ if (bit < 0)
+ return bit;
+
+- spin_lock_irqsave(&pctrl->lock, flags);
++ raw_spin_lock_irqsave(&pctrl->lock, flags);
+ val = readb(pctrl->base + ETH_MODE);
+ if (oen)
+ val &= ~BIT(bit);
+ else
+ val |= BIT(bit);
+ writeb(val, pctrl->base + ETH_MODE);
+- spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return 0;
+ }
+@@ -1201,7 +1201,7 @@ static int rzv2h_oen_write(struct rzg2l_
+ u8 pwpr;
+
+ bit = rzv2h_pin_to_oen_bit(pctrl, _pin);
+- spin_lock_irqsave(&pctrl->lock, flags);
++ raw_spin_lock_irqsave(&pctrl->lock, flags);
+ val = readb(pctrl->base + PFC_OEN);
+ if (oen)
+ val &= ~BIT(bit);
+@@ -1212,7 +1212,7 @@ static int rzv2h_oen_write(struct rzg2l_
+ writeb(pwpr | PWPR_REGWE_B, pctrl->base + regs->pwpr);
+ writeb(val, pctrl->base + PFC_OEN);
+ writeb(pwpr & ~PWPR_REGWE_B, pctrl->base + regs->pwpr);
+- spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return 0;
+ }
+@@ -1613,14 +1613,14 @@ static int rzg2l_gpio_request(struct gpi
+ if (ret)
+ return ret;
+
+- spin_lock_irqsave(&pctrl->lock, flags);
++ raw_spin_lock_irqsave(&pctrl->lock, flags);
+
+ /* Select GPIO mode in PMC Register */
+ reg8 = readb(pctrl->base + PMC(off));
+ reg8 &= ~BIT(bit);
+ pctrl->data->pmc_writeb(pctrl, reg8, PMC(off));
+
+- spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return 0;
+ }
+@@ -1635,7 +1635,7 @@ static void rzg2l_gpio_set_direction(str
+ unsigned long flags;
+ u16 reg16;
+
+- spin_lock_irqsave(&pctrl->lock, flags);
++ raw_spin_lock_irqsave(&pctrl->lock, flags);
+
+ reg16 = readw(pctrl->base + PM(off));
+ reg16 &= ~(PM_MASK << (bit * 2));
+@@ -1643,7 +1643,7 @@ static void rzg2l_gpio_set_direction(str
+ reg16 |= (output ? PM_OUTPUT : PM_INPUT) << (bit * 2);
+ writew(reg16, pctrl->base + PM(off));
+
+- spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ }
+
+ static int rzg2l_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+@@ -1687,7 +1687,7 @@ static void rzg2l_gpio_set(struct gpio_c
+ unsigned long flags;
+ u8 reg8;
+
+- spin_lock_irqsave(&pctrl->lock, flags);
++ raw_spin_lock_irqsave(&pctrl->lock, flags);
+
+ reg8 = readb(pctrl->base + P(off));
+
+@@ -1696,7 +1696,7 @@ static void rzg2l_gpio_set(struct gpio_c
+ else
+ writeb(reg8 & ~BIT(bit), pctrl->base + P(off));
+
+- spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ }
+
+ static int rzg2l_gpio_direction_output(struct gpio_chip *chip,
+@@ -2236,14 +2236,13 @@ static int rzg2l_gpio_get_gpioint(unsign
+ return gpioint;
+ }
+
+-static void rzg2l_gpio_irq_endisable(struct rzg2l_pinctrl *pctrl,
+- unsigned int hwirq, bool enable)
++static void __rzg2l_gpio_irq_endisable(struct rzg2l_pinctrl *pctrl,
++ unsigned int hwirq, bool enable)
+ {
+ const struct pinctrl_pin_desc *pin_desc = &pctrl->desc.pins[hwirq];
+ u64 *pin_data = pin_desc->drv_data;
+ u32 off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data);
+ u8 bit = RZG2L_PIN_ID_TO_PIN(hwirq);
+- unsigned long flags;
+ void __iomem *addr;
+
+ addr = pctrl->base + ISEL(off);
+@@ -2252,12 +2251,20 @@ static void rzg2l_gpio_irq_endisable(str
+ addr += 4;
+ }
+
+- spin_lock_irqsave(&pctrl->lock, flags);
+ if (enable)
+ writel(readl(addr) | BIT(bit * 8), addr);
+ else
+ writel(readl(addr) & ~BIT(bit * 8), addr);
+- spin_unlock_irqrestore(&pctrl->lock, flags);
++}
++
++static void rzg2l_gpio_irq_endisable(struct rzg2l_pinctrl *pctrl,
++ unsigned int hwirq, bool enable)
++{
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&pctrl->lock, flags);
++ __rzg2l_gpio_irq_endisable(pctrl, hwirq, enable);
++ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ }
+
+ static void rzg2l_gpio_irq_disable(struct irq_data *d)
+@@ -2269,15 +2276,25 @@ static void rzg2l_gpio_irq_disable(struc
+ gpiochip_disable_irq(gc, hwirq);
+ }
+
+-static void rzg2l_gpio_irq_enable(struct irq_data *d)
++static void __rzg2l_gpio_irq_enable(struct irq_data *d, bool lock)
+ {
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
++ struct rzg2l_pinctrl *pctrl = container_of(gc, struct rzg2l_pinctrl, gpio_chip);
+ unsigned int hwirq = irqd_to_hwirq(d);
+
+ gpiochip_enable_irq(gc, hwirq);
++ if (lock)
++ rzg2l_gpio_irq_endisable(pctrl, hwirq, true);
++ else
++ __rzg2l_gpio_irq_endisable(pctrl, hwirq, true);
+ irq_chip_enable_parent(d);
+ }
+
++static void rzg2l_gpio_irq_enable(struct irq_data *d)
++{
++ __rzg2l_gpio_irq_enable(d, true);
++}
++
+ static int rzg2l_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+ {
+ return irq_chip_set_type_parent(d, type);
+@@ -2438,11 +2455,11 @@ static void rzg2l_gpio_irq_restore(struc
+ * This has to be atomically executed to protect against a concurrent
+ * interrupt.
+ */
+- spin_lock_irqsave(&pctrl->lock, flags);
++ raw_spin_lock_irqsave(&pctrl->lock, flags);
+ ret = rzg2l_gpio_irq_set_type(data, irqd_get_trigger_type(data));
+ if (!ret && !irqd_irq_disabled(data))
+- rzg2l_gpio_irq_enable(data);
+- spin_unlock_irqrestore(&pctrl->lock, flags);
++ __rzg2l_gpio_irq_enable(data, false);
++ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ if (ret)
+ dev_crit(pctrl->dev, "Failed to set IRQ type for virq=%u\n", virq);
+@@ -2765,7 +2782,7 @@ static int rzg2l_pinctrl_probe(struct pl
+ "failed to enable GPIO clk\n");
+ }
+
+- spin_lock_init(&pctrl->lock);
++ raw_spin_lock_init(&pctrl->lock);
+ spin_lock_init(&pctrl->bitmap_lock);
+ mutex_init(&pctrl->mutex);
+ atomic_set(&pctrl->wakeup_path, 0);
+@@ -2908,7 +2925,7 @@ static void rzg2l_pinctrl_pm_setup_pfc(s
+ u32 nports = pctrl->data->n_port_pins / RZG2L_PINS_PER_PORT;
+ unsigned long flags;
+
+- spin_lock_irqsave(&pctrl->lock, flags);
++ raw_spin_lock_irqsave(&pctrl->lock, flags);
+ pctrl->data->pwpr_pfc_lock_unlock(pctrl, false);
+
+ /* Restore port registers. */
+@@ -2953,7 +2970,7 @@ static void rzg2l_pinctrl_pm_setup_pfc(s
+ }
+
+ pctrl->data->pwpr_pfc_lock_unlock(pctrl, true);
+- spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ }
+
+ static int rzg2l_pinctrl_suspend_noirq(struct device *dev)
--- /dev/null
+From 72e24456a54fe04710d89626cc5a88703e2f6202 Mon Sep 17 00:00:00 2001
+From: Mario Limonciello <mario.limonciello@amd.com>
+Date: Tue, 9 Dec 2025 11:14:47 -0600
+Subject: Revert "drm/amd/display: Fix pbn to kbps Conversion"
+
+From: Mario Limonciello <mario.limonciello@amd.com>
+
+commit 72e24456a54fe04710d89626cc5a88703e2f6202 upstream.
+
+Deeply daisy-chained DP/MST displays are no longer able to light
+up. This reverts commit e0dec00f3d05 ("drm/amd/display: Fix pbn
+to kbps Conversion").
+
+Cc: Jerry Zuo <jerry.zuo@amd.com>
+Reported-by: nat@nullable.se
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/4756
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit e1c94109c76e8a77a21531bd53f6c63356c81158)
+Cc: stable@vger.kernel.org # 6.17+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 59 +++++++-----
+ 1 file changed, 36 insertions(+), 23 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -846,28 +846,26 @@ struct dsc_mst_fairness_params {
+ };
+
+ #if defined(CONFIG_DRM_AMD_DC_FP)
+-static uint64_t kbps_to_pbn(int kbps, bool is_peak_pbn)
++static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link)
+ {
+- uint64_t effective_kbps = (uint64_t)kbps;
++ u8 link_coding_cap;
++ uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B;
+
+- if (is_peak_pbn) { // add 0.6% (1006/1000) overhead into effective kbps
+- effective_kbps *= 1006;
+- effective_kbps = div_u64(effective_kbps, 1000);
+- }
++ link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link);
++ if (link_coding_cap == DP_128b_132b_ENCODING)
++ fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B;
+
+- return (uint64_t) DIV64_U64_ROUND_UP(effective_kbps * 64, (54 * 8 * 1000));
++ return fec_overhead_multiplier_x1000;
+ }
+
+-static uint32_t pbn_to_kbps(unsigned int pbn, bool with_margin)
++static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000)
+ {
+- uint64_t pbn_effective = (uint64_t)pbn;
+-
+- if (with_margin) // deduct 0.6% (994/1000) overhead from effective pbn
+- pbn_effective *= (1000000 / PEAK_FACTOR_X1000);
+- else
+- pbn_effective *= 1000;
++ u64 peak_kbps = kbps;
+
+- return DIV_U64_ROUND_UP(pbn_effective * 8 * 54, 64);
++ peak_kbps *= 1006;
++ peak_kbps *= fec_overhead_multiplier_x1000;
++ peak_kbps = div_u64(peak_kbps, 1000 * 1000);
++ return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
+ }
+
+ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,
+@@ -938,7 +936,7 @@ static int bpp_x16_from_pbn(struct dsc_m
+ dc_dsc_get_default_config_option(param.sink->ctx->dc, &dsc_options);
+ dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16;
+
+- kbps = pbn_to_kbps(pbn, false);
++ kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
+ dc_dsc_compute_config(
+ param.sink->ctx->dc->res_pool->dscs[0],
+ ¶m.sink->dsc_caps.dsc_dec_caps,
+@@ -967,11 +965,12 @@ static int increase_dsc_bpp(struct drm_a
+ int link_timeslots_used;
+ int fair_pbn_alloc;
+ int ret = 0;
++ uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
+
+ for (i = 0; i < count; i++) {
+ if (vars[i + k].dsc_enabled) {
+ initial_slack[i] =
+- kbps_to_pbn(params[i].bw_range.max_kbps, false) - vars[i + k].pbn;
++ kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn;
+ bpp_increased[i] = false;
+ remaining_to_increase += 1;
+ } else {
+@@ -1067,6 +1066,7 @@ static int try_disable_dsc(struct drm_at
+ int next_index;
+ int remaining_to_try = 0;
+ int ret;
++ uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
+ int var_pbn;
+
+ for (i = 0; i < count; i++) {
+@@ -1099,7 +1099,7 @@ static int try_disable_dsc(struct drm_at
+
+ DRM_DEBUG_DRIVER("MST_DSC index #%d, try no compression\n", next_index);
+ var_pbn = vars[next_index].pbn;
+- vars[next_index].pbn = kbps_to_pbn(params[next_index].bw_range.stream_kbps, true);
++ vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ ret = drm_dp_atomic_find_time_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+@@ -1159,6 +1159,7 @@ static int compute_mst_dsc_configs_for_l
+ int count = 0;
+ int i, k, ret;
+ bool debugfs_overwrite = false;
++ uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
+ struct drm_connector_state *new_conn_state;
+
+ memset(params, 0, sizeof(params));
+@@ -1239,7 +1240,7 @@ static int compute_mst_dsc_configs_for_l
+ DRM_DEBUG_DRIVER("MST_DSC Try no compression\n");
+ for (i = 0; i < count; i++) {
+ vars[i + k].aconnector = params[i].aconnector;
+- vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false);
++ vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ vars[i + k].dsc_enabled = false;
+ vars[i + k].bpp_x16 = 0;
+ ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
+@@ -1261,7 +1262,7 @@ static int compute_mst_dsc_configs_for_l
+ DRM_DEBUG_DRIVER("MST_DSC Try max compression\n");
+ for (i = 0; i < count; i++) {
+ if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
+- vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.min_kbps, false);
++ vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000);
+ vars[i + k].dsc_enabled = true;
+ vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
+ ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
+@@ -1269,7 +1270,7 @@ static int compute_mst_dsc_configs_for_l
+ if (ret < 0)
+ return ret;
+ } else {
+- vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false);
++ vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ vars[i + k].dsc_enabled = false;
+ vars[i + k].bpp_x16 = 0;
+ ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
+@@ -1721,6 +1722,18 @@ clean_exit:
+ return ret;
+ }
+
++static uint32_t kbps_from_pbn(unsigned int pbn)
++{
++ uint64_t kbps = (uint64_t)pbn;
++
++ kbps *= (1000000 / PEAK_FACTOR_X1000);
++ kbps *= 8;
++ kbps *= 54;
++ kbps /= 64;
++
++ return (uint32_t)kbps;
++}
++
+ static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
+ struct dc_dsc_bw_range *bw_range)
+ {
+@@ -1812,7 +1825,7 @@ enum dc_status dm_dp_mst_is_port_support
+ dc_link_get_highest_encoding_format(stream->link));
+ cur_link_settings = stream->link->verified_link_cap;
+ root_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, &cur_link_settings);
+- virtual_channel_bw_in_kbps = pbn_to_kbps(aconnector->mst_output_port->full_pbn, true);
++ virtual_channel_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);
+
+ /* pick the end to end bw bottleneck */
+ end_to_end_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
+@@ -1863,7 +1876,7 @@ enum dc_status dm_dp_mst_is_port_support
+ immediate_upstream_port = aconnector->mst_output_port->parent->port_parent;
+
+ if (immediate_upstream_port) {
+- virtual_channel_bw_in_kbps = pbn_to_kbps(immediate_upstream_port->full_pbn, true);
++ virtual_channel_bw_in_kbps = kbps_from_pbn(immediate_upstream_port->full_pbn);
+ virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
+ } else {
+ /* For topology LCT 1 case - only one mstb*/
--- /dev/null
+From 690e47d1403e90b7f2366f03b52ed3304194c793 Mon Sep 17 00:00:00 2001
+From: Harshit Agarwal <harshit@nutanix.com>
+Date: Tue, 25 Feb 2025 18:05:53 +0000
+Subject: sched/rt: Fix race in push_rt_task
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Harshit Agarwal <harshit@nutanix.com>
+
+commit 690e47d1403e90b7f2366f03b52ed3304194c793 upstream.
+
+Overview
+========
+When a CPU calls push_rt_task and picks a task to push to another
+CPU's runqueue, it calls find_lock_lowest_rq(), which takes a double
+lock on both CPUs' runqueues. If one of the locks isn't readily
+available, the current runqueue lock may be dropped and both locks
+reacquired together. During this window the task may already have
+been migrated and be running on some other CPU. These cases are
+already handled. However, if the task has been migrated, has already
+run, and another CPU is now trying to wake it up (ttwu) so that it is
+queued again on a runqueue (on_rq is 1), and if the task was last run
+by the same CPU, then the current checks will pass even though the
+task was migrated away and is no longer in the pushable tasks list.
+
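+In code terms, the window opens inside find_lock_lowest_rq(); a
+minimal sketch of the pattern, simplified from kernel/sched/rt.c with
+the retry loop and error handling omitted:
+
+    /* rq->lock held; task was taken from rq's pushable tasks list */
+    cpu = find_lowest_rq(task);
+    lowest_rq = cpu_rq(cpu);
+    if (double_lock_balance(rq, lowest_rq)) {
+            /*
+             * rq->lock was dropped and retaken to respect the lock
+             * ordering.  While it was dropped, the task may have run,
+             * migrated or been woken elsewhere, so it must be
+             * re-validated before being pushed to lowest_rq.
+             */
+    }
+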
+Crashes
+=======
+This bug resulted in quite a few flavors of crashes triggering kernel
+panics, with various crash signatures such as assertion failures, page
+faults, NULL pointer dereferences, and queue corruption errors, all
+coming from the scheduler itself.
+
+Some of the crashes:
+-> kernel BUG at kernel/sched/rt.c:1616! BUG_ON(idx >= MAX_RT_PRIO)
+ Call Trace:
+ ? __die_body+0x1a/0x60
+ ? die+0x2a/0x50
+ ? do_trap+0x85/0x100
+ ? pick_next_task_rt+0x6e/0x1d0
+ ? do_error_trap+0x64/0xa0
+ ? pick_next_task_rt+0x6e/0x1d0
+ ? exc_invalid_op+0x4c/0x60
+ ? pick_next_task_rt+0x6e/0x1d0
+ ? asm_exc_invalid_op+0x12/0x20
+ ? pick_next_task_rt+0x6e/0x1d0
+ __schedule+0x5cb/0x790
+ ? update_ts_time_stats+0x55/0x70
+ schedule_idle+0x1e/0x40
+ do_idle+0x15e/0x200
+ cpu_startup_entry+0x19/0x20
+ start_secondary+0x117/0x160
+ secondary_startup_64_no_verify+0xb0/0xbb
+
+-> BUG: kernel NULL pointer dereference, address: 00000000000000c0
+ Call Trace:
+ ? __die_body+0x1a/0x60
+ ? no_context+0x183/0x350
+ ? __warn+0x8a/0xe0
+ ? exc_page_fault+0x3d6/0x520
+ ? asm_exc_page_fault+0x1e/0x30
+ ? pick_next_task_rt+0xb5/0x1d0
+ ? pick_next_task_rt+0x8c/0x1d0
+ __schedule+0x583/0x7e0
+ ? update_ts_time_stats+0x55/0x70
+ schedule_idle+0x1e/0x40
+ do_idle+0x15e/0x200
+ cpu_startup_entry+0x19/0x20
+ start_secondary+0x117/0x160
+ secondary_startup_64_no_verify+0xb0/0xbb
+
+-> BUG: unable to handle page fault for address: ffff9464daea5900
+ kernel BUG at kernel/sched/rt.c:1861! BUG_ON(rq->cpu != task_cpu(p))
+
+-> kernel BUG at kernel/sched/rt.c:1055! BUG_ON(!rq->nr_running)
+ Call Trace:
+ ? __die_body+0x1a/0x60
+ ? die+0x2a/0x50
+ ? do_trap+0x85/0x100
+ ? dequeue_top_rt_rq+0xa2/0xb0
+ ? do_error_trap+0x64/0xa0
+ ? dequeue_top_rt_rq+0xa2/0xb0
+ ? exc_invalid_op+0x4c/0x60
+ ? dequeue_top_rt_rq+0xa2/0xb0
+ ? asm_exc_invalid_op+0x12/0x20
+ ? dequeue_top_rt_rq+0xa2/0xb0
+ dequeue_rt_entity+0x1f/0x70
+ dequeue_task_rt+0x2d/0x70
+ __schedule+0x1a8/0x7e0
+ ? blk_finish_plug+0x25/0x40
+ schedule+0x3c/0xb0
+ futex_wait_queue_me+0xb6/0x120
+ futex_wait+0xd9/0x240
+ do_futex+0x344/0xa90
+ ? get_mm_exe_file+0x30/0x60
+ ? audit_exe_compare+0x58/0x70
+ ? audit_filter_rules.constprop.26+0x65e/0x1220
+ __x64_sys_futex+0x148/0x1f0
+ do_syscall_64+0x30/0x80
+ entry_SYSCALL_64_after_hwframe+0x62/0xc7
+
+-> BUG: unable to handle page fault for address: ffff8cf3608bc2c0
+ Call Trace:
+ ? __die_body+0x1a/0x60
+ ? no_context+0x183/0x350
+ ? spurious_kernel_fault+0x171/0x1c0
+ ? exc_page_fault+0x3b6/0x520
+ ? plist_check_list+0x15/0x40
+ ? plist_check_list+0x2e/0x40
+ ? asm_exc_page_fault+0x1e/0x30
+ ? _cond_resched+0x15/0x30
+ ? futex_wait_queue_me+0xc8/0x120
+ ? futex_wait+0xd9/0x240
+ ? try_to_wake_up+0x1b8/0x490
+ ? futex_wake+0x78/0x160
+ ? do_futex+0xcd/0xa90
+ ? plist_check_list+0x15/0x40
+ ? plist_check_list+0x2e/0x40
+ ? plist_del+0x6a/0xd0
+ ? plist_check_list+0x15/0x40
+ ? plist_check_list+0x2e/0x40
+ ? dequeue_pushable_task+0x20/0x70
+ ? __schedule+0x382/0x7e0
+ ? asm_sysvec_reschedule_ipi+0xa/0x20
+ ? schedule+0x3c/0xb0
+ ? exit_to_user_mode_prepare+0x9e/0x150
+ ? irqentry_exit_to_user_mode+0x5/0x30
+ ? asm_sysvec_reschedule_ipi+0x12/0x20
+
+Above are some common examples of the crashes that were observed due to
+this issue.
+
+Details
+=======
+Let's look at the following scenario to understand this race.
+
+1) CPU A enters push_rt_task
+ a) CPU A has chosen next_task = task p.
+ b) CPU A calls find_lock_lowest_rq(Task p, CPU Z’s rq).
+ c) CPU A identifies CPU X as a destination CPU (X < Z).
+ d) CPU A enters double_lock_balance(CPU Z’s rq, CPU X’s rq).
+ e) Since X is lower than Z, CPU A unlocks CPU Z’s rq. Someone else has
+ locked CPU X’s rq, and thus, CPU A must wait.
+
+2) At CPU Z
+ a) The previous task has completed execution, so CPU Z enters
+ schedule and locks its own rq after CPU A releases it.
+ b) CPU Z dequeues the previous task and begins executing task p.
+ c) CPU Z unlocks its rq.
+ d) Task p yields the CPU (e.g. by doing IO or waiting to acquire a
+ lock), which triggers the schedule function on CPU Z.
+ e) CPU Z enters schedule again, locks its own rq, and dequeues task p.
+ f) As part of the dequeue, it sets p.on_rq = 0 and unlocks its rq.
+
+3) At CPU B
+ a) CPU B enters try_to_wake_up with input task p.
+ b) Since CPU Z dequeued task p, p.on_rq = 0, and CPU B updates
+ task p.state = WAKING.
+ c) CPU B via select_task_rq determines CPU Y as the target CPU.
+
+4) The race
+ a) CPU A acquires CPU X’s lock and relocks CPU Z.
+ b) CPU A reads task p.cpu = Z and incorrectly concludes task p is
+ still on CPU Z.
+ c) CPU A failed to notice that task p had been dequeued from CPU Z
+ while CPU A was waiting for the locks in double_lock_balance. If
+ CPU A knew that task p had been dequeued, it would return NULL,
+ forcing push_rt_task to give up task p's migration.
+ d) CPU B updates task p.cpu = Y and calls ttwu_queue.
+ e) CPU B locks CPU Y’s rq. CPU B enqueues task p onto Y and sets task
+ p.on_rq = 1.
+ f) CPU B unlocks CPU Y, triggering memory synchronization.
+ g) CPU A reads task p.on_rq = 1, cementing its assumption that task p
+ has not migrated.
+ h) CPU A decides to migrate p to CPU X.
+
+This leads to A dequeuing p from Y's queue and various crashes down the
+line.
+
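+To see why the race slips past the existing re-validation, here is a
+sketch of the current check (taken from the removed lines in the hunk
+below), annotated with why each test still passes at step 4:
+
+    if (unlikely(task_rq(task) != rq ||       /* p.cpu still reads Z (4b) */
+                 !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) ||
+                 task_on_cpu(rq, task) ||     /* p no longer runs on Z (2e-2f) */
+                 !rt_task(task) ||
+                 is_migration_disabled(task) ||
+                 !task_on_rq_queued(task))) { /* p.on_rq = 1 again, but on Y (4e-4g) */
+            /* bail-out path: never taken in this race, so the push proceeds */
+    }
+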
+Solution
+========
+The solution here is fairly simple. After obtaining the lock (at 4a),
+the check is enhanced to make sure that the task is still at the head
+of the pushable tasks list. If it is not, then it is in any case not a
+suitable candidate for being pushed out.
+
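+A sketch of the strengthened check (the full hunk is below). Comparing
+the task against the head of the pushable tasks list effectively
+subsumes the old task_rq, task_on_cpu, rt_task and task_on_rq_queued
+tests, because pick_next_pushable_task() only ever returns a queued,
+non-running RT task that belongs to this rq:
+
+    if (unlikely(is_migration_disabled(task) ||
+                 !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) ||
+                 task != pick_next_pushable_task(rq))) {
+            /* task moved on while rq->lock was dropped: give up */
+            double_unlock_balance(rq, lowest_rq);
+            lowest_rq = NULL;
+            break;
+    }
+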
+Testing
+=======
+The fix was tested on a cluster of 3 nodes, where the panics due to
+this issue were hit every couple of days. A fix similar to this one was
+deployed on such a cluster and remained stable for more than 30 days.
+
+Co-developed-by: Jon Kohler <jon@nutanix.com>
+Signed-off-by: Jon Kohler <jon@nutanix.com>
+Co-developed-by: Gauri Patwardhan <gauri.patwardhan@nutanix.com>
+Signed-off-by: Gauri Patwardhan <gauri.patwardhan@nutanix.com>
+Co-developed-by: Rahul Chunduru <rahul.chunduru@nutanix.com>
+Signed-off-by: Rahul Chunduru <rahul.chunduru@nutanix.com>
+Signed-off-by: Harshit Agarwal <harshit@nutanix.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: "Steven Rostedt (Google)" <rostedt@goodmis.org>
+Reviewed-by: Phil Auld <pauld@redhat.com>
+Tested-by: Will Ton <william.ton@nutanix.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20250225180553.167995-1-harshit@nutanix.com
+Signed-off-by: Rajani Kantha <681739313@139.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/rt.c | 52 +++++++++++++++++++++++++---------------------------
+ 1 file changed, 25 insertions(+), 27 deletions(-)
+
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -1895,6 +1895,26 @@ static int find_lowest_rq(struct task_st
+ return -1;
+ }
+
++static struct task_struct *pick_next_pushable_task(struct rq *rq)
++{
++ struct task_struct *p;
++
++ if (!has_pushable_tasks(rq))
++ return NULL;
++
++ p = plist_first_entry(&rq->rt.pushable_tasks,
++ struct task_struct, pushable_tasks);
++
++ BUG_ON(rq->cpu != task_cpu(p));
++ BUG_ON(task_current(rq, p));
++ BUG_ON(p->nr_cpus_allowed <= 1);
++
++ BUG_ON(!task_on_rq_queued(p));
++ BUG_ON(!rt_task(p));
++
++ return p;
++}
++
+ /* Will lock the rq it finds */
+ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
+ {
+@@ -1925,18 +1945,16 @@ static struct rq *find_lock_lowest_rq(st
+ /*
+ * We had to unlock the run queue. In
+ * the mean time, task could have
+- * migrated already or had its affinity changed.
+- * Also make sure that it wasn't scheduled on its rq.
++ * migrated already or had its affinity changed,
++ * therefore check if the task is still at the
++ * head of the pushable tasks list.
+ * It is possible the task was scheduled, set
+ * "migrate_disabled" and then got preempted, so we must
+ * check the task migration disable flag here too.
+ */
+- if (unlikely(task_rq(task) != rq ||
++ if (unlikely(is_migration_disabled(task) ||
+ !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) ||
+- task_on_cpu(rq, task) ||
+- !rt_task(task) ||
+- is_migration_disabled(task) ||
+- !task_on_rq_queued(task))) {
++ task != pick_next_pushable_task(rq))) {
+
+ double_unlock_balance(rq, lowest_rq);
+ lowest_rq = NULL;
+@@ -1956,26 +1974,6 @@ static struct rq *find_lock_lowest_rq(st
+ return lowest_rq;
+ }
+
+-static struct task_struct *pick_next_pushable_task(struct rq *rq)
+-{
+- struct task_struct *p;
+-
+- if (!has_pushable_tasks(rq))
+- return NULL;
+-
+- p = plist_first_entry(&rq->rt.pushable_tasks,
+- struct task_struct, pushable_tasks);
+-
+- BUG_ON(rq->cpu != task_cpu(p));
+- BUG_ON(task_current(rq, p));
+- BUG_ON(p->nr_cpus_allowed <= 1);
+-
+- BUG_ON(!task_on_rq_queued(p));
+- BUG_ON(!rt_task(p));
+-
+- return p;
+-}
+-
+ /*
+ * If the current CPU has more than one RT task, see if the non
+ * running task can migrate over to a CPU that is running a task
gpio-regmap-fix-memleak-in-error-path-in-gpio_regmap_register.patch
io_uring-poll-correctly-handle-io_poll_add-return-value-on-update.patch
io_uring-fix-min_wait-wakeups-for-sqpoll.patch
+revert-drm-amd-display-fix-pbn-to-kbps-conversion.patch
+drm-amd-display-use-gfp_atomic-in-dc_create_plane_state.patch
+drm-amd-display-fix-scratch-registers-offsets-for-dcn35.patch
+drm-amd-display-fix-scratch-registers-offsets-for-dcn351.patch
+drm-displayid-pass-iter-to-drm_find_displayid_extension.patch
+alsa-hda-cs35l41-fix-null-pointer-dereference-in-cs35l41_hda_read_acpi.patch
+alsa-wavefront-use-guard-for-spin-locks.patch
+alsa-wavefront-clear-substream-pointers-on-close.patch
+pinctrl-renesas-rzg2l-fix-isel-restore-on-resume.patch
+hsr-hold-rcu-and-dev-lock-for-hsr_get_port_ndev.patch
+sched-rt-fix-race-in-push_rt_task.patch
+kvm-arm64-initialize-hcr_el2.e2h-early.patch
+kvm-arm64-initialize-sctlr_el1-in-__kvm_hyp_init_cpu.patch
+arm64-revamp-hcr_el2.e2h-res1-detection.patch