--- /dev/null
+From ca0f79f0286046f6a91c099dc941cf7afae198d6 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Thu, 28 Nov 2024 08:26:45 +0100
+Subject: ALSA: hda/realtek: Apply quirk for Medion E15433
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit ca0f79f0286046f6a91c099dc941cf7afae198d6 upstream.
+
+Medion E15433 laptop with ALC269VC (SSID 2782:1705) needs the same
+workaround for the missing speaker as another model.
+
+Link: https://bugzilla.suse.com/show_bug.cgi?id=1233298
+Cc: <stable@vger.kernel.org>
+Link: https://patch.msgid.link/20241128072646.15659-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/pci/hda/patch_realtek.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -10425,6 +10425,7 @@ static const struct snd_pci_quirk alc269
+ SND_PCI_QUIRK(0x2782, 0x0228, "Infinix ZERO BOOK 13", ALC269VB_FIXUP_INFINIX_ZERO_BOOK_13),
+ SND_PCI_QUIRK(0x2782, 0x0232, "CHUWI CoreBook XPro", ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO),
+ SND_PCI_QUIRK(0x2782, 0x1701, "Infinix Y4 Max", ALC269VC_FIXUP_INFINIX_Y4_MAX),
++ SND_PCI_QUIRK(0x2782, 0x1705, "MEDION E15433", ALC269VC_FIXUP_INFINIX_Y4_MAX),
+ SND_PCI_QUIRK(0x2782, 0x1707, "Vaio VJFE-ADL", ALC298_FIXUP_SPK_VOLUME),
+ SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC),
+ SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED),
--- /dev/null
+From 5ebe792a5139f1ce6e4aed22bef12e7e2660df96 Mon Sep 17 00:00:00 2001
+From: Dinesh Kumar <desikumar81@gmail.com>
+Date: Mon, 25 Nov 2024 14:58:42 +0530
+Subject: ALSA: hda/realtek: Fix Internal Speaker and Mic boost of Infinix Y4 Max
+
+From: Dinesh Kumar <desikumar81@gmail.com>
+
+commit 5ebe792a5139f1ce6e4aed22bef12e7e2660df96 upstream.
+
+Internal Speaker of Infinix Y4 Max remains muted due to incorrect
+Pin configuration, and the Internal Mic records high noise. This patch
+corrects the Pin configuration for the Internal Speaker and limits
+the Internal Mic boost.
+HW Probe for device: https://linux-hardware.org/?probe=6d4386c347
+Test: Internal Speaker works fine, Mic has low noise.
+
+Signed-off-by: Dinesh Kumar <desikumar81@gmail.com>
+Cc: <stable@vger.kernel.org>
+Link: https://patch.msgid.link/20241125092842.13208-1-desikumar81@gmail.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/pci/hda/patch_realtek.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -7255,6 +7255,7 @@ enum {
+ ALC269_FIXUP_THINKPAD_ACPI,
+ ALC269_FIXUP_DMIC_THINKPAD_ACPI,
+ ALC269VB_FIXUP_INFINIX_ZERO_BOOK_13,
++ ALC269VC_FIXUP_INFINIX_Y4_MAX,
+ ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO,
+ ALC255_FIXUP_ACER_MIC_NO_PRESENCE,
+ ALC255_FIXUP_ASUS_MIC_NO_PRESENCE,
+@@ -7644,6 +7645,15 @@ static const struct hda_fixup alc269_fix
+ .chained = true,
+ .chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST
+ },
++ [ALC269VC_FIXUP_INFINIX_Y4_MAX] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x1b, 0x90170150 }, /* use as internal speaker */
++ { }
++ },
++ .chained = true,
++ .chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST
++ },
+ [ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+@@ -10414,6 +10424,7 @@ static const struct snd_pci_quirk alc269
+ SND_PCI_QUIRK(0x2782, 0x0214, "VAIO VJFE-CL", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x2782, 0x0228, "Infinix ZERO BOOK 13", ALC269VB_FIXUP_INFINIX_ZERO_BOOK_13),
+ SND_PCI_QUIRK(0x2782, 0x0232, "CHUWI CoreBook XPro", ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO),
++ SND_PCI_QUIRK(0x2782, 0x1701, "Infinix Y4 Max", ALC269VC_FIXUP_INFINIX_Y4_MAX),
+ SND_PCI_QUIRK(0x2782, 0x1707, "Vaio VJFE-ADL", ALC298_FIXUP_SPK_VOLUME),
+ SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC),
+ SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED),
--- /dev/null
+From 155699ccab7c78cbba69798242b68bc8ac66d5d2 Mon Sep 17 00:00:00 2001
+From: Kailang Yang <kailang@realtek.com>
+Date: Thu, 21 Nov 2024 16:16:26 +0800
+Subject: ALSA: hda/realtek: Set PCBeep to default value for ALC274
+
+From: Kailang Yang <kailang@realtek.com>
+
+commit 155699ccab7c78cbba69798242b68bc8ac66d5d2 upstream.
+
+BIOS Enable PC beep path cause pop noise via speaker during boot time.
+Set to default value from driver will solve the issue.
+
+Signed-off-by: Kailang Yang <kailang@realtek.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/2721bb57e20a44c3826c473e933f9105@realtek.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/pci/hda/patch_realtek.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -471,6 +471,8 @@ static void alc_fill_eapd_coef(struct hd
+ break;
+ case 0x10ec0234:
+ case 0x10ec0274:
++ alc_write_coef_idx(codec, 0x6e, 0x0c25);
++ fallthrough;
+ case 0x10ec0294:
+ case 0x10ec0700:
+ case 0x10ec0701:
--- /dev/null
+From 1fd50509fe14a9adc9329e0454b986157a4c155a Mon Sep 17 00:00:00 2001
+From: Kailang Yang <kailang@realtek.com>
+Date: Thu, 14 Nov 2024 15:08:07 +0800
+Subject: ALSA: hda/realtek: Update ALC225 depop procedure
+
+From: Kailang Yang <kailang@realtek.com>
+
+commit 1fd50509fe14a9adc9329e0454b986157a4c155a upstream.
+
+Old procedure has a chance to meet Headphone no output.
+
+Fixes: da911b1f5e98 ("ALSA: hda/realtek - update ALC225 depop optimize")
+Signed-off-by: Kailang Yang <kailang@realtek.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/5a27b016ba9d42b4a4e6dadce50a3ba4@realtek.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/pci/hda/patch_realtek.c | 95 +++++++++++++++++++-----------------------
+ 1 file changed, 43 insertions(+), 52 deletions(-)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -3757,33 +3757,28 @@ static void alc225_init(struct hda_codec
+ hp1_pin_sense = snd_hda_jack_detect(codec, hp_pin);
+ hp2_pin_sense = snd_hda_jack_detect(codec, 0x16);
+
+- if (hp1_pin_sense || hp2_pin_sense)
++ if (hp1_pin_sense || hp2_pin_sense) {
+ msleep(2);
++ alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x1); /* Low power */
+
+- alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x1); /* Low power */
++ if (hp1_pin_sense)
++ snd_hda_codec_write(codec, hp_pin, 0,
++ AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
++ if (hp2_pin_sense)
++ snd_hda_codec_write(codec, 0x16, 0,
++ AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
++ msleep(75);
++
++ if (hp1_pin_sense)
++ snd_hda_codec_write(codec, hp_pin, 0,
++ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
++ if (hp2_pin_sense)
++ snd_hda_codec_write(codec, 0x16, 0,
++ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
+
+- if (hp1_pin_sense || spec->ultra_low_power)
+- snd_hda_codec_write(codec, hp_pin, 0,
+- AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+- if (hp2_pin_sense)
+- snd_hda_codec_write(codec, 0x16, 0,
+- AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+-
+- if (hp1_pin_sense || hp2_pin_sense || spec->ultra_low_power)
+- msleep(85);
+-
+- if (hp1_pin_sense || spec->ultra_low_power)
+- snd_hda_codec_write(codec, hp_pin, 0,
+- AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+- if (hp2_pin_sense)
+- snd_hda_codec_write(codec, 0x16, 0,
+- AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+-
+- if (hp1_pin_sense || hp2_pin_sense || spec->ultra_low_power)
+- msleep(100);
+-
+- alc_update_coef_idx(codec, 0x4a, 3 << 10, 0);
+- alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x4); /* Hight power */
++ msleep(75);
++ alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x4); /* Hight power */
++ }
+ }
+
+ static void alc225_shutup(struct hda_codec *codec)
+@@ -3795,36 +3790,35 @@ static void alc225_shutup(struct hda_cod
+ if (!hp_pin)
+ hp_pin = 0x21;
+
+- alc_disable_headset_jack_key(codec);
+- /* 3k pull low control for Headset jack. */
+- alc_update_coef_idx(codec, 0x4a, 0, 3 << 10);
+-
+ hp1_pin_sense = snd_hda_jack_detect(codec, hp_pin);
+ hp2_pin_sense = snd_hda_jack_detect(codec, 0x16);
+
+- if (hp1_pin_sense || hp2_pin_sense)
++ if (hp1_pin_sense || hp2_pin_sense) {
++ alc_disable_headset_jack_key(codec);
++ /* 3k pull low control for Headset jack. */
++ alc_update_coef_idx(codec, 0x4a, 0, 3 << 10);
+ msleep(2);
+
+- if (hp1_pin_sense || spec->ultra_low_power)
+- snd_hda_codec_write(codec, hp_pin, 0,
+- AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+- if (hp2_pin_sense)
+- snd_hda_codec_write(codec, 0x16, 0,
+- AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+-
+- if (hp1_pin_sense || hp2_pin_sense || spec->ultra_low_power)
+- msleep(85);
+-
+- if (hp1_pin_sense || spec->ultra_low_power)
+- snd_hda_codec_write(codec, hp_pin, 0,
+- AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+- if (hp2_pin_sense)
+- snd_hda_codec_write(codec, 0x16, 0,
+- AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+-
+- if (hp1_pin_sense || hp2_pin_sense || spec->ultra_low_power)
+- msleep(100);
+-
++ if (hp1_pin_sense)
++ snd_hda_codec_write(codec, hp_pin, 0,
++ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
++ if (hp2_pin_sense)
++ snd_hda_codec_write(codec, 0x16, 0,
++ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
++
++ msleep(75);
++
++ if (hp1_pin_sense)
++ snd_hda_codec_write(codec, hp_pin, 0,
++ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
++ if (hp2_pin_sense)
++ snd_hda_codec_write(codec, 0x16, 0,
++ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
++
++ msleep(75);
++ alc_update_coef_idx(codec, 0x4a, 3 << 10, 0);
++ alc_enable_headset_jack_key(codec);
++ }
+ alc_auto_setup_eapd(codec, false);
+ alc_shutup_pins(codec);
+ if (spec->ultra_low_power) {
+@@ -3835,9 +3829,6 @@ static void alc225_shutup(struct hda_cod
+ alc_update_coef_idx(codec, 0x4a, 3<<4, 2<<4);
+ msleep(30);
+ }
+-
+- alc_update_coef_idx(codec, 0x4a, 3 << 10, 0);
+- alc_enable_headset_jack_key(codec);
+ }
+
+ static void alc_default_init(struct hda_codec *codec)
--- /dev/null
+From d2913a07d9037fe7aed4b7e680684163eaed6bc4 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Wed, 20 Nov 2024 15:11:02 +0100
+Subject: ALSA: pcm: Add sanity NULL check for the default mmap fault handler
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit d2913a07d9037fe7aed4b7e680684163eaed6bc4 upstream.
+
+A driver might allow the mmap access before initializing its
+runtime->dma_area properly. Add a proper NULL check before passing to
+virt_to_page() for avoiding a panic.
+
+Reported-by: syzbot+4bf62a7b1d0f4fdb7ae2@syzkaller.appspotmail.com
+Cc: <stable@vger.kernel.org>
+Link: https://patch.msgid.link/20241120141104.7060-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/core/pcm_native.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -3794,9 +3794,11 @@ static vm_fault_t snd_pcm_mmap_data_faul
+ return VM_FAULT_SIGBUS;
+ if (substream->ops->page)
+ page = substream->ops->page(substream, offset);
+- else if (!snd_pcm_get_dma_buf(substream))
++ else if (!snd_pcm_get_dma_buf(substream)) {
++ if (WARN_ON_ONCE(!runtime->dma_area))
++ return VM_FAULT_SIGBUS;
+ page = virt_to_page(runtime->dma_area + offset);
+- else
++ } else
+ page = snd_sgbuf_get_page(snd_pcm_get_dma_buf(substream), offset);
+ if (!page)
+ return VM_FAULT_SIGBUS;
--- /dev/null
+From 7be34f6feedd60e418de1c2c48e661d70416635f Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Wed, 27 Nov 2024 08:00:58 +0100
+Subject: ALSA: ump: Fix evaluation of MIDI 1.0 FB info
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 7be34f6feedd60e418de1c2c48e661d70416635f upstream.
+
+The m1.0 field of UMP Function Block info specifies whether the given
+FB is a MIDI 1.0 port or not. When implementing the UMP support on
+Linux, I somehow interpreted as if it were bit flags, but the field is
+actually an enumeration from 0 to 2, where 2 means MIDI 1.0 *and* low
+speed.
+
+This patch corrects the interpretation and sets the right bit flags
+depending on the m1.0 field of FB Info. This effectively fixes the
+missing detection of MIDI 1.0 FB when m1.0 is 2.
+
+Fixes: 37e0e14128e0 ("ALSA: ump: Support UMP Endpoint and Function Block parsing")
+Cc: <stable@vger.kernel.org>
+Link: https://patch.msgid.link/20241127070059.8099-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/core/ump.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/sound/core/ump.c
++++ b/sound/core/ump.c
+@@ -724,7 +724,10 @@ static void fill_fb_info(struct snd_ump_
+ info->ui_hint = buf->fb_info.ui_hint;
+ info->first_group = buf->fb_info.first_group;
+ info->num_groups = buf->fb_info.num_groups;
+- info->flags = buf->fb_info.midi_10;
++ if (buf->fb_info.midi_10 < 2)
++ info->flags = buf->fb_info.midi_10;
++ else
++ info->flags = SNDRV_UMP_BLOCK_IS_MIDI1 | SNDRV_UMP_BLOCK_IS_LOWSPEED;
+ info->active = buf->fb_info.active;
+ info->midi_ci_version = buf->fb_info.midi_ci_version;
+ info->sysex8_streams = buf->fb_info.sysex8_streams;
--- /dev/null
+From 67ab51cbdfee02ef07fb9d7d14cc0bf6cb5a5e5c Mon Sep 17 00:00:00 2001
+From: Will Deacon <will@kernel.org>
+Date: Thu, 14 Nov 2024 09:53:32 +0000
+Subject: arm64: tls: Fix context-switching of tpidrro_el0 when kpti is enabled
+
+From: Will Deacon <will@kernel.org>
+
+commit 67ab51cbdfee02ef07fb9d7d14cc0bf6cb5a5e5c upstream.
+
+Commit 18011eac28c7 ("arm64: tls: Avoid unconditional zeroing of
+tpidrro_el0 for native tasks") tried to optimise the context switching
+of tpidrro_el0 by eliding the clearing of the register when switching
+to a native task with kpti enabled, on the erroneous assumption that
+the kpti trampoline entry code would already have taken care of the
+write.
+
+Although the kpti trampoline does zero the register on entry from a
+native task, the check in tls_thread_switch() is on the *next* task and
+so we can end up leaving a stale, non-zero value in the register if the
+previous task was 32-bit.
+
+Drop the broken optimisation and zero tpidrro_el0 unconditionally when
+switching to a native 64-bit task.
+
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: stable@vger.kernel.org
+Fixes: 18011eac28c7 ("arm64: tls: Avoid unconditional zeroing of tpidrro_el0 for native tasks")
+Signed-off-by: Will Deacon <will@kernel.org>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Link: https://lore.kernel.org/r/20241114095332.23391-1-will@kernel.org
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/process.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -429,7 +429,7 @@ static void tls_thread_switch(struct tas
+
+ if (is_compat_thread(task_thread_info(next)))
+ write_sysreg(next->thread.uw.tp_value, tpidrro_el0);
+- else if (!arm64_kernel_unmapped_at_el0())
++ else
+ write_sysreg(0, tpidrro_el0);
+
+ write_sysreg(*task_user_tls(next), tpidr_el0);
--- /dev/null
+From ccd9e252c515ac5a3ed04a414c95d1307d17f159 Mon Sep 17 00:00:00 2001
+From: Bart Van Assche <bvanassche@acm.org>
+Date: Tue, 22 Oct 2024 11:16:17 -0700
+Subject: blk-mq: Make blk_mq_quiesce_tagset() hold the tag list mutex less long
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+commit ccd9e252c515ac5a3ed04a414c95d1307d17f159 upstream.
+
+Make sure that the tag_list_lock mutex is not held any longer than
+necessary. This change reduces latency if e.g. blk_mq_quiesce_tagset()
+is called concurrently from more than one thread. This function is used
+by the NVMe core and also by the UFS driver.
+
+Reported-by: Peter Wang <peter.wang@mediatek.com>
+Cc: Chao Leng <lengchao@huawei.com>
+Cc: Ming Lei <ming.lei@redhat.com>
+Cc: stable@vger.kernel.org
+Fixes: 414dd48e882c ("blk-mq: add tagset quiesce interface")
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Reviewed-by: Keith Busch <kbusch@kernel.org>
+Link: https://lore.kernel.org/r/20241022181617.2716173-1-bvanassche@acm.org
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/blk-mq.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -283,8 +283,9 @@ void blk_mq_quiesce_tagset(struct blk_mq
+ if (!blk_queue_skip_tagset_quiesce(q))
+ blk_mq_quiesce_queue_nowait(q);
+ }
+- blk_mq_wait_quiesce_done(set);
+ mutex_unlock(&set->tag_list_lock);
++
++ blk_mq_wait_quiesce_done(set);
+ }
+ EXPORT_SYMBOL_GPL(blk_mq_quiesce_tagset);
+
--- /dev/null
+From 2003ee8a9aa14d766b06088156978d53c2e9be3d Mon Sep 17 00:00:00 2001
+From: Muchun Song <songmuchun@bytedance.com>
+Date: Mon, 14 Oct 2024 17:29:32 +0800
+Subject: block: fix missing dispatching request when queue is started or unquiesced
+
+From: Muchun Song <songmuchun@bytedance.com>
+
+commit 2003ee8a9aa14d766b06088156978d53c2e9be3d upstream.
+
+Supposing the following scenario with a virtio_blk driver.
+
+CPU0 CPU1 CPU2
+
+blk_mq_try_issue_directly()
+ __blk_mq_issue_directly()
+ q->mq_ops->queue_rq()
+ virtio_queue_rq()
+ blk_mq_stop_hw_queue()
+ virtblk_done()
+ blk_mq_try_issue_directly()
+ if (blk_mq_hctx_stopped())
+ blk_mq_request_bypass_insert() blk_mq_run_hw_queue()
+ blk_mq_run_hw_queue() blk_mq_run_hw_queue()
+ blk_mq_insert_request()
+ return
+
+After CPU0 has marked the queue as stopped, CPU1 will see the queue is
+stopped. But before CPU1 puts the request on the dispatch list, CPU2
+receives the interrupt of completion of request, so it will run the
+hardware queue and marks the queue as non-stopped. Meanwhile, CPU1 also
+runs the same hardware queue. After both CPU1 and CPU2 complete
+blk_mq_run_hw_queue(), CPU1 just puts the request to the same hardware
+queue and returns. It misses dispatching a request. Fix it by running
+the hardware queue explicitly. And blk_mq_request_issue_directly()
+should handle a similar situation. Fix it as well.
+
+Fixes: d964f04a8fde ("blk-mq: fix direct issue")
+Cc: stable@vger.kernel.org
+Cc: Muchun Song <muchun.song@linux.dev>
+Signed-off-by: Muchun Song <songmuchun@bytedance.com>
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Link: https://lore.kernel.org/r/20241014092934.53630-2-songmuchun@bytedance.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/blk-mq.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -2668,6 +2668,7 @@ static void blk_mq_try_issue_directly(st
+
+ if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
+ blk_mq_insert_request(rq, 0);
++ blk_mq_run_hw_queue(hctx, false);
+ return;
+ }
+
+@@ -2698,6 +2699,7 @@ static blk_status_t blk_mq_request_issue
+
+ if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
+ blk_mq_insert_request(rq, 0);
++ blk_mq_run_hw_queue(hctx, false);
+ return BLK_STS_OK;
+ }
+
--- /dev/null
+From 96a9fe64bfd486ebeeacf1e6011801ffe89dae18 Mon Sep 17 00:00:00 2001
+From: Muchun Song <songmuchun@bytedance.com>
+Date: Mon, 14 Oct 2024 17:29:34 +0800
+Subject: block: fix ordering between checking BLK_MQ_S_STOPPED request adding
+
+From: Muchun Song <songmuchun@bytedance.com>
+
+commit 96a9fe64bfd486ebeeacf1e6011801ffe89dae18 upstream.
+
+Supposing first scenario with a virtio_blk driver.
+
+CPU0 CPU1
+
+blk_mq_try_issue_directly()
+ __blk_mq_issue_directly()
+ q->mq_ops->queue_rq()
+ virtio_queue_rq()
+ blk_mq_stop_hw_queue()
+ virtblk_done()
+ blk_mq_request_bypass_insert() 1) store
+ blk_mq_start_stopped_hw_queue()
+ clear_bit(BLK_MQ_S_STOPPED) 3) store
+ blk_mq_run_hw_queue()
+ if (!blk_mq_hctx_has_pending()) 4) load
+ return
+ blk_mq_sched_dispatch_requests()
+ blk_mq_run_hw_queue()
+ if (!blk_mq_hctx_has_pending())
+ return
+ blk_mq_sched_dispatch_requests()
+ if (blk_mq_hctx_stopped()) 2) load
+ return
+ __blk_mq_sched_dispatch_requests()
+
+Supposing another scenario.
+
+CPU0 CPU1
+
+blk_mq_requeue_work()
+ blk_mq_insert_request() 1) store
+ virtblk_done()
+ blk_mq_start_stopped_hw_queue()
+ blk_mq_run_hw_queues() clear_bit(BLK_MQ_S_STOPPED) 3) store
+ blk_mq_run_hw_queue()
+ if (!blk_mq_hctx_has_pending()) 4) load
+ return
+ blk_mq_sched_dispatch_requests()
+ if (blk_mq_hctx_stopped()) 2) load
+ continue
+ blk_mq_run_hw_queue()
+
+Both scenarios are similar, the full memory barrier should be inserted
+between 1) and 2), as well as between 3) and 4) to make sure that either
+CPU0 sees BLK_MQ_S_STOPPED is cleared or CPU1 sees dispatch list.
+Otherwise, either CPU will not rerun the hardware queue causing
+starvation of the request.
+
+The easy way to fix it is to add the essential full memory barrier into
+helper of blk_mq_hctx_stopped(). In order to not affect the fast path
+(hardware queue is not stopped most of the time), we only insert the
+barrier into the slow path. Actually, only slow path needs to care about
+missing of dispatching the request to the low-level device driver.
+
+Fixes: 320ae51feed5 ("blk-mq: new multi-queue block IO queueing mechanism")
+Cc: stable@vger.kernel.org
+Cc: Muchun Song <muchun.song@linux.dev>
+Signed-off-by: Muchun Song <songmuchun@bytedance.com>
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Link: https://lore.kernel.org/r/20241014092934.53630-4-songmuchun@bytedance.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/blk-mq.c | 6 ++++++
+ block/blk-mq.h | 13 +++++++++++++
+ 2 files changed, 19 insertions(+)
+
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -2463,6 +2463,12 @@ void blk_mq_start_stopped_hw_queue(struc
+ return;
+
+ clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
++ /*
++ * Pairs with the smp_mb() in blk_mq_hctx_stopped() to order the
++ * clearing of BLK_MQ_S_STOPPED above and the checking of dispatch
++ * list in the subsequent routine.
++ */
++ smp_mb__after_atomic();
+ blk_mq_run_hw_queue(hctx, async);
+ }
+ EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
+--- a/block/blk-mq.h
++++ b/block/blk-mq.h
+@@ -228,6 +228,19 @@ static inline struct blk_mq_tags *blk_mq
+
+ static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
+ {
++ /* Fast path: hardware queue is not stopped most of the time. */
++ if (likely(!test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
++ return false;
++
++ /*
++ * This barrier is used to order adding of dispatch list before and
++ * the test of BLK_MQ_S_STOPPED below. Pairs with the memory barrier
++ * in blk_mq_start_stopped_hw_queue() so that dispatch code could
++ * either see BLK_MQ_S_STOPPED is cleared or dispatch list is not
++ * empty to avoid missing dispatching requests.
++ */
++ smp_mb();
++
+ return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
+ }
+
--- /dev/null
+From 6bda857bcbb86fb9d0e54fbef93a093d51172acc Mon Sep 17 00:00:00 2001
+From: Muchun Song <songmuchun@bytedance.com>
+Date: Mon, 14 Oct 2024 17:29:33 +0800
+Subject: block: fix ordering between checking QUEUE_FLAG_QUIESCED request adding
+
+From: Muchun Song <songmuchun@bytedance.com>
+
+commit 6bda857bcbb86fb9d0e54fbef93a093d51172acc upstream.
+
+Supposing the following scenario.
+
+CPU0 CPU1
+
+blk_mq_insert_request() 1) store
+ blk_mq_unquiesce_queue()
+ blk_queue_flag_clear() 3) store
+ blk_mq_run_hw_queues()
+ blk_mq_run_hw_queue()
+ if (!blk_mq_hctx_has_pending()) 4) load
+ return
+blk_mq_run_hw_queue()
+ if (blk_queue_quiesced()) 2) load
+ return
+ blk_mq_sched_dispatch_requests()
+
+The full memory barrier should be inserted between 1) and 2), as well as
+between 3) and 4) to make sure that either CPU0 sees QUEUE_FLAG_QUIESCED
+is cleared or CPU1 sees dispatch list or setting of bitmap of software
+queue. Otherwise, either CPU will not rerun the hardware queue causing
+starvation.
+
+So the first solution is to 1) add a pair of memory barrier to fix the
+problem, another solution is to 2) use hctx->queue->queue_lock to
+synchronize QUEUE_FLAG_QUIESCED. Here, we chose 2) to fix it since
+memory barrier is not easy to be maintained.
+
+Fixes: f4560ffe8cec ("blk-mq: use QUEUE_FLAG_QUIESCED to quiesce queue")
+Cc: stable@vger.kernel.org
+Cc: Muchun Song <muchun.song@linux.dev>
+Signed-off-by: Muchun Song <songmuchun@bytedance.com>
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Link: https://lore.kernel.org/r/20241014092934.53630-3-songmuchun@bytedance.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/blk-mq.c | 49 +++++++++++++++++++++++++++++++++++--------------
+ 1 file changed, 35 insertions(+), 14 deletions(-)
+
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -2252,6 +2252,24 @@ void blk_mq_delay_run_hw_queue(struct bl
+ }
+ EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
+
++static inline bool blk_mq_hw_queue_need_run(struct blk_mq_hw_ctx *hctx)
++{
++ bool need_run;
++
++ /*
++ * When queue is quiesced, we may be switching io scheduler, or
++ * updating nr_hw_queues, or other things, and we can't run queue
++ * any more, even blk_mq_hctx_has_pending() can't be called safely.
++ *
++ * And queue will be rerun in blk_mq_unquiesce_queue() if it is
++ * quiesced.
++ */
++ __blk_mq_run_dispatch_ops(hctx->queue, false,
++ need_run = !blk_queue_quiesced(hctx->queue) &&
++ blk_mq_hctx_has_pending(hctx));
++ return need_run;
++}
++
+ /**
+ * blk_mq_run_hw_queue - Start to run a hardware queue.
+ * @hctx: Pointer to the hardware queue to run.
+@@ -2272,20 +2290,23 @@ void blk_mq_run_hw_queue(struct blk_mq_h
+
+ might_sleep_if(!async && hctx->flags & BLK_MQ_F_BLOCKING);
+
+- /*
+- * When queue is quiesced, we may be switching io scheduler, or
+- * updating nr_hw_queues, or other things, and we can't run queue
+- * any more, even __blk_mq_hctx_has_pending() can't be called safely.
+- *
+- * And queue will be rerun in blk_mq_unquiesce_queue() if it is
+- * quiesced.
+- */
+- __blk_mq_run_dispatch_ops(hctx->queue, false,
+- need_run = !blk_queue_quiesced(hctx->queue) &&
+- blk_mq_hctx_has_pending(hctx));
+-
+- if (!need_run)
+- return;
++ need_run = blk_mq_hw_queue_need_run(hctx);
++ if (!need_run) {
++ unsigned long flags;
++
++ /*
++ * Synchronize with blk_mq_unquiesce_queue(), because we check
++ * if hw queue is quiesced locklessly above, we need the use
++ * ->queue_lock to make sure we see the up-to-date status to
++ * not miss rerunning the hw queue.
++ */
++ spin_lock_irqsave(&hctx->queue->queue_lock, flags);
++ need_run = blk_mq_hw_queue_need_run(hctx);
++ spin_unlock_irqrestore(&hctx->queue->queue_lock, flags);
++
++ if (!need_run)
++ return;
++ }
+
+ if (async || !cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
+ blk_mq_delay_run_hw_queue(hctx, 0);
--- /dev/null
+From 12b3642b6c242061d3ba84e6e3050c3141ded14c Mon Sep 17 00:00:00 2001
+From: Michal Simek <michal.simek@amd.com>
+Date: Mon, 16 Sep 2024 11:53:06 +0200
+Subject: dt-bindings: serial: rs485: Fix rs485-rts-delay property
+
+From: Michal Simek <michal.simek@amd.com>
+
+commit 12b3642b6c242061d3ba84e6e3050c3141ded14c upstream.
+
+Code expects array only with 2 items which should be checked.
+But also item checking is not working as it should likely because of
+incorrect items description.
+
+Fixes: d50f974c4f7f ("dt-bindings: serial: Convert rs485 bindings to json-schema")
+Signed-off-by: Michal Simek <michal.simek@amd.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Krzysztof Kozlowski <krzk@kernel.org>
+Link: https://lore.kernel.org/r/820c639b9e22fe037730ed44d1b044cdb6d28b75.1726480384.git.michal.simek@amd.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/devicetree/bindings/serial/rs485.yaml | 19 +++++++++----------
+ 1 file changed, 9 insertions(+), 10 deletions(-)
+
+--- a/Documentation/devicetree/bindings/serial/rs485.yaml
++++ b/Documentation/devicetree/bindings/serial/rs485.yaml
+@@ -18,16 +18,15 @@ properties:
+ description: prop-encoded-array <a b>
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ items:
+- items:
+- - description: Delay between rts signal and beginning of data sent in
+- milliseconds. It corresponds to the delay before sending data.
+- default: 0
+- maximum: 100
+- - description: Delay between end of data sent and rts signal in milliseconds.
+- It corresponds to the delay after sending data and actual release
+- of the line.
+- default: 0
+- maximum: 100
++ - description: Delay between rts signal and beginning of data sent in
++ milliseconds. It corresponds to the delay before sending data.
++ default: 0
++ maximum: 100
++ - description: Delay between end of data sent and rts signal in milliseconds.
++ It corresponds to the delay after sending data and actual release
++ of the line.
++ default: 0
++ maximum: 100
+
+ rs485-rts-active-high:
+ description: drive RTS high when sending (this is the default).
--- /dev/null
+From 49a397ad24ee5e2c53a59dada2780d7e71bd3f77 Mon Sep 17 00:00:00 2001
+From: Jason Gerecke <jason.gerecke@wacom.com>
+Date: Mon, 28 Oct 2024 10:39:14 -0700
+Subject: HID: wacom: Interpret tilt data from Intuos Pro BT as signed values
+
+From: Jason Gerecke <jason.gerecke@wacom.com>
+
+commit 49a397ad24ee5e2c53a59dada2780d7e71bd3f77 upstream.
+
+The tilt data contained in the Bluetooth packets of an Intuos Pro are
+supposed to be interpreted as signed values. Simply casting the values
+to type `char` is not guaranteed to work since it is implementation-
+defined whether it is signed or unsigned. At least one user has noticed
+the data being reported incorrectly on their system. To ensure that the
+data is interpreted properly, we specifically cast to `signed char`
+instead.
+
+Link: https://github.com/linuxwacom/input-wacom/issues/445
+Fixes: 4922cd26f03c ("HID: wacom: Support 2nd-gen Intuos Pro's Bluetooth classic interface")
+CC: stable@vger.kernel.org # 4.11+
+Signed-off-by: Jason Gerecke <jason.gerecke@wacom.com>
+Signed-off-by: Jiri Kosina <jkosina@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hid/wacom_wac.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -1399,9 +1399,9 @@ static void wacom_intuos_pro2_bt_pen(str
+ rotation -= 1800;
+
+ input_report_abs(pen_input, ABS_TILT_X,
+- (char)frame[7]);
++ (signed char)frame[7]);
+ input_report_abs(pen_input, ABS_TILT_Y,
+- (char)frame[8]);
++ (signed char)frame[8]);
+ input_report_abs(pen_input, ABS_Z, rotation);
+ input_report_abs(pen_input, ABS_WHEEL,
+ get_unaligned_le16(&frame[11]));
--- /dev/null
+From e2fb2f89faf87b681038475d093214f4cbe12ebb Mon Sep 17 00:00:00 2001
+From: Zicheng Qu <quzicheng@huawei.com>
+Date: Thu, 31 Oct 2024 01:45:05 +0000
+Subject: iio: gts: Fix uninitialized symbol 'ret'
+
+From: Zicheng Qu <quzicheng@huawei.com>
+
+commit e2fb2f89faf87b681038475d093214f4cbe12ebb upstream.
+
+Initialize the variable ret at the time of declaration to prevent it from
+being returned without a defined value. Fixes smatch warning:
+drivers/iio/industrialio-gts-helper.c:256 gain_to_scaletables() error:
+uninitialized symbol 'ret'.
+
+Cc: stable@vger.kernel.org # v6.6+
+Fixes: 38416c28e168 ("iio: light: Add gain-time-scale helpers")
+Signed-off-by: Zicheng Qu <quzicheng@huawei.com>
+Reviewed-by: Matti Vaittinen <mazziesaccount@gmail.com>
+Link: https://patch.msgid.link/20241031014505.2313035-1-quzicheng@huawei.com
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/industrialio-gts-helper.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/iio/industrialio-gts-helper.c b/drivers/iio/industrialio-gts-helper.c
+index 5f131bc1a01e..4ad949672210 100644
+--- a/drivers/iio/industrialio-gts-helper.c
++++ b/drivers/iio/industrialio-gts-helper.c
+@@ -167,7 +167,7 @@ static int iio_gts_gain_cmp(const void *a, const void *b)
+
+ static int gain_to_scaletables(struct iio_gts *gts, int **gains, int **scales)
+ {
+- int ret, i, j, new_idx, time_idx;
++ int i, j, new_idx, time_idx, ret = 0;
+ int *all_gains;
+ size_t gain_bytes;
+
+--
+2.47.1
+
--- /dev/null
+From fe051552f5078fa02d593847529a3884305a6ffe Mon Sep 17 00:00:00 2001
+From: Kinsey Moore <kinsey.moore@oarcorp.com>
+Date: Tue, 23 Jul 2024 15:58:05 -0500
+Subject: jffs2: Prevent rtime decompress memory corruption
+
+From: Kinsey Moore <kinsey.moore@oarcorp.com>
+
+commit fe051552f5078fa02d593847529a3884305a6ffe upstream.
+
+The rtime decompression routine does not fully check bounds during the
+entirety of the decompression pass and can corrupt memory outside the
+decompression buffer if the compressed data is corrupted. This adds the
+required check to prevent this failure mode.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Kinsey Moore <kinsey.moore@oarcorp.com>
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/jffs2/compr_rtime.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/fs/jffs2/compr_rtime.c
++++ b/fs/jffs2/compr_rtime.c
+@@ -95,6 +95,9 @@ static int jffs2_rtime_decompress(unsign
+
+ positions[value]=outpos;
+ if (repeat) {
++ if ((outpos + repeat) >= destlen) {
++ return 1;
++ }
+ if (backoffs + repeat >= outpos) {
+ while(repeat) {
+ cpage_out[outpos++] = cpage_out[backoffs++];
--- /dev/null
+From 9f070b1862f3411b8bcdfd51a8eaad25286f9deb Mon Sep 17 00:00:00 2001
+From: Hans Verkuil <hverkuil@xs4all.nl>
+Date: Mon, 14 Oct 2024 16:52:41 +0200
+Subject: media: v4l2-core: v4l2-dv-timings: check cvt/gtf result
+
+From: Hans Verkuil <hverkuil@xs4all.nl>
+
+commit 9f070b1862f3411b8bcdfd51a8eaad25286f9deb upstream.
+
+The v4l2_detect_cvt/gtf functions should check the result against the
+timing capabilities: these functions calculate the timings, so if they
+are out of bounds, they should be rejected.
+
+To do this, add the struct v4l2_dv_timings_cap as argument to those
+functions.
+
+This required updates to the adv7604 and adv7842 drivers since the
+prototype of these functions has now changed. The timings struct
+that is passed to v4l2_detect_cvt/gtf in those two drivers is filled
+with the timings detected by the hardware.
+
+The vivid driver was also updated, but an additional check was added:
+the width and height specified by VIDIOC_S_DV_TIMINGS has to match the
+calculated result, otherwise something went wrong. Note that vivid
+*emulates* hardware, so all the values passed to the v4l2_detect_cvt/gtf
+functions came from the timings struct that was filled by userspace
+and passed on to the driver via VIDIOC_S_DV_TIMINGS. So these fields
+can contain random data. Both the constraints check via
+struct v4l2_dv_timings_cap and the additional width/height check
+ensure that the resulting timings are sane and not messed up by the
+v4l2_detect_cvt/gtf calculations.
+
+Signed-off-by: Hans Verkuil <hverkuil@xs4all.nl>
+Fixes: 2576415846bc ("[media] v4l2: move dv-timings related code to v4l2-dv-timings.c")
+Cc: stable@vger.kernel.org
+Reported-by: syzbot+a828133770f62293563e@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/linux-media/000000000000013050062127830a@google.com/
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/i2c/adv7604.c | 5
+ drivers/media/i2c/adv7842.c | 13 +-
+ drivers/media/test-drivers/vivid/vivid-vid-cap.c | 15 ++
+ drivers/media/v4l2-core/v4l2-dv-timings.c | 132 ++++++++++++-----------
+ include/media/v4l2-dv-timings.h | 18 ++-
+ 5 files changed, 107 insertions(+), 76 deletions(-)
+
+--- a/drivers/media/i2c/adv7604.c
++++ b/drivers/media/i2c/adv7604.c
+@@ -1405,12 +1405,13 @@ static int stdi2dv_timings(struct v4l2_s
+ if (v4l2_detect_cvt(stdi->lcf + 1, hfreq, stdi->lcvs, 0,
+ (stdi->hs_pol == '+' ? V4L2_DV_HSYNC_POS_POL : 0) |
+ (stdi->vs_pol == '+' ? V4L2_DV_VSYNC_POS_POL : 0),
+- false, timings))
++ false, adv76xx_get_dv_timings_cap(sd, -1), timings))
+ return 0;
+ if (v4l2_detect_gtf(stdi->lcf + 1, hfreq, stdi->lcvs,
+ (stdi->hs_pol == '+' ? V4L2_DV_HSYNC_POS_POL : 0) |
+ (stdi->vs_pol == '+' ? V4L2_DV_VSYNC_POS_POL : 0),
+- false, state->aspect_ratio, timings))
++ false, state->aspect_ratio,
++ adv76xx_get_dv_timings_cap(sd, -1), timings))
+ return 0;
+
+ v4l2_dbg(2, debug, sd,
+--- a/drivers/media/i2c/adv7842.c
++++ b/drivers/media/i2c/adv7842.c
+@@ -1431,14 +1431,15 @@ static int stdi2dv_timings(struct v4l2_s
+ }
+
+ if (v4l2_detect_cvt(stdi->lcf + 1, hfreq, stdi->lcvs, 0,
+- (stdi->hs_pol == '+' ? V4L2_DV_HSYNC_POS_POL : 0) |
+- (stdi->vs_pol == '+' ? V4L2_DV_VSYNC_POS_POL : 0),
+- false, timings))
++ (stdi->hs_pol == '+' ? V4L2_DV_HSYNC_POS_POL : 0) |
++ (stdi->vs_pol == '+' ? V4L2_DV_VSYNC_POS_POL : 0),
++ false, adv7842_get_dv_timings_cap(sd), timings))
+ return 0;
+ if (v4l2_detect_gtf(stdi->lcf + 1, hfreq, stdi->lcvs,
+- (stdi->hs_pol == '+' ? V4L2_DV_HSYNC_POS_POL : 0) |
+- (stdi->vs_pol == '+' ? V4L2_DV_VSYNC_POS_POL : 0),
+- false, state->aspect_ratio, timings))
++ (stdi->hs_pol == '+' ? V4L2_DV_HSYNC_POS_POL : 0) |
++ (stdi->vs_pol == '+' ? V4L2_DV_VSYNC_POS_POL : 0),
++ false, state->aspect_ratio,
++ adv7842_get_dv_timings_cap(sd), timings))
+ return 0;
+
+ v4l2_dbg(2, debug, sd,
+--- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c
++++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+@@ -1466,12 +1466,19 @@ static bool valid_cvt_gtf_timings(struct
+ h_freq = (u32)bt->pixelclock / total_h_pixel;
+
+ if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_CVT)) {
++ struct v4l2_dv_timings cvt = {};
++
+ if (v4l2_detect_cvt(total_v_lines, h_freq, bt->vsync, bt->width,
+- bt->polarities, bt->interlaced, timings))
++ bt->polarities, bt->interlaced,
++ &vivid_dv_timings_cap, &cvt) &&
++ cvt.bt.width == bt->width && cvt.bt.height == bt->height) {
++ *timings = cvt;
+ return true;
++ }
+ }
+
+ if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_GTF)) {
++ struct v4l2_dv_timings gtf = {};
+ struct v4l2_fract aspect_ratio;
+
+ find_aspect_ratio(bt->width, bt->height,
+@@ -1479,8 +1486,12 @@ static bool valid_cvt_gtf_timings(struct
+ &aspect_ratio.denominator);
+ if (v4l2_detect_gtf(total_v_lines, h_freq, bt->vsync,
+ bt->polarities, bt->interlaced,
+- aspect_ratio, timings))
++ aspect_ratio, &vivid_dv_timings_cap,
++ >f) &&
++ gtf.bt.width == bt->width && gtf.bt.height == bt->height) {
++ *timings = gtf;
+ return true;
++ }
+ }
+ return false;
+ }
+--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
++++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
+@@ -481,25 +481,28 @@ EXPORT_SYMBOL_GPL(v4l2_calc_timeperframe
+ * @polarities - the horizontal and vertical polarities (same as struct
+ * v4l2_bt_timings polarities).
+ * @interlaced - if this flag is true, it indicates interlaced format
+- * @fmt - the resulting timings.
++ * @cap - the v4l2_dv_timings_cap capabilities.
++ * @timings - the resulting timings.
+ *
+ * This function will attempt to detect if the given values correspond to a
+ * valid CVT format. If so, then it will return true, and fmt will be filled
+ * in with the found CVT timings.
+ */
+-bool v4l2_detect_cvt(unsigned frame_height,
+- unsigned hfreq,
+- unsigned vsync,
+- unsigned active_width,
++bool v4l2_detect_cvt(unsigned int frame_height,
++ unsigned int hfreq,
++ unsigned int vsync,
++ unsigned int active_width,
+ u32 polarities,
+ bool interlaced,
+- struct v4l2_dv_timings *fmt)
++ const struct v4l2_dv_timings_cap *cap,
++ struct v4l2_dv_timings *timings)
+ {
+- int v_fp, v_bp, h_fp, h_bp, hsync;
+- int frame_width, image_height, image_width;
++ struct v4l2_dv_timings t = {};
++ int v_fp, v_bp, h_fp, h_bp, hsync;
++ int frame_width, image_height, image_width;
+ bool reduced_blanking;
+ bool rb_v2 = false;
+- unsigned pix_clk;
++ unsigned int pix_clk;
+
+ if (vsync < 4 || vsync > 8)
+ return false;
+@@ -625,36 +628,39 @@ bool v4l2_detect_cvt(unsigned frame_heig
+ h_fp = h_blank - hsync - h_bp;
+ }
+
+- fmt->type = V4L2_DV_BT_656_1120;
+- fmt->bt.polarities = polarities;
+- fmt->bt.width = image_width;
+- fmt->bt.height = image_height;
+- fmt->bt.hfrontporch = h_fp;
+- fmt->bt.vfrontporch = v_fp;
+- fmt->bt.hsync = hsync;
+- fmt->bt.vsync = vsync;
+- fmt->bt.hbackporch = frame_width - image_width - h_fp - hsync;
++ t.type = V4L2_DV_BT_656_1120;
++ t.bt.polarities = polarities;
++ t.bt.width = image_width;
++ t.bt.height = image_height;
++ t.bt.hfrontporch = h_fp;
++ t.bt.vfrontporch = v_fp;
++ t.bt.hsync = hsync;
++ t.bt.vsync = vsync;
++ t.bt.hbackporch = frame_width - image_width - h_fp - hsync;
+
+ if (!interlaced) {
+- fmt->bt.vbackporch = frame_height - image_height - v_fp - vsync;
+- fmt->bt.interlaced = V4L2_DV_PROGRESSIVE;
++ t.bt.vbackporch = frame_height - image_height - v_fp - vsync;
++ t.bt.interlaced = V4L2_DV_PROGRESSIVE;
+ } else {
+- fmt->bt.vbackporch = (frame_height - image_height - 2 * v_fp -
++ t.bt.vbackporch = (frame_height - image_height - 2 * v_fp -
+ 2 * vsync) / 2;
+- fmt->bt.il_vbackporch = frame_height - image_height - 2 * v_fp -
+- 2 * vsync - fmt->bt.vbackporch;
+- fmt->bt.il_vfrontporch = v_fp;
+- fmt->bt.il_vsync = vsync;
+- fmt->bt.flags |= V4L2_DV_FL_HALF_LINE;
+- fmt->bt.interlaced = V4L2_DV_INTERLACED;
++ t.bt.il_vbackporch = frame_height - image_height - 2 * v_fp -
++ 2 * vsync - t.bt.vbackporch;
++ t.bt.il_vfrontporch = v_fp;
++ t.bt.il_vsync = vsync;
++ t.bt.flags |= V4L2_DV_FL_HALF_LINE;
++ t.bt.interlaced = V4L2_DV_INTERLACED;
+ }
+
+- fmt->bt.pixelclock = pix_clk;
+- fmt->bt.standards = V4L2_DV_BT_STD_CVT;
++ t.bt.pixelclock = pix_clk;
++ t.bt.standards = V4L2_DV_BT_STD_CVT;
+
+ if (reduced_blanking)
+- fmt->bt.flags |= V4L2_DV_FL_REDUCED_BLANKING;
++ t.bt.flags |= V4L2_DV_FL_REDUCED_BLANKING;
+
++ if (!v4l2_valid_dv_timings(&t, cap, NULL, NULL))
++ return false;
++ *timings = t;
+ return true;
+ }
+ EXPORT_SYMBOL_GPL(v4l2_detect_cvt);
+@@ -699,22 +705,25 @@ EXPORT_SYMBOL_GPL(v4l2_detect_cvt);
+ * image height, so it has to be passed explicitly. Usually
+ * the native screen aspect ratio is used for this. If it
+ * is not filled in correctly, then 16:9 will be assumed.
+- * @fmt - the resulting timings.
++ * @cap - the v4l2_dv_timings_cap capabilities.
++ * @timings - the resulting timings.
+ *
+ * This function will attempt to detect if the given values correspond to a
+ * valid GTF format. If so, then it will return true, and fmt will be filled
+ * in with the found GTF timings.
+ */
+-bool v4l2_detect_gtf(unsigned frame_height,
+- unsigned hfreq,
+- unsigned vsync,
+- u32 polarities,
+- bool interlaced,
+- struct v4l2_fract aspect,
+- struct v4l2_dv_timings *fmt)
++bool v4l2_detect_gtf(unsigned int frame_height,
++ unsigned int hfreq,
++ unsigned int vsync,
++ u32 polarities,
++ bool interlaced,
++ struct v4l2_fract aspect,
++ const struct v4l2_dv_timings_cap *cap,
++ struct v4l2_dv_timings *timings)
+ {
++ struct v4l2_dv_timings t = {};
+ int pix_clk;
+- int v_fp, v_bp, h_fp, hsync;
++ int v_fp, v_bp, h_fp, hsync;
+ int frame_width, image_height, image_width;
+ bool default_gtf;
+ int h_blank;
+@@ -783,36 +792,39 @@ bool v4l2_detect_gtf(unsigned frame_heig
+
+ h_fp = h_blank / 2 - hsync;
+
+- fmt->type = V4L2_DV_BT_656_1120;
+- fmt->bt.polarities = polarities;
+- fmt->bt.width = image_width;
+- fmt->bt.height = image_height;
+- fmt->bt.hfrontporch = h_fp;
+- fmt->bt.vfrontporch = v_fp;
+- fmt->bt.hsync = hsync;
+- fmt->bt.vsync = vsync;
+- fmt->bt.hbackporch = frame_width - image_width - h_fp - hsync;
++ t.type = V4L2_DV_BT_656_1120;
++ t.bt.polarities = polarities;
++ t.bt.width = image_width;
++ t.bt.height = image_height;
++ t.bt.hfrontporch = h_fp;
++ t.bt.vfrontporch = v_fp;
++ t.bt.hsync = hsync;
++ t.bt.vsync = vsync;
++ t.bt.hbackporch = frame_width - image_width - h_fp - hsync;
+
+ if (!interlaced) {
+- fmt->bt.vbackporch = frame_height - image_height - v_fp - vsync;
+- fmt->bt.interlaced = V4L2_DV_PROGRESSIVE;
++ t.bt.vbackporch = frame_height - image_height - v_fp - vsync;
++ t.bt.interlaced = V4L2_DV_PROGRESSIVE;
+ } else {
+- fmt->bt.vbackporch = (frame_height - image_height - 2 * v_fp -
++ t.bt.vbackporch = (frame_height - image_height - 2 * v_fp -
+ 2 * vsync) / 2;
+- fmt->bt.il_vbackporch = frame_height - image_height - 2 * v_fp -
+- 2 * vsync - fmt->bt.vbackporch;
+- fmt->bt.il_vfrontporch = v_fp;
+- fmt->bt.il_vsync = vsync;
+- fmt->bt.flags |= V4L2_DV_FL_HALF_LINE;
+- fmt->bt.interlaced = V4L2_DV_INTERLACED;
++ t.bt.il_vbackporch = frame_height - image_height - 2 * v_fp -
++ 2 * vsync - t.bt.vbackporch;
++ t.bt.il_vfrontporch = v_fp;
++ t.bt.il_vsync = vsync;
++ t.bt.flags |= V4L2_DV_FL_HALF_LINE;
++ t.bt.interlaced = V4L2_DV_INTERLACED;
+ }
+
+- fmt->bt.pixelclock = pix_clk;
+- fmt->bt.standards = V4L2_DV_BT_STD_GTF;
++ t.bt.pixelclock = pix_clk;
++ t.bt.standards = V4L2_DV_BT_STD_GTF;
+
+ if (!default_gtf)
+- fmt->bt.flags |= V4L2_DV_FL_REDUCED_BLANKING;
++ t.bt.flags |= V4L2_DV_FL_REDUCED_BLANKING;
+
++ if (!v4l2_valid_dv_timings(&t, cap, NULL, NULL))
++ return false;
++ *timings = t;
+ return true;
+ }
+ EXPORT_SYMBOL_GPL(v4l2_detect_gtf);
+--- a/include/media/v4l2-dv-timings.h
++++ b/include/media/v4l2-dv-timings.h
+@@ -146,15 +146,18 @@ void v4l2_print_dv_timings(const char *d
+ * @polarities: the horizontal and vertical polarities (same as struct
+ * v4l2_bt_timings polarities).
+ * @interlaced: if this flag is true, it indicates interlaced format
++ * @cap: the v4l2_dv_timings_cap capabilities.
+ * @fmt: the resulting timings.
+ *
+ * This function will attempt to detect if the given values correspond to a
+ * valid CVT format. If so, then it will return true, and fmt will be filled
+ * in with the found CVT timings.
+ */
+-bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync,
+- unsigned active_width, u32 polarities, bool interlaced,
+- struct v4l2_dv_timings *fmt);
++bool v4l2_detect_cvt(unsigned int frame_height, unsigned int hfreq,
++ unsigned int vsync, unsigned int active_width,
++ u32 polarities, bool interlaced,
++ const struct v4l2_dv_timings_cap *cap,
++ struct v4l2_dv_timings *fmt);
+
+ /**
+ * v4l2_detect_gtf - detect if the given timings follow the GTF standard
+@@ -170,15 +173,18 @@ bool v4l2_detect_cvt(unsigned frame_heig
+ * image height, so it has to be passed explicitly. Usually
+ * the native screen aspect ratio is used for this. If it
+ * is not filled in correctly, then 16:9 will be assumed.
++ * @cap: the v4l2_dv_timings_cap capabilities.
+ * @fmt: the resulting timings.
+ *
+ * This function will attempt to detect if the given values correspond to a
+ * valid GTF format. If so, then it will return true, and fmt will be filled
+ * in with the found GTF timings.
+ */
+-bool v4l2_detect_gtf(unsigned frame_height, unsigned hfreq, unsigned vsync,
+- u32 polarities, bool interlaced, struct v4l2_fract aspect,
+- struct v4l2_dv_timings *fmt);
++bool v4l2_detect_gtf(unsigned int frame_height, unsigned int hfreq,
++ unsigned int vsync, u32 polarities, bool interlaced,
++ struct v4l2_fract aspect,
++ const struct v4l2_dv_timings_cap *cap,
++ struct v4l2_dv_timings *fmt);
+
+ /**
+ * v4l2_calc_aspect_ratio - calculate the aspect ratio based on bytes
--- /dev/null
+From ca59f9956d4519ab18ab2270be47c6b8c6ced091 Mon Sep 17 00:00:00 2001
+From: Qiu-ji Chen <chenqiuji666@gmail.com>
+Date: Fri, 27 Sep 2024 16:39:02 +0800
+Subject: media: wl128x: Fix atomicity violation in fmc_send_cmd()
+
+From: Qiu-ji Chen <chenqiuji666@gmail.com>
+
+commit ca59f9956d4519ab18ab2270be47c6b8c6ced091 upstream.
+
+Atomicity violation occurs when the fmc_send_cmd() function is executed
+simultaneously with the modification of the fmdev->resp_skb value.
+Consider a scenario where, after passing the validity check within the
+function, a non-null fmdev->resp_skb variable is assigned a null value.
+This results in an invalid fmdev->resp_skb variable passing the validity
+check. As seen in the later part of the function, skb = fmdev->resp_skb;
+when the invalid fmdev->resp_skb passes the check, a null pointer
+dereference error may occur at line 478, evt_hdr = (void *)skb->data;
+
+To address this issue, it is recommended to include the validity check of
+fmdev->resp_skb within the locked section of the function. This
+modification ensures that the value of fmdev->resp_skb does not change
+during the validation process, thereby maintaining its validity.
+
+This possible bug is found by an experimental static analysis tool
+developed by our team. This tool analyzes the locking APIs
+to extract function pairs that can be concurrently executed, and then
+analyzes the instructions in the paired functions to identify possible
+concurrency bugs including data races and atomicity violations.
+
+Fixes: e8454ff7b9a4 ("[media] drivers:media:radio: wl128x: FM Driver Common sources")
+Cc: stable@vger.kernel.org
+Signed-off-by: Qiu-ji Chen <chenqiuji666@gmail.com>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/radio/wl128x/fmdrv_common.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/media/radio/wl128x/fmdrv_common.c
++++ b/drivers/media/radio/wl128x/fmdrv_common.c
+@@ -466,11 +466,12 @@ int fmc_send_cmd(struct fmdev *fmdev, u8
+ jiffies_to_msecs(FM_DRV_TX_TIMEOUT) / 1000);
+ return -ETIMEDOUT;
+ }
++ spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
+ if (!fmdev->resp_skb) {
++ spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);
+ fmerr("Response SKB is missing\n");
+ return -EFAULT;
+ }
+- spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
+ skb = fmdev->resp_skb;
+ fmdev->resp_skb = NULL;
+ spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);
--- /dev/null
+From bcc7ba668818dcadd2f1db66b39ed860a63ecf97 Mon Sep 17 00:00:00 2001
+From: Bin Liu <b-liu@ti.com>
+Date: Thu, 31 Oct 2024 12:23:15 -0500
+Subject: serial: 8250: omap: Move pm_runtime_get_sync
+
+From: Bin Liu <b-liu@ti.com>
+
+commit bcc7ba668818dcadd2f1db66b39ed860a63ecf97 upstream.
+
+Currently in omap_8250_shutdown, the dma->rx_running flag is
+set to zero in omap_8250_rx_dma_flush. Next pm_runtime_get_sync
+is called, which is a runtime resume call stack which can
+re-set the flag. When the call omap_8250_shutdown returns, the
+flag is expected to be UN-SET, but this is not the case. This
+is causing issues the next time UART is re-opened and
+omap_8250_rx_dma is called. Fix by moving pm_runtime_get_sync
+before the omap_8250_rx_dma_flush.
+
+cc: stable@vger.kernel.org
+Fixes: 0e31c8d173ab ("tty: serial: 8250_omap: add custom DMA-RX callback")
+Signed-off-by: Bin Liu <b-liu@ti.com>
+[Judith: Add commit message]
+Signed-off-by: Judith Mendez <jm@ti.com>
+Reviewed-by: Kevin Hilman <khilman@baylibre.com>
+Tested-by: Kevin Hilman <khilman@baylibre.com>
+Link: https://lore.kernel.org/r/20241031172315.453750-1-jm@ti.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/serial/8250/8250_omap.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/tty/serial/8250/8250_omap.c
++++ b/drivers/tty/serial/8250/8250_omap.c
+@@ -766,12 +766,12 @@ static void omap_8250_shutdown(struct ua
+ struct uart_8250_port *up = up_to_u8250p(port);
+ struct omap8250_priv *priv = port->private_data;
+
++ pm_runtime_get_sync(port->dev);
++
+ flush_work(&priv->qos_work);
+ if (up->dma)
+ omap_8250_rx_dma_flush(up);
+
+- pm_runtime_get_sync(port->dev);
+-
+ serial_out(up, UART_OMAP_WER, 0);
+ if (priv->habit & UART_HAS_EFR2)
+ serial_out(up, UART_OMAP_EFR2, 0x0);
--- /dev/null
+From 166105c9030a30ba08574a9998afc7b60bc72dd7 Mon Sep 17 00:00:00 2001
+From: Filip Brozovic <fbrozovic@gmail.com>
+Date: Sun, 10 Nov 2024 12:17:00 +0100
+Subject: serial: 8250_fintek: Add support for F81216E
+
+From: Filip Brozovic <fbrozovic@gmail.com>
+
+commit 166105c9030a30ba08574a9998afc7b60bc72dd7 upstream.
+
+The F81216E is an LPC/eSPI to 4 UART Super I/O and is mostly compatible with
+the F81216H, but does not support RS-485 auto-direction delays on any port.
+
+Signed-off-by: Filip Brozovic <fbrozovic@gmail.com>
+Cc: stable <stable@kernel.org>
+Link: https://lore.kernel.org/r/20241110111703.15494-1-fbrozovic@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/serial/8250/8250_fintek.c | 14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+--- a/drivers/tty/serial/8250/8250_fintek.c
++++ b/drivers/tty/serial/8250/8250_fintek.c
+@@ -21,6 +21,7 @@
+ #define CHIP_ID_F81866 0x1010
+ #define CHIP_ID_F81966 0x0215
+ #define CHIP_ID_F81216AD 0x1602
++#define CHIP_ID_F81216E 0x1617
+ #define CHIP_ID_F81216H 0x0501
+ #define CHIP_ID_F81216 0x0802
+ #define VENDOR_ID1 0x23
+@@ -158,6 +159,7 @@ static int fintek_8250_check_id(struct f
+ case CHIP_ID_F81866:
+ case CHIP_ID_F81966:
+ case CHIP_ID_F81216AD:
++ case CHIP_ID_F81216E:
+ case CHIP_ID_F81216H:
+ case CHIP_ID_F81216:
+ break;
+@@ -181,6 +183,7 @@ static int fintek_8250_get_ldn_range(str
+ return 0;
+
+ case CHIP_ID_F81216AD:
++ case CHIP_ID_F81216E:
+ case CHIP_ID_F81216H:
+ case CHIP_ID_F81216:
+ *min = F81216_LDN_LOW;
+@@ -250,6 +253,7 @@ static void fintek_8250_set_irq_mode(str
+ break;
+
+ case CHIP_ID_F81216AD:
++ case CHIP_ID_F81216E:
+ case CHIP_ID_F81216H:
+ case CHIP_ID_F81216:
+ sio_write_mask_reg(pdata, FINTEK_IRQ_MODE, IRQ_SHARE,
+@@ -263,7 +267,8 @@ static void fintek_8250_set_irq_mode(str
+ static void fintek_8250_set_max_fifo(struct fintek_8250 *pdata)
+ {
+ switch (pdata->pid) {
+- case CHIP_ID_F81216H: /* 128Bytes FIFO */
++ case CHIP_ID_F81216E: /* 128Bytes FIFO */
++ case CHIP_ID_F81216H:
+ case CHIP_ID_F81966:
+ case CHIP_ID_F81866:
+ sio_write_mask_reg(pdata, FIFO_CTRL,
+@@ -297,6 +302,7 @@ static void fintek_8250_set_termios(stru
+ goto exit;
+
+ switch (pdata->pid) {
++ case CHIP_ID_F81216E:
+ case CHIP_ID_F81216H:
+ reg = RS485;
+ break;
+@@ -346,6 +352,7 @@ static void fintek_8250_set_termios_hand
+ struct fintek_8250 *pdata = uart->port.private_data;
+
+ switch (pdata->pid) {
++ case CHIP_ID_F81216E:
+ case CHIP_ID_F81216H:
+ case CHIP_ID_F81966:
+ case CHIP_ID_F81866:
+@@ -438,6 +445,11 @@ static void fintek_8250_set_rs485_handle
+ uart->port.rs485_supported = fintek_8250_rs485_supported;
+ break;
+
++ case CHIP_ID_F81216E: /* F81216E does not support RS485 delays */
++ uart->port.rs485_config = fintek_8250_rs485_config;
++ uart->port.rs485_supported = fintek_8250_rs485_supported;
++ break;
++
+ default: /* No RS485 Auto direction functional */
+ break;
+ }
ksmbd-fix-use-after-free-in-smb-request-handling.patch
smb-client-fix-null-ptr-deref-in-crypto_aead_setkey.patch
platform-chrome-cros_ec_typec-fix-missing-fwnode-reference-decrement.patch
+ubi-wl-put-source-peb-into-correct-list-if-trying-locking-leb-failed.patch
+um-ubd-do-not-use-drvdata-in-release.patch
+um-net-do-not-use-drvdata-in-release.patch
+dt-bindings-serial-rs485-fix-rs485-rts-delay-property.patch
+serial-8250_fintek-add-support-for-f81216e.patch
+serial-8250-omap-move-pm_runtime_get_sync.patch
+jffs2-prevent-rtime-decompress-memory-corruption.patch
+um-vector-do-not-use-drvdata-in-release.patch
+sh-cpuinfo-fix-a-warning-for-config_cpumask_offstack.patch
+iio-gts-fix-uninitialized-symbol-ret.patch
+ublk-fix-ublk_ch_mmap-for-64k-page-size.patch
+arm64-tls-fix-context-switching-of-tpidrro_el0-when-kpti-is-enabled.patch
+block-fix-missing-dispatching-request-when-queue-is-started-or-unquiesced.patch
+block-fix-ordering-between-checking-queue_flag_quiesced-request-adding.patch
+block-fix-ordering-between-checking-blk_mq_s_stopped-request-adding.patch
+blk-mq-make-blk_mq_quiesce_tagset-hold-the-tag-list-mutex-less-long.patch
+hid-wacom-interpret-tilt-data-from-intuos-pro-bt-as-signed-values.patch
+media-wl128x-fix-atomicity-violation-in-fmc_send_cmd.patch
+soc-fsl-rcpm-fix-missing-of_node_put-in-copy_ippdexpcr1_setting.patch
+media-v4l2-core-v4l2-dv-timings-check-cvt-gtf-result.patch
+alsa-ump-fix-evaluation-of-midi-1.0-fb-info.patch
+alsa-pcm-add-sanity-null-check-for-the-default-mmap-fault-handler.patch
+alsa-hda-realtek-update-alc225-depop-procedure.patch
+alsa-hda-realtek-set-pcbeep-to-default-value-for-alc274.patch
+alsa-hda-realtek-fix-internal-speaker-and-mic-boost-of-infinix-y4-max.patch
+alsa-hda-realtek-apply-quirk-for-medion-e15433.patch
+smb3-request-handle-caching-when-caching-directories.patch
+smb-client-handle-max-length-for-smb-symlinks.patch
+smb-don-t-leak-cfid-when-reconnect-races-with-open_cached_dir.patch
+smb-prevent-use-after-free-due-to-open_cached_dir-error-paths.patch
+smb-during-unmount-ensure-all-cached-dir-instances-drop-their-dentry.patch
--- /dev/null
+From 3c891f7c6a4e90bb1199497552f24b26e46383bc Mon Sep 17 00:00:00 2001
+From: Huacai Chen <chenhuacai@loongson.cn>
+Date: Thu, 14 Jul 2022 16:41:36 +0800
+Subject: sh: cpuinfo: Fix a warning for CONFIG_CPUMASK_OFFSTACK
+
+From: Huacai Chen <chenhuacai@loongson.cn>
+
+commit 3c891f7c6a4e90bb1199497552f24b26e46383bc upstream.
+
+When CONFIG_CPUMASK_OFFSTACK and CONFIG_DEBUG_PER_CPU_MAPS are selected,
+cpu_max_bits_warn() generates a runtime warning similar to the one below when
+showing /proc/cpuinfo. Fix this by using nr_cpu_ids (the runtime limit)
+instead of NR_CPUS to iterate CPUs.
+
+[ 3.052463] ------------[ cut here ]------------
+[ 3.059679] WARNING: CPU: 3 PID: 1 at include/linux/cpumask.h:108 show_cpuinfo+0x5e8/0x5f0
+[ 3.070072] Modules linked in: efivarfs autofs4
+[ 3.076257] CPU: 0 PID: 1 Comm: systemd Not tainted 5.19-rc5+ #1052
+[ 3.099465] Stack : 9000000100157b08 9000000000f18530 9000000000cf846c 9000000100154000
+[ 3.109127] 9000000100157a50 0000000000000000 9000000100157a58 9000000000ef7430
+[ 3.118774] 90000001001578e8 0000000000000040 0000000000000020 ffffffffffffffff
+[ 3.128412] 0000000000aaaaaa 1ab25f00eec96a37 900000010021de80 900000000101c890
+[ 3.138056] 0000000000000000 0000000000000000 0000000000000000 0000000000aaaaaa
+[ 3.147711] ffff8000339dc220 0000000000000001 0000000006ab4000 0000000000000000
+[ 3.157364] 900000000101c998 0000000000000004 9000000000ef7430 0000000000000000
+[ 3.167012] 0000000000000009 000000000000006c 0000000000000000 0000000000000000
+[ 3.176641] 9000000000d3de08 9000000001639390 90000000002086d8 00007ffff0080286
+[ 3.186260] 00000000000000b0 0000000000000004 0000000000000000 0000000000071c1c
+[ 3.195868] ...
+[ 3.199917] Call Trace:
+[ 3.203941] [<90000000002086d8>] show_stack+0x38/0x14c
+[ 3.210666] [<9000000000cf846c>] dump_stack_lvl+0x60/0x88
+[ 3.217625] [<900000000023d268>] __warn+0xd0/0x100
+[ 3.223958] [<9000000000cf3c90>] warn_slowpath_fmt+0x7c/0xcc
+[ 3.231150] [<9000000000210220>] show_cpuinfo+0x5e8/0x5f0
+[ 3.238080] [<90000000004f578c>] seq_read_iter+0x354/0x4b4
+[ 3.245098] [<90000000004c2e90>] new_sync_read+0x17c/0x1c4
+[ 3.252114] [<90000000004c5174>] vfs_read+0x138/0x1d0
+[ 3.258694] [<90000000004c55f8>] ksys_read+0x70/0x100
+[ 3.265265] [<9000000000cfde9c>] do_syscall+0x7c/0x94
+[ 3.271820] [<9000000000202fe4>] handle_syscall+0xc4/0x160
+[ 3.281824] ---[ end trace 8b484262b4b8c24c ]---
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Reviewed-by: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
+Tested-by: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
+Signed-off-by: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sh/kernel/cpu/proc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/sh/kernel/cpu/proc.c
++++ b/arch/sh/kernel/cpu/proc.c
+@@ -132,7 +132,7 @@ static int show_cpuinfo(struct seq_file
+
+ static void *c_start(struct seq_file *m, loff_t *pos)
+ {
+- return *pos < NR_CPUS ? cpu_data + *pos : NULL;
++ return *pos < nr_cpu_ids ? cpu_data + *pos : NULL;
+ }
+ static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+ {
--- /dev/null
+From 0812340811e45ec4039d409049be53056182a552 Mon Sep 17 00:00:00 2001
+From: Paulo Alcantara <pc@manguebit.com>
+Date: Mon, 18 Nov 2024 12:35:16 -0300
+Subject: smb: client: handle max length for SMB symlinks
+
+From: Paulo Alcantara <pc@manguebit.com>
+
+commit 0812340811e45ec4039d409049be53056182a552 upstream.
+
+We can't use PATH_MAX for SMB symlinks because
+
+ (1) Windows Server will fail FSCTL_SET_REPARSE_POINT with
+ STATUS_IO_REPARSE_DATA_INVALID when input buffer is larger than
+ 16K, as specified in MS-FSA 2.1.5.10.37.
+
+ (2) The client won't be able to parse large SMB responses that
+ includes SMB symlink path within SMB2_CREATE or SMB2_IOCTL
+ responses.
+
+Fix this by defining a maximum length value (4060) for SMB symlinks
+that both client and server can handle.
+
+Cc: David Howells <dhowells@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paulo Alcantara (Red Hat) <pc@manguebit.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/reparse.c | 5 ++++-
+ fs/smb/client/reparse.h | 2 ++
+ 2 files changed, 6 insertions(+), 1 deletion(-)
+
+--- a/fs/smb/client/reparse.c
++++ b/fs/smb/client/reparse.c
+@@ -35,6 +35,9 @@ int smb2_create_reparse_symlink(const un
+ u16 len, plen;
+ int rc = 0;
+
++ if (strlen(symname) > REPARSE_SYM_PATH_MAX)
++ return -ENAMETOOLONG;
++
+ sym = kstrdup(symname, GFP_KERNEL);
+ if (!sym)
+ return -ENOMEM;
+@@ -64,7 +67,7 @@ int smb2_create_reparse_symlink(const un
+ if (rc < 0)
+ goto out;
+
+- plen = 2 * UniStrnlen((wchar_t *)path, PATH_MAX);
++ plen = 2 * UniStrnlen((wchar_t *)path, REPARSE_SYM_PATH_MAX);
+ len = sizeof(*buf) + plen * 2;
+ buf = kzalloc(len, GFP_KERNEL);
+ if (!buf) {
+--- a/fs/smb/client/reparse.h
++++ b/fs/smb/client/reparse.h
+@@ -12,6 +12,8 @@
+ #include "fs_context.h"
+ #include "cifsglob.h"
+
++#define REPARSE_SYM_PATH_MAX 4060
++
+ /*
+ * Used only by cifs.ko to ignore reparse points from files when client or
+ * server doesn't support FSCTL_GET_REPARSE_POINT.
--- /dev/null
+From 7afb86733685c64c604d32faf00fa4a1f22c2ab1 Mon Sep 17 00:00:00 2001
+From: Paul Aurich <paul@darkrain42.org>
+Date: Mon, 18 Nov 2024 13:50:26 -0800
+Subject: smb: Don't leak cfid when reconnect races with open_cached_dir
+
+From: Paul Aurich <paul@darkrain42.org>
+
+commit 7afb86733685c64c604d32faf00fa4a1f22c2ab1 upstream.
+
+open_cached_dir() may either race with the tcon reconnection even before
+compound_send_recv() or directly trigger a reconnection via
+SMB2_open_init() or SMB_query_info_init().
+
+The reconnection process invokes invalidate_all_cached_dirs() via
+cifs_mark_open_files_invalid(), which removes all cfids from the
+cfids->entries list but doesn't drop a ref if has_lease isn't true. This
+results in the currently-being-constructed cfid not being on the list,
+but still having a refcount of 2. It leaks if returned from
+open_cached_dir().
+
+Fix this by setting cfid->has_lease when the ref is actually taken; the
+cfid will not be used by other threads until it has a valid time.
+
+Addresses these kmemleaks:
+
+unreferenced object 0xffff8881090c4000 (size 1024):
+ comm "bash", pid 1860, jiffies 4295126592
+ hex dump (first 32 bytes):
+ 00 01 00 00 00 00 ad de 22 01 00 00 00 00 ad de ........".......
+ 00 ca 45 22 81 88 ff ff f8 dc 4f 04 81 88 ff ff ..E"......O.....
+ backtrace (crc 6f58c20f):
+ [<ffffffff8b895a1e>] __kmalloc_cache_noprof+0x2be/0x350
+ [<ffffffff8bda06e3>] open_cached_dir+0x993/0x1fb0
+ [<ffffffff8bdaa750>] cifs_readdir+0x15a0/0x1d50
+ [<ffffffff8b9a853f>] iterate_dir+0x28f/0x4b0
+ [<ffffffff8b9a9aed>] __x64_sys_getdents64+0xfd/0x200
+ [<ffffffff8cf6da05>] do_syscall_64+0x95/0x1a0
+ [<ffffffff8d00012f>] entry_SYSCALL_64_after_hwframe+0x76/0x7e
+unreferenced object 0xffff8881044fdcf8 (size 8):
+ comm "bash", pid 1860, jiffies 4295126592
+ hex dump (first 8 bytes):
+ 00 cc cc cc cc cc cc cc ........
+ backtrace (crc 10c106a9):
+ [<ffffffff8b89a3d3>] __kmalloc_node_track_caller_noprof+0x363/0x480
+ [<ffffffff8b7d7256>] kstrdup+0x36/0x60
+ [<ffffffff8bda0700>] open_cached_dir+0x9b0/0x1fb0
+ [<ffffffff8bdaa750>] cifs_readdir+0x15a0/0x1d50
+ [<ffffffff8b9a853f>] iterate_dir+0x28f/0x4b0
+ [<ffffffff8b9a9aed>] __x64_sys_getdents64+0xfd/0x200
+ [<ffffffff8cf6da05>] do_syscall_64+0x95/0x1a0
+ [<ffffffff8d00012f>] entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+And addresses these BUG splats when unmounting the SMB filesystem:
+
+BUG: Dentry ffff888140590ba0{i=1000000000080,n=/} still in use (2) [unmount of cifs cifs]
+WARNING: CPU: 3 PID: 3433 at fs/dcache.c:1536 umount_check+0xd0/0x100
+Modules linked in:
+CPU: 3 UID: 0 PID: 3433 Comm: bash Not tainted 6.12.0-rc4-g850925a8133c-dirty #49
+Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 11/12/2020
+RIP: 0010:umount_check+0xd0/0x100
+Code: 8d 7c 24 40 e8 31 5a f4 ff 49 8b 54 24 40 41 56 49 89 e9 45 89 e8 48 89 d9 41 57 48 89 de 48 c7 c7 80 e7 db ac e8 f0 72 9a ff <0f> 0b 58 31 c0 5a 5b 5d 41 5c 41 5d 41 5e 41 5f e9 2b e5 5d 01 41
+RSP: 0018:ffff88811cc27978 EFLAGS: 00010286
+RAX: 0000000000000000 RBX: ffff888140590ba0 RCX: ffffffffaaf20bae
+RDX: dffffc0000000000 RSI: 0000000000000008 RDI: ffff8881f6fb6f40
+RBP: ffff8881462ec000 R08: 0000000000000001 R09: ffffed1023984ee3
+R10: ffff88811cc2771f R11: 00000000016cfcc0 R12: ffff888134383e08
+R13: 0000000000000002 R14: ffff8881462ec668 R15: ffffffffaceab4c0
+FS: 00007f23bfa98740(0000) GS:ffff8881f6f80000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 0000556de4a6f808 CR3: 0000000123c80000 CR4: 0000000000350ef0
+Call Trace:
+ <TASK>
+ d_walk+0x6a/0x530
+ shrink_dcache_for_umount+0x6a/0x200
+ generic_shutdown_super+0x52/0x2a0
+ kill_anon_super+0x22/0x40
+ cifs_kill_sb+0x159/0x1e0
+ deactivate_locked_super+0x66/0xe0
+ cleanup_mnt+0x140/0x210
+ task_work_run+0xfb/0x170
+ syscall_exit_to_user_mode+0x29f/0x2b0
+ do_syscall_64+0xa1/0x1a0
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+RIP: 0033:0x7f23bfb93ae7
+Code: ff ff ff ff c3 66 0f 1f 44 00 00 48 8b 0d 11 93 0d 00 f7 d8 64 89 01 b8 ff ff ff ff eb bf 0f 1f 44 00 00 b8 50 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d e9 92 0d 00 f7 d8 64 89 01 48
+RSP: 002b:00007ffee9138598 EFLAGS: 00000246 ORIG_RAX: 0000000000000050
+RAX: 0000000000000000 RBX: 0000558f1803e9a0 RCX: 00007f23bfb93ae7
+RDX: 0000000000000000 RSI: 0000000000000004 RDI: 0000558f1803e9a0
+RBP: 0000558f1803e600 R08: 0000000000000007 R09: 0000558f17fab610
+R10: d91d5ec34ab757b0 R11: 0000000000000246 R12: 0000000000000001
+R13: 0000000000000000 R14: 0000000000000015 R15: 0000000000000000
+ </TASK>
+irq event stamp: 1163486
+hardirqs last enabled at (1163485): [<ffffffffac98d344>] _raw_spin_unlock_irqrestore+0x34/0x60
+hardirqs last disabled at (1163486): [<ffffffffac97dcfc>] __schedule+0xc7c/0x19a0
+softirqs last enabled at (1163482): [<ffffffffab79a3ee>] __smb_send_rqst+0x3de/0x990
+softirqs last disabled at (1163480): [<ffffffffac2314f1>] release_sock+0x21/0xf0
+---[ end trace 0000000000000000 ]---
+
+VFS: Busy inodes after unmount of cifs (cifs)
+------------[ cut here ]------------
+kernel BUG at fs/super.c:661!
+Oops: invalid opcode: 0000 [#1] PREEMPT SMP KASAN NOPTI
+CPU: 1 UID: 0 PID: 3433 Comm: bash Tainted: G W 6.12.0-rc4-g850925a8133c-dirty #49
+Tainted: [W]=WARN
+Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 11/12/2020
+RIP: 0010:generic_shutdown_super+0x290/0x2a0
+Code: e8 15 7c f7 ff 48 8b 5d 28 48 89 df e8 09 7c f7 ff 48 8b 0b 48 89 ee 48 8d 95 68 06 00 00 48 c7 c7 80 7f db ac e8 00 69 af ff <0f> 0b 66 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 00 90 90 90 90 90 90
+RSP: 0018:ffff88811cc27a50 EFLAGS: 00010246
+RAX: 000000000000003e RBX: ffffffffae994420 RCX: 0000000000000027
+RDX: 0000000000000000 RSI: ffffffffab06180e RDI: ffff8881f6eb18c8
+RBP: ffff8881462ec000 R08: 0000000000000001 R09: ffffed103edd6319
+R10: ffff8881f6eb18cb R11: 00000000016d3158 R12: ffff8881462ec9c0
+R13: ffff8881462ec050 R14: 0000000000000001 R15: 0000000000000000
+FS: 00007f23bfa98740(0000) GS:ffff8881f6e80000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007f8364005d68 CR3: 0000000123c80000 CR4: 0000000000350ef0
+Call Trace:
+ <TASK>
+ kill_anon_super+0x22/0x40
+ cifs_kill_sb+0x159/0x1e0
+ deactivate_locked_super+0x66/0xe0
+ cleanup_mnt+0x140/0x210
+ task_work_run+0xfb/0x170
+ syscall_exit_to_user_mode+0x29f/0x2b0
+ do_syscall_64+0xa1/0x1a0
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+RIP: 0033:0x7f23bfb93ae7
+ </TASK>
+Modules linked in:
+---[ end trace 0000000000000000 ]---
+RIP: 0010:generic_shutdown_super+0x290/0x2a0
+Code: e8 15 7c f7 ff 48 8b 5d 28 48 89 df e8 09 7c f7 ff 48 8b 0b 48 89 ee 48 8d 95 68 06 00 00 48 c7 c7 80 7f db ac e8 00 69 af ff <0f> 0b 66 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 00 90 90 90 90 90 90
+RSP: 0018:ffff88811cc27a50 EFLAGS: 00010246
+RAX: 000000000000003e RBX: ffffffffae994420 RCX: 0000000000000027
+RDX: 0000000000000000 RSI: ffffffffab06180e RDI: ffff8881f6eb18c8
+RBP: ffff8881462ec000 R08: 0000000000000001 R09: ffffed103edd6319
+R10: ffff8881f6eb18cb R11: 00000000016d3158 R12: ffff8881462ec9c0
+R13: ffff8881462ec050 R14: 0000000000000001 R15: 0000000000000000
+FS: 00007f23bfa98740(0000) GS:ffff8881f6e80000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007f8364005d68 CR3: 0000000123c80000 CR4: 0000000000350ef0
+
+This reproduces eventually with an SMB mount and two shells running
+these loops concurrently
+
+- while true; do
+ cd ~; sleep 1;
+ for i in {1..3}; do cd /mnt/test/subdir;
+ echo $PWD; sleep 1; cd ..; echo $PWD; sleep 1;
+ done;
+ echo ...;
+ done
+- while true; do
+ iptables -F OUTPUT; mount -t cifs -a;
+ for _ in {0..2}; do ls /mnt/test/subdir/ | wc -l; done;
+ iptables -I OUTPUT -p tcp --dport 445 -j DROP;
+ sleep 10
+ echo "unmounting"; umount -l -t cifs -a; echo "done unmounting";
+ sleep 20
+ echo "recovering"; iptables -F OUTPUT;
+ sleep 10;
+ done
+
+Fixes: ebe98f1447bb ("cifs: enable caching of directories for which a lease is held")
+Fixes: 5c86919455c1 ("smb: client: fix use-after-free in smb2_query_info_compound()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Paul Aurich <paul@darkrain42.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/cached_dir.c | 27 ++++++++++++++-------------
+ 1 file changed, 14 insertions(+), 13 deletions(-)
+
+--- a/fs/smb/client/cached_dir.c
++++ b/fs/smb/client/cached_dir.c
+@@ -59,6 +59,16 @@ static struct cached_fid *find_or_create
+ list_add(&cfid->entry, &cfids->entries);
+ cfid->on_list = true;
+ kref_get(&cfid->refcount);
++ /*
++ * Set @cfid->has_lease to true during construction so that the lease
++ * reference can be put in cached_dir_lease_break() due to a potential
++ * lease break right after the request is sent or while @cfid is still
++ * being cached, or if a reconnection is triggered during construction.
++ * Concurrent processes won't be to use it yet due to @cfid->time being
++ * zero.
++ */
++ cfid->has_lease = true;
++
+ spin_unlock(&cfids->cfid_list_lock);
+ return cfid;
+ }
+@@ -176,12 +186,12 @@ replay_again:
+ return -ENOENT;
+ }
+ /*
+- * Return cached fid if it has a lease. Otherwise, it is either a new
+- * entry or laundromat worker removed it from @cfids->entries. Caller
+- * will put last reference if the latter.
++ * Return cached fid if it is valid (has a lease and has a time).
++ * Otherwise, it is either a new entry or laundromat worker removed it
++ * from @cfids->entries. Caller will put last reference if the latter.
+ */
+ spin_lock(&cfids->cfid_list_lock);
+- if (cfid->has_lease) {
++ if (cfid->has_lease && cfid->time) {
+ spin_unlock(&cfids->cfid_list_lock);
+ *ret_cfid = cfid;
+ kfree(utf16_path);
+@@ -267,15 +277,6 @@ replay_again:
+
+ smb2_set_related(&rqst[1]);
+
+- /*
+- * Set @cfid->has_lease to true before sending out compounded request so
+- * its lease reference can be put in cached_dir_lease_break() due to a
+- * potential lease break right after the request is sent or while @cfid
+- * is still being cached. Concurrent processes won't be to use it yet
+- * due to @cfid->time being zero.
+- */
+- cfid->has_lease = true;
+-
+ if (retries) {
+ smb2_set_replay(server, &rqst[0]);
+ smb2_set_replay(server, &rqst[1]);
--- /dev/null
+From 3fa640d035e5ae526769615c35cb9ed4be6e3662 Mon Sep 17 00:00:00 2001
+From: Paul Aurich <paul@darkrain42.org>
+Date: Mon, 18 Nov 2024 13:50:28 -0800
+Subject: smb: During unmount, ensure all cached dir instances drop their dentry
+
+From: Paul Aurich <paul@darkrain42.org>
+
+commit 3fa640d035e5ae526769615c35cb9ed4be6e3662 upstream.
+
+The unmount process (cifs_kill_sb() calling close_all_cached_dirs()) can
+race with various cached directory operations, which ultimately results
+in dentries not being dropped and these kernel BUGs:
+
+BUG: Dentry ffff88814f37e358{i=1000000000080,n=/} still in use (2) [unmount of cifs cifs]
+VFS: Busy inodes after unmount of cifs (cifs)
+------------[ cut here ]------------
+kernel BUG at fs/super.c:661!
+
+This happens when a cfid is in the process of being cleaned up and
+has been removed from the cfids->entries list, including:
+
+- Receiving a lease break from the server
+- Server reconnection triggers invalidate_all_cached_dirs(), which
+ removes all the cfids from the list
+- The laundromat thread decides to expire an old cfid.
+
+To solve these problems, dropping the dentry is done in work queued to
+a newly-added cfid_put_wq workqueue, and close_all_cached_dirs()
+flushes that workqueue after it drops all the dentries of which it's
+aware. This is a global workqueue (rather than scoped to a mount), but
+the queued work is minimal.
+
+The final cleanup work for cleaning up a cfid is performed via work
+queued in the serverclose_wq workqueue; this is done separate from
+dropping the dentries so that close_all_cached_dirs() doesn't block on
+any server operations.
+
+Both of these queued works expect to be invoked with a cfid reference and
+a tcon reference, to prevent those objects from being freed while the work
+is ongoing.
+
+While we're here, add proper locking to close_all_cached_dirs(), and
+locking around the freeing of cfid->dentry.
+
+Fixes: ebe98f1447bb ("cifs: enable caching of directories for which a lease is held")
+Cc: stable@vger.kernel.org
+Signed-off-by: Paul Aurich <paul@darkrain42.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/cached_dir.c | 156 ++++++++++++++++++++++++++++++++++++---------
+ fs/smb/client/cached_dir.h | 6 +
+ fs/smb/client/cifsfs.c | 12 +++
+ fs/smb/client/cifsglob.h | 3
+ fs/smb/client/inode.c | 3
+ fs/smb/client/trace.h | 3
+ 6 files changed, 147 insertions(+), 36 deletions(-)
+
+--- a/fs/smb/client/cached_dir.c
++++ b/fs/smb/client/cached_dir.c
+@@ -17,6 +17,11 @@ static void free_cached_dir(struct cache
+ static void smb2_close_cached_fid(struct kref *ref);
+ static void cfids_laundromat_worker(struct work_struct *work);
+
++struct cached_dir_dentry {
++ struct list_head entry;
++ struct dentry *dentry;
++};
++
+ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
+ const char *path,
+ bool lookup_only,
+@@ -470,7 +475,10 @@ void close_all_cached_dirs(struct cifs_s
+ struct cifs_tcon *tcon;
+ struct tcon_link *tlink;
+ struct cached_fids *cfids;
++ struct cached_dir_dentry *tmp_list, *q;
++ LIST_HEAD(entry);
+
++ spin_lock(&cifs_sb->tlink_tree_lock);
+ for (node = rb_first(root); node; node = rb_next(node)) {
+ tlink = rb_entry(node, struct tcon_link, tl_rbnode);
+ tcon = tlink_tcon(tlink);
+@@ -479,11 +487,30 @@ void close_all_cached_dirs(struct cifs_s
+ cfids = tcon->cfids;
+ if (cfids == NULL)
+ continue;
++ spin_lock(&cfids->cfid_list_lock);
+ list_for_each_entry(cfid, &cfids->entries, entry) {
+- dput(cfid->dentry);
++ tmp_list = kmalloc(sizeof(*tmp_list), GFP_ATOMIC);
++ if (tmp_list == NULL)
++ break;
++ spin_lock(&cfid->fid_lock);
++ tmp_list->dentry = cfid->dentry;
+ cfid->dentry = NULL;
++ spin_unlock(&cfid->fid_lock);
++
++ list_add_tail(&tmp_list->entry, &entry);
+ }
++ spin_unlock(&cfids->cfid_list_lock);
++ }
++ spin_unlock(&cifs_sb->tlink_tree_lock);
++
++ list_for_each_entry_safe(tmp_list, q, &entry, entry) {
++ list_del(&tmp_list->entry);
++ dput(tmp_list->dentry);
++ kfree(tmp_list);
+ }
++
++ /* Flush any pending work that will drop dentries */
++ flush_workqueue(cfid_put_wq);
+ }
+
+ /*
+@@ -494,14 +521,18 @@ void invalidate_all_cached_dirs(struct c
+ {
+ struct cached_fids *cfids = tcon->cfids;
+ struct cached_fid *cfid, *q;
+- LIST_HEAD(entry);
+
+ if (cfids == NULL)
+ return;
+
++ /*
++ * Mark all the cfids as closed, and move them to the cfids->dying list.
++ * They'll be cleaned up later by cfids_invalidation_worker. Take
++ * a reference to each cfid during this process.
++ */
+ spin_lock(&cfids->cfid_list_lock);
+ list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
+- list_move(&cfid->entry, &entry);
++ list_move(&cfid->entry, &cfids->dying);
+ cfids->num_entries--;
+ cfid->is_open = false;
+ cfid->on_list = false;
+@@ -514,26 +545,47 @@ void invalidate_all_cached_dirs(struct c
+ } else
+ kref_get(&cfid->refcount);
+ }
++ /*
++ * Queue dropping of the dentries once locks have been dropped
++ */
++ if (!list_empty(&cfids->dying))
++ queue_work(cfid_put_wq, &cfids->invalidation_work);
+ spin_unlock(&cfids->cfid_list_lock);
+-
+- list_for_each_entry_safe(cfid, q, &entry, entry) {
+- list_del(&cfid->entry);
+- cancel_work_sync(&cfid->lease_break);
+- /*
+- * Drop the ref-count from above, either the lease-ref (if there
+- * was one) or the extra one acquired.
+- */
+- kref_put(&cfid->refcount, smb2_close_cached_fid);
+- }
+ }
+
+ static void
+-smb2_cached_lease_break(struct work_struct *work)
++cached_dir_offload_close(struct work_struct *work)
+ {
+ struct cached_fid *cfid = container_of(work,
+- struct cached_fid, lease_break);
++ struct cached_fid, close_work);
++ struct cifs_tcon *tcon = cfid->tcon;
++
++ WARN_ON(cfid->on_list);
+
+ kref_put(&cfid->refcount, smb2_close_cached_fid);
++ cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cached_close);
++}
++
++/*
++ * Release the cached directory's dentry, and then queue work to drop cached
++ * directory itself (closing on server if needed).
++ *
++ * Must be called with a reference to the cached_fid and a reference to the
++ * tcon.
++ */
++static void cached_dir_put_work(struct work_struct *work)
++{
++ struct cached_fid *cfid = container_of(work, struct cached_fid,
++ put_work);
++ struct dentry *dentry;
++
++ spin_lock(&cfid->fid_lock);
++ dentry = cfid->dentry;
++ cfid->dentry = NULL;
++ spin_unlock(&cfid->fid_lock);
++
++ dput(dentry);
++ queue_work(serverclose_wq, &cfid->close_work);
+ }
+
+ int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
+@@ -560,8 +612,10 @@ int cached_dir_lease_break(struct cifs_t
+ cfid->on_list = false;
+ cfids->num_entries--;
+
+- queue_work(cifsiod_wq,
+- &cfid->lease_break);
++ ++tcon->tc_count;
++ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
++ netfs_trace_tcon_ref_get_cached_lease_break);
++ queue_work(cfid_put_wq, &cfid->put_work);
+ spin_unlock(&cfids->cfid_list_lock);
+ return true;
+ }
+@@ -583,7 +637,8 @@ static struct cached_fid *init_cached_di
+ return NULL;
+ }
+
+- INIT_WORK(&cfid->lease_break, smb2_cached_lease_break);
++ INIT_WORK(&cfid->close_work, cached_dir_offload_close);
++ INIT_WORK(&cfid->put_work, cached_dir_put_work);
+ INIT_LIST_HEAD(&cfid->entry);
+ INIT_LIST_HEAD(&cfid->dirents.entries);
+ mutex_init(&cfid->dirents.de_mutex);
+@@ -596,6 +651,9 @@ static void free_cached_dir(struct cache
+ {
+ struct cached_dirent *dirent, *q;
+
++ WARN_ON(work_pending(&cfid->close_work));
++ WARN_ON(work_pending(&cfid->put_work));
++
+ dput(cfid->dentry);
+ cfid->dentry = NULL;
+
+@@ -613,10 +671,30 @@ static void free_cached_dir(struct cache
+ kfree(cfid);
+ }
+
++static void cfids_invalidation_worker(struct work_struct *work)
++{
++ struct cached_fids *cfids = container_of(work, struct cached_fids,
++ invalidation_work);
++ struct cached_fid *cfid, *q;
++ LIST_HEAD(entry);
++
++ spin_lock(&cfids->cfid_list_lock);
++ /* move cfids->dying to the local list */
++ list_cut_before(&entry, &cfids->dying, &cfids->dying);
++ spin_unlock(&cfids->cfid_list_lock);
++
++ list_for_each_entry_safe(cfid, q, &entry, entry) {
++ list_del(&cfid->entry);
++ /* Drop the ref-count acquired in invalidate_all_cached_dirs */
++ kref_put(&cfid->refcount, smb2_close_cached_fid);
++ }
++}
++
+ static void cfids_laundromat_worker(struct work_struct *work)
+ {
+ struct cached_fids *cfids;
+ struct cached_fid *cfid, *q;
++ struct dentry *dentry;
+ LIST_HEAD(entry);
+
+ cfids = container_of(work, struct cached_fids, laundromat_work.work);
+@@ -642,18 +720,28 @@ static void cfids_laundromat_worker(stru
+
+ list_for_each_entry_safe(cfid, q, &entry, entry) {
+ list_del(&cfid->entry);
+- /*
+- * Cancel and wait for the work to finish in case we are racing
+- * with it.
+- */
+- cancel_work_sync(&cfid->lease_break);
+- /*
+- * Drop the ref-count from above, either the lease-ref (if there
+- * was one) or the extra one acquired.
+- */
+- kref_put(&cfid->refcount, smb2_close_cached_fid);
++
++ spin_lock(&cfid->fid_lock);
++ dentry = cfid->dentry;
++ cfid->dentry = NULL;
++ spin_unlock(&cfid->fid_lock);
++
++ dput(dentry);
++ if (cfid->is_open) {
++ spin_lock(&cifs_tcp_ses_lock);
++ ++cfid->tcon->tc_count;
++ trace_smb3_tcon_ref(cfid->tcon->debug_id, cfid->tcon->tc_count,
++ netfs_trace_tcon_ref_get_cached_laundromat);
++ spin_unlock(&cifs_tcp_ses_lock);
++ queue_work(serverclose_wq, &cfid->close_work);
++ } else
++ /*
++ * Drop the ref-count from above, either the lease-ref (if there
++ * was one) or the extra one acquired.
++ */
++ kref_put(&cfid->refcount, smb2_close_cached_fid);
+ }
+- queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
++ queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
+ dir_cache_timeout * HZ);
+ }
+
+@@ -666,9 +754,11 @@ struct cached_fids *init_cached_dirs(voi
+ return NULL;
+ spin_lock_init(&cfids->cfid_list_lock);
+ INIT_LIST_HEAD(&cfids->entries);
++ INIT_LIST_HEAD(&cfids->dying);
+
++ INIT_WORK(&cfids->invalidation_work, cfids_invalidation_worker);
+ INIT_DELAYED_WORK(&cfids->laundromat_work, cfids_laundromat_worker);
+- queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
++ queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
+ dir_cache_timeout * HZ);
+
+ return cfids;
+@@ -687,12 +777,18 @@ void free_cached_dirs(struct cached_fids
+ return;
+
+ cancel_delayed_work_sync(&cfids->laundromat_work);
++ cancel_work_sync(&cfids->invalidation_work);
+
+ spin_lock(&cfids->cfid_list_lock);
+ list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
+ cfid->on_list = false;
+ cfid->is_open = false;
+ list_move(&cfid->entry, &entry);
++ }
++ list_for_each_entry_safe(cfid, q, &cfids->dying, entry) {
++ cfid->on_list = false;
++ cfid->is_open = false;
++ list_move(&cfid->entry, &entry);
+ }
+ spin_unlock(&cfids->cfid_list_lock);
+
+--- a/fs/smb/client/cached_dir.h
++++ b/fs/smb/client/cached_dir.h
+@@ -44,7 +44,8 @@ struct cached_fid {
+ spinlock_t fid_lock;
+ struct cifs_tcon *tcon;
+ struct dentry *dentry;
+- struct work_struct lease_break;
++ struct work_struct put_work;
++ struct work_struct close_work;
+ struct smb2_file_all_info file_all_info;
+ struct cached_dirents dirents;
+ };
+@@ -53,10 +54,13 @@ struct cached_fid {
+ struct cached_fids {
+ /* Must be held when:
+ * - accessing the cfids->entries list
++ * - accessing the cfids->dying list
+ */
+ spinlock_t cfid_list_lock;
+ int num_entries;
+ struct list_head entries;
++ struct list_head dying;
++ struct work_struct invalidation_work;
+ struct delayed_work laundromat_work;
+ };
+
+--- a/fs/smb/client/cifsfs.c
++++ b/fs/smb/client/cifsfs.c
+@@ -156,6 +156,7 @@ struct workqueue_struct *fileinfo_put_wq
+ struct workqueue_struct *cifsoplockd_wq;
+ struct workqueue_struct *deferredclose_wq;
+ struct workqueue_struct *serverclose_wq;
++struct workqueue_struct *cfid_put_wq;
+ __u32 cifs_lock_secret;
+
+ /*
+@@ -1899,9 +1900,16 @@ init_cifs(void)
+ goto out_destroy_deferredclose_wq;
+ }
+
++ cfid_put_wq = alloc_workqueue("cfid_put_wq",
++ WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
++ if (!cfid_put_wq) {
++ rc = -ENOMEM;
++ goto out_destroy_serverclose_wq;
++ }
++
+ rc = cifs_init_inodecache();
+ if (rc)
+- goto out_destroy_serverclose_wq;
++ goto out_destroy_cfid_put_wq;
+
+ rc = init_mids();
+ if (rc)
+@@ -1963,6 +1971,8 @@ out_destroy_mids:
+ destroy_mids();
+ out_destroy_inodecache:
+ cifs_destroy_inodecache();
++out_destroy_cfid_put_wq:
++ destroy_workqueue(cfid_put_wq);
+ out_destroy_serverclose_wq:
+ destroy_workqueue(serverclose_wq);
+ out_destroy_deferredclose_wq:
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -2022,7 +2022,7 @@ require use of the stronger protocol */
+ * cifsInodeInfo->lock_sem cifsInodeInfo->llist cifs_init_once
+ * ->can_cache_brlcks
+ * cifsInodeInfo->deferred_lock cifsInodeInfo->deferred_closes cifsInodeInfo_alloc
+- * cached_fid->fid_mutex cifs_tcon->crfid tcon_info_alloc
++ * cached_fids->cfid_list_lock cifs_tcon->cfids->entries init_cached_dirs
+ * cifsFileInfo->fh_mutex cifsFileInfo cifs_new_fileinfo
+ * cifsFileInfo->file_info_lock cifsFileInfo->count cifs_new_fileinfo
+ * ->invalidHandle initiate_cifs_search
+@@ -2111,6 +2111,7 @@ extern struct workqueue_struct *fileinfo
+ extern struct workqueue_struct *cifsoplockd_wq;
+ extern struct workqueue_struct *deferredclose_wq;
+ extern struct workqueue_struct *serverclose_wq;
++extern struct workqueue_struct *cfid_put_wq;
+ extern __u32 cifs_lock_secret;
+
+ extern mempool_t *cifs_sm_req_poolp;
+--- a/fs/smb/client/inode.c
++++ b/fs/smb/client/inode.c
+@@ -2412,13 +2412,10 @@ cifs_dentry_needs_reval(struct dentry *d
+ return true;
+
+ if (!open_cached_dir_by_dentry(tcon, dentry->d_parent, &cfid)) {
+- spin_lock(&cfid->fid_lock);
+ if (cfid->time && cifs_i->time > cfid->time) {
+- spin_unlock(&cfid->fid_lock);
+ close_cached_dir(cfid);
+ return false;
+ }
+- spin_unlock(&cfid->fid_lock);
+ close_cached_dir(cfid);
+ }
+ /*
+--- a/fs/smb/client/trace.h
++++ b/fs/smb/client/trace.h
+@@ -27,6 +27,8 @@
+ EM(netfs_trace_tcon_ref_free_ipc, "FRE Ipc ") \
+ EM(netfs_trace_tcon_ref_free_ipc_fail, "FRE Ipc-F ") \
+ EM(netfs_trace_tcon_ref_free_reconnect_server, "FRE Reconn") \
++ EM(netfs_trace_tcon_ref_get_cached_laundromat, "GET Ch-Lau") \
++ EM(netfs_trace_tcon_ref_get_cached_lease_break, "GET Ch-Lea") \
+ EM(netfs_trace_tcon_ref_get_cancelled_close, "GET Cn-Cls") \
+ EM(netfs_trace_tcon_ref_get_dfs_refer, "GET DfsRef") \
+ EM(netfs_trace_tcon_ref_get_find, "GET Find ") \
+@@ -35,6 +37,7 @@
+ EM(netfs_trace_tcon_ref_new, "NEW ") \
+ EM(netfs_trace_tcon_ref_new_ipc, "NEW Ipc ") \
+ EM(netfs_trace_tcon_ref_new_reconnect_server, "NEW Reconn") \
++ EM(netfs_trace_tcon_ref_put_cached_close, "PUT Ch-Cls") \
+ EM(netfs_trace_tcon_ref_put_cancelled_close, "PUT Cn-Cls") \
+ EM(netfs_trace_tcon_ref_put_cancelled_close_fid, "PUT Cn-Fid") \
+ EM(netfs_trace_tcon_ref_put_cancelled_mid, "PUT Cn-Mid") \
--- /dev/null
+From a9685b409a03b73d2980bbfa53eb47555802d0a9 Mon Sep 17 00:00:00 2001
+From: Paul Aurich <paul@darkrain42.org>
+Date: Mon, 18 Nov 2024 13:50:27 -0800
+Subject: smb: prevent use-after-free due to open_cached_dir error paths
+
+From: Paul Aurich <paul@darkrain42.org>
+
+commit a9685b409a03b73d2980bbfa53eb47555802d0a9 upstream.
+
+If open_cached_dir() encounters an error parsing the lease from the
+server, the error handling may race with receiving a lease break,
+resulting in open_cached_dir() freeing the cfid while the queued work is
+pending.
+
+Update open_cached_dir() to drop refs rather than directly freeing the
+cfid.
+
+Have cached_dir_lease_break(), cfids_laundromat_worker(), and
+invalidate_all_cached_dirs() clear has_lease immediately while still
+holding cfids->cfid_list_lock, and then use this to also simplify the
+reference counting in cfids_laundromat_worker() and
+invalidate_all_cached_dirs().
+
+Fixes this KASAN splat (which manually injects an error and lease break
+in open_cached_dir()):
+
+==================================================================
+BUG: KASAN: slab-use-after-free in smb2_cached_lease_break+0x27/0xb0
+Read of size 8 at addr ffff88811cc24c10 by task kworker/3:1/65
+
+CPU: 3 UID: 0 PID: 65 Comm: kworker/3:1 Not tainted 6.12.0-rc6-g255cf264e6e5-dirty #87
+Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 11/12/2020
+Workqueue: cifsiod smb2_cached_lease_break
+Call Trace:
+ <TASK>
+ dump_stack_lvl+0x77/0xb0
+ print_report+0xce/0x660
+ kasan_report+0xd3/0x110
+ smb2_cached_lease_break+0x27/0xb0
+ process_one_work+0x50a/0xc50
+ worker_thread+0x2ba/0x530
+ kthread+0x17c/0x1c0
+ ret_from_fork+0x34/0x60
+ ret_from_fork_asm+0x1a/0x30
+ </TASK>
+
+Allocated by task 2464:
+ kasan_save_stack+0x33/0x60
+ kasan_save_track+0x14/0x30
+ __kasan_kmalloc+0xaa/0xb0
+ open_cached_dir+0xa7d/0x1fb0
+ smb2_query_path_info+0x43c/0x6e0
+ cifs_get_fattr+0x346/0xf10
+ cifs_get_inode_info+0x157/0x210
+ cifs_revalidate_dentry_attr+0x2d1/0x460
+ cifs_getattr+0x173/0x470
+ vfs_statx_path+0x10f/0x160
+ vfs_statx+0xe9/0x150
+ vfs_fstatat+0x5e/0xc0
+ __do_sys_newfstatat+0x91/0xf0
+ do_syscall_64+0x95/0x1a0
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+Freed by task 2464:
+ kasan_save_stack+0x33/0x60
+ kasan_save_track+0x14/0x30
+ kasan_save_free_info+0x3b/0x60
+ __kasan_slab_free+0x51/0x70
+ kfree+0x174/0x520
+ open_cached_dir+0x97f/0x1fb0
+ smb2_query_path_info+0x43c/0x6e0
+ cifs_get_fattr+0x346/0xf10
+ cifs_get_inode_info+0x157/0x210
+ cifs_revalidate_dentry_attr+0x2d1/0x460
+ cifs_getattr+0x173/0x470
+ vfs_statx_path+0x10f/0x160
+ vfs_statx+0xe9/0x150
+ vfs_fstatat+0x5e/0xc0
+ __do_sys_newfstatat+0x91/0xf0
+ do_syscall_64+0x95/0x1a0
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+Last potentially related work creation:
+ kasan_save_stack+0x33/0x60
+ __kasan_record_aux_stack+0xad/0xc0
+ insert_work+0x32/0x100
+ __queue_work+0x5c9/0x870
+ queue_work_on+0x82/0x90
+ open_cached_dir+0x1369/0x1fb0
+ smb2_query_path_info+0x43c/0x6e0
+ cifs_get_fattr+0x346/0xf10
+ cifs_get_inode_info+0x157/0x210
+ cifs_revalidate_dentry_attr+0x2d1/0x460
+ cifs_getattr+0x173/0x470
+ vfs_statx_path+0x10f/0x160
+ vfs_statx+0xe9/0x150
+ vfs_fstatat+0x5e/0xc0
+ __do_sys_newfstatat+0x91/0xf0
+ do_syscall_64+0x95/0x1a0
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+The buggy address belongs to the object at ffff88811cc24c00
+ which belongs to the cache kmalloc-1k of size 1024
+The buggy address is located 16 bytes inside of
+ freed 1024-byte region [ffff88811cc24c00, ffff88811cc25000)
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Paul Aurich <paul@darkrain42.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/cached_dir.c | 70 ++++++++++++++++++---------------------------
+ 1 file changed, 29 insertions(+), 41 deletions(-)
+
+--- a/fs/smb/client/cached_dir.c
++++ b/fs/smb/client/cached_dir.c
+@@ -348,6 +348,7 @@ oshr_free:
+ SMB2_query_info_free(&rqst[1]);
+ free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+ free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
++out:
+ if (rc) {
+ spin_lock(&cfids->cfid_list_lock);
+ if (cfid->on_list) {
+@@ -359,23 +360,14 @@ oshr_free:
+ /*
+ * We are guaranteed to have two references at this
+ * point. One for the caller and one for a potential
+- * lease. Release the Lease-ref so that the directory
+- * will be closed when the caller closes the cached
+- * handle.
++ * lease. Release one here, and the second below.
+ */
+ cfid->has_lease = false;
+- spin_unlock(&cfids->cfid_list_lock);
+ kref_put(&cfid->refcount, smb2_close_cached_fid);
+- goto out;
+ }
+ spin_unlock(&cfids->cfid_list_lock);
+- }
+-out:
+- if (rc) {
+- if (cfid->is_open)
+- SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
+- cfid->fid.volatile_fid);
+- free_cached_dir(cfid);
++
++ kref_put(&cfid->refcount, smb2_close_cached_fid);
+ } else {
+ *ret_cfid = cfid;
+ atomic_inc(&tcon->num_remote_opens);
+@@ -513,25 +505,24 @@ void invalidate_all_cached_dirs(struct c
+ cfids->num_entries--;
+ cfid->is_open = false;
+ cfid->on_list = false;
+- /* To prevent race with smb2_cached_lease_break() */
+- kref_get(&cfid->refcount);
++ if (cfid->has_lease) {
++ /*
++ * The lease was never cancelled from the server,
++ * so steal that reference.
++ */
++ cfid->has_lease = false;
++ } else
++ kref_get(&cfid->refcount);
+ }
+ spin_unlock(&cfids->cfid_list_lock);
+
+ list_for_each_entry_safe(cfid, q, &entry, entry) {
+ list_del(&cfid->entry);
+ cancel_work_sync(&cfid->lease_break);
+- if (cfid->has_lease) {
+- /*
+- * We lease was never cancelled from the server so we
+- * need to drop the reference.
+- */
+- spin_lock(&cfids->cfid_list_lock);
+- cfid->has_lease = false;
+- spin_unlock(&cfids->cfid_list_lock);
+- kref_put(&cfid->refcount, smb2_close_cached_fid);
+- }
+- /* Drop the extra reference opened above*/
++ /*
++ * Drop the ref-count from above, either the lease-ref (if there
++ * was one) or the extra one acquired.
++ */
+ kref_put(&cfid->refcount, smb2_close_cached_fid);
+ }
+ }
+@@ -542,9 +533,6 @@ smb2_cached_lease_break(struct work_stru
+ struct cached_fid *cfid = container_of(work,
+ struct cached_fid, lease_break);
+
+- spin_lock(&cfid->cfids->cfid_list_lock);
+- cfid->has_lease = false;
+- spin_unlock(&cfid->cfids->cfid_list_lock);
+ kref_put(&cfid->refcount, smb2_close_cached_fid);
+ }
+
+@@ -562,6 +550,7 @@ int cached_dir_lease_break(struct cifs_t
+ !memcmp(lease_key,
+ cfid->fid.lease_key,
+ SMB2_LEASE_KEY_SIZE)) {
++ cfid->has_lease = false;
+ cfid->time = 0;
+ /*
+ * We found a lease remove it from the list
+@@ -639,8 +628,14 @@ static void cfids_laundromat_worker(stru
+ cfid->on_list = false;
+ list_move(&cfid->entry, &entry);
+ cfids->num_entries--;
+- /* To prevent race with smb2_cached_lease_break() */
+- kref_get(&cfid->refcount);
++ if (cfid->has_lease) {
++ /*
++ * Our lease has not yet been cancelled from the
++ * server. Steal that reference.
++ */
++ cfid->has_lease = false;
++ } else
++ kref_get(&cfid->refcount);
+ }
+ }
+ spin_unlock(&cfids->cfid_list_lock);
+@@ -652,17 +647,10 @@ static void cfids_laundromat_worker(stru
+ * with it.
+ */
+ cancel_work_sync(&cfid->lease_break);
+- if (cfid->has_lease) {
+- /*
+- * Our lease has not yet been cancelled from the server
+- * so we need to drop the reference.
+- */
+- spin_lock(&cfids->cfid_list_lock);
+- cfid->has_lease = false;
+- spin_unlock(&cfids->cfid_list_lock);
+- kref_put(&cfid->refcount, smb2_close_cached_fid);
+- }
+- /* Drop the extra reference opened above */
++ /*
++ * Drop the ref-count from above, either the lease-ref (if there
++ * was one) or the extra one acquired.
++ */
+ kref_put(&cfid->refcount, smb2_close_cached_fid);
+ }
+ queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
--- /dev/null
+From 9ed9d83a51a9636d367c796252409e7b2f4de4d4 Mon Sep 17 00:00:00 2001
+From: Steve French <stfrench@microsoft.com>
+Date: Mon, 18 Nov 2024 12:19:46 -0600
+Subject: smb3: request handle caching when caching directories
+
+From: Steve French <stfrench@microsoft.com>
+
+commit 9ed9d83a51a9636d367c796252409e7b2f4de4d4 upstream.
+
+This client was only requesting READ caching, not READ and HANDLE caching
+in the LeaseState on the open requests we send for directories. To
+delay closing a handle (e.g. for caching directory contents) we should
+be requesting HANDLE as well as READ (as we already do for deferred
+close of files). See MS-SMB2 3.3.1.4 e.g.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/smb2ops.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -4016,7 +4016,7 @@ map_oplock_to_lease(u8 oplock)
+ if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
+ return SMB2_LEASE_WRITE_CACHING_LE | SMB2_LEASE_READ_CACHING_LE;
+ else if (oplock == SMB2_OPLOCK_LEVEL_II)
+- return SMB2_LEASE_READ_CACHING_LE;
++ return SMB2_LEASE_READ_CACHING_LE | SMB2_LEASE_HANDLE_CACHING_LE;
+ else if (oplock == SMB2_OPLOCK_LEVEL_BATCH)
+ return SMB2_LEASE_HANDLE_CACHING_LE | SMB2_LEASE_READ_CACHING_LE |
+ SMB2_LEASE_WRITE_CACHING_LE;
--- /dev/null
+From c9f1efabf8e3b3ff886a42669f7093789dbeca94 Mon Sep 17 00:00:00 2001
+From: Javier Carrasco <javier.carrasco.cruz@gmail.com>
+Date: Sun, 13 Oct 2024 15:29:17 +0200
+Subject: soc: fsl: rcpm: fix missing of_node_put() in copy_ippdexpcr1_setting()
+
+From: Javier Carrasco <javier.carrasco.cruz@gmail.com>
+
+commit c9f1efabf8e3b3ff886a42669f7093789dbeca94 upstream.
+
+of_find_compatible_node() requires a call to of_node_put() when the
+pointer to the node is not required anymore to decrement its refcount
+and avoid leaking memory.
+
+Add the missing call to of_node_put() after the node has been used.
+
+Cc: stable@vger.kernel.org
+Fixes: e95f287deed2 ("soc: fsl: handle RCPM errata A-008646 on SoC LS1021A")
+Signed-off-by: Javier Carrasco <javier.carrasco.cruz@gmail.com>
+Link: https://lore.kernel.org/r/20241013-rcpm-of_node_put-v1-1-9a8e55a01eae@gmail.com
+Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/soc/fsl/rcpm.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/soc/fsl/rcpm.c
++++ b/drivers/soc/fsl/rcpm.c
+@@ -36,6 +36,7 @@ static void copy_ippdexpcr1_setting(u32
+ return;
+
+ regs = of_iomap(np, 0);
++ of_node_put(np);
+ if (!regs)
+ return;
+
--- /dev/null
+From d610020f030bec819f42de327c2bd5437d2766b3 Mon Sep 17 00:00:00 2001
+From: Zhihao Cheng <chengzhihao1@huawei.com>
+Date: Mon, 19 Aug 2024 11:26:21 +0800
+Subject: ubi: wl: Put source PEB into correct list if trying locking LEB failed
+
+From: Zhihao Cheng <chengzhihao1@huawei.com>
+
+commit d610020f030bec819f42de327c2bd5437d2766b3 upstream.
+
+During wear-leveling work, the source PEB will be moved into the scrub list
+when source LEB cannot be locked in ubi_eba_copy_leb(), which is wrong
+for a non-scrub type source PEB. The problem could bring extra and
+ineffective wear-leveling jobs, which have a more or less negative effect
+on the lifetime of the flash. Specifically, the process is divided into 2 steps:
+1. wear_leveling_worker // generate false scrub type PEB
+ ubi_eba_copy_leb // MOVE_RETRY is returned
+ leb_write_trylock // trylock failed
+ scrubbing = 1;
+ e1 is put into ubi->scrub
+2. wear_leveling_worker // schedule false scrub type PEB for wl
+ scrubbing = 1
+ e1 = rb_entry(rb_first(&ubi->scrub))
+
+The problem can be reproduced easily by running fsstress on a small
+UBIFS partition (<64M, simulated by nandsim) for 5~10mins
+(CONFIG_MTD_UBI_FASTMAP=y,CONFIG_MTD_UBI_WL_THRESHOLD=50). Following
+message is shown:
+ ubi0: scrubbed PEB 66 (LEB 0:10), data moved to PEB 165
+
+Since a scrub type source PEB has the variable scrubbing set to '1', and
+the variable scrubbing is checked before the variable keep, the problem can
+be fixed by setting the keep variable to 1 directly if the source LEB cannot
+be locked.
+
+Fixes: e801e128b220 ("UBI: fix missing scrub when there is a bit-flip")
+CC: stable@vger.kernel.org
+Signed-off-by: Zhihao Cheng <chengzhihao1@huawei.com>
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mtd/ubi/wl.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/drivers/mtd/ubi/wl.c
++++ b/drivers/mtd/ubi/wl.c
+@@ -834,7 +834,14 @@ static int wear_leveling_worker(struct u
+ goto out_not_moved;
+ }
+ if (err == MOVE_RETRY) {
+- scrubbing = 1;
++ /*
++ * For source PEB:
++ * 1. The scrubbing is set for scrub type PEB, it will
++ * be put back into ubi->scrub list.
++ * 2. Non-scrub type PEB will be put back into ubi->used
++ * list.
++ */
++ keep = 1;
+ dst_leb_clean = 1;
+ goto out_not_moved;
+ }
--- /dev/null
+From d369735e02ef122d19d4c3d093028da0eb400636 Mon Sep 17 00:00:00 2001
+From: Ming Lei <ming.lei@redhat.com>
+Date: Mon, 11 Nov 2024 19:07:18 +0800
+Subject: ublk: fix ublk_ch_mmap() for 64K page size
+
+From: Ming Lei <ming.lei@redhat.com>
+
+commit d369735e02ef122d19d4c3d093028da0eb400636 upstream.
+
+In ublk_ch_mmap(), queue id is calculated in the following way:
+
+ (vma->vm_pgoff << PAGE_SHIFT) / `max_cmd_buf_size`
+
+'max_cmd_buf_size' is equal to
+
+ `UBLK_MAX_QUEUE_DEPTH * sizeof(struct ublksrv_io_desc)`
+
+and UBLK_MAX_QUEUE_DEPTH is 4096 and part of UAPI, so 'max_cmd_buf_size'
+is always page aligned in 4K page size kernel. However, it isn't true in
+64K page size kernel.
+
+Fix the issue by always rounding up 'max_cmd_buf_size' to a multiple of PAGE_SIZE.
+
+Cc: stable@vger.kernel.org
+Fixes: 71f28f3136af ("ublk_drv: add io_uring based userspace block driver")
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Link: https://lore.kernel.org/r/20241111110718.1394001-1-ming.lei@redhat.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/block/ublk_drv.c | 15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -713,12 +713,21 @@ static inline char *ublk_queue_cmd_buf(s
+ return ublk_get_queue(ub, q_id)->io_cmd_buf;
+ }
+
++static inline int __ublk_queue_cmd_buf_size(int depth)
++{
++ return round_up(depth * sizeof(struct ublksrv_io_desc), PAGE_SIZE);
++}
++
+ static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)
+ {
+ struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
+
+- return round_up(ubq->q_depth * sizeof(struct ublksrv_io_desc),
+- PAGE_SIZE);
++ return __ublk_queue_cmd_buf_size(ubq->q_depth);
++}
++
++static int ublk_max_cmd_buf_size(void)
++{
++ return __ublk_queue_cmd_buf_size(UBLK_MAX_QUEUE_DEPTH);
+ }
+
+ static inline bool ublk_queue_can_use_recovery_reissue(
+@@ -1387,7 +1396,7 @@ static int ublk_ch_mmap(struct file *fil
+ {
+ struct ublk_device *ub = filp->private_data;
+ size_t sz = vma->vm_end - vma->vm_start;
+- unsigned max_sz = UBLK_MAX_QUEUE_DEPTH * sizeof(struct ublksrv_io_desc);
++ unsigned max_sz = ublk_max_cmd_buf_size();
+ unsigned long pfn, end, phys_off = vma->vm_pgoff << PAGE_SHIFT;
+ int q_id, ret = 0;
+
--- /dev/null
+From d1db692a9be3b4bd3473b64fcae996afaffe8438 Mon Sep 17 00:00:00 2001
+From: Tiwei Bie <tiwei.btw@antgroup.com>
+Date: Tue, 5 Nov 2024 00:32:02 +0800
+Subject: um: net: Do not use drvdata in release
+
+From: Tiwei Bie <tiwei.btw@antgroup.com>
+
+commit d1db692a9be3b4bd3473b64fcae996afaffe8438 upstream.
+
+The drvdata is not available in release. Let's just use container_of()
+to get the uml_net instance. Otherwise, removing a network device will
+result in a crash:
+
+RIP: 0033:net_device_release+0x10/0x6f
+RSP: 00000000e20c7c40 EFLAGS: 00010206
+RAX: 000000006002e4e7 RBX: 00000000600f1baf RCX: 00000000624074e0
+RDX: 0000000062778000 RSI: 0000000060551c80 RDI: 00000000627af028
+RBP: 00000000e20c7c50 R08: 00000000603ad594 R09: 00000000e20c7b70
+R10: 000000000000135a R11: 00000000603ad422 R12: 0000000000000000
+R13: 0000000062c7af00 R14: 0000000062406d60 R15: 00000000627700b6
+Kernel panic - not syncing: Segfault with no mm
+CPU: 0 UID: 0 PID: 29 Comm: kworker/0:2 Not tainted 6.12.0-rc6-g59b723cd2adb #1
+Workqueue: events mc_work_proc
+Stack:
+ 627af028 62c7af00 e20c7c80 60276fcd
+ 62778000 603f5820 627af028 00000000
+ e20c7cb0 603a2bcd 627af000 62770010
+Call Trace:
+ [<60276fcd>] device_release+0x70/0xba
+ [<603a2bcd>] kobject_put+0xba/0xe7
+ [<60277265>] put_device+0x19/0x1c
+ [<60281266>] platform_device_put+0x26/0x29
+ [<60281e5f>] platform_device_unregister+0x2c/0x2e
+ [<6002ec9c>] net_remove+0x63/0x69
+ [<60031316>] ? mconsole_reply+0x0/0x50
+ [<600310c8>] mconsole_remove+0x160/0x1cc
+ [<60087d40>] ? __remove_hrtimer+0x38/0x74
+ [<60087ff8>] ? hrtimer_try_to_cancel+0x8c/0x98
+ [<6006b3cf>] ? dl_server_stop+0x3f/0x48
+ [<6006b390>] ? dl_server_stop+0x0/0x48
+ [<600672e8>] ? dequeue_entities+0x327/0x390
+ [<60038fa6>] ? um_set_signals+0x0/0x43
+ [<6003070c>] mc_work_proc+0x77/0x91
+ [<60057664>] process_scheduled_works+0x1b3/0x2dd
+ [<60055f32>] ? assign_work+0x0/0x58
+ [<60057f0a>] worker_thread+0x1e9/0x293
+ [<6005406f>] ? set_pf_worker+0x0/0x64
+ [<6005d65d>] ? arch_local_irq_save+0x0/0x2d
+ [<6005d748>] ? kthread_exit+0x0/0x3a
+ [<60057d21>] ? worker_thread+0x0/0x293
+ [<6005dbf1>] kthread+0x126/0x12b
+ [<600219c5>] new_thread_handler+0x85/0xb6
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Tiwei Bie <tiwei.btw@antgroup.com>
+Acked-By: Anton Ivanov <anton.ivanov@cambridgegreys.com>
+Link: https://patch.msgid.link/20241104163203.435515-4-tiwei.btw@antgroup.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/um/drivers/net_kern.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/um/drivers/net_kern.c
++++ b/arch/um/drivers/net_kern.c
+@@ -336,7 +336,7 @@ static struct platform_driver uml_net_dr
+
+ static void net_device_release(struct device *dev)
+ {
+- struct uml_net *device = dev_get_drvdata(dev);
++ struct uml_net *device = container_of(dev, struct uml_net, pdev.dev);
+ struct net_device *netdev = device->dev;
+ struct uml_net_private *lp = netdev_priv(netdev);
+
--- /dev/null
+From 5bee35e5389f450a7eea7318deb9073e9414d3b1 Mon Sep 17 00:00:00 2001
+From: Tiwei Bie <tiwei.btw@antgroup.com>
+Date: Tue, 5 Nov 2024 00:32:01 +0800
+Subject: um: ubd: Do not use drvdata in release
+
+From: Tiwei Bie <tiwei.btw@antgroup.com>
+
+commit 5bee35e5389f450a7eea7318deb9073e9414d3b1 upstream.
+
+The drvdata is not available in release. Let's just use container_of()
+to get the ubd instance. Otherwise, removing a ubd device will result
+in a crash:
+
+RIP: 0033:blk_mq_free_tag_set+0x1f/0xba
+RSP: 00000000e2083bf0 EFLAGS: 00010246
+RAX: 000000006021463a RBX: 0000000000000348 RCX: 0000000062604d00
+RDX: 0000000004208060 RSI: 00000000605241a0 RDI: 0000000000000348
+RBP: 00000000e2083c10 R08: 0000000062414010 R09: 00000000601603f7
+R10: 000000000000133a R11: 000000006038c4bd R12: 0000000000000000
+R13: 0000000060213a5c R14: 0000000062405d20 R15: 00000000604f7aa0
+Kernel panic - not syncing: Segfault with no mm
+CPU: 0 PID: 17 Comm: kworker/0:1 Not tainted 6.8.0-rc3-00107-gba3f67c11638 #1
+Workqueue: events mc_work_proc
+Stack:
+ 00000000 604f7ef0 62c5d000 62405d20
+ e2083c30 6002c776 6002c755 600e47ff
+ e2083c60 6025ffe3 04208060 603d36e0
+Call Trace:
+ [<6002c776>] ubd_device_release+0x21/0x55
+ [<6002c755>] ? ubd_device_release+0x0/0x55
+ [<600e47ff>] ? kfree+0x0/0x100
+ [<6025ffe3>] device_release+0x70/0xba
+ [<60381d6a>] kobject_put+0xb5/0xe2
+ [<6026027b>] put_device+0x19/0x1c
+ [<6026a036>] platform_device_put+0x26/0x29
+ [<6026ac5a>] platform_device_unregister+0x2c/0x2e
+ [<6002c52e>] ubd_remove+0xb8/0xd6
+ [<6002bb74>] ? mconsole_reply+0x0/0x50
+ [<6002b926>] mconsole_remove+0x160/0x1cc
+ [<6002bbbc>] ? mconsole_reply+0x48/0x50
+ [<6003379c>] ? um_set_signals+0x3b/0x43
+ [<60061c55>] ? update_min_vruntime+0x14/0x70
+ [<6006251f>] ? dequeue_task_fair+0x164/0x235
+ [<600620aa>] ? update_cfs_group+0x0/0x40
+ [<603a0e77>] ? __schedule+0x0/0x3ed
+ [<60033761>] ? um_set_signals+0x0/0x43
+ [<6002af6a>] mc_work_proc+0x77/0x91
+ [<600520b4>] process_scheduled_works+0x1af/0x2c3
+ [<6004ede3>] ? assign_work+0x0/0x58
+ [<600527a1>] worker_thread+0x2f7/0x37a
+ [<6004ee3b>] ? set_pf_worker+0x0/0x64
+ [<6005765d>] ? arch_local_irq_save+0x0/0x2d
+ [<60058e07>] ? kthread_exit+0x0/0x3a
+ [<600524aa>] ? worker_thread+0x0/0x37a
+ [<60058f9f>] kthread+0x130/0x135
+ [<6002068e>] new_thread_handler+0x85/0xb6
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Tiwei Bie <tiwei.btw@antgroup.com>
+Acked-By: Anton Ivanov <anton.ivanov@cambridgegreys.com>
+Link: https://patch.msgid.link/20241104163203.435515-3-tiwei.btw@antgroup.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/um/drivers/ubd_kern.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/um/drivers/ubd_kern.c
++++ b/arch/um/drivers/ubd_kern.c
+@@ -799,7 +799,7 @@ static int ubd_open_dev(struct ubd *ubd_
+
+ static void ubd_device_release(struct device *dev)
+ {
+- struct ubd *ubd_dev = dev_get_drvdata(dev);
++ struct ubd *ubd_dev = container_of(dev, struct ubd, pdev.dev);
+
+ blk_mq_free_tag_set(&ubd_dev->tag_set);
+ *ubd_dev = ((struct ubd) DEFAULT_UBD);
--- /dev/null
+From 51b39d741970742a5c41136241a9c48ac607cf82 Mon Sep 17 00:00:00 2001
+From: Tiwei Bie <tiwei.btw@antgroup.com>
+Date: Tue, 5 Nov 2024 00:32:03 +0800
+Subject: um: vector: Do not use drvdata in release
+
+From: Tiwei Bie <tiwei.btw@antgroup.com>
+
+commit 51b39d741970742a5c41136241a9c48ac607cf82 upstream.
+
+The drvdata is not available in release. Let's just use container_of()
+to get the vector_device instance. Otherwise, removing a vector device
+will result in a crash:
+
+RIP: 0033:vector_device_release+0xf/0x50
+RSP: 00000000e187bc40 EFLAGS: 00010202
+RAX: 0000000060028f61 RBX: 00000000600f1baf RCX: 00000000620074e0
+RDX: 000000006220b9c0 RSI: 0000000060551c80 RDI: 0000000000000000
+RBP: 00000000e187bc50 R08: 00000000603ad594 R09: 00000000e187bb70
+R10: 000000000000135a R11: 00000000603ad422 R12: 00000000623ae028
+R13: 000000006287a200 R14: 0000000062006d30 R15: 00000000623700b6
+Kernel panic - not syncing: Segfault with no mm
+CPU: 0 UID: 0 PID: 16 Comm: kworker/0:1 Not tainted 6.12.0-rc6-g59b723cd2adb #1
+Workqueue: events mc_work_proc
+Stack:
+ 60028f61 623ae028 e187bc80 60276fcd
+ 6220b9c0 603f5820 623ae028 00000000
+ e187bcb0 603a2bcd 623ae000 62370010
+Call Trace:
+ [<60028f61>] ? vector_device_release+0x0/0x50
+ [<60276fcd>] device_release+0x70/0xba
+ [<603a2bcd>] kobject_put+0xba/0xe7
+ [<60277265>] put_device+0x19/0x1c
+ [<60281266>] platform_device_put+0x26/0x29
+ [<60281e5f>] platform_device_unregister+0x2c/0x2e
+ [<60029422>] vector_remove+0x52/0x58
+ [<60031316>] ? mconsole_reply+0x0/0x50
+ [<600310c8>] mconsole_remove+0x160/0x1cc
+ [<603b19f4>] ? strlen+0x0/0x15
+ [<60066611>] ? __dequeue_entity+0x1a9/0x206
+ [<600666a7>] ? set_next_entity+0x39/0x63
+ [<6006666e>] ? set_next_entity+0x0/0x63
+ [<60038fa6>] ? um_set_signals+0x0/0x43
+ [<6003070c>] mc_work_proc+0x77/0x91
+ [<60057664>] process_scheduled_works+0x1b3/0x2dd
+ [<60055f32>] ? assign_work+0x0/0x58
+ [<60057f0a>] worker_thread+0x1e9/0x293
+ [<6005406f>] ? set_pf_worker+0x0/0x64
+ [<6005d65d>] ? arch_local_irq_save+0x0/0x2d
+ [<6005d748>] ? kthread_exit+0x0/0x3a
+ [<60057d21>] ? worker_thread+0x0/0x293
+ [<6005dbf1>] kthread+0x126/0x12b
+ [<600219c5>] new_thread_handler+0x85/0xb6
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Tiwei Bie <tiwei.btw@antgroup.com>
+Acked-By: Anton Ivanov <anton.ivanov@cambridgegreys.com>
+Link: https://patch.msgid.link/20241104163203.435515-5-tiwei.btw@antgroup.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/um/drivers/vector_kern.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/um/drivers/vector_kern.c
++++ b/arch/um/drivers/vector_kern.c
+@@ -823,7 +823,8 @@ static struct platform_driver uml_net_dr
+
+ static void vector_device_release(struct device *dev)
+ {
+- struct vector_device *device = dev_get_drvdata(dev);
++ struct vector_device *device =
++ container_of(dev, struct vector_device, pdev.dev);
+ struct net_device *netdev = device->dev;
+
+ list_del(&device->list);