--- /dev/null
+From eafae0fdd115a71b3a200ef1a31f86da04bac77f Mon Sep 17 00:00:00 2001
+From: Evgeniy Harchenko <evgeniyharchenko.dev@gmail.com>
+Date: Fri, 15 Aug 2025 12:58:14 +0300
+Subject: ALSA: hda/realtek: Add support for HP EliteBook x360 830 G6 and EliteBook 830 G6
+
+From: Evgeniy Harchenko <evgeniyharchenko.dev@gmail.com>
+
+commit eafae0fdd115a71b3a200ef1a31f86da04bac77f upstream.
+
+The HP EliteBook x360 830 G6 and HP EliteBook 830 G6 have the
+Realtek HDA codec ALC215. They need the ALC285_FIXUP_HP_GPIO_LED
+quirk to enable the mute LED.
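+
+For reference, each quirk entry matches on the PCI subsystem IDs of
+the HDA controller (0x103c is HP's subsystem vendor ID). A rough
+sketch of the matched data, with illustrative comments (the real
+struct hda_quirk lives in the sound/pci/hda code):
+
+        /* Illustrative sketch only -- not part of this patch. */
+        struct hda_quirk {
+                unsigned short subvendor;       /* 0x103c: HP */
+                unsigned short subdevice;       /* e.g. 0x8548, 0x854a */
+                const char *name;               /* model string */
+                int value;                      /* fixup to apply */
+        };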
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Evgeniy Harchenko <evgeniyharchenko.dev@gmail.com>
+Link: https://patch.msgid.link/20250815095814.75845-1-evgeniyharchenko.dev@gmail.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/pci/hda/patch_realtek.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -10662,6 +10662,8 @@ static const struct hda_quirk alc269_fix
+ SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360),
+ SND_PCI_QUIRK(0x103c, 0x8537, "HP ProBook 440 G6", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++ SND_PCI_QUIRK(0x103c, 0x8548, "HP EliteBook x360 830 G6", ALC285_FIXUP_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x854a, "HP EliteBook 830 G6", ALC285_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x85c6, "HP Pavilion x360 Convertible 14-dy1xxx", ALC295_FIXUP_HP_MUTE_LED_COEFBIT11),
+ SND_PCI_QUIRK(0x103c, 0x85de, "HP Envy x360 13-ar0xxx", ALC285_FIXUP_HP_ENVY_X360),
+ SND_PCI_QUIRK(0x103c, 0x860f, "HP ZBook 15 G6", ALC285_FIXUP_HP_GPIO_AMP_INIT),
--- /dev/null
+From 3f4422e7c9436abf81a00270be7e4d6d3760ec0e Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Wed, 20 Aug 2025 07:19:01 +0200
+Subject: ALSA: hda: tas2781: Fix wrong reference of tasdevice_priv
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 3f4422e7c9436abf81a00270be7e4d6d3760ec0e upstream.
+
+During the conversion to unify the calibration data management, the
+reference to tasdevice_priv was wrongly set to h->hda_priv instead of
+h->priv. This eventually resulted in memory corruption and crashes.
+Unfortunately it's a void pointer, hence the compiler couldn't catch
+the wrong assignment.
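+
+A minimal sketch of why this slipped through (hypothetical field
+layout, for illustration only; the real struct lives in the tas2781
+HDA library):
+
+        struct tas2781_hda {
+                void *priv;             /* struct tasdevice_priv */
+                void *hda_priv;         /* bus-specific wrapper */
+        };
+
+        /* Both lines compile cleanly: void * converts silently. */
+        struct tasdevice_priv *wrong = h->hda_priv;     /* bogus object */
+        struct tasdevice_priv *right = h->priv;         /* intended */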
+
+Fixes: 4fe238513407 ("ALSA: hda/tas2781: Move and unified the calibrated-data getting function for SPI and I2C into the tas2781_hda lib")
+Link: https://bugzilla.suse.com/show_bug.cgi?id=1248270
+Cc: <stable@vger.kernel.org>
+Link: https://patch.msgid.link/20250820051902.4523-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/pci/hda/tas2781_hda_i2c.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/sound/pci/hda/tas2781_hda_i2c.c
++++ b/sound/pci/hda/tas2781_hda_i2c.c
+@@ -287,7 +287,7 @@ static int tas2563_save_calibration(stru
+ efi_char16_t efi_name[TAS2563_CAL_VAR_NAME_MAX];
+ unsigned long max_size = TAS2563_CAL_DATA_SIZE;
+ unsigned char var8[TAS2563_CAL_VAR_NAME_MAX];
+- struct tasdevice_priv *p = h->hda_priv;
++ struct tasdevice_priv *p = h->priv;
+ struct calidata *cd = &p->cali_data;
+ struct cali_reg *r = &cd->cali_reg_array;
+ unsigned int offset = 0;
--- /dev/null
+From 63b17b653df30e90f95338083cb44c35d64bcae4 Mon Sep 17 00:00:00 2001
+From: Pasha Tatashin <pasha.tatashin@soleen.com>
+Date: Fri, 8 Aug 2025 20:18:02 +0000
+Subject: kho: init new_physxa->phys_bits to fix lockdep
+
+From: Pasha Tatashin <pasha.tatashin@soleen.com>
+
+commit 63b17b653df30e90f95338083cb44c35d64bcae4 upstream.
+
+Patch series "Several KHO Hotfixes".
+
+Three unrelated fixes for Kexec Handover.
+
+
+This patch (of 3):
+
+Lockdep shows the following warning:
+
+INFO: trying to register non-static key. The code is fine but needs
+lockdep annotation, or maybe you didn't initialize this object before use?
+turning off the locking correctness validator.
+
+[<ffffffff810133a6>] dump_stack_lvl+0x66/0xa0
+[<ffffffff8136012c>] assign_lock_key+0x10c/0x120
+[<ffffffff81358bb4>] register_lock_class+0xf4/0x2f0
+[<ffffffff813597ff>] __lock_acquire+0x7f/0x2c40
+[<ffffffff81360cb0>] ? __pfx_hlock_conflict+0x10/0x10
+[<ffffffff811707be>] ? native_flush_tlb_global+0x8e/0xa0
+[<ffffffff8117096e>] ? __flush_tlb_all+0x4e/0xa0
+[<ffffffff81172fc2>] ? __kernel_map_pages+0x112/0x140
+[<ffffffff813ec327>] ? xa_load_or_alloc+0x67/0xe0
+[<ffffffff81359556>] lock_acquire+0xe6/0x280
+[<ffffffff813ec327>] ? xa_load_or_alloc+0x67/0xe0
+[<ffffffff8100b9e0>] _raw_spin_lock+0x30/0x40
+[<ffffffff813ec327>] ? xa_load_or_alloc+0x67/0xe0
+[<ffffffff813ec327>] xa_load_or_alloc+0x67/0xe0
+[<ffffffff813eb4c0>] kho_preserve_folio+0x90/0x100
+[<ffffffff813ebb7f>] __kho_finalize+0xcf/0x400
+[<ffffffff813ebef4>] kho_finalize+0x34/0x70
+
+This is because the xarray has its own lock, which is not initialized
+in xa_load_or_alloc().
+
+Modify __kho_preserve_order() to properly call
+xa_init(&new_physxa->phys_bits);
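+
+In other words, an xarray embedded in kzalloc()ed memory must be
+initialized with xa_init() before use, since that is what initializes
+its internal spinlock (and registers it with lockdep). A generic
+sketch of the pattern, not specific to KHO:
+
+        struct foo {
+                struct xarray entries;
+        };
+
+        struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
+
+        if (!f)
+                return -ENOMEM;
+        xa_init(&f->entries);   /* sets up xa_lock properly */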
+
+Link: https://lkml.kernel.org/r/20250808201804.772010-2-pasha.tatashin@soleen.com
+Fixes: fc33e4b44b27 ("kexec: enable KHO support for memory preservation")
+Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
+Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
+Cc: Alexander Graf <graf@amazon.com>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: Baoquan He <bhe@redhat.com>
+Cc: Changyuan Lyu <changyuanl@google.com>
+Cc: Coiby Xu <coxu@redhat.com>
+Cc: Dave Vasilevsky <dave@vasilevsky.ca>
+Cc: Eric Biggers <ebiggers@google.com>
+Cc: Kees Cook <kees@kernel.org>
+Cc: Pratyush Yadav <pratyush@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/kexec_handover.c | 28 ++++++++++++++++++++++++----
+ 1 file changed, 24 insertions(+), 4 deletions(-)
+
+diff --git a/kernel/kexec_handover.c b/kernel/kexec_handover.c
+index e49743ae52c5..65145972d6d6 100644
+--- a/kernel/kexec_handover.c
++++ b/kernel/kexec_handover.c
+@@ -144,14 +144,34 @@ static int __kho_preserve_order(struct kho_mem_track *track, unsigned long pfn,
+ unsigned int order)
+ {
+ struct kho_mem_phys_bits *bits;
+- struct kho_mem_phys *physxa;
++ struct kho_mem_phys *physxa, *new_physxa;
+ const unsigned long pfn_high = pfn >> order;
+
+ might_sleep();
+
+- physxa = xa_load_or_alloc(&track->orders, order, sizeof(*physxa));
+- if (IS_ERR(physxa))
+- return PTR_ERR(physxa);
++ physxa = xa_load(&track->orders, order);
++ if (!physxa) {
++ int err;
++
++ new_physxa = kzalloc(sizeof(*physxa), GFP_KERNEL);
++ if (!new_physxa)
++ return -ENOMEM;
++
++ xa_init(&new_physxa->phys_bits);
++ physxa = xa_cmpxchg(&track->orders, order, NULL, new_physxa,
++ GFP_KERNEL);
++
++ err = xa_err(physxa);
++ if (err || physxa) {
++ xa_destroy(&new_physxa->phys_bits);
++ kfree(new_physxa);
++
++ if (err)
++ return err;
++ } else {
++ physxa = new_physxa;
++ }
++ }
+
+ bits = xa_load_or_alloc(&physxa->phys_bits, pfn_high / PRESERVE_BITS,
+ sizeof(*bits));
+--
+2.50.1
+
--- /dev/null
+From 8b66ed2c3f42cc462e05704af6b94e6a7bad2f5e Mon Sep 17 00:00:00 2001
+From: Pasha Tatashin <pasha.tatashin@soleen.com>
+Date: Fri, 8 Aug 2025 20:18:03 +0000
+Subject: kho: mm: don't allow deferred struct page with KHO
+
+From: Pasha Tatashin <pasha.tatashin@soleen.com>
+
+commit 8b66ed2c3f42cc462e05704af6b94e6a7bad2f5e upstream.
+
+KHO uses struct pages for the preserved memory early in boot; however,
+with deferred struct page initialization, only a small portion of
+memory has properly initialized struct pages.
+
+The problem was detected as poisoned vmemmap entries and illegal page
+flag combinations.
+
+Don't allow the two to be enabled together; later we will have to teach
+KHO to work properly with the deferred struct page init kernel feature.
+
+Link: https://lkml.kernel.org/r/20250808201804.772010-3-pasha.tatashin@soleen.com
+Fixes: 4e1d010e3bda ("kexec: add config option for KHO")
+Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
+Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
+Acked-by: Pratyush Yadav <pratyush@kernel.org>
+Cc: Alexander Graf <graf@amazon.com>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: Baoquan He <bhe@redhat.com>
+Cc: Changyuan Lyu <changyuanl@google.com>
+Cc: Coiby Xu <coxu@redhat.com>
+Cc: Dave Vasilevsky <dave@vasilevsky.ca>
+Cc: Eric Biggers <ebiggers@google.com>
+Cc: Kees Cook <kees@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/Kconfig.kexec | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/kernel/Kconfig.kexec b/kernel/Kconfig.kexec
+index 2ee603a98813..1224dd937df0 100644
+--- a/kernel/Kconfig.kexec
++++ b/kernel/Kconfig.kexec
+@@ -97,6 +97,7 @@ config KEXEC_JUMP
+ config KEXEC_HANDOVER
+ bool "kexec handover"
+ depends on ARCH_SUPPORTS_KEXEC_HANDOVER && ARCH_SUPPORTS_KEXEC_FILE
++ depends on !DEFERRED_STRUCT_PAGE_INIT
+ select MEMBLOCK_KHO_SCRATCH
+ select KEXEC_FILE
+ select DEBUG_FS
+--
+2.50.1
+
--- /dev/null
+From 44958f2025ed3f29fc3e93bb1f6c16121d7847ad Mon Sep 17 00:00:00 2001
+From: Pasha Tatashin <pasha.tatashin@soleen.com>
+Date: Fri, 8 Aug 2025 20:18:04 +0000
+Subject: kho: warn if KHO is disabled due to an error
+
+From: Pasha Tatashin <pasha.tatashin@soleen.com>
+
+commit 44958f2025ed3f29fc3e93bb1f6c16121d7847ad upstream.
+
+During boot, the scratch area is allocated based on command line
+parameters or is auto-calculated. However, the scratch area may fail to
+allocate, and in that case KHO is disabled. Currently, no warning is
+printed that KHO is disabled, which makes it confusing for the end user
+to figure out why KHO is not available. Add the missing warning message.
+
+Link: https://lkml.kernel.org/r/20250808201804.772010-4-pasha.tatashin@soleen.com
+Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
+Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
+Acked-by: Pratyush Yadav <pratyush@kernel.org>
+Cc: Alexander Graf <graf@amazon.com>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: Baoquan He <bhe@redhat.com>
+Cc: Changyuan Lyu <changyuanl@google.com>
+Cc: Coiby Xu <coxu@redhat.com>
+Cc: Dave Vasilevsky <dave@vasilevsky.ca>
+Cc: Eric Biggers <ebiggers@google.com>
+Cc: Kees Cook <kees@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/kexec_handover.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/kernel/kexec_handover.c b/kernel/kexec_handover.c
+index 65145972d6d6..ecd1ac210dbd 100644
+--- a/kernel/kexec_handover.c
++++ b/kernel/kexec_handover.c
+@@ -564,6 +564,7 @@ err_free_scratch_areas:
+ err_free_scratch_desc:
+ memblock_free(kho_scratch, kho_scratch_cnt * sizeof(*kho_scratch));
+ err_disable_kho:
++ pr_warn("Failed to reserve scratch area, disabling kexec handover\n");
+ kho_enable = false;
+ }
+
+--
+2.50.1
+
--- /dev/null
+From 99d7ab8db9d8230b243f5ed20ba0229e54cc0dfa Mon Sep 17 00:00:00 2001
+From: Jiayi Li <lijiayi@kylinos.cn>
+Date: Mon, 4 Aug 2025 09:36:04 +0800
+Subject: memstick: Fix deadlock by moving removing flag earlier
+
+From: Jiayi Li <lijiayi@kylinos.cn>
+
+commit 99d7ab8db9d8230b243f5ed20ba0229e54cc0dfa upstream.
+
+The existing memstick core patch, commit 62c59a8786e6 ("memstick: Skip
+allocating card when removing host"), sets host->removing in
+memstick_remove_host(), but there still exists a critical time window
+where memstick_check() can run after host->eject is set but before
+removing is set.
+
+In the rtsx_usb_ms driver, the problematic sequence is:
+
+rtsx_usb_ms_drv_remove: memstick_check:
+ host->eject = true
+ cancel_work_sync(handle_req) if(!host->removing)
+ ... memstick_alloc_card()
+ memstick_set_rw_addr()
+ memstick_new_req()
+ rtsx_usb_ms_request()
+ if(!host->eject)
+ skip schedule_work
+ wait_for_completion()
+ memstick_remove_host: [blocks indefinitely]
+ host->removing = true
+ flush_workqueue()
+ [block]
+
+1. rtsx_usb_ms_drv_remove sets host->eject = true
+2. cancel_work_sync(&host->handle_req) runs
+3. memstick_check work may be executed here <-- danger window
+4. memstick_remove_host sets removing = 1
+
+During this window (step 3), memstick_check() calls memstick_alloc_card(),
+which may wait indefinitely for a mrq_complete completion that will
+never occur: rtsx_usb_ms_request() sees eject=true and skips scheduling
+the work, so memstick_set_rw_addr() waits forever for the completion.
+
+This causes a deadlock when memstick_remove_host tries to flush_workqueue,
+waiting for memstick_check to complete, while memstick_check is blocked
+waiting for mrq_complete completion.
+
+Fix this by setting removing=true at the start of rtsx_usb_ms_drv_remove,
+before any work cancellation. This ensures memstick_check will see the
+removing flag immediately and exit early, avoiding the deadlock.
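+
+The key is the ordering: publish the flag before any work cancellation
+or flush, as in this condensed sketch of the resulting remove path
+(names as in the diff below):
+
+        host->eject = true;
+        msh->removing = true;   /* memstick_check() now bails out early */
+        cancel_work_sync(&host->handle_req);
+        cancel_delayed_work_sync(&host->poll_card);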
+
+Fixes: 62c59a8786e6 ("memstick: Skip allocating card when removing host")
+Signed-off-by: Jiayi Li <lijiayi@kylinos.cn>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20250804013604.1311218-1-lijiayi@kylinos.cn
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/memstick/core/memstick.c | 1 -
+ drivers/memstick/host/rtsx_usb_ms.c | 1 +
+ 2 files changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/memstick/core/memstick.c
++++ b/drivers/memstick/core/memstick.c
+@@ -555,7 +555,6 @@ EXPORT_SYMBOL(memstick_add_host);
+ */
+ void memstick_remove_host(struct memstick_host *host)
+ {
+- host->removing = 1;
+ flush_workqueue(workqueue);
+ mutex_lock(&host->lock);
+ if (host->card)
+--- a/drivers/memstick/host/rtsx_usb_ms.c
++++ b/drivers/memstick/host/rtsx_usb_ms.c
+@@ -812,6 +812,7 @@ static void rtsx_usb_ms_drv_remove(struc
+ int err;
+
+ host->eject = true;
++ msh->removing = true;
+ cancel_work_sync(&host->handle_req);
+ cancel_delayed_work_sync(&host->poll_card);
+
--- /dev/null
+From b3dee902b6c26b7d8031a4df19753e27dcfcba01 Mon Sep 17 00:00:00 2001
+From: Sang-Heon Jeon <ekffu200098@gmail.com>
+Date: Sat, 16 Aug 2025 10:51:16 +0900
+Subject: mm/damon/core: fix damos_commit_filter not changing allow
+
+From: Sang-Heon Jeon <ekffu200098@gmail.com>
+
+commit b3dee902b6c26b7d8031a4df19753e27dcfcba01 upstream.
+
+The current damos_commit_filter() does not persist the `allow' value of
+the filter. As a result, changing the `allow' value of a filter and
+committing doesn't actually change the value.
+
+Add the missing `allow' value update, so that committing the filter
+properly changes the `allow' value.
+
+Link: https://lkml.kernel.org/r/20250816015116.194589-1-ekffu200098@gmail.com
+Fixes: fe6d7fdd6249 ("mm/damon/core: add damos_filter->allow field")
+Signed-off-by: Sang-Heon Jeon <ekffu200098@gmail.com>
+Reviewed-by: SeongJae Park <sj@kernel.org>
+Cc: <stable@vger.kernel.org> [6.14.x]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/core.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -881,6 +881,7 @@ static void damos_commit_filter(
+ {
+ dst->type = src->type;
+ dst->matching = src->matching;
++ dst->allow = src->allow;
+ damos_commit_filter_arg(dst, src);
+ }
+
--- /dev/null
+From dde30854bddfb5d69f30022b53c5955a41088b33 Mon Sep 17 00:00:00 2001
+From: "Herton R. Krzesinski" <herton@redhat.com>
+Date: Thu, 31 Jul 2025 18:40:51 -0300
+Subject: mm/debug_vm_pgtable: clear page table entries at destroy_args()
+
+From: Herton R. Krzesinski <herton@redhat.com>
+
+commit dde30854bddfb5d69f30022b53c5955a41088b33 upstream.
+
+The mm/debug_vm_pgtable test manually allocates page table entries for
+the tests it runs, also using its manually allocated mm_struct. That in
+itself is OK, but when it exits, at destroy_args(), it fails to clear
+those entries with the *_clear functions.
+
+The problem is that this leaves stale entries. If another process
+allocates an mm_struct with a pgd at the same address, it may end up
+running into the stale entry. This is happening in practice on a debug
+kernel with CONFIG_DEBUG_VM_PGTABLE=y; for example, this is the output
+with some extra debugging I added (it prints a warning trace if
+pgtables_bytes goes negative, in addition to the warning at the
+check_mm() function):
+
+[ 2.539353] debug_vm_pgtable: [get_random_vaddr ]: random_vaddr is 0x7ea247140000
+[ 2.539366] kmem_cache info
+[ 2.539374] kmem_cachep 0x000000002ce82385 - freelist 0x0000000000000000 - offset 0x508
+[ 2.539447] debug_vm_pgtable: [init_args ]: args->mm is 0x000000002267cc9e
+(...)
+[ 2.552800] WARNING: CPU: 5 PID: 116 at include/linux/mm.h:2841 free_pud_range+0x8bc/0x8d0
+[ 2.552816] Modules linked in:
+[ 2.552843] CPU: 5 UID: 0 PID: 116 Comm: modprobe Not tainted 6.12.0-105.debug_vm2.el10.ppc64le+debug #1 VOLUNTARY
+[ 2.552859] Hardware name: IBM,9009-41A POWER9 (architected) 0x4e0202 0xf000005 of:IBM,FW910.00 (VL910_062) hv:phyp pSeries
+[ 2.552872] NIP: c0000000007eef3c LR: c0000000007eef30 CTR: c0000000003d8c90
+[ 2.552885] REGS: c0000000622e73b0 TRAP: 0700 Not tainted (6.12.0-105.debug_vm2.el10.ppc64le+debug)
+[ 2.552899] MSR: 800000000282b033 <SF,VEC,VSX,EE,FP,ME,IR,DR,RI,LE> CR: 24002822 XER: 0000000a
+[ 2.552954] CFAR: c0000000008f03f0 IRQMASK: 0
+[ 2.552954] GPR00: c0000000007eef30 c0000000622e7650 c000000002b1ac00 0000000000000001
+[ 2.552954] GPR04: 0000000000000008 0000000000000000 c0000000007eef30 ffffffffffffffff
+[ 2.552954] GPR08: 00000000ffff00f5 0000000000000001 0000000000000048 0000000000004000
+[ 2.552954] GPR12: 00000003fa440000 c000000017ffa300 c0000000051d9f80 ffffffffffffffdb
+[ 2.552954] GPR16: 0000000000000000 0000000000000008 000000000000000a 60000000000000e0
+[ 2.552954] GPR20: 4080000000000000 c0000000113af038 00007fffcf130000 0000700000000000
+[ 2.552954] GPR24: c000000062a6a000 0000000000000001 8000000062a68000 0000000000000001
+[ 2.552954] GPR28: 000000000000000a c000000062ebc600 0000000000002000 c000000062ebc760
+[ 2.553170] NIP [c0000000007eef3c] free_pud_range+0x8bc/0x8d0
+[ 2.553185] LR [c0000000007eef30] free_pud_range+0x8b0/0x8d0
+[ 2.553199] Call Trace:
+[ 2.553207] [c0000000622e7650] [c0000000007eef30] free_pud_range+0x8b0/0x8d0 (unreliable)
+[ 2.553229] [c0000000622e7750] [c0000000007f40b4] free_pgd_range+0x284/0x3b0
+[ 2.553248] [c0000000622e7800] [c0000000007f4630] free_pgtables+0x450/0x570
+[ 2.553274] [c0000000622e78e0] [c0000000008161c0] exit_mmap+0x250/0x650
+[ 2.553292] [c0000000622e7a30] [c0000000001b95b8] __mmput+0x98/0x290
+[ 2.558344] [c0000000622e7a80] [c0000000001d1018] exit_mm+0x118/0x1b0
+[ 2.558361] [c0000000622e7ac0] [c0000000001d141c] do_exit+0x2ec/0x870
+[ 2.558376] [c0000000622e7b60] [c0000000001d1ca8] do_group_exit+0x88/0x150
+[ 2.558391] [c0000000622e7bb0] [c0000000001d1db8] sys_exit_group+0x48/0x50
+[ 2.558407] [c0000000622e7be0] [c00000000003d810] system_call_exception+0x1e0/0x4c0
+[ 2.558423] [c0000000622e7e50] [c00000000000d05c] system_call_vectored_common+0x15c/0x2ec
+(...)
+[ 2.558892] ---[ end trace 0000000000000000 ]---
+[ 2.559022] BUG: Bad rss-counter state mm:000000002267cc9e type:MM_ANONPAGES val:1
+[ 2.559037] BUG: non-zero pgtables_bytes on freeing mm: -6144
+
+Here the modprobe process ended up with an mm_struct allocated from the
+mm_struct slab that was used before by the debug_vm_pgtable test. That
+is not a problem in itself, since the mm_struct is initialized again
+etc.; however, if it ends up using the same pgd table, it bumps into
+the old stale entry when clearing/freeing the page table entries, and
+tries to free an entry that is already gone (the one allocated by the
+debug_vm_pgtable test). This also explains the negative pgtables_bytes,
+since the current process is accounting for entries it never allocated.
+
+As far as I looked, pgd_{alloc,free} etc. do not clear entries;
+clearing of the entries is explicitly done in the free_pgtables->
+free_pgd_range->free_p4d_range->free_pud_range->free_pmd_range->
+free_pte_range path. However, the debug_vm_pgtable test does not call
+free_pgtables(), since it allocates the mm_struct and entries manually
+for its test and, e.g., does not go through page faults. So it should
+also clear the entries manually before exiting, at destroy_args().
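+
+Concretely, each level's *_clear pairs with the free of the table one
+level below, as the diff below does; e.g. for the PTE level:
+
+        pmd_clear(args->pmdp);                  /* drop the stale pmd entry */
+        pte_free(args->mm, args->start_ptep);   /* then free the PTE table */
+        mm_dec_nr_ptes(args->mm);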
+
+This problem was noticed in a repeated-reboot test being done on a
+powerpc host, with a debug kernel with CONFIG_DEBUG_VM_PGTABLE enabled.
+It depends on the system, but in a 100-reboot loop the problem could
+manifest once or twice, if a process ends up getting the right mm->pgd
+entry with the stale entries used by mm/debug_vm_pgtable. After
+applying this patch, I couldn't reproduce/experience the problems
+anymore. I was also able to reproduce the problem on the latest
+upstream kernel (6.16).
+
+I also modified destroy_args() to use mmput() instead of mmdrop():
+there is no reason to hold an mm_users reference and not release the
+mm_struct entirely. In the output above, with my debugging prints, I
+had already patched it to use mmput(); it did not fix the problem, but
+helped in the debugging as well.
+
+Link: https://lkml.kernel.org/r/20250731214051.4115182-1-herton@redhat.com
+Fixes: 3c9b84f044a9 ("mm/debug_vm_pgtable: introduce struct pgtable_debug_args")
+Signed-off-by: Herton R. Krzesinski <herton@redhat.com>
+Cc: Anshuman Khandual <anshuman.khandual@arm.com>
+Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
+Cc: Gavin Shan <gshan@redhat.com>
+Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/debug_vm_pgtable.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/mm/debug_vm_pgtable.c
++++ b/mm/debug_vm_pgtable.c
+@@ -1041,29 +1041,34 @@ static void __init destroy_args(struct p
+
+ /* Free page table entries */
+ if (args->start_ptep) {
++ pmd_clear(args->pmdp);
+ pte_free(args->mm, args->start_ptep);
+ mm_dec_nr_ptes(args->mm);
+ }
+
+ if (args->start_pmdp) {
++ pud_clear(args->pudp);
+ pmd_free(args->mm, args->start_pmdp);
+ mm_dec_nr_pmds(args->mm);
+ }
+
+ if (args->start_pudp) {
++ p4d_clear(args->p4dp);
+ pud_free(args->mm, args->start_pudp);
+ mm_dec_nr_puds(args->mm);
+ }
+
+- if (args->start_p4dp)
++ if (args->start_p4dp) {
++ pgd_clear(args->pgdp);
+ p4d_free(args->mm, args->start_p4dp);
++ }
+
+ /* Free vma and mm struct */
+ if (args->vma)
+ vm_area_free(args->vma);
+
+ if (args->mm)
+- mmdrop(args->mm);
++ mmput(args->mm);
+ }
+
+ static struct page * __init
--- /dev/null
+From 2e6053fea379806269c4f7f5e36b523c9c0fb35c Mon Sep 17 00:00:00 2001
+From: Jinjiang Tu <tujinjiang@huawei.com>
+Date: Fri, 15 Aug 2025 15:32:09 +0800
+Subject: mm/memory-failure: fix infinite UCE for VM_PFNMAP pfn
+
+From: Jinjiang Tu <tujinjiang@huawei.com>
+
+commit 2e6053fea379806269c4f7f5e36b523c9c0fb35c upstream.
+
+When memory_failure() is called for an already hwpoisoned pfn,
+kill_accessing_process() will be called to kill the current task.
+However, if the vma of the accessed vaddr is VM_PFNMAP,
+walk_page_range() will skip the vma in walk_page_test() and return 0.
+
+Before commit aaf99ac2ceb7 ("mm/hwpoison: do not send SIGBUS to processes
+with recovered clean pages"), kill_accessing_process() would return
+EFAULT. For x86, the current task would then be killed in kill_me_maybe().
+
+However, after this commit, kill_accessing_process() simply returns 0,
+which claims the UCE was handled properly even though it actually
+wasn't. In such a case, the user task will trigger the UCE infinitely.
+
+To fix it, add a .test_walk callback for hwpoison_walk_ops that returns
+0 for every vma, so the walker scans all vmas (including VM_PFNMAP
+ones) instead of skipping VM_PFNMAP vmas by default.
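+
+For context, this is roughly the default behaviour in walk_page_test()
+(condensed from mm/pagewalk.c) that the new callback overrides:
+
+        if (ops->test_walk)
+                return ops->test_walk(start, end, walk);
+
+        if (vma->vm_flags & VM_PFNMAP) {
+                int err = 1;
+
+                if (ops->pte_hole)
+                        err = ops->pte_hole(start, end, -1, walk);
+                return err ? err : 1;   /* non-zero: skip this vma */
+        }
+        return 0;                       /* 0: walk this vma */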
+
+Link: https://lkml.kernel.org/r/20250815073209.1984582-1-tujinjiang@huawei.com
+Fixes: aaf99ac2ceb7 ("mm/hwpoison: do not send SIGBUS to processes with recovered clean pages")
+Signed-off-by: Jinjiang Tu <tujinjiang@huawei.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Acked-by: Miaohe Lin <linmiaohe@huawei.com>
+Reviewed-by: Jane Chu <jane.chu@oracle.com>
+Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
+Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
+Cc: Oscar Salvador <osalvador@suse.de>
+Cc: Shuai Xue <xueshuai@linux.alibaba.com>
+Cc: Zi Yan <ziy@nvidia.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/memory-failure.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -847,9 +847,17 @@ static int hwpoison_hugetlb_range(pte_t
+ #define hwpoison_hugetlb_range NULL
+ #endif
+
++static int hwpoison_test_walk(unsigned long start, unsigned long end,
++ struct mm_walk *walk)
++{
++ /* We also want to consider pages mapped into VM_PFNMAP. */
++ return 0;
++}
++
+ static const struct mm_walk_ops hwpoison_walk_ops = {
+ .pmd_entry = hwpoison_pte_range,
+ .hugetlb_entry = hwpoison_hugetlb_range,
++ .test_walk = hwpoison_test_walk,
+ .walk_lock = PGWALK_RDLOCK,
+ };
+
--- /dev/null
+From 772e5b4a5e8360743645b9a466842d16092c4f94 Mon Sep 17 00:00:00 2001
+From: David Hildenbrand <david@redhat.com>
+Date: Mon, 18 Aug 2025 19:53:58 +0200
+Subject: mm/mremap: fix WARN with uffd that has remap events disabled
+
+From: David Hildenbrand <david@redhat.com>
+
+commit 772e5b4a5e8360743645b9a466842d16092c4f94 upstream.
+
+Registering userfaultfd on a VMA that spans at least one PMD and then
+mremap()'ing that VMA can trigger a WARN when recovering from a failed
+page table move due to a page table allocation error.
+
+The code ends up doing the right thing (recurse, avoiding moving actual
+page tables), but triggering that WARN is unpleasant:
+
+WARNING: CPU: 2 PID: 6133 at mm/mremap.c:357 move_normal_pmd mm/mremap.c:357 [inline]
+WARNING: CPU: 2 PID: 6133 at mm/mremap.c:357 move_pgt_entry mm/mremap.c:595 [inline]
+WARNING: CPU: 2 PID: 6133 at mm/mremap.c:357 move_page_tables+0x3832/0x44a0 mm/mremap.c:852
+Modules linked in:
+CPU: 2 UID: 0 PID: 6133 Comm: syz.0.19 Not tainted 6.17.0-rc1-syzkaller-00004-g53e760d89498 #0 PREEMPT(full)
+Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.3-debian-1.16.3-2~bpo12+1 04/01/2014
+RIP: 0010:move_normal_pmd mm/mremap.c:357 [inline]
+RIP: 0010:move_pgt_entry mm/mremap.c:595 [inline]
+RIP: 0010:move_page_tables+0x3832/0x44a0 mm/mremap.c:852
+Code: ...
+RSP: 0018:ffffc900037a76d8 EFLAGS: 00010293
+RAX: 0000000000000000 RBX: 0000000032930007 RCX: ffffffff820c6645
+RDX: ffff88802e56a440 RSI: ffffffff820c7201 RDI: 0000000000000007
+RBP: ffff888037728fc0 R08: 0000000000000007 R09: 0000000000000000
+R10: 0000000032930007 R11: 0000000000000000 R12: 0000000000000000
+R13: ffffc900037a79a8 R14: 0000000000000001 R15: dffffc0000000000
+FS: 000055556316a500(0000) GS:ffff8880d68bc000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 0000001b30863fff CR3: 0000000050171000 CR4: 0000000000352ef0
+Call Trace:
+ <TASK>
+ copy_vma_and_data+0x468/0x790 mm/mremap.c:1215
+ move_vma+0x548/0x1780 mm/mremap.c:1282
+ mremap_to+0x1b7/0x450 mm/mremap.c:1406
+ do_mremap+0xfad/0x1f80 mm/mremap.c:1921
+ __do_sys_mremap+0x119/0x170 mm/mremap.c:1977
+ do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
+ do_syscall_64+0xcd/0x4c0 arch/x86/entry/syscall_64.c:94
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+RIP: 0033:0x7f00d0b8ebe9
+Code: ...
+RSP: 002b:00007ffe5ea5ee98 EFLAGS: 00000246 ORIG_RAX: 0000000000000019
+RAX: ffffffffffffffda RBX: 00007f00d0db5fa0 RCX: 00007f00d0b8ebe9
+RDX: 0000000000400000 RSI: 0000000000c00000 RDI: 0000200000000000
+RBP: 00007ffe5ea5eef0 R08: 0000200000c00000 R09: 0000000000000000
+R10: 0000000000000003 R11: 0000000000000246 R12: 0000000000000002
+R13: 00007f00d0db5fa0 R14: 00007f00d0db5fa0 R15: 0000000000000005
+ </TASK>
+
+The underlying issue is that we recurse during the original page table
+move, but not during the recovery move.
+
+Fix it by checking for both VMAs and performing the check before the
+pmd_none() sanity check.
+
+Add a new helper where we perform+document that check for the PMD and PUD
+level.
+
+Thanks to Harry for bisecting.
+
+Link: https://lkml.kernel.org/r/20250818175358.1184757-1-david@redhat.com
+Fixes: 0cef0bb836e3 ("mm: clear uffd-wp PTE/PMD state on mremap()")
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Reported-by: syzbot+4d9a13f0797c46a29e42@syzkaller.appspotmail.com
+Closes: https://lkml.kernel.org/r/689bb893.050a0220.7f033.013a.GAE@google.com
+Tested-by: Harry Yoo <harry.yoo@oracle.com>
+Cc: "Liam R. Howlett" <Liam.Howlett@oracle.com>
+Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Jann Horn <jannh@google.com>
+Cc: Pedro Falcato <pfalcato@suse.de>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/mremap.c | 41 +++++++++++++++++++++++------------------
+ 1 file changed, 23 insertions(+), 18 deletions(-)
+
+--- a/mm/mremap.c
++++ b/mm/mremap.c
+@@ -294,6 +294,25 @@ static inline bool arch_supports_page_ta
+ }
+ #endif
+
++static inline bool uffd_supports_page_table_move(struct pagetable_move_control *pmc)
++{
++ /*
++ * If we are moving a VMA that has uffd-wp registered but with
++ * remap events disabled (new VMA will not be registered with uffd), we
++ * need to ensure that the uffd-wp state is cleared from all pgtables.
++ * This means recursing into lower page tables in move_page_tables().
++ *
++ * We might get called with VMAs reversed when recovering from a
++ * failed page table move. In that case, the
++ * "old"-but-actually-"originally new" VMA during recovery will not have
++ * a uffd context. Recursing into lower page tables during the original
++ * move but not during the recovery move will cause trouble, because we
++ * run into already-existing page tables. So check both VMAs.
++ */
++ return !vma_has_uffd_without_event_remap(pmc->old) &&
++ !vma_has_uffd_without_event_remap(pmc->new);
++}
++
+ #ifdef CONFIG_HAVE_MOVE_PMD
+ static bool move_normal_pmd(struct pagetable_move_control *pmc,
+ pmd_t *old_pmd, pmd_t *new_pmd)
+@@ -306,6 +325,8 @@ static bool move_normal_pmd(struct paget
+
+ if (!arch_supports_page_table_move())
+ return false;
++ if (!uffd_supports_page_table_move(pmc))
++ return false;
+ /*
+ * The destination pmd shouldn't be established, free_pgtables()
+ * should have released it.
+@@ -332,15 +353,6 @@ static bool move_normal_pmd(struct paget
+ if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
+ return false;
+
+- /* If this pmd belongs to a uffd vma with remap events disabled, we need
+- * to ensure that the uffd-wp state is cleared from all pgtables. This
+- * means recursing into lower page tables in move_page_tables(), and we
+- * can reuse the existing code if we simply treat the entry as "not
+- * moved".
+- */
+- if (vma_has_uffd_without_event_remap(vma))
+- return false;
+-
+ /*
+ * We don't have to worry about the ordering of src and dst
+ * ptlocks because exclusive mmap_lock prevents deadlock.
+@@ -389,6 +401,8 @@ static bool move_normal_pud(struct paget
+
+ if (!arch_supports_page_table_move())
+ return false;
++ if (!uffd_supports_page_table_move(pmc))
++ return false;
+ /*
+ * The destination pud shouldn't be established, free_pgtables()
+ * should have released it.
+@@ -396,15 +410,6 @@ static bool move_normal_pud(struct paget
+ if (WARN_ON_ONCE(!pud_none(*new_pud)))
+ return false;
+
+- /* If this pud belongs to a uffd vma with remap events disabled, we need
+- * to ensure that the uffd-wp state is cleared from all pgtables. This
+- * means recursing into lower page tables in move_page_tables(), and we
+- * can reuse the existing code if we simply treat the entry as "not
+- * moved".
+- */
+- if (vma_has_uffd_without_event_remap(vma))
+- return false;
+-
+ /*
+ * We don't have to worry about the ordering of src and dst
+ * ptlocks because exclusive mmap_lock prevents deadlock.
--- /dev/null
+From 340be332e420ed37d15d4169a1b4174e912ad6cb Mon Sep 17 00:00:00 2001
+From: Victor Shih <victor.shih@genesyslogic.com.tw>
+Date: Thu, 31 Jul 2025 14:57:52 +0800
+Subject: mmc: sdhci-pci-gli: GL9763e: Mask the replay timer timeout of AER
+
+From: Victor Shih <victor.shih@genesyslogic.com.tw>
+
+commit 340be332e420ed37d15d4169a1b4174e912ad6cb upstream.
+
+Due to a flaw in the hardware design, the GL9763e replay timer frequently
+times out when ASPM is enabled. As a result, warning messages will
+often appear in the system log when the system accesses the GL9763e
+PCI config. Therefore, the replay timer timeout must be masked.
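+
+The sdhci_gli_mask_replay_timer_timeout() helper used below is added
+by the companion patch "mmc: sdhci-pci-gli: Add a new function to
+simplify the code" in this series. Masking a replay timer timeout is
+typically done via the standard AER correctable-error mask register;
+a sketch of the assumed shape (not the exact helper body):
+
+        int aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
+        u32 value;
+
+        if (aer) {
+                pci_read_config_dword(pdev, aer + PCI_ERR_COR_MASK, &value);
+                value |= PCI_ERR_COR_REP_TIMER;
+                pci_write_config_dword(pdev, aer + PCI_ERR_COR_MASK, &value);
+        }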
+
+Signed-off-by: Victor Shih <victor.shih@genesyslogic.com.tw>
+Fixes: 1ae1d2d6e555 ("mmc: sdhci-pci-gli: Add Genesys Logic GL9763E support")
+Cc: stable@vger.kernel.org
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Link: https://lore.kernel.org/r/20250731065752.450231-4-victorshihgli@gmail.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/host/sdhci-pci-gli.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/mmc/host/sdhci-pci-gli.c
++++ b/drivers/mmc/host/sdhci-pci-gli.c
+@@ -1782,6 +1782,9 @@ static void gli_set_gl9763e(struct sdhci
+ value |= FIELD_PREP(GLI_9763E_HS400_RXDLY, GLI_9763E_HS400_RXDLY_5);
+ pci_write_config_dword(pdev, PCIE_GLI_9763E_CLKRXDLY, value);
+
++ /* mask the replay timer timeout of AER */
++ sdhci_gli_mask_replay_timer_timeout(pdev);
++
+ pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
+ value &= ~GLI_9763E_VHS_REV;
+ value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R);
--- /dev/null
+From 293ed0f5f34e1e9df888456af4b0a021f57b5f54 Mon Sep 17 00:00:00 2001
+From: Victor Shih <victor.shih@genesyslogic.com.tw>
+Date: Thu, 31 Jul 2025 14:57:51 +0800
+Subject: mmc: sdhci-pci-gli: GL9763e: Rename the gli_set_gl9763e() for consistency
+
+From: Victor Shih <victor.shih@genesyslogic.com.tw>
+
+commit 293ed0f5f34e1e9df888456af4b0a021f57b5f54 upstream.
+
+In preparation for fixing the replay timer timeout, rename
+gli_set_gl9763e() to gl9763e_hw_setting() for consistency.
+
+Signed-off-by: Victor Shih <victor.shih@genesyslogic.com.tw>
+Fixes: 1ae1d2d6e555 ("mmc: sdhci-pci-gli: Add Genesys Logic GL9763E support")
+Cc: stable@vger.kernel.org
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Link: https://lore.kernel.org/r/20250731065752.450231-3-victorshihgli@gmail.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/host/sdhci-pci-gli.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/mmc/host/sdhci-pci-gli.c
++++ b/drivers/mmc/host/sdhci-pci-gli.c
+@@ -1753,7 +1753,7 @@ cleanup:
+ return ret;
+ }
+
+-static void gli_set_gl9763e(struct sdhci_pci_slot *slot)
++static void gl9763e_hw_setting(struct sdhci_pci_slot *slot)
+ {
+ struct pci_dev *pdev = slot->chip->pdev;
+ u32 value;
+@@ -1928,7 +1928,7 @@ static int gli_probe_slot_gl9763e(struct
+ gli_pcie_enable_msi(slot);
+ host->mmc_host_ops.hs400_enhanced_strobe =
+ gl9763e_hs400_enhanced_strobe;
+- gli_set_gl9763e(slot);
++ gl9763e_hw_setting(slot);
+ sdhci_enable_v4_mode(host);
+
+ return 0;
--- /dev/null
+From d2d7a96b29ea6ab093973a1a37d26126db70c79f Mon Sep 17 00:00:00 2001
+From: Judith Mendez <jm@ti.com>
+Date: Wed, 20 Aug 2025 14:30:47 -0500
+Subject: mmc: sdhci_am654: Disable HS400 for AM62P SR1.0 and SR1.1
+
+From: Judith Mendez <jm@ti.com>
+
+commit d2d7a96b29ea6ab093973a1a37d26126db70c79f upstream.
+
+This adds the SDHCI_AM654_QUIRK_DISABLE_HS400 quirk, which is used
+to disable HS400 support. AM62P SR1.0 and SR1.1 do not support HS400
+due to erratum i2458 [0], so disable HS400 for these SoC revisions.
+
+[0] https://www.ti.com/lit/er/sprz574a/sprz574a.pdf
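+
+Gating on the silicon revision uses the SoC bus: soc_device_match()
+returns the first matching entry (or NULL if none matches), so the
+match table in the diff below simply lists the affected revisions:
+
+        static const struct soc_device_attribute sdhci_am654_descope_hs400[] = {
+                { .family = "AM62PX", .revision = "SR1.0" },
+                { .family = "AM62PX", .revision = "SR1.1" },
+                { /* sentinel */ }
+        };
+
+        if (soc_device_match(sdhci_am654_descope_hs400))
+                sdhci_am654->quirks |= SDHCI_AM654_QUIRK_DISABLE_HS400;
+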
+Fixes: 37f28165518f ("arm64: dts: ti: k3-am62p: Add ITAP/OTAP values for MMC")
+Cc: stable@vger.kernel.org
+Signed-off-by: Judith Mendez <jm@ti.com>
+Reviewed-by: Andrew Davis <afd@ti.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Link: https://lore.kernel.org/r/20250820193047.4064142-1-jm@ti.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/host/sdhci_am654.c | 18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+--- a/drivers/mmc/host/sdhci_am654.c
++++ b/drivers/mmc/host/sdhci_am654.c
+@@ -156,6 +156,7 @@ struct sdhci_am654_data {
+
+ #define SDHCI_AM654_QUIRK_FORCE_CDTEST BIT(0)
+ #define SDHCI_AM654_QUIRK_SUPPRESS_V1P8_ENA BIT(1)
++#define SDHCI_AM654_QUIRK_DISABLE_HS400 BIT(2)
+ };
+
+ struct window {
+@@ -765,6 +766,7 @@ static int sdhci_am654_init(struct sdhci
+ {
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
++ struct device *dev = mmc_dev(host->mmc);
+ u32 ctl_cfg_2 = 0;
+ u32 mask;
+ u32 val;
+@@ -820,6 +822,12 @@ static int sdhci_am654_init(struct sdhci
+ if (ret)
+ goto err_cleanup_host;
+
++ if (sdhci_am654->quirks & SDHCI_AM654_QUIRK_DISABLE_HS400 &&
++ host->mmc->caps2 & (MMC_CAP2_HS400 | MMC_CAP2_HS400_ES)) {
++ dev_info(dev, "HS400 mode not supported on this silicon revision, disabling it\n");
++ host->mmc->caps2 &= ~(MMC_CAP2_HS400 | MMC_CAP2_HS400_ES);
++ }
++
+ ret = __sdhci_add_host(host);
+ if (ret)
+ goto err_cleanup_host;
+@@ -883,6 +891,12 @@ static int sdhci_am654_get_of_property(s
+ return 0;
+ }
+
++static const struct soc_device_attribute sdhci_am654_descope_hs400[] = {
++ { .family = "AM62PX", .revision = "SR1.0" },
++ { .family = "AM62PX", .revision = "SR1.1" },
++ { /* sentinel */ }
++};
++
+ static const struct of_device_id sdhci_am654_of_match[] = {
+ {
+ .compatible = "ti,am654-sdhci-5.1",
+@@ -975,6 +989,10 @@ static int sdhci_am654_probe(struct plat
+ goto err_pltfm_free;
+ }
+
++ soc = soc_device_match(sdhci_am654_descope_hs400);
++ if (soc)
++ sdhci_am654->quirks |= SDHCI_AM654_QUIRK_DISABLE_HS400;
++
+ host->mmc_host_ops.start_signal_voltage_switch = sdhci_am654_start_signal_voltage_switch;
+ host->mmc_host_ops.execute_tuning = sdhci_am654_execute_tuning;
+
--- /dev/null
+From 76d2e3890fb169168c73f2e4f8375c7cc24a765e Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+Date: Sat, 16 Aug 2025 07:25:20 -0700
+Subject: NFS: Fix a race when updating an existing write
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+commit 76d2e3890fb169168c73f2e4f8375c7cc24a765e upstream.
+
+After nfs_lock_and_join_requests() tests whether the request is
+still attached to the mapping, nothing prevents a call to
+nfs_inode_remove_request() from succeeding until we actually lock the
+page group.
+The reason is that whoever called nfs_inode_remove_request() doesn't
+necessarily have a lock on the page group head.
+
+So in order to avoid races, let's take the page group lock earlier in
+nfs_lock_and_join_requests(), and hold it across the removal of the
+request in nfs_inode_remove_request().
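+
+The fix follows the usual lock-then-revalidate pattern: take the page
+group lock first, then re-check that the request is still attached,
+instead of checking first and locking later. Condensed from the diff
+below:
+
+        ret = nfs_page_group_lock(head);
+        if (ret < 0)
+                goto out_unlock;
+
+        /* Ensure that nobody removed the request before we locked it */
+        if (head != folio->private) {
+                nfs_page_group_unlock(head);
+                nfs_unlock_and_release_request(head);
+                goto retry;
+        }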
+
+Reported-by: Jeff Layton <jlayton@kernel.org>
+Tested-by: Joe Quanaim <jdq@meta.com>
+Tested-by: Andrew Steffen <aksteffen@meta.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Fixes: bd37d6fce184 ("NFSv4: Convert nfs_lock_and_join_requests() to use nfs_page_find_head_request()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfs/pagelist.c | 9 +++++----
+ fs/nfs/write.c | 29 ++++++++++-------------------
+ include/linux/nfs_page.h | 1 +
+ 3 files changed, 16 insertions(+), 23 deletions(-)
+
+--- a/fs/nfs/pagelist.c
++++ b/fs/nfs/pagelist.c
+@@ -253,13 +253,14 @@ nfs_page_group_unlock(struct nfs_page *r
+ nfs_page_clear_headlock(req);
+ }
+
+-/*
+- * nfs_page_group_sync_on_bit_locked
++/**
++ * nfs_page_group_sync_on_bit_locked - Test if all requests have @bit set
++ * @req: request in page group
++ * @bit: PG_* bit that is used to sync page group
+ *
+ * must be called with page group lock held
+ */
+-static bool
+-nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
++bool nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
+ {
+ struct nfs_page *head = req->wb_head;
+ struct nfs_page *tmp;
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -153,20 +153,10 @@ nfs_page_set_inode_ref(struct nfs_page *
+ }
+ }
+
+-static int
+-nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode)
++static void nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode)
+ {
+- int ret;
+-
+- if (!test_bit(PG_REMOVE, &req->wb_flags))
+- return 0;
+- ret = nfs_page_group_lock(req);
+- if (ret)
+- return ret;
+ if (test_and_clear_bit(PG_REMOVE, &req->wb_flags))
+ nfs_page_set_inode_ref(req, inode);
+- nfs_page_group_unlock(req);
+- return 0;
+ }
+
+ /**
+@@ -585,19 +575,18 @@ retry:
+ }
+ }
+
++ ret = nfs_page_group_lock(head);
++ if (ret < 0)
++ goto out_unlock;
++
+ /* Ensure that nobody removed the request before we locked it */
+ if (head != folio->private) {
++ nfs_page_group_unlock(head);
+ nfs_unlock_and_release_request(head);
+ goto retry;
+ }
+
+- ret = nfs_cancel_remove_inode(head, inode);
+- if (ret < 0)
+- goto out_unlock;
+-
+- ret = nfs_page_group_lock(head);
+- if (ret < 0)
+- goto out_unlock;
++ nfs_cancel_remove_inode(head, inode);
+
+ /* lock each request in the page group */
+ for (subreq = head->wb_this_page;
+@@ -786,7 +775,8 @@ static void nfs_inode_remove_request(str
+ {
+ struct nfs_inode *nfsi = NFS_I(nfs_page_to_inode(req));
+
+- if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
++ nfs_page_group_lock(req);
++ if (nfs_page_group_sync_on_bit_locked(req, PG_REMOVE)) {
+ struct folio *folio = nfs_page_to_folio(req->wb_head);
+ struct address_space *mapping = folio->mapping;
+
+@@ -798,6 +788,7 @@ static void nfs_inode_remove_request(str
+ }
+ spin_unlock(&mapping->i_private_lock);
+ }
++ nfs_page_group_unlock(req);
+
+ if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
+ atomic_long_dec(&nfsi->nrequests);
+--- a/include/linux/nfs_page.h
++++ b/include/linux/nfs_page.h
+@@ -160,6 +160,7 @@ extern void nfs_join_page_group(struct n
+ extern int nfs_page_group_lock(struct nfs_page *);
+ extern void nfs_page_group_unlock(struct nfs_page *);
+ extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
++extern bool nfs_page_group_sync_on_bit_locked(struct nfs_page *, unsigned int);
+ extern int nfs_page_set_headlock(struct nfs_page *req);
+ extern void nfs_page_clear_headlock(struct nfs_page *req);
+ extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *);
mm-damon-core-fix-commit_ops_filters-by-using-correct-nth-function.patch
mmc-sdhci-of-arasan-ensure-cd-logic-stabilization-before-power-up.patch
mmc-sdhci-pci-gli-add-a-new-function-to-simplify-the-code.patch
+kho-init-new_physxa-phys_bits-to-fix-lockdep.patch
+kho-mm-don-t-allow-deferred-struct-page-with-kho.patch
+kho-warn-if-kho-is-disabled-due-to-an-error.patch
+memstick-fix-deadlock-by-moving-removing-flag-earlier.patch
+mmc-sdhci-pci-gli-gl9763e-mask-the-replay-timer-timeout-of-aer.patch
+mmc-sdhci-pci-gli-gl9763e-rename-the-gli_set_gl9763e-for-consistency.patch
+mmc-sdhci_am654-disable-hs400-for-am62p-sr1.0-and-sr1.1.patch
+nfs-fix-a-race-when-updating-an-existing-write.patch
+squashfs-fix-memory-leak-in-squashfs_fill_super.patch
+mm-damon-core-fix-damos_commit_filter-not-changing-allow.patch
+mm-debug_vm_pgtable-clear-page-table-entries-at-destroy_args.patch
+mm-memory-failure-fix-infinite-uce-for-vm_pfnmap-pfn.patch
+mm-mremap-fix-warn-with-uffd-that-has-remap-events-disabled.patch
+alsa-hda-tas2781-fix-wrong-reference-of-tasdevice_priv.patch
+alsa-hda-realtek-add-support-for-hp-elitebook-x360-830-g6-and-elitebook-830-g6.patch
--- /dev/null
+From b64700d41bdc4e9f82f1346c15a3678ebb91a89c Mon Sep 17 00:00:00 2001
+From: Phillip Lougher <phillip@squashfs.org.uk>
+Date: Mon, 11 Aug 2025 23:37:40 +0100
+Subject: squashfs: fix memory leak in squashfs_fill_super
+
+From: Phillip Lougher <phillip@squashfs.org.uk>
+
+commit b64700d41bdc4e9f82f1346c15a3678ebb91a89c upstream.
+
+If sb_min_blocksize returns 0, squashfs_fill_super exits without freeing
+allocated memory (sb->s_fs_info).
+
+Fix this by moving the call to sb_min_blocksize to before memory is
+allocated.
+
+Link: https://lkml.kernel.org/r/20250811223740.110392-1-phillip@squashfs.org.uk
+Fixes: 734aa85390ea ("Squashfs: check return result of sb_min_blocksize")
+Signed-off-by: Phillip Lougher <phillip@squashfs.org.uk>
+Reported-by: Scott GUO <scottzhguo@tencent.com>
+Closes: https://lore.kernel.org/all/20250811061921.3807353-1-scott_gzh@163.com
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/squashfs/super.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+--- a/fs/squashfs/super.c
++++ b/fs/squashfs/super.c
+@@ -187,10 +187,15 @@ static int squashfs_fill_super(struct su
+ unsigned short flags;
+ unsigned int fragments;
+ u64 lookup_table_start, xattr_id_table_start, next_table;
+- int err;
++ int err, devblksize = sb_min_blocksize(sb, SQUASHFS_DEVBLK_SIZE);
+
+ TRACE("Entered squashfs_fill_superblock\n");
+
++ if (!devblksize) {
++ errorf(fc, "squashfs: unable to set blocksize\n");
++ return -EINVAL;
++ }
++
+ sb->s_fs_info = kzalloc(sizeof(*msblk), GFP_KERNEL);
+ if (sb->s_fs_info == NULL) {
+ ERROR("Failed to allocate squashfs_sb_info\n");
+@@ -201,12 +206,7 @@ static int squashfs_fill_super(struct su
+
+ msblk->panic_on_errors = (opts->errors == Opt_errors_panic);
+
+- msblk->devblksize = sb_min_blocksize(sb, SQUASHFS_DEVBLK_SIZE);
+- if (!msblk->devblksize) {
+- errorf(fc, "squashfs: unable to set blocksize\n");
+- return -EINVAL;
+- }
+-
++ msblk->devblksize = devblksize;
+ msblk->devblksize_log2 = ffz(~msblk->devblksize);
+
+ mutex_init(&msblk->meta_index_mutex);