--- /dev/null
+From 9158c6bb245113d4966df9b2ba602197a379412e Mon Sep 17 00:00:00 2001
+From: Zhen Ni <zhen.ni@easystack.cn>
+Date: Tue, 23 Sep 2025 15:51:04 +0800
+Subject: afs: Fix potential null pointer dereference in afs_put_server
+
+From: Zhen Ni <zhen.ni@easystack.cn>
+
+commit 9158c6bb245113d4966df9b2ba602197a379412e upstream.
+
+afs_put_server() accessed server->debug_id before the NULL check, which
+could lead to a null pointer dereference. Move the debug_id assignment
+to after the NULL check, ensuring we never dereference a NULL server
+pointer.
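+
+A minimal standalone sketch of the bug pattern (hypothetical names, not
+the afs code itself): an initializer in the declaration dereferences
+the pointer before any NULL check can run.
+
+	struct obj { unsigned int debug_id; };
+
+	unsigned int put_obj_buggy(struct obj *o)
+	{
+		unsigned int id = o->debug_id;	/* dereferences o here */
+
+		if (!o)				/* ...too late */
+			return 0;
+		return id;
+	}
+
+	unsigned int put_obj_fixed(struct obj *o)
+	{
+		unsigned int id;
+
+		if (!o)
+			return 0;
+
+		id = o->debug_id;	/* o is known to be non-NULL */
+		return id;
+	}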
+
+Fixes: 2757a4dc1849 ("afs: Fix access after dec in put functions")
+Cc: stable@vger.kernel.org
+Signed-off-by: Zhen Ni <zhen.ni@easystack.cn>
+Acked-by: David Howells <dhowells@redhat.com>
+Reviewed-by: Jeffrey Altman <jaltman@auristor.com>
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/afs/server.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/afs/server.c
++++ b/fs/afs/server.c
+@@ -331,13 +331,14 @@ struct afs_server *afs_use_server(struct
+ void afs_put_server(struct afs_net *net, struct afs_server *server,
+ enum afs_server_trace reason)
+ {
+- unsigned int a, debug_id = server->debug_id;
++ unsigned int a, debug_id;
+ bool zero;
+ int r;
+
+ if (!server)
+ return;
+
++ debug_id = server->debug_id;
+ a = atomic_read(&server->active);
+ zero = __refcount_dec_and_test(&server->ref, &r);
+ trace_afs_server(debug_id, r - 1, a, reason);
--- /dev/null
+From 1a194e6c8e1ee745e914b0b7f50fa86c89ed13fe Mon Sep 17 00:00:00 2001
+From: Samasth Norway Ananda <samasth.norway.ananda@oracle.com>
+Date: Fri, 12 Sep 2025 10:00:23 -0700
+Subject: fbcon: fix integer overflow in fbcon_do_set_font
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Samasth Norway Ananda <samasth.norway.ananda@oracle.com>
+
+commit 1a194e6c8e1ee745e914b0b7f50fa86c89ed13fe upstream.
+
+Fix integer overflow vulnerabilities in fbcon_do_set_font() where font
+size calculations could overflow when handling user-controlled font
+parameters.
+
+The vulnerabilities occur when:
+1. CALC_FONTSZ(h, pitch, charcount) performs the h * pitch * charcount
+   multiplication with user-controlled values that can overflow.
+2. The FONT_EXTRA_WORDS * sizeof(int) + size addition can also
+   overflow.
+3. This results in smaller allocations than expected, leading to buffer
+ overflows during font data copying.
+
+Add explicit overflow checking using check_mul_overflow() and
+check_add_overflow() kernel helpers to safely validate all size
+calculations before allocation.
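+
+For reference, check_mul_overflow()/check_add_overflow() (declared in
+<linux/overflow.h>) return true when the operation would overflow and
+store the result in the last argument otherwise. A userspace sketch of
+the same pattern, built on the compiler builtins the kernel helpers
+wrap:
+
+	#include <stdbool.h>
+	#include <stdio.h>
+
+	/* Returns false on overflow, true with *out set on success. */
+	static bool font_size(unsigned int h, unsigned int pitch,
+			      unsigned int charcount, unsigned int *out)
+	{
+		unsigned int size;
+
+		if (__builtin_mul_overflow(h, pitch, &size) ||
+		    __builtin_mul_overflow(size, charcount, &size))
+			return false;
+		*out = size;
+		return true;
+	}
+
+	int main(void)
+	{
+		unsigned int size;
+
+		/* 0x10000 * 0x10000 wraps a 32-bit size */
+		if (!font_size(0x10000, 0x10000, 1, &size))
+			printf("overflow rejected\n");
+		return 0;
+	}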
+
+Signed-off-by: Samasth Norway Ananda <samasth.norway.ananda@oracle.com>
+Reviewed-by: Thomas Zimmermann <tzimmermann@suse.de>
+Fixes: 39b3cffb8cf3 ("fbcon: prevent user font height or width change from causing potential out-of-bounds access")
+Cc: George Kennedy <george.kennedy@oracle.com>
+Cc: stable <stable@vger.kernel.org>
+Cc: syzbot+38a3699c7eaf165b97a6@syzkaller.appspotmail.com
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Simona Vetter <simona@ffwll.ch>
+Cc: Helge Deller <deller@gmx.de>
+Cc: Thomas Zimmermann <tzimmermann@suse.de>
+Cc: "Ville Syrjälä" <ville.syrjala@linux.intel.com>
+Cc: Sam Ravnborg <sam@ravnborg.org>
+Cc: Qianqiang Liu <qianqiang.liu@163.com>
+Cc: Shixiong Ou <oushixiong@kylinos.cn>
+Cc: Kees Cook <kees@kernel.org>
+Cc: <stable@vger.kernel.org> # v5.9+
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Link: https://lore.kernel.org/r/20250912170023.3931881-1-samasth.norway.ananda@oracle.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/video/fbdev/core/fbcon.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/drivers/video/fbdev/core/fbcon.c
++++ b/drivers/video/fbdev/core/fbcon.c
+@@ -2518,9 +2518,16 @@ static int fbcon_set_font(struct vc_data
+ if (fbcon_invalid_charcount(info, charcount))
+ return -EINVAL;
+
+- size = CALC_FONTSZ(h, pitch, charcount);
++ /* Check for integer overflow in font size calculation */
++ if (check_mul_overflow(h, pitch, &size) ||
++ check_mul_overflow(size, charcount, &size))
++ return -EINVAL;
++
++ /* Check for overflow in allocation size calculation */
++ if (check_add_overflow(FONT_EXTRA_WORDS * sizeof(int), size, &size))
++ return -EINVAL;
+
+- new_data = kmalloc(FONT_EXTRA_WORDS * sizeof(int) + size, GFP_USER);
++ new_data = kmalloc(size, GFP_USER);
+
+ if (!new_data)
+ return -ENOMEM;
--- /dev/null
+From 9b2f5ef00e852f8e8902a4d4f73aeedc60220c12 Mon Sep 17 00:00:00 2001
+From: Thomas Zimmermann <tzimmermann@suse.de>
+Date: Mon, 22 Sep 2025 15:45:54 +0200
+Subject: fbcon: Fix OOB access in font allocation
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Zimmermann <tzimmermann@suse.de>
+
+commit 9b2f5ef00e852f8e8902a4d4f73aeedc60220c12 upstream.
+
+Commit 1a194e6c8e1e ("fbcon: fix integer overflow in fbcon_do_set_font")
+introduced an out-of-bounds access by storing data and allocation sizes
+in the same variable. Restore the old size calculation and use the new
+variable 'alloc_size' for the allocation.
+
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Fixes: 1a194e6c8e1e ("fbcon: fix integer overflow in fbcon_do_set_font")
+Reported-by: Jani Nikula <jani.nikula@linux.intel.com>
+Closes: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/15020
+Closes: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/6201
+Cc: Samasth Norway Ananda <samasth.norway.ananda@oracle.com>
+Cc: Thomas Zimmermann <tzimmermann@suse.de>
+Cc: George Kennedy <george.kennedy@oracle.com>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Simona Vetter <simona@ffwll.ch>
+Cc: Helge Deller <deller@gmx.de>
+Cc: "Ville Syrjälä" <ville.syrjala@linux.intel.com>
+Cc: Sam Ravnborg <sam@ravnborg.org>
+Cc: Qianqiang Liu <qianqiang.liu@163.com>
+Cc: Shixiong Ou <oushixiong@kylinos.cn>
+Cc: Kees Cook <kees@kernel.org>
+Cc: <stable@vger.kernel.org> # v5.9+
+Cc: Zsolt Kajtar <soci@c64.rulez.org>
+Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
+Reviewed-by: Qianqiang Liu <qianqiang.liu@163.com>
+Link: https://lore.kernel.org/r/20250922134619.257684-1-tzimmermann@suse.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/video/fbdev/core/fbcon.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/video/fbdev/core/fbcon.c
++++ b/drivers/video/fbdev/core/fbcon.c
+@@ -2491,7 +2491,7 @@ static int fbcon_set_font(struct vc_data
+ unsigned charcount = font->charcount;
+ int w = font->width;
+ int h = font->height;
+- int size;
++ int size, alloc_size;
+ int i, csum;
+ u8 *new_data, *data = font->data;
+ int pitch = PITCH(font->width);
+@@ -2524,10 +2524,10 @@ static int fbcon_set_font(struct vc_data
+ return -EINVAL;
+
+ /* Check for overflow in allocation size calculation */
+- if (check_add_overflow(FONT_EXTRA_WORDS * sizeof(int), size, &size))
++ if (check_add_overflow(FONT_EXTRA_WORDS * sizeof(int), size, &alloc_size))
+ return -EINVAL;
+
+- new_data = kmalloc(size, GFP_USER);
++ new_data = kmalloc(alloc_size, GFP_USER);
+
+ if (!new_data)
+ return -ENOMEM;
--- /dev/null
+From 28aa29986dde79e8466bc87569141291053833f5 Mon Sep 17 00:00:00 2001
+From: Jakub Acs <acsjakub@amazon.de>
+Date: Mon, 22 Sep 2025 08:22:05 +0000
+Subject: fs/proc/task_mmu: check p->vec_buf for NULL
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jakub Acs <acsjakub@amazon.de>
+
+commit 28aa29986dde79e8466bc87569141291053833f5 upstream.
+
+When a PAGEMAP_SCAN ioctl invoked with vec_len = 0 reaches
+pagemap_scan_backout_range(), the kernel panics with a null-ptr-deref:
+
+[ 44.936808] Oops: general protection fault, probably for non-canonical address 0xdffffc0000000000: 0000 [#1] SMP DEBUG_PAGEALLOC KASAN NOPTI
+[ 44.937797] KASAN: null-ptr-deref in range [0x0000000000000000-0x0000000000000007]
+[ 44.938391] CPU: 1 UID: 0 PID: 2480 Comm: reproducer Not tainted 6.17.0-rc6 #22 PREEMPT(none)
+[ 44.939062] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.3-0-ga6ed6b701f0a-prebuilt.qemu.org 04/01/2014
+[ 44.939935] RIP: 0010:pagemap_scan_thp_entry.isra.0+0x741/0xa80
+
+<snip registers, unreliable trace>
+
+[ 44.946828] Call Trace:
+[ 44.947030] <TASK>
+[ 44.949219] pagemap_scan_pmd_entry+0xec/0xfa0
+[ 44.952593] walk_pmd_range.isra.0+0x302/0x910
+[ 44.954069] walk_pud_range.isra.0+0x419/0x790
+[ 44.954427] walk_p4d_range+0x41e/0x620
+[ 44.954743] walk_pgd_range+0x31e/0x630
+[ 44.955057] __walk_page_range+0x160/0x670
+[ 44.956883] walk_page_range_mm+0x408/0x980
+[ 44.958677] walk_page_range+0x66/0x90
+[ 44.958984] do_pagemap_scan+0x28d/0x9c0
+[ 44.961833] do_pagemap_cmd+0x59/0x80
+[ 44.962484] __x64_sys_ioctl+0x18d/0x210
+[ 44.962804] do_syscall_64+0x5b/0x290
+[ 44.963111] entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+vec_len = 0 in pagemap_scan_init_bounce_buffer() means no buffers are
+allocated and p->vec_buf remains set to NULL.
+
+This breaks an assumption made later in pagemap_scan_backout_range(), that
+page_region is always allocated for p->vec_buf_index.
+
+Fix it by explicitly checking p->vec_buf for NULL before dereferencing.
+
+Other sites that might run into the same deref issue are already
+(directly or transitively) protected by checking p->vec_buf.
+
+Note:
+From the PAGEMAP_SCAN man page, it seems vec_len = 0 is valid when no
+output is requested and only the side effects are of interest to the
+caller, hence it passes the check in pagemap_scan_get_args().
+
+This issue was found by syzkaller.
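+
+A hedged sketch of an invocation that passes pagemap_scan_get_args()
+with vec_len = 0 (struct layout and flag names as I understand the
+uapi; check <linux/fs.h> before relying on this):
+
+	#include <fcntl.h>
+	#include <linux/fs.h>
+	#include <string.h>
+	#include <sys/ioctl.h>
+
+	int main(void)
+	{
+		struct pm_scan_arg arg;
+		int fd = open("/proc/self/pagemap", O_RDONLY);
+
+		memset(&arg, 0, sizeof(arg));
+		arg.size = sizeof(arg);
+		arg.start = 0x10000;	/* some mapped range */
+		arg.end = 0x20000;
+		arg.vec = 0;		/* no output buffer... */
+		arg.vec_len = 0;	/* ...is accepted by the ioctl */
+		arg.return_mask = PAGE_IS_PRESENT;
+		return ioctl(fd, PAGEMAP_SCAN, &arg);
+	}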
+
+Link: https://lkml.kernel.org/r/20250922082206.6889-1-acsjakub@amazon.de
+Fixes: 52526ca7fdb9 ("fs/proc/task_mmu: implement IOCTL to get and optionally clear info about PTEs")
+Signed-off-by: Jakub Acs <acsjakub@amazon.de>
+Reviewed-by: Muhammad Usama Anjum <usama.anjum@collabora.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Cc: Jinjiang Tu <tujinjiang@huawei.com>
+Cc: Suren Baghdasaryan <surenb@google.com>
+Cc: Penglei Jiang <superman.xpt@gmail.com>
+Cc: Mark Brown <broonie@kernel.org>
+Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
+Cc: Ryan Roberts <ryan.roberts@arm.com>
+Cc: Andrei Vagin <avagin@gmail.com>
+Cc: "Michał Mirosław" <mirq-linux@rere.qmqm.pl>
+Cc: Stephen Rothwell <sfr@canb.auug.org.au>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/proc/task_mmu.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -2286,6 +2286,9 @@ static void pagemap_scan_backout_range(s
+ {
+ struct page_region *cur_buf = &p->vec_buf[p->vec_buf_index];
+
++ if (!p->vec_buf)
++ return;
++
+ if (cur_buf->start != addr)
+ cur_buf->end = addr;
+ else
--- /dev/null
+From c6ccc4dde17676dfe617b9a37bd9ba19a8fc87ee Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hansg@kernel.org>
+Date: Sat, 20 Sep 2025 22:09:55 +0200
+Subject: gpiolib: Extend software-node support to support secondary software-nodes
+
+From: Hans de Goede <hansg@kernel.org>
+
+commit c6ccc4dde17676dfe617b9a37bd9ba19a8fc87ee upstream.
+
+When a software-node gets added to a device which already has another
+fwnode as its primary node, it becomes the secondary fwnode for that
+device.
+
+Currently if a software-node with GPIO properties ends up as the secondary
+fwnode then gpiod_find_by_fwnode() will fail to find the GPIOs.
+
+Add a new gpiod_fwnode_lookup() helper which falls back to calling
+gpiod_find_by_fwnode() with the secondary fwnode if the GPIO was not
+found in the primary fwnode.
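+
+For illustration, the failing setup looks roughly like this (a hedged
+sketch; PROPERTY_ENTRY_GPIO() and device_create_managed_software_node()
+used as I understand them, with made-up node names):
+
+	/* 'dev' already has an ACPI/OF fwnode as its primary node. */
+	static const struct property_entry props[] = {
+		PROPERTY_ENTRY_GPIO("reset-gpios", &gpio_chip_node, 0,
+				    GPIO_ACTIVE_LOW),
+		{ }
+	};
+
+	ret = device_create_managed_software_node(dev, props, NULL);
+	/* The software node becomes dev's *secondary* fwnode, so before
+	 * this fix gpiod_get(dev, "reset", GPIOD_OUT_HIGH) could not
+	 * find the GPIO: the lookup only consulted the primary node.
+	 */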
+
+Fixes: e7f9ff5dc90c ("gpiolib: add support for software nodes")
+Cc: stable@vger.kernel.org
+Signed-off-by: Hans de Goede <hansg@kernel.org>
+Reviewed-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Link: https://lore.kernel.org/r/20250920200955.20403-1-hansg@kernel.org
+Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpio/gpiolib.c | 21 +++++++++++++++++++--
+ 1 file changed, 19 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -4605,6 +4605,23 @@ static struct gpio_desc *gpiod_find_by_f
+ return desc;
+ }
+
++static struct gpio_desc *gpiod_fwnode_lookup(struct fwnode_handle *fwnode,
++ struct device *consumer,
++ const char *con_id,
++ unsigned int idx,
++ enum gpiod_flags *flags,
++ unsigned long *lookupflags)
++{
++ struct gpio_desc *desc;
++
++ desc = gpiod_find_by_fwnode(fwnode, consumer, con_id, idx, flags, lookupflags);
++ if (gpiod_not_found(desc) && !IS_ERR_OR_NULL(fwnode))
++ desc = gpiod_find_by_fwnode(fwnode->secondary, consumer, con_id,
++ idx, flags, lookupflags);
++
++ return desc;
++}
++
+ struct gpio_desc *gpiod_find_and_request(struct device *consumer,
+ struct fwnode_handle *fwnode,
+ const char *con_id,
+@@ -4623,8 +4640,8 @@ struct gpio_desc *gpiod_find_and_request
+ int ret = 0;
+
+ scoped_guard(srcu, &gpio_devices_srcu) {
+- desc = gpiod_find_by_fwnode(fwnode, consumer, con_id, idx,
+- &flags, &lookupflags);
++ desc = gpiod_fwnode_lookup(fwnode, consumer, con_id, idx,
++ &flags, &lookupflags);
+ if (gpiod_not_found(desc) && platform_lookup_allowed) {
+ /*
+ * Either we are not using DT or ACPI, or their lookup
--- /dev/null
+From 85e1ff61060a765d91ee62dc5606d4d547d9d105 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@kernel.org>
+Date: Thu, 11 Sep 2025 12:58:58 -0700
+Subject: kmsan: fix out-of-bounds access to shadow memory
+
+From: Eric Biggers <ebiggers@kernel.org>
+
+commit 85e1ff61060a765d91ee62dc5606d4d547d9d105 upstream.
+
+Running sha224_kunit on a KMSAN-enabled kernel results in a crash in
+kmsan_internal_set_shadow_origin():
+
+ BUG: unable to handle page fault for address: ffffbc3840291000
+ #PF: supervisor read access in kernel mode
+ #PF: error_code(0x0000) - not-present page
+ PGD 1810067 P4D 1810067 PUD 192d067 PMD 3c17067 PTE 0
+ Oops: 0000 [#1] SMP NOPTI
+ CPU: 0 UID: 0 PID: 81 Comm: kunit_try_catch Tainted: G N 6.17.0-rc3 #10 PREEMPT(voluntary)
+ Tainted: [N]=TEST
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.17.0-0-gb52ca86e094d-prebuilt.qemu.org 04/01/2014
+ RIP: 0010:kmsan_internal_set_shadow_origin+0x91/0x100
+ [...]
+ Call Trace:
+ <TASK>
+ __msan_memset+0xee/0x1a0
+ sha224_final+0x9e/0x350
+ test_hash_buffer_overruns+0x46f/0x5f0
+ ? kmsan_get_shadow_origin_ptr+0x46/0xa0
+ ? __pfx_test_hash_buffer_overruns+0x10/0x10
+ kunit_try_run_case+0x198/0xa00
+
+This occurs when memset() is called on a buffer that is not 4-byte aligned
+and extends to the end of a guard page, i.e. the next page is unmapped.
+
+The bug is that the loop at the end of kmsan_internal_set_shadow_origin()
+accesses the wrong shadow memory bytes when the address is not 4-byte
+aligned. Since each 4 bytes are associated with an origin, it rounds the
+address and size so that it can access all the origins that contain the
+buffer. However, when it checks the corresponding shadow bytes for a
+particular origin, it incorrectly uses the original unrounded shadow
+address. This results in reads from shadow memory beyond the end of the
+buffer's shadow memory, which crashes when that memory is not mapped.
+
+To fix this, correctly align the shadow address before accessing the 4
+shadow bytes corresponding to each origin.
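+
+A worked example of the rounding (illustrative numbers, not from the
+report): take an address ending in ...2 with size = 8 and
+KMSAN_ORIGIN_SIZE = 4:
+
+	pad      = address % KMSAN_ORIGIN_SIZE;		/* 2 */
+	address -= pad;					/* rounded down to ...0 */
+	size     = ALIGN(size + pad, KMSAN_ORIGIN_SIZE);	/* 12 */
+
+	/* The origin loop walks size / 4 = 3 slots. Reading them via
+	 * the unrounded shadow_start touches shadow bytes up to
+	 * shadow_start + 12, i.e. 4 bytes past the 8-byte buffer's
+	 * shadow, which faults when the next shadow page is unmapped.
+	 * Rounding the shadow pointer down by the same pad fixes it:
+	 */
+	aligned_shadow = shadow_start - pad;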
+
+Link: https://lkml.kernel.org/r/20250911195858.394235-1-ebiggers@kernel.org
+Fixes: 2ef3cec44c60 ("kmsan: do not wipe out origin when doing partial unpoisoning")
+Signed-off-by: Eric Biggers <ebiggers@kernel.org>
+Tested-by: Alexander Potapenko <glider@google.com>
+Reviewed-by: Alexander Potapenko <glider@google.com>
+Cc: Dmitriy Vyukov <dvyukov@google.com>
+Cc: Marco Elver <elver@google.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/kmsan/core.c | 10 +++++++---
+ mm/kmsan/kmsan_test.c | 16 ++++++++++++++++
+ 2 files changed, 23 insertions(+), 3 deletions(-)
+
+--- a/mm/kmsan/core.c
++++ b/mm/kmsan/core.c
+@@ -195,7 +195,8 @@ void kmsan_internal_set_shadow_origin(vo
+ u32 origin, bool checked)
+ {
+ u64 address = (u64)addr;
+- u32 *shadow_start, *origin_start;
++ void *shadow_start;
++ u32 *aligned_shadow, *origin_start;
+ size_t pad = 0;
+
+ KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(addr, size));
+@@ -214,9 +215,12 @@ void kmsan_internal_set_shadow_origin(vo
+ }
+ __memset(shadow_start, b, size);
+
+- if (!IS_ALIGNED(address, KMSAN_ORIGIN_SIZE)) {
++ if (IS_ALIGNED(address, KMSAN_ORIGIN_SIZE)) {
++ aligned_shadow = shadow_start;
++ } else {
+ pad = address % KMSAN_ORIGIN_SIZE;
+ address -= pad;
++ aligned_shadow = shadow_start - pad;
+ size += pad;
+ }
+ size = ALIGN(size, KMSAN_ORIGIN_SIZE);
+@@ -230,7 +234,7 @@ void kmsan_internal_set_shadow_origin(vo
+ * corresponding shadow slot is zero.
+ */
+ for (int i = 0; i < size / KMSAN_ORIGIN_SIZE; i++) {
+- if (origin || !shadow_start[i])
++ if (origin || !aligned_shadow[i])
+ origin_start[i] = origin;
+ }
+ }
+--- a/mm/kmsan/kmsan_test.c
++++ b/mm/kmsan/kmsan_test.c
+@@ -556,6 +556,21 @@ DEFINE_TEST_MEMSETXX(16)
+ DEFINE_TEST_MEMSETXX(32)
+ DEFINE_TEST_MEMSETXX(64)
+
++/* Test case: ensure that KMSAN does not access shadow memory out of bounds. */
++static void test_memset_on_guarded_buffer(struct kunit *test)
++{
++ void *buf = vmalloc(PAGE_SIZE);
++
++ kunit_info(test,
++ "memset() on ends of guarded buffer should not crash\n");
++
++ for (size_t size = 0; size <= 128; size++) {
++ memset(buf, 0xff, size);
++ memset(buf + PAGE_SIZE - size, 0xff, size);
++ }
++ vfree(buf);
++}
++
+ static noinline void fibonacci(int *array, int size, int start)
+ {
+ if (start < 2 || (start == size))
+@@ -677,6 +692,7 @@ static struct kunit_case kmsan_test_case
+ KUNIT_CASE(test_memset16),
+ KUNIT_CASE(test_memset32),
+ KUNIT_CASE(test_memset64),
++ KUNIT_CASE(test_memset_on_guarded_buffer),
+ KUNIT_CASE(test_long_origin_chain),
+ KUNIT_CASE(test_stackdepot_roundtrip),
+ KUNIT_CASE(test_unpoison_memory),
--- /dev/null
+From 06195ee967d06ead757f9291bbaf1a0b30fa10b8 Mon Sep 17 00:00:00 2001
+From: Akinobu Mita <akinobu.mita@gmail.com>
+Date: Sat, 20 Sep 2025 22:25:46 +0900
+Subject: mm/damon/sysfs: do not ignore callback's return value in damon_sysfs_damon_call()
+
+From: Akinobu Mita <akinobu.mita@gmail.com>
+
+commit 06195ee967d06ead757f9291bbaf1a0b30fa10b8 upstream.
+
+The callback return value is ignored in damon_sysfs_damon_call(), which
+means that it is not possible to detect invalid user input when writing
+commands such as 'commit' to
+/sys/kernel/mm/damon/admin/kdamonds/<K>/state. Fix it.
+
+Link: https://lkml.kernel.org/r/20250920132546.5822-1-akinobu.mita@gmail.com
+Fixes: f64539dcdb87 ("mm/damon/sysfs: use damon_call() for update_schemes_stats")
+Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
+Reviewed-by: SeongJae Park <sj@kernel.org>
+Cc: <stable@vger.kernel.org> [6.14+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/sysfs.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/mm/damon/sysfs.c
++++ b/mm/damon/sysfs.c
+@@ -1576,12 +1576,14 @@ static int damon_sysfs_damon_call(int (*
+ struct damon_sysfs_kdamond *kdamond)
+ {
+ struct damon_call_control call_control = {};
++ int err;
+
+ if (!kdamond->damon_ctx)
+ return -EINVAL;
+ call_control.fn = fn;
+ call_control.data = kdamond;
+- return damon_call(kdamond->damon_ctx, &call_control);
++ err = damon_call(kdamond->damon_ctx, &call_control);
++ return err ? err : call_control.return_code;
+ }
+
+ struct damon_sysfs_schemes_walk_data {
--- /dev/null
+From 7b7387650dcf2881fd8bb55bcf3c8bd6c9542dd7 Mon Sep 17 00:00:00 2001
+From: Jinjiang Tu <tujinjiang@huawei.com>
+Date: Fri, 12 Sep 2025 15:41:39 +0800
+Subject: mm/hugetlb: fix folio is still mapped when deleted
+
+From: Jinjiang Tu <tujinjiang@huawei.com>
+
+commit 7b7387650dcf2881fd8bb55bcf3c8bd6c9542dd7 upstream.
+
+Migration may race with fallocating a hole. remove_inode_single_folio()
+will unmap the folio if the folio is still mapped. However, it's called
+without the folio lock. If the folio is migrated and the mapped pte has
+been converted to a migration entry, folio_mapped() returns false, and
+the folio won't be unmapped. Due to the extra refcount held by
+remove_inode_single_folio(), migration fails, restores the migration
+entry to a normal pte, and the folio is mapped again. As a result, we
+trigger the BUG in filemap_unaccount_folio().
+
+The log is as follows:
+ BUG: Bad page cache in process hugetlb pfn:156c00
+ page: refcount:515 mapcount:0 mapping:0000000099fef6e1 index:0x0 pfn:0x156c00
+ head: order:9 mapcount:1 entire_mapcount:1 nr_pages_mapped:0 pincount:0
+ aops:hugetlbfs_aops ino:dcc dentry name(?):"my_hugepage_file"
+ flags: 0x17ffffc00000c1(locked|waiters|head|node=0|zone=2|lastcpupid=0x1fffff)
+ page_type: f4(hugetlb)
+ page dumped because: still mapped when deleted
+ CPU: 1 UID: 0 PID: 395 Comm: hugetlb Not tainted 6.17.0-rc5-00044-g7aac71907bde-dirty #484 NONE
+ Hardware name: QEMU Ubuntu 24.04 PC (i440FX + PIIX, 1996), BIOS 0.0.0 02/06/2015
+ Call Trace:
+ <TASK>
+ dump_stack_lvl+0x4f/0x70
+ filemap_unaccount_folio+0xc4/0x1c0
+ __filemap_remove_folio+0x38/0x1c0
+ filemap_remove_folio+0x41/0xd0
+ remove_inode_hugepages+0x142/0x250
+ hugetlbfs_fallocate+0x471/0x5a0
+ vfs_fallocate+0x149/0x380
+
+Hold the folio lock before checking if the folio is mapped, to avoid
+racing with migration.
+
+Link: https://lkml.kernel.org/r/20250912074139.3575005-1-tujinjiang@huawei.com
+Fixes: 4aae8d1c051e ("mm/hugetlbfs: unmap pages if page fault raced with hole punch")
+Signed-off-by: Jinjiang Tu <tujinjiang@huawei.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Muchun Song <muchun.song@linux.dev>
+Cc: Oscar Salvador <osalvador@suse.de>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/hugetlbfs/inode.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -520,14 +520,16 @@ static bool remove_inode_single_folio(st
+
+ /*
+ * If folio is mapped, it was faulted in after being
+- * unmapped in caller. Unmap (again) while holding
+- * the fault mutex. The mutex will prevent faults
+- * until we finish removing the folio.
++	 * unmapped in the caller, or hugetlb_vmdelete_list() skipped
++	 * unmapping it after failing to grab the lock. Unmap (again)
++	 * while holding the fault mutex. The mutex will prevent
++	 * faults until we finish removing the folio. Hold the folio
++	 * lock to guarantee no concurrent migration.
+ */
++ folio_lock(folio);
+ if (unlikely(folio_mapped(folio)))
+ hugetlb_unmap_file_folio(h, mapping, folio, index);
+
+- folio_lock(folio);
+ /*
+ * We must remove the folio from page cache before removing
+ * the region/ reserve map (hugetlb_unreserve_pages). In
--- /dev/null
+From 4d428dca252c858bfac691c31fa95d26cd008706 Mon Sep 17 00:00:00 2001
+From: Max Kellermann <max.kellermann@ionos.com>
+Date: Thu, 25 Sep 2025 14:08:20 +0100
+Subject: netfs: fix reference leak
+
+From: Max Kellermann <max.kellermann@ionos.com>
+
+commit 4d428dca252c858bfac691c31fa95d26cd008706 upstream.
+
+Commit 20d72b00ca81 ("netfs: Fix the request's work item to not
+require a ref") modified netfs_alloc_request() to initialize the
+reference counter to 2 instead of 1. The rationale was that the
+request's "work" would release the second reference after completion
+(via netfs_{read,write}_collection_worker()). That works most of the
+time if all goes well.
+
+However, it leaks this additional reference if the request is released
+before the I/O operation has been submitted: the error code path only
+decrements the reference counter once and the work item will never be
+queued because there will never be a completion.
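+
+The leak, reduced to its refcounting skeleton (a sketch with made-up
+names, not the actual netfs API):
+
+	req = alloc_request();		/* refcount = 2: one for the
+					 * caller, one to be dropped by
+					 * the completion work item */
+	ret = begin_io(req);
+	if (ret < 0) {
+		put_request(req);	/* refcount drops to 1 */
+		return ret;		/* no I/O was submitted, so the
+					 * work item never runs and the
+					 * second ref is never dropped */
+	}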
+
+This has caused outages of our whole server cluster today because
+tasks were blocked in netfs_wait_for_outstanding_io(), leading to
+deadlocks in Ceph (another bug that I will address soon in another
+patch). This was caused by a netfs_pgpriv2_begin_copy_to_cache() call
+which failed in fscache_begin_write_operation(). The leaked
+netfs_io_request was never completed, leaving `netfs_inode.io_count`
+with a positive value forever.
+
+All of this is super-fragile code. Finding out which code paths will
+lead to an eventual completion and which do not is hard to see:
+
+- Some functions like netfs_create_write_req() allocate a request, but
+ will never submit any I/O.
+
+- netfs_unbuffered_read_iter_locked() calls netfs_unbuffered_read()
+ and then netfs_put_request(); however, netfs_unbuffered_read() can
+ also fail early before submitting the I/O request, therefore another
+ netfs_put_request() call must be added there.
+
+A rule of thumb is that functions that return a `netfs_io_request` do
+not submit I/O, and all of their callers must be checked.
+
+For my taste, the whole netfs code needs an overhaul to make reference
+counting easier to understand and less fragile & obscure. But to fix
+this bug here and now and produce a patch that is adequate for a
+stable backport, I tried a minimal approach that quickly frees the
+request object upon early failure.
+
+I decided against adding a second netfs_put_request() each time
+because that would cause code duplication which obscures the code
+further. Instead, I added the function netfs_put_failed_request()
+which frees such a failed request synchronously under the assumption
+that the reference count is exactly 2 (as initially set by
+netfs_alloc_request() and never touched), verified by a
+WARN_ON_ONCE(). It then deinitializes the request object (without
+going through the "cleanup_work" indirection) and frees the allocation
+(with RCU protection to protect against concurrent access by
+netfs_requests_seq_start()).
+
+All code paths that fail early have been changed to call
+netfs_put_failed_request() instead of netfs_put_request().
+Additionally, I have added a netfs_put_request() call to
+netfs_unbuffered_read() as explained above because the
+netfs_put_failed_request() approach does not work there.
+
+Fixes: 20d72b00ca81 ("netfs: Fix the request's work item to not require a ref")
+Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
+Signed-off-by: David Howells <dhowells@redhat.com>
+cc: Paulo Alcantara <pc@manguebit.org>
+cc: netfs@lists.linux.dev
+cc: linux-fsdevel@vger.kernel.org
+cc: stable@vger.kernel.org
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/netfs/buffered_read.c | 10 +++++-----
+ fs/netfs/direct_read.c | 7 ++++++-
+ fs/netfs/direct_write.c | 6 +++++-
+ fs/netfs/internal.h | 1 +
+ fs/netfs/objects.c | 30 +++++++++++++++++++++++++++---
+ fs/netfs/read_pgpriv2.c | 2 +-
+ fs/netfs/read_single.c | 2 +-
+ fs/netfs/write_issue.c | 3 +--
+ 8 files changed, 47 insertions(+), 14 deletions(-)
+
+diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
+index 18b3dc74c70e..37ab6f28b5ad 100644
+--- a/fs/netfs/buffered_read.c
++++ b/fs/netfs/buffered_read.c
+@@ -369,7 +369,7 @@ void netfs_readahead(struct readahead_control *ractl)
+ return netfs_put_request(rreq, netfs_rreq_trace_put_return);
+
+ cleanup_free:
+- return netfs_put_request(rreq, netfs_rreq_trace_put_failed);
++ return netfs_put_failed_request(rreq);
+ }
+ EXPORT_SYMBOL(netfs_readahead);
+
+@@ -472,7 +472,7 @@ static int netfs_read_gaps(struct file *file, struct folio *folio)
+ return ret < 0 ? ret : 0;
+
+ discard:
+- netfs_put_request(rreq, netfs_rreq_trace_put_discard);
++ netfs_put_failed_request(rreq);
+ alloc_error:
+ folio_unlock(folio);
+ return ret;
+@@ -532,7 +532,7 @@ int netfs_read_folio(struct file *file, struct folio *folio)
+ return ret < 0 ? ret : 0;
+
+ discard:
+- netfs_put_request(rreq, netfs_rreq_trace_put_discard);
++ netfs_put_failed_request(rreq);
+ alloc_error:
+ folio_unlock(folio);
+ return ret;
+@@ -699,7 +699,7 @@ int netfs_write_begin(struct netfs_inode *ctx,
+ return 0;
+
+ error_put:
+- netfs_put_request(rreq, netfs_rreq_trace_put_failed);
++ netfs_put_failed_request(rreq);
+ error:
+ if (folio) {
+ folio_unlock(folio);
+@@ -754,7 +754,7 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio,
+ return ret < 0 ? ret : 0;
+
+ error_put:
+- netfs_put_request(rreq, netfs_rreq_trace_put_discard);
++ netfs_put_failed_request(rreq);
+ error:
+ _leave(" = %d", ret);
+ return ret;
+diff --git a/fs/netfs/direct_read.c b/fs/netfs/direct_read.c
+index a05e13472baf..a498ee8d6674 100644
+--- a/fs/netfs/direct_read.c
++++ b/fs/netfs/direct_read.c
+@@ -131,6 +131,7 @@ static ssize_t netfs_unbuffered_read(struct netfs_io_request *rreq, bool sync)
+
+ if (rreq->len == 0) {
+ pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
++ netfs_put_request(rreq, netfs_rreq_trace_put_discard);
+ return -EIO;
+ }
+
+@@ -205,7 +206,7 @@ ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *i
+ if (user_backed_iter(iter)) {
+ ret = netfs_extract_user_iter(iter, rreq->len, &rreq->buffer.iter, 0);
+ if (ret < 0)
+- goto out;
++ goto error_put;
+ rreq->direct_bv = (struct bio_vec *)rreq->buffer.iter.bvec;
+ rreq->direct_bv_count = ret;
+ rreq->direct_bv_unpin = iov_iter_extract_will_pin(iter);
+@@ -238,6 +239,10 @@ ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *i
+ if (ret > 0)
+ orig_count -= ret;
+ return ret;
++
++error_put:
++ netfs_put_failed_request(rreq);
++ return ret;
+ }
+ EXPORT_SYMBOL(netfs_unbuffered_read_iter_locked);
+
+diff --git a/fs/netfs/direct_write.c b/fs/netfs/direct_write.c
+index a16660ab7f83..a9d1c3b2c084 100644
+--- a/fs/netfs/direct_write.c
++++ b/fs/netfs/direct_write.c
+@@ -57,7 +57,7 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
+ n = netfs_extract_user_iter(iter, len, &wreq->buffer.iter, 0);
+ if (n < 0) {
+ ret = n;
+- goto out;
++ goto error_put;
+ }
+ wreq->direct_bv = (struct bio_vec *)wreq->buffer.iter.bvec;
+ wreq->direct_bv_count = n;
+@@ -101,6 +101,10 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
+ out:
+ netfs_put_request(wreq, netfs_rreq_trace_put_return);
+ return ret;
++
++error_put:
++ netfs_put_failed_request(wreq);
++ return ret;
+ }
+ EXPORT_SYMBOL(netfs_unbuffered_write_iter_locked);
+
+diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h
+index d4f16fefd965..4319611f5354 100644
+--- a/fs/netfs/internal.h
++++ b/fs/netfs/internal.h
+@@ -87,6 +87,7 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
+ void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
+ void netfs_clear_subrequests(struct netfs_io_request *rreq);
+ void netfs_put_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
++void netfs_put_failed_request(struct netfs_io_request *rreq);
+ struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq);
+
+ static inline void netfs_see_request(struct netfs_io_request *rreq,
+diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c
+index e8c99738b5bb..40a1c7d6f6e0 100644
+--- a/fs/netfs/objects.c
++++ b/fs/netfs/objects.c
+@@ -116,10 +116,8 @@ static void netfs_free_request_rcu(struct rcu_head *rcu)
+ netfs_stat_d(&netfs_n_rh_rreq);
+ }
+
+-static void netfs_free_request(struct work_struct *work)
++static void netfs_deinit_request(struct netfs_io_request *rreq)
+ {
+- struct netfs_io_request *rreq =
+- container_of(work, struct netfs_io_request, cleanup_work);
+ struct netfs_inode *ictx = netfs_inode(rreq->inode);
+ unsigned int i;
+
+@@ -149,6 +147,14 @@ static void netfs_free_request(struct work_struct *work)
+
+ if (atomic_dec_and_test(&ictx->io_count))
+ wake_up_var(&ictx->io_count);
++}
++
++static void netfs_free_request(struct work_struct *work)
++{
++ struct netfs_io_request *rreq =
++ container_of(work, struct netfs_io_request, cleanup_work);
++
++ netfs_deinit_request(rreq);
+ call_rcu(&rreq->rcu, netfs_free_request_rcu);
+ }
+
+@@ -167,6 +173,24 @@ void netfs_put_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace
+ }
+ }
+
++/*
++ * Free a request (synchronously) that was just allocated but has
++ * failed before it could be submitted.
++ */
++void netfs_put_failed_request(struct netfs_io_request *rreq)
++{
++ int r = refcount_read(&rreq->ref);
++
++	/* New requests have two references (see
++	 * netfs_alloc_request()), and this function is only allowed on
++	 * new request objects.
++ */
++ WARN_ON_ONCE(r != 2);
++
++ trace_netfs_rreq_ref(rreq->debug_id, r, netfs_rreq_trace_put_failed);
++ netfs_free_request(&rreq->cleanup_work);
++}
++
+ /*
+ * Allocate and partially initialise an I/O request structure.
+ */
+diff --git a/fs/netfs/read_pgpriv2.c b/fs/netfs/read_pgpriv2.c
+index 8097bc069c1d..a1489aa29f78 100644
+--- a/fs/netfs/read_pgpriv2.c
++++ b/fs/netfs/read_pgpriv2.c
+@@ -118,7 +118,7 @@ static struct netfs_io_request *netfs_pgpriv2_begin_copy_to_cache(
+ return creq;
+
+ cancel_put:
+- netfs_put_request(creq, netfs_rreq_trace_put_return);
++ netfs_put_failed_request(creq);
+ cancel:
+ rreq->copy_to_cache = ERR_PTR(-ENOBUFS);
+ clear_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags);
+diff --git a/fs/netfs/read_single.c b/fs/netfs/read_single.c
+index fa622a6cd56d..5c0dc4efc792 100644
+--- a/fs/netfs/read_single.c
++++ b/fs/netfs/read_single.c
+@@ -189,7 +189,7 @@ ssize_t netfs_read_single(struct inode *inode, struct file *file, struct iov_ite
+ return ret;
+
+ cleanup_free:
+- netfs_put_request(rreq, netfs_rreq_trace_put_failed);
++ netfs_put_failed_request(rreq);
+ return ret;
+ }
+ EXPORT_SYMBOL(netfs_read_single);
+diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
+index 0584cba1a043..dd8743bc8d7f 100644
+--- a/fs/netfs/write_issue.c
++++ b/fs/netfs/write_issue.c
+@@ -133,8 +133,7 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
+
+ return wreq;
+ nomem:
+- wreq->error = -ENOMEM;
+- netfs_put_request(wreq, netfs_rreq_trace_put_failed);
++ netfs_put_failed_request(wreq);
+ return ERR_PTR(-ENOMEM);
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 546e42c8c6d9498d5eac14bf2aca0383a11b145a Mon Sep 17 00:00:00 2001
+From: Alexandre Ghiti <alexghiti@rivosinc.com>
+Date: Tue, 23 Sep 2025 18:25:52 -0600
+Subject: riscv: Use an atomic xchg in pudp_huge_get_and_clear()
+
+From: Alexandre Ghiti <alexghiti@rivosinc.com>
+
+commit 546e42c8c6d9498d5eac14bf2aca0383a11b145a upstream.
+
+Make sure we return the right pud value and not a value that could
+have been overwritten in between by a different core.
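+
+Why a plain read-then-clear is not enough, in a minimal C11 sketch
+(illustrative, not the riscv code):
+
+	#include <stdatomic.h>
+
+	static _Atomic unsigned long pud;
+
+	unsigned long get_and_clear_racy(void)
+	{
+		unsigned long old = atomic_load(&pud);
+		/* another core may store a new value here... */
+		atomic_store(&pud, 0);
+		return old;	/* ...which is clobbered and never seen */
+	}
+
+	unsigned long get_and_clear_atomic(void)
+	{
+		/* xchg returns exactly the value it replaced */
+		return atomic_exchange(&pud, 0);
+	}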
+
+Fixes: c3cc2a4a3a23 ("riscv: Add support for PUD THP")
+Cc: stable@vger.kernel.org
+Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
+Link: https://lore.kernel.org/r/20250814-dev-alex-thp_pud_xchg-v1-1-b4704dfae206@rivosinc.com
+[pjw@kernel.org: use xchg rather than atomic_long_xchg; avoid atomic op for !CONFIG_SMP like x86]
+Signed-off-by: Paul Walmsley <pjw@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/riscv/include/asm/pgtable.h | 17 +++++++++++++++++
+ 1 file changed, 17 insertions(+)
+
+--- a/arch/riscv/include/asm/pgtable.h
++++ b/arch/riscv/include/asm/pgtable.h
+@@ -964,6 +964,23 @@ static inline int pudp_test_and_clear_yo
+ return ptep_test_and_clear_young(vma, address, (pte_t *)pudp);
+ }
+
++#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
++static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
++ unsigned long address, pud_t *pudp)
++{
++#ifdef CONFIG_SMP
++ pud_t pud = __pud(xchg(&pudp->pud, 0));
++#else
++ pud_t pud = *pudp;
++
++ pud_clear(pudp);
++#endif
++
++ page_table_check_pud_clear(mm, pud);
++
++ return pud;
++}
++
+ static inline int pud_young(pud_t pud)
+ {
+ return pte_young(pud_pte(pud));
pinctrl-airoha-fix-wrong-mdio-function-bitmaks.patch
revert-vhost-net-defer-tx-queue-re-enable-until-after-sendmsg.patch
vhost-net-flush-batched-before-enabling-notifications.patch
+afs-fix-potential-null-pointer-dereference-in-afs_put_server.patch
+fs-proc-task_mmu-check-p-vec_buf-for-null.patch
+gpiolib-extend-software-node-support-to-support-secondary-software-nodes.patch
+kmsan-fix-out-of-bounds-access-to-shadow-memory.patch
+netfs-fix-reference-leak.patch
+riscv-use-an-atomic-xchg-in-pudp_huge_get_and_clear.patch
+x86-topology-implement-topology_is_core_online-to-address-smt-regression.patch
+x86-kconfig-reenable-ptdump-on-i386.patch
+mm-hugetlb-fix-folio-is-still-mapped-when-deleted.patch
+mm-damon-sysfs-do-not-ignore-callback-s-return-value-in-damon_sysfs_damon_call.patch
+fbcon-fix-integer-overflow-in-fbcon_do_set_font.patch
+fbcon-fix-oob-access-in-font-allocation.patch
--- /dev/null
+From 4f115596133fa168bac06bb34c6efd8f4d84c22e Mon Sep 17 00:00:00 2001
+From: Alexander Popov <alex.popov@linux.com>
+Date: Sun, 21 Sep 2025 23:58:15 +0300
+Subject: x86/Kconfig: Reenable PTDUMP on i386
+
+From: Alexander Popov <alex.popov@linux.com>
+
+commit 4f115596133fa168bac06bb34c6efd8f4d84c22e upstream.
+
+The commit
+
+ f9aad622006bd64c ("mm: rename GENERIC_PTDUMP and PTDUMP_CORE")
+
+has broken PTDUMP and the Kconfig options that use it on ARCH=i386, including
+CONFIG_DEBUG_WX.
+
+CONFIG_GENERIC_PTDUMP was renamed into CONFIG_ARCH_HAS_PTDUMP, but it was
+mistakenly moved from "config X86" to "config X86_64". That made PTDUMP
+unavailable for i386.
+
+Move CONFIG_ARCH_HAS_PTDUMP back to "config X86" to fix it.
+
+ [ bp: Massage commit message. ]
+
+Fixes: f9aad622006bd64c ("mm: rename GENERIC_PTDUMP and PTDUMP_CORE")
+Signed-off-by: Alexander Popov <alex.popov@linux.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -26,7 +26,6 @@ config X86_64
+ depends on 64BIT
+ # Options that are inherently 64-bit kernel only:
+ select ARCH_HAS_GIGANTIC_PAGE
+- select ARCH_HAS_PTDUMP
+ select ARCH_SUPPORTS_MSEAL_SYSTEM_MAPPINGS
+ select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
+ select ARCH_SUPPORTS_PER_VMA_LOCK
+@@ -101,6 +100,7 @@ config X86
+ select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+ select ARCH_HAS_PMEM_API if X86_64
+ select ARCH_HAS_PREEMPT_LAZY
++ select ARCH_HAS_PTDUMP
+ select ARCH_HAS_PTE_DEVMAP if X86_64
+ select ARCH_HAS_PTE_SPECIAL
+ select ARCH_HAS_HW_PTE_YOUNG
--- /dev/null
+From 2066f00e5b2dc061fb6d8c88fadaebc97f11feaa Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 21 Sep 2025 10:56:40 +0200
+Subject: x86/topology: Implement topology_is_core_online() to address SMT regression
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 2066f00e5b2dc061fb6d8c88fadaebc97f11feaa upstream.
+
+Christian reported that commit a430c11f4015 ("intel_idle: Rescan "dead" SMT
+siblings during initialization") broke the use case in which both 'nosmt'
+and 'maxcpus' are on the kernel command line because it onlines primary
+threads, which were offline due to the maxcpus limit.
+
+The initially proposed fix to skip primary threads in the loop is
+inconsistent. While it prevents the primary thread from being onlined, it then
+onlines the corresponding hyperthread(s), which does not really make sense.
+
+The CPU iterator in cpuhp_smt_enable() contains a check which excludes
+all threads of a core when the primary thread is offline. The default
+implementation of the underlying topology_is_core_online() helper is a
+NOOP and therefore not effective on x86.
+
+Implement topology_is_core_online() on x86 to address this issue. This
+makes the behaviour consistent between x86 and PowerPC.
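+
+For context, the generic fallback (as I understand it from
+include/linux/topology.h) reports every core as online, which is why
+the cpuhp_smt_enable() check was a NOOP on x86 until now:
+
+	#ifndef topology_is_core_online
+	static inline bool topology_is_core_online(unsigned int cpu)
+	{
+		return true;
+	}
+	#endif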
+
+Fixes: a430c11f4015 ("intel_idle: Rescan "dead" SMT siblings during initialization")
+Fixes: f694481b1d31 ("ACPI: processor: Rescan "dead" SMT siblings during initialization")
+Closes: https://lore.kernel.org/linux-pm/724616a2-6374-4ba3-8ce3-ea9c45e2ae3b@arm.com/
+Reported-by: Christian Loehle <christian.loehle@arm.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Reviewed-by: Rafael J. Wysocki (Intel) <rafael@kernel.org>
+Tested-by: Christian Loehle <christian.loehle@arm.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/12740505.O9o76ZdvQC@rafael.j.wysocki
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/topology.h | 10 ++++++++++
+ arch/x86/kernel/cpu/topology.c | 13 +++++++++++++
+ 2 files changed, 23 insertions(+)
+
+diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
+index 6c79ee7c0957..21041898157a 100644
+--- a/arch/x86/include/asm/topology.h
++++ b/arch/x86/include/asm/topology.h
+@@ -231,6 +231,16 @@ static inline bool topology_is_primary_thread(unsigned int cpu)
+ }
+ #define topology_is_primary_thread topology_is_primary_thread
+
++int topology_get_primary_thread(unsigned int cpu);
++
++static inline bool topology_is_core_online(unsigned int cpu)
++{
++ int pcpu = topology_get_primary_thread(cpu);
++
++ return pcpu >= 0 ? cpu_online(pcpu) : false;
++}
++#define topology_is_core_online topology_is_core_online
++
+ #else /* CONFIG_SMP */
+ static inline int topology_phys_to_logical_pkg(unsigned int pkg) { return 0; }
+ static inline int topology_max_smt_threads(void) { return 1; }
+diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c
+index e35ccdc84910..6073a16628f9 100644
+--- a/arch/x86/kernel/cpu/topology.c
++++ b/arch/x86/kernel/cpu/topology.c
+@@ -372,6 +372,19 @@ unsigned int topology_unit_count(u32 apicid, enum x86_topology_domains which_uni
+ return topo_unit_count(lvlid, at_level, apic_maps[which_units].map);
+ }
+
++#ifdef CONFIG_SMP
++int topology_get_primary_thread(unsigned int cpu)
++{
++ u32 apic_id = cpuid_to_apicid[cpu];
++
++ /*
++ * Get the core domain level APIC id, which is the primary thread
++ * and return the CPU number assigned to it.
++ */
++ return topo_lookup_cpuid(topo_apicid(apic_id, TOPO_CORE_DOMAIN));
++}
++#endif
++
+ #ifdef CONFIG_ACPI_HOTPLUG_CPU
+ /**
+ * topology_hotplug_apic - Handle a physical hotplugged APIC after boot
+--
+2.51.0
+