From d4b53915fed9bf8f3e6f8a86187d5e0929720c7b Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Thu, 4 Dec 2025 17:35:58 +0100 Subject: [PATCH] drop some more 5.10 and 5.15 patches --- ...lb_vpn-array-to-avoid-stack-overflow.patch | 76 ------------ ...poisoning-order-0-pages-with-highmem.patch | 110 ------------------ ...ace-kmap_atomic-with-kmap_local_page.patch | 71 ----------- queue-5.10/series | 3 - ...lb_vpn-array-to-avoid-stack-overflow.patch | 76 ------------ ...poisoning-order-0-pages-with-highmem.patch | 110 ------------------ ...ace-kmap_atomic-with-kmap_local_page.patch | 71 ----------- queue-5.15/series | 3 - 8 files changed, 520 deletions(-) delete mode 100644 queue-5.10/mips-mm-kmalloc-tlb_vpn-array-to-avoid-stack-overflow.patch delete mode 100644 queue-5.10/mm-mempool-fix-poisoning-order-0-pages-with-highmem.patch delete mode 100644 queue-5.10/mm-mempool-replace-kmap_atomic-with-kmap_local_page.patch delete mode 100644 queue-5.15/mips-mm-kmalloc-tlb_vpn-array-to-avoid-stack-overflow.patch delete mode 100644 queue-5.15/mm-mempool-fix-poisoning-order-0-pages-with-highmem.patch delete mode 100644 queue-5.15/mm-mempool-replace-kmap_atomic-with-kmap_local_page.patch diff --git a/queue-5.10/mips-mm-kmalloc-tlb_vpn-array-to-avoid-stack-overflow.patch b/queue-5.10/mips-mm-kmalloc-tlb_vpn-array-to-avoid-stack-overflow.patch deleted file mode 100644 index 6522677304..0000000000 --- a/queue-5.10/mips-mm-kmalloc-tlb_vpn-array-to-avoid-stack-overflow.patch +++ /dev/null @@ -1,76 +0,0 @@ -From 841ecc979b18d3227fad5e2d6a1e6f92688776b5 Mon Sep 17 00:00:00 2001 -From: Thomas Bogendoerfer -Date: Fri, 28 Nov 2025 16:53:46 +0000 -Subject: MIPS: mm: kmalloc tlb_vpn array to avoid stack overflow - -From: Thomas Bogendoerfer - -commit 841ecc979b18d3227fad5e2d6a1e6f92688776b5 upstream. - -Owing to Config4.MMUSizeExt and VTLB/FTLB MMU features later MIPSr2+ -cores can have more than 64 TLB entries. Therefore allocate an array -for uniquification instead of placing too an small array on the stack. - -Fixes: 35ad7e181541 ("MIPS: mm: tlb-r4k: Uniquify TLB entries on init") -Co-developed-by: Maciej W. Rozycki -Signed-off-by: Maciej W. Rozycki -Cc: stable@vger.kernel.org # v6.17+: 9f048fa48740: MIPS: mm: Prevent a TLB shutdown on initial uniquification -Cc: stable@vger.kernel.org # v6.17+ -Tested-by: Gregory CLEMENT -Tested-by: Klara Modin -Signed-off-by: Thomas Bogendoerfer -Signed-off-by: Greg Kroah-Hartman ---- - arch/mips/mm/tlb-r4k.c | 18 ++++++++++++++++-- - 1 file changed, 16 insertions(+), 2 deletions(-) - ---- a/arch/mips/mm/tlb-r4k.c -+++ b/arch/mips/mm/tlb-r4k.c -@@ -12,6 +12,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -512,17 +513,26 @@ static int r4k_vpn_cmp(const void *a, co - * Initialise all TLB entries with unique values that do not clash with - * what we have been handed over and what we'll be using ourselves. - */ --static void r4k_tlb_uniquify(void) -+static void __ref r4k_tlb_uniquify(void) - { -- unsigned long tlb_vpns[1 << MIPS_CONF1_TLBS_SIZE]; - int tlbsize = current_cpu_data.tlbsize; -+ bool use_slab = slab_is_available(); - int start = num_wired_entries(); -+ phys_addr_t tlb_vpn_size; -+ unsigned long *tlb_vpns; - unsigned long vpn_mask; - int cnt, ent, idx, i; - - vpn_mask = GENMASK(cpu_vmbits - 1, 13); - vpn_mask |= IS_ENABLED(CONFIG_64BIT) ? 3ULL << 62 : 1 << 31; - -+ tlb_vpn_size = tlbsize * sizeof(*tlb_vpns); -+ tlb_vpns = (use_slab ? 
-+ kmalloc(tlb_vpn_size, GFP_KERNEL) : -+ memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns))); -+ if (WARN_ON(!tlb_vpns)) -+ return; /* Pray local_flush_tlb_all() is good enough. */ -+ - htw_stop(); - - for (i = start, cnt = 0; i < tlbsize; i++, cnt++) { -@@ -575,6 +585,10 @@ static void r4k_tlb_uniquify(void) - tlbw_use_hazard(); - htw_start(); - flush_micro_tlb(); -+ if (use_slab) -+ kfree(tlb_vpns); -+ else -+ memblock_free(tlb_vpns, tlb_vpn_size); - } - - /* diff --git a/queue-5.10/mm-mempool-fix-poisoning-order-0-pages-with-highmem.patch b/queue-5.10/mm-mempool-fix-poisoning-order-0-pages-with-highmem.patch deleted file mode 100644 index 7e1645b869..0000000000 --- a/queue-5.10/mm-mempool-fix-poisoning-order-0-pages-with-highmem.patch +++ /dev/null @@ -1,110 +0,0 @@ -From stable+bounces-196810-greg=kroah.com@vger.kernel.org Mon Nov 24 22:38:44 2025 -From: Sasha Levin -Date: Mon, 24 Nov 2025 16:38:34 -0500 -Subject: mm/mempool: fix poisoning order>0 pages with HIGHMEM -To: stable@vger.kernel.org -Cc: Vlastimil Babka , kernel test robot , Christoph Hellwig , Sasha Levin -Message-ID: <20251124213835.42484-2-sashal@kernel.org> - -From: Vlastimil Babka - -[ Upstream commit ec33b59542d96830e3c89845ff833cf7b25ef172 ] - -The kernel test has reported: - - BUG: unable to handle page fault for address: fffba000 - #PF: supervisor write access in kernel mode - #PF: error_code(0x0002) - not-present page - *pde = 03171067 *pte = 00000000 - Oops: Oops: 0002 [#1] - CPU: 0 UID: 0 PID: 1 Comm: swapper/0 Tainted: G T 6.18.0-rc2-00031-gec7f31b2a2d3 #1 NONE a1d066dfe789f54bc7645c7989957d2bdee593ca - Tainted: [T]=RANDSTRUCT - Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.3-debian-1.16.3-2 04/01/2014 - EIP: memset (arch/x86/include/asm/string_32.h:168 arch/x86/lib/memcpy_32.c:17) - Code: a5 8b 4d f4 83 e1 03 74 02 f3 a4 83 c4 04 5e 5f 5d 2e e9 73 41 01 00 90 90 90 3e 8d 74 26 00 55 89 e5 57 56 89 c6 89 d0 89 f7 aa 89 f0 5e 5f 5d 2e e9 53 41 01 00 cc cc cc 55 89 e5 53 57 56 - EAX: 0000006b EBX: 00000015 ECX: 001fefff EDX: 0000006b - ESI: fffb9000 EDI: fffba000 EBP: c611fbf0 ESP: c611fbe8 - DS: 007b ES: 007b FS: 0000 GS: 0000 SS: 0068 EFLAGS: 00010287 - CR0: 80050033 CR2: fffba000 CR3: 0316e000 CR4: 00040690 - Call Trace: - poison_element (mm/mempool.c:83 mm/mempool.c:102) - mempool_init_node (mm/mempool.c:142 mm/mempool.c:226) - mempool_init_noprof (mm/mempool.c:250 (discriminator 1)) - ? mempool_alloc_pages (mm/mempool.c:640) - bio_integrity_initfn (block/bio-integrity.c:483 (discriminator 8)) - ? mempool_alloc_pages (mm/mempool.c:640) - do_one_initcall (init/main.c:1283) - -Christoph found out this is due to the poisoning code not dealing -properly with CONFIG_HIGHMEM because only the first page is mapped but -then the whole potentially high-order page is accessed. - -We could give up on HIGHMEM here, but it's straightforward to fix this -with a loop that's mapping, poisoning or checking and unmapping -individual pages. 
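A minimal freestanding sketch of that per-page approach (hypothetical helper name; the real change lives in check_element()/poison_element() in mm/mempool.c):

	#include <linux/highmem.h>
	#include <linux/mm.h>
	#include <linux/string.h>

	/*
	 * Map, write and unmap one page at a time, so a high-order
	 * element never runs past the single page that
	 * kmap_local_page() maps on HIGHMEM configurations.
	 */
	static void poison_pages(struct page *page, int order, u8 pattern)
	{
		int i;

		for (i = 0; i < (1 << order); i++) {
			void *addr = kmap_local_page(page + i);

			memset(addr, pattern, PAGE_SIZE);
			kunmap_local(addr);
		}
	}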
- -Reported-by: kernel test robot -Closes: https://lore.kernel.org/oe-lkp/202511111411.9ebfa1ba-lkp@intel.com -Analyzed-by: Christoph Hellwig -Fixes: bdfedb76f4f5 ("mm, mempool: poison elements backed by slab allocator") -Cc: stable@vger.kernel.org -Tested-by: kernel test robot -Reviewed-by: Christoph Hellwig -Link: https://patch.msgid.link/20251113-mempool-poison-v1-1-233b3ef984c3@suse.cz -Signed-off-by: Vlastimil Babka -Signed-off-by: Sasha Levin -Signed-off-by: Greg Kroah-Hartman ---- - mm/mempool.c | 32 ++++++++++++++++++++++++++------ - 1 file changed, 26 insertions(+), 6 deletions(-) - ---- a/mm/mempool.c -+++ b/mm/mempool.c -@@ -63,10 +63,20 @@ static void check_element(mempool_t *poo - } else if (pool->free == mempool_free_pages) { - /* Mempools backed by page allocator */ - int order = (int)(long)pool->pool_data; -- void *addr = kmap_local_page((struct page *)element); - -- __check_element(pool, addr, 1UL << (PAGE_SHIFT + order)); -- kunmap_local(addr); -+#ifdef CONFIG_HIGHMEM -+ for (int i = 0; i < (1 << order); i++) { -+ struct page *page = (struct page *)element; -+ void *addr = kmap_local_page(page + i); -+ -+ __check_element(pool, addr, PAGE_SIZE); -+ kunmap_local(addr); -+ } -+#else -+ void *addr = page_address((struct page *)element); -+ -+ __check_element(pool, addr, PAGE_SIZE << order); -+#endif - } - } - -@@ -86,10 +96,20 @@ static void poison_element(mempool_t *po - } else if (pool->alloc == mempool_alloc_pages) { - /* Mempools backed by page allocator */ - int order = (int)(long)pool->pool_data; -- void *addr = kmap_local_page((struct page *)element); - -- __poison_element(addr, 1UL << (PAGE_SHIFT + order)); -- kunmap_local(addr); -+#ifdef CONFIG_HIGHMEM -+ for (int i = 0; i < (1 << order); i++) { -+ struct page *page = (struct page *)element; -+ void *addr = kmap_local_page(page + i); -+ -+ __poison_element(addr, PAGE_SIZE); -+ kunmap_local(addr); -+ } -+#else -+ void *addr = page_address((struct page *)element); -+ -+ __poison_element(addr, PAGE_SIZE << order); -+#endif - } - } - #else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */ diff --git a/queue-5.10/mm-mempool-replace-kmap_atomic-with-kmap_local_page.patch b/queue-5.10/mm-mempool-replace-kmap_atomic-with-kmap_local_page.patch deleted file mode 100644 index 10a9b64ddb..0000000000 --- a/queue-5.10/mm-mempool-replace-kmap_atomic-with-kmap_local_page.patch +++ /dev/null @@ -1,71 +0,0 @@ -From stable+bounces-196809-greg=kroah.com@vger.kernel.org Mon Nov 24 22:38:42 2025 -From: Sasha Levin -Date: Mon, 24 Nov 2025 16:38:33 -0500 -Subject: mm/mempool: replace kmap_atomic() with kmap_local_page() -To: stable@vger.kernel.org -Cc: "Fabio M. De Francesco" , Ira Weiny , Andrew Morton , Sasha Levin -Message-ID: <20251124213835.42484-1-sashal@kernel.org> - -From: "Fabio M. De Francesco" - -[ Upstream commit f2bcc99a5e901a13b754648d1dbab60f4adf9375 ] - -kmap_atomic() has been deprecated in favor of kmap_local_page(). - -Therefore, replace kmap_atomic() with kmap_local_page(). - -kmap_atomic() is implemented like a kmap_local_page() which also disables -page-faults and preemption (the latter only in !PREEMPT_RT kernels). The -kernel virtual addresses returned by these two API are only valid in the -context of the callers (i.e., they cannot be handed to other threads). - -With kmap_local_page() the mappings are per thread and CPU local like in -kmap_atomic(); however, they can handle page-faults and can be called from -any context (including interrupts). 
The tasks that call kmap_local_page() -can be preempted and, when they are scheduled to run again, the kernel -virtual addresses are restored and are still valid. - -The code blocks between the mappings and un-mappings don't rely on the -above-mentioned side effects of kmap_atomic(), so that mere replacements -of the old API with the new one is all that they require (i.e., there is -no need to explicitly call pagefault_disable() and/or preempt_disable()). - -Link: https://lkml.kernel.org/r/20231120142640.7077-1-fabio.maria.de.francesco@linux.intel.com -Signed-off-by: Fabio M. De Francesco -Cc: Ira Weiny -Signed-off-by: Andrew Morton -Stable-dep-of: ec33b59542d9 ("mm/mempool: fix poisoning order>0 pages with HIGHMEM") -Signed-off-by: Sasha Levin -Signed-off-by: Greg Kroah-Hartman ---- - mm/mempool.c | 8 ++++---- - 1 file changed, 4 insertions(+), 4 deletions(-) - ---- a/mm/mempool.c -+++ b/mm/mempool.c -@@ -63,10 +63,10 @@ static void check_element(mempool_t *poo - } else if (pool->free == mempool_free_pages) { - /* Mempools backed by page allocator */ - int order = (int)(long)pool->pool_data; -- void *addr = kmap_atomic((struct page *)element); -+ void *addr = kmap_local_page((struct page *)element); - - __check_element(pool, addr, 1UL << (PAGE_SHIFT + order)); -- kunmap_atomic(addr); -+ kunmap_local(addr); - } - } - -@@ -86,10 +86,10 @@ static void poison_element(mempool_t *po - } else if (pool->alloc == mempool_alloc_pages) { - /* Mempools backed by page allocator */ - int order = (int)(long)pool->pool_data; -- void *addr = kmap_atomic((struct page *)element); -+ void *addr = kmap_local_page((struct page *)element); - - __poison_element(addr, 1UL << (PAGE_SHIFT + order)); -- kunmap_atomic(addr); -+ kunmap_local(addr); - } - } - #else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */ diff --git a/queue-5.10/series b/queue-5.10/series index 66c97d166d..cb132065d2 100644 --- a/queue-5.10/series +++ b/queue-5.10/series @@ -244,8 +244,6 @@ ata-libata-scsi-fix-system-suspend-for-a-security-locked-drive.patch mptcp-introduce-mptcp_schedule_work.patch mptcp-fix-race-condition-in-mptcp_schedule_work.patch dt-bindings-pinctrl-toshiba-visconti-fix-number-of-items-in-groups.patch -mm-mempool-replace-kmap_atomic-with-kmap_local_page.patch -mm-mempool-fix-poisoning-order-0-pages-with-highmem.patch mptcp-fix-a-race-in-mptcp_pm_del_add_timer.patch mptcp-do-not-fallback-when-ooo-is-present.patch usb-deprecate-the-third-argument-of-usb_maxpacket.patch @@ -263,7 +261,6 @@ revert-perf-x86-always-store-regs-ip-in-perf_callchain_kernel.patch iio-imu-st_lsm6dsx-fix-array-size-for-st_lsm6dsx_settings-fields.patch iio-common-ssp_sensors-fix-an-error-handling-path-ssp_probe.patch mips-mm-prevent-a-tlb-shutdown-on-initial-uniquification.patch -mips-mm-kmalloc-tlb_vpn-array-to-avoid-stack-overflow.patch atm-fore200e-fix-possible-data-race-in-fore200e_open.patch can-sja1000-fix-max-irq-loop-handling.patch can-sun4i_can-sun4i_can_interrupt-fix-max-irq-loop-handling.patch diff --git a/queue-5.15/mips-mm-kmalloc-tlb_vpn-array-to-avoid-stack-overflow.patch b/queue-5.15/mips-mm-kmalloc-tlb_vpn-array-to-avoid-stack-overflow.patch deleted file mode 100644 index 6522677304..0000000000 --- a/queue-5.15/mips-mm-kmalloc-tlb_vpn-array-to-avoid-stack-overflow.patch +++ /dev/null @@ -1,76 +0,0 @@ -From 841ecc979b18d3227fad5e2d6a1e6f92688776b5 Mon Sep 17 00:00:00 2001 -From: Thomas Bogendoerfer -Date: Fri, 28 Nov 2025 16:53:46 +0000 -Subject: MIPS: mm: kmalloc tlb_vpn array to avoid stack overflow - -From: Thomas Bogendoerfer - 
-commit 841ecc979b18d3227fad5e2d6a1e6f92688776b5 upstream. - -Owing to Config4.MMUSizeExt and VTLB/FTLB MMU features later MIPSr2+ -cores can have more than 64 TLB entries. Therefore allocate an array -for uniquification instead of placing too an small array on the stack. - -Fixes: 35ad7e181541 ("MIPS: mm: tlb-r4k: Uniquify TLB entries on init") -Co-developed-by: Maciej W. Rozycki -Signed-off-by: Maciej W. Rozycki -Cc: stable@vger.kernel.org # v6.17+: 9f048fa48740: MIPS: mm: Prevent a TLB shutdown on initial uniquification -Cc: stable@vger.kernel.org # v6.17+ -Tested-by: Gregory CLEMENT -Tested-by: Klara Modin -Signed-off-by: Thomas Bogendoerfer -Signed-off-by: Greg Kroah-Hartman ---- - arch/mips/mm/tlb-r4k.c | 18 ++++++++++++++++-- - 1 file changed, 16 insertions(+), 2 deletions(-) - ---- a/arch/mips/mm/tlb-r4k.c -+++ b/arch/mips/mm/tlb-r4k.c -@@ -12,6 +12,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -512,17 +513,26 @@ static int r4k_vpn_cmp(const void *a, co - * Initialise all TLB entries with unique values that do not clash with - * what we have been handed over and what we'll be using ourselves. - */ --static void r4k_tlb_uniquify(void) -+static void __ref r4k_tlb_uniquify(void) - { -- unsigned long tlb_vpns[1 << MIPS_CONF1_TLBS_SIZE]; - int tlbsize = current_cpu_data.tlbsize; -+ bool use_slab = slab_is_available(); - int start = num_wired_entries(); -+ phys_addr_t tlb_vpn_size; -+ unsigned long *tlb_vpns; - unsigned long vpn_mask; - int cnt, ent, idx, i; - - vpn_mask = GENMASK(cpu_vmbits - 1, 13); - vpn_mask |= IS_ENABLED(CONFIG_64BIT) ? 3ULL << 62 : 1 << 31; - -+ tlb_vpn_size = tlbsize * sizeof(*tlb_vpns); -+ tlb_vpns = (use_slab ? -+ kmalloc(tlb_vpn_size, GFP_KERNEL) : -+ memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns))); -+ if (WARN_ON(!tlb_vpns)) -+ return; /* Pray local_flush_tlb_all() is good enough. 
*/ -+ - htw_stop(); - - for (i = start, cnt = 0; i < tlbsize; i++, cnt++) { -@@ -575,6 +585,10 @@ static void r4k_tlb_uniquify(void) - tlbw_use_hazard(); - htw_start(); - flush_micro_tlb(); -+ if (use_slab) -+ kfree(tlb_vpns); -+ else -+ memblock_free(tlb_vpns, tlb_vpn_size); - } - - /* diff --git a/queue-5.15/mm-mempool-fix-poisoning-order-0-pages-with-highmem.patch b/queue-5.15/mm-mempool-fix-poisoning-order-0-pages-with-highmem.patch deleted file mode 100644 index 6731e7f793..0000000000 --- a/queue-5.15/mm-mempool-fix-poisoning-order-0-pages-with-highmem.patch +++ /dev/null @@ -1,110 +0,0 @@ -From stable+bounces-196808-greg=kroah.com@vger.kernel.org Mon Nov 24 22:33:40 2025 -From: Sasha Levin -Date: Mon, 24 Nov 2025 16:33:30 -0500 -Subject: mm/mempool: fix poisoning order>0 pages with HIGHMEM -To: stable@vger.kernel.org -Cc: Vlastimil Babka , kernel test robot , Christoph Hellwig , Sasha Levin -Message-ID: <20251124213330.39729-2-sashal@kernel.org> - -From: Vlastimil Babka - -[ Upstream commit ec33b59542d96830e3c89845ff833cf7b25ef172 ] - -The kernel test has reported: - - BUG: unable to handle page fault for address: fffba000 - #PF: supervisor write access in kernel mode - #PF: error_code(0x0002) - not-present page - *pde = 03171067 *pte = 00000000 - Oops: Oops: 0002 [#1] - CPU: 0 UID: 0 PID: 1 Comm: swapper/0 Tainted: G T 6.18.0-rc2-00031-gec7f31b2a2d3 #1 NONE a1d066dfe789f54bc7645c7989957d2bdee593ca - Tainted: [T]=RANDSTRUCT - Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.3-debian-1.16.3-2 04/01/2014 - EIP: memset (arch/x86/include/asm/string_32.h:168 arch/x86/lib/memcpy_32.c:17) - Code: a5 8b 4d f4 83 e1 03 74 02 f3 a4 83 c4 04 5e 5f 5d 2e e9 73 41 01 00 90 90 90 3e 8d 74 26 00 55 89 e5 57 56 89 c6 89 d0 89 f7 aa 89 f0 5e 5f 5d 2e e9 53 41 01 00 cc cc cc 55 89 e5 53 57 56 - EAX: 0000006b EBX: 00000015 ECX: 001fefff EDX: 0000006b - ESI: fffb9000 EDI: fffba000 EBP: c611fbf0 ESP: c611fbe8 - DS: 007b ES: 007b FS: 0000 GS: 0000 SS: 0068 EFLAGS: 00010287 - CR0: 80050033 CR2: fffba000 CR3: 0316e000 CR4: 00040690 - Call Trace: - poison_element (mm/mempool.c:83 mm/mempool.c:102) - mempool_init_node (mm/mempool.c:142 mm/mempool.c:226) - mempool_init_noprof (mm/mempool.c:250 (discriminator 1)) - ? mempool_alloc_pages (mm/mempool.c:640) - bio_integrity_initfn (block/bio-integrity.c:483 (discriminator 8)) - ? mempool_alloc_pages (mm/mempool.c:640) - do_one_initcall (init/main.c:1283) - -Christoph found out this is due to the poisoning code not dealing -properly with CONFIG_HIGHMEM because only the first page is mapped but -then the whole potentially high-order page is accessed. - -We could give up on HIGHMEM here, but it's straightforward to fix this -with a loop that's mapping, poisoning or checking and unmapping -individual pages. 
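The register dump above shows the failure mode directly: the faulting address (CR2/EDI = fffba000) is exactly 0x1000 bytes, one PAGE_SIZE, past the start of the mapping (ESI = fffb9000), so memset() ran off the end of the single page that kmap_local_page() had mapped.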
- -Reported-by: kernel test robot -Closes: https://lore.kernel.org/oe-lkp/202511111411.9ebfa1ba-lkp@intel.com -Analyzed-by: Christoph Hellwig -Fixes: bdfedb76f4f5 ("mm, mempool: poison elements backed by slab allocator") -Cc: stable@vger.kernel.org -Tested-by: kernel test robot -Reviewed-by: Christoph Hellwig -Link: https://patch.msgid.link/20251113-mempool-poison-v1-1-233b3ef984c3@suse.cz -Signed-off-by: Vlastimil Babka -Signed-off-by: Sasha Levin -Signed-off-by: Greg Kroah-Hartman ---- - mm/mempool.c | 32 ++++++++++++++++++++++++++------ - 1 file changed, 26 insertions(+), 6 deletions(-) - ---- a/mm/mempool.c -+++ b/mm/mempool.c -@@ -63,10 +63,20 @@ static void check_element(mempool_t *poo - } else if (pool->free == mempool_free_pages) { - /* Mempools backed by page allocator */ - int order = (int)(long)pool->pool_data; -- void *addr = kmap_local_page((struct page *)element); - -- __check_element(pool, addr, 1UL << (PAGE_SHIFT + order)); -- kunmap_local(addr); -+#ifdef CONFIG_HIGHMEM -+ for (int i = 0; i < (1 << order); i++) { -+ struct page *page = (struct page *)element; -+ void *addr = kmap_local_page(page + i); -+ -+ __check_element(pool, addr, PAGE_SIZE); -+ kunmap_local(addr); -+ } -+#else -+ void *addr = page_address((struct page *)element); -+ -+ __check_element(pool, addr, PAGE_SIZE << order); -+#endif - } - } - -@@ -86,10 +96,20 @@ static void poison_element(mempool_t *po - } else if (pool->alloc == mempool_alloc_pages) { - /* Mempools backed by page allocator */ - int order = (int)(long)pool->pool_data; -- void *addr = kmap_local_page((struct page *)element); - -- __poison_element(addr, 1UL << (PAGE_SHIFT + order)); -- kunmap_local(addr); -+#ifdef CONFIG_HIGHMEM -+ for (int i = 0; i < (1 << order); i++) { -+ struct page *page = (struct page *)element; -+ void *addr = kmap_local_page(page + i); -+ -+ __poison_element(addr, PAGE_SIZE); -+ kunmap_local(addr); -+ } -+#else -+ void *addr = page_address((struct page *)element); -+ -+ __poison_element(addr, PAGE_SIZE << order); -+#endif - } - } - #else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */ diff --git a/queue-5.15/mm-mempool-replace-kmap_atomic-with-kmap_local_page.patch b/queue-5.15/mm-mempool-replace-kmap_atomic-with-kmap_local_page.patch deleted file mode 100644 index e5383c109a..0000000000 --- a/queue-5.15/mm-mempool-replace-kmap_atomic-with-kmap_local_page.patch +++ /dev/null @@ -1,71 +0,0 @@ -From stable+bounces-196807-greg=kroah.com@vger.kernel.org Mon Nov 24 22:33:36 2025 -From: Sasha Levin -Date: Mon, 24 Nov 2025 16:33:29 -0500 -Subject: mm/mempool: replace kmap_atomic() with kmap_local_page() -To: stable@vger.kernel.org -Cc: "Fabio M. De Francesco" , Ira Weiny , Andrew Morton , Sasha Levin -Message-ID: <20251124213330.39729-1-sashal@kernel.org> - -From: "Fabio M. De Francesco" - -[ Upstream commit f2bcc99a5e901a13b754648d1dbab60f4adf9375 ] - -kmap_atomic() has been deprecated in favor of kmap_local_page(). - -Therefore, replace kmap_atomic() with kmap_local_page(). - -kmap_atomic() is implemented like a kmap_local_page() which also disables -page-faults and preemption (the latter only in !PREEMPT_RT kernels). The -kernel virtual addresses returned by these two API are only valid in the -context of the callers (i.e., they cannot be handed to other threads). - -With kmap_local_page() the mappings are per thread and CPU local like in -kmap_atomic(); however, they can handle page-faults and can be called from -any context (including interrupts). 
The tasks that call kmap_local_page() -can be preempted and, when they are scheduled to run again, the kernel -virtual addresses are restored and are still valid. - -The code blocks between the mappings and un-mappings don't rely on the -above-mentioned side effects of kmap_atomic(), so that mere replacements -of the old API with the new one is all that they require (i.e., there is -no need to explicitly call pagefault_disable() and/or preempt_disable()). - -Link: https://lkml.kernel.org/r/20231120142640.7077-1-fabio.maria.de.francesco@linux.intel.com -Signed-off-by: Fabio M. De Francesco -Cc: Ira Weiny -Signed-off-by: Andrew Morton -Stable-dep-of: ec33b59542d9 ("mm/mempool: fix poisoning order>0 pages with HIGHMEM") -Signed-off-by: Sasha Levin -Signed-off-by: Greg Kroah-Hartman ---- - mm/mempool.c | 8 ++++---- - 1 file changed, 4 insertions(+), 4 deletions(-) - ---- a/mm/mempool.c -+++ b/mm/mempool.c -@@ -63,10 +63,10 @@ static void check_element(mempool_t *poo - } else if (pool->free == mempool_free_pages) { - /* Mempools backed by page allocator */ - int order = (int)(long)pool->pool_data; -- void *addr = kmap_atomic((struct page *)element); -+ void *addr = kmap_local_page((struct page *)element); - - __check_element(pool, addr, 1UL << (PAGE_SHIFT + order)); -- kunmap_atomic(addr); -+ kunmap_local(addr); - } - } - -@@ -86,10 +86,10 @@ static void poison_element(mempool_t *po - } else if (pool->alloc == mempool_alloc_pages) { - /* Mempools backed by page allocator */ - int order = (int)(long)pool->pool_data; -- void *addr = kmap_atomic((struct page *)element); -+ void *addr = kmap_local_page((struct page *)element); - - __poison_element(addr, 1UL << (PAGE_SHIFT + order)); -- kunmap_atomic(addr); -+ kunmap_local(addr); - } - } - #else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */ diff --git a/queue-5.15/series b/queue-5.15/series index 51a17609ff..636c13b1d4 100644 --- a/queue-5.15/series +++ b/queue-5.15/series @@ -324,8 +324,6 @@ input-remove-third-argument-of-usb_maxpacket.patch input-pegasus-notetaker-fix-potential-out-of-bounds-access.patch ata-libata-scsi-fix-system-suspend-for-a-security-locked-drive.patch dt-bindings-pinctrl-toshiba-visconti-fix-number-of-items-in-groups.patch -mm-mempool-replace-kmap_atomic-with-kmap_local_page.patch -mm-mempool-fix-poisoning-order-0-pages-with-highmem.patch mptcp-fix-ack-generation-for-fallback-msk.patch mptcp-fix-premature-close-in-case-of-fallback.patch mptcp-fix-a-race-in-mptcp_pm_del_add_timer.patch @@ -349,7 +347,6 @@ iio-imu-st_lsm6dsx-fix-array-size-for-st_lsm6dsx_settings-fields.patch iio-common-ssp_sensors-fix-an-error-handling-path-ssp_probe.patch iio-accel-bmc150-fix-irq-assumption-regression.patch mips-mm-prevent-a-tlb-shutdown-on-initial-uniquification.patch -mips-mm-kmalloc-tlb_vpn-array-to-avoid-stack-overflow.patch atm-fore200e-fix-possible-data-race-in-fore200e_open.patch can-sja1000-fix-max-irq-loop-handling.patch can-sun4i_can-sun4i_can_interrupt-fix-max-irq-loop-handling.patch -- 2.47.3
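For reference, the boot-time allocation pattern the dropped MIPS patch relies on, as a minimal sketch with hypothetical helper names (it mirrors the quoted arch/mips/mm/tlb-r4k.c hunks, not the exact upstream code): take memory from the slab allocator once it is up, fall back to memblock during early boot, and free through whichever allocator handed the memory out.

	#include <linux/memblock.h>
	#include <linux/slab.h>

	static unsigned long *alloc_vpn_array(int tlbsize, bool *use_slab)
	{
		size_t size = tlbsize * sizeof(unsigned long);

		/* true once the slab allocator has been brought up */
		*use_slab = slab_is_available();
		return *use_slab ? kmalloc(size, GFP_KERNEL) :
				   memblock_alloc_raw(size, sizeof(unsigned long));
	}

	static void free_vpn_array(unsigned long *vpns, int tlbsize, bool use_slab)
	{
		/* return memory to the allocator that provided it */
		if (use_slab)
			kfree(vpns);
		else
			memblock_free(vpns, tlbsize * sizeof(unsigned long));
	}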