From e5f088c72cef646b3ff53b97ef7bf81580e9e456 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Sat, 18 Feb 2023 12:04:57 +0100 Subject: [PATCH] 6.1-stable patches added patches: coredump-move-dump_emit_page-to-kill-unused-warning.patch net-fix-unwanted-sign-extension-in-netdev_stats_to_stats64.patch revert-mm-always-release-pages-to-the-buddy-allocator-in-memblock_free_late.patch --- ...ump_emit_page-to-kill-unused-warning.patch | 93 +++++++++++++++++++ ...extension-in-netdev_stats_to_stats64.patch | 38 ++++++++ ...uddy-allocator-in-memblock_free_late.patch | 71 ++++++++++++++ queue-6.1/series | 3 + 4 files changed, 205 insertions(+) create mode 100644 queue-6.1/coredump-move-dump_emit_page-to-kill-unused-warning.patch create mode 100644 queue-6.1/net-fix-unwanted-sign-extension-in-netdev_stats_to_stats64.patch create mode 100644 queue-6.1/revert-mm-always-release-pages-to-the-buddy-allocator-in-memblock_free_late.patch diff --git a/queue-6.1/coredump-move-dump_emit_page-to-kill-unused-warning.patch b/queue-6.1/coredump-move-dump_emit_page-to-kill-unused-warning.patch new file mode 100644 index 00000000000..99db579fb21 --- /dev/null +++ b/queue-6.1/coredump-move-dump_emit_page-to-kill-unused-warning.patch @@ -0,0 +1,93 @@ +From 9c7417b5ec440242bb5b64521acd53d4e19130c1 Mon Sep 17 00:00:00 2001 +From: Geert Uytterhoeven +Date: Mon, 3 Oct 2022 11:06:57 +0200 +Subject: coredump: Move dump_emit_page() to kill unused warning +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Geert Uytterhoeven + +commit 9c7417b5ec440242bb5b64521acd53d4e19130c1 upstream. + +If CONFIG_ELF_CORE is not set: + + fs/coredump.c:835:12: error: ‘dump_emit_page’ defined but not used [-Werror=unused-function] + 835 | static int dump_emit_page(struct coredump_params *cprm, struct page *page) + | ^~~~~~~~~~~~~~ + +Fix this by moving dump_emit_page() inside the existing section +protected by #ifdef CONFIG_ELF_CORE. 
+ +Fixes: 06bbaa6dc53cb720 ("[coredump] don't use __kernel_write() on kmap_local_page()") +Signed-off-by: Geert Uytterhoeven +Signed-off-by: Al Viro +Signed-off-by: Greg Kroah-Hartman +--- + fs/coredump.c | 48 ++++++++++++++++++++++++------------------------ + 1 file changed, 24 insertions(+), 24 deletions(-) + +--- a/fs/coredump.c ++++ b/fs/coredump.c +@@ -831,6 +831,30 @@ static int __dump_skip(struct coredump_p + } + } + ++int dump_emit(struct coredump_params *cprm, const void *addr, int nr) ++{ ++ if (cprm->to_skip) { ++ if (!__dump_skip(cprm, cprm->to_skip)) ++ return 0; ++ cprm->to_skip = 0; ++ } ++ return __dump_emit(cprm, addr, nr); ++} ++EXPORT_SYMBOL(dump_emit); ++ ++void dump_skip_to(struct coredump_params *cprm, unsigned long pos) ++{ ++ cprm->to_skip = pos - cprm->pos; ++} ++EXPORT_SYMBOL(dump_skip_to); ++ ++void dump_skip(struct coredump_params *cprm, size_t nr) ++{ ++ cprm->to_skip += nr; ++} ++EXPORT_SYMBOL(dump_skip); ++ ++#ifdef CONFIG_ELF_CORE + static int dump_emit_page(struct coredump_params *cprm, struct page *page) + { + struct bio_vec bvec = { +@@ -864,30 +888,6 @@ static int dump_emit_page(struct coredum + return 1; + } + +-int dump_emit(struct coredump_params *cprm, const void *addr, int nr) +-{ +- if (cprm->to_skip) { +- if (!__dump_skip(cprm, cprm->to_skip)) +- return 0; +- cprm->to_skip = 0; +- } +- return __dump_emit(cprm, addr, nr); +-} +-EXPORT_SYMBOL(dump_emit); +- +-void dump_skip_to(struct coredump_params *cprm, unsigned long pos) +-{ +- cprm->to_skip = pos - cprm->pos; +-} +-EXPORT_SYMBOL(dump_skip_to); +- +-void dump_skip(struct coredump_params *cprm, size_t nr) +-{ +- cprm->to_skip += nr; +-} +-EXPORT_SYMBOL(dump_skip); +- +-#ifdef CONFIG_ELF_CORE + int dump_user_range(struct coredump_params *cprm, unsigned long start, + unsigned long len) + { diff --git a/queue-6.1/net-fix-unwanted-sign-extension-in-netdev_stats_to_stats64.patch b/queue-6.1/net-fix-unwanted-sign-extension-in-netdev_stats_to_stats64.patch new file mode 100644 index 00000000000..a75e5b75869 --- /dev/null +++ b/queue-6.1/net-fix-unwanted-sign-extension-in-netdev_stats_to_stats64.patch @@ -0,0 +1,38 @@ +From 9b55d3f0a69af649c62cbc2633e6d695bb3cc583 Mon Sep 17 00:00:00 2001 +From: Felix Riemann +Date: Fri, 10 Feb 2023 13:36:44 +0100 +Subject: net: Fix unwanted sign extension in netdev_stats_to_stats64() + +From: Felix Riemann + +commit 9b55d3f0a69af649c62cbc2633e6d695bb3cc583 upstream. + +When converting net_device_stats to rtnl_link_stats64 sign extension +is triggered on ILP32 machines as 6c1c509778 changed the previous +"ulong -> u64" conversion to "long -> u64" by accessing the +net_device_stats fields through a (signed) atomic_long_t. + +This causes for example the received bytes counter to jump to 16EiB after +having received 2^31 bytes. Casting the atomic value to "unsigned long" +beforehand converting it into u64 avoids this. + +Fixes: 6c1c5097781f ("net: add atomic_long_t to net_device_stats fields") +Signed-off-by: Felix Riemann +Reviewed-by: Eric Dumazet +Signed-off-by: David S. 
Miller +Signed-off-by: Greg Kroah-Hartman +--- + net/core/dev.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -10385,7 +10385,7 @@ void netdev_stats_to_stats64(struct rtnl + + BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64)); + for (i = 0; i < n; i++) +- dst[i] = atomic_long_read(&src[i]); ++ dst[i] = (unsigned long)atomic_long_read(&src[i]); + /* zero out counters that only exist in rtnl_link_stats64 */ + memset((char *)stats64 + n * sizeof(u64), 0, + sizeof(*stats64) - n * sizeof(u64)); diff --git a/queue-6.1/revert-mm-always-release-pages-to-the-buddy-allocator-in-memblock_free_late.patch b/queue-6.1/revert-mm-always-release-pages-to-the-buddy-allocator-in-memblock_free_late.patch new file mode 100644 index 00000000000..47b05cca658 --- /dev/null +++ b/queue-6.1/revert-mm-always-release-pages-to-the-buddy-allocator-in-memblock_free_late.patch @@ -0,0 +1,71 @@ +From 647037adcad00f2bab8828d3d41cd0553d41f3bd Mon Sep 17 00:00:00 2001 +From: Aaron Thompson +Date: Tue, 7 Feb 2023 08:21:51 +0000 +Subject: Revert "mm: Always release pages to the buddy allocator in memblock_free_late()." + +From: Aaron Thompson + +commit 647037adcad00f2bab8828d3d41cd0553d41f3bd upstream. + +This reverts commit 115d9d77bb0f9152c60b6e8646369fa7f6167593. + +The pages being freed by memblock_free_late() have already been +initialized, but if they are in the deferred init range, +__free_one_page() might access nearby uninitialized pages when trying to +coalesce buddies. This can, for example, trigger this BUG: + + BUG: unable to handle page fault for address: ffffe964c02580c8 + RIP: 0010:__list_del_entry_valid+0x3f/0x70 + + __free_one_page+0x139/0x410 + __free_pages_ok+0x21d/0x450 + memblock_free_late+0x8c/0xb9 + efi_free_boot_services+0x16b/0x25c + efi_enter_virtual_mode+0x403/0x446 + start_kernel+0x678/0x714 + secondary_startup_64_no_verify+0xd2/0xdb + + +A proper fix will be more involved so revert this change for the time +being. + +Fixes: 115d9d77bb0f ("mm: Always release pages to the buddy allocator in memblock_free_late().") +Signed-off-by: Aaron Thompson +Link: https://lore.kernel.org/r/20230207082151.1303-1-dev@aaront.org +Signed-off-by: Mike Rapoport (IBM) +Signed-off-by: Greg Kroah-Hartman +--- + mm/memblock.c | 8 +------- + tools/testing/memblock/internal.h | 4 ---- + 2 files changed, 1 insertion(+), 11 deletions(-) + +--- a/mm/memblock.c ++++ b/mm/memblock.c +@@ -1640,13 +1640,7 @@ void __init memblock_free_late(phys_addr + end = PFN_DOWN(base + size); + + for (; cursor < end; cursor++) { +- /* +- * Reserved pages are always initialized by the end of +- * memblock_free_all() (by memmap_init() and, if deferred +- * initialization is enabled, memmap_init_reserved_pages()), so +- * these pages can be released directly to the buddy allocator. 
+- */ +- __free_pages_core(pfn_to_page(cursor), 0); ++ memblock_free_pages(pfn_to_page(cursor), cursor, 0); + totalram_pages_inc(); + } + } +--- a/tools/testing/memblock/internal.h ++++ b/tools/testing/memblock/internal.h +@@ -15,10 +15,6 @@ bool mirrored_kernelcore = false; + + struct page {}; + +-void __free_pages_core(struct page *page, unsigned int order) +-{ +-} +- + void memblock_free_pages(struct page *page, unsigned long pfn, + unsigned int order) + { diff --git a/queue-6.1/series b/queue-6.1/series index 99a34ecf0a3..cc92563a31c 100644 --- a/queue-6.1/series +++ b/queue-6.1/series @@ -71,3 +71,6 @@ mm-filemap-fix-page-end-in-filemap_get_read_batch.patch mm-migrate-fix-wrongly-apply-write-bit-after-mkdirty-on-sparc64.patch gpio-sim-fix-a-memory-leak.patch freezer-umh-fix-call_usermode_helper_exec-vs-sigkill.patch +coredump-move-dump_emit_page-to-kill-unused-warning.patch +revert-mm-always-release-pages-to-the-buddy-allocator-in-memblock_free_late.patch +net-fix-unwanted-sign-extension-in-netdev_stats_to_stats64.patch -- 2.47.2
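
The coredump patch queued above relies on a common C pattern: a static helper that is only called from inside an #ifdef block should itself live inside that block, so it is simply not compiled (and cannot trigger -Wunused-function, promoted to an error by -Werror) when the option is disabled. Below is a minimal userspace sketch of that pattern; CONFIG_DEMO_CORE, emit_one() and emit_range() are hypothetical stand-ins, not the kernel symbols.

```c
#include <stdio.h>

/* Hypothetical stand-in for a Kconfig symbol; comment this out to
 * emulate building with the option disabled. */
#define CONFIG_DEMO_CORE 1

#ifdef CONFIG_DEMO_CORE
/* Because this helper sits inside the same #ifdef as its only caller,
 * it is not compiled at all when CONFIG_DEMO_CORE is off, so there is
 * no "defined but not used" warning to silence. */
static int emit_one(int value)
{
	return printf("emitting %d\n", value);
}

static int emit_range(int start, int len)
{
	int i, total = 0;

	for (i = start; i < start + len; i++)
		total += emit_one(i);
	return total;
}
#endif /* CONFIG_DEMO_CORE */

int main(void)
{
#ifdef CONFIG_DEMO_CORE
	return emit_range(1, 3) > 0 ? 0 : 1;
#else
	return 0;
#endif
}
```

The kernel change itself is pure code movement, but the effect is the same: with CONFIG_ELF_CORE unset, dump_emit_page() is no longer compiled at all, so the unused-function error disappears.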
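The netdev_stats_to_stats64() fix queued above comes down to C integer-conversion rules: on an ILP32 target, atomic_long_read() returns a 32-bit signed long, and assigning a negative long directly to a u64 sign-extends it, which is how a byte counter that crossed 2^31 shows up as roughly 16 EiB. Converting through unsigned long first zero-extends instead. A standalone sketch (not kernel code) that reproduces the effect with fixed-width types, so it behaves the same on any host:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Emulate an ILP32 "long" byte counter whose value just crossed 2^31:
	 * the bit pattern 0x80000000 reads as a negative signed value. */
	int32_t counter = INT32_MIN;

	uint64_t sign_extended = (uint64_t)counter;		/* 0xffffffff80000000 */
	uint64_t zero_extended = (uint64_t)(uint32_t)counter;	/* 0x0000000080000000 */

	printf("signed long   -> u64: 0x%016" PRIx64 "\n", sign_extended);
	printf("unsigned long -> u64: 0x%016" PRIx64 "\n", zero_extended);
	return 0;
}
```

That is what the one-line cast in the net/core/dev.c hunk does: the conversion becomes long -> unsigned long -> u64, so 32-bit platforms keep reporting the real 2 GiB counter instead of jumping to 16 EiB.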