6.1-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 18 Feb 2023 11:04:57 +0000 (12:04 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 18 Feb 2023 11:04:57 +0000 (12:04 +0100)
added patches:
coredump-move-dump_emit_page-to-kill-unused-warning.patch
net-fix-unwanted-sign-extension-in-netdev_stats_to_stats64.patch
revert-mm-always-release-pages-to-the-buddy-allocator-in-memblock_free_late.patch

queue-6.1/coredump-move-dump_emit_page-to-kill-unused-warning.patch [new file with mode: 0644]
queue-6.1/net-fix-unwanted-sign-extension-in-netdev_stats_to_stats64.patch [new file with mode: 0644]
queue-6.1/revert-mm-always-release-pages-to-the-buddy-allocator-in-memblock_free_late.patch [new file with mode: 0644]
queue-6.1/series

diff --git a/queue-6.1/coredump-move-dump_emit_page-to-kill-unused-warning.patch b/queue-6.1/coredump-move-dump_emit_page-to-kill-unused-warning.patch
new file mode 100644 (file)
index 0000000..99db579
--- /dev/null
@@ -0,0 +1,93 @@
+From 9c7417b5ec440242bb5b64521acd53d4e19130c1 Mon Sep 17 00:00:00 2001
+From: Geert Uytterhoeven <geert@linux-m68k.org>
+Date: Mon, 3 Oct 2022 11:06:57 +0200
+Subject: coredump: Move dump_emit_page() to kill unused warning
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Geert Uytterhoeven <geert@linux-m68k.org>
+
+commit 9c7417b5ec440242bb5b64521acd53d4e19130c1 upstream.
+
+If CONFIG_ELF_CORE is not set:
+
+    fs/coredump.c:835:12: error: ‘dump_emit_page’ defined but not used [-Werror=unused-function]
+      835 | static int dump_emit_page(struct coredump_params *cprm, struct page *page)
+          |            ^~~~~~~~~~~~~~
+
+Fix this by moving dump_emit_page() inside the existing section
+protected by #ifdef CONFIG_ELF_CORE.
+
+Fixes: 06bbaa6dc53cb720 ("[coredump] don't use __kernel_write() on kmap_local_page()")
+Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/coredump.c |   48 ++++++++++++++++++++++++------------------------
+ 1 file changed, 24 insertions(+), 24 deletions(-)
+
+--- a/fs/coredump.c
++++ b/fs/coredump.c
+@@ -831,6 +831,30 @@ static int __dump_skip(struct coredump_p
+       }
+ }
++int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
++{
++      if (cprm->to_skip) {
++              if (!__dump_skip(cprm, cprm->to_skip))
++                      return 0;
++              cprm->to_skip = 0;
++      }
++      return __dump_emit(cprm, addr, nr);
++}
++EXPORT_SYMBOL(dump_emit);
++
++void dump_skip_to(struct coredump_params *cprm, unsigned long pos)
++{
++      cprm->to_skip = pos - cprm->pos;
++}
++EXPORT_SYMBOL(dump_skip_to);
++
++void dump_skip(struct coredump_params *cprm, size_t nr)
++{
++      cprm->to_skip += nr;
++}
++EXPORT_SYMBOL(dump_skip);
++
++#ifdef CONFIG_ELF_CORE
+ static int dump_emit_page(struct coredump_params *cprm, struct page *page)
+ {
+       struct bio_vec bvec = {
+@@ -864,30 +888,6 @@ static int dump_emit_page(struct coredum
+       return 1;
+ }
+-int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
+-{
+-      if (cprm->to_skip) {
+-              if (!__dump_skip(cprm, cprm->to_skip))
+-                      return 0;
+-              cprm->to_skip = 0;
+-      }
+-      return __dump_emit(cprm, addr, nr);
+-}
+-EXPORT_SYMBOL(dump_emit);
+-
+-void dump_skip_to(struct coredump_params *cprm, unsigned long pos)
+-{
+-      cprm->to_skip = pos - cprm->pos;
+-}
+-EXPORT_SYMBOL(dump_skip_to);
+-
+-void dump_skip(struct coredump_params *cprm, size_t nr)
+-{
+-      cprm->to_skip += nr;
+-}
+-EXPORT_SYMBOL(dump_skip);
+-
+-#ifdef CONFIG_ELF_CORE
+ int dump_user_range(struct coredump_params *cprm, unsigned long start,
+                   unsigned long len)
+ {
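
A minimal standalone sketch of the warning class the patch above addresses (the FEATURE macro and the helper()/feature_entry() names are made up for illustration, not taken from the kernel): a static function whose only caller sits behind an #ifdef is flagged by -Werror=unused-function whenever that option is disabled, and defining it inside the same guarded section, as the patch does for dump_emit_page() under CONFIG_ELF_CORE, makes the warning go away.

    /* Build with:  gcc -Wall -Werror -DFEATURE demo.c   (clean)
     *         or:  gcc -Wall -Werror demo.c             (also clean)
     * Moving helper() above the #ifdef while its only caller stays inside
     * would trip -Werror=unused-function in the second build. */
    #include <stdio.h>

    #ifdef FEATURE
    /* The only user of helper() lives in this guarded section, so the
     * definition lives here too -- the same move the patch makes for
     * dump_emit_page() under CONFIG_ELF_CORE. */
    static int helper(int x)
    {
        return x + 1;
    }

    static int feature_entry(int x)
    {
        return helper(x);
    }
    #endif

    int main(void)
    {
    #ifdef FEATURE
        printf("%d\n", feature_entry(41));
    #else
        printf("feature disabled\n");
    #endif
        return 0;
    }
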
diff --git a/queue-6.1/net-fix-unwanted-sign-extension-in-netdev_stats_to_stats64.patch b/queue-6.1/net-fix-unwanted-sign-extension-in-netdev_stats_to_stats64.patch
new file mode 100644 (file)
index 0000000..a75e5b7
--- /dev/null
@@ -0,0 +1,38 @@
+From 9b55d3f0a69af649c62cbc2633e6d695bb3cc583 Mon Sep 17 00:00:00 2001
+From: Felix Riemann <felix.riemann@sma.de>
+Date: Fri, 10 Feb 2023 13:36:44 +0100
+Subject: net: Fix unwanted sign extension in netdev_stats_to_stats64()
+
+From: Felix Riemann <felix.riemann@sma.de>
+
+commit 9b55d3f0a69af649c62cbc2633e6d695bb3cc583 upstream.
+
+When converting net_device_stats to rtnl_link_stats64 sign extension
+is triggered on ILP32 machines as 6c1c509778 changed the previous
+"ulong -> u64" conversion to "long -> u64" by accessing the
+net_device_stats fields through a (signed) atomic_long_t.
+
+This causes, for example, the received bytes counter to jump to 16EiB after
+having received 2^31 bytes. Casting the atomic value to "unsigned long"
+before converting it into u64 avoids this.
+
+Fixes: 6c1c5097781f ("net: add atomic_long_t to net_device_stats fields")
+Signed-off-by: Felix Riemann <felix.riemann@sma.de>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/dev.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -10385,7 +10385,7 @@ void netdev_stats_to_stats64(struct rtnl
+       BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
+       for (i = 0; i < n; i++)
+-              dst[i] = atomic_long_read(&src[i]);
++              dst[i] = (unsigned long)atomic_long_read(&src[i]);
+       /* zero out counters that only exist in rtnl_link_stats64 */
+       memset((char *)stats64 + n * sizeof(u64), 0,
+              sizeof(*stats64) - n * sizeof(u64));
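
A simplified, standalone illustration of the sign extension the patch above removes (this is not the kernel code; int32_t stands in for the value an atomic_long_t holds on an ILP32 machine, where long is 32 bits signed): widening the signed value straight to a 64-bit counter sign-extends once it crosses 2^31, while casting to the unsigned type first, as the one-line fix does with (unsigned long), keeps the old zero-extending behaviour.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* A 32-bit signed counter that has just crossed 2^31 bytes, as an
         * atomic_long_t would hold on an ILP32 machine. */
        int32_t rx_bytes = INT32_MIN;

        uint64_t bad  = (uint64_t)rx_bytes;            /* sign-extended: ~16EiB */
        uint64_t good = (uint64_t)(uint32_t)rx_bytes;  /* zero-extended: 2^31  */

        printf("sign-extended: %llu\n", (unsigned long long)bad);
        printf("zero-extended: %llu\n", (unsigned long long)good);
        return 0;
    }
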
diff --git a/queue-6.1/revert-mm-always-release-pages-to-the-buddy-allocator-in-memblock_free_late.patch b/queue-6.1/revert-mm-always-release-pages-to-the-buddy-allocator-in-memblock_free_late.patch
new file mode 100644 (file)
index 0000000..47b05cc
--- /dev/null
@@ -0,0 +1,71 @@
+From 647037adcad00f2bab8828d3d41cd0553d41f3bd Mon Sep 17 00:00:00 2001
+From: Aaron Thompson <dev@aaront.org>
+Date: Tue, 7 Feb 2023 08:21:51 +0000
+Subject: Revert "mm: Always release pages to the buddy allocator in memblock_free_late()."
+
+From: Aaron Thompson <dev@aaront.org>
+
+commit 647037adcad00f2bab8828d3d41cd0553d41f3bd upstream.
+
+This reverts commit 115d9d77bb0f9152c60b6e8646369fa7f6167593.
+
+The pages being freed by memblock_free_late() have already been
+initialized, but if they are in the deferred init range,
+__free_one_page() might access nearby uninitialized pages when trying to
+coalesce buddies. This can, for example, trigger this BUG:
+
+  BUG: unable to handle page fault for address: ffffe964c02580c8
+  RIP: 0010:__list_del_entry_valid+0x3f/0x70
+   <TASK>
+   __free_one_page+0x139/0x410
+   __free_pages_ok+0x21d/0x450
+   memblock_free_late+0x8c/0xb9
+   efi_free_boot_services+0x16b/0x25c
+   efi_enter_virtual_mode+0x403/0x446
+   start_kernel+0x678/0x714
+   secondary_startup_64_no_verify+0xd2/0xdb
+   </TASK>
+
+A proper fix will be more involved so revert this change for the time
+being.
+
+Fixes: 115d9d77bb0f ("mm: Always release pages to the buddy allocator in memblock_free_late().")
+Signed-off-by: Aaron Thompson <dev@aaront.org>
+Link: https://lore.kernel.org/r/20230207082151.1303-1-dev@aaront.org
+Signed-off-by: Mike Rapoport (IBM) <rppt@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/memblock.c                     |    8 +-------
+ tools/testing/memblock/internal.h |    4 ----
+ 2 files changed, 1 insertion(+), 11 deletions(-)
+
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -1640,13 +1640,7 @@ void __init memblock_free_late(phys_addr
+       end = PFN_DOWN(base + size);
+       for (; cursor < end; cursor++) {
+-              /*
+-               * Reserved pages are always initialized by the end of
+-               * memblock_free_all() (by memmap_init() and, if deferred
+-               * initialization is enabled, memmap_init_reserved_pages()), so
+-               * these pages can be released directly to the buddy allocator.
+-               */
+-              __free_pages_core(pfn_to_page(cursor), 0);
++              memblock_free_pages(pfn_to_page(cursor), cursor, 0);
+               totalram_pages_inc();
+       }
+ }
+--- a/tools/testing/memblock/internal.h
++++ b/tools/testing/memblock/internal.h
+@@ -15,10 +15,6 @@ bool mirrored_kernelcore = false;
+ struct page {};
+-void __free_pages_core(struct page *page, unsigned int order)
+-{
+-}
+-
+ void memblock_free_pages(struct page *page, unsigned long pfn,
+                        unsigned int order)
+ {
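
A hypothetical userspace model of the failure mode described in the commit message above (an analogy only; none of these names exist in the kernel): when the buddy allocator coalesces a freed page with its buddy it has to trust the buddy's struct page, and if that entry lies in a still-uninitialized deferred-init range its list links are garbage, which is what the __list_del_entry_valid() page fault in the trace shows.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct model_page {
        struct model_page *prev, *next;   /* stands in for page->lru */
    };

    /* Stand-in for the kernel's list-corruption check: with garbage links
     * the real helper splats, or the dereference simply faults. */
    static int links_look_sane(const struct model_page *p,
                               const struct model_page *base, size_t n)
    {
        uintptr_t lo = (uintptr_t)base, hi = (uintptr_t)(base + n);

        return (uintptr_t)p->prev >= lo && (uintptr_t)p->prev < hi &&
               (uintptr_t)p->next >= lo && (uintptr_t)p->next < hi;
    }

    int main(void)
    {
        struct model_page memmap[2];

        /* memmap[0] has been set up (think memmap_init()); memmap[1] has
         * not: its struct page lies in a deferred-init range. */
        memmap[0].prev = memmap[0].next = &memmap[0];
        memset(&memmap[1], 0xcc, sizeof(memmap[1]));   /* garbage contents */

        /* Freeing page 0 tries to coalesce it with buddy page 1, which
         * means unlinking the buddy -- i.e. trusting memmap[1]'s pointers. */
        if (!links_look_sane(&memmap[1], memmap, 2))
            printf("BUG: buddy's struct page is uninitialized\n");
        return 0;
    }
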
diff --git a/queue-6.1/series b/queue-6.1/series
index 99a34ecf0a3bd1d479fdc959909c850cdd7502de..cc92563a31c83d7defab44b05973c438440c6885 100644 (file)
@@ -71,3 +71,6 @@ mm-filemap-fix-page-end-in-filemap_get_read_batch.patch
 mm-migrate-fix-wrongly-apply-write-bit-after-mkdirty-on-sparc64.patch
 gpio-sim-fix-a-memory-leak.patch
 freezer-umh-fix-call_usermode_helper_exec-vs-sigkill.patch
+coredump-move-dump_emit_page-to-kill-unused-warning.patch
+revert-mm-always-release-pages-to-the-buddy-allocator-in-memblock_free_late.patch
+net-fix-unwanted-sign-extension-in-netdev_stats_to_stats64.patch