--- /dev/null
+From 9b55d3f0a69af649c62cbc2633e6d695bb3cc583 Mon Sep 17 00:00:00 2001
+From: Felix Riemann <felix.riemann@sma.de>
+Date: Fri, 10 Feb 2023 13:36:44 +0100
+Subject: net: Fix unwanted sign extension in netdev_stats_to_stats64()
+
+From: Felix Riemann <felix.riemann@sma.de>
+
+commit 9b55d3f0a69af649c62cbc2633e6d695bb3cc583 upstream.
+
+When converting net_device_stats to rtnl_link_stats64, sign extension
+is triggered on ILP32 machines, as commit 6c1c509778 changed the
+previous "ulong -> u64" conversion to "long -> u64" by accessing the
+net_device_stats fields through a (signed) atomic_long_t.
+
+This causes, for example, the received bytes counter to jump to 16 EiB
+after 2^31 bytes have been received. Casting the atomic value to
+"unsigned long" before converting it to u64 avoids this.
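+
+For illustration only (not part of the upstream change), a minimal
+userspace sketch of the conversion, using int32_t as a stand-in for a
+32-bit "long" on an ILP32 machine:
+
+  #include <inttypes.h>
+  #include <stdint.h>
+  #include <stdio.h>
+
+  int main(void)
+  {
+          /* 2^31 bytes received; the signed 32-bit counter is now negative. */
+          int32_t counter = (int32_t)0x80000000;
+
+          uint64_t sign_extended = (uint64_t)counter;           /* 0xffffffff80000000 */
+          uint64_t zero_extended = (uint64_t)(uint32_t)counter; /* 0x0000000080000000 */
+
+          printf("sign-extended: %" PRIx64 "\n", sign_extended);
+          printf("zero-extended: %" PRIx64 "\n", zero_extended);
+          return 0;
+  }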
+
+Fixes: 6c1c5097781f ("net: add atomic_long_t to net_device_stats fields")
+Signed-off-by: Felix Riemann <felix.riemann@sma.de>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/dev.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -10326,7 +10326,7 @@ void netdev_stats_to_stats64(struct rtnl
+
+ BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
+ for (i = 0; i < n; i++)
+- dst[i] = atomic_long_read(&src[i]);
++ dst[i] = (unsigned long)atomic_long_read(&src[i]);
+ /* zero out counters that only exist in rtnl_link_stats64 */
+ memset((char *)stats64 + n * sizeof(u64), 0,
+ sizeof(*stats64) - n * sizeof(u64));
--- /dev/null
+From 647037adcad00f2bab8828d3d41cd0553d41f3bd Mon Sep 17 00:00:00 2001
+From: Aaron Thompson <dev@aaront.org>
+Date: Tue, 7 Feb 2023 08:21:51 +0000
+Subject: Revert "mm: Always release pages to the buddy allocator in memblock_free_late()."
+
+From: Aaron Thompson <dev@aaront.org>
+
+commit 647037adcad00f2bab8828d3d41cd0553d41f3bd upstream.
+
+This reverts commit 115d9d77bb0f9152c60b6e8646369fa7f6167593.
+
+The pages being freed by memblock_free_late() have already been
+initialized, but if they are in the deferred init range,
+__free_one_page() might access nearby uninitialized pages when trying to
+coalesce buddies. This can, for example, trigger this BUG:
+
+ BUG: unable to handle page fault for address: ffffe964c02580c8
+ RIP: 0010:__list_del_entry_valid+0x3f/0x70
+ <TASK>
+ __free_one_page+0x139/0x410
+ __free_pages_ok+0x21d/0x450
+ memblock_free_late+0x8c/0xb9
+ efi_free_boot_services+0x16b/0x25c
+ efi_enter_virtual_mode+0x403/0x446
+ start_kernel+0x678/0x714
+ secondary_startup_64_no_verify+0xd2/0xdb
+ </TASK>
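+
+For background only (not part of this revert): while coalescing,
+__free_one_page() looks up the buddy's struct page at pfn ^ (1 << order),
+so freeing a page can touch the memmap entry of a neighbouring pfn that
+is still awaiting deferred initialization. A simplified sketch of that
+lookup (modelled on __find_buddy_pfn() in mm/internal.h):
+
+  /* The buddy of a 2^order block differs only in bit 'order' of the pfn. */
+  static inline unsigned long buddy_pfn_of(unsigned long pfn, unsigned int order)
+  {
+          return pfn ^ (1UL << order);
+  }
+
+  /*
+   * Freeing pfn 0x1000 at order 0 therefore reads the struct page for
+   * pfn 0x1001; if that memmap entry is uninitialized, the list-integrity
+   * check dereferences garbage, producing the fault shown above.
+   */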
+
+A proper fix will be more involved, so revert this change for the
+time being.
+
+Fixes: 115d9d77bb0f ("mm: Always release pages to the buddy allocator in memblock_free_late().")
+Signed-off-by: Aaron Thompson <dev@aaront.org>
+Link: https://lore.kernel.org/r/20230207082151.1303-1-dev@aaront.org
+Signed-off-by: Mike Rapoport (IBM) <rppt@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/memblock.c | 8 +-------
+ 1 file changed, 1 insertion(+), 7 deletions(-)
+
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -1597,13 +1597,7 @@ void __init __memblock_free_late(phys_ad
+ end = PFN_DOWN(base + size);
+
+ for (; cursor < end; cursor++) {
+- /*
+- * Reserved pages are always initialized by the end of
+- * memblock_free_all() (by memmap_init() and, if deferred
+- * initialization is enabled, memmap_init_reserved_pages()), so
+- * these pages can be released directly to the buddy allocator.
+- */
+- __free_pages_core(pfn_to_page(cursor), 0);
++ memblock_free_pages(pfn_to_page(cursor), cursor, 0);
+ totalram_pages_inc();
+ }
+ }