git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
drop queue-5.10/mm-page_alloc-fix-memory-map-initialization-for-descending-nodes...
author: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 16 Jul 2021 18:15:09 +0000 (20:15 +0200)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 16 Jul 2021 18:15:09 +0000 (20:15 +0200)
queue-5.10/mm-page_alloc-fix-memory-map-initialization-for-descending-nodes.patch [deleted file]
queue-5.10/series

diff --git a/queue-5.10/mm-page_alloc-fix-memory-map-initialization-for-descending-nodes.patch b/queue-5.10/mm-page_alloc-fix-memory-map-initialization-for-descending-nodes.patch
deleted file mode 100644 (file)
index 7b5746a..0000000
+++ /dev/null
@@ -1,219 +0,0 @@
-From 122e093c1734361dedb64f65c99b93e28e4624f4 Mon Sep 17 00:00:00 2001
-From: Mike Rapoport <rppt@kernel.org>
-Date: Mon, 28 Jun 2021 19:33:26 -0700
-Subject: mm/page_alloc: fix memory map initialization for descending nodes
-
-From: Mike Rapoport <rppt@linux.ibm.com>
-
-commit 122e093c1734361dedb64f65c99b93e28e4624f4 upstream.
-
-On systems with memory nodes sorted in descending order, for instance Dell
-Precision WorkStation T5500, the struct pages for higher PFNs and
-respectively lower nodes, could be overwritten by the initialization of
-struct pages corresponding to the holes in the memory sections.
-
-For example for the below memory layout
-
-[    0.245624] Early memory node ranges
-[    0.248496]   node   1: [mem 0x0000000000001000-0x0000000000090fff]
-[    0.251376]   node   1: [mem 0x0000000000100000-0x00000000dbdf8fff]
-[    0.254256]   node   1: [mem 0x0000000100000000-0x0000001423ffffff]
-[    0.257144]   node   0: [mem 0x0000001424000000-0x0000002023ffffff]
-
-the range 0x1424000000 - 0x1428000000 in the beginning of node 0 starts in
-the middle of a section and will be considered as a hole during the
-initialization of the last section in node 1.
-
-The wrong initialization of the memory map causes panic on boot when
-CONFIG_DEBUG_VM is enabled.
-
-Reorder loop order of the memory map initialization so that the outer loop
-will always iterate over populated memory regions in the ascending order
-and the inner loop will select the zone corresponding to the PFN range.
-
-This way initialization of the struct pages for the memory holes will be
-always done for the ranges that are actually not populated.
-
-[akpm@linux-foundation.org: coding style fixes]
-
-Link: https://lkml.kernel.org/r/YNXlMqBbL+tBG7yq@kernel.org
-Link: https://bugzilla.kernel.org/show_bug.cgi?id=213073
-Link: https://lkml.kernel.org/r/20210624062305.10940-1-rppt@kernel.org
-Fixes: 0740a50b9baa ("mm/page_alloc.c: refactor initialization of struct page for holes in memory layout")
-Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
-Cc: Boris Petkov <bp@alien8.de>
-Cc: Robert Shteynfeld <robert.shteynfeld@gmail.com>
-Cc: Baoquan He <bhe@redhat.com>
-Cc: Vlastimil Babka <vbabka@suse.cz>
-Cc: David Hildenbrand <david@redhat.com>
-Cc: <stable@vger.kernel.org>
-Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-
----
- mm/page_alloc.c |  100 +++++++++++++++++++++++++++++++++-----------------------
- 1 file changed, 60 insertions(+), 40 deletions(-)
-
---- a/mm/page_alloc.c
-+++ b/mm/page_alloc.c
-@@ -6129,7 +6129,7 @@ void __ref memmap_init_zone_device(struc
-               return;
-       /*
--       * The call to memmap_init_zone should have already taken care
-+       * The call to memmap_init should have already taken care
-        * of the pages reserved for the memmap, so we can just jump to
-        * the end of that region and start processing the device pages.
-        */
-@@ -6194,7 +6194,7 @@ static void __meminit zone_init_free_lis
- /*
-  * Only struct pages that correspond to ranges defined by memblock.memory
-  * are zeroed and initialized by going through __init_single_page() during
-- * memmap_init_zone().
-+ * memmap_init_zone_range().
-  *
-  * But, there could be struct pages that correspond to holes in
-  * memblock.memory. This can happen because of the following reasons:
-@@ -6213,9 +6213,9 @@ static void __meminit zone_init_free_lis
-  *   zone/node above the hole except for the trailing pages in the last
-  *   section that will be appended to the zone/node below.
-  */
--static u64 __meminit init_unavailable_range(unsigned long spfn,
--                                          unsigned long epfn,
--                                          int zone, int node)
-+static void __init init_unavailable_range(unsigned long spfn,
-+                                        unsigned long epfn,
-+                                        int zone, int node)
- {
-       unsigned long pfn;
-       u64 pgcnt = 0;
-@@ -6231,58 +6231,77 @@ static u64 __meminit init_unavailable_ra
-               pgcnt++;
-       }
--      return pgcnt;
-+      if (pgcnt)
-+              pr_info("On node %d, zone %s: %lld pages in unavailable ranges",
-+                      node, zone_names[zone], pgcnt);
- }
- #else
--static inline u64 init_unavailable_range(unsigned long spfn, unsigned long epfn,
--                                       int zone, int node)
-+static inline void init_unavailable_range(unsigned long spfn,
-+                                        unsigned long epfn,
-+                                        int zone, int node)
- {
--      return 0;
- }
- #endif
--void __meminit __weak memmap_init(unsigned long size, int nid,
--                                unsigned long zone,
--                                unsigned long range_start_pfn)
-+static void __init memmap_init_zone_range(struct zone *zone,
-+                                        unsigned long start_pfn,
-+                                        unsigned long end_pfn,
-+                                        unsigned long *hole_pfn)
-+{
-+      unsigned long zone_start_pfn = zone->zone_start_pfn;
-+      unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
-+      int nid = zone_to_nid(zone), zone_id = zone_idx(zone);
-+
-+      start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
-+      end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);
-+
-+      if (start_pfn >= end_pfn)
-+              return;
-+
-+      memmap_init_zone(end_pfn - start_pfn, nid, zone_id, start_pfn,
-+                        zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
-+
-+      if (*hole_pfn < start_pfn)
-+              init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid);
-+
-+      *hole_pfn = end_pfn;
-+}
-+
-+void __init __weak memmap_init(void)
- {
--      static unsigned long hole_pfn;
-       unsigned long start_pfn, end_pfn;
--      unsigned long range_end_pfn = range_start_pfn + size;
--      int i;
--      u64 pgcnt = 0;
-+      unsigned long hole_pfn = 0;
-+      int i, j, zone_id, nid;
--      for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
--              start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
--              end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
-+      for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
-+              struct pglist_data *node = NODE_DATA(nid);
--              if (end_pfn > start_pfn) {
--                      size = end_pfn - start_pfn;
--                      memmap_init_zone(size, nid, zone, start_pfn, range_end_pfn,
--                                       MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
--              }
-+              for (j = 0; j < MAX_NR_ZONES; j++) {
-+                      struct zone *zone = node->node_zones + j;
--              if (hole_pfn < start_pfn)
--                      pgcnt += init_unavailable_range(hole_pfn, start_pfn,
--                                                      zone, nid);
--              hole_pfn = end_pfn;
-+                      if (!populated_zone(zone))
-+                              continue;
-+
-+                      memmap_init_zone_range(zone, start_pfn, end_pfn,
-+                                             &hole_pfn);
-+                      zone_id = j;
-+              }
-       }
- #ifdef CONFIG_SPARSEMEM
-       /*
--       * Initialize the hole in the range [zone_end_pfn, section_end].
--       * If zone boundary falls in the middle of a section, this hole
--       * will be re-initialized during the call to this function for the
--       * higher zone.
-+       * Initialize the memory map for hole in the range [memory_end,
-+       * section_end].
-+       * Append the pages in this hole to the highest zone in the last
-+       * node.
-+       * The call to init_unavailable_range() is outside the ifdef to
-+       * silence the compiler warining about zone_id set but not used;
-+       * for FLATMEM it is a nop anyway
-        */
--      end_pfn = round_up(range_end_pfn, PAGES_PER_SECTION);
-+      end_pfn = round_up(end_pfn, PAGES_PER_SECTION);
-       if (hole_pfn < end_pfn)
--              pgcnt += init_unavailable_range(hole_pfn, end_pfn,
--                                              zone, nid);
- #endif
--
--      if (pgcnt)
--              pr_info("  %s zone: %llu pages in unavailable ranges\n",
--                      zone_names[zone], pgcnt);
-+              init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
- }
- static int zone_batchsize(struct zone *zone)
-@@ -6981,7 +7000,6 @@ static void __init free_area_init_core(s
-               set_pageblock_order();
-               setup_usemap(pgdat, zone, zone_start_pfn, size);
-               init_currently_empty_zone(zone, zone_start_pfn, size);
--              memmap_init(size, nid, j, zone_start_pfn);
-       }
- }
-@@ -7507,6 +7525,8 @@ void __init free_area_init(unsigned long
-                       node_set_state(nid, N_MEMORY);
-               check_for_memory(pgdat, nid);
-       }
-+
-+      memmap_init();
- }
- static int __init cmdline_parse_core(char *p, unsigned long *core,
index 44168b63a361e7a110884bb4dd8f49ee0c91004a..6d30aaa539c465cbef8ff9bb2d1190c44fb4a82e 100644 (file)
@@ -135,7 +135,6 @@ mips-mt-extensions-are-not-available-on-mips32r1.patch
 ath11k-unlock-on-error-path-in-ath11k_mac_op_add_interface.patch
 arm64-dts-rockchip-add-rk3328-dwc3-usb-controller-node.patch
 arm64-dts-rockchip-enable-usb3-for-rk3328-rock64.patch
-mm-page_alloc-fix-memory-map-initialization-for-descending-nodes.patch
 loop-fix-i-o-error-on-fsync-in-detached-loop-devices.patch
 mm-hwpoison-return-ebusy-when-migration-fails.patch
 io_uring-simplify-io_remove_personalities.patch