4.15-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 19 Feb 2018 17:22:23 +0000 (18:22 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 19 Feb 2018 17:22:23 +0000 (18:22 +0100)
added patches:
mm-memory_hotplug-fix-memmap-initialization.patch

queue-4.15/mm-memory_hotplug-fix-memmap-initialization.patch [new file with mode: 0644]
queue-4.15/series

diff --git a/queue-4.15/mm-memory_hotplug-fix-memmap-initialization.patch b/queue-4.15/mm-memory_hotplug-fix-memmap-initialization.patch
new file mode 100644
index 0000000..78d2c8c
--- /dev/null
+++ b/queue-4.15/mm-memory_hotplug-fix-memmap-initialization.patch
@@ -0,0 +1,114 @@
+From 9bb5a391f9a5707e04763cf14298fc4cc29bfecd Mon Sep 17 00:00:00 2001
+From: Michal Hocko <mhocko@suse.com>
+Date: Wed, 31 Jan 2018 16:21:14 -0800
+Subject: mm, memory_hotplug: fix memmap initialization
+
+From: Michal Hocko <mhocko@suse.com>
+
+commit 9bb5a391f9a5707e04763cf14298fc4cc29bfecd upstream.
+
+Bharata has noticed that onlining newly added memory doesn't increase
+the total memory, pointing to commit f7f99100d8d9 ("mm: stop zeroing
+memory during allocation in vmemmap") as the culprit.  That commit
+changed the way the memory for memmaps is initialized, moving it from
+allocation time to initialization time.  This works properly for the
+early memmap init path.
+
+It doesn't work for memory hotplug, though, because there we need to
+mark pages as reserved when the sparsemem section is created and only
+initialize them completely later, during onlining.  memmap_init_zone is
+called in the early stage of onlining.  With the current code it calls
+__init_single_page, which zeroes the whole struct page (clearing the
+reserved state), and therefore online_pages_range skips those pages.
+
+Fix this by skipping mm_zero_struct_page in __init_single_page for the
+memory hotplug path.  This is quite ugly, but unifying the early init
+and memory hotplug init paths is a large project; at least make sure we
+plug the regression.
+
+Link: http://lkml.kernel.org/r/20180130101141.GW21609@dhcp22.suse.cz
+Fixes: f7f99100d8d9 ("mm: stop zeroing memory during allocation in vmemmap")
+Signed-off-by: Michal Hocko <mhocko@suse.com>
+Reported-by: Bharata B Rao <bharata@linux.vnet.ibm.com>
+Tested-by: Bharata B Rao <bharata@linux.vnet.ibm.com>
+Reviewed-by: Pavel Tatashin <pasha.tatashin@oracle.com>
+Cc: Steven Sistare <steven.sistare@oracle.com>
+Cc: Daniel Jordan <daniel.m.jordan@oracle.com>
+Cc: Bob Picco <bob.picco@oracle.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/page_alloc.c |   22 ++++++++++++++--------
+ 1 file changed, 14 insertions(+), 8 deletions(-)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1177,9 +1177,10 @@ static void free_one_page(struct zone *z
+ }
+
+ static void __meminit __init_single_page(struct page *page, unsigned long pfn,
+-                              unsigned long zone, int nid)
++                              unsigned long zone, int nid, bool zero)
+ {
+-      mm_zero_struct_page(page);
++      if (zero)
++              mm_zero_struct_page(page);
+       set_page_links(page, zone, nid, pfn);
+       init_page_count(page);
+       page_mapcount_reset(page);
+@@ -1194,9 +1195,9 @@ static void __meminit __init_single_page
+ }
+
+ static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
+-                                      int nid)
++                                      int nid, bool zero)
+ {
+-      return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
++      return __init_single_page(pfn_to_page(pfn), pfn, zone, nid, zero);
+ }
+
+ #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+@@ -1217,7 +1218,7 @@ static void __meminit init_reserved_page
+               if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
+                       break;
+       }
+-      __init_single_pfn(pfn, zid, nid);
++      __init_single_pfn(pfn, zid, nid, true);
+ }
+ #else
+ static inline void init_reserved_page(unsigned long pfn)
+@@ -1514,7 +1515,7 @@ static unsigned long __init deferred_ini
+                                       page++;
+                               else
+                                       page = pfn_to_page(pfn);
+-                              __init_single_page(page, pfn, zid, nid);
++                              __init_single_page(page, pfn, zid, nid, true);
+                               cond_resched();
+                       }
+               }
+@@ -5393,15 +5394,20 @@ not_early:
+                * can be created for invalid pages (for alignment)
+                * check here not to call set_pageblock_migratetype() against
+                * pfn out of zone.
++               *
++               * Please note that MEMMAP_HOTPLUG path doesn't clear memmap
++               * because this is done early in sparse_add_one_section
+                */
+               if (!(pfn & (pageblock_nr_pages - 1))) {
+                       struct page *page = pfn_to_page(pfn);
+-                      __init_single_page(page, pfn, zone, nid);
++                      __init_single_page(page, pfn, zone, nid,
++                                      context != MEMMAP_HOTPLUG);
+                       set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+                       cond_resched();
+               } else {
+-                      __init_single_pfn(pfn, zone, nid);
++                      __init_single_pfn(pfn, zone, nid,
++                                      context != MEMMAP_HOTPLUG);
+               }
+       }
+ }
diff --git a/queue-4.15/series b/queue-4.15/series
index b2e18d7d83d2d646bf77555c903d909aaa80f266..d57474b670b994effbb1ff1b1fcc8b08f6153fcb 100644
--- a/queue-4.15/series
+++ b/queue-4.15/series
@@ -36,3 +36,4 @@ x86-gpu-add-cfl-to-early-quirks.patch
 x86-kexec-make-kexec-mostly-work-in-5-level-paging-mode.patch
 x86-xen-init-gs-very-early-to-avoid-page-faults-with-stack-protector.patch
 x86-pm-make-apm-idle-driver-initialize-polling-state.patch
+mm-memory_hotplug-fix-memmap-initialization.patch
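
The interaction described in the changelog above can be modelled outside
the kernel. The following standalone userspace C program is only a
sketch: struct page, PG_reserved, init_single_page() and online_range()
are simplified stand-ins, not excerpts from the kernel. It shows why
unconditionally zeroing the struct page wipes out the reserved state the
hotplug path set when the sparsemem section was created, so the
reserved-page check on the onlining path skips the range, and how the
new zero flag (false when context == MEMMAP_HOTPLUG) preserves it.

/*
 * Standalone model of the regression fixed above -- not kernel code.
 * struct page, PG_reserved and the helpers are simplified stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct page {
	unsigned long flags;
};

#define PG_reserved	(1UL << 0)

static bool PageReserved(const struct page *page)
{
	return page->flags & PG_reserved;
}

/* Models __init_single_page(): zeroing wipes every flag, including the
 * reserved bit that hotplug set when the section was added. */
static void init_single_page(struct page *page, bool zero)
{
	if (zero)
		memset(page, 0, sizeof(*page));	/* mm_zero_struct_page() */
	/* set_page_links(), init_page_count(), ... elided */
}

/* Models the reserved-page gate on the onlining path: only ranges
 * still marked reserved actually get onlined and counted. */
static void online_range(const struct page *page)
{
	printf("reserved=%d -> onlining %s this range\n",
	       PageReserved(page), PageReserved(page) ? "frees" : "skips");
}

int main(void)
{
	/* Hotplug marks the page reserved at section-creation time. */
	struct page hotplugged = { .flags = PG_reserved };

	/* Before the fix: hotplug took the early-init path and zeroed. */
	init_single_page(&hotplugged, true);
	online_range(&hotplugged);	/* skips: total memory unchanged */

	/* After the fix: zero == (context != MEMMAP_HOTPLUG) == false. */
	hotplugged.flags = PG_reserved;
	init_single_page(&hotplugged, false);
	online_range(&hotplugged);	/* frees: the memory shows up */

	return 0;
}

Built with a plain C compiler, the first printed line shows the pre-fix
behaviour (range skipped, so the total never grows) and the second the
post-fix behaviour (range onlined). The early memmap init path still
passes zero == true and is unaffected.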