--- /dev/null
+From e181ae0c5db9544de9c53239eb22bc012ce75033 Mon Sep 17 00:00:00 2001
+From: Pavel Tatashin <pasha.tatashin@oracle.com>
+Date: Sat, 14 Jul 2018 09:15:07 -0400
+Subject: mm: zero unavailable pages before memmap init
+
+From: Pavel Tatashin <pasha.tatashin@oracle.com>
+
+commit e181ae0c5db9544de9c53239eb22bc012ce75033 upstream.
+
+We must zero struct pages for memory that is not backed by physical
+memory, or that the kernel does not have access to.
+
+Recently, there was a change which zeroed the memmap for all holes in
+e820. Unfortunately, it introduced a bug that is discussed here:
+
+ https://www.spinics.net/lists/linux-mm/msg156764.html
+
+Linus also saw this bug on his machine and confirmed that reverting
+commit 124049decbb1 ("x86/e820: put !E820_TYPE_RAM regions into
+memblock.reserved") fixes the issue.
+
+The problem is that we incorrectly zero some struct pages after they
+have already been set up.
+
+The fix is to zero the unavailable struct pages before the rest of the
+struct pages are initialized.
+
+A more thorough fix, which avoids zeroing these pages twice (once in
+__init_single_page() and once in zero_resv_unavail()), should come
+later.
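+
+For reference, the walk that zero_resv_unavail() does is conceptually
+along these lines (a simplified sketch, not the exact mm/page_alloc.c
+code; the helper names for_each_resv_unavail_range() and
+mm_zero_struct_page() are assumed from this kernel version and may
+differ):
+
+	void __init zero_resv_unavail(void)
+	{
+		phys_addr_t start, end;
+		unsigned long pfn;
+		u64 i;
+
+		/*
+		 * Walk ranges that memblock knows about but that are not
+		 * reported as usable RAM, and zero their struct pages so
+		 * that later pfn walkers see fully zeroed page structs.
+		 */
+		for_each_resv_unavail_range(i, &start, &end)
+			for (pfn = PFN_DOWN(start); pfn < PFN_UP(end); pfn++)
+				mm_zero_struct_page(pfn_to_page(pfn));
+	}
+
+With this change, that walk runs before free_area_init_node()
+initializes the memmap, so the fields written by __init_single_page()
+are no longer clobbered by a later memset.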
+
+Fixes: 124049decbb1 ("x86/e820: put !E820_TYPE_RAM regions into memblock.reserved")
+Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/page_alloc.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -6841,6 +6841,7 @@ void __init free_area_init_nodes(unsigne
+ /* Initialise every node */
+ mminit_verify_pageflags_layout();
+ setup_nr_node_ids();
++ zero_resv_unavail();
+ for_each_online_node(nid) {
+ pg_data_t *pgdat = NODE_DATA(nid);
+ free_area_init_node(nid, NULL,
+@@ -6851,7 +6852,6 @@ void __init free_area_init_nodes(unsigne
+ node_set_state(nid, N_MEMORY);
+ check_for_memory(pgdat, nid);
+ }
+- zero_resv_unavail();
+ }
+
+ static int __init cmdline_parse_core(char *p, unsigned long *core,
+@@ -7027,9 +7027,9 @@ void __init set_dma_reserve(unsigned lon
+
+ void __init free_area_init(unsigned long *zones_size)
+ {
++ zero_resv_unavail();
+ free_area_init_node(0, zones_size,
+ __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
+- zero_resv_unavail();
+ }
+
+ static int page_alloc_cpu_dead(unsigned int cpu)