--- /dev/null
+From f86e4271978bd93db466d6a95dad4b0fdcdb04f6 Mon Sep 17 00:00:00 2001
+From: Yang Shi <yang.shi@linaro.org>
+Date: Fri, 3 Jun 2016 14:55:38 -0700
+Subject: mm: check the return value of lookup_page_ext for all call sites
+
+From: Yang Shi <yang.shi@linaro.org>
+
+commit f86e4271978bd93db466d6a95dad4b0fdcdb04f6 upstream.
+
+Per the discussion with Joonsoo Kim [1], we need check the return value
+of lookup_page_ext() for all call sites since it might return NULL in
+some cases, although it is unlikely, i.e. memory hotplug.
+
+Tested with ltp with "page_owner=0".
+
+[1] http://lkml.kernel.org/r/20160519002809.GA10245@js1304-P5Q-DELUXE
+
+[akpm@linux-foundation.org: fix build-breaking typos]
+[arnd@arndb.de: fix build problems from lookup_page_ext]
+ Link: http://lkml.kernel.org/r/6285269.2CksypHdYp@wuerfel
+[akpm@linux-foundation.org: coding-style fixes]
+Link: http://lkml.kernel.org/r/1464023768-31025-1-git-send-email-yang.shi@linaro.org
+Signed-off-by: Yang Shi <yang.shi@linaro.org>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Michal Hocko <mhocko@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/page_idle.h | 43 ++++++++++++++++++++++++++++++++++++-------
+ mm/debug-pagealloc.c | 6 ++++++
+ mm/page_alloc.c | 6 ++++++
+ mm/page_owner.c | 16 ++++++++++++++++
+ mm/vmstat.c | 2 ++
+ 5 files changed, 66 insertions(+), 7 deletions(-)
+
+--- a/include/linux/page_idle.h
++++ b/include/linux/page_idle.h
+@@ -46,33 +46,62 @@ extern struct page_ext_operations page_i
+
+ static inline bool page_is_young(struct page *page)
+ {
+- return test_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
++ struct page_ext *page_ext = lookup_page_ext(page);
++
++ if (unlikely(!page_ext))
++ return false;
++
++ return test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+ }
+
+ static inline void set_page_young(struct page *page)
+ {
+- set_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
++ struct page_ext *page_ext = lookup_page_ext(page);
++
++ if (unlikely(!page_ext))
++ return;
++
++ set_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+ }
+
+ static inline bool test_and_clear_page_young(struct page *page)
+ {
+- return test_and_clear_bit(PAGE_EXT_YOUNG,
+- &lookup_page_ext(page)->flags);
++ struct page_ext *page_ext = lookup_page_ext(page);
++
++ if (unlikely(!page_ext))
++ return false;
++
++ return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+ }
+
+ static inline bool page_is_idle(struct page *page)
+ {
+- return test_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
++ struct page_ext *page_ext = lookup_page_ext(page);
++
++ if (unlikely(!page_ext))
++ return false;
++
++ return test_bit(PAGE_EXT_IDLE, &page_ext->flags);
+ }
+
+ static inline void set_page_idle(struct page *page)
+ {
+- set_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
++ struct page_ext *page_ext = lookup_page_ext(page);
++
++ if (unlikely(!page_ext))
++ return;
++
++ set_bit(PAGE_EXT_IDLE, &page_ext->flags);
+ }
+
+ static inline void clear_page_idle(struct page *page)
+ {
+- clear_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
++ struct page_ext *page_ext = lookup_page_ext(page);
++
++ if (unlikely(!page_ext))
++ return;
++
++ clear_bit(PAGE_EXT_IDLE, &page_ext->flags);
+ }
+ #endif /* CONFIG_64BIT */
+
+--- a/mm/debug-pagealloc.c
++++ b/mm/debug-pagealloc.c
+@@ -34,6 +34,8 @@ static inline void set_page_poison(struc
+ struct page_ext *page_ext;
+
+ page_ext = lookup_page_ext(page);
++	if (unlikely(!page_ext))
++		return;
+ __set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
+ }
+
+@@ -42,6 +44,8 @@ static inline void clear_page_poison(str
+ struct page_ext *page_ext;
+
+ page_ext = lookup_page_ext(page);
++	if (unlikely(!page_ext))
++		return;
+ __clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
+ }
+
+@@ -50,6 +54,8 @@ static inline bool page_poison(struct pa
+ struct page_ext *page_ext;
+
+ page_ext = lookup_page_ext(page);
++	if (unlikely(!page_ext))
++		return false;
+ return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
+ }
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -569,6 +569,9 @@ static inline void set_page_guard(struct
+ return;
+
+ page_ext = lookup_page_ext(page);
++ if (unlikely(!page_ext))
++ return;
++
+ __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
+
+ INIT_LIST_HEAD(&page->lru);
+@@ -586,6 +589,9 @@ static inline void clear_page_guard(stru
+ return;
+
+ page_ext = lookup_page_ext(page);
++ if (unlikely(!page_ext))
++ return;
++
+ __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
+
+ set_page_private(page, 0);
+--- a/mm/page_owner.c
++++ b/mm/page_owner.c
+@@ -53,6 +53,8 @@ void __reset_page_owner(struct page *pag
+
+ for (i = 0; i < (1 << order); i++) {
+ page_ext = lookup_page_ext(page + i);
++ if (unlikely(!page_ext))
++ continue;
+ __clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
+ }
+ }
+@@ -60,6 +62,7 @@ void __reset_page_owner(struct page *pag
+ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
+ {
+ struct page_ext *page_ext = lookup_page_ext(page);
++
+ struct stack_trace trace = {
+ .nr_entries = 0,
+ .max_entries = ARRAY_SIZE(page_ext->trace_entries),
+@@ -67,6 +70,9 @@ void __set_page_owner(struct page *page,
+ .skip = 3,
+ };
+
++ if (unlikely(!page_ext))
++ return;
++
+ save_stack_trace(&trace);
+
+ page_ext->order = order;
+@@ -79,6 +85,12 @@ void __set_page_owner(struct page *page,
+ gfp_t __get_page_owner_gfp(struct page *page)
+ {
+ struct page_ext *page_ext = lookup_page_ext(page);
++ if (unlikely(!page_ext))
++ /*
++ * The caller just returns 0 if no valid gfp
++ * So return 0 here too.
++ */
++ return 0;
+
+ return page_ext->gfp_mask;
+ }
+@@ -194,6 +206,8 @@ read_page_owner(struct file *file, char
+ }
+
+ page_ext = lookup_page_ext(page);
++ if (unlikely(!page_ext))
++ continue;
+
+ /*
+ * Some pages could be missed by concurrent allocation or free,
+@@ -257,6 +271,8 @@ static void init_pages_in_zone(pg_data_t
+ continue;
+
+ page_ext = lookup_page_ext(page);
++ if (unlikely(!page_ext))
++ continue;
+
+ /* Maybe overraping zone */
+ if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
+--- a/mm/vmstat.c
++++ b/mm/vmstat.c
+@@ -1091,6 +1091,8 @@ static void pagetypeinfo_showmixedcount_
+ continue;
+
+ page_ext = lookup_page_ext(page);
++ if (unlikely(!page_ext))
++ continue;
+
+ if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
+ continue;
--- /dev/null
+From e492080e640c2d1235ddf3441cae634cfffef7e1 Mon Sep 17 00:00:00 2001
+From: Jaewon Kim <jaewon31.kim@samsung.com>
+Date: Wed, 15 Nov 2017 17:39:07 -0800
+Subject: mm/page_ext.c: check if page_ext is not prepared
+
+From: Jaewon Kim <jaewon31.kim@samsung.com>
+
+commit e492080e640c2d1235ddf3441cae634cfffef7e1 upstream.
+
+online_page_ext() and page_ext_init() allocate page_ext for each
+section, but they do not allocate if the first PFN is !pfn_present(pfn)
+or !pfn_valid(pfn). Then section->page_ext remains as NULL.
+lookup_page_ext checks NULL only if CONFIG_DEBUG_VM is enabled. For a
+valid PFN, __set_page_owner will try to get page_ext through
+lookup_page_ext. Without CONFIG_DEBUG_VM lookup_page_ext will misuse
+NULL pointer as value 0. This incurs invalid address access.
+
+This is the panic example when PFN 0x100000 is not valid but PFN
+0x13FC00 is being used for page_ext. section->page_ext is NULL,
+get_entry returned invalid page_ext address as 0x1DFA000 for a PFN
+0x13FC00.
+
+To avoid this panic, CONFIG_DEBUG_VM should be removed so that page_ext
+will be checked at all times.
+
+ Unable to handle kernel paging request at virtual address 01dfa014
+ ------------[ cut here ]------------
+ Kernel BUG at ffffff80082371e0 [verbose debug info unavailable]
+ Internal error: Oops: 96000045 [#1] PREEMPT SMP
+ Modules linked in:
+ PC is at __set_page_owner+0x48/0x78
+ LR is at __set_page_owner+0x44/0x78
+ __set_page_owner+0x48/0x78
+ get_page_from_freelist+0x880/0x8e8
+ __alloc_pages_nodemask+0x14c/0xc48
+ __do_page_cache_readahead+0xdc/0x264
+ filemap_fault+0x2ac/0x550
+ ext4_filemap_fault+0x3c/0x58
+ __do_fault+0x80/0x120
+ handle_mm_fault+0x704/0xbb0
+ do_page_fault+0x2e8/0x394
+ do_mem_abort+0x88/0x124
+
+Pre-4.7 kernels also need commit f86e4271978b ("mm: check the return
+value of lookup_page_ext for all call sites").
+
+Link: http://lkml.kernel.org/r/20171107094131.14621-1-jaewon31.kim@samsung.com
+Fixes: eefa864b701d ("mm/page_ext: resurrect struct page extending code for debugging")
+Signed-off-by: Jaewon Kim <jaewon31.kim@samsung.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Minchan Kim <minchan@kernel.org>
+Cc: Joonsoo Kim <js1304@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Michal Hocko <mhocko@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/page_ext.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+--- a/mm/page_ext.c
++++ b/mm/page_ext.c
+@@ -106,7 +106,6 @@ struct page_ext *lookup_page_ext(struct
+ struct page_ext *base;
+
+ base = NODE_DATA(page_to_nid(page))->node_page_ext;
+-#ifdef CONFIG_DEBUG_VM
+ /*
+ * The sanity checks the page allocator does upon freeing a
+ * page can reach here before the page_ext arrays are
+@@ -115,7 +114,6 @@ struct page_ext *lookup_page_ext(struct
+ */
+ if (unlikely(!base))
+ return NULL;
+-#endif
+ offset = pfn - round_down(node_start_pfn(page_to_nid(page)),
+ MAX_ORDER_NR_PAGES);
+ return base + offset;
+@@ -180,7 +178,6 @@ struct page_ext *lookup_page_ext(struct
+ {
+ unsigned long pfn = page_to_pfn(page);
+ struct mem_section *section = __pfn_to_section(pfn);
+-#ifdef CONFIG_DEBUG_VM
+ /*
+ * The sanity checks the page allocator does upon freeing a
+ * page can reach here before the page_ext arrays are
+@@ -189,7 +186,6 @@ struct page_ext *lookup_page_ext(struct
+ */
+ if (!section->page_ext)
+ return NULL;
+-#endif
+ return section->page_ext + pfn;
+ }
+