From 24bc3928aa543d9a13c822733d3a23faf47ad43f Mon Sep 17 00:00:00 2001
From: Michal Hocko <mhocko@suse.com>
Date: Wed, 20 Feb 2019 22:20:46 -0800
Subject: mm, memory_hotplug: fix off-by-one in is_pageblock_removable

[ Upstream commit 891cb2a72d821f930a39d5900cb7a3aa752c1d5b ]

Rong Chen has reported the following boot crash:

  PGD 0 P4D 0
  Oops: 0000 [#1] PREEMPT SMP PTI
  CPU: 1 PID: 239 Comm: udevd Not tainted 5.0.0-rc4-00149-gefad4e4 #1
  Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1 04/01/2014
  RIP: 0010:page_mapping+0x12/0x80
  Code: 5d c3 48 89 df e8 0e ad 02 00 85 c0 75 da 89 e8 5b 5d c3 0f 1f 44 00 00 53 48 89 fb 48 8b 43 08 48 8d 50 ff a8 01 48 0f 45 da <48> 8b 53 08 48 8d 42 ff 83 e2 01 48 0f 44 c3 48 83 38 ff 74 2f 48
  RSP: 0018:ffff88801fa87cd8 EFLAGS: 00010202
  RAX: ffffffffffffffff RBX: fffffffffffffffe RCX: 000000000000000a
  RDX: fffffffffffffffe RSI: ffffffff820b9a20 RDI: ffff88801e5c0000
  RBP: 6db6db6db6db6db7 R08: ffff88801e8bb000 R09: 0000000001b64d13
  R10: ffff88801fa87cf8 R11: 0000000000000001 R12: ffff88801e640000
  R13: ffffffff820b9a20 R14: ffff88801f145258 R15: 0000000000000001
  FS:  00007fb2079817c0(0000) GS:ffff88801dd00000(0000) knlGS:0000000000000000
  CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
  CR2: 0000000000000006 CR3: 000000001fa82000 CR4: 00000000000006a0
  Call Trace:
   __dump_page+0x14/0x2c0
   is_mem_section_removable+0x24c/0x2c0
   removable_show+0x87/0xa0
   dev_attr_show+0x25/0x60
   sysfs_kf_seq_show+0xba/0x110
   seq_read+0x196/0x3f0
   __vfs_read+0x34/0x180
   vfs_read+0xa0/0x150
   ksys_read+0x44/0xb0
   do_syscall_64+0x5e/0x4a0
   entry_SYSCALL_64_after_hwframe+0x49/0xbe

and bisected it down to commit efad4e475c31 ("mm, memory_hotplug:
is_mem_section_removable do not pass the end of a zone").

The reason for the crash is that the mapping is garbage for a poisoned
(uninitialized) page. This shouldn't happen, as all pages within the
zone's boundaries should be initialized.

Later debugging revealed that the actual problem is an off-by-one when
evaluating the end_page: 'start_pfn + nr_pages', resp. 'zone_end_pfn',
refers to the pfn just past the range and as such may belong to a
different memory section.

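To make the off-by-one concrete, here is a minimal userspace sketch (the
4K page size and 128MB section size are assumptions matching a typical
x86-64 SPARSEMEM configuration, and pfn_to_section_nr() is re-implemented
locally purely for illustration): whenever the range ends exactly on a
section boundary, the one-past-the-end pfn already lives in the next
section.

  #include <stdio.h>

  /* Illustrative values only: 4K pages and 128MB SPARSEMEM sections
   * (SECTION_SIZE_BITS == 27 on x86-64), i.e. 32768 pfns per section. */
  #define PAGE_SHIFT              12
  #define SECTION_SIZE_BITS       27
  #define PFN_SECTION_SHIFT       (SECTION_SIZE_BITS - PAGE_SHIFT)
  #define pfn_to_section_nr(pfn)  ((pfn) >> PFN_SECTION_SHIFT)

  int main(void)
  {
      /* A range that fills one section exactly. */
      unsigned long start_pfn = 1UL << PFN_SECTION_SHIFT;    /* section 1 */
      unsigned long nr_pages = 1UL << PFN_SECTION_SHIFT;
      unsigned long end_pfn = start_pfn + nr_pages;   /* one past the range */

      printf("last pfn of the range is in section %lu\n",
             pfn_to_section_nr(end_pfn - 1));                /* prints 1 */
      printf("end_pfn is already in section %lu\n",
             pfn_to_section_nr(end_pfn));                    /* prints 2 */
      return 0;
  }

pfn_to_page(end_pfn) then resolves through a different mem_section's
memmap, which may not even be initialized.
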
This off-by-one, along with CONFIG_SPARSEMEM, then makes the loop
condition completely bogus, because pointer arithmetic doesn't work for
pages from two different sections in that memory model.

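A rough model of why that is (plain userspace C; struct page and the
per-section memmaps below are stand-ins, not kernel code): under
SPARSEMEM each section's memmap is a separate allocation, so
'page + n' and 'page < end_page' are only meaningful while both sides
come from the same section, whereas a pfn is a plain integer and always
compares correctly.

  #include <stdio.h>
  #include <stdlib.h>

  struct page { unsigned long flags; };   /* stand-in for the real struct page */

  #define PAGES_PER_SECTION 32768UL       /* assumed: 128MB sections, 4K pages */

  int main(void)
  {
      /* Each SPARSEMEM section owns its own memmap array; the two
       * allocations are not adjacent in memory. */
      struct page *memmap_sec1 = calloc(PAGES_PER_SECTION, sizeof(struct page));
      struct page *memmap_sec2 = calloc(PAGES_PER_SECTION, sizeof(struct page));

      /* "One past the last page of section 1" points past memmap_sec1 and
       * bears no relation to section 2's memmap, so using it as a loop
       * bound for pages of section 2 compares unrelated pointers. */
      struct page *end_page = memmap_sec1 + PAGES_PER_SECTION;
      printf("end_page == first page of section 2? %s\n",
             end_page == memmap_sec2 ? "yes" : "no");   /* almost certainly no */

      /* The pfn-based equivalent is plain integer arithmetic and stays
       * valid across section boundaries: the loop condition correctly
       * stops at the end of the range. */
      unsigned long end_pfn = 2 * PAGES_PER_SECTION;    /* one past section 1 */
      unsigned long pfn = 2 * PAGES_PER_SECTION;        /* first pfn of section 2 */
      printf("pfn < end_pfn? %s\n", pfn < end_pfn ? "yes" : "no");

      free(memmap_sec1);
      free(memmap_sec2);
      return 0;
  }
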
Fix the issue by reworking is_pageblock_removable to be pfn-based and to
use struct page only where necessary. This makes the code slightly
easier to follow and removes the problematic pointer arithmetic
completely.

Link: http://lkml.kernel.org/r/20190218181544.14616-1-mhocko@kernel.org
Fixes: efad4e475c31 ("mm, memory_hotplug: is_mem_section_removable do not pass the end of a zone")
Signed-off-by: Michal Hocko <mhocko@suse.com>
Reported-by: <rong.a.chen@intel.com>
Tested-by: <rong.a.chen@intel.com>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 mm/memory_hotplug.c | 27 +++++++++++++++------------
 1 file changed, 15 insertions(+), 12 deletions(-)

diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index ff93a57e1694..156991edec2a 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1213,11 +1213,13 @@ static inline int pageblock_free(struct page *page)
 	return PageBuddy(page) && page_order(page) >= pageblock_order;
 }
 
-/* Return the start of the next active pageblock after a given page */
-static struct page *next_active_pageblock(struct page *page)
+/* Return the pfn of the start of the next active pageblock after a given pfn */
+static unsigned long next_active_pageblock(unsigned long pfn)
 {
+	struct page *page = pfn_to_page(pfn);
+
 	/* Ensure the starting page is pageblock-aligned */
-	BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
+	BUG_ON(pfn & (pageblock_nr_pages - 1));
 
 	/* If the entire pageblock is free, move to the end of free page */
 	if (pageblock_free(page)) {
@@ -1225,16 +1227,16 @@ static struct page *next_active_pageblock(struct page *page)
 		/* be careful. we don't have locks, page_order can be changed.*/
 		order = page_order(page);
 		if ((order < MAX_ORDER) && (order >= pageblock_order))
-			return page + (1 << order);
+			return pfn + (1 << order);
 	}
 
-	return page + pageblock_nr_pages;
+	return pfn + pageblock_nr_pages;
 }
 
-static bool is_pageblock_removable_nolock(struct page *page)
+static bool is_pageblock_removable_nolock(unsigned long pfn)
 {
+	struct page *page = pfn_to_page(pfn);
 	struct zone *zone;
-	unsigned long pfn;
 
 	/*
 	 * We have to be careful here because we are iterating over memory
@@ -1257,13 +1259,14 @@ static bool is_pageblock_removable_nolock(struct page *page)
 /* Checks if this range of memory is likely to be hot-removable. */
 bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
 {
-	struct page *page = pfn_to_page(start_pfn);
-	unsigned long end_pfn = min(start_pfn + nr_pages, zone_end_pfn(page_zone(page)));
-	struct page *end_page = pfn_to_page(end_pfn);
+	unsigned long end_pfn, pfn;
+
+	end_pfn = min(start_pfn + nr_pages,
+			zone_end_pfn(page_zone(pfn_to_page(start_pfn))));
 
 	/* Check the starting page of each pageblock within the range */
-	for (; page < end_page; page = next_active_pageblock(page)) {
-		if (!is_pageblock_removable_nolock(page))
+	for (pfn = start_pfn; pfn < end_pfn; pfn = next_active_pageblock(pfn)) {
+		if (!is_pageblock_removable_nolock(pfn))
 			return false;
 		cond_resched();
 	}
-- 
2.19.1
