From 709c42273f8d869886a13494176d82ebda1a01d3 Mon Sep 17 00:00:00 2001
From: Hugh Dickins <hughd@google.com>
Date: Fri, 30 Nov 2018 14:10:39 -0800
Subject: mm/khugepaged: minor reorderings in collapse_shmem()

commit 042a30824871fa3149b0127009074b75cc25863c upstream.

Several cleanups in collapse_shmem(), most of which probably do not
really matter beyond doing things in a more familiar and reassuring
order. Simplify the failure gotos in the main loop, and on success
update stats while interrupts are still disabled from the last iteration.
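
For readers less used to the idiom, the "failure gotos" here follow the
kernel's usual single-exit unwind pattern: one cleanup label per held
resource, with each failure jumping to the label that matches what is
currently held. A minimal standalone sketch of that shape (all names
hypothetical, not taken from this patch):

#include <stdbool.h>
#include <stdio.h>

static bool lock_a(void)   { return true; }
static void unlock_a(void) { puts("unlock a"); }
static bool lock_b(void)   { return false; }	/* simulate a failure */
static void unlock_b(void) { puts("unlock b"); }

static int do_work(void)
{
	int result = -1;

	if (!lock_a())
		goto out;	/* nothing held yet */
	if (!lock_b())
		goto out_a;	/* only 'a' is held here */

	result = 0;		/* real work would go here */
	unlock_b();
out_a:
	unlock_a();
out:
	return result;
}

int main(void)
{
	printf("result = %d\n", do_work());
	return 0;
}

In collapse_shmem() the same discipline lets the bespoke out_lru and
out_isolate_failed labels collapse into the single out_unlock label,
with every page-holding failure path leaving through it.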

Link: http://lkml.kernel.org/r/alpine.LSU.2.11.1811261526400.2275@eggly.anvils
Fixes: f3f0e1d2150b2 ("khugepaged: add support of collapse for tmpfs/shmem pages")
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: <stable@vger.kernel.org> [4.8+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 mm/khugepaged.c | 73 ++++++++++++++++++++-----------------------------
 1 file changed, 30 insertions(+), 43 deletions(-)

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 068868763b78..d0a347e6fd08 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1330,13 +1330,12 @@ static void collapse_shmem(struct mm_struct *mm,
 		goto out;
 	}
 
+	__SetPageLocked(new_page);
+	__SetPageSwapBacked(new_page);
 	new_page->index = start;
 	new_page->mapping = mapping;
-	__SetPageSwapBacked(new_page);
-	__SetPageLocked(new_page);
 	BUG_ON(!page_ref_freeze(new_page, 1));
 
-
 	/*
 	 * At this point the new_page is 'frozen' (page_count() is zero), locked
 	 * and not up-to-date. It's safe to insert it into radix tree, because
@@ -1365,13 +1364,13 @@ static void collapse_shmem(struct mm_struct *mm,
 		 */
 		if (n && !shmem_charge(mapping->host, n)) {
 			result = SCAN_FAIL;
-			break;
+			goto tree_locked;
 		}
-		nr_none += n;
 		for (; index < min(iter.index, end); index++) {
 			radix_tree_insert(&mapping->i_pages, index,
 					new_page + (index % HPAGE_PMD_NR));
 		}
+		nr_none += n;
 
 		/* We are done. */
 		if (index >= end)
@@ -1387,12 +1386,12 @@ static void collapse_shmem(struct mm_struct *mm,
 				result = SCAN_FAIL;
 				goto tree_unlocked;
 			}
-			xa_lock_irq(&mapping->i_pages);
 		} else if (trylock_page(page)) {
 			get_page(page);
+			xa_unlock_irq(&mapping->i_pages);
 		} else {
 			result = SCAN_PAGE_LOCK;
-			break;
+			goto tree_locked;
 		}
@@ -1407,11 +1406,10 @@ static void collapse_shmem(struct mm_struct *mm,
 			result = SCAN_TRUNCATED;
 			goto out_unlock;
 		}
-		xa_unlock_irq(&mapping->i_pages);
 
 		if (isolate_lru_page(page)) {
 			result = SCAN_DEL_PAGE_LRU;
-			goto out_isolate_failed;
+			goto out_unlock;
 		}
 
 		if (page_mapped(page))
@@ -1432,7 +1430,9 @@ static void collapse_shmem(struct mm_struct *mm,
 		 */
 		if (!page_ref_freeze(page, 3)) {
 			result = SCAN_PAGE_COUNT;
-			goto out_lru;
+			xa_unlock_irq(&mapping->i_pages);
+			putback_lru_page(page);
+			goto out_unlock;
 		}
 
 		/*
@@ -1448,17 +1448,10 @@ static void collapse_shmem(struct mm_struct *mm,
 		slot = radix_tree_iter_resume(slot, &iter);
 		index++;
 		continue;
-out_lru:
-		xa_unlock_irq(&mapping->i_pages);
-		putback_lru_page(page);
-out_isolate_failed:
-		unlock_page(page);
-		put_page(page);
-		goto tree_unlocked;
 out_unlock:
 		unlock_page(page);
 		put_page(page);
-		break;
+		goto tree_unlocked;
 	}
 
 	/*
@@ -1466,7 +1459,7 @@ static void collapse_shmem(struct mm_struct *mm,
 	 * This code only triggers if there's nothing in radix tree
 	 * beyond 'end'.
 	 */
-	if (result == SCAN_SUCCEED && index < end) {
+	if (index < end) {
 		int n = end - index;
 
 		/* Stop if extent has been truncated, and is now empty */
@@ -1478,7 +1471,6 @@ static void collapse_shmem(struct mm_struct *mm,
 			result = SCAN_FAIL;
 			goto tree_locked;
 		}
-
 		for (; index < end; index++) {
 			radix_tree_insert(&mapping->i_pages, index,
 					new_page + (index % HPAGE_PMD_NR));
@@ -1486,14 +1478,19 @@ static void collapse_shmem(struct mm_struct *mm,
 		nr_none += n;
 	}
 
+	__inc_node_page_state(new_page, NR_SHMEM_THPS);
+	if (nr_none) {
+		struct zone *zone = page_zone(new_page);
+
+		__mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
+		__mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
+	}
+
 tree_locked:
 	xa_unlock_irq(&mapping->i_pages);
 tree_unlocked:
 
 	if (result == SCAN_SUCCEED) {
-		unsigned long flags;
-		struct zone *zone = page_zone(new_page);
-
 		/*
 		 * Replacing old pages with new one has succeed, now we need to
 		 * copy the content and free old pages.
@@ -1507,11 +1504,11 @@ static void collapse_shmem(struct mm_struct *mm,
 			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
 					page);
 			list_del(&page->lru);
-			unlock_page(page);
-			page_ref_unfreeze(page, 1);
 			page->mapping = NULL;
+			page_ref_unfreeze(page, 1);
 			ClearPageActive(page);
 			ClearPageUnevictable(page);
+			unlock_page(page);
 			put_page(page);
 			index++;
 		}
@@ -1520,28 +1517,17 @@ static void collapse_shmem(struct mm_struct *mm,
 			index++;
 		}
 
-		local_irq_save(flags);
-		__inc_node_page_state(new_page, NR_SHMEM_THPS);
-		if (nr_none) {
-			__mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
-			__mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
-		}
-		local_irq_restore(flags);
-
-		/*
-		 * Remove pte page tables, so we can re-fault
-		 * the page as huge.
-		 */
-		retract_page_tables(mapping, start);
-
 		/* Everything is ready, let's unfreeze the new_page */
-		set_page_dirty(new_page);
 		SetPageUptodate(new_page);
 		page_ref_unfreeze(new_page, HPAGE_PMD_NR);
+		set_page_dirty(new_page);
 		mem_cgroup_commit_charge(new_page, memcg, false, true);
 		lru_cache_add_anon(new_page);
-		unlock_page(new_page);
 
+		/*
+		 * Remove pte page tables, so we can re-fault the page as huge.
+		 */
+		retract_page_tables(mapping, start);
 		*hpage = NULL;
 
 		khugepaged_pages_collapsed++;
@@ -1573,8 +1559,8 @@ static void collapse_shmem(struct mm_struct *mm,
 			radix_tree_replace_slot(&mapping->i_pages, slot, page);
 			slot = radix_tree_iter_resume(slot, &iter);
 			xa_unlock_irq(&mapping->i_pages);
-			putback_lru_page(page);
 			unlock_page(page);
+			putback_lru_page(page);
 			xa_lock_irq(&mapping->i_pages);
 		}
 		VM_BUG_ON(nr_none);
@@ -1583,9 +1569,10 @@ static void collapse_shmem(struct mm_struct *mm,
 		/* Unfreeze new_page, caller would take care about freeing it */
 		page_ref_unfreeze(new_page, 1);
 		mem_cgroup_cancel_charge(new_page, memcg, true);
-		unlock_page(new_page);
 		new_page->mapping = NULL;
 	}
+
+	unlock_page(new_page);
 out:
 	VM_BUG_ON(!list_empty(&pagelist));
 	/* TODO: tracepoints */
-- 
2.17.1

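For reference, the page_ref_freeze(page, n) calls in the hunks above are
the kernel's compare-and-exchange refcount "freeze": the call succeeds
only if the refcount is exactly n, atomically replacing it with zero so
that no concurrent path can take a new reference while the page is being
swapped in or out of the radix tree; page_ref_unfreeze() later publishes
the final count. A minimal userspace analogue of the idiom (illustrative
names only, not kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_int refcount;
};

/* Succeeds only if the count is exactly 'expected'; then holds it at 0. */
static bool obj_ref_freeze(struct obj *o, int expected)
{
	int old = expected;
	return atomic_compare_exchange_strong(&o->refcount, &old, 0);
}

/* Publish the final count once the object has been fully rewired. */
static void obj_ref_unfreeze(struct obj *o, int count)
{
	atomic_store(&o->refcount, count);
}

int main(void)
{
	struct obj o;

	atomic_init(&o.refcount, 3);
	if (obj_ref_freeze(&o, 3))
		puts("frozen: no new reference can be taken");

	obj_ref_unfreeze(&o, 1);	/* hand the object back with one ref */
	printf("refcount is now %d\n", atomic_load(&o.refcount));
	return 0;
}

This is why the loop can demand page_ref_freeze(page, 3): the radix-tree
slot, the isolate_lru_page() reference and the local get_page() account
for exactly three references, so any extra holder makes the freeze fail
and the collapse backs off with SCAN_PAGE_COUNT.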