1/*
2 * linux/mm/compaction.c
3 *
4 * Memory compaction for the reduction of external fragmentation. Note that
5 * this heavily depends upon page migration to do all the real heavy
6 * lifting
7 *
8 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
9 */
10#include <linux/cpu.h>
11#include <linux/swap.h>
12#include <linux/migrate.h>
13#include <linux/compaction.h>
14#include <linux/mm_inline.h>
15#include <linux/sched/signal.h>
16#include <linux/backing-dev.h>
17#include <linux/sysctl.h>
18#include <linux/sysfs.h>
19#include <linux/page-isolation.h>
20#include <linux/kasan.h>
21#include <linux/kthread.h>
22#include <linux/freezer.h>
23#include <linux/page_owner.h>
24#include "internal.h"
25
26#ifdef CONFIG_COMPACTION
27static inline void count_compact_event(enum vm_event_item item)
28{
29 count_vm_event(item);
30}
31
32static inline void count_compact_events(enum vm_event_item item, long delta)
33{
34 count_vm_events(item, delta);
35}
36#else
37#define count_compact_event(item) do { } while (0)
38#define count_compact_events(item, delta) do { } while (0)
39#endif
40
41#if defined CONFIG_COMPACTION || defined CONFIG_CMA
42
43#define CREATE_TRACE_POINTS
44#include <trace/events/compaction.h>
45
46#define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order))
47#define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order))
48#define pageblock_start_pfn(pfn) block_start_pfn(pfn, pageblock_order)
49#define pageblock_end_pfn(pfn) block_end_pfn(pfn, pageblock_order)
50
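/*
 * Illustrative note (not in the original source): with a typical
 * pageblock_order of 9 (512 base pages, i.e. a 2MB pageblock on x86-64),
 * the helpers above behave as follows for pfn == 1000:
 *
 *	pageblock_start_pfn(1000) == round_down(1000, 512) == 512
 *	pageblock_end_pfn(1000)   == ALIGN(1001, 512)      == 1024
 *
 * so any pfn maps to the half-open [start, end) range of the pageblock
 * containing it.
 */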
51static unsigned long release_freepages(struct list_head *freelist)
52{
53 struct page *page, *next;
54 unsigned long high_pfn = 0;
55
56 list_for_each_entry_safe(page, next, freelist, lru) {
57 unsigned long pfn = page_to_pfn(page);
58 list_del(&page->lru);
59 __free_page(page);
60 if (pfn > high_pfn)
61 high_pfn = pfn;
62 }
63
64 return high_pfn;
65}
66
67static void map_pages(struct list_head *list)
68{
69 unsigned int i, order, nr_pages;
70 struct page *page, *next;
71 LIST_HEAD(tmp_list);
72
73 list_for_each_entry_safe(page, next, list, lru) {
74 list_del(&page->lru);
75
76 order = page_private(page);
77 nr_pages = 1 << order;
78
79 post_alloc_hook(page, order, __GFP_MOVABLE);
80 if (order)
81 split_page(page, order);
82
83 for (i = 0; i < nr_pages; i++) {
84 list_add(&page->lru, &tmp_list);
85 page++;
86 }
87 }
88
89 list_splice(&tmp_list, list);
90}
91
92static inline bool migrate_async_suitable(int migratetype)
93{
94 return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
95}
96
97#ifdef CONFIG_COMPACTION
98
99int PageMovable(struct page *page)
100{
101 struct address_space *mapping;
102
103 VM_BUG_ON_PAGE(!PageLocked(page), page);
104 if (!__PageMovable(page))
105 return 0;
106
107 mapping = page_mapping(page);
108 if (mapping && mapping->a_ops && mapping->a_ops->isolate_page)
109 return 1;
110
111 return 0;
112}
113EXPORT_SYMBOL(PageMovable);
114
115void __SetPageMovable(struct page *page, struct address_space *mapping)
116{
117 VM_BUG_ON_PAGE(!PageLocked(page), page);
118 VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
119 page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
120}
121EXPORT_SYMBOL(__SetPageMovable);
122
123void __ClearPageMovable(struct page *page)
124{
125 VM_BUG_ON_PAGE(!PageLocked(page), page);
126 VM_BUG_ON_PAGE(!PageMovable(page), page);
127 /*
128 * Clear registered address_space val with keeping PAGE_MAPPING_MOVABLE
129 * flag so that VM can catch up released page by driver after isolation.
130 * With it, VM migration doesn't try to put it back.
131 */
132 page->mapping = (void *)((unsigned long)page->mapping &
133 PAGE_MAPPING_MOVABLE);
134}
135EXPORT_SYMBOL(__ClearPageMovable);
136
137/* Do not skip compaction more than 64 times */
138#define COMPACT_MAX_DEFER_SHIFT 6
139
140/*
141 * Compaction is deferred when compaction fails to result in a page
142 * allocation success. 1 << compact_defer_shift compactions are skipped up
143 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
144 */
145void defer_compaction(struct zone *zone, int order)
146{
147 zone->compact_considered = 0;
148 zone->compact_defer_shift++;
149
150 if (order < zone->compact_order_failed)
151 zone->compact_order_failed = order;
152
153 if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
154 zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
155
156 trace_mm_compaction_defer_compaction(zone, order);
157}
158
159/* Returns true if compaction should be skipped this time */
160bool compaction_deferred(struct zone *zone, int order)
161{
162 unsigned long defer_limit = 1UL << zone->compact_defer_shift;
163
164 if (order < zone->compact_order_failed)
165 return false;
166
167 /* Avoid possible overflow */
168 if (++zone->compact_considered > defer_limit)
169 zone->compact_considered = defer_limit;
170
171 if (zone->compact_considered >= defer_limit)
172 return false;
173
174 trace_mm_compaction_deferred(zone, order);
175
176 return true;
177}
178
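/*
 * Illustrative note (not in the original source): each defer_compaction()
 * call above resets compact_considered and bumps compact_defer_shift, so
 * the defer_limit used by compaction_deferred() doubles on every failure:
 * 2, 4, 8, ... capped at 1 << COMPACT_MAX_DEFER_SHIFT == 64 attempts,
 * assuming the requested order stays at or above compact_order_failed.
 */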
179/*
180 * Update defer tracking counters after successful compaction of given order,
181 * which means an allocation either succeeded (alloc_success == true) or is
182 * expected to succeed.
183 */
184void compaction_defer_reset(struct zone *zone, int order,
185 bool alloc_success)
186{
187 if (alloc_success) {
188 zone->compact_considered = 0;
189 zone->compact_defer_shift = 0;
190 }
191 if (order >= zone->compact_order_failed)
192 zone->compact_order_failed = order + 1;
193
194 trace_mm_compaction_defer_reset(zone, order);
195}
196
197/* Returns true if restarting compaction after many failures */
198bool compaction_restarting(struct zone *zone, int order)
199{
200 if (order < zone->compact_order_failed)
201 return false;
202
203 return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
204 zone->compact_considered >= 1UL << zone->compact_defer_shift;
205}
206
207/* Returns true if the pageblock should be scanned for pages to isolate. */
208static inline bool isolation_suitable(struct compact_control *cc,
209 struct page *page)
210{
211 if (cc->ignore_skip_hint)
212 return true;
213
214 return !get_pageblock_skip(page);
215}
216
217static void reset_cached_positions(struct zone *zone)
218{
219 zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
220 zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
221 zone->compact_cached_free_pfn =
222 pageblock_start_pfn(zone_end_pfn(zone) - 1);
223}
224
225/*
226 * This function is called to clear all cached information on pageblocks that
227 * should be skipped for page isolation when the migrate and free page scanner
228 * meet.
229 */
230static void __reset_isolation_suitable(struct zone *zone)
231{
232 unsigned long start_pfn = zone->zone_start_pfn;
233 unsigned long end_pfn = zone_end_pfn(zone);
234 unsigned long pfn;
235
236 zone->compact_blockskip_flush = false;
237
238 /* Walk the zone and mark every pageblock as suitable for isolation */
239 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
240 struct page *page;
241
242 cond_resched();
243
244 if (!pfn_valid(pfn))
245 continue;
246
247 page = pfn_to_page(pfn);
248 if (zone != page_zone(page))
249 continue;
250
251 clear_pageblock_skip(page);
252 }
253
254 reset_cached_positions(zone);
255}
256
257void reset_isolation_suitable(pg_data_t *pgdat)
258{
259 int zoneid;
260
261 for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
262 struct zone *zone = &pgdat->node_zones[zoneid];
263 if (!populated_zone(zone))
264 continue;
265
266 /* Only flush if a full compaction finished recently */
267 if (zone->compact_blockskip_flush)
268 __reset_isolation_suitable(zone);
269 }
270}
271
272/*
273 * If no pages were isolated then mark this pageblock to be skipped in the
274 * future. The information is later cleared by __reset_isolation_suitable().
275 */
276static void update_pageblock_skip(struct compact_control *cc,
277 struct page *page, unsigned long nr_isolated,
278 bool migrate_scanner)
279 {
280 struct zone *zone = cc->zone;
281 unsigned long pfn;
282
283 if (cc->ignore_skip_hint)
284 return;
285
286 if (!page)
287 return;
288
289 if (nr_isolated)
290 return;
291
292 set_pageblock_skip(page);
293
294 pfn = page_to_pfn(page);
295
296 /* Update where async and sync compaction should restart */
297 if (migrate_scanner) {
298 if (pfn > zone->compact_cached_migrate_pfn[0])
299 zone->compact_cached_migrate_pfn[0] = pfn;
300 if (cc->mode != MIGRATE_ASYNC &&
301 pfn > zone->compact_cached_migrate_pfn[1])
302 zone->compact_cached_migrate_pfn[1] = pfn;
303 } else {
304 if (pfn < zone->compact_cached_free_pfn)
305 zone->compact_cached_free_pfn = pfn;
306 }
307}
308#else
309static inline bool isolation_suitable(struct compact_control *cc,
310 struct page *page)
311{
312 return true;
313}
314
315static void update_pageblock_skip(struct compact_control *cc,
316 struct page *page, unsigned long nr_isolated,
317 bool migrate_scanner)
318{
319}
320#endif /* CONFIG_COMPACTION */
321
322/*
323 * Compaction requires the taking of some coarse locks that are potentially
324 * very heavily contended. For async compaction, back out if the lock cannot
325 * be taken immediately. For sync compaction, spin on the lock if needed.
326 *
327 * Returns true if the lock is held
328 * Returns false if the lock is not held and compaction should abort
329 */
330static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
331 struct compact_control *cc)
332{
333 if (cc->mode == MIGRATE_ASYNC) {
334 if (!spin_trylock_irqsave(lock, *flags)) {
335 cc->contended = true;
336 return false;
337 }
338 } else {
339 spin_lock_irqsave(lock, *flags);
340 }
341
342 return true;
343}
344
345/*
346 * Compaction requires the taking of some coarse locks that are potentially
347 * very heavily contended. The lock should be periodically unlocked to avoid
348 * having disabled IRQs for a long time, even when there is nobody waiting on
349 * the lock. It might also be that allowing the IRQs will result in
350 * need_resched() becoming true. If scheduling is needed, async compaction
351 * aborts. Sync compaction schedules.
352 * Either compaction type will also abort if a fatal signal is pending.
353 * In either case if the lock was locked, it is dropped and not regained.
354 *
355 * Returns true if compaction should abort due to fatal signal pending, or
356 * async compaction due to need_resched()
357 * Returns false when compaction can continue (sync compaction might have
358 * scheduled)
359 */
360static bool compact_unlock_should_abort(spinlock_t *lock,
361 unsigned long flags, bool *locked, struct compact_control *cc)
362{
363 if (*locked) {
364 spin_unlock_irqrestore(lock, flags);
365 *locked = false;
366 }
367
368 if (fatal_signal_pending(current)) {
369 cc->contended = true;
370 return true;
371 }
372
373 if (need_resched()) {
374 if (cc->mode == MIGRATE_ASYNC) {
375 cc->contended = true;
376 return true;
377 }
378 cond_resched();
379 }
380
381 return false;
382}
383
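/*
 * Illustrative note (not in the original source): the scanners below call
 * compact_unlock_should_abort() on every pfn that is a multiple of
 * SWAP_CLUSTER_MAX (typically 32), so a held zone->lock or zone lru_lock
 * is released at least once per 32 pages scanned, bounding the time IRQs
 * stay disabled.
 */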
384/*
385 * Aside from avoiding lock contention, compaction also periodically checks
386 * need_resched() and either schedules in sync compaction or aborts async
387 * compaction. This is similar to what compact_unlock_should_abort() does, but
388 * is used where no lock is concerned.
389 *
390 * Returns false when no scheduling was needed, or sync compaction scheduled.
391 * Returns true when async compaction should abort.
392 */
393static inline bool compact_should_abort(struct compact_control *cc)
394{
395 /* async compaction aborts if contended */
396 if (need_resched()) {
397 if (cc->mode == MIGRATE_ASYNC) {
398 cc->contended = true;
399 return true;
400 }
401
402 cond_resched();
403 }
404
405 return false;
406}
407
408/*
409 * Isolate free pages onto a private freelist. If @strict is true, will abort
410 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
411 * (even though it may still end up isolating some pages).
412 */
413static unsigned long isolate_freepages_block(struct compact_control *cc,
414 unsigned long *start_pfn,
415 unsigned long end_pfn,
416 struct list_head *freelist,
417 bool strict)
418{
419 int nr_scanned = 0, total_isolated = 0;
420 struct page *cursor, *valid_page = NULL;
421 unsigned long flags = 0;
422 bool locked = false;
423 unsigned long blockpfn = *start_pfn;
424 unsigned int order;
425
426 cursor = pfn_to_page(blockpfn);
427
428 /* Isolate free pages. */
429 for (; blockpfn < end_pfn; blockpfn++, cursor++) {
430 int isolated;
431 struct page *page = cursor;
432
433 /*
434 * Periodically drop the lock (if held) regardless of its
435 * contention, to give chance to IRQs. Abort if fatal signal
436 * pending or async compaction detects need_resched()
437 */
438 if (!(blockpfn % SWAP_CLUSTER_MAX)
439 && compact_unlock_should_abort(&cc->zone->lock, flags,
440 &locked, cc))
441 break;
442
443 nr_scanned++;
444 if (!pfn_valid_within(blockpfn))
445 goto isolate_fail;
446
447 if (!valid_page)
448 valid_page = page;
449
450 /*
451 * For compound pages such as THP and hugetlbfs, we can save
452 * potentially a lot of iterations if we skip them at once.
453 * The check is racy, but we can consider only valid values
454 * and the only danger is skipping too much.
455 */
456 if (PageCompound(page)) {
457 unsigned int comp_order = compound_order(page);
458
459 if (likely(comp_order < MAX_ORDER)) {
460 blockpfn += (1UL << comp_order) - 1;
461 cursor += (1UL << comp_order) - 1;
462 }
463
464 goto isolate_fail;
465 }
466
467 if (!PageBuddy(page))
468 goto isolate_fail;
469
470 /*
471 * If we already hold the lock, we can skip some rechecking.
472 * Note that if we hold the lock now, checked_pageblock was
473 * already set in some previous iteration (or strict is true),
474 * so it is correct to skip the suitable migration target
475 * recheck as well.
476 */
477 if (!locked) {
478 /*
479 * The zone lock must be held to isolate freepages.
480 * Unfortunately this is a very coarse lock and can be
481 * heavily contended if there are parallel allocations
482 * or parallel compactions. For async compaction do not
483 * spin on the lock and we acquire the lock as late as
484 * possible.
485 */
486 locked = compact_trylock_irqsave(&cc->zone->lock,
487 &flags, cc);
488 if (!locked)
489 break;
490
491 /* Recheck this is a buddy page under lock */
492 if (!PageBuddy(page))
493 goto isolate_fail;
494 }
495
496 /* Found a free page, will break it into order-0 pages */
497 order = page_order(page);
498 isolated = __isolate_free_page(page, order);
499 if (!isolated)
500 break;
501 set_page_private(page, order);
502
503 total_isolated += isolated;
504 cc->nr_freepages += isolated;
505 list_add_tail(&page->lru, freelist);
506
507 if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
508 blockpfn += isolated;
509 break;
510 }
511 /* Advance to the end of split page */
512 blockpfn += isolated - 1;
513 cursor += isolated - 1;
514 continue;
515
516isolate_fail:
517 if (strict)
518 break;
519 else
520 continue;
521
522 }
523
524 if (locked)
525 spin_unlock_irqrestore(&cc->zone->lock, flags);
526
527 /*
528 * There is a tiny chance that we have read bogus compound_order(),
529 * so be careful to not go outside of the pageblock.
530 */
531 if (unlikely(blockpfn > end_pfn))
532 blockpfn = end_pfn;
533
534 trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
535 nr_scanned, total_isolated);
536
537 /* Record how far we have got within the block */
538 *start_pfn = blockpfn;
539
540 /*
541 * If strict isolation is requested by CMA then check that all the
542 * pages requested were isolated. If there were any failures, 0 is
543 * returned and CMA will fail.
544 */
545 if (strict && blockpfn < end_pfn)
546 total_isolated = 0;
547
548 /* Update the pageblock-skip if the whole pageblock was scanned */
549 if (blockpfn == end_pfn)
550 update_pageblock_skip(cc, valid_page, total_isolated, false);
551
552 cc->total_free_scanned += nr_scanned;
553 if (total_isolated)
554 count_compact_events(COMPACTISOLATED, total_isolated);
555 return total_isolated;
556}
557
558/**
559 * isolate_freepages_range() - isolate free pages.
560 * @start_pfn: The first PFN to start isolating.
561 * @end_pfn: The one-past-last PFN.
562 *
563 * Non-free pages, invalid PFNs, or zone boundaries within the
564 * [start_pfn, end_pfn) range are considered errors, cause function to
565 * undo its actions and return zero.
566 *
567 * Otherwise, function returns one-past-the-last PFN of isolated page
568 * (which may be greater than end_pfn if end fell in the middle of
569 * a free page).
570 */
571unsigned long
572isolate_freepages_range(struct compact_control *cc,
573 unsigned long start_pfn, unsigned long end_pfn)
574{
575 unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
576 LIST_HEAD(freelist);
577
578 pfn = start_pfn;
579 block_start_pfn = pageblock_start_pfn(pfn);
580 if (block_start_pfn < cc->zone->zone_start_pfn)
581 block_start_pfn = cc->zone->zone_start_pfn;
582 block_end_pfn = pageblock_end_pfn(pfn);
583
584 for (; pfn < end_pfn; pfn += isolated,
585 block_start_pfn = block_end_pfn,
586 block_end_pfn += pageblock_nr_pages) {
587 /* Protect pfn from changing by isolate_freepages_block */
588 unsigned long isolate_start_pfn = pfn;
589
590 block_end_pfn = min(block_end_pfn, end_pfn);
591
592 /*
593 * pfn could pass the block_end_pfn if isolated freepage
594 * is more than pageblock order. In this case, we adjust
595 * scanning range to right one.
596 */
597 if (pfn >= block_end_pfn) {
598 block_start_pfn = pageblock_start_pfn(pfn);
599 block_end_pfn = pageblock_end_pfn(pfn);
600 block_end_pfn = min(block_end_pfn, end_pfn);
601 }
602
603 if (!pageblock_pfn_to_page(block_start_pfn,
604 block_end_pfn, cc->zone))
605 break;
606
607 isolated = isolate_freepages_block(cc, &isolate_start_pfn,
608 block_end_pfn, &freelist, true);
609
610 /*
611 * In strict mode, isolate_freepages_block() returns 0 if
612 * there are any holes in the block (ie. invalid PFNs or
613 * non-free pages).
614 */
615 if (!isolated)
616 break;
617
618 /*
619 * If we managed to isolate pages, it is always (1 << n) *
620 * pageblock_nr_pages for some non-negative n. (Max order
621 * page may span two pageblocks).
622 */
623 }
624
625 /* __isolate_free_page() does not map the pages */
626 map_pages(&freelist);
627
628 if (pfn < end_pfn) {
629 /* Loop terminated early, cleanup. */
630 release_freepages(&freelist);
631 return 0;
632 }
633
634 /* We don't use freelists for anything. */
635 return pfn;
636}
637
638/* Similar to reclaim, but different enough that they don't share logic */
639static bool too_many_isolated(struct zone *zone)
640{
641 unsigned long active, inactive, isolated;
642
643 inactive = node_page_state(zone->zone_pgdat, NR_INACTIVE_FILE) +
644 node_page_state(zone->zone_pgdat, NR_INACTIVE_ANON);
645 active = node_page_state(zone->zone_pgdat, NR_ACTIVE_FILE) +
646 node_page_state(zone->zone_pgdat, NR_ACTIVE_ANON);
647 isolated = node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE) +
648 node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON);
649
650 return isolated > (inactive + active) / 2;
651}
652
653/**
654 * isolate_migratepages_block() - isolate all migrate-able pages within
655 * a single pageblock
656 * @cc: Compaction control structure.
657 * @low_pfn: The first PFN to isolate
658 * @end_pfn: The one-past-the-last PFN to isolate, within same pageblock
659 * @isolate_mode: Isolation mode to be used.
660 *
661 * Isolate all pages that can be migrated from the range specified by
662 * [low_pfn, end_pfn). The range is expected to be within same pageblock.
663 * Returns zero if there is a fatal signal pending, otherwise PFN of the
664 * first page that was not scanned (which may be both less, equal to or more
665 * than end_pfn).
666
667 * The pages are isolated on cc->migratepages list (not required to be empty),
668 * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
669 * is neither read nor updated.
670 */
671static unsigned long
672isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
673 unsigned long end_pfn, isolate_mode_t isolate_mode)
674{
675 struct zone *zone = cc->zone;
676 unsigned long nr_scanned = 0, nr_isolated = 0;
677 struct lruvec *lruvec;
678 unsigned long flags = 0;
679 bool locked = false;
680 struct page *page = NULL, *valid_page = NULL;
681 unsigned long start_pfn = low_pfn;
682 bool skip_on_failure = false;
683 unsigned long next_skip_pfn = 0;
684
685 /*
686 * Ensure that there are not too many pages isolated from the LRU
687 * list by either parallel reclaimers or compaction. If there are,
688 * delay for some time until fewer pages are isolated
689 */
690 while (unlikely(too_many_isolated(zone))) {
691 /* async migration should just abort */
692 if (cc->mode == MIGRATE_ASYNC)
693 return 0;
694
695 congestion_wait(BLK_RW_ASYNC, HZ/10);
696
697 if (fatal_signal_pending(current))
698 return 0;
699 }
700
701 if (compact_should_abort(cc))
702 return 0;
703
704 if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
705 skip_on_failure = true;
706 next_skip_pfn = block_end_pfn(low_pfn, cc->order);
707 }
708
709 /* Time to isolate some pages for migration */
710 for (; low_pfn < end_pfn; low_pfn++) {
711
712 if (skip_on_failure && low_pfn >= next_skip_pfn) {
713 /*
714 * We have isolated all migration candidates in the
715 * previous order-aligned block, and did not skip it due
716 * to failure. We should migrate the pages now and
717 * hopefully succeed compaction.
718 */
719 if (nr_isolated)
720 break;
721
722 /*
723 * We failed to isolate in the previous order-aligned
724 * block. Set the new boundary to the end of the
725 * current block. Note we can't simply increase
726 * next_skip_pfn by 1 << order, as low_pfn might have
727 * been incremented by a higher number due to skipping
728 * a compound or a high-order buddy page in the
729 * previous loop iteration.
730 */
731 next_skip_pfn = block_end_pfn(low_pfn, cc->order);
732 }
733
734 /*
735 * Periodically drop the lock (if held) regardless of its
736 * contention, to give chance to IRQs. Abort async compaction
737 * if contended.
738 */
739 if (!(low_pfn % SWAP_CLUSTER_MAX)
740 && compact_unlock_should_abort(zone_lru_lock(zone), flags,
741 &locked, cc))
742 break;
743
744 if (!pfn_valid_within(low_pfn))
745 goto isolate_fail;
746 nr_scanned++;
747
748 page = pfn_to_page(low_pfn);
749
750 if (!valid_page)
751 valid_page = page;
752
753 /*
754 * Skip if free. We read page order here without zone lock
755 * which is generally unsafe, but the race window is small and
756 * the worst thing that can happen is that we skip some
757 * potential isolation targets.
758 */
759 if (PageBuddy(page)) {
760 unsigned long freepage_order = page_order_unsafe(page);
761
762 /*
763 * Without lock, we cannot be sure that what we got is
764 * a valid page order. Consider only values in the
765 * valid order range to prevent low_pfn overflow.
766 */
767 if (freepage_order > 0 && freepage_order < MAX_ORDER)
768 low_pfn += (1UL << freepage_order) - 1;
769 continue;
770 }
771
772 /*
773 * Regardless of being on LRU, compound pages such as THP and
774 * hugetlbfs are not to be compacted. We can potentially save
775 * a lot of iterations if we skip them at once. The check is
776 * racy, but we can consider only valid values and the only
777 * danger is skipping too much.
778 */
779 if (PageCompound(page)) {
780 unsigned int comp_order = compound_order(page);
781
782 if (likely(comp_order < MAX_ORDER))
783 low_pfn += (1UL << comp_order) - 1;
784
785 goto isolate_fail;
786 }
787
788 /*
789 * Check may be lockless but that's ok as we recheck later.
790 * It's possible to migrate LRU and non-lru movable pages.
791 * Skip any other type of page
792 */
793 if (!PageLRU(page)) {
794 /*
795 * __PageMovable can return false positive so we need
796 * to verify it under page_lock.
797 */
798 if (unlikely(__PageMovable(page)) &&
799 !PageIsolated(page)) {
800 if (locked) {
801 spin_unlock_irqrestore(zone_lru_lock(zone),
802 flags);
803 locked = false;
804 }
805
806 if (!isolate_movable_page(page, isolate_mode))
807 goto isolate_success;
808 }
809
810 goto isolate_fail;
811 }
812
813 /*
814 * Migration will fail if an anonymous page is pinned in memory,
815 * so avoid taking lru_lock and isolating it unnecessarily in an
816 * admittedly racy check.
817 */
818 if (!page_mapping(page) &&
819 page_count(page) > page_mapcount(page))
820 goto isolate_fail;
821
822 /*
823 * Only allow to migrate anonymous pages in GFP_NOFS context
824 * because those do not depend on fs locks.
825 */
826 if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page))
827 goto isolate_fail;
828
829 /* If we already hold the lock, we can skip some rechecking */
830 if (!locked) {
831 locked = compact_trylock_irqsave(zone_lru_lock(zone),
832 &flags, cc);
833 if (!locked)
834 break;
835
836 /* Recheck PageLRU and PageCompound under lock */
837 if (!PageLRU(page))
838 goto isolate_fail;
839
840 /*
841 * Page become compound since the non-locked check,
842 * and it's on LRU. It can only be a THP so the order
843 * is safe to read and it's 0 for tail pages.
844 */
845 if (unlikely(PageCompound(page))) {
846 low_pfn += (1UL << compound_order(page)) - 1;
847 goto isolate_fail;
848 }
849 }
850
851 lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
852
853 /* Try isolate the page */
854 if (__isolate_lru_page(page, isolate_mode) != 0)
855 goto isolate_fail;
856
857 VM_BUG_ON_PAGE(PageCompound(page), page);
858
859 /* Successfully isolated */
860 del_page_from_lru_list(page, lruvec, page_lru(page));
861 inc_node_page_state(page,
862 NR_ISOLATED_ANON + page_is_file_cache(page));
863
864isolate_success:
865 list_add(&page->lru, &cc->migratepages);
866 cc->nr_migratepages++;
867 nr_isolated++;
868
869 /*
870 * Record where we could have freed pages by migration and not
871 * yet flushed them to buddy allocator.
872 * - this is the lowest page that was isolated and likely be
873 * then freed by migration.
874 */
875 if (!cc->last_migrated_pfn)
876 cc->last_migrated_pfn = low_pfn;
877
878 /* Avoid isolating too much */
879 if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
880 ++low_pfn;
881 break;
882 }
883
884 continue;
885isolate_fail:
886 if (!skip_on_failure)
887 continue;
888
889 /*
890 * We have isolated some pages, but then failed. Release them
891 * instead of migrating, as we cannot form the cc->order buddy
892 * page anyway.
893 */
894 if (nr_isolated) {
895 if (locked) {
896 spin_unlock_irqrestore(zone_lru_lock(zone), flags);
897 locked = false;
898 }
899 putback_movable_pages(&cc->migratepages);
900 cc->nr_migratepages = 0;
901 cc->last_migrated_pfn = 0;
902 nr_isolated = 0;
903 }
904
905 if (low_pfn < next_skip_pfn) {
906 low_pfn = next_skip_pfn - 1;
907 /*
908 * The check near the loop beginning would have updated
909 * next_skip_pfn too, but this is a bit simpler.
910 */
911 next_skip_pfn += 1UL << cc->order;
912 }
913 }
914
915 /*
916 * The PageBuddy() check could have potentially brought us outside
917 * the range to be scanned.
918 */
919 if (unlikely(low_pfn > end_pfn))
920 low_pfn = end_pfn;
921
922 if (locked)
923 spin_unlock_irqrestore(zone_lru_lock(zone), flags);
924
925 /*
926 * Update the pageblock-skip information and cached scanner pfn,
927 * if the whole pageblock was scanned without isolating any page.
928 */
929 if (low_pfn == end_pfn)
930 update_pageblock_skip(cc, valid_page, nr_isolated, true);
931
932 trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
933 nr_scanned, nr_isolated);
934
935 cc->total_migrate_scanned += nr_scanned;
936 if (nr_isolated)
937 count_compact_events(COMPACTISOLATED, nr_isolated);
938
939 return low_pfn;
940}
941
942/**
943 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
944 * @cc: Compaction control structure.
945 * @start_pfn: The first PFN to start isolating.
946 * @end_pfn: The one-past-last PFN.
947 *
948 * Returns zero if isolation fails fatally due to e.g. pending signal.
949 * Otherwise, function returns one-past-the-last PFN of isolated page
950 * (which may be greater than end_pfn if end fell in the middle of a THP page).
951 */
952unsigned long
953isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
954 unsigned long end_pfn)
955{
956 unsigned long pfn, block_start_pfn, block_end_pfn;
957
958 /* Scan block by block. First and last block may be incomplete */
959 pfn = start_pfn;
960 block_start_pfn = pageblock_start_pfn(pfn);
961 if (block_start_pfn < cc->zone->zone_start_pfn)
962 block_start_pfn = cc->zone->zone_start_pfn;
963 block_end_pfn = pageblock_end_pfn(pfn);
964
965 for (; pfn < end_pfn; pfn = block_end_pfn,
966 block_start_pfn = block_end_pfn,
967 block_end_pfn += pageblock_nr_pages) {
968
969 block_end_pfn = min(block_end_pfn, end_pfn);
970
971 if (!pageblock_pfn_to_page(block_start_pfn,
972 block_end_pfn, cc->zone))
973 continue;
974
975 pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
976 ISOLATE_UNEVICTABLE);
977
978 if (!pfn)
979 break;
980
981 if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
982 break;
983 }
984
985 return pfn;
986}
987
988#endif /* CONFIG_COMPACTION || CONFIG_CMA */
989#ifdef CONFIG_COMPACTION
990
991/* Returns true if the page is within a block suitable for migration to */
992static bool suitable_migration_target(struct compact_control *cc,
993 struct page *page)
994{
995 /* If the page is a large free page, then disallow migration */
996 if (PageBuddy(page)) {
997 /*
998 * We are checking page_order without zone->lock taken. But
999 * the only small danger is that we skip a potentially suitable
1000 * pageblock, so it's not worth to check order for valid range.
1001 */
1002 if (page_order_unsafe(page) >= pageblock_order)
1003 return false;
1004 }
1005
1006 if (cc->ignore_block_suitable)
1007 return true;
1008
1009 /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
1010 if (migrate_async_suitable(get_pageblock_migratetype(page)))
1011 return true;
1012
1013 /* Otherwise skip the block */
1014 return false;
1015}
1016
1017/*
1018 * Test whether the free scanner has reached the same or lower pageblock than
1019 * the migration scanner, and compaction should thus terminate.
1020 */
1021static inline bool compact_scanners_met(struct compact_control *cc)
1022{
1023 return (cc->free_pfn >> pageblock_order)
1024 <= (cc->migrate_pfn >> pageblock_order);
1025}
1026
1027/*
1028 * Based on information in the current compact_control, find blocks
1029 * suitable for isolating free pages from and then isolate them.
1030 */
1031static void isolate_freepages(struct compact_control *cc)
1032{
1033 struct zone *zone = cc->zone;
1034 struct page *page;
1035 unsigned long block_start_pfn; /* start of current pageblock */
1036 unsigned long isolate_start_pfn; /* exact pfn we start at */
1037 unsigned long block_end_pfn; /* end of current pageblock */
1038 unsigned long low_pfn; /* lowest pfn scanner is able to scan */
1039 struct list_head *freelist = &cc->freepages;
1040
1041 /*
1042 * Initialise the free scanner. The starting point is where we last
1043 * successfully isolated from, zone-cached value, or the end of the
1044 * zone when isolating for the first time. For looping we also need
1045 * this pfn aligned down to the pageblock boundary, because we do
1046 * block_start_pfn -= pageblock_nr_pages in the for loop.
1047 * For ending point, take care when isolating in last pageblock of a
1048 * zone which ends in the middle of a pageblock.
1049 * The low boundary is the end of the pageblock the migration scanner
1050 * is using.
1051 */
1052 isolate_start_pfn = cc->free_pfn;
1053 block_start_pfn = pageblock_start_pfn(cc->free_pfn);
1054 block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
1055 zone_end_pfn(zone));
1056 low_pfn = pageblock_end_pfn(cc->migrate_pfn);
1057
1058 /*
1059 * Isolate free pages until enough are available to migrate the
1060 * pages on cc->migratepages. We stop searching if the migrate
1061 * and free page scanners meet or enough free pages are isolated.
1062 */
1063 for (; block_start_pfn >= low_pfn;
1064 block_end_pfn = block_start_pfn,
1065 block_start_pfn -= pageblock_nr_pages,
1066 isolate_start_pfn = block_start_pfn) {
1067 /*
1068 * This can iterate a massively long zone without finding any
1069 * suitable migration targets, so periodically check if we need
1070 * to schedule, or even abort async compaction.
1071 */
1072 if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
1073 && compact_should_abort(cc))
1074 break;
1075
1076 page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
1077 zone);
1078 if (!page)
1079 continue;
1080
1081 /* Check the block is suitable for migration */
1082 if (!suitable_migration_target(cc, page))
1083 continue;
1084
1085 /* If isolation recently failed, do not retry */
1086 if (!isolation_suitable(cc, page))
1087 continue;
1088
1089 /* Found a block suitable for isolating free pages from. */
1090 isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
1091 freelist, false);
1092
1093 /*
1094 * If we isolated enough freepages, or aborted due to lock
1095 * contention, terminate.
1096 */
1097 if ((cc->nr_freepages >= cc->nr_migratepages)
1098 || cc->contended) {
1099 if (isolate_start_pfn >= block_end_pfn) {
1100 /*
1101 * Restart at previous pageblock if more
1102 * freepages can be isolated next time.
1103 */
1104 isolate_start_pfn =
1105 block_start_pfn - pageblock_nr_pages;
1106 }
1107 break;
1108 } else if (isolate_start_pfn < block_end_pfn) {
1109 /*
1110 * If isolation failed early, do not continue
1111 * needlessly.
1112 */
1113 break;
1114 }
1115 }
1116
1117 /* __isolate_free_page() does not map the pages */
1118 map_pages(freelist);
1119
1120 /*
1121 * Record where the free scanner will restart next time. Either we
1122 * broke from the loop and set isolate_start_pfn based on the last
1123 * call to isolate_freepages_block(), or we met the migration scanner
1124 * and the loop terminated due to isolate_start_pfn < low_pfn
1125 */
1126 cc->free_pfn = isolate_start_pfn;
1127}
1128
1129/*
1130 * This is a migrate-callback that "allocates" freepages by taking pages
1131 * from the isolated freelists in the block we are migrating to.
1132 */
1133static struct page *compaction_alloc(struct page *migratepage,
1134 unsigned long data,
1135 int **result)
1136{
1137 struct compact_control *cc = (struct compact_control *)data;
1138 struct page *freepage;
1139
1140 /*
1141 * Isolate free pages if necessary, and if we are not aborting due to
1142 * contention.
1143 */
748446bb 1144 if (list_empty(&cc->freepages)) {
1145 if (!cc->contended)
1146 isolate_freepages(cc);
1147
1148 if (list_empty(&cc->freepages))
1149 return NULL;
1150 }
1151
1152 freepage = list_entry(cc->freepages.next, struct page, lru);
1153 list_del(&freepage->lru);
1154 cc->nr_freepages--;
1155
1156 return freepage;
1157}
1158
1159/*
1160 * This is a migrate-callback that "frees" freepages back to the isolated
1161 * freelist. All pages on the freelist are from the same zone, so there is no
1162 * special handling needed for NUMA.
1163 */
1164static void compaction_free(struct page *page, unsigned long data)
1165{
1166 struct compact_control *cc = (struct compact_control *)data;
1167
1168 list_add(&page->lru, &cc->freepages);
1169 cc->nr_freepages++;
1170}
1171
1172/* possible outcome of isolate_migratepages */
1173typedef enum {
1174 ISOLATE_ABORT, /* Abort compaction now */
1175 ISOLATE_NONE, /* No pages isolated, continue scanning */
1176 ISOLATE_SUCCESS, /* Pages isolated, migrate */
1177} isolate_migrate_t;
1178
1179/*
1180 * Allow userspace to control policy on scanning the unevictable LRU for
1181 * compactable pages.
1182 */
1183int sysctl_compact_unevictable_allowed __read_mostly = 1;
1184
1185/*
1186 * Isolate all pages that can be migrated from the first suitable block,
1187 * starting at the block pointed to by the migrate scanner pfn within
1188 * compact_control.
1189 */
1190static isolate_migrate_t isolate_migratepages(struct zone *zone,
1191 struct compact_control *cc)
1192{
1193 unsigned long block_start_pfn;
1194 unsigned long block_end_pfn;
1195 unsigned long low_pfn;
1196 struct page *page;
1197 const isolate_mode_t isolate_mode =
1198 (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
1199 (cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0);
1200
1201 /*
1202 * Start at where we last stopped, or beginning of the zone as
1203 * initialized by compact_zone()
1204 */
1205 low_pfn = cc->migrate_pfn;
1206 block_start_pfn = pageblock_start_pfn(low_pfn);
1207 if (block_start_pfn < zone->zone_start_pfn)
1208 block_start_pfn = zone->zone_start_pfn;
1209
1210 /* Only scan within a pageblock boundary */
1211 block_end_pfn = pageblock_end_pfn(low_pfn);
1212
1213 /*
1214 * Iterate over whole pageblocks until we find the first suitable.
1215 * Do not cross the free scanner.
1216 */
1217 for (; block_end_pfn <= cc->free_pfn;
1218 low_pfn = block_end_pfn,
1219 block_start_pfn = block_end_pfn,
1220 block_end_pfn += pageblock_nr_pages) {
1221
1222 /*
1223 * This can potentially iterate a massively long zone with
1224 * many pageblocks unsuitable, so periodically check if we
1225 * need to schedule, or even abort async compaction.
1226 */
1227 if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
1228 && compact_should_abort(cc))
1229 break;
1230
1231 page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
1232 zone);
1233 if (!page)
1234 continue;
1235
1236 /* If isolation recently failed, do not retry */
1237 if (!isolation_suitable(cc, page))
1238 continue;
1239
1240 /*
1241 * For async compaction, also only scan in MOVABLE blocks.
1242 * Async compaction is optimistic to see if the minimum amount
1243 * of work satisfies the allocation.
1244 */
1245 if (cc->mode == MIGRATE_ASYNC &&
1246 !migrate_async_suitable(get_pageblock_migratetype(page)))
1247 continue;
1248
1249 /* Perform the isolation */
1250 low_pfn = isolate_migratepages_block(cc, low_pfn,
1251 block_end_pfn, isolate_mode);
1252
1253 if (!low_pfn || cc->contended)
1254 return ISOLATE_ABORT;
1255
1256 /*
1257 * Either we isolated something and proceed with migration. Or
1258 * we failed and compact_zone should decide if we should
1259 * continue or not.
1260 */
1261 break;
1262 }
1263
1264 /* Record where migration scanner will be restarted. */
1265 cc->migrate_pfn = low_pfn;
1266
1267 return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
1268}
1269
1270/*
1271 * order == -1 is expected when compacting via
1272 * /proc/sys/vm/compact_memory
1273 */
1274static inline bool is_via_compact_memory(int order)
1275{
1276 return order == -1;
1277}
1278
1279static enum compact_result __compact_finished(struct zone *zone, struct compact_control *cc,
1280 const int migratetype)
1281{
1282 unsigned int order;
1283
1284 if (cc->contended || fatal_signal_pending(current))
1285 return COMPACT_CONTENDED;
1286
1287 /* Compaction run completes if the migrate and free scanner meet */
1288 if (compact_scanners_met(cc)) {
1289 /* Let the next compaction start anew. */
1290 reset_cached_positions(zone);
1291
1292 /*
1293 * Mark that the PG_migrate_skip information should be cleared
1294 * by kswapd when it goes to sleep. kcompactd does not set the
1295 * flag itself as the decision to be clear should be directly
1296 * based on an allocation request.
1297 */
1298 if (cc->direct_compaction)
1299 zone->compact_blockskip_flush = true;
1300
1301 if (cc->whole_zone)
1302 return COMPACT_COMPLETE;
1303 else
1304 return COMPACT_PARTIAL_SKIPPED;
1305 }
1306
1307 if (is_via_compact_memory(cc->order))
1308 return COMPACT_CONTINUE;
1309
1310 /* Direct compactor: Is a suitable page free? */
1311 for (order = cc->order; order < MAX_ORDER; order++) {
1312 struct free_area *area = &zone->free_area[order];
1313 bool can_steal;
1314
1315 /* Job done if page is free of the right migratetype */
1316 if (!list_empty(&area->free_list[migratetype]))
1317 return COMPACT_SUCCESS;
1318
1319#ifdef CONFIG_CMA
1320 /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
1321 if (migratetype == MIGRATE_MOVABLE &&
1322 !list_empty(&area->free_list[MIGRATE_CMA]))
1323 return COMPACT_SUCCESS;
1324#endif
1325 /*
1326 * Job done if allocation would steal freepages from
1327 * other migratetype buddy lists.
1328 */
1329 if (find_suitable_fallback(area, order, migratetype,
1330 true, &can_steal) != -1)
1331 return COMPACT_SUCCESS;
1332 }
1333
1334 return COMPACT_NO_SUITABLE_PAGE;
1335}
1336
1337static enum compact_result compact_finished(struct zone *zone,
1338 struct compact_control *cc,
1339 const int migratetype)
1340{
1341 int ret;
1342
1343 ret = __compact_finished(zone, cc, migratetype);
1344 trace_mm_compaction_finished(zone, cc->order, ret);
1345 if (ret == COMPACT_NO_SUITABLE_PAGE)
1346 ret = COMPACT_CONTINUE;
1347
1348 return ret;
1349}
1350
1351/*
1352 * compaction_suitable: Is this suitable to run compaction on this zone now?
1353 * Returns
1354 * COMPACT_SKIPPED - If there are too few free pages for compaction
1355 * COMPACT_SUCCESS - If the allocation would succeed without compaction
1356 * COMPACT_CONTINUE - If compaction should run now
1357 */
1358static enum compact_result __compaction_suitable(struct zone *zone, int order,
1359 unsigned int alloc_flags,
1360 int classzone_idx,
1361 unsigned long wmark_target)
1362{
1363 unsigned long watermark;
1364
1365 if (is_via_compact_memory(order))
1366 return COMPACT_CONTINUE;
1367
1368 watermark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
1369 /*
1370 * If watermarks for high-order allocation are already met, there
1371 * should be no need for compaction at all.
1372 */
1373 if (zone_watermark_ok(zone, order, watermark, classzone_idx,
1374 alloc_flags))
1375 return COMPACT_SUCCESS;
1376
1377 /*
1378 * Watermarks for order-0 must be met for compaction to be able to
1379 * isolate free pages for migration targets. This means that the
1380 * watermark and alloc_flags have to match, or be more pessimistic than
1381 * the check in __isolate_free_page(). We don't use the direct
1382 * compactor's alloc_flags, as they are not relevant for freepage
1383 * isolation. We however do use the direct compactor's classzone_idx to
1384 * skip over zones where lowmem reserves would prevent allocation even
1385 * if compaction succeeds.
1386 * For costly orders, we require low watermark instead of min for
1387 * compaction to proceed to increase its chances.
1388 * ALLOC_CMA is used, as pages in CMA pageblocks are considered
1389 * suitable migration targets
1390 */
1391 watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
1392 low_wmark_pages(zone) : min_wmark_pages(zone);
1393 watermark += compact_gap(order);
1394 if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx,
1395 ALLOC_CMA, wmark_target))
1396 return COMPACT_SKIPPED;
1397
1398 return COMPACT_CONTINUE;
1399}
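/*
 * Illustrative note (not in the original source): the order-0 check above
 * adds compact_gap(order) on top of the low/min watermark so that enough
 * free pages remain both for the free scanner to isolate migration targets
 * and for the allocation itself; compact_gap() lives in
 * <linux/compaction.h> and, in this kernel, evaluates to 2UL << order,
 * i.e. roughly twice the requested allocation size.
 */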
1400
1401enum compact_result compaction_suitable(struct zone *zone, int order,
1402 unsigned int alloc_flags,
1403 int classzone_idx)
1404{
1405 enum compact_result ret;
1406 int fragindex;
1407
1408 ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx,
1409 zone_page_state(zone, NR_FREE_PAGES));
1410 /*
1411 * fragmentation index determines if allocation failures are due to
1412 * low memory or external fragmentation
1413 *
1414 * index of -1000 would imply allocations might succeed depending on
1415 * watermarks, but we already failed the high-order watermark check
1416 * index towards 0 implies failure is due to lack of memory
1417 * index towards 1000 implies failure is due to fragmentation
1418 *
1419 * Only compact if a failure would be due to fragmentation. Also
1420 * ignore fragindex for non-costly orders where the alternative to
1421 * a successful reclaim/compaction is OOM. Fragindex and the
1422 * vm.extfrag_threshold sysctl is meant as a heuristic to prevent
1423 * excessive compaction for costly orders, but it should not be at the
1424 * expense of system stability.
1425 */
1426 if (ret == COMPACT_CONTINUE && (order > PAGE_ALLOC_COSTLY_ORDER)) {
1427 fragindex = fragmentation_index(zone, order);
1428 if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
1429 ret = COMPACT_NOT_SUITABLE_ZONE;
1430 }
1431
1432 trace_mm_compaction_suitable(zone, order, ret);
1433 if (ret == COMPACT_NOT_SUITABLE_ZONE)
1434 ret = COMPACT_SKIPPED;
1435
1436 return ret;
1437}
1438
1439bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
1440 int alloc_flags)
1441{
1442 struct zone *zone;
1443 struct zoneref *z;
1444
1445 /*
1446 * Make sure at least one zone would pass __compaction_suitable if we continue
1447 * retrying the reclaim.
1448 */
1449 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
1450 ac->nodemask) {
1451 unsigned long available;
1452 enum compact_result compact_result;
1453
1454 /*
1455 * Do not consider all the reclaimable memory because we do not
1456 * want to thrash just for a single high order allocation which
1457 * is even not guaranteed to appear even if __compaction_suitable
1458 * is happy about the watermark check.
1459 */
1460 available = zone_reclaimable_pages(zone) / order;
1461 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
1462 compact_result = __compaction_suitable(zone, order, alloc_flags,
1463 ac_classzone_idx(ac), available);
1464 if (compact_result != COMPACT_SKIPPED)
1465 return true;
1466 }
1467
1468 return false;
1469}
1470
1471static enum compact_result compact_zone(struct zone *zone, struct compact_control *cc)
1472{
1473 enum compact_result ret;
1474 unsigned long start_pfn = zone->zone_start_pfn;
1475 unsigned long end_pfn = zone_end_pfn(zone);
1476 const int migratetype = gfpflags_to_migratetype(cc->gfp_mask);
1477 const bool sync = cc->mode != MIGRATE_ASYNC;
1478
1479 ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
1480 cc->classzone_idx);
1481 /* Compaction is likely to fail */
1482 if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED)
1483 return ret;
1484
1485 /* huh, compaction_suitable is returning something unexpected */
1486 VM_BUG_ON(ret != COMPACT_CONTINUE);
1487
1488 /*
1489 * Clear pageblock skip if there were failures recently and compaction
1490 * is about to be retried after being deferred.
1491 */
1492 if (compaction_restarting(zone, cc->order))
1493 __reset_isolation_suitable(zone);
1494
1495 /*
1496 * Setup to move all movable pages to the end of the zone. Used cached
1497 * information on where the scanners should start (unless we explicitly
1498 * want to compact the whole zone), but check that it is initialised
1499 * by ensuring the values are within zone boundaries.
1500 */
1501 if (cc->whole_zone) {
1502 cc->migrate_pfn = start_pfn;
1503 cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
1504 } else {
1505 cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
1506 cc->free_pfn = zone->compact_cached_free_pfn;
1507 if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
1508 cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
1509 zone->compact_cached_free_pfn = cc->free_pfn;
1510 }
1511 if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
1512 cc->migrate_pfn = start_pfn;
1513 zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
1514 zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
1515 }
1516
1517 if (cc->migrate_pfn == start_pfn)
1518 cc->whole_zone = true;
1519 }
1520
1521 cc->last_migrated_pfn = 0;
1522
1523 trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
1524 cc->free_pfn, end_pfn, sync);
1525
1526 migrate_prep_local();
1527
1528 while ((ret = compact_finished(zone, cc, migratetype)) ==
1529 COMPACT_CONTINUE) {
1530 int err;
1531
1532 switch (isolate_migratepages(zone, cc)) {
1533 case ISOLATE_ABORT:
1534 ret = COMPACT_CONTENDED;
1535 putback_movable_pages(&cc->migratepages);
1536 cc->nr_migratepages = 0;
1537 goto out;
1538 case ISOLATE_NONE:
1539 /*
1540 * We haven't isolated and migrated anything, but
1541 * there might still be unflushed migrations from
1542 * previous cc->order aligned block.
1543 */
1544 goto check_drain;
1545 case ISOLATE_SUCCESS:
1546 ;
1547 }
1548
1549 err = migrate_pages(&cc->migratepages, compaction_alloc,
1550 compaction_free, (unsigned long)cc, cc->mode,
1551 MR_COMPACTION);
1552
1553 trace_mm_compaction_migratepages(cc->nr_migratepages, err,
1554 &cc->migratepages);
1555
1556 /* All pages were either migrated or will be released */
1557 cc->nr_migratepages = 0;
1558 if (err) {
1559 putback_movable_pages(&cc->migratepages);
1560 /*
1561 * migrate_pages() may return -ENOMEM when scanners meet
1562 * and we want compact_finished() to detect it
1563 */
1564 if (err == -ENOMEM && !compact_scanners_met(cc)) {
1565 ret = COMPACT_CONTENDED;
1566 goto out;
1567 }
1568 /*
1569 * We failed to migrate at least one page in the current
1570 * order-aligned block, so skip the rest of it.
1571 */
1572 if (cc->direct_compaction &&
1573 (cc->mode == MIGRATE_ASYNC)) {
1574 cc->migrate_pfn = block_end_pfn(
1575 cc->migrate_pfn - 1, cc->order);
1576 /* Draining pcplists is useless in this case */
1577 cc->last_migrated_pfn = 0;
1578
1579 }
1580 }
1581
1582check_drain:
1583 /*
1584 * Has the migration scanner moved away from the previous
1585 * cc->order aligned block where we migrated from? If yes,
1586 * flush the pages that were freed, so that they can merge and
1587 * compact_finished() can detect immediately if allocation
1588 * would succeed.
1589 */
1590 if (cc->order > 0 && cc->last_migrated_pfn) {
1591 int cpu;
1592 unsigned long current_block_start =
1593 block_start_pfn(cc->migrate_pfn, cc->order);
1594
1595 if (cc->last_migrated_pfn < current_block_start) {
1596 cpu = get_cpu();
1597 lru_add_drain_cpu(cpu);
1598 drain_local_pages(zone);
1599 put_cpu();
1600 /* No more flushing until we migrate again */
1601 cc->last_migrated_pfn = 0;
1602 }
1603 }
1604
1605 }
1606
1607out:
1608 /*
1609 * Release free pages and update where the free scanner should restart,
1610 * so we don't leave any returned pages behind in the next attempt.
1611 */
1612 if (cc->nr_freepages > 0) {
1613 unsigned long free_pfn = release_freepages(&cc->freepages);
1614
1615 cc->nr_freepages = 0;
1616 VM_BUG_ON(free_pfn == 0);
1617 /* The cached pfn is always the first in a pageblock */
1618 free_pfn = pageblock_start_pfn(free_pfn);
1619 /*
1620 * Only go back, not forward. The cached pfn might have been
1621 * already reset to zone end in compact_finished()
1622 */
1623 if (free_pfn > zone->compact_cached_free_pfn)
1624 zone->compact_cached_free_pfn = free_pfn;
1625 }
748446bb 1626
7f354a54
DR
1627 count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned);
1628 count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned);
1629
16c4a097
JK
1630 trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
1631 cc->free_pfn, end_pfn, sync, ret);
0eb927c0 1632
748446bb
MG
1633 return ret;
1634}
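/*
 * Summary of the loop above: each pass isolates a batch of in-use pages
 * with isolate_migratepages(), moves them with migrate_pages() using the
 * compaction_alloc()/compaction_free() pair as its allocator, and, once
 * the migration scanner has left the cc->order-aligned block it was
 * working on, drains the per-cpu LRU and page lists so the freed pages
 * can merge and compact_finished() can notice a suitable free page.
 */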
76ab0f53 1635
ea7ab982 1636static enum compact_result compact_zone_order(struct zone *zone, int order,
c3486f53 1637 gfp_t gfp_mask, enum compact_priority prio,
c603844b 1638 unsigned int alloc_flags, int classzone_idx)
56de7263 1639{
ea7ab982 1640 enum compact_result ret;
56de7263
MG
1641 struct compact_control cc = {
1642 .nr_freepages = 0,
1643 .nr_migratepages = 0,
7f354a54
DR
1644 .total_migrate_scanned = 0,
1645 .total_free_scanned = 0,
56de7263 1646 .order = order,
6d7ce559 1647 .gfp_mask = gfp_mask,
56de7263 1648 .zone = zone,
a5508cd8
VB
1649 .mode = (prio == COMPACT_PRIO_ASYNC) ?
1650 MIGRATE_ASYNC : MIGRATE_SYNC_LIGHT,
ebff3980
VB
1651 .alloc_flags = alloc_flags,
1652 .classzone_idx = classzone_idx,
accf6242 1653 .direct_compaction = true,
a8e025e5 1654 .whole_zone = (prio == MIN_COMPACT_PRIORITY),
9f7e3387
VB
1655 .ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY),
1656 .ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY)
56de7263
MG
1657 };
1658 INIT_LIST_HEAD(&cc.freepages);
1659 INIT_LIST_HEAD(&cc.migratepages);
1660
e64c5237
SL
1661 ret = compact_zone(zone, &cc);
1662
1663 VM_BUG_ON(!list_empty(&cc.freepages));
1664 VM_BUG_ON(!list_empty(&cc.migratepages));
1665
e64c5237 1666 return ret;
56de7263
MG
1667}
1668
5e771905
MG
1669int sysctl_extfrag_threshold = 500;
1670
56de7263
MG
1671/**
1672 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
56de7263 1673 * @gfp_mask: The GFP mask of the current allocation
1a6d53a1
VB
1674 * @order: The order of the current allocation
1675 * @alloc_flags: The allocation flags of the current allocation
1676 * @ac: The context of the current allocation
e0b9daeb 1677 * @prio: Determines how hard direct compaction should try to succeed
56de7263
MG
1678 *
1679 * This is the main entry point for direct page compaction.
1680 */
ea7ab982 1681enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
c603844b 1682 unsigned int alloc_flags, const struct alloc_context *ac,
c3486f53 1683 enum compact_priority prio)
56de7263 1684{
56de7263 1685 int may_perform_io = gfp_mask & __GFP_IO;
56de7263
MG
1686 struct zoneref *z;
1687 struct zone *zone;
1d4746d3 1688 enum compact_result rc = COMPACT_SKIPPED;
56de7263 1689
73e64c51
MH
1690 /*
1691 * Check if the GFP flags allow compaction - GFP_NOIO is a really
1692 * tricky context because the migration might require IO.
1693 */
1694 if (!may_perform_io)
53853e2d 1695 return COMPACT_SKIPPED;
56de7263 1696
a5508cd8 1697 trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);
837d026d 1698
56de7263 1699 /* Compact each zone in the list */
1a6d53a1
VB
1700 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
1701 ac->nodemask) {
ea7ab982 1702 enum compact_result status;
56de7263 1703
a8e025e5
VB
1704 if (prio > MIN_COMPACT_PRIORITY
1705 && compaction_deferred(zone, order)) {
1d4746d3 1706 rc = max_t(enum compact_result, COMPACT_DEFERRED, rc);
53853e2d 1707 continue;
1d4746d3 1708 }
53853e2d 1709
a5508cd8 1710 status = compact_zone_order(zone, order, gfp_mask, prio,
c3486f53 1711 alloc_flags, ac_classzone_idx(ac));
56de7263
MG
1712 rc = max(status, rc);
1713
7ceb009a
VB
1714 /* The allocation should succeed, stop compacting */
1715 if (status == COMPACT_SUCCESS) {
53853e2d
VB
1716 /*
1717 * We think the allocation will succeed in this zone,
1718 * but it is not certain, hence the false. The caller
1719 * will repeat this with true if allocation indeed
1720 * succeeds in this zone.
1721 */
1722 compaction_defer_reset(zone, order, false);
1f9efdef 1723
c3486f53 1724 break;
1f9efdef
VB
1725 }
1726
a5508cd8 1727 if (prio != COMPACT_PRIO_ASYNC && (status == COMPACT_COMPLETE ||
c3486f53 1728 status == COMPACT_PARTIAL_SKIPPED))
53853e2d
VB
1729 /*
1730 * We think that allocation won't succeed in this zone
1731 * so we defer compaction there. If it ends up
1732 * succeeding after all, it will be reset.
1733 */
1734 defer_compaction(zone, order);
1f9efdef
VB
1735
1736 /*
1737 * We might have stopped compacting due to need_resched() in
1738 * async compaction, or because a fatal signal was detected. In that
c3486f53 1739 * case do not try further zones
1f9efdef 1740 */
c3486f53
VB
1741 if ((prio == COMPACT_PRIO_ASYNC && need_resched())
1742 || fatal_signal_pending(current))
1743 break;
56de7263
MG
1744 }
1745
1746 return rc;
1747}
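/*
 * Rough sketch (not verbatim from this file) of how the direct-compaction
 * path in mm/page_alloc.c drives this entry point; the real
 * __alloc_pages_direct_compact() additionally counts events and attempts
 * the allocation afterwards:
 *
 *	current->flags |= PF_MEMALLOC;
 *	*compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags,
 *						ac, prio);
 *	current->flags &= ~PF_MEMALLOC;
 *
 *	if (*compact_result <= COMPACT_INACTIVE)
 *		return NULL;
 */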
1748
1749
76ab0f53 1750/* Compact all zones within a node */
791cae96 1751static void compact_node(int nid)
76ab0f53 1752{
791cae96 1753 pg_data_t *pgdat = NODE_DATA(nid);
76ab0f53 1754 int zoneid;
76ab0f53 1755 struct zone *zone;
791cae96
VB
1756 struct compact_control cc = {
1757 .order = -1,
7f354a54
DR
1758 .total_migrate_scanned = 0,
1759 .total_free_scanned = 0,
791cae96
VB
1760 .mode = MIGRATE_SYNC,
1761 .ignore_skip_hint = true,
1762 .whole_zone = true,
73e64c51 1763 .gfp_mask = GFP_KERNEL,
791cae96
VB
1764 };
1765
76ab0f53 1766
76ab0f53 1767 for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
76ab0f53
MG
1768
1769 zone = &pgdat->node_zones[zoneid];
1770 if (!populated_zone(zone))
1771 continue;
1772
791cae96
VB
1773 cc.nr_freepages = 0;
1774 cc.nr_migratepages = 0;
1775 cc.zone = zone;
1776 INIT_LIST_HEAD(&cc.freepages);
1777 INIT_LIST_HEAD(&cc.migratepages);
76ab0f53 1778
791cae96 1779 compact_zone(zone, &cc);
75469345 1780
791cae96
VB
1781 VM_BUG_ON(!list_empty(&cc.freepages));
1782 VM_BUG_ON(!list_empty(&cc.migratepages));
76ab0f53 1783 }
76ab0f53
MG
1784}
1785
1786/* Compact all nodes in the system */
7964c06d 1787static void compact_nodes(void)
76ab0f53
MG
1788{
1789 int nid;
1790
8575ec29
HD
1791 /* Flush pending updates to the LRU lists */
1792 lru_add_drain_all();
1793
76ab0f53
MG
1794 for_each_online_node(nid)
1795 compact_node(nid);
76ab0f53
MG
1796}
1797
1798/* The written value is actually unused; all memory is compacted */
1799int sysctl_compact_memory;
1800
fec4eb2c
YB
1801/*
1802 * This is the entry point for compacting all nodes via
1803 * /proc/sys/vm/compact_memory
1804 */
76ab0f53
MG
1805int sysctl_compaction_handler(struct ctl_table *table, int write,
1806 void __user *buffer, size_t *length, loff_t *ppos)
1807{
1808 if (write)
7964c06d 1809 compact_nodes();
76ab0f53
MG
1810
1811 return 0;
1812}
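/*
 * For reference, this handler backs /proc/sys/vm/compact_memory (the table
 * entry lives in kernel/sysctl.c); any write triggers full compaction of
 * every online node, e.g. from the shell:
 *
 *	echo 1 > /proc/sys/vm/compact_memory
 */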
ed4a6d7f 1813
5e771905
MG
1814int sysctl_extfrag_handler(struct ctl_table *table, int write,
1815 void __user *buffer, size_t *length, loff_t *ppos)
1816{
1817 proc_dointvec_minmax(table, write, buffer, length, ppos);
1818
1819 return 0;
1820}
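/*
 * For reference, this handler backs /proc/sys/vm/extfrag_threshold
 * (clamped to 0..1000 by the sysctl table, default 500 as set above).
 * Roughly speaking, compaction_suitable() compares the zone's
 * fragmentation index for the requested order against this value: an
 * index at or below the threshold means the allocation failure is judged
 * to stem from a lack of free memory rather than from fragmentation, so
 * compaction is skipped in favour of reclaim.
 */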
1821
ed4a6d7f 1822#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
74e77fb9 1823static ssize_t sysfs_compact_node(struct device *dev,
10fbcf4c 1824 struct device_attribute *attr,
ed4a6d7f
MG
1825 const char *buf, size_t count)
1826{
8575ec29
HD
1827 int nid = dev->id;
1828
1829 if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
1830 /* Flush pending updates to the LRU lists */
1831 lru_add_drain_all();
1832
1833 compact_node(nid);
1834 }
ed4a6d7f
MG
1835
1836 return count;
1837}
10fbcf4c 1838static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
ed4a6d7f
MG
1839
1840int compaction_register_node(struct node *node)
1841{
10fbcf4c 1842 return device_create_file(&node->dev, &dev_attr_compact);
ed4a6d7f
MG
1843}
1844
1845void compaction_unregister_node(struct node *node)
1846{
10fbcf4c 1847 return device_remove_file(&node->dev, &dev_attr_compact);
ed4a6d7f
MG
1848}
1849#endif /* CONFIG_SYSFS && CONFIG_NUMA */
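/*
 * The device attribute above shows up as a write-only file,
 * /sys/devices/system/node/nodeN/compact; writing to it compacts only
 * that node, e.g.:
 *
 *	echo 1 > /sys/devices/system/node/node0/compact
 */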
ff9543fd 1850
698b1b30
VB
1851static inline bool kcompactd_work_requested(pg_data_t *pgdat)
1852{
172400c6 1853 return pgdat->kcompactd_max_order > 0 || kthread_should_stop();
698b1b30
VB
1854}
1855
1856static bool kcompactd_node_suitable(pg_data_t *pgdat)
1857{
1858 int zoneid;
1859 struct zone *zone;
1860 enum zone_type classzone_idx = pgdat->kcompactd_classzone_idx;
1861
6cd9dc3e 1862 for (zoneid = 0; zoneid <= classzone_idx; zoneid++) {
698b1b30
VB
1863 zone = &pgdat->node_zones[zoneid];
1864
1865 if (!populated_zone(zone))
1866 continue;
1867
1868 if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0,
1869 classzone_idx) == COMPACT_CONTINUE)
1870 return true;
1871 }
1872
1873 return false;
1874}
1875
1876static void kcompactd_do_work(pg_data_t *pgdat)
1877{
1878 /*
1879 * With no special task, compact all zones so that a page of the
1880 * requested order is allocatable.
1881 */
1882 int zoneid;
1883 struct zone *zone;
1884 struct compact_control cc = {
1885 .order = pgdat->kcompactd_max_order,
7f354a54
DR
1886 .total_migrate_scanned = 0,
1887 .total_free_scanned = 0,
698b1b30
VB
1888 .classzone_idx = pgdat->kcompactd_classzone_idx,
1889 .mode = MIGRATE_SYNC_LIGHT,
1890 .ignore_skip_hint = true,
73e64c51 1891 .gfp_mask = GFP_KERNEL,
698b1b30
VB
1892
1893 };
698b1b30
VB
1894 trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
1895 cc.classzone_idx);
7f354a54 1896 count_compact_event(KCOMPACTD_WAKE);
698b1b30 1897
6cd9dc3e 1898 for (zoneid = 0; zoneid <= cc.classzone_idx; zoneid++) {
698b1b30
VB
1899 int status;
1900
1901 zone = &pgdat->node_zones[zoneid];
1902 if (!populated_zone(zone))
1903 continue;
1904
1905 if (compaction_deferred(zone, cc.order))
1906 continue;
1907
1908 if (compaction_suitable(zone, cc.order, 0, zoneid) !=
1909 COMPACT_CONTINUE)
1910 continue;
1911
1912 cc.nr_freepages = 0;
1913 cc.nr_migratepages = 0;
7f354a54
DR
1914 cc.total_migrate_scanned = 0;
1915 cc.total_free_scanned = 0;
698b1b30
VB
1916 cc.zone = zone;
1917 INIT_LIST_HEAD(&cc.freepages);
1918 INIT_LIST_HEAD(&cc.migratepages);
1919
172400c6
VB
1920 if (kthread_should_stop())
1921 return;
698b1b30
VB
1922 status = compact_zone(zone, &cc);
1923
7ceb009a 1924 if (status == COMPACT_SUCCESS) {
698b1b30 1925 compaction_defer_reset(zone, cc.order, false);
c8f7de0b 1926 } else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) {
698b1b30
VB
1927 /*
1928 * We use sync migration mode here, so we defer like
1929 * sync direct compaction does.
1930 */
1931 defer_compaction(zone, cc.order);
1932 }
1933
7f354a54
DR
1934 count_compact_events(KCOMPACTD_MIGRATE_SCANNED,
1935 cc.total_migrate_scanned);
1936 count_compact_events(KCOMPACTD_FREE_SCANNED,
1937 cc.total_free_scanned);
1938
698b1b30
VB
1939 VM_BUG_ON(!list_empty(&cc.freepages));
1940 VM_BUG_ON(!list_empty(&cc.migratepages));
1941 }
1942
1943 /*
1944 * Regardless of success, we are done until woken up next. But remember
1945 * the requested order/classzone_idx in case it was higher/tighter than
1946 * our current ones
1947 */
1948 if (pgdat->kcompactd_max_order <= cc.order)
1949 pgdat->kcompactd_max_order = 0;
1950 if (pgdat->kcompactd_classzone_idx >= cc.classzone_idx)
1951 pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
1952}
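/*
 * In short: on each wakeup, kcompactd walks the populated zones up to the
 * recorded classzone_idx and compacts each one with MIGRATE_SYNC_LIGHT,
 * skipping zones where compaction is deferred or not yet suitable, so
 * that a page of kcompactd_max_order becomes allocatable.
 */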
1953
1954void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
1955{
1956 if (!order)
1957 return;
1958
1959 if (pgdat->kcompactd_max_order < order)
1960 pgdat->kcompactd_max_order = order;
1961
46acef04
DB
1962 /*
1963 * Pairs with implicit barrier in wait_event_freezable()
1964 * such that wakeups are not missed in the lockless
1965 * waitqueue_active() call.
1966 */
1967 smp_acquire__after_ctrl_dep();
1968
698b1b30
VB
1969 if (pgdat->kcompactd_classzone_idx > classzone_idx)
1970 pgdat->kcompactd_classzone_idx = classzone_idx;
1971
1972 if (!waitqueue_active(&pgdat->kcompactd_wait))
1973 return;
1974
1975 if (!kcompactd_node_suitable(pgdat))
1976 return;
1977
1978 trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,
1979 classzone_idx);
1980 wake_up_interruptible(&pgdat->kcompactd_wait);
1981}
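/*
 * At this point in the tree, wakeup_kcompactd() is driven from kswapd:
 * kswapd_try_to_sleep() in mm/vmscan.c wakes kcompactd just before kswapd
 * goes back to sleep, so background compaction follows background reclaim
 * for the order and classzone_idx kswapd was originally woken for.
 */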
1982
1983/*
1984 * The background compaction daemon, started as a kernel thread
1985 * from the init process.
1986 */
1987static int kcompactd(void *p)
1988{
1989 pg_data_t *pgdat = (pg_data_t*)p;
1990 struct task_struct *tsk = current;
1991
1992 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
1993
1994 if (!cpumask_empty(cpumask))
1995 set_cpus_allowed_ptr(tsk, cpumask);
1996
1997 set_freezable();
1998
1999 pgdat->kcompactd_max_order = 0;
2000 pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
2001
2002 while (!kthread_should_stop()) {
2003 trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
2004 wait_event_freezable(pgdat->kcompactd_wait,
2005 kcompactd_work_requested(pgdat));
2006
2007 kcompactd_do_work(pgdat);
2008 }
2009
2010 return 0;
2011}
2012
2013/*
2014 * This kcompactd start function will be called by init and node-hot-add.
2015 * On node-hot-add, kcompactd will be moved to the proper cpus if cpus are hot-added.
2016 */
2017int kcompactd_run(int nid)
2018{
2019 pg_data_t *pgdat = NODE_DATA(nid);
2020 int ret = 0;
2021
2022 if (pgdat->kcompactd)
2023 return 0;
2024
2025 pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
2026 if (IS_ERR(pgdat->kcompactd)) {
2027 pr_err("Failed to start kcompactd on node %d\n", nid);
2028 ret = PTR_ERR(pgdat->kcompactd);
2029 pgdat->kcompactd = NULL;
2030 }
2031 return ret;
2032}
2033
2034/*
2035 * Called by memory hotplug when all memory in a node is offlined. Caller must
2036 * hold mem_hotplug_begin/end().
2037 */
2038void kcompactd_stop(int nid)
2039{
2040 struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd;
2041
2042 if (kcompactd) {
2043 kthread_stop(kcompactd);
2044 NODE_DATA(nid)->kcompactd = NULL;
2045 }
2046}
2047
2048/*
2049 * It's optimal to keep each kcompactd thread on the same CPUs as its
2050 * node's memory, but that is not required for correctness. So if the
2051 * last cpu in a node goes away, kcompactd may run anywhere; as soon as
2052 * one of the node's cpus comes back online, restore its cpu binding.
2053 */
e46b1db2 2054static int kcompactd_cpu_online(unsigned int cpu)
698b1b30
VB
2055{
2056 int nid;
2057
e46b1db2
AMG
2058 for_each_node_state(nid, N_MEMORY) {
2059 pg_data_t *pgdat = NODE_DATA(nid);
2060 const struct cpumask *mask;
698b1b30 2061
e46b1db2 2062 mask = cpumask_of_node(pgdat->node_id);
698b1b30 2063
e46b1db2
AMG
2064 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
2065 /* One of our CPUs online: restore mask */
2066 set_cpus_allowed_ptr(pgdat->kcompactd, mask);
698b1b30 2067 }
e46b1db2 2068 return 0;
698b1b30
VB
2069}
2070
2071static int __init kcompactd_init(void)
2072{
2073 int nid;
e46b1db2
AMG
2074 int ret;
2075
2076 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
2077 "mm/compaction:online",
2078 kcompactd_cpu_online, NULL);
2079 if (ret < 0) {
2080 pr_err("kcompactd: failed to register hotplug callbacks.\n");
2081 return ret;
2082 }
698b1b30
VB
2083
2084 for_each_node_state(nid, N_MEMORY)
2085 kcompactd_run(nid);
698b1b30
VB
2086 return 0;
2087}
2088subsys_initcall(kcompactd_init)
2089
ff9543fd 2090#endif /* CONFIG_COMPACTION */