]> git.ipfire.org Git - thirdparty/kernel/linux.git/blame - mm/page_alloc.c
Merge tag 'mmc-v6.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc
[thirdparty/kernel/linux.git] / mm / page_alloc.c
CommitLineData
457c8996 1// SPDX-License-Identifier: GPL-2.0-only
1da177e4
LT
2/*
3 * linux/mm/page_alloc.c
4 *
5 * Manages the free list, the system allocates free pages here.
6 * Note that kmalloc() lives in slab.c
7 *
8 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
9 * Swap reorganised 29.12.95, Stephen Tweedie
10 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
11 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
12 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
13 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
14 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
15 * (lots of bits borrowed from Ingo Molnar & Andrew Morton)
16 */
17
1da177e4
LT
18#include <linux/stddef.h>
19#include <linux/mm.h>
ca79b0c2 20#include <linux/highmem.h>
1da177e4 21#include <linux/interrupt.h>
10ed273f 22#include <linux/jiffies.h>
1da177e4 23#include <linux/compiler.h>
9f158333 24#include <linux/kernel.h>
b8c73fc2 25#include <linux/kasan.h>
b073d7f8 26#include <linux/kmsan.h>
1da177e4
LT
27#include <linux/module.h>
28#include <linux/suspend.h>
a238ab5b 29#include <linux/ratelimit.h>
5a3135c2 30#include <linux/oom.h>
1da177e4
LT
31#include <linux/topology.h>
32#include <linux/sysctl.h>
33#include <linux/cpu.h>
34#include <linux/cpuset.h>
90491d87 35#include <linux/pagevec.h>
bdc8cb98 36#include <linux/memory_hotplug.h>
1da177e4 37#include <linux/nodemask.h>
a6cccdc3 38#include <linux/vmstat.h>
933e312e 39#include <linux/fault-inject.h>
56de7263 40#include <linux/compaction.h>
0d3d062a 41#include <trace/events/kmem.h>
d379f01d 42#include <trace/events/oom.h>
268bb0ce 43#include <linux/prefetch.h>
6e543d57 44#include <linux/mm_inline.h>
f920e413 45#include <linux/mmu_notifier.h>
041d3a8c 46#include <linux/migrate.h>
5b3cc15a 47#include <linux/sched/mm.h>
48c96a36 48#include <linux/page_owner.h>
df4e817b 49#include <linux/page_table_check.h>
4949148a 50#include <linux/memcontrol.h>
42c269c8 51#include <linux/ftrace.h>
d92a8cfc 52#include <linux/lockdep.h>
eb414681 53#include <linux/psi.h>
4aab2be0 54#include <linux/khugepaged.h>
5bf18281 55#include <linux/delayacct.h>
362d37a1 56#include <linux/cacheinfo.h>
dcfe378c 57#include <linux/pgalloc_tag.h>
ac924c60 58#include <asm/div64.h>
1da177e4 59#include "internal.h"
e900a918 60#include "shuffle.h"
36e66c55 61#include "page_reporting.h"
1da177e4 62
f04a5d5d
DH
63/* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
64typedef int __bitwise fpi_t;
65
66/* No special request */
67#define FPI_NONE ((__force fpi_t)0)
68
69/*
70 * Skip free page reporting notification for the (possibly merged) page.
71 * This does not hinder free page reporting from grabbing the page,
72 * reporting it and marking it "reported" - it only skips notifying
73 * the free page reporting infrastructure about a newly freed page. For
74 * example, used when temporarily pulling a page from a freelist and
75 * putting it back unmodified.
76 */
77#define FPI_SKIP_REPORT_NOTIFY ((__force fpi_t)BIT(0))
78
47b6a24a
DH
79/*
80 * Place the (possibly merged) page to the tail of the freelist. Will ignore
81 * page shuffling (relevant code - e.g., memory onlining - is expected to
82 * shuffle the whole zone).
83 *
84 * Note: No code should rely on this flag for correctness - it's purely
85 * to allow for optimizations when handing back either fresh pages
86 * (memory onlining) or untouched pages (page isolation, free page
87 * reporting).
88 */
89#define FPI_TO_TAIL ((__force fpi_t)BIT(1))
90
8c57b687
AS
91/* Free the page without taking locks. Rely on trylock only. */
92#define FPI_TRYLOCK ((__force fpi_t)BIT(2))
93
c8e251fa
CS
94/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
95static DEFINE_MUTEX(pcp_batch_high_lock);
74f44822 96#define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)
c8e251fa 97
4b23a68f
MG
98#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
99/*
100 * On SMP, spin_trylock is sufficient protection.
101 * On PREEMPT_RT, spin_trylock is equivalent on both SMP and UP.
102 */
103#define pcp_trylock_prepare(flags) do { } while (0)
104#define pcp_trylock_finish(flag) do { } while (0)
105#else
106
107/* UP spin_trylock always succeeds so disable IRQs to prevent re-entrancy. */
108#define pcp_trylock_prepare(flags) local_irq_save(flags)
109#define pcp_trylock_finish(flags) local_irq_restore(flags)
110#endif
111
01b44456
MG
112/*
113 * Locking a pcp requires a PCP lookup followed by a spinlock. To avoid
114 * a migration causing the wrong PCP to be locked and remote memory being
115 * potentially allocated, pin the task to the CPU for the lookup+lock.
116 * preempt_disable is used on !RT because it is faster than migrate_disable.
117 * migrate_disable is used on RT because otherwise RT spinlock usage is
118 * interfered with and a high priority task cannot preempt the allocator.
119 */
120#ifndef CONFIG_PREEMPT_RT
121#define pcpu_task_pin() preempt_disable()
122#define pcpu_task_unpin() preempt_enable()
123#else
124#define pcpu_task_pin() migrate_disable()
125#define pcpu_task_unpin() migrate_enable()
126#endif
c8e251fa 127
01b44456
MG
128/*
129 * Generic helper to lookup and a per-cpu variable with an embedded spinlock.
130 * Return value should be used with equivalent unlock helper.
131 */
132#define pcpu_spin_lock(type, member, ptr) \
133({ \
134 type *_ret; \
135 pcpu_task_pin(); \
136 _ret = this_cpu_ptr(ptr); \
137 spin_lock(&_ret->member); \
138 _ret; \
139})
140
57490774 141#define pcpu_spin_trylock(type, member, ptr) \
01b44456
MG
142({ \
143 type *_ret; \
144 pcpu_task_pin(); \
145 _ret = this_cpu_ptr(ptr); \
57490774 146 if (!spin_trylock(&_ret->member)) { \
01b44456
MG
147 pcpu_task_unpin(); \
148 _ret = NULL; \
149 } \
150 _ret; \
151})
152
153#define pcpu_spin_unlock(member, ptr) \
154({ \
155 spin_unlock(&ptr->member); \
156 pcpu_task_unpin(); \
157})
158
01b44456
MG
159/* struct per_cpu_pages specific helpers. */
160#define pcp_spin_lock(ptr) \
161 pcpu_spin_lock(struct per_cpu_pages, lock, ptr)
162
57490774
MG
163#define pcp_spin_trylock(ptr) \
164 pcpu_spin_trylock(struct per_cpu_pages, lock, ptr)
01b44456
MG
165
166#define pcp_spin_unlock(ptr) \
167 pcpu_spin_unlock(lock, ptr)
168
72812019
LS
169#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
170DEFINE_PER_CPU(int, numa_node);
171EXPORT_PER_CPU_SYMBOL(numa_node);
172#endif
173
4518085e
KW
174DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);
175
7aac7898
LS
176#ifdef CONFIG_HAVE_MEMORYLESS_NODES
177/*
178 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
179 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
180 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
181 * defined in <linux/topology.h>.
182 */
183DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */
184EXPORT_PER_CPU_SYMBOL(_numa_mem_);
185#endif
186
8b885f53 187static DEFINE_MUTEX(pcpu_drain_mutex);
bd233f53 188
38addce8 189#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
58bea414 190volatile unsigned long latent_entropy __latent_entropy;
38addce8
ER
191EXPORT_SYMBOL(latent_entropy);
192#endif
193
1da177e4 194/*
13808910 195 * Array of node states.
1da177e4 196 */
13808910
CL
197nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
198 [N_POSSIBLE] = NODE_MASK_ALL,
199 [N_ONLINE] = { { [0] = 1UL } },
200#ifndef CONFIG_NUMA
201 [N_NORMAL_MEMORY] = { { [0] = 1UL } },
202#ifdef CONFIG_HIGHMEM
203 [N_HIGH_MEMORY] = { { [0] = 1UL } },
20b2f52b 204#endif
20b2f52b 205 [N_MEMORY] = { { [0] = 1UL } },
13808910
CL
206 [N_CPU] = { { [0] = 1UL } },
207#endif /* NUMA */
208};
209EXPORT_SYMBOL(node_states);
210
dcce284a 211gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
6471384a 212
d9c23400 213#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
d00181b9 214unsigned int pageblock_order __read_mostly;
d9c23400
MG
215#endif
216
7fef431b
DH
217static void __free_pages_ok(struct page *page, unsigned int order,
218 fpi_t fpi_flags);
a226f6c8 219
1da177e4
LT
220/*
221 * results with 256, 32 in the lowmem_reserve sysctl:
222 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
223 * 1G machine -> (16M dma, 784M normal, 224M high)
224 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
225 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
84109e15 226 * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
a2f1b424
AK
227 *
228 * TBD: should special case ZONE_DMA32 machines here - in those we normally
229 * don't need any ZONE_NORMAL reservation
1da177e4 230 */
62069aac 231static int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
4b51d669 232#ifdef CONFIG_ZONE_DMA
d3cda233 233 [ZONE_DMA] = 256,
4b51d669 234#endif
fb0e7942 235#ifdef CONFIG_ZONE_DMA32
d3cda233 236 [ZONE_DMA32] = 256,
fb0e7942 237#endif
d3cda233 238 [ZONE_NORMAL] = 32,
e53ef38d 239#ifdef CONFIG_HIGHMEM
d3cda233 240 [ZONE_HIGHMEM] = 0,
e53ef38d 241#endif
d3cda233 242 [ZONE_MOVABLE] = 0,
2f1b6248 243};
1da177e4 244
9420f89d 245char * const zone_names[MAX_NR_ZONES] = {
4b51d669 246#ifdef CONFIG_ZONE_DMA
2f1b6248 247 "DMA",
4b51d669 248#endif
fb0e7942 249#ifdef CONFIG_ZONE_DMA32
2f1b6248 250 "DMA32",
fb0e7942 251#endif
2f1b6248 252 "Normal",
e53ef38d 253#ifdef CONFIG_HIGHMEM
2a1e274a 254 "HighMem",
e53ef38d 255#endif
2a1e274a 256 "Movable",
033fbae9
DW
257#ifdef CONFIG_ZONE_DEVICE
258 "Device",
259#endif
2f1b6248
CL
260};
261
c999fbd3 262const char * const migratetype_names[MIGRATE_TYPES] = {
60f30350
VB
263 "Unmovable",
264 "Movable",
265 "Reclaimable",
266 "HighAtomic",
267#ifdef CONFIG_CMA
268 "CMA",
269#endif
270#ifdef CONFIG_MEMORY_ISOLATION
271 "Isolate",
272#endif
273};
274
1da177e4 275int min_free_kbytes = 1024;
42aa83cb 276int user_min_free_kbytes = -1;
e95d372c
KW
277static int watermark_boost_factor __read_mostly = 15000;
278static int watermark_scale_factor = 10;
a211c655 279int defrag_mode;
0ee332c1
TH
280
281/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
282int movable_zone;
283EXPORT_SYMBOL(movable_zone);
c713216d 284
418508c1 285#if MAX_NUMNODES > 1
b9726c26 286unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
ce0725f7 287unsigned int nr_online_nodes __read_mostly = 1;
418508c1 288EXPORT_SYMBOL(nr_node_ids);
62bc62a8 289EXPORT_SYMBOL(nr_online_nodes);
418508c1
MS
290#endif
291
dcdfdd40 292static bool page_contains_unaccepted(struct page *page, unsigned int order);
23fa022a
KS
293static bool cond_accept_memory(struct zone *zone, unsigned int order,
294 int alloc_flags);
dcdfdd40
KS
295static bool __free_unaccepted(struct page *page);
296
9ef9acb0
MG
297int page_group_by_mobility_disabled __read_mostly;
298
3a80a7fa 299#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
3c0c12cc
WL
300/*
301 * During boot we initialize deferred pages on-demand, as needed, but once
302 * page_alloc_init_late() has finished, the deferred pages are all initialized,
303 * and we can permanently disable that path.
304 */
9420f89d 305DEFINE_STATIC_KEY_TRUE(deferred_pages);
3c0c12cc 306
94ae8b83 307static inline bool deferred_pages_enabled(void)
3c0c12cc 308{
94ae8b83 309 return static_branch_unlikely(&deferred_pages);
3c0c12cc
WL
310}
311
3a80a7fa 312/*
9420f89d
MRI
313 * deferred_grow_zone() is __init, but it is called from
314 * get_page_from_freelist() during early boot until deferred_pages permanently
315 * disables this call. This is why we have refdata wrapper to avoid warning,
316 * and to ensure that the function body gets unloaded.
3a80a7fa 317 */
9420f89d
MRI
318static bool __ref
319_deferred_grow_zone(struct zone *zone, unsigned int order)
3a80a7fa 320{
96a5c186 321 return deferred_grow_zone(zone, order);
3a80a7fa
MG
322}
323#else
94ae8b83 324static inline bool deferred_pages_enabled(void)
2c335680 325{
94ae8b83 326 return false;
2c335680 327}
3a80b822
KS
328
329static inline bool _deferred_grow_zone(struct zone *zone, unsigned int order)
330{
331 return false;
332}
9420f89d 333#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
3a80a7fa 334
0b423ca2 335/* Return a pointer to the bitmap storing bits affecting a block of pages */
ca891f41 336static inline unsigned long *get_pageblock_bitmap(const struct page *page,
0b423ca2
MG
337 unsigned long pfn)
338{
339#ifdef CONFIG_SPARSEMEM
f1eca35a 340 return section_to_usemap(__pfn_to_section(pfn));
0b423ca2
MG
341#else
342 return page_zone(page)->pageblock_flags;
343#endif /* CONFIG_SPARSEMEM */
344}
345
ca891f41 346static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
0b423ca2
MG
347{
348#ifdef CONFIG_SPARSEMEM
349 pfn &= (PAGES_PER_SECTION-1);
0b423ca2 350#else
4f9bc69a 351 pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn);
0b423ca2 352#endif /* CONFIG_SPARSEMEM */
399b795b 353 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
0b423ca2
MG
354}
355
a04d12c2
KS
356/**
357 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
358 * @page: The page within the block of interest
359 * @pfn: The target page frame number
360 * @mask: mask of bits that the caller is interested in
361 *
362 * Return: pageblock_bits flags
363 */
364unsigned long get_pfnblock_flags_mask(const struct page *page,
365 unsigned long pfn, unsigned long mask)
0b423ca2
MG
366{
367 unsigned long *bitmap;
368 unsigned long bitidx, word_bitidx;
369 unsigned long word;
370
371 bitmap = get_pageblock_bitmap(page, pfn);
372 bitidx = pfn_to_bitidx(page, pfn);
373 word_bitidx = bitidx / BITS_PER_LONG;
374 bitidx &= (BITS_PER_LONG-1);
1c563432
MK
375 /*
376 * This races, without locks, with set_pfnblock_flags_mask(). Ensure
377 * a consistent read of the memory array, so that results, even though
378 * racy, are not corrupted.
379 */
380 word = READ_ONCE(bitmap[word_bitidx]);
d93d5ab9 381 return (word >> bitidx) & mask;
0b423ca2
MG
382}
383
ca891f41
MWO
384static __always_inline int get_pfnblock_migratetype(const struct page *page,
385 unsigned long pfn)
0b423ca2 386{
a04d12c2 387 return get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
0b423ca2
MG
388}
389
390/**
391 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
392 * @page: The page within the block of interest
393 * @flags: The flags to set
394 * @pfn: The target page frame number
0b423ca2
MG
395 * @mask: mask of bits that the caller is interested in
396 */
397void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
398 unsigned long pfn,
0b423ca2
MG
399 unsigned long mask)
400{
401 unsigned long *bitmap;
402 unsigned long bitidx, word_bitidx;
04ec0061 403 unsigned long word;
0b423ca2
MG
404
405 BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
125b860b 406 BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));
0b423ca2
MG
407
408 bitmap = get_pageblock_bitmap(page, pfn);
409 bitidx = pfn_to_bitidx(page, pfn);
410 word_bitidx = bitidx / BITS_PER_LONG;
411 bitidx &= (BITS_PER_LONG-1);
412
413 VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);
414
d93d5ab9
WY
415 mask <<= bitidx;
416 flags <<= bitidx;
0b423ca2
MG
417
418 word = READ_ONCE(bitmap[word_bitidx]);
04ec0061
UB
419 do {
420 } while (!try_cmpxchg(&bitmap[word_bitidx], &word, (word & ~mask) | flags));
0b423ca2 421}
3a80a7fa 422
ee6f509c 423void set_pageblock_migratetype(struct page *page, int migratetype)
b2a0ac88 424{
5d0f3f72
KM
425 if (unlikely(page_group_by_mobility_disabled &&
426 migratetype < MIGRATE_PCPTYPES))
49255c61
MG
427 migratetype = MIGRATE_UNMOVABLE;
428
d93d5ab9 429 set_pfnblock_flags_mask(page, (unsigned long)migratetype,
535b81e2 430 page_to_pfn(page), MIGRATETYPE_MASK);
b2a0ac88
MG
431}
432
13e7444b 433#ifdef CONFIG_DEBUG_VM
c6a57e19 434static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
1da177e4 435{
82d9b8c8 436 int ret;
bdc8cb98
DH
437 unsigned seq;
438 unsigned long pfn = page_to_pfn(page);
b5e6a5a2 439 unsigned long sp, start_pfn;
c6a57e19 440
bdc8cb98
DH
441 do {
442 seq = zone_span_seqbegin(zone);
b5e6a5a2
CS
443 start_pfn = zone->zone_start_pfn;
444 sp = zone->spanned_pages;
82d9b8c8 445 ret = !zone_spans_pfn(zone, pfn);
bdc8cb98
DH
446 } while (zone_span_seqretry(zone, seq));
447
b5e6a5a2 448 if (ret)
613813e8
DH
449 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
450 pfn, zone_to_nid(zone), zone->name,
451 start_pfn, start_pfn + sp);
b5e6a5a2 452
bdc8cb98 453 return ret;
c6a57e19
DH
454}
455
c6a57e19
DH
456/*
457 * Temporary debugging check for pages not lying within a given zone.
458 */
5bb14214 459static bool __maybe_unused bad_range(struct zone *zone, struct page *page)
c6a57e19
DH
460{
461 if (page_outside_zone_boundaries(zone, page))
5bb14214 462 return true;
5b855aa3 463 if (zone != page_zone(page))
5bb14214 464 return true;
c6a57e19 465
5bb14214 466 return false;
1da177e4 467}
13e7444b 468#else
5bb14214 469static inline bool __maybe_unused bad_range(struct zone *zone, struct page *page)
13e7444b 470{
5bb14214 471 return false;
13e7444b
NP
472}
473#endif
474
82a3241a 475static void bad_page(struct page *page, const char *reason)
1da177e4 476{
d936cf9b
HD
477 static unsigned long resume;
478 static unsigned long nr_shown;
479 static unsigned long nr_unshown;
480
481 /*
482 * Allow a burst of 60 reports, then keep quiet for that minute;
483 * or allow a steady drip of one report per second.
484 */
485 if (nr_shown == 60) {
486 if (time_before(jiffies, resume)) {
487 nr_unshown++;
488 goto out;
489 }
490 if (nr_unshown) {
ff8e8116 491 pr_alert(
1e9e6365 492 "BUG: Bad page state: %lu messages suppressed\n",
d936cf9b
HD
493 nr_unshown);
494 nr_unshown = 0;
495 }
496 nr_shown = 0;
497 }
498 if (nr_shown++ == 0)
499 resume = jiffies + 60 * HZ;
500
ff8e8116 501 pr_alert("BUG: Bad page state in process %s pfn:%05lx\n",
3dc14741 502 current->comm, page_to_pfn(page));
d2f07ec0 503 dump_page(page, reason);
3dc14741 504
4f31888c 505 print_modules();
1da177e4 506 dump_stack();
d936cf9b 507out:
8cc3b392 508 /* Leave bad fields for debug, except PageBuddy could make trouble */
e4d970ac
DH
509 if (PageBuddy(page))
510 __ClearPageBuddy(page);
373d4d09 511 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
1da177e4
LT
512}
513
44042b44
MG
514static inline unsigned int order_to_pindex(int migratetype, int order)
515{
bf14ed81 516
44042b44 517#ifdef CONFIG_TRANSPARENT_HUGEPAGE
0a1e082b 518 bool movable;
44042b44 519 if (order > PAGE_ALLOC_COSTLY_ORDER) {
6303d1c5 520 VM_BUG_ON(order != HPAGE_PMD_ORDER);
bf14ed81 521
522 movable = migratetype == MIGRATE_MOVABLE;
523
524 return NR_LOWORDER_PCP_LISTS + movable;
44042b44
MG
525 }
526#else
527 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
528#endif
529
c1dc69e6 530 return (MIGRATE_PCPTYPES * order) + migratetype;
44042b44
MG
531}
532
533static inline int pindex_to_order(unsigned int pindex)
534{
535 int order = pindex / MIGRATE_PCPTYPES;
536
537#ifdef CONFIG_TRANSPARENT_HUGEPAGE
bf14ed81 538 if (pindex >= NR_LOWORDER_PCP_LISTS)
6303d1c5 539 order = HPAGE_PMD_ORDER;
44042b44
MG
540#else
541 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
542#endif
543
544 return order;
545}
546
547static inline bool pcp_allowed_order(unsigned int order)
548{
549 if (order <= PAGE_ALLOC_COSTLY_ORDER)
550 return true;
551#ifdef CONFIG_TRANSPARENT_HUGEPAGE
6303d1c5 552 if (order == HPAGE_PMD_ORDER)
44042b44
MG
553 return true;
554#endif
555 return false;
556}
557
1da177e4
LT
558/*
559 * Higher-order pages are called "compound pages". They are structured thusly:
560 *
1d798ca3 561 * The first PAGE_SIZE page is called the "head page" and have PG_head set.
1da177e4 562 *
1d798ca3
KS
563 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
564 * in bit 0 of page->compound_head. The rest of bits is pointer to head page.
1da177e4 565 *
1d798ca3 566 * The first tail page's ->compound_order holds the order of allocation.
41d78ba5 567 * This usage means that zero-order pages may not be compound.
1da177e4 568 */
d98c7a09 569
d00181b9 570void prep_compound_page(struct page *page, unsigned int order)
18229df5
AW
571{
572 int i;
573 int nr_pages = 1 << order;
574
18229df5 575 __SetPageHead(page);
5b24eeef
JM
576 for (i = 1; i < nr_pages; i++)
577 prep_compound_tail(page, i);
1378a5ee 578
5b24eeef 579 prep_compound_head(page, order);
18229df5
AW
580}
581
ab130f91 582static inline void set_buddy_order(struct page *page, unsigned int order)
6aa3001b 583{
4c21e2f2 584 set_page_private(page, order);
676165a8 585 __SetPageBuddy(page);
1da177e4
LT
586}
587
5e1f0f09
MG
588#ifdef CONFIG_COMPACTION
589static inline struct capture_control *task_capc(struct zone *zone)
590{
591 struct capture_control *capc = current->capture_control;
592
deba0487 593 return unlikely(capc) &&
5e1f0f09
MG
594 !(current->flags & PF_KTHREAD) &&
595 !capc->page &&
deba0487 596 capc->cc->zone == zone ? capc : NULL;
5e1f0f09
MG
597}
598
599static inline bool
600compaction_capture(struct capture_control *capc, struct page *page,
601 int order, int migratetype)
602{
603 if (!capc || order != capc->cc->order)
604 return false;
605
606 /* Do not accidentally pollute CMA or isolated regions*/
607 if (is_migrate_cma(migratetype) ||
608 is_migrate_isolate(migratetype))
609 return false;
610
611 /*
231f8c71
BW
612 * Do not let lower order allocations pollute a movable pageblock
613 * unless compaction is also requesting movable pages.
5e1f0f09
MG
614 * This might let an unmovable request use a reclaimable pageblock
615 * and vice-versa but no more than normal fallback logic which can
616 * have trouble finding a high-order free page.
617 */
231f8c71
BW
618 if (order < pageblock_order && migratetype == MIGRATE_MOVABLE &&
619 capc->cc->migratetype != MIGRATE_MOVABLE)
5e1f0f09
MG
620 return false;
621
f46012c0
JW
622 if (migratetype != capc->cc->migratetype)
623 trace_mm_page_alloc_extfrag(page, capc->cc->order, order,
624 capc->cc->migratetype, migratetype);
625
5e1f0f09
MG
626 capc->page = page;
627 return true;
628}
629
630#else
631static inline struct capture_control *task_capc(struct zone *zone)
632{
633 return NULL;
634}
635
636static inline bool
637compaction_capture(struct capture_control *capc, struct page *page,
638 int order, int migratetype)
639{
640 return false;
641}
642#endif /* CONFIG_COMPACTION */
643
e0932b6c
JW
644static inline void account_freepages(struct zone *zone, int nr_pages,
645 int migratetype)
6ab01363 646{
c928807f
YZ
647 lockdep_assert_held(&zone->lock);
648
e0932b6c
JW
649 if (is_migrate_isolate(migratetype))
650 return;
6ab01363 651
e0932b6c
JW
652 __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
653
654 if (is_migrate_cma(migratetype))
655 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
c928807f
YZ
656 else if (is_migrate_highatomic(migratetype))
657 WRITE_ONCE(zone->nr_free_highatomic,
658 zone->nr_free_highatomic + nr_pages);
6ab01363
AD
659}
660
661/* Used for pages not on another list */
e0932b6c
JW
662static inline void __add_to_free_list(struct page *page, struct zone *zone,
663 unsigned int order, int migratetype,
664 bool tail)
6ab01363
AD
665{
666 struct free_area *area = &zone->free_area[order];
a211c655 667 int nr_pages = 1 << order;
6ab01363 668
e0932b6c
JW
669 VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype,
670 "page type is %lu, passed migratetype is %d (nr=%d)\n",
a211c655 671 get_pageblock_migratetype(page), migratetype, nr_pages);
e0932b6c
JW
672
673 if (tail)
674 list_add_tail(&page->buddy_list, &area->free_list[migratetype]);
675 else
676 list_add(&page->buddy_list, &area->free_list[migratetype]);
6ab01363 677 area->nr_free++;
a211c655
JW
678
679 if (order >= pageblock_order && !is_migrate_isolate(migratetype))
680 __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, nr_pages);
6ab01363
AD
681}
682
293ffa5e
DH
683/*
684 * Used for pages which are on another list. Move the pages to the tail
685 * of the list - so the moved pages won't immediately be considered for
686 * allocation again (e.g., optimization for memory onlining).
687 */
6ab01363 688static inline void move_to_free_list(struct page *page, struct zone *zone,
e0932b6c 689 unsigned int order, int old_mt, int new_mt)
6ab01363
AD
690{
691 struct free_area *area = &zone->free_area[order];
a211c655 692 int nr_pages = 1 << order;
6ab01363 693
e0932b6c
JW
694 /* Free page moving can fail, so it happens before the type update */
695 VM_WARN_ONCE(get_pageblock_migratetype(page) != old_mt,
696 "page type is %lu, passed migratetype is %d (nr=%d)\n",
a211c655 697 get_pageblock_migratetype(page), old_mt, nr_pages);
e0932b6c
JW
698
699 list_move_tail(&page->buddy_list, &area->free_list[new_mt]);
700
a211c655
JW
701 account_freepages(zone, -nr_pages, old_mt);
702 account_freepages(zone, nr_pages, new_mt);
703
704 if (order >= pageblock_order &&
705 is_migrate_isolate(old_mt) != is_migrate_isolate(new_mt)) {
706 if (!is_migrate_isolate(old_mt))
707 nr_pages = -nr_pages;
708 __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, nr_pages);
709 }
6ab01363
AD
710}
711
e0932b6c
JW
712static inline void __del_page_from_free_list(struct page *page, struct zone *zone,
713 unsigned int order, int migratetype)
6ab01363 714{
a211c655
JW
715 int nr_pages = 1 << order;
716
e0932b6c
JW
717 VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype,
718 "page type is %lu, passed migratetype is %d (nr=%d)\n",
a211c655 719 get_pageblock_migratetype(page), migratetype, nr_pages);
e0932b6c 720
36e66c55
AD
721 /* clear reported state and update reported page count */
722 if (page_reported(page))
723 __ClearPageReported(page);
724
bf75f200 725 list_del(&page->buddy_list);
6ab01363
AD
726 __ClearPageBuddy(page);
727 set_page_private(page, 0);
728 zone->free_area[order].nr_free--;
a211c655
JW
729
730 if (order >= pageblock_order && !is_migrate_isolate(migratetype))
731 __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, -nr_pages);
6ab01363
AD
732}
733
e0932b6c
JW
734static inline void del_page_from_free_list(struct page *page, struct zone *zone,
735 unsigned int order, int migratetype)
736{
737 __del_page_from_free_list(page, zone, order, migratetype);
738 account_freepages(zone, -(1 << order), migratetype);
739}
740
5d671eb4
MRI
741static inline struct page *get_page_from_free_area(struct free_area *area,
742 int migratetype)
743{
744 return list_first_entry_or_null(&area->free_list[migratetype],
1bf61092 745 struct page, buddy_list);
5d671eb4
MRI
746}
747
a2129f24 748/*
08af2c12
WY
749 * If this is less than the 2nd largest possible page, check if the buddy
750 * of the next-higher order is free. If it is, it's possible
a2129f24
AD
751 * that pages are being freed that will coalesce soon. In case,
752 * that is happening, add the free page to the tail of the list
753 * so it's less likely to be used soon and more likely to be merged
08af2c12 754 * as a 2-level higher order page
a2129f24
AD
755 */
756static inline bool
757buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
758 struct page *page, unsigned int order)
759{
8170ac47
ZY
760 unsigned long higher_page_pfn;
761 struct page *higher_page;
a2129f24 762
5e0a760b 763 if (order >= MAX_PAGE_ORDER - 1)
a2129f24
AD
764 return false;
765
8170ac47
ZY
766 higher_page_pfn = buddy_pfn & pfn;
767 higher_page = page + (higher_page_pfn - pfn);
a2129f24 768
8170ac47
ZY
769 return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1,
770 NULL) != NULL;
a2129f24
AD
771}
772
1da177e4
LT
773/*
774 * Freeing function for a buddy system allocator.
775 *
776 * The concept of a buddy system is to maintain direct-mapped table
777 * (containing bit values) for memory blocks of various "orders".
778 * The bottom level table contains the map for the smallest allocatable
779 * units of memory (here, pages), and each level above it describes
780 * pairs of units from the levels below, hence, "buddies".
781 * At a high level, all that happens here is marking the table entry
782 * at the bottom level available, and propagating the changes upward
783 * as necessary, plus some accounting needed to play nicely with other
784 * parts of the VM system.
785 * At each level, we keep a list of pages, which are heads of continuous
6e292b9b
MW
786 * free pages of length of (1 << order) and marked with PageBuddy.
787 * Page's order is recorded in page_private(page) field.
1da177e4 788 * So when we are allocating or freeing one, we can derive the state of the
5f63b720
MN
789 * other. That is, if we allocate a small block, and both were
790 * free, the remainder of the region must be split into blocks.
1da177e4 791 * If a block is freed, and its buddy is also free, then this
5f63b720 792 * triggers coalescing into a block of larger size.
1da177e4 793 *
6d49e352 794 * -- nyc
1da177e4
LT
795 */
796
48db57f8 797static inline void __free_one_page(struct page *page,
dc4b0caf 798 unsigned long pfn,
ed0ae21d 799 struct zone *zone, unsigned int order,
f04a5d5d 800 int migratetype, fpi_t fpi_flags)
1da177e4 801{
a2129f24 802 struct capture_control *capc = task_capc(zone);
dae37a5d 803 unsigned long buddy_pfn = 0;
a2129f24 804 unsigned long combined_pfn;
a2129f24
AD
805 struct page *buddy;
806 bool to_tail;
d9dddbf5 807
d29bb978 808 VM_BUG_ON(!zone_is_initialized(zone));
6e9f0d58 809 VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
1da177e4 810
ed0ae21d 811 VM_BUG_ON(migratetype == -1);
76741e77 812 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
309381fe 813 VM_BUG_ON_PAGE(bad_range(zone, page), page);
1da177e4 814
e0932b6c
JW
815 account_freepages(zone, 1 << order, migratetype);
816
5e0a760b 817 while (order < MAX_PAGE_ORDER) {
e0932b6c
JW
818 int buddy_mt = migratetype;
819
5e1f0f09 820 if (compaction_capture(capc, page, order, migratetype)) {
e0932b6c 821 account_freepages(zone, -(1 << order), migratetype);
5e1f0f09
MG
822 return;
823 }
13ad59df 824
8170ac47
ZY
825 buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
826 if (!buddy)
d9dddbf5 827 goto done_merging;
bb0e28eb
ZY
828
829 if (unlikely(order >= pageblock_order)) {
830 /*
831 * We want to prevent merge between freepages on pageblock
832 * without fallbacks and normal pageblock. Without this,
833 * pageblock isolation could cause incorrect freepage or CMA
834 * accounting or HIGHATOMIC accounting.
835 */
e0932b6c 836 buddy_mt = get_pfnblock_migratetype(buddy, buddy_pfn);
bb0e28eb 837
e0932b6c
JW
838 if (migratetype != buddy_mt &&
839 (!migratetype_is_mergeable(migratetype) ||
840 !migratetype_is_mergeable(buddy_mt)))
841 goto done_merging;
bb0e28eb
ZY
842 }
843
c0a32fc5
SG
844 /*
845 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
846 * merge with it and move up one order.
847 */
b03641af 848 if (page_is_guard(buddy))
e0932b6c 849 clear_page_guard(zone, buddy, order);
b03641af 850 else
e0932b6c
JW
851 __del_page_from_free_list(buddy, zone, order, buddy_mt);
852
853 if (unlikely(buddy_mt != migratetype)) {
854 /*
855 * Match buddy type. This ensures that an
856 * expand() down the line puts the sub-blocks
857 * on the right freelists.
858 */
859 set_pageblock_migratetype(buddy, migratetype);
860 }
861
76741e77
VB
862 combined_pfn = buddy_pfn & pfn;
863 page = page + (combined_pfn - pfn);
864 pfn = combined_pfn;
1da177e4
LT
865 order++;
866 }
d9dddbf5
VB
867
868done_merging:
ab130f91 869 set_buddy_order(page, order);
6dda9d55 870
47b6a24a
DH
871 if (fpi_flags & FPI_TO_TAIL)
872 to_tail = true;
873 else if (is_shuffle_order(order))
a2129f24 874 to_tail = shuffle_pick_tail();
97500a4a 875 else
a2129f24 876 to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);
97500a4a 877
e0932b6c 878 __add_to_free_list(page, zone, order, migratetype, to_tail);
36e66c55
AD
879
880 /* Notify page reporting subsystem of freed page */
f04a5d5d 881 if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY))
36e66c55 882 page_reporting_notify_free(order);
1da177e4
LT
883}
884
7bfec6f4
MG
885/*
886 * A bad page could be due to a number of fields. Instead of multiple branches,
887 * try and check multiple fields with one check. The caller must do a detailed
888 * check if necessary.
889 */
890static inline bool page_expected_state(struct page *page,
891 unsigned long check_flags)
892{
893 if (unlikely(atomic_read(&page->_mapcount) != -1))
894 return false;
895
896 if (unlikely((unsigned long)page->mapping |
897 page_ref_count(page) |
898#ifdef CONFIG_MEMCG
48060834 899 page->memcg_data |
dba1b8a7 900#endif
cd3c9316 901 page_pool_page_is_pp(page) |
7bfec6f4
MG
902 (page->flags & check_flags)))
903 return false;
904
905 return true;
906}
907
58b7f119 908static const char *page_bad_reason(struct page *page, unsigned long flags)
1da177e4 909{
82a3241a 910 const char *bad_reason = NULL;
f0b791a3 911
53f9263b 912 if (unlikely(atomic_read(&page->_mapcount) != -1))
f0b791a3
DH
913 bad_reason = "nonzero mapcount";
914 if (unlikely(page->mapping != NULL))
915 bad_reason = "non-NULL mapping";
fe896d18 916 if (unlikely(page_ref_count(page) != 0))
0139aa7b 917 bad_reason = "nonzero _refcount";
58b7f119
WY
918 if (unlikely(page->flags & flags)) {
919 if (flags == PAGE_FLAGS_CHECK_AT_PREP)
920 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
921 else
922 bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
f0b791a3 923 }
9edad6ea 924#ifdef CONFIG_MEMCG
48060834 925 if (unlikely(page->memcg_data))
9edad6ea 926 bad_reason = "page still charged to cgroup";
dba1b8a7 927#endif
cd3c9316 928 if (unlikely(page_pool_page_is_pp(page)))
dba1b8a7 929 bad_reason = "page_pool leak";
58b7f119
WY
930 return bad_reason;
931}
932
a8368cd8 933static inline bool free_page_is_bad(struct page *page)
bb552ac6 934{
da838d4f 935 if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
a8368cd8 936 return false;
bb552ac6
MG
937
938 /* Something has gone sideways, find it */
3a531a99 939 bad_page(page, page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
a8368cd8 940 return true;
1da177e4
LT
941}
942
ecbb490d
KW
943static inline bool is_check_pages_enabled(void)
944{
945 return static_branch_unlikely(&check_pages_enabled);
946}
947
8666925c 948static int free_tail_page_prepare(struct page *head_page, struct page *page)
4db7548c 949{
94688e8e 950 struct folio *folio = (struct folio *)head_page;
4db7548c
MG
951 int ret = 1;
952
953 /*
954 * We rely page->lru.next never has bit 0 set, unless the page
955 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
956 */
957 BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
958
ecbb490d 959 if (!is_check_pages_enabled()) {
4db7548c
MG
960 ret = 0;
961 goto out;
962 }
963 switch (page - head_page) {
964 case 1:
cb67f428 965 /* the first tail page: these may be in place of ->mapping */
05c5323b
DH
966 if (unlikely(folio_large_mapcount(folio))) {
967 bad_page(page, "nonzero large_mapcount");
968 goto out;
969 }
74949222
DH
970 if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT) &&
971 unlikely(atomic_read(&folio->_nr_pages_mapped))) {
65a689f3 972 bad_page(page, "nonzero nr_pages_mapped");
cb67f428
HD
973 goto out;
974 }
6af8cb80
DH
975 if (IS_ENABLED(CONFIG_MM_ID)) {
976 if (unlikely(folio->_mm_id_mapcount[0] != -1)) {
977 bad_page(page, "nonzero mm mapcount 0");
978 goto out;
979 }
980 if (unlikely(folio->_mm_id_mapcount[1] != -1)) {
981 bad_page(page, "nonzero mm mapcount 1");
982 goto out;
983 }
984 }
31a31da8 985 if (IS_ENABLED(CONFIG_64BIT)) {
845d2be6
DH
986 if (unlikely(atomic_read(&folio->_entire_mapcount) + 1)) {
987 bad_page(page, "nonzero entire_mapcount");
988 goto out;
989 }
31a31da8
DH
990 if (unlikely(atomic_read(&folio->_pincount))) {
991 bad_page(page, "nonzero pincount");
992 goto out;
993 }
cb67f428 994 }
4db7548c
MG
995 break;
996 case 2:
b7b098cf 997 /* the second tail page: deferred_list overlaps ->mapping */
e66f3185
HD
998 if (unlikely(!list_empty(&folio->_deferred_list))) {
999 bad_page(page, "on deferred list");
b7b098cf
MWO
1000 goto out;
1001 }
31a31da8 1002 if (!IS_ENABLED(CONFIG_64BIT)) {
845d2be6
DH
1003 if (unlikely(atomic_read(&folio->_entire_mapcount) + 1)) {
1004 bad_page(page, "nonzero entire_mapcount");
1005 goto out;
1006 }
31a31da8
DH
1007 if (unlikely(atomic_read(&folio->_pincount))) {
1008 bad_page(page, "nonzero pincount");
1009 goto out;
1010 }
1011 }
4db7548c 1012 break;
4eeec8c8
DH
1013 case 3:
1014 /* the third tail page: hugetlb specifics overlap ->mappings */
1015 if (IS_ENABLED(CONFIG_HUGETLB_PAGE))
1016 break;
1017 fallthrough;
4db7548c
MG
1018 default:
1019 if (page->mapping != TAIL_MAPPING) {
82a3241a 1020 bad_page(page, "corrupted mapping in tail page");
4db7548c
MG
1021 goto out;
1022 }
1023 break;
1024 }
1025 if (unlikely(!PageTail(page))) {
82a3241a 1026 bad_page(page, "PageTail not set");
4db7548c
MG
1027 goto out;
1028 }
1029 if (unlikely(compound_head(page) != head_page)) {
82a3241a 1030 bad_page(page, "compound_head not consistent");
4db7548c
MG
1031 goto out;
1032 }
1033 ret = 0;
1034out:
1035 page->mapping = NULL;
1036 clear_compound_head(page);
1037 return ret;
1038}
1039
94ae8b83
AK
1040/*
1041 * Skip KASAN memory poisoning when either:
1042 *
0a54864f
PC
1043 * 1. For generic KASAN: deferred memory initialization has not yet completed.
1044 * Tag-based KASAN modes skip pages freed via deferred memory initialization
1045 * using page tags instead (see below).
1046 * 2. For tag-based KASAN modes: the page has a match-all KASAN tag, indicating
1047 * that error detection is disabled for accesses via the page address.
1048 *
1049 * Pages will have match-all tags in the following circumstances:
1050 *
1051 * 1. Pages are being initialized for the first time, including during deferred
1052 * memory init; see the call to page_kasan_tag_reset in __init_single_page.
1053 * 2. The allocation was not unpoisoned due to __GFP_SKIP_KASAN, with the
1054 * exception of pages unpoisoned by kasan_unpoison_vmalloc.
1055 * 3. The allocation was excluded from being checked due to sampling,
44383cef 1056 * see the call to kasan_unpoison_pages.
94ae8b83
AK
1057 *
1058 * Poisoning pages during deferred memory init will greatly lengthen the
1059 * process and cause problem in large memory systems as the deferred pages
1060 * initialization is done with interrupt disabled.
1061 *
1062 * Assuming that there will be no reference to those newly initialized
1063 * pages before they are ever allocated, this should have no effect on
1064 * KASAN memory tracking as the poison will be properly inserted at page
1065 * allocation time. The only corner case is when pages are allocated by
1066 * on-demand allocation and then freed again before the deferred pages
1067 * initialization is done, but this is not likely to happen.
1068 */
5267fe5d 1069static inline bool should_skip_kasan_poison(struct page *page)
94ae8b83 1070{
0a54864f
PC
1071 if (IS_ENABLED(CONFIG_KASAN_GENERIC))
1072 return deferred_pages_enabled();
1073
5cb6674b 1074 return page_kasan_tag(page) == KASAN_TAG_KERNEL;
94ae8b83
AK
1075}
1076
384a746b 1077static void kernel_init_pages(struct page *page, int numpages)
6471384a
AP
1078{
1079 int i;
1080
9e15afa5
QC
1081 /* s390's use of memset() could override KASAN redzones. */
1082 kasan_disable_current();
d9da8f6c
AK
1083 for (i = 0; i < numpages; i++)
1084 clear_highpage_kasan_tagged(page + i);
9e15afa5 1085 kasan_enable_current();
6471384a
AP
1086}
1087
93d5440e
SB
1088#ifdef CONFIG_MEM_ALLOC_PROFILING
1089
1090/* Should be called only if mem_alloc_profiling_enabled() */
1091void __clear_page_tag_ref(struct page *page)
1092{
1093 union pgtag_ref_handle handle;
1094 union codetag_ref ref;
1095
1096 if (get_page_tag_ref(page, &ref, &handle)) {
1097 set_codetag_empty(&ref);
1098 update_page_tag_ref(handle, &ref);
1099 put_page_tag_ref(handle);
1100 }
1101}
1102
1103/* Should be called only if mem_alloc_profiling_enabled() */
1104static noinline
1105void __pgalloc_tag_add(struct page *page, struct task_struct *task,
1106 unsigned int nr)
1107{
1108 union pgtag_ref_handle handle;
1109 union codetag_ref ref;
1110
1111 if (get_page_tag_ref(page, &ref, &handle)) {
1112 alloc_tag_add(&ref, task->alloc_tag, PAGE_SIZE * nr);
1113 update_page_tag_ref(handle, &ref);
1114 put_page_tag_ref(handle);
1115 }
1116}
1117
1118static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
1119 unsigned int nr)
1120{
1121 if (mem_alloc_profiling_enabled())
1122 __pgalloc_tag_add(page, task, nr);
1123}
1124
1125/* Should be called only if mem_alloc_profiling_enabled() */
1126static noinline
1127void __pgalloc_tag_sub(struct page *page, unsigned int nr)
1128{
1129 union pgtag_ref_handle handle;
1130 union codetag_ref ref;
1131
1132 if (get_page_tag_ref(page, &ref, &handle)) {
1133 alloc_tag_sub(&ref, PAGE_SIZE * nr);
1134 update_page_tag_ref(handle, &ref);
1135 put_page_tag_ref(handle);
1136 }
1137}
1138
1139static inline void pgalloc_tag_sub(struct page *page, unsigned int nr)
1140{
1141 if (mem_alloc_profiling_enabled())
1142 __pgalloc_tag_sub(page, nr);
1143}
1144
0ae0227f
DW
1145/* When tag is not NULL, assuming mem_alloc_profiling_enabled */
1146static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr)
93d5440e 1147{
93d5440e
SB
1148 if (tag)
1149 this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr);
1150}
1151
1152#else /* CONFIG_MEM_ALLOC_PROFILING */
1153
1154static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
1155 unsigned int nr) {}
1156static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {}
0ae0227f 1157static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {}
93d5440e
SB
1158
1159#endif /* CONFIG_MEM_ALLOC_PROFILING */
1160
733aea0b 1161__always_inline bool free_pages_prepare(struct page *page,
5267fe5d 1162 unsigned int order)
4db7548c 1163{
e2769dbd 1164 int bad = 0;
5267fe5d 1165 bool skip_kasan_poison = should_skip_kasan_poison(page);
c3525330 1166 bool init = want_init_on_free();
76f26535 1167 bool compound = PageCompound(page);
66edc3a5 1168 struct folio *folio = page_folio(page);
4db7548c 1169
4db7548c
MG
1170 VM_BUG_ON_PAGE(PageTail(page), page);
1171
e2769dbd 1172 trace_mm_page_free(page, order);
b073d7f8 1173 kmsan_free_page(page, order);
e2769dbd 1174
17b46e7b
BJ
1175 if (memcg_kmem_online() && PageMemcgKmem(page))
1176 __memcg_kmem_uncharge_page(page, order);
1177
66edc3a5
RG
1178 /*
1179 * In rare cases, when truncation or holepunching raced with
1180 * munlock after VM_LOCKED was cleared, Mlocked may still be
1181 * found set here. This does not indicate a problem, unless
1182 * "unevictable_pgs_cleared" appears worryingly large.
1183 */
1184 if (unlikely(folio_test_mlocked(folio))) {
1185 long nr_pages = folio_nr_pages(folio);
1186
1187 __folio_clear_mlocked(folio);
1188 zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages);
1189 count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
1190 }
1191
79f5f8fa 1192 if (unlikely(PageHWPoison(page)) && !order) {
17b46e7b 1193 /* Do not let hwpoison pages hit pcplists/buddy */
79f5f8fa 1194 reset_page_owner(page, order);
df4e817b 1195 page_table_check_free(page, order);
dcfe378c 1196 pgalloc_tag_sub(page, 1 << order);
5e9784e9
HG
1197
1198 /*
1199 * The page is isolated and accounted for.
1200 * Mark the codetag as empty to avoid accounting error
1201 * when the page is freed by unpoison_memory().
1202 */
1203 clear_page_tag_ref(page);
79f5f8fa
OS
1204 return false;
1205 }
1206
76f26535
HY
1207 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
1208
e2769dbd
MG
1209 /*
1210 * Check tail pages before head page information is cleared to
1211 * avoid checking PageCompound for order-0 pages.
1212 */
1213 if (unlikely(order)) {
e2769dbd
MG
1214 int i;
1215
4996fc54 1216 if (compound) {
9c5ccf2d 1217 page[1].flags &= ~PAGE_FLAGS_SECOND;
4996fc54
DH
1218#ifdef NR_PAGES_IN_LARGE_FOLIO
1219 folio->_nr_pages = 0;
1220#endif
1221 }
e2769dbd
MG
1222 for (i = 1; i < (1 << order); i++) {
1223 if (compound)
8666925c 1224 bad += free_tail_page_prepare(page, page + i);
fce0b421 1225 if (is_check_pages_enabled()) {
8666925c 1226 if (free_page_is_bad(page + i)) {
700d2e9a
VB
1227 bad++;
1228 continue;
1229 }
e2769dbd
MG
1230 }
1231 (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1232 }
1233 }
5d65c8d7
BS
1234 if (PageMappingFlags(page)) {
1235 if (PageAnon(page))
1236 mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);
4db7548c 1237 page->mapping = NULL;
5d65c8d7 1238 }
fce0b421 1239 if (is_check_pages_enabled()) {
700d2e9a
VB
1240 if (free_page_is_bad(page))
1241 bad++;
1242 if (bad)
1243 return false;
1244 }
4db7548c 1245
e2769dbd
MG
1246 page_cpupid_reset_last(page);
1247 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1248 reset_page_owner(page, order);
df4e817b 1249 page_table_check_free(page, order);
dcfe378c 1250 pgalloc_tag_sub(page, 1 << order);
4db7548c
MG
1251
1252 if (!PageHighMem(page)) {
1253 debug_check_no_locks_freed(page_address(page),
e2769dbd 1254 PAGE_SIZE << order);
4db7548c 1255 debug_check_no_obj_freed(page_address(page),
e2769dbd 1256 PAGE_SIZE << order);
4db7548c 1257 }
6471384a 1258
8db26a3d
VB
1259 kernel_poison_pages(page, 1 << order);
1260
f9d79e8d 1261 /*
1bb5eab3 1262 * As memory initialization might be integrated into KASAN,
7c13c163 1263 * KASAN poisoning and memory initialization code must be
1bb5eab3
AK
1264 * kept together to avoid discrepancies in behavior.
1265 *
f9d79e8d
AK
1266 * With hardware tag-based KASAN, memory tags must be set before the
1267 * page becomes unavailable via debug_pagealloc or arch_free_page.
1268 */
f446883d 1269 if (!skip_kasan_poison) {
c3525330 1270 kasan_poison_pages(page, order, init);
f9d79e8d 1271
db8a0477
AK
1272 /* Memory is already initialized if KASAN did it internally. */
1273 if (kasan_has_integrated_init())
1274 init = false;
1275 }
1276 if (init)
aeaec8e2 1277 kernel_init_pages(page, 1 << order);
db8a0477 1278
234fdce8
QC
1279 /*
1280 * arch_free_page() can make the page's contents inaccessible. s390
1281 * does this. So nothing which can access the page's contents should
1282 * happen after this.
1283 */
1284 arch_free_page(page, order);
1285
77bc7fd6 1286 debug_pagealloc_unmap_pages(page, 1 << order);
d6332692 1287
4db7548c
MG
1288 return true;
1289}
1290
1da177e4 1291/*
5f8dcc21 1292 * Frees a number of pages from the PCP lists
7cba630b 1293 * Assumes all pages on list are in same zone.
207f36ee 1294 * count is the number of pages to free.
1da177e4 1295 */
5f8dcc21 1296static void free_pcppages_bulk(struct zone *zone, int count,
fd56eef2
MG
1297 struct per_cpu_pages *pcp,
1298 int pindex)
1da177e4 1299{
57490774 1300 unsigned long flags;
44042b44 1301 unsigned int order;
8b10b465 1302 struct page *page;
f2260e6b 1303
88e8ac11
CTR
1304 /*
1305 * Ensure proper count is passed which otherwise would stuck in the
1306 * below while (list_empty(list)) loop.
1307 */
1308 count = min(pcp->count, count);
d61372bc
MG
1309
1310 /* Ensure requested pindex is drained first. */
1311 pindex = pindex - 1;
1312
57490774 1313 spin_lock_irqsave(&zone->lock, flags);
8b10b465 1314
44042b44 1315 while (count > 0) {
5f8dcc21 1316 struct list_head *list;
fd56eef2 1317 int nr_pages;
5f8dcc21 1318
fd56eef2 1319 /* Remove pages from lists in a round-robin fashion. */
5f8dcc21 1320 do {
f142b2c2
KS
1321 if (++pindex > NR_PCP_LISTS - 1)
1322 pindex = 0;
44042b44 1323 list = &pcp->lists[pindex];
f142b2c2 1324 } while (list_empty(list));
48db57f8 1325
44042b44 1326 order = pindex_to_order(pindex);
fd56eef2 1327 nr_pages = 1 << order;
a6f9edd6 1328 do {
17edeb5d 1329 unsigned long pfn;
8b10b465
MG
1330 int mt;
1331
bf75f200 1332 page = list_last_entry(list, struct page, pcp_list);
17edeb5d
JW
1333 pfn = page_to_pfn(page);
1334 mt = get_pfnblock_migratetype(page, pfn);
8b10b465 1335
0a5f4e5b 1336 /* must delete to avoid corrupting pcp list */
bf75f200 1337 list_del(&page->pcp_list);
fd56eef2
MG
1338 count -= nr_pages;
1339 pcp->count -= nr_pages;
aa016d14 1340
17edeb5d 1341 __free_one_page(page, pfn, zone, order, mt, FPI_NONE);
8b10b465
MG
1342 trace_mm_page_pcpu_drain(page, order, mt);
1343 } while (count > 0 && !list_empty(list));
0a5f4e5b 1344 }
8b10b465 1345
57490774 1346 spin_unlock_irqrestore(&zone->lock, flags);
1da177e4
LT
1347}
1348
e98337d1
YZ
1349/* Split a multi-block free page into its individual pageblocks. */
1350static void split_large_buddy(struct zone *zone, struct page *page,
1351 unsigned long pfn, int order, fpi_t fpi)
1352{
1353 unsigned long end = pfn + (1 << order);
1354
1355 VM_WARN_ON_ONCE(!IS_ALIGNED(pfn, 1 << order));
1356 /* Caller removed page from freelist, buddy info cleared! */
1357 VM_WARN_ON_ONCE(PageBuddy(page));
1358
1359 if (order > pageblock_order)
1360 order = pageblock_order;
1361
faeec8e2 1362 do {
e98337d1
YZ
1363 int mt = get_pfnblock_migratetype(page, pfn);
1364
1365 __free_one_page(page, pfn, zone, order, mt, fpi);
1366 pfn += 1 << order;
faeec8e2
DH
1367 if (pfn == end)
1368 break;
e98337d1 1369 page = pfn_to_page(pfn);
faeec8e2 1370 } while (1);
e98337d1
YZ
1371}
1372
8c57b687
AS
1373static void add_page_to_zone_llist(struct zone *zone, struct page *page,
1374 unsigned int order)
1375{
1376 /* Remember the order */
1377 page->order = order;
1378 /* Add the page to the free list */
1379 llist_add(&page->pcp_llist, &zone->trylock_free_pages);
1380}
1381
55612e80
JW
1382static void free_one_page(struct zone *zone, struct page *page,
1383 unsigned long pfn, unsigned int order,
1384 fpi_t fpi_flags)
1da177e4 1385{
8c57b687 1386 struct llist_head *llhead;
df1acc85
MG
1387 unsigned long flags;
1388
c5bb27e2
AS
1389 if (unlikely(fpi_flags & FPI_TRYLOCK)) {
1390 if (!spin_trylock_irqsave(&zone->lock, flags)) {
8c57b687
AS
1391 add_page_to_zone_llist(zone, page, order);
1392 return;
1393 }
c5bb27e2 1394 } else {
8c57b687
AS
1395 spin_lock_irqsave(&zone->lock, flags);
1396 }
1397
1398 /* The lock succeeded. Process deferred pages. */
1399 llhead = &zone->trylock_free_pages;
1400 if (unlikely(!llist_empty(llhead) && !(fpi_flags & FPI_TRYLOCK))) {
1401 struct llist_node *llnode;
1402 struct page *p, *tmp;
1403
1404 llnode = llist_del_all(llhead);
1405 llist_for_each_entry_safe(p, tmp, llnode, pcp_llist) {
1406 unsigned int p_order = p->order;
1407
1408 split_large_buddy(zone, p, page_to_pfn(p), p_order, fpi_flags);
1409 __count_vm_events(PGFREE, 1 << p_order);
1410 }
1411 }
e98337d1 1412 split_large_buddy(zone, page, pfn, order, fpi_flags);
df1acc85 1413 spin_unlock_irqrestore(&zone->lock, flags);
ec867977
YA
1414
1415 __count_vm_events(PGFREE, 1 << order);
48db57f8
NP
1416}
1417
7fef431b
DH
1418static void __free_pages_ok(struct page *page, unsigned int order,
1419 fpi_t fpi_flags)
ec95f53a 1420{
dc4b0caf 1421 unsigned long pfn = page_to_pfn(page);
56f0e661 1422 struct zone *zone = page_zone(page);
ec95f53a 1423
ec867977
YA
1424 if (free_pages_prepare(page, order))
1425 free_one_page(zone, page, pfn, order, fpi_flags);
1da177e4
LT
1426}
1427
f6953e22 1428void __meminit __free_pages_core(struct page *page, unsigned int order,
13c52654 1429 enum meminit_context context)
a226f6c8 1430{
c3993076 1431 unsigned int nr_pages = 1 << order;
e2d0bd2b 1432 struct page *p = page;
c3993076 1433 unsigned int loop;
a226f6c8 1434
7fef431b
DH
1435 /*
1436 * When initializing the memmap, __init_single_page() sets the refcount
1437 * of all pages to 1 ("allocated"/"not free"). We have to set the
1438 * refcount of all involved pages to 0.
503b158f
DH
1439 *
1440 * Note that hotplugged memory pages are initialized to PageOffline().
1441 * Pages freed from memblock might be marked as reserved.
7fef431b 1442 */
13c52654
DH
1443 if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG) &&
1444 unlikely(context == MEMINIT_HOTPLUG)) {
689d92cc 1445 for (loop = 0; loop < nr_pages; loop++, p++) {
503b158f
DH
1446 VM_WARN_ON_ONCE(PageReserved(p));
1447 __ClearPageOffline(p);
1448 set_page_count(p, 0);
1449 }
503b158f 1450
13c52654
DH
1451 adjust_managed_page_count(page, nr_pages);
1452 } else {
689d92cc 1453 for (loop = 0; loop < nr_pages; loop++, p++) {
503b158f
DH
1454 __ClearPageReserved(p);
1455 set_page_count(p, 0);
1456 }
503b158f 1457
13c52654
DH
1458 /* memblock adjusts totalram_pages() manually. */
1459 atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
1460 }
7fef431b 1461
dcdfdd40 1462 if (page_contains_unaccepted(page, order)) {
5e0a760b 1463 if (order == MAX_PAGE_ORDER && __free_unaccepted(page))
dcdfdd40
KS
1464 return;
1465
5adfeaec 1466 accept_memory(page_to_phys(page), PAGE_SIZE << order);
dcdfdd40
KS
1467 }
1468
7fef431b
DH
1469 /*
1470 * Bypass PCP and place fresh pages right to the tail, primarily
1471 * relevant for memory onlining.
1472 */
0a54864f 1473 __free_pages_ok(page, order, FPI_TO_TAIL);
a226f6c8
DH
1474}
1475
7cf91a98
JK
1476/*
1477 * Check that the whole (or subset of) a pageblock given by the interval of
1478 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
859a85dd 1479 * with the migration of free compaction scanner.
7cf91a98
JK
1480 *
1481 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
1482 *
1483 * It's possible on some configurations to have a setup like node0 node1 node0
1484 * i.e. it's possible that all pages within a zones range of pages do not
1485 * belong to a single zone. We assume that a border between node0 and node1
1486 * can occur within a single pageblock, but not a node0 node1 node0
1487 * interleaving within a single pageblock. It is therefore sufficient to check
1488 * the first and last page of a pageblock and avoid checking each individual
1489 * page in a pageblock.
65f67a3e
BW
1490 *
1491 * Note: the function may return non-NULL struct page even for a page block
1492 * which contains a memory hole (i.e. there is no physical memory for a subset
5e0a760b 1493 * of the pfn range). For example, if the pageblock order is MAX_PAGE_ORDER, which
65f67a3e
BW
1494 * will fall into 2 sub-sections, and the end pfn of the pageblock may be hole
1495 * even though the start pfn is online and valid. This should be safe most of
1496 * the time because struct pages are still initialized via init_unavailable_range()
1497 * and pfn walkers shouldn't touch any physical memory range for which they do
1498 * not recognize any specific metadata in struct pages.
7cf91a98
JK
1499 */
1500struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1501 unsigned long end_pfn, struct zone *zone)
1502{
1503 struct page *start_page;
1504 struct page *end_page;
1505
1506 /* end_pfn is one past the range we are checking */
1507 end_pfn--;
1508
3c4322c9 1509 if (!pfn_valid(end_pfn))
7cf91a98
JK
1510 return NULL;
1511
2d070eab
MH
1512 start_page = pfn_to_online_page(start_pfn);
1513 if (!start_page)
1514 return NULL;
7cf91a98
JK
1515
1516 if (page_zone(start_page) != zone)
1517 return NULL;
1518
1519 end_page = pfn_to_page(end_pfn);
1520
1521 /* This gives a shorter code than deriving page_zone(end_page) */
1522 if (page_zone_id(start_page) != page_zone_id(end_page))
1523 return NULL;
1524
1525 return start_page;
1526}
1527
2f47a91f 1528/*
9420f89d
MRI
1529 * The order of subdivision here is critical for the IO subsystem.
1530 * Please do not alter this order without good reasons and regression
1531 * testing. Specifically, as large blocks of memory are subdivided,
1532 * the order in which smaller blocks are delivered depends on the order
1533 * they're subdivided in this function. This is the primary factor
1534 * influencing the order in which pages are delivered to the IO
1535 * subsystem according to empirical testing, and this is also justified
1536 * by considering the behavior of a buddy system containing a single
1537 * large block of memory acted on by a series of small allocations.
1538 * This behavior is a critical factor in sglist merging's success.
80b1f41c 1539 *
9420f89d 1540 * -- nyc
2f47a91f 1541 */
94deaf69
HY
1542static inline unsigned int expand(struct zone *zone, struct page *page, int low,
1543 int high, int migratetype)
2f47a91f 1544{
94deaf69
HY
1545 unsigned int size = 1 << high;
1546 unsigned int nr_added = 0;
2f47a91f 1547
9420f89d
MRI
1548 while (high > low) {
1549 high--;
1550 size >>= 1;
1551 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
2f47a91f 1552
9420f89d
MRI
1553 /*
1554 * Mark as guard pages (or page), that will allow to
1555 * merge back to allocator when buddy will be freed.
1556 * Corresponding page table entries will not be touched,
1557 * pages will stay not present in virtual address space
1558 */
e0932b6c 1559 if (set_page_guard(zone, &page[size], high))
2f47a91f 1560 continue;
9420f89d 1561
883dd161 1562 __add_to_free_list(&page[size], zone, high, migratetype, false);
9420f89d 1563 set_buddy_order(&page[size], high);
883dd161 1564 nr_added += size;
2f47a91f 1565 }
94deaf69
HY
1566
1567 return nr_added;
1568}
1569
1570static __always_inline void page_del_and_expand(struct zone *zone,
1571 struct page *page, int low,
1572 int high, int migratetype)
1573{
1574 int nr_pages = 1 << high;
1575
1576 __del_page_from_free_list(page, zone, high, migratetype);
1577 nr_pages -= expand(zone, page, low, high, migratetype);
1578 account_freepages(zone, -nr_pages, migratetype);
2f47a91f
PT
1579}
1580
9420f89d 1581static void check_new_page_bad(struct page *page)
0e56acae 1582{
bd145bdd 1583 if (unlikely(PageHWPoison(page))) {
9420f89d 1584 /* Don't complain about hwpoisoned pages */
e4d970ac
DH
1585 if (PageBuddy(page))
1586 __ClearPageBuddy(page);
9420f89d 1587 return;
0e56acae
AD
1588 }
1589
9420f89d
MRI
1590 bad_page(page,
1591 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP));
0e56acae
AD
1592}
1593
1594/*
9420f89d 1595 * This page is about to be returned from the page allocator
0e56acae 1596 */
77c7a095 1597static bool check_new_page(struct page *page)
0e56acae 1598{
9420f89d
MRI
1599 if (likely(page_expected_state(page,
1600 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
77c7a095 1601 return false;
0e56acae 1602
9420f89d 1603 check_new_page_bad(page);
77c7a095 1604 return true;
9420f89d 1605}
0e56acae 1606
9420f89d
MRI
1607static inline bool check_new_pages(struct page *page, unsigned int order)
1608{
1609 if (is_check_pages_enabled()) {
1610 for (int i = 0; i < (1 << order); i++) {
1611 struct page *p = page + i;
0e56acae 1612
8666925c 1613 if (check_new_page(p))
9420f89d 1614 return true;
0e56acae
AD
1615 }
1616 }
1617
9420f89d 1618 return false;
0e56acae
AD
1619}
1620
9420f89d 1621static inline bool should_skip_kasan_unpoison(gfp_t flags)
e4443149 1622{
9420f89d
MRI
1623 /* Don't skip if a software KASAN mode is enabled. */
1624 if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
1625 IS_ENABLED(CONFIG_KASAN_SW_TAGS))
1626 return false;
e4443149 1627
9420f89d
MRI
1628 /* Skip, if hardware tag-based KASAN is not enabled. */
1629 if (!kasan_hw_tags_enabled())
1630 return true;
e4443149
DJ
1631
1632 /*
9420f89d
MRI
1633 * With hardware tag-based KASAN enabled, skip if this has been
1634 * requested via __GFP_SKIP_KASAN.
e4443149 1635 */
9420f89d 1636 return flags & __GFP_SKIP_KASAN;
e4443149
DJ
1637}
1638
9420f89d 1639static inline bool should_skip_init(gfp_t flags)
ecd09650 1640{
9420f89d
MRI
1641 /* Don't skip, if hardware tag-based KASAN is not enabled. */
1642 if (!kasan_hw_tags_enabled())
1643 return false;
1644
1645 /* For hardware tag-based KASAN, skip if requested. */
1646 return (flags & __GFP_SKIP_ZERO);
ecd09650
DJ
1647}
1648
9420f89d
MRI
1649inline void post_alloc_hook(struct page *page, unsigned int order,
1650 gfp_t gfp_flags)
7e18adb4 1651{
9420f89d
MRI
1652 bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) &&
1653 !should_skip_init(gfp_flags);
1654 bool zero_tags = init && (gfp_flags & __GFP_ZEROTAGS);
1655 int i;
1656
1657 set_page_private(page, 0);
0e1cc95b 1658
9420f89d
MRI
1659 arch_alloc_page(page, order);
1660 debug_pagealloc_map_pages(page, 1 << order);
7e18adb4 1661
3d060856 1662 /*
9420f89d
MRI
1663 * Page unpoisoning must happen before memory initialization.
1664 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO
1665 * allocations and the page unpoisoning code will complain.
3d060856 1666 */
9420f89d 1667 kernel_unpoison_pages(page, 1 << order);
862b6dee 1668
1bb5eab3
AK
1669 /*
1670 * As memory initialization might be integrated into KASAN,
b42090ae 1671 * KASAN unpoisoning and memory initializion code must be
1bb5eab3
AK
1672 * kept together to avoid discrepancies in behavior.
1673 */
9294b128
AK
1674
1675 /*
44383cef
AK
1676 * If memory tags should be zeroed
1677 * (which happens only when memory should be initialized as well).
9294b128 1678 */
44383cef 1679 if (zero_tags) {
420ef683 1680 /* Initialize both memory and memory tags. */
9294b128
AK
1681 for (i = 0; i != 1 << order; ++i)
1682 tag_clear_highpage(page + i);
1683
44383cef 1684 /* Take note that memory was initialized by the loop above. */
9294b128
AK
1685 init = false;
1686 }
0a54864f
PC
1687 if (!should_skip_kasan_unpoison(gfp_flags) &&
1688 kasan_unpoison_pages(page, order, init)) {
1689 /* Take note that memory was initialized by KASAN. */
1690 if (kasan_has_integrated_init())
1691 init = false;
1692 } else {
1693 /*
1694 * If memory tags have not been set by KASAN, reset the page
1695 * tags to ensure page_address() dereferencing does not fault.
1696 */
70c248ac
CM
1697 for (i = 0; i != 1 << order; ++i)
1698 page_kasan_tag_reset(page + i);
7a3b8353 1699 }
44383cef 1700 /* If memory is still not initialized, initialize it now. */
7e3cbba6 1701 if (init)
aeaec8e2 1702 kernel_init_pages(page, 1 << order);
1bb5eab3
AK
1703
1704 set_page_owner(page, order, gfp_flags);
df4e817b 1705 page_table_check_alloc(page, order);
dcfe378c 1706 pgalloc_tag_add(page, current, 1 << order);
46f24fd8
JK
1707}
1708
479f854a 1709static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
c603844b 1710 unsigned int alloc_flags)
2a7684a2 1711{
46f24fd8 1712 post_alloc_hook(page, order, gfp_flags);
17cf4406 1713
17cf4406
NP
1714 if (order && (gfp_flags & __GFP_COMP))
1715 prep_compound_page(page, order);
1716
75379191 1717 /*
2f064f34 1718 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
75379191
VB
1719 * allocate the page. The expectation is that the caller is taking
1720 * steps that will free more memory. The caller should avoid the page
1721 * being used for !PFMEMALLOC purposes.
1722 */
2f064f34
MH
1723 if (alloc_flags & ALLOC_NO_WATERMARKS)
1724 set_page_pfmemalloc(page);
1725 else
1726 clear_page_pfmemalloc(page);
1da177e4
LT
1727}
1728
56fd56b8
MG
1729/*
1730 * Go through the free lists for the given migratetype and remove
1731 * the smallest available page from the freelists
1732 */
85ccc8fa 1733static __always_inline
728ec980 1734struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
56fd56b8
MG
1735 int migratetype)
1736{
1737 unsigned int current_order;
b8af2941 1738 struct free_area *area;
56fd56b8
MG
1739 struct page *page;
1740
1741 /* Find a page of the appropriate size in the preferred list */
fd377218 1742 for (current_order = order; current_order < NR_PAGE_ORDERS; ++current_order) {
56fd56b8 1743 area = &(zone->free_area[current_order]);
b03641af 1744 page = get_page_from_free_area(area, migratetype);
a16601c5
GT
1745 if (!page)
1746 continue;
94deaf69
HY
1747
1748 page_del_and_expand(zone, page, order, current_order,
1749 migratetype);
10e0f753
WY
1750 trace_mm_page_alloc_zone_locked(page, order, migratetype,
1751 pcp_allowed_order(order) &&
1752 migratetype < MIGRATE_PCPTYPES);
56fd56b8
MG
1753 return page;
1754 }
1755
1756 return NULL;
1757}
1758
1759
b2a0ac88
MG
1760/*
1761 * This array describes the order lists are fallen back to when
1762 * the free lists for the desirable migrate type are depleted
1dd214b8
ZY
1763 *
1764 * The other migratetypes do not have fallbacks.
b2a0ac88 1765 */
0aac4566 1766static int fallbacks[MIGRATE_PCPTYPES][MIGRATE_PCPTYPES - 1] = {
aa02d3c1
YD
1767 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE },
1768 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE },
1769 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE },
b2a0ac88
MG
1770};
1771
dc67647b 1772#ifdef CONFIG_CMA
85ccc8fa 1773static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
dc67647b
JK
1774 unsigned int order)
1775{
1776 return __rmqueue_smallest(zone, order, MIGRATE_CMA);
1777}
1778#else
1779static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1780 unsigned int order) { return NULL; }
1781#endif
1782
c361be55 1783/*
f37c0f68
ZY
1784 * Change the type of a block and move all its free pages to that
1785 * type's freelist.
c361be55 1786 */
e1f42a57
VB
1787static int __move_freepages_block(struct zone *zone, unsigned long start_pfn,
1788 int old_mt, int new_mt)
c361be55
MG
1789{
1790 struct page *page;
e1f42a57 1791 unsigned long pfn, end_pfn;
d00181b9 1792 unsigned int order;
d100313f 1793 int pages_moved = 0;
c361be55 1794
f37c0f68 1795 VM_WARN_ON(start_pfn & (pageblock_nr_pages - 1));
e1f42a57 1796 end_pfn = pageblock_end_pfn(start_pfn);
f37c0f68 1797
e1f42a57 1798 for (pfn = start_pfn; pfn < end_pfn;) {
39ddb991 1799 page = pfn_to_page(pfn);
c361be55 1800 if (!PageBuddy(page)) {
39ddb991 1801 pfn++;
c361be55
MG
1802 continue;
1803 }
1804
cd961038
DR
1805 /* Make sure we are not inadvertently changing nodes */
1806 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
1807 VM_BUG_ON_PAGE(page_zone(page) != zone, page);
1808
ab130f91 1809 order = buddy_order(page);
e0932b6c
JW
1810
1811 move_to_free_list(page, zone, order, old_mt, new_mt);
1812
39ddb991 1813 pfn += 1 << order;
d100313f 1814 pages_moved += 1 << order;
c361be55
MG
1815 }
1816
e0932b6c 1817 set_pageblock_migratetype(pfn_to_page(start_pfn), new_mt);
f37c0f68 1818
d100313f 1819 return pages_moved;
c361be55
MG
1820}
1821
c0cd6f55
JW
1822static bool prep_move_freepages_block(struct zone *zone, struct page *page,
1823 unsigned long *start_pfn,
c0cd6f55 1824 int *num_free, int *num_movable)
c361be55 1825{
c0cd6f55 1826 unsigned long pfn, start, end;
4a222127 1827
39ddb991 1828 pfn = page_to_pfn(page);
c0cd6f55 1829 start = pageblock_start_pfn(pfn);
e1f42a57 1830 end = pageblock_end_pfn(pfn);
c361be55 1831
2dd482ba
JW
1832 /*
1833 * The caller only has the lock for @zone, don't touch ranges
1834 * that straddle into other zones. While we could move part of
1835 * the range that's inside the zone, this call is usually
1836 * accompanied by other operations such as migratetype updates
1837 * which also should be locked.
1838 */
c0cd6f55
JW
1839 if (!zone_spans_pfn(zone, start))
1840 return false;
e1f42a57 1841 if (!zone_spans_pfn(zone, end - 1))
c0cd6f55
JW
1842 return false;
1843
1844 *start_pfn = start;
c0cd6f55
JW
1845
1846 if (num_free) {
1847 *num_free = 0;
1848 *num_movable = 0;
e1f42a57 1849 for (pfn = start; pfn < end;) {
c0cd6f55
JW
1850 page = pfn_to_page(pfn);
1851 if (PageBuddy(page)) {
1852 int nr = 1 << buddy_order(page);
1853
1854 *num_free += nr;
1855 pfn += nr;
1856 continue;
1857 }
1858 /*
1859 * We assume that pages that could be isolated for
1860 * migration are movable. But we don't actually try
1861 * isolating, as that would be expensive.
1862 */
1863 if (PageLRU(page) || __PageMovable(page))
1864 (*num_movable)++;
1865 pfn++;
1866 }
1867 }
c361be55 1868
c0cd6f55
JW
1869 return true;
1870}
1871
fd919a85 1872static int move_freepages_block(struct zone *zone, struct page *page,
e0932b6c 1873 int old_mt, int new_mt)
c0cd6f55 1874{
e1f42a57 1875 unsigned long start_pfn;
c0cd6f55 1876
e1f42a57 1877 if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL))
c0cd6f55
JW
1878 return -1;
1879
e1f42a57 1880 return __move_freepages_block(zone, start_pfn, old_mt, new_mt);
c361be55
MG
1881}
1882
fd919a85
JW
1883#ifdef CONFIG_MEMORY_ISOLATION
1884/* Look for a buddy that straddles start_pfn */
1885static unsigned long find_large_buddy(unsigned long start_pfn)
1886{
1887 int order = 0;
1888 struct page *page;
1889 unsigned long pfn = start_pfn;
1890
1891 while (!PageBuddy(page = pfn_to_page(pfn))) {
1892 /* Nothing found */
1893 if (++order > MAX_PAGE_ORDER)
1894 return start_pfn;
1895 pfn &= ~0UL << order;
1896 }
1897
1898 /*
1899 * Found a preceding buddy, but does it straddle?
1900 */
1901 if (pfn + (1 << buddy_order(page)) > start_pfn)
1902 return pfn;
1903
1904 /* Nothing found */
1905 return start_pfn;
1906}
1907
fd919a85
JW
1908/**
1909 * move_freepages_block_isolate - move free pages in block for page isolation
1910 * @zone: the zone
1911 * @page: the pageblock page
1912 * @migratetype: migratetype to set on the pageblock
1913 *
1914 * This is similar to move_freepages_block(), but handles the special
1915 * case encountered in page isolation, where the block of interest
1916 * might be part of a larger buddy spanning multiple pageblocks.
1917 *
1918 * Unlike the regular page allocator path, which moves pages while
1919 * stealing buddies off the freelist, page isolation is interested in
1920 * arbitrary pfn ranges that may have overlapping buddies on both ends.
1921 *
1922 * This function handles that. Straddling buddies are split into
1923 * individual pageblocks. Only the block of interest is moved.
1924 *
1925 * Returns %true if pages could be moved, %false otherwise.
1926 */
1927bool move_freepages_block_isolate(struct zone *zone, struct page *page,
1928 int migratetype)
1929{
e1f42a57 1930 unsigned long start_pfn, pfn;
fd919a85 1931
e1f42a57 1932 if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL))
fd919a85
JW
1933 return false;
1934
1935 /* No splits needed if buddies can't span multiple blocks */
1936 if (pageblock_order == MAX_PAGE_ORDER)
1937 goto move;
1938
1939 /* We're a tail block in a larger buddy */
1940 pfn = find_large_buddy(start_pfn);
1941 if (pfn != start_pfn) {
1942 struct page *buddy = pfn_to_page(pfn);
1943 int order = buddy_order(buddy);
fd919a85 1944
e0932b6c
JW
1945 del_page_from_free_list(buddy, zone, order,
1946 get_pfnblock_migratetype(buddy, pfn));
fd919a85 1947 set_pageblock_migratetype(page, migratetype);
e98337d1 1948 split_large_buddy(zone, buddy, pfn, order, FPI_NONE);
fd919a85
JW
1949 return true;
1950 }
1951
1952 /* We're the starting block of a larger buddy */
1953 if (PageBuddy(page) && buddy_order(page) > pageblock_order) {
fd919a85
JW
1954 int order = buddy_order(page);
1955
e0932b6c
JW
1956 del_page_from_free_list(page, zone, order,
1957 get_pfnblock_migratetype(page, pfn));
fd919a85 1958 set_pageblock_migratetype(page, migratetype);
e98337d1 1959 split_large_buddy(zone, page, pfn, order, FPI_NONE);
fd919a85
JW
1960 return true;
1961 }
1962move:
e1f42a57
VB
1963 __move_freepages_block(zone, start_pfn,
1964 get_pfnblock_migratetype(page, start_pfn),
1965 migratetype);
fd919a85
JW
1966 return true;
1967}
1968#endif /* CONFIG_MEMORY_ISOLATION */
1969
2f66a68f
MG
1970static void change_pageblock_range(struct page *pageblock_page,
1971 int start_order, int migratetype)
1972{
1973 int nr_pageblocks = 1 << (start_order - pageblock_order);
1974
1975 while (nr_pageblocks--) {
1976 set_pageblock_migratetype(pageblock_page, migratetype);
1977 pageblock_page += pageblock_nr_pages;
1978 }
1979}
1980
597c8920 1981static inline bool boost_watermark(struct zone *zone)
1c30844d
MG
1982{
1983 unsigned long max_boost;
1984
1985 if (!watermark_boost_factor)
597c8920 1986 return false;
14f69140
HW
1987 /*
1988 * Don't bother in zones that are unlikely to produce results.
1989 * On small machines, including kdump capture kernels running
1990 * in a small area, boosting the watermark can cause an out of
1991 * memory situation immediately.
1992 */
1993 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
597c8920 1994 return false;
1c30844d
MG
1995
1996 max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
1997 watermark_boost_factor, 10000);
94b3334c
MG
1998
1999 /*
2000 * high watermark may be uninitialised if fragmentation occurs
2001 * very early in boot so do not boost. We do not fall
2002 * through and boost by pageblock_nr_pages as failing
2003 * allocations that early means that reclaim is not going
2004 * to help and it may even be impossible to reclaim the
2005 * boosted watermark resulting in a hang.
2006 */
2007 if (!max_boost)
597c8920 2008 return false;
94b3334c 2009
1c30844d
MG
2010 max_boost = max(pageblock_nr_pages, max_boost);
2011
2012 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
2013 max_boost);
597c8920
JW
2014
2015 return true;
1c30844d
MG
2016}
2017
4eb7dce6 2018/*
a14efee0
BJ
2019 * When we are falling back to another migratetype during allocation, should we
2020 * try to claim an entire block to satisfy further allocations, instead of
2021 * polluting multiple pageblocks?
4eb7dce6 2022 */
e47f1f56 2023static bool should_try_claim_block(unsigned int order, int start_mt)
fef903ef 2024{
4eb7dce6
JK
2025 /*
2026 * Leaving this order check is intended, although there is
2027 * relaxed order check in next check. The reason is that
e47f1f56 2028 * we can actually claim the whole pageblock if this condition met,
4eb7dce6
JK
2029 * but, below check doesn't guarantee it and that is just heuristic
2030 * so could be changed anytime.
2031 */
2032 if (order >= pageblock_order)
2033 return true;
02aa0cdd 2034
6025ea5a 2035 /*
a14efee0
BJ
2036 * Above a certain threshold, always try to claim, as it's likely there
2037 * will be more free pages in the pageblock.
2038 */
2039 if (order >= pageblock_order / 2)
2040 return true;
fef903ef 2041
3bc48f96 2042 /*
a14efee0
BJ
2043 * Unmovable/reclaimable allocations would cause permanent
2044 * fragmentations if they fell back to allocating from a movable block
2045 * (polluting it), so we try to claim the whole block regardless of the
2046 * allocation size. Later movable allocations can always steal from this
2047 * block, which is less problematic.
3bc48f96 2048 */
a14efee0 2049 if (start_mt == MIGRATE_RECLAIMABLE || start_mt == MIGRATE_UNMOVABLE)
4eb7dce6
JK
2050 return true;
2051
a14efee0
BJ
2052 if (page_group_by_mobility_disabled)
2053 return true;
2054
2055 /*
2056 * Movable pages won't cause permanent fragmentation, so when you alloc
2057 * small pages, we just need to temporarily steal unmovable or
2058 * reclaimable pages that are closest to the request size. After a
2059 * while, memory compaction may occur to form large contiguous pages,
2060 * and the next movable allocation may not need to steal.
2061 */
4eb7dce6
JK
2062 return false;
2063}
2064
a4138a27
JW
2065/*
2066 * Check whether there is a suitable fallback freepage with requested order.
ee414bd9 2067 * If claimable is true, this function returns fallback_mt only if
e47f1f56 2068 * we would do this whole-block claiming. This would help to reduce
a4138a27
JW
2069 * fragmentation due to mixed migratetype pages in one pageblock.
2070 */
2071int find_suitable_fallback(struct free_area *area, unsigned int order,
ee414bd9 2072 int migratetype, bool claimable)
1c30844d 2073{
a4138a27 2074 int i;
ee414bd9
JW
2075
2076 if (claimable && !should_try_claim_block(order, migratetype))
2077 return -2;
1c30844d 2078
a4138a27
JW
2079 if (area->nr_free == 0)
2080 return -1;
1c30844d 2081
a4138a27 2082 for (i = 0; i < MIGRATE_PCPTYPES - 1 ; i++) {
ee414bd9 2083 int fallback_mt = fallbacks[migratetype][i];
94b3334c 2084
ee414bd9 2085 if (!free_area_empty(area, fallback_mt))
a4138a27
JW
2086 return fallback_mt;
2087 }
597c8920 2088
a4138a27 2089 return -1;
1c30844d
MG
2090}
2091
4eb7dce6 2092/*
e47f1f56
BJ
2093 * This function implements actual block claiming behaviour. If order is large
2094 * enough, we can claim the whole pageblock for the requested migratetype. If
2095 * not, we check the pageblock for constituent pages; if at least half of the
2096 * pages are free or compatible, we can still claim the whole block, so pages
2097 * freed in the future will be put on the correct free list.
4eb7dce6 2098 */
c0cd6f55 2099static struct page *
e47f1f56 2100try_to_claim_block(struct zone *zone, struct page *page,
c2f6ea38 2101 int current_order, int order, int start_type,
020396a5 2102 int block_type, unsigned int alloc_flags)
fef903ef 2103{
02aa0cdd 2104 int free_pages, movable_pages, alike_pages;
e1f42a57 2105 unsigned long start_pfn;
3bc48f96 2106
fef903ef
SB
2107 /* Take ownership for orders >= pageblock_order */
2108 if (current_order >= pageblock_order) {
94deaf69
HY
2109 unsigned int nr_added;
2110
e0932b6c 2111 del_page_from_free_list(page, zone, current_order, block_type);
fef903ef 2112 change_pageblock_range(page, current_order, start_type);
94deaf69
HY
2113 nr_added = expand(zone, page, order, current_order, start_type);
2114 account_freepages(zone, nr_added, start_type);
c0cd6f55 2115 return page;
fef903ef
SB
2116 }
2117
1c30844d
MG
2118 /*
2119 * Boost watermarks to increase reclaim pressure to reduce the
2120 * likelihood of future fallbacks. Wake kswapd now as the node
2121 * may be balanced overall and kswapd will not wake naturally.
2122 */
597c8920 2123 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))
73444bc4 2124 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
1c30844d 2125
ebddd111 2126 /* moving whole block can fail due to zone boundary conditions */
e1f42a57
VB
2127 if (!prep_move_freepages_block(zone, page, &start_pfn, &free_pages,
2128 &movable_pages))
c2f6ea38 2129 return NULL;
ebddd111 2130
02aa0cdd
VB
2131 /*
2132 * Determine how many pages are compatible with our allocation.
2133 * For movable allocation, it's the number of movable pages which
2134 * we just obtained. For other types it's a bit more tricky.
2135 */
2136 if (start_type == MIGRATE_MOVABLE) {
2137 alike_pages = movable_pages;
2138 } else {
2139 /*
2140 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
2141 * to MOVABLE pageblock, consider all non-movable pages as
2142 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
2143 * vice versa, be conservative since we can't distinguish the
2144 * exact migratetype of non-movable pages.
2145 */
c0cd6f55 2146 if (block_type == MIGRATE_MOVABLE)
02aa0cdd
VB
2147 alike_pages = pageblock_nr_pages
2148 - (free_pages + movable_pages);
2149 else
2150 alike_pages = 0;
2151 }
02aa0cdd
VB
2152 /*
2153 * If a sufficient number of pages in the block are either free or of
ebddd111 2154 * compatible migratability as our allocation, claim the whole block.
02aa0cdd
VB
2155 */
2156 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
c0cd6f55 2157 page_group_by_mobility_disabled) {
e1f42a57 2158 __move_freepages_block(zone, start_pfn, block_type, start_type);
c0cd6f55
JW
2159 return __rmqueue_smallest(zone, order, start_type);
2160 }
3bc48f96 2161
c2f6ea38 2162 return NULL;
0aaa29a5
MG
2163}
2164
2165/*
90abee6d
JW
2166 * Try to allocate from some fallback migratetype by claiming the entire block,
2167 * i.e. converting it to the allocation's start migratetype.
b002529d
RV
2168 *
2169 * The use of signed ints for order and current_order is a deliberate
2170 * deviation from the rest of this file, to make the for loop
2171 * condition simpler.
3bc48f96 2172 */
c0cd6f55 2173static __always_inline struct page *
90abee6d 2174__rmqueue_claim(struct zone *zone, int order, int start_migratetype,
6bb15450 2175 unsigned int alloc_flags)
b2a0ac88 2176{
b8af2941 2177 struct free_area *area;
b002529d 2178 int current_order;
6bb15450 2179 int min_order = order;
b2a0ac88 2180 struct page *page;
4eb7dce6 2181 int fallback_mt;
b2a0ac88 2182
6bb15450
MG
2183 /*
2184 * Do not steal pages from freelists belonging to other pageblocks
2185 * i.e. orders < pageblock_order. If there are no local zones free,
2186 * the zonelists will be reiterated without ALLOC_NOFRAGMENT.
2187 */
e933dc4a 2188 if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT)
6bb15450
MG
2189 min_order = pageblock_order;
2190
7a8f58f3
VB
2191 /*
2192 * Find the largest available free page in the other list. This roughly
2193 * approximates finding the pageblock with the most free pages, which
2194 * would be too costly to do exactly.
2195 */
5e0a760b 2196 for (current_order = MAX_PAGE_ORDER; current_order >= min_order;
7aeb09f9 2197 --current_order) {
4eb7dce6
JK
2198 area = &(zone->free_area[current_order]);
2199 fallback_mt = find_suitable_fallback(area, current_order,
ee414bd9
JW
2200 start_migratetype, true);
2201
2202 /* No block in that order */
4eb7dce6
JK
2203 if (fallback_mt == -1)
2204 continue;
b2a0ac88 2205
ee414bd9
JW
2206 /* Advanced into orders too low to claim, abort */
2207 if (fallback_mt == -2)
c2f6ea38 2208 break;
b2a0ac88 2209
c2f6ea38 2210 page = get_page_from_free_area(area, fallback_mt);
e47f1f56 2211 page = try_to_claim_block(zone, page, current_order, order,
020396a5
JW
2212 start_migratetype, fallback_mt,
2213 alloc_flags);
90abee6d
JW
2214 if (page) {
2215 trace_mm_page_alloc_extfrag(page, order, current_order,
2216 start_migratetype, fallback_mt);
2217 return page;
2218 }
7a8f58f3 2219 }
e0fff1bd 2220
90abee6d
JW
2221 return NULL;
2222}
2223
2224/*
2225 * Try to steal a single page from some fallback migratetype. Leave the rest of
2226 * the block as its current migratetype, potentially causing fragmentation.
2227 */
2228static __always_inline struct page *
2229__rmqueue_steal(struct zone *zone, int order, int start_migratetype)
2230{
2231 struct free_area *area;
2232 int current_order;
2233 struct page *page;
2234 int fallback_mt;
e0fff1bd 2235
fd377218 2236 for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) {
7a8f58f3
VB
2237 area = &(zone->free_area[current_order]);
2238 fallback_mt = find_suitable_fallback(area, current_order,
ee414bd9 2239 start_migratetype, false);
c2f6ea38
JW
2240 if (fallback_mt == -1)
2241 continue;
7a8f58f3 2242
c2f6ea38
JW
2243 page = get_page_from_free_area(area, fallback_mt);
2244 page_del_and_expand(zone, page, order, current_order, fallback_mt);
90abee6d
JW
2245 trace_mm_page_alloc_extfrag(page, order, current_order,
2246 start_migratetype, fallback_mt);
2247 return page;
c2f6ea38 2248 }
7a8f58f3 2249
c2f6ea38 2250 return NULL;
b2a0ac88
MG
2251}
2252
90abee6d
JW
2253enum rmqueue_mode {
2254 RMQUEUE_NORMAL,
2255 RMQUEUE_CMA,
2256 RMQUEUE_CLAIM,
2257 RMQUEUE_STEAL,
2258};
2259
56fd56b8 2260/*
1da177e4
LT
2261 * Do the hard work of removing an element from the buddy allocator.
2262 * Call me with the zone->lock already held.
2263 */
85ccc8fa 2264static __always_inline struct page *
6bb15450 2265__rmqueue(struct zone *zone, unsigned int order, int migratetype,
90abee6d 2266 unsigned int alloc_flags, enum rmqueue_mode *mode)
1da177e4 2267{
1da177e4
LT
2268 struct page *page;
2269
ce8f86ee
H
2270 if (IS_ENABLED(CONFIG_CMA)) {
2271 /*
2272 * Balance movable allocations between regular and CMA areas by
2273 * allocating from CMA when over half of the zone's free memory
2274 * is in the CMA area.
2275 */
2276 if (alloc_flags & ALLOC_CMA &&
2277 zone_page_state(zone, NR_FREE_CMA_PAGES) >
2278 zone_page_state(zone, NR_FREE_PAGES) / 2) {
2279 page = __rmqueue_cma_fallback(zone, order);
2280 if (page)
10e0f753 2281 return page;
ce8f86ee 2282 }
16867664 2283 }
c0cd6f55 2284
90abee6d
JW
2285 /*
2286 * First try the freelists of the requested migratetype, then try
2287 * fallbacks modes with increasing levels of fragmentation risk.
2288 *
2289 * The fallback logic is expensive and rmqueue_bulk() calls in
2290 * a loop with the zone->lock held, meaning the freelists are
2291 * not subject to any outside changes. Remember in *mode where
2292 * we found pay dirt, to save us the search on the next call.
2293 */
2294 switch (*mode) {
2295 case RMQUEUE_NORMAL:
2296 page = __rmqueue_smallest(zone, order, migratetype);
2297 if (page)
2298 return page;
2299 fallthrough;
2300 case RMQUEUE_CMA:
2301 if (alloc_flags & ALLOC_CMA) {
dc67647b 2302 page = __rmqueue_cma_fallback(zone, order);
90abee6d
JW
2303 if (page) {
2304 *mode = RMQUEUE_CMA;
2305 return page;
2306 }
2307 }
2308 fallthrough;
2309 case RMQUEUE_CLAIM:
2310 page = __rmqueue_claim(zone, order, migratetype, alloc_flags);
2311 if (page) {
2312 /* Replenished preferred freelist, back to normal mode. */
2313 *mode = RMQUEUE_NORMAL;
2314 return page;
2315 }
2316 fallthrough;
2317 case RMQUEUE_STEAL:
2318 if (!(alloc_flags & ALLOC_NOFRAGMENT)) {
2319 page = __rmqueue_steal(zone, order, migratetype);
2320 if (page) {
2321 *mode = RMQUEUE_STEAL;
2322 return page;
2323 }
2324 }
728ec980 2325 }
90abee6d 2326 return NULL;
1da177e4
LT
2327}
2328
5f63b720 2329/*
1da177e4
LT
2330 * Obtain a specified number of elements from the buddy allocator, all under
2331 * a single hold of the lock, for efficiency. Add them to the supplied list.
2332 * Returns the number of new pages which were placed at *list.
2333 */
5f63b720 2334static int rmqueue_bulk(struct zone *zone, unsigned int order,
b2a0ac88 2335 unsigned long count, struct list_head *list,
6bb15450 2336 int migratetype, unsigned int alloc_flags)
1da177e4 2337{
90abee6d 2338 enum rmqueue_mode rmqm = RMQUEUE_NORMAL;
57490774 2339 unsigned long flags;
700d2e9a 2340 int i;
5f63b720 2341
c5bb27e2
AS
2342 if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
2343 if (!spin_trylock_irqsave(&zone->lock, flags))
97769a53 2344 return 0;
c5bb27e2 2345 } else {
97769a53
AS
2346 spin_lock_irqsave(&zone->lock, flags);
2347 }
1da177e4 2348 for (i = 0; i < count; ++i) {
6bb15450 2349 struct page *page = __rmqueue(zone, order, migratetype,
90abee6d 2350 alloc_flags, &rmqm);
085cc7d5 2351 if (unlikely(page == NULL))
1da177e4 2352 break;
81eabcbe
MG
2353
2354 /*
0fac3ba5
VB
2355 * Split buddy pages returned by expand() are received here in
2356 * physical page order. The page is added to the tail of
2357 * caller's list. From the callers perspective, the linked list
2358 * is ordered by page number under some conditions. This is
2359 * useful for IO devices that can forward direction from the
2360 * head, thus also in the physical page order. This is useful
2361 * for IO devices that can merge IO requests if the physical
2362 * pages are ordered properly.
81eabcbe 2363 */
bf75f200 2364 list_add_tail(&page->pcp_list, list);
1da177e4 2365 }
57490774 2366 spin_unlock_irqrestore(&zone->lock, flags);
2ede3c13 2367
700d2e9a 2368 return i;
1da177e4
LT
2369}
2370
51a755c5
YH
2371/*
2372 * Called from the vmstat counter updater to decay the PCP high.
2373 * Return whether there are addition works to do.
2374 */
2375int decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp)
2376{
2377 int high_min, to_drain, batch;
2378 int todo = 0;
2379
2380 high_min = READ_ONCE(pcp->high_min);
2381 batch = READ_ONCE(pcp->batch);
2382 /*
2383 * Decrease pcp->high periodically to try to free possible
2384 * idle PCP pages. And, avoid to free too many pages to
2385 * control latency. This caps pcp->high decrement too.
2386 */
2387 if (pcp->high > high_min) {
2388 pcp->high = max3(pcp->count - (batch << CONFIG_PCP_BATCH_SCALE_MAX),
2389 pcp->high - (pcp->high >> 3), high_min);
2390 if (pcp->high > high_min)
2391 todo++;
2392 }
2393
2394 to_drain = pcp->count - pcp->high;
2395 if (to_drain > 0) {
2396 spin_lock(&pcp->lock);
2397 free_pcppages_bulk(zone, to_drain, pcp, 0);
2398 spin_unlock(&pcp->lock);
2399 todo++;
2400 }
2401
2402 return todo;
2403}
2404
4ae7c039 2405#ifdef CONFIG_NUMA
8fce4d8e 2406/*
4037d452
CL
2407 * Called from the vmstat counter updater to drain pagesets of this
2408 * currently executing processor on remote nodes after they have
2409 * expired.
8fce4d8e 2410 */
4037d452 2411void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
4ae7c039 2412{
7be12fc9 2413 int to_drain, batch;
4ae7c039 2414
4db0c3c2 2415 batch = READ_ONCE(pcp->batch);
7be12fc9 2416 to_drain = min(pcp->count, batch);
4b23a68f 2417 if (to_drain > 0) {
57490774 2418 spin_lock(&pcp->lock);
fd56eef2 2419 free_pcppages_bulk(zone, to_drain, pcp, 0);
57490774 2420 spin_unlock(&pcp->lock);
4b23a68f 2421 }
4ae7c039
CL
2422}
2423#endif
2424
9f8f2172 2425/*
93481ff0 2426 * Drain pcplists of the indicated processor and zone.
9f8f2172 2427 */
93481ff0 2428static void drain_pages_zone(unsigned int cpu, struct zone *zone)
1da177e4 2429{
55f77df7 2430 struct per_cpu_pages *pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
66eca102 2431 int count;
1da177e4 2432
66eca102 2433 do {
57490774 2434 spin_lock(&pcp->lock);
66eca102
LZ
2435 count = pcp->count;
2436 if (count) {
2437 int to_drain = min(count,
2438 pcp->batch << CONFIG_PCP_BATCH_SCALE_MAX);
2439
2440 free_pcppages_bulk(zone, to_drain, pcp, 0);
2441 count -= to_drain;
2442 }
57490774 2443 spin_unlock(&pcp->lock);
66eca102 2444 } while (count);
93481ff0 2445}
3dfa5721 2446
93481ff0
VB
2447/*
2448 * Drain pcplists of all zones on the indicated processor.
93481ff0
VB
2449 */
2450static void drain_pages(unsigned int cpu)
2451{
2452 struct zone *zone;
2453
2454 for_each_populated_zone(zone) {
2455 drain_pages_zone(cpu, zone);
1da177e4
LT
2456 }
2457}
1da177e4 2458
9f8f2172
CL
2459/*
2460 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
2461 */
93481ff0 2462void drain_local_pages(struct zone *zone)
9f8f2172 2463{
93481ff0
VB
2464 int cpu = smp_processor_id();
2465
2466 if (zone)
2467 drain_pages_zone(cpu, zone);
2468 else
2469 drain_pages(cpu);
9f8f2172
CL
2470}
2471
2472/*
ec6e8c7e
VB
2473 * The implementation of drain_all_pages(), exposing an extra parameter to
2474 * drain on all cpus.
93481ff0 2475 *
ec6e8c7e
VB
2476 * drain_all_pages() is optimized to only execute on cpus where pcplists are
2477 * not empty. The check for non-emptiness can however race with a free to
2478 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers
2479 * that need the guarantee that every CPU has drained can disable the
2480 * optimizing racy check.
9f8f2172 2481 */
3b1f3658 2482static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
9f8f2172 2483{
74046494 2484 int cpu;
74046494
GBY
2485
2486 /*
041711ce 2487 * Allocate in the BSS so we won't require allocation in
74046494
GBY
2488 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
2489 */
2490 static cpumask_t cpus_with_pcps;
2491
bd233f53
MG
2492 /*
2493 * Do not drain if one is already in progress unless it's specific to
2494 * a zone. Such callers are primarily CMA and memory hotplug and need
2495 * the drain to be complete when the call returns.
2496 */
2497 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
2498 if (!zone)
2499 return;
2500 mutex_lock(&pcpu_drain_mutex);
2501 }
0ccce3b9 2502
74046494
GBY
2503 /*
2504 * We don't care about racing with CPU hotplug event
2505 * as offline notification will cause the notified
2506 * cpu to drain that CPU pcps and on_each_cpu_mask
2507 * disables preemption as part of its processing
2508 */
2509 for_each_online_cpu(cpu) {
28f836b6 2510 struct per_cpu_pages *pcp;
93481ff0 2511 struct zone *z;
74046494 2512 bool has_pcps = false;
93481ff0 2513
ec6e8c7e
VB
2514 if (force_all_cpus) {
2515 /*
2516 * The pcp.count check is racy, some callers need a
2517 * guarantee that no cpu is missed.
2518 */
2519 has_pcps = true;
2520 } else if (zone) {
28f836b6
MG
2521 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
2522 if (pcp->count)
74046494 2523 has_pcps = true;
93481ff0
VB
2524 } else {
2525 for_each_populated_zone(z) {
28f836b6
MG
2526 pcp = per_cpu_ptr(z->per_cpu_pageset, cpu);
2527 if (pcp->count) {
93481ff0
VB
2528 has_pcps = true;
2529 break;
2530 }
74046494
GBY
2531 }
2532 }
93481ff0 2533
74046494
GBY
2534 if (has_pcps)
2535 cpumask_set_cpu(cpu, &cpus_with_pcps);
2536 else
2537 cpumask_clear_cpu(cpu, &cpus_with_pcps);
2538 }
0ccce3b9 2539
bd233f53 2540 for_each_cpu(cpu, &cpus_with_pcps) {
443c2acc
NSJ
2541 if (zone)
2542 drain_pages_zone(cpu, zone);
2543 else
2544 drain_pages(cpu);
0ccce3b9 2545 }
bd233f53
MG
2546
2547 mutex_unlock(&pcpu_drain_mutex);
9f8f2172
CL
2548}
2549
ec6e8c7e
VB
2550/*
2551 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
2552 *
2553 * When zone parameter is non-NULL, spill just the single zone's pages.
ec6e8c7e
VB
2554 */
2555void drain_all_pages(struct zone *zone)
2556{
2557 __drain_all_pages(zone, false);
2558}
2559
51a755c5 2560static int nr_pcp_free(struct per_cpu_pages *pcp, int batch, int high, bool free_high)
3b12e7e9
MG
2561{
2562 int min_nr_free, max_nr_free;
2563
51a755c5 2564 /* Free as much as possible if batch freeing high-order pages. */
f26b3fa0 2565 if (unlikely(free_high))
51a755c5 2566 return min(pcp->count, batch << CONFIG_PCP_BATCH_SCALE_MAX);
f26b3fa0 2567
3b12e7e9
MG
2568 /* Check for PCP disabled or boot pageset */
2569 if (unlikely(high < batch))
2570 return 1;
2571
2572 /* Leave at least pcp->batch pages on the list */
2573 min_nr_free = batch;
2574 max_nr_free = high - batch;
2575
2576 /*
6ccdcb6d
YH
2577 * Increase the batch number to the number of the consecutive
2578 * freed pages to reduce zone lock contention.
3b12e7e9 2579 */
6ccdcb6d 2580 batch = clamp_t(int, pcp->free_count, min_nr_free, max_nr_free);
3b12e7e9
MG
2581
2582 return batch;
2583}
2584
f26b3fa0 2585static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone,
51a755c5 2586 int batch, bool free_high)
c49c2c47 2587{
51a755c5 2588 int high, high_min, high_max;
c49c2c47 2589
51a755c5
YH
2590 high_min = READ_ONCE(pcp->high_min);
2591 high_max = READ_ONCE(pcp->high_max);
2592 high = pcp->high = clamp(pcp->high, high_min, high_max);
2593
2594 if (unlikely(!high))
c49c2c47
MG
2595 return 0;
2596
51a755c5
YH
2597 if (unlikely(free_high)) {
2598 pcp->high = max(high - (batch << CONFIG_PCP_BATCH_SCALE_MAX),
2599 high_min);
2600 return 0;
2601 }
c49c2c47
MG
2602
2603 /*
2604 * If reclaim is active, limit the number of pages that can be
2605 * stored on pcp lists
2606 */
51a755c5 2607 if (test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags)) {
6ccdcb6d
YH
2608 int free_count = max_t(int, pcp->free_count, batch);
2609
2610 pcp->high = max(high - free_count, high_min);
51a755c5
YH
2611 return min(batch << 2, pcp->high);
2612 }
2613
57c0419c
YH
2614 if (high_min == high_max)
2615 return high;
2616
2617 if (test_bit(ZONE_BELOW_HIGH, &zone->flags)) {
6ccdcb6d
YH
2618 int free_count = max_t(int, pcp->free_count, batch);
2619
2620 pcp->high = max(high - free_count, high_min);
57c0419c
YH
2621 high = max(pcp->count, high_min);
2622 } else if (pcp->count >= high) {
6ccdcb6d 2623 int need_high = pcp->free_count + batch;
51a755c5
YH
2624
2625 /* pcp->high should be large enough to hold batch freed pages */
2626 if (pcp->high < need_high)
2627 pcp->high = clamp(need_high, high_min, high_max);
2628 }
2629
2630 return high;
c49c2c47
MG
2631}
2632
520128a1
MWO
2633static void free_frozen_page_commit(struct zone *zone,
2634 struct per_cpu_pages *pcp, struct page *page, int migratetype,
8c57b687 2635 unsigned int order, fpi_t fpi_flags)
9cca35d4 2636{
51a755c5 2637 int high, batch;
44042b44 2638 int pindex;
ca71fe1a 2639 bool free_high = false;
9cca35d4 2640
c0a24239
YH
2641 /*
2642 * On freeing, reduce the number of pages that are batch allocated.
2643 * See nr_pcp_alloc() where alloc_factor is increased for subsequent
2644 * allocations.
2645 */
2646 pcp->alloc_factor >>= 1;
15cd9004 2647 __count_vm_events(PGFREE, 1 << order);
44042b44 2648 pindex = order_to_pindex(migratetype, order);
bf75f200 2649 list_add(&page->pcp_list, &pcp->lists[pindex]);
44042b44 2650 pcp->count += 1 << order;
f26b3fa0 2651
51a755c5 2652 batch = READ_ONCE(pcp->batch);
f26b3fa0
MG
2653 /*
2654 * As high-order pages other than THP's stored on PCP can contribute
2655 * to fragmentation, limit the number stored when PCP is heavily
2656 * freeing without allocation. The remainder after bulk freeing
2657 * stops will be drained from vmstat refresh context.
2658 */
ca71fe1a 2659 if (order && order <= PAGE_ALLOC_COSTLY_ORDER) {
c544a952 2660 free_high = (pcp->free_count >= (batch + pcp->high_min / 2) &&
362d37a1
YH
2661 (pcp->flags & PCPF_PREV_FREE_HIGH_ORDER) &&
2662 (!(pcp->flags & PCPF_FREE_HIGH_BATCH) ||
cd348c5e 2663 pcp->count >= batch));
ca71fe1a
YH
2664 pcp->flags |= PCPF_PREV_FREE_HIGH_ORDER;
2665 } else if (pcp->flags & PCPF_PREV_FREE_HIGH_ORDER) {
2666 pcp->flags &= ~PCPF_PREV_FREE_HIGH_ORDER;
2667 }
6ccdcb6d
YH
2668 if (pcp->free_count < (batch << CONFIG_PCP_BATCH_SCALE_MAX))
2669 pcp->free_count += (1 << order);
8c57b687
AS
2670
2671 if (unlikely(fpi_flags & FPI_TRYLOCK)) {
2672 /*
2673 * Do not attempt to take a zone lock. Let pcp->count get
2674 * over high mark temporarily.
2675 */
2676 return;
2677 }
51a755c5 2678 high = nr_pcp_high(pcp, zone, batch, free_high);
3b12e7e9 2679 if (pcp->count >= high) {
51a755c5
YH
2680 free_pcppages_bulk(zone, nr_pcp_free(pcp, batch, high, free_high),
2681 pcp, pindex);
57c0419c
YH
2682 if (test_bit(ZONE_BELOW_HIGH, &zone->flags) &&
2683 zone_watermark_ok(zone, 0, high_wmark_pages(zone),
2684 ZONE_MOVABLE, 0))
2685 clear_bit(ZONE_BELOW_HIGH, &zone->flags);
3b12e7e9 2686 }
9cca35d4 2687}
5f8dcc21 2688
9cca35d4 2689/*
44042b44 2690 * Free a pcp page
9cca35d4 2691 */
8c57b687
AS
2692static void __free_frozen_pages(struct page *page, unsigned int order,
2693 fpi_t fpi_flags)
9cca35d4 2694{
4b23a68f
MG
2695 unsigned long __maybe_unused UP_flags;
2696 struct per_cpu_pages *pcp;
2697 struct zone *zone;
9cca35d4 2698 unsigned long pfn = page_to_pfn(page);
55612e80 2699 int migratetype;
9cca35d4 2700
5b8d7591 2701 if (!pcp_allowed_order(order)) {
8c57b687 2702 __free_pages_ok(page, order, fpi_flags);
5b8d7591
MWO
2703 return;
2704 }
2705
17edeb5d 2706 if (!free_pages_prepare(page, order))
9cca35d4 2707 return;
da456f14 2708
5f8dcc21
MG
2709 /*
2710 * We only track unmovable, reclaimable and movable on pcp lists.
df1acc85 2711 * Place ISOLATE pages on the isolated list because they are being
7b086755
JW
2712 * offlined but treat HIGHATOMIC and CMA as movable pages so we can
2713 * get those areas back if necessary. Otherwise, we may have to free
5f8dcc21
MG
2714 * excessively into the page allocator
2715 */
d4056386 2716 zone = page_zone(page);
55612e80 2717 migratetype = get_pfnblock_migratetype(page, pfn);
df1acc85 2718 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
194159fb 2719 if (unlikely(is_migrate_isolate(migratetype))) {
8c57b687 2720 free_one_page(zone, page, pfn, order, fpi_flags);
9cca35d4 2721 return;
5f8dcc21 2722 }
55612e80 2723 migratetype = MIGRATE_MOVABLE;
5f8dcc21
MG
2724 }
2725
8c57b687
AS
2726 if (unlikely((fpi_flags & FPI_TRYLOCK) && IS_ENABLED(CONFIG_PREEMPT_RT)
2727 && (in_nmi() || in_hardirq()))) {
2728 add_page_to_zone_llist(zone, page, order);
2729 return;
2730 }
4b23a68f 2731 pcp_trylock_prepare(UP_flags);
57490774 2732 pcp = pcp_spin_trylock(zone->per_cpu_pageset);
01b44456 2733 if (pcp) {
8c57b687 2734 free_frozen_page_commit(zone, pcp, page, migratetype, order, fpi_flags);
57490774 2735 pcp_spin_unlock(pcp);
4b23a68f 2736 } else {
8c57b687 2737 free_one_page(zone, page, pfn, order, fpi_flags);
4b23a68f
MG
2738 }
2739 pcp_trylock_finish(UP_flags);
1da177e4
LT
2740}
2741
8c57b687
AS
2742void free_frozen_pages(struct page *page, unsigned int order)
2743{
2744 __free_frozen_pages(page, order, FPI_NONE);
2745}
2746
cc59850e 2747/*
31b2ff82 2748 * Free a batch of folios
cc59850e 2749 */
90491d87 2750void free_unref_folios(struct folio_batch *folios)
cc59850e 2751{
57490774 2752 unsigned long __maybe_unused UP_flags;
4b23a68f
MG
2753 struct per_cpu_pages *pcp = NULL;
2754 struct zone *locked_zone = NULL;
9cbe97ba 2755 int i, j;
9cca35d4 2756
90491d87
MWO
2757 /* Prepare folios for freeing */
2758 for (i = 0, j = 0; i < folios->nr; i++) {
2759 struct folio *folio = folios->folios[i];
7c76d922 2760 unsigned long pfn = folio_pfn(folio);
31b2ff82 2761 unsigned int order = folio_order(folio);
9cca35d4 2762
17edeb5d 2763 if (!free_pages_prepare(&folio->page, order))
053cfda1 2764 continue;
df1acc85 2765 /*
9cbe97ba
JW
2766 * Free orders not handled on the PCP directly to the
2767 * allocator.
df1acc85 2768 */
9cbe97ba 2769 if (!pcp_allowed_order(order)) {
55612e80
JW
2770 free_one_page(folio_zone(folio), &folio->page,
2771 pfn, order, FPI_NONE);
47aef601 2772 continue;
df1acc85 2773 }
31b2ff82 2774 folio->private = (void *)(unsigned long)order;
90491d87
MWO
2775 if (j != i)
2776 folios->folios[j] = folio;
2777 j++;
9cca35d4 2778 }
90491d87 2779 folios->nr = j;
cc59850e 2780
90491d87
MWO
2781 for (i = 0; i < folios->nr; i++) {
2782 struct folio *folio = folios->folios[i];
7c76d922 2783 struct zone *zone = folio_zone(folio);
17edeb5d 2784 unsigned long pfn = folio_pfn(folio);
31b2ff82 2785 unsigned int order = (unsigned long)folio->private;
9cbe97ba 2786 int migratetype;
4b23a68f 2787
31b2ff82 2788 folio->private = NULL;
17edeb5d 2789 migratetype = get_pfnblock_migratetype(&folio->page, pfn);
c3e58a70 2790
90491d87 2791 /* Different zone requires a different pcp lock */
9cbe97ba
JW
2792 if (zone != locked_zone ||
2793 is_migrate_isolate(migratetype)) {
57490774
MG
2794 if (pcp) {
2795 pcp_spin_unlock(pcp);
2796 pcp_trylock_finish(UP_flags);
9cbe97ba
JW
2797 locked_zone = NULL;
2798 pcp = NULL;
2799 }
2800
2801 /*
2802 * Free isolated pages directly to the
520128a1 2803 * allocator, see comment in free_frozen_pages.
9cbe97ba
JW
2804 */
2805 if (is_migrate_isolate(migratetype)) {
2806 free_one_page(zone, &folio->page, pfn,
55612e80 2807 order, FPI_NONE);
9cbe97ba 2808 continue;
57490774 2809 }
01b44456 2810
57490774 2811 /*
7c76d922 2812 * trylock is necessary as folios may be getting freed
57490774
MG
2813 * from IRQ or SoftIRQ context after an IO completion.
2814 */
2815 pcp_trylock_prepare(UP_flags);
2816 pcp = pcp_spin_trylock(zone->per_cpu_pageset);
2817 if (unlikely(!pcp)) {
2818 pcp_trylock_finish(UP_flags);
17edeb5d 2819 free_one_page(zone, &folio->page, pfn,
55612e80 2820 order, FPI_NONE);
57490774
MG
2821 continue;
2822 }
4b23a68f 2823 locked_zone = zone;
4b23a68f
MG
2824 }
2825
47aef601
DB
2826 /*
2827 * Non-isolated types over MIGRATE_PCPTYPES get added
2828 * to the MIGRATE_MOVABLE pcp list.
2829 */
47aef601
DB
2830 if (unlikely(migratetype >= MIGRATE_PCPTYPES))
2831 migratetype = MIGRATE_MOVABLE;
2832
7c76d922 2833 trace_mm_page_free_batched(&folio->page);
520128a1 2834 free_frozen_page_commit(zone, pcp, &folio->page, migratetype,
8c57b687 2835 order, FPI_NONE);
cc59850e 2836 }
4b23a68f 2837
57490774
MG
2838 if (pcp) {
2839 pcp_spin_unlock(pcp);
2840 pcp_trylock_finish(UP_flags);
2841 }
90491d87 2842 folio_batch_reinit(folios);
cc59850e
KK
2843}
2844
8dfcc9ba
NP
2845/*
2846 * split_page takes a non-compound higher-order page, and splits it into
2847 * n (1<<order) sub-pages: page[0..n]
2848 * Each sub-page must be freed individually.
2849 *
2850 * Note: this is probably too low level an operation for use in drivers.
2851 * Please consult with lkml before using this in your driver.
2852 */
2853void split_page(struct page *page, unsigned int order)
2854{
2855 int i;
2856
309381fe
SL
2857 VM_BUG_ON_PAGE(PageCompound(page), page);
2858 VM_BUG_ON_PAGE(!page_count(page), page);
b1eeab67 2859
a9627bc5 2860 for (i = 1; i < (1 << order); i++)
7835e98b 2861 set_page_refcounted(page + i);
46d44d09 2862 split_page_owner(page, order, 0);
95599ef6 2863 pgalloc_tag_split(page_folio(page), order, 0);
1506c255 2864 split_page_memcg(page, order);
8dfcc9ba 2865}
5853ff23 2866EXPORT_SYMBOL_GPL(split_page);
8dfcc9ba 2867
3c605096 2868int __isolate_free_page(struct page *page, unsigned int order)
748446bb 2869{
9a157dd8
KW
2870 struct zone *zone = page_zone(page);
2871 int mt = get_pageblock_migratetype(page);
748446bb 2872
194159fb 2873 if (!is_migrate_isolate(mt)) {
9a157dd8 2874 unsigned long watermark;
8348faf9
VB
2875 /*
2876 * Obey watermarks as if the page was being allocated. We can
2877 * emulate a high-order watermark check with a raised order-0
2878 * watermark, because we already know our high-order page
2879 * exists.
2880 */
fd1444b2 2881 watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
d883c6cf 2882 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
2e30abd1 2883 return 0;
2e30abd1 2884 }
748446bb 2885
e0932b6c 2886 del_page_from_free_list(page, zone, order, mt);
2139cbe6 2887
400bc7fd 2888 /*
2889 * Set the pageblock if the isolated page is at least half of a
2890 * pageblock
2891 */
748446bb
MG
2892 if (order >= pageblock_order - 1) {
2893 struct page *endpage = page + (1 << order) - 1;
47118af0
MN
2894 for (; page < endpage; page += pageblock_nr_pages) {
2895 int mt = get_pageblock_migratetype(page);
1dd214b8
ZY
2896 /*
2897 * Only change normal pageblocks (i.e., they can merge
2898 * with others)
2899 */
f37c0f68 2900 if (migratetype_is_mergeable(mt))
e0932b6c 2901 move_freepages_block(zone, page, mt,
f37c0f68 2902 MIGRATE_MOVABLE);
47118af0 2903 }
748446bb
MG
2904 }
2905
8fb74b9f 2906 return 1UL << order;
1fb3f8ca
MG
2907}
2908
624f58d8
AD
2909/**
2910 * __putback_isolated_page - Return a now-isolated page back where we got it
2911 * @page: Page that was isolated
2912 * @order: Order of the isolated page
e6a0a7ad 2913 * @mt: The page's pageblock's migratetype
624f58d8
AD
2914 *
2915 * This function is meant to return a page pulled from the free lists via
2916 * __isolate_free_page back to the free lists they were pulled from.
2917 */
2918void __putback_isolated_page(struct page *page, unsigned int order, int mt)
2919{
2920 struct zone *zone = page_zone(page);
2921
2922 /* zone lock should be held when this function is called */
2923 lockdep_assert_held(&zone->lock);
2924
2925 /* Return isolated page to tail of freelist. */
f04a5d5d 2926 __free_one_page(page, page_to_pfn(page), zone, order, mt,
47b6a24a 2927 FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL);
624f58d8
AD
2928}
2929
060e7417
MG
2930/*
2931 * Update NUMA hit/miss statistics
060e7417 2932 */
3e23060b
MG
2933static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
2934 long nr_account)
060e7417
MG
2935{
2936#ifdef CONFIG_NUMA
3a321d2a 2937 enum numa_stat_item local_stat = NUMA_LOCAL;
060e7417 2938
4518085e
KW
2939 /* skip numa counters update if numa stats is disabled */
2940 if (!static_branch_likely(&vm_numa_stat_key))
2941 return;
2942
c1093b74 2943 if (zone_to_nid(z) != numa_node_id())
060e7417 2944 local_stat = NUMA_OTHER;
060e7417 2945
c1093b74 2946 if (zone_to_nid(z) == zone_to_nid(preferred_zone))
3e23060b 2947 __count_numa_events(z, NUMA_HIT, nr_account);
2df26639 2948 else {
3e23060b
MG
2949 __count_numa_events(z, NUMA_MISS, nr_account);
2950 __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account);
060e7417 2951 }
3e23060b 2952 __count_numa_events(z, local_stat, nr_account);
060e7417
MG
2953#endif
2954}
2955
589d9973
MG
2956static __always_inline
2957struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
2958 unsigned int order, unsigned int alloc_flags,
2959 int migratetype)
2960{
2961 struct page *page;
2962 unsigned long flags;
2963
2964 do {
2965 page = NULL;
c5bb27e2
AS
2966 if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
2967 if (!spin_trylock_irqsave(&zone->lock, flags))
97769a53 2968 return NULL;
c5bb27e2 2969 } else {
97769a53
AS
2970 spin_lock_irqsave(&zone->lock, flags);
2971 }
eb2e2b42 2972 if (alloc_flags & ALLOC_HIGHATOMIC)
589d9973
MG
2973 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
2974 if (!page) {
90abee6d
JW
2975 enum rmqueue_mode rmqm = RMQUEUE_NORMAL;
2976
2977 page = __rmqueue(zone, order, migratetype, alloc_flags, &rmqm);
eb2e2b42
MG
2978
2979 /*
281dd25c
MF
2980 * If the allocation fails, allow OOM handling and
2981 * order-0 (atomic) allocs access to HIGHATOMIC
2982 * reserves as failing now is worse than failing a
2983 * high-order atomic allocation in the future.
eb2e2b42 2984 */
281dd25c 2985 if (!page && (alloc_flags & (ALLOC_OOM|ALLOC_NON_BLOCK)))
eb2e2b42
MG
2986 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
2987
589d9973
MG
2988 if (!page) {
2989 spin_unlock_irqrestore(&zone->lock, flags);
2990 return NULL;
2991 }
2992 }
589d9973
MG
2993 spin_unlock_irqrestore(&zone->lock, flags);
2994 } while (check_new_pages(page, order));
2995
2996 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
2997 zone_statistics(preferred_zone, zone, 1);
2998
2999 return page;
3000}
3001
51a755c5 3002static int nr_pcp_alloc(struct per_cpu_pages *pcp, struct zone *zone, int order)
c0a24239 3003{
51a755c5
YH
3004 int high, base_batch, batch, max_nr_alloc;
3005 int high_max, high_min;
c0a24239 3006
51a755c5
YH
3007 base_batch = READ_ONCE(pcp->batch);
3008 high_min = READ_ONCE(pcp->high_min);
3009 high_max = READ_ONCE(pcp->high_max);
3010 high = pcp->high = clamp(pcp->high, high_min, high_max);
c0a24239
YH
3011
3012 /* Check for PCP disabled or boot pageset */
51a755c5 3013 if (unlikely(high < base_batch))
c0a24239
YH
3014 return 1;
3015
51a755c5
YH
3016 if (order)
3017 batch = base_batch;
3018 else
3019 batch = (base_batch << pcp->alloc_factor);
3020
c0a24239 3021 /*
51a755c5
YH
3022 * If we had larger pcp->high, we could avoid to allocate from
3023 * zone.
c0a24239 3024 */
57c0419c 3025 if (high_min != high_max && !test_bit(ZONE_BELOW_HIGH, &zone->flags))
51a755c5
YH
3026 high = pcp->high = min(high + batch, high_max);
3027
c0a24239 3028 if (!order) {
51a755c5
YH
3029 max_nr_alloc = max(high - pcp->count - base_batch, base_batch);
3030 /*
3031 * Double the number of pages allocated each time there is
3032 * subsequent allocation of order-0 pages without any freeing.
3033 */
c0a24239
YH
3034 if (batch <= max_nr_alloc &&
3035 pcp->alloc_factor < CONFIG_PCP_BATCH_SCALE_MAX)
3036 pcp->alloc_factor++;
3037 batch = min(batch, max_nr_alloc);
3038 }
3039
3040 /*
3041 * Scale batch relative to order if batch implies free pages
3042 * can be stored on the PCP. Batch can be 1 for small zones or
3043 * for boot pagesets which should never store free pages as
3044 * the pages may belong to arbitrary zones.
3045 */
3046 if (batch > 1)
3047 batch = max(batch >> order, 2);
3048
3049 return batch;
3050}
3051
066b2393 3052/* Remove page from the per-cpu list, caller must protect the list */
3b822017 3053static inline
44042b44
MG
3054struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
3055 int migratetype,
6bb15450 3056 unsigned int alloc_flags,
453f85d4 3057 struct per_cpu_pages *pcp,
066b2393
MG
3058 struct list_head *list)
3059{
3060 struct page *page;
3061
3062 do {
3063 if (list_empty(list)) {
51a755c5 3064 int batch = nr_pcp_alloc(pcp, zone, order);
44042b44
MG
3065 int alloced;
3066
44042b44
MG
3067 alloced = rmqueue_bulk(zone, order,
3068 batch, list,
6bb15450 3069 migratetype, alloc_flags);
44042b44
MG
3070
3071 pcp->count += alloced << order;
066b2393
MG
3072 if (unlikely(list_empty(list)))
3073 return NULL;
3074 }
3075
bf75f200
MG
3076 page = list_first_entry(list, struct page, pcp_list);
3077 list_del(&page->pcp_list);
44042b44 3078 pcp->count -= 1 << order;
700d2e9a 3079 } while (check_new_pages(page, order));
066b2393
MG
3080
3081 return page;
3082}
3083
3084/* Lock and remove page from the per-cpu list */
3085static struct page *rmqueue_pcplist(struct zone *preferred_zone,
44042b44 3086 struct zone *zone, unsigned int order,
663d0cfd 3087 int migratetype, unsigned int alloc_flags)
066b2393
MG
3088{
3089 struct per_cpu_pages *pcp;
3090 struct list_head *list;
066b2393 3091 struct page *page;
4b23a68f 3092 unsigned long __maybe_unused UP_flags;
066b2393 3093
57490774 3094 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */
4b23a68f 3095 pcp_trylock_prepare(UP_flags);
57490774 3096 pcp = pcp_spin_trylock(zone->per_cpu_pageset);
01b44456 3097 if (!pcp) {
4b23a68f 3098 pcp_trylock_finish(UP_flags);
4b23a68f
MG
3099 return NULL;
3100 }
3b12e7e9
MG
3101
3102 /*
3103 * On allocation, reduce the number of pages that are batch freed.
3104 * See nr_pcp_free() where free_factor is increased for subsequent
3105 * frees.
3106 */
6ccdcb6d 3107 pcp->free_count >>= 1;
44042b44
MG
3108 list = &pcp->lists[order_to_pindex(migratetype, order)];
3109 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
57490774 3110 pcp_spin_unlock(pcp);
4b23a68f 3111 pcp_trylock_finish(UP_flags);
066b2393 3112 if (page) {
15cd9004 3113 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
3e23060b 3114 zone_statistics(preferred_zone, zone, 1);
066b2393 3115 }
066b2393
MG
3116 return page;
3117}
3118
1da177e4 3119/*
a57ae9ef
RX
3120 * Allocate a page from the given zone.
3121 * Use pcplists for THP or "cheap" high-order allocations.
1da177e4 3122 */
b073d7f8
AP
3123
3124/*
3125 * Do not instrument rmqueue() with KMSAN. This function may call
3126 * __msan_poison_alloca() through a call to set_pfnblock_flags_mask().
3127 * If __msan_poison_alloca() attempts to allocate pages for the stack depot, it
3128 * may call rmqueue() again, which will result in a deadlock.
1da177e4 3129 */
b073d7f8 3130__no_sanitize_memory
0a15c3e9 3131static inline
066b2393 3132struct page *rmqueue(struct zone *preferred_zone,
7aeb09f9 3133 struct zone *zone, unsigned int order,
c603844b
MG
3134 gfp_t gfp_flags, unsigned int alloc_flags,
3135 int migratetype)
1da177e4 3136{
689bcebf 3137 struct page *page;
1da177e4 3138
44042b44 3139 if (likely(pcp_allowed_order(order))) {
f945116e
JW
3140 page = rmqueue_pcplist(preferred_zone, zone, order,
3141 migratetype, alloc_flags);
3142 if (likely(page))
3143 goto out;
066b2393 3144 }
83b9355b 3145
589d9973
MG
3146 page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags,
3147 migratetype);
1da177e4 3148
066b2393 3149out:
73444bc4 3150 /* Separate test+clear to avoid unnecessary atomics */
3b11edf1
TH
3151 if ((alloc_flags & ALLOC_KSWAPD) &&
3152 unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) {
73444bc4
MG
3153 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
3154 wakeup_kswapd(zone, 0, 0, zone_idx(zone));
3155 }
3156
066b2393 3157 VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
1da177e4
LT
3158 return page;
3159}
3160
a4138a27
JW
3161/*
3162 * Reserve the pageblock(s) surrounding an allocation request for
3163 * exclusive use of high-order atomic allocations if there are no
3164 * empty page blocks that contain a page with a suitable order
3165 */
3166static void reserve_highatomic_pageblock(struct page *page, int order,
3167 struct zone *zone)
3168{
3169 int mt;
3170 unsigned long max_managed, flags;
3171
3172 /*
3173 * The number reserved as: minimum is 1 pageblock, maximum is
3174 * roughly 1% of a zone. But if 1% of a zone falls below a
3175 * pageblock size, then don't reserve any pageblocks.
3176 * Check is race-prone but harmless.
3177 */
3178 if ((zone_managed_pages(zone) / 100) < pageblock_nr_pages)
3179 return;
3180 max_managed = ALIGN((zone_managed_pages(zone) / 100), pageblock_nr_pages);
3181 if (zone->nr_reserved_highatomic >= max_managed)
3182 return;
3183
3184 spin_lock_irqsave(&zone->lock, flags);
3185
3186 /* Recheck the nr_reserved_highatomic limit under the lock */
3187 if (zone->nr_reserved_highatomic >= max_managed)
3188 goto out_unlock;
3189
3190 /* Yoink! */
3191 mt = get_pageblock_migratetype(page);
3192 /* Only reserve normal pageblocks (i.e., they can merge with others) */
3193 if (!migratetype_is_mergeable(mt))
3194 goto out_unlock;
3195
3196 if (order < pageblock_order) {
3197 if (move_freepages_block(zone, page, mt, MIGRATE_HIGHATOMIC) == -1)
3198 goto out_unlock;
3199 zone->nr_reserved_highatomic += pageblock_nr_pages;
3200 } else {
3201 change_pageblock_range(page, order, MIGRATE_HIGHATOMIC);
3202 zone->nr_reserved_highatomic += 1 << order;
3203 }
3204
3205out_unlock:
3206 spin_unlock_irqrestore(&zone->lock, flags);
3207}
3208
3209/*
3210 * Used when an allocation is about to fail under memory pressure. This
3211 * potentially hurts the reliability of high-order allocations when under
3212 * intense memory pressure but failed atomic allocations should be easier
3213 * to recover from than an OOM.
3214 *
3215 * If @force is true, try to unreserve pageblocks even though highatomic
3216 * pageblock is exhausted.
3217 */
3218static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
3219 bool force)
3220{
3221 struct zonelist *zonelist = ac->zonelist;
3222 unsigned long flags;
3223 struct zoneref *z;
3224 struct zone *zone;
3225 struct page *page;
3226 int order;
3227 int ret;
3228
3229 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
3230 ac->nodemask) {
3231 /*
3232 * Preserve at least one pageblock unless memory pressure
3233 * is really high.
3234 */
3235 if (!force && zone->nr_reserved_highatomic <=
3236 pageblock_nr_pages)
3237 continue;
3238
3239 spin_lock_irqsave(&zone->lock, flags);
3240 for (order = 0; order < NR_PAGE_ORDERS; order++) {
3241 struct free_area *area = &(zone->free_area[order]);
3242 unsigned long size;
3243
3244 page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
3245 if (!page)
3246 continue;
3247
ebc29409 3248 size = max(pageblock_nr_pages, 1UL << order);
a4138a27
JW
3249 /*
3250 * It should never happen but changes to
3251 * locking could inadvertently allow a per-cpu
3252 * drain to add pages to MIGRATE_HIGHATOMIC
3253 * while unreserving so be safe and watch for
3254 * underflows.
3255 */
ebc29409
BJ
3256 if (WARN_ON_ONCE(size > zone->nr_reserved_highatomic))
3257 size = zone->nr_reserved_highatomic;
a4138a27
JW
3258 zone->nr_reserved_highatomic -= size;
3259
3260 /*
3261 * Convert to ac->migratetype and avoid the normal
3262 * pageblock stealing heuristics. Minimally, the caller
3263 * is doing the work and needs the pages. More
3264 * importantly, if the block was always converted to
3265 * MIGRATE_UNMOVABLE or another type then the number
3266 * of pageblocks that cannot be completely freed
3267 * may increase.
3268 */
3269 if (order < pageblock_order)
3270 ret = move_freepages_block(zone, page,
3271 MIGRATE_HIGHATOMIC,
3272 ac->migratetype);
3273 else {
3274 move_to_free_list(page, zone, order,
3275 MIGRATE_HIGHATOMIC,
3276 ac->migratetype);
3277 change_pageblock_range(page, order,
3278 ac->migratetype);
3279 ret = 1;
3280 }
3281 /*
3282 * Reserving the block(s) already succeeded,
3283 * so this should not fail on zone boundaries.
3284 */
3285 WARN_ON_ONCE(ret == -1);
3286 if (ret > 0) {
3287 spin_unlock_irqrestore(&zone->lock, flags);
3288 return ret;
3289 }
3290 }
3291 spin_unlock_irqrestore(&zone->lock, flags);
3292 }
3293
3294 return false;
3295}
3296
f27ce0e1
JK
3297static inline long __zone_watermark_unusable_free(struct zone *z,
3298 unsigned int order, unsigned int alloc_flags)
3299{
f27ce0e1
JK
3300 long unusable_free = (1 << order) - 1;
3301
3302 /*
ab350885 3303 * If the caller does not have rights to reserves below the min
c928807f 3304 * watermark then subtract the free pages reserved for highatomic.
f27ce0e1 3305 */
ab350885 3306 if (likely(!(alloc_flags & ALLOC_RESERVES)))
c928807f 3307 unusable_free += READ_ONCE(z->nr_free_highatomic);
f27ce0e1
JK
3308
3309#ifdef CONFIG_CMA
3310 /* If allocation can't use CMA areas don't use free CMA pages */
3311 if (!(alloc_flags & ALLOC_CMA))
3312 unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
3313#endif
3314
3315 return unusable_free;
3316}
3317
1da177e4 3318/*
97a16fc8
MG
3319 * Return true if free base pages are above 'mark'. For high-order checks it
3320 * will return true of the order-0 watermark is reached and there is at least
3321 * one free page of a suitable size. Checking now avoids taking the zone lock
3322 * to check in the allocation paths if no pages are free.
1da177e4 3323 */
86a294a8 3324bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
97a225e6 3325 int highest_zoneidx, unsigned int alloc_flags,
86a294a8 3326 long free_pages)
1da177e4 3327{
d23ad423 3328 long min = mark;
1da177e4
LT
3329 int o;
3330
0aaa29a5 3331 /* free_pages may go negative - that's OK */
f27ce0e1 3332 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags);
0aaa29a5 3333
ab350885
MG
3334 if (unlikely(alloc_flags & ALLOC_RESERVES)) {
3335 /*
3336 * __GFP_HIGH allows access to 50% of the min reserve as well
3337 * as OOM.
3338 */
1ebbb218 3339 if (alloc_flags & ALLOC_MIN_RESERVE) {
ab350885 3340 min -= min / 2;
0aaa29a5 3341
1ebbb218
MG
3342 /*
3343 * Non-blocking allocations (e.g. GFP_ATOMIC) can
3344 * access more reserves than just __GFP_HIGH. Other
3345 * non-blocking allocations requests such as GFP_NOWAIT
3346 * or (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) do not get
3347 * access to the min reserve.
3348 */
3349 if (alloc_flags & ALLOC_NON_BLOCK)
3350 min -= min / 4;
3351 }
0aaa29a5 3352
cd04ae1e 3353 /*
ab350885 3354 * OOM victims can try even harder than the normal reserve
cd04ae1e
MH
3355 * users on the grounds that it's definitely going to be in
3356 * the exit path shortly and free memory. Any allocation it
3357 * makes during the free path will be small and short-lived.
3358 */
3359 if (alloc_flags & ALLOC_OOM)
3360 min -= min / 2;
cd04ae1e
MH
3361 }
3362
97a16fc8
MG
3363 /*
3364 * Check watermarks for an order-0 allocation request. If these
3365 * are not met, then a high-order request also cannot go ahead
3366 * even if a suitable page happened to be free.
3367 */
97a225e6 3368 if (free_pages <= min + z->lowmem_reserve[highest_zoneidx])
88f5acf8 3369 return false;
1da177e4 3370
97a16fc8
MG
3371 /* If this is an order-0 request then the watermark is fine */
3372 if (!order)
3373 return true;
3374
3375 /* For a high-order request, check at least one suitable page is free */
fd377218 3376 for (o = order; o < NR_PAGE_ORDERS; o++) {
97a16fc8
MG
3377 struct free_area *area = &z->free_area[o];
3378 int mt;
3379
3380 if (!area->nr_free)
3381 continue;
3382
97a16fc8 3383 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
b03641af 3384 if (!free_area_empty(area, mt))
97a16fc8
MG
3385 return true;
3386 }
3387
3388#ifdef CONFIG_CMA
d883c6cf 3389 if ((alloc_flags & ALLOC_CMA) &&
b03641af 3390 !free_area_empty(area, MIGRATE_CMA)) {
97a16fc8 3391 return true;
d883c6cf 3392 }
97a16fc8 3393#endif
eb2e2b42
MG
3394 if ((alloc_flags & (ALLOC_HIGHATOMIC|ALLOC_OOM)) &&
3395 !free_area_empty(area, MIGRATE_HIGHATOMIC)) {
b050e376 3396 return true;
eb2e2b42 3397 }
1da177e4 3398 }
97a16fc8 3399 return false;
88f5acf8
MG
3400}
3401
7aeb09f9 3402bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
97a225e6 3403 int highest_zoneidx, unsigned int alloc_flags)
88f5acf8 3404{
97a225e6 3405 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
88f5acf8
MG
3406 zone_page_state(z, NR_FREE_PAGES));
3407}
3408
48ee5f36 3409static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
97a225e6 3410 unsigned long mark, int highest_zoneidx,
f80b08fc 3411 unsigned int alloc_flags, gfp_t gfp_mask)
48ee5f36 3412{
f27ce0e1 3413 long free_pages;
d883c6cf 3414
f27ce0e1 3415 free_pages = zone_page_state(z, NR_FREE_PAGES);
48ee5f36
MG
3416
3417 /*
3418 * Fast check for order-0 only. If this fails then the reserves
f27ce0e1 3419 * need to be calculated.
48ee5f36 3420 */
f27ce0e1 3421 if (!order) {
9282012f
JK
3422 long usable_free;
3423 long reserved;
f27ce0e1 3424
9282012f
JK
3425 usable_free = free_pages;
3426 reserved = __zone_watermark_unusable_free(z, 0, alloc_flags);
3427
3428 /* reserved may over estimate high-atomic reserves. */
3429 usable_free -= min(usable_free, reserved);
3430 if (usable_free > mark + z->lowmem_reserve[highest_zoneidx])
f27ce0e1
JK
3431 return true;
3432 }
48ee5f36 3433
f80b08fc
CTR
3434 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
3435 free_pages))
3436 return true;
2973d822 3437
f80b08fc 3438 /*
2973d822 3439 * Ignore watermark boosting for __GFP_HIGH order-0 allocations
f80b08fc
CTR
3440 * when checking the min watermark. The min watermark is the
3441 * point where boosting is ignored so that kswapd is woken up
3442 * when below the low watermark.
3443 */
2973d822 3444 if (unlikely(!order && (alloc_flags & ALLOC_MIN_RESERVE) && z->watermark_boost
f80b08fc
CTR
3445 && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) {
3446 mark = z->_watermark[WMARK_MIN];
3447 return __zone_watermark_ok(z, order, mark, highest_zoneidx,
3448 alloc_flags, free_pages);
3449 }
3450
3451 return false;
48ee5f36
MG
3452}
3453
9276b1bc 3454#ifdef CONFIG_NUMA
61bb6cd2
GU
3455int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE;
3456
957f822a
DR
3457static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3458{
e02dc017 3459 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
a55c7454 3460 node_reclaim_distance;
957f822a 3461}
9276b1bc 3462#else /* CONFIG_NUMA */
957f822a
DR
3463static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3464{
3465 return true;
3466}
9276b1bc
PJ
3467#endif /* CONFIG_NUMA */
3468
6bb15450
MG
3469/*
3470 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
3471 * fragmentation is subtle. If the preferred zone was HIGHMEM then
3472 * premature use of a lower zone may cause lowmem pressure problems that
3473 * are worse than fragmentation. If the next zone is ZONE_DMA then it is
3474 * probably too small. It only makes sense to spread allocations to avoid
3475 * fragmentation between the Normal and DMA32 zones.
3476 */
3477static inline unsigned int
0a79cdad 3478alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
6bb15450 3479{
736838e9 3480 unsigned int alloc_flags;
0a79cdad 3481
736838e9
MN
3482 /*
3483 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
3484 * to save a branch.
3485 */
3486 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
0a79cdad 3487
e3aa7df3
JW
3488 if (defrag_mode) {
3489 alloc_flags |= ALLOC_NOFRAGMENT;
3490 return alloc_flags;
3491 }
3492
0a79cdad 3493#ifdef CONFIG_ZONE_DMA32
8139ad04
AR
3494 if (!zone)
3495 return alloc_flags;
3496
6bb15450 3497 if (zone_idx(zone) != ZONE_NORMAL)
8118b82e 3498 return alloc_flags;
6bb15450
MG
3499
3500 /*
3501 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
3502 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume
3503 * on UMA that if Normal is populated then so is DMA32.
3504 */
3505 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
3506 if (nr_online_nodes > 1 && !populated_zone(--zone))
8118b82e 3507 return alloc_flags;
6bb15450 3508
8118b82e 3509 alloc_flags |= ALLOC_NOFRAGMENT;
0a79cdad
MG
3510#endif /* CONFIG_ZONE_DMA32 */
3511 return alloc_flags;
6bb15450 3512}
6bb15450 3513
8e3560d9
PT
3514/* Must be called after current_gfp_context() which can change gfp_mask */
3515static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
3516 unsigned int alloc_flags)
8510e69c
JK
3517{
3518#ifdef CONFIG_CMA
8e3560d9 3519 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
8510e69c 3520 alloc_flags |= ALLOC_CMA;
8510e69c
JK
3521#endif
3522 return alloc_flags;
3523}
3524
7fb1d9fc 3525/*
0798e519 3526 * get_page_from_freelist goes through the zonelist trying to allocate
7fb1d9fc
RS
3527 * a page.
3528 */
3529static struct page *
a9263751
VB
3530get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
3531 const struct alloc_context *ac)
753ee728 3532{
6bb15450 3533 struct zoneref *z;
5117f45d 3534 struct zone *zone;
8a87d695
WY
3535 struct pglist_data *last_pgdat = NULL;
3536 bool last_pgdat_dirty_ok = false;
6bb15450 3537 bool no_fallback;
3b8c0be4 3538
6bb15450 3539retry:
7fb1d9fc 3540 /*
9276b1bc 3541 * Scan zonelist, looking for a zone with enough free.
8adce085 3542 * See also cpuset_current_node_allowed() comment in kernel/cgroup/cpuset.c.
7fb1d9fc 3543 */
6bb15450
MG
3544 no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
3545 z = ac->preferred_zoneref;
30d8ec73
MN
3546 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx,
3547 ac->nodemask) {
be06af00 3548 struct page *page;
e085dbc5
JW
3549 unsigned long mark;
3550
664eedde
MG
3551 if (cpusets_enabled() &&
3552 (alloc_flags & ALLOC_CPUSET) &&
002f2906 3553 !__cpuset_zone_allowed(zone, gfp_mask))
cd38b115 3554 continue;
a756cf59
JW
3555 /*
3556 * When allocating a page cache page for writing, we
281e3726
MG
3557 * want to get it from a node that is within its dirty
3558 * limit, such that no single node holds more than its
a756cf59 3559 * proportional share of globally allowed dirty pages.
281e3726 3560 * The dirty limits take into account the node's
a756cf59
JW
3561 * lowmem reserves and high watermark so that kswapd
3562 * should be able to balance it without having to
3563 * write pages from its LRU list.
3564 *
a756cf59 3565 * XXX: For now, allow allocations to potentially
281e3726 3566 * exceed the per-node dirty limit in the slowpath
c9ab0c4f 3567 * (spread_dirty_pages unset) before going into reclaim,
a756cf59 3568 * which is important when on a NUMA setup the allowed
281e3726 3569 * nodes are together not big enough to reach the
a756cf59 3570 * global limit. The proper fix for these situations
281e3726 3571 * will require awareness of nodes in the
a756cf59
JW
3572 * dirty-throttling and the flusher threads.
3573 */
3b8c0be4 3574 if (ac->spread_dirty_pages) {
8a87d695
WY
3575 if (last_pgdat != zone->zone_pgdat) {
3576 last_pgdat = zone->zone_pgdat;
3577 last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat);
3578 }
3b8c0be4 3579
8a87d695 3580 if (!last_pgdat_dirty_ok)
3b8c0be4 3581 continue;
3b8c0be4 3582 }
7fb1d9fc 3583
e3aa7df3 3584 if (no_fallback && !defrag_mode && nr_online_nodes > 1 &&
29943248 3585 zone != zonelist_zone(ac->preferred_zoneref)) {
6bb15450
MG
3586 int local_nid;
3587
3588 /*
3589 * If moving to a remote node, retry but allow
3590 * fragmenting fallbacks. Locality is more important
3591 * than fragmentation avoidance.
3592 */
29943248 3593 local_nid = zonelist_node_idx(ac->preferred_zoneref);
6bb15450
MG
3594 if (zone_to_nid(zone) != local_nid) {
3595 alloc_flags &= ~ALLOC_NOFRAGMENT;
3596 goto retry;
3597 }
3598 }
3599
23fa022a 3600 cond_accept_memory(zone, order, alloc_flags);
807174a9 3601
57c0419c
YH
3602 /*
3603 * Detect whether the number of free pages is below high
3604 * watermark. If so, we will decrease pcp->high and free
3605 * PCP pages in free path to reduce the possibility of
3606 * premature page reclaiming. Detection is done here to
3607 * avoid to do that in hotter free path.
3608 */
3609 if (test_bit(ZONE_BELOW_HIGH, &zone->flags))
3610 goto check_alloc_wmark;
3611
3612 mark = high_wmark_pages(zone);
3613 if (zone_watermark_fast(zone, order, mark,
3614 ac->highest_zoneidx, alloc_flags,
3615 gfp_mask))
3616 goto try_this_zone;
3617 else
3618 set_bit(ZONE_BELOW_HIGH, &zone->flags);
3619
3620check_alloc_wmark:
a9214443 3621 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
48ee5f36 3622 if (!zone_watermark_fast(zone, order, mark,
f80b08fc
CTR
3623 ac->highest_zoneidx, alloc_flags,
3624 gfp_mask)) {
fa5e084e
MG
3625 int ret;
3626
23fa022a 3627 if (cond_accept_memory(zone, order, alloc_flags))
807174a9 3628 goto try_this_zone;
dcdfdd40 3629
c9e97a19
PT
3630 /*
3631 * Watermark failed for this zone, but see if we can
3632 * grow this zone if it contains deferred pages.
3633 */
076cf7ea 3634 if (deferred_pages_enabled()) {
c9e97a19
PT
3635 if (_deferred_grow_zone(zone, order))
3636 goto try_this_zone;
3637 }
5dab2911
MG
3638 /* Checked here to keep the fast path fast */
3639 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
3640 if (alloc_flags & ALLOC_NO_WATERMARKS)
3641 goto try_this_zone;
3642
202e35db 3643 if (!node_reclaim_enabled() ||
29943248 3644 !zone_allows_reclaim(zonelist_zone(ac->preferred_zoneref), zone))
cd38b115
MG
3645 continue;
3646
a5f5f91d 3647 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
fa5e084e 3648 switch (ret) {
a5f5f91d 3649 case NODE_RECLAIM_NOSCAN:
fa5e084e 3650 /* did not scan */
cd38b115 3651 continue;
a5f5f91d 3652 case NODE_RECLAIM_FULL:
fa5e084e 3653 /* scanned but unreclaimable */
cd38b115 3654 continue;
fa5e084e
MG
3655 default:
3656 /* did we reclaim enough */
fed2719e 3657 if (zone_watermark_ok(zone, order, mark,
97a225e6 3658 ac->highest_zoneidx, alloc_flags))
fed2719e
MG
3659 goto try_this_zone;
3660
fed2719e 3661 continue;
0798e519 3662 }
7fb1d9fc
RS
3663 }
3664
fa5e084e 3665try_this_zone:
29943248 3666 page = rmqueue(zonelist_zone(ac->preferred_zoneref), zone, order,
0aaa29a5 3667 gfp_mask, alloc_flags, ac->migratetype);
75379191 3668 if (page) {
479f854a 3669 prep_new_page(page, order, gfp_mask, alloc_flags);
0aaa29a5
MG
3670
3671 /*
3672 * If this is a high-order atomic allocation then check
3673 * if the pageblock should be reserved for the future
3674 */
eb2e2b42 3675 if (unlikely(alloc_flags & ALLOC_HIGHATOMIC))
7cc5a5d6 3676 reserve_highatomic_pageblock(page, order, zone);
0aaa29a5 3677
75379191 3678 return page;
c9e97a19 3679 } else {
23fa022a 3680 if (cond_accept_memory(zone, order, alloc_flags))
807174a9 3681 goto try_this_zone;
dcdfdd40 3682
c9e97a19 3683 /* Try again if zone has deferred pages */
076cf7ea 3684 if (deferred_pages_enabled()) {
c9e97a19
PT
3685 if (_deferred_grow_zone(zone, order))
3686 goto try_this_zone;
3687 }
75379191 3688 }
54a6eb5c 3689 }
9276b1bc 3690
6bb15450
MG
3691 /*
3692 * It's possible on a UMA machine to get through all zones that are
3693 * fragmented. If avoiding fragmentation, reset and try again.
3694 */
e3aa7df3 3695 if (no_fallback && !defrag_mode) {
6bb15450
MG
3696 alloc_flags &= ~ALLOC_NOFRAGMENT;
3697 goto retry;
3698 }
3699
4ffeaf35 3700 return NULL;
753ee728
MH
3701}
3702
9af744d7 3703static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
a238ab5b 3704{
a238ab5b 3705 unsigned int filter = SHOW_MEM_FILTER_NODES;
a238ab5b
DH
3706
3707 /*
3708 * This documents exceptions given to allocations in certain
3709 * contexts that are allowed to allocate outside current's set
3710 * of allowed nodes.
3711 */
3712 if (!(gfp_mask & __GFP_NOMEMALLOC))
cd04ae1e 3713 if (tsk_is_oom_victim(current) ||
a238ab5b
DH
3714 (current->flags & (PF_MEMALLOC | PF_EXITING)))
3715 filter &= ~SHOW_MEM_FILTER_NODES;
88dc6f20 3716 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
a238ab5b
DH
3717 filter &= ~SHOW_MEM_FILTER_NODES;
3718
974f4367 3719 __show_mem(filter, nodemask, gfp_zone(gfp_mask));
aa187507
MH
3720}
3721
a8e99259 3722void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
aa187507
MH
3723{
3724 struct va_format vaf;
3725 va_list args;
1be334e5 3726 static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1);
aa187507 3727
c4dc63f0
BH
3728 if ((gfp_mask & __GFP_NOWARN) ||
3729 !__ratelimit(&nopage_rs) ||
3730 ((gfp_mask & __GFP_DMA) && !has_managed_dma()))
aa187507
MH
3731 return;
3732
7877cdcc
MH
3733 va_start(args, fmt);
3734 vaf.fmt = fmt;
3735 vaf.va = &args;
ef8444ea 3736 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl",
0205f755
MH
3737 current->comm, &vaf, gfp_mask, &gfp_mask,
3738 nodemask_pr_args(nodemask));
7877cdcc 3739 va_end(args);
3ee9a4f0 3740
a8e99259 3741 cpuset_print_current_mems_allowed();
ef8444ea 3742 pr_cont("\n");
a238ab5b 3743 dump_stack();
685dbf6f 3744 warn_alloc_show_mem(gfp_mask, nodemask);
a238ab5b
DH
3745}
3746
6c18ba7a
MH
3747static inline struct page *
3748__alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
3749 unsigned int alloc_flags,
3750 const struct alloc_context *ac)
3751{
3752 struct page *page;
3753
3754 page = get_page_from_freelist(gfp_mask, order,
3755 alloc_flags|ALLOC_CPUSET, ac);
3756 /*
3757 * fallback to ignore cpuset restriction if our nodes
3758 * are depleted
3759 */
3760 if (!page)
3761 page = get_page_from_freelist(gfp_mask, order,
3762 alloc_flags, ac);
6c18ba7a
MH
3763 return page;
3764}
3765
11e33f6a
MG
3766static inline struct page *
3767__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
a9263751 3768 const struct alloc_context *ac, unsigned long *did_some_progress)
11e33f6a 3769{
6e0fc46d
DR
3770 struct oom_control oc = {
3771 .zonelist = ac->zonelist,
3772 .nodemask = ac->nodemask,
2a966b77 3773 .memcg = NULL,
6e0fc46d
DR
3774 .gfp_mask = gfp_mask,
3775 .order = order,
6e0fc46d 3776 };
11e33f6a
MG
3777 struct page *page;
3778
9879de73
JW
3779 *did_some_progress = 0;
3780
9879de73 3781 /*
dc56401f
JW
3782 * Acquire the oom lock. If that fails, somebody else is
3783 * making progress for us.
9879de73 3784 */
dc56401f 3785 if (!mutex_trylock(&oom_lock)) {
9879de73 3786 *did_some_progress = 1;
11e33f6a 3787 schedule_timeout_uninterruptible(1);
1da177e4
LT
3788 return NULL;
3789 }
6b1de916 3790
11e33f6a
MG
3791 /*
3792 * Go through the zonelist yet one more time, keep very high watermark
3793 * here, this is only to catch a parallel oom killing, we must fail if
e746bf73
TH
3794 * we're still under heavy pressure. But make sure that this reclaim
3795 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY
3796 * allocation which will never fail due to oom_lock already held.
11e33f6a 3797 */
e746bf73
TH
3798 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
3799 ~__GFP_DIRECT_RECLAIM, order,
3800 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
7fb1d9fc 3801 if (page)
11e33f6a
MG
3802 goto out;
3803
06ad276a
MH
3804 /* Coredumps can quickly deplete all memory reserves */
3805 if (current->flags & PF_DUMPCORE)
3806 goto out;
3807 /* The OOM killer will not help higher order allocs */
3808 if (order > PAGE_ALLOC_COSTLY_ORDER)
3809 goto out;
dcda9b04
MH
3810 /*
3811 * We have already exhausted all our reclaim opportunities without any
3812 * success so it is time to admit defeat. We will skip the OOM killer
3813 * because it is very likely that the caller has a more reasonable
3814 * fallback than shooting a random task.
cfb4a541
MN
3815 *
3816 * The OOM killer may not free memory on a specific node.
dcda9b04 3817 */
cfb4a541 3818 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE))
dcda9b04 3819 goto out;
06ad276a 3820 /* The OOM killer does not needlessly kill tasks for lowmem */
97a225e6 3821 if (ac->highest_zoneidx < ZONE_NORMAL)
06ad276a
MH
3822 goto out;
3823 if (pm_suspended_storage())
3824 goto out;
3825 /*
3826 * XXX: GFP_NOFS allocations should rather fail than rely on
3827 * other request to make a forward progress.
3828 * We are in an unfortunate situation where out_of_memory cannot
3829 * do much for this context but let's try it to at least get
3830 * access to memory reserved if the current task is killed (see
3831 * out_of_memory). Once filesystems are ready to handle allocation
3832 * failures more gracefully we should just bail out here.
3833 */
3834
3c2c6488 3835 /* Exhausted what can be done so it's blame time */
3f913fc5
QZ
3836 if (out_of_memory(&oc) ||
3837 WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) {
c32b3cbe 3838 *did_some_progress = 1;
5020e285 3839
6c18ba7a
MH
3840 /*
3841 * Help non-failing allocations by giving them access to memory
3842 * reserves
3843 */
3844 if (gfp_mask & __GFP_NOFAIL)
3845 page = __alloc_pages_cpuset_fallback(gfp_mask, order,
5020e285 3846 ALLOC_NO_WATERMARKS, ac);
5020e285 3847 }
11e33f6a 3848out:
dc56401f 3849 mutex_unlock(&oom_lock);
11e33f6a
MG
3850 return page;
3851}
3852
33c2d214 3853/*
baf2f90b 3854 * Maximum number of compaction retries with a progress before OOM
33c2d214
MH
3855 * killer is consider as the only way to move forward.
3856 */
3857#define MAX_COMPACT_RETRIES 16
3858
56de7263
MG
3859#ifdef CONFIG_COMPACTION
3860/* Try memory compaction for high-order allocations before reclaim */
3861static struct page *
3862__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
c603844b 3863 unsigned int alloc_flags, const struct alloc_context *ac,
a5508cd8 3864 enum compact_priority prio, enum compact_result *compact_result)
56de7263 3865{
5e1f0f09 3866 struct page *page = NULL;
eb414681 3867 unsigned long pflags;
499118e9 3868 unsigned int noreclaim_flag;
53853e2d
VB
3869
3870 if (!order)
66199712 3871 return NULL;
66199712 3872
eb414681 3873 psi_memstall_enter(&pflags);
5bf18281 3874 delayacct_compact_start();
499118e9 3875 noreclaim_flag = memalloc_noreclaim_save();
eb414681 3876
c5d01d0d 3877 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
5e1f0f09 3878 prio, &page);
eb414681 3879
499118e9 3880 memalloc_noreclaim_restore(noreclaim_flag);
eb414681 3881 psi_memstall_leave(&pflags);
5bf18281 3882 delayacct_compact_end();
56de7263 3883
06dac2f4
CTR
3884 if (*compact_result == COMPACT_SKIPPED)
3885 return NULL;
98dd3b48
VB
3886 /*
3887 * At least in one zone compaction wasn't deferred or skipped, so let's
3888 * count a compaction stall
3889 */
3890 count_vm_event(COMPACTSTALL);
8fb74b9f 3891
5e1f0f09
MG
3892 /* Prep a captured page if available */
3893 if (page)
3894 prep_new_page(page, order, gfp_mask, alloc_flags);
3895
3896 /* Try get a page from the freelist if available */
3897 if (!page)
3898 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
53853e2d 3899
98dd3b48
VB
3900 if (page) {
3901 struct zone *zone = page_zone(page);
53853e2d 3902
98dd3b48
VB
3903 zone->compact_blockskip_flush = false;
3904 compaction_defer_reset(zone, order, true);
3905 count_vm_event(COMPACTSUCCESS);
3906 return page;
3907 }
56de7263 3908
98dd3b48
VB
3909 /*
3910 * It's bad if compaction run occurs and fails. The most likely reason
3911 * is that pages exist, but not enough to satisfy watermarks.
3912 */
3913 count_vm_event(COMPACTFAIL);
66199712 3914
98dd3b48 3915 cond_resched();
56de7263
MG
3916
3917 return NULL;
3918}
33c2d214 3919
3250845d
VB
3920static inline bool
3921should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
3922 enum compact_result compact_result,
3923 enum compact_priority *compact_priority,
d9436498 3924 int *compaction_retries)
3250845d
VB
3925{
3926 int max_retries = MAX_COMPACT_RETRIES;
c2033b00 3927 int min_priority;
65190cff
MH
3928 bool ret = false;
3929 int retries = *compaction_retries;
3930 enum compact_priority priority = *compact_priority;
3250845d
VB
3931
3932 if (!order)
3933 return false;
3934
691d9497
AT
3935 if (fatal_signal_pending(current))
3936 return false;
3937
49433085 3938 /*
ecd8b292
JW
3939 * Compaction was skipped due to a lack of free order-0
3940 * migration targets. Continue if reclaim can help.
49433085 3941 */
ecd8b292 3942 if (compact_result == COMPACT_SKIPPED) {
49433085
VB
3943 ret = compaction_zonelist_suitable(ac, order, alloc_flags);
3944 goto out;
3945 }
3946
3250845d 3947 /*
511a69b2
JW
3948 * Compaction managed to coalesce some page blocks, but the
3949 * allocation failed presumably due to a race. Retry some.
3250845d 3950 */
511a69b2
JW
3951 if (compact_result == COMPACT_SUCCESS) {
3952 /*
3953 * !costly requests are much more important than
3954 * __GFP_RETRY_MAYFAIL costly ones because they are de
3955 * facto nofail and invoke OOM killer to move on while
3956 * costly can fail and users are ready to cope with
3957 * that. 1/4 retries is rather arbitrary but we would
3958 * need much more detailed feedback from compaction to
3959 * make a better decision.
3960 */
3961 if (order > PAGE_ALLOC_COSTLY_ORDER)
3962 max_retries /= 4;
3250845d 3963
511a69b2
JW
3964 if (++(*compaction_retries) <= max_retries) {
3965 ret = true;
3966 goto out;
3967 }
65190cff 3968 }
3250845d 3969
d9436498 3970 /*
511a69b2 3971 * Compaction failed. Retry with increasing priority.
d9436498 3972 */
c2033b00
VB
3973 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
3974 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
65190cff 3975
c2033b00 3976 if (*compact_priority > min_priority) {
d9436498
VB
3977 (*compact_priority)--;
3978 *compaction_retries = 0;
65190cff 3979 ret = true;
d9436498 3980 }
65190cff
MH
3981out:
3982 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
3983 return ret;
3250845d 3984}
56de7263
MG
3985#else
3986static inline struct page *
3987__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
c603844b 3988 unsigned int alloc_flags, const struct alloc_context *ac,
a5508cd8 3989 enum compact_priority prio, enum compact_result *compact_result)
56de7263 3990{
33c2d214 3991 *compact_result = COMPACT_SKIPPED;
56de7263
MG
3992 return NULL;
3993}
33c2d214
MH
3994
3995static inline bool
86a294a8
MH
3996should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
3997 enum compact_result compact_result,
a5508cd8 3998 enum compact_priority *compact_priority,
d9436498 3999 int *compaction_retries)
33c2d214 4000{
31e49bfd
MH
4001 struct zone *zone;
4002 struct zoneref *z;
4003
4004 if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
4005 return false;
4006
4007 /*
4008 * There are setups with compaction disabled which would prefer to loop
4009 * inside the allocator rather than hit the oom killer prematurely.
4010 * Let's give them a good hope and keep retrying while the order-0
4011 * watermarks are OK.
4012 */
97a225e6
JK
4013 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4014 ac->highest_zoneidx, ac->nodemask) {
31e49bfd 4015 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
97a225e6 4016 ac->highest_zoneidx, alloc_flags))
31e49bfd
MH
4017 return true;
4018 }
33c2d214
MH
4019 return false;
4020}
3250845d 4021#endif /* CONFIG_COMPACTION */
56de7263 4022
d92a8cfc 4023#ifdef CONFIG_LOCKDEP
93781325 4024static struct lockdep_map __fs_reclaim_map =
d92a8cfc
PZ
4025 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);
4026
f920e413 4027static bool __need_reclaim(gfp_t gfp_mask)
d92a8cfc 4028{
d92a8cfc
PZ
4029 /* no reclaim without waiting on it */
4030 if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
4031 return false;
4032
4033 /* this guy won't enter reclaim */
2e517d68 4034 if (current->flags & PF_MEMALLOC)
d92a8cfc
PZ
4035 return false;
4036
d92a8cfc
PZ
4037 if (gfp_mask & __GFP_NOLOCKDEP)
4038 return false;
4039
4040 return true;
4041}
4042
4f3eaf45 4043void __fs_reclaim_acquire(unsigned long ip)
93781325 4044{
4f3eaf45 4045 lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip);
93781325
OS
4046}
4047
4f3eaf45 4048void __fs_reclaim_release(unsigned long ip)
93781325 4049{
4f3eaf45 4050 lock_release(&__fs_reclaim_map, ip);
93781325
OS
4051}
4052
d92a8cfc
PZ
4053void fs_reclaim_acquire(gfp_t gfp_mask)
4054{
f920e413
SV
4055 gfp_mask = current_gfp_context(gfp_mask);
4056
4057 if (__need_reclaim(gfp_mask)) {
4058 if (gfp_mask & __GFP_FS)
4f3eaf45 4059 __fs_reclaim_acquire(_RET_IP_);
f920e413
SV
4060
4061#ifdef CONFIG_MMU_NOTIFIER
4062 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
4063 lock_map_release(&__mmu_notifier_invalidate_range_start_map);
4064#endif
4065
4066 }
d92a8cfc
PZ
4067}
4068EXPORT_SYMBOL_GPL(fs_reclaim_acquire);
4069
4070void fs_reclaim_release(gfp_t gfp_mask)
4071{
f920e413
SV
4072 gfp_mask = current_gfp_context(gfp_mask);
4073
4074 if (__need_reclaim(gfp_mask)) {
4075 if (gfp_mask & __GFP_FS)
4f3eaf45 4076 __fs_reclaim_release(_RET_IP_);
f920e413 4077 }
d92a8cfc
PZ
4078}
4079EXPORT_SYMBOL_GPL(fs_reclaim_release);
4080#endif
4081
3d36424b
MG
4082/*
4083 * Zonelists may change due to hotplug during allocation. Detect when zonelists
4084 * have been rebuilt so allocation retries. Reader side does not lock and
4085 * retries the allocation if zonelist changes. Writer side is protected by the
4086 * embedded spin_lock.
4087 */
4088static DEFINE_SEQLOCK(zonelist_update_seq);
4089
4090static unsigned int zonelist_iter_begin(void)
4091{
4092 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
4093 return read_seqbegin(&zonelist_update_seq);
4094
4095 return 0;
4096}
4097
4098static unsigned int check_retry_zonelist(unsigned int seq)
4099{
4100 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
4101 return read_seqretry(&zonelist_update_seq, seq);
4102
4103 return seq;
4104}
4105
bba90710 4106/* Perform direct synchronous page reclaim */
2187e17b 4107static unsigned long
a9263751
VB
4108__perform_reclaim(gfp_t gfp_mask, unsigned int order,
4109 const struct alloc_context *ac)
11e33f6a 4110{
499118e9 4111 unsigned int noreclaim_flag;
fa7fc75f 4112 unsigned long progress;
11e33f6a
MG
4113
4114 cond_resched();
4115
4116 /* We now go into synchronous reclaim */
4117 cpuset_memory_pressure_bump();
d92a8cfc 4118 fs_reclaim_acquire(gfp_mask);
93781325 4119 noreclaim_flag = memalloc_noreclaim_save();
11e33f6a 4120
a9263751
VB
4121 progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
4122 ac->nodemask);
11e33f6a 4123
499118e9 4124 memalloc_noreclaim_restore(noreclaim_flag);
93781325 4125 fs_reclaim_release(gfp_mask);
11e33f6a
MG
4126
4127 cond_resched();
4128
bba90710
MS
4129 return progress;
4130}
4131
4132/* The really slow allocator path where we enter direct reclaim */
4133static inline struct page *
4134__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
c603844b 4135 unsigned int alloc_flags, const struct alloc_context *ac,
a9263751 4136 unsigned long *did_some_progress)
bba90710
MS
4137{
4138 struct page *page = NULL;
fa7fc75f 4139 unsigned long pflags;
bba90710
MS
4140 bool drained = false;
4141
fa7fc75f 4142 psi_memstall_enter(&pflags);
a9263751 4143 *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
9ee493ce 4144 if (unlikely(!(*did_some_progress)))
fa7fc75f 4145 goto out;
11e33f6a 4146
9ee493ce 4147retry:
31a6c190 4148 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
9ee493ce
MG
4149
4150 /*
4151 * If an allocation failed after direct reclaim, it could be because
0aaa29a5 4152 * pages are pinned on the per-cpu lists or in high alloc reserves.
047b9967 4153 * Shrink them and try again
9ee493ce
MG
4154 */
4155 if (!page && !drained) {
29fac03b 4156 unreserve_highatomic_pageblock(ac, false);
93481ff0 4157 drain_all_pages(NULL);
9ee493ce
MG
4158 drained = true;
4159 goto retry;
4160 }
fa7fc75f
SB
4161out:
4162 psi_memstall_leave(&pflags);
9ee493ce 4163
11e33f6a
MG
4164 return page;
4165}
4166
5ecd9d40
DR
4167static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
4168 const struct alloc_context *ac)
3a025760
JW
4169{
4170 struct zoneref *z;
4171 struct zone *zone;
e1a55637 4172 pg_data_t *last_pgdat = NULL;
97a225e6 4173 enum zone_type highest_zoneidx = ac->highest_zoneidx;
101f9d66
JW
4174 unsigned int reclaim_order;
4175
4176 if (defrag_mode)
4177 reclaim_order = max(order, pageblock_order);
4178 else
4179 reclaim_order = order;
3a025760 4180
97a225e6 4181 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx,
5ecd9d40 4182 ac->nodemask) {
bc53008e
WY
4183 if (!managed_zone(zone))
4184 continue;
101f9d66
JW
4185 if (last_pgdat == zone->zone_pgdat)
4186 continue;
4187 wakeup_kswapd(zone, gfp_mask, reclaim_order, highest_zoneidx);
4188 last_pgdat = zone->zone_pgdat;
e1a55637 4189 }
3a025760
JW
4190}
4191
c603844b 4192static inline unsigned int
eb2e2b42 4193gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
341ce06f 4194{
c603844b 4195 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
1da177e4 4196
736838e9 4197 /*
524c4807 4198 * __GFP_HIGH is assumed to be the same as ALLOC_MIN_RESERVE
736838e9
MN
4199 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
4200 * to save two branches.
4201 */
524c4807 4202 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_MIN_RESERVE);
736838e9 4203 BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD);
933e312e 4204
341ce06f
PZ
4205 /*
4206 * The caller may dip into page reserves a bit more if the caller
4207 * cannot run direct reclaim, or if the caller has realtime scheduling
4208 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
1ebbb218 4209 * set both ALLOC_NON_BLOCK and ALLOC_MIN_RESERVE(__GFP_HIGH).
341ce06f 4210 */
736838e9
MN
4211 alloc_flags |= (__force int)
4212 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
1da177e4 4213
1ebbb218 4214 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
5c3240d9 4215 /*
b104a35d
DR
4216 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
4217 * if it can't schedule.
5c3240d9 4218 */
eb2e2b42 4219 if (!(gfp_mask & __GFP_NOMEMALLOC)) {
1ebbb218 4220 alloc_flags |= ALLOC_NON_BLOCK;
eb2e2b42
MG
4221
4222 if (order > 0)
4223 alloc_flags |= ALLOC_HIGHATOMIC;
4224 }
4225
523b9458 4226 /*
1ebbb218
MG
4227 * Ignore cpuset mems for non-blocking __GFP_HIGH (probably
4228 * GFP_ATOMIC) rather than fail, see the comment for
8adce085 4229 * cpuset_current_node_allowed().
523b9458 4230 */
1ebbb218
MG
4231 if (alloc_flags & ALLOC_MIN_RESERVE)
4232 alloc_flags &= ~ALLOC_CPUSET;
ae04f69d 4233 } else if (unlikely(rt_or_dl_task(current)) && in_task())
c988dcbe 4234 alloc_flags |= ALLOC_MIN_RESERVE;
341ce06f 4235
8e3560d9 4236 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);
8510e69c 4237
e3aa7df3
JW
4238 if (defrag_mode)
4239 alloc_flags |= ALLOC_NOFRAGMENT;
4240
341ce06f
PZ
4241 return alloc_flags;
4242}
4243
cd04ae1e 4244static bool oom_reserves_allowed(struct task_struct *tsk)
072bb0aa 4245{
cd04ae1e
MH
4246 if (!tsk_is_oom_victim(tsk))
4247 return false;
4248
4249 /*
4250 * !MMU doesn't have oom reaper so give access to memory reserves
4251 * only to the thread with TIF_MEMDIE set
4252 */
4253 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
31a6c190
VB
4254 return false;
4255
cd04ae1e
MH
4256 return true;
4257}
4258
4259/*
4260 * Distinguish requests which really need access to full memory
4261 * reserves from oom victims which can live with a portion of it
4262 */
4263static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
4264{
4265 if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
4266 return 0;
31a6c190 4267 if (gfp_mask & __GFP_MEMALLOC)
cd04ae1e 4268 return ALLOC_NO_WATERMARKS;
31a6c190 4269 if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
cd04ae1e
MH
4270 return ALLOC_NO_WATERMARKS;
4271 if (!in_interrupt()) {
4272 if (current->flags & PF_MEMALLOC)
4273 return ALLOC_NO_WATERMARKS;
4274 else if (oom_reserves_allowed(current))
4275 return ALLOC_OOM;
4276 }
31a6c190 4277
cd04ae1e
MH
4278 return 0;
4279}
4280
4281bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
4282{
4283 return !!__gfp_pfmemalloc_flags(gfp_mask);
072bb0aa
MG
4284}
4285
0a0337e0
MH
4286/*
4287 * Checks whether it makes sense to retry the reclaim to make a forward progress
4288 * for the given allocation request.
491d79ae
JW
4289 *
4290 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
4291 * without success, or when we couldn't even meet the watermark if we
4292 * reclaimed all remaining pages on the LRU lists.
0a0337e0
MH
4293 *
4294 * Returns true if a retry is viable or false to enter the oom path.
4295 */
4296static inline bool
4297should_reclaim_retry(gfp_t gfp_mask, unsigned order,
4298 struct alloc_context *ac, int alloc_flags,
423b452e 4299 bool did_some_progress, int *no_progress_loops)
0a0337e0
MH
4300{
4301 struct zone *zone;
4302 struct zoneref *z;
15f570bf 4303 bool ret = false;
0a0337e0 4304
423b452e
VB
4305 /*
4306 * Costly allocations might have made a progress but this doesn't mean
4307 * their order will become available due to high fragmentation so
4308 * always increment the no progress counter for them
4309 */
4310 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
4311 *no_progress_loops = 0;
4312 else
4313 (*no_progress_loops)++;
4314
ac3f3b0a
CTK
4315 if (*no_progress_loops > MAX_RECLAIM_RETRIES)
4316 goto out;
4317
0a0337e0 4318
bca67592
MG
4319 /*
4320 * Keep reclaiming pages while there is a chance this will lead
4321 * somewhere. If none of the target zones can satisfy our allocation
4322 * request even if all reclaimable pages are considered then we are
4323 * screwed and have to go OOM.
0a0337e0 4324 */
97a225e6
JK
4325 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4326 ac->highest_zoneidx, ac->nodemask) {
0a0337e0 4327 unsigned long available;
ede37713 4328 unsigned long reclaimable;
d379f01d
MH
4329 unsigned long min_wmark = min_wmark_pages(zone);
4330 bool wmark;
0a0337e0 4331
435b3894
ZH
4332 if (cpusets_enabled() &&
4333 (alloc_flags & ALLOC_CPUSET) &&
4334 !__cpuset_zone_allowed(zone, gfp_mask))
4335 continue;
4336
5a1c84b4 4337 available = reclaimable = zone_reclaimable_pages(zone);
5a1c84b4 4338 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
0a0337e0
MH
4339
4340 /*
491d79ae
JW
4341 * Would the allocation succeed if we reclaimed all
4342 * reclaimable pages?
0a0337e0 4343 */
d379f01d 4344 wmark = __zone_watermark_ok(zone, order, min_wmark,
97a225e6 4345 ac->highest_zoneidx, alloc_flags, available);
d379f01d
MH
4346 trace_reclaim_retry_zone(z, order, reclaimable,
4347 available, min_wmark, *no_progress_loops, wmark);
4348 if (wmark) {
15f570bf 4349 ret = true;
132b0d21 4350 break;
0a0337e0
MH
4351 }
4352 }
4353
15f570bf
MH
4354 /*
4355 * Memory allocation/reclaim might be called from a WQ context and the
4356 * current implementation of the WQ concurrency control doesn't
4357 * recognize that a particular WQ is congested if the worker thread is
4358 * looping without ever sleeping. Therefore we have to do a short sleep
4359 * here rather than calling cond_resched().
4360 */
4361 if (current->flags & PF_WQ_WORKER)
4362 schedule_timeout_uninterruptible(1);
4363 else
4364 cond_resched();
ac3f3b0a
CTK
4365out:
4366 /* Before OOM, exhaust highatomic_reserve */
4367 if (!ret)
4368 return unreserve_highatomic_pageblock(ac, true);
4369
15f570bf 4370 return ret;
0a0337e0
MH
4371}
4372
902b6281
VB
4373static inline bool
4374check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
4375{
4376 /*
4377 * It's possible that cpuset's mems_allowed and the nodemask from
4378 * mempolicy don't intersect. This should be normally dealt with by
4379 * policy_nodemask(), but it's possible to race with cpuset update in
4380 * such a way the check therein was true, and then it became false
4381 * before we got our cpuset_mems_cookie here.
4382 * This assumes that for all allocations, ac->nodemask can come only
4383 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored
4384 * when it does not intersect with the cpuset restrictions) or the
4385 * caller can deal with a violated nodemask.
4386 */
4387 if (cpusets_enabled() && ac->nodemask &&
4388 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
4389 ac->nodemask = NULL;
4390 return true;
4391 }
4392
4393 /*
4394 * When updating a task's mems_allowed or mempolicy nodemask, it is
4395 * possible to race with parallel threads in such a way that our
4396 * allocation can fail while the mask is being updated. If we are about
4397 * to fail, check if the cpuset changed during allocation and if so,
4398 * retry.
4399 */
4400 if (read_mems_allowed_retry(cpuset_mems_cookie))
4401 return true;
4402
4403 return false;
4404}
4405
11e33f6a
MG
4406static inline struct page *
4407__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
a9263751 4408 struct alloc_context *ac)
11e33f6a 4409{
d0164adc 4410 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
803de900 4411 bool can_compact = gfp_compaction_allowed(gfp_mask);
903edea6 4412 bool nofail = gfp_mask & __GFP_NOFAIL;
282722b0 4413 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
11e33f6a 4414 struct page *page = NULL;
c603844b 4415 unsigned int alloc_flags;
11e33f6a 4416 unsigned long did_some_progress;
5ce9bfef 4417 enum compact_priority compact_priority;
c5d01d0d 4418 enum compact_result compact_result;
5ce9bfef
VB
4419 int compaction_retries;
4420 int no_progress_loops;
5ce9bfef 4421 unsigned int cpuset_mems_cookie;
3d36424b 4422 unsigned int zonelist_iter_cookie;
cd04ae1e 4423 int reserve_flags;
1da177e4 4424
903edea6
BS
4425 if (unlikely(nofail)) {
4426 /*
4427 * We most definitely don't want callers attempting to
4428 * allocate greater than order-1 page units with __GFP_NOFAIL.
4429 */
4430 WARN_ON_ONCE(order > 1);
4431 /*
4432 * Also we don't support __GFP_NOFAIL without __GFP_DIRECT_RECLAIM,
4433 * otherwise, we may result in lockup.
4434 */
4435 WARN_ON_ONCE(!can_direct_reclaim);
4436 /*
4437 * PF_MEMALLOC request from this context is rather bizarre
4438 * because we cannot reclaim anything and only can loop waiting
4439 * for somebody to do a work for us.
4440 */
4441 WARN_ON_ONCE(current->flags & PF_MEMALLOC);
4442 }
4443
3d36424b 4444restart:
5ce9bfef
VB
4445 compaction_retries = 0;
4446 no_progress_loops = 0;
8fe9ed44 4447 compact_result = COMPACT_SKIPPED;
5ce9bfef
VB
4448 compact_priority = DEF_COMPACT_PRIORITY;
4449 cpuset_mems_cookie = read_mems_allowed_begin();
3d36424b 4450 zonelist_iter_cookie = zonelist_iter_begin();
9a67f648
MH
4451
4452 /*
4453 * The fast path uses conservative alloc_flags to succeed only until
4454 * kswapd needs to be woken up, and to avoid the cost of setting up
4455 * alloc_flags precisely. So we do that now.
4456 */
eb2e2b42 4457 alloc_flags = gfp_to_alloc_flags(gfp_mask, order);
9a67f648 4458
e47483bc
VB
4459 /*
4460 * We need to recalculate the starting point for the zonelist iterator
4461 * because we might have used different nodemask in the fast path, or
4462 * there was a cpuset modification and we are retrying - otherwise we
4463 * could end up iterating over non-eligible zones endlessly.
4464 */
4465 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
97a225e6 4466 ac->highest_zoneidx, ac->nodemask);
29943248 4467 if (!zonelist_zone(ac->preferred_zoneref))
e47483bc
VB
4468 goto nopage;
4469
8ca1b5a4
FT
4470 /*
4471 * Check for insane configurations where the cpuset doesn't contain
4472 * any suitable zone to satisfy the request - e.g. non-movable
4473 * GFP_HIGHUSER allocations from MOVABLE nodes only.
4474 */
4475 if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) {
4476 struct zoneref *z = first_zones_zonelist(ac->zonelist,
4477 ac->highest_zoneidx,
4478 &cpuset_current_mems_allowed);
29943248 4479 if (!zonelist_zone(z))
8ca1b5a4
FT
4480 goto nopage;
4481 }
4482
0a79cdad 4483 if (alloc_flags & ALLOC_KSWAPD)
5ecd9d40 4484 wake_all_kswapds(order, gfp_mask, ac);
23771235
VB
4485
4486 /*
4487 * The adjusted alloc_flags might result in immediate success, so try
4488 * that first
4489 */
4490 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4491 if (page)
4492 goto got_pg;
4493
a8161d1e
VB
4494 /*
4495 * For costly allocations, try direct compaction first, as it's likely
282722b0
VB
4496 * that we have enough base pages and don't need to reclaim. For non-
4497 * movable high-order allocations, do that as well, as compaction will
4498 * try prevent permanent fragmentation by migrating from blocks of the
4499 * same migratetype.
4500 * Don't try this for allocations that are allowed to ignore
4501 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
a8161d1e 4502 */
803de900 4503 if (can_direct_reclaim && can_compact &&
282722b0
VB
4504 (costly_order ||
4505 (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
4506 && !gfp_pfmemalloc_allowed(gfp_mask)) {
a8161d1e
VB
4507 page = __alloc_pages_direct_compact(gfp_mask, order,
4508 alloc_flags, ac,
a5508cd8 4509 INIT_COMPACT_PRIORITY,
a8161d1e
VB
4510 &compact_result);
4511 if (page)
4512 goto got_pg;
4513
cc638f32
VB
4514 /*
4515 * Checks for costly allocations with __GFP_NORETRY, which
4516 * includes some THP page fault allocations
4517 */
4518 if (costly_order && (gfp_mask & __GFP_NORETRY)) {
b39d0ee2
DR
4519 /*
4520 * If allocating entire pageblock(s) and compaction
4521 * failed because all zones are below low watermarks
4522 * or is prohibited because it recently failed at this
3f36d866
DR
4523 * order, fail immediately unless the allocator has
4524 * requested compaction and reclaim retry.
b39d0ee2
DR
4525 *
4526 * Reclaim is
4527 * - potentially very expensive because zones are far
4528 * below their low watermarks or this is part of very
4529 * bursty high order allocations,
4530 * - not guaranteed to help because isolate_freepages()
4531 * may not iterate over freed pages as part of its
4532 * linear scan, and
4533 * - unlikely to make entire pageblocks free on its
4534 * own.
4535 */
4536 if (compact_result == COMPACT_SKIPPED ||
4537 compact_result == COMPACT_DEFERRED)
4538 goto nopage;
a8161d1e 4539
a8161d1e 4540 /*
3eb2771b
VB
4541 * Looks like reclaim/compaction is worth trying, but
4542 * sync compaction could be very expensive, so keep
25160354 4543 * using async compaction.
a8161d1e 4544 */
a5508cd8 4545 compact_priority = INIT_COMPACT_PRIORITY;
a8161d1e
VB
4546 }
4547 }
23771235 4548
31a6c190 4549retry:
e05741fb
TZ
4550 /*
4551 * Deal with possible cpuset update races or zonelist updates to avoid
4552 * infinite retries.
4553 */
4554 if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
4555 check_retry_zonelist(zonelist_iter_cookie))
4556 goto restart;
4557
23771235 4558 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
0a79cdad 4559 if (alloc_flags & ALLOC_KSWAPD)
5ecd9d40 4560 wake_all_kswapds(order, gfp_mask, ac);
31a6c190 4561
cd04ae1e
MH
4562 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
4563 if (reserve_flags)
ce96fa62
ML
4564 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) |
4565 (alloc_flags & ALLOC_KSWAPD);
23771235 4566
e46e7b77 4567 /*
d6a24df0
VB
4568 * Reset the nodemask and zonelist iterators if memory policies can be
4569 * ignored. These allocations are high priority and system rather than
4570 * user oriented.
e46e7b77 4571 */
cd04ae1e 4572 if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
d6a24df0 4573 ac->nodemask = NULL;
e46e7b77 4574 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
97a225e6 4575 ac->highest_zoneidx, ac->nodemask);
e46e7b77
MG
4576 }
4577
23771235 4578 /* Attempt with potentially adjusted zonelist and alloc_flags */
31a6c190 4579 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
7fb1d9fc
RS
4580 if (page)
4581 goto got_pg;
1da177e4 4582
d0164adc 4583 /* Caller is not willing to reclaim, we can't balance anything */
9a67f648 4584 if (!can_direct_reclaim)
1da177e4
LT
4585 goto nopage;
4586
9a67f648
MH
4587 /* Avoid recursion of direct reclaim */
4588 if (current->flags & PF_MEMALLOC)
6583bb64
DR
4589 goto nopage;
4590
a8161d1e
VB
4591 /* Try direct reclaim and then allocating */
4592 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
4593 &did_some_progress);
4594 if (page)
4595 goto got_pg;
4596
4597 /* Try direct compaction and then allocating */
a9263751 4598 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
a5508cd8 4599 compact_priority, &compact_result);
56de7263
MG
4600 if (page)
4601 goto got_pg;
75f30861 4602
9083905a
JW
4603 /* Do not loop if specifically requested */
4604 if (gfp_mask & __GFP_NORETRY)
a8161d1e 4605 goto nopage;
9083905a 4606
0a0337e0
MH
4607 /*
4608 * Do not retry costly high order allocations unless they are
803de900 4609 * __GFP_RETRY_MAYFAIL and we can compact
0a0337e0 4610 */
803de900
VB
4611 if (costly_order && (!can_compact ||
4612 !(gfp_mask & __GFP_RETRY_MAYFAIL)))
a8161d1e 4613 goto nopage;
0a0337e0 4614
0a0337e0 4615 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
423b452e 4616 did_some_progress > 0, &no_progress_loops))
0a0337e0
MH
4617 goto retry;
4618
33c2d214
MH
4619 /*
4620 * It doesn't make any sense to retry for the compaction if the order-0
4621 * reclaim is not able to make any progress because the current
4622 * implementation of the compaction depends on the sufficient amount
4623 * of free memory (see __compaction_suitable)
4624 */
803de900 4625 if (did_some_progress > 0 && can_compact &&
86a294a8 4626 should_compact_retry(ac, order, alloc_flags,
a5508cd8 4627 compact_result, &compact_priority,
d9436498 4628 &compaction_retries))
33c2d214
MH
4629 goto retry;
4630
e3aa7df3 4631 /* Reclaim/compaction failed to prevent the fallback */
7a95a05f
JW
4632 if (defrag_mode && (alloc_flags & ALLOC_NOFRAGMENT)) {
4633 alloc_flags &= ~ALLOC_NOFRAGMENT;
e3aa7df3
JW
4634 goto retry;
4635 }
902b6281 4636
3d36424b
MG
4637 /*
4638 * Deal with possible cpuset update races or zonelist updates to avoid
4639 * a unnecessary OOM kill.
4640 */
4641 if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
4642 check_retry_zonelist(zonelist_iter_cookie))
4643 goto restart;
e47483bc 4644
9083905a
JW
4645 /* Reclaim has failed us, start killing things */
4646 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
4647 if (page)
4648 goto got_pg;
4649
9a67f648 4650 /* Avoid allocations with no watermarks from looping endlessly */
cd04ae1e 4651 if (tsk_is_oom_victim(current) &&
8510e69c 4652 (alloc_flags & ALLOC_OOM ||
c288983d 4653 (gfp_mask & __GFP_NOMEMALLOC)))
9a67f648
MH
4654 goto nopage;
4655
9083905a 4656 /* Retry as long as the OOM killer is making progress */
0a0337e0
MH
4657 if (did_some_progress) {
4658 no_progress_loops = 0;
9083905a 4659 goto retry;
0a0337e0 4660 }
9083905a 4661
1da177e4 4662nopage:
3d36424b
MG
4663 /*
4664 * Deal with possible cpuset update races or zonelist updates to avoid
4665 * a unnecessary OOM kill.
4666 */
4667 if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
4668 check_retry_zonelist(zonelist_iter_cookie))
4669 goto restart;
5ce9bfef 4670
9a67f648
MH
4671 /*
4672 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
4673 * we always retry
4674 */
903edea6 4675 if (unlikely(nofail)) {
9a67f648 4676 /*
903edea6
BS
4677 * Lacking direct_reclaim we can't do anything to reclaim memory,
4678 * we disregard these unreasonable nofail requests and still
4679 * return NULL
9a67f648 4680 */
903edea6 4681 if (!can_direct_reclaim)
9a67f648
MH
4682 goto fail;
4683
6c18ba7a 4684 /*
1ebbb218
MG
4685 * Help non-failing allocations by giving some access to memory
4686 * reserves normally used for high priority non-blocking
4687 * allocations but do not use ALLOC_NO_WATERMARKS because this
6c18ba7a 4688 * could deplete whole memory reserves which would just make
1ebbb218 4689 * the situation worse.
6c18ba7a 4690 */
1ebbb218 4691 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac);
6c18ba7a
MH
4692 if (page)
4693 goto got_pg;
4694
9a67f648
MH
4695 cond_resched();
4696 goto retry;
4697 }
4698fail:
a8e99259 4699 warn_alloc(gfp_mask, ac->nodemask,
7877cdcc 4700 "page allocation failure: order:%u", order);
1da177e4 4701got_pg:
072bb0aa 4702 return page;
1da177e4 4703}
11e33f6a 4704
9cd75558 4705static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
04ec6264 4706 int preferred_nid, nodemask_t *nodemask,
8e6a930b 4707 struct alloc_context *ac, gfp_t *alloc_gfp,
9cd75558 4708 unsigned int *alloc_flags)
11e33f6a 4709{
97a225e6 4710 ac->highest_zoneidx = gfp_zone(gfp_mask);
04ec6264 4711 ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
9cd75558 4712 ac->nodemask = nodemask;
01c0bfe0 4713 ac->migratetype = gfp_migratetype(gfp_mask);
11e33f6a 4714
682a3385 4715 if (cpusets_enabled()) {
8e6a930b 4716 *alloc_gfp |= __GFP_HARDWALL;
182f3d7a
MS
4717 /*
4718 * When we are in the interrupt context, it is irrelevant
4719 * to the current task context. It means that any node ok.
4720 */
88dc6f20 4721 if (in_task() && !ac->nodemask)
9cd75558 4722 ac->nodemask = &cpuset_current_mems_allowed;
51047820
VB
4723 else
4724 *alloc_flags |= ALLOC_CPUSET;
682a3385
MG
4725 }
4726
446ec838 4727 might_alloc(gfp_mask);
11e33f6a 4728
97769a53
AS
4729 /*
4730 * Don't invoke should_fail logic, since it may call
4731 * get_random_u32() and printk() which need to spin_lock.
4732 */
4733 if (!(*alloc_flags & ALLOC_TRYLOCK) &&
4734 should_fail_alloc_page(gfp_mask, order))
9cd75558 4735 return false;
11e33f6a 4736
8e3560d9 4737 *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
d883c6cf 4738
c9ab0c4f 4739 /* Dirty zone balancing only done in the fast path */
9cd75558 4740 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
c9ab0c4f 4741
e46e7b77
MG
4742 /*
4743 * The preferred zone is used for statistics but crucially it is
4744 * also used as the starting point for the zonelist iterator. It
4745 * may get reset for allocations that ignore memory policies.
4746 */
9cd75558 4747 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
97a225e6 4748 ac->highest_zoneidx, ac->nodemask);
a0622d05
MN
4749
4750 return true;
9cd75558
MG
4751}
4752
387ba26f 4753/*
c8b97953 4754 * __alloc_pages_bulk - Allocate a number of order-0 pages to an array
387ba26f
MG
4755 * @gfp: GFP flags for the allocation
4756 * @preferred_nid: The preferred NUMA node ID to allocate from
4757 * @nodemask: Set of nodes to allocate from, may be NULL
c8b97953
LC
4758 * @nr_pages: The number of pages desired in the array
4759 * @page_array: Array to store the pages
387ba26f
MG
4760 *
4761 * This is a batched version of the page allocator that attempts to
c8b97953 4762 * allocate nr_pages quickly. Pages are added to the page_array.
387ba26f 4763 *
c8b97953 4764 * Note that only NULL elements are populated with pages and nr_pages
0f87d9d3
MG
4765 * is the maximum number of pages that will be stored in the array.
4766 *
c8b97953 4767 * Returns the number of pages in the array.
387ba26f 4768 */
b951aaff 4769unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
387ba26f 4770 nodemask_t *nodemask, int nr_pages,
0f87d9d3 4771 struct page **page_array)
387ba26f
MG
4772{
4773 struct page *page;
4b23a68f 4774 unsigned long __maybe_unused UP_flags;
387ba26f
MG
4775 struct zone *zone;
4776 struct zoneref *z;
4777 struct per_cpu_pages *pcp;
4778 struct list_head *pcp_list;
4779 struct alloc_context ac;
4780 gfp_t alloc_gfp;
4781 unsigned int alloc_flags = ALLOC_WMARK_LOW;
3e23060b 4782 int nr_populated = 0, nr_account = 0;
387ba26f 4783
0f87d9d3
MG
4784 /*
4785 * Skip populated array elements to determine if any pages need
4786 * to be allocated before disabling IRQs.
4787 */
c8b97953 4788 while (nr_populated < nr_pages && page_array[nr_populated])
0f87d9d3
MG
4789 nr_populated++;
4790
06147843
CL
4791 /* No pages requested? */
4792 if (unlikely(nr_pages <= 0))
4793 goto out;
4794
b3b64ebd 4795 /* Already populated array? */
c8b97953 4796 if (unlikely(nr_pages - nr_populated == 0))
06147843 4797 goto out;
b3b64ebd 4798
8dcb3060 4799 /* Bulk allocator does not support memcg accounting. */
f7a449f7 4800 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT))
8dcb3060
SB
4801 goto failed;
4802
387ba26f 4803 /* Use the single page allocator for one page. */
0f87d9d3 4804 if (nr_pages - nr_populated == 1)
387ba26f
MG
4805 goto failed;
4806
187ad460
MG
4807#ifdef CONFIG_PAGE_OWNER
4808 /*
4809 * PAGE_OWNER may recurse into the allocator to allocate space to
4810 * save the stack with pagesets.lock held. Releasing/reacquiring
4811 * removes much of the performance benefit of bulk allocation so
4812 * force the caller to allocate one page at a time as it'll have
4813 * similar performance to added complexity to the bulk allocator.
4814 */
4815 if (static_branch_unlikely(&page_owner_inited))
4816 goto failed;
4817#endif
4818
387ba26f
MG
4819 /* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
4820 gfp &= gfp_allowed_mask;
4821 alloc_gfp = gfp;
4822 if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags))
06147843 4823 goto out;
387ba26f
MG
4824 gfp = alloc_gfp;
4825
4826 /* Find an allowed local zone that meets the low watermark. */
8ce41b0f
JT
4827 z = ac.preferred_zoneref;
4828 for_next_zone_zonelist_nodemask(zone, z, ac.highest_zoneidx, ac.nodemask) {
387ba26f
MG
4829 unsigned long mark;
4830
4831 if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
4832 !__cpuset_zone_allowed(zone, gfp)) {
4833 continue;
4834 }
4835
29943248
WY
4836 if (nr_online_nodes > 1 && zone != zonelist_zone(ac.preferred_zoneref) &&
4837 zone_to_nid(zone) != zonelist_node_idx(ac.preferred_zoneref)) {
387ba26f
MG
4838 goto failed;
4839 }
4840
23fa022a 4841 cond_accept_memory(zone, 0, alloc_flags);
4be9064b 4842retry_this_zone:
387ba26f
MG
4843 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
4844 if (zone_watermark_fast(zone, 0, mark,
4845 zonelist_zone_idx(ac.preferred_zoneref),
4846 alloc_flags, gfp)) {
4847 break;
4848 }
4be9064b 4849
23fa022a 4850 if (cond_accept_memory(zone, 0, alloc_flags))
4be9064b
KS
4851 goto retry_this_zone;
4852
4853 /* Try again if zone has deferred pages */
4854 if (deferred_pages_enabled()) {
4855 if (_deferred_grow_zone(zone, 0))
4856 goto retry_this_zone;
4857 }
387ba26f
MG
4858 }
4859
4860 /*
4861 * If there are no allowed local zones that meets the watermarks then
4862 * try to allocate a single page and reclaim if necessary.
4863 */
ce76f9a1 4864 if (unlikely(!zone))
387ba26f
MG
4865 goto failed;
4866
57490774 4867 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */
4b23a68f 4868 pcp_trylock_prepare(UP_flags);
57490774 4869 pcp = pcp_spin_trylock(zone->per_cpu_pageset);
01b44456 4870 if (!pcp)
4b23a68f 4871 goto failed_irq;
387ba26f 4872
387ba26f 4873 /* Attempt the batch allocation */
44042b44 4874 pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)];
0f87d9d3
MG
4875 while (nr_populated < nr_pages) {
4876
4877 /* Skip existing pages */
c8b97953 4878 if (page_array[nr_populated]) {
0f87d9d3
MG
4879 nr_populated++;
4880 continue;
4881 }
4882
44042b44 4883 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
387ba26f 4884 pcp, pcp_list);
ce76f9a1 4885 if (unlikely(!page)) {
c572e488 4886 /* Try and allocate at least one page */
4b23a68f 4887 if (!nr_account) {
57490774 4888 pcp_spin_unlock(pcp);
387ba26f 4889 goto failed_irq;
4b23a68f 4890 }
387ba26f
MG
4891 break;
4892 }
3e23060b 4893 nr_account++;
387ba26f
MG
4894
4895 prep_new_page(page, 0, gfp, 0);
ee66e9c3 4896 set_page_refcounted(page);
c8b97953 4897 page_array[nr_populated++] = page;
387ba26f
MG
4898 }
4899
57490774 4900 pcp_spin_unlock(pcp);
4b23a68f 4901 pcp_trylock_finish(UP_flags);
43c95bcc 4902
3e23060b 4903 __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
29943248 4904 zone_statistics(zonelist_zone(ac.preferred_zoneref), zone, nr_account);
387ba26f 4905
06147843 4906out:
0f87d9d3 4907 return nr_populated;
387ba26f
MG
4908
4909failed_irq:
4b23a68f 4910 pcp_trylock_finish(UP_flags);
387ba26f
MG
4911
4912failed:
b951aaff 4913 page = __alloc_pages_noprof(gfp, 0, preferred_nid, nodemask);
c8b97953
LC
4914 if (page)
4915 page_array[nr_populated++] = page;
06147843 4916 goto out;
387ba26f 4917}
b951aaff 4918EXPORT_SYMBOL_GPL(alloc_pages_bulk_noprof);
387ba26f 4919
9cd75558
MG
4920/*
4921 * This is the 'heart' of the zoned buddy allocator.
4922 */
49249a2a
MWO
4923struct page *__alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order,
4924 int preferred_nid, nodemask_t *nodemask)
9cd75558
MG
4925{
4926 struct page *page;
4927 unsigned int alloc_flags = ALLOC_WMARK_LOW;
8e6a930b 4928 gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */
9cd75558
MG
4929 struct alloc_context ac = { };
4930
c63ae43b
MH
4931 /*
4932 * There are several places where we assume that the order value is sane
4933 * so bail out early if the request is out of bound.
4934 */
5e0a760b 4935 if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp))
c63ae43b 4936 return NULL;
c63ae43b 4937
6e5e0f28 4938 gfp &= gfp_allowed_mask;
da6df1b0
PT
4939 /*
4940 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
4941 * resp. GFP_NOIO which has to be inherited for all allocation requests
4942 * from a particular context which has been marked by
8e3560d9
PT
4943 * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures
4944 * movable zones are not used during allocation.
da6df1b0
PT
4945 */
4946 gfp = current_gfp_context(gfp);
6e5e0f28
MWO
4947 alloc_gfp = gfp;
4948 if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
8e6a930b 4949 &alloc_gfp, &alloc_flags))
9cd75558
MG
4950 return NULL;
4951
6bb15450
MG
4952 /*
4953 * Forbid the first pass from falling back to types that fragment
4954 * memory until all local zones are considered.
4955 */
29943248 4956 alloc_flags |= alloc_flags_nofragment(zonelist_zone(ac.preferred_zoneref), gfp);
6bb15450 4957
5117f45d 4958 /* First allocation attempt */
8e6a930b 4959 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
4fcb0971
MG
4960 if (likely(page))
4961 goto out;
11e33f6a 4962
da6df1b0 4963 alloc_gfp = gfp;
4fcb0971 4964 ac.spread_dirty_pages = false;
23f086f9 4965
4741526b
MG
4966 /*
4967 * Restore the original nodemask if it was potentially replaced with
4968 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
4969 */
97ce86f9 4970 ac.nodemask = nodemask;
16096c25 4971
8e6a930b 4972 page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
cc9a6c87 4973
4fcb0971 4974out:
f7a449f7 4975 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page &&
6e5e0f28 4976 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
c972106d 4977 free_frozen_pages(page, order);
c4159a75 4978 page = NULL;
4949148a
VD
4979 }
4980
8e6a930b 4981 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
b073d7f8 4982 kmsan_alloc_page(page, order, alloc_gfp);
4fcb0971 4983
11e33f6a 4984 return page;
1da177e4 4985}
49249a2a
MWO
4986EXPORT_SYMBOL(__alloc_frozen_pages_noprof);
4987
4988struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
4989 int preferred_nid, nodemask_t *nodemask)
4990{
4991 struct page *page;
4992
4993 page = __alloc_frozen_pages_noprof(gfp, order, preferred_nid, nodemask);
4994 if (page)
4995 set_page_refcounted(page);
4996 return page;
4997}
b951aaff 4998EXPORT_SYMBOL(__alloc_pages_noprof);
1da177e4 4999
b951aaff 5000struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
cc09cb13
MWO
5001 nodemask_t *nodemask)
5002{
b951aaff 5003 struct page *page = __alloc_pages_noprof(gfp | __GFP_COMP, order,
23e48832
HD
5004 preferred_nid, nodemask);
5005 return page_rmappable_folio(page);
cc09cb13 5006}
b951aaff 5007EXPORT_SYMBOL(__folio_alloc_noprof);
cc09cb13 5008
1da177e4 5009/*
9ea9a680
MH
5010 * Common helper functions. Never use with __GFP_HIGHMEM because the returned
5011 * address cannot represent highmem pages. Use alloc_pages and then kmap if
5012 * you need to access high mem.
1da177e4 5013 */
b951aaff 5014unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order)
1da177e4 5015{
945a1113
AM
5016 struct page *page;
5017
b951aaff 5018 page = alloc_pages_noprof(gfp_mask & ~__GFP_HIGHMEM, order);
1da177e4
LT
5019 if (!page)
5020 return 0;
5021 return (unsigned long) page_address(page);
5022}
b951aaff 5023EXPORT_SYMBOL(get_free_pages_noprof);
1da177e4 5024
b951aaff 5025unsigned long get_zeroed_page_noprof(gfp_t gfp_mask)
1da177e4 5026{
b951aaff 5027 return get_free_pages_noprof(gfp_mask | __GFP_ZERO, 0);
1da177e4 5028}
b951aaff 5029EXPORT_SYMBOL(get_zeroed_page_noprof);
1da177e4 5030
7f194fbb 5031/**
8c57b687 5032 * ___free_pages - Free pages allocated with alloc_pages().
7f194fbb
MWO
5033 * @page: The page pointer returned from alloc_pages().
5034 * @order: The order of the allocation.
8c57b687 5035 * @fpi_flags: Free Page Internal flags.
7f194fbb
MWO
5036 *
5037 * This function can free multi-page allocations that are not compound
5038 * pages. It does not check that the @order passed in matches that of
5039 * the allocation, so it is easy to leak memory. Freeing more memory
5040 * than was allocated will probably emit a warning.
5041 *
5042 * If the last reference to this page is speculative, it will be released
5043 * by put_page() which only frees the first page of a non-compound
5044 * allocation. To prevent the remaining pages from being leaked, we free
5045 * the subsequent pages here. If you want to use the page's reference
5046 * count to decide when to free the allocation, you should allocate a
5047 * compound page, and use put_page() instead of __free_pages().
5048 *
5049 * Context: May be called in interrupt context or while holding a normal
5050 * spinlock, but not in NMI context or while holding a raw spinlock.
5051 */
8c57b687
AS
5052static void ___free_pages(struct page *page, unsigned int order,
5053 fpi_t fpi_flags)
742aa7fb 5054{
462a8e08
DC
5055 /* get PageHead before we drop reference */
5056 int head = PageHead(page);
0ae0227f
DW
5057 /* get alloc tag in case the page is released by others */
5058 struct alloc_tag *tag = pgalloc_tag_get(page);
462a8e08 5059
742aa7fb 5060 if (put_page_testzero(page))
8c57b687 5061 __free_frozen_pages(page, order, fpi_flags);
cc92eba1 5062 else if (!head) {
0ae0227f 5063 pgalloc_tag_sub_pages(tag, (1 << order) - 1);
e320d301 5064 while (order-- > 0)
8c57b687
AS
5065 __free_frozen_pages(page + (1 << order), order,
5066 fpi_flags);
cc92eba1 5067 }
742aa7fb 5068}
8c57b687
AS
5069void __free_pages(struct page *page, unsigned int order)
5070{
5071 ___free_pages(page, order, FPI_NONE);
5072}
1da177e4
LT
5073EXPORT_SYMBOL(__free_pages);
5074
8c57b687
AS
5075/*
5076 * Can be called while holding raw_spin_lock or from IRQ and NMI for any
2aad4edf 5077 * page type (not only those that came from alloc_pages_nolock)
8c57b687
AS
5078 */
5079void free_pages_nolock(struct page *page, unsigned int order)
5080{
5081 ___free_pages(page, order, FPI_TRYLOCK);
5082}
5083
920c7a5d 5084void free_pages(unsigned long addr, unsigned int order)
1da177e4
LT
5085{
5086 if (addr != 0) {
725d704e 5087 VM_BUG_ON(!virt_addr_valid((void *)addr));
1da177e4
LT
5088 __free_pages(virt_to_page((void *)addr), order);
5089 }
5090}
5091
5092EXPORT_SYMBOL(free_pages);
5093
d00181b9
KS
5094static void *make_alloc_exact(unsigned long addr, unsigned int order,
5095 size_t size)
ee85c2e1
AK
5096{
5097 if (addr) {
df48a5f7
LH
5098 unsigned long nr = DIV_ROUND_UP(size, PAGE_SIZE);
5099 struct page *page = virt_to_page((void *)addr);
5100 struct page *last = page + nr;
5101
46d44d09 5102 split_page_owner(page, order, 0);
95599ef6 5103 pgalloc_tag_split(page_folio(page), order, 0);
1506c255 5104 split_page_memcg(page, order);
df48a5f7
LH
5105 while (page < --last)
5106 set_page_refcounted(last);
5107
5108 last = page + (1UL << order);
5109 for (page += nr; page < last; page++)
5110 __free_pages_ok(page, 0, FPI_TO_TAIL);
ee85c2e1
AK
5111 }
5112 return (void *)addr;
5113}
5114
2be0ffe2
TT
5115/**
5116 * alloc_pages_exact - allocate an exact number physically-contiguous pages.
5117 * @size: the number of bytes to allocate
63931eb9 5118 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
2be0ffe2
TT
5119 *
5120 * This function is similar to alloc_pages(), except that it allocates the
5121 * minimum number of pages to satisfy the request. alloc_pages() can only
5122 * allocate memory in power-of-two pages.
5123 *
5e0a760b 5124 * This function is also limited by MAX_PAGE_ORDER.
2be0ffe2
TT
5125 *
5126 * Memory allocated by this function must be released by free_pages_exact().
a862f68a
MR
5127 *
5128 * Return: pointer to the allocated area or %NULL in case of error.
2be0ffe2 5129 */
b951aaff 5130void *alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask)
2be0ffe2
TT
5131{
5132 unsigned int order = get_order(size);
5133 unsigned long addr;
5134
ba7f1b9e
ML
5135 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
5136 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
63931eb9 5137
b951aaff 5138 addr = get_free_pages_noprof(gfp_mask, order);
ee85c2e1 5139 return make_alloc_exact(addr, order, size);
2be0ffe2 5140}
b951aaff 5141EXPORT_SYMBOL(alloc_pages_exact_noprof);
2be0ffe2 5142
ee85c2e1
AK
5143/**
5144 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
5145 * pages on a node.
b5e6ab58 5146 * @nid: the preferred node ID where memory should be allocated
ee85c2e1 5147 * @size: the number of bytes to allocate
63931eb9 5148 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
ee85c2e1
AK
5149 *
5150 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
5151 * back.
a862f68a
MR
5152 *
5153 * Return: pointer to the allocated area or %NULL in case of error.
ee85c2e1 5154 */
b951aaff 5155void * __meminit alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask)
ee85c2e1 5156{
d00181b9 5157 unsigned int order = get_order(size);
63931eb9
VB
5158 struct page *p;
5159
ba7f1b9e
ML
5160 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
5161 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
63931eb9 5162
b951aaff 5163 p = alloc_pages_node_noprof(nid, gfp_mask, order);
ee85c2e1
AK
5164 if (!p)
5165 return NULL;
5166 return make_alloc_exact((unsigned long)page_address(p), order, size);
5167}
ee85c2e1 5168
2be0ffe2
TT
5169/**
5170 * free_pages_exact - release memory allocated via alloc_pages_exact()
5171 * @virt: the value returned by alloc_pages_exact.
5172 * @size: size of allocation, same value as passed to alloc_pages_exact().
5173 *
5174 * Release the memory allocated by a previous call to alloc_pages_exact.
5175 */
5176void free_pages_exact(void *virt, size_t size)
5177{
5178 unsigned long addr = (unsigned long)virt;
5179 unsigned long end = addr + PAGE_ALIGN(size);
5180
5181 while (addr < end) {
5182 free_page(addr);
5183 addr += PAGE_SIZE;
5184 }
5185}
5186EXPORT_SYMBOL(free_pages_exact);
5187
e0fb5815
ZY
5188/**
5189 * nr_free_zone_pages - count number of pages beyond high watermark
5190 * @offset: The zone index of the highest zone
5191 *
a862f68a 5192 * nr_free_zone_pages() counts the number of pages which are beyond the
e0fb5815
ZY
5193 * high watermark within all zones at or below a given zone index. For each
5194 * zone, the number of pages is calculated as:
0e056eb5
MCC
5195 *
5196 * nr_free_zone_pages = managed_pages - high_pages
a862f68a
MR
5197 *
5198 * Return: number of pages beyond high watermark.
e0fb5815 5199 */
ebec3862 5200static unsigned long nr_free_zone_pages(int offset)
1da177e4 5201{
dd1a239f 5202 struct zoneref *z;
54a6eb5c
MG
5203 struct zone *zone;
5204
e310fd43 5205 /* Just pick one node, since fallback list is circular */
ebec3862 5206 unsigned long sum = 0;
1da177e4 5207
0e88460d 5208 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
1da177e4 5209
54a6eb5c 5210 for_each_zone_zonelist(zone, z, zonelist, offset) {
9705bea5 5211 unsigned long size = zone_managed_pages(zone);
41858966 5212 unsigned long high = high_wmark_pages(zone);
e310fd43
MB
5213 if (size > high)
5214 sum += size - high;
1da177e4
LT
5215 }
5216
5217 return sum;
5218}
5219
e0fb5815
ZY
5220/**
5221 * nr_free_buffer_pages - count number of pages beyond high watermark
5222 *
5223 * nr_free_buffer_pages() counts the number of pages which are beyond the high
5224 * watermark within ZONE_DMA and ZONE_NORMAL.
a862f68a
MR
5225 *
5226 * Return: number of pages beyond high watermark within ZONE_DMA and
5227 * ZONE_NORMAL.
1da177e4 5228 */
ebec3862 5229unsigned long nr_free_buffer_pages(void)
1da177e4 5230{
af4ca457 5231 return nr_free_zone_pages(gfp_zone(GFP_USER));
1da177e4 5232}
c2f1a551 5233EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
1da177e4 5234
19770b32
MG
5235static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
5236{
5237 zoneref->zone = zone;
5238 zoneref->zone_idx = zone_idx(zone);
5239}
5240
1da177e4
LT
5241/*
5242 * Builds allocation fallback zone lists.
1a93205b
CL
5243 *
5244 * Add all populated zones of a node to the zonelist.
1da177e4 5245 */
9d3be21b 5246static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
1da177e4 5247{
1a93205b 5248 struct zone *zone;
bc732f1d 5249 enum zone_type zone_type = MAX_NR_ZONES;
9d3be21b 5250 int nr_zones = 0;
02a68a5e
CL
5251
5252 do {
2f6726e5 5253 zone_type--;
070f8032 5254 zone = pgdat->node_zones + zone_type;
e553f62f 5255 if (populated_zone(zone)) {
9d3be21b 5256 zoneref_set_zone(zone, &zonerefs[nr_zones++]);
070f8032 5257 check_highest_zone(zone_type);
1da177e4 5258 }
2f6726e5 5259 } while (zone_type);
bc732f1d 5260
070f8032 5261 return nr_zones;
1da177e4
LT
5262}
5263
5264#ifdef CONFIG_NUMA
f0c0b2b8
KH
5265
5266static int __parse_numa_zonelist_order(char *s)
5267{
c9bff3ee 5268 /*
f0953a1b 5269 * We used to support different zonelists modes but they turned
c9bff3ee
MH
5270 * out to be just not useful. Let's keep the warning in place
5271 * if somebody still use the cmd line parameter so that we do
5272 * not fail it silently
5273 */
5274 if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
5275 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s);
f0c0b2b8
KH
5276 return -EINVAL;
5277 }
5278 return 0;
5279}
5280
e95d372c
KW
5281static char numa_zonelist_order[] = "Node";
5282#define NUMA_ZONELIST_ORDER_LEN 16
f0c0b2b8
KH
5283/*
5284 * sysctl handler for numa_zonelist_order
5285 */
78eb4ea2 5286static int numa_zonelist_order_handler(const struct ctl_table *table, int write,
32927393 5287 void *buffer, size_t *length, loff_t *ppos)
f0c0b2b8 5288{
32927393
CH
5289 if (write)
5290 return __parse_numa_zonelist_order(buffer);
5291 return proc_dostring(table, write, buffer, length, ppos);
f0c0b2b8
KH
5292}
5293
f0c0b2b8
KH
5294static int node_load[MAX_NUMNODES];
5295
1da177e4 5296/**
4dc3b16b 5297 * find_next_best_node - find the next node that should appear in a given node's fallback list
1da177e4
LT
5298 * @node: node whose fallback list we're appending
5299 * @used_node_mask: nodemask_t of already used nodes
5300 *
5301 * We use a number of factors to determine which is the next node that should
5302 * appear on a given node's fallback list. The node should not have appeared
5303 * already in @node's fallback list, and it should be the next closest node
5304 * according to the distance array (which contains arbitrary distance values
5305 * from each node to each node in the system), and should also prefer nodes
5306 * with no CPUs, since presumably they'll have very little allocation pressure
5307 * on them otherwise.
a862f68a
MR
5308 *
5309 * Return: node id of the found node or %NUMA_NO_NODE if no node is found.
1da177e4 5310 */
79c28a41 5311int find_next_best_node(int node, nodemask_t *used_node_mask)
1da177e4 5312{
4cf808eb 5313 int n, val;
1da177e4 5314 int min_val = INT_MAX;
00ef2d2f 5315 int best_node = NUMA_NO_NODE;
1da177e4 5316
c2baef39
QZ
5317 /*
5318 * Use the local node if we haven't already, but for memoryless local
5319 * node, we should skip it and fall back to other nodes.
5320 */
5321 if (!node_isset(node, *used_node_mask) && node_state(node, N_MEMORY)) {
4cf808eb
LT
5322 node_set(node, *used_node_mask);
5323 return node;
5324 }
1da177e4 5325
4b0ef1fe 5326 for_each_node_state(n, N_MEMORY) {
1da177e4
LT
5327
5328 /* Don't want a node to appear more than once */
5329 if (node_isset(n, *used_node_mask))
5330 continue;
5331
1da177e4
LT
5332 /* Use the distance array to find the distance */
5333 val = node_distance(node, n);
5334
4cf808eb
LT
5335 /* Penalize nodes under us ("prefer the next node") */
5336 val += (n < node);
5337
1da177e4 5338 /* Give preference to headless and unused nodes */
b630749f 5339 if (!cpumask_empty(cpumask_of_node(n)))
1da177e4
LT
5340 val += PENALTY_FOR_NODE_WITH_CPUS;
5341
5342 /* Slight preference for less loaded node */
37931324 5343 val *= MAX_NUMNODES;
1da177e4
LT
5344 val += node_load[n];
5345
5346 if (val < min_val) {
5347 min_val = val;
5348 best_node = n;
5349 }
5350 }
5351
5352 if (best_node >= 0)
5353 node_set(best_node, *used_node_mask);
5354
5355 return best_node;
5356}
5357
f0c0b2b8
KH
5358
5359/*
5360 * Build zonelists ordered by node and zones within node.
5361 * This results in maximum locality--normal zone overflows into local
5362 * DMA zone, if any--but risks exhausting DMA zone.
5363 */
9d3be21b
MH
5364static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
5365 unsigned nr_nodes)
1da177e4 5366{
9d3be21b
MH
5367 struct zoneref *zonerefs;
5368 int i;
5369
5370 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5371
5372 for (i = 0; i < nr_nodes; i++) {
5373 int nr_zones;
5374
5375 pg_data_t *node = NODE_DATA(node_order[i]);
f0c0b2b8 5376
9d3be21b
MH
5377 nr_zones = build_zonerefs_node(node, zonerefs);
5378 zonerefs += nr_zones;
5379 }
5380 zonerefs->zone = NULL;
5381 zonerefs->zone_idx = 0;
f0c0b2b8
KH
5382}
5383
523b9458 5384/*
b719efa2 5385 * Build __GFP_THISNODE zonelists
523b9458
CL
5386 */
5387static void build_thisnode_zonelists(pg_data_t *pgdat)
5388{
9d3be21b
MH
5389 struct zoneref *zonerefs;
5390 int nr_zones;
523b9458 5391
9d3be21b
MH
5392 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
5393 nr_zones = build_zonerefs_node(pgdat, zonerefs);
5394 zonerefs += nr_zones;
5395 zonerefs->zone = NULL;
5396 zonerefs->zone_idx = 0;
523b9458
CL
5397}
5398
f0c0b2b8
KH
5399static void build_zonelists(pg_data_t *pgdat)
5400{
9d3be21b 5401 static int node_order[MAX_NUMNODES];
37931324 5402 int node, nr_nodes = 0;
d0ddf49b 5403 nodemask_t used_mask = NODE_MASK_NONE;
f0c0b2b8 5404 int local_node, prev_node;
1da177e4
LT
5405
5406 /* NUMA-aware ordering of nodes */
5407 local_node = pgdat->node_id;
1da177e4 5408 prev_node = local_node;
f0c0b2b8 5409
f0c0b2b8 5410 memset(node_order, 0, sizeof(node_order));
1da177e4
LT
5411 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
5412 /*
5413 * We don't want to pressure a particular node.
5414 * So adding penalty to the first node in same
5415 * distance group to make it round-robin.
5416 */
957f822a
DR
5417 if (node_distance(local_node, node) !=
5418 node_distance(local_node, prev_node))
37931324 5419 node_load[node] += 1;
f0c0b2b8 5420
9d3be21b 5421 node_order[nr_nodes++] = node;
1da177e4 5422 prev_node = node;
1da177e4 5423 }
523b9458 5424
9d3be21b 5425 build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
523b9458 5426 build_thisnode_zonelists(pgdat);
6cf25392
BR
5427 pr_info("Fallback order for Node %d: ", local_node);
5428 for (node = 0; node < nr_nodes; node++)
5429 pr_cont("%d ", node_order[node]);
5430 pr_cont("\n");
1da177e4
LT
5431}
5432
7aac7898
LS
5433#ifdef CONFIG_HAVE_MEMORYLESS_NODES
5434/*
5435 * Return node id of node used for "local" allocations.
5436 * I.e., first node id of first zone in arg node's generic zonelist.
5437 * Used for initializing percpu 'numa_mem', which is used primarily
5438 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
5439 */
5440int local_memory_node(int node)
5441{
c33d6c06 5442 struct zoneref *z;
7aac7898 5443
c33d6c06 5444 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
7aac7898 5445 gfp_zone(GFP_KERNEL),
c33d6c06 5446 NULL);
29943248 5447 return zonelist_node_idx(z);
7aac7898
LS
5448}
5449#endif
f0c0b2b8 5450
6423aa81
JK
5451static void setup_min_unmapped_ratio(void);
5452static void setup_min_slab_ratio(void);
1da177e4
LT
5453#else /* CONFIG_NUMA */
5454
f0c0b2b8 5455static void build_zonelists(pg_data_t *pgdat)
1da177e4 5456{
9d3be21b
MH
5457 struct zoneref *zonerefs;
5458 int nr_zones;
1da177e4 5459
9d3be21b
MH
5460 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5461 nr_zones = build_zonerefs_node(pgdat, zonerefs);
5462 zonerefs += nr_zones;
1da177e4 5463
9d3be21b
MH
5464 zonerefs->zone = NULL;
5465 zonerefs->zone_idx = 0;
1da177e4
LT
5466}
5467
5468#endif /* CONFIG_NUMA */
5469
99dcc3e5
CL
5470/*
5471 * Boot pageset table. One per cpu which is going to be used for all
5472 * zones and all nodes. The parameters will be set in such a way
5473 * that an item put on a list will immediately be handed over to
5474 * the buddy list. This is safe since pageset manipulation is done
5475 * with interrupts disabled.
5476 *
5477 * The boot_pagesets must be kept even after bootup is complete for
5478 * unused processors and/or zones. They do play a role for bootstrapping
5479 * hotplugged processors.
5480 *
5481 * zoneinfo_show() and maybe other functions do
5482 * not check if the processor is online before following the pageset pointer.
5483 * Other parts of the kernel may not check if the zone is available.
5484 */
28f836b6 5485static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats);
952eaf81
VB
5486/* These effectively disable the pcplists in the boot pageset completely */
5487#define BOOT_PAGESET_HIGH 0
5488#define BOOT_PAGESET_BATCH 1
28f836b6
MG
5489static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset);
5490static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats);
99dcc3e5 5491
11cd8638 5492static void __build_all_zonelists(void *data)
1da177e4 5493{
6811378e 5494 int nid;
afb6ebb3 5495 int __maybe_unused cpu;
9adb62a5 5496 pg_data_t *self = data;
1007843a 5497 unsigned long flags;
b93e0f32 5498
1007843a 5499 /*
a2ebb515
SAS
5500 * The zonelist_update_seq must be acquired with irqsave because the
5501 * reader can be invoked from IRQ with GFP_ATOMIC.
1007843a 5502 */
a2ebb515 5503 write_seqlock_irqsave(&zonelist_update_seq, flags);
1007843a 5504 /*
a2ebb515
SAS
5505 * Also disable synchronous printk() to prevent any printk() from
5506 * trying to hold port->lock, for
1007843a
TH
5507 * tty_insert_flip_string_and_push_buffer() on other CPU might be
5508 * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held.
5509 */
5510 printk_deferred_enter();
9276b1bc 5511
7f9cfb31
BL
5512#ifdef CONFIG_NUMA
5513 memset(node_load, 0, sizeof(node_load));
5514#endif
9adb62a5 5515
c1152583
WY
5516 /*
5517 * This node is hotadded and no memory is yet present. So just
5518 * building zonelists is fine - no need to touch other nodes.
5519 */
9adb62a5
JL
5520 if (self && !node_online(self->node_id)) {
5521 build_zonelists(self);
c1152583 5522 } else {
09f49dca
MH
5523 /*
5524 * All possible nodes have pgdat preallocated
5525 * in free_area_init
5526 */
5527 for_each_node(nid) {
c1152583 5528 pg_data_t *pgdat = NODE_DATA(nid);
7ea1530a 5529
c1152583
WY
5530 build_zonelists(pgdat);
5531 }
99dcc3e5 5532
7aac7898
LS
5533#ifdef CONFIG_HAVE_MEMORYLESS_NODES
5534 /*
5535 * We now know the "local memory node" for each node--
5536 * i.e., the node of the first zone in the generic zonelist.
5537 * Set up numa_mem percpu variable for on-line cpus. During
5538 * boot, only the boot cpu should be on-line; we'll init the
5539 * secondary cpus' numa_mem as they come on-line. During
5540 * node/memory hotplug, we'll fixup all on-line cpus.
5541 */
d9c9a0b9 5542 for_each_online_cpu(cpu)
7aac7898 5543 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
afb6ebb3 5544#endif
d9c9a0b9 5545 }
b93e0f32 5546
1007843a 5547 printk_deferred_exit();
a2ebb515 5548 write_sequnlock_irqrestore(&zonelist_update_seq, flags);
6811378e
YG
5549}
5550
061f67bc
RV
5551static noinline void __init
5552build_all_zonelists_init(void)
5553{
afb6ebb3
MH
5554 int cpu;
5555
061f67bc 5556 __build_all_zonelists(NULL);
afb6ebb3
MH
5557
5558 /*
5559 * Initialize the boot_pagesets that are going to be used
5560 * for bootstrapping processors. The real pagesets for
5561 * each zone will be allocated later when the per cpu
5562 * allocator is available.
5563 *
5564 * boot_pagesets are used also for bootstrapping offline
5565 * cpus if the system is already booted because the pagesets
5566 * are needed to initialize allocators on a specific cpu too.
5567 * F.e. the percpu allocator needs the page allocator which
5568 * needs the percpu allocator in order to allocate its pagesets
5569 * (a chicken-egg dilemma).
5570 */
5571 for_each_possible_cpu(cpu)
28f836b6 5572 per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu));
afb6ebb3 5573
061f67bc
RV
5574 mminit_verify_zonelist();
5575 cpuset_init_current_mems_allowed();
5576}
5577
4eaf3f64 5578/*
4eaf3f64 5579 * unless system_state == SYSTEM_BOOTING.
061f67bc 5580 *
72675e13 5581 * __ref due to call of __init annotated helper build_all_zonelists_init
061f67bc 5582 * [protected by SYSTEM_BOOTING].
4eaf3f64 5583 */
72675e13 5584void __ref build_all_zonelists(pg_data_t *pgdat)
6811378e 5585{
0a18e607
DH
5586 unsigned long vm_total_pages;
5587
6811378e 5588 if (system_state == SYSTEM_BOOTING) {
061f67bc 5589 build_all_zonelists_init();
6811378e 5590 } else {
11cd8638 5591 __build_all_zonelists(pgdat);
6811378e
YG
5592 /* cpuset refresh routine should be here */
5593 }
56b9413b
DH
5594 /* Get the number of free pages beyond high watermark in all zones. */
5595 vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
9ef9acb0
MG
5596 /*
5597 * Disable grouping by mobility if the number of pages in the
5598 * system is too low to allow the mechanism to work. It would be
5599 * more accurate, but expensive to check per-zone. This check is
5600 * made on memory-hotadd so a system can start with mobility
5601 * disabled and enable it later
5602 */
d9c23400 5603 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
9ef9acb0
MG
5604 page_group_by_mobility_disabled = 1;
5605 else
5606 page_group_by_mobility_disabled = 0;
5607
ce0725f7 5608 pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n",
756a025f 5609 nr_online_nodes,
ab505e8b 5610 str_off_on(page_group_by_mobility_disabled),
756a025f 5611 vm_total_pages);
f0c0b2b8 5612#ifdef CONFIG_NUMA
f88dfff5 5613 pr_info("Policy zone: %s\n", zone_names[policy_zone]);
f0c0b2b8 5614#endif
1da177e4
LT
5615}
5616
9420f89d 5617static int zone_batchsize(struct zone *zone)
1da177e4 5618{
9420f89d
MRI
5619#ifdef CONFIG_MMU
5620 int batch;
1da177e4 5621
9420f89d
MRI
5622 /*
5623 * The number of pages to batch allocate is either ~0.1%
5624 * of the zone or 1MB, whichever is smaller. The batch
5625 * size is striking a balance between allocation latency
5626 * and zone lock contention.
5627 */
5628 batch = min(zone_managed_pages(zone) >> 10, SZ_1M / PAGE_SIZE);
5629 batch /= 4; /* We effectively *= 4 below */
5630 if (batch < 1)
5631 batch = 1;
22b31eec 5632
4b94ffdc 5633 /*
9420f89d
MRI
5634 * Clamp the batch to a 2^n - 1 value. Having a power
5635 * of 2 value was found to be more likely to have
5636 * suboptimal cache aliasing properties in some cases.
5637 *
5638 * For example if 2 tasks are alternately allocating
5639 * batches of pages, one task can end up with a lot
5640 * of pages of one half of the possible page colors
5641 * and the other with pages of the other colors.
4b94ffdc 5642 */
9420f89d 5643 batch = rounddown_pow_of_two(batch + batch/2) - 1;
966cf44f 5644
9420f89d 5645 return batch;
3a6be87f
DH
5646
5647#else
5648 /* The deferral and batching of frees should be suppressed under NOMMU
5649 * conditions.
5650 *
5651 * The problem is that NOMMU needs to be able to allocate large chunks
5652 * of contiguous memory as there's no hardware page translation to
5653 * assemble apparent contiguous memory from discontiguous pages.
5654 *
5655 * Queueing large contiguous runs of pages for batching, however,
5656 * causes the pages to actually be freed in smaller chunks. As there
5657 * can be a significant delay between the individual batches being
5658 * recycled, this leads to the once large chunks of space being
5659 * fragmented and becoming unavailable for high-order allocations.
5660 */
5661 return 0;
5662#endif
e7c8d5c9
CL
5663}
5664
e95d372c 5665static int percpu_pagelist_high_fraction;
90b41691
YH
5666static int zone_highsize(struct zone *zone, int batch, int cpu_online,
5667 int high_fraction)
b92ca18e 5668{
9420f89d
MRI
5669#ifdef CONFIG_MMU
5670 int high;
5671 int nr_split_cpus;
5672 unsigned long total_pages;
c13291a5 5673
90b41691 5674 if (!high_fraction) {
2a1e274a 5675 /*
9420f89d
MRI
5676 * By default, the high value of the pcp is based on the zone
5677 * low watermark so that if they are full then background
5678 * reclaim will not be started prematurely.
2a1e274a 5679 */
9420f89d
MRI
5680 total_pages = low_wmark_pages(zone);
5681 } else {
2a1e274a 5682 /*
9420f89d
MRI
5683 * If percpu_pagelist_high_fraction is configured, the high
5684 * value is based on a fraction of the managed pages in the
5685 * zone.
2a1e274a 5686 */
90b41691 5687 total_pages = zone_managed_pages(zone) / high_fraction;
2a1e274a
MG
5688 }
5689
5690 /*
9420f89d
MRI
5691 * Split the high value across all online CPUs local to the zone. Note
5692 * that early in boot that CPUs may not be online yet and that during
5693 * CPU hotplug that the cpumask is not yet updated when a CPU is being
90b41691
YH
5694 * onlined. For memory nodes that have no CPUs, split the high value
5695 * across all online CPUs to mitigate the risk that reclaim is triggered
9420f89d 5696 * prematurely due to pages stored on pcp lists.
2a1e274a 5697 */
9420f89d
MRI
5698 nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online;
5699 if (!nr_split_cpus)
5700 nr_split_cpus = num_online_cpus();
5701 high = total_pages / nr_split_cpus;
2a1e274a 5702
9420f89d
MRI
5703 /*
5704 * Ensure high is at least batch*4. The multiple is based on the
5705 * historical relationship between high and batch.
5706 */
5707 high = max(high, batch << 2);
37b07e41 5708
9420f89d
MRI
5709 return high;
5710#else
5711 return 0;
5712#endif
37b07e41
LS
5713}
5714
51930df5 5715/*
9420f89d
MRI
5716 * pcp->high and pcp->batch values are related and generally batch is lower
5717 * than high. They are also related to pcp->count such that count is lower
5718 * than high, and as soon as it reaches high, the pcplist is flushed.
5719 *
5720 * However, guaranteeing these relations at all times would require e.g. write
5721 * barriers here but also careful usage of read barriers at the read side, and
5722 * thus be prone to error and bad for performance. Thus the update only prevents
90b41691
YH
5723 * store tearing. Any new users of pcp->batch, pcp->high_min and pcp->high_max
5724 * should ensure they can cope with those fields changing asynchronously, and
5725 * fully trust only the pcp->count field on the local CPU with interrupts
5726 * disabled.
9420f89d
MRI
5727 *
5728 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
5729 * outside of boot time (or some other assurance that no concurrent updaters
5730 * exist).
51930df5 5731 */
90b41691
YH
5732static void pageset_update(struct per_cpu_pages *pcp, unsigned long high_min,
5733 unsigned long high_max, unsigned long batch)
51930df5 5734{
9420f89d 5735 WRITE_ONCE(pcp->batch, batch);
90b41691
YH
5736 WRITE_ONCE(pcp->high_min, high_min);
5737 WRITE_ONCE(pcp->high_max, high_max);
51930df5
MR
5738}
5739
9420f89d 5740static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats)
c713216d 5741{
9420f89d 5742 int pindex;
90cae1fe 5743
9420f89d
MRI
5744 memset(pcp, 0, sizeof(*pcp));
5745 memset(pzstats, 0, sizeof(*pzstats));
90cae1fe 5746
9420f89d
MRI
5747 spin_lock_init(&pcp->lock);
5748 for (pindex = 0; pindex < NR_PCP_LISTS; pindex++)
5749 INIT_LIST_HEAD(&pcp->lists[pindex]);
2a1e274a 5750
9420f89d
MRI
5751 /*
5752 * Set batch and high values safe for a boot pageset. A true percpu
5753 * pageset's initialization will update them subsequently. Here we don't
5754 * need to be as careful as pageset_update() as nobody can access the
5755 * pageset yet.
5756 */
90b41691
YH
5757 pcp->high_min = BOOT_PAGESET_HIGH;
5758 pcp->high_max = BOOT_PAGESET_HIGH;
9420f89d 5759 pcp->batch = BOOT_PAGESET_BATCH;
6ccdcb6d 5760 pcp->free_count = 0;
9420f89d 5761}
c713216d 5762
90b41691
YH
5763static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high_min,
5764 unsigned long high_max, unsigned long batch)
9420f89d
MRI
5765{
5766 struct per_cpu_pages *pcp;
5767 int cpu;
2a1e274a 5768
9420f89d
MRI
5769 for_each_possible_cpu(cpu) {
5770 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
90b41691 5771 pageset_update(pcp, high_min, high_max, batch);
2a1e274a 5772 }
9420f89d 5773}
c713216d 5774
9420f89d
MRI
5775/*
5776 * Calculate and set new high and batch values for all per-cpu pagesets of a
5777 * zone based on the zone's size.
5778 */
5779static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online)
5780{
90b41691 5781 int new_high_min, new_high_max, new_batch;
09f49dca 5782
9420f89d 5783 new_batch = max(1, zone_batchsize(zone));
90b41691
YH
5784 if (percpu_pagelist_high_fraction) {
5785 new_high_min = zone_highsize(zone, new_batch, cpu_online,
5786 percpu_pagelist_high_fraction);
5787 /*
5788 * PCP high is tuned manually, disable auto-tuning via
5789 * setting high_min and high_max to the manual value.
5790 */
5791 new_high_max = new_high_min;
5792 } else {
5793 new_high_min = zone_highsize(zone, new_batch, cpu_online, 0);
5794 new_high_max = zone_highsize(zone, new_batch, cpu_online,
5795 MIN_PERCPU_PAGELIST_HIGH_FRACTION);
5796 }
09f49dca 5797
90b41691
YH
5798 if (zone->pageset_high_min == new_high_min &&
5799 zone->pageset_high_max == new_high_max &&
9420f89d
MRI
5800 zone->pageset_batch == new_batch)
5801 return;
37b07e41 5802
90b41691
YH
5803 zone->pageset_high_min = new_high_min;
5804 zone->pageset_high_max = new_high_max;
9420f89d 5805 zone->pageset_batch = new_batch;
122e093c 5806
90b41691
YH
5807 __zone_set_pageset_high_and_batch(zone, new_high_min, new_high_max,
5808 new_batch);
c713216d 5809}
2a1e274a 5810
9420f89d 5811void __meminit setup_zone_pageset(struct zone *zone)
2a1e274a 5812{
9420f89d 5813 int cpu;
2a1e274a 5814
9420f89d
MRI
5815 /* Size may be 0 on !SMP && !NUMA */
5816 if (sizeof(struct per_cpu_zonestat) > 0)
5817 zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat);
2a1e274a 5818
9420f89d
MRI
5819 zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages);
5820 for_each_possible_cpu(cpu) {
5821 struct per_cpu_pages *pcp;
5822 struct per_cpu_zonestat *pzstats;
2a1e274a 5823
9420f89d
MRI
5824 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
5825 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
5826 per_cpu_pages_init(pcp, pzstats);
a5c6d650 5827 }
9420f89d
MRI
5828
5829 zone_set_pageset_high_and_batch(zone, 0);
2a1e274a 5830}
ed7ed365 5831
7e63efef 5832/*
9420f89d
MRI
5833 * The zone indicated has a new number of managed_pages; batch sizes and percpu
5834 * page high values need to be recalculated.
7e63efef 5835 */
9420f89d 5836static void zone_pcp_update(struct zone *zone, int cpu_online)
7e63efef 5837{
9420f89d
MRI
5838 mutex_lock(&pcp_batch_high_lock);
5839 zone_set_pageset_high_and_batch(zone, cpu_online);
5840 mutex_unlock(&pcp_batch_high_lock);
7e63efef
MG
5841}
5842
5cec4eb7 5843static void zone_pcp_update_cacheinfo(struct zone *zone, unsigned int cpu)
362d37a1 5844{
362d37a1
YH
5845 struct per_cpu_pages *pcp;
5846 struct cpu_cacheinfo *cci;
5847
5cec4eb7
YH
5848 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
5849 cci = get_cpu_cacheinfo(cpu);
5850 /*
5851 * If data cache slice of CPU is large enough, "pcp->batch"
5852 * pages can be preserved in PCP before draining PCP for
5853 * consecutive high-order pages freeing without allocation.
5854 * This can reduce zone lock contention without hurting
5855 * cache-hot pages sharing.
5856 */
5857 spin_lock(&pcp->lock);
5858 if ((cci->per_cpu_data_slice_size >> PAGE_SHIFT) > 3 * pcp->batch)
5859 pcp->flags |= PCPF_FREE_HIGH_BATCH;
5860 else
5861 pcp->flags &= ~PCPF_FREE_HIGH_BATCH;
5862 spin_unlock(&pcp->lock);
362d37a1
YH
5863}
5864
5cec4eb7 5865void setup_pcp_cacheinfo(unsigned int cpu)
362d37a1
YH
5866{
5867 struct zone *zone;
5868
5869 for_each_populated_zone(zone)
5cec4eb7 5870 zone_pcp_update_cacheinfo(zone, cpu);
362d37a1
YH
5871}
5872
7e63efef 5873/*
9420f89d
MRI
5874 * Allocate per cpu pagesets and initialize them.
5875 * Before this call only boot pagesets were available.
7e63efef 5876 */
9420f89d 5877void __init setup_per_cpu_pageset(void)
7e63efef 5878{
9420f89d
MRI
5879 struct pglist_data *pgdat;
5880 struct zone *zone;
5881 int __maybe_unused cpu;
5882
5883 for_each_populated_zone(zone)
5884 setup_zone_pageset(zone);
5885
5886#ifdef CONFIG_NUMA
5887 /*
5888 * Unpopulated zones continue using the boot pagesets.
5889 * The numa stats for these pagesets need to be reset.
5890 * Otherwise, they will end up skewing the stats of
5891 * the nodes these zones are associated with.
5892 */
5893 for_each_possible_cpu(cpu) {
5894 struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu);
5895 memset(pzstats->vm_numa_event, 0,
5896 sizeof(pzstats->vm_numa_event));
5897 }
5898#endif
5899
5900 for_each_online_pgdat(pgdat)
5901 pgdat->per_cpu_nodestats =
5902 alloc_percpu(struct per_cpu_nodestat);
7e63efef
MG
5903}
5904
9420f89d
MRI
5905__meminit void zone_pcp_init(struct zone *zone)
5906{
5907 /*
5908 * per cpu subsystem is not up at this point. The following code
5909 * relies on the ability of the linker to provide the
5910 * offset of a (static) per cpu variable into the per cpu area.
5911 */
5912 zone->per_cpu_pageset = &boot_pageset;
5913 zone->per_cpu_zonestats = &boot_zonestats;
90b41691
YH
5914 zone->pageset_high_min = BOOT_PAGESET_HIGH;
5915 zone->pageset_high_max = BOOT_PAGESET_HIGH;
9420f89d
MRI
5916 zone->pageset_batch = BOOT_PAGESET_BATCH;
5917
5918 if (populated_zone(zone))
5919 pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name,
5920 zone->present_pages, zone_batchsize(zone));
5921}
ed7ed365 5922
9726891f 5923static void setup_per_zone_lowmem_reserve(void);
5924
c3d5f5f0
JL
5925void adjust_managed_page_count(struct page *page, long count)
5926{
9705bea5 5927 atomic_long_add(count, &page_zone(page)->managed_pages);
ca79b0c2 5928 totalram_pages_add(count);
9726891f 5929 setup_per_zone_lowmem_reserve();
c3d5f5f0 5930}
3dcc0571 5931EXPORT_SYMBOL(adjust_managed_page_count);
c3d5f5f0 5932
e5cb113f 5933unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
69afade7 5934{
11199692
JL
5935 void *pos;
5936 unsigned long pages = 0;
69afade7 5937
11199692
JL
5938 start = (void *)PAGE_ALIGN((unsigned long)start);
5939 end = (void *)((unsigned long)end & PAGE_MASK);
5940 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
0d834328
DH
5941 struct page *page = virt_to_page(pos);
5942 void *direct_map_addr;
5943
5944 /*
5945 * 'direct_map_addr' might be different from 'pos'
5946 * because some architectures' virt_to_page()
5947 * work with aliases. Getting the direct map
5948 * address ensures that we get a _writeable_
5949 * alias for the memset().
5950 */
5951 direct_map_addr = page_address(page);
c746170d
VF
5952 /*
5953 * Perform a kasan-unchecked memset() since this memory
5954 * has not been initialized.
5955 */
5956 direct_map_addr = kasan_reset_tag(direct_map_addr);
dbe67df4 5957 if ((unsigned int)poison <= 0xFF)
0d834328
DH
5958 memset(direct_map_addr, poison, PAGE_SIZE);
5959
5960 free_reserved_page(page);
69afade7
JL
5961 }
5962
5963 if (pages && s)
ff7ed9e4 5964 pr_info("Freeing %s memory: %ldK\n", s, K(pages));
69afade7
JL
5965
5966 return pages;
5967}
5968
b3bebe44
SB
5969void free_reserved_page(struct page *page)
5970{
a8fc28da 5971 clear_page_tag_ref(page);
b3bebe44
SB
5972 ClearPageReserved(page);
5973 init_page_count(page);
5974 __free_page(page);
5975 adjust_managed_page_count(page, 1);
5976}
5977EXPORT_SYMBOL(free_reserved_page);
5978
005fd4bb 5979static int page_alloc_cpu_dead(unsigned int cpu)
1da177e4 5980{
04f8cfea 5981 struct zone *zone;
1da177e4 5982
005fd4bb 5983 lru_add_drain_cpu(cpu);
96f97c43 5984 mlock_drain_remote(cpu);
005fd4bb 5985 drain_pages(cpu);
9f8f2172 5986
005fd4bb
SAS
5987 /*
5988 * Spill the event counters of the dead processor
5989 * into the current processors event counters.
5990 * This artificially elevates the count of the current
5991 * processor.
5992 */
5993 vm_events_fold_cpu(cpu);
9f8f2172 5994
005fd4bb
SAS
5995 /*
5996 * Zero the differential counters of the dead processor
5997 * so that the vm statistics are consistent.
5998 *
5999 * This is only okay since the processor is dead and cannot
6000 * race with what we are doing.
6001 */
6002 cpu_vm_stats_fold(cpu);
04f8cfea
MG
6003
6004 for_each_populated_zone(zone)
6005 zone_pcp_update(zone, 0);
6006
6007 return 0;
6008}
6009
6010static int page_alloc_cpu_online(unsigned int cpu)
6011{
6012 struct zone *zone;
6013
6014 for_each_populated_zone(zone)
6015 zone_pcp_update(zone, 1);
005fd4bb 6016 return 0;
1da177e4 6017}
1da177e4 6018
c4fbed4b 6019void __init page_alloc_init_cpuhp(void)
1da177e4 6020{
005fd4bb
SAS
6021 int ret;
6022
04f8cfea
MG
6023 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC,
6024 "mm/page_alloc:pcp",
6025 page_alloc_cpu_online,
005fd4bb
SAS
6026 page_alloc_cpu_dead);
6027 WARN_ON(ret < 0);
1da177e4
LT
6028}
6029
cb45b0e9 6030/*
34b10060 6031 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
cb45b0e9
HA
6032 * or min_free_kbytes changes.
6033 */
6034static void calculate_totalreserve_pages(void)
6035{
6036 struct pglist_data *pgdat;
6037 unsigned long reserve_pages = 0;
2f6726e5 6038 enum zone_type i, j;
cb45b0e9
HA
6039
6040 for_each_online_pgdat(pgdat) {
281e3726
MG
6041
6042 pgdat->totalreserve_pages = 0;
6043
cb45b0e9
HA
6044 for (i = 0; i < MAX_NR_ZONES; i++) {
6045 struct zone *zone = pgdat->node_zones + i;
3484b2de 6046 long max = 0;
9705bea5 6047 unsigned long managed_pages = zone_managed_pages(zone);
cb45b0e9
HA
6048
6049 /* Find valid and maximum lowmem_reserve in the zone */
6050 for (j = i; j < MAX_NR_ZONES; j++) {
6051 if (zone->lowmem_reserve[j] > max)
6052 max = zone->lowmem_reserve[j];
6053 }
6054
41858966
MG
6055 /* we treat the high watermark as reserved pages. */
6056 max += high_wmark_pages(zone);
cb45b0e9 6057
3d6357de
AK
6058 if (max > managed_pages)
6059 max = managed_pages;
a8d01437 6060
281e3726 6061 pgdat->totalreserve_pages += max;
a8d01437 6062
cb45b0e9
HA
6063 reserve_pages += max;
6064 }
6065 }
6066 totalreserve_pages = reserve_pages;
15766485 6067 trace_mm_calculate_totalreserve_pages(totalreserve_pages);
cb45b0e9
HA
6068}
6069
1da177e4
LT
6070/*
6071 * setup_per_zone_lowmem_reserve - called whenever
34b10060 6072 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
1da177e4
LT
6073 * has a correct pages reserved value, so an adequate number of
6074 * pages are left in the zone after a successful __alloc_pages().
6075 */
6076static void setup_per_zone_lowmem_reserve(void)
6077{
6078 struct pglist_data *pgdat;
470c61d7 6079 enum zone_type i, j;
1da177e4 6080
ec936fc5 6081 for_each_online_pgdat(pgdat) {
470c61d7
LS
6082 for (i = 0; i < MAX_NR_ZONES - 1; i++) {
6083 struct zone *zone = &pgdat->node_zones[i];
6084 int ratio = sysctl_lowmem_reserve_ratio[i];
6085 bool clear = !ratio || !zone_managed_pages(zone);
6086 unsigned long managed_pages = 0;
6087
6088 for (j = i + 1; j < MAX_NR_ZONES; j++) {
f7ec1044
LS
6089 struct zone *upper_zone = &pgdat->node_zones[j];
6090
6091 managed_pages += zone_managed_pages(upper_zone);
470c61d7 6092
eae116d1 6093 if (clear)
f7ec1044
LS
6094 zone->lowmem_reserve[j] = 0;
6095 else
470c61d7 6096 zone->lowmem_reserve[j] = managed_pages / ratio;
a293aba4
ML
6097 trace_mm_setup_per_zone_lowmem_reserve(zone, upper_zone,
6098 zone->lowmem_reserve[j]);
1da177e4
LT
6099 }
6100 }
6101 }
cb45b0e9
HA
6102
6103 /* update totalreserve_pages */
6104 calculate_totalreserve_pages();
1da177e4
LT
6105}
6106
cfd3da1e 6107static void __setup_per_zone_wmarks(void)
1da177e4
LT
6108{
6109 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
6110 unsigned long lowmem_pages = 0;
6111 struct zone *zone;
6112 unsigned long flags;
6113
416ef04f 6114 /* Calculate total number of !ZONE_HIGHMEM and !ZONE_MOVABLE pages */
1da177e4 6115 for_each_zone(zone) {
416ef04f 6116 if (!is_highmem(zone) && zone_idx(zone) != ZONE_MOVABLE)
9705bea5 6117 lowmem_pages += zone_managed_pages(zone);
1da177e4
LT
6118 }
6119
6120 for_each_zone(zone) {
ac924c60
AM
6121 u64 tmp;
6122
1125b4e3 6123 spin_lock_irqsave(&zone->lock, flags);
9705bea5 6124 tmp = (u64)pages_min * zone_managed_pages(zone);
72741db6 6125 tmp = div64_ul(tmp, lowmem_pages);
416ef04f 6126 if (is_highmem(zone) || zone_idx(zone) == ZONE_MOVABLE) {
1da177e4 6127 /*
669ed175 6128 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
416ef04f 6129 * need highmem and movable zones pages, so cap pages_min
6130 * to a small value here.
669ed175 6131 *
41858966 6132 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
8bb4e7a2 6133 * deltas control async page reclaim, and so should
416ef04f 6134 * not be capped for highmem and movable zones.
1da177e4 6135 */
90ae8d67 6136 unsigned long min_pages;
1da177e4 6137
9705bea5 6138 min_pages = zone_managed_pages(zone) / 1024;
90ae8d67 6139 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
a9214443 6140 zone->_watermark[WMARK_MIN] = min_pages;
1da177e4 6141 } else {
669ed175
NP
6142 /*
6143 * If it's a lowmem zone, reserve a number of pages
1da177e4
LT
6144 * proportionate to the zone's size.
6145 */
a9214443 6146 zone->_watermark[WMARK_MIN] = tmp;
1da177e4
LT
6147 }
6148
795ae7a0
JW
6149 /*
6150 * Set the kswapd watermarks distance according to the
6151 * scale factor in proportion to available memory, but
6152 * ensure a minimum size on small systems.
6153 */
6154 tmp = max_t(u64, tmp >> 2,
9705bea5 6155 mult_frac(zone_managed_pages(zone),
795ae7a0
JW
6156 watermark_scale_factor, 10000));
6157
aa092591 6158 zone->watermark_boost = 0;
a9214443 6159 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
c574bbe9
YH
6160 zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp;
6161 zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp;
8c02048d 6162 trace_mm_setup_per_zone_wmarks(zone);
49f223a9 6163
1125b4e3 6164 spin_unlock_irqrestore(&zone->lock, flags);
1da177e4 6165 }
cb45b0e9
HA
6166
6167 /* update totalreserve_pages */
6168 calculate_totalreserve_pages();
1da177e4
LT
6169}
6170
cfd3da1e
MG
6171/**
6172 * setup_per_zone_wmarks - called when min_free_kbytes changes
6173 * or when memory is hot-{added|removed}
6174 *
6175 * Ensures that the watermark[min,low,high] values for each zone are set
6176 * correctly with respect to min_free_kbytes.
6177 */
6178void setup_per_zone_wmarks(void)
6179{
b92ca18e 6180 struct zone *zone;
b93e0f32
MH
6181 static DEFINE_SPINLOCK(lock);
6182
6183 spin_lock(&lock);
cfd3da1e 6184 __setup_per_zone_wmarks();
b93e0f32 6185 spin_unlock(&lock);
b92ca18e
MG
6186
6187 /*
6188 * The watermark size have changed so update the pcpu batch
6189 * and high limits or the limits may be inappropriate.
6190 */
6191 for_each_zone(zone)
04f8cfea 6192 zone_pcp_update(zone, 0);
cfd3da1e
MG
6193}
6194
1da177e4
LT
6195/*
6196 * Initialise min_free_kbytes.
6197 *
6198 * For small machines we want it small (128k min). For large machines
8beeae86 6199 * we want it large (256MB max). But it is not linear, because network
1da177e4
LT
6200 * bandwidth does not increase linearly with machine size. We use
6201 *
b8af2941 6202 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
1da177e4
LT
6203 * min_free_kbytes = sqrt(lowmem_kbytes * 16)
6204 *
6205 * which yields
6206 *
6207 * 16MB: 512k
6208 * 32MB: 724k
6209 * 64MB: 1024k
6210 * 128MB: 1448k
6211 * 256MB: 2048k
6212 * 512MB: 2896k
6213 * 1024MB: 4096k
6214 * 2048MB: 5792k
6215 * 4096MB: 8192k
6216 * 8192MB: 11584k
6217 * 16384MB: 16384k
6218 */
bd3400ea 6219void calculate_min_free_kbytes(void)
1da177e4
LT
6220{
6221 unsigned long lowmem_kbytes;
5f12733e 6222 int new_min_free_kbytes;
1da177e4
LT
6223
6224 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
5f12733e
MH
6225 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
6226
59d336bd
WS
6227 if (new_min_free_kbytes > user_min_free_kbytes)
6228 min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144);
6229 else
5f12733e
MH
6230 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
6231 new_min_free_kbytes, user_min_free_kbytes);
59d336bd 6232
bd3400ea
LF
6233}
6234
6235int __meminit init_per_zone_wmark_min(void)
6236{
6237 calculate_min_free_kbytes();
bc75d33f 6238 setup_per_zone_wmarks();
a6cccdc3 6239 refresh_zone_stat_thresholds();
1da177e4 6240 setup_per_zone_lowmem_reserve();
6423aa81
JK
6241
6242#ifdef CONFIG_NUMA
6243 setup_min_unmapped_ratio();
6244 setup_min_slab_ratio();
6245#endif
6246
4aab2be0
VB
6247 khugepaged_min_free_kbytes_update();
6248
1da177e4
LT
6249 return 0;
6250}
e08d3fdf 6251postcore_initcall(init_per_zone_wmark_min)
1da177e4
LT
6252
6253/*
b8af2941 6254 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
1da177e4
LT
6255 * that we can call two helper functions whenever min_free_kbytes
6256 * changes.
6257 */
78eb4ea2 6258static int min_free_kbytes_sysctl_handler(const struct ctl_table *table, int write,
32927393 6259 void *buffer, size_t *length, loff_t *ppos)
1da177e4 6260{
da8c757b
HP
6261 int rc;
6262
6263 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6264 if (rc)
6265 return rc;
6266
5f12733e
MH
6267 if (write) {
6268 user_min_free_kbytes = min_free_kbytes;
bc75d33f 6269 setup_per_zone_wmarks();
5f12733e 6270 }
1da177e4
LT
6271 return 0;
6272}
6273
78eb4ea2 6274static int watermark_scale_factor_sysctl_handler(const struct ctl_table *table, int write,
32927393 6275 void *buffer, size_t *length, loff_t *ppos)
795ae7a0
JW
6276{
6277 int rc;
6278
6279 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6280 if (rc)
6281 return rc;
6282
6283 if (write)
6284 setup_per_zone_wmarks();
6285
6286 return 0;
6287}
6288
9614634f 6289#ifdef CONFIG_NUMA
6423aa81 6290static void setup_min_unmapped_ratio(void)
9614634f 6291{
6423aa81 6292 pg_data_t *pgdat;
9614634f 6293 struct zone *zone;
9614634f 6294
a5f5f91d 6295 for_each_online_pgdat(pgdat)
81cbcbc2 6296 pgdat->min_unmapped_pages = 0;
a5f5f91d 6297
9614634f 6298 for_each_zone(zone)
9705bea5
AK
6299 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
6300 sysctl_min_unmapped_ratio) / 100;
9614634f 6301}
0ff38490 6302
6423aa81 6303
78eb4ea2 6304static int sysctl_min_unmapped_ratio_sysctl_handler(const struct ctl_table *table, int write,
32927393 6305 void *buffer, size_t *length, loff_t *ppos)
0ff38490 6306{
0ff38490
CL
6307 int rc;
6308
8d65af78 6309 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
0ff38490
CL
6310 if (rc)
6311 return rc;
6312
6423aa81
JK
6313 setup_min_unmapped_ratio();
6314
6315 return 0;
6316}
6317
6318static void setup_min_slab_ratio(void)
6319{
6320 pg_data_t *pgdat;
6321 struct zone *zone;
6322
a5f5f91d
MG
6323 for_each_online_pgdat(pgdat)
6324 pgdat->min_slab_pages = 0;
6325
0ff38490 6326 for_each_zone(zone)
9705bea5
AK
6327 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
6328 sysctl_min_slab_ratio) / 100;
6423aa81
JK
6329}
6330
78eb4ea2 6331static int sysctl_min_slab_ratio_sysctl_handler(const struct ctl_table *table, int write,
32927393 6332 void *buffer, size_t *length, loff_t *ppos)
6423aa81
JK
6333{
6334 int rc;
6335
6336 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6337 if (rc)
6338 return rc;
6339
6340 setup_min_slab_ratio();
6341
0ff38490
CL
6342 return 0;
6343}
9614634f
CL
6344#endif
6345
1da177e4
LT
6346/*
6347 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
6348 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
6349 * whenever sysctl_lowmem_reserve_ratio changes.
6350 *
6351 * The reserve ratio obviously has absolutely no relation with the
41858966 6352 * minimum watermarks. The lowmem reserve ratio can only make sense
1da177e4
LT
6353 * if in function of the boot time zone sizes.
6354 */
78eb4ea2 6355static int lowmem_reserve_ratio_sysctl_handler(const struct ctl_table *table,
e95d372c 6356 int write, void *buffer, size_t *length, loff_t *ppos)
1da177e4 6357{
86aaf255
BH
6358 int i;
6359
8d65af78 6360 proc_dointvec_minmax(table, write, buffer, length, ppos);
86aaf255
BH
6361
6362 for (i = 0; i < MAX_NR_ZONES; i++) {
6363 if (sysctl_lowmem_reserve_ratio[i] < 1)
6364 sysctl_lowmem_reserve_ratio[i] = 0;
6365 }
6366
1da177e4
LT
6367 setup_per_zone_lowmem_reserve();
6368 return 0;
6369}
6370
8ad4b1fb 6371/*
74f44822
MG
6372 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each
6373 * cpu. It is the fraction of total pages in each zone that a hot per cpu
b8af2941 6374 * pagelist can have before it gets flushed back to buddy allocator.
8ad4b1fb 6375 */
78eb4ea2 6376static int percpu_pagelist_high_fraction_sysctl_handler(const struct ctl_table *table,
74f44822 6377 int write, void *buffer, size_t *length, loff_t *ppos)
8ad4b1fb
RS
6378{
6379 struct zone *zone;
74f44822 6380 int old_percpu_pagelist_high_fraction;
8ad4b1fb
RS
6381 int ret;
6382
7cd2b0a3 6383 mutex_lock(&pcp_batch_high_lock);
74f44822 6384 old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction;
7cd2b0a3 6385
8d65af78 6386 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
7cd2b0a3
DR
6387 if (!write || ret < 0)
6388 goto out;
6389
6390 /* Sanity checking to avoid pcp imbalance */
74f44822
MG
6391 if (percpu_pagelist_high_fraction &&
6392 percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) {
6393 percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction;
7cd2b0a3
DR
6394 ret = -EINVAL;
6395 goto out;
6396 }
6397
6398 /* No change? */
74f44822 6399 if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction)
7cd2b0a3 6400 goto out;
c8e251fa 6401
cb1ef534 6402 for_each_populated_zone(zone)
74f44822 6403 zone_set_pageset_high_and_batch(zone, 0);
7cd2b0a3 6404out:
c8e251fa 6405 mutex_unlock(&pcp_batch_high_lock);
7cd2b0a3 6406 return ret;
8ad4b1fb
RS
6407}
6408
1751f872 6409static const struct ctl_table page_alloc_sysctl_table[] = {
e95d372c
KW
6410 {
6411 .procname = "min_free_kbytes",
6412 .data = &min_free_kbytes,
6413 .maxlen = sizeof(min_free_kbytes),
6414 .mode = 0644,
6415 .proc_handler = min_free_kbytes_sysctl_handler,
6416 .extra1 = SYSCTL_ZERO,
6417 },
6418 {
6419 .procname = "watermark_boost_factor",
6420 .data = &watermark_boost_factor,
6421 .maxlen = sizeof(watermark_boost_factor),
6422 .mode = 0644,
6423 .proc_handler = proc_dointvec_minmax,
6424 .extra1 = SYSCTL_ZERO,
6425 },
6426 {
6427 .procname = "watermark_scale_factor",
6428 .data = &watermark_scale_factor,
6429 .maxlen = sizeof(watermark_scale_factor),
6430 .mode = 0644,
6431 .proc_handler = watermark_scale_factor_sysctl_handler,
6432 .extra1 = SYSCTL_ONE,
6433 .extra2 = SYSCTL_THREE_THOUSAND,
6434 },
e3aa7df3
JW
6435 {
6436 .procname = "defrag_mode",
6437 .data = &defrag_mode,
6438 .maxlen = sizeof(defrag_mode),
6439 .mode = 0644,
6440 .proc_handler = proc_dointvec_minmax,
6441 .extra1 = SYSCTL_ZERO,
6442 .extra2 = SYSCTL_ONE,
6443 },
e95d372c
KW
6444 {
6445 .procname = "percpu_pagelist_high_fraction",
6446 .data = &percpu_pagelist_high_fraction,
6447 .maxlen = sizeof(percpu_pagelist_high_fraction),
6448 .mode = 0644,
6449 .proc_handler = percpu_pagelist_high_fraction_sysctl_handler,
6450 .extra1 = SYSCTL_ZERO,
6451 },
6452 {
6453 .procname = "lowmem_reserve_ratio",
6454 .data = &sysctl_lowmem_reserve_ratio,
6455 .maxlen = sizeof(sysctl_lowmem_reserve_ratio),
6456 .mode = 0644,
6457 .proc_handler = lowmem_reserve_ratio_sysctl_handler,
6458 },
6459#ifdef CONFIG_NUMA
6460 {
6461 .procname = "numa_zonelist_order",
6462 .data = &numa_zonelist_order,
6463 .maxlen = NUMA_ZONELIST_ORDER_LEN,
6464 .mode = 0644,
6465 .proc_handler = numa_zonelist_order_handler,
6466 },
6467 {
6468 .procname = "min_unmapped_ratio",
6469 .data = &sysctl_min_unmapped_ratio,
6470 .maxlen = sizeof(sysctl_min_unmapped_ratio),
6471 .mode = 0644,
6472 .proc_handler = sysctl_min_unmapped_ratio_sysctl_handler,
6473 .extra1 = SYSCTL_ZERO,
6474 .extra2 = SYSCTL_ONE_HUNDRED,
6475 },
6476 {
6477 .procname = "min_slab_ratio",
6478 .data = &sysctl_min_slab_ratio,
6479 .maxlen = sizeof(sysctl_min_slab_ratio),
6480 .mode = 0644,
6481 .proc_handler = sysctl_min_slab_ratio_sysctl_handler,
6482 .extra1 = SYSCTL_ZERO,
6483 .extra2 = SYSCTL_ONE_HUNDRED,
6484 },
6485#endif
e95d372c
KW
6486};
6487
6488void __init page_alloc_sysctl_init(void)
6489{
6490 register_sysctl_init("vm", page_alloc_sysctl_table);
6491}
6492
8df995f6 6493#ifdef CONFIG_CONTIG_ALLOC
a1394bdd
MK
6494/* Usage: See admin-guide/dynamic-debug-howto.rst */
6495static void alloc_contig_dump_pages(struct list_head *page_list)
6496{
6497 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure");
6498
6499 if (DYNAMIC_DEBUG_BRANCH(descriptor)) {
6500 struct page *page;
6501
6502 dump_stack();
6503 list_for_each_entry(page, page_list, lru)
6504 dump_page(page, "migration failure");
6505 }
6506}
a1394bdd 6507
c8b36003
RC
6508/*
6509 * [start, end) must belong to a single zone.
6510 * @migratetype: using migratetype to filter the type of migration in
6511 * trace_mm_alloc_contig_migrate_range_info.
6512 */
2202f51e
DH
6513static int __alloc_contig_migrate_range(struct compact_control *cc,
6514 unsigned long start, unsigned long end, int migratetype)
041d3a8c
MN
6515{
6516 /* This function is based on compact_zone() from compaction.c. */
730ec8c0 6517 unsigned int nr_reclaimed;
041d3a8c
MN
6518 unsigned long pfn = start;
6519 unsigned int tries = 0;
6520 int ret = 0;
8b94e0b8
JK
6521 struct migration_target_control mtc = {
6522 .nid = zone_to_nid(cc->zone),
f6037a4a 6523 .gfp_mask = cc->gfp_mask,
e42dfe4e 6524 .reason = MR_CONTIG_RANGE,
8b94e0b8 6525 };
c8b36003
RC
6526 struct page *page;
6527 unsigned long total_mapped = 0;
6528 unsigned long total_migrated = 0;
6529 unsigned long total_reclaimed = 0;
041d3a8c 6530
361a2a22 6531 lru_cache_disable();
041d3a8c 6532
bb13ffeb 6533 while (pfn < end || !list_empty(&cc->migratepages)) {
041d3a8c
MN
6534 if (fatal_signal_pending(current)) {
6535 ret = -EINTR;
6536 break;
6537 }
6538
bb13ffeb
MG
6539 if (list_empty(&cc->migratepages)) {
6540 cc->nr_migratepages = 0;
c2ad7a1f
OS
6541 ret = isolate_migratepages_range(cc, pfn, end);
6542 if (ret && ret != -EAGAIN)
041d3a8c 6543 break;
c2ad7a1f 6544 pfn = cc->migrate_pfn;
041d3a8c
MN
6545 tries = 0;
6546 } else if (++tries == 5) {
c8e28b47 6547 ret = -EBUSY;
041d3a8c
MN
6548 break;
6549 }
6550
beb51eaa
MK
6551 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
6552 &cc->migratepages);
6553 cc->nr_migratepages -= nr_reclaimed;
02c6de8d 6554
c8b36003
RC
6555 if (trace_mm_alloc_contig_migrate_range_info_enabled()) {
6556 total_reclaimed += nr_reclaimed;
7115936a
DH
6557 list_for_each_entry(page, &cc->migratepages, lru) {
6558 struct folio *folio = page_folio(page);
6559
6560 total_mapped += folio_mapped(folio) *
6561 folio_nr_pages(folio);
6562 }
c8b36003
RC
6563 }
6564
8b94e0b8 6565 ret = migrate_pages(&cc->migratepages, alloc_migration_target,
5ac95884 6566 NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL);
c8e28b47 6567
c8b36003
RC
6568 if (trace_mm_alloc_contig_migrate_range_info_enabled() && !ret)
6569 total_migrated += cc->nr_migratepages;
6570
c8e28b47
OS
6571 /*
6572 * On -ENOMEM, migrate_pages() bails out right away. It is pointless
6573 * to retry again over this error, so do the same here.
6574 */
6575 if (ret == -ENOMEM)
6576 break;
041d3a8c 6577 }
d479960e 6578
361a2a22 6579 lru_cache_enable();
2a6f5124 6580 if (ret < 0) {
3f913fc5 6581 if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY)
151e084a 6582 alloc_contig_dump_pages(&cc->migratepages);
2a6f5124 6583 putback_movable_pages(&cc->migratepages);
2a6f5124 6584 }
c8b36003
RC
6585
6586 trace_mm_alloc_contig_migrate_range_info(start, end, migratetype,
6587 total_migrated,
6588 total_reclaimed,
6589 total_mapped);
6590 return (ret < 0) ? ret : 0;
041d3a8c
MN
6591}
6592
7b755570 6593static void split_free_pages(struct list_head *list, gfp_t gfp_mask)
e98337d1
YZ
6594{
6595 int order;
6596
6597 for (order = 0; order < NR_PAGE_ORDERS; order++) {
6598 struct page *page, *next;
6599 int nr_pages = 1 << order;
6600
6601 list_for_each_entry_safe(page, next, &list[order], lru) {
6602 int i;
6603
7b755570 6604 post_alloc_hook(page, order, gfp_mask);
8fd10a89 6605 set_page_refcounted(page);
e98337d1
YZ
6606 if (!order)
6607 continue;
6608
6609 split_page(page, order);
6610
6611 /* Add all subpages to the order-0 head, in sequence. */
6612 list_del(&page->lru);
6613 for (i = 0; i < nr_pages; i++)
6614 list_add_tail(&page[i].lru, &list[0]);
6615 }
6616 }
6617}
6618
f6037a4a
DH
6619static int __alloc_contig_verify_gfp_mask(gfp_t gfp_mask, gfp_t *gfp_cc_mask)
6620{
6621 const gfp_t reclaim_mask = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
7b755570
DH
6622 const gfp_t action_mask = __GFP_COMP | __GFP_RETRY_MAYFAIL | __GFP_NOWARN |
6623 __GFP_ZERO | __GFP_ZEROTAGS | __GFP_SKIP_ZERO;
f6037a4a
DH
6624 const gfp_t cc_action_mask = __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
6625
6626 /*
6627 * We are given the range to allocate; node, mobility and placement
6628 * hints are irrelevant at this point. We'll simply ignore them.
6629 */
6630 gfp_mask &= ~(GFP_ZONEMASK | __GFP_RECLAIMABLE | __GFP_WRITE |
6631 __GFP_HARDWALL | __GFP_THISNODE | __GFP_MOVABLE);
6632
6633 /*
6634 * We only support most reclaim flags (but not NOFAIL/NORETRY), and
6635 * selected action flags.
6636 */
6637 if (gfp_mask & ~(reclaim_mask | action_mask))
6638 return -EINVAL;
6639
6640 /*
6641 * Flags to control page compaction/migration/reclaim, to free up our
6642 * page range. Migratable pages are movable, __GFP_MOVABLE is implied
6643 * for them.
6644 *
f58498b7
DH
6645 * Traditionally we always had __GFP_RETRY_MAYFAIL set, keep doing that
6646 * to not degrade callers.
f6037a4a
DH
6647 */
6648 *gfp_cc_mask = (gfp_mask & (reclaim_mask | cc_action_mask)) |
f58498b7 6649 __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
f6037a4a
DH
6650 return 0;
6651}
6652
041d3a8c
MN
6653/**
6654 * alloc_contig_range() -- tries to allocate given range of pages
6655 * @start: start PFN to allocate
6656 * @end: one-past-the-last PFN to allocate
f0953a1b 6657 * @migratetype: migratetype of the underlying pageblocks (either
0815f3d8
MN
6658 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
6659 * in range must have the same migratetype and it must
6660 * be either of the two.
f6037a4a
DH
6661 * @gfp_mask: GFP mask. Node/zone/placement hints are ignored; only some
6662 * action and reclaim modifiers are supported. Reclaim modifiers
6663 * control allocation behavior during compaction/migration/reclaim.
041d3a8c 6664 *
11ac3e87
ZY
6665 * The PFN range does not have to be pageblock aligned. The PFN range must
6666 * belong to a single zone.
041d3a8c 6667 *
2c7452a0
MK
6668 * The first thing this routine does is attempt to MIGRATE_ISOLATE all
6669 * pageblocks in the range. Once isolated, the pageblocks should not
6670 * be modified by others.
041d3a8c 6671 *
a862f68a 6672 * Return: zero on success or negative error code. On success all
041d3a8c
MN
6673 * pages which PFN is in [start, end) are allocated for the caller and
6674 * need to be freed with free_contig_range().
6675 */
b951aaff 6676int alloc_contig_range_noprof(unsigned long start, unsigned long end,
ca96b625 6677 unsigned migratetype, gfp_t gfp_mask)
041d3a8c 6678{
041d3a8c 6679 unsigned long outer_start, outer_end;
d00181b9 6680 int ret = 0;
041d3a8c 6681
bb13ffeb
MG
6682 struct compact_control cc = {
6683 .nr_migratepages = 0,
6684 .order = -1,
6685 .zone = page_zone(pfn_to_page(start)),
e0b9daeb 6686 .mode = MIGRATE_SYNC,
bb13ffeb 6687 .ignore_skip_hint = true,
2583d671 6688 .no_set_skip_hint = true,
b06eda09 6689 .alloc_contig = true,
bb13ffeb
MG
6690 };
6691 INIT_LIST_HEAD(&cc.migratepages);
6692
f6037a4a
DH
6693 gfp_mask = current_gfp_context(gfp_mask);
6694 if (__alloc_contig_verify_gfp_mask(gfp_mask, (gfp_t *)&cc.gfp_mask))
6695 return -EINVAL;
6696
041d3a8c
MN
6697 /*
6698 * What we do here is we mark all pageblocks in range as
6699 * MIGRATE_ISOLATE. Because pageblock and max order pages may
6700 * have different sizes, and due to the way page allocator
b2c9e2fb 6701 * work, start_isolate_page_range() has special handlings for this.
041d3a8c
MN
6702 *
6703 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
6704 * migrate the pages from an unaligned range (ie. pages that
b2c9e2fb 6705 * we are interested in). This will put all the pages in
041d3a8c
MN
6706 * range back to page allocator as MIGRATE_ISOLATE.
6707 *
6708 * When this is done, we take the pages in range from page
6709 * allocator removing them from the buddy system. This way
6710 * page allocator will never consider using them.
6711 *
6712 * This lets us mark the pageblocks back as
6713 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
6714 * aligned range but not in the unaligned, original range are
6715 * put back to page allocator so that buddy can use them.
6716 */
6717
b9e40605 6718 ret = start_isolate_page_range(start, end, migratetype, 0);
3fa0c7c7 6719 if (ret)
b2c9e2fb 6720 goto done;
041d3a8c 6721
7612921f
VB
6722 drain_all_pages(cc.zone);
6723
8ef5849f
JK
6724 /*
6725 * In case of -EBUSY, we'd like to know which page causes problem.
63cd4489
MK
6726 * So, just fall through. test_pages_isolated() has a tracepoint
6727 * which will report the busy page.
6728 *
6729 * It is possible that busy pages could become available before
6730 * the call to test_pages_isolated, and the range will actually be
6731 * allocated. So, if we fall through be sure to clear ret so that
6732 * -EBUSY is not accidentally used or returned to caller.
8ef5849f 6733 */
c8b36003 6734 ret = __alloc_contig_migrate_range(&cc, start, end, migratetype);
8ef5849f 6735 if (ret && ret != -EBUSY)
041d3a8c 6736 goto done;
04f13d24 6737
6738 /*
6739 * When in-use hugetlb pages are migrated, they may simply be released
6740 * back into the free hugepage pool instead of being returned to the
6741 * buddy system. After the migration of in-use huge pages is completed,
6742 * we will invoke replace_free_hugepage_folios() to ensure that these
6743 * hugepages are properly released to the buddy system.
6744 */
6745 ret = replace_free_hugepage_folios(start, end);
6746 if (ret)
6747 goto done;
041d3a8c
MN
6748
6749 /*
b2c9e2fb 6750 * Pages from [start, end) are within a pageblock_nr_pages
041d3a8c
MN
6751 * aligned blocks that are marked as MIGRATE_ISOLATE. What's
6752 * more, all pages in [start, end) are free in page allocator.
6753 * What we are going to do is to allocate all pages from
6754 * [start, end) (that is remove them from page allocator).
6755 *
6756 * The only problem is that pages at the beginning and at the
6757 * end of interesting range may be not aligned with pages that
6758 * page allocator holds, ie. they can be part of higher order
6759 * pages. Because of this, we reserve the bigger range and
6760 * once this is done free the pages we are not interested in.
6761 *
6762 * We don't have to hold zone->lock here because the pages are
6763 * isolated thus they won't get removed from buddy.
6764 */
fd919a85 6765 outer_start = find_large_buddy(start);
8ef5849f 6766
041d3a8c 6767 /* Make sure the range is really isolated. */
756d25be 6768 if (test_pages_isolated(outer_start, end, 0)) {
041d3a8c
MN
6769 ret = -EBUSY;
6770 goto done;
6771 }
6772
49f223a9 6773 /* Grab isolated pages from freelists. */
bb13ffeb 6774 outer_end = isolate_freepages_range(&cc, outer_start, end);
041d3a8c
MN
6775 if (!outer_end) {
6776 ret = -EBUSY;
6777 goto done;
6778 }
6779
e98337d1 6780 if (!(gfp_mask & __GFP_COMP)) {
7b755570 6781 split_free_pages(cc.freepages, gfp_mask);
041d3a8c 6782
e98337d1
YZ
6783 /* Free head and tail (if any) */
6784 if (start != outer_start)
6785 free_contig_range(outer_start, start - outer_start);
6786 if (end != outer_end)
6787 free_contig_range(end, outer_end - end);
6788 } else if (start == outer_start && end == outer_end && is_power_of_2(end - start)) {
6789 struct page *head = pfn_to_page(start);
6790 int order = ilog2(end - start);
6791
6792 check_new_pages(head, order);
6793 prep_new_page(head, order, gfp_mask, 0);
ee66e9c3 6794 set_page_refcounted(head);
e98337d1
YZ
6795 } else {
6796 ret = -EINVAL;
6797 WARN(true, "PFN range: requested [%lu, %lu), allocated [%lu, %lu)\n",
6798 start, end, outer_start, outer_end);
6799 }
041d3a8c 6800done:
6e263fff 6801 undo_isolate_page_range(start, end, migratetype);
041d3a8c
MN
6802 return ret;
6803}
b951aaff 6804EXPORT_SYMBOL(alloc_contig_range_noprof);
5e27a2df
AK
6805
6806static int __alloc_contig_pages(unsigned long start_pfn,
6807 unsigned long nr_pages, gfp_t gfp_mask)
6808{
6809 unsigned long end_pfn = start_pfn + nr_pages;
6810
b951aaff
SB
6811 return alloc_contig_range_noprof(start_pfn, end_pfn, MIGRATE_MOVABLE,
6812 gfp_mask);
5e27a2df
AK
6813}
6814
6815static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
6816 unsigned long nr_pages)
6817{
6818 unsigned long i, end_pfn = start_pfn + nr_pages;
6819 struct page *page;
6820
6821 for (i = start_pfn; i < end_pfn; i++) {
6822 page = pfn_to_online_page(i);
6823 if (!page)
6824 return false;
6825
6826 if (page_zone(page) != z)
6827 return false;
6828
6829 if (PageReserved(page))
4d73ba5f
MG
6830 return false;
6831
6832 if (PageHuge(page))
5e27a2df 6833 return false;
5e27a2df
AK
6834 }
6835 return true;
6836}
6837
6838static bool zone_spans_last_pfn(const struct zone *zone,
6839 unsigned long start_pfn, unsigned long nr_pages)
6840{
6841 unsigned long last_pfn = start_pfn + nr_pages - 1;
6842
6843 return zone_spans_pfn(zone, last_pfn);
6844}
6845
6846/**
6847 * alloc_contig_pages() -- tries to find and allocate contiguous range of pages
6848 * @nr_pages: Number of contiguous pages to allocate
f6037a4a
DH
6849 * @gfp_mask: GFP mask. Node/zone/placement hints limit the search; only some
6850 * action and reclaim modifiers are supported. Reclaim modifiers
6851 * control allocation behavior during compaction/migration/reclaim.
5e27a2df
AK
6852 * @nid: Target node
6853 * @nodemask: Mask for other possible nodes
6854 *
6855 * This routine is a wrapper around alloc_contig_range(). It scans over zones
6856 * on an applicable zonelist to find a contiguous pfn range which can then be
6857 * tried for allocation with alloc_contig_range(). This routine is intended
6858 * for allocation requests which can not be fulfilled with the buddy allocator.
6859 *
6860 * The allocated memory is always aligned to a page boundary. If nr_pages is a
eaab8e75
AK
6861 * power of two, then allocated range is also guaranteed to be aligned to same
6862 * nr_pages (e.g. 1GB request would be aligned to 1GB).
5e27a2df
AK
6863 *
6864 * Allocated pages can be freed with free_contig_range() or by manually calling
6865 * __free_page() on each allocated page.
6866 *
6867 * Return: pointer to contiguous pages on success, or NULL if not successful.
6868 */
b951aaff
SB
6869struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
6870 int nid, nodemask_t *nodemask)
5e27a2df
AK
6871{
6872 unsigned long ret, pfn, flags;
6873 struct zonelist *zonelist;
6874 struct zone *zone;
6875 struct zoneref *z;
6876
6877 zonelist = node_zonelist(nid, gfp_mask);
6878 for_each_zone_zonelist_nodemask(zone, z, zonelist,
6879 gfp_zone(gfp_mask), nodemask) {
6880 spin_lock_irqsave(&zone->lock, flags);
6881
6882 pfn = ALIGN(zone->zone_start_pfn, nr_pages);
6883 while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
6884 if (pfn_range_valid_contig(zone, pfn, nr_pages)) {
6885 /*
6886 * We release the zone lock here because
6887 * alloc_contig_range() will also lock the zone
6888 * at some point. If there's an allocation
6889 * spinning on this lock, it may win the race
6890 * and cause alloc_contig_range() to fail...
6891 */
6892 spin_unlock_irqrestore(&zone->lock, flags);
6893 ret = __alloc_contig_pages(pfn, nr_pages,
6894 gfp_mask);
6895 if (!ret)
6896 return pfn_to_page(pfn);
6897 spin_lock_irqsave(&zone->lock, flags);
6898 }
6899 pfn += nr_pages;
6900 }
6901 spin_unlock_irqrestore(&zone->lock, flags);
6902 }
6903 return NULL;
6904}
4eb0716e 6905#endif /* CONFIG_CONTIG_ALLOC */
041d3a8c 6906
78fa5150 6907void free_contig_range(unsigned long pfn, unsigned long nr_pages)
041d3a8c 6908{
78fa5150 6909 unsigned long count = 0;
e98337d1
YZ
6910 struct folio *folio = pfn_folio(pfn);
6911
6912 if (folio_test_large(folio)) {
6913 int expected = folio_nr_pages(folio);
6914
6915 if (nr_pages == expected)
6916 folio_put(folio);
6917 else
6918 WARN(true, "PFN %lu: nr_pages %lu != expected %d\n",
6919 pfn, nr_pages, expected);
6920 return;
6921 }
bcc2b02f
MS
6922
6923 for (; nr_pages--; pfn++) {
6924 struct page *page = pfn_to_page(pfn);
6925
6926 count += page_count(page) != 1;
6927 __free_page(page);
6928 }
78fa5150 6929 WARN(count != 0, "%lu pages are still in use!\n", count);
041d3a8c 6930}
255f5985 6931EXPORT_SYMBOL(free_contig_range);
041d3a8c 6932
ec6e8c7e
VB
6933/*
6934 * Effectively disable pcplists for the zone by setting the high limit to 0
6935 * and draining all cpus. A concurrent page freeing on another CPU that's about
6936 * to put the page on pcplist will either finish before the drain and the page
6937 * will be drained, or observe the new high limit and skip the pcplist.
6938 *
6939 * Must be paired with a call to zone_pcp_enable().
6940 */
6941void zone_pcp_disable(struct zone *zone)
6942{
6943 mutex_lock(&pcp_batch_high_lock);
90b41691 6944 __zone_set_pageset_high_and_batch(zone, 0, 0, 1);
ec6e8c7e
VB
6945 __drain_all_pages(zone, true);
6946}
6947
6948void zone_pcp_enable(struct zone *zone)
6949{
90b41691
YH
6950 __zone_set_pageset_high_and_batch(zone, zone->pageset_high_min,
6951 zone->pageset_high_max, zone->pageset_batch);
ec6e8c7e
VB
6952 mutex_unlock(&pcp_batch_high_lock);
6953}
6954
340175b7
JL
6955void zone_pcp_reset(struct zone *zone)
6956{
5a883813 6957 int cpu;
28f836b6 6958 struct per_cpu_zonestat *pzstats;
340175b7 6959
28f836b6 6960 if (zone->per_cpu_pageset != &boot_pageset) {
5a883813 6961 for_each_online_cpu(cpu) {
28f836b6
MG
6962 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
6963 drain_zonestat(zone, pzstats);
5a883813 6964 }
28f836b6 6965 free_percpu(zone->per_cpu_pageset);
28f836b6 6966 zone->per_cpu_pageset = &boot_pageset;
022e7fa0
ML
6967 if (zone->per_cpu_zonestats != &boot_zonestats) {
6968 free_percpu(zone->per_cpu_zonestats);
6969 zone->per_cpu_zonestats = &boot_zonestats;
6970 }
340175b7 6971 }
340175b7
JL
6972}
6973
6dcd73d7 6974#ifdef CONFIG_MEMORY_HOTREMOVE
0c0e6195 6975/*
257bea71
DH
6976 * All pages in the range must be in a single zone, must not contain holes,
6977 * must span full sections, and must be isolated before calling this function.
50625744
DH
6978 *
6979 * Returns the number of managed (non-PageOffline()) pages in the range: the
6980 * number of pages for which memory offlining code must adjust managed page
6981 * counters using adjust_managed_page_count().
0c0e6195 6982 */
50625744
DH
6983unsigned long __offline_isolated_pages(unsigned long start_pfn,
6984 unsigned long end_pfn)
0c0e6195 6985{
50625744 6986 unsigned long already_offline = 0, flags;
257bea71 6987 unsigned long pfn = start_pfn;
0c0e6195
KH
6988 struct page *page;
6989 struct zone *zone;
0ee5f4f3 6990 unsigned int order;
5557c766 6991
2d070eab 6992 offline_mem_sections(pfn, end_pfn);
0c0e6195
KH
6993 zone = page_zone(pfn_to_page(pfn));
6994 spin_lock_irqsave(&zone->lock, flags);
0c0e6195 6995 while (pfn < end_pfn) {
0c0e6195 6996 page = pfn_to_page(pfn);
b023f468
WC
6997 /*
6998 * The HWPoisoned page may be not in buddy system, and
6999 * page_count() is not 0.
7000 */
7001 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
7002 pfn++;
b023f468
WC
7003 continue;
7004 }
aa218795
DH
7005 /*
7006 * At this point all remaining PageOffline() pages have a
7007 * reference count of 0 and can simply be skipped.
7008 */
7009 if (PageOffline(page)) {
7010 BUG_ON(page_count(page));
7011 BUG_ON(PageBuddy(page));
50625744 7012 already_offline++;
aa218795 7013 pfn++;
aa218795
DH
7014 continue;
7015 }
b023f468 7016
0c0e6195
KH
7017 BUG_ON(page_count(page));
7018 BUG_ON(!PageBuddy(page));
e0932b6c 7019 VM_WARN_ON(get_pageblock_migratetype(page) != MIGRATE_ISOLATE);
ab130f91 7020 order = buddy_order(page);
e0932b6c 7021 del_page_from_free_list(page, zone, order, MIGRATE_ISOLATE);
0c0e6195
KH
7022 pfn += (1 << order);
7023 }
7024 spin_unlock_irqrestore(&zone->lock, flags);
50625744
DH
7025
7026 return end_pfn - start_pfn - already_offline;
0c0e6195
KH
7027}
7028#endif
8d22ba1b 7029
8446b59b
ED
7030/*
7031 * This function returns a stable result only if called under zone lock.
7032 */
2ace5a67 7033bool is_free_buddy_page(const struct page *page)
8d22ba1b 7034{
8d22ba1b 7035 unsigned long pfn = page_to_pfn(page);
7aeb09f9 7036 unsigned int order;
8d22ba1b 7037
fd377218 7038 for (order = 0; order < NR_PAGE_ORDERS; order++) {
2ace5a67 7039 const struct page *head = page - (pfn & ((1 << order) - 1));
8d22ba1b 7040
2ace5a67
MWO
7041 if (PageBuddy(head) &&
7042 buddy_order_unsafe(head) >= order)
8d22ba1b
WF
7043 break;
7044 }
8d22ba1b 7045
5e0a760b 7046 return order <= MAX_PAGE_ORDER;
8d22ba1b 7047}
a581865e 7048EXPORT_SYMBOL(is_free_buddy_page);
d4ae9916
NH
7049
7050#ifdef CONFIG_MEMORY_FAILURE
e0932b6c
JW
7051static inline void add_to_free_list(struct page *page, struct zone *zone,
7052 unsigned int order, int migratetype,
7053 bool tail)
7054{
7055 __add_to_free_list(page, zone, order, migratetype, tail);
7056 account_freepages(zone, 1 << order, migratetype);
7057}
7058
d4ae9916 7059/*
06be6ff3
OS
7060 * Break down a higher-order page in sub-pages, and keep our target out of
7061 * buddy allocator.
d4ae9916 7062 */
06be6ff3
OS
7063static void break_down_buddy_pages(struct zone *zone, struct page *page,
7064 struct page *target, int low, int high,
7065 int migratetype)
7066{
7067 unsigned long size = 1 << high;
0dfca313 7068 struct page *current_buddy;
06be6ff3
OS
7069
7070 while (high > low) {
7071 high--;
7072 size >>= 1;
7073
7074 if (target >= &page[size]) {
06be6ff3 7075 current_buddy = page;
0dfca313 7076 page = page + size;
06be6ff3 7077 } else {
06be6ff3
OS
7078 current_buddy = page + size;
7079 }
7080
e0932b6c 7081 if (set_page_guard(zone, current_buddy, high))
06be6ff3
OS
7082 continue;
7083
e0932b6c 7084 add_to_free_list(current_buddy, zone, high, migratetype, false);
27e0db3c 7085 set_buddy_order(current_buddy, high);
06be6ff3
OS
7086 }
7087}
7088
7089/*
7090 * Take a page that will be marked as poisoned off the buddy allocator.
7091 */
7092bool take_page_off_buddy(struct page *page)
d4ae9916
NH
7093{
7094 struct zone *zone = page_zone(page);
7095 unsigned long pfn = page_to_pfn(page);
7096 unsigned long flags;
7097 unsigned int order;
06be6ff3 7098 bool ret = false;
d4ae9916
NH
7099
7100 spin_lock_irqsave(&zone->lock, flags);
fd377218 7101 for (order = 0; order < NR_PAGE_ORDERS; order++) {
d4ae9916 7102 struct page *page_head = page - (pfn & ((1 << order) - 1));
ab130f91 7103 int page_order = buddy_order(page_head);
d4ae9916 7104
ab130f91 7105 if (PageBuddy(page_head) && page_order >= order) {
06be6ff3
OS
7106 unsigned long pfn_head = page_to_pfn(page_head);
7107 int migratetype = get_pfnblock_migratetype(page_head,
7108 pfn_head);
7109
e0932b6c
JW
7110 del_page_from_free_list(page_head, zone, page_order,
7111 migratetype);
06be6ff3 7112 break_down_buddy_pages(zone, page_head, page, 0,
ab130f91 7113 page_order, migratetype);
bf181c58 7114 SetPageHWPoisonTakenOff(page);
06be6ff3 7115 ret = true;
d4ae9916
NH
7116 break;
7117 }
06be6ff3
OS
7118 if (page_count(page_head) > 0)
7119 break;
d4ae9916
NH
7120 }
7121 spin_unlock_irqrestore(&zone->lock, flags);
06be6ff3 7122 return ret;
d4ae9916 7123}
bf181c58
NH
7124
7125/*
7126 * Cancel takeoff done by take_page_off_buddy().
7127 */
7128bool put_page_back_buddy(struct page *page)
7129{
7130 struct zone *zone = page_zone(page);
bf181c58 7131 unsigned long flags;
bf181c58
NH
7132 bool ret = false;
7133
7134 spin_lock_irqsave(&zone->lock, flags);
7135 if (put_page_testzero(page)) {
55612e80
JW
7136 unsigned long pfn = page_to_pfn(page);
7137 int migratetype = get_pfnblock_migratetype(page, pfn);
7138
bf181c58
NH
7139 ClearPageHWPoisonTakenOff(page);
7140 __free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE);
7141 if (TestClearPageHWPoison(page)) {
bf181c58
NH
7142 ret = true;
7143 }
7144 }
7145 spin_unlock_irqrestore(&zone->lock, flags);
7146
7147 return ret;
7148}
d4ae9916 7149#endif
62b31070
BH
7150
7151#ifdef CONFIG_ZONE_DMA
7152bool has_managed_dma(void)
7153{
7154 struct pglist_data *pgdat;
7155
7156 for_each_online_pgdat(pgdat) {
7157 struct zone *zone = &pgdat->node_zones[ZONE_DMA];
7158
7159 if (managed_zone(zone))
7160 return true;
7161 }
7162 return false;
7163}
7164#endif /* CONFIG_ZONE_DMA */
dcdfdd40
KS
7165
7166#ifdef CONFIG_UNACCEPTED_MEMORY
7167
dcdfdd40
KS
7168static bool lazy_accept = true;
7169
7170static int __init accept_memory_parse(char *p)
7171{
7172 if (!strcmp(p, "lazy")) {
7173 lazy_accept = true;
7174 return 0;
7175 } else if (!strcmp(p, "eager")) {
7176 lazy_accept = false;
7177 return 0;
7178 } else {
7179 return -EINVAL;
7180 }
7181}
7182early_param("accept_memory", accept_memory_parse);
7183
7184static bool page_contains_unaccepted(struct page *page, unsigned int order)
7185{
7186 phys_addr_t start = page_to_phys(page);
dcdfdd40 7187
5adfeaec 7188 return range_contains_unaccepted_memory(start, PAGE_SIZE << order);
dcdfdd40
KS
7189}
7190
55ad43e8
KS
7191static void __accept_page(struct zone *zone, unsigned long *flags,
7192 struct page *page)
dcdfdd40 7193{
dcdfdd40 7194 list_del(&page->lru);
e0932b6c 7195 account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
dcdfdd40 7196 __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
310183de 7197 __ClearPageUnaccepted(page);
55ad43e8 7198 spin_unlock_irqrestore(&zone->lock, *flags);
dcdfdd40 7199
5adfeaec 7200 accept_memory(page_to_phys(page), PAGE_SIZE << MAX_PAGE_ORDER);
dcdfdd40 7201
5e0a760b 7202 __free_pages_ok(page, MAX_PAGE_ORDER, FPI_TO_TAIL);
55ad43e8
KS
7203}
7204
7205void accept_page(struct page *page)
7206{
7207 struct zone *zone = page_zone(page);
7208 unsigned long flags;
7209
7210 spin_lock_irqsave(&zone->lock, flags);
7211 if (!PageUnaccepted(page)) {
7212 spin_unlock_irqrestore(&zone->lock, flags);
7213 return;
7214 }
7215
7216 /* Unlocks zone->lock */
7217 __accept_page(zone, &flags, page);
dcdfdd40
KS
7218}
7219
7220static bool try_to_accept_memory_one(struct zone *zone)
7221{
7222 unsigned long flags;
7223 struct page *page;
dcdfdd40 7224
dcdfdd40
KS
7225 spin_lock_irqsave(&zone->lock, flags);
7226 page = list_first_entry_or_null(&zone->unaccepted_pages,
7227 struct page, lru);
7228 if (!page) {
7229 spin_unlock_irqrestore(&zone->lock, flags);
7230 return false;
7231 }
7232
55ad43e8
KS
7233 /* Unlocks zone->lock */
7234 __accept_page(zone, &flags, page);
dcdfdd40
KS
7235
7236 return true;
7237}
7238
23fa022a
KS
7239static bool cond_accept_memory(struct zone *zone, unsigned int order,
7240 int alloc_flags)
dcdfdd40 7241{
800f1059 7242 long to_accept, wmark;
807174a9
KS
7243 bool ret = false;
7244
807174a9
KS
7245 if (list_empty(&zone->unaccepted_pages))
7246 return false;
dcdfdd40 7247
23fa022a
KS
7248 /* Bailout, since try_to_accept_memory_one() needs to take a lock */
7249 if (alloc_flags & ALLOC_TRYLOCK)
7250 return false;
7251
800f1059
KS
7252 wmark = promo_wmark_pages(zone);
7253
7254 /*
7255 * Watermarks have not been initialized yet.
7256 *
7257 * Accepting one MAX_ORDER page to ensure progress.
7258 */
7259 if (!wmark)
7260 return try_to_accept_memory_one(zone);
7261
59149bf8 7262 /* How much to accept to get to promo watermark? */
800f1059 7263 to_accept = wmark -
dcdfdd40 7264 (zone_page_state(zone, NR_FREE_PAGES) -
807174a9
KS
7265 __zone_watermark_unusable_free(zone, order, 0) -
7266 zone_page_state(zone, NR_UNACCEPTED));
dcdfdd40 7267
807174a9 7268 while (to_accept > 0) {
dcdfdd40
KS
7269 if (!try_to_accept_memory_one(zone))
7270 break;
7271 ret = true;
7272 to_accept -= MAX_ORDER_NR_PAGES;
807174a9 7273 }
dcdfdd40
KS
7274
7275 return ret;
7276}
7277
dcdfdd40
KS
7278static bool __free_unaccepted(struct page *page)
7279{
7280 struct zone *zone = page_zone(page);
7281 unsigned long flags;
dcdfdd40
KS
7282
7283 if (!lazy_accept)
7284 return false;
7285
7286 spin_lock_irqsave(&zone->lock, flags);
dcdfdd40 7287 list_add_tail(&page->lru, &zone->unaccepted_pages);
e0932b6c 7288 account_freepages(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
dcdfdd40 7289 __mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES);
310183de 7290 __SetPageUnaccepted(page);
dcdfdd40
KS
7291 spin_unlock_irqrestore(&zone->lock, flags);
7292
dcdfdd40
KS
7293 return true;
7294}
7295
7296#else
7297
7298static bool page_contains_unaccepted(struct page *page, unsigned int order)
7299{
7300 return false;
7301}
7302
23fa022a
KS
7303static bool cond_accept_memory(struct zone *zone, unsigned int order,
7304 int alloc_flags)
dcdfdd40
KS
7305{
7306 return false;
7307}
7308
dcdfdd40
KS
7309static bool __free_unaccepted(struct page *page)
7310{
7311 BUILD_BUG();
7312 return false;
7313}
7314
7315#endif /* CONFIG_UNACCEPTED_MEMORY */
97769a53
AS
7316
7317/**
2aad4edf 7318 * alloc_pages_nolock - opportunistic reentrant allocation from any context
97769a53
AS
7319 * @nid: node to allocate from
7320 * @order: allocation order size
7321 *
7322 * Allocates pages of a given order from the given node. This is safe to
7323 * call from any context (from atomic, NMI, and also reentrant
2aad4edf 7324 * allocator -> tracepoint -> alloc_pages_nolock_noprof).
97769a53
AS
7325 * Allocation is best effort and to be expected to fail easily so nobody should
7326 * rely on the success. Failures are not reported via warn_alloc().
7327 * See always fail conditions below.
7328 *
2aad4edf
AS
7329 * Return: allocated page or NULL on failure. NULL does not mean EBUSY or EAGAIN.
7330 * It means ENOMEM. There is no reason to call it again and expect !NULL.
97769a53 7331 */
2aad4edf 7332struct page *alloc_pages_nolock_noprof(int nid, unsigned int order)
97769a53
AS
7333{
7334 /*
7335 * Do not specify __GFP_DIRECT_RECLAIM, since direct claim is not allowed.
7336 * Do not specify __GFP_KSWAPD_RECLAIM either, since wake up of kswapd
7337 * is not safe in arbitrary context.
7338 *
7339 * These two are the conditions for gfpflags_allow_spinning() being true.
7340 *
2aad4edf 7341 * Specify __GFP_NOWARN since failing alloc_pages_nolock() is not a reason
97769a53
AS
7342 * to warn. Also warn would trigger printk() which is unsafe from
7343 * various contexts. We cannot use printk_deferred_enter() to mitigate,
7344 * since the running context is unknown.
7345 *
7346 * Specify __GFP_ZERO to make sure that call to kmsan_alloc_page() below
7347 * is safe in any context. Also zeroing the page is mandatory for
7348 * BPF use cases.
7349 *
7350 * Though __GFP_NOMEMALLOC is not checked in the code path below,
2aad4edf 7351 * specify it here to highlight that alloc_pages_nolock()
97769a53
AS
7352 * doesn't want to deplete reserves.
7353 */
e8d78dbd
AS
7354 gfp_t alloc_gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_NOMEMALLOC
7355 | __GFP_ACCOUNT;
97769a53
AS
7356 unsigned int alloc_flags = ALLOC_TRYLOCK;
7357 struct alloc_context ac = { };
7358 struct page *page;
7359
7360 /*
7361 * In PREEMPT_RT spin_trylock() will call raw_spin_lock() which is
7362 * unsafe in NMI. If spin_trylock() is called from hard IRQ the current
7363 * task may be waiting for one rt_spin_lock, but rt_spin_trylock() will
7364 * mark the task as the owner of another rt_spin_lock which will
7365 * confuse PI logic, so return immediately if called form hard IRQ or
7366 * NMI.
7367 *
7368 * Note, irqs_disabled() case is ok. This function can be called
7369 * from raw_spin_lock_irqsave region.
7370 */
7371 if (IS_ENABLED(CONFIG_PREEMPT_RT) && (in_nmi() || in_hardirq()))
7372 return NULL;
7373 if (!pcp_allowed_order(order))
7374 return NULL;
7375
97769a53
AS
7376 /* Bailout, since _deferred_grow_zone() needs to take a lock */
7377 if (deferred_pages_enabled())
7378 return NULL;
7379
7380 if (nid == NUMA_NO_NODE)
7381 nid = numa_node_id();
7382
7383 prepare_alloc_pages(alloc_gfp, order, nid, NULL, &ac,
7384 &alloc_gfp, &alloc_flags);
7385
7386 /*
7387 * Best effort allocation from percpu free list.
7388 * If it's empty attempt to spin_trylock zone->lock.
7389 */
7390 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
7391
7392 /* Unlike regular alloc_pages() there is no __alloc_pages_slowpath(). */
7393
2985dae1
AS
7394 if (page)
7395 set_page_refcounted(page);
7396
e8d78dbd
AS
7397 if (memcg_kmem_online() && page &&
7398 unlikely(__memcg_kmem_charge_page(page, alloc_gfp, order) != 0)) {
7399 free_pages_nolock(page, order);
7400 page = NULL;
7401 }
97769a53
AS
7402 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
7403 kmsan_alloc_page(page, order, alloc_gfp);
7404 return page;
7405}