/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H

#include <linux/gfp_types.h>

#include <linux/mmzone.h>
#include <linux/topology.h>
#include <linux/alloc_tag.h>
#include <linux/sched.h>

struct vm_area_struct;
struct mempolicy;

/* Convert GFP flags to their corresponding migrate type */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
#define GFP_MOVABLE_SHIFT 3

static inline int gfp_migratetype(const gfp_t gfp_flags)
{
	VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
	BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
	BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);
	BUILD_BUG_ON((___GFP_RECLAIMABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_RECLAIMABLE);
	BUILD_BUG_ON(((___GFP_MOVABLE | ___GFP_RECLAIMABLE) >>
		      GFP_MOVABLE_SHIFT) != MIGRATE_HIGHATOMIC);

	if (unlikely(page_group_by_mobility_disabled))
		return MIGRATE_UNMOVABLE;

	/* Group based on mobility */
	return (__force unsigned long)(gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
}
#undef GFP_MOVABLE_MASK
#undef GFP_MOVABLE_SHIFT
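
/*
 * Illustrative sketch (editor's example, not part of this header): the
 * flag-to-migratetype mapping in practice. GFP_HIGHUSER_MOVABLE carries
 * __GFP_MOVABLE, so such pages group on the MIGRATE_MOVABLE free lists;
 * plain GFP_KERNEL sets neither mobility bit and lands on
 * MIGRATE_UNMOVABLE:
 *
 *	gfp_migratetype(GFP_HIGHUSER_MOVABLE);	// -> MIGRATE_MOVABLE
 *	gfp_migratetype(GFP_KERNEL);		// -> MIGRATE_UNMOVABLE
 */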

static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
	return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}
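
/*
 * Illustrative sketch (hypothetical caller, not part of this header;
 * "pool" is a made-up structure): gfpflags_allow_blocking() is how a
 * caller decides between a sleeping lock and a trylock-style fallback:
 *
 *	if (gfpflags_allow_blocking(gfp))
 *		mutex_lock(&pool->lock);		// may sleep
 *	else if (!mutex_trylock(&pool->lock))
 *		return NULL;				// atomic context, back off
 */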

static inline bool gfpflags_allow_spinning(const gfp_t gfp_flags)
{
	/*
	 * !__GFP_DIRECT_RECLAIM -> direct reclaim is not allowed.
	 * !__GFP_KSWAPD_RECLAIM -> it's not safe to wake up kswapd.
	 * All GFP_* flags including GFP_NOWAIT use one or both flags.
	 * alloc_pages_nolock() is the only API that doesn't specify either flag.
	 *
	 * This is stronger than GFP_NOWAIT or GFP_ATOMIC because
	 * those are guaranteed to never block on a sleeping lock.
	 * Here we are enforcing that the allocation doesn't ever spin
	 * on any locks (i.e. only trylocks). There is no high level
	 * GFP_$FOO flag for this use in alloc_pages_nolock() as the
	 * regular page allocator doesn't fully support this
	 * allocation mode.
	 */
	return !!(gfp_flags & __GFP_RECLAIM);
}
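
/*
 * Illustrative sketch (hypothetical caller, not part of this header;
 * "cache" and take_from_percpu_reserve() are made up): code that may run
 * from any context can use gfpflags_allow_spinning() to decide whether
 * even spinning on a lock is safe, falling back to a trylock plus a
 * reserve when it is not:
 *
 *	if (gfpflags_allow_spinning(gfp))
 *		spin_lock(&cache->lock);		// spinning permitted
 *	else if (!spin_trylock(&cache->lock))
 *		return take_from_percpu_reserve();	// never spin here
 */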

#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
#define OPT_ZONE_HIGHMEM ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA
#define OPT_ZONE_DMA ZONE_DMA
#else
#define OPT_ZONE_DMA ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA32
#define OPT_ZONE_DMA32 ZONE_DMA32
#else
#define OPT_ZONE_DMA32 ZONE_NORMAL
#endif

/*
 * GFP_ZONE_TABLE is a word size bitstring that is used for looking up the
 * zone to use given the lowest 4 bits of gfp_t. Entries are GFP_ZONES_SHIFT
 * bits long and there are 16 of them to cover all possible combinations of
 * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM.
 *
 * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
 * But GFP_MOVABLE is not only a zone specifier but also an allocation
 * policy. Therefore __GFP_MOVABLE plus another zone selector is valid.
 * Only 1 bit of the lowest 3 bits (DMA,DMA32,HIGHMEM) can be set to "1".
 *
 * bit       result
 * =================
 * 0x0    => NORMAL
 * 0x1    => DMA or NORMAL
 * 0x2    => HIGHMEM or NORMAL
 * 0x3    => BAD (DMA+HIGHMEM)
 * 0x4    => DMA32 or NORMAL
 * 0x5    => BAD (DMA+DMA32)
 * 0x6    => BAD (HIGHMEM+DMA32)
 * 0x7    => BAD (HIGHMEM+DMA32+DMA)
 * 0x8    => NORMAL (MOVABLE+0)
 * 0x9    => DMA or NORMAL (MOVABLE+DMA)
 * 0xa    => MOVABLE (Movable is valid only if HIGHMEM is set too)
 * 0xb    => BAD (MOVABLE+HIGHMEM+DMA)
 * 0xc    => DMA32 or NORMAL (MOVABLE+DMA32)
 * 0xd    => BAD (MOVABLE+DMA32+DMA)
 * 0xe    => BAD (MOVABLE+DMA32+HIGHMEM)
 * 0xf    => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
 *
 * GFP_ZONES_SHIFT must be <= 2 on 32 bit platforms.
 */

#if defined(CONFIG_ZONE_DEVICE) && (MAX_NR_ZONES-1) <= 4
/* ZONE_DEVICE is not a valid GFP zone specifier */
#define GFP_ZONES_SHIFT 2
#else
#define GFP_ZONES_SHIFT ZONES_SHIFT
#endif

#if 16 * GFP_ZONES_SHIFT > BITS_PER_LONG
#error GFP_ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
#endif

#define GFP_ZONE_TABLE ( \
	(ZONE_NORMAL << 0 * GFP_ZONES_SHIFT)				       \
	| (OPT_ZONE_DMA << ___GFP_DMA * GFP_ZONES_SHIFT)		       \
	| (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * GFP_ZONES_SHIFT)	       \
	| (OPT_ZONE_DMA32 << ___GFP_DMA32 * GFP_ZONES_SHIFT)		       \
	| (ZONE_NORMAL << ___GFP_MOVABLE * GFP_ZONES_SHIFT)		       \
	| (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * GFP_ZONES_SHIFT)    \
	| (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * GFP_ZONES_SHIFT)\
	| (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * GFP_ZONES_SHIFT)\
)

/*
 * GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32,
 * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per
 * entry starting with bit 0. Bit is set if the combination is not
 * allowed.
 */
#define GFP_ZONE_BAD ( \
	1 << (___GFP_DMA | ___GFP_HIGHMEM)				      \
	| 1 << (___GFP_DMA | ___GFP_DMA32)				      \
	| 1 << (___GFP_DMA32 | ___GFP_HIGHMEM)				      \
	| 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM)	      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM)  \
)

static inline enum zone_type gfp_zone(gfp_t flags)
{
	enum zone_type z;
	int bit = (__force int) (flags & GFP_ZONEMASK);

	z = (GFP_ZONE_TABLE >> (bit * GFP_ZONES_SHIFT)) &
					 ((1 << GFP_ZONES_SHIFT) - 1);
	VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
	return z;
}
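
/*
 * Worked example (editor's illustration, not part of this header): for
 * GFP_HIGHUSER_MOVABLE the low zone bits are __GFP_HIGHMEM|__GFP_MOVABLE,
 * i.e. table index 0xa, whose GFP_ZONES_SHIFT-wide entry is ZONE_MOVABLE:
 *
 *	gfp_zone(GFP_HIGHUSER_MOVABLE);	// -> ZONE_MOVABLE   (index 0xa)
 *	gfp_zone(GFP_KERNEL);		// -> ZONE_NORMAL    (index 0x0)
 *	gfp_zone(GFP_DMA);		// -> OPT_ZONE_DMA   (index 0x1),
 *					//    ZONE_NORMAL if !CONFIG_ZONE_DMA
 */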

/*
 * There is only one page-allocator function, and two main namespaces to
 * it. The alloc_page*() variants return 'struct page *' and as such can
 * allocate highmem pages, while the *get*page*() variants return
 * virtual kernel addresses to the allocated page(s).
 */

static inline int gfp_zonelist(gfp_t flags)
{
#ifdef CONFIG_NUMA
	if (unlikely(flags & __GFP_THISNODE))
		return ZONELIST_NOFALLBACK;
#endif
	return ZONELIST_FALLBACK;
}

/*
 * gfp flag masking for nested internal allocations.
 *
 * For code that needs to do allocations inside the public allocation API (e.g.
 * memory allocation tracking code) the allocations need to obey the caller's
 * allocation context constraints to prevent allocation context mismatches
 * (e.g. GFP_KERNEL allocations in GFP_NOFS contexts) that could lead to
 * deadlocks.
 *
 * It is also assumed that these nested allocations are for internal kernel
 * object storage purposes only and are not going to be used for DMA, etc. Hence
 * we strip out all the zone information and leave just the context information
 * intact.
 *
 * Further, internal allocations must fail before the higher level allocation
 * can fail, so we must make them fail faster and fail silently. We also don't
 * want them to deplete emergency reserves. Hence nested allocations must be
 * prepared for these allocations to fail.
 */
static inline gfp_t gfp_nested_mask(gfp_t flags)
{
	return ((flags & (GFP_KERNEL | GFP_ATOMIC | __GFP_NOLOCKDEP)) |
		(__GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN));
}
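
/*
 * Illustrative sketch (hypothetical caller, not part of this header;
 * "meta" and "caller_gfp" are made up): tracking code servicing a
 * GFP_NOFS allocation derives its internal flags from the caller's, so
 * the nested allocation cannot recurse into the filesystem and fails
 * fast, silently, and without touching emergency reserves:
 *
 *	meta = kmalloc(sizeof(*meta), gfp_nested_mask(caller_gfp));
 *	if (!meta)
 *		return;		// nested allocations must tolerate failure
 */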

/*
 * We get the zone list from the current node and the gfp_mask.
 * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
 * There are two zonelists per node, one for all zones with memory and
 * one containing just zones from the node the zonelist belongs to.
 *
 * For the case of non-NUMA systems the NODE_DATA() gets optimized to
 * &contig_page_data at compile-time.
 */
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
	return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
}
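
/*
 * Illustrative sketch (editor's example, not part of this header):
 * __GFP_THISNODE selects the no-fallback zonelist, so the walk below
 * only visits zones on the requested node; without it, the fallback
 * list spans all nodes in distance order:
 *
 *	struct zonelist *zl = node_zonelist(nid, GFP_KERNEL | __GFP_THISNODE);
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for_each_zone_zonelist(zone, z, zl, gfp_zone(GFP_KERNEL))
 *		...;	// zones of node nid only, highest zone first
 */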

#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif
#ifndef HAVE_ARCH_ALLOC_PAGE
static inline void arch_alloc_page(struct page *page, int order) { }
#endif

struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
		nodemask_t *nodemask);
#define __alloc_pages(...)		alloc_hooks(__alloc_pages_noprof(__VA_ARGS__))

struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
		nodemask_t *nodemask);
#define __folio_alloc(...)		alloc_hooks(__folio_alloc_noprof(__VA_ARGS__))

unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
		nodemask_t *nodemask, int nr_pages,
		struct page **page_array);
#define __alloc_pages_bulk(...)		alloc_hooks(alloc_pages_bulk_noprof(__VA_ARGS__))

unsigned long alloc_pages_bulk_mempolicy_noprof(gfp_t gfp,
		unsigned long nr_pages,
		struct page **page_array);
#define alloc_pages_bulk_mempolicy(...)				\
	alloc_hooks(alloc_pages_bulk_mempolicy_noprof(__VA_ARGS__))

/* Bulk allocate order-0 pages */
#define alloc_pages_bulk(_gfp, _nr_pages, _page_array)		\
	__alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, _page_array)
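
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * alloc_pages_bulk() fills up to nr_pages slots of the array with
 * order-0 pages and returns how many it managed to allocate, which may
 * be fewer than requested:
 *
 *	struct page *pages[16] = { NULL };
 *	unsigned long filled;
 *
 *	filled = alloc_pages_bulk(GFP_KERNEL, ARRAY_SIZE(pages), pages);
 *	// use pages[0..filled-1]; retry or fall back for the remainder
 */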

static inline unsigned long
alloc_pages_bulk_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages,
			     struct page **page_array)
{
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

	return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, page_array);
}

#define alloc_pages_bulk_node(...)				\
	alloc_hooks(alloc_pages_bulk_node_noprof(__VA_ARGS__))

static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)
{
	gfp_t warn_gfp = gfp_mask & (__GFP_THISNODE|__GFP_NOWARN);

	if (warn_gfp != (__GFP_THISNODE|__GFP_NOWARN))
		return;

	if (node_online(this_node))
		return;

	pr_warn("%pGg allocation from offline node %d\n", &gfp_mask, this_node);
	dump_stack();
}

/*
 * Allocate pages, preferring the node given as nid. The node must be valid and
 * online. For a more general interface, see alloc_pages_node().
 */
static inline struct page *
__alloc_pages_node_noprof(int nid, gfp_t gfp_mask, unsigned int order)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
	warn_if_node_offline(nid, gfp_mask);

	return __alloc_pages_noprof(gfp_mask, order, nid, NULL);
}

#define __alloc_pages_node(...)		alloc_hooks(__alloc_pages_node_noprof(__VA_ARGS__))

static inline
struct folio *__folio_alloc_node_noprof(gfp_t gfp, unsigned int order, int nid)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
	warn_if_node_offline(nid, gfp);

	return __folio_alloc_noprof(gfp, order, nid, NULL);
}

#define __folio_alloc_node(...)		alloc_hooks(__folio_alloc_node_noprof(__VA_ARGS__))

/*
 * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE,
 * prefer the current CPU's closest node. Otherwise node must be valid and
 * online.
 */
static inline struct page *alloc_pages_node_noprof(int nid, gfp_t gfp_mask,
						   unsigned int order)
{
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

	return __alloc_pages_node_noprof(nid, gfp_mask, order);
}

#define alloc_pages_node(...)		alloc_hooks(alloc_pages_node_noprof(__VA_ARGS__))
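
/*
 * Illustrative sketch (editor's example, not part of this header):
 * NUMA_NO_NODE is the "don't care" placement; alloc_pages_node() then
 * resolves it to the nearest memory-bearing node of the current CPU:
 *
 *	struct page *page = alloc_pages_node(NUMA_NO_NODE, GFP_KERNEL, 0);
 *
 *	if (page)
 *		__free_pages(page, 0);
 */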

#ifdef CONFIG_NUMA
struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order);
struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order);
struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
		struct mempolicy *mpol, pgoff_t ilx, int nid);
struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma,
		unsigned long addr);
#else
static inline struct page *alloc_pages_noprof(gfp_t gfp_mask, unsigned int order)
{
	return alloc_pages_node_noprof(numa_node_id(), gfp_mask, order);
}
static inline struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order)
{
	return __folio_alloc_node_noprof(gfp, order, numa_node_id());
}
static inline struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
		struct mempolicy *mpol, pgoff_t ilx, int nid)
{
	return folio_alloc_noprof(gfp, order);
}
#define vma_alloc_folio_noprof(gfp, order, vma, addr)		\
	folio_alloc_noprof(gfp, order)
#endif

#define alloc_pages(...)		alloc_hooks(alloc_pages_noprof(__VA_ARGS__))
#define folio_alloc(...)		alloc_hooks(folio_alloc_noprof(__VA_ARGS__))
#define folio_alloc_mpol(...)		alloc_hooks(folio_alloc_mpol_noprof(__VA_ARGS__))
#define vma_alloc_folio(...)		alloc_hooks(vma_alloc_folio_noprof(__VA_ARGS__))

#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)

static inline struct page *alloc_page_vma_noprof(gfp_t gfp,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct folio *folio = vma_alloc_folio_noprof(gfp, 0, vma, addr);

	return &folio->page;
}
#define alloc_page_vma(...)		alloc_hooks(alloc_page_vma_noprof(__VA_ARGS__))

struct page *alloc_pages_nolock_noprof(int nid, unsigned int order);
#define alloc_pages_nolock(...)		alloc_hooks(alloc_pages_nolock_noprof(__VA_ARGS__))

extern unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order);
#define __get_free_pages(...)		alloc_hooks(get_free_pages_noprof(__VA_ARGS__))

extern unsigned long get_zeroed_page_noprof(gfp_t gfp_mask);
#define get_zeroed_page(...)		alloc_hooks(get_zeroed_page_noprof(__VA_ARGS__))

void *alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask) __alloc_size(1);
#define alloc_pages_exact(...)		alloc_hooks(alloc_pages_exact_noprof(__VA_ARGS__))

void free_pages_exact(void *virt, size_t size);

__meminit void *alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask) __alloc_size(2);
#define alloc_pages_exact_nid(...)				\
	alloc_hooks(alloc_pages_exact_nid_noprof(__VA_ARGS__))

#define __get_free_page(gfp_mask)				\
	__get_free_pages((gfp_mask), 0)

#define __get_dma_pages(gfp_mask, order)			\
	__get_free_pages((gfp_mask) | GFP_DMA, (order))

extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages_nolock(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)
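
/*
 * Illustrative sketch (editor's example, not part of this header): the
 * *get*page*() namespace deals in kernel virtual addresses, so the
 * allocation and the free are paired on the address rather than on a
 * struct page:
 *
 *	unsigned long addr = __get_free_pages(GFP_KERNEL, 1);	// 2 pages
 *
 *	if (addr)
 *		free_pages(addr, 1);
 */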

void page_alloc_init_cpuhp(void);
int decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(struct zone *zone);
void drain_local_pages(struct zone *zone);

void page_alloc_init_late(void);
void setup_pcp_cacheinfo(unsigned int cpu);

/*
 * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
 * GFP flags are used before interrupts are enabled. Once interrupts are
 * enabled, it is set to __GFP_BITS_MASK while the system is running. During
 * hibernation, it is used by PM to avoid I/O during memory allocation while
 * devices are suspended.
 */
extern gfp_t gfp_allowed_mask;

/* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */
bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);

static inline bool gfp_has_io_fs(gfp_t gfp)
{
	return (gfp & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS);
}

/*
 * Check if the gfp flags allow compaction - GFP_NOIO is a really
 * tricky context because the migration might require IO.
 */
static inline bool gfp_compaction_allowed(gfp_t gfp_mask)
{
	return IS_ENABLED(CONFIG_COMPACTION) && (gfp_mask & __GFP_IO);
}

extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma);

#ifdef CONFIG_CONTIG_ALLOC
/* The below functions must be run on a range from a single zone. */
extern int alloc_contig_range_noprof(unsigned long start, unsigned long end,
				     unsigned migratetype, gfp_t gfp_mask);
#define alloc_contig_range(...)		alloc_hooks(alloc_contig_range_noprof(__VA_ARGS__))

extern struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
					      int nid, nodemask_t *nodemask);
#define alloc_contig_pages(...)		alloc_hooks(alloc_contig_pages_noprof(__VA_ARGS__))

#endif
void free_contig_range(unsigned long pfn, unsigned long nr_pages);

#ifdef CONFIG_CONTIG_ALLOC
static inline struct folio *folio_alloc_gigantic_noprof(int order, gfp_t gfp,
							int nid, nodemask_t *node)
{
	struct page *page;

	if (WARN_ON(!order || !(gfp & __GFP_COMP)))
		return NULL;

	page = alloc_contig_pages_noprof(1 << order, gfp, nid, node);

	return page ? page_folio(page) : NULL;
}
#else
static inline struct folio *folio_alloc_gigantic_noprof(int order, gfp_t gfp,
							int nid, nodemask_t *node)
{
	return NULL;
}
#endif
/* This should be paired with folio_put() rather than free_contig_range(). */
#define folio_alloc_gigantic(...)	alloc_hooks(folio_alloc_gigantic_noprof(__VA_ARGS__))
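
/*
 * Illustrative sketch (editor's example, not part of this header; the
 * order chosen is arbitrary): a gigantic folio must be requested with a
 * nonzero order and __GFP_COMP, and is released with folio_put(), not
 * free_contig_range():
 *
 *	struct folio *folio;
 *
 *	folio = folio_alloc_gigantic(PUD_ORDER, GFP_KERNEL | __GFP_COMP,
 *				     NUMA_NO_NODE, NULL);
 *	if (folio)
 *		folio_put(folio);
 */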

#endif /* __LINUX_GFP_H */