/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H

#include <linux/gfp_types.h>

#include <linux/mmzone.h>
#include <linux/topology.h>
#include <linux/alloc_tag.h>
#include <linux/sched.h>

struct vm_area_struct;
struct mempolicy;

/* Convert GFP flags to their corresponding migrate type */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
#define GFP_MOVABLE_SHIFT 3

static inline int gfp_migratetype(const gfp_t gfp_flags)
{
	VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
	BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
	BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);
	BUILD_BUG_ON((___GFP_RECLAIMABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_RECLAIMABLE);
	BUILD_BUG_ON(((___GFP_MOVABLE | ___GFP_RECLAIMABLE) >>
		      GFP_MOVABLE_SHIFT) != MIGRATE_HIGHATOMIC);

	if (unlikely(page_group_by_mobility_disabled))
		return MIGRATE_UNMOVABLE;

	/* Group based on mobility */
	return (__force unsigned long)(gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
}
#undef GFP_MOVABLE_MASK
#undef GFP_MOVABLE_SHIFT
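
/*
 * Example (illustrative sketch; assumes page_group_by_mobility_disabled
 * is false):
 *
 *	gfp_migratetype(GFP_KERNEL)                     == MIGRATE_UNMOVABLE
 *	gfp_migratetype(GFP_KERNEL | __GFP_RECLAIMABLE) == MIGRATE_RECLAIMABLE
 *	gfp_migratetype(GFP_HIGHUSER_MOVABLE)           == MIGRATE_MOVABLE
 */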

static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
	return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}
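
/*
 * Example (illustrative sketch): callers commonly use this predicate to
 * choose between a sleeping path and an atomic one, e.g.:
 *
 *	if (gfpflags_allow_blocking(gfp))
 *		mutex_lock(&lock);
 *	else if (!mutex_trylock(&lock))
 *		return NULL;
 */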

#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
#define OPT_ZONE_HIGHMEM ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA
#define OPT_ZONE_DMA ZONE_DMA
#else
#define OPT_ZONE_DMA ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA32
#define OPT_ZONE_DMA32 ZONE_DMA32
#else
#define OPT_ZONE_DMA32 ZONE_NORMAL
#endif

/*
 * GFP_ZONE_TABLE is a word-size bitstring that is used for looking up the
 * zone to use given the lowest 4 bits of gfp_t. Entries are GFP_ZONES_SHIFT
 * bits long and there are 16 of them to cover all possible combinations of
 * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM.
 *
 * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
 * But GFP_MOVABLE is not only a zone specifier but also an allocation
 * policy. Therefore __GFP_MOVABLE plus another zone selector is valid.
 * Only 1 bit of the lowest 3 bits (DMA,DMA32,HIGHMEM) can be set to "1".
 *
 * bit	result
 * =================
 * 0x0	=> NORMAL
 * 0x1	=> DMA or NORMAL
 * 0x2	=> HIGHMEM or NORMAL
 * 0x3	=> BAD (DMA+HIGHMEM)
 * 0x4	=> DMA32 or NORMAL
 * 0x5	=> BAD (DMA+DMA32)
 * 0x6	=> BAD (HIGHMEM+DMA32)
 * 0x7	=> BAD (HIGHMEM+DMA32+DMA)
 * 0x8	=> NORMAL (MOVABLE+0)
 * 0x9	=> DMA or NORMAL (MOVABLE+DMA)
 * 0xa	=> MOVABLE (Movable is valid only if HIGHMEM is set too)
 * 0xb	=> BAD (MOVABLE+HIGHMEM+DMA)
 * 0xc	=> DMA32 or NORMAL (MOVABLE+DMA32)
 * 0xd	=> BAD (MOVABLE+DMA32+DMA)
 * 0xe	=> BAD (MOVABLE+DMA32+HIGHMEM)
 * 0xf	=> BAD (MOVABLE+DMA32+HIGHMEM+DMA)
 *
 * GFP_ZONES_SHIFT must be <= 2 on 32 bit platforms.
 */

#if defined(CONFIG_ZONE_DEVICE) && (MAX_NR_ZONES-1) <= 4
/* ZONE_DEVICE is not a valid GFP zone specifier */
#define GFP_ZONES_SHIFT 2
#else
#define GFP_ZONES_SHIFT ZONES_SHIFT
#endif

#if 16 * GFP_ZONES_SHIFT > BITS_PER_LONG
#error GFP_ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
#endif

#define GFP_ZONE_TABLE ( \
	(ZONE_NORMAL << 0 * GFP_ZONES_SHIFT)				       \
	| (OPT_ZONE_DMA << ___GFP_DMA * GFP_ZONES_SHIFT)		       \
	| (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * GFP_ZONES_SHIFT)	       \
	| (OPT_ZONE_DMA32 << ___GFP_DMA32 * GFP_ZONES_SHIFT)		       \
	| (ZONE_NORMAL << ___GFP_MOVABLE * GFP_ZONES_SHIFT)		       \
	| (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * GFP_ZONES_SHIFT)   \
	| (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * GFP_ZONES_SHIFT)\
	| (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * GFP_ZONES_SHIFT)\
)

/*
 * GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32,
 * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per
 * entry starting with bit 0. Bit is set if the combination is not
 * allowed.
 */
#define GFP_ZONE_BAD ( \
	1 << (___GFP_DMA | ___GFP_HIGHMEM)				      \
	| 1 << (___GFP_DMA | ___GFP_DMA32)				      \
	| 1 << (___GFP_DMA32 | ___GFP_HIGHMEM)				      \
	| 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM)	      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM)  \
)

static inline enum zone_type gfp_zone(gfp_t flags)
{
	enum zone_type z;
	int bit = (__force int) (flags & GFP_ZONEMASK);

	z = (GFP_ZONE_TABLE >> (bit * GFP_ZONES_SHIFT)) &
					 ((1 << GFP_ZONES_SHIFT) - 1);
	VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
	return z;
}
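
/*
 * Worked example (illustrative; the resulting zones assume the matching
 * CONFIG_ options are enabled):
 *
 *	gfp_zone(GFP_KERNEL)             == ZONE_NORMAL  (bit 0x0)
 *	gfp_zone(GFP_KERNEL | __GFP_DMA) == ZONE_DMA     (bit 0x1)
 *	gfp_zone(GFP_HIGHUSER)           == ZONE_HIGHMEM (bit 0x2)
 *	gfp_zone(GFP_HIGHUSER_MOVABLE)   == ZONE_MOVABLE (bit 0xa)
 *
 * A disallowed combination such as __GFP_DMA | __GFP_HIGHMEM (bit 0x3)
 * trips the VM_BUG_ON() via GFP_ZONE_BAD.
 */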

/*
 * There is only one page-allocator function, and two main namespaces to
 * it. The alloc_page*() variants return 'struct page *' and as such
 * can allocate highmem pages, the *get*page*() variants return
 * virtual kernel addresses to the allocated page(s).
 */

static inline int gfp_zonelist(gfp_t flags)
{
#ifdef CONFIG_NUMA
	if (unlikely(flags & __GFP_THISNODE))
		return ZONELIST_NOFALLBACK;
#endif
	return ZONELIST_FALLBACK;
}

/*
 * gfp flag masking for nested internal allocations.
 *
 * For code that needs to do allocations inside the public allocation API (e.g.
 * memory allocation tracking code) the allocations need to obey the caller's
 * allocation context constraints to prevent allocation context mismatches
 * (e.g. GFP_KERNEL allocations in GFP_NOFS contexts) from creating potential
 * deadlock situations.
 *
 * It is also assumed that these nested allocations are for internal kernel
 * object storage purposes only and are not going to be used for DMA, etc. Hence
 * we strip out all the zone information and leave just the context information
 * intact.
 *
 * Further, internal allocations must fail before the higher level allocation
 * can fail, so we must make them fail faster and fail silently. We also don't
 * want them to deplete emergency reserves. Hence callers must be prepared for
 * these nested allocations to fail.
 */
static inline gfp_t gfp_nested_mask(gfp_t flags)
{
	return ((flags & (GFP_KERNEL | GFP_ATOMIC | __GFP_NOLOCKDEP)) |
		(__GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN));
}
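
/*
 * Example (illustrative sketch; assumes <linux/slab.h> and a hypothetical
 * "meta" object allocated on behalf of a caller's gfp context):
 *
 *	meta = kmalloc(sizeof(*meta), gfp_nested_mask(gfp));
 *
 * For a GFP_NOFS caller this keeps the reclaim and IO bits, leaves __GFP_FS
 * clear, and adds __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN so the
 * nested allocation fails fast, silently, and without touching reserves.
 */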

/*
 * We get the zone list from the current node and the gfp_mask.
 * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
 * There are two zonelists per node, one for all zones with memory and
 * one containing just zones from the node the zonelist belongs to.
 *
 * For the case of non-NUMA systems the NODE_DATA() gets optimized to
 * &contig_page_data at compile-time.
 */
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
	return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
}
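
/*
 * Example (illustrative sketch): the allocator core walks the returned
 * zonelist with helpers such as for_each_zone_zonelist():
 *
 *	struct zonelist *zonelist = node_zonelist(nid, gfp);
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp))
 *		examine_zone(zone);
 *
 * where examine_zone() stands in for whatever per-zone work the caller
 * does; zones are visited in fallback order.
 */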

#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif
#ifndef HAVE_ARCH_ALLOC_PAGE
static inline void arch_alloc_page(struct page *page, int order) { }
#endif

struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
		nodemask_t *nodemask);
#define __alloc_pages(...)		alloc_hooks(__alloc_pages_noprof(__VA_ARGS__))

struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
		nodemask_t *nodemask);
#define __folio_alloc(...)		alloc_hooks(__folio_alloc_noprof(__VA_ARGS__))

unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
				nodemask_t *nodemask, int nr_pages,
				struct list_head *page_list,
				struct page **page_array);
#define __alloc_pages_bulk(...)		alloc_hooks(alloc_pages_bulk_noprof(__VA_ARGS__))

unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp,
				unsigned long nr_pages,
				struct page **page_array);
#define alloc_pages_bulk_array_mempolicy(...)				\
	alloc_hooks(alloc_pages_bulk_array_mempolicy_noprof(__VA_ARGS__))

/* Bulk allocate order-0 pages */
#define alloc_pages_bulk_list(_gfp, _nr_pages, _list)			\
	__alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, _list, NULL)

#define alloc_pages_bulk_array(_gfp, _nr_pages, _page_array)		\
	__alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, NULL, _page_array)
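
/*
 * Example (illustrative sketch): fill an array with up to 16 order-0 pages
 * in a single call. The array must start out NULL-filled, slots that
 * already hold pages are skipped, and the return value is the number of
 * populated slots:
 *
 *	struct page *pages[16] = { NULL };
 *	unsigned long filled;
 *
 *	filled = alloc_pages_bulk_array(GFP_KERNEL, ARRAY_SIZE(pages), pages);
 */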

static inline unsigned long
alloc_pages_bulk_array_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages,
				   struct page **page_array)
{
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

	return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, NULL, page_array);
}

#define alloc_pages_bulk_array_node(...)				\
	alloc_hooks(alloc_pages_bulk_array_node_noprof(__VA_ARGS__))

static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)
{
	gfp_t warn_gfp = gfp_mask & (__GFP_THISNODE|__GFP_NOWARN);

	if (warn_gfp != (__GFP_THISNODE|__GFP_NOWARN))
		return;

	if (node_online(this_node))
		return;

	pr_warn("%pGg allocation from offline node %d\n", &gfp_mask, this_node);
	dump_stack();
}

/*
 * Allocate pages, preferring the node given as nid. The node must be valid and
 * online. For a more general interface, see alloc_pages_node().
 */
static inline struct page *
__alloc_pages_node_noprof(int nid, gfp_t gfp_mask, unsigned int order)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
	warn_if_node_offline(nid, gfp_mask);

	return __alloc_pages_noprof(gfp_mask, order, nid, NULL);
}

#define __alloc_pages_node(...)		alloc_hooks(__alloc_pages_node_noprof(__VA_ARGS__))

static inline
struct folio *__folio_alloc_node_noprof(gfp_t gfp, unsigned int order, int nid)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
	warn_if_node_offline(nid, gfp);

	return __folio_alloc_noprof(gfp, order, nid, NULL);
}

#define __folio_alloc_node(...)		alloc_hooks(__folio_alloc_node_noprof(__VA_ARGS__))

/*
 * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE,
 * prefer the current CPU's closest node. Otherwise node must be valid and
 * online.
 */
static inline struct page *alloc_pages_node_noprof(int nid, gfp_t gfp_mask,
						   unsigned int order)
{
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

	return __alloc_pages_node_noprof(nid, gfp_mask, order);
}

#define alloc_pages_node(...)		alloc_hooks(alloc_pages_node_noprof(__VA_ARGS__))
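
/*
 * Example (illustrative sketch; assumes <linux/device.h> for dev_to_node()):
 * allocate one page near a device's NUMA node, falling back to the local
 * node when the device has no affinity (dev_to_node() == NUMA_NO_NODE):
 *
 *	struct page *page = alloc_pages_node(dev_to_node(dev), GFP_KERNEL, 0);
 */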

#ifdef CONFIG_NUMA
struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order);
struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order,
		struct mempolicy *mpol, pgoff_t ilx, int nid);
struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order);
struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma,
		unsigned long addr, bool hugepage);
#else
static inline struct page *alloc_pages_noprof(gfp_t gfp_mask, unsigned int order)
{
	return alloc_pages_node_noprof(numa_node_id(), gfp_mask, order);
}
static inline struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order,
		struct mempolicy *mpol, pgoff_t ilx, int nid)
{
	return alloc_pages_noprof(gfp, order);
}
static inline struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order)
{
	return __folio_alloc_node(gfp, order, numa_node_id());
}
#define vma_alloc_folio_noprof(gfp, order, vma, addr, hugepage)		\
	folio_alloc_noprof(gfp, order)
#endif

#define alloc_pages(...)		alloc_hooks(alloc_pages_noprof(__VA_ARGS__))
#define alloc_pages_mpol(...)		alloc_hooks(alloc_pages_mpol_noprof(__VA_ARGS__))
#define folio_alloc(...)		alloc_hooks(folio_alloc_noprof(__VA_ARGS__))
#define vma_alloc_folio(...)		alloc_hooks(vma_alloc_folio_noprof(__VA_ARGS__))
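
/*
 * Example (illustrative sketch): allocate an order-2 folio on the current
 * node and drop the reference with folio_put() when done:
 *
 *	struct folio *folio = folio_alloc(GFP_KERNEL, 2);
 *
 *	if (!folio)
 *		return -ENOMEM;
 *	...
 *	folio_put(folio);
 */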

#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)

static inline struct page *alloc_page_vma_noprof(gfp_t gfp,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct folio *folio = vma_alloc_folio_noprof(gfp, 0, vma, addr, false);

	return &folio->page;
}
#define alloc_page_vma(...)		alloc_hooks(alloc_page_vma_noprof(__VA_ARGS__))

extern unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order);
#define __get_free_pages(...)		alloc_hooks(get_free_pages_noprof(__VA_ARGS__))

extern unsigned long get_zeroed_page_noprof(gfp_t gfp_mask);
#define get_zeroed_page(...)		alloc_hooks(get_zeroed_page_noprof(__VA_ARGS__))
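
/*
 * Example (illustrative sketch): the *get*page* variants return a kernel
 * virtual address rather than a struct page:
 *
 *	unsigned long addr = get_zeroed_page(GFP_KERNEL);
 *
 *	if (addr)
 *		free_page(addr);
 */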

void *alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask) __alloc_size(1);
#define alloc_pages_exact(...)		alloc_hooks(alloc_pages_exact_noprof(__VA_ARGS__))

void free_pages_exact(void *virt, size_t size);
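
/*
 * Example (illustrative sketch): a 5-page buffer allocated with
 * alloc_pages_exact() does not pin the full order-3 (8-page) block; the
 * tail pages beyond the requested size are returned to the allocator:
 *
 *	void *buf = alloc_pages_exact(5 * PAGE_SIZE, GFP_KERNEL);
 *
 *	if (buf)
 *		free_pages_exact(buf, 5 * PAGE_SIZE);
 */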

__meminit void *alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask) __alloc_size(2);
#define alloc_pages_exact_nid(...)					\
	alloc_hooks(alloc_pages_exact_nid_noprof(__VA_ARGS__))

#define __get_free_page(gfp_mask)					\
	__get_free_pages((gfp_mask), 0)

#define __get_dma_pages(gfp_mask, order)				\
	__get_free_pages((gfp_mask) | GFP_DMA, (order))

extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);

struct page_frag_cache;
void page_frag_cache_drain(struct page_frag_cache *nc);
extern void __page_frag_cache_drain(struct page *page, unsigned int count);
void *__page_frag_alloc_align(struct page_frag_cache *nc, unsigned int fragsz,
			      gfp_t gfp_mask, unsigned int align_mask);

static inline void *page_frag_alloc_align(struct page_frag_cache *nc,
					  unsigned int fragsz, gfp_t gfp_mask,
					  unsigned int align)
{
	WARN_ON_ONCE(!is_power_of_2(align));
	return __page_frag_alloc_align(nc, fragsz, gfp_mask, -align);
}

static inline void *page_frag_alloc(struct page_frag_cache *nc,
				    unsigned int fragsz, gfp_t gfp_mask)
{
	return __page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u);
}

extern void page_frag_free(void *addr);
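
/*
 * Example (illustrative sketch; "nc" is assumed to be a zero-initialised
 * struct page_frag_cache owned by the caller): carve a small fragment out
 * of the cached page and free it individually:
 *
 *	void *data = page_frag_alloc(nc, 256, GFP_ATOMIC);
 *
 *	if (data)
 *		page_frag_free(data);
 */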

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)

void page_alloc_init_cpuhp(void);
int decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(struct zone *zone);
void drain_local_pages(struct zone *zone);

void page_alloc_init_late(void);
void setup_pcp_cacheinfo(unsigned int cpu);

/*
 * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
 * GFP flags are used before interrupts are enabled. Once interrupts are
 * enabled, it is set to __GFP_BITS_MASK while the system is running. During
 * hibernation, it is used by PM to avoid I/O during memory allocation while
 * devices are suspended.
 */
extern gfp_t gfp_allowed_mask;

/* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */
bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);

static inline bool gfp_has_io_fs(gfp_t gfp)
{
	return (gfp & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS);
}

/*
 * Check if the gfp flags allow compaction - GFP_NOIO is a really
 * tricky context because the migration might require IO.
 */
static inline bool gfp_compaction_allowed(gfp_t gfp_mask)
{
	return IS_ENABLED(CONFIG_COMPACTION) && (gfp_mask & __GFP_IO);
}

extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma);

#ifdef CONFIG_CONTIG_ALLOC
/* The functions below must be run on a range within a single zone. */
extern int alloc_contig_range_noprof(unsigned long start, unsigned long end,
				     unsigned migratetype, gfp_t gfp_mask);
#define alloc_contig_range(...)		alloc_hooks(alloc_contig_range_noprof(__VA_ARGS__))

extern struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
					      int nid, nodemask_t *nodemask);
#define alloc_contig_pages(...)		alloc_hooks(alloc_contig_pages_noprof(__VA_ARGS__))

#endif
void free_contig_range(unsigned long pfn, unsigned long nr_pages);
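
/*
 * Example (illustrative sketch, CONFIG_CONTIG_ALLOC): allocate 1024
 * physically contiguous pages from any suitable zone on a node, then
 * return them:
 *
 *	struct page *page = alloc_contig_pages(1024, GFP_KERNEL,
 *					       first_online_node, NULL);
 *
 *	if (page)
 *		free_contig_range(page_to_pfn(page), 1024);
 */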

#endif /* __LINUX_GFP_H */