/* SPDX-License-Identifier: GPL-2.0-or-later */
/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/tracepoint-defs.h>

struct folio_batch;

/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_NOLOCKDEP)

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

/*
 * Different from WARN_ON_ONCE(), no warning will be issued
 * when we specify __GFP_NOWARN.
 */
#define WARN_ON_ONCE_GFP(cond, gfp)	({				\
	static bool __section(".data.once") __warned;			\
	int __ret_warn_once = !!(cond);					\
									\
	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
		__warned = true;					\
		WARN_ON(1);						\
	}								\
	unlikely(__ret_warn_once);					\
})

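/*
 * Rough usage sketch (illustrative, not a quote from any caller): the
 * macro is meant for allocation paths where the caller may have asked
 * for silence with __GFP_NOWARN, e.g.
 *
 *	if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp))
 *		return NULL;
 *
 * With __GFP_NOWARN set the condition is still evaluated and returned,
 * but no warning is printed; without it, the warning fires at most once
 * per call site, like WARN_ON_ONCE().
 */
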
void page_writeback_init(void);

/*
 * If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages,
 * its nr_pages_mapped would be 0x400000: choose the ENTIRELY_MAPPED bit
 * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE). Hugetlb currently
 * leaves nr_pages_mapped at 0, but avoid surprise if it participates later.
 */
#define ENTIRELY_MAPPED		0x800000
#define FOLIO_PAGES_MAPPED	(ENTIRELY_MAPPED - 1)

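/*
 * Worked example (illustrative): a 16GB hugetlb folio spans
 * 16GB / 4kB = 0x400000 small pages, so a fully PTE-mapped folio could
 * legitimately reach an _nr_pages_mapped count of 0x400000.
 * ENTIRELY_MAPPED (0x800000) is the next bit up and can never collide
 * with that count; FOLIO_PAGES_MAPPED (0x7fffff) masks it back off in
 * folio_nr_pages_mapped() below.
 */
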
/*
 * Flags passed to __show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */

/*
 * How many individual pages have an elevated _mapcount.  Excludes
 * the folio's entire_mapcount.
 *
 * Don't use this function outside of debugging code.
 */
static inline int folio_nr_pages_mapped(const struct folio *folio)
{
	return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
}

/*
 * Retrieve the first entry of a folio based on a provided entry within the
 * folio. We cannot rely on folio->swap as there is no guarantee that it has
 * been initialized. Used for calling arch_swap_restore()
 */
static inline swp_entry_t folio_swap(swp_entry_t entry,
		const struct folio *folio)
{
	swp_entry_t swap = {
		.val = ALIGN_DOWN(entry.val, folio_nr_pages(folio)),
	};

	return swap;
}

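/*
 * Worked example (illustrative, made-up numbers): for an order-4 folio
 * (16 pages) whose entries start at swap offset 0x2f0, an entry with
 * offset 0x2f3 is rounded down by ALIGN_DOWN(entry.val, 16) to offset
 * 0x2f0, i.e. the entry of the folio's first page. This relies on swap
 * entries of a large folio being naturally aligned and on the offset
 * occupying the low bits of swp_entry_t::val.
 */
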
static inline void *folio_raw_mapping(const struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
}

#ifdef CONFIG_MMU

/* Flags for folio_pte_batch(). */
typedef int __bitwise fpb_t;

/* Compare PTEs after pte_mkclean(), ignoring the dirty bit. */
#define FPB_IGNORE_DIRTY		((__force fpb_t)BIT(0))

/* Compare PTEs after pte_clear_soft_dirty(), ignoring the soft-dirty bit. */
#define FPB_IGNORE_SOFT_DIRTY		((__force fpb_t)BIT(1))

static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
{
	if (flags & FPB_IGNORE_DIRTY)
		pte = pte_mkclean(pte);
	if (likely(flags & FPB_IGNORE_SOFT_DIRTY))
		pte = pte_clear_soft_dirty(pte);
	return pte_wrprotect(pte_mkold(pte));
}

/**
 * folio_pte_batch - detect a PTE batch for a large folio
 * @folio: The large folio to detect a PTE batch for.
 * @addr: The user virtual address the first page is mapped at.
 * @start_ptep: Page table pointer for the first entry.
 * @pte: Page table entry for the first page.
 * @max_nr: The maximum number of table entries to consider.
 * @flags: Flags to modify the PTE batch semantics.
 * @any_writable: Optional pointer to indicate whether any entry except the
 *		  first one is writable.
 * @any_young: Optional pointer to indicate whether any entry except the
 *		  first one is young.
 * @any_dirty: Optional pointer to indicate whether any entry except the
 *		  first one is dirty.
 *
 * Detect a PTE batch: consecutive (present) PTEs that map consecutive
 * pages of the same large folio.
 *
 * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN,
 * the accessed bit, writable bit, dirty bit (with FPB_IGNORE_DIRTY) and
 * soft-dirty bit (with FPB_IGNORE_SOFT_DIRTY).
 *
 * start_ptep must map any page of the folio. max_nr must be at least one and
 * must be limited by the caller so scanning cannot exceed a single page table.
 *
 * Return: the number of table entries in the batch.
 */
static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
		pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags,
		bool *any_writable, bool *any_young, bool *any_dirty)
{
	unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
	const pte_t *end_ptep = start_ptep + max_nr;
	pte_t expected_pte, *ptep;
	bool writable, young, dirty;
	int nr;

	if (any_writable)
		*any_writable = false;
	if (any_young)
		*any_young = false;
	if (any_dirty)
		*any_dirty = false;

	VM_WARN_ON_FOLIO(!pte_present(pte), folio);
	VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio);
	VM_WARN_ON_FOLIO(page_folio(pfn_to_page(pte_pfn(pte))) != folio, folio);

	nr = pte_batch_hint(start_ptep, pte);
	expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, nr), flags);
	ptep = start_ptep + nr;

	while (ptep < end_ptep) {
		pte = ptep_get(ptep);
		if (any_writable)
			writable = !!pte_write(pte);
		if (any_young)
			young = !!pte_young(pte);
		if (any_dirty)
			dirty = !!pte_dirty(pte);
		pte = __pte_batch_clear_ignored(pte, flags);

		if (!pte_same(pte, expected_pte))
			break;

		/*
		 * Stop immediately once we reached the end of the folio. In
		 * corner cases the next PFN might fall into a different
		 * folio.
		 */
		if (pte_pfn(pte) >= folio_end_pfn)
			break;

		if (any_writable)
			*any_writable |= writable;
		if (any_young)
			*any_young |= young;
		if (any_dirty)
			*any_dirty |= dirty;

		nr = pte_batch_hint(ptep, pte);
		expected_pte = pte_advance_pfn(expected_pte, nr);
		ptep += nr;
	}

	return min(ptep - start_ptep, max_nr);
}

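/*
 * Rough usage sketch for folio_pte_batch() (illustrative only; "vma",
 * "addr", "end" and "ptep" are assumed locals of a caller that holds the
 * PTE lock and has already mapped the page table):
 *
 *	pte_t pte = ptep_get(ptep);
 *	struct folio *folio = vm_normal_folio(vma, addr, pte);
 *	int max_nr = (end - addr) >> PAGE_SHIFT;
 *	fpb_t flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
 *	int nr = folio_pte_batch(folio, addr, ptep, pte, max_nr, flags,
 *				 NULL, NULL, NULL);
 *
 * The caller can then process "nr" PTEs of the same folio in one step
 * instead of one page at a time.
 */
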
/**
 * pte_next_swp_offset - Increment the swap entry offset field of a swap pte.
 * @pte: The initial pte state; is_swap_pte(pte) must be true and
 *	 non_swap_entry() must be false.
 *
 * Increments the swap offset, while maintaining all other fields, including
 * swap type, and any swp pte bits. The resulting pte is returned.
 */
static inline pte_t pte_next_swp_offset(pte_t pte)
{
	swp_entry_t entry = pte_to_swp_entry(pte);
	pte_t new = __swp_entry_to_pte(__swp_entry(swp_type(entry),
						   (swp_offset(entry) + 1)));

	if (pte_swp_soft_dirty(pte))
		new = pte_swp_mksoft_dirty(new);
	if (pte_swp_exclusive(pte))
		new = pte_swp_mkexclusive(new);
	if (pte_swp_uffd_wp(pte))
		new = pte_swp_mkuffd_wp(new);

	return new;
}

/**
 * swap_pte_batch - detect a PTE batch for a set of contiguous swap entries
 * @start_ptep: Page table pointer for the first entry.
 * @max_nr: The maximum number of table entries to consider.
 * @pte: Page table entry for the first entry.
 *
 * Detect a batch of contiguous swap entries: consecutive (non-present) PTEs
 * containing swap entries all with consecutive offsets and targeting the same
 * swap type, all with matching swp pte bits.
 *
 * max_nr must be at least one and must be limited by the caller so scanning
 * cannot exceed a single page table.
 *
 * Return: the number of table entries in the batch.
 */
static inline int swap_pte_batch(pte_t *start_ptep, int max_nr, pte_t pte)
{
	pte_t expected_pte = pte_next_swp_offset(pte);
	const pte_t *end_ptep = start_ptep + max_nr;
	pte_t *ptep = start_ptep + 1;

	VM_WARN_ON(max_nr < 1);
	VM_WARN_ON(!is_swap_pte(pte));
	VM_WARN_ON(non_swap_entry(pte_to_swp_entry(pte)));

	while (ptep < end_ptep) {
		pte = ptep_get(ptep);

		if (!pte_same(pte, expected_pte))
			break;

		expected_pte = pte_next_swp_offset(expected_pte);
		ptep++;
	}

	return ptep - start_ptep;
}
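
/*
 * Rough usage sketch for swap_pte_batch() (illustrative only; "addr",
 * "end" and "ptep" are assumed locals of a caller scanning one page
 * table with the PTE lock held):
 *
 *	pte_t pte = ptep_get(ptep);
 *	if (is_swap_pte(pte) && !non_swap_entry(pte_to_swp_entry(pte))) {
 *		int max_nr = (end - addr) >> PAGE_SHIFT;
 *		int nr = swap_pte_batch(ptep, max_nr, pte);
 *		... handle "nr" consecutive swap entries in one go ...
 *	}
 */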
#endif /* CONFIG_MMU */

void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
						int nr_throttled);
static inline void acct_reclaim_writeback(struct folio *folio)
{
	pg_data_t *pgdat = folio_pgdat(folio);
	int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);

	if (nr_throttled)
		__acct_reclaim_writeback(pgdat, folio, nr_throttled);
}

static inline void wake_throttle_isolated(pg_data_t *pgdat)
{
	wait_queue_head_t *wqh;

	wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
	if (waitqueue_active(wqh))
		wake_up(wqh);
}

vm_fault_t vmf_anon_prepare(struct vm_fault *vmf);
vm_fault_t do_swap_page(struct vm_fault *vmf);
void folio_rotate_reclaimable(struct folio *folio);
bool __folio_end_writeback(struct folio *folio);
void deactivate_file_folio(struct folio *folio);
void folio_activate(struct folio *folio);

void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
		   struct vm_area_struct *start_vma, unsigned long floor,
		   unsigned long ceiling, bool mm_wr_locked);
void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);

struct zap_details;
void unmap_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end,
			     struct zap_details *details);

void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
		unsigned int order);
void force_page_cache_ra(struct readahead_control *, unsigned long nr);
static inline void force_page_cache_readahead(struct address_space *mapping,
		struct file *file, pgoff_t index, unsigned long nr_to_read)
{
	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
	force_page_cache_ra(&ractl, nr_to_read);
}

unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
void filemap_free_folio(struct address_space *mapping, struct folio *folio);
int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
		loff_t end);
long mapping_evict_folio(struct address_space *mapping, struct folio *folio);
unsigned long mapping_try_invalidate(struct address_space *mapping,
		pgoff_t start, pgoff_t end, unsigned long *nr_failed);

/**
 * folio_evictable - Test whether a folio is evictable.
 * @folio: The folio to test.
 *
 * Test whether @folio is evictable -- i.e., should be placed on
 * active/inactive lists vs unevictable list.
 *
 * Reasons folio might not be evictable:
 * 1. folio's mapping marked unevictable
 * 2. One of the pages in the folio is part of an mlocked VMA
 */
static inline bool folio_evictable(struct folio *folio)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(folio_mapping(folio)) &&
			!folio_test_mlocked(folio);
	rcu_read_unlock();
	return ret;
}

/*
 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}

/*
 * Return true if a folio needs ->release_folio() calling upon it.
 */
static inline bool folio_needs_release(struct folio *folio)
{
	struct address_space *mapping = folio_mapping(folio);

	return folio_has_private(folio) ||
		(mapping && mapping_release_always(mapping));
}

extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16

/*
 * in mm/vmscan.c:
 */
bool isolate_lru_page(struct page *page);
bool folio_isolate_lru(struct folio *folio);
void putback_lru_page(struct page *page);
void folio_putback_lru(struct folio *folio);
extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);

/*
 * in mm/rmap.c:
 */
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/page_alloc.c
 */
#define K(x) ((x) << (PAGE_SHIFT-10))

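/*
 * Worked example (illustrative): with 4kB pages PAGE_SHIFT is 12, so
 * K(x) == x << 2, i.e. it converts a page count into kilobytes:
 * K(256) == 1024 (256 pages == 1MB == 1024kB).
 */
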
extern char * const zone_names[MAX_NR_ZONES];

/* perform sanity checks on struct pages being allocated or freed */
DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);

extern int min_free_kbytes;

void setup_per_zone_wmarks(void);
void calculate_min_free_kbytes(void);
int __meminit init_per_zone_wmark_min(void);
void page_alloc_sysctl_init(void);

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and highest_zoneidx are initialized only once in
 * __alloc_pages() and then never change.
 *
 * zonelist, preferred_zone and highest_zoneidx are set first in
 * __alloc_pages() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;

	/*
	 * highest_zoneidx represents highest usable zone index of
	 * the allocation request. Due to the nature of the zone,
	 * memory on lower zone than the highest_zoneidx will be
	 * protected by lowmem_reserve[highest_zoneidx].
	 *
	 * highest_zoneidx is also used by reclaim/compaction to limit
	 * the target zone since higher zone than this index cannot be
	 * usable for this allocation request.
	 */
	enum zone_type highest_zoneidx;
	bool spread_dirty_pages;
};

/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use buddy_order_unsafe() below.
 */
static inline unsigned int buddy_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/*
 * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define buddy_order_unsafe(page)	READ_ONCE(page_private(page))

/*
 * This function checks whether a page is free && is the buddy
 * we can coalesce a page and its buddy if
 * (a) the buddy is not in a hole (check before calling!) &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set PageBuddy.
 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline bool page_is_buddy(struct page *page, struct page *buddy,
				 unsigned int order)
{
	if (!page_is_guard(buddy) && !PageBuddy(buddy))
		return false;

	if (buddy_order(buddy) != order)
		return false;

	/*
	 * zone check is done late to avoid uselessly calculating
	 * zone/node ids for pages that could never merge.
	 */
	if (page_zone_id(page) != page_zone_id(buddy))
		return false;

	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

	return true;
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_PAGE_ORDER
 */
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn ^ (1 << order);
}

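/*
 * Worked example (illustrative): for order 2, pfn 8 gives
 * 8 ^ (1 << 2) == 12 and pfn 12 gives 12 ^ 4 == 8 -- the two order-2
 * halves of the order-3 block starting at pfn 8. The XOR flips only the
 * bit that distinguishes the pair, so the result always stays within
 * the aligned parent block.
 */
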
/*
 * Find the buddy of @page and validate it.
 * @page: The input page
 * @pfn: The pfn of the page, it saves a call to page_to_pfn() when the
 *       function is used in the performance-critical __free_one_page().
 * @order: The order of the page
 * @buddy_pfn: The output pointer to the buddy pfn, it also saves a call to
 *             page_to_pfn().
 *
 * The found buddy can be a non PageBuddy, out of @page's zone, or its order is
 * not the same as @page. The validation is necessary before using it.
 *
 * Return: the found buddy page or NULL if not found.
 */
static inline struct page *find_buddy_page_pfn(struct page *page,
			unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
{
	unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
	struct page *buddy;

	buddy = page + (__buddy_pfn - pfn);
	if (buddy_pfn)
		*buddy_pfn = __buddy_pfn;

	if (page_is_buddy(page, buddy, order))
		return buddy;
	return NULL;
}

extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

void set_zone_contiguous(struct zone *zone);

static inline void clear_zone_contiguous(struct zone *zone)
{
	zone->contiguous = false;
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
				    int mt);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
					unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order);
extern void kernel_init_pages(struct page *page, int numpages);

/*
 * This will have no effect, other than possibly generating a warning, if the
 * caller passes in a non-large folio.
 */
static inline void folio_set_order(struct folio *folio, unsigned int order)
{
	if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
		return;

	folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order;
#ifdef CONFIG_64BIT
	folio->_folio_nr_pages = 1U << order;
#endif
}

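/*
 * Worked example (illustrative): folio_set_order(folio, 4) stores 4 in
 * the low byte of _flags_1 (leaving the remaining flag bits untouched)
 * and, on 64-bit, caches _folio_nr_pages = 1 << 4 = 16 so that
 * folio_nr_pages() can read the value directly instead of recomputing
 * the shift.
 */
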
void folio_undo_large_rmappable(struct folio *folio);

static inline struct folio *page_rmappable_folio(struct page *page)
{
	struct folio *folio = (struct folio *)page;

	if (folio && folio_test_large(folio))
		folio_set_large_rmappable(folio);
	return folio;
}

static inline void prep_compound_head(struct page *page, unsigned int order)
{
	struct folio *folio = (struct folio *)page;

	folio_set_order(folio, order);
	atomic_set(&folio->_large_mapcount, -1);
	atomic_set(&folio->_entire_mapcount, -1);
	atomic_set(&folio->_nr_pages_mapped, 0);
	atomic_set(&folio->_pincount, 0);
	if (order > 1)
		INIT_LIST_HEAD(&folio->_deferred_list);
}

static inline void prep_compound_tail(struct page *head, int tail_idx)
{
	struct page *p = head + tail_idx;

	p->mapping = TAIL_MAPPING;
	set_compound_head(p, head);
	set_page_private(p, 0);
}

extern void prep_compound_page(struct page *page, unsigned int order);

extern void post_alloc_hook(struct page *page, unsigned int order,
					gfp_t gfp_flags);
extern bool free_pages_prepare(struct page *page, unsigned int order);

extern int user_min_free_kbytes;

void free_unref_page(struct page *page, unsigned int order);
void free_unref_folios(struct folio_batch *fbatch);

extern void zone_pcp_reset(struct zone *zone);
extern void zone_pcp_disable(struct zone *zone);
extern void zone_pcp_enable(struct zone *zone);
extern void zone_pcp_init(struct zone *zone);

extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
			  phys_addr_t min_addr,
			  int nid, bool exact_nid);

void memmap_init_range(unsigned long, int, unsigned long, unsigned long,
		unsigned long, enum meminit_context, struct vmem_altmap *, int);

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
	struct list_head freepages[NR_PAGE_ORDERS];	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned int nr_freepages;	/* Number of isolated free pages */
	unsigned int nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	/*
	 * Acts as an in/out parameter to page isolation for migration.
	 * isolate_migratepages uses it as a search base.
	 * isolate_migratepages_block will update the value to the next pfn
	 * after the last isolated one.
	 */
	unsigned long migrate_pfn;
	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
	struct zone *zone;
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned short fast_search_fail;/* failures to use free list searches */
	short search_order;		/* order to start a fast search at */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int highest_zoneidx;	/* zone index of a direct compactor */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* False from kcompactd or /proc/... */
	bool proactive_compaction;	/* kcompactd proactive compaction */
	bool whole_zone;		/* Whole zone should/has been scanned */
	bool contended;			/* Signal lock contention */
	bool finish_pageblock;		/* Scan the remainder of a pageblock. Used
					 * when there are potentially transient
					 * isolation or migration failures to
					 * ensure forward progress.
					 */
	bool alloc_contig;		/* alloc_contig_range allocation */
};

/*
 * Used in direct compaction when a page should be taken from the freelists
 * immediately when one is created during the free path.
 */
struct capture_control {
	struct compact_control *cc;
	struct page *page;
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
int
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);

int __alloc_contig_migrate_range(struct compact_control *cc,
					unsigned long start, unsigned long end,
					int migratetype);

/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void init_cma_reserved_pageblock(struct page *page);

#endif /* CONFIG_COMPACTION || CONFIG_CMA */

int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);

static inline bool free_area_empty(struct free_area *area, int migratetype)
{
	return list_empty(&area->free_list[migratetype]);
}

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area (including shadow stacks)
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}

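/*
 * Illustrative classification (assuming typical mmap flag combinations):
 * a PROT_READ|PROT_EXEC file text mapping matches is_exec_mapping(), a
 * VM_GROWSDOWN/VM_GROWSUP (or shadow stack) VMA matches
 * is_stack_mapping(), and a private writable non-stack mapping matches
 * is_data_mapping(). A shared writable mapping matches none of the three.
 */
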
/* mm/util.c */
struct anon_vma *folio_anon_vma(struct folio *folio);

#ifdef CONFIG_MMU
void unmap_mapping_folio(struct folio *folio);
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *locked);
extern long faultin_page_range(struct mm_struct *mm, unsigned long start,
		unsigned long end, bool write, int *locked);
extern bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
			       unsigned long bytes);

/*
 * NOTE: This function can't tell whether the folio is "fully mapped" in the
 * range.
 * "fully mapped" means all the pages of the folio are associated with the
 * page table of the range, while this function just checks whether the folio
 * range is within the range [start, end). The caller needs to do a page table
 * check if it cares about the page table association.
 *
 * Typical usage (like mlock or madvise) is:
 * Caller knows at least 1 page of folio is associated with page table of VMA
 * and the range [start, end) intersects with the VMA range. Caller wants
 * to know whether the folio is fully associated with the range. It calls
 * this function to check whether the folio is in the range first. Then checks
 * the page table to know whether the folio is fully mapped to the range.
 */
static inline bool
folio_within_range(struct folio *folio, struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	pgoff_t pgoff, addr;
	unsigned long vma_pglen = vma_pages(vma);

	VM_WARN_ON_FOLIO(folio_test_ksm(folio), folio);
	if (start > end)
		return false;

	if (start < vma->vm_start)
		start = vma->vm_start;

	if (end > vma->vm_end)
		end = vma->vm_end;

	pgoff = folio_pgoff(folio);

	/* if folio start address is not in vma range */
	if (!in_range(pgoff, vma->vm_pgoff, vma_pglen))
		return false;

	addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);

	return !(addr < start || end - addr < folio_size(folio));
}

static inline bool
folio_within_vma(struct folio *folio, struct vm_area_struct *vma)
{
	return folio_within_range(folio, vma, vma->vm_start, vma->vm_end);
}

/*
 * mlock_vma_folio() and munlock_vma_folio():
 * should be called with vma's mmap_lock held for read or write,
 * under page table lock for the pte/pmd being added or removed.
 *
 * mlock is usually called at the end of folio_add_*_rmap_*(), munlock at
 * the end of folio_remove_rmap_*(); but new anon folios are managed by
 * folio_add_lru_vma() calling mlock_new_folio().
 */
void mlock_folio(struct folio *folio);
static inline void mlock_vma_folio(struct folio *folio,
				struct vm_area_struct *vma)
{
	/*
	 * The VM_SPECIAL check here serves two purposes.
	 * 1) VM_IO check prevents migration from double-counting during mlock.
	 * 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED
	 *    is never left set on a VM_SPECIAL vma, there is an interval while
	 *    file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may
	 *    still be set while VM_SPECIAL bits are added: so ignore it then.
	 */
	if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED))
		mlock_folio(folio);
}

void munlock_folio(struct folio *folio);
static inline void munlock_vma_folio(struct folio *folio,
					struct vm_area_struct *vma)
{
	/*
	 * munlock if the function is called. Ideally, we should only
	 * do munlock if any page of the folio is unmapped from the VMA,
	 * leaving the folio not fully mapped to the VMA.
	 *
	 * But it's not easy to confirm that's the situation. So we
	 * always munlock the folio and page reclaim will correct it
	 * if it's wrong.
	 */
	if (unlikely(vma->vm_flags & VM_LOCKED))
		munlock_folio(folio);
}

void mlock_new_folio(struct folio *folio);
bool need_mlock_drain(int cpu);
void mlock_drain_local(void);
void mlock_drain_remote(int cpu);

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/**
 * vma_address - Find the virtual address a page range is mapped at
 * @vma: The vma which maps this object.
 * @pgoff: The page offset within its object.
 * @nr_pages: The number of pages to consider.
 *
 * If any page in this range is mapped by this VMA, return the first address
 * where any of these pages appear. Otherwise, return -EFAULT.
 */
static inline unsigned long vma_address(struct vm_area_struct *vma,
		pgoff_t pgoff, unsigned long nr_pages)
{
	unsigned long address;

	if (pgoff >= vma->vm_pgoff) {
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		/* Check for address beyond vma (or wrapped through 0?) */
		if (address < vma->vm_start || address >= vma->vm_end)
			address = -EFAULT;
	} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
		/* Test above avoids possibility of wrap to 0 on 32-bit */
		address = vma->vm_start;
	} else {
		address = -EFAULT;
	}
	return address;
}

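/*
 * Worked example (illustrative, made-up numbers): for a VMA with
 * vm_start == 0x7f0000000000 and vm_pgoff == 0x10, a page at file offset
 * pgoff == 0x12 maps at
 *	0x7f0000000000 + ((0x12 - 0x10) << PAGE_SHIFT)
 * i.e. two pages past vm_start (0x7f0000002000 with 4kB pages). If the
 * computed address falls outside [vm_start, vm_end), or the whole pgoff
 * range ends before vm_pgoff, -EFAULT is returned instead.
 */
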
/*
 * Then at what user virtual address will none of the range be found in vma?
 * Assumes that vma_address() already returned a good starting address.
 */
static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	pgoff_t pgoff;
	unsigned long address;

	/* Common case, plus ->pgoff is invalid for KSM */
	if (pvmw->nr_pages == 1)
		return pvmw->address + PAGE_SIZE;

	pgoff = pvmw->pgoff + pvmw->nr_pages;
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	/* Check for address beyond vma (or wrapped through 0?) */
	if (address < vma->vm_start || address > vma->vm_end)
		address = vma->vm_end;
	return address;
}

static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
						    struct file *fpin)
{
	int flags = vmf->flags;

	if (fpin)
		return fpin;

	/*
	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
	 * anything, so we only pin the file and drop the mmap_lock if only
	 * FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
	 */
	if (fault_flag_allow_retry_first(flags) &&
	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
		fpin = get_file(vmf->vma->vm_file);
		release_fault_lock(vmf);
	}
	return fpin;
}
#else /* !CONFIG_MMU */
static inline void unmap_mapping_folio(struct folio *folio) { }
static inline void mlock_new_folio(struct folio *folio) { }
static inline bool need_mlock_drain(int cpu) { return false; }
static inline void mlock_drain_local(void) { }
static inline void mlock_drain_remote(int cpu) { }
static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
{
}
#endif /* !CONFIG_MMU */

/* Memory initialisation debug and verification */
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
DECLARE_STATIC_KEY_TRUE(deferred_pages);

bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg);	\
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

#ifdef CONFIG_NUMA
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
extern int find_next_best_node(int node, nodemask_t *used_node_mask);
#else
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return NODE_RECLAIM_NOSCAN;
}
static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
{
	return NUMA_NO_NODE;
}
#endif

/*
 * mm/memory-failure.c
 */
void shake_folio(struct folio *folio);
extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

extern unsigned long  __must_check vm_mmap_pgoff(struct file *, unsigned long,
	unsigned long, unsigned long,
	unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned long reclaim_pages(struct list_head *folio_list);
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *folio_list);
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)

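/*
 * Worked example (illustrative): WMARK_MIN, WMARK_LOW and WMARK_HIGH are
 * 0, 1 and 2, so the two low bits of alloc_flags select which zone
 * watermark to test, and ALLOC_WMARK_MASK == ALLOC_NO_WATERMARKS - 1 ==
 * 0x03 extracts them; ALLOC_NO_WATERMARKS itself bypasses the check
 * entirely.
 */
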
/*
 * Only MMU archs have async oom victim reclaim - aka oom_reaper so we
 * cannot assume a reduced access to memory reserves is sufficient for
 * !MMU
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM		0x08
#else
#define ALLOC_OOM		ALLOC_NO_WATERMARKS
#endif

#define ALLOC_NON_BLOCK		 0x10 /* Caller cannot block. Allow access
					 * to 25% of the min watermark or
					 * 62.5% if __GFP_HIGH is set.
					 */
#define ALLOC_MIN_RESERVE	 0x20 /* __GFP_HIGH set. Allow access to 50%
					 * of the min watermark.
					 */
#define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
#define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
#else
#define ALLOC_NOFRAGMENT	  0x0
#endif
#define ALLOC_HIGHATOMIC	0x200 /* Allows access to MIGRATE_HIGHATOMIC */
#define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */

/* Flags that allow allocations below the min watermark. */
#define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)

enum ttu_flags;
struct tlbflush_unmap_batch;


/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags pagetype_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
	return migratetype == MIGRATE_HIGHATOMIC;
}

void setup_zone_pageset(struct zone *zone);

struct migration_target_control {
	int nid;		/* preferred node id */
	nodemask_t *nmask;
	gfp_t gfp_mask;
	enum migrate_reason reason;
};

/*
 * mm/filemap.c
 */
size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
			      struct folio *folio, loff_t fpos, size_t size);

/*
 * mm/vmalloc.c
 */
#ifdef CONFIG_MMU
void __init vmalloc_init(void);
int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift);
#else
static inline void vmalloc_init(void)
{
}

static inline
int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	return -EINVAL;
}
#endif

int __must_check __vmap_pages_range_noflush(unsigned long addr,
			       unsigned long end, pgprot_t prot,
			       struct page **pages, unsigned int page_shift);

void vunmap_range_noflush(unsigned long start, unsigned long end);

void __vunmap_range_noflush(unsigned long start, unsigned long end);

int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf,
		      unsigned long addr, int page_nid, int *flags);

void free_zone_device_folio(struct folio *folio);
int migrate_device_coherent_page(struct page *page);

/*
 * mm/gup.c
 */
struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags);
int __must_check try_grab_page(struct page *page, unsigned int flags);

/*
 * mm/huge_memory.c
 */
void touch_pud(struct vm_area_struct *vma, unsigned long addr,
	       pud_t *pud, bool write);
void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
	       pmd_t *pmd, bool write);

/*
 * mm/mmap.c
 */
struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
					struct vm_area_struct *vma,
					unsigned long delta);

enum {
	/* mark page accessed */
	FOLL_TOUCH = 1 << 16,
	/* a retry, previous pass started an IO */
	FOLL_TRIED = 1 << 17,
	/* we are working on non-current tsk/mm */
	FOLL_REMOTE = 1 << 18,
	/* pages must be released via unpin_user_page */
	FOLL_PIN = 1 << 19,
	/* gup_fast: prevent fall-back to slow gup */
	FOLL_FAST_ONLY = 1 << 20,
	/* allow unlocking the mmap lock */
	FOLL_UNLOCKABLE = 1 << 21,
	/* VMA lookup+checks compatible with MADV_POPULATE_(READ|WRITE) */
	FOLL_MADV_POPULATE = 1 << 22,
};

#define INTERNAL_GUP_FLAGS (FOLL_TOUCH | FOLL_TRIED | FOLL_REMOTE | FOLL_PIN | \
			    FOLL_FAST_ONLY | FOLL_UNLOCKABLE | \
			    FOLL_MADV_POPULATE)

/*
 * Indicates for which pages that are write-protected in the page table,
 * whether GUP has to trigger unsharing via FAULT_FLAG_UNSHARE such that the
 * GUP pin will remain consistent with the pages mapped into the page tables
 * of the MM.
 *
 * Temporary unmapping of PageAnonExclusive() pages or clearing of
 * PageAnonExclusive() has to protect against concurrent GUP:
 * * Ordinary GUP: Using the PT lock
 * * GUP-fast and fork(): mm->write_protect_seq
 * * GUP-fast and KSM or temporary unmapping (swap, migration): see
 *   folio_try_share_anon_rmap_*()
 *
 * Must be called with the (sub)page that's actually referenced via the
 * page table entry, which might not necessarily be the head page for a
 * PTE-mapped THP.
 *
 * If the vma is NULL, we're coming from the GUP-fast path and might have
 * to fallback to the slow path just to lookup the vma.
 */
static inline bool gup_must_unshare(struct vm_area_struct *vma,
				    unsigned int flags, struct page *page)
{
	/*
	 * FOLL_WRITE is implicitly handled correctly as the page table entry
	 * has to be writable -- and if it references (part of) an anonymous
	 * folio, that part is required to be marked exclusive.
	 */
	if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
		return false;
	/*
	 * Note: PageAnon(page) is stable until the page is actually getting
	 * freed.
	 */
	if (!PageAnon(page)) {
		/*
		 * We only care about R/O long-term pinning: R/O short-term
		 * pinning does not have the semantics to observe successive
		 * changes through the process page tables.
		 */
		if (!(flags & FOLL_LONGTERM))
			return false;

		/* We really need the vma ... */
		if (!vma)
			return true;

		/*
		 * ... because we only care about writable private ("COW")
		 * mappings where we have to break COW early.
		 */
		return is_cow_mapping(vma->vm_flags);
	}

	/* Paired with a memory barrier in folio_try_share_anon_rmap_*(). */
	if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
		smp_rmb();

	/*
	 * Note that PageKsm() pages cannot be exclusive, and consequently,
	 * cannot get pinned.
	 */
	return !PageAnonExclusive(page);
}

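/*
 * Summarised decision (illustrative reading of the logic above): a plain
 * R/O FOLL_PIN of an anonymous page that is no longer PageAnonExclusive()
 * must trigger FAULT_FLAG_UNSHARE, otherwise a later write fault could
 * replace the page underneath the pin (e.g. after fork() shared it via
 * COW). Writable pins never reach this point, and file-backed pages only
 * matter for FOLL_LONGTERM pins of private COW mappings.
 */
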
902c2d91 | 1290 | extern bool mirrored_kernelcore; |
0db31d63 | 1291 | extern bool memblock_has_mirror(void); |
902c2d91 | 1292 | |
412c6ef9 YD |
1293 | static __always_inline void vma_set_range(struct vm_area_struct *vma, |
1294 | unsigned long start, unsigned long end, | |
1295 | pgoff_t pgoff) | |
1296 | { | |
1297 | vma->vm_start = start; | |
1298 | vma->vm_end = end; | |
1299 | vma->vm_pgoff = pgoff; | |
1300 | } | |
1301 | ||
76aefad6 PX |
1302 | static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma) |
1303 | { | |
1304 | /* | |
1305 | * NOTE: we must check CONFIG_MEM_SOFT_DIRTY before testing | 
1306 | * VM_SOFTDIRTY, because when soft-dirty is not compiled in, | 
1307 | * VM_SOFTDIRTY is defined as 0x0, so !(vm_flags & VM_SOFTDIRTY) | 
1308 | * would always be true. | 
1309 | */ | |
1310 | if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY)) | |
1311 | return false; | |
1312 | ||
1313 | /* | |
1314 | * Soft-dirty is kind of special: its tracking is enabled when the | 
1315 | * VM_SOFTDIRTY vma flag is *not* set. | 
1316 | */ | |
1317 | return !(vma->vm_flags & VM_SOFTDIRTY); | |
1318 | } | |
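/*
 * Illustrative sketch, not part of this header: a reporting path would
 * treat every page of a vma as soft-dirty while tracking is disabled
 * (VM_SOFTDIRTY still set) and only then consult the per-PTE bit.
 * The helper name is an assumption for illustration only.
 */
static inline bool example_pte_reported_soft_dirty(struct vm_area_struct *vma,
						   pte_t pte)
{
	/* tracking off: the whole vma is considered soft-dirty */
	if (!vma_soft_dirty_enabled(vma))
		return true;

	return pte_soft_dirty(pte);
}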
1319 | ||
53bee98d LH |
1320 | static inline void vma_iter_config(struct vma_iterator *vmi, |
1321 | unsigned long index, unsigned long last) | |
1322 | { | |
53bee98d LH |
1323 | __mas_set_range(&vmi->mas, index, last - 1); |
1324 | } | |
1325 | ||
d4e6b397 YD |
1326 | static inline void vma_iter_reset(struct vma_iterator *vmi) |
1327 | { | |
1328 | mas_reset(&vmi->mas); | |
1329 | } | |
1330 | ||
1331 | static inline | |
1332 | struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min) | |
1333 | { | |
1334 | return mas_prev_range(&vmi->mas, min); | |
1335 | } | |
1336 | ||
1337 | static inline | |
1338 | struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max) | |
1339 | { | |
1340 | return mas_next_range(&vmi->mas, max); | |
1341 | } | |
1342 | ||
1343 | static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min, | |
1344 | unsigned long max, unsigned long size) | |
1345 | { | |
1346 | return mas_empty_area(&vmi->mas, min, max - 1, size); | |
1347 | } | |
1348 | ||
1349 | static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min, | |
1350 | unsigned long max, unsigned long size) | |
1351 | { | |
1352 | return mas_empty_area_rev(&vmi->mas, min, max - 1, size); | |
1353 | } | |
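/*
 * Illustrative sketch, not part of this header: searching for the lowest
 * free gap of a given size with the VMA iterator, loosely following what
 * the unmapped-area search does. example_find_gap() is an assumption for
 * illustration; vma_iter_addr() is assumed from <linux/mm.h>.
 */
static inline unsigned long example_find_gap(struct mm_struct *mm,
					     unsigned long min,
					     unsigned long max,
					     unsigned long size)
{
	VMA_ITERATOR(vmi, mm, min);

	if (vma_iter_area_lowest(&vmi, min, max, size))
		return -ENOMEM;

	/* on success the iterator indexes the start of the gap */
	return vma_iter_addr(&vmi);
}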
1354 | ||
b62b633e LH |
1355 | /* |
1356 | * VMA Iterator functions shared between nommu and mmap | |
1357 | */ | |
b5df0922 LH |
1358 | static inline int vma_iter_prealloc(struct vma_iterator *vmi, |
1359 | struct vm_area_struct *vma) | |
b62b633e | 1360 | { |
b5df0922 | 1361 | return mas_preallocate(&vmi->mas, vma, GFP_KERNEL); |
b62b633e LH |
1362 | } |
1363 | ||
b5df0922 | 1364 | static inline void vma_iter_clear(struct vma_iterator *vmi) |
b62b633e | 1365 | { |
b62b633e LH |
1366 | mas_store_prealloc(&vmi->mas, NULL); |
1367 | } | |
1368 | ||
1369 | static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi) | |
1370 | { | |
1371 | return mas_walk(&vmi->mas); | |
1372 | } | |
1373 | ||
1374 | /* Store a VMA with preallocated memory */ | |
1375 | static inline void vma_iter_store(struct vma_iterator *vmi, | |
1376 | struct vm_area_struct *vma) | |
1377 | { | |
1378 | ||
1379 | #if defined(CONFIG_DEBUG_VM_MAPLE_TREE) | |
067311d3 | 1380 | if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start && |
36bd9310 LH |
1381 | vmi->mas.index > vma->vm_start)) { |
1382 | pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n", | |
1383 | vmi->mas.index, vma->vm_start, vma->vm_start, | |
1384 | vma->vm_end, vmi->mas.index, vmi->mas.last); | |
b62b633e | 1385 | } |
067311d3 | 1386 | if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start && |
36bd9310 LH |
1387 | vmi->mas.last < vma->vm_start)) { |
1388 | pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n", | |
1389 | vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end, | |
1390 | vmi->mas.index, vmi->mas.last); | |
b62b633e LH |
1391 | } |
1392 | #endif | |
1393 | ||
067311d3 | 1394 | if (vmi->mas.status != ma_start && |
b62b633e LH |
1395 | ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start))) |
1396 | vma_iter_invalidate(vmi); | |
1397 | ||
b5df0922 | 1398 | __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1); |
b62b633e LH |
1399 | mas_store_prealloc(&vmi->mas, vma); |
1400 | } | |
1401 | ||
1402 | static inline int vma_iter_store_gfp(struct vma_iterator *vmi, | |
1403 | struct vm_area_struct *vma, gfp_t gfp) | |
1404 | { | |
067311d3 | 1405 | if (vmi->mas.status != ma_start && |
b62b633e LH |
1406 | ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start))) |
1407 | vma_iter_invalidate(vmi); | |
1408 | ||
b5df0922 | 1409 | __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1); |
b62b633e LH |
1410 | mas_store_gfp(&vmi->mas, vma, gfp); |
1411 | if (unlikely(mas_is_err(&vmi->mas))) | |
1412 | return -ENOMEM; | |
1413 | ||
1414 | return 0; | |
1415 | } | |
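/*
 * Illustrative sketch, not part of this header: the usual pairing of
 * vma_iter_prealloc() and vma_iter_store() when inserting a VMA while
 * holding the mmap lock, so the store itself cannot fail. The helper
 * name is an assumption for illustration; rmap/file locking is omitted.
 */
static inline int example_insert_vma(struct vma_iterator *vmi,
				     struct vm_area_struct *vma)
{
	/* reserve maple tree nodes up front */
	if (vma_iter_prealloc(vmi, vma))
		return -ENOMEM;

	vma_iter_store(vmi, vma);

	return 0;
}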
440703e0 LH |
1416 | |
1417 | /* | |
1418 | * VMA lock generalization | |
1419 | */ | |
1420 | struct vma_prepare { | |
1421 | struct vm_area_struct *vma; | |
1422 | struct vm_area_struct *adj_next; | |
1423 | struct file *file; | |
1424 | struct address_space *mapping; | |
1425 | struct anon_vma *anon_vma; | |
1426 | struct vm_area_struct *insert; | |
1427 | struct vm_area_struct *remove; | |
1428 | struct vm_area_struct *remove2; | |
1429 | }; | |
3ee0aa9f | 1430 | |
fde1c4ec UA |
1431 | void __meminit __init_single_page(struct page *page, unsigned long pfn, |
1432 | unsigned long zone, int nid); | |
1433 | ||
3ee0aa9f | 1434 | /* shrinker related functions */ |
96f7b2b9 QZ |
1435 | unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg, |
1436 | int priority); | |
3ee0aa9f QZ |
1437 | |
1438 | #ifdef CONFIG_SHRINKER_DEBUG | |
f04eba13 LM |
1439 | static inline __printf(2, 0) int shrinker_debugfs_name_alloc( |
1440 | struct shrinker *shrinker, const char *fmt, va_list ap) | |
c42d50ae QZ |
1441 | { |
1442 | shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap); | |
1443 | ||
1444 | return shrinker->name ? 0 : -ENOMEM; | |
1445 | } | |
1446 | ||
1447 | static inline void shrinker_debugfs_name_free(struct shrinker *shrinker) | |
1448 | { | |
1449 | kfree_const(shrinker->name); | |
1450 | shrinker->name = NULL; | |
1451 | } | |
1452 | ||
3ee0aa9f QZ |
1453 | extern int shrinker_debugfs_add(struct shrinker *shrinker); |
1454 | extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker, | |
1455 | int *debugfs_id); | |
1456 | extern void shrinker_debugfs_remove(struct dentry *debugfs_entry, | |
1457 | int debugfs_id); | |
1458 | #else /* CONFIG_SHRINKER_DEBUG */ | |
1459 | static inline int shrinker_debugfs_add(struct shrinker *shrinker) | |
1460 | { | |
1461 | return 0; | |
1462 | } | |
c42d50ae QZ |
1463 | static inline int shrinker_debugfs_name_alloc(struct shrinker *shrinker, |
1464 | const char *fmt, va_list ap) | |
1465 | { | |
1466 | return 0; | |
1467 | } | |
1468 | static inline void shrinker_debugfs_name_free(struct shrinker *shrinker) | |
1469 | { | |
1470 | } | |
3ee0aa9f QZ |
1471 | static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker, |
1472 | int *debugfs_id) | |
1473 | { | |
1474 | *debugfs_id = -1; | |
1475 | return NULL; | |
1476 | } | |
1477 | static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry, | |
1478 | int debugfs_id) | |
1479 | { | |
1480 | } | |
1481 | #endif /* CONFIG_SHRINKER_DEBUG */ | |
1482 | ||
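/*
 * Illustrative sketch, not part of this header: a varargs constructor
 * routing its format string through shrinker_debugfs_name_alloc(), which
 * compiles to a stub returning 0 when CONFIG_SHRINKER_DEBUG is disabled.
 * The helper name is an assumption for illustration only.
 */
static inline __printf(2, 3) int example_name_shrinker(struct shrinker *shrinker,
							const char *fmt, ...)
{
	va_list ap;
	int err;

	va_start(ap, fmt);
	err = shrinker_debugfs_name_alloc(shrinker, fmt, ap);
	va_end(ap);

	return err;
}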
b64e74e9 CH |
1483 | /* Only track the nodes of mappings with shadow entries */ |
1484 | void workingset_update_node(struct xa_node *node); | |
1485 | extern struct list_lru shadow_nodes; | |
1486 | ||
db971418 | 1487 | #endif /* __MM_INTERNAL_H */ |