/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

#include <linux/atomic.h>
#include <linux/huge_mm.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/userfaultfd_k.h>
#include <linux/swapops.h>
/**
 * folio_is_file_lru - Should the folio be on a file LRU or anon LRU?
 * @folio: The folio to test.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the folio is last deleted from the LRU, which
 * could be as far down as __page_cache_release.
 *
 * Return: An integer (not a boolean!) used to sort a folio onto the
 * right LRU list and to account folios correctly.
 * 1 if @folio is a regular filesystem backed page cache folio
 * or a lazily freed anonymous folio (e.g. via MADV_FREE).
 * 0 if @folio is a normal anonymous folio, a tmpfs folio or otherwise
 * ram or swap backed folio.
 */
static inline int folio_is_file_lru(struct folio *folio)
{
	return !folio_test_swapbacked(folio);
}

static inline int page_is_file_lru(struct page *page)
{
	return folio_is_file_lru(page_folio(page));
}
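/*
 * The non-boolean return value matters: callers below use it directly as an
 * array index, e.g. (illustrative only):
 *
 *	int type = folio_is_file_lru(folio);	/. 0 == anon, 1 == file ./
 *	lru = type * LRU_INACTIVE_FILE;		/. picks the right base list ./
 */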
static __always_inline void __update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);

	lockdep_assert_held(&lruvec->lru_lock);
	WARN_ON_ONCE(nr_pages != (int)nr_pages);

	__mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
	__mod_zone_page_state(&pgdat->node_zones[zid],
				NR_ZONE_LRU_BASE + lru, nr_pages);
}
static __always_inline void update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	__update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
	mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}
/**
 * __folio_clear_lru_flags - Clear page lru flags before releasing a page.
 * @folio: The folio that was on lru and now has a zero reference.
 */
static __always_inline void __folio_clear_lru_flags(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio);

	__folio_clear_lru(folio);

	/* this shouldn't happen, so leave the flags to bad_page() */
	if (folio_test_active(folio) && folio_test_unevictable(folio))
		return;

	__folio_clear_active(folio);
	__folio_clear_unevictable(folio);
}
/**
 * folio_lru_list - Which LRU list should a folio be on?
 * @folio: The folio to test.
 *
 * Return: The LRU list a folio should be on, as an index
 * into the array of LRU lists.
 */
static __always_inline enum lru_list folio_lru_list(struct folio *folio)
{
	enum lru_list lru;

	VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);

	if (folio_test_unevictable(folio))
		return LRU_UNEVICTABLE;

	lru = folio_is_file_lru(folio) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
	if (folio_test_active(folio))
		lru += LRU_ACTIVE;

	return lru;
}
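/*
 * For illustration, the mapping from folio state to the returned list is:
 *
 *	PG_unevictable		-> LRU_UNEVICTABLE
 *	anon, !PG_active	-> LRU_INACTIVE_ANON
 *	anon,  PG_active	-> LRU_ACTIVE_ANON
 *	file, !PG_active	-> LRU_INACTIVE_FILE
 *	file,  PG_active	-> LRU_ACTIVE_FILE
 *
 * where "file" means folio_is_file_lru() returned 1 (see above).
 */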
#ifdef CONFIG_LRU_GEN

#ifdef CONFIG_LRU_GEN_ENABLED
static inline bool lru_gen_enabled(void)
{
	DECLARE_STATIC_KEY_TRUE(lru_gen_caps[NR_LRU_GEN_CAPS]);

	return static_branch_likely(&lru_gen_caps[LRU_GEN_CORE]);
}
#else
static inline bool lru_gen_enabled(void)
{
	DECLARE_STATIC_KEY_FALSE(lru_gen_caps[NR_LRU_GEN_CAPS]);

	return static_branch_unlikely(&lru_gen_caps[LRU_GEN_CORE]);
}
#endif
static inline bool lru_gen_in_fault(void)
{
	return current->in_lru_fault;
}

static inline int lru_gen_from_seq(unsigned long seq)
{
	return seq % MAX_NR_GENS;
}

static inline int lru_hist_from_seq(unsigned long seq)
{
	return seq % NR_HIST_GENS;
}

static inline int lru_tier_from_refs(int refs)
{
	VM_WARN_ON_ONCE(refs > BIT(LRU_REFS_WIDTH));

	/* see the comment in folio_lru_refs() */
	return order_base_2(refs + 1);
}
static inline int folio_lru_refs(struct folio *folio)
{
	unsigned long flags = READ_ONCE(folio->flags);
	bool workingset = flags & BIT(PG_workingset);

	/*
	 * Return the number of accesses beyond PG_referenced, i.e., N-1 if the
	 * total number of accesses is N>1, since N=0,1 both map to the first
	 * tier. lru_tier_from_refs() will account for this off-by-one. Also see
	 * the comment on MAX_NR_TIERS.
	 */
	return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + workingset;
}
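/*
 * Putting the two helpers together, a rough worked example (the reachable
 * values depend on LRU_REFS_WIDTH, so treat this as a sketch):
 * lru_tier_from_refs() computes order_base_2(refs + 1), which groups folios
 * by powers of two:
 *
 *	refs 0		-> tier 0
 *	refs 1		-> tier 1
 *	refs 2-3	-> tier 2
 *	refs 4-7	-> tier 3
 */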
static inline int folio_lru_gen(struct folio *folio)
{
	unsigned long flags = READ_ONCE(folio->flags);

	return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
}
static inline bool lru_gen_is_active(struct lruvec *lruvec, int gen)
{
	unsigned long max_seq = lruvec->lrugen.max_seq;

	VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);

	/* see the comment on MIN_NR_GENS */
	return gen == lru_gen_from_seq(max_seq) || gen == lru_gen_from_seq(max_seq - 1);
}
static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *folio,
				       int old_gen, int new_gen)
{
	int type = folio_is_file_lru(folio);
	int zone = folio_zonenum(folio);
	int delta = folio_nr_pages(folio);
	enum lru_list lru = type * LRU_INACTIVE_FILE;
	struct lru_gen_folio *lrugen = &lruvec->lrugen;

	VM_WARN_ON_ONCE(old_gen != -1 && old_gen >= MAX_NR_GENS);
	VM_WARN_ON_ONCE(new_gen != -1 && new_gen >= MAX_NR_GENS);
	VM_WARN_ON_ONCE(old_gen == -1 && new_gen == -1);

	if (old_gen >= 0)
		WRITE_ONCE(lrugen->nr_pages[old_gen][type][zone],
			   lrugen->nr_pages[old_gen][type][zone] - delta);
	if (new_gen >= 0)
		WRITE_ONCE(lrugen->nr_pages[new_gen][type][zone],
			   lrugen->nr_pages[new_gen][type][zone] + delta);

	/* addition */
	if (old_gen < 0) {
		if (lru_gen_is_active(lruvec, new_gen))
			lru += LRU_ACTIVE;
		__update_lru_size(lruvec, lru, zone, delta);
		return;
	}

	/* deletion */
	if (new_gen < 0) {
		if (lru_gen_is_active(lruvec, old_gen))
			lru += LRU_ACTIVE;
		__update_lru_size(lruvec, lru, zone, -delta);
		return;
	}

	/* promotion */
	if (!lru_gen_is_active(lruvec, old_gen) && lru_gen_is_active(lruvec, new_gen)) {
		__update_lru_size(lruvec, lru, zone, -delta);
		__update_lru_size(lruvec, lru + LRU_ACTIVE, zone, delta);
	}

	/* demotion requires isolation, e.g., lru_deactivate_fn() */
	VM_WARN_ON_ONCE(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen));
}
static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	unsigned long seq;
	unsigned long flags;
	int gen = folio_lru_gen(folio);
	int type = folio_is_file_lru(folio);
	int zone = folio_zonenum(folio);
	struct lru_gen_folio *lrugen = &lruvec->lrugen;

	VM_WARN_ON_ONCE_FOLIO(gen != -1, folio);

	if (folio_test_unevictable(folio) || !lrugen->enabled)
		return false;
	/*
	 * There are three common cases for this page:
	 * 1. If it's hot, e.g., freshly faulted in or previously hot and
	 *    migrated, add it to the youngest generation.
	 * 2. If it's cold but can't be evicted immediately, i.e., an anon page
	 *    not in swapcache or a dirty page pending writeback, add it to the
	 *    second oldest generation.
	 * 3. Everything else (clean, cold) is added to the oldest generation.
	 */
	if (folio_test_active(folio))
		seq = lrugen->max_seq;
	else if ((type == LRU_GEN_ANON && !folio_test_swapcache(folio)) ||
		 (folio_test_reclaim(folio) &&
		  (folio_test_dirty(folio) || folio_test_writeback(folio))))
		seq = lrugen->min_seq[type] + 1;
	else
		seq = lrugen->min_seq[type];

	gen = lru_gen_from_seq(seq);
	flags = (gen + 1UL) << LRU_GEN_PGOFF;
	/* see the comment on MIN_NR_GENS about PG_active */
	set_mask_bits(&folio->flags, LRU_GEN_MASK | BIT(PG_active), flags);

	lru_gen_update_size(lruvec, folio, -1, gen);
	/* for folio_rotate_reclaimable() */
	if (reclaiming)
		list_add_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
	else
		list_add(&folio->lru, &lrugen->folios[gen][type][zone]);

	return true;
}
static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	unsigned long flags;
	int gen = folio_lru_gen(folio);

	if (gen < 0)
		return false;

	VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
	VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);

	/* for folio_migrate_flags() */
	flags = !reclaiming && lru_gen_is_active(lruvec, gen) ? BIT(PG_active) : 0;
	flags = set_mask_bits(&folio->flags, LRU_GEN_MASK, flags);
	gen = ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;

	lru_gen_update_size(lruvec, folio, gen, -1);
	list_del(&folio->lru);

	return true;
}
#else /* !CONFIG_LRU_GEN */

static inline bool lru_gen_enabled(void)
{
	return false;
}

static inline bool lru_gen_in_fault(void)
{
	return false;
}

static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	return false;
}

static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	return false;
}

#endif /* CONFIG_LRU_GEN */
static __always_inline
void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	if (lru_gen_add_folio(lruvec, folio, false))
		return;

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	if (lru != LRU_UNEVICTABLE)
		list_add(&folio->lru, &lruvec->lists[lru]);
}
static __always_inline void add_page_to_lru_list(struct page *page,
				struct lruvec *lruvec)
{
	lruvec_add_folio(lruvec, page_folio(page));
}
static __always_inline
void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	if (lru_gen_add_folio(lruvec, folio, true))
		return;

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	/* This is not expected to be used on LRU_UNEVICTABLE */
	list_add_tail(&folio->lru, &lruvec->lists[lru]);
}
static __always_inline
void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	if (lru_gen_del_folio(lruvec, folio, false))
		return;

	if (lru != LRU_UNEVICTABLE)
		list_del(&folio->lru);
	update_lru_size(lruvec, lru, folio_zonenum(folio),
			-folio_nr_pages(folio));
}
static __always_inline void del_page_from_lru_list(struct page *page,
				struct lruvec *lruvec)
{
	lruvec_del_folio(lruvec, page_folio(page));
}
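/*
 * A minimal usage sketch (assumes the caller already holds lruvec->lru_lock
 * and a reference on the folio; the surrounding code is hypothetical):
 * moving a folio from the inactive to the active list boils down to
 *
 *	lruvec_del_folio(lruvec, folio);
 *	folio_set_active(folio);
 *	lruvec_add_folio(lruvec, folio);
 *
 * When CONFIG_LRU_GEN is enabled, the lru_gen_*_folio() hooks above intercept
 * both calls and keep the generation counters consistent.
 */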
#ifdef CONFIG_ANON_VMA_NAME
/*
 * mmap_lock should be read-locked when calling anon_vma_name(). Caller should
 * either keep holding the lock while using the returned pointer or it should
 * raise anon_vma_name refcount before releasing the lock.
 */
extern struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma);
extern struct anon_vma_name *anon_vma_name_alloc(const char *name);
extern void anon_vma_name_free(struct kref *kref);
/* mmap_lock should be read-locked */
static inline void anon_vma_name_get(struct anon_vma_name *anon_name)
{
	if (anon_name)
		kref_get(&anon_name->kref);
}

static inline void anon_vma_name_put(struct anon_vma_name *anon_name)
{
	if (anon_name)
		kref_put(&anon_name->kref, anon_vma_name_free);
}
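/*
 * For illustration (hypothetical caller): per the locking rule above, a user
 * that wants to keep the name after dropping mmap_lock would do
 *
 *	mmap_read_lock(mm);
 *	anon_name = anon_vma_name(vma);
 *	anon_vma_name_get(anon_name);
 *	mmap_read_unlock(mm);
 *	...
 *	anon_vma_name_put(anon_name);
 */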
static inline
struct anon_vma_name *anon_vma_name_reuse(struct anon_vma_name *anon_name)
{
	/* Prevent anon_name refcount saturation early on */
	if (kref_read(&anon_name->kref) < REFCOUNT_MAX) {
		anon_vma_name_get(anon_name);
		return anon_name;
	}
	return anon_vma_name_alloc(anon_name->name);
}
static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma)
{
	struct anon_vma_name *anon_name = anon_vma_name(orig_vma);

	if (anon_name)
		new_vma->anon_name = anon_vma_name_reuse(anon_name);
}
static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
	/*
	 * Not using anon_vma_name because it generates a warning if mmap_lock
	 * is not held, which might be the case here.
	 */
	anon_vma_name_put(vma->anon_name);
}
static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	if (anon_name1 == anon_name2)
		return true;

	return anon_name1 && anon_name2 &&
		!strcmp(anon_name1->name, anon_name2->name);
}
#else /* CONFIG_ANON_VMA_NAME */
static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct anon_vma_name *anon_vma_name_alloc(const char *name)
{
	return NULL;
}

static inline void anon_vma_name_get(struct anon_vma_name *anon_name) {}
static inline void anon_vma_name_put(struct anon_vma_name *anon_name) {}
static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma) {}
static inline void free_anon_vma_name(struct vm_area_struct *vma) {}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	return true;
}

#endif /* CONFIG_ANON_VMA_NAME */
static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_set(&mm->tlb_flush_pending, 0);
}
static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_inc(&mm->tlb_flush_pending);
	/*
	 * The only time this value is relevant is when there are indeed pages
	 * to flush. And we'll only flush pages after changing them, which
	 * requires the PTL.
	 *
	 * So the ordering here is:
	 *
	 *	atomic_inc(&mm->tlb_flush_pending);
	 *	spin_lock(&ptl);
	 *	...
	 *	set_pte_at();
	 *	spin_unlock(&ptl);
	 *
	 *				spin_lock(&ptl)
	 *				mm_tlb_flush_pending();
	 *				....
	 *				spin_unlock(&ptl);
	 *
	 *	flush_tlb_range();
	 *	atomic_dec(&mm->tlb_flush_pending);
	 *
	 * Where the increment is constrained by the PTL unlock, it thus
	 * ensures that the increment is visible if the PTE modification is
	 * visible. After all, if there is no PTE modification, nobody cares
	 * about TLB flushes either.
	 *
	 * This very much relies on users (mm_tlb_flush_pending() and
	 * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
	 * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
	 * locks (PPC) the unlock of one doesn't order against the lock of
	 * another.
	 *
	 * The decrement is ordered by the flush_tlb_range(), such that
	 * mm_tlb_flush_pending() will not return false unless all flushes have
	 * completed.
	 */
}
static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * See inc_tlb_flush_pending().
	 *
	 * This cannot be smp_mb__before_atomic() because smp_mb() simply does
	 * not order against TLB invalidate completion, which is what we need.
	 *
	 * Therefore we must rely on tlb_flush_*() to guarantee order.
	 */
	atomic_dec(&mm->tlb_flush_pending);
}
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * Must be called after having acquired the PTL; orders against that
	 * PTL's release and therefore ensures that if we observe the modified
	 * PTE we must also observe the increment from inc_tlb_flush_pending().
	 *
	 * That is, it only guarantees to return true if there is a flush
	 * pending for _this_ PTL.
	 */
	return atomic_read(&mm->tlb_flush_pending);
}
static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
	/*
	 * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
	 * for which there is a TLB flush pending in order to guarantee
	 * we've seen both that PTE modification and the increment.
	 *
	 * (no requirement on actually still holding the PTL, that is irrelevant)
	 */
	return atomic_read(&mm->tlb_flush_pending) > 1;
}
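/*
 * A condensed sketch of the protocol described above, from the point of view
 * of a hypothetical unmapping path (the surrounding steps are illustrative,
 * not part of this header):
 *
 *	inc_tlb_flush_pending(mm);
 *	spin_lock(ptl);
 *	clear the PTEs;
 *	spin_unlock(ptl);
 *	flush_tlb_range(vma, start, end);
 *	dec_tlb_flush_pending(mm);
 *
 * Any other thread that takes the same PTL and sees the cleared PTEs is then
 * guaranteed to see mm_tlb_flush_pending() != 0.
 */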
/*
 * If this pte is wr-protected by uffd-wp in any form, arm the special pte to
 * replace a none pte. NOTE! This should only be called when *pte is already
 * cleared so we will never accidentally replace something valuable. Meanwhile
 * none pte also means we are not demoting the pte so tlb flush is not needed.
 * E.g., when pte cleared the caller should have taken care of the tlb flush.
 *
 * Must be called with pgtable lock held so that no thread will see the none
 * pte, and if they see it, they'll fault and serialize at the pgtable lock.
 *
 * This function is a no-op if PTE_MARKER_UFFD_WP is not enabled.
 */
static inline void
pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
			      pte_t *pte, pte_t pteval)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	bool arm_uffd_pte = false;

	/* The current status of the pte should be "cleared" before calling */
	WARN_ON_ONCE(!pte_none(*pte));

	if (vma_is_anonymous(vma) || !userfaultfd_wp(vma))
		return;

	/* A uffd-wp wr-protected normal pte */
	if (unlikely(pte_present(pteval) && pte_uffd_wp(pteval)))
		arm_uffd_pte = true;

	/*
	 * A uffd-wp wr-protected swap pte. Note: this should even cover an
	 * existing pte marker with uffd-wp bit set.
	 */
	if (unlikely(pte_swp_uffd_wp_any(pteval)))
		arm_uffd_pte = true;

	if (unlikely(arm_uffd_pte))
		set_pte_at(vma->vm_mm, addr, pte,
			   make_pte_marker(PTE_MARKER_UFFD_WP));
#endif
}
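/*
 * Illustrative call site (hypothetical, simplified from a zap-style path):
 * the pte is cleared first, then the marker is installed if needed, all under
 * the same pgtable lock:
 *
 *	pte_t pteval = ptep_get_and_clear(vma->vm_mm, addr, pte);
 *	...
 *	pte_install_uffd_wp_if_needed(vma, addr, pte, pteval);
 */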
static inline bool vma_has_recency(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))
		return false;

	if (vma->vm_file && (vma->vm_file->f_mode & FMODE_NOREUSE))
		return false;

	return true;
}