/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins 2003, 2004
 */
/*
 * Lock ordering in mm:
 *
 * inode->i_rwsem	(while writing or truncating, not reading or faulting)
 *   mapping->invalidate_lock (in filemap_fault)
 *     hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share, see hugetlbfs below)
 *       mapping->i_mmap_rwsem
 *         mm->page_table_lock or pte_lock
 *           swap_lock (in swap_duplicate, swap_info_get)
 *             mmlist_lock (in mmput, drain_mmlist and others)
 *             mapping->private_lock (in block_dirty_folio)
 *               folio_lock_memcg move_lock (in block_dirty_folio)
 *                 i_pages lock (widely used)
 *                   lruvec->lru_lock (in folio_lruvec_lock_irq)
 *             inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 *             bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
 *               sb_lock (within inode_lock in fs/fs-writeback.c)
 *               i_pages lock (widely used, in set_page_dirty,
 *                             in arch-dependent flush_dcache_mmap_lock,
 *                             within bdi.wb->list_lock in __sync_single_inode)
 *
 * anon_vma->rwsem,mapping->i_mmap_rwsem (memory_failure, collect_procs_anon)
 *
 * hugetlbfs PageHuge() take locks in this order:
 *   hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
 *     vma_lock (hugetlb specific lock for pmd_sharing)
 *       mapping->i_mmap_rwsem (also used for hugetlb pmd sharing)
 */
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/backing-dev.h>
#include <linux/page_idle.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/mm_inline.h>

#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/tlb.h>
#include <trace/events/migrate.h>
static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;
static inline struct anon_vma *anon_vma_alloc(void)
{
	struct anon_vma *anon_vma;

	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
	if (anon_vma) {
		atomic_set(&anon_vma->refcount, 1);
		anon_vma->num_children = 0;
		anon_vma->num_active_vmas = 0;
		anon_vma->parent = anon_vma;
		/*
		 * Initialise the anon_vma root to point to itself. If called
		 * from fork, the root will be reset to the parent's anon_vma.
		 */
		anon_vma->root = anon_vma;
	}

	return anon_vma;
}
static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	VM_BUG_ON(atomic_read(&anon_vma->refcount));

	/*
	 * Synchronize against folio_lock_anon_vma_read() such that
	 * we can safely hold the lock without the anon_vma getting
	 * freed.
	 *
	 * Relies on the full mb implied by the atomic_dec_and_test() from
	 * put_anon_vma() against the acquire barrier implied by
	 * down_read_trylock() from folio_lock_anon_vma_read(). This orders:
	 *
	 * folio_lock_anon_vma_read()	VS	put_anon_vma()
	 *   down_read_trylock()		  atomic_dec_and_test()
	 *   atomic_read()			  rwsem_is_locked()
	 *
	 * LOCK should suffice since the actual taking of the lock must
	 * happen _before_ what follows.
	 */
	might_sleep();
	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
		anon_vma_lock_write(anon_vma);
		anon_vma_unlock_write(anon_vma);
	}

	kmem_cache_free(anon_vma_cachep, anon_vma);
}
static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
{
	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
}

static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}
static void anon_vma_chain_link(struct vm_area_struct *vma,
				struct anon_vma_chain *avc,
				struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	list_add(&avc->same_vma, &vma->anon_vma_chain);
	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
}
/*
 * __anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, which
 * is handled inline by anon_vma_prepare(). But if
 * not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in folio_lock_anon_vma_read()
 * and that may actually touch the rwsem even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 */
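/*
 * Illustrative sketch (not part of this file): the usual caller pattern.
 * Anonymous fault handlers call anon_vma_prepare() before installing a
 * brand-new anonymous folio; the handler name below is hypothetical and
 * error handling is reduced to the bare minimum.
 *
 *	static vm_fault_t example_anon_fault(struct vm_fault *vmf)
 *	{
 *		struct vm_area_struct *vma = vmf->vma;
 *		struct folio *folio;
 *
 *		if (unlikely(anon_vma_prepare(vma)))
 *			return VM_FAULT_OOM;
 *		folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
 *		if (!folio)
 *			return VM_FAULT_OOM;
 *		__folio_mark_uptodate(folio);
 *		folio_add_new_anon_rmap(folio, vma, vmf->address);
 *		folio_add_lru_vma(folio, vma);
 *		return 0;	// PTE installation elided
 *	}
 */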
int __anon_vma_prepare(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct anon_vma *anon_vma, *allocated;
	struct anon_vma_chain *avc;

	mmap_assert_locked(mm);
	might_sleep();

	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_enomem;

	anon_vma = find_mergeable_anon_vma(vma);
	allocated = NULL;
	if (!anon_vma) {
		anon_vma = anon_vma_alloc();
		if (unlikely(!anon_vma))
			goto out_enomem_free_avc;
		anon_vma->num_children++; /* self-parent link for new root */
		allocated = anon_vma;
	}

	anon_vma_lock_write(anon_vma);
	/* page_table_lock to protect against threads */
	spin_lock(&mm->page_table_lock);
	if (likely(!vma->anon_vma)) {
		vma->anon_vma = anon_vma;
		anon_vma_chain_link(vma, avc, anon_vma);
		anon_vma->num_active_vmas++;
		allocated = NULL;
		avc = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	anon_vma_unlock_write(anon_vma);

	if (unlikely(allocated))
		put_anon_vma(allocated);
	if (unlikely(avc))
		anon_vma_chain_free(avc);

	return 0;

 out_enomem_free_avc:
	anon_vma_chain_free(avc);
 out_enomem:
	return -ENOMEM;
}
/*
 * This is a useful helper function for locking the anon_vma root as
 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
 * have the same root.
 *
 * Such anon_vma's should have the same root, so you'd expect to see
 * just a single mutex_lock for the whole traversal.
 */
static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root,
						  struct anon_vma *anon_vma)
{
	struct anon_vma *new_root = anon_vma->root;

	if (new_root != root) {
		if (WARN_ON_ONCE(root))
			up_write(&root->rwsem);
		root = new_root;
		down_write(&root->rwsem);
	}
	return root;
}

static inline void unlock_anon_vma_root(struct anon_vma *root)
{
	if (root)
		up_write(&root->rwsem);
}
/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 *
 * anon_vma_clone() is called by vma_expand(), vma_merge(), __split_vma(),
 * copy_vma() and anon_vma_fork(). The first four want an exact copy of src,
 * while the last one, anon_vma_fork(), may try to reuse an existing anon_vma
 * to prevent endless growth of anon_vma. Since dst->anon_vma is set to NULL
 * before the call, we can identify this case by checking
 * (!dst->anon_vma && src->anon_vma).
 *
 * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find
 * and reuse an existing anon_vma which has no vmas and only one child
 * anon_vma. This prevents degradation of the anon_vma hierarchy to an endless
 * linear chain in the case of a constantly forking task. On the other hand,
 * an anon_vma with more than one child isn't reused even if there is no alive
 * vma, so the rmap walker has a good chance of avoiding scanning the whole
 * hierarchy when it searches where the page is mapped.
 */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	struct anon_vma_chain *avc, *pavc;
	struct anon_vma *root = NULL;

	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma;

		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!avc)) {
			unlock_anon_vma_root(root);
			root = NULL;
			avc = anon_vma_chain_alloc(GFP_KERNEL);
			if (!avc)
				goto enomem_failure;
		}
		anon_vma = pavc->anon_vma;
		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_chain_link(dst, avc, anon_vma);

		/*
		 * Reuse existing anon_vma if it has no vma and only one
		 * anon_vma child.
		 *
		 * Root anon_vma is never reused:
		 * it has self-parent reference and at least one child.
		 */
		if (!dst->anon_vma && src->anon_vma &&
		    anon_vma->num_children < 2 &&
		    anon_vma->num_active_vmas == 0)
			dst->anon_vma = anon_vma;
	}
	if (dst->anon_vma)
		dst->anon_vma->num_active_vmas++;
	unlock_anon_vma_root(root);
	return 0;

 enomem_failure:
	/*
	 * dst->anon_vma is dropped here otherwise its num_active_vmas can
	 * be incorrectly decremented in unlink_anon_vmas().
	 * We can safely do this because callers of anon_vma_clone() don't care
	 * about dst->anon_vma if anon_vma_clone() failed.
	 */
	dst->anon_vma = NULL;
	unlink_anon_vmas(dst);
	return -ENOMEM;
}
/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;
	int error;

	/* Don't bother if the parent process has no anon_vma here. */
	if (!pvma->anon_vma)
		return 0;

	/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
	vma->anon_vma = NULL;

	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find non-COWed pages in child processes.
	 */
	error = anon_vma_clone(vma, pvma);
	if (error)
		return error;

	/* An existing anon_vma has been reused, all done then. */
	if (vma->anon_vma)
		return 0;

	/* Then add our own anon_vma. */
	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		goto out_error;
	anon_vma->num_active_vmas++;
	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_error_free_anon_vma;

	/*
	 * The root anon_vma's rwsem is the lock actually used when we
	 * lock any of the anon_vmas in this anon_vma tree.
	 */
	anon_vma->root = pvma->anon_vma->root;
	anon_vma->parent = pvma->anon_vma;
	/*
	 * With refcounts, an anon_vma can stay around longer than the
	 * process it belongs to. The root anon_vma needs to be pinned until
	 * this anon_vma is freed, because the lock lives in the root.
	 */
	get_anon_vma(anon_vma->root);
	/* Mark this anon_vma as the one where our new (COWed) pages go. */
	vma->anon_vma = anon_vma;
	anon_vma_lock_write(anon_vma);
	anon_vma_chain_link(vma, avc, anon_vma);
	anon_vma->parent->num_children++;
	anon_vma_unlock_write(anon_vma);

	return 0;

 out_error_free_anon_vma:
	put_anon_vma(anon_vma);
 out_error:
	unlink_anon_vmas(vma);
	return -ENOMEM;
}
void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;
	struct anon_vma *root = NULL;

	/*
	 * Unlink each anon_vma chained to the VMA. This list is ordered
	 * from newest to oldest, ensuring the root anon_vma gets freed last.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);

		/*
		 * Leave empty anon_vmas on the list - we'll need
		 * to free them outside the lock.
		 */
		if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
			anon_vma->parent->num_children--;
			continue;
		}

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
	if (vma->anon_vma) {
		vma->anon_vma->num_active_vmas--;

		/*
		 * vma would still be needed after unlink, and anon_vma will be
		 * prepared for the new allocation.
		 */
		vma->anon_vma = NULL;
	}
	unlock_anon_vma_root(root);

	/*
	 * Iterate the list once more, it now only contains empty and unlinked
	 * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
	 * needing to write-acquire the anon_vma->root->rwsem.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		VM_WARN_ON(anon_vma->num_children);
		VM_WARN_ON(anon_vma->num_active_vmas);
		put_anon_vma(anon_vma);

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}
static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	init_rwsem(&anon_vma->rwsem);
	atomic_set(&anon_vma->refcount, 0);
	anon_vma->rb_root = RB_ROOT_CACHED;
}
void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
			anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
			SLAB_PANIC|SLAB_ACCOUNT);
}
/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * Since there is no serialization what so ever against folio_remove_rmap_*()
 * the best this function can do is return a refcount increased anon_vma
 * that might have been relevant to this page.
 *
 * The page might have been remapped to a different anon_vma or the anon_vma
 * returned may already be freed (and even reused).
 *
 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 * ensure that any anon_vma obtained from the page will still be valid for as
 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 *
 * All users of this function must be very careful when walking the anon_vma
 * chain and verify that the page in question is indeed mapped in it
 * [ something equivalent to page_mapped_in_vma() ].
 *
 * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
 * folio_remove_rmap_*() that the anon_vma pointer from page->mapping is valid
 * if there is a mapcount, we can dereference the anon_vma after observing
 * those bits.
 *
 * NOTE: the caller should normally hold folio lock when calling this. If
 * not, the caller needs to double check the anon_vma didn't change after
 * taking the anon_vma lock for either read or write (UFFDIO_MOVE can modify it
 * concurrently without folio lock protection). See folio_lock_anon_vma_read()
 * which has already covered that, and comment above remap_pages().
 */
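/*
 * Illustrative sketch (not part of this file): the lookup pattern the comment
 * above describes, stripped of the folio specifics. "obj" and "slot" are
 * hypothetical; the point is that with SLAB_TYPESAFE_BY_RCU the memory stays
 * type-stable under rcu_read_lock(), so a speculative atomic_inc_not_zero()
 * on the refcount is what validates the pointer.
 *
 *	rcu_read_lock();
 *	obj = READ_ONCE(*slot);
 *	if (obj && !atomic_inc_not_zero(&obj->refcount))
 *		obj = NULL;	// freed (and maybe reused): bail out
 *	rcu_read_unlock();
 *	// recheck the "is it still ours?" condition, e.g. folio_mapped(),
 *	// before trusting anything beyond the refcount we now hold.
 */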
struct anon_vma *folio_get_anon_vma(struct folio *folio)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!folio_mapped(folio))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	/*
	 * If this folio is still mapped, then its anon_vma cannot have been
	 * freed. But if it has been unmapped, we have no security against the
	 * anon_vma structure being freed and reused (for another anon_vma:
	 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
	 * above cannot corrupt).
	 */
	if (!folio_mapped(folio)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}
out:
	rcu_read_unlock();

	return anon_vma;
}
/*
 * Similar to folio_get_anon_vma() except it locks the anon_vma.
 *
 * It's a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 * reference like with folio_get_anon_vma() and then block on the mutex
 * on the !rwc->try_lock case.
 */
struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
					  struct rmap_walk_control *rwc)
{
	struct anon_vma *anon_vma = NULL;
	struct anon_vma *root_anon_vma;
	unsigned long anon_mapping;

retry:
	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!folio_mapped(folio))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	root_anon_vma = READ_ONCE(anon_vma->root);
	if (down_read_trylock(&root_anon_vma->rwsem)) {
		/*
		 * folio_move_anon_rmap() might have changed the anon_vma as we
		 * might not hold the folio lock here.
		 */
		if (unlikely((unsigned long)READ_ONCE(folio->mapping) !=
			     anon_mapping)) {
			up_read(&root_anon_vma->rwsem);
			rcu_read_unlock();
			goto retry;
		}

		/*
		 * If the folio is still mapped, then this anon_vma is still
		 * its anon_vma, and holding the mutex ensures that it will
		 * not go away, see anon_vma_free().
		 */
		if (!folio_mapped(folio)) {
			up_read(&root_anon_vma->rwsem);
			anon_vma = NULL;
		}
		goto out;
	}

	if (rwc && rwc->try_lock) {
		anon_vma = NULL;
		rwc->contended = true;
		goto out;
	}

	/* trylock failed, we got to sleep */
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	if (!folio_mapped(folio)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}

	/* we pinned the anon_vma, its safe to sleep */
	rcu_read_unlock();
	anon_vma_lock_read(anon_vma);

	/*
	 * folio_move_anon_rmap() might have changed the anon_vma as we might
	 * not hold the folio lock here.
	 */
	if (unlikely((unsigned long)READ_ONCE(folio->mapping) !=
		     anon_mapping)) {
		anon_vma_unlock_read(anon_vma);
		put_anon_vma(anon_vma);
		anon_vma = NULL;
		goto retry;
	}

	if (atomic_dec_and_test(&anon_vma->refcount)) {
		/*
		 * Oops, we held the last refcount, release the lock
		 * and bail -- can't simply use put_anon_vma() because
		 * we'll deadlock on the anon_vma_lock_write() recursion.
		 */
		anon_vma_unlock_read(anon_vma);
		__put_anon_vma(anon_vma);
		anon_vma = NULL;
	}

	return anon_vma;

out:
	rcu_read_unlock();
	return anon_vma;
}
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/*
 * Flush TLB entries for recently unmapped pages from remote CPUs. It is
 * important if a PTE was dirty when it was unmapped that it's flushed
 * before any IO is initiated on the page to prevent lost writes. Similarly,
 * it must be flushed before freeing to prevent data leakage.
 */
void try_to_unmap_flush(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (!tlb_ubc->flush_required)
		return;

	arch_tlbbatch_flush(&tlb_ubc->arch);
	tlb_ubc->flush_required = false;
	tlb_ubc->writable = false;
}
/* Flush iff there are potentially writable TLB entries that can race with IO */
void try_to_unmap_flush_dirty(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (tlb_ubc->writable)
		try_to_unmap_flush();
}
/*
 * Bits 0-14 of mm->tlb_flush_batched record pending generations.
 * Bits 16-30 of mm->tlb_flush_batched record flushed generations.
 */
#define TLB_FLUSH_BATCH_FLUSHED_SHIFT	16
#define TLB_FLUSH_BATCH_PENDING_MASK	\
	((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1)
#define TLB_FLUSH_BATCH_PENDING_LARGE	\
	(TLB_FLUSH_BATCH_PENDING_MASK / 2)
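/*
 * Illustrative sketch (not part of this file): how the pending/flushed
 * generation counters packed into one word behave. The helpers below are
 * hypothetical and exist only to show the encoding used by the macros above.
 *
 *	static inline int example_pending(int batch)
 *	{
 *		return batch & TLB_FLUSH_BATCH_PENDING_MASK;
 *	}
 *
 *	static inline int example_flushed(int batch)
 *	{
 *		return batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT;
 *	}
 *
 * A batch value of ((3 << TLB_FLUSH_BATCH_FLUSHED_SHIFT) | 5) therefore means
 * five deferred flushes were requested and three generations have already
 * been flushed; flush_tlb_batched_pending() acts only when the two counts
 * differ.
 */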
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
				      unsigned long uaddr)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
	int batch;
	bool writable = pte_dirty(pteval);

	if (!pte_accessible(mm, pteval))
		return;

	arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr);
	tlb_ubc->flush_required = true;

	/*
	 * Ensure compiler does not re-order the setting of tlb_flush_batched
	 * before the PTE is cleared.
	 */
	barrier();
	batch = atomic_read(&mm->tlb_flush_batched);
retry:
	if ((batch & TLB_FLUSH_BATCH_PENDING_MASK) > TLB_FLUSH_BATCH_PENDING_LARGE) {
		/*
		 * Prevent `pending' from catching up with `flushed' because of
		 * overflow. Reset `pending' and `flushed' to be 1 and 0 if
		 * `pending' becomes large.
		 */
		if (!atomic_try_cmpxchg(&mm->tlb_flush_batched, &batch, 1))
			goto retry;
	} else {
		atomic_inc(&mm->tlb_flush_batched);
	}

	/*
	 * If the PTE was dirty then it's best to assume it's writable. The
	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
	 * before the page is queued for IO.
	 */
	if (writable)
		tlb_ubc->writable = true;
}
/*
 * Returns true if the TLB flush should be deferred to the end of a batch of
 * unmap operations to reduce IPIs.
 */
static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	if (!(flags & TTU_BATCH_FLUSH))
		return false;

	return arch_tlbbatch_should_defer(mm);
}
/*
 * Reclaim unmaps pages under the PTL but does not flush the TLB prior to
 * releasing the PTL if TLB flushes are batched. It's possible for a parallel
 * operation such as mprotect or munmap to race between reclaim unmapping
 * the page and flushing the page. If this race occurs, it potentially allows
 * access to data via a stale TLB entry. Tracking all mm's that have TLB
 * batching in flight would be expensive during reclaim so instead track
 * whether TLB batching occurred in the past and if so then do a flush here
 * if required. This will cost one additional flush per reclaim cycle paid
 * by the first operation at risk such as mprotect and munmap.
 *
 * This must be called under the PTL so that an access to tlb_flush_batched
 * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
 * via the PTL.
 */
void flush_tlb_batched_pending(struct mm_struct *mm)
{
	int batch = atomic_read(&mm->tlb_flush_batched);
	int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK;
	int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT;

	if (pending != flushed) {
		arch_flush_tlb_batched_pending(mm);
		/*
		 * If the new TLB flushing is pending during flushing, leave
		 * mm->tlb_flush_batched as is, to avoid losing flushing.
		 */
		atomic_cmpxchg(&mm->tlb_flush_batched, batch,
			       pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT));
	}
}
#else
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
				      unsigned long uaddr)
{
}

static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	return false;
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
/*
 * At what user virtual address is page expected in vma?
 * Caller should check the page is actually part of the vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct folio *folio = page_folio(page);
	pgoff_t pgoff;

	if (folio_test_anon(folio)) {
		struct anon_vma *page__anon_vma = folio_anon_vma(folio);
		/*
		 * Note: swapoff's unuse_vma() is more efficient with this
		 * check, and needs it to match anon_vma when KSM is active.
		 */
		if (!vma->anon_vma || !page__anon_vma ||
		    vma->anon_vma->root != page__anon_vma->root)
			return -EFAULT;
	} else if (!vma->vm_file) {
		return -EFAULT;
	} else if (vma->vm_file->f_mapping != folio->mapping) {
		return -EFAULT;
	}

	/* The !page__anon_vma above handles KSM folios */
	pgoff = folio->index + folio_page_idx(folio, page);
	return vma_address(vma, pgoff, 1);
}
/*
 * Returns the actual pmd_t* where we expect 'address' to be mapped from, or
 * NULL if it doesn't exist. No guarantees / checks on what the pmd_t*
 * represents.
 */
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto out;

	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
out:
	return pmd;
}
struct folio_referenced_arg {
	int mapcount;
	int referenced;
	unsigned long vm_flags;
	struct mem_cgroup *memcg;
};
/*
 * arg: folio_referenced_arg will be passed
 */
static bool folio_referenced_one(struct folio *folio,
		struct vm_area_struct *vma, unsigned long address, void *arg)
{
	struct folio_referenced_arg *pra = arg;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
	int referenced = 0;
	unsigned long start = address, ptes = 0;

	while (page_vma_mapped_walk(&pvmw)) {
		address = pvmw.address;

		if (vma->vm_flags & VM_LOCKED) {
			if (!folio_test_large(folio) || !pvmw.pte) {
				/* Restore the mlock which got missed */
				mlock_vma_folio(folio, vma);
				page_vma_mapped_walk_done(&pvmw);
				pra->vm_flags |= VM_LOCKED;
				return false; /* To break the loop */
			}
			/*
			 * For large folio fully mapped to VMA, will
			 * be handled after the pvmw loop.
			 *
			 * For large folio cross VMA boundaries, it's
			 * expected to be picked by page reclaim. But
			 * should skip reference of pages which are in
			 * the range of VM_LOCKED vma. As page reclaim
			 * should just count the reference of pages out
			 * the range of VM_LOCKED vma.
			 */
			ptes++;
			pra->mapcount--;
			continue;
		}

		if (pvmw.pte) {
			if (lru_gen_enabled() &&
			    pte_young(ptep_get(pvmw.pte))) {
				lru_gen_look_around(&pvmw);
				referenced++;
			}

			if (ptep_clear_flush_young_notify(vma, address,
							  pvmw.pte))
				referenced++;
		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
			if (pmdp_clear_flush_young_notify(vma, address,
							  pvmw.pmd))
				referenced++;
		} else {
			/* unexpected pmd-mapped folio? */
			WARN_ON_ONCE(1);
		}

		pra->mapcount--;
	}

	if ((vma->vm_flags & VM_LOCKED) &&
	    folio_test_large(folio) &&
	    folio_within_vma(folio, vma)) {
		unsigned long s_align, e_align;

		s_align = ALIGN_DOWN(start, PMD_SIZE);
		e_align = ALIGN_DOWN(start + folio_size(folio) - 1, PMD_SIZE);

		/* folio doesn't cross page table boundary and fully mapped */
		if ((s_align == e_align) && (ptes == folio_nr_pages(folio))) {
			/* Restore the mlock which got missed */
			mlock_vma_folio(folio, vma);
			pra->vm_flags |= VM_LOCKED;
			return false; /* To break the loop */
		}
	}

	if (referenced)
		folio_clear_idle(folio);
	if (folio_test_clear_young(folio))
		referenced++;

	if (referenced) {
		pra->referenced++;
		pra->vm_flags |= vma->vm_flags & ~VM_LOCKED;
	}

	if (!pra->mapcount)
		return false; /* To break the loop */

	return true;
}
static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg)
{
	struct folio_referenced_arg *pra = arg;
	struct mem_cgroup *memcg = pra->memcg;

	/*
	 * Ignore references from this mapping if it has no recency. If the
	 * folio has been used in another mapping, we will catch it; if this
	 * other mapping is already gone, the unmap path will have set the
	 * referenced flag or activated the folio in zap_pte_range().
	 */
	if (!vma_has_recency(vma))
		return true;

	/*
	 * If we are reclaiming on behalf of a cgroup, skip counting on behalf
	 * of references from different cgroups.
	 */
	if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
		return true;

	return false;
}
/*
 * folio_referenced() - Test if the folio was referenced.
 * @folio: The folio to test.
 * @is_locked: Caller holds lock on the folio.
 * @memcg: target memory cgroup
 * @vm_flags: A combination of all the vma->vm_flags which referenced the folio.
 *
 * Quick test_and_clear_referenced for all mappings of a folio.
 *
 * Return: The number of mappings which referenced the folio. Return -1 if
 * the function bailed out due to rmap lock contention.
 */
int folio_referenced(struct folio *folio, int is_locked,
		     struct mem_cgroup *memcg, unsigned long *vm_flags)
{
	bool we_locked = false;
	struct folio_referenced_arg pra = {
		.mapcount = folio_mapcount(folio),
		.memcg = memcg,
	};
	struct rmap_walk_control rwc = {
		.rmap_one = folio_referenced_one,
		.arg = (void *)&pra,
		.anon_lock = folio_lock_anon_vma_read,
		.try_lock = true,
		.invalid_vma = invalid_folio_referenced_vma,
	};

	*vm_flags = 0;
	if (!pra.mapcount)
		return 0;

	if (!folio_raw_mapping(folio))
		return 0;

	if (!is_locked && (!folio_test_anon(folio) || folio_test_ksm(folio))) {
		we_locked = folio_trylock(folio);
		if (!we_locked)
			return 1;
	}

	rmap_walk(folio, &rwc);
	*vm_flags = pra.vm_flags;

	if (we_locked)
		folio_unlock(folio);

	return rwc.contended ? -1 : pra.referenced;
}
static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw)
{
	int cleaned = 0;
	struct vm_area_struct *vma = pvmw->vma;
	struct mmu_notifier_range range;
	unsigned long address = pvmw->address;

	/*
	 * We have to assume the worse case ie pmd for invalidation. Note that
	 * the folio can not be freed from this function.
	 */
	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, 0,
				vma->vm_mm, address, vma_address_end(pvmw));
	mmu_notifier_invalidate_range_start(&range);

	while (page_vma_mapped_walk(pvmw)) {
		int ret = 0;

		address = pvmw->address;
		if (pvmw->pte) {
			pte_t *pte = pvmw->pte;
			pte_t entry = ptep_get(pte);

			if (!pte_dirty(entry) && !pte_write(entry))
				continue;

			flush_cache_page(vma, address, pte_pfn(entry));
			entry = ptep_clear_flush(vma, address, pte);
			entry = pte_wrprotect(entry);
			entry = pte_mkclean(entry);
			set_pte_at(vma->vm_mm, address, pte, entry);
			ret = 1;
		} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			pmd_t *pmd = pvmw->pmd;
			pmd_t entry;

			if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
				continue;

			flush_cache_range(vma, address,
					  address + HPAGE_PMD_SIZE);
			entry = pmdp_invalidate(vma, address, pmd);
			entry = pmd_wrprotect(entry);
			entry = pmd_mkclean(entry);
			set_pmd_at(vma->vm_mm, address, pmd, entry);
			ret = 1;
#else
			/* unexpected pmd-mapped folio? */
			WARN_ON_ONCE(1);
#endif
		}

		if (ret)
			cleaned++;
	}

	mmu_notifier_invalidate_range_end(&range);

	return cleaned;
}
static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma,
			     unsigned long address, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC);
	int *cleaned = arg;

	*cleaned += page_vma_mkclean_one(&pvmw);

	return true;
}

static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
{
	if (vma->vm_flags & VM_SHARED)
		return false;

	return true;
}
int folio_mkclean(struct folio *folio)
{
	int cleaned = 0;
	struct address_space *mapping;
	struct rmap_walk_control rwc = {
		.arg = (void *)&cleaned,
		.rmap_one = page_mkclean_one,
		.invalid_vma = invalid_mkclean_vma,
	};

	BUG_ON(!folio_test_locked(folio));

	if (!folio_mapped(folio))
		return 0;

	mapping = folio_mapping(folio);
	if (!mapping)
		return 0;

	rmap_walk(folio, &rwc);

	return cleaned;
}
EXPORT_SYMBOL_GPL(folio_mkclean);
/*
 * pfn_mkclean_range - Cleans the PTEs (including PMDs) mapped with range of
 *                     [@pfn, @pfn + @nr_pages) at the specific offset (@pgoff)
 *                     within the @vma of shared mappings. And since clean PTEs
 *                     should also be readonly, write protects them too.
 * @pfn: start pfn.
 * @nr_pages: number of physically contiguous pages starting with @pfn.
 * @pgoff: page offset that the @pfn mapped with.
 * @vma: vma that @pfn mapped within.
 *
 * Returns the number of cleaned PTEs (including PMDs).
 */
int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
		      struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.pfn		= pfn,
		.nr_pages	= nr_pages,
		.pgoff		= pgoff,
		.vma		= vma,
		.flags		= PVMW_SYNC,
	};

	if (invalid_mkclean_vma(vma, NULL))
		return 0;

	pvmw.address = vma_address(vma, pgoff, nr_pages);
	VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma);

	return page_vma_mkclean_one(&pvmw);
}
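/*
 * Illustrative sketch (not part of this file): how a caller that tracks pfns
 * rather than folios (fsdax-style writeback, for example) would use
 * pfn_mkclean_range() while walking the mappings of a file range. The
 * surrounding i_mmap walk is only hinted at and the names are hypothetical.
 *
 *	// for each vma found in the file's i_mmap tree covering "pgoff":
 *	cleaned += pfn_mkclean_range(pfn, nr_pages, pgoff, vma);
 *	// a non-zero total means dirty, writable PTEs were write-protected
 *	// and cleaned, so the data must be written back before the range
 *	// is reported clean.
 */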
static __always_inline unsigned int __folio_add_rmap(struct folio *folio,
		struct page *page, int nr_pages, enum rmap_level level,
		int *nr_pmdmapped)
{
	atomic_t *mapped = &folio->_nr_pages_mapped;
	const int orig_nr_pages = nr_pages;
	int first = 0, nr = 0;

	__folio_rmap_sanity_checks(folio, page, nr_pages, level);

	switch (level) {
	case RMAP_LEVEL_PTE:
		if (!folio_test_large(folio)) {
			nr = atomic_inc_and_test(&page->_mapcount);
			break;
		}

		do {
			first = atomic_inc_and_test(&page->_mapcount);
			if (first) {
				first = atomic_inc_return_relaxed(mapped);
				if (first < ENTIRELY_MAPPED)
					nr++;
			}
		} while (page++, --nr_pages > 0);
		atomic_add(orig_nr_pages, &folio->_large_mapcount);
		break;
	case RMAP_LEVEL_PMD:
		first = atomic_inc_and_test(&folio->_entire_mapcount);
		if (first) {
			nr = atomic_add_return_relaxed(ENTIRELY_MAPPED, mapped);
			if (likely(nr < ENTIRELY_MAPPED + ENTIRELY_MAPPED)) {
				*nr_pmdmapped = folio_nr_pages(folio);
				nr = *nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
				/* Raced ahead of a remove and another add? */
				if (unlikely(nr < 0))
					nr = 0;
			} else {
				/* Raced ahead of a remove of ENTIRELY_MAPPED */
				nr = 0;
			}
		}
		atomic_inc(&folio->_large_mapcount);
		break;
	}
	return nr;
}
/*
 * folio_move_anon_rmap - move a folio to our anon_vma
 * @folio:	The folio to move to our anon_vma
 * @vma:	The vma the folio belongs to
 *
 * When a folio belongs exclusively to one process after a COW event,
 * that folio can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling processes.
 */
void folio_move_anon_rmap(struct folio *folio, struct vm_area_struct *vma)
{
	void *anon_vma = vma->anon_vma;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_VMA(!anon_vma, vma);

	anon_vma += PAGE_MAPPING_ANON;
	/*
	 * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
	 * simultaneously, so a concurrent reader (eg folio_referenced()'s
	 * folio_test_anon()) will not see one without the other.
	 */
	WRITE_ONCE(folio->mapping, anon_vma);
}
/*
 * __folio_set_anon - set up a new anonymous rmap for a folio
 * @folio:	The folio to set up the new anonymous rmap for.
 * @vma:	VM area to add the folio to.
 * @address:	User virtual address of the mapping
 * @exclusive:	Whether the folio is exclusive to the process.
 */
static void __folio_set_anon(struct folio *folio, struct vm_area_struct *vma,
			     unsigned long address, bool exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	/*
	 * If the folio isn't exclusive to this vma, we must use the _oldest_
	 * possible anon_vma for the folio mapping!
	 */
	if (!exclusive)
		anon_vma = anon_vma->root;

	/*
	 * page_idle does a lockless/optimistic rmap scan on folio->mapping.
	 * Make sure the compiler doesn't split the stores of anon_vma and
	 * the PAGE_MAPPING_ANON type identifier, otherwise the rmap code
	 * could mistake the mapping for a struct address_space and crash.
	 */
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma);
	folio->index = linear_page_index(vma, address);
}
/*
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @folio:	The folio containing @page.
 * @page:	the page to check the mapping of
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 */
static void __page_check_anon_rmap(struct folio *folio, struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against folio_add_anon_rmap_*() because the caller
	 * always holds the page locked.
	 *
	 * We have exclusion against folio_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to folio_add_new_anon_rmap.
	 */
	VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root,
			folio);
	VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address),
		       page);
}
static __always_inline void __folio_add_anon_rmap(struct folio *folio,
		struct page *page, int nr_pages, struct vm_area_struct *vma,
		unsigned long address, rmap_t flags, enum rmap_level level)
{
	int i, nr, nr_pmdmapped = 0;

	nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped);
	if (nr_pmdmapped)
		__lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr_pmdmapped);
	if (nr)
		__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);

	if (unlikely(!folio_test_anon(folio))) {
		VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
		/*
		 * For a PTE-mapped large folio, we only know that the single
		 * PTE is exclusive. Further, __folio_set_anon() might not get
		 * folio->index right when not given the address of the head
		 * page.
		 */
		VM_WARN_ON_FOLIO(folio_test_large(folio) &&
				 level != RMAP_LEVEL_PMD, folio);
		__folio_set_anon(folio, vma, address,
				 !!(flags & RMAP_EXCLUSIVE));
	} else if (likely(!folio_test_ksm(folio))) {
		__page_check_anon_rmap(folio, page, vma, address);
	}

	if (flags & RMAP_EXCLUSIVE) {
		switch (level) {
		case RMAP_LEVEL_PTE:
			for (i = 0; i < nr_pages; i++)
				SetPageAnonExclusive(page + i);
			break;
		case RMAP_LEVEL_PMD:
			SetPageAnonExclusive(page);
			break;
		}
	}
	for (i = 0; i < nr_pages; i++) {
		struct page *cur_page = page + i;

		/* While PTE-mapping a THP we have a PMD and a PTE mapping. */
		VM_WARN_ON_FOLIO((atomic_read(&cur_page->_mapcount) > 0 ||
				  (folio_test_large(folio) &&
				   folio_entire_mapcount(folio) > 1)) &&
				 PageAnonExclusive(cur_page), folio);
	}

	/*
	 * For large folio, only mlock it if it's fully mapped to VMA. It's
	 * not easy to check whether the large folio is fully mapped to VMA
	 * here. Only mlock normal 4K folio and leave page reclaim to handle
	 * large folio.
	 */
	if (!folio_test_large(folio))
		mlock_vma_folio(folio, vma);
}
/*
 * folio_add_anon_rmap_ptes - add PTE mappings to a page range of an anon folio
 * @folio:	The folio to add the mappings to
 * @page:	The first page to add
 * @nr_pages:	The number of pages which will be mapped
 * @vma:	The vm area in which the mappings are added
 * @address:	The user virtual address of the first page to map
 * @flags:	The rmap flags
 *
 * The page range of folio is defined by [first_page, first_page + nr_pages)
 *
 * The caller needs to hold the page table lock, and the page must be locked in
 * the anon_vma case: to serialize mapping,index checking after setting,
 * and to ensure that an anon folio is not being upgraded racily to a KSM folio
 * (but KSM folios are never downgraded).
 */
void folio_add_anon_rmap_ptes(struct folio *folio, struct page *page,
		int nr_pages, struct vm_area_struct *vma, unsigned long address,
		rmap_t flags)
{
	__folio_add_anon_rmap(folio, page, nr_pages, vma, address, flags,
			      RMAP_LEVEL_PTE);
}
/*
 * folio_add_anon_rmap_pmd - add a PMD mapping to a page range of an anon folio
 * @folio:	The folio to add the mapping to
 * @page:	The first page to add
 * @vma:	The vm area in which the mapping is added
 * @address:	The user virtual address of the first page to map
 * @flags:	The rmap flags
 *
 * The page range of folio is defined by [first_page, first_page + HPAGE_PMD_NR)
 *
 * The caller needs to hold the page table lock, and the page must be locked in
 * the anon_vma case: to serialize mapping,index checking after setting.
 */
void folio_add_anon_rmap_pmd(struct folio *folio, struct page *page,
		struct vm_area_struct *vma, unsigned long address, rmap_t flags)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	__folio_add_anon_rmap(folio, page, HPAGE_PMD_NR, vma, address, flags,
			      RMAP_LEVEL_PMD);
#else
	WARN_ON_ONCE(true);
#endif
}
/*
 * folio_add_new_anon_rmap - Add mapping to a new anonymous folio.
 * @folio:	The folio to add the mapping to.
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * Like folio_add_anon_rmap_*() but must only be called on *new* folios.
 * This means the inc-and-test can be bypassed.
 * The folio does not have to be locked.
 *
 * If the folio is pmd-mappable, it is accounted as a THP. As the folio
 * is new, it's assumed to be mapped exclusively by a single process.
 */
void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
		unsigned long address)
{
	int nr = folio_nr_pages(folio);

	VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
	VM_BUG_ON_VMA(address < vma->vm_start ||
			address + (nr << PAGE_SHIFT) > vma->vm_end, vma);
	__folio_set_swapbacked(folio);
	__folio_set_anon(folio, vma, address, true);

	if (likely(!folio_test_large(folio))) {
		/* increment count (starts at -1) */
		atomic_set(&folio->_mapcount, 0);
		SetPageAnonExclusive(&folio->page);
	} else if (!folio_test_pmd_mappable(folio)) {
		int i;

		for (i = 0; i < nr; i++) {
			struct page *page = folio_page(folio, i);

			/* increment count (starts at -1) */
			atomic_set(&page->_mapcount, 0);
			SetPageAnonExclusive(page);
		}

		/* increment count (starts at -1) */
		atomic_set(&folio->_large_mapcount, nr - 1);
		atomic_set(&folio->_nr_pages_mapped, nr);
	} else {
		/* increment count (starts at -1) */
		atomic_set(&folio->_entire_mapcount, 0);
		/* increment count (starts at -1) */
		atomic_set(&folio->_large_mapcount, 0);
		atomic_set(&folio->_nr_pages_mapped, ENTIRELY_MAPPED);
		SetPageAnonExclusive(&folio->page);
		__lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr);
	}

	__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
}
static __always_inline void __folio_add_file_rmap(struct folio *folio,
		struct page *page, int nr_pages, struct vm_area_struct *vma,
		enum rmap_level level)
{
	pg_data_t *pgdat = folio_pgdat(folio);
	int nr, nr_pmdmapped = 0;

	VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);

	nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped);
	if (nr_pmdmapped)
		__mod_node_page_state(pgdat, folio_test_swapbacked(folio) ?
			NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED, nr_pmdmapped);
	if (nr)
		__lruvec_stat_mod_folio(folio, NR_FILE_MAPPED, nr);

	/* See comments in folio_add_anon_rmap_*() */
	if (!folio_test_large(folio))
		mlock_vma_folio(folio, vma);
}
/*
 * folio_add_file_rmap_ptes - add PTE mappings to a page range of a folio
 * @folio:	The folio to add the mappings to
 * @page:	The first page to add
 * @nr_pages:	The number of pages that will be mapped using PTEs
 * @vma:	The vm area in which the mappings are added
 *
 * The page range of the folio is defined by [page, page + nr_pages)
 *
 * The caller needs to hold the page table lock.
 */
void folio_add_file_rmap_ptes(struct folio *folio, struct page *page,
		int nr_pages, struct vm_area_struct *vma)
{
	__folio_add_file_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE);
}
/*
 * folio_add_file_rmap_pmd - add a PMD mapping to a page range of a folio
 * @folio:	The folio to add the mapping to
 * @page:	The first page to add
 * @vma:	The vm area in which the mapping is added
 *
 * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
 *
 * The caller needs to hold the page table lock.
 */
void folio_add_file_rmap_pmd(struct folio *folio, struct page *page,
		struct vm_area_struct *vma)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	__folio_add_file_rmap(folio, page, HPAGE_PMD_NR, vma, RMAP_LEVEL_PMD);
#else
	WARN_ON_ONCE(true);
#endif
}
static __always_inline void __folio_remove_rmap(struct folio *folio,
		struct page *page, int nr_pages, struct vm_area_struct *vma,
		enum rmap_level level)
{
	atomic_t *mapped = &folio->_nr_pages_mapped;
	pg_data_t *pgdat = folio_pgdat(folio);
	int last, nr = 0, nr_pmdmapped = 0;
	bool partially_mapped = false;
	enum node_stat_item idx;

	__folio_rmap_sanity_checks(folio, page, nr_pages, level);

	switch (level) {
	case RMAP_LEVEL_PTE:
		if (!folio_test_large(folio)) {
			nr = atomic_add_negative(-1, &page->_mapcount);
			break;
		}

		atomic_sub(nr_pages, &folio->_large_mapcount);
		do {
			last = atomic_add_negative(-1, &page->_mapcount);
			if (last) {
				last = atomic_dec_return_relaxed(mapped);
				if (last < ENTIRELY_MAPPED)
					nr++;
			}
		} while (page++, --nr_pages > 0);

		partially_mapped = nr && atomic_read(mapped);
		break;
	case RMAP_LEVEL_PMD:
		atomic_dec(&folio->_large_mapcount);
		last = atomic_add_negative(-1, &folio->_entire_mapcount);
		if (last) {
			nr = atomic_sub_return_relaxed(ENTIRELY_MAPPED, mapped);
			if (likely(nr < ENTIRELY_MAPPED)) {
				nr_pmdmapped = folio_nr_pages(folio);
				nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
				/* Raced ahead of another remove and an add? */
				if (unlikely(nr < 0))
					nr = 0;
			} else {
				/* An add of ENTIRELY_MAPPED raced ahead */
				nr = 0;
			}
		}

		partially_mapped = nr < nr_pmdmapped;
		break;
	}

	if (nr_pmdmapped) {
		/* NR_{FILE/SHMEM}_PMDMAPPED are not maintained per-memcg */
		if (folio_test_anon(folio))
			__lruvec_stat_mod_folio(folio, NR_ANON_THPS, -nr_pmdmapped);
		else
			__mod_node_page_state(pgdat,
					folio_test_swapbacked(folio) ?
					NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED,
					-nr_pmdmapped);
	}
	if (nr) {
		idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED;
		__lruvec_stat_mod_folio(folio, idx, -nr);

		/*
		 * Queue anon large folio for deferred split if at least one
		 * page of the folio is unmapped and at least one page
		 * is still mapped.
		 *
		 * Check partially_mapped first to ensure it is a large folio.
		 */
		if (folio_test_anon(folio) && partially_mapped &&
		    list_empty(&folio->_deferred_list))
			deferred_split_folio(folio);
	}

	/*
	 * It would be tidy to reset folio_test_anon mapping when fully
	 * unmapped, but that might overwrite a racing folio_add_anon_rmap_*()
	 * which increments mapcount after us but sets mapping before us:
	 * so leave the reset to free_pages_prepare, and remember that
	 * it's only reliable while mapped.
	 */

	munlock_vma_folio(folio, vma);
}
/*
 * folio_remove_rmap_ptes - remove PTE mappings from a page range of a folio
 * @folio:	The folio to remove the mappings from
 * @page:	The first page to remove
 * @nr_pages:	The number of pages that will be removed from the mapping
 * @vma:	The vm area from which the mappings are removed
 *
 * The page range of the folio is defined by [page, page + nr_pages)
 *
 * The caller needs to hold the page table lock.
 */
void folio_remove_rmap_ptes(struct folio *folio, struct page *page,
		int nr_pages, struct vm_area_struct *vma)
{
	__folio_remove_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE);
}
/*
 * folio_remove_rmap_pmd - remove a PMD mapping from a page range of a folio
 * @folio:	The folio to remove the mapping from
 * @page:	The first page to remove
 * @vma:	The vm area from which the mapping is removed
 *
 * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
 *
 * The caller needs to hold the page table lock.
 */
void folio_remove_rmap_pmd(struct folio *folio, struct page *page,
		struct vm_area_struct *vma)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	__folio_remove_rmap(folio, page, HPAGE_PMD_NR, vma, RMAP_LEVEL_PMD);
#else
	WARN_ON_ONCE(true);
#endif
}
/*
 * @arg: enum ttu_flags will be passed to this argument
 */
static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
		     unsigned long address, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
	pte_t pteval;
	struct page *subpage;
	bool anon_exclusive, ret = true;
	struct mmu_notifier_range range;
	enum ttu_flags flags = (enum ttu_flags)(long)arg;
	unsigned long pfn;
	unsigned long hsz = 0;

	/*
	 * When racing against e.g. zap_pte_range() on another cpu,
	 * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(),
	 * try_to_unmap() may return before page_mapped() has become false,
	 * if page table locking is skipped: use TTU_SYNC to wait for that.
	 */
	if (flags & TTU_SYNC)
		pvmw.flags = PVMW_SYNC;

	if (flags & TTU_SPLIT_HUGE_PMD)
		split_huge_pmd_address(vma, address, false, folio);

	/*
	 * For THP, we have to assume the worse case ie pmd for invalidation.
	 * For hugetlb, it could be much worse if we need to do pud
	 * invalidation in the case of pmd sharing.
	 *
	 * Note that the folio can not be freed in this function as call of
	 * try_to_unmap() must hold a reference on the folio.
	 */
	range.end = vma_address_end(&pvmw);
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
				address, range.end);
	if (folio_test_hugetlb(folio)) {
		/*
		 * If sharing is possible, start and end will be adjusted
		 * accordingly.
		 */
		adjust_range_if_pmd_sharing_possible(vma, &range.start,
						     &range.end);

		/* We need the huge page size for set_huge_pte_at() */
		hsz = huge_page_size(hstate_vma(vma));
	}
	mmu_notifier_invalidate_range_start(&range);

	while (page_vma_mapped_walk(&pvmw)) {
		/* Unexpected PMD-mapped THP? */
		VM_BUG_ON_FOLIO(!pvmw.pte, folio);

		/*
		 * If the folio is in an mlock()d vma, we must not swap it out.
		 */
		if (!(flags & TTU_IGNORE_MLOCK) &&
		    (vma->vm_flags & VM_LOCKED)) {
			/* Restore the mlock which got missed */
			if (!folio_test_large(folio))
				mlock_vma_folio(folio, vma);
			page_vma_mapped_walk_done(&pvmw);
			ret = false;
			break;
		}

		pfn = pte_pfn(ptep_get(pvmw.pte));
		subpage = folio_page(folio, pfn - folio_pfn(folio));
		address = pvmw.address;
		anon_exclusive = folio_test_anon(folio) &&
				 PageAnonExclusive(subpage);

		if (folio_test_hugetlb(folio)) {
			bool anon = folio_test_anon(folio);

			/*
			 * The try_to_unmap() is only passed a hugetlb page
			 * in the case where the hugetlb page is poisoned.
			 */
			VM_BUG_ON_PAGE(!PageHWPoison(subpage), subpage);
			/*
			 * huge_pmd_unshare may unmap an entire PMD page.
			 * There is no way of knowing exactly which PMDs may
			 * be cached for this mm, so we must flush them all.
			 * start/end were already adjusted above to cover this
			 * range.
			 */
			flush_cache_range(vma, range.start, range.end);

			/*
			 * To call huge_pmd_unshare, i_mmap_rwsem must be
			 * held in write mode. Caller needs to explicitly
			 * do this outside rmap routines.
			 *
			 * We also must hold hugetlb vma_lock in write mode.
			 * Lock order dictates acquiring vma_lock BEFORE
			 * i_mmap_rwsem. We can only try lock here and fail
			 * if unsuccessful.
			 */
			if (!anon) {
				VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
				if (!hugetlb_vma_trylock_write(vma)) {
					page_vma_mapped_walk_done(&pvmw);
					ret = false;
					break;
				}
				if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
					hugetlb_vma_unlock_write(vma);
					flush_tlb_range(vma,
						range.start, range.end);

					/*
					 * The ref count of the PMD page was
					 * dropped which is part of the way map
					 * counting is done for shared PMDs.
					 * Return 'true' here. When there is
					 * no other sharing, huge_pmd_unshare
					 * returns false and we will unmap the
					 * actual page and drop map count
					 * to zero.
					 */
					page_vma_mapped_walk_done(&pvmw);
					break;
				}
				hugetlb_vma_unlock_write(vma);
			}
			pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
		} else {
			flush_cache_page(vma, address, pfn);
			/* Nuke the page table entry. */
			if (should_defer_flush(mm, flags)) {
				/*
				 * We clear the PTE but do not flush so potentially
				 * a remote CPU could still be writing to the folio.
				 * If the entry was previously clean then the
				 * architecture must guarantee that a clear->dirty
				 * transition on a cached TLB entry is written through
				 * and traps if the PTE is unmapped.
				 */
				pteval = ptep_get_and_clear(mm, address, pvmw.pte);

				set_tlb_ubc_flush_pending(mm, pteval, address);
			} else {
				pteval = ptep_clear_flush(vma, address, pvmw.pte);
			}
		}

		/*
		 * Now the pte is cleared. If this pte was uffd-wp armed,
		 * we may want to replace a none pte with a marker pte if
		 * it's file-backed, so we don't lose the tracking info.
		 */
		pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval);

		/* Set the dirty flag on the folio now the pte is gone. */
		if (pte_dirty(pteval))
			folio_mark_dirty(folio);

		/* Update high watermark before we lower rss */
		update_hiwater_rss(mm);

		if (PageHWPoison(subpage) && (flags & TTU_HWPOISON)) {
			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
			if (folio_test_hugetlb(folio)) {
				hugetlb_count_sub(folio_nr_pages(folio), mm);
				set_huge_pte_at(mm, address, pvmw.pte, pteval,
						hsz);
			} else {
				dec_mm_counter(mm, mm_counter(folio));
				set_pte_at(mm, address, pvmw.pte, pteval);
			}
		} else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
			/*
			 * The guest indicated that the page content is of no
			 * interest anymore. Simply discard the pte, vmscan
			 * will take care of the rest.
			 * A future reference will then fault in a new zero
			 * page. When userfaultfd is active, we must not drop
			 * this page though, as its main user (postcopy
			 * migration) will not expect userfaults on already
			 * copied pages.
			 */
			dec_mm_counter(mm, mm_counter(folio));
		} else if (folio_test_anon(folio)) {
			swp_entry_t entry = page_swap_entry(subpage);
			pte_t swp_pte;
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			if (unlikely(folio_test_swapbacked(folio) !=
					folio_test_swapcache(folio))) {
				WARN_ON_ONCE(1);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}

			/* MADV_FREE page check */
			if (!folio_test_swapbacked(folio)) {
				int ref_count, map_count;

				/*
				 * Synchronize with gup_pte_range():
				 * - clear PTE; barrier; read refcount
				 * - inc refcount; barrier; read PTE
				 */
				smp_mb();

				ref_count = folio_ref_count(folio);
				map_count = folio_mapcount(folio);

				/*
				 * Order reads for page refcount and dirty flag
				 * (see comments in __remove_mapping()).
				 */
				smp_rmb();

				/*
				 * The only page refs must be one from isolation
				 * plus the rmap(s) (dropped by discard:).
				 */
				if (ref_count == 1 + map_count &&
				    !folio_test_dirty(folio)) {
					dec_mm_counter(mm, MM_ANONPAGES);
					goto discard;
				}

				/*
				 * If the folio was redirtied, it cannot be
				 * discarded. Remap the page to page table.
				 */
				set_pte_at(mm, address, pvmw.pte, pteval);
				folio_set_swapbacked(folio);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}

			if (swap_duplicate(entry) < 0) {
				set_pte_at(mm, address, pvmw.pte, pteval);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
				swap_free(entry);
				set_pte_at(mm, address, pvmw.pte, pteval);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}

			/* See folio_try_share_anon_rmap(): clear PTE first. */
			if (anon_exclusive &&
			    folio_try_share_anon_rmap_pte(folio, subpage)) {
				swap_free(entry);
				set_pte_at(mm, address, pvmw.pte, pteval);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, MM_ANONPAGES);
			inc_mm_counter(mm, MM_SWAPENTS);
			swp_pte = swp_entry_to_pte(entry);
			if (anon_exclusive)
				swp_pte = pte_swp_mkexclusive(swp_pte);
			if (pte_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			if (pte_uffd_wp(pteval))
				swp_pte = pte_swp_mkuffd_wp(swp_pte);
			set_pte_at(mm, address, pvmw.pte, swp_pte);
		} else {
			/*
			 * This is a locked file-backed folio,
			 * so it cannot be removed from the page
			 * cache and replaced by a new folio before
			 * mmu_notifier_invalidate_range_end, so no
			 * concurrent thread might update its page table
			 * to point at a new folio while a device is
			 * still using this folio.
			 *
			 * See Documentation/mm/mmu_notifier.rst
			 */
			dec_mm_counter(mm, mm_counter_file(folio));
		}
discard:
		if (unlikely(folio_test_hugetlb(folio)))
			hugetlb_remove_rmap(folio);
		else
			folio_remove_rmap_pte(folio, subpage, vma);
		if (vma->vm_flags & VM_LOCKED)
			mlock_drain_local();
		folio_put(folio);
	}

	mmu_notifier_invalidate_range_end(&range);

	return ret;
}
static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
{
	return vma_is_temporary_stack(vma);
}

static int folio_not_mapped(struct folio *folio)
{
	return !folio_mapped(folio);
}
/*
 * try_to_unmap - Try to remove all page table mappings to a folio.
 * @folio: The folio to unmap.
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this
 * folio. It is the caller's responsibility to check if the folio is
 * still mapped if needed (use TTU_SYNC to prevent accounting races).
 *
 * Context: Caller must hold the folio lock.
 */
void try_to_unmap(struct folio *folio, enum ttu_flags flags)
{
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)flags,
		.done = folio_not_mapped,
		.anon_lock = folio_lock_anon_vma_read,
	};

	if (flags & TTU_RMAP_LOCKED)
		rmap_walk_locked(folio, &rwc);
	else
		rmap_walk(folio, &rwc);
}
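/*
 * Illustrative sketch (not part of this file): the reclaim-style calling
 * convention for try_to_unmap(). The caller must hold the folio lock, and
 * because try_to_unmap() returns void, success is re-checked with
 * folio_mapped(); TTU_BATCH_FLUSH defers the TLB flush to
 * try_to_unmap_flush() as described earlier in this file.
 *
 *	folio_lock(folio);
 *	try_to_unmap(folio, TTU_BATCH_FLUSH);
 *	if (folio_mapped(folio)) {
 *		// some mapping could not be removed; keep the folio
 *	} else {
 *		try_to_unmap_flush_dirty();
 *		// safe to start writeback / free the folio
 *	}
 *	folio_unlock(folio);
 */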
/*
 * @arg: enum ttu_flags will be passed to this argument.
 *
 * If TTU_SPLIT_HUGE_PMD is specified any PMD mappings will be split into PTEs
 * containing migration entries.
 */
1970 static bool try_to_migrate_one(struct folio
*folio
, struct vm_area_struct
*vma
,
1971 unsigned long address
, void *arg
)
1973 struct mm_struct
*mm
= vma
->vm_mm
;
1974 DEFINE_FOLIO_VMA_WALK(pvmw
, folio
, vma
, address
, 0);
1976 struct page
*subpage
;
1977 bool anon_exclusive
, ret
= true;
1978 struct mmu_notifier_range range
;
1979 enum ttu_flags flags
= (enum ttu_flags
)(long)arg
;
1981 unsigned long hsz
= 0;
	/*
	 * When racing against e.g. zap_pte_range() on another cpu,
	 * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(),
	 * try_to_migrate() may return before page_mapped() has become false,
	 * if page table locking is skipped: use TTU_SYNC to wait for that.
	 */
	if (flags & TTU_SYNC)
		pvmw.flags = PVMW_SYNC;

	/*
	 * unmap_page() in mm/huge_memory.c is the only user of migration with
	 * TTU_SPLIT_HUGE_PMD and it wants to freeze.
	 */
	if (flags & TTU_SPLIT_HUGE_PMD)
		split_huge_pmd_address(vma, address, true, folio);

	/*
	 * For THP, we have to assume the worst case, i.e. pmd, for invalidation.
	 * For hugetlb, it could be much worse if we need to do pud
	 * invalidation in the case of pmd sharing.
	 *
	 * Note that the folio cannot be freed in this function, as the
	 * caller of try_to_migrate() must hold a reference on it.
	 */
	range.end = vma_address_end(&pvmw);
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
				address, range.end);
	if (folio_test_hugetlb(folio)) {
		/*
		 * If sharing is possible, start and end will be adjusted
		 * accordingly.
		 */
		adjust_range_if_pmd_sharing_possible(vma, &range.start,
						     &range.end);

		/* We need the huge page size for set_huge_pte_at() */
		hsz = huge_page_size(hstate_vma(vma));
	}
	mmu_notifier_invalidate_range_start(&range);
	while (page_vma_mapped_walk(&pvmw)) {
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte) {
			subpage = folio_page(folio,
				pmd_pfn(*pvmw.pmd) - folio_pfn(folio));
			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
					!folio_test_pmd_mappable(folio), folio);

			if (set_pmd_migration_entry(&pvmw, subpage)) {
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			continue;
		}
#endif

		/* Unexpected PMD-mapped THP? */
		VM_BUG_ON_FOLIO(!pvmw.pte, folio);

		pfn = pte_pfn(ptep_get(pvmw.pte));
		if (folio_is_zone_device(folio)) {
			/*
			 * Our PTE is a non-present device exclusive entry and
			 * calculating the subpage as for the common case would
			 * result in an invalid pointer.
			 *
			 * Since only PAGE_SIZE pages can currently be
			 * migrated, just set it to page. This will need to be
			 * changed when hugepage migrations to device private
			 * memory are supported.
			 */
			VM_BUG_ON_FOLIO(folio_nr_pages(folio) > 1, folio);
			subpage = &folio->page;
		} else {
			subpage = folio_page(folio, pfn - folio_pfn(folio));
		}
		address = pvmw.address;
		anon_exclusive = folio_test_anon(folio) &&
				 PageAnonExclusive(subpage);
		if (folio_test_hugetlb(folio)) {
			bool anon = folio_test_anon(folio);

			/*
			 * huge_pmd_unshare may unmap an entire PMD page.
			 * There is no way of knowing exactly which PMDs may
			 * be cached for this mm, so we must flush them all.
			 * start/end were already adjusted above to cover this
			 * range.
			 */
			flush_cache_range(vma, range.start, range.end);

			/*
			 * To call huge_pmd_unshare, i_mmap_rwsem must be
			 * held in write mode.  Caller needs to explicitly
			 * do this outside rmap routines.
			 *
			 * We also must hold hugetlb vma_lock in write mode.
			 * Lock order dictates acquiring vma_lock BEFORE
			 * i_mmap_rwsem.  We can only try lock here and
			 * fail if unsuccessful.
			 */
			if (!anon) {
				VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
				if (!hugetlb_vma_trylock_write(vma)) {
					page_vma_mapped_walk_done(&pvmw);
					ret = false;
					break;
				}
				if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
					hugetlb_vma_unlock_write(vma);
					flush_tlb_range(vma,
						range.start, range.end);

					/*
					 * The ref count of the PMD page was
					 * dropped which is part of the way map
					 * counting is done for shared PMDs.
					 * Return 'true' here.  When there is
					 * no other sharing, huge_pmd_unshare
					 * returns false and we will unmap the
					 * actual page and drop map count
					 * to zero.
					 */
					page_vma_mapped_walk_done(&pvmw);
					break;
				}
				hugetlb_vma_unlock_write(vma);
			}
			/* Nuke the hugetlb page table entry */
			pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
		} else {
			flush_cache_page(vma, address, pfn);
			/* Nuke the page table entry. */
			if (should_defer_flush(mm, flags)) {
				/*
				 * We clear the PTE but do not flush so potentially
				 * a remote CPU could still be writing to the folio.
				 * If the entry was previously clean then the
				 * architecture must guarantee that a clear->dirty
				 * transition on a cached TLB entry is written through
				 * and traps if the PTE is unmapped.
				 */
				pteval = ptep_get_and_clear(mm, address, pvmw.pte);

				set_tlb_ubc_flush_pending(mm, pteval, address);
			} else {
				pteval = ptep_clear_flush(vma, address, pvmw.pte);
			}
		}

		/* Set the dirty flag on the folio now the pte is gone. */
		if (pte_dirty(pteval))
			folio_mark_dirty(folio);

		/* Update high watermark before we lower rss */
		update_hiwater_rss(mm);
		if (folio_is_device_private(folio)) {
			unsigned long pfn = folio_pfn(folio);
			swp_entry_t entry;
			pte_t swp_pte;

			if (anon_exclusive)
				WARN_ON_ONCE(folio_try_share_anon_rmap_pte(folio,
									   subpage));

			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			entry = pte_to_swp_entry(pteval);
			if (is_writable_device_private_entry(entry))
				entry = make_writable_migration_entry(pfn);
			else if (anon_exclusive)
				entry = make_readable_exclusive_migration_entry(pfn);
			else
				entry = make_readable_migration_entry(pfn);
			swp_pte = swp_entry_to_pte(entry);

			/*
			 * pteval maps a zone device page and is therefore
			 * a swap pte.
			 */
			if (pte_swp_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			if (pte_swp_uffd_wp(pteval))
				swp_pte = pte_swp_mkuffd_wp(swp_pte);
			set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
			trace_set_migration_pte(pvmw.address, pte_val(swp_pte),
						folio_order(folio));
			/*
			 * No need to invalidate here; it will synchronize
			 * against the special swap migration pte.
			 */
		} else if (PageHWPoison(subpage)) {
			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
			if (folio_test_hugetlb(folio)) {
				hugetlb_count_sub(folio_nr_pages(folio), mm);
				set_huge_pte_at(mm, address, pvmw.pte, pteval,
						hsz);
			} else {
				dec_mm_counter(mm, mm_counter(folio));
				set_pte_at(mm, address, pvmw.pte, pteval);
			}

		} else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
			/*
			 * The guest indicated that the page content is of no
			 * interest anymore. Simply discard the pte, vmscan
			 * will take care of the rest.
			 * A future reference will then fault in a new zero
			 * page. When userfaultfd is active, we must not drop
			 * this page though, as its main user (postcopy
			 * migration) will not expect userfaults on already
			 * copied pages.
			 */
			dec_mm_counter(mm, mm_counter(folio));
		} else {
			swp_entry_t entry;
			pte_t swp_pte;

			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
				if (folio_test_hugetlb(folio))
					set_huge_pte_at(mm, address, pvmw.pte,
							pteval, hsz);
				else
					set_pte_at(mm, address, pvmw.pte, pteval);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			VM_BUG_ON_PAGE(pte_write(pteval) && folio_test_anon(folio) &&
				       !anon_exclusive, subpage);

			/* See folio_try_share_anon_rmap_pte(): clear PTE first. */
			if (folio_test_hugetlb(folio)) {
				if (anon_exclusive &&
				    hugetlb_try_share_anon_rmap(folio)) {
					set_huge_pte_at(mm, address, pvmw.pte,
							pteval, hsz);
					ret = false;
					page_vma_mapped_walk_done(&pvmw);
					break;
				}
			} else if (anon_exclusive &&
				   folio_try_share_anon_rmap_pte(folio, subpage)) {
				set_pte_at(mm, address, pvmw.pte, pteval);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			if (pte_write(pteval))
				entry = make_writable_migration_entry(
							page_to_pfn(subpage));
			else if (anon_exclusive)
				entry = make_readable_exclusive_migration_entry(
							page_to_pfn(subpage));
			else
				entry = make_readable_migration_entry(
							page_to_pfn(subpage));
			if (pte_young(pteval))
				entry = make_migration_entry_young(entry);
			if (pte_dirty(pteval))
				entry = make_migration_entry_dirty(entry);
			swp_pte = swp_entry_to_pte(entry);
			if (pte_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			if (pte_uffd_wp(pteval))
				swp_pte = pte_swp_mkuffd_wp(swp_pte);
			if (folio_test_hugetlb(folio))
				set_huge_pte_at(mm, address, pvmw.pte, swp_pte,
						hsz);
			else
				set_pte_at(mm, address, pvmw.pte, swp_pte);
			trace_set_migration_pte(address, pte_val(swp_pte),
						folio_order(folio));
			/*
			 * No need to invalidate here; it will synchronize
			 * against the special swap migration pte.
			 */
		}

		if (unlikely(folio_test_hugetlb(folio)))
			hugetlb_remove_rmap(folio);
		else
			folio_remove_rmap_pte(folio, subpage, vma);
		if (vma->vm_flags & VM_LOCKED)
			mlock_drain_local();
		folio_put(folio);
	}

	mmu_notifier_invalidate_range_end(&range);

	return ret;
}
/**
 * try_to_migrate - try to replace all page table mappings with swap entries
 * @folio: the folio to replace page table entries for
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this folio and
 * replace them with special swap entries. Caller must hold the folio lock.
 */
void try_to_migrate(struct folio *folio, enum ttu_flags flags)
{
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_migrate_one,
		.arg = (void *)flags,
		.done = folio_not_mapped,
		.anon_lock = folio_lock_anon_vma_read,
	};

	/*
	 * Migration always ignores mlock and only supports the
	 * TTU_RMAP_LOCKED, TTU_SPLIT_HUGE_PMD, TTU_SYNC, and
	 * TTU_BATCH_FLUSH flags.
	 */
	if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
					TTU_SYNC | TTU_BATCH_FLUSH)))
		return;

	if (folio_is_zone_device(folio) &&
	    (!folio_is_device_private(folio) && !folio_is_device_coherent(folio)))
		return;

	/*
	 * During exec, a temporary VMA is setup and later moved.
	 * The VMA is moved under the anon_vma lock but not the
	 * page tables leading to a race where migration cannot
	 * find the migration ptes. Rather than increasing the
	 * locking requirements of exec(), migration skips
	 * temporary VMAs until after exec() completes.
	 */
	if (!folio_test_ksm(folio) && folio_test_anon(folio))
		rwc.invalid_vma = invalid_migration_vma;

	if (flags & TTU_RMAP_LOCKED)
		rmap_walk_locked(folio, &rwc);
	else
		rmap_walk(folio, &rwc);
}
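
/*
 * Illustrative sketch (not part of rmap.c): the migration core in
 * mm/migrate.c is the intended caller.  The hypothetical helper below
 * only shows the expected sequence - lock the folio, replace its PTEs
 * with migration entries, then verify the unmap took effect.  The real
 * path keeps the folio locked until the migration entries are later
 * rewritten or restored via remove_migration_ptes().
 */
static inline bool example_freeze_folio_for_migration(struct folio *folio)
{
	if (!folio_trylock(folio))
		return false;
	try_to_migrate(folio, 0);
	/* Caller keeps the lock; only report whether all PTEs are gone. */
	return !folio_mapped(folio);
}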
#ifdef CONFIG_DEVICE_PRIVATE
struct make_exclusive_args {
	struct mm_struct *mm;
	unsigned long address;
	void *owner;
	bool valid;
};

static bool page_make_device_exclusive_one(struct folio *folio,
		struct vm_area_struct *vma, unsigned long address, void *priv)
{
	struct mm_struct *mm = vma->vm_mm;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
	struct make_exclusive_args *args = priv;
	pte_t pteval;
	struct page *subpage;
	bool ret = true;
	struct mmu_notifier_range range;
	swp_entry_t entry;
	pte_t swp_pte;
	pte_t ptent;
	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0,
				      vma->vm_mm, address, min(vma->vm_end,
				      address + folio_size(folio)),
				      args->owner);
	mmu_notifier_invalidate_range_start(&range);

	while (page_vma_mapped_walk(&pvmw)) {
		/* Unexpected PMD-mapped THP? */
		VM_BUG_ON_FOLIO(!pvmw.pte, folio);

		ptent = ptep_get(pvmw.pte);
		if (!pte_present(ptent)) {
			ret = false;
			page_vma_mapped_walk_done(&pvmw);
			break;
		}

		subpage = folio_page(folio,
				pte_pfn(ptent) - folio_pfn(folio));
		address = pvmw.address;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(ptent));
		pteval = ptep_clear_flush(vma, address, pvmw.pte);

		/* Set the dirty flag on the folio now the pte is gone. */
		if (pte_dirty(pteval))
			folio_mark_dirty(folio);

		/*
		 * Check that our target page is still mapped at the expected
		 * address.
		 */
		if (args->mm == mm && args->address == address &&
		    pte_write(pteval))
			args->valid = true;

		/*
		 * Store the pfn of the page in a special device-exclusive
		 * swap pte. do_swap_page() will wait until the entry is
		 * removed and then restart fault handling.
		 */
		if (pte_write(pteval))
			entry = make_writable_device_exclusive_entry(
							page_to_pfn(subpage));
		else
			entry = make_readable_device_exclusive_entry(
							page_to_pfn(subpage));
		swp_pte = swp_entry_to_pte(entry);
		if (pte_soft_dirty(pteval))
			swp_pte = pte_swp_mksoft_dirty(swp_pte);
		if (pte_uffd_wp(pteval))
			swp_pte = pte_swp_mkuffd_wp(swp_pte);

		set_pte_at(mm, address, pvmw.pte, swp_pte);

		/*
		 * There is a reference on the page for the swap entry which has
		 * been removed, so shouldn't take another.
		 */
		folio_remove_rmap_pte(folio, subpage, vma);
	}

	mmu_notifier_invalidate_range_end(&range);

	return ret;
}
/**
 * folio_make_device_exclusive - Mark the folio exclusively owned by a device.
 * @folio: The folio to replace page table entries for.
 * @mm: The mm_struct where the folio is expected to be mapped.
 * @address: Address where the folio is expected to be mapped.
 * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier callbacks
 *
 * Tries to remove all the page table entries which are mapping this
 * folio and replace them with special device exclusive swap entries to
 * grant a device exclusive access to the folio.
 *
 * Context: Caller must hold the folio lock.
 * Return: false if the page is still mapped, or if it could not be unmapped
 * from the expected address. Otherwise returns true (success).
 */
static bool folio_make_device_exclusive(struct folio *folio,
		struct mm_struct *mm, unsigned long address, void *owner)
{
	struct make_exclusive_args args = {
		.mm = mm,
		.address = address,
		.owner = owner,
		.valid = false,
	};
	struct rmap_walk_control rwc = {
		.rmap_one = page_make_device_exclusive_one,
		.done = folio_not_mapped,
		.anon_lock = folio_lock_anon_vma_read,
		.arg = &args,
	};

	/*
	 * Restrict to anonymous folios for now to avoid potential writeback
	 * issues.
	 */
	if (!folio_test_anon(folio))
		return false;

	rmap_walk(folio, &rwc);

	return args.valid && !folio_mapcount(folio);
}
/**
 * make_device_exclusive_range() - Mark a range for exclusive use by a device
 * @mm: mm_struct of associated target process
 * @start: start of the region to mark for exclusive device access
 * @end: end address of region
 * @pages: returns the pages which were successfully marked for exclusive access
 * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering
 *
 * Returns: number of pages found in the range by GUP. A page is marked for
 * exclusive access only if the page pointer is non-NULL.
 *
 * This function finds ptes mapping page(s) to the given address range, locks
 * them and replaces mappings with special swap entries preventing userspace CPU
 * access. On fault these entries are replaced with the original mapping after
 * calling MMU notifiers.
 *
 * A driver using this to program access from a device must use a mmu notifier
 * critical section to hold a device specific lock during programming. Once
 * programming is complete it should drop the page lock and reference after
 * which point CPU access to the page will revoke the exclusive access.
 */
int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, struct page **pages,
				void *owner)
{
	long npages = (end - start) >> PAGE_SHIFT;
	long i;

	npages = get_user_pages_remote(mm, start, npages,
				       FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD,
				       pages, NULL);
	if (npages < 0)
		return npages;

	for (i = 0; i < npages; i++, start += PAGE_SIZE) {
		struct folio *folio = page_folio(pages[i]);
		if (PageTail(pages[i]) || !folio_trylock(folio)) {
			folio_put(folio);
			pages[i] = NULL;
			continue;
		}

		if (!folio_make_device_exclusive(folio, mm, start, owner)) {
			folio_unlock(folio);
			folio_put(folio);
			pages[i] = NULL;
		}
	}

	return npages;
}
EXPORT_SYMBOL_GPL(make_device_exclusive_range);
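
/*
 * Illustrative sketch (not part of rmap.c): a device driver is expected
 * to call make_device_exclusive_range() under mmap_read_lock(), inside an
 * MMU-notifier-protected section, and to drop the folio lock and reference
 * once its device mappings are programmed.  The helper below is
 * hypothetical and only shows that calling convention for a single page.
 */
static inline struct page *example_grab_exclusive_page(struct mm_struct *mm,
						       unsigned long addr,
						       void *owner)
{
	struct page *page = NULL;

	mmap_read_lock(mm);
	make_device_exclusive_range(mm, addr, addr + PAGE_SIZE, &page, owner);
	mmap_read_unlock(mm);

	/* On success the page is returned locked, with a reference held. */
	return page;
}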
#endif /* CONFIG_DEVICE_PRIVATE */

void __put_anon_vma(struct anon_vma *anon_vma)
{
	struct anon_vma *root = anon_vma->root;

	anon_vma_free(anon_vma);
	if (root != anon_vma && atomic_dec_and_test(&root->refcount))
		anon_vma_free(root);
}
static struct anon_vma *rmap_walk_anon_lock(struct folio *folio,
					struct rmap_walk_control *rwc)
{
	struct anon_vma *anon_vma;

	if (rwc->anon_lock)
		return rwc->anon_lock(folio, rwc);

	/*
	 * Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read()
	 * because that depends on page_mapped(); but not all its usages
	 * are holding mmap_lock. Users without mmap_lock are required to
	 * take a reference count to prevent the anon_vma disappearing.
	 */
	anon_vma = folio_anon_vma(folio);
	if (!anon_vma)
		return NULL;

	if (anon_vma_trylock_read(anon_vma))
		goto out;

	if (rwc->try_lock) {
		anon_vma = NULL;
		rwc->contended = true;
		goto out;
	}

	anon_vma_lock_read(anon_vma);
out:
	return anon_vma;
}
/*
 * rmap_walk_anon - do something to an anonymous folio using the object-based
 * rmap method
 * @folio: the folio to be handled
 * @rwc: control variable according to each walk type
 * @locked: caller holds relevant rmap lock
 *
 * Find all the mappings of a folio using the mapping pointer and the vma
 * chains contained in the anon_vma struct it points to.
 */
static void rmap_walk_anon(struct folio *folio,
		struct rmap_walk_control *rwc, bool locked)
{
	struct anon_vma *anon_vma;
	pgoff_t pgoff_start, pgoff_end;
	struct anon_vma_chain *avc;

	if (locked) {
		anon_vma = folio_anon_vma(folio);
		/* anon_vma disappear under us? */
		VM_BUG_ON_FOLIO(!anon_vma, folio);
	} else {
		anon_vma = rmap_walk_anon_lock(folio, rwc);
	}
	if (!anon_vma)
		return;

	pgoff_start = folio_pgoff(folio);
	pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
			pgoff_start, pgoff_end) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address = vma_address(vma, pgoff_start,
				folio_nr_pages(folio));

		VM_BUG_ON_VMA(address == -EFAULT, vma);
		cond_resched();

		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
			continue;

		if (!rwc->rmap_one(folio, vma, address, rwc->arg))
			break;
		if (rwc->done && rwc->done(folio))
			break;
	}

	if (!locked)
		anon_vma_unlock_read(anon_vma);
}
/*
 * rmap_walk_file - do something to a file-backed folio using the object-based
 * rmap method
 * @folio: the folio to be handled
 * @rwc: control variable according to each walk type
 * @locked: caller holds relevant rmap lock
 *
 * Find all the mappings of a folio using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 */
static void rmap_walk_file(struct folio *folio,
		struct rmap_walk_control *rwc, bool locked)
{
	struct address_space *mapping = folio_mapping(folio);
	pgoff_t pgoff_start, pgoff_end;
	struct vm_area_struct *vma;

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_rwsem.
	 */
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	if (!mapping)
		return;

	pgoff_start = folio_pgoff(folio);
	pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
	if (!locked) {
		if (i_mmap_trylock_read(mapping))
			goto lookup;

		if (rwc->try_lock) {
			rwc->contended = true;
			return;
		}

		i_mmap_lock_read(mapping);
	}
lookup:
	vma_interval_tree_foreach(vma, &mapping->i_mmap,
			pgoff_start, pgoff_end) {
		unsigned long address = vma_address(vma, pgoff_start,
			       folio_nr_pages(folio));

		VM_BUG_ON_VMA(address == -EFAULT, vma);
		cond_resched();

		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
			continue;

		if (!rwc->rmap_one(folio, vma, address, rwc->arg))
			goto done;
		if (rwc->done && rwc->done(folio))
			goto done;
	}
done:
	if (!locked)
		i_mmap_unlock_read(mapping);
}
void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc)
{
	if (unlikely(folio_test_ksm(folio)))
		rmap_walk_ksm(folio, rwc);
	else if (folio_test_anon(folio))
		rmap_walk_anon(folio, rwc, false);
	else
		rmap_walk_file(folio, rwc, false);
}

/* Like rmap_walk, but caller holds relevant rmap lock */
void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc)
{
	/* no ksm support for now */
	VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio);
	if (folio_test_anon(folio))
		rmap_walk_anon(folio, rwc, true);
	else
		rmap_walk_file(folio, rwc, true);
}
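
/*
 * Illustrative sketch (not part of rmap.c): a walk is driven by a
 * caller-supplied rmap_walk_control.  The hypothetical callback below
 * just counts the VMAs a locked folio is mapped into; returning true
 * from .rmap_one keeps the walk going, and .done can end it early.
 */
static bool example_count_one(struct folio *folio, struct vm_area_struct *vma,
			      unsigned long address, void *arg)
{
	(*(unsigned long *)arg)++;
	return true;	/* keep walking */
}

static inline unsigned long example_count_mappings(struct folio *folio)
{
	unsigned long nr = 0;
	struct rmap_walk_control rwc = {
		.rmap_one = example_count_one,
		.arg = &nr,
	};

	/* Caller must hold the folio lock, as for the walks above. */
	rmap_walk(folio, &rwc);
	return nr;
}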
#ifdef CONFIG_HUGETLB_PAGE
/*
 * The following two functions are for anonymous (private mapped) hugepages.
 * Unlike common anonymous pages, anonymous hugepages have no accounting code
 * and no lru code, because we handle hugepages differently from common pages.
 */
void hugetlb_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
		unsigned long address, rmap_t flags)
{
	VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);

	atomic_inc(&folio->_entire_mapcount);
	atomic_inc(&folio->_large_mapcount);
	if (flags & RMAP_EXCLUSIVE)
		SetPageAnonExclusive(&folio->page);
	VM_WARN_ON_FOLIO(folio_entire_mapcount(folio) > 1 &&
			 PageAnonExclusive(&folio->page), folio);
}

void hugetlb_add_new_anon_rmap(struct folio *folio,
		struct vm_area_struct *vma, unsigned long address)
{
	VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);

	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	/* increment count (starts at -1) */
	atomic_set(&folio->_entire_mapcount, 0);
	atomic_set(&folio->_large_mapcount, 0);
	folio_clear_hugetlb_restore_reserve(folio);
	__folio_set_anon(folio, vma, address, true);
	SetPageAnonExclusive(&folio->page);
}
#endif /* CONFIG_HUGETLB_PAGE */