// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/page_table_check.h>
#include <linux/rcupdate_wait.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/ksm.h>

#include <asm/pgalloc.h>
enum scan_result {
	SCAN_EXCEED_SHARED_PTE,
	SCAN_PTE_MAPPED_HUGEPAGE,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_PAGE_HAS_PRIVATE,
};
#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);

/* default: scan 8*512 ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * default collapse hugepages if there is at least one pte mapped like
 * it would have happened if the vma was large enough during page
 * fault.
 *
 * Note that these are only respected if collapse was initiated by khugepaged.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
static unsigned int khugepaged_max_ptes_shared __read_mostly;
#define MM_SLOTS_HASH_BITS 10
static DEFINE_READ_MOSTLY_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __ro_after_init;

struct collapse_control {
	bool is_khugepaged;

	/* Num pages scanned per node */
	u32 node_load[MAX_NUMNODES];

	/* nodemask for allocation fallback */
	nodemask_t alloc_nmask;
};
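/*
 * Usage sketch (see hpage_collapse_scan_pmd() and
 * hpage_collapse_find_target_node() below): each mapped page scanned bumps
 * node_load[] for its NUMA node; the node with the highest count becomes the
 * allocation target and the other hit nodes seed alloc_nmask as fallbacks.
 */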
/**
 * struct khugepaged_mm_slot - khugepaged information per mm that is being scanned
 * @slot: hash lookup from mm to mm_slot
 */
struct khugepaged_mm_slot {
	struct mm_slot slot;
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct khugepaged_mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};
#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR_RW(scan_sleep_millisecs);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR_RW(alloc_sleep_millisecs);
static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
}

static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int pages;
	int err;

	err = kstrtouint(buf, 10, &pages);
	if (err || !pages)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR_RW(pages_to_scan);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);
static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}

static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
					  TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR_RW(defrag);
/*
 * max_ptes_none controls if khugepaged should collapse hugepages over
 * any unmapped ptes in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0 khugepaged will not
 * reduce the available free memory in the system as it
 * runs. Increasing max_ptes_none will instead potentially reduce the
 * free memory in the system during the khugepaged scan.
 */
static ssize_t max_ptes_none_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
}

static ssize_t max_ptes_none_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR_RW(max_ptes_none);
static ssize_t max_ptes_swap_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t max_ptes_swap_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR_RW(max_ptes_swap);
static ssize_t max_ptes_shared_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
}

static ssize_t max_ptes_shared_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_shared;

	err = kstrtoul(buf, 10, &max_ptes_shared);
	if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_shared = max_ptes_shared;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_shared_attr =
	__ATTR_RW(max_ptes_shared);
static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	&khugepaged_max_ptes_shared_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */
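/*
 * With CONFIG_SYSFS the group above shows up under
 * /sys/kernel/mm/transparent_hugepage/khugepaged/, e.g.
 * "echo 511 > .../khugepaged/max_ptes_none" (usage sketch; the group is
 * registered by the generic THP sysfs setup code, not in this file).
 */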
int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes good for khugepaged to scan,
		 * register it here without waiting for a page fault that
		 * may not happen any time soon.
		 */
		khugepaged_enter_vma(vma, *vm_flags);
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}
int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct khugepaged_mm_slot),
					  __alignof__(struct khugepaged_mm_slot),
					  0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;

	return 0;
}
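/*
 * For the common 4K base page / 2M PMD configuration (HPAGE_PMD_NR == 512),
 * the defaults above work out to: pages_to_scan = 4096, max_ptes_none = 511,
 * max_ptes_swap = 64, max_ptes_shared = 256. (Illustrative arithmetic only;
 * other page sizes give different values.)
 */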
void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}
static inline int hpage_collapse_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

static inline int hpage_collapse_test_exit_or_disable(struct mm_struct *mm)
{
	return hpage_collapse_test_exit(mm) ||
	       test_bit(MMF_DISABLE_THP, &mm->flags);
}
void __khugepaged_enter(struct mm_struct *mm)
{
	struct khugepaged_mm_slot *mm_slot;
	struct mm_slot *slot;
	int wakeup;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags)))
		return;

	mm_slot = mm_slot_alloc(mm_slot_cache);
	if (!mm_slot)
		return;

	slot = &mm_slot->slot;

	spin_lock(&khugepaged_mm_lock);
	mm_slot_insert(mm_slots_hash, mm, slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);
}
void khugepaged_enter_vma(struct vm_area_struct *vma,
			  unsigned long vm_flags)
{
	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
	    hugepage_flags_enabled()) {
		if (thp_vma_allowable_order(vma, vm_flags, false, false, true,
					    PMD_ORDER))
			__khugepaged_enter(vma->vm_mm);
	}
}
void __khugepaged_exit(struct mm_struct *mm)
{
	struct khugepaged_mm_slot *mm_slot;
	struct mm_slot *slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	slot = mm_slot_lookup(mm_slots_hash, mm);
	mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&slot->hash);
		list_del(&slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		mm_slot_free(mm_slot_cache, mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * hpage_collapse_test_exit() (which is guaranteed to run
		 * under mmap sem read mode). Stop here (after we return all
		 * pagetables will be destroyed) until khugepaged has finished
		 * working on the pagetables under the mmap_lock.
		 */
		mmap_write_lock(mm);
		mmap_write_unlock(mm);
	}
}
static void release_pte_folio(struct folio *folio)
{
	node_stat_mod_folio(folio,
			NR_ISOLATED_ANON + folio_is_file_lru(folio),
			-folio_nr_pages(folio));
	folio_unlock(folio);
	folio_putback_lru(folio);
}

static void release_pte_pages(pte_t *pte, pte_t *_pte,
		struct list_head *compound_pagelist)
{
	struct folio *folio, *tmp;

	while (--_pte >= pte) {
		pte_t pteval = ptep_get(_pte);
		unsigned long pfn;

		if (pte_none(pteval))
			continue;
		pfn = pte_pfn(pteval);
		if (is_zero_pfn(pfn))
			continue;
		folio = pfn_folio(pfn);
		if (folio_test_large(folio))
			continue;
		release_pte_folio(folio);
	}

	list_for_each_entry_safe(folio, tmp, compound_pagelist, lru) {
		list_del(&folio->lru);
		release_pte_folio(folio);
	}
}
static bool is_refcount_suitable(struct folio *folio)
{
	int expected_refcount;

	expected_refcount = folio_mapcount(folio);
	if (folio_test_swapcache(folio))
		expected_refcount += folio_nr_pages(folio);

	return folio_ref_count(folio) == expected_refcount;
}
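/*
 * Example (illustrative numbers, not from the source): a small anonymous
 * folio mapped by two processes and sitting in the swap cache is expected to
 * hold 2 (mapcount) + 1 (swap cache) = 3 references; any extra reference,
 * e.g. a GUP pin, makes the folio unsuitable for collapse.
 */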
static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte,
					struct collapse_control *cc,
					struct list_head *compound_pagelist)
{
	struct page *page = NULL;
	struct folio *folio = NULL;
	pte_t *_pte;
	int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
	bool writable = false;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = ptep_get(_pte);
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			++none_or_zero;
			if (!userfaultfd_armed(vma) &&
			    (!cc->is_khugepaged ||
			     none_or_zero <= khugepaged_max_ptes_none)) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		if (pte_uffd_wp(pteval)) {
			result = SCAN_PTE_UFFD_WP;
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		folio = page_folio(page);
		VM_BUG_ON_FOLIO(!folio_test_anon(folio), folio);

		if (page_mapcount(page) > 1) {
			++shared;
			if (cc->is_khugepaged &&
			    shared > khugepaged_max_ptes_shared) {
				result = SCAN_EXCEED_SHARED_PTE;
				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
				goto out;
			}
		}

		if (folio_test_large(folio)) {
			struct folio *f;

			/*
			 * Check if we have dealt with the compound page
			 * enough already.
			 */
			list_for_each_entry(f, compound_pagelist, lru) {
				if (folio == f)
					goto next;
			}
		}

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!folio_trylock(folio)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * The page table that maps the page has been already unlinked
		 * from the page table tree and this process cannot get
		 * an additional pin on the page.
		 *
		 * New pins can come later if the page is shared across fork,
		 * but not from this process. The other process cannot write to
		 * the page, only trigger CoW.
		 */
		if (!is_refcount_suitable(folio)) {
			folio_unlock(folio);
			result = SCAN_PAGE_COUNT;
			goto out;
		}

		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (!folio_isolate_lru(folio)) {
			folio_unlock(folio);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		node_stat_mod_folio(folio,
				NR_ISOLATED_ANON + folio_is_file_lru(folio),
				folio_nr_pages(folio));
		VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
		VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);

		if (folio_test_large(folio))
			list_add_tail(&folio->lru, compound_pagelist);
next:
		/*
		 * If collapse was initiated by khugepaged, check that there is
		 * enough young ptes to justify collapsing the page.
		 */
		if (cc->is_khugepaged &&
		    (pte_young(pteval) || folio_test_young(folio) ||
		     folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
								     address)))
			referenced++;

		if (pte_write(pteval))
			writable = true;
	}

	if (unlikely(!writable)) {
		result = SCAN_PAGE_RO;
	} else if (unlikely(cc->is_khugepaged && !referenced)) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		trace_mm_collapse_huge_page_isolate(&folio->page, none_or_zero,
						    referenced, writable, result);
		return result;
	}
out:
	release_pte_pages(pte, _pte, compound_pagelist);
	trace_mm_collapse_huge_page_isolate(&folio->page, none_or_zero,
					    referenced, writable, result);
	return result;
}
static void __collapse_huge_page_copy_succeeded(pte_t *pte,
						struct vm_area_struct *vma,
						unsigned long address,
						spinlock_t *ptl,
						struct list_head *compound_pagelist)
{
	struct folio *src, *tmp;
	pte_t *_pte;
	pte_t pteval;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pteval = ptep_get(_pte);
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				ptep_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
				ksm_might_unmap_zero_page(vma->vm_mm, pteval);
			}
		} else {
			struct page *src_page = pte_page(pteval);

			src = page_folio(src_page);
			if (!folio_test_large(src))
				release_pte_folio(src);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside folio_remove_rmap_pte().
			 */
			spin_lock(ptl);
			ptep_clear(vma->vm_mm, address, _pte);
			folio_remove_rmap_pte(src, src_page, vma);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}
	}

	list_for_each_entry_safe(src, tmp, compound_pagelist, lru) {
		list_del(&src->lru);
		node_stat_sub_folio(src, NR_ISOLATED_ANON +
				folio_is_file_lru(src));
		folio_unlock(src);
		free_swap_cache(src);
		folio_putback_lru(src);
	}
}
static void __collapse_huge_page_copy_failed(pte_t *pte,
					     pmd_t *pmd,
					     pmd_t orig_pmd,
					     struct vm_area_struct *vma,
					     struct list_head *compound_pagelist)
{
	spinlock_t *pmd_ptl;

	/*
	 * Re-establish the PMD to point to the original page table
	 * entry. Restoring PMD needs to be done prior to releasing
	 * pages. Since pages are still isolated and locked here,
	 * acquiring anon_vma_lock_write is unnecessary.
	 */
	pmd_ptl = pmd_lock(vma->vm_mm, pmd);
	pmd_populate(vma->vm_mm, pmd, pmd_pgtable(orig_pmd));
	spin_unlock(pmd_ptl);
	/*
	 * Release both raw and compound pages isolated
	 * in __collapse_huge_page_isolate.
	 */
	release_pte_pages(pte, pte + HPAGE_PMD_NR, compound_pagelist);
}
/*
 * __collapse_huge_page_copy - attempts to copy memory contents from raw
 * pages to a hugepage. Cleans up the raw pages if copying succeeds;
 * otherwise restores the original page table and releases isolated raw pages.
 * Returns SCAN_SUCCEED if copying succeeds, otherwise returns SCAN_COPY_MC.
 *
 * @pte: starting of the PTEs to copy from
 * @page: the new hugepage to copy contents to
 * @pmd: pointer to the new hugepage's PMD
 * @orig_pmd: the original raw pages' PMD
 * @vma: the original raw pages' virtual memory area
 * @address: starting address to copy
 * @ptl: lock on raw pages' PTEs
 * @compound_pagelist: list that stores compound pages
 */
static int __collapse_huge_page_copy(pte_t *pte,
				     struct page *page,
				     pmd_t *pmd,
				     pmd_t orig_pmd,
				     struct vm_area_struct *vma,
				     unsigned long address,
				     spinlock_t *ptl,
				     struct list_head *compound_pagelist)
{
	struct page *src_page;
	pte_t *_pte;
	pte_t pteval;
	unsigned long _address;
	int result = SCAN_SUCCEED;

	/*
	 * Copying pages' contents is subject to memory poison at any iteration.
	 */
	for (_pte = pte, _address = address; _pte < pte + HPAGE_PMD_NR;
	     _pte++, page++, _address += PAGE_SIZE) {
		pteval = ptep_get(_pte);
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, _address);
			continue;
		}
		src_page = pte_page(pteval);
		if (copy_mc_user_highpage(page, src_page, _address, vma) > 0) {
			result = SCAN_COPY_MC;
			break;
		}
	}

	if (likely(result == SCAN_SUCCEED))
		__collapse_huge_page_copy_succeeded(pte, vma, address, ptl,
						    compound_pagelist);
	else
		__collapse_huge_page_copy_failed(pte, pmd, orig_pmd, vma,
						 compound_pagelist);

	return result;
}
static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
	schedule_timeout(msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}

struct collapse_control khugepaged_collapse_control = {
	.is_khugepaged = true,
};
static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_enabled())
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (cc->node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!cc->node_load[i])
			continue;
		if (node_distance(nid, i) > node_reclaim_distance)
			return true;
	}
	return false;
}

#define khugepaged_defrag()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))

/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}
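/*
 * GFP_TRANSHUGE permits direct reclaim/compaction while GFP_TRANSHUGE_LIGHT
 * does not, so with the "defrag" knob clear khugepaged only takes huge pages
 * that are already available and cheap to allocate.
 */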
#ifdef CONFIG_NUMA
static int hpage_collapse_find_target_node(struct collapse_control *cc)
{
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (cc->node_load[nid] > max_value) {
			max_value = cc->node_load[nid];
			target_node = nid;
		}

	for_each_online_node(nid) {
		if (max_value == cc->node_load[nid])
			node_set(nid, cc->alloc_nmask);
	}

	return target_node;
}
#else
static int hpage_collapse_find_target_node(struct collapse_control *cc)
{
	return 0;
}
#endif

static bool hpage_collapse_alloc_folio(struct folio **folio, gfp_t gfp, int node,
				       nodemask_t *nmask)
{
	*folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, nmask);

	if (unlikely(!*folio)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		return false;
	}

	count_vm_event(THP_COLLAPSE_ALLOC);
	return true;
}
);
909 * If mmap_lock temporarily dropped, revalidate vma
910 * before taking mmap_lock.
911 * Returns enum scan_result value.
914 static int hugepage_vma_revalidate(struct mm_struct
*mm
, unsigned long address
,
916 struct vm_area_struct
**vmap
,
917 struct collapse_control
*cc
)
919 struct vm_area_struct
*vma
;
921 if (unlikely(hpage_collapse_test_exit_or_disable(mm
)))
922 return SCAN_ANY_PROCESS
;
924 *vmap
= vma
= find_vma(mm
, address
);
926 return SCAN_VMA_NULL
;
928 if (!thp_vma_suitable_order(vma
, address
, PMD_ORDER
))
929 return SCAN_ADDRESS_RANGE
;
930 if (!thp_vma_allowable_order(vma
, vma
->vm_flags
, false, false,
931 cc
->is_khugepaged
, PMD_ORDER
))
932 return SCAN_VMA_CHECK
;
934 * Anon VMA expected, the address may be unmapped then
935 * remapped to file after khugepaged reaquired the mmap_lock.
937 * thp_vma_allowable_order may return true for qualified file
940 if (expect_anon
&& (!(*vmap
)->anon_vma
|| !vma_is_anonymous(*vmap
)))
941 return SCAN_PAGE_ANON
;
static int find_pmd_or_thp_or_none(struct mm_struct *mm,
				   unsigned long address,
				   pmd_t **pmd)
{
	pmd_t pmde;

	*pmd = mm_find_pmd(mm, address);
	if (!*pmd)
		return SCAN_PMD_NULL;

	pmde = pmdp_get_lockless(*pmd);
	if (pmd_none(pmde))
		return SCAN_PMD_NONE;
	if (!pmd_present(pmde))
		return SCAN_PMD_NULL;
	if (pmd_trans_huge(pmde))
		return SCAN_PMD_MAPPED;
	if (pmd_devmap(pmde))
		return SCAN_PMD_NULL;
	if (pmd_bad(pmde))
		return SCAN_PMD_NULL;
	return SCAN_SUCCEED;
}
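/*
 * Summary of the checks above: SCAN_SUCCEED means a present, non-huge,
 * non-devmap, non-bad PMD was found; SCAN_PMD_MAPPED means the range is
 * already backed by a huge PMD; the other results abort the collapse.
 */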
static int check_pmd_still_valid(struct mm_struct *mm,
				 unsigned long address,
				 pmd_t *pmd)
{
	pmd_t *new_pmd;
	int result = find_pmd_or_thp_or_none(mm, address, &new_pmd);

	if (result != SCAN_SUCCEED)
		return result;
	if (new_pmd != pmd)
		return SCAN_FAIL;
	return SCAN_SUCCEED;
}
/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if hpage_collapse_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held.
 * Returns result: if not SCAN_SUCCEED, mmap_lock has been released.
 */
static int __collapse_huge_page_swapin(struct mm_struct *mm,
				       struct vm_area_struct *vma,
				       unsigned long haddr, pmd_t *pmd,
				       int referenced)
{
	int swapped_in = 0;
	vm_fault_t ret = 0;
	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
	int result;
	pte_t *pte = NULL;
	spinlock_t *ptl;

	for (address = haddr; address < end; address += PAGE_SIZE) {
		struct vm_fault vmf = {
			.vma = vma,
			.address = address,
			.pgoff = linear_page_index(vma, address),
			.flags = FAULT_FLAG_ALLOW_RETRY,
			.pmd = pmd,
		};

		if (!pte++) {
			pte = pte_offset_map_nolock(mm, pmd, address, &ptl);
			if (!pte) {
				mmap_read_unlock(mm);
				result = SCAN_PMD_NULL;
				goto out;
			}
		}

		vmf.orig_pte = ptep_get_lockless(pte);
		if (!is_swap_pte(vmf.orig_pte))
			continue;

		vmf.pte = pte;
		vmf.ptl = ptl;
		ret = do_swap_page(&vmf);
		/* Which unmaps pte (after perhaps re-checking the entry) */
		pte = NULL;

		/*
		 * do_swap_page returns VM_FAULT_RETRY with released mmap_lock.
		 * Note we treat VM_FAULT_RETRY as VM_FAULT_ERROR here because
		 * we do not retry here and swap entry will remain in pagetable
		 * resulting in later failure.
		 */
		if (ret & VM_FAULT_RETRY) {
			/* Likely, but not guaranteed, that page lock failed */
			result = SCAN_PAGE_LOCK;
			goto out;
		}
		if (ret & VM_FAULT_ERROR) {
			mmap_read_unlock(mm);
			result = SCAN_FAIL;
			goto out;
		}
		swapped_in++;
	}

	if (pte)
		pte_unmap(pte);

	/* Drain LRU cache to remove extra pin on the swapped in pages */
	if (swapped_in)
		lru_add_drain();

	result = SCAN_SUCCEED;
out:
	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, result);
	return result;
}
static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
			      struct collapse_control *cc)
{
	gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
		     GFP_TRANSHUGE);
	int node = hpage_collapse_find_target_node(cc);
	struct folio *folio;

	if (!hpage_collapse_alloc_folio(&folio, gfp, node, &cc->alloc_nmask)) {
		*hpage = NULL;
		return SCAN_ALLOC_HUGE_PAGE_FAIL;
	}

	if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
		folio_put(folio);
		*hpage = NULL;
		return SCAN_CGROUP_CHARGE_FAIL;
	}
	count_memcg_folio_events(folio, THP_COLLAPSE_ALLOC, 1);

	*hpage = folio_page(folio, 0);
	return SCAN_SUCCEED;
}
static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
			      int referenced, int unmapped,
			      struct collapse_control *cc)
{
	LIST_HEAD(compound_pagelist);
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct folio *folio;
	struct page *hpage;
	spinlock_t *pmd_ptl, *pte_ptl;
	int result = SCAN_FAIL;
	struct vm_area_struct *vma;
	struct mmu_notifier_range range;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/*
	 * Before allocating the hugepage, release the mmap_lock read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_lock during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	mmap_read_unlock(mm);

	result = alloc_charge_hpage(&hpage, mm, cc);
	if (result != SCAN_SUCCEED)
		goto out_nolock;

	mmap_read_lock(mm);
	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
	if (result != SCAN_SUCCEED) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	result = find_pmd_or_thp_or_none(mm, address, &pmd);
	if (result != SCAN_SUCCEED) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	if (unmapped) {
		/*
		 * __collapse_huge_page_swapin will return with mmap_lock
		 * released when it fails. So we jump out_nolock directly in
		 * that case. Continuing to collapse causes inconsistency.
		 */
		result = __collapse_huge_page_swapin(mm, vma, address, pmd,
						     referenced);
		if (result != SCAN_SUCCEED)
			goto out_nolock;
	}

	mmap_read_unlock(mm);
	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 *
	 * UFFDIO_MOVE is prevented to race as well thanks to the
	 * mmap_write_lock.
	 */
	mmap_write_lock(mm);
	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
	if (result != SCAN_SUCCEED)
		goto out_up_write;
	/* check if the pmd is still valid */
	result = check_pmd_still_valid(mm, address, pmd);
	if (result != SCAN_SUCCEED)
		goto out_up_write;

	vma_start_write(vma);
	anon_vma_lock_write(vma->anon_vma);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address,
				address + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * This removes any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address to
	 * avoid the risk of CPU bugs in that area.
	 *
	 * Parallel fast GUP is fine since fast GUP will back off when
	 * it detects PMD is changed.
	 */
	_pmd = pmdp_collapse_flush(vma, address, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(&range);
	tlb_remove_table_sync_one();

	pte = pte_offset_map_lock(mm, &_pmd, address, &pte_ptl);
	if (pte) {
		result = __collapse_huge_page_isolate(vma, address, pte, cc,
						      &compound_pagelist);
		spin_unlock(pte_ptl);
	} else {
		result = SCAN_PMD_NULL;
	}

	if (unlikely(result != SCAN_SUCCEED)) {
		if (pte)
			pte_unmap(pte);
		spin_lock(pmd_ptl);
		BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * points to regular pagetables. Use pmd_populate for that
		 */
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);
		goto out_up_write;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock_write(vma->anon_vma);

	result = __collapse_huge_page_copy(pte, hpage, pmd, _pmd,
					   vma, address, pte_ptl,
					   &compound_pagelist);
	pte_unmap(pte);
	if (unlikely(result != SCAN_SUCCEED))
		goto out_up_write;

	folio = page_folio(hpage);
	/*
	 * The smp_wmb() inside __folio_mark_uptodate() ensures the
	 * copy_huge_page writes become visible before the set_pmd_at()
	 * write.
	 */
	__folio_mark_uptodate(folio);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(hpage, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);

	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	folio_add_new_anon_rmap(folio, vma, address);
	folio_add_lru_vma(folio, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);

	hpage = NULL;

	result = SCAN_SUCCEED;
out_up_write:
	mmap_write_unlock(mm);
out_nolock:
	if (hpage)
		put_page(hpage);
	trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
	return result;
}
static int hpage_collapse_scan_pmd(struct mm_struct *mm,
				   struct vm_area_struct *vma,
				   unsigned long address, bool *mmap_locked,
				   struct collapse_control *cc)
{
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int result = SCAN_FAIL, referenced = 0;
	int none_or_zero = 0, shared = 0;
	struct page *page = NULL;
	struct folio *folio = NULL;
	unsigned long _address;
	spinlock_t *ptl;
	int node = NUMA_NO_NODE, unmapped = 0;
	bool writable = false;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	result = find_pmd_or_thp_or_none(mm, address, &pmd);
	if (result != SCAN_SUCCEED)
		goto out;

	memset(cc->node_load, 0, sizeof(cc->node_load));
	nodes_clear(cc->alloc_nmask);
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (!pte) {
		result = SCAN_PMD_NULL;
		goto out;
	}

	for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = ptep_get(_pte);
		if (is_swap_pte(pteval)) {
			++unmapped;
			if (!cc->is_khugepaged ||
			    unmapped <= khugepaged_max_ptes_swap) {
				/*
				 * Always be strict with uffd-wp
				 * enabled swap entries. Please see
				 * comment below for pte_uffd_wp().
				 */
				if (pte_swp_uffd_wp_any(pteval)) {
					result = SCAN_PTE_UFFD_WP;
					goto out_unmap;
				}
				continue;
			} else {
				result = SCAN_EXCEED_SWAP_PTE;
				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
				goto out_unmap;
			}
		}
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			++none_or_zero;
			if (!userfaultfd_armed(vma) &&
			    (!cc->is_khugepaged ||
			     none_or_zero <= khugepaged_max_ptes_none)) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
				goto out_unmap;
			}
		}
		if (pte_uffd_wp(pteval)) {
			/*
			 * Don't collapse the page if any of the small
			 * PTEs are armed with uffd write protection.
			 * Here we can also mark the new huge pmd as
			 * write protected if any of the small ones is
			 * marked but that could bring unknown
			 * userfault messages that falls outside of
			 * the registered range. So, just be simple.
			 */
			result = SCAN_PTE_UFFD_WP;
			goto out_unmap;
		}
		if (pte_write(pteval))
			writable = true;

		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
			result = SCAN_PAGE_NULL;
			goto out_unmap;
		}

		if (page_mapcount(page) > 1) {
			++shared;
			if (cc->is_khugepaged &&
			    shared > khugepaged_max_ptes_shared) {
				result = SCAN_EXCEED_SHARED_PTE;
				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
				goto out_unmap;
			}
		}

		folio = page_folio(page);
		/*
		 * Record which node the original page is from and save this
		 * information to cc->node_load[].
		 * Khugepaged will allocate the hugepage from the node that has
		 * the most hits.
		 */
		node = folio_nid(folio);
		if (hpage_collapse_scan_abort(node, cc)) {
			result = SCAN_SCAN_ABORT;
			goto out_unmap;
		}
		cc->node_load[node]++;
		if (!folio_test_lru(folio)) {
			result = SCAN_PAGE_LRU;
			goto out_unmap;
		}
		if (folio_test_locked(folio)) {
			result = SCAN_PAGE_LOCK;
			goto out_unmap;
		}
		if (!folio_test_anon(folio)) {
			result = SCAN_PAGE_ANON;
			goto out_unmap;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * Here the check may be racy:
		 * it may see total_mapcount > refcount in some cases?
		 * But such case is ephemeral we could always retry collapse
		 * later.  However it may report false positive if the page
		 * has excessive GUP pins (i.e. 512).  Anyway the same check
		 * will be done again later the risk seems low.
		 */
		if (!is_refcount_suitable(folio)) {
			result = SCAN_PAGE_COUNT;
			goto out_unmap;
		}

		/*
		 * If collapse was initiated by khugepaged, check that there is
		 * enough young ptes to justify collapsing the page.
		 */
		if (cc->is_khugepaged &&
		    (pte_young(pteval) || folio_test_young(folio) ||
		     folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
								     address)))
			referenced++;
	}
	if (!writable) {
		result = SCAN_PAGE_RO;
	} else if (cc->is_khugepaged &&
		   (!referenced ||
		    (unmapped && referenced < HPAGE_PMD_NR / 2))) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
	}
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (result == SCAN_SUCCEED) {
		result = collapse_huge_page(mm, address, referenced,
					    unmapped, cc);
		/* collapse_huge_page will return with the mmap_lock released */
		*mmap_locked = false;
	}
out:
	trace_mm_khugepaged_scan_pmd(mm, &folio->page, writable, referenced,
				     none_or_zero, result, unmapped);
	return result;
}
static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
{
	struct mm_slot *slot = &mm_slot->slot;
	struct mm_struct *mm = slot->mm;

	lockdep_assert_held(&khugepaged_mm_lock);

	if (hpage_collapse_test_exit(mm)) {
		/* free mm_slot */
		hash_del(&slot->hash);
		list_del(&slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		mm_slot_free(mm_slot_cache, mm_slot);
		mmdrop(mm);
	}
}
#ifdef CONFIG_SHMEM
/* hpage must be locked, and mmap_lock must be held */
static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
			pmd_t *pmdp, struct page *hpage)
{
	struct vm_fault vmf = {
		.vma = vma,
		.address = addr,
		.flags = 0,
		.pmd = pmdp,
	};

	VM_BUG_ON(!PageTransHuge(hpage));
	mmap_assert_locked(vma->vm_mm);

	if (do_set_pmd(&vmf, hpage))
		return SCAN_FAIL;

	get_page(hpage);
	return SCAN_SUCCEED;
}
/**
 * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
 *			     address haddr.
 *
 * @mm: process address space where collapse happens
 * @addr: THP collapse address
 * @install_pmd: If a huge PMD should be installed
 *
 * This function checks whether all the PTEs in the PMD are pointing to the
 * right THP. If so, retract the page table so the THP can refault in
 * as pmd-mapped. Possibly install a huge PMD mapping the THP.
 */
int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
			    bool install_pmd)
{
	struct mmu_notifier_range range;
	bool notified = false;
	unsigned long haddr = addr & HPAGE_PMD_MASK;
	struct vm_area_struct *vma = vma_lookup(mm, haddr);
	struct folio *folio;
	pte_t *start_pte, *pte;
	pmd_t *pmd, pgt_pmd;
	spinlock_t *pml = NULL, *ptl;
	int nr_ptes = 0, result = SCAN_FAIL;
	int i;

	mmap_assert_locked(mm);

	/* First check VMA found, in case page tables are being torn down */
	if (!vma || !vma->vm_file ||
	    !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
		return SCAN_VMA_CHECK;

	/* Fast check before locking page if already PMD-mapped */
	result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
	if (result == SCAN_PMD_MAPPED)
		return result;

	/*
	 * If we are here, we've succeeded in replacing all the native pages
	 * in the page cache with a single hugepage. If a mm were to fault-in
	 * this memory (mapped by a suitably aligned VMA), we'd get the hugepage
	 * and map it by a PMD, regardless of sysfs THP settings. As such, let's
	 * analogously elide sysfs THP settings here.
	 */
	if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
				     PMD_ORDER))
		return SCAN_VMA_CHECK;

	/* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
	if (userfaultfd_wp(vma))
		return SCAN_PTE_UFFD_WP;

	folio = filemap_lock_folio(vma->vm_file->f_mapping,
			       linear_page_index(vma, haddr));
	if (IS_ERR(folio))
		return SCAN_PAGE_NULL;

	if (folio_order(folio) != HPAGE_PMD_ORDER) {
		result = SCAN_PAGE_COMPOUND;
		goto drop_folio;
	}

	result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
	switch (result) {
	case SCAN_SUCCEED:
		break;
	case SCAN_PMD_NONE:
		/*
		 * All pte entries have been removed and pmd cleared.
		 * Skip all the pte checks and just update the pmd mapping.
		 */
		goto maybe_install_pmd;
	default:
		goto drop_folio;
	}

	result = SCAN_FAIL;
	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
	if (!start_pte)		/* mmap_lock + page lock should prevent this */
		goto drop_folio;

	/* step 1: check all mapped PTEs are to the right huge page */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;
		pte_t ptent = ptep_get(pte);

		/* empty pte, skip */
		if (pte_none(ptent))
			continue;

		/* page swapped out, abort */
		if (!pte_present(ptent)) {
			result = SCAN_PTE_NON_PRESENT;
			goto abort;
		}

		page = vm_normal_page(vma, addr, ptent);
		if (WARN_ON_ONCE(page && is_zone_device_page(page)))
			page = NULL;
		/*
		 * Note that uprobe, debugger, or MAP_PRIVATE may change the
		 * page table, but the new page will not be a subpage of hpage.
		 */
		if (folio_page(folio, i) != page)
			goto abort;
	}

	pte_unmap_unlock(start_pte, ptl);
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
				haddr, haddr + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);
	notified = true;

	/*
	 * pmd_lock covers a wider range than ptl, and (if split from mm's
	 * page_table_lock) ptl nests inside pml. The less time we hold pml,
	 * the better; but userfaultfd's mfill_atomic_pte() on a private VMA
	 * inserts a valid as-if-COWed PTE without even looking up page cache.
	 * So page lock of folio does not protect from it, so we must not drop
	 * ptl before pgt_pmd is removed, so uffd private needs pml taken now.
	 */
	if (userfaultfd_armed(vma) && !(vma->vm_flags & VM_SHARED))
		pml = pmd_lock(mm, pmd);

	start_pte = pte_offset_map_nolock(mm, pmd, haddr, &ptl);
	if (!start_pte)		/* mmap_lock + page lock should prevent this */
		goto abort;
	if (!pml)
		spin_lock(ptl);
	else if (ptl != pml)
		spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);

	/* step 2: clear page table and adjust rmap */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;
		pte_t ptent = ptep_get(pte);

		if (pte_none(ptent))
			continue;
		/*
		 * We dropped ptl after the first scan, to do the mmu_notifier:
		 * page lock stops more PTEs of the folio being faulted in, but
		 * does not stop write faults COWing anon copies from existing
		 * PTEs; and does not stop those being swapped out or migrated.
		 */
		if (!pte_present(ptent)) {
			result = SCAN_PTE_NON_PRESENT;
			goto abort;
		}
		page = vm_normal_page(vma, addr, ptent);
		if (folio_page(folio, i) != page)
			goto abort;

		/*
		 * Must clear entry, or a racing truncate may re-remove it.
		 * TLB flush can be left until pmdp_collapse_flush() does it.
		 * PTE dirty? Shmem page is already dirty; file is read-only.
		 */
		ptep_clear(mm, addr, pte);
		folio_remove_rmap_pte(folio, page, vma);
		nr_ptes++;
	}

	pte_unmap(start_pte);
	if (!pml)
		spin_unlock(ptl);

	/* step 3: set proper refcount and mm_counters. */
	if (nr_ptes) {
		folio_ref_sub(folio, nr_ptes);
		add_mm_counter(mm, mm_counter_file(folio), -nr_ptes);
	}

	/* step 4: remove empty page table */
	if (!pml) {
		pml = pmd_lock(mm, pmd);
		if (ptl != pml)
			spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
	}
	pgt_pmd = pmdp_collapse_flush(vma, haddr, pmd);
	pmdp_get_lockless_sync();
	if (ptl != pml)
		spin_unlock(ptl);
	spin_unlock(pml);

	mmu_notifier_invalidate_range_end(&range);

	mm_dec_nr_ptes(mm);
	page_table_check_pte_clear_range(mm, haddr, pgt_pmd);
	pte_free_defer(mm, pmd_pgtable(pgt_pmd));

maybe_install_pmd:
	/* step 5: install pmd entry */
	result = install_pmd
			? set_huge_pmd(vma, haddr, pmd, &folio->page)
			: SCAN_SUCCEED;
	goto drop_folio;
abort:
	if (nr_ptes) {
		flush_tlb_mm(mm);
		folio_ref_sub(folio, nr_ptes);
		add_mm_counter(mm, mm_counter_file(folio), -nr_ptes);
	}
	if (start_pte)
		pte_unmap_unlock(start_pte, ptl);
	if (pml && pml != ptl)
		spin_unlock(pml);
	if (notified)
		mmu_notifier_invalidate_range_end(&range);
drop_folio:
	folio_unlock(folio);
	folio_put(folio);
	return result;
}
static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
{
	struct vm_area_struct *vma;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		struct mmu_notifier_range range;
		struct mm_struct *mm;
		unsigned long addr;
		pmd_t *pmd, pgt_pmd;
		spinlock_t *pml;
		spinlock_t *ptl;
		bool skipped_uffd = false;

		/*
		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
		 * got written to. These VMAs are likely not worth removing
		 * page tables from, as PMD-mapping is likely to be split later.
		 */
		if (READ_ONCE(vma->anon_vma))
			continue;

		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		if (addr & ~HPAGE_PMD_MASK ||
		    vma->vm_end < addr + HPAGE_PMD_SIZE)
			continue;

		mm = vma->vm_mm;
		if (find_pmd_or_thp_or_none(mm, addr, &pmd) != SCAN_SUCCEED)
			continue;

		if (hpage_collapse_test_exit(mm))
			continue;
		/*
		 * When a vma is registered with uffd-wp, we cannot recycle
		 * the page table because there may be pte markers installed.
		 * Other vmas can still have the same file mapped hugely, but
		 * skip this one: it will always be mapped in small page size
		 * for uffd-wp registered ranges.
		 */
		if (userfaultfd_wp(vma))
			continue;

		/* PTEs were notified when unmapped; but now for the PMD? */
		mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
					addr, addr + HPAGE_PMD_SIZE);
		mmu_notifier_invalidate_range_start(&range);

		pml = pmd_lock(mm, pmd);
		ptl = pte_lockptr(mm, pmd);
		if (ptl != pml)
			spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);

		/*
		 * Huge page lock is still held, so normally the page table
		 * must remain empty; and we have already skipped anon_vma
		 * and userfaultfd_wp() vmas. But since the mmap_lock is not
		 * held, it is still possible for a racing userfaultfd_ioctl()
		 * to have inserted ptes or markers. Now that we hold ptlock,
		 * repeating the anon_vma check protects from one category,
		 * and repeating the userfaultfd_wp() check from another.
		 */
		if (unlikely(vma->anon_vma || userfaultfd_wp(vma))) {
			skipped_uffd = true;
		} else {
			pgt_pmd = pmdp_collapse_flush(vma, addr, pmd);
			pmdp_get_lockless_sync();
		}

		if (ptl != pml)
			spin_unlock(ptl);
		spin_unlock(pml);

		mmu_notifier_invalidate_range_end(&range);

		if (!skipped_uffd) {
			mm_dec_nr_ptes(mm);
			page_table_check_pte_clear_range(mm, addr, pgt_pmd);
			pte_free_defer(mm, pmd_pgtable(pgt_pmd));
		}
	}
	i_mmap_unlock_read(mapping);
}
/**
 * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
 *
 * @mm: process address space where collapse happens
 * @addr: virtual collapse start address
 * @file: file that the collapse operates on
 * @start: collapse start address
 * @cc: collapse context and scratchpad
 *
 * Basic scheme is simple, details are more complex:
 *  - allocate and lock a new huge page;
 *  - scan page cache, locking old pages
 *    + swap/gup in pages if necessary;
 *  - copy data to new page
 *  - handle shmem holes
 *    + re-validate that holes weren't filled by someone else
 *    + check for userfaultfd
 *  - finalize updates to the page cache;
 *  - if replacing succeeds:
 *    + unlock huge page;
 *  - if replacing failed:
 *    + unlock old pages
 *    + unlock and free huge page;
 */
static int collapse_file(struct mm_struct *mm, unsigned long addr,
			 struct file *file, pgoff_t start,
			 struct collapse_control *cc)
{
	struct address_space *mapping = file->f_mapping;
	struct page *hpage;
	struct page *page;
	struct page *tmp;
	struct folio *folio;
	pgoff_t index = 0, end = start + HPAGE_PMD_NR;
	LIST_HEAD(pagelist);
	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
	int nr_none = 0, result = SCAN_SUCCEED;
	bool is_shmem = shmem_file(file);
	int nr = 0;

	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));

	result = alloc_charge_hpage(&hpage, mm, cc);
	if (result != SCAN_SUCCEED)
		goto out;

	__SetPageLocked(hpage);
	if (is_shmem)
		__SetPageSwapBacked(hpage);
	hpage->index = start;
	hpage->mapping = mapping;

	/*
	 * Ensure we have slots for all the pages in the range. This is
	 * almost certainly a no-op because most of the pages must be present
	 */
	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (!xas_error(&xas))
			break;
		xas_unlock_irq(&xas);
		if (!xas_nomem(&xas, GFP_KERNEL)) {
			result = SCAN_FAIL;
			goto rollback;
		}
	} while (1);

	for (index = start; index < end; index++) {
		xas_set(&xas, index);
		page = xas_load(&xas);

		VM_BUG_ON(index != xas.xa_index);
		if (is_shmem) {
			if (!page) {
				/*
				 * Stop if extent has been truncated or
				 * hole-punched, and is now completely
				 * empty.
				 */
				if (index == start) {
					if (!xas_next_entry(&xas, end - 1)) {
						result = SCAN_TRUNCATED;
						goto xa_locked;
					}
				}
				nr_none++;
				continue;
			}

			if (xa_is_value(page) || !PageUptodate(page)) {
				xas_unlock_irq(&xas);
				/* swap in or instantiate fallocated page */
				if (shmem_get_folio(mapping->host, index,
						    &folio, SGP_NOALLOC)) {
					result = SCAN_FAIL;
					goto xa_unlocked;
				}
				/* drain lru cache to help isolate_lru_page() */
				lru_add_drain();
				page = folio_file_page(folio, index);
			} else if (trylock_page(page)) {
				get_page(page);
				xas_unlock_irq(&xas);
			} else {
				result = SCAN_PAGE_LOCK;
				goto xa_locked;
			}
		} else {	/* !is_shmem */
			if (!page || xa_is_value(page)) {
				xas_unlock_irq(&xas);
				page_cache_sync_readahead(mapping, &file->f_ra,
							  file, index,
							  end - index);
				/* drain lru cache to help isolate_lru_page() */
				lru_add_drain();
				page = find_lock_page(mapping, index);
				if (unlikely(page == NULL)) {
					result = SCAN_FAIL;
					goto xa_unlocked;
				}
			} else if (PageDirty(page)) {
				/*
				 * khugepaged only works on read-only fd,
				 * so this page is dirty because it hasn't
				 * been flushed since first write. There
				 * won't be new dirty pages.
				 *
				 * Trigger async flush here and hope the
				 * writeback is done when khugepaged
				 * revisits this page.
				 *
				 * This is a one-off situation. We are not
				 * forcing writeback in loop.
				 */
				xas_unlock_irq(&xas);
				filemap_flush(mapping);
				result = SCAN_FAIL;
				goto xa_unlocked;
			} else if (PageWriteback(page)) {
				xas_unlock_irq(&xas);
				result = SCAN_FAIL;
				goto xa_unlocked;
			} else if (trylock_page(page)) {
				get_page(page);
				xas_unlock_irq(&xas);
			} else {
				result = SCAN_PAGE_LOCK;
				goto xa_locked;
			}
		}

		/*
		 * The page must be locked, so we can drop the i_pages lock
		 * without racing with truncate.
		 */
		VM_BUG_ON_PAGE(!PageLocked(page), page);

		/* make sure the page is up to date */
		if (unlikely(!PageUptodate(page))) {
			result = SCAN_FAIL;
			goto out_unlock;
		}

		/*
		 * If file was truncated then extended, or hole-punched, before
		 * we locked the first page, then a THP might be there already.
		 * This will be discovered on the first iteration.
		 */
		if (PageTransCompound(page)) {
			struct page *head = compound_head(page);

			result = compound_order(head) == HPAGE_PMD_ORDER &&
					head->index == start
					/* Maybe PMD-mapped */
					? SCAN_PTE_MAPPED_HUGEPAGE
					: SCAN_PAGE_COMPOUND;
			goto out_unlock;
		}

		folio = page_folio(page);

		if (folio_mapping(folio) != mapping) {
			result = SCAN_TRUNCATED;
			goto out_unlock;
		}

		if (!is_shmem && (folio_test_dirty(folio) ||
				  folio_test_writeback(folio))) {
			/*
			 * khugepaged only works on read-only fd, so this
			 * page is dirty because it hasn't been flushed
			 * since first write.
			 */
			result = SCAN_FAIL;
			goto out_unlock;
		}

		if (!folio_isolate_lru(folio)) {
			result = SCAN_DEL_PAGE_LRU;
			goto out_unlock;
		}

		if (!filemap_release_folio(folio, GFP_KERNEL)) {
			result = SCAN_PAGE_HAS_PRIVATE;
			folio_putback_lru(folio);
			goto out_unlock;
		}

		if (folio_mapped(folio))
			try_to_unmap(folio,
					TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);

		xas_lock_irq(&xas);

		VM_BUG_ON_PAGE(page != xa_load(xas.xa, index), page);

		/*
		 * We control three references to the page:
		 *  - we hold a pin on it;
		 *  - one reference from page cache;
		 *  - one from isolate_lru_page;
		 * If those are the only references, then any new usage of the
		 * page will have to fetch it from the page cache. That requires
		 * locking the page to handle truncate, so any new usage will be
		 * blocked until we unlock page after collapse/during rollback.
		 */
		if (page_count(page) != 3) {
			result = SCAN_PAGE_COUNT;
			xas_unlock_irq(&xas);
			putback_lru_page(page);
			goto out_unlock;
		}

		/*
		 * Accumulate the pages that are being collapsed.
		 */
		list_add_tail(&page->lru, &pagelist);
		continue;
out_unlock:
		unlock_page(page);
		put_page(page);
		goto xa_unlocked;
	}

	if (!is_shmem) {
		filemap_nr_thps_inc(mapping);
		/*
		 * Paired with smp_mb() in do_dentry_open() to ensure
		 * i_writecount is up to date and the update to nr_thps is
		 * visible. Ensures the page cache will be truncated if the
		 * file is opened writable.
		 */
		smp_mb();
		if (inode_is_open_for_write(mapping->host)) {
			result = SCAN_FAIL;
			filemap_nr_thps_dec(mapping);
		}
	}

xa_locked:
	xas_unlock_irq(&xas);
xa_unlocked:

	/*
	 * If collapse is successful, flush must be done now before copying.
	 * If collapse is unsuccessful, does flush actually need to be done?
	 * Do it anyway, to clear the state.
	 */
	try_to_unmap_flush();

	if (result == SCAN_SUCCEED && nr_none &&
	    !shmem_charge(mapping->host, nr_none))
		result = SCAN_FAIL;
	if (result != SCAN_SUCCEED) {
		nr_none = 0;
		goto rollback;
	}

	/*
	 * The old pages are locked, so they won't change anymore.
	 */
	index = start;
	list_for_each_entry(page, &pagelist, lru) {
		while (index < page->index) {
			clear_highpage(hpage + (index % HPAGE_PMD_NR));
			index++;
		}
		if (copy_mc_highpage(hpage + (page->index % HPAGE_PMD_NR), page) > 0) {
			result = SCAN_COPY_MC;
			goto rollback;
		}
		index++;
	}
	while (index < end) {
		clear_highpage(hpage + (index % HPAGE_PMD_NR));
		index++;
	}

	if (nr_none) {
		struct vm_area_struct *vma;
		int nr_none_check = 0;

		i_mmap_lock_read(mapping);
		xas_lock_irq(&xas);

		xas_set(&xas, start);
		for (index = start; index < end; index++) {
			if (!xas_next(&xas)) {
				xas_store(&xas, XA_RETRY_ENTRY);
				if (xas_error(&xas)) {
					result = SCAN_STORE_FAILED;
					goto immap_locked;
				}
				nr_none_check++;
			}
		}

		if (nr_none != nr_none_check) {
			result = SCAN_PAGE_FILLED;
			goto immap_locked;
		}

		/*
		 * If userspace observed a missing page in a VMA with a MODE_MISSING
		 * userfaultfd, then it might expect a UFFD_EVENT_PAGEFAULT for that
		 * page. If so, we need to roll back to avoid suppressing such an
		 * event. Since wp/minor userfaultfds don't give userspace any
		 * guarantees that the kernel doesn't fill a missing page with a zero
		 * page, they don't matter here.
		 *
		 * Any userfaultfds registered after this point will not be able to
		 * observe any missing pages due to the previously inserted retry
		 * entries.
		 */
		vma_interval_tree_foreach(vma, &mapping->i_mmap, start, end) {
			if (userfaultfd_missing(vma)) {
				result = SCAN_EXCEED_NONE_PTE;
				goto immap_locked;
			}
		}

immap_locked:
		i_mmap_unlock_read(mapping);
		if (result != SCAN_SUCCEED) {
			xas_set(&xas, start);
			for (index = start; index < end; index++) {
				if (xas_next(&xas) == XA_RETRY_ENTRY)
					xas_store(&xas, NULL);
			}

			xas_unlock_irq(&xas);
			goto rollback;
		}
	} else {
		xas_lock_irq(&xas);
	}

	folio = page_folio(hpage);
	nr = folio_nr_pages(folio);
	if (is_shmem)
		__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
	else
		__lruvec_stat_mod_folio(folio, NR_FILE_THPS, nr);

	if (nr_none) {
		__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr_none);
		/* nr_none is always 0 for non-shmem. */
		__lruvec_stat_mod_folio(folio, NR_SHMEM, nr_none);
	}

	/*
	 * Mark hpage as uptodate before inserting it into the page cache so
	 * that it isn't mistaken for an fallocated but unwritten page.
	 */
	folio_mark_uptodate(folio);
	folio_ref_add(folio, HPAGE_PMD_NR - 1);

	if (is_shmem)
		folio_mark_dirty(folio);
	folio_add_lru(folio);

	/* Join all the small entries into a single multi-index entry. */
	xas_set_order(&xas, start, HPAGE_PMD_ORDER);
	xas_store(&xas, folio);
	WARN_ON_ONCE(xas_error(&xas));
	xas_unlock_irq(&xas);

	/*
	 * Remove pte page tables, so we can re-fault the page as huge.
	 * If MADV_COLLAPSE, adjust result to call collapse_pte_mapped_thp().
	 */
	retract_page_tables(mapping, start);
	if (cc && !cc->is_khugepaged)
		result = SCAN_PTE_MAPPED_HUGEPAGE;
	folio_unlock(folio);

	/*
	 * The collapse has succeeded, so free the old pages.
	 */
	list_for_each_entry_safe(page, tmp, &pagelist, lru) {
		list_del(&page->lru);
		page->mapping = NULL;
		ClearPageActive(page);
		ClearPageUnevictable(page);
		unlock_page(page);
		folio_put_refs(page_folio(page), 3);
	}

	goto out;

rollback:
	/* Something went wrong: roll back page cache changes */
	if (nr_none) {
		xas_lock_irq(&xas);
		mapping->nrpages -= nr_none;
		xas_unlock_irq(&xas);
		shmem_uncharge(mapping->host, nr_none);
	}

	list_for_each_entry_safe(page, tmp, &pagelist, lru) {
		list_del(&page->lru);
		unlock_page(page);
		putback_lru_page(page);
		put_page(page);
	}
	/*
	 * Undo the updates of filemap_nr_thps_inc for non-SHMEM
	 * file only. This undo is not needed unless failure is
	 * due to SCAN_COPY_MC.
	 */
	if (!is_shmem && result == SCAN_COPY_MC) {
		filemap_nr_thps_dec(mapping);
		/*
		 * Paired with smp_mb() in do_dentry_open() to
		 * ensure the update to nr_thps is visible.
		 */
		smp_mb();
	}

	hpage->mapping = NULL;

	unlock_page(hpage);
	put_page(hpage);
out:
	VM_BUG_ON(!list_empty(&pagelist));
	trace_mm_khugepaged_collapse_file(mm, hpage, index, is_shmem, addr, file, nr, result);
	return result;
}
static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
				    struct file *file, pgoff_t start,
				    struct collapse_control *cc)
{
	struct page *page = NULL;
	struct address_space *mapping = file->f_mapping;
	XA_STATE(xas, &mapping->i_pages, start);
	int present, swap;
	int node = NUMA_NO_NODE;
	int result = SCAN_SUCCEED;

	present = 0;
	swap = 0;
	memset(cc->node_load, 0, sizeof(cc->node_load));
	nodes_clear(cc->alloc_nmask);
	rcu_read_lock();
	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
		if (xas_retry(&xas, page))
			continue;

		if (xa_is_value(page)) {
			++swap;
			if (cc->is_khugepaged &&
			    swap > khugepaged_max_ptes_swap) {
				result = SCAN_EXCEED_SWAP_PTE;
				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
				break;
			}
			continue;
		}

		/*
		 * TODO: khugepaged should compact smaller compound pages
		 * into a PMD sized page
		 */
		if (PageTransCompound(page)) {
			struct page *head = compound_head(page);

			result = compound_order(head) == HPAGE_PMD_ORDER &&
					head->index == start
					/* Maybe PMD-mapped */
					? SCAN_PTE_MAPPED_HUGEPAGE
					: SCAN_PAGE_COMPOUND;
			/*
			 * For SCAN_PTE_MAPPED_HUGEPAGE, further processing
			 * by the caller won't touch the page cache, and so
			 * it's safe to skip LRU and refcount checks before
			 * returning.
			 */
			break;
		}

		node = page_to_nid(page);
		if (hpage_collapse_scan_abort(node, cc)) {
			result = SCAN_SCAN_ABORT;
			break;
		}
		cc->node_load[node]++;

		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			break;
		}

		if (page_count(page) !=
		    1 + page_mapcount(page) + page_has_private(page)) {
			result = SCAN_PAGE_COUNT;
			break;
		}

		/*
		 * We probably should check if the page is referenced here, but
		 * nobody would transfer pte_young() to PageReferenced() for us.
		 * And rmap walk here is just too costly...
		 */

		present++;

		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}
	rcu_read_unlock();

	if (result == SCAN_SUCCEED) {
		if (cc->is_khugepaged &&
		    present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
			result = SCAN_EXCEED_NONE_PTE;
			count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
		} else {
			result = collapse_file(mm, addr, file, start, cc);
		}
	}

	trace_mm_khugepaged_scan_file(mm, page, file, present, swap, result);
	return result;
}
#else
static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
				    struct file *file, pgoff_t start,
				    struct collapse_control *cc)
{
	BUILD_BUG();
}
#endif
static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
					    struct collapse_control *cc)
	__releases(&khugepaged_mm_lock)
	__acquires(&khugepaged_mm_lock)
{
	struct vma_iterator vmi;
	struct khugepaged_mm_slot *mm_slot;
	struct mm_slot *slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int progress = 0;

	lockdep_assert_held(&khugepaged_mm_lock);
	*result = SCAN_FAIL;

	if (khugepaged_scan.mm_slot) {
		mm_slot = khugepaged_scan.mm_slot;
		slot = &mm_slot->slot;
	} else {
		slot = list_entry(khugepaged_scan.mm_head.next,
				  struct mm_slot, mm_node);
		mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
		khugepaged_scan.address = 0;
		khugepaged_scan.mm_slot = mm_slot;
	}
	spin_unlock(&khugepaged_mm_lock);

	mm = slot->mm;
	/*
	 * Don't wait for semaphore (to avoid long wait times). Just move to
	 * the next mm on the list.
	 */
	vma = NULL;
	if (unlikely(!mmap_read_trylock(mm)))
		goto breakouterloop_mmap_lock;

	progress++;
	if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
		goto breakouterloop;

	vma_iter_init(&vmi, mm, khugepaged_scan.address);
	for_each_vma(vmi, vma) {
		unsigned long hstart, hend;

		cond_resched();
		if (unlikely(hpage_collapse_test_exit_or_disable(mm))) {
			progress++;
			break;
		}
		if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
					     true, PMD_ORDER)) {
skip:
			progress++;
			continue;
		}
		hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE);
		hend = round_down(vma->vm_end, HPAGE_PMD_SIZE);
		if (khugepaged_scan.address > hend)
			goto skip;
		if (khugepaged_scan.address < hstart)
			khugepaged_scan.address = hstart;
		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);

		while (khugepaged_scan.address < hend) {
			bool mmap_locked = true;

			cond_resched();
			if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
				goto breakouterloop;

			VM_BUG_ON(khugepaged_scan.address < hstart ||
				  khugepaged_scan.address + HPAGE_PMD_SIZE >
				  hend);
			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
				struct file *file = get_file(vma->vm_file);
				pgoff_t pgoff = linear_page_index(vma,
						khugepaged_scan.address);

				mmap_read_unlock(mm);
				mmap_locked = false;
				*result = hpage_collapse_scan_file(mm,
					khugepaged_scan.address, file, pgoff, cc);
				fput(file);
				if (*result == SCAN_PTE_MAPPED_HUGEPAGE) {
					mmap_read_lock(mm);
					if (hpage_collapse_test_exit_or_disable(mm))
						goto breakouterloop;
					*result = collapse_pte_mapped_thp(mm,
						khugepaged_scan.address, false);
					if (*result == SCAN_PMD_MAPPED)
						*result = SCAN_SUCCEED;
					mmap_read_unlock(mm);
				}
			} else {
				*result = hpage_collapse_scan_pmd(mm, vma,
					khugepaged_scan.address, &mmap_locked, cc);
			}

			if (*result == SCAN_SUCCEED)
				++khugepaged_pages_collapsed;

			/* move to next address */
			khugepaged_scan.address += HPAGE_PMD_SIZE;
			progress += HPAGE_PMD_NR;
			if (!mmap_locked)
				/*
				 * We released mmap_lock so break loop. Note
				 * that we drop mmap_lock before all hugepage
				 * allocations, so if allocation fails, we are
				 * guaranteed to break here and report the
				 * correct result back to caller.
				 */
				goto breakouterloop_mmap_lock;
			if (progress >= pages)
				goto breakouterloop;
		}
	}
breakouterloop:
	mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
breakouterloop_mmap_lock:

	spin_lock(&khugepaged_mm_lock);
	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
	/*
	 * Release the current mm_slot if this mm is about to die, or
	 * if we scanned all vmas of this mm.
	 */
	if (hpage_collapse_test_exit(mm) || !vma) {
		/*
		 * Make sure that if mm_users is reaching zero while
		 * khugepaged runs here, khugepaged_exit will find
		 * mm_slot not pointing to the exiting mm.
		 */
		if (slot->mm_node.next != &khugepaged_scan.mm_head) {
			slot = list_entry(slot->mm_node.next,
					  struct mm_slot, mm_node);
			khugepaged_scan.mm_slot =
				mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
			khugepaged_scan.address = 0;
		} else {
			khugepaged_scan.mm_slot = NULL;
			khugepaged_full_scans++;
		}

		collect_mm_slot(mm_slot);
	}

	return progress;
}
static int khugepaged_has_work(void)
{
	return !list_empty(&khugepaged_scan.mm_head) &&
		hugepage_flags_enabled();
}

static int khugepaged_wait_event(void)
{
	return !list_empty(&khugepaged_scan.mm_head) ||
		kthread_should_stop();
}
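
/*
 * One full scan pass of the khugepaged thread: drain the LRU caches, then
 * repeatedly call khugepaged_scan_mm_slot() under khugepaged_mm_lock until
 * khugepaged_pages_to_scan PTEs have been covered, the mm list has been
 * passed over twice, or a hugepage allocation failure forces a back-off.
 */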
static void khugepaged_do_scan(struct collapse_control *cc)
{
	unsigned int progress = 0, pass_through_head = 0;
	unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
	bool wait = true;
	int result = SCAN_SUCCEED;

	lru_add_drain_all();

	while (true) {
		cond_resched();

		if (unlikely(kthread_should_stop()))
			break;

		spin_lock(&khugepaged_mm_lock);
		if (!khugepaged_scan.mm_slot)
			pass_through_head++;
		if (khugepaged_has_work() &&
		    pass_through_head < 2)
			progress += khugepaged_scan_mm_slot(pages - progress,
							    &result, cc);
		else
			progress = pages;
		spin_unlock(&khugepaged_mm_lock);

		if (progress >= pages)
			break;

		if (result == SCAN_ALLOC_HUGE_PAGE_FAIL) {
			/*
			 * If fail to allocate the first time, try to sleep for
			 * a while.  When hit again, cancel the scan.
			 */
			if (!wait)
				break;
			wait = false;
			khugepaged_alloc_sleep();
		}
	}
}
static bool khugepaged_should_wakeup(void)
{
	return kthread_should_stop() ||
	       time_after_eq(jiffies, khugepaged_sleep_expire);
}
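
/*
 * Sleep between scan passes: if there is pending work, nap for
 * scan_sleep_millisecs (a freezable wait); otherwise block until an mm is
 * registered for scanning or the thread is asked to stop.
 */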
static void khugepaged_wait_work(void)
{
	if (khugepaged_has_work()) {
		const unsigned long scan_sleep_jiffies =
			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);

		if (!scan_sleep_jiffies)
			return;

		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
		wait_event_freezable_timeout(khugepaged_wait,
					     khugepaged_should_wakeup(),
					     scan_sleep_jiffies);
		return;
	}

	if (hugepage_flags_enabled())
		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
}
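
/*
 * Main loop of the khugepaged kernel thread: alternate between a scan pass
 * and a sleep until kthread_stop() is called, then detach from whatever
 * mm_slot the scan cursor was left on.
 */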
static int khugepaged(void *none)
{
	struct khugepaged_mm_slot *mm_slot;

	set_freezable();
	set_user_nice(current, MAX_NICE);

	while (!kthread_should_stop()) {
		khugepaged_do_scan(&khugepaged_collapse_control);
		khugepaged_wait_work();
	}

	spin_lock(&khugepaged_mm_lock);
	mm_slot = khugepaged_scan.mm_slot;
	khugepaged_scan.mm_slot = NULL;
	if (mm_slot)
		collect_mm_slot(mm_slot);
	spin_unlock(&khugepaged_mm_lock);
	return 0;
}
static void set_recommended_min_free_kbytes(void)
{
	struct zone *zone;
	int nr_zones = 0;
	unsigned long recommended_min;

	if (!hugepage_flags_enabled()) {
		calculate_min_free_kbytes();
		goto update_wmarks;
	}

	for_each_populated_zone(zone) {
		/*
		 * We don't need to worry about fragmentation of
		 * ZONE_MOVABLE since it only has movable pages.
		 */
		if (zone_idx(zone) > gfp_zone(GFP_USER))
			continue;

		nr_zones++;
	}

	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type, one for a migratetype to fall back to and a
	 * second to avoid subsequent fallbacks of other types.  There are 3
	 * MIGRATE_TYPES we care about.
	 */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
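
	/*
	 * Illustrative example (assumes 4KiB pages, 512-page (2MiB)
	 * pageblocks and MIGRATE_PCPTYPES == 3): with two populated zones
	 * this is 512 * 2 * 2 + 512 * 2 * 3 * 3 = 11264 pages, i.e. 45056kB
	 * once converted to kilobytes below, before the 5%-of-lowmem cap.
	 */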
	/* don't ever allow to reserve more than 5% of the lowmem */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
	recommended_min <<= (PAGE_SHIFT-10);

	if (recommended_min > min_free_kbytes) {
		if (user_min_free_kbytes >= 0)
			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
				min_free_kbytes, recommended_min);

		min_free_kbytes = recommended_min;
	}

update_wmarks:
	setup_per_zone_wmarks();
}
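
/*
 * Start or stop the khugepaged thread to match the current THP "enabled"
 * setting, and refresh the min_free_kbytes recommendation either way.
 * Serialised by khugepaged_mutex.
 */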
int start_stop_khugepaged(void)
{
	int err = 0;

	mutex_lock(&khugepaged_mutex);
	if (hugepage_flags_enabled()) {
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (IS_ERR(khugepaged_thread)) {
			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
			goto fail;
		}

		if (!list_empty(&khugepaged_scan.mm_head))
			wake_up_interruptible(&khugepaged_wait);
	} else if (khugepaged_thread) {
		kthread_stop(khugepaged_thread);
		khugepaged_thread = NULL;
	}
	set_recommended_min_free_kbytes();
fail:
	mutex_unlock(&khugepaged_mutex);
	return err;
}
void khugepaged_min_free_kbytes_update(void)
{
	mutex_lock(&khugepaged_mutex);
	if (hugepage_flags_enabled() && khugepaged_thread)
		set_recommended_min_free_kbytes();
	mutex_unlock(&khugepaged_mutex);
}
bool current_is_khugepaged(void)
{
	return kthread_func(current) == khugepaged;
}
static int madvise_collapse_errno(enum scan_result r)
{
	/*
	 * MADV_COLLAPSE breaks from existing madvise(2) conventions to provide
	 * actionable feedback to caller, so they may take an appropriate
	 * fallback measure depending on the nature of the failure.
	 */
	switch (r) {
	case SCAN_ALLOC_HUGE_PAGE_FAIL:
		return -ENOMEM;
	case SCAN_CGROUP_CHARGE_FAIL:
	case SCAN_EXCEED_NONE_PTE:
		return -EBUSY;
	/* Resource temporarily unavailable - trying again might succeed */
	case SCAN_PAGE_COUNT:
	case SCAN_PAGE_LOCK:
	case SCAN_PAGE_LRU:
	case SCAN_DEL_PAGE_LRU:
	case SCAN_PAGE_FILLED:
		return -EAGAIN;
	/*
	 * Other: Trying again likely not to succeed / error intrinsic to
	 * specified memory range. khugepaged likely won't be able to collapse
	 * either.
	 */
	default:
		return -EINVAL;
	}
}
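
/*
 * Illustrative only (not part of this file): a userspace caller of
 * MADV_COLLAPSE might retry transient failures, since -EAGAIN maps from
 * the "temporarily unavailable" results above while -EINVAL indicates a
 * failure intrinsic to the range:
 *
 *	if (madvise(addr, len, MADV_COLLAPSE) && errno == EAGAIN)
 *		(void)madvise(addr, len, MADV_COLLAPSE);
 */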
int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
		     unsigned long start, unsigned long end)
{
	struct collapse_control *cc;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long hstart, hend, addr;
	int thps = 0, last_fail = SCAN_FAIL;
	bool mmap_locked = true;

	BUG_ON(vma->vm_start > start);
	BUG_ON(vma->vm_end < end);

	*prev = vma;

	if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
				     PMD_ORDER))
		return -EINVAL;

	cc = kmalloc(sizeof(*cc), GFP_KERNEL);
	if (!cc)
		return -ENOMEM;
	cc->is_khugepaged = false;

	mmgrab(mm);
	lru_add_drain_all();

	hstart = (start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = end & HPAGE_PMD_MASK;

	for (addr = hstart; addr < hend; addr += HPAGE_PMD_SIZE) {
		int result = SCAN_FAIL;

		if (!mmap_locked) {
			cond_resched();
			mmap_read_lock(mm);
			mmap_locked = true;
			result = hugepage_vma_revalidate(mm, addr, false, &vma,
							 cc);
			if (result != SCAN_SUCCEED) {
				last_fail = result;
				goto out_nolock;
			}

			hend = min(hend, vma->vm_end & HPAGE_PMD_MASK);
		}
		mmap_assert_locked(mm);
		memset(cc->node_load, 0, sizeof(cc->node_load));
		nodes_clear(cc->alloc_nmask);
		if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
			struct file *file = get_file(vma->vm_file);
			pgoff_t pgoff = linear_page_index(vma, addr);

			mmap_read_unlock(mm);
			mmap_locked = false;
			result = hpage_collapse_scan_file(mm, addr, file, pgoff,
							  cc);
			fput(file);
		} else {
			result = hpage_collapse_scan_pmd(mm, vma, addr,
							 &mmap_locked, cc);
		}
		if (!mmap_locked)
			*prev = NULL;  /* Tell caller we dropped mmap_lock */

handle_result:
		switch (result) {
		case SCAN_SUCCEED:
		case SCAN_PMD_MAPPED:
			++thps;
			break;
		case SCAN_PTE_MAPPED_HUGEPAGE:
			BUG_ON(mmap_locked);
			BUG_ON(*prev);
			mmap_read_lock(mm);
			result = collapse_pte_mapped_thp(mm, addr, true);
			mmap_read_unlock(mm);
			goto handle_result;
		/* Whitelisted set of results where continuing OK */
		case SCAN_PMD_NULL:
		case SCAN_PTE_NON_PRESENT:
		case SCAN_PTE_UFFD_WP:
		case SCAN_PAGE_RO:
		case SCAN_LACK_REFERENCED_PAGE:
		case SCAN_PAGE_NULL:
		case SCAN_PAGE_COUNT:
		case SCAN_PAGE_LOCK:
		case SCAN_PAGE_COMPOUND:
		case SCAN_PAGE_LRU:
		case SCAN_DEL_PAGE_LRU:
			last_fail = result;
			break;
		default:
			last_fail = result;
			/* Other error, exit */
			goto out_maybelock;
		}
	}

out_maybelock:
	/* Caller expects us to hold mmap_lock on return */
	if (!mmap_locked)
		mmap_read_lock(mm);
out_nolock:
	mmap_assert_locked(mm);
	mmdrop(mm);
	kfree(cc);

	return thps == ((hend - hstart) >> HPAGE_PMD_SHIFT) ? 0
			: madvise_collapse_errno(last_fail);
}