// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/page_table_check.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>

#include <asm/pgalloc.h>
	SCAN_EXCEED_SHARED_PTE,
	SCAN_PTE_MAPPED_HUGEPAGE,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_PAGE_HAS_PRIVATE,

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>
static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);

/* default scan 8*512 pte (or vmas) every 30 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * default collapse hugepages if there is at least one pte mapped like
 * it would have happened if the vma was large enough during page
 * fault.
 *
 * Note that these are only respected if collapse was initiated by khugepaged.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
static unsigned int khugepaged_max_ptes_shared __read_mostly;
#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

#define MAX_PTE_MAPPED_THP 8

struct collapse_control {
	/* Num pages scanned per node */
	u32 node_load[MAX_NUMNODES];

	/* nodemask for allocation fallback */
	nodemask_t alloc_nmask;
/**
 * struct khugepaged_mm_slot - khugepaged information per mm that is being scanned
 * @slot: hash lookup from mm to mm_slot
 * @nr_pte_mapped_thp: number of pte mapped THP
 * @pte_mapped_thp: address array corresponding pte mapped THP
 */
struct khugepaged_mm_slot {
	/* pte-mapped THP in this mm */
	int nr_pte_mapped_thp;
	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct khugepaged_mm_slot *mm_slot;
	unsigned long address;

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr, char *buf)
	return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
	err = kstrtouint(buf, 10, &msecs);
	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR_RW(scan_sleep_millisecs);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
	return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
	err = kstrtouint(buf, 10, &msecs);
	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR_RW(alloc_sleep_millisecs);
static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
	return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);

static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
	err = kstrtouint(buf, 10, &pages);
	khugepaged_pages_to_scan = pages;

static struct kobj_attribute pages_to_scan_attr =
	__ATTR_RW(pages_to_scan);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
	return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);

static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
	return sysfs_emit(buf, "%u\n", khugepaged_full_scans);

static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);
static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);

static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
	return single_hugepage_flag_store(kobj, attr, buf, count,
					  TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);

static struct kobj_attribute khugepaged_defrag_attr =
/*
 * max_ptes_none controls if khugepaged should collapse hugepages over
 * any unmapped ptes in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0 khugepaged will not
 * reduce the available free memory in the system as it
 * runs. Increasing max_ptes_none will instead potentially reduce the
 * free memory in the system during the khugepaged scan.
 */
static ssize_t max_ptes_none_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);

static ssize_t max_ptes_none_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR - 1)
	khugepaged_max_ptes_none = max_ptes_none;

static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR_RW(max_ptes_none);
static ssize_t max_ptes_swap_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);

static ssize_t max_ptes_swap_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
	khugepaged_max_ptes_swap = max_ptes_swap;

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR_RW(max_ptes_swap);
static ssize_t max_ptes_shared_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);

static ssize_t max_ptes_shared_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
	unsigned long max_ptes_shared;

	err = kstrtoul(buf, 10, &max_ptes_shared);
	if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
	khugepaged_max_ptes_shared = max_ptes_shared;

static struct kobj_attribute khugepaged_max_ptes_shared_attr =
	__ATTR_RW(max_ptes_shared);
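/*
 * For reference, khugepaged_init() further down derives the defaults from
 * HPAGE_PMD_NR: assuming a 512-entry PMD, max_ptes_none defaults to 511,
 * max_ptes_swap to 64, max_ptes_shared to 256 and pages_to_scan to 4096.
 */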
static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	&khugepaged_max_ptes_shared_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
#endif /* CONFIG_SYSFS */
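/*
 * With CONFIG_SYSFS enabled these tunables appear under
 * /sys/kernel/mm/transparent_hugepage/khugepaged/, e.g.
 *   echo 100 > /sys/kernel/mm/transparent_hugepage/khugepaged/scan_sleep_millisecs
 * shortens the scan interval from the default 10 seconds to 100 ms.
 */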
int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))

		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes good for khugepaged to scan,
		 * register it here without waiting for a page fault that
		 * may not happen any time soon.
		 */
		khugepaged_enter_vma(vma, *vm_flags);
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
int __init khugepaged_init(void)
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct khugepaged_mm_slot),
					  __alignof__(struct khugepaged_mm_slot),

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;

void __init khugepaged_destroy(void)
	kmem_cache_destroy(mm_slot_cache);
static inline int hpage_collapse_test_exit(struct mm_struct *mm)
	return atomic_read(&mm->mm_users) == 0;
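/*
 * mm_users reaching zero means every user of this address space has called
 * mmput() and exit_mmap() is (or soon will be) tearing it down; the scan and
 * collapse paths recheck this helper before touching the mm's page tables.
 */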
void __khugepaged_enter(struct mm_struct *mm)
	struct khugepaged_mm_slot *mm_slot;
	struct mm_slot *slot;

	mm_slot = mm_slot_alloc(mm_slot_cache);

	slot = &mm_slot->slot;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		mm_slot_free(mm_slot_cache, mm_slot);

	spin_lock(&khugepaged_mm_lock);
	mm_slot_insert(mm_slots_hash, mm, slot);
	/* Insert just behind the scanning cursor, to let the area settle */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	wake_up_interruptible(&khugepaged_wait);
void khugepaged_enter_vma(struct vm_area_struct *vma,
			  unsigned long vm_flags)
	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
	    hugepage_flags_enabled()) {
		if (hugepage_vma_check(vma, vm_flags, false, false, true))
			__khugepaged_enter(vma->vm_mm);
void __khugepaged_exit(struct mm_struct *mm)
	struct khugepaged_mm_slot *mm_slot;
	struct mm_slot *slot;

	spin_lock(&khugepaged_mm_lock);
	slot = mm_slot_lookup(mm_slots_hash, mm);
	mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&slot->hash);
		list_del(&slot->mm_node);
	spin_unlock(&khugepaged_mm_lock);

		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		mm_slot_free(mm_slot_cache, mm_slot);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * hpage_collapse_test_exit() (which is guaranteed to run
		 * under mmap sem read mode). Stop here (after we return all
		 * pagetables will be destroyed) until khugepaged has finished
		 * working on the pagetables under the mmap_lock.
		 */
		mmap_write_unlock(mm);
static void release_pte_folio(struct folio *folio)
	node_stat_mod_folio(folio,
			    NR_ISOLATED_ANON + folio_is_file_lru(folio),
			    -folio_nr_pages(folio));
	folio_putback_lru(folio);

static void release_pte_page(struct page *page)
	release_pte_folio(page_folio(page));

static void release_pte_pages(pte_t *pte, pte_t *_pte,
			      struct list_head *compound_pagelist)
	struct folio *folio, *tmp;

	while (--_pte >= pte) {
		pte_t pteval = *_pte;

		if (pte_none(pteval))
		pfn = pte_pfn(pteval);
		if (is_zero_pfn(pfn))
		folio = pfn_folio(pfn);
		if (folio_test_large(folio))
		release_pte_folio(folio);

	list_for_each_entry_safe(folio, tmp, compound_pagelist, lru) {
		list_del(&folio->lru);
		release_pte_folio(folio);
static bool is_refcount_suitable(struct page *page)
	int expected_refcount;

	expected_refcount = total_mapcount(page);
	if (PageSwapCache(page))
		expected_refcount += compound_nr(page);

	return page_count(page) == expected_refcount;
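/*
 * Example: an order-0 page mapped by two processes has total_mapcount() == 2;
 * if it also sits in the swap cache, the cache holds one extra reference, so
 * the expected page_count() is 3. Any higher count means an external pin
 * (e.g. GUP) and the page is not suitable for collapse.
 */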
static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					struct collapse_control *cc,
					struct list_head *compound_pagelist)
	struct page *page = NULL;
	int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
	bool writable = false;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			if (!userfaultfd_armed(vma) &&
			    (!cc->is_khugepaged ||
			     none_or_zero <= khugepaged_max_ptes_none)) {
				result = SCAN_EXCEED_NONE_PTE;
				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
			result = SCAN_PAGE_NULL;

		VM_BUG_ON_PAGE(!PageAnon(page), page);

		if (page_mapcount(page) > 1) {
			if (cc->is_khugepaged &&
			    shared > khugepaged_max_ptes_shared) {
				result = SCAN_EXCEED_SHARED_PTE;
				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);

		if (PageCompound(page)) {
			page = compound_head(page);
			/* Check if we have dealt with the compound page */
			list_for_each_entry(p, compound_pagelist, lru) {

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			result = SCAN_PAGE_LOCK;

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * The page table that maps the page has been already unlinked
		 * from the page table tree and this process cannot get
		 * an additional pin on the page.
		 *
		 * New pins can come later if the page is shared across fork,
		 * but not from this process. The other process cannot write to
		 * the page, only trigger CoW.
		 */
		if (!is_refcount_suitable(page)) {
			result = SCAN_PAGE_COUNT;

		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (!isolate_lru_page(page)) {
			result = SCAN_DEL_PAGE_LRU;
		mod_node_page_state(page_pgdat(page),
				    NR_ISOLATED_ANON + page_is_file_lru(page),
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);

		if (PageCompound(page))
			list_add_tail(&page->lru, compound_pagelist);

		/*
		 * If collapse was initiated by khugepaged, check that there is
		 * enough young pte to justify collapsing the page.
		 */
		if (cc->is_khugepaged &&
		    (pte_young(pteval) || page_is_young(page) ||
		     PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,

		if (pte_write(pteval))

	if (unlikely(!writable)) {
		result = SCAN_PAGE_RO;
	} else if (unlikely(cc->is_khugepaged && !referenced)) {
		result = SCAN_LACK_REFERENCED_PAGE;
		result = SCAN_SUCCEED;
		trace_mm_collapse_huge_page_isolate(page, none_or_zero,
						    referenced, writable, result);

	release_pte_pages(pte, _pte, compound_pagelist);
	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
					    referenced, writable, result);
static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      struct list_head *compound_pagelist)
	struct page *src_page, *tmp;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, page++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;

		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/* ptl mostly unnecessary. */
				ptep_clear(vma->vm_mm, address, _pte);
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			if (!PageCompound(src_page))
				release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			ptep_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, vma, false);
			free_page_and_swap_cache(src_page);

	list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
		list_del(&src_page->lru);
		mod_node_page_state(page_pgdat(src_page),
				    NR_ISOLATED_ANON + page_is_file_lru(src_page),
				    -compound_nr(src_page));
		unlock_page(src_page);
		free_swap_cache(src_page);
		putback_lru_page(src_page);
static void khugepaged_alloc_sleep(void)
	add_wait_queue(&khugepaged_wait, &wait);
	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
	schedule_timeout(msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);

struct collapse_control khugepaged_collapse_control = {
	.is_khugepaged = true,
static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_enabled())

	/* If there is a count for this node already, it must be acceptable */
	if (cc->node_load[nid])

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!cc->node_load[i])
		if (node_distance(nid, i) > node_reclaim_distance)
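/*
 * In other words: once some node has pages counted in this scan, a candidate
 * page on a node farther away than node_reclaim_distance (RECLAIM_DISTANCE,
 * typically 30) aborts the scan rather than building a hugepage that mixes
 * memory from distant nodes.
 */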
#define khugepaged_defrag()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))

/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
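/*
 * GFP_TRANSHUGE permits direct reclaim and compaction while assembling the
 * huge page, whereas GFP_TRANSHUGE_LIGHT gives up quickly; khugepaged only
 * pays for expensive defragmentation when the "defrag" flag above is set.
 */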
static int hpage_collapse_find_target_node(struct collapse_control *cc)
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (cc->node_load[nid] > max_value) {
			max_value = cc->node_load[nid];

	for_each_online_node(nid) {
		if (max_value == cc->node_load[nid])
			node_set(nid, cc->alloc_nmask);

/* !CONFIG_NUMA variant */
static int hpage_collapse_find_target_node(struct collapse_control *cc)

static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,
	*hpage = __alloc_pages(gfp, HPAGE_PMD_ORDER, node, nmask);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
/*
 * If mmap_lock temporarily dropped, revalidate vma
 * before taking mmap_lock.
 * Returns enum scan_result value.
 */
static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
				   struct vm_area_struct **vmap,
				   struct collapse_control *cc)
	struct vm_area_struct *vma;

	if (unlikely(hpage_collapse_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
		return SCAN_VMA_NULL;

	if (!transhuge_vma_suitable(vma, address))
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma, vma->vm_flags, false, false,
		return SCAN_VMA_CHECK;
	/*
	 * Anon VMA expected, the address may be unmapped then
	 * remapped to file after khugepaged reacquired the mmap_lock.
	 *
	 * hugepage_vma_check may return true for qualified file
	 * vmas.
	 */
	if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
		return SCAN_PAGE_ANON;
/*
 * See pmd_trans_unstable() for how the result may change out from
 * underneath us, even if we hold mmap_lock in read.
 */
static int find_pmd_or_thp_or_none(struct mm_struct *mm,
				   unsigned long address,
	*pmd = mm_find_pmd(mm, address);
		return SCAN_PMD_NULL;

	pmde = pmdp_get_lockless(*pmd);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	/* See comments in pmd_none_or_trans_huge_or_clear_bad() */
		return SCAN_PMD_NONE;
	if (!pmd_present(pmde))
		return SCAN_PMD_NULL;
	if (pmd_trans_huge(pmde))
		return SCAN_PMD_MAPPED;
	if (pmd_devmap(pmde))
		return SCAN_PMD_NULL;
		return SCAN_PMD_NULL;
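/*
 * A SCAN_SUCCEED return from the helper above therefore means "a present,
 * non-huge PMD that points at a page table", i.e. the range is still in a
 * state where the individual PTEs can be inspected and collapsed.
 */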
static int check_pmd_still_valid(struct mm_struct *mm,
				 unsigned long address,
	int result = find_pmd_or_thp_or_none(mm, address, &new_pmd);

	if (result != SCAN_SUCCEED)
/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if hpage_collapse_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held.
 * Note that if false is returned, mmap_lock will be released.
 */
static int __collapse_huge_page_swapin(struct mm_struct *mm,
				       struct vm_area_struct *vma,
				       unsigned long haddr, pmd_t *pmd,
	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);

	for (address = haddr; address < end; address += PAGE_SIZE) {
		struct vm_fault vmf = {
			.pgoff = linear_page_index(vma, haddr),
			.flags = FAULT_FLAG_ALLOW_RETRY,

		vmf.pte = pte_offset_map(pmd, address);
		vmf.orig_pte = *vmf.pte;
		if (!is_swap_pte(vmf.orig_pte)) {

		ret = do_swap_page(&vmf);

		/*
		 * do_swap_page returns VM_FAULT_RETRY with released mmap_lock.
		 * Note we treat VM_FAULT_RETRY as VM_FAULT_ERROR here because
		 * we do not retry here and swap entry will remain in pagetable
		 * resulting in later failure.
		 */
		if (ret & VM_FAULT_RETRY) {
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			/* Likely, but not guaranteed, that page lock failed */
			return SCAN_PAGE_LOCK;
		if (ret & VM_FAULT_ERROR) {
			mmap_read_unlock(mm);
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);

	/* Drain LRU add pagevec to remove extra pin on the swapped in pages */

	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
			      struct collapse_control *cc)
	gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
	int node = hpage_collapse_find_target_node(cc);

	if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask))
		return SCAN_ALLOC_HUGE_PAGE_FAIL;
	if (unlikely(mem_cgroup_charge(page_folio(*hpage), mm, gfp)))
		return SCAN_CGROUP_CHARGE_FAIL;
	count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);
static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
			      int referenced, int unmapped,
			      struct collapse_control *cc)
	LIST_HEAD(compound_pagelist);
	spinlock_t *pmd_ptl, *pte_ptl;
	int result = SCAN_FAIL;
	struct vm_area_struct *vma;
	struct mmu_notifier_range range;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/*
	 * Before allocating the hugepage, release the mmap_lock read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_lock during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	mmap_read_unlock(mm);

	result = alloc_charge_hpage(&hpage, mm, cc);
	if (result != SCAN_SUCCEED)

	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
	if (result != SCAN_SUCCEED) {
		mmap_read_unlock(mm);

	result = find_pmd_or_thp_or_none(mm, address, &pmd);
	if (result != SCAN_SUCCEED) {
		mmap_read_unlock(mm);

		/*
		 * __collapse_huge_page_swapin will return with mmap_lock
		 * released when it fails. So we jump out_nolock directly in
		 * that case. Continuing to collapse causes inconsistency.
		 */
		result = __collapse_huge_page_swapin(mm, vma, address, pmd,
		if (result != SCAN_SUCCEED)

	mmap_read_unlock(mm);
	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
	mmap_write_lock(mm);
	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
	if (result != SCAN_SUCCEED)
	/* check if the pmd is still valid */
	result = check_pmd_still_valid(mm, address, pmd);
	if (result != SCAN_SUCCEED)

	anon_vma_lock_write(vma->anon_vma);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address,
				address + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	pte = pte_offset_map(pmd, address);
	pte_ptl = pte_lockptr(mm, pmd);

	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * This removes any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address to
	 * avoid the risk of CPU bugs in that area.
	 *
	 * Parallel fast GUP is fine since fast GUP will back off when
	 * it detects PMD is changed.
	 */
	_pmd = pmdp_collapse_flush(vma, address, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(&range);
	tlb_remove_table_sync_one();

	result = __collapse_huge_page_isolate(vma, address, pte, cc,
					      &compound_pagelist);
	spin_unlock(pte_ptl);

	if (unlikely(result != SCAN_SUCCEED)) {
		BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * point to regular pagetables. Use pmd_populate for that
		 */
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock_write(vma->anon_vma);

	__collapse_huge_page_copy(pte, hpage, vma, address, pte_ptl,
				  &compound_pagelist);
	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), but
	 * the smp_wmb() inside __SetPageUptodate() can be reused to
	 * avoid the copy_huge_page writes to become visible after
	 * the set_pmd_at() write.
	 */
	__SetPageUptodate(hpage);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(hpage, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);

	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(hpage, vma, address);
	lru_cache_add_inactive_or_unevictable(hpage, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);

	result = SCAN_SUCCEED;
	mmap_write_unlock(mm);
	mem_cgroup_uncharge(page_folio(hpage));
	trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
static int hpage_collapse_scan_pmd(struct mm_struct *mm,
				   struct vm_area_struct *vma,
				   unsigned long address, bool *mmap_locked,
				   struct collapse_control *cc)
	int result = SCAN_FAIL, referenced = 0;
	int none_or_zero = 0, shared = 0;
	struct page *page = NULL;
	unsigned long _address;
	int node = NUMA_NO_NODE, unmapped = 0;
	bool writable = false;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	result = find_pmd_or_thp_or_none(mm, address, &pmd);
	if (result != SCAN_SUCCEED)

	memset(cc->node_load, 0, sizeof(cc->node_load));
	nodes_clear(cc->alloc_nmask);
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (is_swap_pte(pteval)) {
			if (!cc->is_khugepaged ||
			    unmapped <= khugepaged_max_ptes_swap) {
				/*
				 * Always be strict with uffd-wp
				 * enabled swap entries. Please see
				 * comment below for pte_uffd_wp().
				 */
				if (pte_swp_uffd_wp(pteval)) {
					result = SCAN_PTE_UFFD_WP;
				result = SCAN_EXCEED_SWAP_PTE;
				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			if (!userfaultfd_armed(vma) &&
			    (!cc->is_khugepaged ||
			     none_or_zero <= khugepaged_max_ptes_none)) {
				result = SCAN_EXCEED_NONE_PTE;
				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
		if (pte_uffd_wp(pteval)) {
			/*
			 * Don't collapse the page if any of the small
			 * PTEs are armed with uffd write protection.
			 * Here we can also mark the new huge pmd as
			 * write protected if any of the small ones is
			 * marked but that could bring unknown
			 * userfault messages that falls outside of
			 * the registered range. So, just be simple.
			 */
			result = SCAN_PTE_UFFD_WP;
		if (pte_write(pteval))

		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
			result = SCAN_PAGE_NULL;

		if (page_mapcount(page) > 1) {
			if (cc->is_khugepaged &&
			    shared > khugepaged_max_ptes_shared) {
				result = SCAN_EXCEED_SHARED_PTE;
				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);

		page = compound_head(page);

		/*
		 * Record which node the original page is from and save this
		 * information to cc->node_load[].
		 * Khugepaged will allocate hugepage from the node has the max
		 * hit record.
		 */
		node = page_to_nid(page);
		if (hpage_collapse_scan_abort(node, cc)) {
			result = SCAN_SCAN_ABORT;
		cc->node_load[node]++;
		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
		if (PageLocked(page)) {
			result = SCAN_PAGE_LOCK;
		if (!PageAnon(page)) {
			result = SCAN_PAGE_ANON;

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * Here the check may be racy:
		 * it may see total_mapcount > refcount in some cases?
		 * But such case is ephemeral we could always retry collapse
		 * later. However it may report false positive if the page
		 * has excessive GUP pins (i.e. 512). Anyway the same check
		 * will be done again later the risk seems low.
		 */
		if (!is_refcount_suitable(page)) {
			result = SCAN_PAGE_COUNT;

		/*
		 * If collapse was initiated by khugepaged, check that there is
		 * enough young pte to justify collapsing the page.
		 */
		if (cc->is_khugepaged &&
		    (pte_young(pteval) || page_is_young(page) ||
		     PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,

		result = SCAN_PAGE_RO;
	} else if (cc->is_khugepaged &&
		   (unmapped && referenced < HPAGE_PMD_NR / 2))) {
		result = SCAN_LACK_REFERENCED_PAGE;
		result = SCAN_SUCCEED;

	pte_unmap_unlock(pte, ptl);
	if (result == SCAN_SUCCEED) {
		result = collapse_huge_page(mm, address, referenced,
		/* collapse_huge_page will return with the mmap_lock released */
		*mmap_locked = false;

	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
				     none_or_zero, result, unmapped);
static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
	struct mm_slot *slot = &mm_slot->slot;
	struct mm_struct *mm = slot->mm;

	lockdep_assert_held(&khugepaged_mm_lock);

	if (hpage_collapse_test_exit(mm)) {
		hash_del(&slot->hash);
		list_del(&slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		mm_slot_free(mm_slot_cache, mm_slot);
/*
 * Notify khugepaged that given addr of the mm is pte-mapped THP. Then
 * khugepaged should try to collapse the page table.
 *
 * Note that following race exists:
 * (1) khugepaged calls khugepaged_collapse_pte_mapped_thps() for mm_struct A,
 *     emptying the A's ->pte_mapped_thp[] array.
 * (2) MADV_COLLAPSE collapses some file extent with target mm_struct B, and
 *     retract_page_tables() finds a VMA in mm_struct A mapping the same extent
 *     (at virtual address X) and adds an entry (for X) into mm_struct A's
 *     ->pte-mapped_thp[] array.
 * (3) khugepaged calls khugepaged_collapse_scan_file() for mm_struct A at X,
 *     sees a pte-mapped THP (SCAN_PTE_MAPPED_HUGEPAGE) and adds an entry
 *     (for X) into mm_struct A's ->pte-mapped_thp[] array.
 * Thus, it's possible the same address is added multiple times for the same
 * mm_struct. Should this happen, we'll simply attempt
 * collapse_pte_mapped_thp() multiple times for the same address, under the same
 * exclusive mmap_lock, and assuming the first call is successful, subsequent
 * attempts will return quickly (without grabbing any additional locks) when
 * a huge pmd is found in find_pmd_or_thp_or_none(). Since this is a cheap
 * check, and since this is a rare occurrence, the cost of preventing this
 * "multiple-add" is thought to be more expensive than just handling it, should
 * it occur.
 */
static bool khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
	struct khugepaged_mm_slot *mm_slot;
	struct mm_slot *slot;

	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);

	spin_lock(&khugepaged_mm_lock);
	slot = mm_slot_lookup(mm_slots_hash, mm);
	mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
	if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP)) {
		mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;

	spin_unlock(&khugepaged_mm_lock);
/* hpage must be locked, and mmap_lock must be held in write */
static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
			pmd_t *pmdp, struct page *hpage)
	struct vm_fault vmf = {

	VM_BUG_ON(!PageTransHuge(hpage));
	mmap_assert_write_locked(vma->vm_mm);

	if (do_set_pmd(&vmf, hpage))

	return SCAN_SUCCEED;
/*
 * A note about locking:
 * Trying to take the page table spinlocks would be useless here because those
 * are only used to synchronize:
 *
 *  - modifying terminal entries (ones that point to a data page, not to another
 *    page table)
 *  - installing *new* non-terminal entries
 *
 * Instead, we need roughly the same kind of protection as free_pgtables() or
 * mm_take_all_locks() (but only for a single VMA):
 * The mmap lock together with this VMA's rmap locks covers all paths towards
 * the page table entries we're messing with here, except for hardware page
 * table walks and lockless_pages_from_mm().
 */
static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
				  unsigned long addr, pmd_t *pmdp)
	struct mmu_notifier_range range;

	mmap_assert_write_locked(mm);
		lockdep_assert_held_write(&vma->vm_file->f_mapping->i_mmap_rwsem);
	/*
	 * All anon_vmas attached to the VMA have the same root and are
	 * therefore locked by the same lock.
	 */
		lockdep_assert_held_write(&vma->anon_vma->root->rwsem);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
				addr + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);
	pmd = pmdp_collapse_flush(vma, addr, pmdp);
	tlb_remove_table_sync_one();
	mmu_notifier_invalidate_range_end(&range);

	page_table_check_pte_clear_range(mm, addr, pmd);
	pte_free(mm, pmd_pgtable(pmd));
/**
 * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
 *			     address haddr.
 *
 * @mm: process address space where collapse happens
 * @addr: THP collapse address
 * @install_pmd: If a huge PMD should be installed
 *
 * This function checks whether all the PTEs in the PMD are pointing to the
 * right THP. If so, retract the page table so the THP can refault in as
 * pmd-mapped. Possibly install a huge PMD mapping the THP.
 */
int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
	unsigned long haddr = addr & HPAGE_PMD_MASK;
	struct vm_area_struct *vma = vma_lookup(mm, haddr);
	pte_t *start_pte, *pte;
	int count = 0, result = SCAN_FAIL;

	mmap_assert_write_locked(mm);

	/* Fast check before locking page if already PMD-mapped */
	result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
	if (result == SCAN_PMD_MAPPED)

	if (!vma || !vma->vm_file ||
	    !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
		return SCAN_VMA_CHECK;

	/*
	 * If we are here, we've succeeded in replacing all the native pages
	 * in the page cache with a single hugepage. If a mm were to fault-in
	 * this memory (mapped by a suitably aligned VMA), we'd get the hugepage
	 * and map it by a PMD, regardless of sysfs THP settings. As such, let's
	 * analogously elide sysfs THP settings here.
	 */
	if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
		return SCAN_VMA_CHECK;

	/* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
	if (userfaultfd_wp(vma))
		return SCAN_PTE_UFFD_WP;

	hpage = find_lock_page(vma->vm_file->f_mapping,
			       linear_page_index(vma, haddr));
		return SCAN_PAGE_NULL;

	if (!PageHead(hpage)) {

	if (compound_order(hpage) != HPAGE_PMD_ORDER) {
		result = SCAN_PAGE_COMPOUND;

		/*
		 * In MADV_COLLAPSE path, possible race with khugepaged where
		 * all pte entries have been removed and pmd cleared. If so,
		 * skip all the pte checks and just update the pmd mapping.
		 */
		goto maybe_install_pmd;

	/*
	 * We need to lock the mapping so that from here on, only GUP-fast and
	 * hardware page walks can access the parts of the page tables that
	 * we're operating on.
	 * See collapse_and_free_pmd().
	 */
	i_mmap_lock_write(vma->vm_file->f_mapping);

	/*
	 * This spinlock should be unnecessary: Nobody else should be accessing
	 * the page tables under spinlock protection here, only
	 * lockless_pages_from_mm() and the hardware page walker can access page
	 * tables while all the high-level locks are held in write mode.
	 */
	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);

	/* step 1: check all mapped PTEs are to the right huge page */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		/* empty pte, skip */

		/* page swapped out, abort */
		if (!pte_present(*pte)) {
			result = SCAN_PTE_NON_PRESENT;

		page = vm_normal_page(vma, addr, *pte);
		if (WARN_ON_ONCE(page && is_zone_device_page(page)))

		/*
		 * Note that uprobe, debugger, or MAP_PRIVATE may change the
		 * page table, but the new page will not be a subpage of hpage.
		 */
		if (hpage + i != page)

	/* step 2: adjust rmap */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		page = vm_normal_page(vma, addr, *pte);
		if (WARN_ON_ONCE(page && is_zone_device_page(page)))
		page_remove_rmap(page, vma, false);

	pte_unmap_unlock(start_pte, ptl);

	/* step 3: set proper refcount and mm_counters. */
	page_ref_sub(hpage, count);
	add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);

	/* step 4: remove pte entries */
	/* we make no change to anon, but protect concurrent anon page lookup */
		anon_vma_lock_write(vma->anon_vma);

	collapse_and_free_pmd(mm, vma, haddr, pmd);

		anon_vma_unlock_write(vma->anon_vma);
	i_mmap_unlock_write(vma->vm_file->f_mapping);

	/* step 5: install pmd entry */
	result = install_pmd
			? set_huge_pmd(vma, haddr, pmd, hpage)

	pte_unmap_unlock(start_pte, ptl);
	i_mmap_unlock_write(vma->vm_file->f_mapping);
static void khugepaged_collapse_pte_mapped_thps(struct khugepaged_mm_slot *mm_slot)
	struct mm_slot *slot = &mm_slot->slot;
	struct mm_struct *mm = slot->mm;

	if (likely(mm_slot->nr_pte_mapped_thp == 0))

	if (!mmap_write_trylock(mm))

	if (unlikely(hpage_collapse_test_exit(mm)))

	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
		collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i], false);

	mm_slot->nr_pte_mapped_thp = 0;
	mmap_write_unlock(mm);
static int retract_page_tables(struct address_space *mapping, pgoff_t pgoff,
			       struct mm_struct *target_mm,
			       unsigned long target_addr, struct page *hpage,
			       struct collapse_control *cc)
	struct vm_area_struct *vma;
	int target_result = SCAN_FAIL;

	i_mmap_lock_write(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		int result = SCAN_FAIL;
		struct mm_struct *mm = NULL;
		unsigned long addr = 0;
		bool is_target = false;

		/*
		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
		 * got written to. These VMAs are likely not worth investing
		 * mmap_write_lock(mm) as PMD-mapping is likely to be split
		 * later.
		 *
		 * Note that vma->anon_vma check is racy: it can be set up after
		 * the check but before we took mmap_lock by the fault path.
		 * But page lock would prevent establishing any new ptes of the
		 * page, so we are safe.
		 *
		 * An alternative would be drop the check, but check that page
		 * table is clear before calling pmdp_collapse_flush() under
		 * ptl. It has higher chance to recover THP for the VMA, but
		 * has higher cost too. It would also probably require locking
		 * the anon_vma.
		 */
		if (READ_ONCE(vma->anon_vma)) {
			result = SCAN_PAGE_ANON;

		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		if (addr & ~HPAGE_PMD_MASK ||
		    vma->vm_end < addr + HPAGE_PMD_SIZE) {
			result = SCAN_VMA_CHECK;

		is_target = mm == target_mm && addr == target_addr;
		result = find_pmd_or_thp_or_none(mm, addr, &pmd);
		if (result != SCAN_SUCCEED)

		/*
		 * We need exclusive mmap_lock to retract page table.
		 *
		 * We use trylock due to lock inversion: we need to acquire
		 * mmap_lock while holding page lock. Fault path does it in
		 * reverse order. Trylock is a way to avoid deadlock.
		 *
		 * Also, it's not MADV_COLLAPSE's job to collapse other
		 * mappings - let khugepaged take care of them later.
		 */
		result = SCAN_PTE_MAPPED_HUGEPAGE;
		if ((cc->is_khugepaged || is_target) &&
		    mmap_write_trylock(mm)) {
			/*
			 * Re-check whether we have an ->anon_vma, because
			 * collapse_and_free_pmd() requires that either no
			 * ->anon_vma exists or the anon_vma is locked.
			 * We already checked ->anon_vma above, but that check
			 * is racy because ->anon_vma can be populated under the
			 * mmap lock in read mode.
			 */
			if (vma->anon_vma) {
				result = SCAN_PAGE_ANON;

			/*
			 * When a vma is registered with uffd-wp, we can't
			 * recycle the pmd pgtable because there can be pte
			 * markers installed. Skip it only, so the rest mm/vma
			 * can still have the same file mapped hugely, however
			 * it'll always mapped in small page size for uffd-wp
			 * registered ranges.
			 */
			if (hpage_collapse_test_exit(mm)) {
				result = SCAN_ANY_PROCESS;
			if (userfaultfd_wp(vma)) {
				result = SCAN_PTE_UFFD_WP;

			collapse_and_free_pmd(mm, vma, addr, pmd);
			if (!cc->is_khugepaged && is_target)
				result = set_huge_pmd(vma, addr, pmd, hpage);
				result = SCAN_SUCCEED;

			mmap_write_unlock(mm);

		/*
		 * Calling context will handle target mm/addr. Otherwise, let
		 * khugepaged try again later.
		 */
			khugepaged_add_pte_mapped_thp(mm, addr);

			target_result = result;
	i_mmap_unlock_write(mapping);
	return target_result;
/*
 * collapse_file - collapse filemap/tmpfs/shmem pages into huge one.
 *
 * @mm: process address space where collapse happens
 * @addr: virtual collapse start address
 * @file: file that collapse on
 * @start: collapse start address
 * @cc: collapse context and scratchpad
 *
 * Basic scheme is simple, details are more complex:
 *  - allocate and lock a new huge page;
 *  - scan page cache replacing old pages with the new one
 *    + swap/gup in pages if necessary;
 *    + keep old pages around in case rollback is required;
 *  - if replacing succeeds:
 *    + unlock huge page;
 *  - if replacing failed;
 *    + put all pages back and unfreeze them;
 *    + restore gaps in the page cache;
 *    + unlock and free huge page;
 */
static int collapse_file(struct mm_struct *mm, unsigned long addr,
			 struct file *file, pgoff_t start,
			 struct collapse_control *cc)
	struct address_space *mapping = file->f_mapping;
	pgoff_t index = 0, end = start + HPAGE_PMD_NR;
	LIST_HEAD(pagelist);
	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
	int nr_none = 0, result = SCAN_SUCCEED;
	bool is_shmem = shmem_file(file);

	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));

	result = alloc_charge_hpage(&hpage, mm, cc);
	if (result != SCAN_SUCCEED)

	/*
	 * Ensure we have slots for all the pages in the range. This is
	 * almost certainly a no-op because most of the pages must be present.
	 */
		xas_create_range(&xas);
		if (!xas_error(&xas))
		xas_unlock_irq(&xas);
		if (!xas_nomem(&xas, GFP_KERNEL)) {

	__SetPageLocked(hpage);
		__SetPageSwapBacked(hpage);
	hpage->index = start;
	hpage->mapping = mapping;

	/*
	 * At this point the hpage is locked and not up-to-date.
	 * It's safe to insert it into the page cache, because nobody would
	 * be able to map it or use it in another way until we unlock it.
	 */

	xas_set(&xas, start);
	for (index = start; index < end; index++) {
		struct page *page = xas_next(&xas);
		struct folio *folio;

		VM_BUG_ON(index != xas.xa_index);
			/*
			 * Stop if extent has been truncated or
			 * hole-punched, and is now completely
			 * empty.
			 */
			if (index == start) {
				if (!xas_next_entry(&xas, end - 1)) {
					result = SCAN_TRUNCATED;
				xas_set(&xas, index);
			if (!shmem_charge(mapping->host, 1)) {
			xas_store(&xas, hpage);

			if (xa_is_value(page) || !PageUptodate(page)) {
				xas_unlock_irq(&xas);
				/* swap in or instantiate fallocated page */
				if (shmem_get_folio(mapping->host, index,
						    &folio, SGP_NOALLOC)) {
				page = folio_file_page(folio, index);
			} else if (trylock_page(page)) {
				xas_unlock_irq(&xas);
				result = SCAN_PAGE_LOCK;
		} else {	/* !is_shmem */
			if (!page || xa_is_value(page)) {
				xas_unlock_irq(&xas);
				page_cache_sync_readahead(mapping, &file->f_ra,
				/* drain pagevecs to help isolate_lru_page() */
				page = find_lock_page(mapping, index);
				if (unlikely(page == NULL)) {
			} else if (PageDirty(page)) {
				/*
				 * khugepaged only works on read-only fd,
				 * so this page is dirty because it hasn't
				 * been flushed since first write. There
				 * won't be new dirty pages.
				 *
				 * Trigger async flush here and hope the
				 * writeback is done when khugepaged
				 * revisits this page.
				 *
				 * This is a one-off situation. We are not
				 * forcing writeback in loop.
				 */
				xas_unlock_irq(&xas);
				filemap_flush(mapping);
			} else if (PageWriteback(page)) {
				xas_unlock_irq(&xas);
			} else if (trylock_page(page)) {
				xas_unlock_irq(&xas);
				result = SCAN_PAGE_LOCK;

		/*
		 * The page must be locked, so we can drop the i_pages lock
		 * without racing with truncate.
		 */
		VM_BUG_ON_PAGE(!PageLocked(page), page);

		/* make sure the page is up to date */
		if (unlikely(!PageUptodate(page))) {

		/*
		 * If file was truncated then extended, or hole-punched, before
		 * we locked the first page, then a THP might be there already.
		 * This will be discovered on the first iteration.
		 */
		if (PageTransCompound(page)) {
			struct page *head = compound_head(page);

			result = compound_order(head) == HPAGE_PMD_ORDER &&
					head->index == start
					/* Maybe PMD-mapped */
					? SCAN_PTE_MAPPED_HUGEPAGE
					: SCAN_PAGE_COMPOUND;

		folio = page_folio(page);

		if (folio_mapping(folio) != mapping) {
			result = SCAN_TRUNCATED;

		if (!is_shmem && (folio_test_dirty(folio) ||
				  folio_test_writeback(folio))) {
			/*
			 * khugepaged only works on read-only fd, so this
			 * page is dirty because it hasn't been flushed
			 * since first write.
			 */

		if (!folio_isolate_lru(folio)) {
			result = SCAN_DEL_PAGE_LRU;

		if (folio_has_private(folio) &&
		    !filemap_release_folio(folio, GFP_KERNEL)) {
			result = SCAN_PAGE_HAS_PRIVATE;
			folio_putback_lru(folio);

		if (folio_mapped(folio))
				  TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);
		xas_set(&xas, index);
		VM_BUG_ON_PAGE(page != xas_load(&xas), page);

		/*
		 * The page is expected to have page_count() == 3:
		 *  - we hold a pin on it;
		 *  - one reference from page cache;
		 *  - one from isolate_lru_page;
		 */
		if (!page_ref_freeze(page, 3)) {
			result = SCAN_PAGE_COUNT;
			xas_unlock_irq(&xas);
			putback_lru_page(page);

		/*
		 * Add the page to the list to be able to undo the collapse if
		 * something goes wrong.
		 */
		list_add_tail(&page->lru, &pagelist);

		/* Finally, replace with the new page. */
		xas_store(&xas, hpage);

	nr = thp_nr_pages(hpage);
		__mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr);
		__mod_lruvec_page_state(hpage, NR_FILE_THPS, nr);
		filemap_nr_thps_inc(mapping);
		/*
		 * Paired with smp_mb() in do_dentry_open() to ensure
		 * i_writecount is up to date and the update to nr_thps is
		 * visible. Ensures the page cache will be truncated if the
		 * file is opened writable.
		 */
		if (inode_is_open_for_write(mapping->host)) {
			__mod_lruvec_page_state(hpage, NR_FILE_THPS, -nr);
			filemap_nr_thps_dec(mapping);

	__mod_lruvec_page_state(hpage, NR_FILE_PAGES, nr_none);
	/* nr_none is always 0 for non-shmem. */
	__mod_lruvec_page_state(hpage, NR_SHMEM, nr_none);

	/* Join all the small entries into a single multi-index entry */
	xas_set_order(&xas, start, HPAGE_PMD_ORDER);
	xas_store(&xas, hpage);
	xas_unlock_irq(&xas);

	/*
	 * If collapse is successful, flush must be done now before copying.
	 * If collapse is unsuccessful, does flush actually need to be done?
	 * Do it anyway, to clear the state.
	 */
	try_to_unmap_flush();

	if (result == SCAN_SUCCEED) {
		struct page *page, *tmp;
		struct folio *folio;

		/*
		 * Replacing old pages with new one has succeeded, now we
		 * need to copy the content and free the old pages.
		 */
		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
			while (index < page->index) {
				clear_highpage(hpage + (index % HPAGE_PMD_NR));
			copy_highpage(hpage + (page->index % HPAGE_PMD_NR),
			list_del(&page->lru);
			page->mapping = NULL;
			page_ref_unfreeze(page, 1);
			ClearPageActive(page);
			ClearPageUnevictable(page);
		while (index < end) {
			clear_highpage(hpage + (index % HPAGE_PMD_NR));

		folio = page_folio(hpage);
		folio_mark_uptodate(folio);
		folio_ref_add(folio, HPAGE_PMD_NR - 1);
			folio_mark_dirty(folio);
		folio_add_lru(folio);

		/*
		 * Remove pte page tables, so we can re-fault the page as huge.
		 */
		result = retract_page_tables(mapping, start, mm, addr, hpage,

		/* Something went wrong: roll back page cache changes */
			mapping->nrpages -= nr_none;
			shmem_uncharge(mapping->host, nr_none);

		xas_set(&xas, start);
		xas_for_each(&xas, page, end - 1) {
			page = list_first_entry_or_null(&pagelist,
			if (!page || xas.xa_index < page->index) {
				/* Put holes back where they were */
				xas_store(&xas, NULL);

			VM_BUG_ON_PAGE(page->index != xas.xa_index, page);

			/* Unfreeze the page. */
			list_del(&page->lru);
			page_ref_unfreeze(page, 2);
			xas_store(&xas, page);
			xas_unlock_irq(&xas);
			putback_lru_page(page);
		xas_unlock_irq(&xas);

		hpage->mapping = NULL;

	VM_BUG_ON(!list_empty(&pagelist));

	mem_cgroup_uncharge(page_folio(hpage));
	trace_mm_khugepaged_collapse_file(mm, hpage, index, is_shmem, addr, file, nr, result);
static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
				    struct file *file, pgoff_t start,
				    struct collapse_control *cc)
	struct page *page = NULL;
	struct address_space *mapping = file->f_mapping;
	XA_STATE(xas, &mapping->i_pages, start);
	int node = NUMA_NO_NODE;
	int result = SCAN_SUCCEED;

	memset(cc->node_load, 0, sizeof(cc->node_load));
	nodes_clear(cc->alloc_nmask);
	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
		if (xas_retry(&xas, page))

		if (xa_is_value(page)) {
			if (cc->is_khugepaged &&
			    swap > khugepaged_max_ptes_swap) {
				result = SCAN_EXCEED_SWAP_PTE;
				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);

		/*
		 * TODO: khugepaged should compact smaller compound pages
		 * into a PMD sized page
		 */
		if (PageTransCompound(page)) {
			struct page *head = compound_head(page);

			result = compound_order(head) == HPAGE_PMD_ORDER &&
					head->index == start
					/* Maybe PMD-mapped */
					? SCAN_PTE_MAPPED_HUGEPAGE
					: SCAN_PAGE_COMPOUND;

			/*
			 * For SCAN_PTE_MAPPED_HUGEPAGE, further processing
			 * by the caller won't touch the page cache, and so
			 * it's safe to skip LRU and refcount checks before
			 * returning.
			 */

		node = page_to_nid(page);
		if (hpage_collapse_scan_abort(node, cc)) {
			result = SCAN_SCAN_ABORT;
		cc->node_load[node]++;

		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;

		if (page_count(page) !=
		    1 + page_mapcount(page) + page_has_private(page)) {
			result = SCAN_PAGE_COUNT;

		/*
		 * We probably should check if the page is referenced here, but
		 * nobody would transfer pte_young() to PageReferenced() for us.
		 * And rmap walk here is just too costly...
		 */

		if (need_resched()) {

	if (result == SCAN_SUCCEED) {
		if (cc->is_khugepaged &&
		    present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
			result = SCAN_EXCEED_NONE_PTE;
			count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
			result = collapse_file(mm, addr, file, start, cc);

	trace_mm_khugepaged_scan_file(mm, page, file, present, swap, result);
/* Stub variants of the file/shmem helpers above (file collapse not configured in): */
static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
				    struct file *file, pgoff_t start,
				    struct collapse_control *cc)

static void khugepaged_collapse_pte_mapped_thps(struct khugepaged_mm_slot *mm_slot)

static bool khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
, int *result
,
2260 struct collapse_control
*cc
)
2261 __releases(&khugepaged_mm_lock
)
2262 __acquires(&khugepaged_mm_lock
)
2264 struct vma_iterator vmi
;
2265 struct khugepaged_mm_slot
*mm_slot
;
2266 struct mm_slot
*slot
;
2267 struct mm_struct
*mm
;
2268 struct vm_area_struct
*vma
;
2272 lockdep_assert_held(&khugepaged_mm_lock
);
2273 *result
= SCAN_FAIL
;
2275 if (khugepaged_scan
.mm_slot
) {
2276 mm_slot
= khugepaged_scan
.mm_slot
;
2277 slot
= &mm_slot
->slot
;
2279 slot
= list_entry(khugepaged_scan
.mm_head
.next
,
2280 struct mm_slot
, mm_node
);
2281 mm_slot
= mm_slot_entry(slot
, struct khugepaged_mm_slot
, slot
);
2282 khugepaged_scan
.address
= 0;
2283 khugepaged_scan
.mm_slot
= mm_slot
;
2285 spin_unlock(&khugepaged_mm_lock
);
2286 khugepaged_collapse_pte_mapped_thps(mm_slot
);
2290 * Don't wait for semaphore (to avoid long wait times). Just move to
2291 * the next mm on the list.
2294 if (unlikely(!mmap_read_trylock(mm
)))
2295 goto breakouterloop_mmap_lock
;
2298 if (unlikely(hpage_collapse_test_exit(mm
)))
2299 goto breakouterloop
;
2301 vma_iter_init(&vmi
, mm
, khugepaged_scan
.address
);
2302 for_each_vma(vmi
, vma
) {
2303 unsigned long hstart
, hend
;
2306 if (unlikely(hpage_collapse_test_exit(mm
))) {
2310 if (!hugepage_vma_check(vma
, vma
->vm_flags
, false, false, true)) {
2315 hstart
= round_up(vma
->vm_start
, HPAGE_PMD_SIZE
);
2316 hend
= round_down(vma
->vm_end
, HPAGE_PMD_SIZE
);
2317 if (khugepaged_scan
.address
> hend
)
2319 if (khugepaged_scan
.address
< hstart
)
2320 khugepaged_scan
.address
= hstart
;
2321 VM_BUG_ON(khugepaged_scan
.address
& ~HPAGE_PMD_MASK
);
2323 while (khugepaged_scan
.address
< hend
) {
2324 bool mmap_locked
= true;
2327 if (unlikely(hpage_collapse_test_exit(mm
)))
2328 goto breakouterloop
;
2330 VM_BUG_ON(khugepaged_scan
.address
< hstart
||
2331 khugepaged_scan
.address
+ HPAGE_PMD_SIZE
>
2333 if (IS_ENABLED(CONFIG_SHMEM
) && vma
->vm_file
) {
2334 struct file
*file
= get_file(vma
->vm_file
);
2335 pgoff_t pgoff
= linear_page_index(vma
,
2336 khugepaged_scan
.address
);
2338 mmap_read_unlock(mm
);
2339 *result
= hpage_collapse_scan_file(mm
,
2340 khugepaged_scan
.address
,
2342 mmap_locked
= false;
2345 *result
= hpage_collapse_scan_pmd(mm
, vma
,
2346 khugepaged_scan
.address
,
2351 case SCAN_PTE_MAPPED_HUGEPAGE
: {
2354 *result
= find_pmd_or_thp_or_none(mm
,
2355 khugepaged_scan
.address
,
2357 if (*result
!= SCAN_SUCCEED
)
2359 if (!khugepaged_add_pte_mapped_thp(mm
,
2360 khugepaged_scan
.address
))
2364 ++khugepaged_pages_collapsed
;
2370 /* move to next address */
2371 khugepaged_scan
.address
+= HPAGE_PMD_SIZE
;
2372 progress
+= HPAGE_PMD_NR
;
2375 * We released mmap_lock so break loop. Note
2376 * that we drop mmap_lock before all hugepage
2377 * allocations, so if allocation fails, we are
2378 * guaranteed to break here and report the
2379 * correct result back to caller.
2381 goto breakouterloop_mmap_lock
;
2382 if (progress
>= pages
)
2383 goto breakouterloop
;
2387 mmap_read_unlock(mm
); /* exit_mmap will destroy ptes after this */
2388 breakouterloop_mmap_lock
:
2390 spin_lock(&khugepaged_mm_lock
);
2391 VM_BUG_ON(khugepaged_scan
.mm_slot
!= mm_slot
);
2393 * Release the current mm_slot if this mm is about to die, or
2394 * if we scanned all vmas of this mm.
2396 if (hpage_collapse_test_exit(mm
) || !vma
) {
2398 * Make sure that if mm_users is reaching zero while
2399 * khugepaged runs here, khugepaged_exit will find
2400 * mm_slot not pointing to the exiting mm.
2402 if (slot
->mm_node
.next
!= &khugepaged_scan
.mm_head
) {
2403 slot
= list_entry(slot
->mm_node
.next
,
2404 struct mm_slot
, mm_node
);
2405 khugepaged_scan
.mm_slot
=
2406 mm_slot_entry(slot
, struct khugepaged_mm_slot
, slot
);
2407 khugepaged_scan
.address
= 0;
2409 khugepaged_scan
.mm_slot
= NULL
;
2410 khugepaged_full_scans
++;
2413 collect_mm_slot(mm_slot
);
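/*
 * khugepaged has work only while at least one mm is queued for scanning and
 * THP is enabled; the wait condition additionally fires on
 * kthread_should_stop() so the thread can be woken in order to exit.
 */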
static int khugepaged_has_work(void)
{
	return !list_empty(&khugepaged_scan.mm_head) &&
		hugepage_flags_enabled();
}

static int khugepaged_wait_event(void)
{
	return !list_empty(&khugepaged_scan.mm_head) ||
		kthread_should_stop();
}
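/*
 * One scan pass: keep pulling work from khugepaged_scan until "pages" ptes
 * have been covered, the mm list head has been passed twice, or a hugepage
 * allocation fails a second time (after one khugepaged_alloc_sleep()).
 */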
static void khugepaged_do_scan(struct collapse_control *cc)
{
	unsigned int progress = 0, pass_through_head = 0;
	unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
	bool wait = true;
	int result = SCAN_SUCCEED;

	lru_add_drain_all();

	while (true) {
		cond_resched();

		if (unlikely(kthread_should_stop() || try_to_freeze()))
			break;

		spin_lock(&khugepaged_mm_lock);
		if (!khugepaged_scan.mm_slot)
			pass_through_head++;
		if (khugepaged_has_work() &&
		    pass_through_head < 2)
			progress += khugepaged_scan_mm_slot(pages - progress,
							    &result, cc);
		else
			progress = pages;
		spin_unlock(&khugepaged_mm_lock);

		if (progress >= pages)
			break;

		if (result == SCAN_ALLOC_HUGE_PAGE_FAIL) {
			/*
			 * If allocation fails the first time, try to sleep
			 * for a while.  When it fails again, cancel the scan.
			 */
			if (!wait)
				break;
			wait = false;
			khugepaged_alloc_sleep();
		}
	}
}
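/*
 * Between scan passes the thread takes a freezable sleep of
 * scan_sleep_millisecs (cut short by kthread_stop()); when there is no work
 * queued and THP is enabled, it instead sleeps until an mm is registered or
 * the thread is stopped.
 */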
static bool khugepaged_should_wakeup(void)
{
	return kthread_should_stop() ||
	       time_after_eq(jiffies, khugepaged_sleep_expire);
}
static void khugepaged_wait_work(void)
{
	if (khugepaged_has_work()) {
		const unsigned long scan_sleep_jiffies =
			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);

		if (!scan_sleep_jiffies)
			return;

		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
		wait_event_freezable_timeout(khugepaged_wait,
					     khugepaged_should_wakeup(),
					     scan_sleep_jiffies);
		return;
	}

	if (hugepage_flags_enabled())
		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
}
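/*
 * Main loop of the khugepaged kernel thread: alternate one scan pass with a
 * sleep until kthread_stop(), then release any mm_slot still held by the
 * scan cursor before exiting.
 */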
static int khugepaged(void *none)
{
	struct khugepaged_mm_slot *mm_slot;

	set_freezable();
	set_user_nice(current, MAX_NICE);

	while (!kthread_should_stop()) {
		khugepaged_do_scan(&khugepaged_collapse_control);
		khugepaged_wait_work();
	}

	spin_lock(&khugepaged_mm_lock);
	mm_slot = khugepaged_scan.mm_slot;
	khugepaged_scan.mm_slot = NULL;
	if (mm_slot)
		collect_mm_slot(mm_slot);
	spin_unlock(&khugepaged_mm_lock);
	return 0;
}
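/*
 * Raise min_free_kbytes so that enough pageblocks stay free for huge page
 * allocation and fragmentation avoidance.  Rough illustrative numbers (the
 * real values depend on the configuration): with 4K pages,
 * pageblock_nr_pages == 512 and four populated zones, the base reservation
 * is 512 * 4 * 2 = 4096 pages, plus 512 * 4 * 3 * 3 = 18432 pages for
 * migratetype fallback, i.e. 22528 pages (~88 MiB) before the 5%-of-lowmem
 * cap is applied.
 */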
static void set_recommended_min_free_kbytes(void)
{
	struct zone *zone;
	int nr_zones = 0;
	unsigned long recommended_min;

	if (!hugepage_flags_enabled()) {
		calculate_min_free_kbytes();
		goto update_wmarks;
	}

	for_each_populated_zone(zone) {
		/*
		 * We don't need to worry about fragmentation of
		 * ZONE_MOVABLE since it only has movable pages.
		 */
		if (zone_idx(zone) > gfp_zone(GFP_USER))
			continue;

		nr_zones++;
	}

	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type, one for a migratetype to fall back to and a
	 * second to avoid subsequent fallbacks of other types.  There are 3
	 * MIGRATE_TYPES we care about.
	 */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

	/* Never reserve more than 5% of the lowmem */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
	recommended_min <<= (PAGE_SHIFT - 10);

	if (recommended_min > min_free_kbytes) {
		if (user_min_free_kbytes >= 0)
			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
				min_free_kbytes, recommended_min);

		min_free_kbytes = recommended_min;
	}

update_wmarks:
	setup_per_zone_wmarks();
}
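/*
 * Called at init and whenever THP is toggled: start the khugepaged thread if
 * hugepages are enabled, stop it otherwise, and recompute the
 * min_free_kbytes watermarks either way.
 */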
int start_stop_khugepaged(void)
{
	int err = 0;

	mutex_lock(&khugepaged_mutex);
	if (hugepage_flags_enabled()) {
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (IS_ERR(khugepaged_thread)) {
			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
			goto fail;
		}

		if (!list_empty(&khugepaged_scan.mm_head))
			wake_up_interruptible(&khugepaged_wait);
	} else if (khugepaged_thread) {
		kthread_stop(khugepaged_thread);
		khugepaged_thread = NULL;
	}
	set_recommended_min_free_kbytes();
fail:
	mutex_unlock(&khugepaged_mutex);
	return err;
}
void khugepaged_min_free_kbytes_update(void)
{
	mutex_lock(&khugepaged_mutex);
	if (hugepage_flags_enabled() && khugepaged_thread)
		set_recommended_min_free_kbytes();
	mutex_unlock(&khugepaged_mutex);
}
bool current_is_khugepaged(void)
{
	return kthread_func(current) == khugepaged;
}
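/*
 * Map the scan_result of the last failed region onto an errno that tells the
 * MADV_COLLAPSE caller whether retrying may help (-EAGAIN), whether memory or
 * memcg charge pressure is the problem (-ENOMEM / -EBUSY), or whether the
 * range is simply not collapsible (-EINVAL).
 */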
static int madvise_collapse_errno(enum scan_result r)
{
	/*
	 * MADV_COLLAPSE breaks from existing madvise(2) conventions to provide
	 * actionable feedback to caller, so they may take an appropriate
	 * fallback measure depending on the nature of the failure.
	 */
	switch (r) {
	case SCAN_ALLOC_HUGE_PAGE_FAIL:
		return -ENOMEM;
	case SCAN_CGROUP_CHARGE_FAIL:
		return -EBUSY;
	/* Resource temporarily unavailable - trying again might succeed */
	case SCAN_PAGE_COUNT:
	case SCAN_PAGE_LOCK:
	case SCAN_PAGE_LRU:
	case SCAN_DEL_PAGE_LRU:
		return -EAGAIN;
	/*
	 * Other: Trying again likely not to succeed / error intrinsic to
	 * specified memory range. khugepaged likely won't be able to collapse
	 * either.
	 */
	default:
		return -EINVAL;
	}
}
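/*
 * madvise(MADV_COLLAPSE) entry point.  Illustrative userspace call (not part
 * of this file), assuming "p" points at a populated, PMD-aligned private
 * mapping of "len" bytes:
 *
 *	if (madvise(p, len, MADV_COLLAPSE))
 *		perror("MADV_COLLAPSE");
 *
 * Returns 0 only if every PMD-sized, PMD-aligned region of [start, end) is
 * PMD-mapped on return; otherwise an errno derived from the last failed
 * region via madvise_collapse_errno().
 */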
int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
		     unsigned long start, unsigned long end)
{
	struct collapse_control *cc;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long hstart, hend, addr;
	int thps = 0, last_fail = SCAN_FAIL;
	bool mmap_locked = true;

	BUG_ON(vma->vm_start > start);
	BUG_ON(vma->vm_end < end);

	*prev = vma;

	if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
		return -EINVAL;

	cc = kmalloc(sizeof(*cc), GFP_KERNEL);
	if (!cc)
		return -ENOMEM;
	cc->is_khugepaged = false;

	mmgrab(mm);
	lru_add_drain_all();

	hstart = (start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = end & HPAGE_PMD_MASK;

	for (addr = hstart; addr < hend; addr += HPAGE_PMD_SIZE) {
		int result = SCAN_FAIL;

		if (!mmap_locked) {
			cond_resched();
			mmap_read_lock(mm);
			mmap_locked = true;
			result = hugepage_vma_revalidate(mm, addr, false, &vma,
							 cc);
			if (result != SCAN_SUCCEED) {
				last_fail = result;
				goto out_nolock;
			}

			hend = min(hend, vma->vm_end & HPAGE_PMD_MASK);
		}
		mmap_assert_locked(mm);
		memset(cc->node_load, 0, sizeof(cc->node_load));
		nodes_clear(cc->alloc_nmask);
		if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
			struct file *file = get_file(vma->vm_file);
			pgoff_t pgoff = linear_page_index(vma, addr);

			mmap_read_unlock(mm);
			mmap_locked = false;
			result = hpage_collapse_scan_file(mm, addr, file, pgoff,
							  cc);
			fput(file);
		} else {
			result = hpage_collapse_scan_pmd(mm, vma, addr,
							 &mmap_locked, cc);
		}
		if (!mmap_locked)
			*prev = NULL;  /* Tell caller we dropped mmap_lock */

handle_result:
		switch (result) {
		case SCAN_SUCCEED:
		case SCAN_PMD_MAPPED:
			++thps;
			break;
		case SCAN_PTE_MAPPED_HUGEPAGE:
			BUG_ON(mmap_locked);
			BUG_ON(*prev);
			mmap_write_lock(mm);
			result = collapse_pte_mapped_thp(mm, addr, true);
			mmap_write_unlock(mm);
			goto handle_result;
		/* Whitelisted set of results where continuing OK */
		case SCAN_PMD_NULL:
		case SCAN_PTE_NON_PRESENT:
		case SCAN_PTE_UFFD_WP:
		case SCAN_PAGE_RO:
		case SCAN_LACK_REFERENCED_PAGE:
		case SCAN_PAGE_NULL:
		case SCAN_PAGE_COUNT:
		case SCAN_PAGE_LOCK:
		case SCAN_PAGE_COMPOUND:
		case SCAN_PAGE_LRU:
		case SCAN_DEL_PAGE_LRU:
			last_fail = result;
			break;
		default:
			last_fail = result;
			/* Other error, exit */
			goto out_maybelock;
		}
	}

out_maybelock:
	/* Caller expects us to hold mmap_lock on return */
	if (!mmap_locked)
		mmap_read_lock(mm);
out_nolock:
	mmap_assert_locked(mm);
	mmdrop(mm);
	kfree(cc);

	return thps == ((hend - hstart) >> HPAGE_PMD_SHIFT) ? 0
			: madvise_collapse_errno(last_fail);
}