// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2009  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/pfn_t.h>
#include <linux/mman.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/page_owner.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
#include "swap.h"

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>
/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications that are not
 * guaranteed to benefit from it. When transparent hugepage support is
 * enabled, it is for all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
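/*
 * Note: the compile-time defaults above only seed this flag word; it is
 * modified at runtime through the sysfs knobs defined later in this file
 * and by the "transparent_hugepage=" boot parameter.
 */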
static struct shrinker *deferred_split_shrinker;
static unsigned long deferred_split_count(struct shrinker *shrink,
					  struct shrink_control *sc);
static unsigned long deferred_split_scan(struct shrinker *shrink,
					 struct shrink_control *sc);

static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;
unsigned long huge_zero_pfn __read_mostly = ~0UL;
bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
			bool smaps, bool in_pf, bool enforce_sysfs)
{
	if (!vma->vm_mm)		/* vdso */
		return false;

	/*
	 * Explicitly disabled through madvise or prctl, or some
	 * architectures may disable THP for some mappings, for
	 * example, s390 kvm.
	 */
	if ((vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;
	/*
	 * If the hardware/firmware marked hugepage support disabled.
	 */
	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED))
		return false;

	/* khugepaged doesn't collapse DAX vma, but page fault is fine. */
	if (vma_is_dax(vma))
		return in_pf;

	/*
	 * khugepaged special VMA and hugetlb VMA.
	 * Must be checked after dax since some dax mappings may have
	 * VM_MIXEDMAP set.
	 */
	if (!in_pf && !smaps && (vm_flags & VM_NO_KHUGEPAGED))
		return false;

	/*
	 * Check alignment for file vma and size for both file and anon vma.
	 *
	 * Skip the check for page fault. Huge fault does the check in fault
	 * handlers. And this check is not suitable for huge PUD fault.
	 */
	if (!in_pf &&
	    !transhuge_vma_suitable(vma, (vma->vm_end - HPAGE_PMD_SIZE)))
		return false;

	/*
	 * Enabled via shmem mount options or sysfs settings.
	 * Must be done before hugepage flags check since shmem has its
	 * own flags.
	 */
	if (!in_pf && shmem_file(vma->vm_file))
		return shmem_is_huge(file_inode(vma->vm_file), vma->vm_pgoff,
				     !enforce_sysfs, vma->vm_mm, vm_flags);

	/* Enforce sysfs THP requirements as necessary */
	if (enforce_sysfs &&
	    (!hugepage_flags_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
					   !hugepage_flags_always())))
		return false;

	if (!vma_is_anonymous(vma)) {
		/*
		 * Trust that ->huge_fault() handlers know what they are doing
		 * in fault path.
		 */
		if (((in_pf || smaps)) && vma->vm_ops->huge_fault)
			return true;
		/* Only regular file is valid in collapse path */
		if (((!in_pf || smaps)) && file_thp_enabled(vma))
			return true;
		return false;
	}

	if (vma_is_temporary_stack(vma))
		return false;

	/*
	 * THPeligible bit of smaps should show 1 for proper VMAs even
	 * though anon_vma is not initialized yet.
	 *
	 * Allow page fault since anon_vma may be not initialized until
	 * the first page fault.
	 */
	if (!vma->anon_vma)
		return (smaps || in_pf);

	return true;
}
static bool get_huge_zero_page(void)
{
	struct page *zero_page;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return true;

	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_page) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return false;
	}
	preempt_disable();
	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
		preempt_enable();
		__free_pages(zero_page, compound_order(zero_page));
		goto retry;
	}
	WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));

	/* We take additional reference here. It will be put back by shrinker */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	return true;
}
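/*
 * Lifetime note: the huge zero page starts out with a refcount of 2 -- one
 * for the first mm that faulted it in, plus the extra reference noted
 * above that is only ever dropped by shrink_huge_zero_page_scan() below.
 * A refcount of exactly 1 therefore means "unused" and makes the page
 * reclaimable by the shrinker.
 */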
static void put_huge_zero_page(void)
{
	/*
	 * Counter should never go to zero here. Only shrinker can put
	 * last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}
struct page *mm_get_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		return READ_ONCE(huge_zero_page);

	if (!get_huge_zero_page())
		return NULL;

	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();

	return READ_ONCE(huge_zero_page);
}

void mm_put_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();
}
static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* we can free zero page only if last reference remains */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct page *zero_page = xchg(&huge_zero_page, NULL);
		BUG_ON(zero_page == NULL);
		WRITE_ONCE(huge_zero_pfn, ~0UL);
		__free_pages(zero_page, compound_order(zero_page));
		return HPAGE_PMD_NR;
	}

	return 0;
}

static struct shrinker *huge_zero_page_shrinker;
#ifdef CONFIG_SYSFS
static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
		output = "[always] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always [madvise] never";
	else
		output = "always madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}
static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret = count;

	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		ret = -EINVAL;

	if (ret > 0) {
		int err = start_stop_khugepaged();

		if (err)
			ret = err;
	}
	return ret;
}

static struct kobj_attribute enabled_attr = __ATTR_RW(enabled);
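/*
 * Example of driving the knob above from userspace (paths and values per
 * Documentation/admin-guide/mm/transhuge.rst):
 *
 *	echo always  > /sys/kernel/mm/transparent_hugepage/enabled
 *	echo madvise > /sys/kernel/mm/transparent_hugepage/enabled
 *	echo never   > /sys/kernel/mm/transparent_hugepage/enabled
 *
 * "madvise" restricts THP to VMAs that opted in via madvise(MADV_HUGEPAGE).
 */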
ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag)
{
	return sysfs_emit(buf, "%d\n",
			  !!test_bit(flag, &transparent_hugepage_flags));
}

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}
static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
		     &transparent_hugepage_flags))
		output = "[always] defer defer+madvise madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
			  &transparent_hugepage_flags))
		output = "always [defer] defer+madvise madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always defer [defer+madvise] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always defer defer+madvise [madvise] never";
	else
		output = "always defer defer+madvise madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}
static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer+madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}
static struct kobj_attribute defrag_attr = __ATTR_RW(defrag);
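/*
 * Example: make only madvise(MADV_HUGEPAGE) regions pay the direct
 * compaction cost, while everyone else merely wakes kswapd/kcompactd:
 *
 *	echo defer+madvise > /sys/kernel/mm/transparent_hugepage/defrag
 */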
static ssize_t use_zero_page_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
					  TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr = __ATTR_RW(use_zero_page);
static ssize_t hpage_pmd_size_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", HPAGE_PMD_SIZE);
}
static struct kobj_attribute hpage_pmd_size_attr =
	__ATTR_RO(hpage_pmd_size);
static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
	&use_zero_page_attr.attr,
	&hpage_pmd_size_attr.attr,
#ifdef CONFIG_SHMEM
	&shmem_enabled_attr.attr,
#endif
	NULL,
};

static const struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};
static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		pr_err("failed to create transparent hugepage kobject\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto remove_hp_group;
	}

	return 0;

remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}
static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */
static int __init thp_shrinker_init(void)
{
	huge_zero_page_shrinker = shrinker_alloc(0, "thp-zero");
	if (!huge_zero_page_shrinker)
		return -ENOMEM;

	deferred_split_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
						 SHRINKER_MEMCG_AWARE |
						 SHRINKER_NONSLAB,
						 "thp-deferred_split");
	if (!deferred_split_shrinker) {
		shrinker_free(huge_zero_page_shrinker);
		return -ENOMEM;
	}

	huge_zero_page_shrinker->count_objects = shrink_huge_zero_page_count;
	huge_zero_page_shrinker->scan_objects = shrink_huge_zero_page_scan;
	shrinker_register(huge_zero_page_shrinker);

	deferred_split_shrinker->count_objects = deferred_split_count;
	deferred_split_shrinker->scan_objects = deferred_split_scan;
	shrinker_register(deferred_split_shrinker);

	return 0;
}

static void __init thp_shrinker_exit(void)
{
	shrinker_free(huge_zero_page_shrinker);
	shrinker_free(deferred_split_shrinker);
}
static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
		transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED;
		return -EINVAL;
	}

	/*
	 * hugepages can't be allocated by the buddy allocator
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER > MAX_ORDER);
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		goto err_sysfs;

	err = khugepaged_init();
	if (err)
		goto err_slab;

	err = thp_shrinker_init();
	if (err)
		goto err_shrinker;

	/*
	 * By default disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save.  The admin can still enable it through /sys.
	 */
	if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
		transparent_hugepage_flags = 0;
		return 0;
	}

	err = start_stop_khugepaged();
	if (err)
		goto err_khugepaged;

	return 0;
err_khugepaged:
	thp_shrinker_exit();
err_shrinker:
	khugepaged_destroy();
err_slab:
	hugepage_exit_sysfs(hugepage_kobj);
err_sysfs:
	return err;
}
subsys_initcall(hugepage_init);
static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;
	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);
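/*
 * Example boot-time override (kernel command line):
 *
 *	transparent_hugepage=madvise
 */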
pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd, vma);
	return pmd;
}
#ifdef CONFIG_MEMCG
static inline
struct deferred_split *get_deferred_split_queue(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);
	struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));

	if (memcg)
		return &memcg->deferred_split_queue;
	else
		return &pgdat->deferred_split_queue;
}
#else
static inline
struct deferred_split *get_deferred_split_queue(struct folio *folio)
{
	struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));

	return &pgdat->deferred_split_queue;
}
#endif
void folio_prep_large_rmappable(struct folio *folio)
{
	VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
	INIT_LIST_HEAD(&folio->_deferred_list);
	folio_set_large_rmappable(folio);
}
static inline bool is_transparent_hugepage(struct folio *folio)
{
	if (!folio_test_large(folio))
		return false;

	return is_huge_zero_page(&folio->page) ||
	       folio_test_large_rmappable(folio);
}
static unsigned long __thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len,
		loff_t off, unsigned long flags, unsigned long size)
{
	loff_t off_end = off + len;
	loff_t off_align = round_up(off, size);
	unsigned long len_pad, ret;

	if (off_end <= off_align || (off_end - off_align) < size)
		return 0;

	len_pad = len + size;
	if (len_pad < len || (off + len_pad) < off)
		return 0;

	ret = current->mm->get_unmapped_area(filp, addr, len_pad,
					     off >> PAGE_SHIFT, flags);

	/*
	 * The failure might be due to length padding. The caller will retry
	 * without the padding.
	 */
	if (IS_ERR_VALUE(ret))
		return 0;

	/*
	 * Do not try to align to THP boundary if allocation at the address
	 * hint succeeds.
	 */
	if (ret == addr)
		return addr;

	ret += (off - ret) & (size - 1);
	return ret;
}
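/*
 * Worked example of the final alignment step above, with size = 2MB
 * (0x200000): if the padded search returned ret = 0x7f0000100000 and the
 * file offset off is 2MB-aligned, then (off - ret) & (size - 1) is
 * 0x100000 and the mapping is bumped to 0x7f0000200000, so that
 * ret % size == off % size and the range can be mapped with PMD entries.
 */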
unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	unsigned long ret;
	loff_t off = (loff_t)pgoff << PAGE_SHIFT;

	ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
	if (ret)
		return ret;

	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
			struct page *page, gfp_t gfp)
{
	struct vm_area_struct *vma = vmf->vma;
	struct folio *folio = page_folio(page);
	pgtable_t pgtable;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	vm_fault_t ret = 0;

	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);

	if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
		folio_put(folio);
		count_vm_event(THP_FAULT_FALLBACK);
		count_vm_event(THP_FAULT_FALLBACK_CHARGE);
		return VM_FAULT_FALLBACK;
	}
	folio_throttle_swaprate(folio, gfp);

	pgtable = pte_alloc_one(vma->vm_mm);
	if (unlikely(!pgtable)) {
		ret = VM_FAULT_OOM;
		goto release;
	}

	clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
	/*
	 * The memory barrier inside __folio_mark_uptodate makes sure that
	 * clear_huge_page writes become visible before the set_pmd_at()
	 * write.
	 */
	__folio_mark_uptodate(folio);

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_none(*vmf->pmd))) {
		goto unlock_release;
	} else {
		pmd_t entry;

		ret = check_stable_address_space(vma->vm_mm);
		if (ret)
			goto unlock_release;

		/* Deliver the page fault to userland */
		if (userfaultfd_missing(vma)) {
			spin_unlock(vmf->ptl);
			folio_put(folio);
			pte_free(vma->vm_mm, pgtable);
			ret = handle_userfault(vmf, VM_UFFD_MISSING);
			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			return ret;
		}

		entry = mk_huge_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		folio_add_new_anon_rmap(folio, vma, haddr);
		folio_add_lru_vma(folio, vma);
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
		update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		mm_inc_nr_ptes(vma->vm_mm);
		spin_unlock(vmf->ptl);
		count_vm_event(THP_FAULT_ALLOC);
		count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
	}

	return 0;
unlock_release:
	spin_unlock(vmf->ptl);
release:
	if (pgtable)
		pte_free(vma->vm_mm, pgtable);
	folio_put(folio);
	return ret;
}
/*
 * always: directly stall for all thp allocations
 * defer: wake kswapd and fail if not immediately available
 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
 *		  fail if not immediately available
 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
 *	    available
 * never: never stall for any thp allocation
 */
gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma)
{
	const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE);

	/* Always do synchronous compaction */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);

	/* Kick kcompactd and fail quickly */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;

	/* Synchronous compaction if madvised, otherwise kick kcompactd */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT |
			(vma_madvised ? __GFP_DIRECT_RECLAIM :
					__GFP_KSWAPD_RECLAIM);

	/* Only do synchronous compaction if madvised */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT |
			(vma_madvised ? __GFP_DIRECT_RECLAIM : 0);

	return GFP_TRANSHUGE_LIGHT;
}
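/*
 * Worked example: with "defer+madvise" set, a VMA that did
 * madvise(MADV_HUGEPAGE) allocates with GFP_TRANSHUGE_LIGHT |
 * __GFP_DIRECT_RECLAIM (may compact synchronously), while any other VMA
 * gets GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM and falls back to small
 * pages rather than stall.
 */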
/* Caller must hold page table lock. */
static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
		struct page *zero_page)
{
	pmd_t entry;
	if (!pmd_none(*pmd))
		return;
	entry = mk_pmd(zero_page, vma->vm_page_prot);
	entry = pmd_mkhuge(entry);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, haddr, pmd, entry);
	mm_inc_nr_ptes(mm);
}
vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	gfp_t gfp;
	struct folio *folio;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;

	if (!transhuge_vma_suitable(vma, haddr))
		return VM_FAULT_FALLBACK;
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;
	khugepaged_enter_vma(vma, vma->vm_flags);

	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
			!mm_forbids_zeropage(vma->vm_mm) &&
			transparent_hugepage_use_zero_page()) {
		pgtable_t pgtable;
		struct page *zero_page;
		vm_fault_t ret;

		pgtable = pte_alloc_one(vma->vm_mm);
		if (unlikely(!pgtable))
			return VM_FAULT_OOM;
		zero_page = mm_get_huge_zero_page(vma->vm_mm);
		if (unlikely(!zero_page)) {
			pte_free(vma->vm_mm, pgtable);
			count_vm_event(THP_FAULT_FALLBACK);
			return VM_FAULT_FALLBACK;
		}
		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
		ret = 0;
		if (pmd_none(*vmf->pmd)) {
			ret = check_stable_address_space(vma->vm_mm);
			if (ret) {
				spin_unlock(vmf->ptl);
				pte_free(vma->vm_mm, pgtable);
			} else if (userfaultfd_missing(vma)) {
				spin_unlock(vmf->ptl);
				pte_free(vma->vm_mm, pgtable);
				ret = handle_userfault(vmf, VM_UFFD_MISSING);
				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			} else {
				set_huge_zero_page(pgtable, vma->vm_mm, vma,
						   haddr, vmf->pmd, zero_page);
				update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
				spin_unlock(vmf->ptl);
			}
		} else {
			spin_unlock(vmf->ptl);
			pte_free(vma->vm_mm, pgtable);
		}
		return ret;
	}
	gfp = vma_thp_gfp_mask(vma);
	folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true);
	if (unlikely(!folio)) {
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}
	return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp);
}
static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
		pgtable_t pgtable)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t entry;
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	if (!pmd_none(*pmd)) {
		if (write) {
			if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
				WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
				goto out_unlock;
			}
			entry = pmd_mkyoung(*pmd);
			entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
			if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
				update_mmu_cache_pmd(vma, addr, pmd);
		}

		goto out_unlock;
	}

	entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pmd_mkdevmap(entry);
	if (write) {
		entry = pmd_mkyoung(pmd_mkdirty(entry));
		entry = maybe_pmd_mkwrite(entry, vma);
	}

	if (pgtable) {
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
		mm_inc_nr_ptes(mm);
		pgtable = NULL;
	}

	set_pmd_at(mm, addr, pmd, entry);
	update_mmu_cache_pmd(vma, addr, pmd);

out_unlock:
	spin_unlock(ptl);
	if (pgtable)
		pte_free(mm, pgtable);
}
/**
 * vmf_insert_pfn_pmd - insert a pmd size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
{
	unsigned long addr = vmf->address & PMD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	pgprot_t pgprot = vma->vm_page_prot;
	pgtable_t pgtable = NULL;

	/*
	 * If we had pmd_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
			!pfn_t_devmap(pfn));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(vma->vm_mm);
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
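/*
 * Sketch of a typical caller: a DAX-style driver's ->huge_fault() handler
 * mapping device memory at PMD granularity.  Names below are illustrative
 * only (not from this file), and assume the driver already validated that
 * the faulting range is PMD-aligned within its region:
 *
 *	static vm_fault_t my_huge_fault(struct vm_fault *vmf, unsigned int order)
 *	{
 *		pfn_t pfn = phys_to_pfn_t(my_region_phys(vmf), PFN_DEV | PFN_MAP);
 *
 *		if (order != HPAGE_PMD_ORDER)
 *			return VM_FAULT_FALLBACK;
 *		return vmf_insert_pfn_pmd(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
 *	}
 */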
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pud = pud_mkwrite(pud);
	return pud;
}
*vma
, unsigned long addr
,
968 pud_t
*pud
, pfn_t pfn
, bool write
)
970 struct mm_struct
*mm
= vma
->vm_mm
;
971 pgprot_t prot
= vma
->vm_page_prot
;
975 ptl
= pud_lock(mm
, pud
);
976 if (!pud_none(*pud
)) {
978 if (pud_pfn(*pud
) != pfn_t_to_pfn(pfn
)) {
979 WARN_ON_ONCE(!is_huge_zero_pud(*pud
));
982 entry
= pud_mkyoung(*pud
);
983 entry
= maybe_pud_mkwrite(pud_mkdirty(entry
), vma
);
984 if (pudp_set_access_flags(vma
, addr
, pud
, entry
, 1))
985 update_mmu_cache_pud(vma
, addr
, pud
);
990 entry
= pud_mkhuge(pfn_t_pud(pfn
, prot
));
991 if (pfn_t_devmap(pfn
))
992 entry
= pud_mkdevmap(entry
);
994 entry
= pud_mkyoung(pud_mkdirty(entry
));
995 entry
= maybe_pud_mkwrite(entry
, vma
);
997 set_pud_at(mm
, addr
, pud
, entry
);
998 update_mmu_cache_pud(vma
, addr
, pud
);
/**
 * vmf_insert_pfn_pud - insert a pud size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
{
	unsigned long addr = vmf->address & PUD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	pgprot_t pgprot = vma->vm_page_prot;

	/*
	 * If we had pud_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
			!pfn_t_devmap(pfn));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pud(vma, addr, vmf->pud, pfn, write);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
		      pmd_t *pmd, bool write)
{
	pmd_t _pmd;

	_pmd = pmd_mkyoung(*pmd);
	if (write)
		_pmd = pmd_mkdirty(_pmd);
	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
				  pmd, _pmd, write))
		update_mmu_cache_pmd(vma, addr, pmd);
}
struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	unsigned long pfn = pmd_pfn(*pmd);
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	int ret;

	assert_spin_locked(pmd_lockptr(mm, pmd));

	if (flags & FOLL_WRITE && !pmd_write(*pmd))
		return NULL;

	if (pmd_present(*pmd) && pmd_devmap(*pmd))
		/* pass */;
	else
		return NULL;

	if (flags & FOLL_TOUCH)
		touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);

	/*
	 * device mapped pages can only be returned if the
	 * caller will manage the page reference count.
	 */
	if (!(flags & (FOLL_GET | FOLL_PIN)))
		return ERR_PTR(-EEXIST);

	pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
	*pgmap = get_dev_pagemap(pfn, *pgmap);
	if (!*pgmap)
		return ERR_PTR(-EFAULT);
	page = pfn_to_page(pfn);
	ret = try_grab_page(page, flags);
	if (ret)
		page = ERR_PTR(ret);

	return page;
}
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	struct page *src_page;
	pmd_t pmd;
	pgtable_t pgtable = NULL;
	int ret = -ENOMEM;

	/* Skip if can be re-fill on fault */
	if (!vma_is_anonymous(dst_vma))
		return 0;

	pgtable = pte_alloc_one(dst_mm);
	if (unlikely(!pgtable))
		goto out;

	dst_ptl = pmd_lock(dst_mm, dst_pmd);
	src_ptl = pmd_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pmd = *src_pmd;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
	if (unlikely(is_swap_pmd(pmd))) {
		swp_entry_t entry = pmd_to_swp_entry(pmd);

		VM_BUG_ON(!is_pmd_migration_entry(pmd));
		if (!is_readable_migration_entry(entry)) {
			entry = make_readable_migration_entry(
							swp_offset(entry));
			pmd = swp_entry_to_pmd(entry);
			if (pmd_swp_soft_dirty(*src_pmd))
				pmd = pmd_swp_mksoft_dirty(pmd);
			if (pmd_swp_uffd_wp(*src_pmd))
				pmd = pmd_swp_mkuffd_wp(pmd);
			set_pmd_at(src_mm, addr, src_pmd, pmd);
		}
		add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		mm_inc_nr_ptes(dst_mm);
		pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
		if (!userfaultfd_wp(dst_vma))
			pmd = pmd_swp_clear_uffd_wp(pmd);
		set_pmd_at(dst_mm, addr, dst_pmd, pmd);
		ret = 0;
		goto out_unlock;
	}
#endif

	if (unlikely(!pmd_trans_huge(pmd))) {
		pte_free(dst_mm, pgtable);
		goto out_unlock;
	}
	/*
	 * When page table lock is held, the huge zero pmd should not be
	 * under splitting since we don't split the page itself, only pmd to
	 * a page table.
	 */
	if (is_huge_zero_pmd(pmd)) {
		/*
		 * get_huge_zero_page() will never allocate a new page here,
		 * since we already have a zero page to copy. It just takes a
		 * reference.
		 */
		mm_get_huge_zero_page(dst_mm);
		goto out_zero_page;
	}

	src_page = pmd_page(pmd);
	VM_BUG_ON_PAGE(!PageHead(src_page), src_page);

	get_page(src_page);
	if (unlikely(page_try_dup_anon_rmap(src_page, true, src_vma))) {
		/* Page maybe pinned: split and retry the fault on PTEs. */
		put_page(src_page);
		pte_free(dst_mm, pgtable);
		spin_unlock(src_ptl);
		spin_unlock(dst_ptl);
		__split_huge_pmd(src_vma, src_pmd, addr, false, NULL);
		return -EAGAIN;
	}
	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
out_zero_page:
	mm_inc_nr_ptes(dst_mm);
	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
	pmdp_set_wrprotect(src_mm, addr, src_pmd);
	if (!userfaultfd_wp(dst_vma))
		pmd = pmd_clear_uffd_wp(pmd);
	pmd = pmd_mkold(pmd_wrprotect(pmd));
	set_pmd_at(dst_mm, addr, dst_pmd, pmd);

	ret = 0;
out_unlock:
	spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
out:
	return ret;
}
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
		      pud_t *pud, bool write)
{
	pud_t _pud;

	_pud = pud_mkyoung(*pud);
	if (write)
		_pud = pud_mkdirty(_pud);
	if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
				  pud, _pud, write))
		update_mmu_cache_pud(vma, addr, pud);
}
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	unsigned long pfn = pud_pfn(*pud);
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	int ret;

	assert_spin_locked(pud_lockptr(mm, pud));

	if (flags & FOLL_WRITE && !pud_write(*pud))
		return NULL;

	if (pud_present(*pud) && pud_devmap(*pud))
		/* pass */;
	else
		return NULL;

	if (flags & FOLL_TOUCH)
		touch_pud(vma, addr, pud, flags & FOLL_WRITE);

	/*
	 * device mapped pages can only be returned if the
	 * caller will manage the page reference count.
	 *
	 * At least one of FOLL_GET | FOLL_PIN must be set, so assert that here:
	 */
	if (!(flags & (FOLL_GET | FOLL_PIN)))
		return ERR_PTR(-EEXIST);

	pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
	*pgmap = get_dev_pagemap(pfn, *pgmap);
	if (!*pgmap)
		return ERR_PTR(-EFAULT);
	page = pfn_to_page(pfn);

	ret = try_grab_page(page, flags);
	if (ret)
		page = ERR_PTR(ret);

	return page;
}
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	pud_t pud;
	int ret;

	dst_ptl = pud_lock(dst_mm, dst_pud);
	src_ptl = pud_lockptr(src_mm, src_pud);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pud = *src_pud;
	if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
		goto out_unlock;

	/*
	 * When page table lock is held, the huge zero pud should not be
	 * under splitting since we don't split the page itself, only pud to
	 * a page table.
	 */
	if (is_huge_zero_pud(pud)) {
		/* No huge zero pud yet */
	}

	/*
	 * TODO: once we support anonymous pages, use page_try_dup_anon_rmap()
	 * and split if duplicating fails.
	 */
	pudp_set_wrprotect(src_mm, addr, src_pud);
	pud = pud_mkold(pud_wrprotect(pud));
	set_pud_at(dst_mm, addr, dst_pud, pud);

	ret = 0;
out_unlock:
	spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
	return ret;
}
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
	if (unlikely(!pud_same(*vmf->pud, orig_pud)))
		goto unlock;

	touch_pud(vmf->vma, vmf->address, vmf->pud, write);
unlock:
	spin_unlock(vmf->ptl);
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
void huge_pmd_set_accessed(struct vm_fault *vmf)
{
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd)))
		goto unlock;

	touch_pmd(vmf->vma, vmf->address, vmf->pmd, write);
unlock:
	spin_unlock(vmf->ptl);
}
vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
{
	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
	struct vm_area_struct *vma = vmf->vma;
	struct folio *folio;
	struct page *page;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	pmd_t orig_pmd = vmf->orig_pmd;

	vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
	VM_BUG_ON_VMA(!vma->anon_vma, vma);

	if (is_huge_zero_pmd(orig_pmd))
		goto fallback;

	spin_lock(vmf->ptl);

	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
		spin_unlock(vmf->ptl);
		return 0;
	}

	page = pmd_page(orig_pmd);
	folio = page_folio(page);
	VM_BUG_ON_PAGE(!PageHead(page), page);

	/* Early check when only holding the PT lock. */
	if (PageAnonExclusive(page))
		goto reuse;

	if (!folio_trylock(folio)) {
		folio_get(folio);
		spin_unlock(vmf->ptl);
		folio_lock(folio);
		spin_lock(vmf->ptl);
		if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
			spin_unlock(vmf->ptl);
			folio_unlock(folio);
			folio_put(folio);
			return 0;
		}
		folio_put(folio);
	}

	/* Recheck after temporarily dropping the PT lock. */
	if (PageAnonExclusive(page)) {
		folio_unlock(folio);
		goto reuse;
	}

	/*
	 * See do_wp_page(): we can only reuse the folio exclusively if
	 * there are no additional references. Note that we always drain
	 * the LRU cache immediately after adding a THP.
	 */
	if (folio_ref_count(folio) >
			1 + folio_test_swapcache(folio) * folio_nr_pages(folio))
		goto unlock_fallback;
	if (folio_test_swapcache(folio))
		folio_free_swap(folio);
	if (folio_ref_count(folio) == 1) {
		pmd_t entry;

		folio_move_anon_rmap(folio, vma);
		SetPageAnonExclusive(page);
		folio_unlock(folio);
reuse:
		if (unlikely(unshare)) {
			spin_unlock(vmf->ptl);
			return 0;
		}
		entry = pmd_mkyoung(orig_pmd);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
			update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
		spin_unlock(vmf->ptl);
		return 0;
	}

unlock_fallback:
	folio_unlock(folio);
	spin_unlock(vmf->ptl);
fallback:
	__split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
	return VM_FAULT_FALLBACK;
}
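/*
 * Reuse rule recap (sketch): a write fault on a THP may reuse the folio in
 * place only when this mapping holds the sole reference
 * (folio_ref_count() == 1 after dropping any swap cache references);
 * otherwise the PMD is split and the fault is retried at PTE granularity.
 */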
static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
					   unsigned long addr, pmd_t pmd)
{
	struct page *page;

	if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
		return false;

	/* Don't touch entries that are not even readable (NUMA hinting). */
	if (pmd_protnone(pmd))
		return false;

	/* Do we need write faults for softdirty tracking? */
	if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
		return false;

	/* Do we need write faults for uffd-wp tracking? */
	if (userfaultfd_huge_pmd_wp(vma, pmd))
		return false;

	if (!(vma->vm_flags & VM_SHARED)) {
		/* See can_change_pte_writable(). */
		page = vm_normal_page_pmd(vma, addr, pmd);
		return page && PageAnon(page) && PageAnonExclusive(page);
	}

	/* See can_change_pte_writable(). */
	return pmd_dirty(pmd);
}
/* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */
static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
					struct vm_area_struct *vma,
					unsigned int flags)
{
	/* If the pmd is writable, we can write to the page. */
	if (pmd_write(pmd))
		return true;

	/* Maybe FOLL_FORCE is set to override it? */
	if (!(flags & FOLL_FORCE))
		return false;

	/* But FOLL_FORCE has no effect on shared mappings */
	if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
		return false;

	/* ... or read-only private ones */
	if (!(vma->vm_flags & VM_MAYWRITE))
		return false;

	/* ... or already writable ones that just need to take a write fault */
	if (vma->vm_flags & VM_WRITE)
		return false;

	/*
	 * See can_change_pte_writable(): we broke COW and could map the page
	 * writable if we have an exclusive anonymous page ...
	 */
	if (!page || !PageAnon(page) || !PageAnonExclusive(page))
		return false;

	/* ... and a write-fault isn't required for other reasons. */
	if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
		return false;
	return !userfaultfd_huge_pmd_wp(vma, pmd);
}
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
				   unsigned long addr,
				   pmd_t *pmd,
				   unsigned int flags)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	int ret;

	assert_spin_locked(pmd_lockptr(mm, pmd));

	page = pmd_page(*pmd);
	VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);

	if ((flags & FOLL_WRITE) &&
	    !can_follow_write_pmd(*pmd, page, vma, flags))
		return NULL;

	/* Avoid dumping huge zero page */
	if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
		return ERR_PTR(-EFAULT);

	if (pmd_protnone(*pmd) && !gup_can_follow_protnone(vma, flags))
		return NULL;

	if (!pmd_write(*pmd) && gup_must_unshare(vma, flags, page))
		return ERR_PTR(-EMLINK);

	VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
			!PageAnonExclusive(page), page);

	ret = try_grab_page(page, flags);
	if (ret)
		return ERR_PTR(ret);

	if (flags & FOLL_TOUCH)
		touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);

	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
	VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);

	return page;
}
/* NUMA hinting page fault entry point for trans huge pmds */
vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	pmd_t oldpmd = vmf->orig_pmd;
	pmd_t pmd;
	struct folio *folio;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	int nid = NUMA_NO_NODE;
	int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK);
	bool migrated = false, writable = false;
	int flags = 0;

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
		spin_unlock(vmf->ptl);
		goto out;
	}

	pmd = pmd_modify(oldpmd, vma->vm_page_prot);

	/*
	 * Detect now whether the PMD could be writable; this information
	 * is only valid while holding the PT lock.
	 */
	writable = pmd_write(pmd);
	if (!writable && vma_wants_manual_pte_write_upgrade(vma) &&
	    can_change_pmd_writable(vma, vmf->address, pmd))
		writable = true;

	folio = vm_normal_folio_pmd(vma, haddr, pmd);
	if (!folio)
		goto out_map;

	/* See similar comment in do_numa_page for explanation */
	if (!writable)
		flags |= TNF_NO_GROUP;

	nid = folio_nid(folio);
	/*
	 * For memory tiering mode, cpupid of slow memory page is used
	 * to record page access time. So use default value.
	 */
	if (node_is_toptier(nid))
		last_cpupid = folio_last_cpupid(folio);
	target_nid = numa_migrate_prep(folio, vma, haddr, nid, &flags);
	if (target_nid == NUMA_NO_NODE) {
		folio_put(folio);
		goto out_map;
	}

	spin_unlock(vmf->ptl);
	writable = false;

	migrated = migrate_misplaced_folio(folio, vma, target_nid);
	if (migrated) {
		flags |= TNF_MIGRATED;
		nid = target_nid;
	} else {
		flags |= TNF_MIGRATE_FAIL;
		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
		if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
			spin_unlock(vmf->ptl);
			goto out;
		}
		goto out_map;
	}

out:
	if (nid != NUMA_NO_NODE)
		task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);

	return 0;

out_map:
	/* Restore the PMD */
	pmd = pmd_modify(oldpmd, vma->vm_page_prot);
	pmd = pmd_mkyoung(pmd);
	if (writable)
		pmd = pmd_mkwrite(pmd, vma);
	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
	update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
	spin_unlock(vmf->ptl);
	goto out;
}
/*
 * Return true if we do MADV_FREE successfully on entire pmd page.
 * Otherwise, return false.
 */
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
			   pmd_t *pmd, unsigned long addr, unsigned long next)
{
	spinlock_t *ptl;
	pmd_t orig_pmd;
	struct folio *folio;
	struct mm_struct *mm = tlb->mm;
	bool ret = false;

	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (!ptl)
		goto out_unlocked;

	orig_pmd = *pmd;
	if (is_huge_zero_pmd(orig_pmd))
		goto out;

	if (unlikely(!pmd_present(orig_pmd))) {
		VM_BUG_ON(thp_migration_supported() &&
				  !is_pmd_migration_entry(orig_pmd));
		goto out;
	}

	folio = pfn_folio(pmd_pfn(orig_pmd));
	/*
	 * If other processes are mapping this folio, we couldn't discard
	 * the folio unless they all do MADV_FREE so let's skip the folio.
	 */
	if (folio_estimated_sharers(folio) != 1)
		goto out;

	if (!folio_trylock(folio))
		goto out;

	/*
	 * If user want to discard part-pages of THP, split it so MADV_FREE
	 * will deactivate only them.
	 */
	if (next - addr != HPAGE_PMD_SIZE) {
		folio_get(folio);
		spin_unlock(ptl);
		split_folio(folio);
		folio_unlock(folio);
		folio_put(folio);
		goto out_unlocked;
	}

	if (folio_test_dirty(folio))
		folio_clear_dirty(folio);
	folio_unlock(folio);

	if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
		pmdp_invalidate(vma, addr, pmd);
		orig_pmd = pmd_mkold(orig_pmd);
		orig_pmd = pmd_mkclean(orig_pmd);

		set_pmd_at(mm, addr, pmd, orig_pmd);
		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
	}

	folio_mark_lazyfree(folio);
	ret = true;
out:
	spin_unlock(ptl);
out_unlocked:
	return ret;
}
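/*
 * Userspace trigger for the path above (illustrative):
 *
 *	madvise(addr, len, MADV_FREE);
 *
 * With addr and len spanning a whole PMD range the THP is marked lazyfree
 * in place; a partial range splits the THP first so only the covered
 * subpages are deactivated.
 */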
static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
{
	pgtable_t pgtable;

	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pte_free(mm, pgtable);
	mm_dec_nr_ptes(mm);
}
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		 pmd_t *pmd, unsigned long addr)
{
	pmd_t orig_pmd;
	spinlock_t *ptl;

	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);

	ptl = __pmd_trans_huge_lock(pmd, vma);
	if (!ptl)
		return 0;
	/*
	 * For architectures like ppc64 we look at deposited pgtable
	 * when calling pmdp_huge_get_and_clear. So do the
	 * pgtable_trans_huge_withdraw after finishing pmdp related
	 * operations.
	 */
	orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd,
						tlb->fullmm);
	arch_check_zapped_pmd(vma, orig_pmd);
	tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
	if (vma_is_special_huge(vma)) {
		if (arch_needs_pgtable_deposit())
			zap_deposited_table(tlb->mm, pmd);
		spin_unlock(ptl);
	} else if (is_huge_zero_pmd(orig_pmd)) {
		zap_deposited_table(tlb->mm, pmd);
		spin_unlock(ptl);
	} else {
		struct page *page = NULL;
		int flush_needed = 1;

		if (pmd_present(orig_pmd)) {
			page = pmd_page(orig_pmd);
			page_remove_rmap(page, vma, true);
			VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
			VM_BUG_ON_PAGE(!PageHead(page), page);
		} else if (thp_migration_supported()) {
			swp_entry_t entry;

			VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
			entry = pmd_to_swp_entry(orig_pmd);
			page = pfn_swap_entry_to_page(entry);
			flush_needed = 0;
		} else
			WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");

		if (PageAnon(page)) {
			zap_deposited_table(tlb->mm, pmd);
			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
		} else {
			if (arch_needs_pgtable_deposit())
				zap_deposited_table(tlb->mm, pmd);
			add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR);
		}

		spin_unlock(ptl);
		if (flush_needed)
			tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
	}
	return 1;
}
#ifndef pmd_move_must_withdraw
static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
					 spinlock_t *old_pmd_ptl,
					 struct vm_area_struct *vma)
{
	/*
	 * With split pmd lock we also need to move preallocated
	 * PTE page table if new_pmd is on different PMD page table.
	 *
	 * We also don't deposit and withdraw tables for file pages.
	 */
	return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
}
#endif
static pmd_t move_soft_dirty_pmd(pmd_t pmd)
{
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (unlikely(is_pmd_migration_entry(pmd)))
		pmd = pmd_swp_mksoft_dirty(pmd);
	else if (pmd_present(pmd))
		pmd = pmd_mksoft_dirty(pmd);
#endif
	return pmd;
}
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
{
	spinlock_t *old_ptl, *new_ptl;
	pmd_t pmd;
	struct mm_struct *mm = vma->vm_mm;
	bool force_flush = false;

	/*
	 * The destination pmd shouldn't be established, free_pgtables()
	 * should have released it; but move_page_tables() might have already
	 * inserted a page table, if racing against shmem/file collapse.
	 */
	if (!pmd_none(*new_pmd)) {
		VM_BUG_ON(pmd_trans_huge(*new_pmd));
		return false;
	}

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
	if (old_ptl) {
		new_ptl = pmd_lockptr(mm, new_pmd);
		if (new_ptl != old_ptl)
			spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
		pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
		if (pmd_present(pmd))
			force_flush = true;
		VM_BUG_ON(!pmd_none(*new_pmd));

		if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
			pgtable_t pgtable;
			pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
			pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
		}
		pmd = move_soft_dirty_pmd(pmd);
		set_pmd_at(mm, new_addr, new_pmd, pmd);
		if (force_flush)
			flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
		if (new_ptl != old_ptl)
			spin_unlock(new_ptl);
		spin_unlock(old_ptl);
		return true;
	}
	return false;
}
/*
 * Returns:
 *
 * - 0 if PMD could not be locked
 * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
 *   or if prot_numa but THP migration is not supported
 * - HPAGE_PMD_NR if protections changed and TLB flush necessary
 */
int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		    pmd_t *pmd, unsigned long addr, pgprot_t newprot,
		    unsigned long cp_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	pmd_t oldpmd, entry;
	bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
	int ret = 1;

	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);

	if (prot_numa && !thp_migration_supported())
		return 1;

	ptl = __pmd_trans_huge_lock(pmd, vma);
	if (!ptl)
		return 0;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
	if (is_swap_pmd(*pmd)) {
		swp_entry_t entry = pmd_to_swp_entry(*pmd);
		struct folio *folio = page_folio(pfn_swap_entry_to_page(entry));
		pmd_t newpmd;

		VM_BUG_ON(!is_pmd_migration_entry(*pmd));
		if (is_writable_migration_entry(entry)) {
			/*
			 * A protection check is difficult so
			 * just be safe and disable write
			 */
			if (folio_test_anon(folio))
				entry = make_readable_exclusive_migration_entry(swp_offset(entry));
			else
				entry = make_readable_migration_entry(swp_offset(entry));
			newpmd = swp_entry_to_pmd(entry);
			if (pmd_swp_soft_dirty(*pmd))
				newpmd = pmd_swp_mksoft_dirty(newpmd);
		} else {
			newpmd = *pmd;
		}

		if (uffd_wp)
			newpmd = pmd_swp_mkuffd_wp(newpmd);
		else if (uffd_wp_resolve)
			newpmd = pmd_swp_clear_uffd_wp(newpmd);
		if (!pmd_same(*pmd, newpmd))
			set_pmd_at(mm, addr, pmd, newpmd);
		goto unlock;
	}
#endif

	if (prot_numa) {
		struct folio *folio;
		bool toptier;
		/*
		 * Avoid trapping faults against the zero page. The read-only
		 * data is likely to be read-cached on the local CPU and
		 * local/remote hits to the zero page are not interesting.
		 */
		if (is_huge_zero_pmd(*pmd))
			goto unlock;

		if (pmd_protnone(*pmd))
			goto unlock;

		folio = page_folio(pmd_page(*pmd));
		toptier = node_is_toptier(folio_nid(folio));
		/*
		 * Skip scanning top tier node if normal numa
		 * balancing is disabled
		 */
		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
		    toptier)
			goto unlock;

		if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
		    !toptier)
			folio_xchg_access_time(folio,
					       jiffies_to_msecs(jiffies));
	}
	/*
	 * In case prot_numa, we are under mmap_read_lock(mm). It's critical
	 * to not clear pmd intermittently to avoid race with MADV_DONTNEED
	 * which is also under mmap_read_lock(mm):
	 *
	 *	CPU0:				CPU1:
	 *				change_huge_pmd(prot_numa=1)
	 *				 pmdp_huge_get_and_clear_notify()
	 * madvise_dontneed()
	 *  zap_pmd_range()
	 *   pmd_trans_huge(*pmd) == 0 (without ptl)
	 *   // skip the pmd
	 *				 set_pmd_at();
	 *				 // pmd is re-established
	 *
	 * The race makes MADV_DONTNEED miss the huge pmd and don't clear it
	 * which may break userspace.
	 *
	 * pmdp_invalidate_ad() is required to make sure we don't miss
	 * dirty/young flags set by hardware.
	 */
	oldpmd = pmdp_invalidate_ad(vma, addr, pmd);

	entry = pmd_modify(oldpmd, newprot);
	if (uffd_wp)
		entry = pmd_mkuffd_wp(entry);
	else if (uffd_wp_resolve)
		/*
		 * Leave the write bit to be handled by PF interrupt
		 * handler, then things like COW could be properly
		 * handled.
		 */
		entry = pmd_clear_uffd_wp(entry);

	/* See change_pte_range(). */
	if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) && !pmd_write(entry) &&
	    can_change_pmd_writable(vma, addr, entry))
		entry = pmd_mkwrite(entry, vma);

	ret = HPAGE_PMD_NR;
	set_pmd_at(mm, addr, pmd, entry);

	if (huge_pmd_needs_flush(oldpmd, entry))
		tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE);
unlock:
	spin_unlock(ptl);
	return ret;
}
/*
 * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
 *
 * Note that if it returns page table lock pointer, this routine returns without
 * unlocking page table lock. So callers must unlock it.
 */
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
{
	spinlock_t *ptl;
	ptl = pmd_lock(vma->vm_mm, pmd);
	if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
			pmd_devmap(*pmd)))
		return ptl;
	spin_unlock(ptl);
	return NULL;
}
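/*
 * Typical calling pattern (sketch):
 *
 *	ptl = __pmd_trans_huge_lock(pmd, vma);
 *	if (!ptl)
 *		return 0;	// not a huge pmd, fall back to the pte path
 *	... operate on the huge pmd while the lock is held ...
 *	spin_unlock(ptl);
 */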
/*
 * Returns page table lock pointer if a given pud maps a thp, NULL otherwise.
 *
 * Note that if it returns page table lock pointer, this routine returns without
 * unlocking page table lock. So callers must unlock it.
 */
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
{
	spinlock_t *ptl;

	ptl = pud_lock(vma->vm_mm, pud);
	if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
		return ptl;
	spin_unlock(ptl);
	return NULL;
}
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
		 pud_t *pud, unsigned long addr)
{
	spinlock_t *ptl;

	ptl = __pud_trans_huge_lock(pud, vma);
	if (!ptl)
		return 0;

	pudp_huge_get_and_clear_full(vma, addr, pud, tlb->fullmm);
	tlb_remove_pud_tlb_entry(tlb, pud, addr);
	if (vma_is_special_huge(vma)) {
		spin_unlock(ptl);
		/* No zero page support yet */
	} else {
		/* No support for anonymous PUD pages yet */
		BUG();
	}
	return 1;
}
static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
		unsigned long haddr)
{
	VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
	VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));

	count_vm_event(THP_SPLIT_PUD);

	pudp_huge_clear_flush(vma, haddr, pud);
}
void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address)
{
	spinlock_t *ptl;
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
				address & HPAGE_PUD_MASK,
				(address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
	mmu_notifier_invalidate_range_start(&range);
	ptl = pud_lock(vma->vm_mm, pud);
	if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
		goto out;
	__split_huge_pud_locked(vma, pud, range.start);

out:
	spin_unlock(ptl);
	mmu_notifier_invalidate_range_end(&range);
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
		unsigned long haddr, pmd_t *pmd)
{
	struct mm_struct *mm = vma->vm_mm;
	pgtable_t pgtable;
	pmd_t _pmd, old_pmd;
	unsigned long addr;
	pte_t *pte;
	int i;

	/*
	 * Leave pmd empty until pte is filled note that it is fine to delay
	 * notification until mmu_notifier_invalidate_range_end() as we are
	 * replacing a zero pmd write protected page with a zero pte write
	 * protected page.
	 *
	 * See Documentation/mm/mmu_notifier.rst
	 */
	old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);

	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pmd_populate(mm, &_pmd, pgtable);

	pte = pte_offset_map(&_pmd, haddr);
	VM_BUG_ON(!pte);
	for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
		pte_t entry;

		entry = pfn_pte(my_zero_pfn(addr), vma->vm_page_prot);
		entry = pte_mkspecial(entry);
		if (pmd_uffd_wp(old_pmd))
			entry = pte_mkuffd_wp(entry);
		VM_BUG_ON(!pte_none(ptep_get(pte)));
		set_pte_at(mm, addr, pte, entry);
		pte++;
	}
	pte_unmap(pte - 1);
	smp_wmb(); /* make pte visible before pmd */
	pmd_populate(mm, pmd, pgtable);
}
static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long haddr, bool freeze)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	pgtable_t pgtable;
	pmd_t old_pmd, _pmd;
	bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false;
	bool anon_exclusive = false, dirty = false;
	unsigned long addr;
	pte_t *pte;
	int i;

	VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
	VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
				&& !pmd_devmap(*pmd));

	count_vm_event(THP_SPLIT_PMD);

	if (!vma_is_anonymous(vma)) {
		old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
		/*
		 * We are going to unmap this huge page. So
		 * just go ahead and zap it
		 */
		if (arch_needs_pgtable_deposit())
			zap_deposited_table(mm, pmd);
		if (vma_is_special_huge(vma))
			return;
		if (unlikely(is_pmd_migration_entry(old_pmd))) {
			swp_entry_t entry;

			entry = pmd_to_swp_entry(old_pmd);
			page = pfn_swap_entry_to_page(entry);
		} else {
			page = pmd_page(old_pmd);
			if (!PageDirty(page) && pmd_dirty(old_pmd))
				set_page_dirty(page);
			if (!PageReferenced(page) && pmd_young(old_pmd))
				SetPageReferenced(page);
			page_remove_rmap(page, vma, true);
			put_page(page);
		}
		add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
		return;
	}

	if (is_huge_zero_pmd(*pmd)) {
		/*
		 * FIXME: Do we want to invalidate the secondary mmu by calling
		 * mmu_notifier_arch_invalidate_secondary_tlbs()? See comments
		 * below inside __split_huge_pmd().
		 *
		 * We are going from a zero huge page write protected to a zero
		 * small page also write protected, so it does not seem useful
		 * to invalidate the secondary mmu at this time.
		 */
		return __split_huge_zero_page_pmd(vma, haddr, pmd);
	}

	/*
	 * Up to this point the pmd is present and huge and userland has full
	 * access to the hugepage during the split (which happens in place).
	 * If we overwrite the pmd with the not-huge version pointing to the
	 * pte here (which of course we could if all CPUs were bug free),
	 * userland could trigger a small page size TLB miss on the small
	 * sized TLB while the hugepage TLB entry is still established in the
	 * huge TLB. Some CPUs don't like that.
	 * See http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum
	 * 383 on page 105. Intel should be safe, but it also warns that it's
	 * only safe if the permission and cache attributes of the two entries
	 * loaded in the two TLBs are identical (which should be the case
	 * here). But it is generally safer to never allow small and huge TLB
	 * entries for the same virtual address to be loaded simultaneously.
	 * So instead of doing "pmd_populate(); flush_pmd_tlb_range();" we
	 * first mark the current pmd notpresent (atomically, because here
	 * pmd_trans_huge must remain set on the pmd at all times until the
	 * split is complete for this pmd), then we flush the SMP TLB and
	 * finally we write the non-huge version of the pmd entry with
	 * pmd_populate.
	 */
	old_pmd = pmdp_invalidate(vma, haddr, pmd);

	pmd_migration = is_pmd_migration_entry(old_pmd);
	if (unlikely(pmd_migration)) {
		swp_entry_t entry;

		entry = pmd_to_swp_entry(old_pmd);
		page = pfn_swap_entry_to_page(entry);
		write = is_writable_migration_entry(entry);
		if (PageAnon(page))
			anon_exclusive =
				is_readable_exclusive_migration_entry(entry);
		young = is_migration_entry_young(entry);
		dirty = is_migration_entry_dirty(entry);
		soft_dirty = pmd_swp_soft_dirty(old_pmd);
		uffd_wp = pmd_swp_uffd_wp(old_pmd);
	} else {
		page = pmd_page(old_pmd);
		if (pmd_dirty(old_pmd)) {
			dirty = true;
			SetPageDirty(page);
		}
		write = pmd_write(old_pmd);
		young = pmd_young(old_pmd);
		soft_dirty = pmd_soft_dirty(old_pmd);
		uffd_wp = pmd_uffd_wp(old_pmd);

		VM_BUG_ON_PAGE(!page_count(page), page);

		/*
		 * Without "freeze", we'll simply split the PMD, propagating
		 * the PageAnonExclusive() flag for each PTE by setting it for
		 * each subpage -- no need to (temporarily) clear.
		 *
		 * With "freeze" we want to replace mapped pages by
		 * migration entries right away. This is only possible if we
		 * managed to clear PageAnonExclusive() -- see
		 * set_pmd_migration_entry().
		 *
		 * In case we cannot clear PageAnonExclusive(), split the PMD
		 * only and let try_to_migrate_one() fail later.
		 *
		 * See page_try_share_anon_rmap(): invalidate PMD first.
		 */
		anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
		if (freeze && anon_exclusive && page_try_share_anon_rmap(page))
			freeze = false;
		if (!freeze)
			page_ref_add(page, HPAGE_PMD_NR - 1);
	}

	/*
	 * Withdraw the table only after we mark the pmd entry invalid.
	 * This is critical for some architectures (Power).
	 */
	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pmd_populate(mm, &_pmd, pgtable);

	pte = pte_offset_map(&_pmd, haddr);
	VM_BUG_ON(!pte);
	for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
		pte_t entry;

		/*
		 * Note that NUMA hinting access restrictions are not
		 * transferred to avoid any possibility of altering
		 * permissions across VMAs.
		 */
		if (freeze || pmd_migration) {
			swp_entry_t swp_entry;

			if (write)
				swp_entry = make_writable_migration_entry(
							page_to_pfn(page + i));
			else if (anon_exclusive)
				swp_entry = make_readable_exclusive_migration_entry(
							page_to_pfn(page + i));
			else
				swp_entry = make_readable_migration_entry(
							page_to_pfn(page + i));
			if (young)
				swp_entry = make_migration_entry_young(swp_entry);
			if (dirty)
				swp_entry = make_migration_entry_dirty(swp_entry);
			entry = swp_entry_to_pte(swp_entry);
			if (soft_dirty)
				entry = pte_swp_mksoft_dirty(entry);
			if (uffd_wp)
				entry = pte_swp_mkuffd_wp(entry);
		} else {
			entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
			if (write)
				entry = pte_mkwrite(entry, vma);
			if (anon_exclusive)
				SetPageAnonExclusive(page + i);
			if (!young)
				entry = pte_mkold(entry);
			/* NOTE: this may set soft-dirty too on some archs */
			if (dirty)
				entry = pte_mkdirty(entry);
			if (soft_dirty)
				entry = pte_mksoft_dirty(entry);
			if (uffd_wp)
				entry = pte_mkuffd_wp(entry);
			page_add_anon_rmap(page + i, vma, addr, RMAP_NONE);
		}
		VM_BUG_ON(!pte_none(ptep_get(pte)));
		set_pte_at(mm, addr, pte, entry);
		pte++;
	}
	pte_unmap(pte - 1);

	if (!pmd_migration)
		page_remove_rmap(page, vma, true);
	if (freeze)
		put_page(page);

	smp_wmb(); /* make pte visible before pmd */
	pmd_populate(mm, pmd, pgtable);
}
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio)
{
	spinlock_t *ptl;
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
				address & HPAGE_PMD_MASK,
				(address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);
	ptl = pmd_lock(vma->vm_mm, pmd);

	/*
	 * If the caller asks to set up a migration entry, we need a folio to
	 * check the pmd against. Otherwise we can end up replacing the wrong
	 * folio.
	 */
	VM_BUG_ON(freeze && !folio);
	VM_WARN_ON_ONCE(folio && !folio_test_locked(folio));

	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
	    is_pmd_migration_entry(*pmd)) {
		/*
		 * It's safe to call pmd_page when folio is set because it's
		 * guaranteed that pmd is present.
		 */
		if (folio && folio != page_folio(pmd_page(*pmd)))
			goto out;
		__split_huge_pmd_locked(vma, pmd, range.start, freeze);
	}

out:
	spin_unlock(ptl);
	mmu_notifier_invalidate_range_end(&range);
}
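/*
 * Callers are expected to keep the VMA stable (via the mmap_lock or an
 * rmap lock) while calling the split functions below; the PMD page table
 * lock itself is taken locally with pmd_lock(), and the split always
 * operates on the PMD-aligned range.start.
 */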
void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct folio *folio)
{
	pmd_t *pmd = mm_find_pmd(vma->vm_mm, address);

	if (!pmd)
		return;

	__split_huge_pmd(vma, pmd, address, freeze, folio);
}
static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma,
					    unsigned long address)
{
	/*
	 * If the new address isn't hpage aligned and it could previously
	 * contain a hugepage: check if we need to split a huge pmd.
	 */
	if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) &&
	    range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE),
			 ALIGN(address, HPAGE_PMD_SIZE)))
		split_huge_pmd_address(vma, address, false, NULL);
}
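/*
 * Example (hypothetical addresses): with 2MiB huge pages, a new boundary
 * at 0x7f1234601000 is not HPAGE_PMD_SIZE-aligned; if the surrounding
 * range [0x7f1234600000, 0x7f1234800000) lies within the VMA, the PMD
 * covering it is split so the VMA can be cut at a 4KiB boundary.
 */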
void vma_adjust_trans_huge(struct vm_area_struct *vma,
			   unsigned long start,
			   unsigned long end,
			   long adjust_next)
{
	/* Check if we need to split start first. */
	split_huge_pmd_if_needed(vma, start);

	/* Check if we need to split end next. */
	split_huge_pmd_if_needed(vma, end);

	/*
	 * If we're also updating the next vma vm_start,
	 * check if we need to split it.
	 */
	if (adjust_next > 0) {
		struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end);
		unsigned long nstart = next->vm_start;

		nstart += adjust_next;
		split_huge_pmd_if_needed(next, nstart);
	}
}
static void unmap_folio(struct folio *folio)
{
	enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
		TTU_SYNC;

	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);

	/*
	 * Anon pages need migration entries to preserve them, but file
	 * pages can simply be left unmapped, then faulted back on demand.
	 * If that is ever changed (perhaps for mlock), update remap_page().
	 */
	if (folio_test_anon(folio))
		try_to_migrate(folio, ttu_flags);
	else
		try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK);
}
static void remap_page(struct folio *folio, unsigned long nr)
{
	int i = 0;

	/* If unmap_folio() uses try_to_migrate() on file, remove this check */
	if (!folio_test_anon(folio))
		return;
	for (;;) {
		remove_migration_ptes(folio, folio, true);
		i += folio_nr_pages(folio);
		if (i >= nr)
			break;
		folio = folio_next(folio);
	}
}
static void lru_add_page_tail(struct page *head, struct page *tail,
		struct lruvec *lruvec, struct list_head *list)
{
	VM_BUG_ON_PAGE(!PageHead(head), head);
	VM_BUG_ON_PAGE(PageCompound(tail), head);
	VM_BUG_ON_PAGE(PageLRU(tail), head);
	lockdep_assert_held(&lruvec->lru_lock);

	if (list) {
		/* page reclaim is reclaiming a huge page */
		VM_WARN_ON(PageLRU(head));
		get_page(tail);
		list_add_tail(&tail->lru, list);
	} else {
		/* head is still on lru (and we have it frozen) */
		VM_WARN_ON(!PageLRU(head));
		if (PageUnevictable(tail))
			tail->mlock_count = 0;
		else
			list_add_tail(&tail->lru, &head->lru);
		SetPageLRU(tail);
	}
}
static void __split_huge_page_tail(struct folio *folio, int tail,
		struct lruvec *lruvec, struct list_head *list)
{
	struct page *head = &folio->page;
	struct page *page_tail = head + tail;
	/*
	 * Careful: new_folio is not a "real" folio before we cleared PageTail.
	 * Don't pass it around before clear_compound_head().
	 */
	struct folio *new_folio = (struct folio *)page_tail;

	VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);

	/*
	 * Clone page flags before unfreezing refcount.
	 *
	 * After a successful get_page_unless_zero() might follow a flags
	 * change, for example lock_page() which sets PG_waiters.
	 *
	 * Note that for mapped sub-pages of an anonymous THP,
	 * PG_anon_exclusive has been cleared in unmap_folio() and is stored in
	 * the migration entry instead, from where remap_page() will restore
	 * it. We can still have PG_anon_exclusive set on effectively unmapped
	 * and unreferenced sub-pages of an anonymous THP: we can simply drop
	 * PG_anon_exclusive (-> PG_mappedtodisk) for these here.
	 */
	page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	page_tail->flags |= (head->flags &
			((1L << PG_referenced) |
			 (1L << PG_swapbacked) |
			 (1L << PG_swapcache) |
			 (1L << PG_mlocked) |
			 (1L << PG_uptodate) |
			 (1L << PG_active) |
			 (1L << PG_workingset) |
			 (1L << PG_locked) |
			 (1L << PG_unevictable) |
#ifdef CONFIG_ARCH_USES_PG_ARCH_X
			 (1L << PG_arch_2) |
			 (1L << PG_arch_3) |
#endif
			 (1L << PG_dirty) |
			 LRU_GEN_MASK | LRU_REFS_MASK));

	/* ->mapping in first and second tail page is replaced by other uses */
	VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
			page_tail);
	page_tail->mapping = head->mapping;
	page_tail->index = head->index + tail;

	/*
	 * page->private should not be set in tail pages. Fix up and warn once
	 * if private is unexpectedly set.
	 */
	if (unlikely(page_tail->private)) {
		VM_WARN_ON_ONCE_PAGE(true, page_tail);
		page_tail->private = 0;
	}
	if (folio_test_swapcache(folio))
		new_folio->swap.val = folio->swap.val + tail;

	/* Page flags must be visible before we make the page non-compound. */
	smp_wmb();

	/*
	 * Clear PageTail before unfreezing page refcount.
	 *
	 * After a successful get_page_unless_zero() might follow put_page()
	 * which needs a correct compound_head().
	 */
	clear_compound_head(page_tail);

	/* Finally unfreeze refcount. Additional reference from page cache. */
	page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) ||
					  PageSwapCache(head)));

	if (page_is_young(head))
		set_page_young(page_tail);
	if (page_is_idle(head))
		set_page_idle(page_tail);

	folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio));

	/*
	 * always add to the tail because some iterators expect new
	 * pages to show after the currently processed elements - e.g.
	 * migrate_pages
	 */
	lru_add_page_tail(head, page_tail, lruvec, list);
}
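/*
 * Refcount example for the unfreeze above: a tail page of an anonymous THP
 * that is not in the swap cache is unfrozen to 1 (the reference handed out
 * by the split code); file-backed and swap-cache tails are unfrozen to 2,
 * the extra reference being the one held by the page cache or swap cache.
 */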
static void __split_huge_page(struct page *page, struct list_head *list,
		pgoff_t end)
{
	struct folio *folio = page_folio(page);
	struct page *head = &folio->page;
	struct lruvec *lruvec;
	struct address_space *swap_cache = NULL;
	unsigned long offset = 0;
	unsigned int nr = thp_nr_pages(head);
	int i, nr_dropped = 0;

	/* complete memcg works before add pages to LRU */
	split_page_memcg(head, nr);

	if (folio_test_anon(folio) && folio_test_swapcache(folio)) {
		offset = swp_offset(folio->swap);
		swap_cache = swap_address_space(folio->swap);
		xa_lock(&swap_cache->i_pages);
	}

	/* lock lru list/PageCompound, ref frozen by page_ref_freeze */
	lruvec = folio_lruvec_lock(folio);

	ClearPageHasHWPoisoned(head);

	for (i = nr - 1; i >= 1; i--) {
		__split_huge_page_tail(folio, i, lruvec, list);
		/* Some pages can be beyond EOF: drop them from page cache */
		if (head[i].index >= end) {
			struct folio *tail = page_folio(head + i);

			if (shmem_mapping(head->mapping))
				nr_dropped++;
			else if (folio_test_clear_dirty(tail))
				folio_account_cleaned(tail,
					inode_to_wb(folio->mapping->host));
			__filemap_remove_folio(tail, NULL);
			folio_put(tail);
		} else if (!PageAnon(page)) {
			__xa_store(&head->mapping->i_pages, head[i].index,
					head + i, 0);
		} else if (swap_cache) {
			__xa_store(&swap_cache->i_pages, offset + i,
					head + i, 0);
		}
	}

	ClearPageCompound(head);
	unlock_page_lruvec(lruvec);
	/* Caller disabled irqs, so they are still disabled here */

	split_page_owner(head, nr);

	/* See comment in __split_huge_page_tail() */
	if (PageAnon(head)) {
		/* Additional pin to swap cache */
		if (PageSwapCache(head)) {
			page_ref_add(head, 2);
			xa_unlock(&swap_cache->i_pages);
		} else {
			page_ref_inc(head);
		}
	} else {
		/* Additional pin to page cache */
		page_ref_add(head, 2);
		xa_unlock(&head->mapping->i_pages);
	}
	local_irq_enable();

	if (nr_dropped)
		shmem_uncharge(head->mapping->host, nr_dropped);
	remap_page(folio, nr);

	if (folio_test_swapcache(folio))
		split_swap_cluster(folio->swap);

	for (i = 0; i < nr; i++) {
		struct page *subpage = head + i;

		if (subpage == page)
			continue;
		unlock_page(subpage);

		/*
		 * Subpages may be freed if there wasn't any mapping,
		 * like if add_to_swap() is running on an lru page that
		 * had its mapping zapped. And freeing these pages
		 * requires taking the lru_lock, so we do the put_page
		 * of the tail pages after the split is complete.
		 */
		free_page_and_swap_cache(subpage);
	}
}
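/*
 * __split_huge_page() is entered with interrupts disabled by its caller,
 * split_huge_page_to_list(), and re-enables them itself once the head
 * page's extra cache references have been taken and the xarray locks
 * have been dropped.
 */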
/* Racy check whether the huge page can be split */
bool can_split_folio(struct folio *folio, int *pextra_pins)
{
	int extra_pins;

	/* Additional pins from page cache */
	if (folio_test_anon(folio))
		extra_pins = folio_test_swapcache(folio) ?
				folio_nr_pages(folio) : 0;
	else
		extra_pins = folio_nr_pages(folio);
	if (pextra_pins)
		*pextra_pins = extra_pins;
	return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins - 1;
}
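/*
 * Worked example: an anonymous PMD-mapped THP, not in the swap cache,
 * mapped by one process and pinned once by the split caller has
 * folio_mapcount() == 1 and folio_ref_count() == 2, so with
 * extra_pins == 0 the check reads 1 == 2 - 0 - 1 and the split may
 * proceed; any additional GUP pin makes it fail.
 */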
 * This function splits a huge page into normal pages. @page can point to
 * any subpage of the huge page to split. The split doesn't change the
 * position of @page.
 *
 * The caller must hold a pin on @page; otherwise the split fails with
 * -EBUSY. The huge page must be locked.
 *
 * If @list is null, tail pages will be added to the LRU list, otherwise
 * to @list.
 *
 * Both head page and tail pages will inherit mapping, flags, and so on
 * from the hugepage.
 *
 * The GUP pin and PG_locked are transferred to @page. The remaining
 * subpages can be freed if they are not mapped.
 *
 * Returns 0 if the hugepage is split successfully.
 * Returns -EBUSY if the page is pinned or if anon_vma disappeared from
 * under us.
 */
int split_huge_page_to_list(struct page *page, struct list_head *list)
{
	struct folio *folio = page_folio(page);
	struct deferred_split *ds_queue = get_deferred_split_queue(folio);
	XA_STATE(xas, &folio->mapping->i_pages, folio->index);
	struct anon_vma *anon_vma = NULL;
	struct address_space *mapping = NULL;
	int extra_pins, ret;
	pgoff_t end;
	bool is_hzp;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);

	is_hzp = is_huge_zero_page(&folio->page);
	if (is_hzp) {
		pr_warn_ratelimited("Called split_huge_page for huge zero page\n");
		return -EBUSY;
	}

	if (folio_test_writeback(folio))
		return -EBUSY;

	if (folio_test_anon(folio)) {
		/*
		 * The caller does not necessarily hold an mmap_lock that would
		 * prevent the anon_vma disappearing, so we first take a
		 * reference to it and then lock the anon_vma for write. This
		 * is similar to folio_lock_anon_vma_read except the write lock
		 * is taken to serialise against parallel split or collapse
		 * operations.
		 */
		anon_vma = folio_get_anon_vma(folio);
		if (!anon_vma) {
			ret = -EBUSY;
			goto out;
		}
		end = -1;
		mapping = NULL;
		anon_vma_lock_write(anon_vma);
	} else {
		gfp_t gfp;

		mapping = folio->mapping;

		/* Truncated ? */
		if (!mapping) {
			ret = -EBUSY;
			goto out;
		}

		gfp = current_gfp_context(mapping_gfp_mask(mapping) &
							GFP_RECLAIM_MASK);

		if (!filemap_release_folio(folio, gfp)) {
			ret = -EBUSY;
			goto out;
		}

		xas_split_alloc(&xas, folio, folio_order(folio), gfp);
		if (xas_error(&xas)) {
			ret = xas_error(&xas);
			goto out;
		}

		anon_vma = NULL;
		i_mmap_lock_read(mapping);

		/*
		 *__split_huge_page() may need to trim off pages beyond EOF:
		 * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
		 * which cannot be nested inside the page tree lock. So note
		 * end now: i_size itself may be changed at any moment, but
		 * folio lock is good enough to serialize the trimming.
		 */
		end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
		if (shmem_mapping(mapping))
			end = shmem_fallocend(mapping->host, end);
	}

	/*
	 * Racy check if we can split the page, before unmap_folio() will
	 * split the PMDs.
	 */
	if (!can_split_folio(folio, &extra_pins)) {
		ret = -EAGAIN;
		goto out_unlock;
	}

	unmap_folio(folio);

	/* block interrupt reentry in xa_lock and spinlock */
	local_irq_disable();
	if (mapping) {
		/*
		 * Check if the folio is present in page cache.
		 * We assume all tail pages are present too, if folio is there.
		 */
		xas_lock(&xas);
		xas_reset(&xas);
		if (xas_load(&xas) != folio)
			goto fail;
	}

	/* Prevent deferred_split_scan() touching ->_refcount */
	spin_lock(&ds_queue->split_queue_lock);
	if (folio_ref_freeze(folio, 1 + extra_pins)) {
		if (!list_empty(&folio->_deferred_list)) {
			ds_queue->split_queue_len--;
			list_del(&folio->_deferred_list);
		}
		spin_unlock(&ds_queue->split_queue_lock);
		if (mapping) {
			int nr = folio_nr_pages(folio);

			xas_split(&xas, folio, folio_order(folio));
			if (folio_test_swapbacked(folio)) {
				__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS,
							-nr);
			} else {
				__lruvec_stat_mod_folio(folio, NR_FILE_THPS,
							-nr);
				filemap_nr_thps_dec(mapping);
			}
		}

		__split_huge_page(page, list, end);
		ret = 0;
	} else {
		spin_unlock(&ds_queue->split_queue_lock);
fail:
		if (mapping)
			xas_unlock(&xas);
		local_irq_enable();
		remap_page(folio, folio_nr_pages(folio));
		ret = -EAGAIN;
	}

out_unlock:
	if (anon_vma) {
		anon_vma_unlock_write(anon_vma);
		put_anon_vma(anon_vma);
	}
	if (mapping)
		i_mmap_unlock_read(mapping);
out:
	xas_destroy(&xas);
	count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
	return ret;
}
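/*
 * Minimal usage sketch: split_huge_page() in include/linux/huge_mm.h is
 * the list == NULL wrapper around this function. A caller that already
 * holds a reference typically does:
 *
 *	folio_lock(folio);
 *	err = split_huge_page(&folio->page);
 *	folio_unlock(folio);
 *
 * err is 0 on success; the tail pages then sit on the LRU and the caller
 * still holds its reference on (and the lock of) the page it passed in.
 */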
void folio_undo_large_rmappable(struct folio *folio)
{
	struct deferred_split *ds_queue;
	unsigned long flags;

	/*
	 * At this point, there is no one trying to add the folio to
	 * deferred_list. If the folio is not in deferred_list, it's safe
	 * to check without acquiring the split_queue_lock.
	 */
	if (data_race(list_empty(&folio->_deferred_list)))
		return;

	ds_queue = get_deferred_split_queue(folio);
	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
	if (!list_empty(&folio->_deferred_list)) {
		ds_queue->split_queue_len--;
		list_del(&folio->_deferred_list);
	}
	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
}
void deferred_split_folio(struct folio *folio)
{
	struct deferred_split *ds_queue = get_deferred_split_queue(folio);
#ifdef CONFIG_MEMCG
	struct mem_cgroup *memcg = folio_memcg(folio);
#endif
	unsigned long flags;

	VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);

	/*
	 * The try_to_unmap() in the page reclaim path might reach here too;
	 * this may cause a race condition that corrupts the deferred split
	 * queue. And, if page reclaim is already handling the same folio, it
	 * is unnecessary to handle it again in the shrinker.
	 *
	 * Check the swapcache flag to determine if the folio is being
	 * handled by page reclaim, since THP swap would add the folio into
	 * the swap cache before calling try_to_unmap().
	 */
	if (folio_test_swapcache(folio))
		return;

	if (!list_empty(&folio->_deferred_list))
		return;

	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
	if (list_empty(&folio->_deferred_list)) {
		count_vm_event(THP_DEFERRED_SPLIT_PAGE);
		list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
		ds_queue->split_queue_len++;
#ifdef CONFIG_MEMCG
		if (memcg)
			set_shrinker_bit(memcg, folio_nid(folio),
					 deferred_split_shrinker->id);
#endif
	}
	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
}
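/*
 * Partially unmapped THPs (e.g. after MADV_DONTNEED on a sub-range) are
 * queued here rather than split immediately; the deferred_split shrinker
 * below splits them under memory pressure, when freeing the now-unused
 * subpages is actually worthwhile.
 */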
static unsigned long deferred_split_count(struct shrinker *shrink,
		struct shrink_control *sc)
{
	struct pglist_data *pgdata = NODE_DATA(sc->nid);
	struct deferred_split *ds_queue = &pgdata->deferred_split_queue;

#ifdef CONFIG_MEMCG
	if (sc->memcg)
		ds_queue = &sc->memcg->deferred_split_queue;
#endif
	return READ_ONCE(ds_queue->split_queue_len);
}
static unsigned long deferred_split_scan(struct shrinker *shrink,
		struct shrink_control *sc)
{
	struct pglist_data *pgdata = NODE_DATA(sc->nid);
	struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
	unsigned long flags;
	LIST_HEAD(list);
	struct folio *folio, *next;
	int split = 0;

#ifdef CONFIG_MEMCG
	if (sc->memcg)
		ds_queue = &sc->memcg->deferred_split_queue;
#endif

	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
	/* Take pin on all head pages to avoid freeing them under us */
	list_for_each_entry_safe(folio, next, &ds_queue->split_queue,
							_deferred_list) {
		if (folio_try_get(folio)) {
			list_move(&folio->_deferred_list, &list);
		} else {
			/* We lost race with folio_put() */
			list_del_init(&folio->_deferred_list);
			ds_queue->split_queue_len--;
		}
		if (!--sc->nr_to_scan)
			break;
	}
	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);

	list_for_each_entry_safe(folio, next, &list, _deferred_list) {
		if (!folio_trylock(folio))
			goto next;
		/* split_huge_page() removes page from list on success */
		if (!split_folio(folio))
			split++;
		folio_unlock(folio);
next:
		folio_put(folio);
	}

	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
	list_splice_tail(&list, &ds_queue->split_queue);
	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);

	/*
	 * Stop the shrinker if we didn't split any page, but the queue is
	 * empty. This can happen if pages were freed under us.
	 */
	if (!split && list_empty(&ds_queue->split_queue))
		return SHRINK_STOP;
	return split;
}
#ifdef CONFIG_DEBUG_FS
static void split_huge_pages_all(void)
{
	struct zone *zone;
	struct page *page;
	struct folio *folio;
	unsigned long pfn, max_zone_pfn;
	unsigned long total = 0, split = 0;

	pr_debug("Split all THPs\n");
	for_each_zone(zone) {
		if (!managed_zone(zone))
			continue;
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
			int nr_pages;

			page = pfn_to_online_page(pfn);
			if (!page || PageTail(page))
				continue;
			folio = page_folio(page);
			if (!folio_try_get(folio))
				continue;

			if (unlikely(page_folio(page) != folio))
				goto next;

			if (zone != folio_zone(folio))
				goto next;

			if (!folio_test_large(folio)
				|| folio_test_hugetlb(folio)
				|| !folio_test_lru(folio))
				goto next;

			total++;
			folio_lock(folio);
			nr_pages = folio_nr_pages(folio);
			if (!split_folio(folio))
				split++;
			pfn += nr_pages - 1;
			folio_unlock(folio);
next:
			folio_put(folio);

			cond_resched();
		}
	}

	pr_debug("%lu of %lu THP split\n", split, total);
}
static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma)
{
	return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) ||
		    is_vm_hugetlb_page(vma);
}
static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
				unsigned long vaddr_end)
{
	int ret = 0;
	struct task_struct *task;
	struct mm_struct *mm;
	unsigned long total = 0, split = 0;
	unsigned long addr;

	vaddr_start &= PAGE_MASK;
	vaddr_end &= PAGE_MASK;

	/* Find the task_struct from pid */
	rcu_read_lock();
	task = find_task_by_vpid(pid);
	if (!task) {
		rcu_read_unlock();
		ret = -ESRCH;
		goto out;
	}
	get_task_struct(task);
	rcu_read_unlock();

	/* Find the mm_struct */
	mm = get_task_mm(task);
	put_task_struct(task);

	if (!mm) {
		ret = -EINVAL;
		goto out;
	}

	pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx]\n",
		 pid, vaddr_start, vaddr_end);

	mmap_read_lock(mm);
	/*
	 * always increase addr by PAGE_SIZE, since we could have a PTE page
	 * table filled with PTE-mapped THPs, each of which is distinct.
	 */
	for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) {
		struct vm_area_struct *vma = vma_lookup(mm, addr);
		struct page *page;
		struct folio *folio;

		if (!vma)
			break;

		/* skip special VMA and hugetlb VMA */
		if (vma_not_suitable_for_thp_split(vma)) {
			addr = vma->vm_end;
			continue;
		}

		/* FOLL_DUMP to ignore special (like zero) pages */
		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);

		if (IS_ERR_OR_NULL(page))
			continue;

		folio = page_folio(page);
		if (!is_transparent_hugepage(folio))
			goto next;

		total++;
		if (!can_split_folio(folio, NULL))
			goto next;

		if (!folio_trylock(folio))
			goto next;

		if (!split_folio(folio))
			split++;

		folio_unlock(folio);
next:
		folio_put(folio);
		cond_resched();
	}
	mmap_read_unlock(mm);
	mmput(mm);

	pr_debug("%lu of %lu THP split\n", split, total);

out:
	return ret;
}
static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
				pgoff_t off_end)
{
	struct filename *file;
	struct file *candidate;
	struct address_space *mapping;
	int ret = -EINVAL;
	pgoff_t index;
	int nr_pages = 1;
	unsigned long total = 0, split = 0;

	file = getname_kernel(file_path);
	if (IS_ERR(file))
		return ret;

	candidate = file_open_name(file, O_RDONLY, 0);
	if (IS_ERR(candidate))
		goto out;

	pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n",
		 file_path, off_start, off_end);

	mapping = candidate->f_mapping;

	for (index = off_start; index < off_end; index += nr_pages) {
		struct folio *folio = filemap_get_folio(mapping, index);

		nr_pages = 1;
		if (IS_ERR(folio))
			continue;

		if (!folio_test_large(folio))
			goto next;

		total++;
		nr_pages = folio_nr_pages(folio);

		if (!folio_trylock(folio))
			goto next;

		if (!split_folio(folio))
			split++;

		folio_unlock(folio);
next:
		folio_put(folio);
		cond_resched();
	}

	filp_close(candidate, NULL);
	ret = 0;

	pr_debug("%lu of %lu file-backed THP split\n", split, total);
out:
	putname(file);
	return ret;
}
#define MAX_INPUT_BUF_SZ 255

static ssize_t split_huge_pages_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppops)
{
	static DEFINE_MUTEX(split_debug_mutex);
	ssize_t ret;
	/* hold pid, start_vaddr, end_vaddr or file_path, off_start, off_end */
	char input_buf[MAX_INPUT_BUF_SZ];
	int pid;
	unsigned long vaddr_start, vaddr_end;

	ret = mutex_lock_interruptible(&split_debug_mutex);
	if (ret)
		return ret;

	ret = -EFAULT;

	memset(input_buf, 0, MAX_INPUT_BUF_SZ);
	if (copy_from_user(input_buf, buf, min_t(size_t, count, MAX_INPUT_BUF_SZ)))
		goto out;

	input_buf[MAX_INPUT_BUF_SZ - 1] = '\0';

	if (input_buf[0] == '/') {
		char *tok;
		char *buf = input_buf;
		char file_path[MAX_INPUT_BUF_SZ];
		pgoff_t off_start = 0, off_end = 0;
		size_t input_len = strlen(input_buf);

		tok = strsep(&buf, ",");
		if (tok) {
			strcpy(file_path, tok);
		} else {
			ret = -EINVAL;
			goto out;
		}

		ret = sscanf(buf, "0x%lx,0x%lx", &off_start, &off_end);
		if (ret != 2) {
			ret = -EINVAL;
			goto out;
		}
		ret = split_huge_pages_in_file(file_path, off_start, off_end);
		if (!ret)
			ret = input_len;

		goto out;
	}

	ret = sscanf(input_buf, "%d,0x%lx,0x%lx", &pid, &vaddr_start, &vaddr_end);
	if (ret == 1 && pid == 1) {
		split_huge_pages_all();
		ret = strlen(input_buf);
		goto out;
	} else if (ret != 3) {
		ret = -EINVAL;
		goto out;
	}

	ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end);
	if (!ret)
		ret = strlen(input_buf);
out:
	mutex_unlock(&split_debug_mutex);
	return ret;
}
static const struct file_operations split_huge_pages_fops = {
	.owner	 = THIS_MODULE,
	.write	 = split_huge_pages_write,
	.llseek  = no_llseek,
};

static int __init split_huge_pages_debugfs(void)
{
	debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
			    &split_huge_pages_fops);
	return 0;
}
late_initcall(split_huge_pages_debugfs);
#endif
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address = pvmw->address;
	bool anon_exclusive;
	pmd_t pmdval;
	swp_entry_t entry;
	pmd_t pmdswp;

	if (!(pvmw->pmd && !pvmw->pte))
		return 0;

	flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
	pmdval = pmdp_invalidate(vma, address, pvmw->pmd);

	/* See page_try_share_anon_rmap(): invalidate PMD first. */
	anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
	if (anon_exclusive && page_try_share_anon_rmap(page)) {
		set_pmd_at(mm, address, pvmw->pmd, pmdval);
		return -EBUSY;
	}

	if (pmd_dirty(pmdval))
		set_page_dirty(page);
	if (pmd_write(pmdval))
		entry = make_writable_migration_entry(page_to_pfn(page));
	else if (anon_exclusive)
		entry = make_readable_exclusive_migration_entry(page_to_pfn(page));
	else
		entry = make_readable_migration_entry(page_to_pfn(page));
	if (pmd_young(pmdval))
		entry = make_migration_entry_young(entry);
	if (pmd_dirty(pmdval))
		entry = make_migration_entry_dirty(entry);
	pmdswp = swp_entry_to_pmd(entry);
	if (pmd_soft_dirty(pmdval))
		pmdswp = pmd_swp_mksoft_dirty(pmdswp);
	if (pmd_uffd_wp(pmdval))
		pmdswp = pmd_swp_mkuffd_wp(pmdswp);
	set_pmd_at(mm, address, pvmw->pmd, pmdswp);
	page_remove_rmap(page, vma, true);
	put_page(page);
	trace_set_migration_pmd(address, pmd_val(pmdswp));

	return 0;
}
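/*
 * If the page is anon-exclusive but cannot be shared (it may be pinned),
 * page_try_share_anon_rmap() above fails; the original PMD is restored
 * and -EBUSY tells the caller that migration of this mapping must be
 * skipped.
 */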
void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address = pvmw->address;
	unsigned long haddr = address & HPAGE_PMD_MASK;
	pmd_t pmde;
	swp_entry_t entry;

	if (!(pvmw->pmd && !pvmw->pte))
		return;

	entry = pmd_to_swp_entry(*pvmw->pmd);
	get_page(new);
	pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot));
	if (pmd_swp_soft_dirty(*pvmw->pmd))
		pmde = pmd_mksoft_dirty(pmde);
	if (is_writable_migration_entry(entry))
		pmde = pmd_mkwrite(pmde, vma);
	if (pmd_swp_uffd_wp(*pvmw->pmd))
		pmde = pmd_mkuffd_wp(pmde);
	if (!is_migration_entry_young(entry))
		pmde = pmd_mkold(pmde);
	/* NOTE: this may contain setting soft-dirty on some archs */
	if (PageDirty(new) && is_migration_entry_dirty(entry))
		pmde = pmd_mkdirty(pmde);

	if (PageAnon(new)) {
		rmap_t rmap_flags = RMAP_COMPOUND;

		if (!is_readable_migration_entry(entry))
			rmap_flags |= RMAP_EXCLUSIVE;

		page_add_anon_rmap(new, vma, haddr, rmap_flags);
	} else {
		page_add_file_rmap(new, vma, true);
	}
	VM_BUG_ON(pmd_write(pmde) && PageAnon(new) && !PageAnonExclusive(new));
	set_pmd_at(mm, haddr, pvmw->pmd, pmde);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache_pmd(vma, address, pvmw->pmd);
	trace_remove_migration_pmd(address, pmd_val(pmde));
}
#endif