/*
 * Copyright (C) 2009  Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
#include <linux/dax.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/pfn_t.h>
#include <linux/mman.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/oom.h>
#include <linux/page_owner.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications without a
 * guaranteed benefit. When transparent hugepage support is enabled, it is
 * enabled for all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);

static struct shrinker deferred_split_shrinker;

static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;
static struct page *get_huge_zero_page(void)
{
	struct page *zero_page;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return READ_ONCE(huge_zero_page);

	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_page) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return NULL;
	}
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	preempt_disable();
	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
		preempt_enable();
		__free_pages(zero_page, compound_order(zero_page));
		goto retry;
	}

	/* We take additional reference here. It will be put back by shrinker */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	return READ_ONCE(huge_zero_page);
}

static void put_huge_zero_page(void)
{
	/*
	 * Counter should never go to zero here. Only shrinker can put
	 * last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}
struct page *mm_get_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		return READ_ONCE(huge_zero_page);

	if (!get_huge_zero_page())
		return NULL;

	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();

	return READ_ONCE(huge_zero_page);
}

void mm_put_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();
}
static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* we can free zero page only if last reference remains */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct page *zero_page = xchg(&huge_zero_page, NULL);
		BUG_ON(zero_page == NULL);
		__free_pages(zero_page, compound_order(zero_page));
		return HPAGE_PMD_NR;
	}

	return 0;
}

static struct shrinker huge_zero_page_shrinker = {
	.count_objects = shrink_huge_zero_page_count,
	.scan_objects = shrink_huge_zero_page_scan,
	.seeks = DEFAULT_SEEKS,
};
#ifdef CONFIG_SYSFS
static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "[always] madvise never\n");
	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always [madvise] never\n");
	else
		return sprintf(buf, "always madvise [never]\n");
}
static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret = count;

	if (!memcmp("always", buf,
		    min(sizeof("always")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
	} else if (!memcmp("madvise", buf,
			   min(sizeof("madvise")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (!memcmp("never", buf,
			   min(sizeof("never")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		ret = -EINVAL;

	if (ret > 0) {
		int err = start_stop_khugepaged();
		if (err)
			ret = err;
	}
	return ret;
}
static struct kobj_attribute enabled_attr =
	__ATTR(enabled, 0644, enabled_show, enabled_store);
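
/*
 * Illustrative usage (not part of this file): with CONFIG_SYSFS the mode
 * above can be switched at runtime from a root shell, e.g.
 *
 *	echo madvise > /sys/kernel/mm/transparent_hugepage/enabled
 *	cat /sys/kernel/mm/transparent_hugepage/enabled
 *		-> "always [madvise] never"
 */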
ssize_t single_hugepage_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag)
{
	return sprintf(buf, "%d\n",
		       !!test_bit(flag, &transparent_hugepage_flags));
}

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}
static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "[always] defer defer+madvise madvise never\n");
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always [defer] defer+madvise madvise never\n");
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always defer [defer+madvise] madvise never\n");
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always defer defer+madvise [madvise] never\n");
	return sprintf(buf, "always defer defer+madvise madvise [never]\n");
}
static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	if (!memcmp("always", buf,
		    min(sizeof("always")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
	} else if (!memcmp("defer+madvise", buf,
		    min(sizeof("defer+madvise")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
	} else if (!memcmp("defer", buf,
		    min(sizeof("defer")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
	} else if (!memcmp("madvise", buf,
		    min(sizeof("madvise")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (!memcmp("never", buf,
		    min(sizeof("never")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}
static struct kobj_attribute defrag_attr =
	__ATTR(defrag, 0644, defrag_show, defrag_store);
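
/*
 * Illustrative usage (not part of this file): the defrag policy is chosen
 * through the same sysfs directory, e.g.
 *
 *	echo defer+madvise > /sys/kernel/mm/transparent_hugepage/defrag
 */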
static ssize_t use_zero_page_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr =
	__ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);

static ssize_t hpage_pmd_size_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", HPAGE_PMD_SIZE);
}
static struct kobj_attribute hpage_pmd_size_attr =
	__ATTR_RO(hpage_pmd_size);
#ifdef CONFIG_DEBUG_VM
static ssize_t debug_cow_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static ssize_t debug_cow_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static struct kobj_attribute debug_cow_attr =
	__ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
#endif /* CONFIG_DEBUG_VM */
static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
	&use_zero_page_attr.attr,
	&hpage_pmd_size_attr.attr,
#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
	&shmem_enabled_attr.attr,
#endif
#ifdef CONFIG_DEBUG_VM
	&debug_cow_attr.attr,
#endif
	NULL,
};

static const struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};
static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		pr_err("failed to create transparent hugepage kobject\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto remove_hp_group;
	}

	return 0;

remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */
static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
		transparent_hugepage_flags = 0;
		return -EINVAL;
	}

	/*
	 * hugepages can't be allocated by the buddy allocator
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER >= MAX_ORDER);
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		goto err_sysfs;

	err = khugepaged_init();
	if (err)
		goto err_slab;

	err = register_shrinker(&huge_zero_page_shrinker);
	if (err)
		goto err_hzp_shrinker;
	err = register_shrinker(&deferred_split_shrinker);
	if (err)
		goto err_split_shrinker;

	/*
	 * By default disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save. The admin can still enable it through /sys.
	 */
	if (totalram_pages < (512 << (20 - PAGE_SHIFT))) {
		transparent_hugepage_flags = 0;
		return 0;
	}

	err = start_stop_khugepaged();
	if (err)
		goto err_khugepaged;

	return 0;
err_khugepaged:
	unregister_shrinker(&deferred_split_shrinker);
err_split_shrinker:
	unregister_shrinker(&huge_zero_page_shrinker);
err_hzp_shrinker:
	khugepaged_destroy();
err_slab:
	hugepage_exit_sysfs(hugepage_kobj);
err_sysfs:
	return err;
}
subsys_initcall(hugepage_init);
static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;

	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);
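
/*
 * Illustrative usage (not part of this file): the same three modes can be
 * selected at boot time on the kernel command line, e.g.
 *
 *	transparent_hugepage=madvise
 */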
pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd);
	return pmd;
}

static inline struct list_head *page_deferred_list(struct page *page)
{
	/*
	 * ->lru in the tail pages is occupied by compound_head.
	 * Let's use ->mapping + ->index in the second tail page as list_head.
	 */
	return (struct list_head *)&page[2].mapping;
}
void prep_transhuge_page(struct page *page)
{
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */

	INIT_LIST_HEAD(page_deferred_list(page));
	set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
}
static unsigned long __thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len,
		loff_t off, unsigned long flags, unsigned long size)
{
	loff_t off_end = off + len;
	loff_t off_align = round_up(off, size);
	unsigned long len_pad, ret;

	if (off_end <= off_align || (off_end - off_align) < size)
		return 0;

	len_pad = len + size;
	if (len_pad < len || (off + len_pad) < off)
		return 0;

	ret = current->mm->get_unmapped_area(filp, addr, len_pad,
					     off >> PAGE_SHIFT, flags);

	/*
	 * The failure might be due to length padding. The caller will retry
	 * without the padding.
	 */
	if (IS_ERR_VALUE(ret))
		return 0;

	/*
	 * Do not try to align to THP boundary if allocation at the address
	 * hint succeeds.
	 */
	if (ret == addr)
		return addr;

	ret += (off - ret) & (size - 1);
	return ret;
}
unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	unsigned long ret;
	loff_t off = (loff_t)pgoff << PAGE_SHIFT;

	if (!IS_DAX(filp->f_mapping->host) || !IS_ENABLED(CONFIG_FS_DAX_PMD))
		goto out;

	ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
	if (ret)
		return ret;
out:
	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
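
/*
 * Worked example (illustrative numbers): with size == PMD_SIZE == 2MB and
 * off == 0, an unaligned ret == 0x7f0000123000 is advanced by
 * (off - ret) & (size - 1) == 0xdd000 to 0x7f0000200000, so the virtual
 * address becomes congruent to the file offset modulo 2MB and a PMD-sized
 * mapping of the file is possible.
 */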
static int __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page,
		gfp_t gfp)
{
	struct vm_area_struct *vma = vmf->vma;
	struct mem_cgroup *memcg;
	pgtable_t pgtable;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	int ret = 0;

	VM_BUG_ON_PAGE(!PageCompound(page), page);

	if (mem_cgroup_try_charge(page, vma->vm_mm, gfp | __GFP_NORETRY, &memcg,
				  true)) {
		put_page(page);
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}

	pgtable = pte_alloc_one(vma->vm_mm, haddr);
	if (unlikely(!pgtable)) {
		ret = VM_FAULT_OOM;
		goto release;
	}

	clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * clear_huge_page writes become visible before the set_pmd_at()
	 * write.
	 */
	__SetPageUptodate(page);

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_none(*vmf->pmd))) {
		goto unlock_release;
	} else {
		pmd_t entry;

		ret = check_stable_address_space(vma->vm_mm);
		if (ret)
			goto unlock_release;

		/* Deliver the page fault to userland */
		if (userfaultfd_missing(vma)) {
			int ret;

			spin_unlock(vmf->ptl);
			mem_cgroup_cancel_charge(page, memcg, true);
			put_page(page);
			pte_free(vma->vm_mm, pgtable);
			ret = handle_userfault(vmf, VM_UFFD_MISSING);
			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			return ret;
		}

		entry = mk_huge_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		page_add_new_anon_rmap(page, vma, haddr, true);
		mem_cgroup_commit_charge(page, memcg, false, true);
		lru_cache_add_active_or_unevictable(page, vma);
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		atomic_long_inc(&vma->vm_mm->nr_ptes);
		spin_unlock(vmf->ptl);
		count_vm_event(THP_FAULT_ALLOC);
	}

	return 0;
unlock_release:
	spin_unlock(vmf->ptl);
release:
	if (pgtable)
		pte_free(vma->vm_mm, pgtable);
	mem_cgroup_cancel_charge(page, memcg, true);
	put_page(page);
	return ret;
}
/*
 * always: directly stall for all thp allocations
 * defer: wake kswapd and fail if not immediately available
 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
 *		  fail if not immediately available
 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
 *	    available
 * never: never stall for any thp allocation
 */
static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)
{
	const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);

	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
							     __GFP_KSWAPD_RECLAIM);
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
							     0);
	return GFP_TRANSHUGE_LIGHT;
}
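
/*
 * Worked example (illustrative): with "defrag" set to "madvise" and a VMA
 * previously marked by madvise(addr, len, MADV_HUGEPAGE), the REQ_MADV
 * branch above yields GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM, so the
 * fault may compact/reclaim; without the madvise hint it yields plain
 * GFP_TRANSHUGE_LIGHT and the allocation fails rather than stalls.
 */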
/* Caller must hold page table lock. */
static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
		struct page *zero_page)
{
	pmd_t entry;
	if (!pmd_none(*pmd))
		return false;
	entry = mk_pmd(zero_page, vma->vm_page_prot);
	entry = pmd_mkhuge(entry);
	if (pgtable)
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, haddr, pmd, entry);
	atomic_long_inc(&mm->nr_ptes);
	return true;
}
int do_huge_pmd_anonymous_page(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	gfp_t gfp;
	struct page *page;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;

	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return VM_FAULT_FALLBACK;
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;
	if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
		return VM_FAULT_OOM;
	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
			!mm_forbids_zeropage(vma->vm_mm) &&
			transparent_hugepage_use_zero_page()) {
		pgtable_t pgtable;
		struct page *zero_page;
		bool set;
		int ret;

		pgtable = pte_alloc_one(vma->vm_mm, haddr);
		if (unlikely(!pgtable))
			return VM_FAULT_OOM;
		zero_page = mm_get_huge_zero_page(vma->vm_mm);
		if (unlikely(!zero_page)) {
			pte_free(vma->vm_mm, pgtable);
			count_vm_event(THP_FAULT_FALLBACK);
			return VM_FAULT_FALLBACK;
		}
		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
		ret = 0;
		set = false;
		if (pmd_none(*vmf->pmd)) {
			ret = check_stable_address_space(vma->vm_mm);
			if (ret) {
				spin_unlock(vmf->ptl);
			} else if (userfaultfd_missing(vma)) {
				spin_unlock(vmf->ptl);
				ret = handle_userfault(vmf, VM_UFFD_MISSING);
				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			} else {
				set_huge_zero_page(pgtable, vma->vm_mm, vma,
						   haddr, vmf->pmd, zero_page);
				spin_unlock(vmf->ptl);
				set = true;
			}
		} else
			spin_unlock(vmf->ptl);
		if (!set)
			pte_free(vma->vm_mm, pgtable);
		return ret;
	}
	gfp = alloc_hugepage_direct_gfpmask(vma);
	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
	if (unlikely(!page)) {
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}
	prep_transhuge_page(page);
	return __do_huge_pmd_anonymous_page(vmf, page, gfp);
}
static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
		pgtable_t pgtable)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t entry;
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pmd_mkdevmap(entry);
	if (write) {
		entry = pmd_mkyoung(pmd_mkdirty(entry));
		entry = maybe_pmd_mkwrite(entry, vma);
	}

	if (pgtable) {
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
		atomic_long_inc(&mm->nr_ptes);
	}

	set_pmd_at(mm, addr, pmd, entry);
	update_mmu_cache_pmd(vma, addr, pmd);
	spin_unlock(ptl);
}
int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
			pmd_t *pmd, pfn_t pfn, bool write)
{
	pgprot_t pgprot = vma->vm_page_prot;
	pgtable_t pgtable = NULL;
	/*
	 * If we had pmd_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
	BUG_ON(!pfn_t_devmap(pfn));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(vma->vm_mm, addr);
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write, pgtable);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
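
/*
 * Illustrative caller sketch (an assumption, not code from this file): a
 * DAX-style driver's ->huge_fault handler typically ends with something
 * like
 *
 *	return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
 *				  pfn, vmf->flags & FAULT_FLAG_WRITE);
 *
 * where pfn came from the device and must have pfn_t_devmap() true,
 * matching the BUG_ON() above.
 */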
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pud = pud_mkwrite(pud);
	return pud;
}

static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, pfn_t pfn, pgprot_t prot, bool write)
{
	struct mm_struct *mm = vma->vm_mm;
	pud_t entry;
	spinlock_t *ptl;

	ptl = pud_lock(mm, pud);
	entry = pud_mkhuge(pfn_t_pud(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pud_mkdevmap(entry);
	if (write) {
		entry = pud_mkyoung(pud_mkdirty(entry));
		entry = maybe_pud_mkwrite(entry, vma);
	}
	set_pud_at(mm, addr, pud, entry);
	update_mmu_cache_pud(vma, addr, pud);
	spin_unlock(ptl);
}

int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
			pud_t *pud, pfn_t pfn, bool write)
{
	pgprot_t pgprot = vma->vm_page_prot;
	/*
	 * If we had pud_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
	BUG_ON(!pfn_t_devmap(pfn));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pud(vma, addr, pud, pfn, pgprot, write);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags)
{
	pmd_t _pmd;

	_pmd = pmd_mkyoung(*pmd);
	if (flags & FOLL_WRITE)
		_pmd = pmd_mkdirty(_pmd);
	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
				pmd, _pmd, flags & FOLL_WRITE))
		update_mmu_cache_pmd(vma, addr, pmd);
}
struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags)
{
	unsigned long pfn = pmd_pfn(*pmd);
	struct mm_struct *mm = vma->vm_mm;
	struct dev_pagemap *pgmap;
	struct page *page;

	assert_spin_locked(pmd_lockptr(mm, pmd));

	/*
	 * When we COW a devmap PMD entry, we split it into PTEs, so we should
	 * not be in this function with `flags & FOLL_COW` set.
	 */
	WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");

	if (flags & FOLL_WRITE && !pmd_write(*pmd))
		return NULL;

	if (pmd_present(*pmd) && pmd_devmap(*pmd))
		/* pass */;
	else
		return NULL;

	if (flags & FOLL_TOUCH)
		touch_pmd(vma, addr, pmd, flags);

	/*
	 * device mapped pages can only be returned if the
	 * caller will manage the page reference count.
	 */
	if (!(flags & FOLL_GET))
		return ERR_PTR(-EEXIST);

	pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
	pgmap = get_dev_pagemap(pfn, NULL);
	if (!pgmap)
		return ERR_PTR(-EFAULT);
	page = pfn_to_page(pfn);
	get_page(page);
	put_dev_pagemap(pgmap);

	return page;
}
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	struct page *src_page;
	pmd_t pmd;
	pgtable_t pgtable = NULL;
	int ret = -ENOMEM;

	/* Skip if can be re-fill on fault */
	if (!vma_is_anonymous(vma))
		return 0;

	pgtable = pte_alloc_one(dst_mm, addr);
	if (unlikely(!pgtable))
		goto out;

	dst_ptl = pmd_lock(dst_mm, dst_pmd);
	src_ptl = pmd_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pmd = *src_pmd;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
	if (unlikely(is_swap_pmd(pmd))) {
		swp_entry_t entry = pmd_to_swp_entry(pmd);

		VM_BUG_ON(!is_pmd_migration_entry(pmd));
		if (is_write_migration_entry(entry)) {
			make_migration_entry_read(&entry);
			pmd = swp_entry_to_pmd(entry);
			if (pmd_swp_soft_dirty(*src_pmd))
				pmd = pmd_swp_mksoft_dirty(pmd);
			set_pmd_at(src_mm, addr, src_pmd, pmd);
		}
		add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		atomic_long_inc(&dst_mm->nr_ptes);
		pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
		set_pmd_at(dst_mm, addr, dst_pmd, pmd);
		ret = 0;
		goto out_unlock;
	}
#endif

	if (unlikely(!pmd_trans_huge(pmd))) {
		pte_free(dst_mm, pgtable);
		goto out_unlock;
	}
	/*
	 * When page table lock is held, the huge zero pmd should not be
	 * under splitting since we don't split the page itself, only pmd to
	 * a page table.
	 */
	if (is_huge_zero_pmd(pmd)) {
		struct page *zero_page;
		/*
		 * get_huge_zero_page() will never allocate a new page here,
		 * since we already have a zero page to copy. It just takes a
		 * reference.
		 */
		zero_page = mm_get_huge_zero_page(dst_mm);
		set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
				zero_page);
		ret = 0;
		goto out_unlock;
	}

	src_page = pmd_page(pmd);
	VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
	get_page(src_page);
	page_dup_rmap(src_page, true);
	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
	atomic_long_inc(&dst_mm->nr_ptes);
	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);

	pmdp_set_wrprotect(src_mm, addr, src_pmd);
	pmd = pmd_mkold(pmd_wrprotect(pmd));
	set_pmd_at(dst_mm, addr, dst_pmd, pmd);

	ret = 0;
out_unlock:
	spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
out:
	return ret;
}
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags)
{
	pud_t _pud;

	_pud = pud_mkyoung(*pud);
	if (flags & FOLL_WRITE)
		_pud = pud_mkdirty(_pud);
	if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
				pud, _pud, flags & FOLL_WRITE))
		update_mmu_cache_pud(vma, addr, pud);
}

struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags)
{
	unsigned long pfn = pud_pfn(*pud);
	struct mm_struct *mm = vma->vm_mm;
	struct dev_pagemap *pgmap;
	struct page *page;

	assert_spin_locked(pud_lockptr(mm, pud));

	if (flags & FOLL_WRITE && !pud_write(*pud))
		return NULL;

	if (pud_present(*pud) && pud_devmap(*pud))
		/* pass */;
	else
		return NULL;

	if (flags & FOLL_TOUCH)
		touch_pud(vma, addr, pud, flags);

	/*
	 * device mapped pages can only be returned if the
	 * caller will manage the page reference count.
	 */
	if (!(flags & FOLL_GET))
		return ERR_PTR(-EEXIST);

	pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
	pgmap = get_dev_pagemap(pfn, NULL);
	if (!pgmap)
		return ERR_PTR(-EFAULT);
	page = pfn_to_page(pfn);
	get_page(page);
	put_dev_pagemap(pgmap);

	return page;
}

int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	pud_t pud;
	int ret;

	dst_ptl = pud_lock(dst_mm, dst_pud);
	src_ptl = pud_lockptr(src_mm, src_pud);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pud = *src_pud;
	if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
		goto out_unlock;

	/*
	 * When page table lock is held, the huge zero pud should not be
	 * under splitting since we don't split the page itself, only pud to
	 * a page table.
	 */
	if (is_huge_zero_pud(pud)) {
		/* No huge zero pud yet */
	}

	pudp_set_wrprotect(src_mm, addr, src_pud);
	pud = pud_mkold(pud_wrprotect(pud));
	set_pud_at(dst_mm, addr, dst_pud, pud);

	ret = 0;
out_unlock:
	spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
	return ret;
}

void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
	pud_t entry;
	unsigned long haddr;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
	if (unlikely(!pud_same(*vmf->pud, orig_pud)))
		goto unlock;

	entry = pud_mkyoung(orig_pud);
	if (write)
		entry = pud_mkdirty(entry);
	haddr = vmf->address & HPAGE_PUD_MASK;
	if (pudp_set_access_flags(vmf->vma, haddr, vmf->pud, entry, write))
		update_mmu_cache_pud(vmf->vma, vmf->address, vmf->pud);

unlock:
	spin_unlock(vmf->ptl);
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd)
{
	pmd_t entry;
	unsigned long haddr;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
		goto unlock;

	entry = pmd_mkyoung(orig_pmd);
	if (write)
		entry = pmd_mkdirty(entry);
	haddr = vmf->address & HPAGE_PMD_MASK;
	if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write))
		update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd);

unlock:
	spin_unlock(vmf->ptl);
}
static int do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, pmd_t orig_pmd,
		struct page *page)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	struct mem_cgroup *memcg;
	pgtable_t pgtable;
	pmd_t _pmd;
	int ret = 0, i;
	struct page **pages;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
			GFP_KERNEL);
	if (unlikely(!pages)) {
		ret |= VM_FAULT_OOM;
		goto out;
	}

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, vma,
					       vmf->address, page_to_nid(page));
		if (unlikely(!pages[i] ||
			     mem_cgroup_try_charge(pages[i], vma->vm_mm,
				     GFP_KERNEL, &memcg, false))) {
			if (pages[i])
				put_page(pages[i]);
			while (--i >= 0) {
				memcg = (void *)page_private(pages[i]);
				set_page_private(pages[i], 0);
				mem_cgroup_cancel_charge(pages[i], memcg,
						false);
				put_page(pages[i]);
			}
			kfree(pages);
			ret |= VM_FAULT_OOM;
			goto out;
		}
		set_page_private(pages[i], (unsigned long)memcg);
	}

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		copy_user_highpage(pages[i], page + i,
				   haddr + PAGE_SIZE * i, vma);
		__SetPageUptodate(pages[i]);
		cond_resched();
	}

	mmun_start = haddr;
	mmun_end   = haddr + HPAGE_PMD_SIZE;
	mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
		goto out_free_pages;
	VM_BUG_ON_PAGE(!PageHead(page), page);

	pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
	/* leave pmd empty until pte is filled */

	pgtable = pgtable_trans_huge_withdraw(vma->vm_mm, vmf->pmd);
	pmd_populate(vma->vm_mm, &_pmd, pgtable);

	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
		pte_t entry;
		entry = mk_pte(pages[i], vma->vm_page_prot);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		memcg = (void *)page_private(pages[i]);
		set_page_private(pages[i], 0);
		page_add_new_anon_rmap(pages[i], vmf->vma, haddr, false);
		mem_cgroup_commit_charge(pages[i], memcg, false, false);
		lru_cache_add_active_or_unevictable(pages[i], vma);
		vmf->pte = pte_offset_map(&_pmd, haddr);
		VM_BUG_ON(!pte_none(*vmf->pte));
		set_pte_at(vma->vm_mm, haddr, vmf->pte, entry);
		pte_unmap(vmf->pte);
	}
	kfree(pages);

	smp_wmb(); /* make pte visible before pmd */
	pmd_populate(vma->vm_mm, vmf->pmd, pgtable);
	page_remove_rmap(page, true);
	spin_unlock(vmf->ptl);

	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);

	ret |= VM_FAULT_WRITE;
	put_page(page);

out:
	return ret;

out_free_pages:
	spin_unlock(vmf->ptl);
	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
	for (i = 0; i < HPAGE_PMD_NR; i++) {
		memcg = (void *)page_private(pages[i]);
		set_page_private(pages[i], 0);
		mem_cgroup_cancel_charge(pages[i], memcg, false);
		put_page(pages[i]);
	}
	kfree(pages);
	goto out;
}
int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
{
	struct vm_area_struct *vma = vmf->vma;
	struct page *page = NULL, *new_page;
	struct mem_cgroup *memcg;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */
	gfp_t huge_gfp;			/* for allocation and charge */
	int ret = 0;

	vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
	VM_BUG_ON_VMA(!vma->anon_vma, vma);
	if (is_huge_zero_pmd(orig_pmd))
		goto alloc;
	spin_lock(vmf->ptl);
	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
		goto out_unlock;

	page = pmd_page(orig_pmd);
	VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
	/*
	 * We can only reuse the page if nobody else maps the huge page or any
	 * part of it.
	 */
	if (!trylock_page(page)) {
		get_page(page);
		spin_unlock(vmf->ptl);
		lock_page(page);
		spin_lock(vmf->ptl);
		if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
			unlock_page(page);
			put_page(page);
			goto out_unlock;
		}
		put_page(page);
	}
	if (reuse_swap_page(page, NULL)) {
		pmd_t entry;
		entry = pmd_mkyoung(orig_pmd);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
			update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
		ret |= VM_FAULT_WRITE;
		unlock_page(page);
		goto out_unlock;
	}
	unlock_page(page);
	get_page(page);
	spin_unlock(vmf->ptl);
alloc:
	if (transparent_hugepage_enabled(vma) &&
	    !transparent_hugepage_debug_cow()) {
		huge_gfp = alloc_hugepage_direct_gfpmask(vma);
		new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
	} else
		new_page = NULL;

	if (likely(new_page)) {
		prep_transhuge_page(new_page);
	} else {
		if (!page) {
			split_huge_pmd(vma, vmf->pmd, vmf->address);
			ret |= VM_FAULT_FALLBACK;
		} else {
			ret = do_huge_pmd_wp_page_fallback(vmf, orig_pmd, page);
			if (ret & VM_FAULT_OOM) {
				split_huge_pmd(vma, vmf->pmd, vmf->address);
				ret |= VM_FAULT_FALLBACK;
			}
			put_page(page);
		}
		count_vm_event(THP_FAULT_FALLBACK);
		goto out;
	}

	if (unlikely(mem_cgroup_try_charge(new_page, vma->vm_mm,
				huge_gfp | __GFP_NORETRY, &memcg, true))) {
		put_page(new_page);
		split_huge_pmd(vma, vmf->pmd, vmf->address);
		if (page)
			put_page(page);
		ret |= VM_FAULT_FALLBACK;
		count_vm_event(THP_FAULT_FALLBACK);
		goto out;
	}

	count_vm_event(THP_FAULT_ALLOC);

	if (!page)
		clear_huge_page(new_page, vmf->address, HPAGE_PMD_NR);
	else
		copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
	__SetPageUptodate(new_page);

	mmun_start = haddr;
	mmun_end   = haddr + HPAGE_PMD_SIZE;
	mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);

	spin_lock(vmf->ptl);
	if (page)
		put_page(page);
	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
		spin_unlock(vmf->ptl);
		mem_cgroup_cancel_charge(new_page, memcg, true);
		put_page(new_page);
		goto out_mn;
	} else {
		pmd_t entry;
		entry = mk_huge_pmd(new_page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
		page_add_new_anon_rmap(new_page, vma, haddr, true);
		mem_cgroup_commit_charge(new_page, memcg, false, true);
		lru_cache_add_active_or_unevictable(new_page, vma);
		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
		update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
		if (!page) {
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		} else {
			VM_BUG_ON_PAGE(!PageHead(page), page);
			page_remove_rmap(page, true);
			put_page(page);
		}
		ret |= VM_FAULT_WRITE;
	}
	spin_unlock(vmf->ptl);
out_mn:
	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
out:
	return ret;
out_unlock:
	spin_unlock(vmf->ptl);
	return ret;
}
/*
 * FOLL_FORCE can write to even unwritable pmd's, but only
 * after we've gone through a COW cycle and they are dirty.
 */
static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
{
	return pmd_write(pmd) ||
	       ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
}

struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
				   unsigned long addr,
				   pmd_t *pmd,
				   unsigned int flags)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page = NULL;

	assert_spin_locked(pmd_lockptr(mm, pmd));

	if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
		goto out;

	/* Avoid dumping huge zero page */
	if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
		return ERR_PTR(-EFAULT);

	/* Full NUMA hinting faults to serialise migration in fault paths */
	if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
		goto out;

	page = pmd_page(*pmd);
	VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
	if (flags & FOLL_TOUCH)
		touch_pmd(vma, addr, pmd, flags);
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/*
		 * We don't mlock() pte-mapped THPs. This way we can avoid
		 * leaking mlocked pages into non-VM_LOCKED VMAs.
		 *
		 * For anon THP:
		 *
		 * In most cases the pmd is the only mapping of the page as we
		 * break COW for the mlock() -- see gup_flags |= FOLL_WRITE for
		 * writable private mappings in populate_vma_page_range().
		 *
		 * The only scenario when we have the page shared here is if
		 * we are mlocking a read-only mapping shared over fork(). We
		 * skip mlocking such pages.
		 *
		 * For file THP:
		 *
		 * We can expect PageDoubleMap() to be stable under page lock:
		 * for file pages we set it in page_add_file_rmap(), which
		 * requires page to be locked.
		 */

		if (PageAnon(page) && compound_mapcount(page) != 1)
			goto skip_mlock;
		if (PageDoubleMap(page) || !page->mapping)
			goto skip_mlock;
		if (!trylock_page(page))
			goto skip_mlock;
		lru_add_drain();
		if (page->mapping && !PageDoubleMap(page))
			mlock_vma_page(page);
		unlock_page(page);
	}
skip_mlock:
	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
	VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
	if (flags & FOLL_GET)
		get_page(page);

out:
	return page;
}

/* NUMA hinting page fault entry point for trans huge pmds */
int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
{
	struct vm_area_struct *vma = vmf->vma;
	struct anon_vma *anon_vma = NULL;
	struct page *page;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	int page_nid = -1, this_nid = numa_node_id();
	int target_nid, last_cpupid = -1;
	bool page_locked;
	bool migrated = false;
	bool was_writable;
	int flags = 0;

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_same(pmd, *vmf->pmd)))
		goto out_unlock;

	/*
	 * If there are potential migrations, wait for completion and retry
	 * without disrupting NUMA hinting information. Do not relock and
	 * check_same as the page may no longer be mapped.
	 */
	if (unlikely(pmd_trans_migrating(*vmf->pmd))) {
		page = pmd_page(*vmf->pmd);
		if (!get_page_unless_zero(page))
			goto out_unlock;
		spin_unlock(vmf->ptl);
		wait_on_page_locked(page);
		put_page(page);
		goto out;
	}

	page = pmd_page(pmd);
	BUG_ON(is_huge_zero_page(page));
	page_nid = page_to_nid(page);
	last_cpupid = page_cpupid_last(page);
	count_vm_numa_event(NUMA_HINT_FAULTS);
	if (page_nid == this_nid) {
		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
		flags |= TNF_FAULT_LOCAL;
	}

	/* See similar comment in do_numa_page for explanation */
	if (!pmd_savedwrite(pmd))
		flags |= TNF_NO_GROUP;

	/*
	 * Acquire the page lock to serialise THP migrations but avoid dropping
	 * page_table_lock if at all possible
	 */
	page_locked = trylock_page(page);
	target_nid = mpol_misplaced(page, vma, haddr);
	if (target_nid == -1) {
		/* If the page was locked, there are no parallel migrations */
		if (page_locked)
			goto clear_pmdnuma;
	}

	/* Migration could have started since the pmd_trans_migrating check */
	if (!page_locked) {
		page_nid = -1;
		if (!get_page_unless_zero(page))
			goto out_unlock;
		spin_unlock(vmf->ptl);
		wait_on_page_locked(page);
		put_page(page);
		goto out;
	}

	/*
	 * Page is misplaced. Page lock serialises migrations. Acquire anon_vma
	 * to serialise splits
	 */
	get_page(page);
	spin_unlock(vmf->ptl);
	anon_vma = page_lock_anon_vma_read(page);

	/* Confirm the PMD did not change while page_table_lock was released */
	spin_lock(vmf->ptl);
	if (unlikely(!pmd_same(pmd, *vmf->pmd))) {
		unlock_page(page);
		put_page(page);
		page_nid = -1;
		goto out_unlock;
	}

	/* Bail if we fail to protect against THP splits for any reason */
	if (unlikely(!anon_vma)) {
		put_page(page);
		page_nid = -1;
		goto clear_pmdnuma;
	}

	/*
	 * Since we took the NUMA fault, we must have observed the !accessible
	 * bit. Make sure all other CPUs agree with that, to avoid them
	 * modifying the page we're about to migrate.
	 *
	 * Must be done under PTL such that we'll observe the relevant
	 * inc_tlb_flush_pending().
	 *
	 * We are not sure a pending tlb flush here is for a huge page
	 * mapping or not. Hence use the tlb range variant
	 */
	if (mm_tlb_flush_pending(vma->vm_mm))
		flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);

	/*
	 * Migrate the THP to the requested node, returns with page unlocked
	 * and access rights restored.
	 */
	spin_unlock(vmf->ptl);

	migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma,
				vmf->pmd, pmd, vmf->address, page, target_nid);
	if (migrated) {
		flags |= TNF_MIGRATED;
		page_nid = target_nid;
	} else
		flags |= TNF_MIGRATE_FAIL;

	goto out;
clear_pmdnuma:
	BUG_ON(!PageLocked(page));
	was_writable = pmd_savedwrite(pmd);
	pmd = pmd_modify(pmd, vma->vm_page_prot);
	pmd = pmd_mkyoung(pmd);
	if (was_writable)
		pmd = pmd_mkwrite(pmd);
	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
	update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
	unlock_page(page);
out_unlock:
	spin_unlock(vmf->ptl);

out:
	if (anon_vma)
		page_unlock_anon_vma_read(anon_vma);

	if (page_nid != -1)
		task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR,
				flags);

	return 0;
}
/*
 * Return true if we do MADV_FREE successfully on entire pmd page.
 * Otherwise, return false.
 */
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		pmd_t *pmd, unsigned long addr, unsigned long next)
{
	spinlock_t *ptl;
	pmd_t orig_pmd;
	struct page *page;
	struct mm_struct *mm = tlb->mm;
	bool ret = false;

	tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE);

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (!ptl)
		goto out_unlocked;

	orig_pmd = *pmd;
	if (is_huge_zero_pmd(orig_pmd))
		goto out;

	if (unlikely(!pmd_present(orig_pmd))) {
		VM_BUG_ON(thp_migration_supported() &&
				  !is_pmd_migration_entry(orig_pmd));
		goto out;
	}

	page = pmd_page(orig_pmd);
	/*
	 * If other processes are mapping this page, we can't discard
	 * the page unless they all do MADV_FREE so let's skip the page.
	 */
	if (page_mapcount(page) != 1)
		goto out;

	if (!trylock_page(page))
		goto out;

	/*
	 * If the user wants to discard part-pages of a THP, split it so
	 * MADV_FREE will deactivate only them.
	 */
	if (next - addr != HPAGE_PMD_SIZE) {
		get_page(page);
		spin_unlock(ptl);
		split_huge_page(page);
		unlock_page(page);
		put_page(page);
		goto out_unlocked;
	}

	if (PageDirty(page))
		ClearPageDirty(page);
	unlock_page(page);

	if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
		pmdp_invalidate(vma, addr, pmd);
		orig_pmd = pmd_mkold(orig_pmd);
		orig_pmd = pmd_mkclean(orig_pmd);

		set_pmd_at(mm, addr, pmd, orig_pmd);
		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
	}

	mark_page_lazyfree(page);
	ret = true;
out:
	spin_unlock(ptl);
out_unlocked:
	return ret;
}
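
/*
 * Illustrative userspace usage (not part of this file): this path is
 * reached via madvise(2) on an anonymous THP-backed range, e.g.
 *
 *	madvise(buf, 2 * 1024 * 1024, MADV_FREE);
 *
 * which marks a whole PMD page lazily freeable instead of discarding it
 * immediately.
 */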
static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
{
	pgtable_t pgtable;

	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pte_free(mm, pgtable);
	atomic_long_dec(&mm->nr_ptes);
}

int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		 pmd_t *pmd, unsigned long addr)
{
	pmd_t orig_pmd;
	spinlock_t *ptl;

	tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE);

	ptl = __pmd_trans_huge_lock(pmd, vma);
	if (!ptl)
		return 0;
	/*
	 * For architectures like ppc64 we look at deposited pgtable
	 * when calling pmdp_huge_get_and_clear. So do the
	 * pgtable_trans_huge_withdraw after finishing pmdp related
	 * operations.
	 */
	orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
			tlb->fullmm);
	tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
	if (vma_is_dax(vma)) {
		if (arch_needs_pgtable_deposit())
			zap_deposited_table(tlb->mm, pmd);
		spin_unlock(ptl);
		if (is_huge_zero_pmd(orig_pmd))
			tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
	} else if (is_huge_zero_pmd(orig_pmd)) {
		zap_deposited_table(tlb->mm, pmd);
		spin_unlock(ptl);
		tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
	} else {
		struct page *page = NULL;
		int flush_needed = 1;

		if (pmd_present(orig_pmd)) {
			page = pmd_page(orig_pmd);
			page_remove_rmap(page, true);
			VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
			VM_BUG_ON_PAGE(!PageHead(page), page);
		} else if (thp_migration_supported()) {
			swp_entry_t entry;

			VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
			entry = pmd_to_swp_entry(orig_pmd);
			page = pfn_to_page(swp_offset(entry));
			flush_needed = 0;
		} else
			WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");

		if (PageAnon(page)) {
			zap_deposited_table(tlb->mm, pmd);
			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
		} else {
			if (arch_needs_pgtable_deposit())
				zap_deposited_table(tlb->mm, pmd);
			add_mm_counter(tlb->mm, MM_FILEPAGES, -HPAGE_PMD_NR);
		}

		spin_unlock(ptl);
		if (flush_needed)
			tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
	}
	return 1;
}
#ifndef pmd_move_must_withdraw
static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
					 spinlock_t *old_pmd_ptl,
					 struct vm_area_struct *vma)
{
	/*
	 * With split pmd lock we also need to move preallocated
	 * PTE page table if new_pmd is on different PMD page table.
	 *
	 * We also don't deposit and withdraw tables for file pages.
	 */
	return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
}
#endif

static pmd_t move_soft_dirty_pmd(pmd_t pmd)
{
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (unlikely(is_pmd_migration_entry(pmd)))
		pmd = pmd_swp_mksoft_dirty(pmd);
	else if (pmd_present(pmd))
		pmd = pmd_mksoft_dirty(pmd);
#endif
	return pmd;
}
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, unsigned long old_end,
		  pmd_t *old_pmd, pmd_t *new_pmd)
{
	spinlock_t *old_ptl, *new_ptl;
	pmd_t pmd;
	struct mm_struct *mm = vma->vm_mm;
	bool force_flush = false;

	if ((old_addr & ~HPAGE_PMD_MASK) ||
	    (new_addr & ~HPAGE_PMD_MASK) ||
	    old_end - old_addr < HPAGE_PMD_SIZE)
		return false;

	/*
	 * The destination pmd shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON(!pmd_none(*new_pmd))) {
		VM_BUG_ON(pmd_trans_huge(*new_pmd));
		return false;
	}

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_sem prevents deadlock.
	 */
	old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
	if (old_ptl) {
		new_ptl = pmd_lockptr(mm, new_pmd);
		if (new_ptl != old_ptl)
			spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
		pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
		if (pmd_present(pmd))
			force_flush = true;
		VM_BUG_ON(!pmd_none(*new_pmd));

		if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
			pgtable_t pgtable;
			pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
			pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
		}
		pmd = move_soft_dirty_pmd(pmd);
		set_pmd_at(mm, new_addr, new_pmd, pmd);
		if (force_flush)
			flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
		if (new_ptl != old_ptl)
			spin_unlock(new_ptl);
		spin_unlock(old_ptl);
		return true;
	}
	return false;
}
/*
 * Returns
 *  - 0 if PMD could not be locked
 *  - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
 *  - HPAGE_PMD_NR if protections changed and TLB flush necessary
 */
int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, pgprot_t newprot, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	pmd_t entry;
	bool preserve_write;
	int ret;

	ptl = __pmd_trans_huge_lock(pmd, vma);
	if (!ptl)
		return 0;

	preserve_write = prot_numa && pmd_write(*pmd);
	ret = 1;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
	if (is_swap_pmd(*pmd)) {
		swp_entry_t entry = pmd_to_swp_entry(*pmd);

		VM_BUG_ON(!is_pmd_migration_entry(*pmd));
		if (is_write_migration_entry(entry)) {
			pmd_t newpmd;
			/*
			 * A protection check is difficult so
			 * just be safe and disable write
			 */
			make_migration_entry_read(&entry);
			newpmd = swp_entry_to_pmd(entry);
			if (pmd_swp_soft_dirty(*pmd))
				newpmd = pmd_swp_mksoft_dirty(newpmd);
			set_pmd_at(mm, addr, pmd, newpmd);
		}
		goto unlock;
	}
#endif

	/*
	 * Avoid trapping faults against the zero page. The read-only
	 * data is likely to be read-cached on the local CPU and
	 * local/remote hits to the zero page are not interesting.
	 */
	if (prot_numa && is_huge_zero_pmd(*pmd))
		goto unlock;

	if (prot_numa && pmd_protnone(*pmd))
		goto unlock;

	/*
	 * In case prot_numa, we are under down_read(mmap_sem). It's critical
	 * to not clear pmd intermittently to avoid race with MADV_DONTNEED
	 * which is also under down_read(mmap_sem):
	 *
	 *	CPU0:				CPU1:
	 *				change_huge_pmd(prot_numa=1)
	 *				 pmdp_huge_get_and_clear_notify()
	 * madvise_dontneed()
	 *  zap_pmd_range()
	 *   pmd_trans_huge(*pmd) == 0 (without ptl)
	 *   // skip the pmd
	 *				 set_pmd_at();
	 *				 // pmd is re-established
	 *
	 * The race makes MADV_DONTNEED miss the huge pmd and not clear it,
	 * which may break userspace.
	 *
	 * pmdp_invalidate() is required to make sure we don't miss
	 * dirty/young flags set by hardware.
	 */
	entry = *pmd;
	pmdp_invalidate(vma, addr, pmd);

	/*
	 * Recover dirty/young flags. It relies on pmdp_invalidate to not
	 * corrupt them.
	 */
	if (pmd_dirty(*pmd))
		entry = pmd_mkdirty(entry);
	if (pmd_young(*pmd))
		entry = pmd_mkyoung(entry);

	entry = pmd_modify(entry, newprot);
	if (preserve_write)
		entry = pmd_mk_savedwrite(entry);
	ret = HPAGE_PMD_NR;
	set_pmd_at(mm, addr, pmd, entry);
	BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry));
unlock:
	spin_unlock(ptl);
	return ret;
}
/*
 * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
 *
 * Note that if it returns page table lock pointer, this routine returns without
 * unlocking page table lock. So callers must unlock it.
 */
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
{
	spinlock_t *ptl;

	ptl = pmd_lock(vma->vm_mm, pmd);
	if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
			pmd_devmap(*pmd)))
		return ptl;
	spin_unlock(ptl);
	return NULL;
}

/*
 * Returns page table lock pointer if a given pud maps a thp, NULL otherwise.
 *
 * Note that if it returns page table lock pointer, this routine returns
 * without unlocking page table lock. So callers must unlock it.
 */
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
{
	spinlock_t *ptl;

	ptl = pud_lock(vma->vm_mm, pud);
	if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
		return ptl;
	spin_unlock(ptl);
	return NULL;
}
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
		 pud_t *pud, unsigned long addr)
{
	pud_t orig_pud;
	spinlock_t *ptl;

	ptl = __pud_trans_huge_lock(pud, vma);
	if (!ptl)
		return 0;
	/*
	 * For architectures like ppc64 we look at deposited pgtable
	 * when calling pudp_huge_get_and_clear. So do the
	 * pgtable_trans_huge_withdraw after finishing pudp related
	 * operations.
	 */
	orig_pud = pudp_huge_get_and_clear_full(tlb->mm, addr, pud,
			tlb->fullmm);
	tlb_remove_pud_tlb_entry(tlb, pud, addr);
	if (vma_is_dax(vma)) {
		spin_unlock(ptl);
		/* No zero page support yet */
	} else {
		/* No support for anonymous PUD pages yet */
		BUG();
	}
	return 1;
}
static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
		unsigned long haddr)
{
	VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
	VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));

	count_vm_event(THP_SPLIT_PUD);

	pudp_huge_clear_flush_notify(vma, haddr, pud);
}

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address)
{
	spinlock_t *ptl;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long haddr = address & HPAGE_PUD_MASK;

	mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PUD_SIZE);
	ptl = pud_lock(mm, pud);
	if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
		goto out;
	__split_huge_pud_locked(vma, pud, haddr);

out:
	spin_unlock(ptl);
	mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PUD_SIZE);
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
		unsigned long haddr, pmd_t *pmd)
{
	struct mm_struct *mm = vma->vm_mm;
	pgtable_t pgtable;
	pmd_t _pmd;
	int i;

	/* leave pmd empty until pte is filled */
	pmdp_huge_clear_flush_notify(vma, haddr, pmd);

	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pmd_populate(mm, &_pmd, pgtable);

	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
		pte_t *pte, entry;
		entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
		entry = pte_mkspecial(entry);
		pte = pte_offset_map(&_pmd, haddr);
		VM_BUG_ON(!pte_none(*pte));
		set_pte_at(mm, haddr, pte, entry);
		pte_unmap(pte);
	}
	smp_wmb(); /* make pte visible before pmd */
	pmd_populate(mm, pmd, pgtable);
}
2051 static void __split_huge_pmd_locked(struct vm_area_struct
*vma
, pmd_t
*pmd
,
2052 unsigned long haddr
, bool freeze
)
2054 struct mm_struct
*mm
= vma
->vm_mm
;
2058 bool young
, write
, dirty
, soft_dirty
, pmd_migration
= false;
2062 VM_BUG_ON(haddr
& ~HPAGE_PMD_MASK
);
2063 VM_BUG_ON_VMA(vma
->vm_start
> haddr
, vma
);
2064 VM_BUG_ON_VMA(vma
->vm_end
< haddr
+ HPAGE_PMD_SIZE
, vma
);
2065 VM_BUG_ON(!is_pmd_migration_entry(*pmd
) && !pmd_trans_huge(*pmd
)
2066 && !pmd_devmap(*pmd
));
2068 count_vm_event(THP_SPLIT_PMD
);
2070 if (!vma_is_anonymous(vma
)) {
2071 _pmd
= pmdp_huge_clear_flush_notify(vma
, haddr
, pmd
);
2073 * We are going to unmap this huge page. So
2074 * just go ahead and zap it
2076 if (arch_needs_pgtable_deposit())
2077 zap_deposited_table(mm
, pmd
);
2078 if (vma_is_dax(vma
))
2080 page
= pmd_page(_pmd
);
2081 if (!PageDirty(page
) && pmd_dirty(_pmd
))
2082 set_page_dirty(page
);
2083 if (!PageReferenced(page
) && pmd_young(_pmd
))
2084 SetPageReferenced(page
);
2085 page_remove_rmap(page
, true);
2087 add_mm_counter(mm
, MM_FILEPAGES
, -HPAGE_PMD_NR
);
2089 } else if (is_huge_zero_pmd(*pmd
)) {
2090 return __split_huge_zero_page_pmd(vma
, haddr
, pmd
);
2093 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
2094 pmd_migration
= is_pmd_migration_entry(*pmd
);
2095 if (pmd_migration
) {
2098 entry
= pmd_to_swp_entry(*pmd
);
2099 page
= pfn_to_page(swp_offset(entry
));
2102 page
= pmd_page(*pmd
);
2103 VM_BUG_ON_PAGE(!page_count(page
), page
);
2104 page_ref_add(page
, HPAGE_PMD_NR
- 1);
2105 write
= pmd_write(*pmd
);
2106 young
= pmd_young(*pmd
);
2107 dirty
= pmd_dirty(*pmd
);
2108 soft_dirty
= pmd_soft_dirty(*pmd
);
2110 pmdp_huge_split_prepare(vma
, haddr
, pmd
);
2111 pgtable
= pgtable_trans_huge_withdraw(mm
, pmd
);
2112 pmd_populate(mm
, &_pmd
, pgtable
);
2114 for (i
= 0, addr
= haddr
; i
< HPAGE_PMD_NR
; i
++, addr
+= PAGE_SIZE
) {
2117 * Note that NUMA hinting access restrictions are not
2118 * transferred to avoid any possibility of altering
2119 * permissions across VMAs.
2121 if (freeze
|| pmd_migration
) {
2122 swp_entry_t swp_entry
;
2123 swp_entry
= make_migration_entry(page
+ i
, write
);
2124 entry
= swp_entry_to_pte(swp_entry
);
2126 entry
= pte_swp_mksoft_dirty(entry
);
2128 entry
= mk_pte(page
+ i
, READ_ONCE(vma
->vm_page_prot
));
2129 entry
= maybe_mkwrite(entry
, vma
);
2131 entry
= pte_wrprotect(entry
);
2133 entry
= pte_mkold(entry
);
2135 entry
= pte_mksoft_dirty(entry
);
2138 SetPageDirty(page
+ i
);
2139 pte
= pte_offset_map(&_pmd
, addr
);
2140 BUG_ON(!pte_none(*pte
));
2141 set_pte_at(mm
, addr
, pte
, entry
);
2142 atomic_inc(&page
[i
]._mapcount
);

	/*
	 * Set PG_double_map before dropping compound_mapcount to avoid
	 * false-negative page_mapped().
	 */
	if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) {
		for (i = 0; i < HPAGE_PMD_NR; i++)
			atomic_inc(&page[i]._mapcount);
	}

	if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
		/* Last compound_mapcount is gone. */
		__dec_node_page_state(page, NR_ANON_THPS);
		if (TestClearPageDoubleMap(page)) {
			/* No need for the mapcount reference anymore */
			for (i = 0; i < HPAGE_PMD_NR; i++)
				atomic_dec(&page[i]._mapcount);
		}
	}

	smp_wmb(); /* make pte visible before pmd */
	/*
	 * Up to this point the pmd is present and huge and userland has the
	 * whole access to the hugepage during the split (which happens in
	 * place). If we overwrite the pmd with the not-huge version pointing
	 * to the pte here (which of course we could if all CPUs were bug
	 * free), userland could trigger a small page size TLB miss on the
	 * small sized TLB while the hugepage TLB entry is still established in
	 * the huge TLB. Some CPUs don't like that.
	 * See http://support.amd.com/us/Processor_TechDocs/41322.pdf, Erratum
	 * 383 on page 93. Intel should be safe but also warns that it's
	 * only safe if the permission and cache attributes of the two entries
	 * loaded in the two TLBs are identical (which should be the case here).
	 * But it is generally safer to never allow small and huge TLB entries
	 * for the same virtual address to be loaded simultaneously. So instead
	 * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
	 * current pmd notpresent (atomically because pmd_trans_huge must
	 * remain set on the pmd until the split is complete), then we flush
	 * the SMP TLB and finally we write the non-huge version of the pmd
	 * entry with pmd_populate.
	 */
	pmdp_invalidate(vma, haddr, pmd);
	pmd_populate(mm, pmd, pgtable);

	if (freeze) {
		for (i = 0; i < HPAGE_PMD_NR; i++) {
			page_remove_rmap(page + i, false);
			put_page(page + i);
		}
	}
}
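
/*
 * Refcount bookkeeping note for __split_huge_pmd_locked() (descriptive
 * only): page_ref_add(page, HPAGE_PMD_NR - 1) converts the single
 * reference held by the huge pmd into one reference per subpage pte.
 * When freezing, the per-subpage rmap and reference are dropped again,
 * because the migration entries that replace the ptes hold neither.
 */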

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page)
{
	spinlock_t *ptl;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long haddr = address & HPAGE_PMD_MASK;

	mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
	ptl = pmd_lock(mm, pmd);

	/*
	 * If the caller asks us to set up migration entries, we need a page
	 * to check the pmd against. Otherwise we can end up replacing the
	 * wrong page.
	 */
	VM_BUG_ON(freeze && !page);
	if (page && page != pmd_page(*pmd))
		goto out;

	if (pmd_trans_huge(*pmd)) {
		page = pmd_page(*pmd);
		if (PageMlocked(page))
			clear_page_mlock(page);
	} else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd)))
		goto out;
	__split_huge_pmd_locked(vma, pmd, haddr, freeze);
out:
	spin_unlock(ptl);
	mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PMD_SIZE);
}
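
/*
 * Usage sketch for __split_huge_pmd() (illustrative, mirroring the
 * split_huge_pmd() wrapper in huge_mm.h): a typical caller splits the
 * pmd covering one address without freezing,
 *
 *	__split_huge_pmd(vma, pmd, address, false, NULL);
 *
 * freeze == true additionally replaces the ptes with migration
 * entries, which is why such callers must pass @page so the pmd can
 * be checked against it.
 */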

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(vma->vm_mm, address);
	if (!pgd_present(*pgd))
		return;

	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		return;

	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, address);

	__split_huge_pmd(vma, pmd, address, freeze, page);
}

void vma_adjust_trans_huge(struct vm_area_struct *vma,
			     unsigned long start,
			     unsigned long end,
			     long adjust_next)
{
	/*
	 * If the new start address isn't hpage aligned and it could
	 * previously contain a hugepage: check if we need to split
	 * a huge pmd.
	 */
	if (start & ~HPAGE_PMD_MASK &&
	    (start & HPAGE_PMD_MASK) >= vma->vm_start &&
	    (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
		split_huge_pmd_address(vma, start, false, NULL);

	/*
	 * If the new end address isn't hpage aligned and it could
	 * previously contain a hugepage: check if we need to split
	 * a huge pmd.
	 */
	if (end & ~HPAGE_PMD_MASK &&
	    (end & HPAGE_PMD_MASK) >= vma->vm_start &&
	    (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
		split_huge_pmd_address(vma, end, false, NULL);

	/*
	 * If we're also updating the vma->vm_next->vm_start, and the new
	 * vm_next->vm_start isn't hpage aligned and it could previously
	 * contain a hugepage: check if we need to split a huge pmd.
	 */
	if (adjust_next > 0) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long nstart = next->vm_start;
		nstart += adjust_next << PAGE_SHIFT;
		if (nstart & ~HPAGE_PMD_MASK &&
		    (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
		    (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
			split_huge_pmd_address(next, nstart, false, NULL);
	}
}
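
/*
 * Worked example (illustrative, assuming 2M huge pages): if an
 * munmap() moves vm_start of a 2M-aligned anonymous VMA from
 * 0x40000000 to 0x40001000, the new start is no longer pmd aligned
 * while the 2M region 0x40000000..0x40200000 still lies inside the
 * old VMA, so the huge pmd covering 0x40000000 must be split before
 * the boundary moves.
 */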

static void unmap_page(struct page *page)
{
	enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS |
		TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
	bool unmap_success;

	VM_BUG_ON_PAGE(!PageHead(page), page);

	if (PageAnon(page))
		ttu_flags |= TTU_SPLIT_FREEZE;

	unmap_success = try_to_unmap(page, ttu_flags);
	VM_BUG_ON_PAGE(!unmap_success, page);
}

static void remap_page(struct page *page)
{
	int i;

	if (PageTransHuge(page)) {
		remove_migration_ptes(page, page, true);
	} else {
		for (i = 0; i < HPAGE_PMD_NR; i++)
			remove_migration_ptes(page + i, page + i, true);
	}
}
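
/*
 * Note (descriptive): unmap_page() and remap_page() bracket the split.
 * For anonymous pages TTU_SPLIT_FREEZE leaves migration entries behind,
 * so remap_page() restores real ptes either for the still-compound page
 * (when the split failed) or per subpage (when it succeeded).
 */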

static void __split_huge_page_tail(struct page *head, int tail,
		struct lruvec *lruvec, struct list_head *list)
{
	struct page *page_tail = head + tail;

	VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);

	/*
	 * Clone page flags before unfreezing refcount.
	 *
	 * After a successful get_page_unless_zero() a flags change might
	 * follow, for example lock_page() which sets PG_waiters.
	 */
	page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	page_tail->flags |= (head->flags &
			((1L << PG_referenced) |
			 (1L << PG_swapbacked) |
			 (1L << PG_swapcache) |
			 (1L << PG_mlocked) |
			 (1L << PG_uptodate) |
			 (1L << PG_active) |
			 (1L << PG_locked) |
			 (1L << PG_unevictable) |
			 (1L << PG_dirty)));

	/* ->mapping in first tail page is compound_mapcount */
	VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
			page_tail);
	page_tail->mapping = head->mapping;
	page_tail->index = head->index + tail;

	/* Page flags must be visible before we make the page non-compound. */
	smp_wmb();

	/*
	 * Clear PageTail before unfreezing page refcount.
	 *
	 * After a successful get_page_unless_zero() a put_page() might
	 * follow, which needs a correct compound_head().
	 */
	clear_compound_head(page_tail);

	/* Finally unfreeze refcount. Additional reference from page cache. */
	page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) ||
					  PageSwapCache(head)));

	if (page_is_young(head))
		set_page_young(page_tail);
	if (page_is_idle(head))
		set_page_idle(page_tail);

	page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
	lru_add_page_tail(head, page_tail, lruvec, list);
}
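
/*
 * Descriptive note: tail pages of a compound page hold no references
 * of their own, so page_ref_unfreeze() above gives each newly
 * independent subpage one reference for itself, plus one more when it
 * sits in the page cache or swap cache.
 */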

static void __split_huge_page(struct page *page, struct list_head *list,
		pgoff_t end, unsigned long flags)
{
	struct page *head = compound_head(page);
	struct zone *zone = page_zone(head);
	struct lruvec *lruvec;
	int i;

	lruvec = mem_cgroup_page_lruvec(head, zone->zone_pgdat);

	/* complete memcg works before add pages to LRU */
	mem_cgroup_split_huge_fixup(head);

	for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
		__split_huge_page_tail(head, i, lruvec, list);
		/* Some pages can be beyond i_size: drop them from page cache */
		if (head[i].index >= end) {
			ClearPageDirty(head + i);
			__delete_from_page_cache(head + i, NULL);
			if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head))
				shmem_uncharge(head->mapping->host, 1);
			put_page(head + i);
		}
	}

	ClearPageCompound(head);

	split_page_owner(head, HPAGE_PMD_ORDER);

	/* See comment in __split_huge_page_tail() */
	if (PageAnon(head)) {
		/* Additional pin to radix tree of swap cache */
		if (PageSwapCache(head))
			page_ref_add(head, 2);
		else
			page_ref_inc(head);
	} else {
		/* Additional pin to radix tree */
		page_ref_add(head, 2);
		spin_unlock(&head->mapping->tree_lock);
	}

	spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);

	remap_page(head);

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		struct page *subpage = head + i;
		if (subpage == page)
			continue;
		unlock_page(subpage);

		/*
		 * Subpages may be freed if there wasn't any mapping
		 * like if add_to_swap() is running on a lru page that
		 * had its mapping zapped. And freeing these pages
		 * requires taking the lru_lock so we do the put_page
		 * of the tail pages after the split is complete.
		 */
		put_page(subpage);
	}
}
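
/*
 * Descriptive note: the page_ref_add()/page_ref_inc() calls above
 * re-inflate the head's frozen refcount: one reference for the
 * caller's pin, plus one more when the head remains in a radix tree
 * (page cache, or swap cache for anonymous pages).
 */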

int total_mapcount(struct page *page)
{
	int i, compound, ret;

	VM_BUG_ON_PAGE(PageTail(page), page);

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) + 1;

	compound = compound_mapcount(page);
	if (PageHuge(page))
		return compound;
	ret = compound;
	for (i = 0; i < HPAGE_PMD_NR; i++)
		ret += atomic_read(&page[i]._mapcount) + 1;
	/* File pages have compound_mapcount included in _mapcount */
	if (!PageAnon(page))
		return ret - compound * HPAGE_PMD_NR;
	if (PageDoubleMap(page))
		ret -= HPAGE_PMD_NR;
	return ret;
}
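
/*
 * Worked example (illustrative, HPAGE_PMD_NR == 512 on x86-64 with 2M
 * THP): an anonymous THP mapped by a single pmd has
 * compound_mapcount() == 1 and every subpage _mapcount == -1, so
 * ret = 1 + 512 * ((-1) + 1) = 1.
 */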

/*
 * This calculates accurately how many mappings a transparent hugepage
 * has (unlike page_mapcount() which isn't fully accurate). This full
 * accuracy is primarily needed to know whether copy-on-write faults can
 * reuse the page and change the mapping to read-write instead of
 * copying it. At the same time this returns the total_mapcount too.
 *
 * The function returns the highest mapcount any one of the subpages
 * has. If the return value is one, even if different processes are
 * mapping different subpages of the transparent hugepage, they can
 * all reuse it, because each process is reusing a different subpage.
 *
 * The total_mapcount instead counts all virtual mappings of the
 * subpages. If the total_mapcount is equal to "one", it tells the
 * caller all mappings belong to the same "mm" and in turn the
 * anon_vma of the transparent hugepage can become the vma->anon_vma
 * local one as no other process may be mapping any of the subpages.
 *
 * It would be more accurate to replace page_mapcount() with
 * page_trans_huge_mapcount(); however, we only use
 * page_trans_huge_mapcount() in the copy-on-write faults where we
 * need full accuracy to avoid breaking page pinning, because
 * page_trans_huge_mapcount() is slower than page_mapcount().
 */
int page_trans_huge_mapcount(struct page *page, int *total_mapcount)
{
	int i, ret, _total_mapcount, mapcount;

	/* hugetlbfs shouldn't call it */
	VM_BUG_ON_PAGE(PageHuge(page), page);

	if (likely(!PageTransCompound(page))) {
		mapcount = atomic_read(&page->_mapcount) + 1;
		if (total_mapcount)
			*total_mapcount = mapcount;
		return mapcount;
	}

	page = compound_head(page);

	_total_mapcount = ret = 0;
	for (i = 0; i < HPAGE_PMD_NR; i++) {
		mapcount = atomic_read(&page[i]._mapcount) + 1;
		ret = max(ret, mapcount);
		_total_mapcount += mapcount;
	}
	if (PageDoubleMap(page)) {
		ret -= 1;
		_total_mapcount -= HPAGE_PMD_NR;
	}
	mapcount = compound_mapcount(page);
	ret += mapcount;
	_total_mapcount += mapcount;
	if (total_mapcount)
		*total_mapcount = _total_mapcount;
	return ret;
}

/* Racy check whether the huge page can be split */
bool can_split_huge_page(struct page *page, int *pextra_pins)
{
	int extra_pins;

	/* Additional pins from radix tree */
	if (PageAnon(page))
		extra_pins = PageSwapCache(page) ? HPAGE_PMD_NR : 0;
	else
		extra_pins = HPAGE_PMD_NR;
	if (pextra_pins)
		*pextra_pins = extra_pins;
	return total_mapcount(page) == page_count(page) - extra_pins - 1;
}
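
/*
 * Worked example (illustrative): for an anonymous THP that is not in
 * the swap cache, extra_pins == 0, so the split may proceed only when
 * page_count() == total_mapcount() + 1, i.e. when the caller's pin is
 * the sole reference beyond the mappings themselves.
 */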

/*
 * This function splits a huge page into normal pages. @page can point to
 * any subpage of the huge page to split. The split doesn't change the
 * position of @page.
 *
 * The caller must hold the only extra pin on @page, otherwise the split
 * fails with -EBUSY. The huge page must be locked.
 *
 * If @list is null, tail pages will be added to the LRU list, otherwise
 * to @list.
 *
 * Both the head page and the tail pages will inherit mapping, flags, and
 * so on from the hugepage.
 *
 * The GUP pin and PG_locked are transferred to @page. The rest of the
 * subpages can be freed if they are not mapped.
 *
 * Returns 0 if the hugepage was split successfully.
 * Returns -EBUSY if the page is pinned or if anon_vma disappeared from
 * under us.
 */
int split_huge_page_to_list(struct page *page, struct list_head *list)
{
	struct page *head = compound_head(page);
	struct pglist_data *pgdata = NODE_DATA(page_to_nid(head));
	struct anon_vma *anon_vma = NULL;
	struct address_space *mapping = NULL;
	int count, mapcount, extra_pins, ret;
	bool mlocked;
	unsigned long flags;
	pgoff_t end;

	VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageCompound(page), page);

	if (PageWriteback(page))
		return -EBUSY;

	if (PageAnon(head)) {
		/*
		 * The caller does not necessarily hold an mmap_sem that would
		 * prevent the anon_vma disappearing, so we first take a
		 * reference to it and then lock the anon_vma for write. This
		 * is similar to page_lock_anon_vma_read except the write lock
		 * is taken to serialise against parallel split or collapse
		 * operations.
		 */
		anon_vma = page_get_anon_vma(head);
		if (!anon_vma) {
			ret = -EBUSY;
			goto out;
		}
		end = -1;
		mapping = NULL;
		anon_vma_lock_write(anon_vma);
	} else {
		mapping = head->mapping;

		/* Truncated ? */
		if (!mapping) {
			ret = -EBUSY;
			goto out;
		}

		anon_vma = NULL;
		i_mmap_lock_read(mapping);

		/*
		 * __split_huge_page() may need to trim off pages beyond EOF:
		 * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
		 * which cannot be nested inside the page tree lock. So note
		 * end now: i_size itself may be changed at any moment, but
		 * the head page lock is good enough to serialize the trimming.
		 */
		end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
	}

	/*
	 * Racy check whether we can split the page, before unmap_page()
	 * splits the PMDs.
	 */
	if (!can_split_huge_page(head, &extra_pins)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	mlocked = PageMlocked(page);
	unmap_page(head);
	VM_BUG_ON_PAGE(compound_mapcount(head), head);

	/* Make sure the page is not on per-CPU pagevec as it takes pin */
	if (mlocked)
		lru_add_drain();

	/* prevent PageLRU to go away from under us, and freeze lru stats */
	spin_lock_irqsave(zone_lru_lock(page_zone(head)), flags);

	if (mapping) {
		void **pslot;

		spin_lock(&mapping->tree_lock);
		pslot = radix_tree_lookup_slot(&mapping->page_tree,
				page_index(head));
		/*
		 * Check if the head page is present in radix tree.
		 * We assume all tails are present too, if head is there.
		 */
		if (radix_tree_deref_slot_protected(pslot,
					&mapping->tree_lock) != head)
			goto fail;
	}

	/* Prevent deferred_split_scan() touching ->_refcount */
	spin_lock(&pgdata->split_queue_lock);
	count = page_count(head);
	mapcount = total_mapcount(head);
	if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) {
		if (!list_empty(page_deferred_list(head))) {
			pgdata->split_queue_len--;
			list_del(page_deferred_list(head));
		}
		if (mapping)
			__dec_node_page_state(page, NR_SHMEM_THPS);
		spin_unlock(&pgdata->split_queue_lock);
		__split_huge_page(page, list, end, flags);
		if (PageSwapCache(head)) {
			swp_entry_t entry = { .val = page_private(head) };

			ret = split_swap_cluster(entry);
		} else
			ret = 0;
	} else {
		if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
			pr_alert("total_mapcount: %u, page_count(): %u\n",
					mapcount, count);
			if (PageTail(page))
				dump_page(head, NULL);
			dump_page(page, "total_mapcount(head) > 0");
			BUG();
		}
		spin_unlock(&pgdata->split_queue_lock);
fail:		if (mapping)
			spin_unlock(&mapping->tree_lock);
		spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
		remap_page(head);
		ret = -EBUSY;
	}

out_unlock:
	if (anon_vma) {
		anon_vma_unlock_write(anon_vma);
		put_anon_vma(anon_vma);
	}
	if (mapping)
		i_mmap_unlock_read(mapping);
out:
	count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
	return ret;
}
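
/*
 * Locking-order note for split_huge_page_to_list() (descriptive only):
 * the anon_vma write lock or i_mmap read lock is taken first, then the
 * zone LRU lock, then mapping->tree_lock for file pages, and innermost
 * the pgdata->split_queue_lock that fences deferred_split_scan().
 */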

void free_transhuge_page(struct page *page)
{
	struct pglist_data *pgdata = NODE_DATA(page_to_nid(page));
	unsigned long flags;

	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
	if (!list_empty(page_deferred_list(page))) {
		pgdata->split_queue_len--;
		list_del(page_deferred_list(page));
	}
	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
	free_compound_page(page);
}

void deferred_split_huge_page(struct page *page)
{
	struct pglist_data *pgdata = NODE_DATA(page_to_nid(page));
	unsigned long flags;

	VM_BUG_ON_PAGE(!PageTransHuge(page), page);

	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
	if (list_empty(page_deferred_list(page))) {
		count_vm_event(THP_DEFERRED_SPLIT_PAGE);
		list_add_tail(page_deferred_list(page), &pgdata->split_queue);
		pgdata->split_queue_len++;
	}
	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
}
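
/*
 * Descriptive note: pages land on the deferred-split queue when a
 * partial unmap leaves a THP only pte-mapped; the shrinker below then
 * splits them under memory pressure so the unused subpages can be
 * reclaimed.
 */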
static unsigned long deferred_split_count(struct shrinker *shrink,
		struct shrink_control *sc)
{
	struct pglist_data *pgdata = NODE_DATA(sc->nid);
	return READ_ONCE(pgdata->split_queue_len);
}

static unsigned long deferred_split_scan(struct shrinker *shrink,
		struct shrink_control *sc)
{
	struct pglist_data *pgdata = NODE_DATA(sc->nid);
	unsigned long flags;
	LIST_HEAD(list), *pos, *next;
	struct page *page;
	int split = 0;

	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
	/* Take pin on all head pages to avoid freeing them under us */
	list_for_each_safe(pos, next, &pgdata->split_queue) {
		page = list_entry((void *)pos, struct page, mapping);
		page = compound_head(page);
		if (get_page_unless_zero(page)) {
			list_move(page_deferred_list(page), &list);
		} else {
			/* We lost race with put_compound_page() */
			list_del_init(page_deferred_list(page));
			pgdata->split_queue_len--;
		}
		if (!--sc->nr_to_scan)
			break;
	}
	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);

	list_for_each_safe(pos, next, &list) {
		page = list_entry((void *)pos, struct page, mapping);
		if (!trylock_page(page))
			goto next;
		/* split_huge_page() removes page from list on success */
		if (!split_huge_page(page))
			split++;
		unlock_page(page);
next:
		put_page(page);
	}

	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
	list_splice_tail(&list, &pgdata->split_queue);
	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);

	/*
	 * Stop shrinker if we didn't split any page, but the queue is empty.
	 * This can happen if pages were freed under us.
	 */
	if (!split && list_empty(&pgdata->split_queue))
		return SHRINK_STOP;
	return split;
}

static struct shrinker deferred_split_shrinker = {
	.count_objects = deferred_split_count,
	.scan_objects = deferred_split_scan,
	.seeks = DEFAULT_SEEKS,
	.flags = SHRINKER_NUMA_AWARE,
};
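
/*
 * Descriptive note: with SHRINKER_NUMA_AWARE set, count_objects and
 * scan_objects are invoked per node (sc->nid), so each node's
 * split_queue shrinks independently of the others.
 */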

#ifdef CONFIG_DEBUG_FS
static int split_huge_pages_set(void *data, u64 val)
{
	struct zone *zone;
	struct page *page;
	unsigned long pfn, max_zone_pfn;
	unsigned long total = 0, split = 0;

	if (val != 1)
		return -EINVAL;

	for_each_populated_zone(zone) {
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
			if (!pfn_valid(pfn))
				continue;

			page = pfn_to_page(pfn);
			if (!get_page_unless_zero(page))
				continue;

			if (zone != page_zone(page))
				goto next;

			if (!PageHead(page) || PageHuge(page) || !PageLRU(page))
				goto next;

			total++;
			lock_page(page);
			if (!split_huge_page(page))
				split++;
			unlock_page(page);
next:
			put_page(page);
		}
	}

	pr_info("%lu of %lu THP split\n", split, total);

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(split_huge_pages_fops, NULL, split_huge_pages_set,
		"%llu\n");

static int __init split_huge_pages_debugfs(void)
{
	void *ret;

	ret = debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
			&split_huge_pages_fops);
	if (!ret)
		pr_warn("Failed to create split_huge_pages in debugfs");

	return 0;
}
late_initcall(split_huge_pages_debugfs);
#endif
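
/*
 * Usage sketch for the knob above (assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	echo 1 > /sys/kernel/debug/split_huge_pages
 *
 * Any value other than 1 is rejected with -EINVAL; on completion the
 * kernel logs "<split> of <total> THP split".
 */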

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address = pvmw->address;
	pmd_t pmdval;
	swp_entry_t entry;
	pmd_t pmdswp;

	if (!(pvmw->pmd && !pvmw->pte))
		return;

	flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
	pmdval = *pvmw->pmd;
	pmdp_invalidate(vma, address, pvmw->pmd);
	if (pmd_dirty(pmdval))
		set_page_dirty(page);
	entry = make_migration_entry(page, pmd_write(pmdval));
	pmdswp = swp_entry_to_pmd(entry);
	if (pmd_soft_dirty(pmdval))
		pmdswp = pmd_swp_mksoft_dirty(pmdswp);
	set_pmd_at(mm, address, pvmw->pmd, pmdswp);
	page_remove_rmap(page, true);
	put_page(page);
}

void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address = pvmw->address;
	unsigned long mmun_start = address & HPAGE_PMD_MASK;
	pmd_t pmde;
	swp_entry_t entry;

	if (!(pvmw->pmd && !pvmw->pte))
		return;

	entry = pmd_to_swp_entry(*pvmw->pmd);
	get_page(new);
	pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot));
	if (pmd_swp_soft_dirty(*pvmw->pmd))
		pmde = pmd_mksoft_dirty(pmde);
	if (is_write_migration_entry(entry))
		pmde = maybe_pmd_mkwrite(pmde, vma);

	flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE);
	page_add_anon_rmap(new, vma, mmun_start, true);
	set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
	if ((vma->vm_flags & VM_LOCKED) && !PageDoubleMap(new))
		mlock_vma_page(new);
	update_mmu_cache_pmd(vma, address, pvmw->pmd);
}
#endif
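
/*
 * Descriptive note: set_pmd_migration_entry() and remove_migration_pmd()
 * are the pmd-level counterparts of the pte-level try_to_unmap() /
 * remove_migration_ptes() pair: the former parks the mapping in a
 * swap-style migration entry, the latter rebuilds a huge pmd pointing
 * at the new page once migration completes.
 */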