// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/mmdebug.h>
#include <linux/sched/signal.h>
#include <linux/rmap.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/jhash.h>
#include <linux/numa.h>
#include <linux/llist.h>
#include <linux/cma.h>
#include <linux/migrate.h>
#include <linux/nospec.h>
#include <linux/delayacct.h>
#include <linux/memory.h>
#include <linux/mm_inline.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include <linux/page_owner.h>
#include "internal.h"
#include "hugetlb_vmemmap.h"

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

#ifdef CONFIG_CMA
static struct cma *hugetlb_cma[MAX_NUMNODES];
static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)
{
	return cma_pages_valid(hugetlb_cma[folio_nid(folio)], &folio->page,
				1 << order);
}
#else
static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)
{
	return false;
}
#endif
static unsigned long hugetlb_cma_size __initdata;

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static bool __initdata parsed_valid_hugepagesz = true;
static bool __initdata parsed_default_hugepagesz;
static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page.  This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
 */
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;

/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);
static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);
static struct resv_map *vma_resv_map(struct vm_area_struct *vma);

static inline bool subpool_is_free(struct hugepage_subpool *spool)
{
	if (spool->count)
		return false;
	if (spool->max_hpages != -1)
		return spool->used_hpages == 0;
	if (spool->min_hpages != -1)
		return spool->rsv_hpages == spool->min_hpages;

	return true;
}

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
						unsigned long irq_flags)
{
	spin_unlock_irqrestore(&spool->lock, irq_flags);

	/* If no pages are used, and no other handles to the subpool
	 * remain, give up any reservations based on minimum size and
	 * free the subpool */
	if (subpool_is_free(spool)) {
		if (spool->min_hpages != -1)
			hugetlb_acct_memory(spool->hstate,
						-spool->min_hpages);
		kfree(spool);
	}
}

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages)
{
	struct hugepage_subpool *spool;

	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = max_hpages;
	spool->hstate = h;
	spool->min_hpages = min_hpages;

	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
		kfree(spool);
		return NULL;
	}
	spool->rsv_hpages = min_hpages;

	return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	unsigned long flags;

	spin_lock_irqsave(&spool->lock, flags);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool, flags);
}

/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy the
 * request.  Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward).  The returned value may
 * only be different than the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
 */
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
				      long delta)
{
	long ret = delta;

	if (!spool)
		return ret;

	spin_lock_irq(&spool->lock);

	if (spool->max_hpages != -1) {		/* maximum size accounting */
		if ((spool->used_hpages + delta) <= spool->max_hpages)
			spool->used_hpages += delta;
		else {
			ret = -ENOMEM;
			goto unlock_ret;
		}
	}

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->rsv_hpages) {
		if (delta > spool->rsv_hpages) {
			/*
			 * Asking for more reserves than those already taken on
			 * behalf of subpool.  Return difference.
			 */
			ret = delta - spool->rsv_hpages;
			spool->rsv_hpages = 0;
		} else {
			ret = 0;	/* reserves already accounted for */
			spool->rsv_hpages -= delta;
		}
	}

unlock_ret:
	spin_unlock_irq(&spool->lock);
	return ret;
}

/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may only be different than the passed value (delta)
 * in the case where a subpool minimum size must be maintained.
 */
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;
	unsigned long flags;

	if (!spool)
		return delta;

	spin_lock_irqsave(&spool->lock, flags);

	if (spool->max_hpages != -1)		/* maximum size accounting */
		spool->used_hpages -= delta;

	 /* minimum size accounting */
	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
		if (spool->rsv_hpages + delta <= spool->min_hpages)
			ret = 0;
		else
			ret = spool->rsv_hpages + delta - spool->min_hpages;

		spool->rsv_hpages += delta;
		if (spool->rsv_hpages > spool->min_hpages)
			spool->rsv_hpages = spool->min_hpages;
	}

	/*
	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
	 * quota reference, free it now.
	 */
	unlock_or_release_subpool(spool, flags);

	return ret;
}

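/*
 * Worked example of the two accounting helpers above (illustrative numbers,
 * not from the original source): with min_hpages == 10 and rsv_hpages == 10,
 * hugepage_subpool_get_pages(spool, 3) is satisfied entirely from the
 * subpool reserve, so it returns 0 and leaves rsv_hpages == 7; the global
 * pool needs no adjustment.  A later hugepage_subpool_put_pages(spool, 3)
 * while used_hpages has dropped below min_hpages gives the pages back to
 * the reserve (rsv_hpages moves back toward min_hpages) and returns only
 * whatever excess the global reservation count must drop.
 */
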
static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(file_inode(vma->vm_file));
}

/*
 * hugetlb vma_lock helper routines
 */
void hugetlb_vma_lock_read(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		down_read(&vma_lock->rw_sema);
	} else if (__vma_private_lock(vma)) {
		struct resv_map *resv_map = vma_resv_map(vma);

		down_read(&resv_map->rw_sema);
	}
}

void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		up_read(&vma_lock->rw_sema);
	} else if (__vma_private_lock(vma)) {
		struct resv_map *resv_map = vma_resv_map(vma);

		up_read(&resv_map->rw_sema);
	}
}

void hugetlb_vma_lock_write(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		down_write(&vma_lock->rw_sema);
	} else if (__vma_private_lock(vma)) {
		struct resv_map *resv_map = vma_resv_map(vma);

		down_write(&resv_map->rw_sema);
	}
}

void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		up_write(&vma_lock->rw_sema);
	} else if (__vma_private_lock(vma)) {
		struct resv_map *resv_map = vma_resv_map(vma);

		up_write(&resv_map->rw_sema);
	}
}

int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		return down_write_trylock(&vma_lock->rw_sema);
	} else if (__vma_private_lock(vma)) {
		struct resv_map *resv_map = vma_resv_map(vma);

		return down_write_trylock(&resv_map->rw_sema);
	}

	return 1;
}

void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		lockdep_assert_held(&vma_lock->rw_sema);
	} else if (__vma_private_lock(vma)) {
		struct resv_map *resv_map = vma_resv_map(vma);

		lockdep_assert_held(&resv_map->rw_sema);
	}
}

void hugetlb_vma_lock_release(struct kref *kref)
{
	struct hugetlb_vma_lock *vma_lock = container_of(kref,
			struct hugetlb_vma_lock, refs);

	kfree(vma_lock);
}

static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
{
	struct vm_area_struct *vma = vma_lock->vma;

	/*
	 * vma_lock structure may or may not be released as a result of put,
	 * it certainly will no longer be attached to vma so clear pointer.
	 * Semaphore synchronizes access to vma_lock->vma field.
	 */
	vma_lock->vma = NULL;
	vma->vm_private_data = NULL;
	up_write(&vma_lock->rw_sema);
	kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
}

static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		__hugetlb_vma_unlock_write_put(vma_lock);
	} else if (__vma_private_lock(vma)) {
		struct resv_map *resv_map = vma_resv_map(vma);

		/* no free for anon vmas, but still need to unlock */
		up_write(&resv_map->rw_sema);
	}
}

static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
{
	/*
	 * Only present in sharable vmas.
	 */
	if (!vma || !__vma_shareable_lock(vma))
		return;

	if (vma->vm_private_data) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		down_write(&vma_lock->rw_sema);
		__hugetlb_vma_unlock_write_put(vma_lock);
	}
}

static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
{
	struct hugetlb_vma_lock *vma_lock;

	/* Only establish in (flags) sharable vmas */
	if (!vma || !(vma->vm_flags & VM_MAYSHARE))
		return;

	/* Should never get here with non-NULL vm_private_data */
	if (vma->vm_private_data)
		return;

	vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
	if (!vma_lock) {
		/*
		 * If we can not allocate structure, then vma can not
		 * participate in pmd sharing.  This is only a possible
		 * performance enhancement and memory saving issue.
		 * However, the lock is also used to synchronize page
		 * faults with truncation.  If the lock is not present,
		 * unlikely races could leave pages in a file past i_size
		 * until the file is removed.  Warn in the unlikely case of
		 * allocation failure.
		 */
		pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
		return;
	}

	kref_init(&vma_lock->refs);
	init_rwsem(&vma_lock->rw_sema);
	vma_lock->vma = vma;
	vma->vm_private_data = vma_lock;
}

/* Helper that removes a struct file_region from the resv_map cache and returns
 * it for use.
 */
static struct file_region *
get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
{
	struct file_region *nrg;

	VM_BUG_ON(resv->region_cache_count <= 0);

	resv->region_cache_count--;
	nrg = list_first_entry(&resv->region_cache, struct file_region, link);
	list_del(&nrg->link);

	nrg->from = from;
	nrg->to = to;

	return nrg;
}

static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
					      struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	nrg->reservation_counter = rg->reservation_counter;
	nrg->css = rg->css;
	if (rg->css)
		css_get(rg->css);
#endif
}

/* Helper that records hugetlb_cgroup uncharge info. */
static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
						struct hstate *h,
						struct resv_map *resv,
						struct file_region *nrg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (h_cg) {
		nrg->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		nrg->css = &h_cg->css;
		/*
		 * The caller will hold exactly one h_cg->css reference for the
		 * whole contiguous reservation region. But this area might be
		 * scattered when there are already some file_regions reside in
		 * it. As a result, many file_regions may share only one css
		 * reference. In order to ensure that one file_region must hold
		 * exactly one h_cg->css reference, we should do css_get for
		 * each file_region and leave the reference held by caller
		 * unchanged.
		 */
		css_get(&h_cg->css);
		if (!resv->pages_per_hpage)
			resv->pages_per_hpage = pages_per_huge_page(h);
		/* pages_per_hpage should be the same for all entries in
		 * a resv_map.
		 */
		VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
	} else {
		nrg->reservation_counter = NULL;
		nrg->css = NULL;
	}
#endif
}

static void put_uncharge_info(struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (rg->css)
		css_put(rg->css);
#endif
}

static bool has_same_uncharge_info(struct file_region *rg,
				   struct file_region *org)
{
#ifdef CONFIG_CGROUP_HUGETLB
	return rg->reservation_counter == org->reservation_counter &&
	       rg->css == org->css;
#else
	return true;
#endif
}

static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
{
	struct file_region *nrg, *prg;

	prg = list_prev_entry(rg, link);
	if (&prg->link != &resv->regions && prg->to == rg->from &&
	    has_same_uncharge_info(prg, rg)) {
		prg->to = rg->to;

		list_del(&rg->link);
		put_uncharge_info(rg);
		kfree(rg);

		rg = prg;
	}

	nrg = list_next_entry(rg, link);
	if (&nrg->link != &resv->regions && nrg->from == rg->to &&
	    has_same_uncharge_info(nrg, rg)) {
		nrg->from = rg->from;

		list_del(&rg->link);
		put_uncharge_info(rg);
		kfree(rg);
	}
}

static inline long
hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from,
		     long to, struct hstate *h, struct hugetlb_cgroup *cg,
		     long *regions_needed)
{
	struct file_region *nrg;

	if (!regions_needed) {
		nrg = get_file_region_entry_from_cache(map, from, to);
		record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
		list_add(&nrg->link, rg);
		coalesce_file_region(map, nrg);
	} else
		*regions_needed += 1;

	return to - from;
}

/*
 * Must be called with resv->lock held.
 *
 * Calling this with regions_needed != NULL will count the number of pages
 * to be added but will not modify the linked list. And regions_needed will
 * indicate the number of file_regions needed in the cache to carry out to add
 * the regions for this range.
 */
static long add_reservation_in_range(struct resv_map *resv, long f, long t,
				     struct hugetlb_cgroup *h_cg,
				     struct hstate *h, long *regions_needed)
{
	long add = 0;
	struct list_head *head = &resv->regions;
	long last_accounted_offset = f;
	struct file_region *iter, *trg = NULL;
	struct list_head *rg = NULL;

	if (regions_needed)
		*regions_needed = 0;

	/* In this loop, we essentially handle an entry for the range
	 * [last_accounted_offset, iter->from), at every iteration, with some
	 * bounds checking.
	 */
	list_for_each_entry_safe(iter, trg, head, link) {
		/* Skip irrelevant regions that start before our range. */
		if (iter->from < f) {
			/* If this region ends after the last accounted offset,
			 * then we need to update last_accounted_offset.
			 */
			if (iter->to > last_accounted_offset)
				last_accounted_offset = iter->to;
			continue;
		}

		/* When we find a region that starts beyond our range, we've
		 * finished.
		 */
		if (iter->from >= t) {
			rg = iter->link.prev;
			break;
		}

		/* Add an entry for last_accounted_offset -> iter->from, and
		 * update last_accounted_offset.
		 */
		if (iter->from > last_accounted_offset)
			add += hugetlb_resv_map_add(resv, iter->link.prev,
						    last_accounted_offset,
						    iter->from, h, h_cg,
						    regions_needed);

		last_accounted_offset = iter->to;
	}

	/* Handle the case where our range extends beyond
	 * last_accounted_offset.
	 */
	if (!rg)
		rg = head->prev;
	if (last_accounted_offset < t)
		add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
					    t, h, h_cg, regions_needed);

	return add;
}

/* Must be called with resv->lock acquired. Will drop lock to allocate entries.
 */
static int allocate_file_region_entries(struct resv_map *resv,
					int regions_needed)
	__must_hold(&resv->lock)
{
	LIST_HEAD(allocated_regions);
	int to_allocate = 0, i = 0;
	struct file_region *trg = NULL, *rg = NULL;

	VM_BUG_ON(regions_needed < 0);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * the number of in progress add operations plus regions_needed.
	 *
	 * This is a while loop because when we drop the lock, some other call
	 * to region_add or region_del may have consumed some region_entries,
	 * so we keep looping here until we finally have enough entries for
	 * (adds_in_progress + regions_needed).
	 */
	while (resv->region_cache_count <
	       (resv->adds_in_progress + regions_needed)) {
		to_allocate = resv->adds_in_progress + regions_needed -
			      resv->region_cache_count;

		/* At this point, we should have enough entries in the cache
		 * for all the existing adds_in_progress. We should only be
		 * needing to allocate for regions_needed.
		 */
		VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);

		spin_unlock(&resv->lock);
		for (i = 0; i < to_allocate; i++) {
			trg = kmalloc(sizeof(*trg), GFP_KERNEL);
			if (!trg)
				goto out_of_memory;
			list_add(&trg->link, &allocated_regions);
		}

		spin_lock(&resv->lock);

		list_splice(&allocated_regions, &resv->region_cache);
		resv->region_cache_count += to_allocate;
	}

	return 0;

out_of_memory:
	list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
		list_del(&rg->link);
		kfree(rg);
	}
	return -ENOMEM;
}

/*
 * Add the huge page range represented by [f, t) to the reserve
 * map.  Regions will be taken from the cache to fill in this range.
 * Sufficient regions should exist in the cache due to the previous
 * call to region_chg with the same range, but in some cases the cache will not
 * have sufficient entries due to races with other code doing region_add or
 * region_del.  The extra needed entries will be allocated.
 *
 * regions_needed is the out value provided by a previous call to region_chg.
 *
 * Return the number of new huge pages added to the map.  This number is greater
 * than or equal to zero.  If file_region entries needed to be allocated for
 * this operation and we were not able to allocate, it returns -ENOMEM.
 * region_add of regions of length 1 never allocate file_regions and cannot
 * fail; region_chg will always allocate at least 1 entry and a region_add for
 * 1 page will only require at most 1 entry.
 */
static long region_add(struct resv_map *resv, long f, long t,
		       long in_regions_needed, struct hstate *h,
		       struct hugetlb_cgroup *h_cg)
{
	long add = 0, actual_regions_needed = 0;

	spin_lock(&resv->lock);
retry:

	/* Count how many regions are actually needed to execute this add. */
	add_reservation_in_range(resv, f, t, NULL, NULL,
				 &actual_regions_needed);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * this add operation. Note that actual_regions_needed may be greater
	 * than in_regions_needed, as the resv_map may have been modified since
	 * the region_chg call. In this case, we need to make sure that we
	 * allocate extra entries, such that we have enough for all the
	 * existing adds_in_progress, plus the excess needed for this
	 * operation.
	 */
	if (actual_regions_needed > in_regions_needed &&
	    resv->region_cache_count <
		    resv->adds_in_progress +
			    (actual_regions_needed - in_regions_needed)) {
		/* region_add operation of range 1 should never need to
		 * allocate file_region entries.
		 */
		VM_BUG_ON(t - f <= 1);

		if (allocate_file_region_entries(
			    resv, actual_regions_needed - in_regions_needed)) {
			return -ENOMEM;
		}

		goto retry;
	}

	add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);

	resv->adds_in_progress -= in_regions_needed;

	spin_unlock(&resv->lock);
	return add;
}

/*
 * Examine the existing reserve map and determine how many
 * huge pages in the specified range [f, t) are NOT currently
 * represented.  This routine is called before a subsequent
 * call to region_add that will actually modify the reserve
 * map to add the specified range [f, t).  region_chg does
 * not change the number of huge pages represented by the
 * map.  A number of new file_region structures is added to the cache as a
 * placeholder, for the subsequent region_add call to use. At least 1
 * file_region structure is added.
 *
 * out_regions_needed is the number of regions added to the
 * resv->adds_in_progress.  This value needs to be provided to a follow up call
 * to region_add or region_abort for proper accounting.
 *
 * Returns the number of huge pages that need to be added to the existing
 * reservation map for the range [f, t).  This number is greater or equal to
 * zero.  -ENOMEM is returned if a new file_region structure or cache entry
 * is needed and can not be allocated.
 */
static long region_chg(struct resv_map *resv, long f, long t,
		       long *out_regions_needed)
{
	long chg = 0;

	spin_lock(&resv->lock);

	/* Count how many hugepages in this range are NOT represented. */
	chg = add_reservation_in_range(resv, f, t, NULL, NULL,
				       out_regions_needed);

	if (*out_regions_needed == 0)
		*out_regions_needed = 1;

	if (allocate_file_region_entries(resv, *out_regions_needed))
		return -ENOMEM;

	resv->adds_in_progress += *out_regions_needed;

	spin_unlock(&resv->lock);
	return chg;
}

/*
 * Abort the in progress add operation.  The adds_in_progress field
 * of the resv_map keeps track of the operations in progress between
 * calls to region_chg and region_add.  Operations are sometimes
 * aborted after the call to region_chg.  In such cases, region_abort
 * is called to decrement the adds_in_progress counter. regions_needed
 * is the value returned by the region_chg call, it is used to decrement
 * the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine.  They are kept to make reading the calling code easier as
 * arguments will match the associated region_chg call.
 */
static void region_abort(struct resv_map *resv, long f, long t,
			 long regions_needed)
{
	spin_lock(&resv->lock);
	VM_BUG_ON(!resv->region_cache_count);
	resv->adds_in_progress -= regions_needed;
	spin_unlock(&resv->lock);
}

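/*
 * Typical use of the three routines above: a caller first invokes
 * region_chg(resv, f, t, &regions_needed) to learn how many huge pages in
 * [f, t) still lack reservations and to pre-allocate cache entries, then
 * either commits the range with region_add(resv, f, t, regions_needed, h,
 * h_cg) or backs out with region_abort(resv, f, t, regions_needed) if the
 * global reservation could not be acquired.
 */
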
/*
 * Delete the specified range [f, t) from the reserve map.  If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted.  Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more.  In the
 * case where a region must be split, a new region descriptor must
 * be allocated.  If the allocation fails, -ENOMEM will be returned.
 * NOTE: If the parameter t == LONG_MAX, then we will never split
 * a region and possibly return -ENOMEM.  Callers specifying
 * t == LONG_MAX do not need to check for -ENOMEM error.
 */
static long region_del(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *trg;
	struct file_region *nrg = NULL;
	long del = 0;

retry:
	spin_lock(&resv->lock);
	list_for_each_entry_safe(rg, trg, head, link) {
		/*
		 * Skip regions before the range to be deleted.  file_region
		 * ranges are normally of the form [from, to).  However, there
		 * may be a "placeholder" entry in the map which is of the form
		 * (from, to) with from == to.  Check for placeholder entries
		 * at the beginning of the range to be deleted.
		 */
		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
			continue;

		if (rg->from >= t)
			break;

		if (f > rg->from && t < rg->to) { /* Must split region */
			/*
			 * Check for an entry in the cache before dropping
			 * lock and attempting allocation.
			 */
			if (!nrg &&
			    resv->region_cache_count > resv->adds_in_progress) {
				nrg = list_first_entry(&resv->region_cache,
							struct file_region,
							link);
				list_del(&nrg->link);
				resv->region_cache_count--;
			}

			if (!nrg) {
				spin_unlock(&resv->lock);
				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
				if (!nrg)
					return -ENOMEM;
				goto retry;
			}

			del += t - f;
			hugetlb_cgroup_uncharge_file_region(
				resv, rg, t - f, false);

			/* New entry for end of split region */
			nrg->from = t;
			nrg->to = rg->to;

			copy_hugetlb_cgroup_uncharge_info(nrg, rg);

			INIT_LIST_HEAD(&nrg->link);

			/* Original entry is trimmed */
			rg->to = f;

			list_add(&nrg->link, &rg->link);
			nrg = NULL;
			break;
		}

		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
			del += rg->to - rg->from;
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - rg->from, true);
			list_del(&rg->link);
			kfree(rg);
			continue;
		}

		if (f <= rg->from) {	/* Trim beginning of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    t - rg->from, false);

			del += t - rg->from;
			rg->from = t;
		} else {		/* Trim end of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - f, false);

			del += rg->to - f;
			rg->to = f;
		}
	}

	spin_unlock(&resv->lock);
	kfree(nrg);
	return del;
}

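/*
 * For example, deleting a range [f, t) that lies strictly inside an existing
 * region [from, to) (from < f and t < to) trims the original entry down to
 * [from, f) and inserts a new entry for [t, to), which is why a spare
 * file_region may have to be allocated in the split case above.
 */
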
/*
 * A rare out of memory error was encountered which prevented removal of
 * the reserve map region for a page.  The huge page itself was free'ed
 * and removed from the page cache.  This routine will adjust the subpool
 * usage count, and the global reserve count if needed.  By incrementing
 * these counts, the reserve map entry which could not be deleted will
 * appear as a "reserved" entry instead of simply dangling with incorrect
 * counts.
 */
void hugetlb_fix_reserve_counts(struct inode *inode)
{
	struct hugepage_subpool *spool = subpool_inode(inode);
	long rsv_adjust;
	bool reserved = false;

	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
	if (rsv_adjust > 0) {
		struct hstate *h = hstate_inode(inode);

		if (!hugetlb_acct_memory(h, 1))
			reserved = true;
	} else if (!rsv_adjust) {
		reserved = true;
	}

	if (!reserved)
		pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
}

/*
 * Count and return the number of huge pages in the reserve map
 * that intersect with the range [f, t).
 */
static long region_count(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg;
	long chg = 0;

	spin_lock(&resv->lock);
	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}
	spin_unlock(&resv->lock);

	return chg;
}

/*
 * Convert the address within this vma to the page offset within
 * the mapping, huge page units here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}

/**
 * vma_kernel_pagesize - Page size granularity for this VMA.
 * @vma: The user mapping.
 *
 * Folios in this VMA will be aligned to, and at least the size of the
 * number of bytes returned by this function.
 *
 * Return: The default size of the folios allocated when backing a VMA.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->pagesize)
		return vma->vm_ops->pagesize(vma);
	return PAGE_SIZE;
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific 'strong'
 * version of this symbol is required.
 */
__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have their future faults succeed.
 *
 * With the exception of hugetlb_dup_vma_private() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held.  It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file, this region map represents the backing file
 * pages which have ever had a reservation assigned which this persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it, this region map represents those offsets which have consumed
 * reservation ie. where pages have been instantiated.
 */

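/*
 * For a MAP_PRIVATE owner VMA the two pieces are combined in
 * vm_private_data, e.g.:
 *
 *	set_vma_resv_map(vma, resv_map);
 *	set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
 *
 * leaves vm_private_data holding the resv_map pointer with HPAGE_RESV_OWNER
 * set in its low bits; vma_resv_map() masks the flag bits back off with
 * ~HPAGE_RESV_MASK.
 */
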
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
					unsigned long value)
{
	vma->vm_private_data = (void *)value;
}

static void
resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
					  struct hugetlb_cgroup *h_cg,
					  struct hstate *h)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (!h_cg || !h) {
		resv_map->reservation_counter = NULL;
		resv_map->pages_per_hpage = 0;
		resv_map->css = NULL;
	} else {
		resv_map->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		resv_map->pages_per_hpage = pages_per_huge_page(h);
		resv_map->css = &h_cg->css;
	}
#endif
}

struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);

	if (!resv_map || !rg) {
		kfree(resv_map);
		kfree(rg);
		return NULL;
	}

	kref_init(&resv_map->refs);
	spin_lock_init(&resv_map->lock);
	INIT_LIST_HEAD(&resv_map->regions);
	init_rwsem(&resv_map->rw_sema);

	resv_map->adds_in_progress = 0;
	/*
	 * Initialize these to 0. On shared mappings, 0's here indicate these
	 * fields don't do cgroup accounting. On private mappings, these will be
	 * re-initialized to the proper values, to indicate that hugetlb cgroup
	 * reservations are to be un-charged from here.
	 */
	resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);

	INIT_LIST_HEAD(&resv_map->region_cache);
	list_add(&rg->link, &resv_map->region_cache);
	resv_map->region_cache_count = 1;

	return resv_map;
}

void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
	struct list_head *head = &resv_map->region_cache;
	struct file_region *rg, *trg;

	/* Clear out any active regions before we release the map. */
	region_del(resv_map, 0, LONG_MAX);

	/* ... and any entries left in the cache */
	list_for_each_entry_safe(rg, trg, head, link) {
		list_del(&rg->link);
		kfree(rg);
	}

	VM_BUG_ON(resv_map->adds_in_progress);

	kfree(resv_map);
}

static inline struct resv_map *inode_resv_map(struct inode *inode)
{
	/*
	 * At inode evict time, i_mapping may not point to the original
	 * address space within the inode.  This original address space
	 * contains the pointer to the resv_map.  So, always use the
	 * address space embedded within the inode.
	 * The VERY common case is inode->mapping == &inode->i_data but,
	 * this may not be true for device special inodes.
	 */
	return (struct resv_map *)(&inode->i_data)->i_private_data;
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (vma->vm_flags & VM_MAYSHARE) {
		struct address_space *mapping = vma->vm_file->f_mapping;
		struct inode *inode = mapping->host;

		return inode_resv_map(inode);

	} else {
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	}
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);

	return (get_vma_private_data(vma) & flag) != 0;
}

bool __vma_private_lock(struct vm_area_struct *vma)
{
	return !(vma->vm_flags & VM_MAYSHARE) &&
		get_vma_private_data(vma) & ~HPAGE_RESV_MASK &&
		is_vma_resv_set(vma, HPAGE_RESV_OWNER);
}

void hugetlb_dup_vma_private(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	/*
	 * Clear vm_private_data
	 * - For shared mappings this is a per-vma semaphore that may be
	 *   allocated in a subsequent call to hugetlb_vm_op_open.
	 *   Before clearing, make sure pointer is not associated with vma
	 *   as this will leak the structure.  This is the case when called
	 *   via clear_vma_resv_huge_pages() and hugetlb_vm_op_open has already
	 *   been called to allocate a new structure.
	 * - For MAP_PRIVATE mappings, this is the reserve map which does
	 *   not apply to children.  Faults generated by the children are
	 *   not guaranteed to succeed, even if read-only.
	 */
	if (vma->vm_flags & VM_MAYSHARE) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		if (vma_lock && vma_lock->vma != vma)
			vma->vm_private_data = NULL;
	} else
		vma->vm_private_data = NULL;
}

/*
 * Reset and decrement one ref on hugepage private reservation.
 * Called with mm->mmap_lock writer semaphore held.
 * This function should be only used by move_vma() and operate on
 * same sized vma. It should never come here with last ref on the
 * reservation.
 */
void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	/*
	 * Clear the old hugetlb private page reservation.
	 * It has already been transferred to new_vma.
	 *
	 * During a mremap() operation of a hugetlb vma we call move_vma()
	 * which copies vma into new_vma and unmaps vma. After the copy
	 * operation both new_vma and vma share a reference to the resv_map
	 * struct, and at that point vma is about to be unmapped. We don't
	 * want to return the reservation to the pool at unmap of vma because
	 * the reservation still lives on in new_vma, so simply decrement the
	 * ref here and remove the resv_map reference from this vma.
	 */
	struct resv_map *reservations = vma_resv_map(vma);

	if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
		kref_put(&reservations->refs, resv_map_release);
	}

	hugetlb_dup_vma_private(vma);
}

/* Returns true if the VMA has associated reserve pages */
static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
{
	if (vma->vm_flags & VM_NORESERVE) {
		/*
		 * This address is already reserved by other process(chg == 0),
		 * so, we should decrement reserved count. Without decrementing,
		 * reserve count remains after releasing inode, because this
		 * allocated page will go into page cache and is regarded as
		 * coming from reserved pool in releasing step.  Currently, we
		 * don't have any other solution to deal with this situation
		 * properly, so add work-around here.
		 */
		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
			return true;
		else
			return false;
	}

	/* Shared mappings always use reserves */
	if (vma->vm_flags & VM_MAYSHARE) {
		/*
		 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
		 * be a region map for all pages.  The only situation where
		 * there is no region map is if a hole was punched via
		 * fallocate.  In this case, there really are no reserves to
		 * use.  This situation is indicated if chg != 0.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	/*
	 * Only the process that called mmap() has reserves for
	 * private mappings.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/*
		 * Like the shared case above, a hole punch or truncate
		 * could have been performed on the private mapping.
		 * Examine the value of chg to determine if reserves
		 * actually exist or were previously consumed.
		 * Very Subtle - The value of chg comes from a previous
		 * call to vma_needs_reserves().  The reserve map for
		 * private mappings has different (opposite) semantics
		 * than that of shared mappings.  vma_needs_reserves()
		 * has already taken this difference in semantics into
		 * account.  Therefore, the meaning of chg is the same
		 * as in the shared case above.  Code could easily be
		 * combined, but keeping it separate draws attention to
		 * subtle differences.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	return false;
}

static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
{
	int nid = folio_nid(folio);

	lockdep_assert_held(&hugetlb_lock);
	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);

	list_move(&folio->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
	folio_set_hugetlb_freed(folio);
}

static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h,
								int nid)
{
	struct folio *folio;
	bool pin = !!(current->flags & PF_MEMALLOC_PIN);

	lockdep_assert_held(&hugetlb_lock);
	list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) {
		if (pin && !folio_is_longterm_pinnable(folio))
			continue;

		if (folio_test_hwpoison(folio))
			continue;

		list_move(&folio->lru, &h->hugepage_activelist);
		folio_ref_unfreeze(folio, 1);
		folio_clear_hugetlb_freed(folio);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		return folio;
	}

	return NULL;
}

static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask,
							int nid, nodemask_t *nmask)
{
	unsigned int cpuset_mems_cookie;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;
	int node = NUMA_NO_NODE;

	zonelist = node_zonelist(nid, gfp_mask);

retry_cpuset:
	cpuset_mems_cookie = read_mems_allowed_begin();
	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
		struct folio *folio;

		if (!cpuset_zone_allowed(zone, gfp_mask))
			continue;
		/*
		 * no need to ask again on the same node. Pool is node rather than
		 * zone aware
		 */
		if (zone_to_nid(zone) == node)
			continue;
		node = zone_to_nid(zone);

		folio = dequeue_hugetlb_folio_node_exact(h, node);
		if (folio)
			return folio;
	}
	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
		goto retry_cpuset;

	return NULL;
}

static unsigned long available_huge_pages(struct hstate *h)
{
	return h->free_huge_pages - h->resv_huge_pages;
}

static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve,
				long chg)
{
	struct folio *folio = NULL;
	struct mempolicy *mpol;
	gfp_t gfp_mask;
	nodemask_t *nodemask;
	int nid;

	/*
	 * A child process with MAP_PRIVATE mappings created by their parent
	 * have no page reserves. This check ensures that reservations are
	 * not "stolen". The child may still get SIGKILLed
	 */
	if (!vma_has_reserves(vma, chg) && !available_huge_pages(h))
		goto err;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && !available_huge_pages(h))
		goto err;

	gfp_mask = htlb_alloc_mask(h);
	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);

	if (mpol_is_preferred_many(mpol)) {
		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
							nid, nodemask);

		/* Fallback to all nodes if page==NULL */
		nodemask = NULL;
	}

	if (!folio)
		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
							nid, nodemask);

	if (folio && !avoid_reserve && vma_has_reserves(vma, chg)) {
		folio_set_hugetlb_restore_reserve(folio);
		h->resv_huge_pages--;
	}

	mpol_cond_put(mpol);
	return folio;

err:
	return NULL;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node_in(nid, *nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

	return nid;
}

/*
 * helper for remove_pool_hugetlb_folio() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
		nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
		nr_nodes--)

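/*
 * Rough usage sketch (illustrative, not from this file): pool growth and
 * shrink paths walk the allowed nodes round-robin with these macros, e.g.
 *
 *	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
 *		// try to allocate one huge page on 'node'; break on success
 *	}
 *
 * Each iteration consumes one node id and advances h->next_nid_to_alloc
 * (or h->next_nid_to_free), so repeated calls interleave work across nodes.
 */
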
/* used to demote non-gigantic_huge pages as well */
static void __destroy_compound_gigantic_folio(struct folio *folio,
					unsigned int order, bool demote)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p;

	atomic_set(&folio->_entire_mapcount, 0);
	atomic_set(&folio->_nr_pages_mapped, 0);
	atomic_set(&folio->_pincount, 0);

	for (i = 1; i < nr_pages; i++) {
		p = folio_page(folio, i);
		p->flags &= ~PAGE_FLAGS_CHECK_AT_FREE;
		p->mapping = NULL;
		clear_compound_head(p);
		if (!demote)
			set_page_refcounted(p);
	}

	__folio_clear_head(folio);
}

static void destroy_compound_hugetlb_folio_for_demote(struct folio *folio,
					unsigned int order)
{
	__destroy_compound_gigantic_folio(folio, order, true);
}

#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static void destroy_compound_gigantic_folio(struct folio *folio,
					unsigned int order)
{
	__destroy_compound_gigantic_folio(folio, order, false);
}

static void free_gigantic_folio(struct folio *folio, unsigned int order)
{
	/*
	 * If the page isn't allocated using the cma allocator,
	 * cma_release() returns false.
	 */
#ifdef CONFIG_CMA
	int nid = folio_nid(folio);

	if (cma_release(hugetlb_cma[nid], &folio->page, 1 << order))
		return;
#endif

	free_contig_range(folio_pfn(folio), 1 << order);
}

#ifdef CONFIG_CONTIG_ALLOC
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nodemask)
{
	struct page *page;
	unsigned long nr_pages = pages_per_huge_page(h);
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

#ifdef CONFIG_CMA
	{
		int node;

		if (hugetlb_cma[nid]) {
			page = cma_alloc(hugetlb_cma[nid], nr_pages,
					huge_page_order(h), true);
			if (page)
				return page_folio(page);
		}

		if (!(gfp_mask & __GFP_THISNODE)) {
			for_each_node_mask(node, *nodemask) {
				if (node == nid || !hugetlb_cma[node])
					continue;

				page = cma_alloc(hugetlb_cma[node], nr_pages,
						huge_page_order(h), true);
				if (page)
					return page_folio(page);
			}
		}
	}
#endif

	page = alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
	return page ? page_folio(page) : NULL;
}

#else /* !CONFIG_CONTIG_ALLOC */
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
#endif /* CONFIG_CONTIG_ALLOC */

#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
static inline void free_gigantic_folio(struct folio *folio,
						unsigned int order) { }
static inline void destroy_compound_gigantic_folio(struct folio *folio,
						unsigned int order) { }
#endif

static inline void __clear_hugetlb_destructor(struct hstate *h,
						struct folio *folio)
{
	lockdep_assert_held(&hugetlb_lock);

	folio_clear_hugetlb(folio);
}

/*
 * Remove hugetlb folio from lists.
 * If vmemmap exists for the folio, update dtor so that the folio appears
 * as just a compound page.  Otherwise, wait until after allocating vmemmap
 * to update dtor.
 *
 * A reference is held on the folio, except in the case of demote.
 *
 * Must be called with hugetlb lock held.
 */
static void __remove_hugetlb_folio(struct hstate *h, struct folio *folio,
							bool adjust_surplus,
							bool demote)
{
	int nid = folio_nid(folio);

	VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio(folio), folio);
	VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio_rsvd(folio), folio);

	lockdep_assert_held(&hugetlb_lock);
	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return;

	list_del(&folio->lru);

	if (folio_test_hugetlb_freed(folio)) {
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
	}
	if (adjust_surplus) {
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	}

	/*
	 * We can only clear the hugetlb destructor after allocating vmemmap
	 * pages.  Otherwise, someone (memory error handling) may try to write
	 * to tail struct pages.
	 */
	if (!folio_test_hugetlb_vmemmap_optimized(folio))
		__clear_hugetlb_destructor(h, folio);

	/*
	 * In the case of demote we do not ref count the page as it will soon
	 * be turned into a page of smaller size.
	 */
	if (!demote)
		folio_ref_unfreeze(folio, 1);

	h->nr_huge_pages--;
	h->nr_huge_pages_node[nid]--;
}

static void remove_hugetlb_folio(struct hstate *h, struct folio *folio,
							bool adjust_surplus)
{
	__remove_hugetlb_folio(h, folio, adjust_surplus, false);
}

static void remove_hugetlb_folio_for_demote(struct hstate *h, struct folio *folio,
							bool adjust_surplus)
{
	__remove_hugetlb_folio(h, folio, adjust_surplus, true);
}

static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
			     bool adjust_surplus)
{
	int zeroed;
	int nid = folio_nid(folio);

	VM_BUG_ON_FOLIO(!folio_test_hugetlb_vmemmap_optimized(folio), folio);

	lockdep_assert_held(&hugetlb_lock);

	INIT_LIST_HEAD(&folio->lru);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;

	if (adjust_surplus) {
		h->surplus_huge_pages++;
		h->surplus_huge_pages_node[nid]++;
	}

	folio_set_hugetlb(folio);
	folio_change_private(folio, NULL);
	/*
	 * We have to set hugetlb_vmemmap_optimized again as above
	 * folio_change_private(folio, NULL) cleared it.
	 */
	folio_set_hugetlb_vmemmap_optimized(folio);

	/*
	 * This folio is about to be managed by the hugetlb allocator and
	 * should have no users.  Drop our reference, and check for others
	 * just in case.
	 */
	zeroed = folio_put_testzero(folio);
	if (unlikely(!zeroed))
		/*
		 * It is VERY unlikely someone else has taken a ref
		 * on the folio.  In this case, we simply return as
		 * free_huge_folio() will be called when this other ref
		 * is dropped.
		 */
		return;

	arch_clear_hugepage_flags(&folio->page);
	enqueue_hugetlb_folio(h, folio);
}

static void __update_and_free_hugetlb_folio(struct hstate *h,
						struct folio *folio)
{
	bool clear_dtor = folio_test_hugetlb_vmemmap_optimized(folio);

	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return;

	/*
	 * If we don't know which subpages are hwpoisoned, we can't free
	 * the hugepage, so it's leaked intentionally.
	 */
	if (folio_test_hugetlb_raw_hwp_unreliable(folio))
		return;

	/*
	 * If folio is not vmemmap optimized (!clear_dtor), then the folio
	 * is no longer identified as a hugetlb page.  hugetlb_vmemmap_restore_folio
	 * can only be passed hugetlb pages and will BUG otherwise.
	 */
	if (clear_dtor && hugetlb_vmemmap_restore_folio(h, folio)) {
		spin_lock_irq(&hugetlb_lock);
		/*
		 * If we cannot allocate vmemmap pages, just refuse to free the
		 * page and put the page back on the hugetlb free list and treat
		 * as a surplus page.
		 */
		add_hugetlb_folio(h, folio, true);
		spin_unlock_irq(&hugetlb_lock);
		return;
	}

	/*
	 * Move PageHWPoison flag from head page to the raw error pages,
	 * which makes any healthy subpages reusable.
	 */
	if (unlikely(folio_test_hwpoison(folio)))
		folio_clear_hugetlb_hwpoison(folio);

	/*
	 * If vmemmap pages were allocated above, then we need to clear the
	 * hugetlb destructor under the hugetlb lock.
	 */
	if (clear_dtor) {
		spin_lock_irq(&hugetlb_lock);
		__clear_hugetlb_destructor(h, folio);
		spin_unlock_irq(&hugetlb_lock);
	}

	/*
	 * Non-gigantic pages demoted from CMA allocated gigantic pages
	 * need to be given back to CMA in free_gigantic_folio.
	 */
	if (hstate_is_gigantic(h) ||
	    hugetlb_cma_folio(folio, huge_page_order(h))) {
		destroy_compound_gigantic_folio(folio, huge_page_order(h));
		free_gigantic_folio(folio, huge_page_order(h));
	} else {
		__free_pages(&folio->page, huge_page_order(h));
	}
}

/*
 * Since update_and_free_hugetlb_folio() can be called under any context, we
 * cannot use GFP_KERNEL to allocate vmemmap pages. However, we can defer the
 * actual freeing in a workqueue to prevent from using GFP_ATOMIC to allocate
 * the vmemmap pages.
 *
 * free_hpage_workfn() locklessly retrieves the linked list of pages to be
 * freed and frees them one-by-one. As the page->mapping pointer is going
 * to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node
 * structure of a lockless linked list of huge pages to be freed.
 */
static LLIST_HEAD(hpage_freelist);

static void free_hpage_workfn(struct work_struct *work)
{
	struct llist_node *node;

	node = llist_del_all(&hpage_freelist);

	while (node) {
		struct folio *folio;
		struct hstate *h;

		folio = container_of((struct address_space **)node,
				     struct folio, mapping);
		node = node->next;
		folio->mapping = NULL;
		/*
		 * The VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio) in
		 * folio_hstate() is going to trigger because a previous call to
		 * remove_hugetlb_folio() will clear the hugetlb bit, so do
		 * not use folio_hstate() directly.
		 */
		h = size_to_hstate(folio_size(folio));

		__update_and_free_hugetlb_folio(h, folio);

		cond_resched();
	}
}
static DECLARE_WORK(free_hpage_work, free_hpage_workfn);

static inline void flush_free_hpage_work(struct hstate *h)
{
	if (hugetlb_vmemmap_optimizable(h))
		flush_work(&free_hpage_work);
}

static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
				 bool atomic)
{
	if (!folio_test_hugetlb_vmemmap_optimized(folio) || !atomic) {
		__update_and_free_hugetlb_folio(h, folio);
		return;
	}

	/*
	 * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
	 *
	 * Only call schedule_work() if hpage_freelist is previously
	 * empty. Otherwise, schedule_work() had been called but the workfn
	 * hasn't retrieved the list yet.
	 */
	if (llist_add((struct llist_node *)&folio->mapping, &hpage_freelist))
		schedule_work(&free_hpage_work);
}

static void bulk_vmemmap_restore_error(struct hstate *h,
					struct list_head *folio_list,
					struct list_head *non_hvo_folios)
{
	struct folio *folio, *t_folio;

	if (!list_empty(non_hvo_folios)) {
		/*
		 * Free any restored hugetlb pages so that restore of the
		 * entire list can be retried.
		 * The idea is that in the common case of ENOMEM errors freeing
		 * hugetlb pages with vmemmap we will free up memory so that we
		 * can allocate vmemmap for more hugetlb pages.
		 */
		list_for_each_entry_safe(folio, t_folio, non_hvo_folios, lru) {
			list_del(&folio->lru);
			spin_lock_irq(&hugetlb_lock);
			__clear_hugetlb_destructor(h, folio);
			spin_unlock_irq(&hugetlb_lock);
			update_and_free_hugetlb_folio(h, folio, false);
			cond_resched();
		}
	} else {
		/*
		 * In the case where there are no folios which can be
		 * immediately freed, we loop through the list trying to restore
		 * vmemmap individually in the hope that someone elsewhere may
		 * have done something to cause success (such as freeing some
		 * memory).  If unable to restore a hugetlb page, the hugetlb
		 * page is made a surplus page and removed from the list.
		 * If we are able to restore vmemmap and free one hugetlb page,
		 * we quit processing the list to retry the bulk operation.
		 */
		list_for_each_entry_safe(folio, t_folio, folio_list, lru)
			if (hugetlb_vmemmap_restore_folio(h, folio)) {
				list_del(&folio->lru);
				spin_lock_irq(&hugetlb_lock);
				add_hugetlb_folio(h, folio, true);
				spin_unlock_irq(&hugetlb_lock);
			} else {
				list_del(&folio->lru);
				spin_lock_irq(&hugetlb_lock);
				__clear_hugetlb_destructor(h, folio);
				spin_unlock_irq(&hugetlb_lock);
				update_and_free_hugetlb_folio(h, folio, false);
				break;
			}
	}
}

static void update_and_free_pages_bulk(struct hstate *h,
				struct list_head *folio_list)
{
	long ret;
	struct folio *folio, *t_folio;
	LIST_HEAD(non_hvo_folios);

	/*
	 * First allocate required vmemmap (if necessary) for all folios.
	 * Carefully handle errors and free up any available hugetlb pages
	 * in an effort to make forward progress.
	 */
retry:
	ret = hugetlb_vmemmap_restore_folios(h, folio_list, &non_hvo_folios);
	if (ret < 0) {
		bulk_vmemmap_restore_error(h, folio_list, &non_hvo_folios);
		goto retry;
	}

	/*
	 * At this point, list should be empty, ret should be >= 0 and there
	 * should only be pages on the non_hvo_folios list.
	 * Do note that the non_hvo_folios list could be empty.
	 * Without HVO enabled, ret will be 0 and there is no need to call
	 * __clear_hugetlb_destructor as this was done previously.
	 */
	VM_WARN_ON(!list_empty(folio_list));
	VM_WARN_ON(ret < 0);
	if (!list_empty(&non_hvo_folios) && ret) {
		spin_lock_irq(&hugetlb_lock);
		list_for_each_entry(folio, &non_hvo_folios, lru)
			__clear_hugetlb_destructor(h, folio);
		spin_unlock_irq(&hugetlb_lock);
	}

	list_for_each_entry_safe(folio, t_folio, &non_hvo_folios, lru) {
		update_and_free_hugetlb_folio(h, folio, false);
		cond_resched();
	}
}

struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}

void free_huge_folio(struct folio *folio)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * compound page destructor.
	 */
	struct hstate *h = folio_hstate(folio);
	int nid = folio_nid(folio);
	struct hugepage_subpool *spool = hugetlb_folio_subpool(folio);
	bool restore_reserve;
	unsigned long flags;

	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
	VM_BUG_ON_FOLIO(folio_mapcount(folio), folio);

	hugetlb_set_folio_subpool(folio, NULL);
	if (folio_test_anon(folio))
		__ClearPageAnonExclusive(&folio->page);
	folio->mapping = NULL;
	restore_reserve = folio_test_hugetlb_restore_reserve(folio);
	folio_clear_hugetlb_restore_reserve(folio);

	/*
	 * If HPageRestoreReserve was set on page, page allocation consumed a
	 * reservation.  If the page was associated with a subpool, there
	 * would have been a page reserved in the subpool before allocation
	 * via hugepage_subpool_get_pages().  Since we are 'restoring' the
	 * reservation, do not call hugepage_subpool_put_pages() as this will
	 * remove the reserved page from the subpool.
	 */
	if (!restore_reserve) {
		/*
		 * A return code of zero implies that the subpool will be
		 * under its minimum size if the reservation is not restored
		 * after page is free.  Therefore, force restore_reserve
		 * operation.
		 */
		if (hugepage_subpool_put_pages(spool, 1) == 0)
			restore_reserve = true;
	}

	spin_lock_irqsave(&hugetlb_lock, flags);
	folio_clear_hugetlb_migratable(folio);
	hugetlb_cgroup_uncharge_folio(hstate_index(h),
				     pages_per_huge_page(h), folio);
	hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
					  pages_per_huge_page(h), folio);
	mem_cgroup_uncharge(folio);
	if (restore_reserve)
		h->resv_huge_pages++;

	if (folio_test_hugetlb_temporary(folio)) {
		remove_hugetlb_folio(h, folio, false);
		spin_unlock_irqrestore(&hugetlb_lock, flags);
		update_and_free_hugetlb_folio(h, folio, true);
	} else if (h->surplus_huge_pages_node[nid]) {
		/* remove the page from active list */
		remove_hugetlb_folio(h, folio, true);
		spin_unlock_irqrestore(&hugetlb_lock, flags);
		update_and_free_hugetlb_folio(h, folio, true);
	} else {
		arch_clear_hugepage_flags(&folio->page);
		enqueue_hugetlb_folio(h, folio);
		spin_unlock_irqrestore(&hugetlb_lock, flags);
	}
}

/*
 * Must be called with the hugetlb lock held
 */
static void __prep_account_new_huge_page(struct hstate *h, int nid)
{
	lockdep_assert_held(&hugetlb_lock);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;
}

static void init_new_hugetlb_folio(struct hstate *h, struct folio *folio)
{
	folio_set_hugetlb(folio);
	INIT_LIST_HEAD(&folio->lru);
	hugetlb_set_folio_subpool(folio, NULL);
	set_hugetlb_cgroup(folio, NULL);
	set_hugetlb_cgroup_rsvd(folio, NULL);
}

2058 static void __prep_new_hugetlb_folio(struct hstate
*h
, struct folio
*folio
)
2060 init_new_hugetlb_folio(h
, folio
);
2061 hugetlb_vmemmap_optimize_folio(h
, folio
);
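/*
 * Note (summary of the helpers above): init_new_hugetlb_folio() only marks
 * the folio as hugetlb and clears its book-keeping fields, while
 * __prep_new_hugetlb_folio() additionally hands the folio to HVO
 * (hugetlb_vmemmap_optimize_folio) so unused vmemmap can be freed.  Callers
 * that batch-optimize a whole list use the former and later run
 * hugetlb_vmemmap_optimize_folios() on the list instead.
 */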
static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int nid)
{
	__prep_new_hugetlb_folio(h, folio);
	spin_lock_irq(&hugetlb_lock);
	__prep_account_new_huge_page(h, nid);
	spin_unlock_irq(&hugetlb_lock);
}
static bool __prep_compound_gigantic_folio(struct folio *folio,
					unsigned int order, bool demote)
{
	int i, j;
	int nr_pages = 1 << order;
	struct page *p;

	__folio_clear_reserved(folio);
	for (i = 0; i < nr_pages; i++) {
		p = folio_page(folio, i);

		/*
		 * For gigantic hugepages allocated through bootmem at
		 * boot, it's safer to be consistent with the not-gigantic
		 * hugepages and clear the PG_reserved bit from all tail pages
		 * too.  Otherwise drivers using get_user_pages() to access tail
		 * pages may get the reference counting wrong if they see
		 * PG_reserved set on a tail page (despite the head page not
		 * having PG_reserved set).  Enforcing this consistency between
		 * head and tail pages allows drivers to optimize away a check
		 * on the head page when they need know if put_page() is needed
		 * after get_user_pages().
		 */
		if (i != 0)	/* head page cleared above */
			__ClearPageReserved(p);
		/*
		 * Subtle and very unlikely
		 *
		 * Gigantic 'page allocators' such as memblock or cma will
		 * return a set of pages with each page ref counted.  We need
		 * to turn this set of pages into a compound page with tail
		 * page ref counts set to zero.  Code such as speculative page
		 * cache adding could take a ref on a 'to be' tail page.
		 * We need to respect any increased ref count, and only set
		 * the ref count to zero if count is currently 1.  If count
		 * is not 1, we return an error.  An error return indicates
		 * the set of pages can not be converted to a gigantic page.
		 * The caller who allocated the pages should then discard the
		 * pages using the appropriate free interface.
		 *
		 * In the case of demote, the ref count will be zero.
		 */
		if (!demote) {
			if (!page_ref_freeze(p, 1)) {
				pr_warn("HugeTLB page can not be used due to unexpected inflated ref count\n");
				goto out_error;
			}
		} else {
			VM_BUG_ON_PAGE(page_count(p), p);
		}
		if (i != 0)
			set_compound_head(p, &folio->page);
	}
	__folio_set_head(folio);
	/* we rely on prep_new_hugetlb_folio to set the destructor */
	folio_set_order(folio, order);
	atomic_set(&folio->_entire_mapcount, -1);
	atomic_set(&folio->_nr_pages_mapped, 0);
	atomic_set(&folio->_pincount, 0);
	return true;

out_error:
	/* undo page modifications made above */
	for (j = 0; j < i; j++) {
		p = folio_page(folio, j);
		if (j != 0)
			clear_compound_head(p);
		set_page_refcounted(p);
	}
	/* need to clear PG_reserved on remaining tail pages */
	for (; j < nr_pages; j++) {
		p = folio_page(folio, j);
		__ClearPageReserved(p);
	}
	return false;
}
static bool prep_compound_gigantic_folio(struct folio *folio,
							unsigned int order)
{
	return __prep_compound_gigantic_folio(folio, order, false);
}

static bool prep_compound_gigantic_folio_for_demote(struct folio *folio,
							unsigned int order)
{
	return __prep_compound_gigantic_folio(folio, order, true);
}
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
 * transparent huge pages.  See the PageTransHuge() documentation for more
 * details.
 */
int PageHuge(struct page *page)
{
	struct folio *folio;

	if (!PageCompound(page))
		return 0;
	folio = page_folio(page);
	return folio_test_hugetlb(folio);
}
EXPORT_SYMBOL_GPL(PageHuge);
/*
 * Find and lock address space (mapping) in write mode.
 *
 * Upon entry, the page is locked which means that page_mapping() is
 * stable.  Due to locking order, we can only trylock_write.  If we can
 * not get the lock, simply return NULL to caller.
 */
struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
{
	struct address_space *mapping = page_mapping(hpage);

	if (!mapping)
		return mapping;

	if (i_mmap_trylock_write(mapping))
		return mapping;

	return NULL;
}
static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask,
		nodemask_t *node_alloc_noretry)
{
	int order = huge_page_order(h);
	struct page *page;
	bool alloc_try_hard = true;
	bool retry = true;

	/*
	 * By default we always try hard to allocate the page with
	 * __GFP_RETRY_MAYFAIL flag.  However, if we are allocating pages in
	 * a loop (to adjust global huge page counts) and previous allocation
	 * failed, do not continue to try hard on the same node.  Use the
	 * node_alloc_noretry bitmap to manage this state information.
	 */
	if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
		alloc_try_hard = false;
	gfp_mask |= __GFP_COMP|__GFP_NOWARN;
	if (alloc_try_hard)
		gfp_mask |= __GFP_RETRY_MAYFAIL;
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();
retry:
	page = __alloc_pages(gfp_mask, order, nid, nmask);

	/* Freeze head page */
	if (page && !page_ref_freeze(page, 1)) {
		__free_pages(page, order);
		if (retry) {	/* retry once */
			retry = false;
			goto retry;
		}
		/* WOW!  twice in a row. */
		pr_warn("HugeTLB head page unexpected inflated ref count\n");
		page = NULL;
	}

	/*
	 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page this
	 * indicates an overall state change.  Clear bit so that we resume
	 * normal 'try hard' allocations.
	 */
	if (node_alloc_noretry && page && !alloc_try_hard)
		node_clear(nid, *node_alloc_noretry);

	/*
	 * If we tried hard to get a page but failed, set bit so that
	 * subsequent attempts will not try as hard until there is an
	 * overall state change.
	 */
	if (node_alloc_noretry && !page && alloc_try_hard)
		node_set(nid, *node_alloc_noretry);

	if (!page) {
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
		return NULL;
	}

	__count_vm_event(HTLB_BUDDY_PGALLOC);
	return page_folio(page);
}
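/*
 * Note: on success the returned folio is frozen (head page ref count 0);
 * callers must unfreeze it with folio_ref_unfreeze() before handing it out.
 * The node_alloc_noretry bitmap is purely an optimization for the pool
 * resize loops: a node that just failed a "try hard" allocation is skipped
 * for further try-hard attempts until an allocation on it succeeds again.
 */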
static struct folio *__alloc_fresh_hugetlb_folio(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask,
		nodemask_t *node_alloc_noretry)
{
	struct folio *folio;
	bool retry = false;

retry:
	if (hstate_is_gigantic(h))
		folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask);
	else
		folio = alloc_buddy_hugetlb_folio(h, gfp_mask,
				nid, nmask, node_alloc_noretry);
	if (!folio)
		return NULL;

	if (hstate_is_gigantic(h)) {
		if (!prep_compound_gigantic_folio(folio, huge_page_order(h))) {
			/*
			 * Rare failure to convert pages to compound page.
			 * Free pages and try again - ONCE!
			 */
			free_gigantic_folio(folio, huge_page_order(h));
			if (!retry) {
				retry = true;
				goto retry;
			}
			return NULL;
		}
	}

	return folio;
}
static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask,
		nodemask_t *node_alloc_noretry)
{
	struct folio *folio;

	folio = __alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask,
						node_alloc_noretry);
	if (folio)
		init_new_hugetlb_folio(h, folio);
	return folio;
}
/*
 * Common helper to allocate a fresh hugetlb page. All specific allocators
 * should use this function to get new hugetlb pages
 *
 * Note that returned page is 'frozen':  ref count of head page and all tail
 * pages is zero.
 */
static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask,
		nodemask_t *node_alloc_noretry)
{
	struct folio *folio;

	folio = __alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask,
						node_alloc_noretry);
	if (!folio)
		return NULL;

	prep_new_hugetlb_folio(h, folio, folio_nid(folio));
	return folio;
}
static void prep_and_add_allocated_folios(struct hstate *h,
					struct list_head *folio_list)
{
	unsigned long flags;
	struct folio *folio, *tmp_f;

	/* Send list for bulk vmemmap optimization processing */
	hugetlb_vmemmap_optimize_folios(h, folio_list);

	/* Add all new pool pages to free lists in one lock cycle */
	spin_lock_irqsave(&hugetlb_lock, flags);
	list_for_each_entry_safe(folio, tmp_f, folio_list, lru) {
		__prep_account_new_huge_page(h, folio_nid(folio));
		enqueue_hugetlb_folio(h, folio);
	}
	spin_unlock_irqrestore(&hugetlb_lock, flags);
}
/*
 * Allocates a fresh hugetlb page in a node interleaved manner.  The page
 * will later be added to the appropriate hugetlb pool.
 */
static struct folio *alloc_pool_huge_folio(struct hstate *h,
					nodemask_t *nodes_allowed,
					nodemask_t *node_alloc_noretry)
{
	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
	int nr_nodes, node;

	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
		struct folio *folio;

		folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, node,
					nodes_allowed, node_alloc_noretry);
		if (folio)
			return folio;
	}

	return NULL;
}
/*
 * Remove huge page from pool from next node to free.  Attempt to keep
 * persistent huge pages more or less balanced over allowed nodes.
 * This routine only 'removes' the hugetlb page.  The caller must make
 * an additional call to free the page to low level allocators.
 * Called with hugetlb_lock locked.
 */
static struct folio *remove_pool_hugetlb_folio(struct hstate *h,
		nodemask_t *nodes_allowed, bool acct_surplus)
{
	int nr_nodes, node;
	struct folio *folio = NULL;

	lockdep_assert_held(&hugetlb_lock);
	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
		/*
		 * If we're returning unused surplus pages, only examine
		 * nodes with surplus pages.
		 */
		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
		    !list_empty(&h->hugepage_freelists[node])) {
			folio = list_entry(h->hugepage_freelists[node].next,
					   struct folio, lru);
			remove_hugetlb_folio(h, folio, acct_surplus);
			break;
		}
	}

	return folio;
}
/*
 * Dissolve a given free hugepage into free buddy pages. This function does
 * nothing for in-use hugepages and non-hugepages.
 * This function returns values like below:
 *
 *  -ENOMEM: failed to allocate vmemmap pages to free the freed hugepages
 *           when the system is under memory pressure and the feature of
 *           freeing unused vmemmap pages associated with each hugetlb page
 *           is enabled.
 *  -EBUSY:  failed to dissolved free hugepages or the hugepage is in-use
 *           (allocated or reserved.)
 *       0:  successfully dissolved free hugepages or the page is not a
 *           hugepage (considered as already dissolved)
 */
int dissolve_free_huge_page(struct page *page)
{
	int rc = -EBUSY;
	struct folio *folio = page_folio(page);

retry:
	/* Not to disrupt normal path by vainly holding hugetlb_lock */
	if (!folio_test_hugetlb(folio))
		return 0;

	spin_lock_irq(&hugetlb_lock);
	if (!folio_test_hugetlb(folio)) {
		rc = 0;
		goto out;
	}

	if (!folio_ref_count(folio)) {
		struct hstate *h = folio_hstate(folio);

		if (!available_huge_pages(h))
			goto out;

		/*
		 * We should make sure that the page is already on the free list
		 * when it is dissolved.
		 */
		if (unlikely(!folio_test_hugetlb_freed(folio))) {
			spin_unlock_irq(&hugetlb_lock);
			cond_resched();

			/*
			 * Theoretically, we should return -EBUSY when we
			 * encounter this race. In fact, we have a chance
			 * to successfully dissolve the page if we do a
			 * retry. Because the race window is quite small.
			 * If we seize this opportunity, it is an optimization
			 * for increasing the success rate of dissolving page.
			 */
			goto retry;
		}

		remove_hugetlb_folio(h, folio, false);
		h->max_huge_pages--;
		spin_unlock_irq(&hugetlb_lock);

		/*
		 * Normally update_and_free_hugtlb_folio will allocate required vmemmmap
		 * before freeing the page.  update_and_free_hugtlb_folio will fail to
		 * free the page if it can not allocate required vmemmap.  We
		 * need to adjust max_huge_pages if the page is not freed.
		 * Attempt to allocate vmemmmap here so that we can take
		 * appropriate action on failure.
		 *
		 * The folio_test_hugetlb check here is because
		 * remove_hugetlb_folio will clear hugetlb folio flag for
		 * non-vmemmap optimized hugetlb folios.
		 */
		if (folio_test_hugetlb(folio)) {
			rc = hugetlb_vmemmap_restore_folio(h, folio);
			if (rc) {
				spin_lock_irq(&hugetlb_lock);
				add_hugetlb_folio(h, folio, false);
				h->max_huge_pages++;
				goto out;
			}
		} else
			rc = 0;

		update_and_free_hugetlb_folio(h, folio, false);
		return rc;
	}
out:
	spin_unlock_irq(&hugetlb_lock);
	return rc;
}
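/*
 * Typical callers (memory hotplug and hwpoison/soft-offline handling)
 * invoke dissolve_free_huge_page() and simply retry or give up on -EBUSY;
 * a return of 0 means either the page was dissolved here or it was never
 * (or is no longer) a hugetlb page.
 */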
/*
 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
 * make specified memory blocks removable from the system.
 * Note that this will dissolve a free gigantic hugepage completely, if any
 * part of it lies within the given range.
 * Also note that if dissolve_free_huge_page() returns with an error, all
 * free hugepages that were dissolved before that error are lost.
 */
int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int rc = 0;
	unsigned int order;
	struct hstate *h;

	if (!hugepages_supported())
		return rc;

	order = huge_page_order(&default_hstate);
	for_each_hstate(h)
		order = min(order, huge_page_order(h));

	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
		page = pfn_to_page(pfn);
		rc = dissolve_free_huge_page(page);
		if (rc)
			break;
	}

	return rc;
}
/*
 * Allocates a fresh surplus page from the page allocator.
 */
static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
				gfp_t gfp_mask, int nid, nodemask_t *nmask)
{
	struct folio *folio = NULL;

	if (hstate_is_gigantic(h))
		return NULL;

	spin_lock_irq(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
		goto out_unlock;
	spin_unlock_irq(&hugetlb_lock);

	folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
	if (!folio)
		return NULL;

	spin_lock_irq(&hugetlb_lock);
	/*
	 * We could have raced with the pool size change.
	 * Double check that and simply deallocate the new page
	 * if we would end up overcommiting the surpluses. Abuse
	 * temporary page to workaround the nasty free_huge_folio
	 * codeflow
	 */
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
		folio_set_hugetlb_temporary(folio);
		spin_unlock_irq(&hugetlb_lock);
		free_huge_folio(folio);
		return NULL;
	}

	h->surplus_huge_pages++;
	h->surplus_huge_pages_node[folio_nid(folio)]++;

out_unlock:
	spin_unlock_irq(&hugetlb_lock);

	return folio;
}
static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask,
				     int nid, nodemask_t *nmask)
{
	struct folio *folio;

	if (hstate_is_gigantic(h))
		return NULL;

	folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
	if (!folio)
		return NULL;

	/* fresh huge pages are frozen */
	folio_ref_unfreeze(folio, 1);
	/*
	 * We do not account these pages as surplus because they are only
	 * temporary and will be released properly on the last reference
	 */
	folio_set_hugetlb_temporary(folio);

	return folio;
}
/*
 * Use the VMA's mpolicy to allocate a huge page from the buddy.
 */
static
struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct folio *folio = NULL;
	struct mempolicy *mpol;
	gfp_t gfp_mask = htlb_alloc_mask(h);
	int nid;
	nodemask_t *nodemask;

	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
	if (mpol_is_preferred_many(mpol)) {
		gfp_t gfp = gfp_mask | __GFP_NOWARN;

		gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
		folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask);

		/* Fallback to all nodes if page==NULL */
		nodemask = NULL;
	}

	if (!folio)
		folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask);
	mpol_cond_put(mpol);
	return folio;
}
/* folio migration callback function */
struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
		nodemask_t *nmask, gfp_t gfp_mask)
{
	spin_lock_irq(&hugetlb_lock);
	if (available_huge_pages(h)) {
		struct folio *folio;

		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
						preferred_nid, nmask);
		if (folio) {
			spin_unlock_irq(&hugetlb_lock);
			return folio;
		}
	}
	spin_unlock_irq(&hugetlb_lock);

	return alloc_migrate_hugetlb_folio(h, gfp_mask, preferred_nid, nmask);
}
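/*
 * This is the allocation path used when migrating hugetlb folios (e.g. via
 * alloc_migration_target()): prefer an existing free pool page and only
 * fall back to a temporary, freshly allocated folio when the pool has
 * nothing available.
 */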
/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(struct hstate *h, long delta)
	__must_hold(&hugetlb_lock)
{
	LIST_HEAD(surplus_list);
	struct folio *folio, *tmp;
	int ret;
	long i;
	long needed, allocated;
	bool alloc_ok = true;

	lockdep_assert_held(&hugetlb_lock);
	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
	if (needed <= 0) {
		h->resv_huge_pages += delta;
		return 0;
	}

	allocated = 0;

	ret = -ENOMEM;
retry:
	spin_unlock_irq(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h),
				NUMA_NO_NODE, NULL);
		if (!folio) {
			alloc_ok = false;
			break;
		}
		list_add(&folio->lru, &surplus_list);
		cond_resched();
	}
	allocated += i;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock_irq(&hugetlb_lock);
	needed = (h->resv_huge_pages + delta) -
			(h->free_huge_pages + allocated);
	if (needed > 0) {
		if (alloc_ok)
			goto retry;
		/*
		 * We were not able to allocate enough pages to
		 * satisfy the entire reservation so we free what
		 * we've allocated so far.
		 */
		goto free;
	}
	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.  Commit the entire reservation here to prevent another
	 * process from stealing the pages as they are added to the pool but
	 * before they are reserved.
	 */
	needed += allocated;
	h->resv_huge_pages += delta;
	ret = 0;

	/* Free the needed pages to the hugetlb pool */
	list_for_each_entry_safe(folio, tmp, &surplus_list, lru) {
		if ((--needed) < 0)
			break;
		/* Add the page to the hugetlb allocator */
		enqueue_hugetlb_folio(h, folio);
	}
free:
	spin_unlock_irq(&hugetlb_lock);

	/*
	 * Free unnecessary surplus pages to the buddy allocator.
	 * Pages have no ref count, call free_huge_folio directly.
	 */
	list_for_each_entry_safe(folio, tmp, &surplus_list, lru)
		free_huge_folio(folio);
	spin_lock_irq(&hugetlb_lock);

	return ret;
}
/*
 * This routine has two main purposes:
 * 1) Decrement the reservation count (resv_huge_pages) by the value passed
 *    in unused_resv_pages.  This corresponds to the prior adjustments made
 *    to the associated reservation map.
 * 2) Free any unused surplus pages that may have been allocated to satisfy
 *    the reservation.  As many as unused_resv_pages may be freed.
 */
static void return_unused_surplus_pages(struct hstate *h,
					unsigned long unused_resv_pages)
{
	unsigned long nr_pages;
	LIST_HEAD(page_list);

	lockdep_assert_held(&hugetlb_lock);
	/* Uncommit the reservation */
	h->resv_huge_pages -= unused_resv_pages;

	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		goto out;

	/*
	 * Part (or even all) of the reservation could have been backed
	 * by pre-allocated pages. Only free surplus pages.
	 */
	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);

	/*
	 * We want to release as many surplus pages as possible, spread
	 * evenly across all nodes with memory. Iterate across these nodes
	 * until we can no longer free unreserved surplus pages. This occurs
	 * when the nodes with surplus pages have no free pages.
	 * remove_pool_hugetlb_folio() will balance the freed pages across the
	 * on-line nodes with memory and will handle the hstate accounting.
	 */
	while (nr_pages--) {
		struct folio *folio;

		folio = remove_pool_hugetlb_folio(h, &node_states[N_MEMORY], 1);
		if (!folio)
			goto out;

		list_add(&folio->lru, &page_list);
	}

out:
	spin_unlock_irq(&hugetlb_lock);
	update_and_free_pages_bulk(h, &page_list);
	spin_lock_irq(&hugetlb_lock);
}
/*
 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
 * are used by the huge page allocation routines to manage reservations.
 *
 * vma_needs_reservation is called to determine if the huge page at addr
 * within the vma has an associated reservation.  If a reservation is
 * needed, the value 1 is returned.  The caller is then responsible for
 * managing the global reservation and subpool usage counts.  After
 * the huge page has been allocated, vma_commit_reservation is called
 * to add the page to the reservation map.  If the page allocation fails,
 * the reservation must be ended instead of committed.  vma_end_reservation
 * is called in such cases.
 *
 * In the normal case, vma_commit_reservation returns the same value
 * as the preceding vma_needs_reservation call.  The only time this
 * is not the case is if a reserve map was changed between calls.  It
 * is the responsibility of the caller to notice the difference and
 * take appropriate action.
 *
 * vma_add_reservation is used in error paths where a reservation must
 * be restored when a newly allocated huge page must be freed.  It is
 * to be called after calling vma_needs_reservation to determine if a
 * reservation exists.
 *
 * vma_del_reservation is used in error paths where an entry in the reserve
 * map was created during huge page allocation and must be removed.  It is to
 * be called after calling vma_needs_reservation to determine if a reservation
 * exists.
 */
enum vma_resv_mode {
	VMA_NEEDS_RESV,
	VMA_COMMIT_RESV,
	VMA_END_RESV,
	VMA_ADD_RESV,
	VMA_DEL_RESV,
};
{
2816 static long __vma_reservation_common(struct hstate
*h
,
2817 struct vm_area_struct
*vma
, unsigned long addr
,
2818 enum vma_resv_mode mode
)
2820 struct resv_map
*resv
;
2823 long dummy_out_regions_needed
;
2825 resv
= vma_resv_map(vma
);
2829 idx
= vma_hugecache_offset(h
, vma
, addr
);
2831 case VMA_NEEDS_RESV
:
2832 ret
= region_chg(resv
, idx
, idx
+ 1, &dummy_out_regions_needed
);
2833 /* We assume that vma_reservation_* routines always operate on
2834 * 1 page, and that adding to resv map a 1 page entry can only
2835 * ever require 1 region.
2837 VM_BUG_ON(dummy_out_regions_needed
!= 1);
2839 case VMA_COMMIT_RESV
:
2840 ret
= region_add(resv
, idx
, idx
+ 1, 1, NULL
, NULL
);
2841 /* region_add calls of range 1 should never fail. */
2845 region_abort(resv
, idx
, idx
+ 1, 1);
2849 if (vma
->vm_flags
& VM_MAYSHARE
) {
2850 ret
= region_add(resv
, idx
, idx
+ 1, 1, NULL
, NULL
);
2851 /* region_add calls of range 1 should never fail. */
2854 region_abort(resv
, idx
, idx
+ 1, 1);
2855 ret
= region_del(resv
, idx
, idx
+ 1);
2859 if (vma
->vm_flags
& VM_MAYSHARE
) {
2860 region_abort(resv
, idx
, idx
+ 1, 1);
2861 ret
= region_del(resv
, idx
, idx
+ 1);
2863 ret
= region_add(resv
, idx
, idx
+ 1, 1, NULL
, NULL
);
2864 /* region_add calls of range 1 should never fail. */
2872 if (vma
->vm_flags
& VM_MAYSHARE
|| mode
== VMA_DEL_RESV
)
2875 * We know private mapping must have HPAGE_RESV_OWNER set.
2877 * In most cases, reserves always exist for private mappings.
2878 * However, a file associated with mapping could have been
2879 * hole punched or truncated after reserves were consumed.
2880 * As subsequent fault on such a range will not use reserves.
2881 * Subtle - The reserve map for private mappings has the
2882 * opposite meaning than that of shared mappings. If NO
2883 * entry is in the reserve map, it means a reservation exists.
2884 * If an entry exists in the reserve map, it means the
2885 * reservation has already been consumed. As a result, the
2886 * return value of this routine is the opposite of the
2887 * value returned from reserve map manipulation routines above.
static long vma_needs_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
}

static long vma_commit_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
}

static void vma_end_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	(void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
}

static long vma_add_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
}

static long vma_del_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV);
}
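/*
 * Illustrative call sequence for the helpers above (a sketch, not a new
 * API):
 *
 *	if (vma_needs_reservation(h, vma, addr) < 0)
 *		return -ENOMEM;
 *	folio = ...allocate huge folio...;
 *	if (folio)
 *		vma_commit_reservation(h, vma, addr);
 *	else
 *		vma_end_reservation(h, vma, addr);
 *
 * alloc_hugetlb_folio() below follows this pattern; its error paths
 * additionally use vma_add_reservation()/vma_del_reservation() via
 * restore_reserve_on_error().
 */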
/*
 * This routine is called to restore reservation information on error paths.
 * It should ONLY be called for folios allocated via alloc_hugetlb_folio(),
 * and the hugetlb mutex should remain held when calling this routine.
 *
 * It handles two specific cases:
 * 1) A reservation was in place and the folio consumed the reservation.
 *    hugetlb_restore_reserve is set in the folio.
 * 2) No reservation was in place for the page, so hugetlb_restore_reserve is
 *    not set.  However, alloc_hugetlb_folio always updates the reserve map.
 *
 * In case 1, free_huge_folio later in the error path will increment the
 * global reserve count.  But, free_huge_folio does not have enough context
 * to adjust the reservation map.  This case deals primarily with private
 * mappings.  Adjust the reserve map here to be consistent with global
 * reserve count adjustments to be made by free_huge_folio.  Make sure the
 * reserve map indicates there is a reservation present.
 *
 * In case 2, simply undo reserve map modifications done by alloc_hugetlb_folio.
 */
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
			unsigned long address, struct folio *folio)
{
	long rc = vma_needs_reservation(h, vma, address);

	if (folio_test_hugetlb_restore_reserve(folio)) {
		if (unlikely(rc < 0))
			/*
			 * Rare out of memory condition in reserve map
			 * manipulation.  Clear hugetlb_restore_reserve so
			 * that global reserve count will not be incremented
			 * by free_huge_folio.  This will make it appear
			 * as though the reservation for this folio was
			 * consumed.  This may prevent the task from
			 * faulting in the folio at a later time.  This
			 * is better than inconsistent global huge page
			 * accounting of reserve counts.
			 */
			folio_clear_hugetlb_restore_reserve(folio);
		else if (rc)
			(void)vma_add_reservation(h, vma, address);
		else
			vma_end_reservation(h, vma, address);
	} else {
		if (!rc) {
			/*
			 * This indicates there is an entry in the reserve map
			 * not added by alloc_hugetlb_folio.  We know it was added
			 * before the alloc_hugetlb_folio call, otherwise
			 * hugetlb_restore_reserve would be set on the folio.
			 * Remove the entry so that a subsequent allocation
			 * does not consume a reservation.
			 */
			rc = vma_del_reservation(h, vma, address);
			if (rc < 0)
				/*
				 * VERY rare out of memory condition.  Since
				 * we can not delete the entry, set
				 * hugetlb_restore_reserve so that the reserve
				 * count will be incremented when the folio
				 * is freed.  This reserve will be consumed
				 * on a subsequent allocation.
				 */
				folio_set_hugetlb_restore_reserve(folio);
		} else if (rc < 0) {
			/*
			 * Rare out of memory condition from
			 * vma_needs_reservation call.  Memory allocation is
			 * only attempted if a new entry is needed.  Therefore,
			 * this implies there is not an entry in the
			 * reserve map.
			 *
			 * For shared mappings, no entry in the map indicates
			 * no reservation.  We are done.
			 */
			if (!(vma->vm_flags & VM_MAYSHARE))
				/*
				 * For private mappings, no entry indicates
				 * a reservation is present.  Since we can
				 * not add an entry, set hugetlb_restore_reserve
				 * on the folio so reserve count will be
				 * incremented when freed.  This reserve will
				 * be consumed on a subsequent allocation.
				 */
				folio_set_hugetlb_restore_reserve(folio);
		} else
			/*
			 * No reservation present, do nothing
			 */
			vma_end_reservation(h, vma, address);
	}
}
/*
 * alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve
 * the old one
 * @h: struct hstate old page belongs to
 * @old_folio: Old folio to dissolve
 * @list: List to isolate the page in case we need to
 * Returns 0 on success, otherwise negated error.
 */
static int alloc_and_dissolve_hugetlb_folio(struct hstate *h,
			struct folio *old_folio, struct list_head *list)
{
	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
	int nid = folio_nid(old_folio);
	struct folio *new_folio;
	int ret = 0;

	/*
	 * Before dissolving the folio, we need to allocate a new one for the
	 * pool to remain stable.  Here, we allocate the folio and 'prep' it
	 * by doing everything but actually updating counters and adding to
	 * the pool.  This simplifies and let us do most of the processing
	 * outside of the lock.
	 */
	new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, NULL, NULL);
	if (!new_folio)
		return -ENOMEM;
	__prep_new_hugetlb_folio(h, new_folio);

retry:
	spin_lock_irq(&hugetlb_lock);
	if (!folio_test_hugetlb(old_folio)) {
		/*
		 * Freed from under us. Drop new_folio too.
		 */
		goto free_new;
	} else if (folio_ref_count(old_folio)) {
		bool isolated;

		/*
		 * Someone has grabbed the folio, try to isolate it here.
		 * Fail with -EBUSY if not possible.
		 */
		spin_unlock_irq(&hugetlb_lock);
		isolated = isolate_hugetlb(old_folio, list);
		ret = isolated ? 0 : -EBUSY;
		spin_lock_irq(&hugetlb_lock);
		goto free_new;
	} else if (!folio_test_hugetlb_freed(old_folio)) {
		/*
		 * Folio's refcount is 0 but it has not been enqueued in the
		 * freelist yet. Race window is small, so we can succeed here if
		 * we retry.
		 */
		spin_unlock_irq(&hugetlb_lock);
		cond_resched();
		goto retry;
	} else {
		/*
		 * Ok, old_folio is still a genuine free hugepage. Remove it from
		 * the freelist and decrease the counters. These will be
		 * incremented again when calling __prep_account_new_huge_page()
		 * and enqueue_hugetlb_folio() for new_folio. The counters will
		 * remain stable since this happens under the lock.
		 */
		remove_hugetlb_folio(h, old_folio, false);

		/*
		 * Ref count on new_folio is already zero as it was dropped
		 * earlier.  It can be directly added to the pool free list.
		 */
		__prep_account_new_huge_page(h, nid);
		enqueue_hugetlb_folio(h, new_folio);

		/*
		 * Folio has been replaced, we can safely free the old one.
		 */
		spin_unlock_irq(&hugetlb_lock);
		update_and_free_hugetlb_folio(h, old_folio, false);
	}

	return ret;

free_new:
	spin_unlock_irq(&hugetlb_lock);
	/* Folio has a zero ref count, but needs a ref to be freed */
	folio_ref_unfreeze(new_folio, 1);
	update_and_free_hugetlb_folio(h, new_folio, false);

	return ret;
}
int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
{
	struct hstate *h;
	struct folio *folio = page_folio(page);
	int ret = -EBUSY;

	/*
	 * The page might have been dissolved from under our feet, so make sure
	 * to carefully check the state under the lock.
	 * Return success when racing as if we dissolved the page ourselves.
	 */
	spin_lock_irq(&hugetlb_lock);
	if (folio_test_hugetlb(folio)) {
		h = folio_hstate(folio);
	} else {
		spin_unlock_irq(&hugetlb_lock);
		return 0;
	}
	spin_unlock_irq(&hugetlb_lock);

	/*
	 * Fence off gigantic pages as there is a cyclic dependency between
	 * alloc_contig_range and them. Return -ENOMEM as this has the effect
	 * of bailing out right away without further retrying.
	 */
	if (hstate_is_gigantic(h))
		return -ENOMEM;

	if (folio_ref_count(folio) && isolate_hugetlb(folio, list))
		ret = 0;
	else if (!folio_ref_count(folio))
		ret = alloc_and_dissolve_hugetlb_folio(h, folio, list);

	return ret;
}
struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
				    unsigned long addr, int avoid_reserve)
{
	struct hugepage_subpool *spool = subpool_vma(vma);
	struct hstate *h = hstate_vma(vma);
	struct folio *folio;
	long map_chg, map_commit, nr_pages = pages_per_huge_page(h);
	long gbl_chg;
	int memcg_charge_ret, ret, idx;
	struct hugetlb_cgroup *h_cg = NULL;
	struct mem_cgroup *memcg;
	bool deferred_reserve;
	gfp_t gfp = htlb_alloc_mask(h) | __GFP_RETRY_MAYFAIL;

	memcg = get_mem_cgroup_from_current();
	memcg_charge_ret = mem_cgroup_hugetlb_try_charge(memcg, gfp, nr_pages);
	if (memcg_charge_ret == -ENOMEM) {
		mem_cgroup_put(memcg);
		return ERR_PTR(-ENOMEM);
	}

	idx = hstate_index(h);
	/*
	 * Examine the region/reserve map to determine if the process
	 * has a reservation for the page to be allocated.  A return
	 * code of zero indicates a reservation exists (no change).
	 */
	map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
	if (map_chg < 0) {
		if (!memcg_charge_ret)
			mem_cgroup_cancel_charge(memcg, nr_pages);
		mem_cgroup_put(memcg);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Processes that did not create the mapping will have no
	 * reserves as indicated by the region/reserve map. Check
	 * that the allocation will not exceed the subpool limit.
	 * Allocations for MAP_NORESERVE mappings also need to be
	 * checked against any subpool limit.
	 */
	if (map_chg || avoid_reserve) {
		gbl_chg = hugepage_subpool_get_pages(spool, 1);
		if (gbl_chg < 0)
			goto out_end_reservation;

		/*
		 * Even though there was no reservation in the region/reserve
		 * map, there could be reservations associated with the
		 * subpool that can be used.  This would be indicated if the
		 * return value of hugepage_subpool_get_pages() is zero.
		 * However, if avoid_reserve is specified we still avoid even
		 * the subpool reservations.
		 */
		if (avoid_reserve)
			gbl_chg = 1;
	}

	/* If this allocation is not consuming a reservation, charge it now.
	 */
	deferred_reserve = map_chg || avoid_reserve;
	if (deferred_reserve) {
		ret = hugetlb_cgroup_charge_cgroup_rsvd(
			idx, pages_per_huge_page(h), &h_cg);
		if (ret)
			goto out_subpool_put;
	}

	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
	if (ret)
		goto out_uncharge_cgroup_reservation;

	spin_lock_irq(&hugetlb_lock);
	/*
	 * glb_chg is passed to indicate whether or not a page must be taken
	 * from the global free pool (global change).  gbl_chg == 0 indicates
	 * a reservation exists for the allocation.
	 */
	folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg);
	if (!folio) {
		spin_unlock_irq(&hugetlb_lock);
		folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
		if (!folio)
			goto out_uncharge_cgroup;
		spin_lock_irq(&hugetlb_lock);
		if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
			folio_set_hugetlb_restore_reserve(folio);
			h->resv_huge_pages--;
		}
		list_add(&folio->lru, &h->hugepage_activelist);
		folio_ref_unfreeze(folio, 1);
		/* Fall through */
	}

	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio);
	/* If allocation is not consuming a reservation, also store the
	 * hugetlb_cgroup pointer on the page.
	 */
	if (deferred_reserve) {
		hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
						  h_cg, folio);
	}

	spin_unlock_irq(&hugetlb_lock);

	hugetlb_set_folio_subpool(folio, spool);

	map_commit = vma_commit_reservation(h, vma, addr);
	if (unlikely(map_chg > map_commit)) {
		/*
		 * The page was added to the reservation map between
		 * vma_needs_reservation and vma_commit_reservation.
		 * This indicates a race with hugetlb_reserve_pages.
		 * Adjust for the subpool count incremented above AND
		 * in hugetlb_reserve_pages for the same page.  Also,
		 * the reservation count added in hugetlb_reserve_pages
		 * no longer applies.
		 */
		long rsv_adjust;

		rsv_adjust = hugepage_subpool_put_pages(spool, 1);
		hugetlb_acct_memory(h, -rsv_adjust);
		if (deferred_reserve)
			hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
					pages_per_huge_page(h), folio);
	}

	if (!memcg_charge_ret)
		mem_cgroup_commit_charge(folio, memcg);
	mem_cgroup_put(memcg);

	return folio;

out_uncharge_cgroup:
	hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
out_uncharge_cgroup_reservation:
	if (deferred_reserve)
		hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h),
						    h_cg);
out_subpool_put:
	if (map_chg || avoid_reserve)
		hugepage_subpool_put_pages(spool, 1);
out_end_reservation:
	vma_end_reservation(h, vma, addr);
	if (!memcg_charge_ret)
		mem_cgroup_cancel_charge(memcg, nr_pages);
	mem_cgroup_put(memcg);
	return ERR_PTR(-ENOSPC);
}
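/*
 * Error returns from alloc_hugetlb_folio(): ERR_PTR(-ENOMEM) when the memcg
 * charge or the reserve map update fails, ERR_PTR(-ENOSPC) when no huge
 * page (or subpool quota) is available; fault handlers typically translate
 * the latter into SIGBUS for the faulting task.
 */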
int alloc_bootmem_huge_page(struct hstate *h, int nid)
	__attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
int __alloc_bootmem_huge_page(struct hstate *h, int nid)
{
	struct huge_bootmem_page *m = NULL; /* initialize for clang */
	int nr_nodes, node;

	/* do node specific alloc */
	if (nid != NUMA_NO_NODE) {
		m = memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h),
				0, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
		if (!m)
			return 0;
		goto found;
	}
	/* allocate from next node when distributing huge pages */
	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
		m = memblock_alloc_try_nid_raw(
				huge_page_size(h), huge_page_size(h),
				0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
		/*
		 * Use the beginning of the huge page to store the
		 * huge_bootmem_page struct (until gather_bootmem
		 * puts them into the mem_map).
		 */
		if (m)
			goto found;
	}
	return 0;

found:
	/*
	 * Only initialize the head struct page in memmap_init_reserved_pages,
	 * rest of the struct pages will be initialized by the HugeTLB
	 * subsystem itself.
	 * The head struct page is used to get folio information by the HugeTLB
	 * subsystem like zone id and node id.
	 */
	memblock_reserved_mark_noinit(virt_to_phys((void *)m + PAGE_SIZE),
		huge_page_size(h) - PAGE_SIZE);
	/* Put them into a private list first because mem_map is not up yet */
	INIT_LIST_HEAD(&m->list);
	list_add(&m->list, &huge_boot_pages);
	m->hstate = h;
	return 1;
}
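/*
 * This path is driven by boot-time pool sizing (the "hugepagesz=" /
 * "hugepages=" command line parameters) for gigantic sizes: memory is taken
 * straight from memblock long before the buddy allocator exists, and the
 * ranges are only turned into proper hugetlb folios later by
 * gather_bootmem_prealloc().
 */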
/* Initialize [start_page:end_page_number] tail struct pages of a hugepage */
static void __init hugetlb_folio_init_tail_vmemmap(struct folio *folio,
					unsigned long start_page_number,
					unsigned long end_page_number)
{
	enum zone_type zone = zone_idx(folio_zone(folio));
	int nid = folio_nid(folio);
	unsigned long head_pfn = folio_pfn(folio);
	unsigned long pfn, end_pfn = head_pfn + end_page_number;
	int ret;

	for (pfn = head_pfn + start_page_number; pfn < end_pfn; pfn++) {
		struct page *page = pfn_to_page(pfn);

		__init_single_page(page, pfn, zone, nid);
		prep_compound_tail((struct page *)folio, pfn - head_pfn);
		ret = page_ref_freeze(page, 1);
		VM_BUG_ON(!ret);
	}
}
static void __init hugetlb_folio_init_vmemmap(struct folio *folio,
					      struct hstate *h,
					      unsigned long nr_pages)
{
	int ret;

	/* Prepare folio head */
	__folio_clear_reserved(folio);
	__folio_set_head(folio);
	ret = folio_ref_freeze(folio, 1);
	VM_BUG_ON(!ret);
	/* Initialize the necessary tail struct pages */
	hugetlb_folio_init_tail_vmemmap(folio, 1, nr_pages);
	prep_compound_head((struct page *)folio, huge_page_order(h));
}
static void __init prep_and_add_bootmem_folios(struct hstate *h,
					struct list_head *folio_list)
{
	unsigned long flags;
	struct folio *folio, *tmp_f;

	/* Send list for bulk vmemmap optimization processing */
	hugetlb_vmemmap_optimize_folios(h, folio_list);

	/* Add all new pool pages to free lists in one lock cycle */
	spin_lock_irqsave(&hugetlb_lock, flags);
	list_for_each_entry_safe(folio, tmp_f, folio_list, lru) {
		if (!folio_test_hugetlb_vmemmap_optimized(folio)) {
			/*
			 * If HVO fails, initialize all tail struct pages
			 * We do not worry about potential long lock hold
			 * time as this is early in boot and there should
			 * be no contention.
			 */
			hugetlb_folio_init_tail_vmemmap(folio,
					HUGETLB_VMEMMAP_RESERVE_PAGES,
					pages_per_huge_page(h));
		}
		__prep_account_new_huge_page(h, folio_nid(folio));
		enqueue_hugetlb_folio(h, folio);
	}
	spin_unlock_irqrestore(&hugetlb_lock, flags);
}
/*
 * Put bootmem huge pages into the standard lists after mem_map is up.
 * Note: This only applies to gigantic (order > MAX_PAGE_ORDER) pages.
 */
static void __init gather_bootmem_prealloc(void)
{
	LIST_HEAD(folio_list);
	struct huge_bootmem_page *m;
	struct hstate *h = NULL, *prev_h = NULL;

	list_for_each_entry(m, &huge_boot_pages, list) {
		struct page *page = virt_to_page(m);
		struct folio *folio = (void *)page;

		h = m->hstate;
		/*
		 * It is possible to have multiple huge page sizes (hstates)
		 * in this list.  If so, process each size separately.
		 */
		if (h != prev_h && prev_h != NULL)
			prep_and_add_bootmem_folios(prev_h, &folio_list);
		prev_h = h;

		VM_BUG_ON(!hstate_is_gigantic(h));
		WARN_ON(folio_ref_count(folio) != 1);

		hugetlb_folio_init_vmemmap(folio, h,
					   HUGETLB_VMEMMAP_RESERVE_PAGES);
		init_new_hugetlb_folio(h, folio);
		list_add(&folio->lru, &folio_list);

		/*
		 * We need to restore the 'stolen' pages to totalram_pages
		 * in order to fix confusing memory reports from free(1) and
		 * other side-effects, like CommitLimit going negative.
		 */
		adjust_managed_page_count(page, pages_per_huge_page(h));
		cond_resched();
	}

	prep_and_add_bootmem_folios(h, &folio_list);
}
static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
{
	unsigned long i;
	char buf[32];

	for (i = 0; i < h->max_huge_pages_node[nid]; ++i) {
		if (hstate_is_gigantic(h)) {
			if (!alloc_bootmem_huge_page(h, nid))
				break;
		} else {
			struct folio *folio;
			gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;

			folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid,
					&node_states[N_MEMORY], NULL);
			if (!folio)
				break;
			free_huge_folio(folio); /* free it into the hugepage allocator */
		}
		cond_resched();
	}
	if (i == h->max_huge_pages_node[nid])
		return;

	string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
	pr_warn("HugeTLB: allocating %u of page size %s failed node%d.  Only allocated %lu hugepages.\n",
		h->max_huge_pages_node[nid], buf, nid, i);
	h->max_huge_pages -= (h->max_huge_pages_node[nid] - i);
	h->max_huge_pages_node[nid] = i;
}
/*
 * NOTE: this routine is called in different contexts for gigantic and
 * non-gigantic pages.
 * - For gigantic pages, this is called early in the boot process and
 *   pages are allocated from memblock allocated or something similar.
 *   Gigantic pages are actually added to pools later with the routine
 *   gather_bootmem_prealloc.
 * - For non-gigantic pages, this is called later in the boot process after
 *   all of mm is up and functional.  Pages are allocated from buddy and
 *   then added to hugetlb pools.
 */
static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{
	unsigned long i;
	struct folio *folio;
	LIST_HEAD(folio_list);
	nodemask_t *node_alloc_noretry;
	bool node_specific_alloc = false;

	/* skip gigantic hugepages allocation if hugetlb_cma enabled */
	if (hstate_is_gigantic(h) && hugetlb_cma_size) {
		pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
		return;
	}

	/* do node specific alloc */
	for_each_online_node(i) {
		if (h->max_huge_pages_node[i] > 0) {
			hugetlb_hstate_alloc_pages_onenode(h, i);
			node_specific_alloc = true;
		}
	}

	if (node_specific_alloc)
		return;

	/* below will do all node balanced alloc */
	if (!hstate_is_gigantic(h)) {
		/*
		 * Bit mask controlling how hard we retry per-node allocations.
		 * Ignore errors as lower level routines can deal with
		 * node_alloc_noretry == NULL.  If this kmalloc fails at boot
		 * time, we are likely in bigger trouble.
		 */
		node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry),
						GFP_KERNEL);
	} else {
		/* allocations done at boot time */
		node_alloc_noretry = NULL;
	}

	/* bit mask controlling how hard we retry per-node allocations */
	if (node_alloc_noretry)
		nodes_clear(*node_alloc_noretry);

	for (i = 0; i < h->max_huge_pages; ++i) {
		if (hstate_is_gigantic(h)) {
			/*
			 * gigantic pages not added to list as they are not
			 * added to pools now.
			 */
			if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
				break;
		} else {
			folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
							node_alloc_noretry);
			if (!folio)
				break;
			list_add(&folio->lru, &folio_list);
		}
		cond_resched();
	}

	/* list will be empty if hstate_is_gigantic */
	prep_and_add_allocated_folios(h, &folio_list);

	if (i < h->max_huge_pages) {
		char buf[32];

		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
		pr_warn("HugeTLB: allocating %lu of page size %s failed.  Only allocated %lu hugepages.\n",
			h->max_huge_pages, buf, i);
		h->max_huge_pages = i;
	}
	kfree(node_alloc_noretry);
}
static void __init hugetlb_init_hstates(void)
{
	struct hstate *h, *h2;

	for_each_hstate(h) {
		/* oversize hugepages were init'ed in early boot */
		if (!hstate_is_gigantic(h))
			hugetlb_hstate_alloc_pages(h);

		/*
		 * Set demote order for each hstate.  Note that
		 * h->demote_order is initially 0.
		 * - We can not demote gigantic pages if runtime freeing
		 *   is not supported, so skip this.
		 * - If CMA allocation is possible, we can not demote
		 *   HUGETLB_PAGE_ORDER or smaller size pages.
		 */
		if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
			continue;
		if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER)
			continue;
		for_each_hstate(h2) {
			if (h2 == h)
				continue;
			if (h2->order < h->order &&
			    h2->order > h->demote_order)
				h->demote_order = h2->order;
		}
	}
}
static void __init report_hugepages(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		char buf[32];

		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
		pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n",
			buf, h->free_huge_pages);
		pr_info("HugeTLB: %d KiB vmemmap can be freed for a %s page\n",
			hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf);
	}
}
#ifdef CONFIG_HIGHMEM
static void try_to_free_low(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
	int i;
	LIST_HEAD(page_list);

	lockdep_assert_held(&hugetlb_lock);
	if (hstate_is_gigantic(h))
		return;

	/*
	 * Collect pages to be freed on a list, and free after dropping lock
	 */
	for_each_node_mask(i, *nodes_allowed) {
		struct folio *folio, *next;
		struct list_head *freel = &h->hugepage_freelists[i];
		list_for_each_entry_safe(folio, next, freel, lru) {
			if (count >= h->nr_huge_pages)
				goto out;
			if (folio_test_highmem(folio))
				continue;
			remove_hugetlb_folio(h, folio, false);
			list_add(&folio->lru, &page_list);
		}
	}

out:
	spin_unlock_irq(&hugetlb_lock);
	update_and_free_pages_bulk(h, &page_list);
	spin_lock_irq(&hugetlb_lock);
}
#else
static inline void try_to_free_low(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
}
#endif
/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
				int delta)
{
	int nr_nodes, node;

	lockdep_assert_held(&hugetlb_lock);
	VM_BUG_ON(delta != -1 && delta != 1);

	if (delta < 0) {
		for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
			if (h->surplus_huge_pages_node[node])
				goto found;
		}
	} else {
		for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
			if (h->surplus_huge_pages_node[node] <
					h->nr_huge_pages_node[node])
				goto found;
		}
	}
	return 0;

found:
	h->surplus_huge_pages += delta;
	h->surplus_huge_pages_node[node] += delta;
	return 1;
}
#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
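/*
 * "Persistent" pages are those explicitly sized into the pool (surplus
 * pages are excluded): this is the quantity nr_hugepages reports and that
 * set_max_huge_pages() below grows or shrinks toward 'count'.
 */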
static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
			      nodemask_t *nodes_allowed)
{
	unsigned long min_count;
	unsigned long allocated;
	struct folio *folio;
	LIST_HEAD(page_list);
	NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);

	/*
	 * Bit mask controlling how hard we retry per-node allocations.
	 * If we can not allocate the bit mask, do not attempt to allocate
	 * the requested huge pages.
	 */
	if (node_alloc_noretry)
		nodes_clear(*node_alloc_noretry);
	else
		return -ENOMEM;

	/*
	 * resize_lock mutex prevents concurrent adjustments to number of
	 * pages in hstate via the proc/sysfs interfaces.
	 */
	mutex_lock(&h->resize_lock);
	flush_free_hpage_work(h);
	spin_lock_irq(&hugetlb_lock);

	/*
	 * Check for a node specific request.
	 * Changing node specific huge page count may require a corresponding
	 * change to the global count.  In any case, the passed node mask
	 * (nodes_allowed) will restrict alloc/free to the specified node.
	 */
	if (nid != NUMA_NO_NODE) {
		unsigned long old_count = count;

		count += persistent_huge_pages(h) -
			 (h->nr_huge_pages_node[nid] -
			  h->surplus_huge_pages_node[nid]);
		/*
		 * User may have specified a large count value which caused the
		 * above calculation to overflow.  In this case, they wanted
		 * to allocate as many huge pages as possible.  Set count to
		 * largest possible value to align with their intention.
		 */
		if (count < old_count)
			count = ULONG_MAX;
	}

	/*
	 * Gigantic pages runtime allocation depend on the capability for large
	 * page range allocation.
	 * If the system does not provide this feature, return an error when
	 * the user tries to allocate gigantic pages but let the user free the
	 * boottime allocated gigantic pages.
	 */
	if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
		if (count > persistent_huge_pages(h)) {
			spin_unlock_irq(&hugetlb_lock);
			mutex_unlock(&h->resize_lock);
			NODEMASK_FREE(node_alloc_noretry);
			return -EINVAL;
		}
		/* Fall through to decrease pool */
	}

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 *
	 * We might race with alloc_surplus_hugetlb_folio() here and be unable
	 * to convert a surplus huge page to a normal huge page. That is
	 * not critical, though, it just means the overall size of the
	 * pool might be one hugepage larger than it needs to be, but
	 * within all the constraints specified by the sysctls.
	 */
	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, nodes_allowed, -1))
			break;
	}

	allocated = 0;
	while (count > (persistent_huge_pages(h) + allocated)) {
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_folio will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock_irq(&hugetlb_lock);

		/* yield cpu to avoid soft lockup */
		cond_resched();

		folio = alloc_pool_huge_folio(h, nodes_allowed,
						node_alloc_noretry);
		if (!folio) {
			prep_and_add_allocated_folios(h, &page_list);
			spin_lock_irq(&hugetlb_lock);
			goto out;
		}

		list_add(&folio->lru, &page_list);
		allocated++;

		/* Bail for signals. Probably ctrl-c from user */
		if (signal_pending(current)) {
			prep_and_add_allocated_folios(h, &page_list);
			spin_lock_irq(&hugetlb_lock);
			goto out;
		}

		spin_lock_irq(&hugetlb_lock);
	}

	/* Add allocated pages to the pool */
	if (!list_empty(&page_list)) {
		spin_unlock_irq(&hugetlb_lock);
		prep_and_add_allocated_folios(h, &page_list);
		spin_lock_irq(&hugetlb_lock);
	}

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations).  Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 *
	 * By placing pages into the surplus state independent of the
	 * overcommit value, we are allowing the surplus pool size to
	 * exceed overcommit. There are few sane options here. Since
	 * alloc_surplus_hugetlb_folio() is checking the global counter,
	 * though, we'll note that we're not allowed to exceed surplus
	 * and won't grow the pool anywhere else. Not until one of the
	 * sysctls are changed, or the surplus pages go out of use.
	 */
	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
	min_count = max(count, min_count);
	try_to_free_low(h, min_count, nodes_allowed);

	/*
	 * Collect pages to be removed on list without dropping lock
	 */
	while (min_count < persistent_huge_pages(h)) {
		folio = remove_pool_hugetlb_folio(h, nodes_allowed, 0);
		if (!folio)
			break;

		list_add(&folio->lru, &page_list);
	}
	/* free the pages after dropping lock */
	spin_unlock_irq(&hugetlb_lock);
	update_and_free_pages_bulk(h, &page_list);
	flush_free_hpage_work(h);
	spin_lock_irq(&hugetlb_lock);

	while (count < persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, nodes_allowed, 1))
			break;
	}
out:
	h->max_huge_pages = persistent_huge_pages(h);
	spin_unlock_irq(&hugetlb_lock);
	mutex_unlock(&h->resize_lock);

	NODEMASK_FREE(node_alloc_noretry);

	return 0;
}
static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio)
{
	int i, nid = folio_nid(folio);
	struct hstate *target_hstate;
	struct page *subpage;
	struct folio *inner_folio;
	int rc = 0;

	target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order);

	remove_hugetlb_folio_for_demote(h, folio, false);
	spin_unlock_irq(&hugetlb_lock);

	/*
	 * If vmemmap already existed for folio, the remove routine above would
	 * have cleared the hugetlb folio flag.  Hence the folio is technically
	 * no longer a hugetlb folio.  hugetlb_vmemmap_restore_folio can only be
	 * passed hugetlb folios and will BUG otherwise.
	 */
	if (folio_test_hugetlb(folio)) {
		rc = hugetlb_vmemmap_restore_folio(h, folio);
		if (rc) {
			/* Allocation of vmemmmap failed, we can not demote folio */
			spin_lock_irq(&hugetlb_lock);
			folio_ref_unfreeze(folio, 1);
			add_hugetlb_folio(h, folio, false);
			return rc;
		}
	}

	/*
	 * Use destroy_compound_hugetlb_folio_for_demote for all huge page
	 * sizes as it will not ref count folios.
	 */
	destroy_compound_hugetlb_folio_for_demote(folio, huge_page_order(h));

	/*
	 * Taking target hstate mutex synchronizes with set_max_huge_pages.
	 * Without the mutex, pages added to target hstate could be marked
	 * as surplus.
	 *
	 * Note that we already hold h->resize_lock.  To prevent deadlock,
	 * use the convention of always taking larger size hstate mutex first.
	 */
	mutex_lock(&target_hstate->resize_lock);
	for (i = 0; i < pages_per_huge_page(h);
				i += pages_per_huge_page(target_hstate)) {
		subpage = folio_page(folio, i);
		inner_folio = page_folio(subpage);
		if (hstate_is_gigantic(target_hstate))
			prep_compound_gigantic_folio_for_demote(inner_folio,
							target_hstate->order);
		else
			prep_compound_page(subpage, target_hstate->order);
		folio_change_private(inner_folio, NULL);
		prep_new_hugetlb_folio(target_hstate, inner_folio, nid);
		free_huge_folio(inner_folio);
	}
	mutex_unlock(&target_hstate->resize_lock);

	spin_lock_irq(&hugetlb_lock);

	/*
	 * Not absolutely necessary, but for consistency update max_huge_pages
	 * based on pool changes for the demoted page.
	 */
	h->max_huge_pages--;
	target_hstate->max_huge_pages +=
		pages_per_huge_page(h) / pages_per_huge_page(target_hstate);

	return rc;
}
static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
	__must_hold(&hugetlb_lock)
{
	int nr_nodes, node;
	struct folio *folio;

	lockdep_assert_held(&hugetlb_lock);

	/* We should never get here if no demote order */
	if (!h->demote_order) {
		pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n");
		return -EINVAL;		/* internal error */
	}

	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
		list_for_each_entry(folio, &h->hugepage_freelists[node], lru) {
			if (folio_test_hwpoison(folio))
				continue;
			return demote_free_hugetlb_folio(h, folio);
		}
	}

	/*
	 * Only way to get here is if all pages on free lists are poisoned.
	 * Return -EBUSY so that caller will not retry.
	 */
	return -EBUSY;
}
#define HSTATE_ATTR_RO(_name) \
	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

#define HSTATE_ATTR_WO(_name) \
	static struct kobj_attribute _name##_attr = __ATTR_WO(_name)

#define HSTATE_ATTR(_name) \
	static struct kobj_attribute _name##_attr = __ATTR_RW(_name)
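/*
 * For reference, HSTATE_ATTR(nr_hugepages) expands (via __ATTR_RW) to a
 * 'struct kobj_attribute nr_hugepages_attr' wired to nr_hugepages_show()
 * and nr_hugepages_store() defined below.
 */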
static struct kobject *hugepages_kobj;
static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];

static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);

static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
{
	int i;

	for (i = 0; i < HUGE_MAX_HSTATE; i++)
		if (hstate_kobjs[i] == kobj) {
			if (nidp)
				*nidp = NUMA_NO_NODE;
			return &hstates[i];
		}

	return kobj_to_node_hstate(kobj, nidp);
}
static ssize_t nr_hugepages_show_common(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h;
	unsigned long nr_huge_pages;
	int nid;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE)
		nr_huge_pages = h->nr_huge_pages;
	else
		nr_huge_pages = h->nr_huge_pages_node[nid];

	return sysfs_emit(buf, "%lu\n", nr_huge_pages);
}
static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
					   struct hstate *h, int nid,
					   unsigned long count, size_t len)
{
	int err;
	nodemask_t nodes_allowed, *n_mask;

	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return -EINVAL;

	if (nid == NUMA_NO_NODE) {
		/*
		 * global hstate attribute
		 */
		if (!(obey_mempolicy &&
				init_nodemask_of_mempolicy(&nodes_allowed)))
			n_mask = &node_states[N_MEMORY];
		else
			n_mask = &nodes_allowed;
	} else {
		/*
		 * Node specific request.  count adjustment happens in
		 * set_max_huge_pages() after acquiring hugetlb_lock.
		 */
		init_nodemask_of_node(&nodes_allowed, nid);
		n_mask = &nodes_allowed;
	}

	err = set_max_huge_pages(h, count, nid, n_mask);

	return err ? err : len;
}
4041 static ssize_t
nr_hugepages_store_common(bool obey_mempolicy
,
4042 struct kobject
*kobj
, const char *buf
,
4046 unsigned long count
;
4050 err
= kstrtoul(buf
, 10, &count
);
4054 h
= kobj_to_hstate(kobj
, &nid
);
4055 return __nr_hugepages_store_common(obey_mempolicy
, h
, nid
, count
, len
);
4058 static ssize_t
nr_hugepages_show(struct kobject
*kobj
,
4059 struct kobj_attribute
*attr
, char *buf
)
4061 return nr_hugepages_show_common(kobj
, attr
, buf
);
4064 static ssize_t
nr_hugepages_store(struct kobject
*kobj
,
4065 struct kobj_attribute
*attr
, const char *buf
, size_t len
)
4067 return nr_hugepages_store_common(false, kobj
, buf
, len
);
4069 HSTATE_ATTR(nr_hugepages
);
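/*
 * Illustrative example (not part of the original source): the nr_hugepages
 * attribute defined above is the per-size pool knob exported under sysfs,
 * e.g. for the 2MB hstate on x86:
 *
 *	# cat /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *	# echo 512 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *
 * Writes funnel through nr_hugepages_store_common() and ultimately
 * set_max_huge_pages().
 */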
/*
 * hstate attribute for optionally mempolicy-based constraint on persistent
 * huge page alloc/free.
 */
static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   char *buf)
{
	return nr_hugepages_show_common(kobj, attr, buf);
}

static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t len)
{
	return nr_hugepages_store_common(true, kobj, buf, len);
}
HSTATE_ATTR(nr_hugepages_mempolicy);
static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj, NULL);
	return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages);
}

static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int err;
	unsigned long input;
	struct hstate *h = kobj_to_hstate(kobj, NULL);

	if (hstate_is_gigantic(h))
		return -EINVAL;

	err = kstrtoul(buf, 10, &input);
	if (err)
		return err;

	spin_lock_irq(&hugetlb_lock);
	h->nr_overcommit_huge_pages = input;
	spin_unlock_irq(&hugetlb_lock);

	return count;
}
HSTATE_ATTR(nr_overcommit_hugepages);
static ssize_t free_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h;
	unsigned long free_huge_pages;
	int nid;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE)
		free_huge_pages = h->free_huge_pages;
	else
		free_huge_pages = h->free_huge_pages_node[nid];

	return sysfs_emit(buf, "%lu\n", free_huge_pages);
}
HSTATE_ATTR_RO(free_hugepages);

static ssize_t resv_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj, NULL);
	return sysfs_emit(buf, "%lu\n", h->resv_huge_pages);
}
HSTATE_ATTR_RO(resv_hugepages);

static ssize_t surplus_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h;
	unsigned long surplus_huge_pages;
	int nid;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE)
		surplus_huge_pages = h->surplus_huge_pages;
	else
		surplus_huge_pages = h->surplus_huge_pages_node[nid];

	return sysfs_emit(buf, "%lu\n", surplus_huge_pages);
}
HSTATE_ATTR_RO(surplus_hugepages);
static ssize_t demote_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t len)
{
	unsigned long nr_demote;
	unsigned long nr_available;
	nodemask_t nodes_allowed, *n_mask;
	struct hstate *h;
	int err;
	int nid;

	err = kstrtoul(buf, 10, &nr_demote);
	if (err)
		return err;
	h = kobj_to_hstate(kobj, &nid);

	if (nid != NUMA_NO_NODE) {
		init_nodemask_of_node(&nodes_allowed, nid);
		n_mask = &nodes_allowed;
	} else {
		n_mask = &node_states[N_MEMORY];
	}

	/* Synchronize with other sysfs operations modifying huge pages */
	mutex_lock(&h->resize_lock);
	spin_lock_irq(&hugetlb_lock);

	while (nr_demote) {
		/*
		 * Check for available pages to demote each time through the
		 * loop as demote_pool_huge_page will drop hugetlb_lock.
		 */
		if (nid != NUMA_NO_NODE)
			nr_available = h->free_huge_pages_node[nid];
		else
			nr_available = h->free_huge_pages;
		nr_available -= h->resv_huge_pages;
		if (!nr_available)
			break;

		err = demote_pool_huge_page(h, n_mask);
		if (err)
			break;

		nr_demote--;
	}

	spin_unlock_irq(&hugetlb_lock);
	mutex_unlock(&h->resize_lock);

	if (err)
		return err;
	return len;
}
HSTATE_ATTR_WO(demote);
static ssize_t demote_size_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj, NULL);
	unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K;

	return sysfs_emit(buf, "%lukB\n", demote_size);
}

static ssize_t demote_size_store(struct kobject *kobj,
					struct kobj_attribute *attr,
					const char *buf, size_t count)
{
	struct hstate *h, *demote_hstate;
	unsigned long demote_size;
	unsigned int demote_order;

	demote_size = (unsigned long)memparse(buf, NULL);

	demote_hstate = size_to_hstate(demote_size);
	if (!demote_hstate)
		return -EINVAL;
	demote_order = demote_hstate->order;
	if (demote_order < HUGETLB_PAGE_ORDER)
		return -EINVAL;

	/* demote order must be smaller than hstate order */
	h = kobj_to_hstate(kobj, NULL);
	if (demote_order >= h->order)
		return -EINVAL;

	/* resize_lock synchronizes access to demote size and writes */
	mutex_lock(&h->resize_lock);
	h->demote_order = demote_order;
	mutex_unlock(&h->resize_lock);

	return count;
}
HSTATE_ATTR(demote_size);
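/*
 * Illustrative example (not part of the original source): demote_size selects
 * the target hstate and demote triggers the conversion, e.g. splitting two
 * 1GB pages on x86 into 2MB pages:
 *
 *	# echo 2048kB > /sys/kernel/mm/hugepages/hugepages-1048576kB/demote_size
 *	# echo 2 > /sys/kernel/mm/hugepages/hugepages-1048576kB/demote
 *
 * Each write to demote frees pages from this hstate's free list and hands the
 * resulting smaller folios to the target hstate via demote_free_hugetlb_folio().
 */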
static struct attribute *hstate_attrs[] = {
	&nr_hugepages_attr.attr,
	&nr_overcommit_hugepages_attr.attr,
	&free_hugepages_attr.attr,
	&resv_hugepages_attr.attr,
	&surplus_hugepages_attr.attr,
	&nr_hugepages_mempolicy_attr.attr,
	NULL,
};

static const struct attribute_group hstate_attr_group = {
	.attrs = hstate_attrs,
};

static struct attribute *hstate_demote_attrs[] = {
	&demote_size_attr.attr,
	&demote_attr.attr,
	NULL,
};

static const struct attribute_group hstate_demote_attr_group = {
	.attrs = hstate_demote_attrs,
};
static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
				    struct kobject **hstate_kobjs,
				    const struct attribute_group *hstate_attr_group)
{
	int retval;
	int hi = hstate_index(h);

	hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
	if (!hstate_kobjs[hi])
		return -ENOMEM;

	retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
	if (retval) {
		kobject_put(hstate_kobjs[hi]);
		hstate_kobjs[hi] = NULL;
		return retval;
	}

	if (h->demote_order) {
		retval = sysfs_create_group(hstate_kobjs[hi],
					    &hstate_demote_attr_group);
		if (retval) {
			pr_warn("HugeTLB unable to create demote interfaces for %s\n", h->name);
			sysfs_remove_group(hstate_kobjs[hi], hstate_attr_group);
			kobject_put(hstate_kobjs[hi]);
			hstate_kobjs[hi] = NULL;
			return retval;
		}
	}

	return 0;
}

#ifdef CONFIG_NUMA
static bool hugetlb_sysfs_initialized __ro_after_init;
/*
 * node_hstate/s - associate per node hstate attributes, via their kobjects,
 * with node devices in node_devices[] using a parallel array.  The array
 * index of a node device or _hstate == node id.
 * This is here to avoid any static dependency of the node device driver, in
 * the base kernel, on the hugetlb module.
 */
struct node_hstate {
	struct kobject		*hugepages_kobj;
	struct kobject		*hstate_kobjs[HUGE_MAX_HSTATE];
};
static struct node_hstate node_hstates[MAX_NUMNODES];

/*
 * A subset of global hstate attributes for node devices
 */
static struct attribute *per_node_hstate_attrs[] = {
	&nr_hugepages_attr.attr,
	&free_hugepages_attr.attr,
	&surplus_hugepages_attr.attr,
	NULL,
};

static const struct attribute_group per_node_hstate_attr_group = {
	.attrs = per_node_hstate_attrs,
};
/*
 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
 * Returns node id via non-NULL nidp.
 */
static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
{
	int nid;

	for (nid = 0; nid < nr_node_ids; nid++) {
		struct node_hstate *nhs = &node_hstates[nid];
		int i;

		for (i = 0; i < HUGE_MAX_HSTATE; i++)
			if (nhs->hstate_kobjs[i] == kobj) {
				if (nidp)
					*nidp = nid;
				return &hstates[i];
			}
	}

	BUG();
	return NULL;
}
/*
 * Unregister hstate attributes from a single node device.
 * No-op if no hstate attributes attached.
 */
void hugetlb_unregister_node(struct node *node)
{
	struct hstate *h;
	struct node_hstate *nhs = &node_hstates[node->dev.id];

	if (!nhs->hugepages_kobj)
		return;		/* no hstate attributes */

	for_each_hstate(h) {
		int idx = hstate_index(h);
		struct kobject *hstate_kobj = nhs->hstate_kobjs[idx];

		if (!hstate_kobj)
			continue;
		if (h->demote_order)
			sysfs_remove_group(hstate_kobj, &hstate_demote_attr_group);
		sysfs_remove_group(hstate_kobj, &per_node_hstate_attr_group);
		kobject_put(hstate_kobj);
		nhs->hstate_kobjs[idx] = NULL;
	}

	kobject_put(nhs->hugepages_kobj);
	nhs->hugepages_kobj = NULL;
}
/*
 * Register hstate attributes for a single node device.
 * No-op if attributes already registered.
 */
void hugetlb_register_node(struct node *node)
{
	struct hstate *h;
	struct node_hstate *nhs = &node_hstates[node->dev.id];
	int err;

	if (!hugetlb_sysfs_initialized)
		return;

	if (nhs->hugepages_kobj)
		return;		/* already allocated */

	nhs->hugepages_kobj = kobject_create_and_add("hugepages",
						      &node->dev.kobj);
	if (!nhs->hugepages_kobj)
		return;

	for_each_hstate(h) {
		err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
					       nhs->hstate_kobjs,
					       &per_node_hstate_attr_group);
		if (err) {
			pr_err("HugeTLB: Unable to add hstate %s for node %d\n",
				h->name, node->dev.id);
			hugetlb_unregister_node(node);
			break;
		}
	}
}
/*
 * hugetlb init time:  register hstate attributes for all registered node
 * devices of nodes that have memory.  All on-line nodes should have
 * registered their associated device by this time.
 */
static void __init hugetlb_register_all_nodes(void)
{
	int nid;

	for_each_online_node(nid)
		hugetlb_register_node(node_devices[nid]);
}
#else	/* !CONFIG_NUMA */

static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
{
	BUG();
	if (nidp)
		*nidp = -1;
	return NULL;
}

static void hugetlb_register_all_nodes(void) { }

#endif
#ifdef CONFIG_CMA
static void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_check(void)
{
}
#endif

static void __init hugetlb_sysfs_init(void)
{
	struct hstate *h;
	int err;

	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
	if (!hugepages_kobj)
		return;

	for_each_hstate(h) {
		err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
					       hstate_kobjs, &hstate_attr_group);
		if (err)
			pr_err("HugeTLB: Unable to add hstate %s", h->name);
	}

#ifdef CONFIG_NUMA
	hugetlb_sysfs_initialized = true;
#endif
	hugetlb_register_all_nodes();
}

#ifdef CONFIG_SYSCTL
static void hugetlb_sysctl_init(void);
#else
static inline void hugetlb_sysctl_init(void) { }
#endif
static int __init hugetlb_init(void)
{
	int i;

	BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE <
			__NR_HPAGEFLAGS);

	if (!hugepages_supported()) {
		if (hugetlb_max_hstate || default_hstate_max_huge_pages)
			pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n");
		return 0;
	}

	/*
	 * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists.  Some
	 * architectures depend on setup being done here.
	 */
	hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
	if (!parsed_default_hugepagesz) {
		/*
		 * If we did not parse a default huge page size, set
		 * default_hstate_idx to HPAGE_SIZE hstate. And, if the
		 * number of huge pages for this default size was implicitly
		 * specified, set that here as well.
		 * Note that the implicit setting will overwrite an explicit
		 * setting.  A warning will be printed in this case.
		 */
		default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE));
		if (default_hstate_max_huge_pages) {
			if (default_hstate.max_huge_pages) {
				char buf[32];

				string_get_size(huge_page_size(&default_hstate),
					1, STRING_UNITS_2, buf, 32);
				pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
					default_hstate.max_huge_pages, buf);
				pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n",
					default_hstate_max_huge_pages);
			}
			default_hstate.max_huge_pages =
				default_hstate_max_huge_pages;

			for_each_online_node(i)
				default_hstate.max_huge_pages_node[i] =
					default_hugepages_in_node[i];
		}
	}

	hugetlb_cma_check();
	hugetlb_init_hstates();
	gather_bootmem_prealloc();

	hugetlb_sysfs_init();
	hugetlb_cgroup_file_init();
	hugetlb_sysctl_init();

#ifdef CONFIG_SMP
	num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
#else
	num_fault_mutexes = 1;
#endif
	hugetlb_fault_mutex_table =
		kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
			      GFP_KERNEL);
	BUG_ON(!hugetlb_fault_mutex_table);

	for (i = 0; i < num_fault_mutexes; i++)
		mutex_init(&hugetlb_fault_mutex_table[i]);
	return 0;
}
subsys_initcall(hugetlb_init);
/* Overwritten by architectures with more huge page sizes */
bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size)
{
	return size == HPAGE_SIZE;
}
void __init hugetlb_add_hstate(unsigned int order)
{
	struct hstate *h;
	unsigned long i;

	if (size_to_hstate(PAGE_SIZE << order)) {
		return;
	}
	BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
	BUG_ON(order < order_base_2(__NR_USED_SUBPAGE));
	h = &hstates[hugetlb_max_hstate++];
	mutex_init(&h->resize_lock);
	h->order = order;
	h->mask = ~(huge_page_size(h) - 1);
	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
	INIT_LIST_HEAD(&h->hugepage_activelist);
	h->next_nid_to_alloc = first_memory_node;
	h->next_nid_to_free = first_memory_node;
	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
					huge_page_size(h)/SZ_1K);

	parsed_hstate = h;
}
bool __init __weak hugetlb_node_alloc_supported(void)
{
	return true;
}

static void __init hugepages_clear_pages_in_node(void)
{
	if (!hugetlb_max_hstate) {
		default_hstate_max_huge_pages = 0;
		memset(default_hugepages_in_node, 0,
			sizeof(default_hugepages_in_node));
	} else {
		parsed_hstate->max_huge_pages = 0;
		memset(parsed_hstate->max_huge_pages_node, 0,
			sizeof(parsed_hstate->max_huge_pages_node));
	}
}
/*
 * hugepages command line processing
 * hugepages normally follows a valid hugepagesz or default_hugepagesz
 * specification.  If not, ignore the hugepages value.  hugepages can also
 * be the first huge page command line option in which case it implicitly
 * specifies the number of huge pages for the default size.
 */
static int __init hugepages_setup(char *s)
{
	unsigned long *mhp;
	static unsigned long *last_mhp;
	int node = NUMA_NO_NODE;
	int count;
	unsigned long tmp;
	char *p = s;

	if (!parsed_valid_hugepagesz) {
		pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
		parsed_valid_hugepagesz = true;
		return 1;
	}

	/*
	 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
	 * yet, so this hugepages= parameter goes to the "default hstate".
	 * Otherwise, it goes with the previously parsed hugepagesz or
	 * default_hugepagesz.
	 */
	else if (!hugetlb_max_hstate)
		mhp = &default_hstate_max_huge_pages;
	else
		mhp = &parsed_hstate->max_huge_pages;

	if (mhp == last_mhp) {
		pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s);
		return 1;
	}

	while (*p) {
		count = 0;
		if (sscanf(p, "%lu%n", &tmp, &count) != 1)
			goto invalid;
		/* Parameter is node format */
		if (p[count] == ':') {
			if (!hugetlb_node_alloc_supported()) {
				pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n");
				return 1;
			}
			if (tmp >= MAX_NUMNODES || !node_online(tmp))
				goto invalid;
			node = array_index_nospec(tmp, MAX_NUMNODES);
			p += count + 1;
			/* Parse hugepages */
			if (sscanf(p, "%lu%n", &tmp, &count) != 1)
				goto invalid;
			if (!hugetlb_max_hstate)
				default_hugepages_in_node[node] = tmp;
			else
				parsed_hstate->max_huge_pages_node[node] = tmp;
			*mhp += tmp;
			/* Go to parse next node */
			if (p[count] == ',')
				p += count + 1;
			else
				break;
		} else {
			if (p != s)
				goto invalid;
			*mhp = tmp;
			break;
		}
	}

	/*
	 * Global state is always initialized later in hugetlb_init.
	 * But we need to allocate gigantic hstates here early to still
	 * use the bootmem allocator.
	 */
	if (hugetlb_max_hstate && hstate_is_gigantic(parsed_hstate))
		hugetlb_hstate_alloc_pages(parsed_hstate);

	last_mhp = mhp;

	return 1;

invalid:
	pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p);
	hugepages_clear_pages_in_node();
	return 0;
}
__setup("hugepages=", hugepages_setup);
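/*
 * Illustrative example (not part of the original source) of command lines
 * accepted by the parser above:
 *
 *	hugepagesz=2M hugepages=512		512 pages of the 2MB size
 *	hugepages=64				64 pages of the default size
 *	hugepagesz=1G hugepages=0:2,1:2		node format: 2 pages on node 0
 *						and 2 pages on node 1
 *
 * The "<node>:<count>[,...]" form is only honoured when the architecture
 * supports node specific allocation (hugetlb_node_alloc_supported()).
 */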
/*
 * hugepagesz command line processing
 * A specific huge page size can only be specified once with hugepagesz.
 * hugepagesz is followed by hugepages on the command line.  The global
 * variable 'parsed_valid_hugepagesz' is used to determine if prior
 * hugepagesz argument was valid.
 */
static int __init hugepagesz_setup(char *s)
{
	unsigned long size;
	struct hstate *h;

	parsed_valid_hugepagesz = false;
	size = (unsigned long)memparse(s, NULL);

	if (!arch_hugetlb_valid_size(size)) {
		pr_err("HugeTLB: unsupported hugepagesz=%s\n", s);
		return 0;
	}

	h = size_to_hstate(size);
	if (h) {
		/*
		 * hstate for this size already exists.  This is normally
		 * an error, but is allowed if the existing hstate is the
		 * default hstate.  More specifically, it is only allowed if
		 * the number of huge pages for the default hstate was not
		 * previously specified.
		 */
		if (!parsed_default_hugepagesz ||  h != &default_hstate ||
		    default_hstate.max_huge_pages) {
			pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s);
			return 0;
		}

		/*
		 * No need to call hugetlb_add_hstate() as hstate already
		 * exists.  But, do set parsed_hstate so that a following
		 * hugepages= parameter will be applied to this hstate.
		 */
		parsed_hstate = h;
		parsed_valid_hugepagesz = true;
		return 1;
	}

	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
	parsed_valid_hugepagesz = true;
	return 1;
}
__setup("hugepagesz=", hugepagesz_setup);
/*
 * default_hugepagesz command line input
 * Only one instance of default_hugepagesz allowed on command line.
 */
static int __init default_hugepagesz_setup(char *s)
{
	unsigned long size;
	int i;

	parsed_valid_hugepagesz = false;
	if (parsed_default_hugepagesz) {
		pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);
		return 0;
	}

	size = (unsigned long)memparse(s, NULL);

	if (!arch_hugetlb_valid_size(size)) {
		pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s);
		return 0;
	}

	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
	parsed_valid_hugepagesz = true;
	parsed_default_hugepagesz = true;
	default_hstate_idx = hstate_index(size_to_hstate(size));

	/*
	 * The number of default huge pages (for this size) could have been
	 * specified as the first hugetlb parameter: hugepages=X.  If so,
	 * then default_hstate_max_huge_pages is set.  If the default huge
	 * page size is gigantic (> MAX_PAGE_ORDER), then the pages must be
	 * allocated here from bootmem allocator.
	 */
	if (default_hstate_max_huge_pages) {
		default_hstate.max_huge_pages = default_hstate_max_huge_pages;
		for_each_online_node(i)
			default_hstate.max_huge_pages_node[i] =
				default_hugepages_in_node[i];
		if (hstate_is_gigantic(&default_hstate))
			hugetlb_hstate_alloc_pages(&default_hstate);
		default_hstate_max_huge_pages = 0;
	}

	return 1;
}
__setup("default_hugepagesz=", default_hugepagesz_setup);
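/*
 * Illustrative example (not part of the original source):
 * "default_hugepagesz=1G hugepages=16" makes the 1GB hstate the default and
 * reserves 16 pages for it.  Because 1GB is a gigantic size, the pages are
 * allocated from bootmem in the branch above rather than being deferred to
 * hugetlb_init().
 */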
static nodemask_t *policy_mbind_nodemask(gfp_t gfp)
{
#ifdef CONFIG_NUMA
	struct mempolicy *mpol = get_task_policy(current);

	/*
	 * Only enforce MPOL_BIND policy which overlaps with cpuset policy
	 * (from policy_nodemask) specifically for hugetlb case
	 */
	if (mpol->mode == MPOL_BIND &&
		(apply_policy_zone(mpol, gfp_zone(gfp)) &&
		 cpuset_nodemask_valid_mems_allowed(&mpol->nodes)))
		return &mpol->nodes;
#endif
	return NULL;
}
static unsigned int allowed_mems_nr(struct hstate *h)
{
	int node;
	unsigned int nr = 0;
	nodemask_t *mbind_nodemask;
	unsigned int *array = h->free_huge_pages_node;
	gfp_t gfp_mask = htlb_alloc_mask(h);

	mbind_nodemask = policy_mbind_nodemask(gfp_mask);
	for_each_node_mask(node, cpuset_current_mems_allowed) {
		if (!mbind_nodemask || node_isset(node, *mbind_nodemask))
			nr += array[node];
	}

	return nr;
}
#ifdef CONFIG_SYSCTL
static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
					  void *buffer, size_t *length,
					  loff_t *ppos, unsigned long *out)
{
	struct ctl_table dup_table;

	/*
	 * In order to avoid races with __do_proc_doulongvec_minmax(), we
	 * can duplicate the @table and alter the duplicate of it.
	 */
	dup_table = *table;
	dup_table.data = out;

	return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
}
static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
			 struct ctl_table *table, int write,
			 void *buffer, size_t *length, loff_t *ppos)
{
	struct hstate *h = &default_hstate;
	unsigned long tmp = h->max_huge_pages;
	int ret;

	if (!hugepages_supported())
		return -EOPNOTSUPP;

	ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
					     &tmp);
	if (ret)
		goto out;

	if (write)
		ret = __nr_hugepages_store_common(obey_mempolicy, h,
						  NUMA_NO_NODE, tmp, *length);
out:
	return ret;
}

static int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			  void *buffer, size_t *length, loff_t *ppos)
{
	return hugetlb_sysctl_handler_common(false, table, write,
							buffer, length, ppos);
}

#ifdef CONFIG_NUMA
static int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
			  void *buffer, size_t *length, loff_t *ppos)
{
	return hugetlb_sysctl_handler_common(true, table, write,
							buffer, length, ppos);
}
#endif /* CONFIG_NUMA */
static int hugetlb_overcommit_handler(struct ctl_table *table, int write,
			void *buffer, size_t *length, loff_t *ppos)
{
	struct hstate *h = &default_hstate;
	unsigned long tmp;
	int ret;

	if (!hugepages_supported())
		return -EOPNOTSUPP;

	tmp = h->nr_overcommit_huge_pages;

	if (write && hstate_is_gigantic(h))
		return -EINVAL;

	ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
					     &tmp);
	if (ret)
		goto out;

	if (write) {
		spin_lock_irq(&hugetlb_lock);
		h->nr_overcommit_huge_pages = tmp;
		spin_unlock_irq(&hugetlb_lock);
	}
out:
	return ret;
}
static struct ctl_table hugetlb_table[] = {
	{
		.procname	= "nr_hugepages",
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= hugetlb_sysctl_handler,
	},
#ifdef CONFIG_NUMA
	{
		.procname	= "nr_hugepages_mempolicy",
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= &hugetlb_mempolicy_sysctl_handler,
	},
#endif
	{
		.procname	= "hugetlb_shm_group",
		.data		= &sysctl_hugetlb_shm_group,
		.maxlen		= sizeof(gid_t),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "nr_overcommit_hugepages",
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= hugetlb_overcommit_handler,
	},
};

static void hugetlb_sysctl_init(void)
{
	register_sysctl_init("vm", hugetlb_table);
}
#endif /* CONFIG_SYSCTL */
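/*
 * Illustrative example (not part of the original source): the table above
 * backs the classic procfs knobs, e.g.
 *
 *	# sysctl vm.nr_hugepages=1024
 *	# cat /proc/sys/vm/nr_overcommit_hugepages
 *
 * A write to vm.nr_hugepages takes the same __nr_hugepages_store_common()
 * path as the sysfs nr_hugepages attribute, with no node or mempolicy
 * constraint (NUMA_NO_NODE).
 */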
void hugetlb_report_meminfo(struct seq_file *m)
{
	struct hstate *h;
	unsigned long total = 0;

	if (!hugepages_supported())
		return;

	for_each_hstate(h) {
		unsigned long count = h->nr_huge_pages;

		total += huge_page_size(h) * count;

		if (h == &default_hstate)
			seq_printf(m,
				   "HugePages_Total:   %5lu\n"
				   "HugePages_Free:    %5lu\n"
				   "HugePages_Rsvd:    %5lu\n"
				   "HugePages_Surp:    %5lu\n"
				   "Hugepagesize:   %8lu kB\n",
				   count,
				   h->free_huge_pages,
				   h->resv_huge_pages,
				   h->surplus_huge_pages,
				   huge_page_size(h) / SZ_1K);
	}

	seq_printf(m, "Hugetlb:        %8lu kB\n", total / SZ_1K);
}
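/*
 * Illustrative example (not part of the original source) of the /proc/meminfo
 * output produced above for a 2MB default hstate with 512 pool pages, 200 of
 * them free (512 * 2048 kB = 1048576 kB):
 *
 *	HugePages_Total:     512
 *	HugePages_Free:      200
 *	HugePages_Rsvd:        0
 *	HugePages_Surp:        0
 *	Hugepagesize:       2048 kB
 *	Hugetlb:         1048576 kB
 *
 * "Hugetlb" sums nr_huge_pages * huge_page_size over every hstate, not just
 * the default one.
 */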
int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
	struct hstate *h = &default_hstate;

	if (!hugepages_supported())
		return 0;

	return sysfs_emit_at(buf, len,
			     "Node %d HugePages_Total: %5u\n"
			     "Node %d HugePages_Free:  %5u\n"
			     "Node %d HugePages_Surp:  %5u\n",
			     nid, h->nr_huge_pages_node[nid],
			     nid, h->free_huge_pages_node[nid],
			     nid, h->surplus_huge_pages_node[nid]);
}
void hugetlb_show_meminfo_node(int nid)
{
	struct hstate *h;

	if (!hugepages_supported())
		return;

	for_each_hstate(h)
		printk("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
			nid,
			h->nr_huge_pages_node[nid],
			h->free_huge_pages_node[nid],
			h->surplus_huge_pages_node[nid],
			huge_page_size(h) / SZ_1K);
}
void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
{
	seq_printf(m, "HugetlbPages:\t%8lu kB\n",
		   K(atomic_long_read(&mm->hugetlb_usage)));
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	struct hstate *h;
	unsigned long nr_total_pages = 0;

	for_each_hstate(h)
		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
	return nr_total_pages;
}
static int hugetlb_acct_memory(struct hstate *h, long delta)
{
	int ret = -ENOMEM;

	if (!delta)
		return 0;

	spin_lock_irq(&hugetlb_lock);
	/*
	 * When cpuset is configured, it breaks the strict hugetlb page
	 * reservation as the accounting is done on a global variable. Such
	 * reservation is completely rubbish in the presence of cpuset because
	 * the reservation is not checked against page availability for the
	 * current cpuset. An application can still potentially be OOM'ed by
	 * the kernel if there is a lack of free htlb pages in the cpuset that
	 * the task is in.
	 * Attempting to enforce strict accounting with cpuset is almost
	 * impossible (or too ugly) because cpusets are so fluid that tasks or
	 * memory nodes can be dynamically moved between cpusets.
	 *
	 * The change of semantics for shared hugetlb mapping with cpuset is
	 * undesirable. However, in order to preserve some of the semantics,
	 * we fall back to check against current free page availability as
	 * a best attempt and hopefully to minimize the impact of changing
	 * semantics that cpuset has.
	 *
	 * Apart from cpuset, we also have memory policy mechanism that
	 * also determines from which node the kernel will allocate memory
	 * in a NUMA system. So similar to cpuset, we also should consider
	 * the memory policy of the current task. Similar to the description
	 * above.
	 */
	if (delta > 0) {
		if (gather_surplus_pages(h, delta) < 0)
			goto out;

		if (delta > allowed_mems_nr(h)) {
			return_unused_surplus_pages(h, delta);
			goto out;
		}
	}

	ret = 0;
	if (delta < 0)
		return_unused_surplus_pages(h, (unsigned long) -delta);

out:
	spin_unlock_irq(&hugetlb_lock);
	return ret;
}
static void hugetlb_vm_op_open(struct vm_area_struct *vma)
{
	struct resv_map *resv = vma_resv_map(vma);

	/*
	 * HPAGE_RESV_OWNER indicates a private mapping.
	 * This new VMA should share its siblings reservation map if present.
	 * The VMA will only ever have a valid reservation map pointer where
	 * it is being copied for another still existing VMA.  As that VMA
	 * has a reference to the reservation map it cannot disappear until
	 * after this open call completes.  It is therefore safe to take a
	 * new reference here without additional locking.
	 */
	if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		resv_map_dup_hugetlb_cgroup_uncharge_info(resv);
		kref_get(&resv->refs);
	}

	/*
	 * vma_lock structure for sharable mappings is vma specific.
	 * Clear old pointer (if copied via vm_area_dup) and allocate
	 * new structure.  Before clearing, make sure vma_lock is not
	 * for this vma.
	 */
	if (vma->vm_flags & VM_MAYSHARE) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		if (vma_lock) {
			if (vma_lock->vma != vma) {
				vma->vm_private_data = NULL;
				hugetlb_vma_lock_alloc(vma);
			} else
				pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__);
		} else
			hugetlb_vma_lock_alloc(vma);
	}
}
static void hugetlb_vm_op_close(struct vm_area_struct *vma)
{
	struct hstate *h = hstate_vma(vma);
	struct resv_map *resv;
	struct hugepage_subpool *spool = subpool_vma(vma);
	unsigned long reserve, start, end;
	long gbl_reserve;

	hugetlb_vma_lock_free(vma);

	resv = vma_resv_map(vma);
	if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		return;

	start = vma_hugecache_offset(h, vma, vma->vm_start);
	end = vma_hugecache_offset(h, vma, vma->vm_end);

	reserve = (end - start) - region_count(resv, start, end);
	hugetlb_cgroup_uncharge_counter(resv, start, end);
	if (reserve) {
		/*
		 * Decrement reserve counts.  The global reserve count may be
		 * adjusted if the subpool has a minimum size.
		 */
		gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
		hugetlb_acct_memory(h, -gbl_reserve);
	}

	kref_put(&resv->refs, resv_map_release);
}
static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
{
	if (addr & ~(huge_page_mask(hstate_vma(vma))))
		return -EINVAL;

	/*
	 * PMD sharing is only possible for PUD_SIZE-aligned address ranges
	 * in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this
	 * split, unshare PMDs in the PUD_SIZE interval surrounding addr now.
	 */
	if (addr & ~PUD_MASK) {
		/*
		 * hugetlb_vm_op_split is called right before we attempt to
		 * split the VMA. We will need to unshare PMDs in the old and
		 * new VMAs, so let's unshare before we split.
		 */
		unsigned long floor = addr & PUD_MASK;
		unsigned long ceil = floor + PUD_SIZE;

		if (floor >= vma->vm_start && ceil <= vma->vm_end)
			hugetlb_unshare_pmds(vma, floor, ceil);
	}

	return 0;
}
static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
{
	return huge_page_size(hstate_vma(vma));
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we
 * get here.
 */
static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
{
	BUG();
	return 0;
}

/*
 * When a new function is introduced to vm_operations_struct and added
 * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
 * This is because under System V memory model, mappings created via
 * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
 * their original vm_ops are overwritten with shm_vm_ops.
 */
const struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
	.open = hugetlb_vm_op_open,
	.close = hugetlb_vm_op_close,
	.may_split = hugetlb_vm_op_split,
	.pagesize = hugetlb_vm_op_pagesize,
};
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;
	unsigned int shift = huge_page_shift(hstate_vma(vma));

	if (writable) {
		entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
					 vma->vm_page_prot)));
	} else {
		entry = huge_pte_wrprotect(mk_huge_pte(page,
					   vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = arch_make_huge_pte(entry, shift, vma->vm_flags);

	return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
		update_mmu_cache(vma, address, ptep);
}
bool is_hugetlb_entry_migration(pte_t pte)
{
	swp_entry_t swp;

	if (huge_pte_none(pte) || pte_present(pte))
		return false;
	swp = pte_to_swp_entry(pte);
	if (is_migration_entry(swp))
		return true;
	else
		return false;
}

bool is_hugetlb_entry_hwpoisoned(pte_t pte)
{
	swp_entry_t swp;

	if (huge_pte_none(pte) || pte_present(pte))
		return false;
	swp = pte_to_swp_entry(pte);
	if (is_hwpoison_entry(swp))
		return true;
	else
		return false;
}
static void
hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
		      struct folio *new_folio, pte_t old, unsigned long sz)
{
	pte_t newpte = make_huge_pte(vma, &new_folio->page, 1);

	__folio_mark_uptodate(new_folio);
	hugetlb_add_new_anon_rmap(new_folio, vma, addr);
	if (userfaultfd_wp(vma) && huge_pte_uffd_wp(old))
		newpte = huge_pte_mkuffd_wp(newpte);
	set_huge_pte_at(vma->vm_mm, addr, ptep, newpte, sz);
	hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm);
	folio_set_hugetlb_migratable(new_folio);
}
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *dst_vma,
			    struct vm_area_struct *src_vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct folio *pte_folio;
	unsigned long addr;
	bool cow = is_cow_mapping(src_vma->vm_flags);
	struct hstate *h = hstate_vma(src_vma);
	unsigned long sz = huge_page_size(h);
	unsigned long npages = pages_per_huge_page(h);
	struct mmu_notifier_range range;
	unsigned long last_addr_mask;
	int ret = 0;

	if (cow) {
		mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src,
					src_vma->vm_start,
					src_vma->vm_end);
		mmu_notifier_invalidate_range_start(&range);
		vma_assert_write_locked(src_vma);
		raw_write_seqcount_begin(&src->write_protect_seq);
	} else {
		/*
		 * For shared mappings the vma lock must be held before
		 * calling hugetlb_walk() in the src vma. Otherwise, the
		 * returned ptep could go away if part of a shared pmd and
		 * another thread calls huge_pmd_unshare.
		 */
		hugetlb_vma_lock_read(src_vma);
	}

	last_addr_mask = hugetlb_mask_last_page(h);
	for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) {
		spinlock_t *src_ptl, *dst_ptl;
		src_pte = hugetlb_walk(src_vma, addr, sz);
		if (!src_pte) {
			addr |= last_addr_mask;
			continue;
		}
		dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz);
		if (!dst_pte) {
			ret = -ENOMEM;
			break;
		}

		/*
		 * If the pagetables are shared don't copy or take references.
		 *
		 * dst_pte == src_pte is the common case of src/dest sharing.
		 * However, src could have 'unshared' and dst shares with
		 * another vma. So page_count of ptep page is checked instead
		 * to reliably determine whether pte is shared.
		 */
		if (page_count(virt_to_page(dst_pte)) > 1) {
			addr |= last_addr_mask;
			continue;
		}

		dst_ptl = huge_pte_lock(h, dst, dst_pte);
		src_ptl = huge_pte_lockptr(h, src, src_pte);
		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
		entry = huge_ptep_get(src_pte);
again:
		if (huge_pte_none(entry)) {
			/*
			 * Skip if src entry none.
			 */
			;
		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) {
			if (!userfaultfd_wp(dst_vma))
				entry = huge_pte_clear_uffd_wp(entry);
			set_huge_pte_at(dst, addr, dst_pte, entry, sz);
		} else if (unlikely(is_hugetlb_entry_migration(entry))) {
			swp_entry_t swp_entry = pte_to_swp_entry(entry);
			bool uffd_wp = pte_swp_uffd_wp(entry);

			if (!is_readable_migration_entry(swp_entry) && cow) {
				/*
				 * COW mappings require pages in both
				 * parent and child to be set to read.
				 */
				swp_entry = make_readable_migration_entry(
							swp_offset(swp_entry));
				entry = swp_entry_to_pte(swp_entry);
				if (userfaultfd_wp(src_vma) && uffd_wp)
					entry = pte_swp_mkuffd_wp(entry);
				set_huge_pte_at(src, addr, src_pte, entry, sz);
			}
			if (!userfaultfd_wp(dst_vma))
				entry = huge_pte_clear_uffd_wp(entry);
			set_huge_pte_at(dst, addr, dst_pte, entry, sz);
		} else if (unlikely(is_pte_marker(entry))) {
			pte_marker marker = copy_pte_marker(
						pte_to_swp_entry(entry), dst_vma);

			if (marker)
				set_huge_pte_at(dst, addr, dst_pte,
						make_pte_marker(marker), sz);
		} else {
			entry = huge_ptep_get(src_pte);
			pte_folio = page_folio(pte_page(entry));
			folio_get(pte_folio);

			/*
			 * Failing to duplicate the anon rmap is a rare case
			 * where we see pinned hugetlb pages while they're
			 * prone to COW. We need to do the COW earlier during
			 * fork.
			 *
			 * When pre-allocating the page or copying data, we
			 * need to be without the pgtable locks since we could
			 * sleep during the process.
			 */
			if (!folio_test_anon(pte_folio)) {
				hugetlb_add_file_rmap(pte_folio);
			} else if (hugetlb_try_dup_anon_rmap(pte_folio, src_vma)) {
				pte_t src_pte_old = entry;
				struct folio *new_folio;

				spin_unlock(src_ptl);
				spin_unlock(dst_ptl);
				/* Do not use reserve as it's private owned */
				new_folio = alloc_hugetlb_folio(dst_vma, addr, 1);
				if (IS_ERR(new_folio)) {
					folio_put(pte_folio);
					ret = PTR_ERR(new_folio);
					break;
				}
				ret = copy_user_large_folio(new_folio,
							    pte_folio,
							    addr, dst_vma);
				folio_put(pte_folio);
				if (ret) {
					folio_put(new_folio);
					break;
				}

				/* Install the new hugetlb folio if src pte stable */
				dst_ptl = huge_pte_lock(h, dst, dst_pte);
				src_ptl = huge_pte_lockptr(h, src, src_pte);
				spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
				entry = huge_ptep_get(src_pte);
				if (!pte_same(src_pte_old, entry)) {
					restore_reserve_on_error(h, dst_vma, addr,
								 new_folio);
					folio_put(new_folio);
					/* huge_ptep of dst_pte won't change as in child */
					goto again;
				}
				hugetlb_install_folio(dst_vma, dst_pte, addr,
						      new_folio, src_pte_old, sz);
				spin_unlock(src_ptl);
				spin_unlock(dst_ptl);
				continue;
			}

			if (cow) {
				/*
				 * No need to notify as we are downgrading page
				 * table protection not changing it to point
				 * to a new page.
				 *
				 * See Documentation/mm/mmu_notifier.rst
				 */
				huge_ptep_set_wrprotect(src, addr, src_pte);
				entry = huge_pte_wrprotect(entry);
			}

			if (!userfaultfd_wp(dst_vma))
				entry = huge_pte_clear_uffd_wp(entry);

			set_huge_pte_at(dst, addr, dst_pte, entry, sz);
			hugetlb_count_add(npages, dst);
		}
		spin_unlock(src_ptl);
		spin_unlock(dst_ptl);
	}

	if (cow) {
		raw_write_seqcount_end(&src->write_protect_seq);
		mmu_notifier_invalidate_range_end(&range);
	} else {
		hugetlb_vma_unlock_read(src_vma);
	}

	return ret;
}
static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
			  unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte,
			  unsigned long sz)
{
	struct hstate *h = hstate_vma(vma);
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *src_ptl, *dst_ptl;
	pte_t pte;

	dst_ptl = huge_pte_lock(h, mm, dst_pte);
	src_ptl = huge_pte_lockptr(h, mm, src_pte);

	/*
	 * We don't have to worry about the ordering of src and dst ptlocks
	 * because exclusive mmap_lock (or the i_mmap_lock) prevents deadlock.
	 */
	if (src_ptl != dst_ptl)
		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	pte = huge_ptep_get_and_clear(mm, old_addr, src_pte);
	set_huge_pte_at(mm, new_addr, dst_pte, pte, sz);

	if (src_ptl != dst_ptl)
		spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
}
int move_hugetlb_page_tables(struct vm_area_struct *vma,
			     struct vm_area_struct *new_vma,
			     unsigned long old_addr, unsigned long new_addr,
			     unsigned long len)
{
	struct hstate *h = hstate_vma(vma);
	struct address_space *mapping = vma->vm_file->f_mapping;
	unsigned long sz = huge_page_size(h);
	struct mm_struct *mm = vma->vm_mm;
	unsigned long old_end = old_addr + len;
	unsigned long last_addr_mask;
	pte_t *src_pte, *dst_pte;
	struct mmu_notifier_range range;
	bool shared_pmd = false;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, old_addr,
				old_end);
	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
	/*
	 * In case of shared PMDs, we should cover the maximum possible
	 * range.
	 */
	flush_cache_range(vma, range.start, range.end);

	mmu_notifier_invalidate_range_start(&range);
	last_addr_mask = hugetlb_mask_last_page(h);
	/* Prevent race with file truncation */
	hugetlb_vma_lock_write(vma);
	i_mmap_lock_write(mapping);
	for (; old_addr < old_end; old_addr += sz, new_addr += sz) {
		src_pte = hugetlb_walk(vma, old_addr, sz);
		if (!src_pte) {
			old_addr |= last_addr_mask;
			new_addr |= last_addr_mask;
			continue;
		}
		if (huge_pte_none(huge_ptep_get(src_pte)))
			continue;

		if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) {
			shared_pmd = true;
			old_addr |= last_addr_mask;
			new_addr |= last_addr_mask;
			continue;
		}

		dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz);
		if (!dst_pte)
			break;

		move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte, sz);
	}

	if (shared_pmd)
		flush_hugetlb_tlb_range(vma, range.start, range.end);
	else
		flush_hugetlb_tlb_range(vma, old_end - len, old_end);
	mmu_notifier_invalidate_range_end(&range);
	i_mmap_unlock_write(mapping);
	hugetlb_vma_unlock_write(vma);

	return len + old_addr - old_end;
}
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page, zap_flags_t zap_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	spinlock_t *ptl;
	struct page *page;
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);
	unsigned long last_addr_mask;
	bool force_flush = false;

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~huge_page_mask(h));
	BUG_ON(end & ~huge_page_mask(h));

	/*
	 * This is a hugetlb vma, all the pte entries should point
	 * to huge page.
	 */
	tlb_change_page_size(tlb, sz);
	tlb_start_vma(tlb, vma);

	last_addr_mask = hugetlb_mask_last_page(h);
	address = start;
	for (; address < end; address += sz) {
		ptep = hugetlb_walk(vma, address, sz);
		if (!ptep) {
			address |= last_addr_mask;
			continue;
		}

		ptl = huge_pte_lock(h, mm, ptep);
		if (huge_pmd_unshare(mm, vma, address, ptep)) {
			spin_unlock(ptl);
			tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
			force_flush = true;
			address |= last_addr_mask;
			continue;
		}

		pte = huge_ptep_get(ptep);
		if (huge_pte_none(pte)) {
			spin_unlock(ptl);
			continue;
		}

		/*
		 * Migrating hugepage or HWPoisoned hugepage is already
		 * unmapped and its refcount is dropped, so just clear pte here.
		 */
		if (unlikely(!pte_present(pte))) {
			/*
			 * If the pte was wr-protected by uffd-wp in any of the
			 * swap forms, meanwhile the caller does not want to
			 * drop the uffd-wp bit in this zap, then replace the
			 * pte with a marker.
			 */
			if (pte_swp_uffd_wp_any(pte) &&
			    !(zap_flags & ZAP_FLAG_DROP_MARKER))
				set_huge_pte_at(mm, address, ptep,
						make_pte_marker(PTE_MARKER_UFFD_WP),
						sz);
			else
				huge_pte_clear(mm, address, ptep, sz);
			spin_unlock(ptl);
			continue;
		}

		page = pte_page(pte);
		/*
		 * If a reference page is supplied, it is because a specific
		 * page is being unmapped, not a range. Ensure the page we
		 * are about to unmap is the actual page of interest.
		 */
		if (ref_page) {
			if (page != ref_page) {
				spin_unlock(ptl);
				continue;
			}
			/*
			 * Mark the VMA as having unmapped its page so that
			 * future faults in this VMA will fail rather than
			 * looking like data was lost
			 */
			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
		}

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
		if (huge_pte_dirty(pte))
			set_page_dirty(page);
		/* Leave a uffd-wp pte marker if needed */
		if (huge_pte_uffd_wp(pte) &&
		    !(zap_flags & ZAP_FLAG_DROP_MARKER))
			set_huge_pte_at(mm, address, ptep,
					make_pte_marker(PTE_MARKER_UFFD_WP),
					sz);
		hugetlb_count_sub(pages_per_huge_page(h), mm);
		hugetlb_remove_rmap(page_folio(page));

		spin_unlock(ptl);
		tlb_remove_page_size(tlb, page, huge_page_size(h));
		/*
		 * Bail out after unmapping reference page if supplied
		 */
		if (ref_page)
			break;
	}
	tlb_end_vma(tlb, vma);

	/*
	 * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
	 * could defer the flush until now, since by holding i_mmap_rwsem we
	 * guaranteed that the last reference would not be dropped. But we must
	 * do the flushing before we return, as otherwise i_mmap_rwsem will be
	 * dropped and the last reference to the shared PMDs page might be
	 * dropped as well.
	 *
	 * In theory we could defer the freeing of the PMD pages as well, but
	 * huge_pmd_unshare() relies on the exact page_count for the PMD page to
	 * detect sharing, so we cannot defer the release of the page either.
	 * Instead, do flush now.
	 */
	if (force_flush)
		tlb_flush_mmu_tlbonly(tlb);
}
void __hugetlb_zap_begin(struct vm_area_struct *vma,
			 unsigned long *start, unsigned long *end)
{
	if (!vma->vm_file)	/* hugetlbfs_file_mmap error */
		return;

	adjust_range_if_pmd_sharing_possible(vma, start, end);
	hugetlb_vma_lock_write(vma);
	if (vma->vm_file)
		i_mmap_lock_write(vma->vm_file->f_mapping);
}

void __hugetlb_zap_end(struct vm_area_struct *vma,
		       struct zap_details *details)
{
	zap_flags_t zap_flags = details ? details->zap_flags : 0;

	if (!vma->vm_file)	/* hugetlbfs_file_mmap error */
		return;

	if (zap_flags & ZAP_FLAG_UNMAP) {	/* final unmap */
		/*
		 * Unlock and free the vma lock before releasing i_mmap_rwsem.
		 * When the vma_lock is freed, this makes the vma ineligible
		 * for pmd sharing.  And, i_mmap_rwsem is required to set up
		 * pmd sharing.  This is important as page tables for this
		 * unmapped range will be asynchronously deleted.  If the page
		 * tables are shared, there will be issues when accessed by
		 * someone else.
		 */
		__hugetlb_vma_unlock_write_free(vma);
	} else {
		hugetlb_vma_unlock_write(vma);
	}

	if (vma->vm_file)
		i_mmap_unlock_write(vma->vm_file->f_mapping);
}
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end, struct page *ref_page,
			  zap_flags_t zap_flags)
{
	struct mmu_notifier_range range;
	struct mmu_gather tlb;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
				start, end);
	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
	mmu_notifier_invalidate_range_start(&range);
	tlb_gather_mmu(&tlb, vma->vm_mm);

	__unmap_hugepage_range(&tlb, vma, start, end, ref_page, zap_flags);

	mmu_notifier_invalidate_range_end(&range);
	tlb_finish_mmu(&tlb);
}
/*
 * This is called when the original mapper is failing to COW a MAP_PRIVATE
 * mapping it owns the reserve page for. The intention is to unmap the page
 * from other VMAs and let the children be SIGKILLed if they are faulting the
 * same region.
 */
static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
			      struct page *page, unsigned long address)
{
	struct hstate *h = hstate_vma(vma);
	struct vm_area_struct *iter_vma;
	struct address_space *mapping;
	pgoff_t pgoff;

	/*
	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
	 * from page cache lookup which is in HPAGE_SIZE units.
	 */
	address = address & huge_page_mask(h);
	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
	mapping = vma->vm_file->f_mapping;

	/*
	 * Take the mapping lock for the duration of the table walk. As
	 * this mapping should be shared between all the VMAs,
	 * __unmap_hugepage_range() is called as the lock is already held
	 */
	i_mmap_lock_write(mapping);
	vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
		/* Do not unmap the current VMA */
		if (iter_vma == vma)
			continue;

		/*
		 * Shared VMAs have their own reserves and do not affect
		 * MAP_PRIVATE accounting but it is possible that a shared
		 * VMA is using the same page so check and skip such VMAs.
		 */
		if (iter_vma->vm_flags & VM_MAYSHARE)
			continue;

		/*
		 * Unmap the page from other VMAs without their own reserves.
		 * They get marked to be SIGKILLed if they fault in these
		 * areas. This is because a future no-page fault on this VMA
		 * could insert a zeroed page instead of the data existing
		 * from the time of fork. This would look like data corruption.
		 */
		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
			unmap_hugepage_range(iter_vma, address,
					     address + huge_page_size(h), page, 0);
	}
	i_mmap_unlock_write(mapping);
}
/*
 * hugetlb_wp() should be called with page lock of the original hugepage held.
 * Called with hugetlb_fault_mutex_table held and pte_page locked so we
 * cannot race with other handlers or page migration.
 * Keep the pte_same checks anyway to make transition from the mutex easier.
 */
static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
		       unsigned long address, pte_t *ptep, unsigned int flags,
		       struct folio *pagecache_folio, spinlock_t *ptl)
{
	const bool unshare = flags & FAULT_FLAG_UNSHARE;
	pte_t pte = huge_ptep_get(ptep);
	struct hstate *h = hstate_vma(vma);
	struct folio *old_folio;
	struct folio *new_folio;
	int outside_reserve = 0;
	vm_fault_t ret = 0;
	unsigned long haddr = address & huge_page_mask(h);
	struct mmu_notifier_range range;

	/*
	 * Never handle CoW for uffd-wp protected pages.  It should be only
	 * handled when the uffd-wp protection is removed.
	 *
	 * Note that only the CoW optimization path (in hugetlb_no_page())
	 * can trigger this, because hugetlb_fault() will always resolve
	 * uffd-wp bit first.
	 */
	if (!unshare && huge_pte_uffd_wp(pte))
		return 0;

	/*
	 * hugetlb does not support FOLL_FORCE-style write faults that keep the
	 * PTE mapped R/O such as maybe_mkwrite() would do.
	 */
	if (WARN_ON_ONCE(!unshare && !(vma->vm_flags & VM_WRITE)))
		return VM_FAULT_SIGSEGV;

	/* Let's take out MAP_SHARED mappings first. */
	if (vma->vm_flags & VM_MAYSHARE) {
		set_huge_ptep_writable(vma, haddr, ptep);
		return 0;
	}

	old_folio = page_folio(pte_page(pte));

	delayacct_wpcopy_start();

retry_avoidcopy:
	/*
	 * If no-one else is actually using this page, we're the exclusive
	 * owner and can reuse this page.
	 */
	if (folio_mapcount(old_folio) == 1 && folio_test_anon(old_folio)) {
		if (!PageAnonExclusive(&old_folio->page)) {
			folio_move_anon_rmap(old_folio, vma);
			SetPageAnonExclusive(&old_folio->page);
		}
		if (likely(!unshare))
			set_huge_ptep_writable(vma, haddr, ptep);

		delayacct_wpcopy_end();
		return 0;
	}
	VM_BUG_ON_PAGE(folio_test_anon(old_folio) &&
		       PageAnonExclusive(&old_folio->page), &old_folio->page);

	/*
	 * If the process that created a MAP_PRIVATE mapping is about to
	 * perform a COW due to a shared page count, attempt to satisfy
	 * the allocation without using the existing reserves. The pagecache
	 * page is used to determine if the reserve at this address was
	 * consumed or not. If reserves were used, a partial faulted mapping
	 * at the time of fork() could consume its reserves on COW instead
	 * of the full address range.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
			old_folio != pagecache_folio)
		outside_reserve = 1;

	folio_get(old_folio);

	/*
	 * Drop page table lock as buddy allocator may be called. It will
	 * be acquired again before returning to the caller, as expected.
	 */
	spin_unlock(ptl);
	new_folio = alloc_hugetlb_folio(vma, haddr, outside_reserve);

	if (IS_ERR(new_folio)) {
		/*
		 * If a process owning a MAP_PRIVATE mapping fails to COW,
		 * it is due to references held by a child and an insufficient
		 * huge page pool. To guarantee the original mappers
		 * reliability, unmap the page from child processes. The child
		 * may get SIGKILLed if it later faults.
		 */
		if (outside_reserve) {
			struct address_space *mapping = vma->vm_file->f_mapping;
			pgoff_t idx;
			u32 hash;

			folio_put(old_folio);
			/*
			 * Drop hugetlb_fault_mutex and vma_lock before
			 * unmapping.  unmapping needs to hold vma_lock
			 * in write mode.  Dropping vma_lock in read mode
			 * here is OK as COW mappings do not interact with
			 * PMD sharing.
			 *
			 * Reacquire both after unmap operation.
			 */
			idx = vma_hugecache_offset(h, vma, haddr);
			hash = hugetlb_fault_mutex_hash(mapping, idx);
			hugetlb_vma_unlock_read(vma);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);

			unmap_ref_private(mm, vma, &old_folio->page, haddr);

			mutex_lock(&hugetlb_fault_mutex_table[hash]);
			hugetlb_vma_lock_read(vma);
			spin_lock(ptl);
			ptep = hugetlb_walk(vma, haddr, huge_page_size(h));
			if (likely(ptep &&
				   pte_same(huge_ptep_get(ptep), pte)))
				goto retry_avoidcopy;
			/*
			 * race occurs while re-acquiring page table
			 * lock, and our job is done.
			 */
			delayacct_wpcopy_end();
			return 0;
		}

		ret = vmf_error(PTR_ERR(new_folio));
		goto out_release_old;
	}

	/*
	 * When the original hugepage is shared one, it does not have
	 * anon_vma prepared.
	 */
	if (unlikely(anon_vma_prepare(vma))) {
		ret = VM_FAULT_OOM;
		goto out_release_all;
	}

	if (copy_user_large_folio(new_folio, old_folio, address, vma)) {
		ret = VM_FAULT_HWPOISON_LARGE;
		goto out_release_all;
	}
	__folio_mark_uptodate(new_folio);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, haddr,
				haddr + huge_page_size(h));
	mmu_notifier_invalidate_range_start(&range);

	/*
	 * Retake the page table lock to check for racing updates
	 * before the page tables are altered
	 */
	spin_lock(ptl);
	ptep = hugetlb_walk(vma, haddr, huge_page_size(h));
	if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
		pte_t newpte = make_huge_pte(vma, &new_folio->page, !unshare);

		/* Break COW or unshare */
		huge_ptep_clear_flush(vma, haddr, ptep);
		hugetlb_remove_rmap(old_folio);
		hugetlb_add_new_anon_rmap(new_folio, vma, haddr);
		if (huge_pte_uffd_wp(pte))
			newpte = huge_pte_mkuffd_wp(newpte);
		set_huge_pte_at(mm, haddr, ptep, newpte, huge_page_size(h));
		folio_set_hugetlb_migratable(new_folio);
		/* Make the old page be freed below */
		new_folio = old_folio;
	}
	spin_unlock(ptl);
	mmu_notifier_invalidate_range_end(&range);
out_release_all:
	/*
	 * No restore in case of successful pagetable update (Break COW or
	 * unshare)
	 */
	if (new_folio != old_folio)
		restore_reserve_on_error(h, vma, haddr, new_folio);
	folio_put(new_folio);
out_release_old:
	folio_put(old_folio);

	spin_lock(ptl); /* Caller expects lock to be held */

	delayacct_wpcopy_end();
	return ret;
}
/*
 * Return whether there is a pagecache page to back given address within VMA.
 */
static bool hugetlbfs_pagecache_present(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	pgoff_t idx = linear_page_index(vma, address);
	struct folio *folio;

	folio = filemap_get_folio(mapping, idx);
	if (IS_ERR(folio))
		return false;
	folio_put(folio);
	return true;
}
int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
			      pgoff_t idx)
{
	struct inode *inode = mapping->host;
	struct hstate *h = hstate_inode(inode);
	int err;

	idx <<= huge_page_order(h);
	__folio_set_locked(folio);
	err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL);

	if (unlikely(err)) {
		__folio_clear_locked(folio);
		return err;
	}
	folio_clear_hugetlb_restore_reserve(folio);

	/*
	 * mark folio dirty so that it will not be removed from cache/file
	 * by non-hugetlbfs specific code paths.
	 */
	folio_mark_dirty(folio);

	spin_lock(&inode->i_lock);
	inode->i_blocks += blocks_per_huge_page(h);
	spin_unlock(&inode->i_lock);
	return 0;
}
static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma,
						  struct address_space *mapping,
						  pgoff_t idx,
						  unsigned int flags,
						  unsigned long haddr,
						  unsigned long addr,
						  unsigned long reason)
{
	u32 hash;
	struct vm_fault vmf = {
		.vma = vma,
		.address = haddr,
		.real_address = addr,
		.flags = flags,

		/*
		 * Hard to debug if it ends up being
		 * used by a callee that assumes
		 * something about the other
		 * uninitialized fields... same as in
		 * memory.c
		 */
	};

	/*
	 * vma_lock and hugetlb_fault_mutex must be dropped before handling
	 * userfault. Also mmap_lock could be dropped due to handling
	 * userfault, any vma operation should be careful from here.
	 */
	hugetlb_vma_unlock_read(vma);
	hash = hugetlb_fault_mutex_hash(mapping, idx);
	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
	return handle_userfault(&vmf, reason);
}
/*
 * Recheck pte with pgtable lock.  Returns true if pte didn't change, or
 * false if pte changed or is changing.
 */
static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm,
			       pte_t *ptep, pte_t old_pte)
{
	spinlock_t *ptl;
	bool same;

	ptl = huge_pte_lock(h, mm, ptep);
	same = pte_same(huge_ptep_get(ptep), old_pte);
	spin_unlock(ptl);

	return same;
}
static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			struct address_space *mapping, pgoff_t idx,
			unsigned long address, pte_t *ptep,
			pte_t old_pte, unsigned int flags)
{
	struct hstate *h = hstate_vma(vma);
	vm_fault_t ret = VM_FAULT_SIGBUS;
	int anon_rmap = 0;
	unsigned long size;
	struct folio *folio;
	pte_t new_pte;
	spinlock_t *ptl;
	unsigned long haddr = address & huge_page_mask(h);
	bool new_folio, new_pagecache_folio = false;
	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);

	/*
	 * Currently, we are forced to kill the process in the event the
	 * original mapper has unmapped pages from the child due to a failed
	 * COW/unsharing. Warn that such a situation has occurred as it may not
	 * be obvious.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
		pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
			   current->pid);
		goto out;
	}

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
	new_folio = false;
	folio = filemap_lock_hugetlb_folio(h, mapping, idx);
	if (IS_ERR(folio)) {
		size = i_size_read(mapping->host) >> huge_page_shift(h);
		if (idx >= size)
			goto out;
		/* Check for page in userfault range */
		if (userfaultfd_missing(vma)) {
			/*
			 * Since hugetlb_no_page() was examining pte
			 * without pgtable lock, we need to re-test under
			 * lock because the pte may not be stable and could
			 * have changed from under us.  Try to detect
			 * either changed or during-changing ptes and retry
			 * properly when needed.
			 *
			 * Note that userfaultfd is actually fine with
			 * false positives (e.g. caused by pte changed),
			 * but not wrong logical events (e.g. caused by
			 * reading a pte during changing).  The latter can
			 * confuse the userspace, so the strictness is very
			 * much preferred.  E.g., MISSING event should
			 * never happen on the page after UFFDIO_COPY has
			 * correctly installed the page and returned.
			 */
			if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) {
				ret = 0;
				goto out;
			}

			return hugetlb_handle_userfault(vma, mapping, idx, flags,
							haddr, address,
							VM_UFFD_MISSING);
		}

		folio = alloc_hugetlb_folio(vma, haddr, 0);
		if (IS_ERR(folio)) {
			/*
			 * Returning error will result in faulting task being
			 * sent SIGBUS. The hugetlb fault mutex prevents two
			 * tasks from racing to fault in the same page which
			 * could result in false unable to allocate errors.
			 * Page migration does not take the fault mutex, but
			 * does a clear then write of pte's under page table
			 * lock. Page fault code could race with migration,
			 * notice the clear pte and try to allocate a page
			 * here.  Before returning error, get ptl and make
			 * sure there really is no pte entry.
			 */
			if (hugetlb_pte_stable(h, mm, ptep, old_pte))
				ret = vmf_error(PTR_ERR(folio));
			else
				ret = 0;
			goto out;
		}
		clear_huge_page(&folio->page, address, pages_per_huge_page(h));
		__folio_mark_uptodate(folio);
		new_folio = true;

		if (vma->vm_flags & VM_MAYSHARE) {
			int err = hugetlb_add_to_page_cache(folio, mapping, idx);
			if (err) {
				/*
				 * err can't be -EEXIST which implies someone
				 * else consumed the reservation since hugetlb
				 * fault mutex is held when add a hugetlb page
				 * to the page cache. So it's safe to call
				 * restore_reserve_on_error() here.
				 */
				restore_reserve_on_error(h, vma, haddr, folio);
				folio_put(folio);
				goto out;
			}
			new_pagecache_folio = true;
		} else {
			folio_lock(folio);
			if (unlikely(anon_vma_prepare(vma))) {
				ret = VM_FAULT_OOM;
				goto backout_unlocked;
			}
			anon_rmap = 1;
		}
	} else {
		/*
		 * If a memory error occurs between mmap() and fault, some
		 * processes don't have a hwpoisoned swap entry for the errored
		 * virtual address.  So we need to block hugepage fault by
		 * PG_hwpoison bit check.
		 */
		if (unlikely(folio_test_hwpoison(folio))) {
			ret = VM_FAULT_HWPOISON_LARGE |
				VM_FAULT_SET_HINDEX(hstate_index(h));
			goto backout_unlocked;
		}

		/* Check for page in userfault range. */
		if (userfaultfd_minor(vma)) {
			folio_unlock(folio);
			folio_put(folio);
			/* See comment in userfaultfd_missing() block above */
			if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) {
				ret = 0;
				goto out;
			}
			return hugetlb_handle_userfault(vma, mapping, idx, flags,
							haddr, address,
							VM_UFFD_MINOR);
		}
	}

	/*
	 * If we are going to COW a private mapping later, we examine the
	 * pending reservations for this page now. This will ensure that
	 * any allocations necessary to record that reservation occur outside
	 * the spinlock.
	 */
	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
		if (vma_needs_reservation(h, vma, haddr) < 0) {
			ret = VM_FAULT_OOM;
			goto backout_unlocked;
		}
		/* Just decrements count, does not deallocate */
		vma_end_reservation(h, vma, haddr);
	}

	ptl = huge_pte_lock(h, mm, ptep);
	ret = 0;
	/* If pte changed from under us, retry */
	if (!pte_same(huge_ptep_get(ptep), old_pte))
		goto backout;

	if (anon_rmap)
		hugetlb_add_new_anon_rmap(folio, vma, haddr);
	else
		hugetlb_add_file_rmap(folio);
	new_pte = make_huge_pte(vma, &folio->page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	/*
	 * If this pte was previously wr-protected, keep it wr-protected even
	 * if populated.
	 */
	if (unlikely(pte_marker_uffd_wp(old_pte)))
		new_pte = huge_pte_mkuffd_wp(new_pte);
	set_huge_pte_at(mm, haddr, ptep, new_pte, huge_page_size(h));

	hugetlb_count_add(pages_per_huge_page(h), mm);
	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_wp(mm, vma, address, ptep, flags, folio, ptl);
	}

	spin_unlock(ptl);

	/*
	 * Only set hugetlb_migratable in newly allocated pages.  Existing pages
	 * found in the pagecache may not have hugetlb_migratable if they have
	 * been isolated for migration.
	 */
	if (new_folio)
		folio_set_hugetlb_migratable(folio);

	folio_unlock(folio);
out:
	hugetlb_vma_unlock_read(vma);
	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
	return ret;

backout:
	spin_unlock(ptl);
backout_unlocked:
	if (new_folio && !new_pagecache_folio)
		restore_reserve_on_error(h, vma, haddr, folio);

	folio_unlock(folio);
	folio_put(folio);
	goto out;
}
#ifdef CONFIG_SMP
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
{
	unsigned long key[2];
	u32 hash;

	key[0] = (unsigned long) mapping;
	key[1] = idx;

	hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);

	return hash & (num_fault_mutexes - 1);
}
#else
/*
 * For uniprocessor systems we always use a single mutex, so just
 * return 0 and avoid the hashing overhead.
 */
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
{
	return 0;
}
#endif
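/*
 * For example, the fault paths in this file pair the hash with the mutex
 * table roughly like this (simplified, error handling omitted):
 *
 *	hash = hugetlb_fault_mutex_hash(mapping, idx);
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	... fault in or look up the page for (mapping, idx) ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 *
 * Two faults on the same (mapping, idx) hash to the same mutex and are
 * serialized; faults on different pages usually proceed in parallel.
 */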
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags)
{
	pte_t *ptep, entry;
	spinlock_t *ptl;
	vm_fault_t ret;
	u32 hash;
	pgoff_t idx;
	struct folio *folio = NULL;
	struct folio *pagecache_folio = NULL;
	struct hstate *h = hstate_vma(vma);
	struct address_space *mapping;
	int need_wait_lock = 0;
	unsigned long haddr = address & huge_page_mask(h);

	/* TODO: Handle faults under the VMA lock */
	if (flags & FAULT_FLAG_VMA_LOCK) {
		vma_end_read(vma);
		return VM_FAULT_RETRY;
	}

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, haddr);
	hash = hugetlb_fault_mutex_hash(mapping, idx);
	mutex_lock(&hugetlb_fault_mutex_table[hash]);

	/*
	 * Acquire vma lock before calling huge_pte_alloc and hold
	 * until finished with ptep.  This prevents huge_pmd_unshare from
	 * being called elsewhere and making the ptep no longer valid.
	 */
	hugetlb_vma_lock_read(vma);
	ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h));
	if (!ptep) {
		hugetlb_vma_unlock_read(vma);
		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		return VM_FAULT_OOM;
	}

	entry = huge_ptep_get(ptep);
	if (huge_pte_none_mostly(entry)) {
		if (is_pte_marker(entry)) {
			pte_marker marker =
				pte_marker_get(pte_to_swp_entry(entry));

			if (marker & PTE_MARKER_POISONED) {
				ret = VM_FAULT_HWPOISON_LARGE;
				goto out_mutex;
			}
		}

		/*
		 * Other PTE markers should be handled the same way as none PTE.
		 *
		 * hugetlb_no_page will drop the vma lock and hugetlb fault
		 * mutex internally, which makes us return immediately.
		 */
		return hugetlb_no_page(mm, vma, mapping, idx, address, ptep,
				       entry, flags);
	}

	ret = 0;

	/*
	 * entry could be a migration/hwpoison entry at this point, so this
	 * check prevents the kernel from going below assuming that we have
	 * an active hugepage in pagecache. This goto expects the 2nd page
	 * fault, and is_hugetlb_entry_(migration|hwpoisoned) check will
	 * properly handle it.
	 */
	if (!pte_present(entry)) {
		if (unlikely(is_hugetlb_entry_migration(entry))) {
			/*
			 * Release the hugetlb fault lock now, but retain
			 * the vma lock, because it is needed to guard the
			 * huge_pte_lockptr() later in
			 * migration_entry_wait_huge(). The vma lock will
			 * be released there.
			 */
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			migration_entry_wait_huge(vma, ptep);
			return 0;
		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
			ret = VM_FAULT_HWPOISON_LARGE |
				VM_FAULT_SET_HINDEX(hstate_index(h));
		goto out_mutex;
	}

	/*
	 * If we are going to COW/unshare the mapping later, we examine the
	 * pending reservations for this page now. This will ensure that any
	 * allocations necessary to record that reservation occur outside the
	 * spinlock. Also lookup the pagecache page now as it is used to
	 * determine if a reservation has been consumed.
	 */
	if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
	    !(vma->vm_flags & VM_MAYSHARE) && !huge_pte_write(entry)) {
		if (vma_needs_reservation(h, vma, haddr) < 0) {
			ret = VM_FAULT_OOM;
			goto out_mutex;
		}
		/* Just decrements count, does not deallocate */
		vma_end_reservation(h, vma, haddr);

		pagecache_folio = filemap_lock_hugetlb_folio(h, mapping, idx);
		if (IS_ERR(pagecache_folio))
			pagecache_folio = NULL;
	}

	ptl = huge_pte_lock(h, mm, ptep);

	/* Check for a racing update before calling hugetlb_wp() */
	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
		goto out_ptl;

	/* Handle userfault-wp first, before trying to lock more pages */
	if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(ptep)) &&
	    (flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
		if (!userfaultfd_wp_async(vma)) {
			struct vm_fault vmf = {
				.vma = vma,
				.address = haddr,
				.real_address = address,
				.flags = flags,
			};

			spin_unlock(ptl);
			if (pagecache_folio) {
				folio_unlock(pagecache_folio);
				folio_put(pagecache_folio);
			}
			hugetlb_vma_unlock_read(vma);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			return handle_userfault(&vmf, VM_UFFD_WP);
		}

		entry = huge_pte_clear_uffd_wp(entry);
		set_huge_pte_at(mm, haddr, ptep, entry,
				huge_page_size(hstate_vma(vma)));
		/* Fallthrough to CoW */
	}

	/*
	 * hugetlb_wp() requires page locks of pte_page(entry) and
	 * pagecache_folio, so here we need take the former one
	 * when folio != pagecache_folio or !pagecache_folio.
	 */
	folio = page_folio(pte_page(entry));
	if (folio != pagecache_folio)
		if (!folio_trylock(folio)) {
			need_wait_lock = 1;
			goto out_ptl;
		}

	folio_get(folio);

	if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
		if (!huge_pte_write(entry)) {
			ret = hugetlb_wp(mm, vma, address, ptep, flags,
					 pagecache_folio, ptl);
			goto out_put_page;
		} else if (likely(flags & FAULT_FLAG_WRITE)) {
			entry = huge_pte_mkdirty(entry);
		}
	}
	entry = pte_mkyoung(entry);
	if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
						flags & FAULT_FLAG_WRITE))
		update_mmu_cache(vma, haddr, ptep);
out_put_page:
	if (folio != pagecache_folio)
		folio_unlock(folio);
	folio_put(folio);
out_ptl:
	spin_unlock(ptl);

	if (pagecache_folio) {
		folio_unlock(pagecache_folio);
		folio_put(pagecache_folio);
	}
out_mutex:
	hugetlb_vma_unlock_read(vma);
	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
	/*
	 * Generally it's safe to hold a refcount while waiting on a page lock.
	 * But here we just wait to defer the next page fault to avoid a busy
	 * loop, and the page is not used after being unlocked before returning
	 * from the current page fault. So we are safe from accessing a freed
	 * page, even if we wait here without taking a refcount.
	 */
	if (need_wait_lock)
		folio_wait_locked(folio);
	return ret;
}
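/*
 * Lock ordering in the fault path above: the hugetlb fault mutex is taken
 * first, then the vma lock for read, then the pagecache folio lock, and
 * finally the page table lock (the folio actually mapped by the pte is
 * only trylocked once the pte has been revalidated). hugetlb_no_page()
 * and hugetlb_handle_userfault() drop the vma lock and the fault mutex on
 * the caller's behalf, which is why hugetlb_fault() can return right
 * after calling them.
 */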
#ifdef CONFIG_USERFAULTFD
/*
 * Can probably be eliminated, but still used by hugetlb_mfill_atomic_pte().
 */
static struct folio *alloc_hugetlb_folio_vma(struct hstate *h,
		struct vm_area_struct *vma, unsigned long address)
{
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct folio *folio;
	gfp_t gfp_mask;
	int node;

	gfp_mask = htlb_alloc_mask(h);
	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
	folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask);
	mpol_cond_put(mpol);

	return folio;
}
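/*
 * Note that huge_node() above applies the task's NUMA mempolicy to the
 * given address, so the folio is preferentially taken from (or allocated
 * on) the policy-preferred node, with the returned nodemask as fallback.
 */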
/*
 * Used by userfaultfd UFFDIO_* ioctls. Based on userfaultfd's mfill_atomic_pte
 * with modifications for hugetlb pages.
 */
int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr,
			     unsigned long src_addr,
			     uffd_flags_t flags,
			     struct folio **foliop)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	bool is_continue = uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE);
	bool wp_enabled = (flags & MFILL_ATOMIC_WP);
	struct hstate *h = hstate_vma(dst_vma);
	struct address_space *mapping = dst_vma->vm_file->f_mapping;
	pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
	unsigned long size;
	int vm_shared = dst_vma->vm_flags & VM_SHARED;
	pte_t _dst_pte;
	spinlock_t *ptl;
	int ret = -ENOMEM;
	struct folio *folio;
	int writable;
	bool folio_in_pagecache = false;

	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
		ptl = huge_pte_lock(h, dst_mm, dst_pte);

		/* Don't overwrite any existing PTEs (even markers) */
		if (!huge_pte_none(huge_ptep_get(dst_pte))) {
			spin_unlock(ptl);
			return -EEXIST;
		}

		_dst_pte = make_pte_marker(PTE_MARKER_POISONED);
		set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte,
				huge_page_size(h));

		/* No need to invalidate - it was non-present before */
		update_mmu_cache(dst_vma, dst_addr, dst_pte);

		spin_unlock(ptl);
		return 0;
	}

	if (is_continue) {
		ret = -EFAULT;
		folio = filemap_lock_hugetlb_folio(h, mapping, idx);
		if (IS_ERR(folio))
			goto out;
		folio_in_pagecache = true;
	} else if (!*foliop) {
		/* If a folio already exists, then it's UFFDIO_COPY for
		 * a non-missing case. Return -EEXIST.
		 */
		if (vm_shared &&
		    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
			ret = -EEXIST;
			goto out;
		}

		folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
		if (IS_ERR(folio)) {
			ret = -ENOMEM;
			goto out;
		}

		ret = copy_folio_from_user(folio, (const void __user *) src_addr,
					   false);

		/* fallback to copy_from_user outside mmap_lock */
		if (unlikely(ret)) {
			ret = -ENOENT;
			/* Free the allocated folio which may have
			 * consumed a reservation.
			 */
			restore_reserve_on_error(h, dst_vma, dst_addr, folio);
			folio_put(folio);

			/* Allocate a temporary folio to hold the copied
			 * contents.
			 */
			folio = alloc_hugetlb_folio_vma(h, dst_vma, dst_addr);
			if (!folio) {
				ret = -ENOMEM;
				goto out;
			}
			*foliop = folio;
			/* Set the outparam foliop and return to the caller to
			 * copy the contents outside the lock. Don't free the
			 * folio.
			 */
			goto out;
		}
	} else {
		if (vm_shared &&
		    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
			ret = -EEXIST;
			goto out;
		}

		folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
		if (IS_ERR(folio)) {
			ret = -ENOMEM;
			goto out;
		}
		ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
		if (ret)
			goto out;
	}

	/*
	 * The memory barrier inside __folio_mark_uptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__folio_mark_uptodate(folio);

	/* Add shared, newly allocated pages to the page cache. */
	if (vm_shared && !is_continue) {
		size = i_size_read(mapping->host) >> huge_page_shift(h);
		ret = -EFAULT;
		if (idx >= size)
			goto out_release_nounlock;

		/*
		 * Serialization between remove_inode_hugepages() and
		 * hugetlb_add_to_page_cache() below happens through the
		 * hugetlb_fault_mutex_table that must be held by the caller
		 * here.
		 */
		ret = hugetlb_add_to_page_cache(folio, mapping, idx);
		if (ret)
			goto out_release_nounlock;
		folio_in_pagecache = true;
	}

	ptl = huge_pte_lock(h, dst_mm, dst_pte);

	ret = -EIO;
	if (folio_test_hwpoison(folio))
		goto out_release_unlock;

	/*
	 * We allow to overwrite a pte marker: consider when both MISSING|WP
	 * registered, we firstly wr-protect a none pte which has no page cache
	 * page backing it, then access the page.
	 */
	ret = -EEXIST;
	if (!huge_pte_none_mostly(huge_ptep_get(dst_pte)))
		goto out_release_unlock;

	if (folio_in_pagecache)
		hugetlb_add_file_rmap(folio);
	else
		hugetlb_add_new_anon_rmap(folio, dst_vma, dst_addr);

	/*
	 * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY
	 * with wp flag set, don't set pte write bit.
	 */
	if (wp_enabled || (is_continue && !vm_shared))
		writable = 0;
	else
		writable = dst_vma->vm_flags & VM_WRITE;

	_dst_pte = make_huge_pte(dst_vma, &folio->page, writable);
	/*
	 * Always mark UFFDIO_COPY page dirty; note that this may not be
	 * extremely important for hugetlbfs for now since swapping is not
	 * supported, but we should still be clear in that this page cannot be
	 * thrown away at will, even if write bit not set.
	 */
	_dst_pte = huge_pte_mkdirty(_dst_pte);
	_dst_pte = pte_mkyoung(_dst_pte);

	if (wp_enabled)
		_dst_pte = huge_pte_mkuffd_wp(_dst_pte);

	set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, huge_page_size(h));

	hugetlb_count_add(pages_per_huge_page(h), dst_mm);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);

	spin_unlock(ptl);
	if (!is_continue)
		folio_set_hugetlb_migratable(folio);
	if (vm_shared || is_continue)
		folio_unlock(folio);
	ret = 0;
out:
	return ret;
out_release_unlock:
	spin_unlock(ptl);
	if (vm_shared || is_continue)
		folio_unlock(folio);
out_release_nounlock:
	if (!folio_in_pagecache)
		restore_reserve_on_error(h, dst_vma, dst_addr, folio);
	folio_put(folio);
	goto out;
}
#endif /* CONFIG_USERFAULTFD */
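/*
 * For reference, the function above is what ultimately services a
 * userspace UFFDIO_COPY on a hugetlbfs mapping. A minimal (illustrative,
 * error handling omitted) caller looks roughly like:
 *
 *	struct uffdio_copy copy = {
 *		.dst = (unsigned long)dst,	// hugepage-aligned fault address
 *		.src = (unsigned long)src,	// hugepage-sized source buffer
 *		.len = huge_page_size,
 *		.mode = 0,			// or UFFDIO_COPY_MODE_WP
 *	};
 *	ioctl(uffd, UFFDIO_COPY, &copy);
 *
 * src_addr above is that userspace .src pointer; the copy into the new
 * hugetlb folio happens here under the fault mutex, or is retried by the
 * caller outside mmap_lock via the *foliop fallback.
 */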
struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
				      unsigned long address, unsigned int flags,
				      unsigned int *page_mask)
{
	struct hstate *h = hstate_vma(vma);
	struct mm_struct *mm = vma->vm_mm;
	unsigned long haddr = address & huge_page_mask(h);
	struct page *page = NULL;
	spinlock_t *ptl;
	pte_t *pte, entry;
	int ret;

	hugetlb_vma_lock_read(vma);
	pte = hugetlb_walk(vma, haddr, huge_page_size(h));
	if (!pte)
		goto out_unlock;

	ptl = huge_pte_lock(h, mm, pte);
	entry = huge_ptep_get(pte);
	if (pte_present(entry)) {
		page = pte_page(entry);

		if (!huge_pte_write(entry)) {
			if (flags & FOLL_WRITE) {
				page = NULL;
				goto out;
			}

			if (gup_must_unshare(vma, flags, page)) {
				/* Tell the caller to do unsharing */
				page = ERR_PTR(-EMLINK);
				goto out;
			}
		}

		page = nth_page(page, ((address & ~huge_page_mask(h)) >> PAGE_SHIFT));

		/*
		 * Note that page may be a sub-page, and with vmemmap
		 * optimizations the page struct may be read only.
		 * try_grab_page() will increase the ref count on the
		 * head page, so this will be OK.
		 *
		 * try_grab_page() should always be able to get the page here,
		 * because we hold the ptl lock and have verified pte_present().
		 */
		ret = try_grab_page(page, flags);

		if (WARN_ON_ONCE(ret)) {
			page = ERR_PTR(ret);
			goto out;
		}

		*page_mask = (1U << huge_page_order(h)) - 1;
	}
out:
	spin_unlock(ptl);
out_unlock:
	hugetlb_vma_unlock_read(vma);

	/*
	 * Fixup retval for dump requests: if pagecache doesn't exist,
	 * don't try to allocate a new page but just skip it.
	 */
	if (!page && (flags & FOLL_DUMP) &&
	    !hugetlbfs_pagecache_present(h, vma, address))
		page = ERR_PTR(-EFAULT);

	return page;
}
long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end,
		pgprot_t newprot, unsigned long cp_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;
	struct hstate *h = hstate_vma(vma);
	long pages = 0, psize = huge_page_size(h);
	bool shared_pmd = false;
	struct mmu_notifier_range range;
	unsigned long last_addr_mask;
	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;

	/*
	 * In the case of shared PMDs, the area to flush could be beyond
	 * start/end.  Set range.start/range.end to cover the maximum possible
	 * range if PMD sharing is possible.
	 */
	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
				0, mm, start, end);
	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);

	BUG_ON(address >= end);
	flush_cache_range(vma, range.start, range.end);

	mmu_notifier_invalidate_range_start(&range);
	hugetlb_vma_lock_write(vma);
	i_mmap_lock_write(vma->vm_file->f_mapping);
	last_addr_mask = hugetlb_mask_last_page(h);
	for (; address < end; address += psize) {
		spinlock_t *ptl;

		ptep = hugetlb_walk(vma, address, psize);
		if (!ptep) {
			if (!uffd_wp) {
				address |= last_addr_mask;
				continue;
			}
			/*
			 * Userfaultfd wr-protect requires pgtable
			 * pre-allocations to install pte markers.
			 */
			ptep = huge_pte_alloc(mm, vma, address, psize);
			if (!ptep) {
				pages = -ENOMEM;
				break;
			}
		}
		ptl = huge_pte_lock(h, mm, ptep);
		if (huge_pmd_unshare(mm, vma, address, ptep)) {
			/*
			 * When uffd-wp is enabled on the vma, unshare
			 * shouldn't happen at all.  Warn about it if it
			 * happened due to some reason.
			 */
			WARN_ON_ONCE(uffd_wp || uffd_wp_resolve);
			pages++;
			spin_unlock(ptl);
			shared_pmd = true;
			address |= last_addr_mask;
			continue;
		}
		pte = huge_ptep_get(ptep);
		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
			/* Nothing to do. */
		} else if (unlikely(is_hugetlb_entry_migration(pte))) {
			swp_entry_t entry = pte_to_swp_entry(pte);
			struct page *page = pfn_swap_entry_to_page(entry);
			pte_t newpte = pte;

			if (is_writable_migration_entry(entry)) {
				if (PageAnon(page))
					entry = make_readable_exclusive_migration_entry(
								swp_offset(entry));
				else
					entry = make_readable_migration_entry(
								swp_offset(entry));
				newpte = swp_entry_to_pte(entry);
				pages++;
			}

			if (uffd_wp)
				newpte = pte_swp_mkuffd_wp(newpte);
			else if (uffd_wp_resolve)
				newpte = pte_swp_clear_uffd_wp(newpte);
			if (!pte_same(pte, newpte))
				set_huge_pte_at(mm, address, ptep, newpte, psize);
		} else if (unlikely(is_pte_marker(pte))) {
			/* No other markers apply for now. */
			WARN_ON_ONCE(!pte_marker_uffd_wp(pte));
			if (uffd_wp_resolve)
				/* Safe to modify directly (non-present->none). */
				huge_pte_clear(mm, address, ptep, psize);
		} else if (!huge_pte_none(pte)) {
			pte_t old_pte;
			unsigned int shift = huge_page_shift(hstate_vma(vma));

			old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
			pte = huge_pte_modify(old_pte, newprot);
			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
			if (uffd_wp)
				pte = huge_pte_mkuffd_wp(pte);
			else if (uffd_wp_resolve)
				pte = huge_pte_clear_uffd_wp(pte);
			huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
			pages++;
		} else {
			/* None pte */
			if (unlikely(uffd_wp))
				/* Safe to modify directly (none->non-present). */
				set_huge_pte_at(mm, address, ptep,
						make_pte_marker(PTE_MARKER_UFFD_WP),
						psize);
		}
		spin_unlock(ptl);
	}
	/*
	 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
	 * may have cleared our pud entry and done put_page on the page table:
	 * once we release i_mmap_rwsem, another task can do the final put_page
	 * and that page table be reused and filled with junk.  If we actually
	 * did unshare a page of pmds, flush the range corresponding to the pud.
	 */
	if (shared_pmd)
		flush_hugetlb_tlb_range(vma, range.start, range.end);
	else
		flush_hugetlb_tlb_range(vma, start, end);
	/*
	 * There is no need to call mmu_notifier_arch_invalidate_secondary_tlbs():
	 * we are downgrading page table protection, not changing it to point to
	 * a new page.
	 *
	 * See Documentation/mm/mmu_notifier.rst
	 */
	i_mmap_unlock_write(vma->vm_file->f_mapping);
	hugetlb_vma_unlock_write(vma);
	mmu_notifier_invalidate_range_end(&range);

	return pages > 0 ? (pages << h->order) : pages;
}
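/*
 * Note that the return value above is in units of base pages: "pages"
 * counts hugetlb entries changed, and the shift by h->order converts that
 * to base pages, so e.g. one changed 2 MB entry on x86-64 reports 512.
 * Negative values are errors (-ENOMEM from the pte marker pre-allocation
 * path).
 */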
/* Return true if reservation was successful, false otherwise.  */
bool hugetlb_reserve_pages(struct inode *inode,
					long from, long to,
					struct vm_area_struct *vma,
					vm_flags_t vm_flags)
{
	long chg = -1, add = -1;
	struct hstate *h = hstate_inode(inode);
	struct hugepage_subpool *spool = subpool_inode(inode);
	struct resv_map *resv_map;
	struct hugetlb_cgroup *h_cg = NULL;
	long gbl_reserve, regions_needed = 0;

	/* This should never happen */
	if (from > to) {
		VM_WARN(1, "%s called with a negative range\n", __func__);
		return false;
	}

	/*
	 * vma specific semaphore used for pmd sharing and fault/truncation
	 * synchronization
	 */
	hugetlb_vma_lock_alloc(vma);

	/*
	 * Only apply hugepage reservation if asked. At fault time, an
	 * attempt will be made for VM_NORESERVE to allocate a page
	 * without using reserves
	 */
	if (vm_flags & VM_NORESERVE)
		return true;

	/*
	 * Shared mappings base their reservation on the number of pages that
	 * are already allocated on behalf of the file. Private mappings need
	 * to reserve the full area even if read-only as mprotect() may be
	 * called to make the mapping read-write. Assume !vma is a shm mapping
	 */
	if (!vma || vma->vm_flags & VM_MAYSHARE) {
		/*
		 * resv_map can not be NULL as hugetlb_reserve_pages is only
		 * called for inodes for which resv_maps were created (see
		 * hugetlbfs_get_inode).
		 */
		resv_map = inode_resv_map(inode);

		chg = region_chg(resv_map, from, to, &regions_needed);
	} else {
		/* Private mapping. */
		resv_map = resv_map_alloc();
		if (!resv_map)
			goto out_err;

		chg = to - from;

		set_vma_resv_map(vma, resv_map);
		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
	}

	if (chg < 0)
		goto out_err;

	if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
				chg * pages_per_huge_page(h), &h_cg) < 0)
		goto out_err;

	if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) {
		/* For private mappings, the hugetlb_cgroup uncharge info hangs
		 * off the resv_map.
		 */
		resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h);
	}

	/*
	 * There must be enough pages in the subpool for the mapping. If
	 * the subpool has a minimum size, there may be some global
	 * reservations already in place (gbl_reserve).
	 */
	gbl_reserve = hugepage_subpool_get_pages(spool, chg);
	if (gbl_reserve < 0)
		goto out_uncharge_cgroup;

	/*
	 * Check enough hugepages are available for the reservation.
	 * Hand the pages back to the subpool if there are not
	 */
	if (hugetlb_acct_memory(h, gbl_reserve) < 0)
		goto out_put_pages;

	/*
	 * Account for the reservations made. Shared mappings record regions
	 * that have reservations as they are shared by multiple VMAs.
	 * When the last VMA disappears, the region map says how much
	 * the reservation was and the page cache tells how much of
	 * the reservation was consumed. Private mappings are per-VMA and
	 * only the consumed reservations are tracked. When the VMA
	 * disappears, the original reservation is the VMA size and the
	 * consumed reservations are stored in the map. Hence, nothing
	 * else has to be done for private mappings here
	 */
	if (!vma || vma->vm_flags & VM_MAYSHARE) {
		add = region_add(resv_map, from, to, regions_needed, h, h_cg);

		if (unlikely(add < 0)) {
			hugetlb_acct_memory(h, -gbl_reserve);
			goto out_put_pages;
		} else if (unlikely(chg > add)) {
			/*
			 * pages in this range were added to the reserve
			 * map between region_chg and region_add.  This
			 * indicates a race with alloc_hugetlb_folio.  Adjust
			 * the subpool and reserve counts modified above
			 * based on the difference.
			 */
			long rsv_adjust;

			/*
			 * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the
			 * reference to h_cg->css. See comment below for detail.
			 */
			hugetlb_cgroup_uncharge_cgroup_rsvd(
				hstate_index(h),
				(chg - add) * pages_per_huge_page(h), h_cg);

			rsv_adjust = hugepage_subpool_put_pages(spool,
								chg - add);
			hugetlb_acct_memory(h, -rsv_adjust);
		} else if (h_cg) {
			/*
			 * The file_regions will hold their own reference to
			 * h_cg->css. So we should release the reference held
			 * via hugetlb_cgroup_charge_cgroup_rsvd() when we are
			 * done.
			 */
			hugetlb_cgroup_put_rsvd_cgroup(h_cg);
		}
	}
	return true;

out_put_pages:
	/* put back original number of pages, chg */
	(void)hugepage_subpool_put_pages(spool, chg);
out_uncharge_cgroup:
	hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
					    chg * pages_per_huge_page(h), h_cg);
out_err:
	hugetlb_vma_lock_free(vma);
	if (!vma || vma->vm_flags & VM_MAYSHARE)
		/* Only call region_abort if the region_chg succeeded but the
		 * region_add failed or didn't run.
		 */
		if (chg >= 0 && add < 0)
			region_abort(resv_map, from, to, regions_needed);
	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		kref_put(&resv_map->refs, resv_map_release);
		set_vma_resv_map(vma, NULL);
	}
	return false;
}
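/*
 * Worked example for the chg/add adjustment above: a shared mapping
 * reserving huge pages [0, 10) gets chg = 10 from region_chg(), and 10
 * pages are charged to the subpool and via hugetlb_acct_memory(). If a
 * racing alloc_hugetlb_folio() already added 2 of those offsets to the
 * reserve map before region_add() ran, region_add() returns add = 8, and
 * the code above returns chg - add = 2 pages to the subpool and
 * uncharges the same 2 pages worth of cgroup reservation.
 */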
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
								long freed)
{
	struct hstate *h = hstate_inode(inode);
	struct resv_map *resv_map = inode_resv_map(inode);
	long chg = 0;
	struct hugepage_subpool *spool = subpool_inode(inode);
	long gbl_reserve;

	/*
	 * Since this routine can be called in the evict inode path for all
	 * hugetlbfs inodes, resv_map could be NULL.
	 */
	if (resv_map) {
		chg = region_del(resv_map, start, end);
		/*
		 * region_del() can fail in the rare case where a region
		 * must be split and another region descriptor can not be
		 * allocated.  If end == LONG_MAX, it will not fail.
		 */
		if (chg < 0)
			return chg;
	}

	spin_lock(&inode->i_lock);
	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
	spin_unlock(&inode->i_lock);

	/*
	 * If the subpool has a minimum size, the number of global
	 * reservations to be released may be adjusted.
	 *
	 * Note that !resv_map implies freed == 0. So (chg - freed)
	 * won't go negative.
	 */
	gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
	hugetlb_acct_memory(h, -gbl_reserve);

	return 0;
}
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
static unsigned long page_table_shareable(struct vm_area_struct *svma,
				struct vm_area_struct *vma,
				unsigned long addr, pgoff_t idx)
{
	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
				svma->vm_start;
	unsigned long sbase = saddr & PUD_MASK;
	unsigned long s_end = sbase + PUD_SIZE;

	/* Allow segments to share if only one is marked locked */
	unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED_MASK;
	unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED_MASK;

	/*
	 * match the virtual addresses, permission and the alignment of the
	 * page table page.
	 *
	 * Also, vma_lock (vm_private_data) is required for sharing.
	 */
	if (pmd_index(addr) != pmd_index(saddr) ||
	    vm_flags != svm_flags ||
	    !range_in_vma(svma, sbase, s_end) ||
	    !svma->vm_private_data)
		return 0;

	return saddr;
}

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
{
	unsigned long start = addr & PUD_MASK;
	unsigned long end = start + PUD_SIZE;

#ifdef CONFIG_USERFAULTFD
	if (uffd_disable_huge_pmd_share(vma))
		return false;
#endif
	/*
	 * check on proper vm_flags and page table alignment
	 */
	if (!(vma->vm_flags & VM_MAYSHARE))
		return false;
	if (!vma->vm_private_data)	/* vma lock required for sharing */
		return false;
	if (!range_in_vma(vma, start, end))
		return false;
	return true;
}
/*
 * Determine if start,end range within vma could be mapped by shared pmd.
 * If yes, adjust start and end to cover range associated with possible
 * shared pmd mappings.
 */
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
	unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
		v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);

	/*
	 * vma needs to span at least one aligned PUD size, and the range
	 * must be at least partially within it.
	 */
	if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
		(*end <= v_start) || (*start >= v_end))
		return;

	/* Extend the range to be PUD aligned for a worst case scenario */
	if (*start > v_start)
		*start = ALIGN_DOWN(*start, PUD_SIZE);

	if (*end < v_end)
		*end = ALIGN(*end, PUD_SIZE);
}
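/*
 * For example, on x86-64 (PUD_SIZE == 1 GiB): a MAP_SHARED hugetlb vma
 * covering [1 GiB, 4 GiB) with an unmap range of [2.5 GiB, 2.6 GiB) is
 * widened to [2 GiB, 3 GiB), since any pmd page in that PUD-sized slot
 * may be shared with another mapping of the same file and must be
 * covered by the flush/invalidate.
 */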
/*
 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
 * and returns the corresponding pte. While this is not necessary for the
 * !shared pmd case because we can allocate the pmd later as well, it makes the
 * code much cleaner. pmd allocation is essential for the shared case because
 * pud has to be populated inside the same i_mmap_rwsem section - otherwise
 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
 * bad pmd for sharing.
 */
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
	struct vm_area_struct *svma;
	unsigned long saddr;
	pte_t *spte = NULL;
	pte_t *pte;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
		if (svma == vma)
			continue;

		saddr = page_table_shareable(svma, vma, addr, idx);
		if (saddr) {
			spte = hugetlb_walk(svma, saddr,
					    vma_mmu_pagesize(svma));
			if (spte) {
				get_page(virt_to_page(spte));
				break;
			}
		}
	}

	if (!spte)
		goto out;

	spin_lock(&mm->page_table_lock);
	if (pud_none(*pud)) {
		pud_populate(mm, pud,
				(pmd_t *)((unsigned long)spte & PAGE_MASK));
		mm_inc_nr_pmds(mm);
	} else {
		put_page(virt_to_page(spte));
	}
	spin_unlock(&mm->page_table_lock);
out:
	pte = (pte_t *)pmd_alloc(mm, pud, addr);
	i_mmap_unlock_read(mapping);
	return pte;
}
/*
 * unmap huge page backed by shared pte.
 *
 * Hugetlb pte page is ref counted at the time of mapping.  If pte is shared
 * indicated by page_count > 1, unmap is achieved by clearing pud and
 * decrementing the ref count. If count == 1, the pte page is not shared.
 *
 * Called with page table lock held.
 *
 * returns: 1 successfully unmapped a shared pte page
 *	    0 the underlying pte page is not shared, or it is the last user
 */
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);

	i_mmap_assert_write_locked(vma->vm_file->f_mapping);
	hugetlb_vma_assert_locked(vma);
	BUG_ON(page_count(virt_to_page(ptep)) == 0);
	if (page_count(virt_to_page(ptep)) == 1)
		return 0;

	pud_clear(pud);
	put_page(virt_to_page(ptep));
	mm_dec_nr_pmds(mm);
	return 1;
}

#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */

pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud)
{
	return NULL;
}

int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep)
{
	return 0;
}

void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
{
	return false;
}
#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);
	if (pud) {
		if (sz == PUD_SIZE) {
			pte = (pte_t *)pud;
		} else {
			BUG_ON(sz != PMD_SIZE);
			if (want_pmd_share(vma, addr) && pud_none(*pud))
				pte = huge_pmd_share(mm, vma, addr, pud);
			else
				pte = (pte_t *)pmd_alloc(mm, pud, addr);
		}
	}

	if (pte) {
		pte_t pteval = ptep_get_lockless(pte);

		BUG_ON(pte_present(pteval) && !pte_huge(pteval));
	}

	return pte;
}
/*
 * huge_pte_offset() - Walk the page table to resolve the hugepage
 * entry at address @addr
 *
 * Return: Pointer to page table entry (PUD or PMD) for
 * address @addr, or NULL if a !p*d_present() entry is encountered and the
 * size @sz doesn't match the hugepage size at this level of the page
 * table.
 */
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (!p4d_present(*p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (sz == PUD_SIZE)
		/* must be pud huge, non-present or none */
		return (pte_t *)pud;
	if (!pud_present(*pud))
		return NULL;
	/* must have a valid entry and size to go further */

	pmd = pmd_offset(pud, addr);
	/* must be pmd huge, non-present or none */
	return (pte_t *)pmd;
}
/*
 * Return a mask that can be used to update an address to the last huge
 * page in a page table page mapping size.  Used to skip non-present
 * page table entries when linearly scanning address ranges.  Architectures
 * with unique huge page to page table relationships can define their own
 * version of this routine.
 */
unsigned long hugetlb_mask_last_page(struct hstate *h)
{
	unsigned long hp_size = huge_page_size(h);

	if (hp_size == PUD_SIZE)
		return P4D_SIZE - PUD_SIZE;
	else if (hp_size == PMD_SIZE)
		return PUD_SIZE - PMD_SIZE;
	else
		return 0UL;
}

#else

/* See description above.  Architectures can provide their own version. */
__weak unsigned long hugetlb_mask_last_page(struct hstate *h)
{
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
	if (huge_page_size(h) == PMD_SIZE)
		return PUD_SIZE - PMD_SIZE;
#endif
	return 0UL;
}

#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
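/*
 * Example of how the mask is used: with 2 MB huge pages on x86-64 the
 * mask is PUD_SIZE - PMD_SIZE = 0x3fe00000. When a scan such as the loop
 * in hugetlb_change_protection() finds no page table at an address, it
 * does "address |= last_addr_mask", which jumps to the last 2 MB slot of
 * the current 1 GB region; the loop's "address += psize" then moves on
 * to the next PUD entry instead of probing every missing PMD
 * individually.
 */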
/*
 * These functions are overwritable if your architecture needs its own
 * implementation.
 */
bool isolate_hugetlb(struct folio *folio, struct list_head *list)
{
	bool ret = true;

	spin_lock_irq(&hugetlb_lock);
	if (!folio_test_hugetlb(folio) ||
	    !folio_test_hugetlb_migratable(folio) ||
	    !folio_try_get(folio)) {
		ret = false;
		goto unlock;
	}
	folio_clear_hugetlb_migratable(folio);
	list_move_tail(&folio->lru, list);
unlock:
	spin_unlock_irq(&hugetlb_lock);
	return ret;
}

int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
{
	int ret = 0;

	*hugetlb = false;
	spin_lock_irq(&hugetlb_lock);
	if (folio_test_hugetlb(folio)) {
		*hugetlb = true;
		if (folio_test_hugetlb_freed(folio))
			ret = 0;
		else if (folio_test_hugetlb_migratable(folio) || unpoison)
			ret = folio_try_get(folio);
		else
			ret = -EBUSY;
	}
	spin_unlock_irq(&hugetlb_lock);
	return ret;
}

int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
				bool *migratable_cleared)
{
	int ret;

	spin_lock_irq(&hugetlb_lock);
	ret = __get_huge_page_for_hwpoison(pfn, flags, migratable_cleared);
	spin_unlock_irq(&hugetlb_lock);
	return ret;
}

void folio_putback_active_hugetlb(struct folio *folio)
{
	spin_lock_irq(&hugetlb_lock);
	folio_set_hugetlb_migratable(folio);
	list_move_tail(&folio->lru, &(folio_hstate(folio))->hugepage_activelist);
	spin_unlock_irq(&hugetlb_lock);
	folio_put(folio);
}

void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason)
{
	struct hstate *h = folio_hstate(old_folio);

	hugetlb_cgroup_migrate(old_folio, new_folio);
	set_page_owner_migrate_reason(&new_folio->page, reason);

	/*
	 * transfer temporary state of the new hugetlb folio. This is
	 * reverse to other transitions because the newpage is going to
	 * be final while the old one will be freed so it takes over
	 * the temporary status.
	 *
	 * Also note that we have to transfer the per-node surplus state
	 * here as well otherwise the global surplus count will not match
	 * the per-node's.
	 */
	if (folio_test_hugetlb_temporary(new_folio)) {
		int old_nid = folio_nid(old_folio);
		int new_nid = folio_nid(new_folio);

		folio_set_hugetlb_temporary(old_folio);
		folio_clear_hugetlb_temporary(new_folio);

		/*
		 * There is no need to transfer the per-node surplus state
		 * when we do not cross the node.
		 */
		if (new_nid == old_nid)
			return;
		spin_lock_irq(&hugetlb_lock);
		if (h->surplus_huge_pages_node[old_nid]) {
			h->surplus_huge_pages_node[old_nid]--;
			h->surplus_huge_pages_node[new_nid]++;
		}
		spin_unlock_irq(&hugetlb_lock);
	}
}
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
				 unsigned long start,
				 unsigned long end)
{
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_notifier_range range;
	unsigned long address;
	spinlock_t *ptl;
	pte_t *ptep;

	if (!(vma->vm_flags & VM_MAYSHARE))
		return;

	if (start >= end)
		return;

	flush_cache_range(vma, start, end);
	/*
	 * No need to call adjust_range_if_pmd_sharing_possible(), because
	 * we have already done the PUD_SIZE alignment.
	 */
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
				start, end);
	mmu_notifier_invalidate_range_start(&range);
	hugetlb_vma_lock_write(vma);
	i_mmap_lock_write(vma->vm_file->f_mapping);
	for (address = start; address < end; address += PUD_SIZE) {
		ptep = hugetlb_walk(vma, address, sz);
		if (!ptep)
			continue;
		ptl = huge_pte_lock(h, mm, ptep);
		huge_pmd_unshare(mm, vma, address, ptep);
		spin_unlock(ptl);
	}
	flush_hugetlb_tlb_range(vma, start, end);
	i_mmap_unlock_write(vma->vm_file->f_mapping);
	hugetlb_vma_unlock_write(vma);
	/*
	 * No need to call mmu_notifier_arch_invalidate_secondary_tlbs(), see
	 * Documentation/mm/mmu_notifier.rst.
	 */
	mmu_notifier_invalidate_range_end(&range);
}

/*
 * This function will unconditionally remove all the shared pmd pgtable entries
 * within the specific vma for a hugetlbfs memory range.
 */
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
{
	hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
			ALIGN_DOWN(vma->vm_end, PUD_SIZE));
}
#ifdef CONFIG_CMA
static bool cma_reserve_called __initdata;

static int __init cmdline_parse_hugetlb_cma(char *p)
{
	int nid, count = 0;
	unsigned long tmp;
	char *s = p;

	while (*s) {
		if (sscanf(s, "%lu%n", &tmp, &count) != 1)
			break;

		if (s[count] == ':') {
			if (tmp >= MAX_NUMNODES)
				break;
			nid = array_index_nospec(tmp, MAX_NUMNODES);

			s += count + 1;
			tmp = memparse(s, &s);
			hugetlb_cma_size_in_node[nid] = tmp;
			hugetlb_cma_size += tmp;

			/*
			 * Skip the separator if we have one, otherwise
			 * break the parsing.
			 */
			if (*s == ',')
				s++;
			else
				break;
		} else {
			hugetlb_cma_size = memparse(p, &p);
			break;
		}
	}

	return 0;
}

early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);
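/*
 * The parser above accepts either a single size or per-node pairs, e.g.:
 *
 *	hugetlb_cma=4G		reserve 4 GiB of CMA, spread over online nodes
 *	hugetlb_cma=0:2G,2:2G	reserve 2 GiB on node 0 and 2 GiB on node 2
 *
 * (See Documentation/admin-guide/kernel-parameters.txt for the exact
 * syntax; the examples here just mirror what the code parses.)
 */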
void __init hugetlb_cma_reserve(int order)
{
	unsigned long size, reserved, per_node;
	bool node_specific_cma_alloc = false;
	int nid;

	cma_reserve_called = true;

	if (!hugetlb_cma_size)
		return;

	for (nid = 0; nid < MAX_NUMNODES; nid++) {
		if (hugetlb_cma_size_in_node[nid] == 0)
			continue;

		if (!node_online(nid)) {
			pr_warn("hugetlb_cma: invalid node %d specified\n", nid);
			hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
			hugetlb_cma_size_in_node[nid] = 0;
			continue;
		}

		if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) {
			pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n",
				nid, (PAGE_SIZE << order) / SZ_1M);
			hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
			hugetlb_cma_size_in_node[nid] = 0;
		} else {
			node_specific_cma_alloc = true;
		}
	}

	/* Validate the CMA size again in case some invalid nodes specified. */
	if (!hugetlb_cma_size)
		return;

	if (hugetlb_cma_size < (PAGE_SIZE << order)) {
		pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
			(PAGE_SIZE << order) / SZ_1M);
		hugetlb_cma_size = 0;
		return;
	}

	if (!node_specific_cma_alloc) {
		/*
		 * If 3 GB area is requested on a machine with 4 numa nodes,
		 * let's allocate 1 GB on first three nodes and ignore the last one.
		 */
		per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes);
		pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
			hugetlb_cma_size / SZ_1M, per_node / SZ_1M);
	}

	reserved = 0;
	for_each_online_node(nid) {
		int res;
		char name[CMA_MAX_NAME];

		if (node_specific_cma_alloc) {
			if (hugetlb_cma_size_in_node[nid] == 0)
				continue;

			size = hugetlb_cma_size_in_node[nid];
		} else {
			size = min(per_node, hugetlb_cma_size - reserved);
		}

		size = round_up(size, PAGE_SIZE << order);

		snprintf(name, sizeof(name), "hugetlb%d", nid);
		/*
		 * Note that 'order per bit' is based on smallest size that
		 * may be returned to CMA allocator in the case of
		 * huge page demotion.
		 */
		res = cma_declare_contiguous_nid(0, size, 0,
						 PAGE_SIZE << HUGETLB_PAGE_ORDER,
						 HUGETLB_PAGE_ORDER, false, name,
						 &hugetlb_cma[nid], nid);
		if (res) {
			pr_warn("hugetlb_cma: reservation failed: err %d, node %d",
				res, nid);
			continue;
		}

		reserved += size;
		pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
			size / SZ_1M, nid);

		if (reserved >= hugetlb_cma_size)
			break;
	}

	if (!reserved)
		/*
		 * hugetlb_cma_size is used to determine if allocations from
		 * cma are possible.  Set to zero if no cma regions are set up.
		 */
		hugetlb_cma_size = 0;
}
static void __init hugetlb_cma_check(void)
{
	if (!hugetlb_cma_size || cma_reserve_called)
		return;

	pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
}

#endif /* CONFIG_CMA */