// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/mmdebug.h>
#include <linux/sched/signal.h>
#include <linux/rmap.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/jhash.h>
#include <linux/numa.h>
#include <linux/llist.h>
#include <linux/cma.h>
#include <linux/migrate.h>
#include <linux/nospec.h>
#include <linux/delayacct.h>
#include <linux/memory.h>
#include <linux/mm_inline.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include <linux/page_owner.h>
#include "internal.h"
#include "hugetlb_vmemmap.h"
int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];
#ifdef CONFIG_CMA
static struct cma *hugetlb_cma[MAX_NUMNODES];
static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)
{
	return cma_pages_valid(hugetlb_cma[folio_nid(folio)], &folio->page,
				1 << order);
}
#else
static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)
{
	return false;
}
#endif
static unsigned long hugetlb_cma_size __initdata;
__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static bool __initdata parsed_valid_hugepagesz = true;
static bool __initdata parsed_default_hugepagesz;
static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;
/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page.  This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
 */
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
/* Forward declarations */
static int hugetlb_acct_memory(struct hstate *h, long delta);
static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);
static inline bool subpool_is_free(struct hugepage_subpool *spool)
{
	if (spool->count)
		return false;
	if (spool->max_hpages != -1)
		return spool->used_hpages == 0;
	if (spool->min_hpages != -1)
		return spool->rsv_hpages == spool->min_hpages;

	return true;
}
static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
						unsigned long irq_flags)
{
	spin_unlock_irqrestore(&spool->lock, irq_flags);

	/* If no pages are used, and no other handles to the subpool
	 * remain, give up any reservations based on minimum size and
	 * free the subpool */
	if (subpool_is_free(spool)) {
		if (spool->min_hpages != -1)
			hugetlb_acct_memory(spool->hstate,
						-spool->min_hpages);
		kfree(spool);
	}
}
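
/*
 * Create a new subpool for a hugetlbfs mount.  A min_hpages value other
 * than -1 is charged against the global pool up front so that a minimum
 * pool size can be guaranteed; -1 means no limit.
 */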
struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages)
{
	struct hugepage_subpool *spool;

	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = max_hpages;
	spool->hstate = h;
	spool->min_hpages = min_hpages;

	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
		kfree(spool);
		return NULL;
	}
	spool->rsv_hpages = min_hpages;

	return spool;
}
void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	unsigned long flags;

	spin_lock_irqsave(&spool->lock, flags);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool, flags);
}
/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy the
 * request.  Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward).  The returned value may
 * only be different than the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
 */
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;

	if (!spool)
		return ret;

	spin_lock_irq(&spool->lock);

	if (spool->max_hpages != -1) {		/* maximum size accounting */
		if ((spool->used_hpages + delta) <= spool->max_hpages)
			spool->used_hpages += delta;
		else {
			ret = -ENOMEM;
			goto unlock_ret;
		}
	}

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->rsv_hpages) {
		if (delta > spool->rsv_hpages) {
			/*
			 * Asking for more reserves than those already taken on
			 * behalf of subpool.  Return difference.
			 */
			ret = delta - spool->rsv_hpages;
			spool->rsv_hpages = 0;
		} else {
			ret = 0;	/* reserves already accounted for */
			spool->rsv_hpages -= delta;
		}
	}

unlock_ret:
	spin_unlock_irq(&spool->lock);
	return ret;
}
/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may only be different than the passed value (delta)
 * in the case where a subpool minimum size must be maintained.
 */
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;
	unsigned long flags;

	if (!spool)
		return delta;

	spin_lock_irqsave(&spool->lock, flags);

	if (spool->max_hpages != -1)		/* maximum size accounting */
		spool->used_hpages -= delta;

	 /* minimum size accounting */
	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
		if (spool->rsv_hpages + delta <= spool->min_hpages)
			ret = 0;
		else
			ret = spool->rsv_hpages + delta - spool->min_hpages;

		spool->rsv_hpages += delta;
		if (spool->rsv_hpages > spool->min_hpages)
			spool->rsv_hpages = spool->min_hpages;
	}

	/*
	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
	 * quota reference, free it now.
	 */
	unlock_or_release_subpool(spool, flags);

	return ret;
}
static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(file_inode(vma->vm_file));
}
/*
 * hugetlb vma_lock helper routines
 */
void hugetlb_vma_lock_read(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		down_read(&vma_lock->rw_sema);
	}
}

void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		up_read(&vma_lock->rw_sema);
	}
}

void hugetlb_vma_lock_write(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		down_write(&vma_lock->rw_sema);
	}
}

void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		up_write(&vma_lock->rw_sema);
	}
}

int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
{
	struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

	if (!__vma_shareable_lock(vma))
		return 1;

	return down_write_trylock(&vma_lock->rw_sema);
}

void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		lockdep_assert_held(&vma_lock->rw_sema);
	}
}
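
/* Called via kref_put() when the last reference to the vma_lock is dropped. */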
void hugetlb_vma_lock_release(struct kref *kref)
{
	struct hugetlb_vma_lock *vma_lock = container_of(kref,
			struct hugetlb_vma_lock, refs);

	kfree(vma_lock);
}
static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
{
	struct vm_area_struct *vma = vma_lock->vma;

	/*
	 * vma_lock structure may or may not be released as a result of put,
	 * it certainly will no longer be attached to vma so clear pointer.
	 * Semaphore synchronizes access to vma_lock->vma field.
	 */
	vma_lock->vma = NULL;
	vma->vm_private_data = NULL;
	up_write(&vma_lock->rw_sema);
	kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
}
static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		__hugetlb_vma_unlock_write_put(vma_lock);
	}
}
static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
{
	/*
	 * Only present in sharable vmas.
	 */
	if (!vma || !__vma_shareable_lock(vma))
		return;

	if (vma->vm_private_data) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		down_write(&vma_lock->rw_sema);
		__hugetlb_vma_unlock_write_put(vma_lock);
	}
}
static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
{
	struct hugetlb_vma_lock *vma_lock;

	/* Only establish in (flags) sharable vmas */
	if (!vma || !(vma->vm_flags & VM_MAYSHARE))
		return;

	/* Should never get here with non-NULL vm_private_data */
	if (vma->vm_private_data)
		return;

	vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
	if (!vma_lock) {
		/*
		 * If we can not allocate structure, then vma can not
		 * participate in pmd sharing.  This is only a possible
		 * performance enhancement and memory saving issue.
		 * However, the lock is also used to synchronize page
		 * faults with truncation.  If the lock is not present,
		 * unlikely races could leave pages in a file past i_size
		 * until the file is removed.  Warn in the unlikely case of
		 * allocation failure.
		 */
		pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
		return;
	}

	kref_init(&vma_lock->refs);
	init_rwsem(&vma_lock->rw_sema);
	vma_lock->vma = vma;
	vma->vm_private_data = vma_lock;
}
/*
 * Helper that removes a struct file_region from the resv_map cache and returns
 * it for use.
 */
static struct file_region *
get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
{
	struct file_region *nrg;

	VM_BUG_ON(resv->region_cache_count <= 0);

	resv->region_cache_count--;
	nrg = list_first_entry(&resv->region_cache, struct file_region, link);
	list_del(&nrg->link);

	nrg->from = from;
	nrg->to = to;

	return nrg;
}
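
/*
 * Helper that copies hugetlb_cgroup uncharge info from one file_region to
 * another, taking an extra css reference for the new region.
 */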
static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
					      struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	nrg->reservation_counter = rg->reservation_counter;
	nrg->css = rg->css;
	if (rg->css)
		css_get(rg->css);
#endif
}
/* Helper that records hugetlb_cgroup uncharge info. */
static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
						struct hstate *h,
						struct resv_map *resv,
						struct file_region *nrg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (h_cg) {
		nrg->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		nrg->css = &h_cg->css;
		/*
		 * The caller will hold exactly one h_cg->css reference for the
		 * whole contiguous reservation region. But this area might be
		 * scattered when there are already some file_regions reside in
		 * it. As a result, many file_regions may share only one css
		 * reference. In order to ensure that one file_region must hold
		 * exactly one h_cg->css reference, we should do css_get for
		 * each file_region and leave the reference held by caller
		 * untouched.
		 */
		css_get(&h_cg->css);
		if (!resv->pages_per_hpage)
			resv->pages_per_hpage = pages_per_huge_page(h);
		/* pages_per_hpage should be the same for all entries in
		 * a resv_map.
		 */
		VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
	} else {
		nrg->reservation_counter = NULL;
		nrg->css = NULL;
	}
#endif
}
static void put_uncharge_info(struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (rg->css)
		css_put(rg->css);
#endif
}
static bool has_same_uncharge_info(struct file_region *rg,
				   struct file_region *org)
{
#ifdef CONFIG_CGROUP_HUGETLB
	return rg->reservation_counter == org->reservation_counter &&
	       rg->css == org->css;
#else
	return true;
#endif
}
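
/*
 * Merge a file_region with its neighbours when the ranges are adjacent and
 * carry the same uncharge info, dropping the redundant entries.
 */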
static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
{
	struct file_region *nrg, *prg;

	prg = list_prev_entry(rg, link);
	if (&prg->link != &resv->regions && prg->to == rg->from &&
	    has_same_uncharge_info(prg, rg)) {
		prg->to = rg->to;

		list_del(&rg->link);
		put_uncharge_info(rg);
		kfree(rg);

		rg = prg;
	}

	nrg = list_next_entry(rg, link);
	if (&nrg->link != &resv->regions && nrg->from == rg->to &&
	    has_same_uncharge_info(nrg, rg)) {
		nrg->from = rg->from;

		list_del(&rg->link);
		put_uncharge_info(rg);
		kfree(rg);
	}
}
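
/*
 * Add a new file_region covering [from, to) after the list entry 'rg', or,
 * when regions_needed is non-NULL, only count the entry that would be
 * needed.  Returns the number of pages covered.
 */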
static long
hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from,
		     long to, struct hstate *h, struct hugetlb_cgroup *cg,
		     long *regions_needed)
{
	struct file_region *nrg;

	if (!regions_needed) {
		nrg = get_file_region_entry_from_cache(map, from, to);
		record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
		list_add(&nrg->link, rg);
		coalesce_file_region(map, nrg);
	} else
		*regions_needed += 1;

	return to - from;
}
/*
 * Must be called with resv->lock held.
 *
 * Calling this with regions_needed != NULL will count the number of pages
 * to be added but will not modify the linked list. And regions_needed will
 * indicate the number of file_regions needed in the cache to carry out to add
 * the regions for this range.
 */
static long add_reservation_in_range(struct resv_map *resv, long f, long t,
				     struct hugetlb_cgroup *h_cg,
				     struct hstate *h, long *regions_needed)
{
	long add = 0;
	struct list_head *head = &resv->regions;
	long last_accounted_offset = f;
	struct file_region *iter, *trg = NULL;
	struct list_head *rg = NULL;

	if (regions_needed)
		*regions_needed = 0;

	/* In this loop, we essentially handle an entry for the range
	 * [last_accounted_offset, iter->from), at every iteration, with some
	 * bounds checking.
	 */
	list_for_each_entry_safe(iter, trg, head, link) {
		/* Skip irrelevant regions that start before our range. */
		if (iter->from < f) {
			/* If this region ends after the last accounted offset,
			 * then we need to update last_accounted_offset.
			 */
			if (iter->to > last_accounted_offset)
				last_accounted_offset = iter->to;
			continue;
		}

		/* When we find a region that starts beyond our range, we've
		 * finished.
		 */
		if (iter->from >= t) {
			rg = iter->link.prev;
			break;
		}

		/* Add an entry for last_accounted_offset -> iter->from, and
		 * update last_accounted_offset.
		 */
		if (iter->from > last_accounted_offset)
			add += hugetlb_resv_map_add(resv, iter->link.prev,
						    last_accounted_offset,
						    iter->from, h, h_cg,
						    regions_needed);

		last_accounted_offset = iter->to;
	}

	/* Handle the case where our range extends beyond
	 * last_accounted_offset.
	 */
	if (!rg)
		rg = head->prev;
	if (last_accounted_offset < t)
		add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
					    t, h, h_cg, regions_needed);

	return add;
}
/* Must be called with resv->lock acquired. Will drop lock to allocate entries.
 */
static int allocate_file_region_entries(struct resv_map *resv,
					int regions_needed)
	__must_hold(&resv->lock)
{
	LIST_HEAD(allocated_regions);
	int to_allocate = 0, i = 0;
	struct file_region *trg = NULL, *rg = NULL;

	VM_BUG_ON(regions_needed < 0);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * the number of in progress add operations plus regions_needed.
	 *
	 * This is a while loop because when we drop the lock, some other call
	 * to region_add or region_del may have consumed some region_entries,
	 * so we keep looping here until we finally have enough entries for
	 * (adds_in_progress + regions_needed).
	 */
	while (resv->region_cache_count <
	       (resv->adds_in_progress + regions_needed)) {
		to_allocate = resv->adds_in_progress + regions_needed -
			      resv->region_cache_count;

		/* At this point, we should have enough entries in the cache
		 * for all the existing adds_in_progress. We should only be
		 * needing to allocate for regions_needed.
		 */
		VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);

		spin_unlock(&resv->lock);
		for (i = 0; i < to_allocate; i++) {
			trg = kmalloc(sizeof(*trg), GFP_KERNEL);
			if (!trg)
				goto out_of_memory;
			list_add(&trg->link, &allocated_regions);
		}

		spin_lock(&resv->lock);

		list_splice(&allocated_regions, &resv->region_cache);
		resv->region_cache_count += to_allocate;
	}

	return 0;

out_of_memory:
	list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
		list_del(&rg->link);
		kfree(rg);
	}
	return -ENOMEM;
}
/*
 * Add the huge page range represented by [f, t) to the reserve
 * map.  Regions will be taken from the cache to fill in this range.
 * Sufficient regions should exist in the cache due to the previous
 * call to region_chg with the same range, but in some cases the cache will not
 * have sufficient entries due to races with other code doing region_add or
 * region_del.  The extra needed entries will be allocated.
 *
 * regions_needed is the out value provided by a previous call to region_chg.
 *
 * Return the number of new huge pages added to the map.  This number is greater
 * than or equal to zero.  If file_region entries needed to be allocated for
 * this operation and we were not able to allocate, it returns -ENOMEM.
 * region_add of regions of length 1 never allocate file_regions and cannot
 * fail; region_chg will always allocate at least 1 entry and a region_add for
 * 1 page will only require at most 1 entry.
 */
static long region_add(struct resv_map *resv, long f, long t,
		       long in_regions_needed, struct hstate *h,
		       struct hugetlb_cgroup *h_cg)
{
	long add = 0, actual_regions_needed = 0;

	spin_lock(&resv->lock);
retry:

	/* Count how many regions are actually needed to execute this add. */
	add_reservation_in_range(resv, f, t, NULL, NULL,
				 &actual_regions_needed);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * this add operation. Note that actual_regions_needed may be greater
	 * than in_regions_needed, as the resv_map may have been modified since
	 * the region_chg call. In this case, we need to make sure that we
	 * allocate extra entries, such that we have enough for all the
	 * existing adds_in_progress, plus the excess needed for this
	 * operation.
	 */
	if (actual_regions_needed > in_regions_needed &&
	    resv->region_cache_count <
		    resv->adds_in_progress +
			    (actual_regions_needed - in_regions_needed)) {
		/* region_add operation of range 1 should never need to
		 * allocate file_region entries.
		 */
		VM_BUG_ON(t - f <= 1);

		if (allocate_file_region_entries(
			    resv, actual_regions_needed - in_regions_needed)) {
			return -ENOMEM;
		}

		goto retry;
	}

	add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);

	resv->adds_in_progress -= in_regions_needed;

	spin_unlock(&resv->lock);
	return add;
}
/*
 * Examine the existing reserve map and determine how many
 * huge pages in the specified range [f, t) are NOT currently
 * represented.  This routine is called before a subsequent
 * call to region_add that will actually modify the reserve
 * map to add the specified range [f, t).  region_chg does
 * not change the number of huge pages represented by the
 * map.  A number of new file_region structures is added to the cache as a
 * placeholder, for the subsequent region_add call to use. At least 1
 * file_region structure is added.
 *
 * out_regions_needed is the number of regions added to the
 * resv->adds_in_progress.  This value needs to be provided to a follow up call
 * to region_add or region_abort for proper accounting.
 *
 * Returns the number of huge pages that need to be added to the existing
 * reservation map for the range [f, t).  This number is greater or equal to
 * zero.  -ENOMEM is returned if a new file_region structure or cache entry
 * is needed and can not be allocated.
 */
static long region_chg(struct resv_map *resv, long f, long t,
		       long *out_regions_needed)
{
	long chg = 0;

	spin_lock(&resv->lock);

	/* Count how many hugepages in this range are NOT represented. */
	chg = add_reservation_in_range(resv, f, t, NULL, NULL,
				       out_regions_needed);

	if (*out_regions_needed == 0)
		*out_regions_needed = 1;

	if (allocate_file_region_entries(resv, *out_regions_needed))
		return -ENOMEM;

	resv->adds_in_progress += *out_regions_needed;

	spin_unlock(&resv->lock);
	return chg;
}
/*
 * Abort the in progress add operation.  The adds_in_progress field
 * of the resv_map keeps track of the operations in progress between
 * calls to region_chg and region_add.  Operations are sometimes
 * aborted after the call to region_chg.  In such cases, region_abort
 * is called to decrement the adds_in_progress counter. regions_needed
 * is the value returned by the region_chg call, it is used to decrement
 * the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine.  They are kept to make reading the calling code easier as
 * arguments will match the associated region_chg call.
 */
static void region_abort(struct resv_map *resv, long f, long t,
			 long regions_needed)
{
	spin_lock(&resv->lock);
	VM_BUG_ON(!resv->region_cache_count);
	resv->adds_in_progress -= regions_needed;
	spin_unlock(&resv->lock);
}
/*
 * Delete the specified range [f, t) from the reserve map.  If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted.  Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more.  In the
 * case where a region must be split, a new region descriptor must
 * be allocated.  If the allocation fails, -ENOMEM will be returned.
 * NOTE: If the parameter t == LONG_MAX, then we will never split
 * a region and possibly return -ENOMEM.  Callers specifying
 * t == LONG_MAX do not need to check for -ENOMEM error.
 */
static long region_del(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *trg;
	struct file_region *nrg = NULL;
	long del = 0;

retry:
	spin_lock(&resv->lock);
	list_for_each_entry_safe(rg, trg, head, link) {
		/*
		 * Skip regions before the range to be deleted.  file_region
		 * ranges are normally of the form [from, to).  However, there
		 * may be a "placeholder" entry in the map which is of the form
		 * (from, to) with from == to.  Check for placeholder entries
		 * at the beginning of the range to be deleted.
		 */
		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
			continue;

		if (rg->from >= t)
			break;

		if (f > rg->from && t < rg->to) { /* Must split region */
			/*
			 * Check for an entry in the cache before dropping
			 * lock and attempting allocation.
			 */
			if (!nrg &&
			    resv->region_cache_count > resv->adds_in_progress) {
				nrg = list_first_entry(&resv->region_cache,
							struct file_region,
							link);
				list_del(&nrg->link);
				resv->region_cache_count--;
			}

			if (!nrg) {
				spin_unlock(&resv->lock);
				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
				if (!nrg)
					return -ENOMEM;
				goto retry;
			}

			del += t - f;
			hugetlb_cgroup_uncharge_file_region(
				resv, rg, t - f, false);

			/* New entry for end of split region */
			nrg->from = t;
			nrg->to = rg->to;

			copy_hugetlb_cgroup_uncharge_info(nrg, rg);

			INIT_LIST_HEAD(&nrg->link);

			/* Original entry is trimmed */
			rg->to = f;

			list_add(&nrg->link, &rg->link);
			nrg = NULL;
			break;
		}

		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
			del += rg->to - rg->from;
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - rg->from, true);
			list_del(&rg->link);
			kfree(rg);
			continue;
		}

		if (f <= rg->from) {	/* Trim beginning of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    t - rg->from, false);

			del += t - rg->from;
			rg->from = t;
		} else {		/* Trim end of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - f, false);

			del += rg->to - f;
			rg->to = f;
		}
	}

	spin_unlock(&resv->lock);
	kfree(nrg);
	return del;
}
/*
 * A rare out of memory error was encountered which prevented removal of
 * the reserve map region for a page.  The huge page itself was free'ed
 * and removed from the page cache.  This routine will adjust the subpool
 * usage count, and the global reserve count if needed.  By incrementing
 * these counts, the reserve map entry which could not be deleted will
 * appear as a "reserved" entry instead of simply dangling with incorrect
 * counts.
 */
void hugetlb_fix_reserve_counts(struct inode *inode)
{
	struct hugepage_subpool *spool = subpool_inode(inode);
	long rsv_adjust;
	bool reserved = false;

	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
	if (rsv_adjust > 0) {
		struct hstate *h = hstate_inode(inode);

		if (!hugetlb_acct_memory(h, 1))
			reserved = true;
	} else if (!rsv_adjust) {
		reserved = true;
	}

	if (!reserved)
		pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
}
/*
 * Count and return the number of huge pages in the reserve map
 * that intersect with the range [f, t).
 */
static long region_count(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg;
	long chg = 0;

	spin_lock(&resv->lock);
	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}
	spin_unlock(&resv->lock);

	return chg;
}
/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}

pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address)
{
	return vma_hugecache_offset(hstate_vma(vma), vma, address);
}
EXPORT_SYMBOL_GPL(linear_hugepage_index);
/*
 * vma_kernel_pagesize - Page size granularity for this VMA.
 * @vma: The user mapping.
 *
 * Folios in this VMA will be aligned to, and at least the size of the
 * number of bytes returned by this function.
 *
 * Return: The default size of the folios allocated when backing a VMA.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->pagesize)
		return vma->vm_ops->pagesize(vma);
	return PAGE_SIZE;
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific 'strong'
 * version of this symbol is required.
 */
__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}
/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have their future faults succeed.
 *
 * With the exception of hugetlb_dup_vma_private() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and this persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * reservation, i.e. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
					unsigned long value)
{
	vma->vm_private_data = (void *)value;
}
static void
resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
					  struct hugetlb_cgroup *h_cg,
					  struct hstate *h)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (!h_cg || !h) {
		resv_map->reservation_counter = NULL;
		resv_map->pages_per_hpage = 0;
		resv_map->css = NULL;
	} else {
		resv_map->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		resv_map->pages_per_hpage = pages_per_huge_page(h);
		resv_map->css = &h_cg->css;
	}
#endif
}
struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);

	if (!resv_map || !rg) {
		kfree(resv_map);
		kfree(rg);
		return NULL;
	}

	kref_init(&resv_map->refs);
	spin_lock_init(&resv_map->lock);
	INIT_LIST_HEAD(&resv_map->regions);
	resv_map->adds_in_progress = 0;
	/*
	 * Initialize these to 0. On shared mappings, 0's here indicate these
	 * fields don't do cgroup accounting. On private mappings, these will be
	 * re-initialized to the proper values, to indicate that hugetlb cgroup
	 * reservations are to be un-charged from here.
	 */
	resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);

	INIT_LIST_HEAD(&resv_map->region_cache);
	list_add(&rg->link, &resv_map->region_cache);
	resv_map->region_cache_count = 1;

	return resv_map;
}
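
/* Called via kref_put() when the last reference to a resv_map is dropped. */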
void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
	struct list_head *head = &resv_map->region_cache;
	struct file_region *rg, *trg;

	/* Clear out any active regions before we release the map. */
	region_del(resv_map, 0, LONG_MAX);

	/* ... and any entries left in the cache */
	list_for_each_entry_safe(rg, trg, head, link) {
		list_del(&rg->link);
		kfree(rg);
	}

	VM_BUG_ON(resv_map->adds_in_progress);

	kfree(resv_map);
}
static inline struct resv_map *inode_resv_map(struct inode *inode)
{
	/*
	 * At inode evict time, i_mapping may not point to the original
	 * address space within the inode.  This original address space
	 * contains the pointer to the resv_map.  So, always use the
	 * address space embedded within the inode.
	 * The VERY common case is inode->mapping == &inode->i_data but,
	 * this may not be true for device special inodes.
	 */
	return (struct resv_map *)(&inode->i_data)->private_data;
}
static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (vma->vm_flags & VM_MAYSHARE) {
		struct address_space *mapping = vma->vm_file->f_mapping;
		struct inode *inode = mapping->host;

		return inode_resv_map(inode);

	} else {
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	}
}
static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);

	return (get_vma_private_data(vma) & flag) != 0;
}
void hugetlb_dup_vma_private(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	/*
	 * Clear vm_private_data
	 * - For shared mappings this is a per-vma semaphore that may be
	 *   allocated in a subsequent call to hugetlb_vm_op_open.
	 *   Before clearing, make sure pointer is not associated with vma
	 *   as this will leak the structure.  This is the case when called
	 *   via clear_vma_resv_huge_pages() and hugetlb_vm_op_open has already
	 *   been called to allocate a new structure.
	 * - For MAP_PRIVATE mappings, this is the reserve map which does
	 *   not apply to children.  Faults generated by the children are
	 *   not guaranteed to succeed, even if read-only.
	 */
	if (vma->vm_flags & VM_MAYSHARE) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		if (vma_lock && vma_lock->vma != vma)
			vma->vm_private_data = NULL;
	} else
		vma->vm_private_data = NULL;
}
/*
 * Reset and decrement one ref on hugepage private reservation.
 * Called with mm->mmap_lock writer semaphore held.
 * This function should be only used by move_vma() and operate on
 * same sized vma. It should never come here with last ref on the
 * reservation.
 */
void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	/*
	 * Clear the old hugetlb private page reservation.
	 * It has already been transferred to new_vma.
	 *
	 * During a mremap() operation of a hugetlb vma we call move_vma()
	 * which copies vma into new_vma and unmaps vma. After the copy
	 * operation both new_vma and vma share a reference to the resv_map
	 * struct, and at that point vma is about to be unmapped. We don't
	 * want to return the reservation to the pool at unmap of vma because
	 * the reservation still lives on in new_vma, so simply decrement the
	 * ref here and remove the resv_map reference from this vma.
	 */
	struct resv_map *reservations = vma_resv_map(vma);

	if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
		kref_put(&reservations->refs, resv_map_release);
	}

	hugetlb_dup_vma_private(vma);
}
/* Returns true if the VMA has associated reserve pages */
static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
{
	if (vma->vm_flags & VM_NORESERVE) {
		/*
		 * This address is already reserved by other process(chg == 0),
		 * so, we should decrement reserved count. Without decrementing,
		 * reserve count remains after releasing inode, because this
		 * allocated page will go into page cache and is regarded as
		 * coming from reserved pool in releasing step.  Currently, we
		 * don't have any other solution to deal with this situation
		 * properly, so add work-around here.
		 */
		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
			return true;
		else
			return false;
	}

	/* Shared mappings always use reserves */
	if (vma->vm_flags & VM_MAYSHARE) {
		/*
		 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
		 * be a region map for all pages.  The only situation where
		 * there is no region map is if a hole was punched via
		 * fallocate.  In this case, there really are no reserves to
		 * use.  This situation is indicated if chg != 0.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	/*
	 * Only the process that called mmap() has reserves for
	 * private mappings.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/*
		 * Like the shared case above, a hole punch or truncate
		 * could have been performed on the private mapping.
		 * Examine the value of chg to determine if reserves
		 * actually exist or were previously consumed.
		 * Very Subtle - The value of chg comes from a previous
		 * call to vma_needs_reserves().  The reserve map for
		 * private mappings has different (opposite) semantics
		 * than that of shared mappings.  vma_needs_reserves()
		 * has already taken this difference in semantics into
		 * account.  Therefore, the meaning of chg is the same
		 * as in the shared case above.  Code could easily be
		 * combined, but keeping it separate draws attention to
		 * subtle differences.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	return false;
}
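
/*
 * Return a hugetlb folio to the per-node free list.  Caller must hold
 * hugetlb_lock and the folio must have no remaining references.
 */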
static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
{
	int nid = folio_nid(folio);

	lockdep_assert_held(&hugetlb_lock);
	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);

	list_move(&folio->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
	folio_set_hugetlb_freed(folio);
}
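
/*
 * Dequeue a free hugetlb folio from the given node, skipping hwpoisoned
 * folios and, for long-term pinned allocations, folios that are not
 * long-term pinnable.  Caller must hold hugetlb_lock.
 */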
static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h,
								int nid)
{
	struct folio *folio;
	bool pin = !!(current->flags & PF_MEMALLOC_PIN);

	lockdep_assert_held(&hugetlb_lock);
	list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) {
		if (pin && !folio_is_longterm_pinnable(folio))
			continue;

		if (folio_test_hwpoison(folio))
			continue;

		list_move(&folio->lru, &h->hugepage_activelist);
		folio_ref_unfreeze(folio, 1);
		folio_clear_hugetlb_freed(folio);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		return folio;
	}

	return NULL;
}
static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask,
							int nid, nodemask_t *nmask)
{
	unsigned int cpuset_mems_cookie;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;
	int node = NUMA_NO_NODE;

	zonelist = node_zonelist(nid, gfp_mask);

retry_cpuset:
	cpuset_mems_cookie = read_mems_allowed_begin();
	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
		struct folio *folio;

		if (!cpuset_zone_allowed(zone, gfp_mask))
			continue;
		/*
		 * no need to ask again on the same node. Pool is node rather than
		 * zone aware
		 */
		if (zone_to_nid(zone) == node)
			continue;
		node = zone_to_nid(zone);

		folio = dequeue_hugetlb_folio_node_exact(h, node);
		if (folio)
			return folio;
	}
	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
		goto retry_cpuset;

	return NULL;
}
static unsigned long available_huge_pages(struct hstate *h)
{
	return h->free_huge_pages - h->resv_huge_pages;
}
static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve,
				long chg)
{
	struct folio *folio = NULL;
	struct mempolicy *mpol;
	gfp_t gfp_mask;
	nodemask_t *nodemask;
	int nid;

	/*
	 * A child process with MAP_PRIVATE mappings created by their parent
	 * have no page reserves. This check ensures that reservations are
	 * not "stolen". The child may still get SIGKILLed
	 */
	if (!vma_has_reserves(vma, chg) && !available_huge_pages(h))
		goto err;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && !available_huge_pages(h))
		goto err;

	gfp_mask = htlb_alloc_mask(h);
	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);

	if (mpol_is_preferred_many(mpol)) {
		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
							nid, nodemask);

		/* Fallback to all nodes if page==NULL */
		nodemask = NULL;
	}

	if (!folio)
		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
							nid, nodemask);

	if (folio && !avoid_reserve && vma_has_reserves(vma, chg)) {
		folio_set_hugetlb_restore_reserve(folio);
		h->resv_huge_pages--;
	}

	mpol_cond_put(mpol);
	return folio;

err:
	return NULL;
}
/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node_in(nid, *nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}
/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

	return nid;
}
/*
 * helper for remove_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}
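
/*
 * Iterate over the allowed node mask in round-robin fashion, starting from
 * the hstate's saved next node, so allocations and frees are spread evenly
 * across nodes.  Each walk visits every allowed node at most once.
 */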
#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
		nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
		nr_nodes--)
/* used to demote non-gigantic_huge pages as well */
static void __destroy_compound_gigantic_folio(struct folio *folio,
					unsigned int order, bool demote)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p;

	atomic_set(&folio->_entire_mapcount, 0);
	atomic_set(&folio->_nr_pages_mapped, 0);
	atomic_set(&folio->_pincount, 0);

	for (i = 1; i < nr_pages; i++) {
		p = folio_page(folio, i);
		p->flags &= ~PAGE_FLAGS_CHECK_AT_FREE;
		p->mapping = NULL;
		clear_compound_head(p);
		if (!demote)
			set_page_refcounted(p);
	}

	__folio_clear_head(folio);
}
static void destroy_compound_hugetlb_folio_for_demote(struct folio *folio,
					unsigned int order)
{
	__destroy_compound_gigantic_folio(folio, order, true);
}
#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static void destroy_compound_gigantic_folio(struct folio *folio,
					unsigned int order)
{
	__destroy_compound_gigantic_folio(folio, order, false);
}

static void free_gigantic_folio(struct folio *folio, unsigned int order)
{
	/*
	 * If the page isn't allocated using the cma allocator,
	 * cma_release() returns false.
	 */
#ifdef CONFIG_CMA
	int nid = folio_nid(folio);

	if (cma_release(hugetlb_cma[nid], &folio->page, 1 << order))
		return;
#endif

	free_contig_range(folio_pfn(folio), 1 << order);
}
#ifdef CONFIG_CONTIG_ALLOC
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nodemask)
{
	struct page *page;
	unsigned long nr_pages = pages_per_huge_page(h);
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

#ifdef CONFIG_CMA
	{
		int node;

		if (hugetlb_cma[nid]) {
			page = cma_alloc(hugetlb_cma[nid], nr_pages,
					huge_page_order(h), true);
			if (page)
				return page_folio(page);
		}

		if (!(gfp_mask & __GFP_THISNODE)) {
			for_each_node_mask(node, *nodemask) {
				if (node == nid || !hugetlb_cma[node])
					continue;

				page = cma_alloc(hugetlb_cma[node], nr_pages,
						huge_page_order(h), true);
				if (page)
					return page_folio(page);
			}
		}
	}
#endif

	page = alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
	return page ? page_folio(page) : NULL;
}
#else /* !CONFIG_CONTIG_ALLOC */
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
#endif /* CONFIG_CONTIG_ALLOC */

#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
static inline void free_gigantic_folio(struct folio *folio,
						unsigned int order) { }
static inline void destroy_compound_gigantic_folio(struct folio *folio,
						unsigned int order) { }
#endif
static inline void __clear_hugetlb_destructor(struct hstate *h,
						struct folio *folio)
{
	lockdep_assert_held(&hugetlb_lock);

	folio_clear_hugetlb(folio);
}
/*
 * Remove hugetlb folio from lists.
 * If vmemmap exists for the folio, update dtor so that the folio appears
 * as just a compound page.  Otherwise, wait until after allocating vmemmap
 * to update dtor.
 *
 * A reference is held on the folio, except in the case of demote.
 *
 * Must be called with hugetlb lock held.
 */
static void __remove_hugetlb_folio(struct hstate *h, struct folio *folio,
							bool adjust_surplus,
							bool demote)
{
	int nid = folio_nid(folio);

	VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio(folio), folio);
	VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio_rsvd(folio), folio);

	lockdep_assert_held(&hugetlb_lock);
	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return;

	list_del(&folio->lru);

	if (folio_test_hugetlb_freed(folio)) {
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
	}
	if (adjust_surplus) {
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	}

	/*
	 * We can only clear the hugetlb destructor after allocating vmemmap
	 * pages.  Otherwise, someone (memory error handling) may try to write
	 * to tail struct pages.
	 */
	if (!folio_test_hugetlb_vmemmap_optimized(folio))
		__clear_hugetlb_destructor(h, folio);

	/*
	 * In the case of demote we do not ref count the page as it will soon
	 * be turned into a page of smaller size.
	 */
	if (!demote)
		folio_ref_unfreeze(folio, 1);

	h->nr_huge_pages--;
	h->nr_huge_pages_node[nid]--;
}
static void remove_hugetlb_folio(struct hstate *h, struct folio *folio,
							bool adjust_surplus)
{
	__remove_hugetlb_folio(h, folio, adjust_surplus, false);
}

static void remove_hugetlb_folio_for_demote(struct hstate *h, struct folio *folio,
							bool adjust_surplus)
{
	__remove_hugetlb_folio(h, folio, adjust_surplus, true);
}
static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
			     bool adjust_surplus)
{
	int zeroed;
	int nid = folio_nid(folio);

	VM_BUG_ON_FOLIO(!folio_test_hugetlb_vmemmap_optimized(folio), folio);

	lockdep_assert_held(&hugetlb_lock);

	INIT_LIST_HEAD(&folio->lru);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;

	if (adjust_surplus) {
		h->surplus_huge_pages++;
		h->surplus_huge_pages_node[nid]++;
	}

	folio_set_hugetlb(folio);
	folio_change_private(folio, NULL);
	/*
	 * We have to set hugetlb_vmemmap_optimized again as above
	 * folio_change_private(folio, NULL) cleared it.
	 */
	folio_set_hugetlb_vmemmap_optimized(folio);

	/*
	 * This folio is about to be managed by the hugetlb allocator and
	 * should have no users.  Drop our reference, and check for others
	 * just in case.
	 */
	zeroed = folio_put_testzero(folio);
	if (unlikely(!zeroed))
		/*
		 * It is VERY unlikely someone else has taken a ref
		 * on the folio.  In this case, we simply return as
		 * free_huge_folio() will be called when this other ref
		 * is dropped.
		 */
		return;

	arch_clear_hugepage_flags(&folio->page);
	enqueue_hugetlb_folio(h, folio);
}
static void __update_and_free_hugetlb_folio(struct hstate *h,
						struct folio *folio)
{
	bool clear_dtor = folio_test_hugetlb_vmemmap_optimized(folio);

	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return;

	/*
	 * If we don't know which subpages are hwpoisoned, we can't free
	 * the hugepage, so it's leaked intentionally.
	 */
	if (folio_test_hugetlb_raw_hwp_unreliable(folio))
		return;

	if (hugetlb_vmemmap_restore(h, &folio->page)) {
		spin_lock_irq(&hugetlb_lock);
		/*
		 * If we cannot allocate vmemmap pages, just refuse to free the
		 * page and put the page back on the hugetlb free list and treat
		 * as a surplus page.
		 */
		add_hugetlb_folio(h, folio, true);
		spin_unlock_irq(&hugetlb_lock);
		return;
	}

	/*
	 * Move PageHWPoison flag from head page to the raw error pages,
	 * which makes any healthy subpages reusable.
	 */
	if (unlikely(folio_test_hwpoison(folio)))
		folio_clear_hugetlb_hwpoison(folio);

	/*
	 * If vmemmap pages were allocated above, then we need to clear the
	 * hugetlb destructor under the hugetlb lock.
	 */
	if (clear_dtor) {
		spin_lock_irq(&hugetlb_lock);
		__clear_hugetlb_destructor(h, folio);
		spin_unlock_irq(&hugetlb_lock);
	}

	/*
	 * Non-gigantic pages demoted from CMA allocated gigantic pages
	 * need to be given back to CMA in free_gigantic_folio.
	 */
	if (hstate_is_gigantic(h) ||
	    hugetlb_cma_folio(folio, huge_page_order(h))) {
		destroy_compound_gigantic_folio(folio, huge_page_order(h));
		free_gigantic_folio(folio, huge_page_order(h));
	} else {
		__free_pages(&folio->page, huge_page_order(h));
	}
}
/*
 * Because update_and_free_hugetlb_folio() can be called under any context,
 * we cannot use GFP_KERNEL to allocate vmemmap pages. However, we can defer
 * the actual freeing in a workqueue to prevent from using GFP_ATOMIC to
 * allocate the vmemmap pages.
 *
 * free_hpage_workfn() locklessly retrieves the linked list of pages to be
 * freed and frees them one-by-one. As the page->mapping pointer is going
 * to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node
 * structure of a lockless linked list of huge pages to be freed.
 */
static LLIST_HEAD(hpage_freelist);
static void free_hpage_workfn(struct work_struct *work)
{
	struct llist_node *node;

	node = llist_del_all(&hpage_freelist);

	while (node) {
		struct page *page;
		struct hstate *h;

		page = container_of((struct address_space **)node,
				     struct page, mapping);
		node = node->next;
		page->mapping = NULL;
		/*
		 * The VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio) in
		 * folio_hstate() is going to trigger because a previous call to
		 * remove_hugetlb_folio() will clear the hugetlb bit, so do
		 * not use folio_hstate() directly.
		 */
		h = size_to_hstate(page_size(page));

		__update_and_free_hugetlb_folio(h, page_folio(page));

		cond_resched();
	}
}
static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
static inline void flush_free_hpage_work(struct hstate *h)
{
	if (hugetlb_vmemmap_optimizable(h))
		flush_work(&free_hpage_work);
}
static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
				 bool atomic)
{
	if (!folio_test_hugetlb_vmemmap_optimized(folio) || !atomic) {
		__update_and_free_hugetlb_folio(h, folio);
		return;
	}

	/*
	 * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
	 *
	 * Only call schedule_work() if hpage_freelist is previously
	 * empty. Otherwise, schedule_work() had been called but the workfn
	 * hasn't retrieved the list yet.
	 */
	if (llist_add((struct llist_node *)&folio->mapping, &hpage_freelist))
		schedule_work(&free_hpage_work);
}
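
/* Free every hugetlb folio on the list; may block to restore vmemmap. */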
static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list)
{
	struct page *page, *t_page;
	struct folio *folio;

	list_for_each_entry_safe(page, t_page, list, lru) {
		folio = page_folio(page);
		update_and_free_hugetlb_folio(h, folio, false);
		cond_resched();
	}
}
struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}
void free_huge_folio(struct folio *folio)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * compound page destructor.
	 */
	struct hstate *h = folio_hstate(folio);
	int nid = folio_nid(folio);
	struct hugepage_subpool *spool = hugetlb_folio_subpool(folio);
	bool restore_reserve;
	unsigned long flags;

	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
	VM_BUG_ON_FOLIO(folio_mapcount(folio), folio);

	hugetlb_set_folio_subpool(folio, NULL);
	if (folio_test_anon(folio))
		__ClearPageAnonExclusive(&folio->page);
	folio->mapping = NULL;
	restore_reserve = folio_test_hugetlb_restore_reserve(folio);
	folio_clear_hugetlb_restore_reserve(folio);

	/*
	 * If HPageRestoreReserve was set on page, page allocation consumed a
	 * reservation.  If the page was associated with a subpool, there
	 * would have been a page reserved in the subpool before allocation
	 * via hugepage_subpool_get_pages().  Since we are 'restoring' the
	 * reservation, do not call hugepage_subpool_put_pages() as this will
	 * remove the reserved page from the subpool.
	 */
	if (!restore_reserve) {
		/*
		 * A return code of zero implies that the subpool will be
		 * under its minimum size if the reservation is not restored
		 * after page is free.  Therefore, force restore_reserve
		 * operation.
		 */
		if (hugepage_subpool_put_pages(spool, 1) == 0)
			restore_reserve = true;
	}

	spin_lock_irqsave(&hugetlb_lock, flags);
	folio_clear_hugetlb_migratable(folio);
	hugetlb_cgroup_uncharge_folio(hstate_index(h),
				     pages_per_huge_page(h), folio);
	hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
					  pages_per_huge_page(h), folio);
	if (restore_reserve)
		h->resv_huge_pages++;

	if (folio_test_hugetlb_temporary(folio)) {
		remove_hugetlb_folio(h, folio, false);
		spin_unlock_irqrestore(&hugetlb_lock, flags);
		update_and_free_hugetlb_folio(h, folio, true);
	} else if (h->surplus_huge_pages_node[nid]) {
		/* remove the page from active list */
		remove_hugetlb_folio(h, folio, true);
		spin_unlock_irqrestore(&hugetlb_lock, flags);
		update_and_free_hugetlb_folio(h, folio, true);
	} else {
		arch_clear_hugepage_flags(&folio->page);
		enqueue_hugetlb_folio(h, folio);
		spin_unlock_irqrestore(&hugetlb_lock, flags);
	}
}
/*
 * Must be called with the hugetlb lock held
 */
static void __prep_account_new_huge_page(struct hstate *h, int nid)
{
	lockdep_assert_held(&hugetlb_lock);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;
}
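
/*
 * Initialize a freshly allocated hugetlb folio: optimize its vmemmap,
 * mark it as hugetlb, and clear its subpool and cgroup state.
 */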
static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
{
	hugetlb_vmemmap_optimize(h, &folio->page);
	INIT_LIST_HEAD(&folio->lru);
	folio_set_hugetlb(folio);
	hugetlb_set_folio_subpool(folio, NULL);
	set_hugetlb_cgroup(folio, NULL);
	set_hugetlb_cgroup_rsvd(folio, NULL);
}
static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int nid)
{
	__prep_new_hugetlb_folio(h, folio);
	spin_lock_irq(&hugetlb_lock);
	__prep_account_new_huge_page(h, nid);
	spin_unlock_irq(&hugetlb_lock);
}
static bool __prep_compound_gigantic_folio(struct folio *folio,
					unsigned int order, bool demote)
{
	int i, j;
	int nr_pages = 1 << order;
	struct page *p;

	__folio_clear_reserved(folio);
	for (i = 0; i < nr_pages; i++) {
		p = folio_page(folio, i);

		/*
		 * For gigantic hugepages allocated through bootmem at
		 * boot, it's safer to be consistent with the not-gigantic
		 * hugepages and clear the PG_reserved bit from all tail pages
		 * too.  Otherwise drivers using get_user_pages() to access tail
		 * pages may get the reference counting wrong if they see
		 * PG_reserved set on a tail page (despite the head page not
		 * having PG_reserved set).  Enforcing this consistency between
		 * head and tail pages allows drivers to optimize away a check
		 * on the head page when they need know if put_page() is needed
		 * after get_user_pages().
		 */
		if (i != 0)	/* head page cleared above */
			__ClearPageReserved(p);
		/*
		 * Subtle and very unlikely
		 *
		 * Gigantic 'page allocators' such as memblock or cma will
		 * return a set of pages with each page ref counted.  We need
		 * to turn this set of pages into a compound page with tail
		 * page ref counts set to zero.  Code such as speculative page
		 * cache adding could take a ref on a 'to be' tail page.
		 * We need to respect any increased ref count, and only set
		 * the ref count to zero if count is currently 1.  If count
		 * is not 1, we return an error.  An error return indicates
		 * the set of pages can not be converted to a gigantic page.
		 * The caller who allocated the pages should then discard the
		 * pages using the appropriate free interface.
		 *
		 * In the case of demote, the ref count will be zero.
		 */
		if (!demote) {
			if (!page_ref_freeze(p, 1)) {
				pr_warn("HugeTLB page can not be used due to unexpected inflated ref count\n");
				goto out_error;
			}
		} else {
			VM_BUG_ON_PAGE(page_count(p), p);
		}
		if (i != 0)
			set_compound_head(p, &folio->page);
	}
	__folio_set_head(folio);
	/* we rely on prep_new_hugetlb_folio to set the destructor */
	folio_set_order(folio, order);
	atomic_set(&folio->_entire_mapcount, -1);
	atomic_set(&folio->_nr_pages_mapped, 0);
	atomic_set(&folio->_pincount, 0);
	return true;

out_error:
	/* undo page modifications made above */
	for (j = 0; j < i; j++) {
		p = folio_page(folio, j);
		if (j != 0)
			clear_compound_head(p);
		set_page_refcounted(p);
	}
	/* need to clear PG_reserved on remaining tail pages */
	for (; j < nr_pages; j++) {
		p = folio_page(folio, j);
		__ClearPageReserved(p);
	}
	return false;
}
static bool prep_compound_gigantic_folio(struct folio *folio,
							unsigned int order)
{
	return __prep_compound_gigantic_folio(folio, order, false);
}

static bool prep_compound_gigantic_folio_for_demote(struct folio *folio,
							unsigned int order)
{
	return __prep_compound_gigantic_folio(folio, order, true);
}
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
 * transparent huge pages.  See the PageTransHuge() documentation for more
 * details.
 */
int PageHuge(struct page *page)
{
	struct folio *folio;

	if (!PageCompound(page))
		return 0;
	folio = page_folio(page);
	return folio_test_hugetlb(folio);
}
EXPORT_SYMBOL_GPL(PageHuge);
/*
 * Find and lock address space (mapping) in write mode.
 *
 * Upon entry, the page is locked which means that page_mapping() is
 * stable.  Due to locking order, we can only trylock_write.  If we can
 * not get the lock, simply return NULL to caller.
 */
struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
{
	struct address_space *mapping = page_mapping(hpage);

	if (!mapping)
		return mapping;

	if (i_mmap_trylock_write(mapping))
		return mapping;

	return NULL;
}
pgoff_t hugetlb_basepage_index(struct page *page)
{
	struct page *page_head = compound_head(page);
	pgoff_t index = page_index(page_head);
	unsigned long compound_idx;

	if (compound_order(page_head) > MAX_ORDER)
		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
	else
		compound_idx = page - page_head;

	return (index << compound_order(page_head)) + compound_idx;
}
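
/*
 * Worked example of the arithmetic above, assuming a 2MB huge page on
 * x86-64 (compound order 9, so 512 base pages): for the base page at
 * compound_idx = 5 within the huge page at huge-page index 3 in the
 * file, the base-page index is (3 << 9) + 5 = 1541.
 */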
static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask,
		nodemask_t *node_alloc_noretry)
{
	int order = huge_page_order(h);
	struct page *page;
	bool alloc_try_hard = true;
	bool retry = true;

	/*
	 * By default we always try hard to allocate the page with
	 * __GFP_RETRY_MAYFAIL flag.  However, if we are allocating pages in
	 * a loop (to adjust global huge page counts) and previous allocation
	 * failed, do not continue to try hard on the same node.  Use the
	 * node_alloc_noretry bitmap to manage this state information.
	 */
	if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
		alloc_try_hard = false;
	gfp_mask |= __GFP_COMP|__GFP_NOWARN;
	if (alloc_try_hard)
		gfp_mask |= __GFP_RETRY_MAYFAIL;
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();
retry:
	page = __alloc_pages(gfp_mask, order, nid, nmask);

	/* Freeze head page */
	if (page && !page_ref_freeze(page, 1)) {
		__free_pages(page, order);
		if (retry) {	/* retry once */
			retry = false;
			goto retry;
		}
		/* WOW!  twice in a row. */
		pr_warn("HugeTLB head page unexpected inflated ref count\n");
		page = NULL;
	}

	/*
	 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page this
	 * indicates an overall state change.  Clear bit so that we resume
	 * normal 'try hard' allocations.
	 */
	if (node_alloc_noretry && page && !alloc_try_hard)
		node_clear(nid, *node_alloc_noretry);

	/*
	 * If we tried hard to get a page but failed, set bit so that
	 * subsequent attempts will not try as hard until there is an
	 * overall state change.
	 */
	if (node_alloc_noretry && !page && alloc_try_hard)
		node_set(nid, *node_alloc_noretry);

	if (!page) {
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
		return NULL;
	}

	__count_vm_event(HTLB_BUDDY_PGALLOC);
	return page_folio(page);
}
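
/*
 * Sketch of how a caller growing the pool drives the node_alloc_noretry
 * protocol (set_max_huge_pages() below does essentially this):
 *
 *	NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
 *	if (node_alloc_noretry)
 *		nodes_clear(*node_alloc_noretry);
 *	while (more pages are wanted)
 *		alloc_pool_huge_page(h, nodes_allowed, node_alloc_noretry);
 *
 * Nodes that failed a 'try hard' allocation are only attempted with
 * cheaper, fail-fast allocations on later iterations, until an
 * allocation unexpectedly succeeds there again.
 */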
/*
 * Common helper to allocate a fresh hugetlb page. All specific allocators
 * should use this function to get new hugetlb pages
 *
 * Note that returned page is 'frozen':  ref count of head page and all tail
 * pages is zero.
 */
static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask,
		nodemask_t *node_alloc_noretry)
{
	struct folio *folio;
	bool retry = false;

retry:
	if (hstate_is_gigantic(h))
		folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask);
	else
		folio = alloc_buddy_hugetlb_folio(h, gfp_mask,
				nid, nmask, node_alloc_noretry);
	if (!folio)
		return NULL;
	if (hstate_is_gigantic(h)) {
		if (!prep_compound_gigantic_folio(folio, huge_page_order(h))) {
			/*
			 * Rare failure to convert pages to compound page.
			 * Free pages and try again - ONCE!
			 */
			free_gigantic_folio(folio, huge_page_order(h));
			if (!retry) {
				retry = true;
				goto retry;
			}
			return NULL;
		}
	}
	prep_new_hugetlb_folio(h, folio, folio_nid(folio));

	return folio;
}
/*
 * Allocates a fresh page to the hugetlb allocator pool in the node interleaved
 * manner.
 */
static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
				nodemask_t *node_alloc_noretry)
{
	struct folio *folio;
	int nr_nodes, node;
	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;

	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
		folio = alloc_fresh_hugetlb_folio(h, gfp_mask, node,
					nodes_allowed, node_alloc_noretry);
		if (folio) {
			free_huge_folio(folio); /* free it into the hugepage allocator */
			return 1;
		}
	}

	return 0;
}
/*
 * Remove huge page from pool from next node to free.  Attempt to keep
 * persistent huge pages more or less balanced over allowed nodes.
 * This routine only 'removes' the hugetlb page.  The caller must make
 * an additional call to free the page to low level allocators.
 * Called with hugetlb_lock locked.
 */
static struct page *remove_pool_huge_page(struct hstate *h,
						nodemask_t *nodes_allowed,
						bool acct_surplus)
{
	int nr_nodes, node;
	struct page *page = NULL;
	struct folio *folio;

	lockdep_assert_held(&hugetlb_lock);
	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
		/*
		 * If we're returning unused surplus pages, only examine
		 * nodes with surplus pages.
		 */
		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
		    !list_empty(&h->hugepage_freelists[node])) {
			page = list_entry(h->hugepage_freelists[node].next,
					  struct page, lru);
			folio = page_folio(page);
			remove_hugetlb_folio(h, folio, acct_surplus);
			break;
		}
	}

	return page;
}
/*
 * Dissolve a given free hugepage into free buddy pages. This function does
 * nothing for in-use hugepages and non-hugepages.
 * This function returns values like below:
 *
 *  -ENOMEM: failed to allocate vmemmap pages to free the freed hugepages
 *           when the system is under memory pressure and the feature of
 *           freeing unused vmemmap pages associated with each hugetlb page
 *           is enabled.
 *  -EBUSY:  failed to dissolve free hugepages or the hugepage is in-use
 *           (allocated or reserved.)
 *       0:  successfully dissolved free hugepages or the page is not a
 *           hugepage (considered as already dissolved)
 */
int dissolve_free_huge_page(struct page *page)
{
	int rc = -EBUSY;
	struct folio *folio = page_folio(page);

retry:
	/* Not to disrupt normal path by vainly holding hugetlb_lock */
	if (!folio_test_hugetlb(folio))
		return 0;

	spin_lock_irq(&hugetlb_lock);
	if (!folio_test_hugetlb(folio)) {
		rc = 0;
		goto out;
	}

	if (!folio_ref_count(folio)) {
		struct hstate *h = folio_hstate(folio);
		if (!available_huge_pages(h))
			goto out;

		/*
		 * We should make sure that the page is already on the free list
		 * when it is dissolved.
		 */
		if (unlikely(!folio_test_hugetlb_freed(folio))) {
			spin_unlock_irq(&hugetlb_lock);
			cond_resched();

			/*
			 * Theoretically, we should return -EBUSY when we
			 * encounter this race. In fact, we have a chance
			 * to successfully dissolve the page if we do a
			 * retry. Because the race window is quite small.
			 * If we seize this opportunity, it is an optimization
			 * for increasing the success rate of dissolving page.
			 */
			goto retry;
		}

		remove_hugetlb_folio(h, folio, false);
		h->max_huge_pages--;
		spin_unlock_irq(&hugetlb_lock);

		/*
		 * Normally update_and_free_hugetlb_folio will allocate required
		 * vmemmap before freeing the page.  update_and_free_hugetlb_folio
		 * will fail to free the page if it can not allocate required
		 * vmemmap.  We need to adjust max_huge_pages if the page is not
		 * freed.  Attempt to allocate vmemmap here so that we can take
		 * appropriate action on failure.
		 */
		rc = hugetlb_vmemmap_restore(h, &folio->page);
		if (!rc) {
			update_and_free_hugetlb_folio(h, folio, false);
		} else {
			spin_lock_irq(&hugetlb_lock);
			add_hugetlb_folio(h, folio, false);
			h->max_huge_pages++;
			spin_unlock_irq(&hugetlb_lock);
		}

		return rc;
	}
out:
	spin_unlock_irq(&hugetlb_lock);
	return rc;
}
/*
 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
 * make specified memory blocks removable from the system.
 * Note that this will dissolve a free gigantic hugepage completely, if any
 * part of it lies within the given range.
 * Also note that if dissolve_free_huge_page() returns with an error, all
 * free hugepages that were dissolved before that error are lost.
 */
int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int rc = 0;
	unsigned int order;
	struct hstate *h;

	if (!hugepages_supported())
		return rc;

	order = huge_page_order(&default_hstate);
	for_each_hstate(h)
		order = min(order, huge_page_order(h));

	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
		page = pfn_to_page(pfn);
		rc = dissolve_free_huge_page(page);
		if (rc)
			break;
	}

	return rc;
}
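
/*
 * For illustration, the memory offlining path drives this over the range
 * being removed, roughly:
 *
 *	ret = dissolve_free_huge_pages(start_pfn, start_pfn + nr_pages);
 *
 * so that any free huge pages overlapping the range are handed back to
 * the buddy allocator before the underlying memory goes away.
 */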
/*
 * Allocates a fresh surplus page from the page allocator.
 */
static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
				gfp_t gfp_mask, int nid, nodemask_t *nmask)
{
	struct folio *folio = NULL;

	if (hstate_is_gigantic(h))
		return NULL;

	spin_lock_irq(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
		goto out_unlock;
	spin_unlock_irq(&hugetlb_lock);

	folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
	if (!folio)
		return NULL;

	spin_lock_irq(&hugetlb_lock);
	/*
	 * We could have raced with the pool size change.
	 * Double check that and simply deallocate the new page
	 * if we would end up overcommitting the surpluses. Abuse
	 * temporary page to workaround the nasty free_huge_folio
	 * codeflow.
	 */
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
		folio_set_hugetlb_temporary(folio);
		spin_unlock_irq(&hugetlb_lock);
		free_huge_folio(folio);
		return NULL;
	}

	h->surplus_huge_pages++;
	h->surplus_huge_pages_node[folio_nid(folio)]++;

out_unlock:
	spin_unlock_irq(&hugetlb_lock);

	return folio;
}
static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask,
				     int nid, nodemask_t *nmask)
{
	struct folio *folio;

	if (hstate_is_gigantic(h))
		return NULL;

	folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
	if (!folio)
		return NULL;

	/* fresh huge pages are frozen */
	folio_ref_unfreeze(folio, 1);
	/*
	 * We do not account these pages as surplus because they are only
	 * temporary and will be released properly on the last reference.
	 */
	folio_set_hugetlb_temporary(folio);

	return folio;
}
/*
 * Use the VMA's mpolicy to allocate a huge page from the buddy.
 */
static
struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct folio *folio = NULL;
	struct mempolicy *mpol;
	gfp_t gfp_mask = htlb_alloc_mask(h);
	int nid;
	nodemask_t *nodemask;

	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
	if (mpol_is_preferred_many(mpol)) {
		gfp_t gfp = gfp_mask | __GFP_NOWARN;

		gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
		folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask);

		/* Fallback to all nodes if page==NULL */
		nodemask = NULL;
	}

	if (!folio)
		folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask);
	mpol_cond_put(mpol);
	return folio;
}
/* folio migration callback function */
struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
		nodemask_t *nmask, gfp_t gfp_mask)
{
	spin_lock_irq(&hugetlb_lock);
	if (available_huge_pages(h)) {
		struct folio *folio;

		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
						preferred_nid, nmask);
		if (folio) {
			spin_unlock_irq(&hugetlb_lock);
			return folio;
		}
	}
	spin_unlock_irq(&hugetlb_lock);

	return alloc_migrate_hugetlb_folio(h, gfp_mask, preferred_nid, nmask);
}
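
/*
 * For illustration, the generic migration code reaches this callback
 * roughly as follows (details of the gfp mask derivation vary):
 *
 *	gfp_t gfp = htlb_alloc_mask(h);
 *	folio = alloc_hugetlb_folio_nodemask(h, nid, nmask, gfp);
 *
 * preferring a free page from the existing pool and only falling back
 * to a fresh, temporary page when the pool has nothing available.
 */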
/* mempolicy aware migration callback */
struct folio *alloc_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma,
		unsigned long address)
{
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct folio *folio;
	gfp_t gfp_mask;
	int node;

	gfp_mask = htlb_alloc_mask(h);
	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
	folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask);
	mpol_cond_put(mpol);

	return folio;
}
/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(struct hstate *h, long delta)
	__must_hold(&hugetlb_lock)
{
	LIST_HEAD(surplus_list);
	struct folio *folio, *tmp;
	int ret;
	long i;
	long needed, allocated;
	bool alloc_ok = true;

	lockdep_assert_held(&hugetlb_lock);
	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
	if (needed <= 0) {
		h->resv_huge_pages += delta;
		return 0;
	}

	allocated = 0;

	ret = -ENOMEM;
retry:
	spin_unlock_irq(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h),
				NUMA_NO_NODE, NULL);
		if (!folio) {
			alloc_ok = false;
			break;
		}
		list_add(&folio->lru, &surplus_list);
		cond_resched();
	}
	allocated += i;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock_irq(&hugetlb_lock);
	needed = (h->resv_huge_pages + delta) -
			(h->free_huge_pages + allocated);
	if (needed > 0) {
		if (alloc_ok)
			goto retry;
		/*
		 * We were not able to allocate enough pages to
		 * satisfy the entire reservation so we free what
		 * we've allocated so far.
		 */
		goto free;
	}
	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.  Commit the entire reservation here to prevent another
	 * process from stealing the pages as they are added to the pool but
	 * before they are reserved.
	 */
	needed += allocated;
	h->resv_huge_pages += delta;
	ret = 0;

	/* Free the needed pages to the hugetlb pool */
	list_for_each_entry_safe(folio, tmp, &surplus_list, lru) {
		if ((--needed) < 0)
			break;
		/* Add the page to the hugetlb allocator */
		enqueue_hugetlb_folio(h, folio);
	}
free:
	spin_unlock_irq(&hugetlb_lock);

	/*
	 * Free unnecessary surplus pages to the buddy allocator.
	 * Pages have no ref count, call free_huge_folio directly.
	 */
	list_for_each_entry_safe(folio, tmp, &surplus_list, lru)
		free_huge_folio(folio);
	spin_lock_irq(&hugetlb_lock);

	return ret;
}
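
/*
 * Worked example of the 'needed' computation above: with
 * resv_huge_pages = 10, free_huge_pages = 8 and delta = 4, the first
 * pass needs (10 + 4) - 8 = 6 surplus pages.  If all 6 are allocated
 * and, while the lock was dropped, 2 more pages were freed into the
 * pool, the recheck yields (10 + 4) - (10 + 6) = -2, so the
 * reservation commits and the extras are returned to the buddy
 * allocator.
 */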
/*
 * This routine has two main purposes:
 * 1) Decrement the reservation count (resv_huge_pages) by the value passed
 *    in unused_resv_pages.  This corresponds to the prior adjustments made
 *    to the associated reservation map.
 * 2) Free any unused surplus pages that may have been allocated to satisfy
 *    the reservation.  As many as unused_resv_pages may be freed.
 */
static void return_unused_surplus_pages(struct hstate *h,
					unsigned long unused_resv_pages)
{
	unsigned long nr_pages;
	struct page *page;
	LIST_HEAD(page_list);

	lockdep_assert_held(&hugetlb_lock);
	/* Uncommit the reservation */
	h->resv_huge_pages -= unused_resv_pages;

	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		goto out;

	/*
	 * Part (or even all) of the reservation could have been backed
	 * by pre-allocated pages. Only free surplus pages.
	 */
	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);

	/*
	 * We want to release as many surplus pages as possible, spread
	 * evenly across all nodes with memory. Iterate across these nodes
	 * until we can no longer free unreserved surplus pages. This occurs
	 * when the nodes with surplus pages have no free pages.
	 * remove_pool_huge_page() will balance the freed pages across the
	 * on-line nodes with memory and will handle the hstate accounting.
	 */
	while (nr_pages--) {
		page = remove_pool_huge_page(h, &node_states[N_MEMORY], 1);
		if (!page)
			goto out;

		list_add(&page->lru, &page_list);
	}

out:
	spin_unlock_irq(&hugetlb_lock);
	update_and_free_pages_bulk(h, &page_list);
	spin_lock_irq(&hugetlb_lock);
}
/*
 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
 * are used by the huge page allocation routines to manage reservations.
 *
 * vma_needs_reservation is called to determine if the huge page at addr
 * within the vma has an associated reservation.  If a reservation is
 * needed, the value 1 is returned.  The caller is then responsible for
 * managing the global reservation and subpool usage counts.  After
 * the huge page has been allocated, vma_commit_reservation is called
 * to add the page to the reservation map.  If the page allocation fails,
 * the reservation must be ended instead of committed.  vma_end_reservation
 * is called in such cases.
 *
 * In the normal case, vma_commit_reservation returns the same value
 * as the preceding vma_needs_reservation call.  The only time this
 * is not the case is if a reserve map was changed between calls.  It
 * is the responsibility of the caller to notice the difference and
 * take appropriate action.
 *
 * vma_add_reservation is used in error paths where a reservation must
 * be restored when a newly allocated huge page must be freed.  It is
 * to be called after calling vma_needs_reservation to determine if a
 * reservation exists.
 *
 * vma_del_reservation is used in error paths where an entry in the reserve
 * map was created during huge page allocation and must be removed.  It is to
 * be called after calling vma_needs_reservation to determine if a reservation
 * exists.
 */
enum vma_resv_mode {
	VMA_NEEDS_RESV,
	VMA_COMMIT_RESV,
	VMA_END_RESV,
	VMA_ADD_RESV,
	VMA_DEL_RESV,
};
static long __vma_reservation_common(struct hstate *h,
				struct vm_area_struct *vma, unsigned long addr,
				enum vma_resv_mode mode)
{
	struct resv_map *resv;
	pgoff_t idx;
	long ret;
	long dummy_out_regions_needed;

	resv = vma_resv_map(vma);
	if (!resv)
		return 1;

	idx = vma_hugecache_offset(h, vma, addr);
	switch (mode) {
	case VMA_NEEDS_RESV:
		ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed);
		/* We assume that vma_reservation_* routines always operate on
		 * 1 page, and that adding to resv map a 1 page entry can only
		 * ever require 1 region.
		 */
		VM_BUG_ON(dummy_out_regions_needed != 1);
		break;
	case VMA_COMMIT_RESV:
		ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
		/* region_add calls of range 1 should never fail. */
		VM_BUG_ON(ret < 0);
		break;
	case VMA_END_RESV:
		region_abort(resv, idx, idx + 1, 1);
		ret = 0;
		break;
	case VMA_ADD_RESV:
		if (vma->vm_flags & VM_MAYSHARE) {
			ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
			/* region_add calls of range 1 should never fail. */
			VM_BUG_ON(ret < 0);
		} else {
			region_abort(resv, idx, idx + 1, 1);
			ret = region_del(resv, idx, idx + 1);
		}
		break;
	case VMA_DEL_RESV:
		if (vma->vm_flags & VM_MAYSHARE) {
			region_abort(resv, idx, idx + 1, 1);
			ret = region_del(resv, idx, idx + 1);
		} else {
			ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
			/* region_add calls of range 1 should never fail. */
			VM_BUG_ON(ret < 0);
		}
		break;
	default:
		BUG();
	}

	if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV)
		return ret;
	/*
	 * We know private mapping must have HPAGE_RESV_OWNER set.
	 *
	 * In most cases, reserves always exist for private mappings.
	 * However, a file associated with mapping could have been
	 * hole punched or truncated after reserves were consumed.
	 * A subsequent fault on such a range will not use reserves.
	 * Subtle - The reserve map for private mappings has the
	 * opposite meaning than that of shared mappings.  If NO
	 * entry is in the reserve map, it means a reservation exists.
	 * If an entry exists in the reserve map, it means the
	 * reservation has already been consumed.  As a result, the
	 * return value of this routine is the opposite of the
	 * value returned from reserve map manipulation routines above.
	 */
	if (ret > 0)
		return 0;
	if (ret == 0)
		return 1;
	return ret;
}
static long vma_needs_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
}

static long vma_commit_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
}

static void vma_end_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	(void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
}

static long vma_add_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
}

static long vma_del_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV);
}
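
/*
 * Canonical usage of the wrappers above; this is the shape followed by
 * alloc_hugetlb_folio() below (error handling elided for brevity):
 *
 *	chg = vma_needs_reservation(h, vma, addr);
 *	if (chg < 0)
 *		return error;
 *	folio = ...allocate the huge page...;
 *	if (!folio) {
 *		vma_end_reservation(h, vma, addr);
 *		return error;
 *	}
 *	commit = vma_commit_reservation(h, vma, addr);
 *
 * Comparing chg and commit detects a reserve map update that raced in
 * between the two calls.
 */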
/*
 * This routine is called to restore reservation information on error paths.
 * It should ONLY be called for folios allocated via alloc_hugetlb_folio(),
 * and the hugetlb mutex should remain held when calling this routine.
 *
 * It handles two specific cases:
 * 1) A reservation was in place and the folio consumed the reservation.
 *    hugetlb_restore_reserve is set in the folio.
 * 2) No reservation was in place for the page, so hugetlb_restore_reserve is
 *    not set.  However, alloc_hugetlb_folio always updates the reserve map.
 *
 * In case 1, free_huge_folio later in the error path will increment the
 * global reserve count.  But, free_huge_folio does not have enough context
 * to adjust the reservation map.  This case deals primarily with private
 * mappings.  Adjust the reserve map here to be consistent with global
 * reserve count adjustments to be made by free_huge_folio.  Make sure the
 * reserve map indicates there is a reservation present.
 *
 * In case 2, simply undo reserve map modifications done by alloc_hugetlb_folio.
 */
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
			unsigned long address, struct folio *folio)
{
	long rc = vma_needs_reservation(h, vma, address);

	if (folio_test_hugetlb_restore_reserve(folio)) {
		if (unlikely(rc < 0))
			/*
			 * Rare out of memory condition in reserve map
			 * manipulation.  Clear hugetlb_restore_reserve so
			 * that global reserve count will not be incremented
			 * by free_huge_folio.  This will make it appear
			 * as though the reservation for this folio was
			 * consumed.  This may prevent the task from
			 * faulting in the folio at a later time.  This
			 * is better than inconsistent global huge page
			 * accounting of reserve counts.
			 */
			folio_clear_hugetlb_restore_reserve(folio);
		else if (rc)
			(void)vma_add_reservation(h, vma, address);
		else
			vma_end_reservation(h, vma, address);
	} else {
		if (!rc) {
			/*
			 * This indicates there is an entry in the reserve map
			 * not added by alloc_hugetlb_folio.  We know it was added
			 * before the alloc_hugetlb_folio call, otherwise
			 * hugetlb_restore_reserve would be set on the folio.
			 * Remove the entry so that a subsequent allocation
			 * does not consume a reservation.
			 */
			rc = vma_del_reservation(h, vma, address);
			if (rc < 0)
				/*
				 * VERY rare out of memory condition.  Since
				 * we can not delete the entry, set
				 * hugetlb_restore_reserve so that the reserve
				 * count will be incremented when the folio
				 * is freed.  This reserve will be consumed
				 * on a subsequent allocation.
				 */
				folio_set_hugetlb_restore_reserve(folio);
		} else if (rc < 0) {
			/*
			 * Rare out of memory condition from
			 * vma_needs_reservation call.  Memory allocation is
			 * only attempted if a new entry is needed.  Therefore,
			 * this implies there is not an entry in the
			 * reserve map.
			 *
			 * For shared mappings, no entry in the map indicates
			 * no reservation.  We are done.
			 */
			if (!(vma->vm_flags & VM_MAYSHARE))
				/*
				 * For private mappings, no entry indicates
				 * a reservation is present.  Since we can
				 * not add an entry, set hugetlb_restore_reserve
				 * on the folio so reserve count will be
				 * incremented when freed.  This reserve will
				 * be consumed on a subsequent allocation.
				 */
				folio_set_hugetlb_restore_reserve(folio);
		} else
			/*
			 * No reservation present, do nothing
			 */
			 vma_end_reservation(h, vma, address);
	}
}
/*
 * alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve
 *				      the old one
 * @h: struct hstate old page belongs to
 * @old_folio: Old folio to dissolve
 * @list: List to isolate the page in case we need to
 * Returns 0 on success, otherwise negated error.
 */
static int alloc_and_dissolve_hugetlb_folio(struct hstate *h,
			struct folio *old_folio, struct list_head *list)
{
	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
	int nid = folio_nid(old_folio);
	struct folio *new_folio;
	int ret = 0;

	/*
	 * Before dissolving the folio, we need to allocate a new one for the
	 * pool to remain stable.  Here, we allocate the folio and 'prep' it
	 * by doing everything but actually updating counters and adding to
	 * the pool.  This simplifies and lets us do most of the processing
	 * without holding hugetlb_lock.
	 */
	new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, NULL, NULL);
	if (!new_folio)
		return -ENOMEM;
	__prep_new_hugetlb_folio(h, new_folio);

retry:
	spin_lock_irq(&hugetlb_lock);
	if (!folio_test_hugetlb(old_folio)) {
		/*
		 * Freed from under us. Drop new_folio too.
		 */
		goto free_new;
	} else if (folio_ref_count(old_folio)) {
		bool isolated;

		/*
		 * Someone has grabbed the folio, try to isolate it here.
		 * Fail with -EBUSY if not possible.
		 */
		spin_unlock_irq(&hugetlb_lock);
		isolated = isolate_hugetlb(old_folio, list);
		ret = isolated ? 0 : -EBUSY;
		spin_lock_irq(&hugetlb_lock);
		goto free_new;
	} else if (!folio_test_hugetlb_freed(old_folio)) {
		/*
		 * Folio's refcount is 0 but it has not been enqueued in the
		 * freelist yet. Race window is small, so we can succeed here if
		 * we retry.
		 */
		spin_unlock_irq(&hugetlb_lock);
		cond_resched();
		goto retry;
	} else {
		/*
		 * Ok, old_folio is still a genuine free hugepage. Remove it from
		 * the freelist and decrease the counters. These will be
		 * incremented again when calling __prep_account_new_huge_page()
		 * and enqueue_hugetlb_folio() for new_folio. The counters will
		 * remain stable since this happens under the lock.
		 */
		remove_hugetlb_folio(h, old_folio, false);

		/*
		 * Ref count on new_folio is already zero as it was dropped
		 * earlier.  It can be directly added to the pool free list.
		 */
		__prep_account_new_huge_page(h, nid);
		enqueue_hugetlb_folio(h, new_folio);

		/*
		 * Folio has been replaced, we can safely free the old one.
		 */
		spin_unlock_irq(&hugetlb_lock);
		update_and_free_hugetlb_folio(h, old_folio, false);
	}

	return ret;

free_new:
	spin_unlock_irq(&hugetlb_lock);
	/* Folio has a zero ref count, but needs a ref to be freed */
	folio_ref_unfreeze(new_folio, 1);
	update_and_free_hugetlb_folio(h, new_folio, false);

	return ret;
}
int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
{
	struct hstate *h;
	struct folio *folio = page_folio(page);
	int ret = -EBUSY;

	/*
	 * The page might have been dissolved from under our feet, so make sure
	 * to carefully check the state under the lock.
	 * Return success when racing as if we dissolved the page ourselves.
	 */
	spin_lock_irq(&hugetlb_lock);
	if (folio_test_hugetlb(folio)) {
		h = folio_hstate(folio);
	} else {
		spin_unlock_irq(&hugetlb_lock);
		return 0;
	}
	spin_unlock_irq(&hugetlb_lock);

	/*
	 * Fence off gigantic pages as there is a cyclic dependency between
	 * alloc_contig_range and them. Return -ENOMEM as this has the effect
	 * of bailing out right away without further retrying.
	 */
	if (hstate_is_gigantic(h))
		return -ENOMEM;

	if (folio_ref_count(folio) && isolate_hugetlb(folio, list))
		ret = 0;
	else if (!folio_ref_count(folio))
		ret = alloc_and_dissolve_hugetlb_folio(h, folio, list);

	return ret;
}
struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
				    unsigned long addr, int avoid_reserve)
{
	struct hugepage_subpool *spool = subpool_vma(vma);
	struct hstate *h = hstate_vma(vma);
	struct folio *folio;
	long map_chg, map_commit;
	long gbl_chg;
	int ret, idx;
	struct hugetlb_cgroup *h_cg = NULL;
	bool deferred_reserve;

	idx = hstate_index(h);
	/*
	 * Examine the region/reserve map to determine if the process
	 * has a reservation for the page to be allocated.  A return
	 * code of zero indicates a reservation exists (no change).
	 */
	map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
	if (map_chg < 0)
		return ERR_PTR(-ENOMEM);

	/*
	 * Processes that did not create the mapping will have no
	 * reserves as indicated by the region/reserve map. Check
	 * that the allocation will not exceed the subpool limit.
	 * Allocations for MAP_NORESERVE mappings also need to be
	 * checked against any subpool limit.
	 */
	if (map_chg || avoid_reserve) {
		gbl_chg = hugepage_subpool_get_pages(spool, 1);
		if (gbl_chg < 0) {
			vma_end_reservation(h, vma, addr);
			return ERR_PTR(-ENOSPC);
		}

		/*
		 * Even though there was no reservation in the region/reserve
		 * map, there could be reservations associated with the
		 * subpool that can be used.  This would be indicated if the
		 * return value of hugepage_subpool_get_pages() is zero.
		 * However, if avoid_reserve is specified we still avoid even
		 * the subpool reservations.
		 */
		if (avoid_reserve)
			gbl_chg = 1;
	}

	/* If this allocation is not consuming a reservation, charge it now.
	 */
	deferred_reserve = map_chg || avoid_reserve;
	if (deferred_reserve) {
		ret = hugetlb_cgroup_charge_cgroup_rsvd(
			idx, pages_per_huge_page(h), &h_cg);
		if (ret)
			goto out_subpool_put;
	}

	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
	if (ret)
		goto out_uncharge_cgroup_reservation;

	spin_lock_irq(&hugetlb_lock);
	/*
	 * gbl_chg is passed to indicate whether or not a page must be taken
	 * from the global free pool (global change).  gbl_chg == 0 indicates
	 * a reservation exists for the allocation.
	 */
	folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg);
	if (!folio) {
		spin_unlock_irq(&hugetlb_lock);
		folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
		if (!folio)
			goto out_uncharge_cgroup;
		spin_lock_irq(&hugetlb_lock);
		if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
			folio_set_hugetlb_restore_reserve(folio);
			h->resv_huge_pages--;
		}
		list_add(&folio->lru, &h->hugepage_activelist);
		folio_ref_unfreeze(folio, 1);
		/* Fall through */
	}

	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio);
	/* If allocation is not consuming a reservation, also store the
	 * hugetlb_cgroup pointer on the page.
	 */
	if (deferred_reserve) {
		hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
						  h_cg, folio);
	}

	spin_unlock_irq(&hugetlb_lock);

	hugetlb_set_folio_subpool(folio, spool);

	map_commit = vma_commit_reservation(h, vma, addr);
	if (unlikely(map_chg > map_commit)) {
		/*
		 * The page was added to the reservation map between
		 * vma_needs_reservation and vma_commit_reservation.
		 * This indicates a race with hugetlb_reserve_pages.
		 * Adjust for the subpool count incremented above AND
		 * in hugetlb_reserve_pages for the same page.  Also,
		 * the reservation count added in hugetlb_reserve_pages
		 * no longer applies.
		 */
		long rsv_adjust;

		rsv_adjust = hugepage_subpool_put_pages(spool, 1);
		hugetlb_acct_memory(h, -rsv_adjust);
		if (deferred_reserve)
			hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
					pages_per_huge_page(h), folio);
	}
	return folio;

out_uncharge_cgroup:
	hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
out_uncharge_cgroup_reservation:
	if (deferred_reserve)
		hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h),
						    h_cg);
out_subpool_put:
	if (map_chg || avoid_reserve)
		hugepage_subpool_put_pages(spool, 1);
	vma_end_reservation(h, vma, addr);
	return ERR_PTR(-ENOSPC);
}
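
/*
 * Sketch of a typical caller (the fault paths follow this shape): any
 * failure after a successful allocation must unwind the reserve map via
 * restore_reserve_on_error() before dropping the folio:
 *
 *	folio = alloc_hugetlb_folio(vma, addr, 0);
 *	if (IS_ERR(folio))
 *		return ...map PTR_ERR(folio) to a fault/return code...;
 *	if (a later step fails) {
 *		restore_reserve_on_error(h, vma, addr, folio);
 *		folio_put(folio);
 *	}
 */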
int alloc_bootmem_huge_page(struct hstate *h, int nid)
	__attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
int __alloc_bootmem_huge_page(struct hstate *h, int nid)
{
	struct huge_bootmem_page *m = NULL; /* initialize for clang */
	int nr_nodes, node;

	/* do node specific alloc */
	if (nid != NUMA_NO_NODE) {
		m = memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h),
				0, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
		if (!m)
			return 0;
		goto found;
	}
	/* allocate from next node when distributing huge pages */
	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
		m = memblock_alloc_try_nid_raw(
				huge_page_size(h), huge_page_size(h),
				0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
		/*
		 * Use the beginning of the huge page to store the
		 * huge_bootmem_page struct (until gather_bootmem
		 * puts them into the mem_map).
		 */
		if (m)
			break;
	}

	if (!m)
		return 0;

found:
	/* Put them into a private list first because mem_map is not up yet */
	INIT_LIST_HEAD(&m->list);
	list_add(&m->list, &huge_boot_pages);
	m->hstate = h;
	return 1;
}
/*
 * Put bootmem huge pages into the standard lists after mem_map is up.
 * Note: This only applies to gigantic (order > MAX_ORDER) pages.
 */
static void __init gather_bootmem_prealloc(void)
{
	struct huge_bootmem_page *m;

	list_for_each_entry(m, &huge_boot_pages, list) {
		struct page *page = virt_to_page(m);
		struct folio *folio = page_folio(page);
		struct hstate *h = m->hstate;

		VM_BUG_ON(!hstate_is_gigantic(h));
		WARN_ON(folio_ref_count(folio) != 1);
		if (prep_compound_gigantic_folio(folio, huge_page_order(h))) {
			WARN_ON(folio_test_reserved(folio));
			prep_new_hugetlb_folio(h, folio, folio_nid(folio));
			free_huge_folio(folio); /* add to the hugepage allocator */
		} else {
			/* VERY unlikely inflated ref count on a tail page */
			free_gigantic_folio(folio, huge_page_order(h));
		}

		/*
		 * We need to restore the 'stolen' pages to totalram_pages
		 * in order to fix confusing memory reports from free(1) and
		 * other side-effects, like CommitLimit going negative.
		 */
		adjust_managed_page_count(page, pages_per_huge_page(h));
		cond_resched();
	}
}
static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
{
	unsigned long i;
	char buf[32];

	for (i = 0; i < h->max_huge_pages_node[nid]; ++i) {
		if (hstate_is_gigantic(h)) {
			if (!alloc_bootmem_huge_page(h, nid))
				break;
		} else {
			struct folio *folio;
			gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;

			folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid,
					&node_states[N_MEMORY], NULL);
			if (!folio)
				break;
			free_huge_folio(folio); /* free it into the hugepage allocator */
		}
		cond_resched();
	}
	if (i == h->max_huge_pages_node[nid])
		return;

	string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
	pr_warn("HugeTLB: allocating %u of page size %s failed node%d.  Only allocated %lu hugepages.\n",
		h->max_huge_pages_node[nid], buf, nid, i);
	h->max_huge_pages -= (h->max_huge_pages_node[nid] - i);
	h->max_huge_pages_node[nid] = i;
}
static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{
	unsigned long i;
	nodemask_t *node_alloc_noretry;
	bool node_specific_alloc = false;

	/* skip gigantic hugepages allocation if hugetlb_cma enabled */
	if (hstate_is_gigantic(h) && hugetlb_cma_size) {
		pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
		return;
	}

	/* do node specific alloc */
	for_each_online_node(i) {
		if (h->max_huge_pages_node[i] > 0) {
			hugetlb_hstate_alloc_pages_onenode(h, i);
			node_specific_alloc = true;
		}
	}

	if (node_specific_alloc)
		return;

	/* below will do all node balanced alloc */
	if (!hstate_is_gigantic(h)) {
		/*
		 * Bit mask controlling how hard we retry per-node allocations.
		 * Ignore errors as lower level routines can deal with
		 * node_alloc_noretry == NULL.  If this kmalloc fails at boot
		 * time, we are likely in bigger trouble.
		 */
		node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry),
						GFP_KERNEL);
	} else {
		/* allocations done at boot time */
		node_alloc_noretry = NULL;
	}

	/* bit mask controlling how hard we retry per-node allocations */
	if (node_alloc_noretry)
		nodes_clear(*node_alloc_noretry);

	for (i = 0; i < h->max_huge_pages; ++i) {
		if (hstate_is_gigantic(h)) {
			if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
				break;
		} else if (!alloc_pool_huge_page(h,
					 &node_states[N_MEMORY],
					 node_alloc_noretry))
			break;
		cond_resched();
	}
	if (i < h->max_huge_pages) {
		char buf[32];

		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
		pr_warn("HugeTLB: allocating %lu of page size %s failed.  Only allocated %lu hugepages.\n",
			h->max_huge_pages, buf, i);
		h->max_huge_pages = i;
	}
	kfree(node_alloc_noretry);
}
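
/*
 * Boot-time pool sizing is requested on the kernel command line; for
 * example (sizes and counts illustrative):
 *
 *	hugepagesz=1G hugepages=16		16 gigantic pages, interleaved
 *	hugepagesz=2M hugepages=0:256,1:256	node-specific 2MB allocations
 *
 * The node-specific form populates max_huge_pages_node[] and routes
 * through hugetlb_hstate_alloc_pages_onenode() above.
 */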
static void __init hugetlb_init_hstates(void)
{
	struct hstate *h, *h2;

	for_each_hstate(h) {
		/* oversize hugepages were init'ed in early boot */
		if (!hstate_is_gigantic(h))
			hugetlb_hstate_alloc_pages(h);

		/*
		 * Set demote order for each hstate.  Note that
		 * h->demote_order is initially 0.
		 * - We can not demote gigantic pages if runtime freeing
		 *   is not supported, so skip this.
		 * - If CMA allocation is possible, we can not demote
		 *   HUGETLB_PAGE_ORDER or smaller size pages.
		 */
		if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
			continue;
		if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER)
			continue;
		for_each_hstate(h2) {
			if (h2 == h)
				continue;
			if (h2->order < h->order &&
			    h2->order > h->demote_order)
				h->demote_order = h2->order;
		}
	}
}
static void __init report_hugepages(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		char buf[32];

		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
		pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n",
			buf, h->free_huge_pages);
		pr_info("HugeTLB: %d KiB vmemmap can be freed for a %s page\n",
			hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf);
	}
}
#ifdef CONFIG_HIGHMEM
static void try_to_free_low(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
	int i;
	LIST_HEAD(page_list);

	lockdep_assert_held(&hugetlb_lock);
	if (hstate_is_gigantic(h))
		return;

	/*
	 * Collect pages to be freed on a list, and free after dropping lock
	 */
	for_each_node_mask(i, *nodes_allowed) {
		struct page *page, *next;
		struct list_head *freel = &h->hugepage_freelists[i];
		list_for_each_entry_safe(page, next, freel, lru) {
			if (count >= h->nr_huge_pages)
				goto out;
			if (PageHighMem(page))
				continue;
			remove_hugetlb_folio(h, page_folio(page), false);
			list_add(&page->lru, &page_list);
		}
	}

out:
	spin_unlock_irq(&hugetlb_lock);
	update_and_free_pages_bulk(h, &page_list);
	spin_lock_irq(&hugetlb_lock);
}
#else
static inline void try_to_free_low(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
}
#endif
/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
				int delta)
{
	int nr_nodes, node;

	lockdep_assert_held(&hugetlb_lock);
	VM_BUG_ON(delta != -1 && delta != 1);

	if (delta < 0) {
		for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
			if (h->surplus_huge_pages_node[node])
				goto found;
		}
	} else {
		for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
			if (h->surplus_huge_pages_node[node] <
					h->nr_huge_pages_node[node])
				goto found;
		}
	}
	return 0;

found:
	h->surplus_huge_pages += delta;
	h->surplus_huge_pages_node[node] += delta;
	return 1;
}
#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
			      nodemask_t *nodes_allowed)
{
	unsigned long min_count, ret;
	struct page *page;
	LIST_HEAD(page_list);
	NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);

	/*
	 * Bit mask controlling how hard we retry per-node allocations.
	 * If we can not allocate the bit mask, do not attempt to allocate
	 * the requested huge pages.
	 */
	if (node_alloc_noretry)
		nodes_clear(*node_alloc_noretry);
	else
		return -ENOMEM;

	/*
	 * resize_lock mutex prevents concurrent adjustments to number of
	 * pages in hstate via the proc/sysfs interfaces.
	 */
	mutex_lock(&h->resize_lock);
	flush_free_hpage_work(h);
	spin_lock_irq(&hugetlb_lock);

	/*
	 * Check for a node specific request.
	 * Changing node specific huge page count may require a corresponding
	 * change to the global count.  In any case, the passed node mask
	 * (nodes_allowed) will restrict alloc/free to the specified node.
	 */
	if (nid != NUMA_NO_NODE) {
		unsigned long old_count = count;

		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
		/*
		 * User may have specified a large count value which caused the
		 * above calculation to overflow.  In this case, they wanted
		 * to allocate as many huge pages as possible.  Set count to
		 * largest possible value to align with their intention.
		 */
		if (count < old_count)
			count = ULONG_MAX;
	}

	/*
	 * Gigantic pages runtime allocation depend on the capability for large
	 * page range allocation.
	 * If the system does not provide this feature, return an error when
	 * the user tries to allocate gigantic pages but let the user free the
	 * boottime allocated gigantic pages.
	 */
	if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
		if (count > persistent_huge_pages(h)) {
			spin_unlock_irq(&hugetlb_lock);
			mutex_unlock(&h->resize_lock);
			NODEMASK_FREE(node_alloc_noretry);
			return -EINVAL;
		}
		/* Fall through to decrease pool */
	}

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 *
	 * We might race with alloc_surplus_hugetlb_folio() here and be unable
	 * to convert a surplus huge page to a normal huge page. That is
	 * not critical, though, it just means the overall size of the
	 * pool might be one hugepage larger than it needs to be, but
	 * within all the constraints specified by the sysctls.
	 */
	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, nodes_allowed, -1))
			break;
	}

	while (count > persistent_huge_pages(h)) {
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_folio will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock_irq(&hugetlb_lock);

		/* yield cpu to avoid soft lockup */
		cond_resched();

		ret = alloc_pool_huge_page(h, nodes_allowed,
						node_alloc_noretry);
		spin_lock_irq(&hugetlb_lock);
		if (!ret)
			goto out;

		/* Bail for signals. Probably ctrl-c from user */
		if (signal_pending(current))
			goto out;
	}

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations).  Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 *
	 * By placing pages into the surplus state independent of the
	 * overcommit value, we are allowing the surplus pool size to
	 * exceed overcommit. There are few sane options here. Since
	 * alloc_surplus_hugetlb_folio() is checking the global counter,
	 * though, we'll note that we're not allowed to exceed surplus
	 * and won't grow the pool anywhere else. Not until one of the
	 * sysctls are changed, or the surplus pages go out of use.
	 */
	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
	min_count = max(count, min_count);
	try_to_free_low(h, min_count, nodes_allowed);

	/*
	 * Collect pages to be removed on list without dropping lock
	 */
	while (min_count < persistent_huge_pages(h)) {
		page = remove_pool_huge_page(h, nodes_allowed, 0);
		if (!page)
			break;

		list_add(&page->lru, &page_list);
	}
	/* free the pages after dropping lock */
	spin_unlock_irq(&hugetlb_lock);
	update_and_free_pages_bulk(h, &page_list);
	flush_free_hpage_work(h);
	spin_lock_irq(&hugetlb_lock);

	while (count < persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, nodes_allowed, 1))
			break;
	}
out:
	h->max_huge_pages = persistent_huge_pages(h);
	spin_unlock_irq(&hugetlb_lock);
	mutex_unlock(&h->resize_lock);

	NODEMASK_FREE(node_alloc_noretry);

	return 0;
}
static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio)
{
	int i, nid = folio_nid(folio);
	struct hstate *target_hstate;
	struct page *subpage;
	struct folio *inner_folio;
	int rc = 0;

	target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order);

	remove_hugetlb_folio_for_demote(h, folio, false);
	spin_unlock_irq(&hugetlb_lock);

	rc = hugetlb_vmemmap_restore(h, &folio->page);
	if (rc) {
		/* Allocation of vmemmap failed, we can not demote folio */
		spin_lock_irq(&hugetlb_lock);
		folio_ref_unfreeze(folio, 1);
		add_hugetlb_folio(h, folio, false);
		return rc;
	}

	/*
	 * Use destroy_compound_hugetlb_folio_for_demote for all huge page
	 * sizes as it will not ref count folios.
	 */
	destroy_compound_hugetlb_folio_for_demote(folio, huge_page_order(h));

	/*
	 * Taking target hstate mutex synchronizes with set_max_huge_pages.
	 * Without the mutex, pages added to target hstate could be marked
	 * as surplus.
	 *
	 * Note that we already hold h->resize_lock.  To prevent deadlock,
	 * use the convention of always taking larger size hstate mutex first.
	 */
	mutex_lock(&target_hstate->resize_lock);
	for (i = 0; i < pages_per_huge_page(h);
				i += pages_per_huge_page(target_hstate)) {
		subpage = folio_page(folio, i);
		inner_folio = page_folio(subpage);
		if (hstate_is_gigantic(target_hstate))
			prep_compound_gigantic_folio_for_demote(inner_folio,
							target_hstate->order);
		else
			prep_compound_page(subpage, target_hstate->order);
		folio_change_private(inner_folio, NULL);
		prep_new_hugetlb_folio(target_hstate, inner_folio, nid);
		free_huge_folio(inner_folio);
	}
	mutex_unlock(&target_hstate->resize_lock);

	spin_lock_irq(&hugetlb_lock);

	/*
	 * Not absolutely necessary, but for consistency update max_huge_pages
	 * based on pool changes for the demoted page.
	 */
	h->max_huge_pages--;
	target_hstate->max_huge_pages +=
		pages_per_huge_page(h) / pages_per_huge_page(target_hstate);

	return rc;
}
static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
	__must_hold(&hugetlb_lock)
{
	int nr_nodes, node;
	struct folio *folio;

	lockdep_assert_held(&hugetlb_lock);

	/* We should never get here if no demote order */
	if (!h->demote_order) {
		pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n");
		return -EINVAL;		/* internal error */
	}

	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
		list_for_each_entry(folio, &h->hugepage_freelists[node], lru) {
			if (folio_test_hwpoison(folio))
				continue;
			return demote_free_hugetlb_folio(h, folio);
		}
	}

	/*
	 * Only way to get here is if all pages on free lists are poisoned.
	 * Return -EBUSY so that caller will not retry.
	 */
	return -EBUSY;
}
#define HSTATE_ATTR_RO(_name) \
	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

#define HSTATE_ATTR_WO(_name) \
	static struct kobj_attribute _name##_attr = __ATTR_WO(_name)

#define HSTATE_ATTR(_name) \
	static struct kobj_attribute _name##_attr = __ATTR_RW(_name)

static struct kobject *hugepages_kobj;
static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];

static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);

static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
{
	int i;

	for (i = 0; i < HUGE_MAX_HSTATE; i++)
		if (hstate_kobjs[i] == kobj) {
			if (nidp)
				*nidp = NUMA_NO_NODE;
			return &hstates[i];
		}

	return kobj_to_node_hstate(kobj, nidp);
}
static ssize_t nr_hugepages_show_common(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h;
	unsigned long nr_huge_pages;
	int nid;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE)
		nr_huge_pages = h->nr_huge_pages;
	else
		nr_huge_pages = h->nr_huge_pages_node[nid];

	return sysfs_emit(buf, "%lu\n", nr_huge_pages);
}
static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
					   struct hstate *h, int nid,
					   unsigned long count, size_t len)
{
	int err;
	nodemask_t nodes_allowed, *n_mask;

	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return -EINVAL;

	if (nid == NUMA_NO_NODE) {
		/*
		 * global hstate attribute
		 */
		if (!(obey_mempolicy &&
				init_nodemask_of_mempolicy(&nodes_allowed)))
			n_mask = &node_states[N_MEMORY];
		else
			n_mask = &nodes_allowed;
	} else {
		/*
		 * Node specific request.  count adjustment happens in
		 * set_max_huge_pages() after acquiring hugetlb_lock.
		 */
		init_nodemask_of_node(&nodes_allowed, nid);
		n_mask = &nodes_allowed;
	}

	err = set_max_huge_pages(h, count, nid, n_mask);

	return err ? err : len;
}
static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
					 struct kobject *kobj, const char *buf,
					 size_t len)
{
	struct hstate *h;
	unsigned long count;
	int nid;
	int err;

	err = kstrtoul(buf, 10, &count);
	if (err)
		return err;

	h = kobj_to_hstate(kobj, &nid);
	return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
}

static ssize_t nr_hugepages_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return nr_hugepages_show_common(kobj, attr, buf);
}

static ssize_t nr_hugepages_store(struct kobject *kobj,
	       struct kobj_attribute *attr, const char *buf, size_t len)
{
	return nr_hugepages_store_common(false, kobj, buf, len);
}
HSTATE_ATTR(nr_hugepages);
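
/*
 * These attributes appear under /sys/kernel/mm/hugepages/hugepages-<size>kB/
 * (and, per node, under /sys/devices/system/node/nodeN/hugepages/).  For
 * example, growing the 2MB pool to 512 pages from userspace:
 *
 *	echo 512 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 */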
#ifdef CONFIG_NUMA

/*
 * hstate attribute for optionally mempolicy-based constraint on persistent
 * huge page alloc/free.
 */
static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   char *buf)
{
	return nr_hugepages_show_common(kobj, attr, buf);
}

static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
	       struct kobj_attribute *attr, const char *buf, size_t len)
{
	return nr_hugepages_store_common(true, kobj, buf, len);
}
HSTATE_ATTR(nr_hugepages_mempolicy);
#endif
static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj, NULL);
	return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages);
}

static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int err;
	unsigned long input;
	struct hstate *h = kobj_to_hstate(kobj, NULL);

	if (hstate_is_gigantic(h))
		return -EINVAL;

	err = kstrtoul(buf, 10, &input);
	if (err)
		return err;

	spin_lock_irq(&hugetlb_lock);
	h->nr_overcommit_huge_pages = input;
	spin_unlock_irq(&hugetlb_lock);

	return count;
}
HSTATE_ATTR(nr_overcommit_hugepages);
static ssize_t free_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h;
	unsigned long free_huge_pages;
	int nid;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE)
		free_huge_pages = h->free_huge_pages;
	else
		free_huge_pages = h->free_huge_pages_node[nid];

	return sysfs_emit(buf, "%lu\n", free_huge_pages);
}
HSTATE_ATTR_RO(free_hugepages);
static ssize_t resv_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj, NULL);
	return sysfs_emit(buf, "%lu\n", h->resv_huge_pages);
}
HSTATE_ATTR_RO(resv_hugepages);
static ssize_t surplus_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h;
	unsigned long surplus_huge_pages;
	int nid;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE)
		surplus_huge_pages = h->surplus_huge_pages;
	else
		surplus_huge_pages = h->surplus_huge_pages_node[nid];

	return sysfs_emit(buf, "%lu\n", surplus_huge_pages);
}
HSTATE_ATTR_RO(surplus_hugepages);
static ssize_t demote_store(struct kobject *kobj,
	       struct kobj_attribute *attr, const char *buf, size_t len)
{
	unsigned long nr_demote;
	unsigned long nr_available;
	nodemask_t nodes_allowed, *n_mask;
	struct hstate *h;
	int err;
	int nid;

	err = kstrtoul(buf, 10, &nr_demote);
	if (err)
		return err;
	h = kobj_to_hstate(kobj, &nid);

	if (nid != NUMA_NO_NODE) {
		init_nodemask_of_node(&nodes_allowed, nid);
		n_mask = &nodes_allowed;
	} else {
		n_mask = &node_states[N_MEMORY];
	}

	/* Synchronize with other sysfs operations modifying huge pages */
	mutex_lock(&h->resize_lock);
	spin_lock_irq(&hugetlb_lock);

	while (nr_demote) {
		/*
		 * Check for available pages to demote each time through the
		 * loop as demote_pool_huge_page will drop hugetlb_lock.
		 */
		if (nid != NUMA_NO_NODE)
			nr_available = h->free_huge_pages_node[nid];
		else
			nr_available = h->free_huge_pages;
		nr_available -= h->resv_huge_pages;
		if (!nr_available)
			break;

		err = demote_pool_huge_page(h, n_mask);
		if (err)
			break;

		nr_demote--;
	}

	spin_unlock_irq(&hugetlb_lock);
	mutex_unlock(&h->resize_lock);

	return err ? err : len;
}
HSTATE_ATTR_WO(demote);
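
/*
 * Example of driving demotion from userspace (paths as registered by
 * this file): select the target size, then demote a number of free
 * pages:
 *
 *	echo 2048kB > /sys/kernel/mm/hugepages/hugepages-1048576kB/demote_size
 *	echo 4 > /sys/kernel/mm/hugepages/hugepages-1048576kB/demote
 */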
static ssize_t demote_size_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj, NULL);
	unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K;

	return sysfs_emit(buf, "%lukB\n", demote_size);
}
static ssize_t demote_size_store(struct kobject *kobj,
					struct kobj_attribute *attr,
					const char *buf, size_t count)
{
	struct hstate *h, *demote_hstate;
	unsigned long demote_size;
	unsigned int demote_order;

	demote_size = (unsigned long)memparse(buf, NULL);

	demote_hstate = size_to_hstate(demote_size);
	if (!demote_hstate)
		return -EINVAL;
	demote_order = demote_hstate->order;
	if (demote_order < HUGETLB_PAGE_ORDER)
		return -EINVAL;

	/* demote order must be smaller than hstate order */
	h = kobj_to_hstate(kobj, NULL);
	if (demote_order >= h->order)
		return -EINVAL;

	/* resize_lock synchronizes access to demote size and writes */
	mutex_lock(&h->resize_lock);
	h->demote_order = demote_order;
	mutex_unlock(&h->resize_lock);

	return count;
}
HSTATE_ATTR(demote_size);
static struct attribute *hstate_attrs[] = {
	&nr_hugepages_attr.attr,
	&nr_overcommit_hugepages_attr.attr,
	&free_hugepages_attr.attr,
	&resv_hugepages_attr.attr,
	&surplus_hugepages_attr.attr,
#ifdef CONFIG_NUMA
	&nr_hugepages_mempolicy_attr.attr,
#endif
	NULL,
};

static const struct attribute_group hstate_attr_group = {
	.attrs = hstate_attrs,
};

static struct attribute *hstate_demote_attrs[] = {
	&demote_size_attr.attr,
	&demote_attr.attr,
	NULL,
};

static const struct attribute_group hstate_demote_attr_group = {
	.attrs = hstate_demote_attrs,
};
static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
				    struct kobject **hstate_kobjs,
				    const struct attribute_group *hstate_attr_group)
{
	int retval;
	int hi = hstate_index(h);

	hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
	if (!hstate_kobjs[hi])
		return -ENOMEM;

	retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
	if (retval) {
		kobject_put(hstate_kobjs[hi]);
		hstate_kobjs[hi] = NULL;
		return retval;
	}

	if (h->demote_order) {
		retval = sysfs_create_group(hstate_kobjs[hi],
					    &hstate_demote_attr_group);
		if (retval) {
			pr_warn("HugeTLB unable to create demote interfaces for %s\n", h->name);
			sysfs_remove_group(hstate_kobjs[hi], hstate_attr_group);
			kobject_put(hstate_kobjs[hi]);
			hstate_kobjs[hi] = NULL;
			return retval;
		}
	}

	return 0;
}
#ifdef CONFIG_NUMA
static bool hugetlb_sysfs_initialized __ro_after_init;

/*
 * node_hstate/s - associate per node hstate attributes, via their kobjects,
 * with node devices in node_devices[] using a parallel array.  The array
 * index of a node device or _hstate == node id.
 * This is here to avoid any static dependency of the node device driver, in
 * the base kernel, on the hugetlb module.
 */
struct node_hstate {
	struct kobject		*hugepages_kobj;
	struct kobject		*hstate_kobjs[HUGE_MAX_HSTATE];
};
static struct node_hstate node_hstates[MAX_NUMNODES];

/*
 * A subset of global hstate attributes for node devices
 */
static struct attribute *per_node_hstate_attrs[] = {
	&nr_hugepages_attr.attr,
	&free_hugepages_attr.attr,
	&surplus_hugepages_attr.attr,
	NULL,
};

static const struct attribute_group per_node_hstate_attr_group = {
	.attrs = per_node_hstate_attrs,
};
/*
 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
 * Returns node id via non-NULL nidp.
 */
static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
{
	int nid;

	for (nid = 0; nid < nr_node_ids; nid++) {
		struct node_hstate *nhs = &node_hstates[nid];
		int i;
		for (i = 0; i < HUGE_MAX_HSTATE; i++)
			if (nhs->hstate_kobjs[i] == kobj) {
				if (nidp)
					*nidp = nid;
				return &hstates[i];
			}
	}

	BUG();
	return NULL;
}
/*
 * Unregister hstate attributes from a single node device.
 * No-op if no hstate attributes attached.
 */
void hugetlb_unregister_node(struct node *node)
{
	struct hstate *h;
	struct node_hstate *nhs = &node_hstates[node->dev.id];

	if (!nhs->hugepages_kobj)
		return;		/* no hstate attributes */

	for_each_hstate(h) {
		int idx = hstate_index(h);
		struct kobject *hstate_kobj = nhs->hstate_kobjs[idx];

		if (!hstate_kobj)
			continue;
		if (h->demote_order)
			sysfs_remove_group(hstate_kobj, &hstate_demote_attr_group);
		sysfs_remove_group(hstate_kobj, &per_node_hstate_attr_group);
		kobject_put(hstate_kobj);
		nhs->hstate_kobjs[idx] = NULL;
	}

	kobject_put(nhs->hugepages_kobj);
	nhs->hugepages_kobj = NULL;
}
/*
 * Register hstate attributes for a single node device.
 * No-op if attributes already registered.
 */
void hugetlb_register_node(struct node *node)
{
	struct hstate *h;
	struct node_hstate *nhs = &node_hstates[node->dev.id];
	int err;

	if (!hugetlb_sysfs_initialized)
		return;

	if (nhs->hugepages_kobj)
		return;		/* already allocated */

	nhs->hugepages_kobj = kobject_create_and_add("hugepages",
							&node->dev.kobj);
	if (!nhs->hugepages_kobj)
		return;

	for_each_hstate(h) {
		err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
						nhs->hstate_kobjs,
						&per_node_hstate_attr_group);
		if (err) {
			pr_err("HugeTLB: Unable to add hstate %s for node %d\n",
				h->name, node->dev.id);
			hugetlb_unregister_node(node);
			break;
		}
	}
}
/*
 * hugetlb init time:  register hstate attributes for all registered node
 * devices of nodes that have memory.  All on-line nodes should have
 * registered their associated device by this time.
 */
static void __init hugetlb_register_all_nodes(void)
{
	int nid;

	for_each_online_node(nid)
		hugetlb_register_node(node_devices[nid]);
}
#else	/* !CONFIG_NUMA */

static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
{
	BUG();
	if (nidp)
		*nidp = -1;
	return NULL;
}

static void hugetlb_register_all_nodes(void) { }

#endif

#ifdef CONFIG_CMA
static void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_check(void)
{
}
#endif
static void __init hugetlb_sysfs_init(void)
{
	struct hstate *h;
	int err;

	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
	if (!hugepages_kobj)
		return;

	for_each_hstate(h) {
		err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
					       hstate_kobjs, &hstate_attr_group);
		if (err)
			pr_err("HugeTLB: Unable to add hstate %s", h->name);
	}

#ifdef CONFIG_NUMA
	hugetlb_sysfs_initialized = true;
#endif
	hugetlb_register_all_nodes();
}

#ifdef CONFIG_SYSCTL
static void hugetlb_sysctl_init(void);
#else
static inline void hugetlb_sysctl_init(void) { }
#endif
static int __init hugetlb_init(void)
{
	int i;

	BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE <
			__NR_HPAGEFLAGS);

	if (!hugepages_supported()) {
		if (hugetlb_max_hstate || default_hstate_max_huge_pages)
			pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n");
		return 0;
	}

	/*
	 * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists.  Some
	 * architectures depend on setup being done here.
	 */
	hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
	if (!parsed_default_hugepagesz) {
		/*
		 * If we did not parse a default huge page size, set
		 * default_hstate_idx to HPAGE_SIZE hstate. And, if the
		 * number of huge pages for this default size was implicitly
		 * specified, set that here as well.
		 * Note that the implicit setting will overwrite an explicit
		 * setting.  A warning will be printed in this case.
		 */
		default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE));
		if (default_hstate_max_huge_pages) {
			if (default_hstate.max_huge_pages) {
				char buf[32];

				string_get_size(huge_page_size(&default_hstate),
					1, STRING_UNITS_2, buf, 32);
				pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
					default_hstate.max_huge_pages, buf);
				pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n",
					default_hstate_max_huge_pages);
			}
			default_hstate.max_huge_pages =
				default_hstate_max_huge_pages;

			for_each_online_node(i)
				default_hstate.max_huge_pages_node[i] =
					default_hugepages_in_node[i];
		}
	}

	hugetlb_cma_check();
	hugetlb_init_hstates();
	gather_bootmem_prealloc();
	report_hugepages();

	hugetlb_sysfs_init();
	hugetlb_cgroup_file_init();
	hugetlb_sysctl_init();

#ifdef CONFIG_SMP
	num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
#else
	num_fault_mutexes = 1;
#endif
	hugetlb_fault_mutex_table =
		kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
			      GFP_KERNEL);
	BUG_ON(!hugetlb_fault_mutex_table);

	for (i = 0; i < num_fault_mutexes; i++)
		mutex_init(&hugetlb_fault_mutex_table[i]);
	return 0;
}
subsys_initcall(hugetlb_init);
/* Overwritten by architectures with more huge page sizes */
bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size)
{
	return size == HPAGE_SIZE;
}
void __init hugetlb_add_hstate(unsigned int order)
{
	struct hstate *h;
	unsigned long i;

	if (size_to_hstate(PAGE_SIZE << order)) {
		return;
	}
	BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
	BUG_ON(order == 0);
	h = &hstates[hugetlb_max_hstate++];
	mutex_init(&h->resize_lock);
	h->order = order;
	h->mask = ~(huge_page_size(h) - 1);
	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
	INIT_LIST_HEAD(&h->hugepage_activelist);
	h->next_nid_to_alloc = first_memory_node;
	h->next_nid_to_free = first_memory_node;
	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
					huge_page_size(h)/SZ_1K);

	parsed_hstate = h;
}
bool __init __weak hugetlb_node_alloc_supported(void)
{
	return true;
}

static void __init hugepages_clear_pages_in_node(void)
{
	if (!hugetlb_max_hstate) {
		default_hstate_max_huge_pages = 0;
		memset(default_hugepages_in_node, 0,
			sizeof(default_hugepages_in_node));
	} else {
		parsed_hstate->max_huge_pages = 0;
		memset(parsed_hstate->max_huge_pages_node, 0,
			sizeof(parsed_hstate->max_huge_pages_node));
	}
}
/*
 * hugepages command line processing
 * hugepages normally follows a valid hugepagesz or default_hugepagesz
 * specification.  If not, ignore the hugepages value.  hugepages can also
 * be the first huge page command line option in which case it implicitly
 * specifies the number of huge pages for the default size.
 */
static int __init hugepages_setup(char *s)
{
	unsigned long *mhp;
	static unsigned long *last_mhp;
	int node = NUMA_NO_NODE;
	int count;
	unsigned long tmp;
	char *p = s;

	if (!parsed_valid_hugepagesz) {
		pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
		parsed_valid_hugepagesz = true;
		return 1;
	}

	/*
	 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
	 * yet, so this hugepages= parameter goes to the "default hstate".
	 * Otherwise, it goes with the previously parsed hugepagesz or
	 * default_hugepagesz.
	 */
	else if (!hugetlb_max_hstate)
		mhp = &default_hstate_max_huge_pages;
	else
		mhp = &parsed_hstate->max_huge_pages;

	if (mhp == last_mhp) {
		pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s);
		return 1;
	}

	while (*p) {
		count = 0;
		if (sscanf(p, "%lu%n", &tmp, &count) != 1)
			goto invalid;
		/* Parameter is node format */
		if (p[count] == ':') {
			if (!hugetlb_node_alloc_supported()) {
				pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n");
				return 1;
			}
			if (tmp >= MAX_NUMNODES || !node_online(tmp))
				goto invalid;
			node = array_index_nospec(tmp, MAX_NUMNODES);
			p += count + 1;
			/* Parse hugepages */
			if (sscanf(p, "%lu%n", &tmp, &count) != 1)
				goto invalid;
			if (!hugetlb_max_hstate)
				default_hugepages_in_node[node] = tmp;
			else
				parsed_hstate->max_huge_pages_node[node] = tmp;
			*mhp += tmp;
			/* Go to parse next node */
			if (p[count] == ',')
				p += count + 1;
			else
				break;
		} else {
			if (p != s)
				goto invalid;
			*mhp = tmp;
			break;
		}
	}

	/*
	 * Global state is always initialized later in hugetlb_init.
	 * But we need to allocate gigantic hstates here early to still
	 * use the bootmem allocator.
	 */
	if (hugetlb_max_hstate && hstate_is_gigantic(parsed_hstate))
		hugetlb_hstate_alloc_pages(parsed_hstate);

	last_mhp = mhp;

	return 1;

invalid:
	pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p);
	hugepages_clear_pages_in_node();
	return 1;
}
__setup("hugepages=", hugepages_setup);
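
/*
 * Illustrative examples (not part of the original source): boot command
 * lines this parser is expected to accept, assuming a NUMA machine with
 * nodes 0 and 1 online:
 *
 *	hugepagesz=2M hugepages=512	512 2MB pages, distributed by the
 *					default interleaving policy
 *	hugepagesz=1G hugepages=0:2,1:2	node format: two 1GB pages reserved
 *					on each of nodes 0 and 1
 *	hugepages=64			first hugetlb option on the line, so
 *					it applies to the default page size
 */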
/*
 * hugepagesz command line processing
 * A specific huge page size can only be specified once with hugepagesz.
 * hugepagesz is followed by hugepages on the command line.  The global
 * variable 'parsed_valid_hugepagesz' is used to determine if prior
 * hugepagesz argument was valid.
 */
static int __init hugepagesz_setup(char *s)
{
	unsigned long size;
	struct hstate *h;

	parsed_valid_hugepagesz = false;
	size = (unsigned long)memparse(s, NULL);

	if (!arch_hugetlb_valid_size(size)) {
		pr_err("HugeTLB: unsupported hugepagesz=%s\n", s);
		return 1;
	}

	h = size_to_hstate(size);
	if (h) {
		/*
		 * hstate for this size already exists.  This is normally
		 * an error, but is allowed if the existing hstate is the
		 * default hstate.  More specifically, it is only allowed if
		 * the number of huge pages for the default hstate was not
		 * previously specified.
		 */
		if (!parsed_default_hugepagesz ||  h != &default_hstate ||
		    default_hstate.max_huge_pages) {
			pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s);
			return 1;
		}

		/*
		 * No need to call hugetlb_add_hstate() as hstate already
		 * exists.  But, do set parsed_hstate so that a following
		 * hugepages= parameter will be applied to this hstate.
		 */
		parsed_hstate = h;
		parsed_valid_hugepagesz = true;
		return 1;
	}

	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
	parsed_valid_hugepagesz = true;
	return 1;
}
__setup("hugepagesz=", hugepagesz_setup);
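
/*
 * Example of the one legitimate duplicate (illustrative, not from the
 * original source): with "default_hugepagesz=2M hugepagesz=2M hugepages=256"
 * the hugepagesz=2M finds an existing hstate, but it is the default hstate
 * and its page count has not been set yet, so parsed_hstate is pointed at
 * it instead of warning, and the following hugepages=256 applies to it.
 */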
/*
 * default_hugepagesz command line input
 * Only one instance of default_hugepagesz allowed on command line.
 */
static int __init default_hugepagesz_setup(char *s)
{
	unsigned long size;
	int i;

	parsed_valid_hugepagesz = false;
	if (parsed_default_hugepagesz) {
		pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);
		return 1;
	}

	size = (unsigned long)memparse(s, NULL);

	if (!arch_hugetlb_valid_size(size)) {
		pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s);
		return 1;
	}

	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
	parsed_valid_hugepagesz = true;
	parsed_default_hugepagesz = true;
	default_hstate_idx = hstate_index(size_to_hstate(size));

	/*
	 * The number of default huge pages (for this size) could have been
	 * specified as the first hugetlb parameter: hugepages=X.  If so,
	 * then default_hstate_max_huge_pages is set.  If the default huge
	 * page size is gigantic (> MAX_ORDER), then the pages must be
	 * allocated here from bootmem allocator.
	 */
	if (default_hstate_max_huge_pages) {
		default_hstate.max_huge_pages = default_hstate_max_huge_pages;
		for_each_online_node(i)
			default_hstate.max_huge_pages_node[i] =
				default_hugepages_in_node[i];
		if (hstate_is_gigantic(&default_hstate))
			hugetlb_hstate_alloc_pages(&default_hstate);
		default_hstate_max_huge_pages = 0;
	}

	return 1;
}
__setup("default_hugepagesz=", default_hugepagesz_setup);
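
/*
 * Illustrative example (not part of the original source): with
 * "hugepages=2 default_hugepagesz=1G" on the command line, hugepages=2 is
 * parsed first and stashed in default_hstate_max_huge_pages; the branch
 * above then transfers it to the 1GB hstate and, because 1GB pages are
 * gigantic, allocates them from bootmem right here.
 */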
static nodemask_t *policy_mbind_nodemask(gfp_t gfp)
{
#ifdef CONFIG_NUMA
	struct mempolicy *mpol = get_task_policy(current);

	/*
	 * Only enforce MPOL_BIND policy which overlaps with cpuset policy
	 * (from policy_nodemask) specifically for hugetlb case
	 */
	if (mpol->mode == MPOL_BIND &&
		(apply_policy_zone(mpol, gfp_zone(gfp)) &&
		 cpuset_nodemask_valid_mems_allowed(&mpol->nodes)))
		return &mpol->nodes;
#endif
	return NULL;
}

static unsigned int allowed_mems_nr(struct hstate *h)
{
	int node;
	unsigned int nr = 0;
	nodemask_t *mbind_nodemask;
	unsigned int *array = h->free_huge_pages_node;
	gfp_t gfp_mask = htlb_alloc_mask(h);

	mbind_nodemask = policy_mbind_nodemask(gfp_mask);
	for_each_node_mask(node, cpuset_current_mems_allowed) {
		if (!mbind_nodemask || node_isset(node, *mbind_nodemask))
			nr += array[node];
	}

	return nr;
}
#ifdef CONFIG_SYSCTL
static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
					  void *buffer, size_t *length,
					  loff_t *ppos, unsigned long *out)
{
	struct ctl_table dup_table;

	/*
	 * In order to avoid races with __do_proc_doulongvec_minmax(), we
	 * can duplicate the @table and alter the duplicate of it.
	 */
	dup_table = *table;
	dup_table.data = out;

	return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
}

static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
			 struct ctl_table *table, int write,
			 void *buffer, size_t *length, loff_t *ppos)
{
	struct hstate *h = &default_hstate;
	unsigned long tmp = h->max_huge_pages;
	int ret;

	if (!hugepages_supported())
		return -EOPNOTSUPP;

	ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
					     &tmp);
	if (ret)
		goto out;

	if (write)
		ret = __nr_hugepages_store_common(obey_mempolicy, h,
						  NUMA_NO_NODE, tmp, *length);
out:
	return ret;
}
static int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			  void *buffer, size_t *length, loff_t *ppos)
{

	return hugetlb_sysctl_handler_common(false, table, write,
							buffer, length, ppos);
}

#ifdef CONFIG_NUMA
static int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
			  void *buffer, size_t *length, loff_t *ppos)
{
	return hugetlb_sysctl_handler_common(true, table, write,
							buffer, length, ppos);
}
#endif /* CONFIG_NUMA */
static int hugetlb_overcommit_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	struct hstate *h = &default_hstate;
	unsigned long tmp;
	int ret;

	if (!hugepages_supported())
		return -EOPNOTSUPP;

	tmp = h->nr_overcommit_huge_pages;

	if (write && hstate_is_gigantic(h))
		return -EINVAL;

	ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
					     &tmp);
	if (ret)
		goto out;

	if (write) {
		spin_lock_irq(&hugetlb_lock);
		h->nr_overcommit_huge_pages = tmp;
		spin_unlock_irq(&hugetlb_lock);
	}
out:
	return ret;
}
static struct ctl_table hugetlb_table[] = {
	{
		.procname	= "nr_hugepages",
		.data		= NULL,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= hugetlb_sysctl_handler,
	},
#ifdef CONFIG_NUMA
	{
		.procname	= "nr_hugepages_mempolicy",
		.data		= NULL,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= &hugetlb_mempolicy_sysctl_handler,
	},
#endif
	{
		.procname	= "hugetlb_shm_group",
		.data		= &sysctl_hugetlb_shm_group,
		.maxlen		= sizeof(gid_t),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "nr_overcommit_hugepages",
		.data		= NULL,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= hugetlb_overcommit_handler,
	},
	{ }
};

static void hugetlb_sysctl_init(void)
{
	register_sysctl_init("vm", hugetlb_table);
}
#endif /* CONFIG_SYSCTL */
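
/*
 * Usage sketch (illustrative, not part of the original source): the
 * handlers above back the vm.* sysctls, e.g.
 *
 *	sysctl vm.nr_hugepages=1024
 *	echo 64 > /proc/sys/vm/nr_overcommit_hugepages
 *
 * nr_hugepages_mempolicy applies the writing task's memory policy, so
 * "numactl -m 1 sysctl vm.nr_hugepages_mempolicy=512" grows the pool on
 * node 1 only (assuming a NUMA-enabled kernel).
 */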
void hugetlb_report_meminfo(struct seq_file *m)
{
	struct hstate *h;
	unsigned long total = 0;

	if (!hugepages_supported())
		return;

	for_each_hstate(h) {
		unsigned long count = h->nr_huge_pages;

		total += huge_page_size(h) * count;

		if (h == &default_hstate)
			seq_printf(m,
				   "HugePages_Total:   %5lu\n"
				   "HugePages_Free:    %5lu\n"
				   "HugePages_Rsvd:    %5lu\n"
				   "HugePages_Surp:    %5lu\n"
				   "Hugepagesize:   %8lu kB\n",
				   count,
				   h->free_huge_pages,
				   h->resv_huge_pages,
				   h->surplus_huge_pages,
				   huge_page_size(h) / SZ_1K);
	}

	seq_printf(m, "Hugetlb:        %8lu kB\n", total / SZ_1K);
}
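
/*
 * Example /proc/meminfo output (illustrative values):
 *
 *	HugePages_Total:      16
 *	HugePages_Free:       16
 *	HugePages_Rsvd:        0
 *	HugePages_Surp:        0
 *	Hugepagesize:       2048 kB
 *	Hugetlb:           32768 kB
 *
 * Only the default hstate gets the detailed block; "Hugetlb:" sums the
 * memory held in huge pages of every size.
 */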
int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
	struct hstate *h = &default_hstate;

	if (!hugepages_supported())
		return 0;

	return sysfs_emit_at(buf, len,
			     "Node %d HugePages_Total: %5u\n"
			     "Node %d HugePages_Free:  %5u\n"
			     "Node %d HugePages_Surp:  %5u\n",
			     nid, h->nr_huge_pages_node[nid],
			     nid, h->free_huge_pages_node[nid],
			     nid, h->surplus_huge_pages_node[nid]);
}
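
/*
 * Example output (illustrative values), surfaced via
 * /sys/devices/system/node/nodeN/meminfo:
 *
 *	Node 0 HugePages_Total:     8
 *	Node 0 HugePages_Free:      8
 *	Node 0 HugePages_Surp:      0
 */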
void hugetlb_show_meminfo_node(int nid)
{
	struct hstate *h;

	if (!hugepages_supported())
		return;

	for_each_hstate(h)
		printk("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
			nid,
			h->nr_huge_pages_node[nid],
			h->free_huge_pages_node[nid],
			h->surplus_huge_pages_node[nid],
			huge_page_size(h) / SZ_1K);
}

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
{
	seq_printf(m, "HugetlbPages:\t%8lu kB\n",
		   K(atomic_long_read(&mm->hugetlb_usage)));
}
/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	struct hstate *h;
	unsigned long nr_total_pages = 0;

	for_each_hstate(h)
		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
	return nr_total_pages;
}

static int hugetlb_acct_memory(struct hstate *h, long delta)
{
	int ret = -ENOMEM;

	if (!delta)
		return 0;

	spin_lock_irq(&hugetlb_lock);
	/*
	 * When cpuset is configured, it breaks the strict hugetlb page
	 * reservation as the accounting is done on a global variable. Such
	 * reservation is completely rubbish in the presence of cpuset because
	 * the reservation is not checked against page availability for the
	 * current cpuset. Applications can still potentially be OOM'ed by the
	 * kernel for lack of free hugetlb pages in the cpuset that the task
	 * is in. Attempting to enforce strict accounting with cpuset is
	 * almost impossible (or too ugly) because cpuset is so fluid that
	 * tasks or memory nodes can be dynamically moved between cpusets.
	 *
	 * The change of semantics for shared hugetlb mapping with cpuset is
	 * undesirable. However, in order to preserve some of the semantics,
	 * we fall back to check against current free page availability as
	 * a best attempt and hopefully to minimize the impact of changing
	 * semantics that cpuset has.
	 *
	 * Apart from cpuset, we also have the memory policy mechanism that
	 * determines from which node the kernel will allocate memory in a
	 * NUMA system. So similar to cpuset, we also should consider the
	 * memory policy of the current task. Similar to the description
	 * above.
	 */
	if (delta > 0) {
		if (gather_surplus_pages(h, delta) < 0)
			goto out;

		if (delta > allowed_mems_nr(h)) {
			return_unused_surplus_pages(h, delta);
			goto out;
		}
	}

	ret = 0;
	if (delta < 0)
		return_unused_surplus_pages(h, (unsigned long) -delta);

out:
	spin_unlock_irq(&hugetlb_lock);
	return ret;
}
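
/*
 * Calling convention (sketch): delta is in huge pages. A positive delta
 * charges new reservations (e.g. when a mapping is set up), a negative
 * delta releases them, as in the hugetlb_acct_memory(h, -gbl_reserve)
 * call from hugetlb_vm_op_close() below.
 */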
static void hugetlb_vm_op_open(struct vm_area_struct *vma)
{
	struct resv_map *resv = vma_resv_map(vma);

	/*
	 * HPAGE_RESV_OWNER indicates a private mapping.
	 * This new VMA should share its siblings reservation map if present.
	 * The VMA will only ever have a valid reservation map pointer where
	 * it is being copied for another still existing VMA.  As that VMA
	 * has a reference to the reservation map it cannot disappear until
	 * after this open call completes.  It is therefore safe to take a
	 * new reference here without additional locking.
	 */
	if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		resv_map_dup_hugetlb_cgroup_uncharge_info(resv);
		kref_get(&resv->refs);
	}

	/*
	 * vma_lock structure for sharable mappings is vma specific.
	 * Clear old pointer (if copied via vm_area_dup) and allocate
	 * new structure.  Before clearing, make sure vma_lock is not
	 * for this vma.
	 */
	if (vma->vm_flags & VM_MAYSHARE) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		if (vma_lock) {
			if (vma_lock->vma != vma) {
				vma->vm_private_data = NULL;
				hugetlb_vma_lock_alloc(vma);
			} else
				pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__);
		} else
			hugetlb_vma_lock_alloc(vma);
	}
}
static void hugetlb_vm_op_close(struct vm_area_struct *vma)
{
	struct hstate *h = hstate_vma(vma);
	struct resv_map *resv;
	struct hugepage_subpool *spool = subpool_vma(vma);
	unsigned long reserve, start, end;
	long gbl_reserve;

	hugetlb_vma_lock_free(vma);

	resv = vma_resv_map(vma);
	if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		return;

	start = vma_hugecache_offset(h, vma, vma->vm_start);
	end = vma_hugecache_offset(h, vma, vma->vm_end);

	reserve = (end - start) - region_count(resv, start, end);
	hugetlb_cgroup_uncharge_counter(resv, start, end);
	if (reserve) {
		/*
		 * Decrement reserve counts.  The global reserve count may be
		 * adjusted if the subpool has a minimum size.
		 */
		gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
		hugetlb_acct_memory(h, -gbl_reserve);
	}

	kref_put(&resv->refs, resv_map_release);
}
static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
{
	if (addr & ~(huge_page_mask(hstate_vma(vma))))
		return -EINVAL;

	/*
	 * PMD sharing is only possible for PUD_SIZE-aligned address ranges
	 * in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this
	 * split, unshare PMDs in the PUD_SIZE interval surrounding addr now.
	 */
	if (addr & ~PUD_MASK) {
		/*
		 * hugetlb_vm_op_split is called right before we attempt to
		 * split the VMA. We will need to unshare PMDs in the old and
		 * new VMAs, so let's unshare before we split.
		 */
		unsigned long floor = addr & PUD_MASK;
		unsigned long ceil = floor + PUD_SIZE;

		if (floor >= vma->vm_start && ceil <= vma->vm_end)
			hugetlb_unshare_pmds(vma, floor, ceil);
	}

	return 0;
}

static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
{
	return huge_page_size(hstate_vma(vma));
}
/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we
 * get this far.
 */
static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
{
	BUG();
	return 0;
}

/*
 * When a new function is introduced to vm_operations_struct and added
 * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
 * This is because under System V memory model, mappings created via
 * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
 * their original vm_ops are overwritten with shm_vm_ops.
 */
const struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
	.open = hugetlb_vm_op_open,
	.close = hugetlb_vm_op_close,
	.may_split = hugetlb_vm_op_split,
	.pagesize = hugetlb_vm_op_pagesize,
};
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;
	unsigned int shift = huge_page_shift(hstate_vma(vma));

	if (writable) {
		entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
					 vma->vm_page_prot)));
	} else {
		entry = huge_pte_wrprotect(mk_huge_pte(page,
					   vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = arch_make_huge_pte(entry, shift, vma->vm_flags);

	return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
		update_mmu_cache(vma, address, ptep);
}
bool is_hugetlb_entry_migration(pte_t pte)
{
	swp_entry_t swp;

	if (huge_pte_none(pte) || pte_present(pte))
		return false;
	swp = pte_to_swp_entry(pte);
	if (is_migration_entry(swp))
		return true;
	else
		return false;
}

static bool is_hugetlb_entry_hwpoisoned(pte_t pte)
{
	swp_entry_t swp;

	if (huge_pte_none(pte) || pte_present(pte))
		return false;
	swp = pte_to_swp_entry(pte);
	if (is_hwpoison_entry(swp))
		return true;
	else
		return false;
}
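
/*
 * Note (sketch): both helpers treat a present or empty pte as "no" and only
 * then decode the swap entry; a hugetlb pte that is neither none nor
 * present always carries a swap-format entry (migration, hwpoison or a
 * pte marker).
 */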
static void
hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
		      struct folio *new_folio, pte_t old)
{
	pte_t newpte = make_huge_pte(vma, &new_folio->page, 1);

	__folio_mark_uptodate(new_folio);
	hugepage_add_new_anon_rmap(new_folio, vma, addr);
	if (userfaultfd_wp(vma) && huge_pte_uffd_wp(old))
		newpte = huge_pte_mkuffd_wp(newpte);
	set_huge_pte_at(vma->vm_mm, addr, ptep, newpte);
	hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm);
	folio_set_hugetlb_migratable(new_folio);
}
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *dst_vma,
			    struct vm_area_struct *src_vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct folio *pte_folio;
	unsigned long addr;
	bool cow = is_cow_mapping(src_vma->vm_flags);
	struct hstate *h = hstate_vma(src_vma);
	unsigned long sz = huge_page_size(h);
	unsigned long npages = pages_per_huge_page(h);
	struct mmu_notifier_range range;
	unsigned long last_addr_mask;
	int ret = 0;

	if (cow) {
		mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src,
					src_vma->vm_start,
					src_vma->vm_end);
		mmu_notifier_invalidate_range_start(&range);
		vma_assert_write_locked(src_vma);
		raw_write_seqcount_begin(&src->write_protect_seq);
	} else {
		/*
		 * For shared mappings the vma lock must be held before
		 * calling hugetlb_walk() in the src vma. Otherwise, the
		 * returned ptep could go away if part of a shared pmd and
		 * another thread calls huge_pmd_unshare.
		 */
		hugetlb_vma_lock_read(src_vma);
	}

	last_addr_mask = hugetlb_mask_last_page(h);
	for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) {
		spinlock_t *src_ptl, *dst_ptl;
		src_pte = hugetlb_walk(src_vma, addr, sz);
		if (!src_pte) {
			addr |= last_addr_mask;
			continue;
		}
		dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz);
		if (!dst_pte) {
			ret = -ENOMEM;
			break;
		}

		/*
		 * If the pagetables are shared don't copy or take references.
		 *
		 * dst_pte == src_pte is the common case of src/dest sharing.
		 * However, src could have 'unshared' and dst shares with
		 * another vma. So page_count of ptep page is checked instead
		 * to reliably determine whether pte is shared.
		 */
		if (page_count(virt_to_page(dst_pte)) > 1) {
			addr |= last_addr_mask;
			continue;
		}

		dst_ptl = huge_pte_lock(h, dst, dst_pte);
		src_ptl = huge_pte_lockptr(h, src, src_pte);
		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
		entry = huge_ptep_get(src_pte);
again:
		if (huge_pte_none(entry)) {
			/*
			 * Skip if src entry none.
			 */
			;
		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) {
			if (!userfaultfd_wp(dst_vma))
				entry = huge_pte_clear_uffd_wp(entry);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		} else if (unlikely(is_hugetlb_entry_migration(entry))) {
			swp_entry_t swp_entry = pte_to_swp_entry(entry);
			bool uffd_wp = pte_swp_uffd_wp(entry);

			if (!is_readable_migration_entry(swp_entry) && cow) {
				/*
				 * COW mappings require pages in both
				 * parent and child to be set to read.
				 */
				swp_entry = make_readable_migration_entry(
							swp_offset(swp_entry));
				entry = swp_entry_to_pte(swp_entry);
				if (userfaultfd_wp(src_vma) && uffd_wp)
					entry = pte_swp_mkuffd_wp(entry);
				set_huge_pte_at(src, addr, src_pte, entry);
			}
			if (!userfaultfd_wp(dst_vma))
				entry = huge_pte_clear_uffd_wp(entry);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		} else if (unlikely(is_pte_marker(entry))) {
			pte_marker marker = copy_pte_marker(
						pte_to_swp_entry(entry), dst_vma);

			if (marker)
				set_huge_pte_at(dst, addr, dst_pte,
						make_pte_marker(marker));
		} else {
			entry = huge_ptep_get(src_pte);
			pte_folio = page_folio(pte_page(entry));
			folio_get(pte_folio);

			/*
			 * Failing to duplicate the anon rmap is a rare case
			 * where we see pinned hugetlb pages while they're
			 * prone to COW. We need to do the COW earlier during
			 * fork.
			 *
			 * When pre-allocating the page or copying data, we
			 * need to be without the pgtable locks since we could
			 * sleep during the process.
			 */
			if (!folio_test_anon(pte_folio)) {
				page_dup_file_rmap(&pte_folio->page, true);
			} else if (page_try_dup_anon_rmap(&pte_folio->page,
							  true, src_vma)) {
				pte_t src_pte_old = entry;
				struct folio *new_folio;

				spin_unlock(src_ptl);
				spin_unlock(dst_ptl);
				/* Do not use reserve as it's private owned */
				new_folio = alloc_hugetlb_folio(dst_vma, addr, 1);
				if (IS_ERR(new_folio)) {
					folio_put(pte_folio);
					ret = PTR_ERR(new_folio);
					break;
				}
				ret = copy_user_large_folio(new_folio,
							    pte_folio,
							    addr, dst_vma);
				folio_put(pte_folio);
				if (ret) {
					folio_put(new_folio);
					break;
				}

				/* Install the new hugetlb folio if src pte stable */
				dst_ptl = huge_pte_lock(h, dst, dst_pte);
				src_ptl = huge_pte_lockptr(h, src, src_pte);
				spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
				entry = huge_ptep_get(src_pte);
				if (!pte_same(src_pte_old, entry)) {
					restore_reserve_on_error(h, dst_vma, addr,
								 new_folio);
					folio_put(new_folio);
					/* huge_ptep of dst_pte won't change as in child */
					goto again;
				}
				hugetlb_install_folio(dst_vma, dst_pte, addr,
						      new_folio, src_pte_old);
				spin_unlock(src_ptl);
				spin_unlock(dst_ptl);
				continue;
			}

			if (cow) {
				/*
				 * No need to notify as we are downgrading page
				 * table protection not changing it to point
				 * to a new page.
				 *
				 * See Documentation/mm/mmu_notifier.rst
				 */
				huge_ptep_set_wrprotect(src, addr, src_pte);
				entry = huge_pte_wrprotect(entry);
			}

			if (!userfaultfd_wp(dst_vma))
				entry = huge_pte_clear_uffd_wp(entry);

			set_huge_pte_at(dst, addr, dst_pte, entry);
			hugetlb_count_add(npages, dst);
		}
		spin_unlock(src_ptl);
		spin_unlock(dst_ptl);
	}

	if (cow) {
		raw_write_seqcount_end(&src->write_protect_seq);
		mmu_notifier_invalidate_range_end(&range);
	} else {
		hugetlb_vma_unlock_read(src_vma);
	}

	return ret;
}
static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
			  unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte)
{
	struct hstate *h = hstate_vma(vma);
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *src_ptl, *dst_ptl;
	pte_t pte;

	dst_ptl = huge_pte_lock(h, mm, dst_pte);
	src_ptl = huge_pte_lockptr(h, mm, src_pte);

	/*
	 * We don't have to worry about the ordering of src and dst ptlocks
	 * because exclusive mmap_lock (or the i_mmap_lock) prevents deadlock.
	 */
	if (src_ptl != dst_ptl)
		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	pte = huge_ptep_get_and_clear(mm, old_addr, src_pte);
	set_huge_pte_at(mm, new_addr, dst_pte, pte);

	if (src_ptl != dst_ptl)
		spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
}
int move_hugetlb_page_tables(struct vm_area_struct *vma,
			     struct vm_area_struct *new_vma,
			     unsigned long old_addr, unsigned long new_addr,
			     unsigned long len)
{
	struct hstate *h = hstate_vma(vma);
	struct address_space *mapping = vma->vm_file->f_mapping;
	unsigned long sz = huge_page_size(h);
	struct mm_struct *mm = vma->vm_mm;
	unsigned long old_end = old_addr + len;
	unsigned long last_addr_mask;
	pte_t *src_pte, *dst_pte;
	struct mmu_notifier_range range;
	bool shared_pmd = false;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, old_addr,
				old_end);
	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
	/*
	 * In case of shared PMDs, we should cover the maximum possible
	 * range.
	 */
	flush_cache_range(vma, range.start, range.end);

	mmu_notifier_invalidate_range_start(&range);
	last_addr_mask = hugetlb_mask_last_page(h);
	/* Prevent race with file truncation */
	hugetlb_vma_lock_write(vma);
	i_mmap_lock_write(mapping);
	for (; old_addr < old_end; old_addr += sz, new_addr += sz) {
		src_pte = hugetlb_walk(vma, old_addr, sz);
		if (!src_pte) {
			old_addr |= last_addr_mask;
			new_addr |= last_addr_mask;
			continue;
		}
		if (huge_pte_none(huge_ptep_get(src_pte)))
			continue;

		if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) {
			shared_pmd = true;
			old_addr |= last_addr_mask;
			new_addr |= last_addr_mask;
			continue;
		}

		dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz);
		if (!dst_pte)
			break;

		move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte);
	}

	if (shared_pmd)
		flush_hugetlb_tlb_range(vma, range.start, range.end);
	else
		flush_hugetlb_tlb_range(vma, old_end - len, old_end);
	mmu_notifier_invalidate_range_end(&range);
	i_mmap_unlock_write(mapping);
	hugetlb_vma_unlock_write(vma);

	return len + old_addr - old_end;
}
static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
				   unsigned long start, unsigned long end,
				   struct page *ref_page, zap_flags_t zap_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	spinlock_t *ptl;
	struct page *page;
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);
	unsigned long last_addr_mask;
	bool force_flush = false;

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~huge_page_mask(h));
	BUG_ON(end & ~huge_page_mask(h));

	/*
	 * This is a hugetlb vma, all the pte entries should point
	 * to huge page.
	 */
	tlb_change_page_size(tlb, sz);
	tlb_start_vma(tlb, vma);

	last_addr_mask = hugetlb_mask_last_page(h);
	address = start;
	for (; address < end; address += sz) {
		ptep = hugetlb_walk(vma, address, sz);
		if (!ptep) {
			address |= last_addr_mask;
			continue;
		}

		ptl = huge_pte_lock(h, mm, ptep);
		if (huge_pmd_unshare(mm, vma, address, ptep)) {
			spin_unlock(ptl);
			tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
			force_flush = true;
			address |= last_addr_mask;
			continue;
		}

		pte = huge_ptep_get(ptep);
		if (huge_pte_none(pte)) {
			spin_unlock(ptl);
			continue;
		}

		/*
		 * Migrating hugepage or HWPoisoned hugepage is already
		 * unmapped and its refcount is dropped, so just clear pte here.
		 */
		if (unlikely(!pte_present(pte))) {
			/*
			 * If the pte was wr-protected by uffd-wp in any of the
			 * swap forms, meanwhile the caller does not want to
			 * drop the uffd-wp bit in this zap, then replace the
			 * pte with a marker.
			 */
			if (pte_swp_uffd_wp_any(pte) &&
			    !(zap_flags & ZAP_FLAG_DROP_MARKER))
				set_huge_pte_at(mm, address, ptep,
						make_pte_marker(PTE_MARKER_UFFD_WP));
			else
				huge_pte_clear(mm, address, ptep, sz);
			spin_unlock(ptl);
			continue;
		}

		page = pte_page(pte);
		/*
		 * If a reference page is supplied, it is because a specific
		 * page is being unmapped, not a range. Ensure the page we
		 * are about to unmap is the actual page of interest.
		 */
		if (ref_page) {
			if (page != ref_page) {
				spin_unlock(ptl);
				continue;
			}
			/*
			 * Mark the VMA as having unmapped its page so that
			 * future faults in this VMA will fail rather than
			 * looking like data was lost
			 */
			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
		}

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
		if (huge_pte_dirty(pte))
			set_page_dirty(page);
		/* Leave a uffd-wp pte marker if needed */
		if (huge_pte_uffd_wp(pte) &&
		    !(zap_flags & ZAP_FLAG_DROP_MARKER))
			set_huge_pte_at(mm, address, ptep,
					make_pte_marker(PTE_MARKER_UFFD_WP));
		hugetlb_count_sub(pages_per_huge_page(h), mm);
		page_remove_rmap(page, vma, true);

		spin_unlock(ptl);
		tlb_remove_page_size(tlb, page, huge_page_size(h));
		/*
		 * Bail out after unmapping reference page if supplied
		 */
		if (ref_page)
			break;
	}
	tlb_end_vma(tlb, vma);

	/*
	 * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
	 * could defer the flush until now, since by holding i_mmap_rwsem we
	 * guaranteed that the last reference would not be dropped. But we must
	 * do the flushing before we return, as otherwise i_mmap_rwsem will be
	 * dropped and the last reference to the shared PMDs page might be
	 * dropped as well.
	 *
	 * In theory we could defer the freeing of the PMD pages as well, but
	 * huge_pmd_unshare() relies on the exact page_count for the PMD page to
	 * detect sharing, so we cannot defer the release of the page either.
	 * Instead, do flush now.
	 */
	if (force_flush)
		tlb_flush_mmu_tlbonly(tlb);
}
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma, unsigned long start,
			  unsigned long end, struct page *ref_page,
			  zap_flags_t zap_flags)
{
	hugetlb_vma_lock_write(vma);
	i_mmap_lock_write(vma->vm_file->f_mapping);

	/* mmu notification performed in caller */
	__unmap_hugepage_range(tlb, vma, start, end, ref_page, zap_flags);

	if (zap_flags & ZAP_FLAG_UNMAP) {	/* final unmap */
		/*
		 * Unlock and free the vma lock before releasing i_mmap_rwsem.
		 * When the vma_lock is freed, this makes the vma ineligible
		 * for pmd sharing.  And, i_mmap_rwsem is required to set up
		 * pmd sharing.  This is important as page tables for this
		 * unmapped range will be asynchronously deleted.  If the page
		 * tables are shared, there will be issues when accessed by
		 * someone else.
		 */
		__hugetlb_vma_unlock_write_free(vma);
		i_mmap_unlock_write(vma->vm_file->f_mapping);
	} else {
		i_mmap_unlock_write(vma->vm_file->f_mapping);
		hugetlb_vma_unlock_write(vma);
	}
}
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end, struct page *ref_page,
			  zap_flags_t zap_flags)
{
	struct mmu_notifier_range range;
	struct mmu_gather tlb;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
				start, end);
	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
	mmu_notifier_invalidate_range_start(&range);
	tlb_gather_mmu(&tlb, vma->vm_mm);

	__unmap_hugepage_range(&tlb, vma, start, end, ref_page, zap_flags);

	mmu_notifier_invalidate_range_end(&range);
	tlb_finish_mmu(&tlb);
}
/*
 * This is called when the original mapper is failing to COW a MAP_PRIVATE
 * mapping it owns the reserve page for. The intention is to unmap the page
 * from other VMAs and let the children be SIGKILLed if they are faulting the
 * same region.
 */
static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
			      struct page *page, unsigned long address)
{
	struct hstate *h = hstate_vma(vma);
	struct vm_area_struct *iter_vma;
	struct address_space *mapping;
	pgoff_t pgoff;

	/*
	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
	 * from page cache lookup which is in HPAGE_SIZE units.
	 */
	address = address & huge_page_mask(h);
	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
	mapping = vma->vm_file->f_mapping;

	/*
	 * Take the mapping lock for the duration of the table walk. As
	 * this mapping should be shared between all the VMAs,
	 * __unmap_hugepage_range() is called as the lock is already held
	 */
	i_mmap_lock_write(mapping);
	vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
		/* Do not unmap the current VMA */
		if (iter_vma == vma)
			continue;

		/*
		 * Shared VMAs have their own reserves and do not affect
		 * MAP_PRIVATE accounting but it is possible that a shared
		 * VMA is using the same page so check and skip such VMAs.
		 */
		if (iter_vma->vm_flags & VM_MAYSHARE)
			continue;

		/*
		 * Unmap the page from other VMAs without their own reserves.
		 * They get marked to be SIGKILLed if they fault in these
		 * areas. This is because a future no-page fault on this VMA
		 * could insert a zeroed page instead of the data existing
		 * from the time of fork. This would look like data corruption
		 */
		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
			unmap_hugepage_range(iter_vma, address,
					     address + huge_page_size(h), page, 0);
	}
	i_mmap_unlock_write(mapping);
}
/*
 * hugetlb_wp() should be called with page lock of the original hugepage held.
 * Called with hugetlb_fault_mutex_table held and pte_page locked so we
 * cannot race with other handlers or page migration.
 * Keep the pte_same checks anyway to make transition from the mutex easier.
 */
static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
		       unsigned long address, pte_t *ptep, unsigned int flags,
		       struct folio *pagecache_folio, spinlock_t *ptl)
{
	const bool unshare = flags & FAULT_FLAG_UNSHARE;
	pte_t pte = huge_ptep_get(ptep);
	struct hstate *h = hstate_vma(vma);
	struct folio *old_folio;
	struct folio *new_folio;
	int outside_reserve = 0;
	vm_fault_t ret = 0;
	unsigned long haddr = address & huge_page_mask(h);
	struct mmu_notifier_range range;

	/*
	 * Never handle CoW for uffd-wp protected pages.  It should be only
	 * handled when the uffd-wp protection is removed.
	 *
	 * Note that only the CoW optimization path (in hugetlb_no_page())
	 * can trigger this, because hugetlb_fault() will always resolve
	 * uffd-wp bit first.
	 */
	if (!unshare && huge_pte_uffd_wp(pte))
		return 0;

	/*
	 * hugetlb does not support FOLL_FORCE-style write faults that keep the
	 * PTE mapped R/O such as maybe_mkwrite() would do.
	 */
	if (WARN_ON_ONCE(!unshare && !(vma->vm_flags & VM_WRITE)))
		return VM_FAULT_SIGSEGV;

	/* Let's take out MAP_SHARED mappings first. */
	if (vma->vm_flags & VM_MAYSHARE) {
		set_huge_ptep_writable(vma, haddr, ptep);
		return 0;
	}

	old_folio = page_folio(pte_page(pte));

	delayacct_wpcopy_start();

retry_avoidcopy:
	/*
	 * If no-one else is actually using this page, we're the exclusive
	 * owner and can reuse this page.
	 */
	if (folio_mapcount(old_folio) == 1 && folio_test_anon(old_folio)) {
		if (!PageAnonExclusive(&old_folio->page))
			page_move_anon_rmap(&old_folio->page, vma);
		if (likely(!unshare))
			set_huge_ptep_writable(vma, haddr, ptep);

		delayacct_wpcopy_end();
		return 0;
	}
	VM_BUG_ON_PAGE(folio_test_anon(old_folio) &&
		       PageAnonExclusive(&old_folio->page), &old_folio->page);

	/*
	 * If the process that created a MAP_PRIVATE mapping is about to
	 * perform a COW due to a shared page count, attempt to satisfy
	 * the allocation without using the existing reserves. The pagecache
	 * page is used to determine if the reserve at this address was
	 * consumed or not. If reserves were used, a partial faulted mapping
	 * at the time of fork() could consume its reserves on COW instead
	 * of the full address range.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
			old_folio != pagecache_folio)
		outside_reserve = 1;

	folio_get(old_folio);

	/*
	 * Drop page table lock as buddy allocator may be called. It will
	 * be acquired again before returning to the caller, as expected.
	 */
	spin_unlock(ptl);
	new_folio = alloc_hugetlb_folio(vma, haddr, outside_reserve);

	if (IS_ERR(new_folio)) {
		/*
		 * If a process owning a MAP_PRIVATE mapping fails to COW,
		 * it is due to references held by a child and an insufficient
		 * huge page pool. To guarantee the original mappers
		 * reliability, unmap the page from child processes. The child
		 * may get SIGKILLed if it later faults.
		 */
		if (outside_reserve) {
			struct address_space *mapping = vma->vm_file->f_mapping;
			pgoff_t idx;
			u32 hash;

			folio_put(old_folio);
			/*
			 * Drop hugetlb_fault_mutex and vma_lock before
			 * unmapping.  unmapping needs to hold vma_lock
			 * in write mode.  Dropping vma_lock in read mode
			 * here is OK as COW mappings do not interact with
			 * PMD sharing.
			 *
			 * Reacquire both after unmap operation.
			 */
			idx = vma_hugecache_offset(h, vma, haddr);
			hash = hugetlb_fault_mutex_hash(mapping, idx);
			hugetlb_vma_unlock_read(vma);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);

			unmap_ref_private(mm, vma, &old_folio->page, haddr);

			mutex_lock(&hugetlb_fault_mutex_table[hash]);
			hugetlb_vma_lock_read(vma);
			spin_lock(ptl);
			ptep = hugetlb_walk(vma, haddr, huge_page_size(h));
			if (likely(ptep &&
				   pte_same(huge_ptep_get(ptep), pte)))
				goto retry_avoidcopy;
			/*
			 * race occurs while re-acquiring page table
			 * lock, and our job is done.
			 */
			delayacct_wpcopy_end();
			return 0;
		}

		ret = vmf_error(PTR_ERR(new_folio));
		goto out_release_old;
	}

	/*
	 * When the original hugepage is shared one, it does not have
	 * anon_vma prepared.
	 */
	if (unlikely(anon_vma_prepare(vma))) {
		ret = VM_FAULT_OOM;
		goto out_release_all;
	}

	if (copy_user_large_folio(new_folio, old_folio, address, vma)) {
		ret = VM_FAULT_HWPOISON_LARGE;
		goto out_release_all;
	}
	__folio_mark_uptodate(new_folio);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, haddr,
				haddr + huge_page_size(h));
	mmu_notifier_invalidate_range_start(&range);

	/*
	 * Retake the page table lock to check for racing updates
	 * before the page tables are altered
	 */
	spin_lock(ptl);
	ptep = hugetlb_walk(vma, haddr, huge_page_size(h));
	if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
		pte_t newpte = make_huge_pte(vma, &new_folio->page, !unshare);

		/* Break COW or unshare */
		huge_ptep_clear_flush(vma, haddr, ptep);
		page_remove_rmap(&old_folio->page, vma, true);
		hugepage_add_new_anon_rmap(new_folio, vma, haddr);
		if (huge_pte_uffd_wp(pte))
			newpte = huge_pte_mkuffd_wp(newpte);
		set_huge_pte_at(mm, haddr, ptep, newpte);
		folio_set_hugetlb_migratable(new_folio);
		/* Make the old page be freed below */
		new_folio = old_folio;
	}
	spin_unlock(ptl);
	mmu_notifier_invalidate_range_end(&range);
out_release_all:
	/*
	 * No restore in case of successful pagetable update (Break COW or
	 * unshare)
	 */
	if (new_folio != old_folio)
		restore_reserve_on_error(h, vma, haddr, new_folio);
	folio_put(new_folio);
out_release_old:
	folio_put(old_folio);

	spin_lock(ptl); /* Caller expects lock to be held */

	delayacct_wpcopy_end();
	return ret;
}
/*
 * Return whether there is a pagecache page to back given address within VMA.
 */
static bool hugetlbfs_pagecache_present(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	pgoff_t idx = vma_hugecache_offset(h, vma, address);
	struct folio *folio;

	folio = filemap_get_folio(mapping, idx);
	if (IS_ERR(folio))
		return false;
	folio_put(folio);
	return true;
}
int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
			      pgoff_t idx)
{
	struct inode *inode = mapping->host;
	struct hstate *h = hstate_inode(inode);
	int err;

	__folio_set_locked(folio);
	err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL);

	if (unlikely(err)) {
		__folio_clear_locked(folio);
		return err;
	}
	folio_clear_hugetlb_restore_reserve(folio);

	/*
	 * mark folio dirty so that it will not be removed from cache/file
	 * by non-hugetlbfs specific code paths.
	 */
	folio_mark_dirty(folio);

	spin_lock(&inode->i_lock);
	inode->i_blocks += blocks_per_huge_page(h);
	spin_unlock(&inode->i_lock);
	return 0;
}
static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma,
						  struct address_space *mapping,
						  pgoff_t idx,
						  unsigned int flags,
						  unsigned long haddr,
						  unsigned long addr,
						  unsigned long reason)
{
	u32 hash;
	struct vm_fault vmf = {
		.vma = vma,
		.address = haddr,
		.real_address = addr,
		.flags = flags,

		/*
		 * Hard to debug if it ends up being
		 * used by a callee that assumes
		 * something about the other
		 * uninitialized fields... same as in
		 * memory.c
		 */
	};

	/*
	 * vma_lock and hugetlb_fault_mutex must be dropped before handling
	 * userfault. Also mmap_lock could be dropped due to handling
	 * userfault, any vma operation should be careful from here.
	 */
	hugetlb_vma_unlock_read(vma);
	hash = hugetlb_fault_mutex_hash(mapping, idx);
	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
	return handle_userfault(&vmf, reason);
}
/*
 * Recheck pte with pgtable lock.  Returns true if pte didn't change, or
 * false if pte changed or is changing.
 */
static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm,
			       pte_t *ptep, pte_t old_pte)
{
	spinlock_t *ptl;
	bool same;

	ptl = huge_pte_lock(h, mm, ptep);
	same = pte_same(huge_ptep_get(ptep), old_pte);
	spin_unlock(ptl);

	return same;
}
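
/*
 * Typical usage pattern (sketch): a caller samples the pte without the page
 * table lock, performs a sleeping operation such as a folio allocation, then
 * uses hugetlb_pte_stable() to decide whether its snapshot is still
 * authoritative:
 *
 *	old_pte = huge_ptep_get(ptep);
 *	folio = alloc_hugetlb_folio(vma, haddr, 0);	// may sleep
 *	if (!hugetlb_pte_stable(h, mm, ptep, old_pte))
 *		return 0;	// raced; let the fault be retried
 */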
static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			struct address_space *mapping, pgoff_t idx,
			unsigned long address, pte_t *ptep,
			pte_t old_pte, unsigned int flags)
{
	struct hstate *h = hstate_vma(vma);
	vm_fault_t ret = VM_FAULT_SIGBUS;
	int anon_rmap = 0;
	unsigned long size;
	struct folio *folio;
	pte_t new_pte;
	spinlock_t *ptl;
	unsigned long haddr = address & huge_page_mask(h);
	bool new_folio, new_pagecache_folio = false;
	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);

	/*
	 * Currently, we are forced to kill the process in the event the
	 * original mapper has unmapped pages from the child due to a failed
	 * COW/unsharing. Warn that such a situation has occurred as it may not
	 * be obvious.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
		pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
			   current->pid);
		goto out;
	}

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
	new_folio = false;
	folio = filemap_lock_folio(mapping, idx);
	if (IS_ERR(folio)) {
		size = i_size_read(mapping->host) >> huge_page_shift(h);
		if (idx >= size)
			goto out;
		/* Check for page in userfault range */
		if (userfaultfd_missing(vma)) {
			/*
			 * Since hugetlb_no_page() was examining pte
			 * without pgtable lock, we need to re-test under
			 * lock because the pte may not be stable and could
			 * have changed from under us.  Try to detect
			 * either changed or during-changing ptes and retry
			 * properly when needed.
			 *
			 * Note that userfaultfd is actually fine with
			 * false positives (e.g. caused by pte changed),
			 * but not wrong logical events (e.g. caused by
			 * reading a pte during changing).  The latter can
			 * confuse the userspace, so the strictness is very
			 * much preferred.  E.g., MISSING event should
			 * never happen on the page after UFFDIO_COPY has
			 * correctly installed the page and returned.
			 */
			if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) {
				ret = 0;
				goto out;
			}

			return hugetlb_handle_userfault(vma, mapping, idx, flags,
							haddr, address,
							VM_UFFD_MISSING);
		}

		folio = alloc_hugetlb_folio(vma, haddr, 0);
		if (IS_ERR(folio)) {
			/*
			 * Returning error will result in faulting task being
			 * sent SIGBUS. The hugetlb fault mutex prevents two
			 * tasks from racing to fault in the same page which
			 * could result in false unable to allocate errors.
			 * Page migration does not take the fault mutex, but
			 * does a clear then write of pte's under page table
			 * lock. Page fault code could race with migration,
			 * notice the clear pte and try to allocate a page
			 * here. Before returning error, get ptl and make
			 * sure there really is no pte entry.
			 */
			if (hugetlb_pte_stable(h, mm, ptep, old_pte))
				ret = vmf_error(PTR_ERR(folio));
			else
				ret = 0;
			goto out;
		}
		clear_huge_page(&folio->page, address, pages_per_huge_page(h));
		__folio_mark_uptodate(folio);
		new_folio = true;

		if (vma->vm_flags & VM_MAYSHARE) {
			int err = hugetlb_add_to_page_cache(folio, mapping, idx);
			if (err) {
				/*
				 * err can't be -EEXIST which implies someone
				 * else consumed the reservation since hugetlb
				 * fault mutex is held when adding a hugetlb
				 * page to the page cache. So it's safe to call
				 * restore_reserve_on_error() here.
				 */
				restore_reserve_on_error(h, vma, haddr, folio);
				folio_put(folio);
				goto out;
			}
			new_pagecache_folio = true;
		} else {
			folio_lock(folio);
			if (unlikely(anon_vma_prepare(vma))) {
				ret = VM_FAULT_OOM;
				goto backout_unlocked;
			}
			anon_rmap = 1;
		}
	} else {
		/*
		 * If a memory error occurs between mmap() and fault, some
		 * processes don't have a hwpoisoned swap entry for the errored
		 * virtual address.  So we need to block hugepage faults with
		 * the PG_hwpoison bit check.
		 */
		if (unlikely(folio_test_hwpoison(folio))) {
			ret = VM_FAULT_HWPOISON_LARGE |
				VM_FAULT_SET_HINDEX(hstate_index(h));
			goto backout_unlocked;
		}

		/* Check for page in userfault range. */
		if (userfaultfd_minor(vma)) {
			folio_unlock(folio);
			folio_put(folio);
			/* See comment in userfaultfd_missing() block above */
			if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) {
				ret = 0;
				goto out;
			}
			return hugetlb_handle_userfault(vma, mapping, idx, flags,
							haddr, address,
							VM_UFFD_MINOR);
		}
	}

	/*
	 * If we are going to COW a private mapping later, we examine the
	 * pending reservations for this page now. This will ensure that
	 * any allocations necessary to record that reservation occur outside
	 * the spinlock.
	 */
	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
		if (vma_needs_reservation(h, vma, haddr) < 0) {
			ret = VM_FAULT_OOM;
			goto backout_unlocked;
		}
		/* Just decrements count, does not deallocate */
		vma_end_reservation(h, vma, haddr);
	}

	ptl = huge_pte_lock(h, mm, ptep);
	ret = 0;
	/* If pte changed from under us, retry */
	if (!pte_same(huge_ptep_get(ptep), old_pte))
		goto backout;

	if (anon_rmap)
		hugepage_add_new_anon_rmap(folio, vma, haddr);
	else
		page_dup_file_rmap(&folio->page, true);
	new_pte = make_huge_pte(vma, &folio->page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	/*
	 * If this pte was previously wr-protected, keep it wr-protected even
	 * if populated.
	 */
	if (unlikely(pte_marker_uffd_wp(old_pte)))
		new_pte = huge_pte_mkuffd_wp(new_pte);
	set_huge_pte_at(mm, haddr, ptep, new_pte);

	hugetlb_count_add(pages_per_huge_page(h), mm);
	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_wp(mm, vma, address, ptep, flags, folio, ptl);
	}

	spin_unlock(ptl);

	/*
	 * Only set hugetlb_migratable in newly allocated pages.  Existing pages
	 * found in the pagecache may not have hugetlb_migratable if they have
	 * been isolated for migration.
	 */
	if (new_folio)
		folio_set_hugetlb_migratable(folio);

	folio_unlock(folio);
out:
	hugetlb_vma_unlock_read(vma);
	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
	return ret;

backout:
	spin_unlock(ptl);
backout_unlocked:
	if (new_folio && !new_pagecache_folio)
		restore_reserve_on_error(h, vma, haddr, folio);

	folio_unlock(folio);
	folio_put(folio);
	goto out;
}
#ifdef CONFIG_SMP
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
{
	unsigned long key[2];
	u32 hash;

	key[0] = (unsigned long) mapping;
	key[1] = idx;

	hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);

	return hash & (num_fault_mutexes - 1);
}
#else
/*
 * For uniprocessor systems we always use a single mutex, so just
 * return 0 and avoid the hashing overhead.
 */
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
{
	return 0;
}
#endif
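
/*
 * Usage sketch: fault paths serialize on a (mapping, index) pair by hashing
 * into the mutex table, e.g.
 *
 *	hash = hugetlb_fault_mutex_hash(mapping, idx);
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	... fault in or truncate the page at idx ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 *
 * num_fault_mutexes is a power of two (see hugetlb_init() above), so the
 * "hash & (num_fault_mutexes - 1)" is a cheap modulo.
 */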
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags)
{
	pte_t *ptep, entry;
	spinlock_t *ptl;
	vm_fault_t ret;
	u32 hash;
	pgoff_t idx;
	struct folio *folio = NULL;
	struct folio *pagecache_folio = NULL;
	struct hstate *h = hstate_vma(vma);
	struct address_space *mapping;
	int need_wait_lock = 0;
	unsigned long haddr = address & huge_page_mask(h);

	/* TODO: Handle faults under the VMA lock */
	if (flags & FAULT_FLAG_VMA_LOCK) {
		vma_end_read(vma);
		return VM_FAULT_RETRY;
	}

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, haddr);
	hash = hugetlb_fault_mutex_hash(mapping, idx);
	mutex_lock(&hugetlb_fault_mutex_table[hash]);

	/*
	 * Acquire vma lock before calling huge_pte_alloc and hold
	 * until finished with ptep.  This prevents huge_pmd_unshare from
	 * being called elsewhere and making the ptep no longer valid.
	 */
	hugetlb_vma_lock_read(vma);
	ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h));
	if (!ptep) {
		hugetlb_vma_unlock_read(vma);
		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		return VM_FAULT_OOM;
	}

	entry = huge_ptep_get(ptep);
	if (huge_pte_none_mostly(entry)) {
		if (is_pte_marker(entry)) {
			pte_marker marker =
				pte_marker_get(pte_to_swp_entry(entry));

			if (marker & PTE_MARKER_POISONED) {
				ret = VM_FAULT_HWPOISON_LARGE;
				goto out_mutex;
			}
		}

		/*
		 * Other PTE markers should be handled the same way as none PTE.
		 *
		 * hugetlb_no_page will drop vma lock and hugetlb fault
		 * mutex internally, which makes us return immediately.
		 */
		return hugetlb_no_page(mm, vma, mapping, idx, address, ptep,
				       entry, flags);
	}

	ret = 0;

	/*
	 * entry could be a migration/hwpoison entry at this point, so this
	 * check prevents the kernel from going below assuming that we have
	 * an active hugepage in pagecache. This goto expects the 2nd page
	 * fault, and is_hugetlb_entry_(migration|hwpoisoned) check will
	 * properly handle it.
	 */
	if (!pte_present(entry)) {
		if (unlikely(is_hugetlb_entry_migration(entry))) {
			/*
			 * Release the hugetlb fault lock now, but retain
			 * the vma lock, because it is needed to guard the
			 * huge_pte_lockptr() later in
			 * migration_entry_wait_huge(). The vma lock will
			 * be released there.
			 */
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			migration_entry_wait_huge(vma, ptep);
			return 0;
		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
			ret = VM_FAULT_HWPOISON_LARGE |
			    VM_FAULT_SET_HINDEX(hstate_index(h));
		goto out_mutex;
	}

	/*
	 * If we are going to COW/unshare the mapping later, we examine the
	 * pending reservations for this page now. This will ensure that any
	 * allocations necessary to record that reservation occur outside the
	 * spinlock. Also lookup the pagecache page now as it is used to
	 * determine if a reservation has been consumed.
	 */
	if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
	    !(vma->vm_flags & VM_MAYSHARE) && !huge_pte_write(entry)) {
		if (vma_needs_reservation(h, vma, haddr) < 0) {
			ret = VM_FAULT_OOM;
			goto out_mutex;
		}
		/* Just decrements count, does not deallocate */
		vma_end_reservation(h, vma, haddr);

		pagecache_folio = filemap_lock_folio(mapping, idx);
		if (IS_ERR(pagecache_folio))
			pagecache_folio = NULL;
	}

	ptl = huge_pte_lock(h, mm, ptep);

	/* Check for a racing update before calling hugetlb_wp() */
	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
		goto out_ptl;

	/* Handle userfault-wp first, before trying to lock more pages */
	if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(ptep)) &&
	    (flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
		struct vm_fault vmf = {
			.vma = vma,
			.address = haddr,
			.real_address = address,
			.flags = flags,
		};

		spin_unlock(ptl);
		if (pagecache_folio) {
			folio_unlock(pagecache_folio);
			folio_put(pagecache_folio);
		}
		hugetlb_vma_unlock_read(vma);
		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		return handle_userfault(&vmf, VM_UFFD_WP);
	}

	/*
	 * hugetlb_wp() requires page locks of pte_page(entry) and
	 * pagecache_folio, so here we need take the former one
	 * when folio != pagecache_folio or !pagecache_folio.
	 */
	folio = page_folio(pte_page(entry));
	if (folio != pagecache_folio)
		if (!folio_trylock(folio)) {
			need_wait_lock = 1;
			goto out_ptl;
		}

	folio_get(folio);

	if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
		if (!huge_pte_write(entry)) {
			ret = hugetlb_wp(mm, vma, address, ptep, flags,
					 pagecache_folio, ptl);
			goto out_put_page;
		} else if (likely(flags & FAULT_FLAG_WRITE)) {
			entry = huge_pte_mkdirty(entry);
		}
	}
	entry = pte_mkyoung(entry);
	if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
						flags & FAULT_FLAG_WRITE))
		update_mmu_cache(vma, haddr, ptep);
out_put_page:
	if (folio != pagecache_folio)
		folio_unlock(folio);
	folio_put(folio);
out_ptl:
	spin_unlock(ptl);

	if (pagecache_folio) {
		folio_unlock(pagecache_folio);
		folio_put(pagecache_folio);
	}
out_mutex:
	hugetlb_vma_unlock_read(vma);
	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
	/*
	 * Generally it's safe to hold refcount during waiting page lock. But
	 * here we just wait to defer the next page fault to avoid busy loop and
	 * the page is not used after unlocked before returning from the current
	 * page fault. So we are safe from accessing freed page, even if we wait
	 * here without taking refcount.
	 */
	if (need_wait_lock)
		folio_wait_locked(folio);
	return ret;
}
#ifdef CONFIG_USERFAULTFD
/*
 * Used by userfaultfd UFFDIO_* ioctls. Based on userfaultfd's mfill_atomic_pte
 * with modifications for hugetlb pages.
 */
int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr,
			     unsigned long src_addr,
			     uffd_flags_t flags,
			     struct folio **foliop)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	bool is_continue = uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE);
	bool wp_enabled = (flags & MFILL_ATOMIC_WP);
	struct hstate *h = hstate_vma(dst_vma);
	struct address_space *mapping = dst_vma->vm_file->f_mapping;
	pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
	unsigned long size;
	int vm_shared = dst_vma->vm_flags & VM_SHARED;
	pte_t _dst_pte;
	spinlock_t *ptl;
	int ret = -ENOMEM;
	struct folio *folio;
	int writable;
	bool folio_in_pagecache = false;

	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
		ptl = huge_pte_lock(h, dst_mm, dst_pte);

		/* Don't overwrite any existing PTEs (even markers) */
		if (!huge_pte_none(huge_ptep_get(dst_pte))) {
			spin_unlock(ptl);
			return -EEXIST;
		}

		_dst_pte = make_pte_marker(PTE_MARKER_POISONED);
		set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

		/* No need to invalidate - it was non-present before */
		update_mmu_cache(dst_vma, dst_addr, dst_pte);

		spin_unlock(ptl);
		return 0;
	}

	if (is_continue) {
		ret = -EFAULT;
		folio = filemap_lock_folio(mapping, idx);
		if (IS_ERR(folio))
			goto out;
		folio_in_pagecache = true;
	} else if (!*foliop) {
		/* If a folio already exists, then it's UFFDIO_COPY for
		 * a non-missing case. Return -EEXIST.
		 */
		if (vm_shared &&
		    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
			ret = -EEXIST;
			goto out;
		}

		folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
		if (IS_ERR(folio)) {
			ret = -ENOMEM;
			goto out;
		}

		ret = copy_folio_from_user(folio, (const void __user *) src_addr,
					   false);

		/* fallback to copy_from_user outside mmap_lock */
		if (unlikely(ret)) {
			ret = -ENOENT;
			/* Free the allocated folio which may have
			 * consumed a reservation.
			 */
			restore_reserve_on_error(h, dst_vma, dst_addr, folio);
			folio_put(folio);

			/* Allocate a temporary folio to hold the copied
			 * contents.
			 */
			folio = alloc_hugetlb_folio_vma(h, dst_vma, dst_addr);
			if (!folio) {
				ret = -ENOMEM;
				goto out;
			}
			*foliop = folio;
			/* Set the outparam foliop and return to the caller to
			 * copy the contents outside the lock. Don't free the
			 * folio.
			 */
			goto out;
		}
	} else {
		if (vm_shared &&
		    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
			folio_put(*foliop);
			ret = -EEXIST;
			*foliop = NULL;
			goto out;
		}

		folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
		if (IS_ERR(folio)) {
			folio_put(*foliop);
			ret = -ENOMEM;
			*foliop = NULL;
			goto out;
		}
		ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
		folio_put(*foliop);
		*foliop = NULL;
		if (ret) {
			folio_put(folio);
			goto out;
		}
	}

	/*
	 * The memory barrier inside __folio_mark_uptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__folio_mark_uptodate(folio);

	/* Add shared, newly allocated pages to the page cache. */
	if (vm_shared && !is_continue) {
		size = i_size_read(mapping->host) >> huge_page_shift(h);
		ret = -EFAULT;
		if (idx >= size)
			goto out_release_nounlock;

		/*
		 * Serialization between remove_inode_hugepages() and
		 * hugetlb_add_to_page_cache() below happens through the
		 * hugetlb_fault_mutex_table that here must be hold by
		 * the caller.
		 */
		ret = hugetlb_add_to_page_cache(folio, mapping, idx);
		if (ret)
			goto out_release_nounlock;
		folio_in_pagecache = true;
	}

	ptl = huge_pte_lock(h, dst_mm, dst_pte);

	ret = -EIO;
	if (folio_test_hwpoison(folio))
		goto out_release_unlock;

	/*
	 * We allow to overwrite a pte marker: consider when both MISSING|WP
	 * registered, we firstly wr-protect a none pte which has no page cache
	 * page backing it, then access the page.
	 */
	ret = -EEXIST;
	if (!huge_pte_none_mostly(huge_ptep_get(dst_pte)))
		goto out_release_unlock;

	if (folio_in_pagecache)
		page_dup_file_rmap(&folio->page, true);
	else
		hugepage_add_new_anon_rmap(folio, dst_vma, dst_addr);

	/*
	 * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY
	 * with wp flag set, don't set pte write bit.
	 */
	if (wp_enabled || (is_continue && !vm_shared))
		writable = 0;
	else
		writable = dst_vma->vm_flags & VM_WRITE;

	_dst_pte = make_huge_pte(dst_vma, &folio->page, writable);
	/*
	 * Always mark UFFDIO_COPY page dirty; note that this may not be
	 * extremely important for hugetlbfs for now since swapping is not
	 * supported, but we should still be clear in that this page cannot be
	 * thrown away at will, even if write bit not set.
	 */
	_dst_pte = huge_pte_mkdirty(_dst_pte);
	_dst_pte = pte_mkyoung(_dst_pte);

	if (wp_enabled)
		_dst_pte = huge_pte_mkuffd_wp(_dst_pte);

	set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	hugetlb_count_add(pages_per_huge_page(h), dst_mm);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);

	spin_unlock(ptl);
	if (!is_continue)
		folio_set_hugetlb_migratable(folio);
	if (vm_shared || is_continue)
		folio_unlock(folio);
	ret = 0;
out:
	return ret;
out_release_unlock:
	spin_unlock(ptl);
	if (vm_shared || is_continue)
		folio_unlock(folio);
out_release_nounlock:
	if (!folio_in_pagecache)
		restore_reserve_on_error(h, dst_vma, dst_addr, folio);
	folio_put(folio);
	goto out;
}
#endif /* CONFIG_USERFAULTFD */
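/*
 * Illustrative userspace sketch (not part of the original source): the
 * path above backs UFFDIO_COPY/UFFDIO_CONTINUE on hugetlb VMAs.  A
 * minimal resolver for a missing-page event looks roughly like:
 *
 *	struct uffdio_copy copy = {
 *		.dst = fault_addr & ~(huge_page_size - 1),
 *		.src = (unsigned long)staging_buf,
 *		.len = huge_page_size,
 *	};
 *	ioctl(uffd, UFFDIO_COPY, &copy);
 *
 * where huge_page_size matches the hstate of the registered mapping;
 * misaligned dst/len are rejected before this function is reached.
 */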
struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
				      unsigned long address, unsigned int flags,
				      unsigned int *page_mask)
{
	struct hstate *h = hstate_vma(vma);
	struct mm_struct *mm = vma->vm_mm;
	unsigned long haddr = address & huge_page_mask(h);
	struct page *page = NULL;
	spinlock_t *ptl;
	pte_t *pte, entry;
	int ret;

	hugetlb_vma_lock_read(vma);
	pte = hugetlb_walk(vma, haddr, huge_page_size(h));
	if (!pte)
		goto out_unlock;

	ptl = huge_pte_lock(h, mm, pte);
	entry = huge_ptep_get(pte);
	if (pte_present(entry)) {
		page = pte_page(entry);

		if (!huge_pte_write(entry)) {
			if (flags & FOLL_WRITE) {
				page = NULL;
				goto out;
			}

			if (gup_must_unshare(vma, flags, page)) {
				/* Tell the caller to do unsharing */
				page = ERR_PTR(-EMLINK);
				goto out;
			}
		}

		page += ((address & ~huge_page_mask(h)) >> PAGE_SHIFT);

		/*
		 * Note that page may be a sub-page, and with vmemmap
		 * optimizations the page struct may be read only.
		 * try_grab_page() will increase the ref count on the
		 * head page, so this will be OK.
		 *
		 * try_grab_page() should always be able to get the page here,
		 * because we hold the ptl lock and have verified pte_present().
		 */
		ret = try_grab_page(page, flags);

		if (WARN_ON_ONCE(ret)) {
			page = ERR_PTR(ret);
			goto out;
		}

		*page_mask = (1U << huge_page_order(h)) - 1;
	}
out:
	spin_unlock(ptl);
out_unlock:
	hugetlb_vma_unlock_read(vma);

	/*
	 * Fixup retval for dump requests: if pagecache doesn't exist,
	 * don't try to allocate a new page but just skip it.
	 */
	if (!page && (flags & FOLL_DUMP) &&
	    !hugetlbfs_pagecache_present(h, vma, address))
		page = ERR_PTR(-EFAULT);

	return page;
}
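/*
 * Illustrative note (not in the original source): *page_mask tells the
 * GUP core how many PAGE_SIZE slots are covered by the same huge
 * mapping.  For a 2 MiB page with 4 KiB base pages,
 * huge_page_order(h) == 9, so *page_mask == 511 and the caller can
 * step over the remaining 511 subpages without re-walking the page
 * table.
 */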
long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end,
		pgprot_t newprot, unsigned long cp_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;
	struct hstate *h = hstate_vma(vma);
	long pages = 0, psize = huge_page_size(h);
	bool shared_pmd = false;
	struct mmu_notifier_range range;
	unsigned long last_addr_mask;
	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;

	/*
	 * In the case of shared PMDs, the area to flush could be beyond
	 * start/end. Set range.start/range.end to cover the maximum possible
	 * range if PMD sharing is possible.
	 */
	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
				0, mm, start, end);
	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);

	BUG_ON(address >= end);
	flush_cache_range(vma, range.start, range.end);

	mmu_notifier_invalidate_range_start(&range);
	hugetlb_vma_lock_write(vma);
	i_mmap_lock_write(vma->vm_file->f_mapping);
	last_addr_mask = hugetlb_mask_last_page(h);
	for (; address < end; address += psize) {
		spinlock_t *ptl;
		ptep = hugetlb_walk(vma, address, psize);
		if (!ptep) {
			if (!uffd_wp) {
				address |= last_addr_mask;
				continue;
			}
			/*
			 * Userfaultfd wr-protect requires pgtable
			 * pre-allocations to install pte markers.
			 */
			ptep = huge_pte_alloc(mm, vma, address, psize);
			if (!ptep) {
				pages = -ENOMEM;
				break;
			}
		}
		ptl = huge_pte_lock(h, mm, ptep);
		if (huge_pmd_unshare(mm, vma, address, ptep)) {
			/*
			 * When uffd-wp is enabled on the vma, unshare
			 * shouldn't happen at all. Warn about it if it
			 * happened due to some reason.
			 */
			WARN_ON_ONCE(uffd_wp || uffd_wp_resolve);
			pages++;
			spin_unlock(ptl);
			shared_pmd = true;
			address |= last_addr_mask;
			continue;
		}
		pte = huge_ptep_get(ptep);
		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
			/* Nothing to do. */
		} else if (unlikely(is_hugetlb_entry_migration(pte))) {
			swp_entry_t entry = pte_to_swp_entry(pte);
			struct page *page = pfn_swap_entry_to_page(entry);
			pte_t newpte = pte;

			if (is_writable_migration_entry(entry)) {
				if (PageAnon(page))
					entry = make_readable_exclusive_migration_entry(
								swp_offset(entry));
				else
					entry = make_readable_migration_entry(
								swp_offset(entry));
				newpte = swp_entry_to_pte(entry);
				pages++;
			}

			if (uffd_wp)
				newpte = pte_swp_mkuffd_wp(newpte);
			else if (uffd_wp_resolve)
				newpte = pte_swp_clear_uffd_wp(newpte);
			if (!pte_same(pte, newpte))
				set_huge_pte_at(mm, address, ptep, newpte);
		} else if (unlikely(is_pte_marker(pte))) {
			/* No other markers apply for now. */
			WARN_ON_ONCE(!pte_marker_uffd_wp(pte));
			if (uffd_wp_resolve)
				/* Safe to modify directly (non-present->none). */
				huge_pte_clear(mm, address, ptep, psize);
		} else if (!huge_pte_none(pte)) {
			pte_t old_pte;
			unsigned int shift = huge_page_shift(hstate_vma(vma));

			old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
			pte = huge_pte_modify(old_pte, newprot);
			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
			if (uffd_wp)
				pte = huge_pte_mkuffd_wp(pte);
			else if (uffd_wp_resolve)
				pte = huge_pte_clear_uffd_wp(pte);
			huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
			pages++;
		} else {
			/* None pte */
			if (unlikely(uffd_wp))
				/* Safe to modify directly (none->non-present). */
				set_huge_pte_at(mm, address, ptep,
						make_pte_marker(PTE_MARKER_UFFD_WP));
		}
		spin_unlock(ptl);
	}
	/*
	 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
	 * may have cleared our pud entry and done put_page on the page table:
	 * once we release i_mmap_rwsem, another task can do the final put_page
	 * and that page table be reused and filled with junk. If we actually
	 * did unshare a page of pmds, flush the range corresponding to the pud.
	 */
	if (shared_pmd)
		flush_hugetlb_tlb_range(vma, range.start, range.end);
	else
		flush_hugetlb_tlb_range(vma, start, end);
	/*
	 * No need to call mmu_notifier_arch_invalidate_secondary_tlbs() we are
	 * downgrading page table protection not changing it to point to a new
	 * page.
	 *
	 * See Documentation/mm/mmu_notifier.rst
	 */
	i_mmap_unlock_write(vma->vm_file->f_mapping);
	hugetlb_vma_unlock_write(vma);
	mmu_notifier_invalidate_range_end(&range);

	return pages > 0 ? (pages << h->order) : pages;
}
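/*
 * Illustrative example (not in the original source): the return value
 * is in small (PAGE_SIZE) pages.  With a 2 MiB hstate (h->order == 9
 * for 4 KiB base pages), changing protection on three huge pages
 * yields pages == 3 and a return value of 3 << 9 == 1536.
 */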
/* Return true if reservation was successful, false otherwise.  */
bool hugetlb_reserve_pages(struct inode *inode,
					long from, long to,
					struct vm_area_struct *vma,
					vm_flags_t vm_flags)
{
	long chg = -1, add = -1;
	struct hstate *h = hstate_inode(inode);
	struct hugepage_subpool *spool = subpool_inode(inode);
	struct resv_map *resv_map;
	struct hugetlb_cgroup *h_cg = NULL;
	long gbl_reserve, regions_needed = 0;

	/* This should never happen */
	if (from > to) {
		VM_WARN(1, "%s called with a negative range\n", __func__);
		return false;
	}

	/*
	 * vma specific semaphore used for pmd sharing and fault/truncation
	 * synchronization
	 */
	hugetlb_vma_lock_alloc(vma);

	/*
	 * Only apply hugepage reservation if asked. At fault time, an
	 * attempt will be made for VM_NORESERVE to allocate a page
	 * without using reserves
	 */
	if (vm_flags & VM_NORESERVE)
		return true;

	/*
	 * Shared mappings base their reservation on the number of pages that
	 * are already allocated on behalf of the file. Private mappings need
	 * to reserve the full area even if read-only as mprotect() may be
	 * called to make the mapping read-write. Assume !vma is a shm mapping
	 */
	if (!vma || vma->vm_flags & VM_MAYSHARE) {
		/*
		 * resv_map can not be NULL as hugetlb_reserve_pages is only
		 * called for inodes for which resv_maps were created (see
		 * hugetlbfs_get_inode).
		 */
		resv_map = inode_resv_map(inode);

		chg = region_chg(resv_map, from, to, &regions_needed);
	} else {
		/* Private mapping. */
		resv_map = resv_map_alloc();
		if (!resv_map)
			goto out_err;

		chg = to - from;

		set_vma_resv_map(vma, resv_map);
		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
	}

	if (chg < 0)
		goto out_err;

	if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
				chg * pages_per_huge_page(h), &h_cg) < 0)
		goto out_err;

	if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) {
		/* For private mappings, the hugetlb_cgroup uncharge info hangs
		 * of the resv_map.
		 */
		resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h);
	}

	/*
	 * There must be enough pages in the subpool for the mapping. If
	 * the subpool has a minimum size, there may be some global
	 * reservations already in place (gbl_reserve).
	 */
	gbl_reserve = hugepage_subpool_get_pages(spool, chg);
	if (gbl_reserve < 0)
		goto out_uncharge_cgroup;

	/*
	 * Check enough hugepages are available for the reservation.
	 * Hand the pages back to the subpool if there are not
	 */
	if (hugetlb_acct_memory(h, gbl_reserve) < 0)
		goto out_put_pages;

	/*
	 * Account for the reservations made. Shared mappings record regions
	 * that have reservations as they are shared by multiple VMAs.
	 * When the last VMA disappears, the region map says how much
	 * the reservation was and the page cache tells how much of
	 * the reservation was consumed. Private mappings are per-VMA and
	 * only the consumed reservations are tracked. When the VMA
	 * disappears, the original reservation is the VMA size and the
	 * consumed reservations are stored in the map. Hence, nothing
	 * else has to be done for private mappings here
	 */
	if (!vma || vma->vm_flags & VM_MAYSHARE) {
		add = region_add(resv_map, from, to, regions_needed, h, h_cg);

		if (unlikely(add < 0)) {
			hugetlb_acct_memory(h, -gbl_reserve);
			goto out_put_pages;
		} else if (unlikely(chg > add)) {
			/*
			 * pages in this range were added to the reserve
			 * map between region_chg and region_add. This
			 * indicates a race with alloc_hugetlb_folio. Adjust
			 * the subpool and reserve counts modified above
			 * based on the difference.
			 */
			long rsv_adjust;

			/*
			 * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the
			 * reference to h_cg->css. See comment below for detail.
			 */
			hugetlb_cgroup_uncharge_cgroup_rsvd(
				hstate_index(h),
				(chg - add) * pages_per_huge_page(h), h_cg);

			rsv_adjust = hugepage_subpool_put_pages(spool,
								chg - add);
			hugetlb_acct_memory(h, -rsv_adjust);
		} else if (h_cg) {
			/*
			 * The file_regions will hold their own reference to
			 * h_cg->css. So we should release the reference held
			 * via hugetlb_cgroup_charge_cgroup_rsvd() when we are
			 * done.
			 */
			hugetlb_cgroup_put_rsvd_cgroup(h_cg);
		}
	}
	return true;

out_put_pages:
	/* put back original number of pages, chg */
	(void)hugepage_subpool_put_pages(spool, chg);
out_uncharge_cgroup:
	hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
					    chg * pages_per_huge_page(h), h_cg);
out_err:
	hugetlb_vma_lock_free(vma);
	if (!vma || vma->vm_flags & VM_MAYSHARE)
		/* Only call region_abort if the region_chg succeeded but the
		 * region_add failed or didn't run.
		 */
		if (chg >= 0 && add < 0)
			region_abort(resv_map, from, to, regions_needed);
	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		kref_put(&resv_map->refs, resv_map_release);
	return false;
}
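/*
 * Worked example (illustrative, not in the original source): suppose
 * region_chg() reports chg == 10, but before region_add() runs a
 * racing alloc_hugetlb_folio() instantiates two of those pages, so
 * add == 8.  The chg > add branch above then uncharges
 * (10 - 8) * pages_per_huge_page(h) from the cgroup and hands two
 * pages back to the subpool, leaving all counters consistent.
 */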
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
								long freed)
{
	struct hstate *h = hstate_inode(inode);
	struct resv_map *resv_map = inode_resv_map(inode);
	long chg = 0;
	struct hugepage_subpool *spool = subpool_inode(inode);
	long gbl_reserve;

	/*
	 * Since this routine can be called in the evict inode path for all
	 * hugetlbfs inodes, resv_map could be NULL.
	 */
	if (resv_map) {
		chg = region_del(resv_map, start, end);
		/*
		 * region_del() can fail in the rare case where a region
		 * must be split and another region descriptor can not be
		 * allocated. If end == LONG_MAX, it will not fail.
		 */
		if (chg < 0)
			return chg;
	}

	spin_lock(&inode->i_lock);
	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
	spin_unlock(&inode->i_lock);

	/*
	 * If the subpool has a minimum size, the number of global
	 * reservations to be released may be adjusted.
	 *
	 * Note that !resv_map implies freed == 0. So (chg - freed)
	 * won't go negative.
	 */
	gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
	hugetlb_acct_memory(h, -gbl_reserve);

	return 0;
}
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
static unsigned long page_table_shareable(struct vm_area_struct *svma,
				struct vm_area_struct *vma,
				unsigned long addr, pgoff_t idx)
{
	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
				svma->vm_start;
	unsigned long sbase = saddr & PUD_MASK;
	unsigned long s_end = sbase + PUD_SIZE;

	/* Allow segments to share if only one is marked locked */
	unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED_MASK;
	unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED_MASK;

	/*
	 * match the virtual addresses, permission and the alignment of the
	 * page table page.
	 *
	 * Also, vma_lock (vm_private_data) is required for sharing.
	 */
	if (pmd_index(addr) != pmd_index(saddr) ||
	    vm_flags != svm_flags ||
	    !range_in_vma(svma, sbase, s_end) ||
	    !svma->vm_private_data)
		return 0;

	return saddr;
}
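/*
 * Illustrative example (not in the original source): two processes
 * mmap() the same hugetlbfs file with identical protections, each
 * mapping spanning at least one PUD_SIZE-aligned region (1 GiB on
 * x86-64 with 2 MiB pages).  page_table_shareable() then returns the
 * matching address in svma, and both mappings can point their PUD
 * entries at the same PMD page instead of each populating its own.
 */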
bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
{
	unsigned long start = addr & PUD_MASK;
	unsigned long end = start + PUD_SIZE;

#ifdef CONFIG_USERFAULTFD
	if (uffd_disable_huge_pmd_share(vma))
		return false;
#endif
	/*
	 * check on proper vm_flags and page table alignment
	 */
	if (!(vma->vm_flags & VM_MAYSHARE))
		return false;
	if (!vma->vm_private_data)	/* vma lock required for sharing */
		return false;
	if (!range_in_vma(vma, start, end))
		return false;
	return true;
}
/*
 * Determine if start,end range within vma could be mapped by shared pmd.
 * If yes, adjust start and end to cover range associated with possible
 * shared pmd mappings.
 */
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
	unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
		v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);

	/*
	 * vma needs to span at least one aligned PUD size, and the range
	 * must be at least partially within in.
	 */
	if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
		(*end <= v_start) || (*start >= v_end))
		return;

	/* Extend the range to be PUD aligned for a worst case scenario */
	if (*start > v_start)
		*start = ALIGN_DOWN(*start, PUD_SIZE);

	if (*end < v_end)
		*end = ALIGN(*end, PUD_SIZE);
}
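/*
 * Worked example (illustrative, not in the original source): with
 * PUD_SIZE == 1 GiB, a VMA covering [1 GiB, 4 GiB) and a caller range
 * inside [2 GiB, 3 GiB) is widened to exactly [2 GiB, 3 GiB): the
 * PUD-aligned span whose shared PMD page could map the original range.
 */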
/*
 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
 * and returns the corresponding pte. While this is not necessary for the
 * !shared pmd case because we can allocate the pmd later as well, it makes the
 * code much cleaner. pmd allocation is essential for the shared case because
 * pud has to be populated inside the same i_mmap_rwsem section - otherwise
 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
 * bad pmd for sharing.
 */
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
	struct vm_area_struct *svma;
	unsigned long saddr;
	pte_t *spte = NULL;
	pte_t *pte;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
		if (svma == vma)
			continue;

		saddr = page_table_shareable(svma, vma, addr, idx);
		if (saddr) {
			spte = hugetlb_walk(svma, saddr,
					    vma_mmu_pagesize(svma));
			if (spte) {
				get_page(virt_to_page(spte));
				break;
			}
		}
	}

	if (!spte)
		goto out;

	spin_lock(&mm->page_table_lock);
	if (pud_none(*pud)) {
		pud_populate(mm, pud,
				(pmd_t *)((unsigned long)spte & PAGE_MASK));
		mm_inc_nr_pmds(mm);
	} else {
		put_page(virt_to_page(spte));
	}
	spin_unlock(&mm->page_table_lock);
out:
	pte = (pte_t *)pmd_alloc(mm, pud, addr);
	i_mmap_unlock_read(mapping);
	return pte;
}
/*
 * unmap huge page backed by shared pte.
 *
 * Hugetlb pte page is ref counted at the time of mapping. If pte is shared
 * indicated by page_count > 1, unmap is achieved by clearing pud and
 * decrementing the ref count. If count == 1, the pte page is not shared.
 *
 * Called with page table lock held.
 *
 * returns: 1 successfully unmapped a shared pte page
 *	    0 the underlying pte page is not shared, or it is the last user
 */
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);

	i_mmap_assert_write_locked(vma->vm_file->f_mapping);
	hugetlb_vma_assert_locked(vma);
	BUG_ON(page_count(virt_to_page(ptep)) == 0);
	if (page_count(virt_to_page(ptep)) == 1)
		return 0;

	pud_clear(pud);
	put_page(virt_to_page(ptep));
	mm_dec_nr_pmds(mm);
	return 1;
}
#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */

pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud)
{
	return NULL;
}

int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep)
{
	return 0;
}

void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
{
	return false;
}
#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);
	if (pud) {
		if (sz == PUD_SIZE) {
			pte = (pte_t *)pud;
		} else {
			BUG_ON(sz != PMD_SIZE);
			if (want_pmd_share(vma, addr) && pud_none(*pud))
				pte = huge_pmd_share(mm, vma, addr, pud);
			else
				pte = (pte_t *)pmd_alloc(mm, pud, addr);
		}
	}

	if (pte) {
		pte_t pteval = ptep_get_lockless(pte);

		BUG_ON(pte_present(pteval) && !pte_huge(pteval));
	}

	return pte;
}
/*
 * huge_pte_offset() - Walk the page table to resolve the hugepage
 * entry at address @addr
 *
 * Return: Pointer to page table entry (PUD or PMD) for
 * address @addr, or NULL if a !p*d_present() entry is encountered and the
 * size @sz doesn't match the hugepage size at this level of the page
 * table.
 */
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (!p4d_present(*p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (sz == PUD_SIZE)
		/* must be pud huge, non-present or none */
		return (pte_t *)pud;
	if (!pud_present(*pud))
		return NULL;
	/* must have a valid entry and size to go further */

	pmd = pmd_offset(pud, addr);
	/* must be pmd huge, non-present or none */
	return (pte_t *)pmd;
}
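/*
 * Illustrative note (not in the original source): for a 2 MiB mapping,
 * huge_pte_offset(mm, addr, PMD_SIZE) returns the PMD slot itself cast
 * to pte_t *; there is no lower PTE level to descend to, which is why
 * the walk stops at the level whose size matches @sz.
 */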
/*
 * Return a mask that can be used to update an address to the last huge
 * page in a page table page mapping size. Used to skip non-present
 * page table entries when linearly scanning address ranges. Architectures
 * with unique huge page to page table relationships can define their own
 * version of this routine.
 */
unsigned long hugetlb_mask_last_page(struct hstate *h)
{
	unsigned long hp_size = huge_page_size(h);

	if (hp_size == PUD_SIZE)
		return P4D_SIZE - PUD_SIZE;
	else if (hp_size == PMD_SIZE)
		return PUD_SIZE - PMD_SIZE;
	else
		return 0UL;
}

#else

/* See description above. Architectures can provide their own version. */
__weak unsigned long hugetlb_mask_last_page(struct hstate *h)
{
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
	if (huge_page_size(h) == PMD_SIZE)
		return PUD_SIZE - PMD_SIZE;
#endif
	return 0UL;
}

#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
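/*
 * Worked example (illustrative, not in the original source): on x86-64
 * with 2 MiB huge pages, hugetlb_mask_last_page() returns
 * PUD_SIZE - PMD_SIZE == 0x3fe00000.  In a scan loop that does
 * "address |= last_addr_mask" after hitting a missing entry, the
 * following "address += psize" then jumps straight to the next 1 GiB
 * region instead of probing all 512 PMD slots one by one.
 */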
/*
 * These functions are overwritable if your architecture needs its own
 * behavior.
 */
bool isolate_hugetlb(struct folio *folio, struct list_head *list)
{
	bool ret = true;

	spin_lock_irq(&hugetlb_lock);
	if (!folio_test_hugetlb(folio) ||
	    !folio_test_hugetlb_migratable(folio) ||
	    !folio_try_get(folio)) {
		ret = false;
		goto unlock;
	}
	folio_clear_hugetlb_migratable(folio);
	list_move_tail(&folio->lru, list);
unlock:
	spin_unlock_irq(&hugetlb_lock);
	return ret;
}
int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
{
	int ret = 0;

	*hugetlb = false;
	spin_lock_irq(&hugetlb_lock);
	if (folio_test_hugetlb(folio)) {
		*hugetlb = true;
		if (folio_test_hugetlb_freed(folio))
			ret = 0;
		else if (folio_test_hugetlb_migratable(folio) || unpoison)
			ret = folio_try_get(folio);
		else
			ret = -EBUSY;
	}
	spin_unlock_irq(&hugetlb_lock);
	return ret;
}
int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
				bool *migratable_cleared)
{
	int ret;

	spin_lock_irq(&hugetlb_lock);
	ret = __get_huge_page_for_hwpoison(pfn, flags, migratable_cleared);
	spin_unlock_irq(&hugetlb_lock);
	return ret;
}
void folio_putback_active_hugetlb(struct folio *folio)
{
	spin_lock_irq(&hugetlb_lock);
	folio_set_hugetlb_migratable(folio);
	list_move_tail(&folio->lru, &(folio_hstate(folio))->hugepage_activelist);
	spin_unlock_irq(&hugetlb_lock);
	folio_put(folio);
}
void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason)
{
	struct hstate *h = folio_hstate(old_folio);

	hugetlb_cgroup_migrate(old_folio, new_folio);
	set_page_owner_migrate_reason(&new_folio->page, reason);

	/*
	 * transfer temporary state of the new hugetlb folio. This is
	 * reverse to other transitions because the newpage is going to
	 * be final while the old one will be freed so it takes over
	 * the temporary status.
	 *
	 * Also note that we have to transfer the per-node surplus state
	 * here as well otherwise the global surplus count will not match
	 * the per-node's.
	 */
	if (folio_test_hugetlb_temporary(new_folio)) {
		int old_nid = folio_nid(old_folio);
		int new_nid = folio_nid(new_folio);

		folio_set_hugetlb_temporary(old_folio);
		folio_clear_hugetlb_temporary(new_folio);

		/*
		 * There is no need to transfer the per-node surplus state
		 * when we do not cross the node.
		 */
		if (new_nid == old_nid)
			return;
		spin_lock_irq(&hugetlb_lock);
		if (h->surplus_huge_pages_node[old_nid]) {
			h->surplus_huge_pages_node[old_nid]--;
			h->surplus_huge_pages_node[new_nid]++;
		}
		spin_unlock_irq(&hugetlb_lock);
	}
}
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
				   unsigned long start,
				   unsigned long end)
{
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_notifier_range range;
	unsigned long address;
	spinlock_t *ptl;
	pte_t *ptep;

	if (!(vma->vm_flags & VM_MAYSHARE))
		return;

	if (start >= end)
		return;

	flush_cache_range(vma, start, end);
	/*
	 * No need to call adjust_range_if_pmd_sharing_possible(), because
	 * we have already done the PUD_SIZE alignment.
	 */
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
				start, end);
	mmu_notifier_invalidate_range_start(&range);
	hugetlb_vma_lock_write(vma);
	i_mmap_lock_write(vma->vm_file->f_mapping);
	for (address = start; address < end; address += PUD_SIZE) {
		ptep = hugetlb_walk(vma, address, sz);
		if (!ptep)
			continue;
		ptl = huge_pte_lock(h, mm, ptep);
		huge_pmd_unshare(mm, vma, address, ptep);
		spin_unlock(ptl);
	}
	flush_hugetlb_tlb_range(vma, start, end);
	i_mmap_unlock_write(vma->vm_file->f_mapping);
	hugetlb_vma_unlock_write(vma);
	/*
	 * No need to call mmu_notifier_arch_invalidate_secondary_tlbs(), see
	 * Documentation/mm/mmu_notifier.rst.
	 */
	mmu_notifier_invalidate_range_end(&range);
}
/*
 * This function will unconditionally remove all the shared pmd pgtable entries
 * within the specific vma for a hugetlbfs memory range.
 */
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
{
	hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
			ALIGN_DOWN(vma->vm_end, PUD_SIZE));
}
#ifdef CONFIG_CMA
static bool cma_reserve_called __initdata;

static int __init cmdline_parse_hugetlb_cma(char *p)
{
	int nid, count = 0;
	unsigned long tmp;
	char *s = p;

	while (*s) {
		if (sscanf(s, "%lu%n", &tmp, &count) != 1)
			break;

		if (s[count] == ':') {
			if (tmp >= MAX_NUMNODES)
				break;
			nid = array_index_nospec(tmp, MAX_NUMNODES);

			s += count + 1;
			tmp = memparse(s, &s);
			hugetlb_cma_size_in_node[nid] = tmp;
			hugetlb_cma_size += tmp;

			/*
			 * Skip the separator if have one, otherwise
			 * break the parsing.
			 */
			if (*s == ',')
				s++;
			else
				break;
		} else {
			hugetlb_cma_size = memparse(p, &p);
			break;
		}
	}

	return 0;
}

early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);
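/*
 * Example (illustrative, not in the original source): the parser above
 * accepts either a single size or per-node sizes on the kernel command
 * line, e.g.
 *
 *	hugetlb_cma=4G
 *	hugetlb_cma=0:2G,1:2G
 *
 * The second form stores 2 GiB into hugetlb_cma_size_in_node[] for
 * nodes 0 and 1 and accumulates the total in hugetlb_cma_size.
 */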
void __init hugetlb_cma_reserve(int order)
{
	unsigned long size, reserved, per_node;
	bool node_specific_cma_alloc = false;
	int nid;

	cma_reserve_called = true;

	if (!hugetlb_cma_size)
		return;

	for (nid = 0; nid < MAX_NUMNODES; nid++) {
		if (hugetlb_cma_size_in_node[nid] == 0)
			continue;

		if (!node_online(nid)) {
			pr_warn("hugetlb_cma: invalid node %d specified\n", nid);
			hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
			hugetlb_cma_size_in_node[nid] = 0;
			continue;
		}

		if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) {
			pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n",
				nid, (PAGE_SIZE << order) / SZ_1M);
			hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
			hugetlb_cma_size_in_node[nid] = 0;
		} else {
			node_specific_cma_alloc = true;
		}
	}

	/* Validate the CMA size again in case some invalid nodes specified. */
	if (!hugetlb_cma_size)
		return;

	if (hugetlb_cma_size < (PAGE_SIZE << order)) {
		pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
			(PAGE_SIZE << order) / SZ_1M);
		hugetlb_cma_size = 0;
		return;
	}

	if (!node_specific_cma_alloc) {
		/*
		 * If 3 GB area is requested on a machine with 4 numa nodes,
		 * let's allocate 1 GB on first three nodes and ignore the last one.
		 */
		per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes);
		pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
			hugetlb_cma_size / SZ_1M, per_node / SZ_1M);
	}

	reserved = 0;
	for_each_online_node(nid) {
		int res;
		char name[CMA_MAX_NAME];

		if (node_specific_cma_alloc) {
			if (hugetlb_cma_size_in_node[nid] == 0)
				continue;

			size = hugetlb_cma_size_in_node[nid];
		} else {
			size = min(per_node, hugetlb_cma_size - reserved);
		}

		size = round_up(size, PAGE_SIZE << order);

		snprintf(name, sizeof(name), "hugetlb%d", nid);
		/*
		 * Note that 'order per bit' is based on smallest size that
		 * may be returned to CMA allocator in the case of
		 * huge page demotion.
		 */
		res = cma_declare_contiguous_nid(0, size, 0,
						 PAGE_SIZE << HUGETLB_PAGE_ORDER,
						 0, false, name,
						 &hugetlb_cma[nid], nid);
		if (res) {
			pr_warn("hugetlb_cma: reservation failed: err %d, node %d",
				res, nid);
			continue;
		}

		reserved += size;
		pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
			size / SZ_1M, nid);

		if (reserved >= hugetlb_cma_size)
			break;
	}

	if (!reserved)
		/*
		 * hugetlb_cma_size is used to determine if allocations from
		 * cma are possible. Set to zero if no cma regions are set up.
		 */
		hugetlb_cma_size = 0;
}
static void __init hugetlb_cma_check(void)
{
	if (!hugetlb_cma_size || cma_reserve_called)
		return;

	pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
}

#endif /* CONFIG_CMA */