/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/falloc.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/fs_parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>
#include <linux/sched/mm.h>
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;
enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };
struct hugetlbfs_fs_context {
	struct hstate		*hstate;
	unsigned long long	max_size_opt;
	unsigned long long	min_size_opt;
	long			max_hpages;
	long			nr_inodes;
	long			min_hpages;
	enum hugetlbfs_size_type max_val_type;
	enum hugetlbfs_size_type min_val_type;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
};
int sysctl_hugetlb_shm_group;
enum hugetlb_param {
	Opt_gid,
	Opt_min_size,
	Opt_mode,
	Opt_nr_inodes,
	Opt_pagesize,
	Opt_size,
	Opt_uid,
};

static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
	fsparam_u32   ("gid",		Opt_gid),
	fsparam_string("min_size",	Opt_min_size),
	fsparam_u32oct("mode",		Opt_mode),
	fsparam_string("nr_inodes",	Opt_nr_inodes),
	fsparam_string("pagesize",	Opt_pagesize),
	fsparam_string("size",		Opt_size),
	fsparam_u32   ("uid",		Opt_uid),
	{}
};
/*
 * Mask used when checking the page offset value passed in via system
 * calls.  This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value.  The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
	(((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1)))
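/*
 * Illustrative sketch (not part of the original file): on a 32-bit
 * arch with 4K pages, PAGE_SHIFT is 12 and BITS_PER_LONG is 32, so
 * PGOFF_LOFFT_MAX has the upper 13 bits set.  A hypothetical
 * vm_pgoff of 0x00100000 (bit 20) is rejected, because shifting it
 * left by PAGE_SHIFT no longer fits in a signed 32-bit loff_t:
 *
 *	if (pgoff & PGOFF_LOFFT_MAX)
 *		return -EINVAL;
 */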
static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);
	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap unwinds (may be important on powerpc
	 * and ia64).
	 */
	vm_flags_set(vma, VM_HUGETLB | VM_DONTEXPAND);
	vma->vm_ops = &hugetlb_vm_ops;

	ret = seal_check_write(info->seals, vma);
	if (ret)
		return ret;
	/*
	 * page based offset in vm_pgoff could be sufficiently large to
	 * overflow a loff_t when converted to byte offset.  This can
	 * only happen on architectures where sizeof(loff_t) ==
	 * sizeof(unsigned long).  So, only check in those instances.
	 */
	if (sizeof(unsigned long) == sizeof(loff_t)) {
		if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
			return -EINVAL;
	}

	/* must be huge page aligned */
	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;
	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* check for overflow */
	if (len < vma_len)
		return -EINVAL;

	inode_lock(inode);
	file_accessed(file);

	ret = -ENOMEM;
	if (!hugetlb_reserve_pages(inode,
				   vma->vm_pgoff >> huge_page_order(h),
				   len >> huge_page_shift(h), vma,
				   vma->vm_flags))
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	inode_unlock(inode);

	return ret;
}
/*
 * Called under mmap_write_lock(mm).
 */

static unsigned long
hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = current->mm->mmap_base;
	info.high_limit = arch_get_mmap_end(addr, len, flags);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (unlikely(offset_in_page(addr))) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = current->mm->mmap_base;
		info.high_limit = arch_get_mmap_end(addr, len, flags);
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
static unsigned long
generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (mmap_end - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	/*
	 * Use mm->get_unmapped_area value as a hint to use topdown routine.
	 * If architectures have special needs, they should define their own
	 * version of hugetlb_get_unmapped_area.
	 */
	if (mm->get_unmapped_area == arch_get_unmapped_area_topdown)
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
	return hugetlb_get_unmapped_area_bottomup(file, addr, len,
			pgoff, flags);
}
#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags)
{
	return generic_hugetlb_get_unmapped_area(file, addr, len, pgoff, flags);
}
#endif
/*
 * Someone wants to read @bytes from a HWPOISON hugetlb @page from @offset.
 * Returns the maximum number of bytes one can read without touching the 1st raw
 * HWPOISON subpage.
 *
 * The implementation borrows the iteration logic from copy_page_to_iter*.
 */
static size_t adjust_range_hwpoison(struct page *page, size_t offset, size_t bytes)
{
	size_t n = 0;
	size_t res = 0;

	/* First subpage to start the loop. */
	page = nth_page(page, offset / PAGE_SIZE);
	offset %= PAGE_SIZE;
	while (1) {
		if (is_raw_hwpoison_page_in_hugepage(page))
			break;

		/* Safe to read n bytes without touching HWPOISON subpage. */
		n = min(bytes, (size_t)PAGE_SIZE - offset);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page = nth_page(page, 1);
			offset = 0;
		}
	}

	return res;
}
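/*
 * Illustrative usage sketch (assumption, mirroring the read path
 * below): a reader clamps its copy length to the bytes that precede
 * the first raw HWPOISON subpage, treating zero as an I/O error:
 *
 *	want = adjust_range_hwpoison(&folio->page, offset, nr);
 *	if (want == 0)
 *		retval = -EIO;
 */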
/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data. This provides functionality similar to filemap_read().
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct folio *folio;
		size_t nr, copied, want;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the folio */
		folio = filemap_lock_hugetlb_folio(h, mapping, index);
		if (IS_ERR(folio)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			folio_unlock(folio);

			if (!folio_test_has_hwpoisoned(folio))
				want = nr;
			else {
				/*
				 * Adjust how many bytes safe to read without
				 * touching the 1st raw HWPOISON subpage after
				 * offset.
				 */
				want = adjust_range_hwpoison(&folio->page, offset, nr);
				if (want == 0) {
					folio_put(folio);
					retval = -EIO;
					break;
				}
			}

			/*
			 * We have the folio, copy it to user space buffer.
			 */
			copied = copy_folio_to_iter(folio, offset, want, to);
			folio_put(folio);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}
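/*
 * Worked example (illustrative, assuming 2MB huge pages): a read at
 * ki_pos = 5MB starts at index = 2 (5MB >> 21) with offset = 1MB
 * (5MB & (2MB - 1)); after the remaining 1MB of that folio is
 * copied, index advances to 3 and offset wraps to 0.
 */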
static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}
static void hugetlb_delete_from_page_cache(struct folio *folio)
{
	folio_clear_dirty(folio);
	folio_clear_uptodate(folio);
	filemap_remove_folio(folio);
}
/*
 * Called with i_mmap_rwsem held for inode based vma maps.  This makes
 * sure vma (and vm_mm) will not go away.  We also hold the hugetlb fault
 * mutex for the page in the mapping.  So, we can not race with page being
 * faulted into the vma.
 */
static bool hugetlb_vma_maps_page(struct vm_area_struct *vma,
				unsigned long addr, struct page *page)
{
	pte_t *ptep, pte;

	ptep = hugetlb_walk(vma, addr, huge_page_size(hstate_vma(vma)));
	if (!ptep)
		return false;

	pte = huge_ptep_get(ptep);
	if (huge_pte_none(pte) || !pte_present(pte))
		return false;

	if (pte_page(pte) == page)
		return true;

	return false;
}
/*
 * Can vma_offset_start/vma_offset_end overflow on 32-bit arches?
 * No, because the interval tree returns us only those vmas
 * which overlap the truncated area starting at pgoff,
 * and no vma on a 32-bit arch can span beyond the 4GB.
 */
static unsigned long vma_offset_start(struct vm_area_struct *vma, pgoff_t start)
{
	unsigned long offset = 0;

	if (vma->vm_pgoff < start)
		offset = (start - vma->vm_pgoff) << PAGE_SHIFT;

	return vma->vm_start + offset;
}
static unsigned long vma_offset_end(struct vm_area_struct *vma, pgoff_t end)
{
	unsigned long t_end;

	if (end == 0)
		return vma->vm_end;

	t_end = ((end - vma->vm_pgoff) << PAGE_SHIFT) + vma->vm_start;
	if (t_end > vma->vm_end)
		t_end = vma->vm_end;
	return t_end;
}
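/*
 * Worked example (illustrative, 4K base pages): for a vma with
 * vm_pgoff = 512 that maps 4MB, a truncation point of start = 768
 * gives v_start = vm_start + ((768 - 512) << PAGE_SHIFT) =
 * vm_start + 1MB, so only the tail of the vma is unmapped.
 */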
/*
 * Called with hugetlb fault mutex held.  Therefore, no more mappings to
 * this folio can be created while executing the routine.
 */
static void hugetlb_unmap_file_folio(struct hstate *h,
					struct address_space *mapping,
					struct folio *folio, pgoff_t index)
{
	struct rb_root_cached *root = &mapping->i_mmap;
	struct hugetlb_vma_lock *vma_lock;
	struct page *page = &folio->page;
	struct vm_area_struct *vma;
	unsigned long v_start;
	unsigned long v_end;
	pgoff_t start, end;

	start = index * pages_per_huge_page(h);
	end = (index + 1) * pages_per_huge_page(h);

	i_mmap_lock_write(mapping);
retry:
	vma_lock = NULL;
	vma_interval_tree_foreach(vma, root, start, end - 1) {
		v_start = vma_offset_start(vma, start);
		v_end = vma_offset_end(vma, end);

		if (!hugetlb_vma_maps_page(vma, v_start, page))
			continue;

		if (!hugetlb_vma_trylock_write(vma)) {
			vma_lock = vma->vm_private_data;
			/*
			 * If we can not get vma lock, we need to drop
			 * immap_sema and take locks in order.  First,
			 * take a ref on the vma_lock structure so that
			 * we can be guaranteed it will not go away when
			 * dropping immap_sema.
			 */
			kref_get(&vma_lock->refs);
			break;
		}

		unmap_hugepage_range(vma, v_start, v_end, NULL,
				     ZAP_FLAG_DROP_MARKER);
		hugetlb_vma_unlock_write(vma);
	}

	i_mmap_unlock_write(mapping);

	if (vma_lock) {
		/*
		 * Wait on vma_lock.  We know it is still valid as we have
		 * a reference.  We must 'open code' vma locking as we do
		 * not know if vma_lock is still attached to vma.
		 */
		down_write(&vma_lock->rw_sema);
		i_mmap_lock_write(mapping);

		vma = vma_lock->vma;
		if (!vma) {
			/*
			 * If lock is no longer attached to vma, then just
			 * unlock, drop our reference and retry looking for
			 * other mmaps sharing this page/page tables.
			 */
			up_write(&vma_lock->rw_sema);
			kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
			goto retry;
		}

		/*
		 * vma_lock is still attached to vma.  Check to see if vma
		 * still maps page and if so, unmap.
		 */
		v_start = vma_offset_start(vma, start);
		v_end = vma_offset_end(vma, end);
		if (hugetlb_vma_maps_page(vma, v_start, page))
			unmap_hugepage_range(vma, v_start, v_end, NULL,
					     ZAP_FLAG_DROP_MARKER);

		kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
		hugetlb_vma_unlock_write(vma);

		goto retry;
	}
}
static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
		      zap_flags_t zap_flags)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after start should be
	 * unmapped.  Note, end is exclusive, whereas the interval tree takes
	 * an inclusive "last".
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end - 1 : ULONG_MAX) {
		unsigned long v_start;
		unsigned long v_end;

		if (!hugetlb_vma_trylock_write(vma))
			continue;

		v_start = vma_offset_start(vma, start);
		v_end = vma_offset_end(vma, end);

		unmap_hugepage_range(vma, v_start, v_end, NULL, zap_flags);

		/*
		 * Note that vma lock only exists for shared/non-private
		 * vmas.  Therefore, lock is not held when calling
		 * unmap_hugepage_range for private vmas.
		 */
		hugetlb_vma_unlock_write(vma);
	}
}
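/*
 * Call shapes used later in this file (for orientation): truncation
 * passes end == 0 so the interval tree is walked to an inclusive
 * last of ULONG_MAX, while hole punch passes an explicit range:
 *
 *	hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0,
 *			      ZAP_FLAG_DROP_MARKER);
 *	hugetlb_vmdelete_list(&mapping->i_mmap, hole_start >> PAGE_SHIFT,
 *			      hole_end >> PAGE_SHIFT, 0);
 */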
/*
 * Called with hugetlb fault mutex held.
 * Returns true if page was actually removed, false otherwise.
 */
static bool remove_inode_single_folio(struct hstate *h, struct inode *inode,
					struct address_space *mapping,
					struct folio *folio, pgoff_t index,
					bool truncate_op)
{
	bool ret = false;

	/*
	 * If folio is mapped, it was faulted in after being
	 * unmapped in caller.  Unmap (again) while holding
	 * the fault mutex.  The mutex will prevent faults
	 * until we finish removing the folio.
	 */
	if (unlikely(folio_mapped(folio)))
		hugetlb_unmap_file_folio(h, mapping, folio, index);

	folio_lock(folio);
	/*
	 * We must remove the folio from page cache before removing
	 * the region/ reserve map (hugetlb_unreserve_pages).  In
	 * rare out of memory conditions, removal of the region/reserve
	 * map could fail.  Correspondingly, the subpool and global
	 * reserve usage count can need to be adjusted.
	 */
	VM_BUG_ON_FOLIO(folio_test_hugetlb_restore_reserve(folio), folio);
	hugetlb_delete_from_page_cache(folio);
	ret = true;
	if (!truncate_op) {
		if (unlikely(hugetlb_unreserve_pages(inode, index,
							index + 1, 1)))
			hugetlb_fix_reserve_counts(inode);
	}

	folio_unlock(folio);
	return ret;
}
/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserve
 *	maps and global counts.  Page faults can race with truncation.
 *	During faults, hugetlb_no_page() checks i_size before page allocation,
 *	and again after obtaining page table lock.  It will 'back out'
 *	allocations in the truncated range.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserve map
 *	deleted.  The region/reserve map for ranges without associated
 *	pages are not modified.  Page faults can race with hole punch.
 *	This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t end = lend >> PAGE_SHIFT;
	struct folio_batch fbatch;
	pgoff_t next, index;
	int i, freed = 0;
	bool truncate_op = (lend == LLONG_MAX);

	folio_batch_init(&fbatch);
	next = lstart >> PAGE_SHIFT;
	while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); ++i) {
			struct folio *folio = fbatch.folios[i];
			u32 hash = 0;

			index = folio->index >> huge_page_order(h);
			hash = hugetlb_fault_mutex_hash(mapping, index);
			mutex_lock(&hugetlb_fault_mutex_table[hash]);

			/*
			 * Remove folio that was part of folio_batch.
			 */
			if (remove_inode_single_folio(h, inode, mapping, folio,
							index, truncate_op))
				freed++;

			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode,
				lstart >> huge_page_shift(h),
				LONG_MAX, freed);
}
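/*
 * Call shapes (for orientation, both appear later in this file):
 *
 *	remove_inode_hugepages(inode, offset, LLONG_MAX);	truncation
 *	remove_inode_hugepages(inode, hole_start, hole_end);	hole punch
 */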
static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	remove_inode_hugepages(inode, 0, LLONG_MAX);

	/*
	 * Get the resv_map from the address space embedded in the inode.
	 * This is the address space which points to any resv_map allocated
	 * at inode creation time.  If this is a device special inode,
	 * i_mapping may not point to the original address space.
	 */
	resv_map = (struct resv_map *)(&inode->i_data)->private_data;
	/* Only regular and link inodes have associated reserve maps */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}
static void hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_size_write(inode, offset);
	i_mmap_lock_write(mapping);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0,
				      ZAP_FLAG_DROP_MARKER);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
}
static void hugetlbfs_zero_partial_page(struct hstate *h,
					struct address_space *mapping,
					loff_t start,
					loff_t end)
{
	pgoff_t idx = start >> huge_page_shift(h);
	struct folio *folio;

	folio = filemap_lock_hugetlb_folio(h, mapping, idx);
	if (IS_ERR(folio))
		return;

	start = start & ~huge_page_mask(h);
	end = end & ~huge_page_mask(h);
	if (!end)
		end = huge_page_size(h);

	folio_zero_segment(folio, (size_t)start, (size_t)end);

	folio_unlock(folio);
	folio_put(folio);
}
static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * hole_start and hole_end indicate the full pages within the hole.
	 */
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);

	inode_lock(inode);

	/* protected by i_rwsem */
	if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
		inode_unlock(inode);
		return -EPERM;
	}

	i_mmap_lock_write(mapping);

	/* If range starts before first full page, zero partial page. */
	if (offset < hole_start)
		hugetlbfs_zero_partial_page(h, mapping,
				offset, min(offset + len, hole_start));

	/* Unmap users of full pages in the hole. */
	if (hole_end > hole_start) {
		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
			hugetlb_vmdelete_list(&mapping->i_mmap,
					      hole_start >> PAGE_SHIFT,
					      hole_end >> PAGE_SHIFT, 0);
	}

	/* If range extends beyond last full page, zero partial page. */
	if ((offset + len) > hole_end && (offset + len) > hole_start)
		hugetlbfs_zero_partial_page(h, mapping,
				hole_end, offset + len);

	i_mmap_unlock_write(mapping);

	/* Remove full pages from the file. */
	if (hole_end > hole_start)
		remove_inode_hugepages(inode, hole_start, hole_end);

	inode_unlock(inode);

	return 0;
}
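/*
 * Worked example (illustrative, 2MB huge pages): punching at
 * offset = 1MB with len = 6MB gives hole_start = round_up(1MB, 2MB)
 * = 2MB and hole_end = round_down(7MB, 2MB) = 6MB; the partial
 * ranges [1MB, 2MB) and [6MB, 7MB) are zeroed in place while the
 * full pages in [2MB, 6MB) are unmapped and removed.
 */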
static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return hugetlbfs_punch_hole(inode, offset, len);

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;

	inode_lock(inode);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
		error = -EPERM;
		goto out;
	}

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines.
	 */
	vma_init(&pseudo_vma, mm);
	vm_flags_init(&pseudo_vma, VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct folio *folio;
		unsigned long addr;

		cond_resched();

		/*
		 * fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/* mutex taken here, fault path and hole punch */
		hash = hugetlb_fault_mutex_hash(mapping, index);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		folio = filemap_get_folio(mapping, index << huge_page_order(h));
		if (!IS_ERR(folio)) {
			folio_put(folio);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			continue;
		}

		/*
		 * Allocate folio without setting the avoid_reserve argument.
		 * There certainly are no reserves associated with the
		 * pseudo_vma.  However, there could be shared mappings with
		 * reserves for the file at the inode level.  If we fallocate
		 * folios in these areas, we need to consume the reserves
		 * to keep reservation accounting consistent.
		 */
		folio = alloc_hugetlb_folio(&pseudo_vma, addr, 0);
		if (IS_ERR(folio)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(folio);
			goto out;
		}
		clear_huge_page(&folio->page, addr, pages_per_huge_page(h));
		__folio_mark_uptodate(folio);
		error = hugetlb_add_to_page_cache(folio, mapping, index);
		if (unlikely(error)) {
			restore_reserve_on_error(h, &pseudo_vma, addr, folio);
			folio_put(folio);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		folio_set_hugetlb_migratable(folio);
		/*
		 * folio_unlock because locked by hugetlb_add_to_page_cache()
		 * folio_put() due to reference from alloc_hugetlb_folio()
		 */
		folio_unlock(folio);
		folio_put(folio);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode_set_ctime_current(inode);
out:
	inode_unlock(inode);
	return error;
}
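/*
 * Worked example (illustrative, 2MB huge pages): fallocate at
 * offset = 3MB with len = 3MB rounds to start = 3MB >> 21 = 1 and
 * end = (3MB + 3MB + 2MB - 1) >> 21 = 3, so indices 1 and 2 -- the
 * huge pages covering [2MB, 6MB) -- are preallocated.
 */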
static int hugetlbfs_setattr(struct mnt_idmap *idmap,
			     struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

	error = setattr_prepare(&nop_mnt_idmap, dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize & ~huge_page_mask(h))
			return -EINVAL;
		/* protected by i_rwsem */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;
		hugetlb_vmtruncate(inode, newsize);
	}

	setattr_copy(&nop_mnt_idmap, inode, attr);
	mark_inode_dirty(inode);
	return 0;
}
static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_fs_context *ctx)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | ctx->mode;
		inode->i_uid = ctx->uid;
		inode->i_gid = ctx->gid;
		simple_inode_init_ts(inode);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}
/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;
static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map = NULL;

	/*
	 * Reserve maps are only needed for inodes that can have associated
	 * page allocations.
	 */
	if (S_ISREG(mode) || S_ISLNK(mode)) {
		resv_map = resv_map_alloc();
		if (!resv_map)
			return NULL;
	}

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode->i_ino = get_next_ino();
		inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				&hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		simple_inode_init_ts(inode);
		inode->i_mapping->private_data = resv_map;
		info->seals = F_SEAL_SEAL;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else {
		if (resv_map)
			kref_put(&resv_map->refs, resv_map_release);
	}

	return inode;
}
/*
 * File creation. Allocate an inode, and we're done..
 */
static int hugetlbfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
			   struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (!inode)
		return -ENOSPC;
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	d_instantiate(dentry, inode);
	dget(dentry);	/* Extra count - pin the dentry in core */
	return 0;
}
static int hugetlbfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
			   struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(&nop_mnt_idmap, dir, dentry,
				     mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}
static int hugetlbfs_create(struct mnt_idmap *idmap,
			    struct inode *dir, struct dentry *dentry,
			    umode_t mode, bool excl)
{
	return hugetlbfs_mknod(&nop_mnt_idmap, dir, dentry, mode | S_IFREG, 0);
}
static int hugetlbfs_tmpfile(struct mnt_idmap *idmap,
			     struct inode *dir, struct file *file,
			     umode_t mode)
{
	struct inode *inode;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode | S_IFREG, 0);
	if (!inode)
		return -ENOSPC;
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	d_tmpfile(file, inode);
	return finish_open_simple(file, 0);
}
static int hugetlbfs_symlink(struct mnt_idmap *idmap,
			     struct inode *dir, struct dentry *dentry,
			     const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));

	return error;
}
#ifdef CONFIG_MIGRATION
static int hugetlbfs_migrate_folio(struct address_space *mapping,
				struct folio *dst, struct folio *src,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, dst, src);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (hugetlb_folio_subpool(src)) {
		hugetlb_set_folio_subpool(dst,
					hugetlb_folio_subpool(src));
		hugetlb_set_folio_subpool(src, NULL);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);

	return MIGRATEPAGE_SUCCESS;
}
#else
#define hugetlbfs_migrate_folio NULL
#endif
static int hugetlbfs_error_remove_page(struct address_space *mapping,
				struct page *page)
{
	return 0;
}
/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
	struct hugepage_subpool *spool = sbinfo->spool;
	unsigned long hpage_size = huge_page_size(sbinfo->hstate);
	unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
	char mod;

	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, sbinfo->gid));
	if (sbinfo->mode != 0755)
		seq_printf(m, ",mode=%o", sbinfo->mode);
	if (sbinfo->max_inodes != -1)
		seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

	hpage_size /= 1024;
	mod = 'K';
	if (hpage_size >= 1024) {
		mod = 'M';
		hpage_size /= 1024;
	}
	seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
	if (spool) {
		if (spool->max_hpages != -1)
			seq_printf(m, ",size=%llu",
				   (unsigned long long)spool->max_hpages << hpage_shift);
		if (spool->min_hpages != -1)
			seq_printf(m, ",min_size=%llu",
				   (unsigned long long)spool->min_hpages << hpage_shift);
	}
	return 0;
}
static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(d_inode(dentry));
	u64 id = huge_encode_dev(dentry->d_sb->s_dev);

	buf->f_fsid = u64_to_fsid(id);
	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits set, just report 0 or -1 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock_irq(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock_irq(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}
static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}
static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}
static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}
static struct kmem_cache *hugetlbfs_inode_cachep;
static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = alloc_inode_sb(sb, hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}
	return &p->vfs_inode;
}
static void hugetlbfs_free_inode(struct inode *inode)
{
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}
static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
}
static const struct address_space_operations hugetlbfs_aops = {
	.write_begin		= hugetlbfs_write_begin,
	.write_end		= hugetlbfs_write_end,
	.dirty_folio		= noop_dirty_folio,
	.migrate_folio		= hugetlbfs_migrate_folio,
	.error_remove_page	= hugetlbfs_error_remove_page,
};
static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = foo;

	inode_init_once(&ei->vfs_inode);
}
const struct file_operations hugetlbfs_file_operations = {
	.read_iter		= hugetlbfs_read_iter,
	.mmap			= hugetlbfs_file_mmap,
	.fsync			= noop_fsync,
	.get_unmapped_area	= hugetlb_get_unmapped_area,
	.llseek			= default_llseek,
	.fallocate		= hugetlbfs_fallocate,
};
static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create		= hugetlbfs_create,
	.lookup		= simple_lookup,
	.link		= simple_link,
	.unlink		= simple_unlink,
	.symlink	= hugetlbfs_symlink,
	.mkdir		= hugetlbfs_mkdir,
	.rmdir		= simple_rmdir,
	.mknod		= hugetlbfs_mknod,
	.rename		= simple_rename,
	.setattr	= hugetlbfs_setattr,
	.tmpfile	= hugetlbfs_tmpfile,
};
static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr	= hugetlbfs_setattr,
};
static const struct super_operations hugetlbfs_ops = {
	.alloc_inode	= hugetlbfs_alloc_inode,
	.free_inode	= hugetlbfs_free_inode,
	.destroy_inode	= hugetlbfs_destroy_inode,
	.evict_inode	= hugetlbfs_evict_inode,
	.statfs		= hugetlbfs_statfs,
	.put_super	= hugetlbfs_put_super,
	.show_options	= hugetlbfs_show_options,
};
/*
 * Convert size option passed from command line to number of huge pages
 * in the pool specified by hstate.  Size option could be in bytes
 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
 */
static long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
			 enum hugetlbfs_size_type val_type)
{
	if (val_type == NO_SIZE)
		return -1;

	if (val_type == SIZE_PERCENT) {
		size_opt <<= huge_page_shift(h);
		size_opt *= h->max_huge_pages;
		do_div(size_opt, 100);
	}

	size_opt >>= huge_page_shift(h);
	return size_opt;
}
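/*
 * Worked example (illustrative, 2MB pages with a 512-page pool):
 * "size=50%" arrives as size_opt = 50 with SIZE_PERCENT and becomes
 * (50 << 21) * 512 / 100 bytes, which shifts back down to 256 huge
 * pages; "size=1G" arrives in bytes and shifts straight to 512.
 */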
/*
 * Parse one mount parameter.
 */
static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;
	struct fs_parse_result result;
	char *rest;
	unsigned long ps;
	int opt;

	opt = fs_parse(fc, hugetlb_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_uid:
		ctx->uid = make_kuid(current_user_ns(), result.uint_32);
		if (!uid_valid(ctx->uid))
			goto bad_val;
		return 0;

	case Opt_gid:
		ctx->gid = make_kgid(current_user_ns(), result.uint_32);
		if (!gid_valid(ctx->gid))
			goto bad_val;
		return 0;

	case Opt_mode:
		ctx->mode = result.uint_32 & 01777U;
		return 0;

	case Opt_size:
		/* memparse() will accept a K/M/G without a digit */
		if (!param->string || !isdigit(param->string[0]))
			goto bad_val;
		ctx->max_size_opt = memparse(param->string, &rest);
		ctx->max_val_type = SIZE_STD;
		if (*rest == '%')
			ctx->max_val_type = SIZE_PERCENT;
		return 0;

	case Opt_nr_inodes:
		/* memparse() will accept a K/M/G without a digit */
		if (!param->string || !isdigit(param->string[0]))
			goto bad_val;
		ctx->nr_inodes = memparse(param->string, &rest);
		return 0;

	case Opt_pagesize:
		ps = memparse(param->string, &rest);
		ctx->hstate = size_to_hstate(ps);
		if (!ctx->hstate) {
			pr_err("Unsupported page size %lu MB\n", ps / SZ_1M);
			return -EINVAL;
		}
		return 0;

	case Opt_min_size:
		/* memparse() will accept a K/M/G without a digit */
		if (!param->string || !isdigit(param->string[0]))
			goto bad_val;
		ctx->min_size_opt = memparse(param->string, &rest);
		ctx->min_val_type = SIZE_STD;
		if (*rest == '%')
			ctx->min_val_type = SIZE_PERCENT;
		return 0;

	default:
		return -EINVAL;
	}

bad_val:
	return invalfc(fc, "Bad value '%s' for mount option '%s'\n",
		       param->string, param->key);
}
/*
 * Validate the parsed options.
 */
static int hugetlbfs_validate(struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;

	/*
	 * Use huge page pool size (in hstate) to convert the size
	 * options to number of huge pages.  If NO_SIZE, -1 is returned.
	 */
	ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
						   ctx->max_size_opt,
						   ctx->max_val_type);
	ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
						   ctx->min_size_opt,
						   ctx->min_val_type);

	/*
	 * If max_size was specified, then min_size must be smaller
	 */
	if (ctx->max_val_type > NO_SIZE &&
	    ctx->min_hpages > ctx->max_hpages) {
		pr_err("Minimum size can not be greater than maximum size\n");
		return -EINVAL;
	}

	return 0;
}
static int
hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;
	struct hugetlbfs_sb_info *sbinfo;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->hstate		= ctx->hstate;
	sbinfo->max_inodes	= ctx->nr_inodes;
	sbinfo->free_inodes	= ctx->nr_inodes;
	sbinfo->spool		= NULL;
	sbinfo->uid		= ctx->uid;
	sbinfo->gid		= ctx->gid;
	sbinfo->mode		= ctx->mode;

	/*
	 * Allocate and initialize subpool if maximum or minimum size is
	 * specified.  Any needed reservations (for minimum size) are taken
	 * when the subpool is created.
	 */
	if (ctx->max_hpages != -1 || ctx->min_hpages != -1) {
		sbinfo->spool = hugepage_new_subpool(ctx->hstate,
						     ctx->max_hpages,
						     ctx->min_hpages);
		if (!sbinfo->spool)
			goto out_free;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(ctx->hstate);
	sb->s_blocksize_bits = huge_page_shift(ctx->hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_time_gran = 1;

	/*
	 * Due to the special and limited functionality of hugetlbfs, it does
	 * not work well as a stacking filesystem.
	 */
	sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
	sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx));
	if (!sb->s_root)
		goto out_free;
	return 0;
out_free:
	kfree(sbinfo->spool);
	kfree(sbinfo);
	return -ENOMEM;
}
static int hugetlbfs_get_tree(struct fs_context *fc)
{
	int err = hugetlbfs_validate(fc);
	if (err)
		return err;
	return get_tree_nodev(fc, hugetlbfs_fill_super);
}
static void hugetlbfs_fs_context_free(struct fs_context *fc)
{
	kfree(fc->fs_private);
}
static const struct fs_context_operations hugetlbfs_fs_context_ops = {
	.free		= hugetlbfs_fs_context_free,
	.parse_param	= hugetlbfs_parse_param,
	.get_tree	= hugetlbfs_get_tree,
};
static int hugetlbfs_init_fs_context(struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx;

	ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->max_hpages	= -1; /* No limit on size by default */
	ctx->nr_inodes	= -1; /* No limit on number of inodes by default */
	ctx->uid	= current_fsuid();
	ctx->gid	= current_fsgid();
	ctx->mode	= 0755;
	ctx->hstate	= &default_hstate;
	ctx->min_hpages	= -1; /* No default minimum size */
	ctx->max_val_type = NO_SIZE;
	ctx->min_val_type = NO_SIZE;
	fc->fs_private = ctx;
	fc->ops	= &hugetlbfs_fs_context_ops;
	return 0;
}
static struct file_system_type hugetlbfs_fs_type = {
	.name			= "hugetlbfs",
	.init_fs_context	= hugetlbfs_init_fs_context,
	.parameters		= hugetlb_fs_parameters,
	.kill_sb		= kill_litter_super,
};
static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;
	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}
static int get_hstate_idx(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return hstate_index(h);
}
/*
 * Note that size should be aligned to proper hugepage size in caller side,
 * otherwise hugetlb_reserve_pages reserves one less hugepages than intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, int creat_flags,
				int page_size_log)
{
	struct inode *inode;
	struct vfsmount *mnt;
	int hstate_idx;
	struct file *file;

	hstate_idx = get_hstate_idx(page_size_log);
	if (hstate_idx < 0)
		return ERR_PTR(-ENODEV);

	mnt = hugetlbfs_vfsmount[hstate_idx];
	if (!mnt)
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		struct ucounts *ucounts = current_ucounts();

		if (user_shm_lock(size, ucounts)) {
			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is obsolete\n",
				current->comm, current->pid);
			user_shm_unlock(size, ucounts);
		}
		return ERR_PTR(-EPERM);
	}

	file = ERR_PTR(-ENOSPC);
	inode = hugetlbfs_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out;
	if (creat_flags == HUGETLB_SHMFS_INODE)
		inode->i_flags |= S_PRIVATE;

	inode->i_size = size;
	clear_nlink(inode);

	if (!hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag))
		file = ERR_PTR(-ENOMEM);
	else
		file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
					&hugetlbfs_file_operations);
	if (!IS_ERR(file))
		return file;

	iput(inode);
out:
	return file;
}
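/*
 * Illustrative caller sketch (assumption, modeled on the anonymous
 * huge page mmap path; the constants are defined in linux/hugetlb.h):
 * size must already be rounded up to the huge page size.
 *
 *	file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
 *				  HUGETLB_ANONHUGE_INODE, 0);
 */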
static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
{
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT);
	if (IS_ERR(fc)) {
		mnt = ERR_CAST(fc);
	} else {
		struct hugetlbfs_fs_context *ctx = fc->fs_private;
		ctx->hstate = h;
		mnt = fc_mount(fc);
		put_fs_context(fc);
	}
	if (IS_ERR(mnt))
		pr_err("Cannot mount internal hugetlbfs for page size %luK",
		       huge_page_size(h) / SZ_1K);
	return mnt;
}
static int __init init_hugetlbfs_fs(void)
{
	struct vfsmount *mnt;
	struct hstate *h;
	int error;
	int i;

	if (!hugepages_supported()) {
		pr_info("disabling because there are no supported hugepage sizes\n");
		return -ENOTSUPP;
	}

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, SLAB_ACCOUNT, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out_free;

	/* default hstate mount is required */
	mnt = mount_one_hugetlbfs(&default_hstate);
	if (IS_ERR(mnt)) {
		error = PTR_ERR(mnt);
		goto out_unreg;
	}
	hugetlbfs_vfsmount[default_hstate_idx] = mnt;

	/* other hstates are optional */
	i = 0;
	for_each_hstate(h) {
		if (i == default_hstate_idx) {
			i++;
			continue;
		}

		mnt = mount_one_hugetlbfs(h);
		if (IS_ERR(mnt))
			hugetlbfs_vfsmount[i] = NULL;
		else
			hugetlbfs_vfsmount[i] = mnt;
		i++;
	}

	return 0;

 out_unreg:
	(void)unregister_filesystem(&hugetlbfs_fs_type);
 out_free:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
 out:
	return error;
}
fs_initcall(init_hugetlbfs_fs)