/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/fs_parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>
#include <linux/sched/mm.h>

#define CREATE_TRACE_POINTS
#include <trace/events/hugetlbfs.h>

static const struct address_space_operations hugetlbfs_aops;
static const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

struct hugetlbfs_fs_context {
	struct hstate		*hstate;
	unsigned long long	max_size_opt;
	unsigned long long	min_size_opt;
	long			max_hpages;
	long			nr_inodes;
	long			min_hpages;
	enum hugetlbfs_size_type max_val_type;
	enum hugetlbfs_size_type min_val_type;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
};

int sysctl_hugetlb_shm_group;

enum hugetlb_param {
	Opt_gid,
	Opt_min_size,
	Opt_mode,
	Opt_nr_inodes,
	Opt_pagesize,
	Opt_size,
	Opt_uid,
};

static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
	fsparam_gid   ("gid",		Opt_gid),
	fsparam_string("min_size",	Opt_min_size),
	fsparam_u32oct("mode",		Opt_mode),
	fsparam_string("nr_inodes",	Opt_nr_inodes),
	fsparam_string("pagesize",	Opt_pagesize),
	fsparam_string("size",		Opt_size),
	fsparam_uid   ("uid",		Opt_uid),
	{}
};
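
/*
 * Illustrative only (not kernel code): the table above maps one-to-one
 * onto the options accepted at mount time.  A mount using every option
 * might look like the following; the mount point and numeric values are
 * made up for the example, and "pagesize" must name a huge page size the
 * running kernel actually supports:
 *
 *	mount -t hugetlbfs \
 *	      -o uid=1000,gid=1000,mode=0700,pagesize=2M,size=1G,min_size=512M,nr_inodes=64 \
 *	      none /mnt/huge
 */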

/*
 * Mask used when checking the page offset value passed in via system
 * calls.  This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value.  The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
	(((1UL << (PAGE_SHIFT + 1)) - 1) <<  (BITS_PER_LONG - (PAGE_SHIFT + 1)))

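/*
 * Worked example (illustrative, not part of the build): on a 32-bit
 * arch with 4K pages, PAGE_SHIFT == 12 and BITS_PER_LONG == 32, so
 *
 *	PGOFF_LOFFT_MAX = ((1UL << 13) - 1) << (32 - 13)
 *			= 0x1fff << 19
 *			= 0xfff80000
 *
 * i.e. a vm_pgoff with any of its top 13 bits set would, once shifted
 * left by PAGE_SHIFT into a byte offset, overflow the 31 value bits of
 * a 32-bit loff_t (or its sign bit), and is therefore rejected.
 */
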
static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);
	vm_flags_t vm_flags;

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap unwinds (may be important on powerpc
	 * and ia64).
	 */
	vm_flags_set(vma, VM_HUGETLB | VM_DONTEXPAND);
	vma->vm_ops = &hugetlb_vm_ops;

	/*
	 * page based offset in vm_pgoff could be sufficiently large to
	 * overflow a loff_t when converted to byte offset.  This can
	 * only happen on architectures where sizeof(loff_t) ==
	 * sizeof(unsigned long).  So, only check in those instances.
	 */
	if (sizeof(unsigned long) == sizeof(loff_t)) {
		if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
			return -EINVAL;
	}

	/* must be huge page aligned */
	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* check for overflow */
	if (len < vma_len)
		return -EINVAL;

	inode_lock(inode);
	file_accessed(file);

	ret = -ENOMEM;

	vm_flags = vma->vm_flags;
	/*
	 * for SHM_HUGETLB, the pages are reserved in the shmget() call so skip
	 * reserving here. Note: only for SHM hugetlbfs file, the inode
	 * flag S_PRIVATE is set.
	 */
	if (inode->i_flags & S_PRIVATE)
		vm_flags |= VM_NORESERVE;

	if (hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vm_flags) < 0)
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	inode_unlock(inode);

	return ret;
}

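/*
 * Minimal userspace sketch of the path that reaches the mmap handler
 * above (illustrative only; assumes a hugetlbfs mount at /mnt/huge
 * backed by 2M pages -- both are made-up example values):
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 2UL << 20;		// one 2M huge page
 *		int fd = open("/mnt/huge/f", O_CREAT | O_RDWR, 0600);
 *
 *		if (fd < 0)
 *			return 1;
 *		// offset and length must be huge page aligned, or the
 *		// checks in hugetlbfs_file_mmap() and
 *		// hugetlb_get_unmapped_area() fail with EINVAL
 *		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			       MAP_SHARED, fd, 0);
 *		if (p == MAP_FAILED)
 *			return 1;
 *		p[0] = 1;			// fault in the huge page
 *		munmap(p, len);
 *		close(fd);
 *		return 0;
 *	}
 */
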
/*
 * Called under mmap_write_lock(mm).
 */

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags)
{
	unsigned long addr0 = 0;
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if ((flags & MAP_FIXED) && (addr & ~huge_page_mask(h)))
		return -EINVAL;
	if (addr)
		addr0 = ALIGN(addr, huge_page_size(h));

	return mm_get_unmapped_area_vmflags(current->mm, file, addr0, len, pgoff,
					    flags, 0);
}

/*
 * Someone wants to read @bytes from a HWPOISON hugetlb @folio from @offset.
 * Returns the maximum number of bytes one can read without touching the 1st raw
 * HWPOISON page.
 */
static size_t adjust_range_hwpoison(struct folio *folio, size_t offset,
				    size_t bytes)
{
	struct page *page = folio_page(folio, offset / PAGE_SIZE);
	size_t safe_bytes;

	if (is_raw_hwpoison_page_in_hugepage(page))
		return 0;
	/* Safe to read the remaining bytes in this page. */
	safe_bytes = PAGE_SIZE - (offset % PAGE_SIZE);
	page++;

	/* Check each remaining page as long as we are not done yet. */
	for (; safe_bytes < bytes; safe_bytes += PAGE_SIZE, page++)
		if (is_raw_hwpoison_page_in_hugepage(page))
			break;

	return min(safe_bytes, bytes);
}

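/*
 * Worked example (values made up): consider a 2M huge folio whose raw
 * 4K page covering bytes [0x3000, 0x4000) is hwpoisoned, and a caller
 * asking for bytes = 0x5000 starting at offset = 0x1800.  The raw page
 * under offset 0x1800 (page 1) is healthy, so safe_bytes starts at
 * 0x1000 - 0x800 = 0x800.  Raw page 2 is healthy (safe_bytes = 0x1800),
 * raw page 3 is poisoned, so the loop stops and min(0x1800, 0x5000) =
 * 0x1800 is returned: the read may proceed up to, but not into, the
 * poisoned page at 0x3000.
 */
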
/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data. This provides functionality similar to filemap_read().
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct folio *folio;
		size_t nr, copied, want;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the folio */
		folio = filemap_lock_hugetlb_folio(h, mapping, index);
		if (IS_ERR(folio)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			folio_unlock(folio);

			if (!folio_test_hwpoison(folio))
				want = nr;
			else {
				/*
				 * Adjust how many bytes safe to read without
				 * touching the 1st raw HWPOISON page after
				 * offset.
				 */
				want = adjust_range_hwpoison(folio, offset, nr);
				if (want == 0) {
					folio_put(folio);
					retval = -EIO;
					break;
				}
			}

			/*
			 * We have the folio, copy it to user space buffer.
			 */
			copied = copy_folio_to_iter(folio, offset, want, to);
			folio_put(folio);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}

static int hugetlbfs_write_begin(const struct kiocb *iocb,
				 struct address_space *mapping,
				 loff_t pos, unsigned len,
				 struct folio **foliop, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(const struct kiocb *iocb,
			       struct address_space *mapping,
			       loff_t pos, unsigned len, unsigned copied,
			       struct folio *folio, void *fsdata)
{
	BUG();
	return -EINVAL;
}

static void hugetlb_delete_from_page_cache(struct folio *folio)
{
	folio_clear_dirty(folio);
	folio_clear_uptodate(folio);
	filemap_remove_folio(folio);
}

/*
 * Called with i_mmap_rwsem held for inode based vma maps.  This makes
 * sure vma (and vm_mm) will not go away.  We also hold the hugetlb fault
 * mutex for the page in the mapping.  So, we cannot race with the page
 * being faulted into the vma.
 */
static bool hugetlb_vma_maps_pfn(struct vm_area_struct *vma,
				 unsigned long addr, unsigned long pfn)
{
	pte_t *ptep, pte;

	ptep = hugetlb_walk(vma, addr, huge_page_size(hstate_vma(vma)));
	if (!ptep)
		return false;

	pte = huge_ptep_get(vma->vm_mm, addr, ptep);
	if (huge_pte_none(pte) || !pte_present(pte))
		return false;

	if (pte_pfn(pte) == pfn)
		return true;

	return false;
}

/*
 * Can vma_offset_start/vma_offset_end overflow on 32-bit arches?
 * No, because the interval tree returns us only those vmas
 * which overlap the truncated area starting at pgoff,
 * and no vma on a 32-bit arch can span beyond 4GB.
 */
static unsigned long vma_offset_start(struct vm_area_struct *vma, pgoff_t start)
{
	unsigned long offset = 0;

	if (vma->vm_pgoff < start)
		offset = (start - vma->vm_pgoff) << PAGE_SHIFT;

	return vma->vm_start + offset;
}

static unsigned long vma_offset_end(struct vm_area_struct *vma, pgoff_t end)
{
	unsigned long t_end;

	if (!end)
		return vma->vm_end;

	t_end = ((end - vma->vm_pgoff) << PAGE_SHIFT) + vma->vm_start;
	if (t_end > vma->vm_end)
		t_end = vma->vm_end;
	return t_end;
}

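/*
 * Worked example (values made up): a shared vma with
 * vm_start = 0x40000000, vm_end = 0x40400000 and vm_pgoff = 0x200,
 * i.e. it maps file offset 2M onward in 4K page units (PAGE_SHIFT == 12),
 * covering pgoffs [0x200, 0x600).  For an operation starting at pgoff
 * start = 0x300 (file offset 3M):
 *
 *	vma_offset_start() = 0x40000000 + ((0x300 - 0x200) << 12)
 *			   = 0x40100000
 *
 * and for end = 0x500 (file offset 5M):
 *
 *	t_end = ((0x500 - 0x200) << 12) + 0x40000000 = 0x40300000,
 *
 * which is still below vm_end, so vma_offset_end() returns it as is;
 * any end at or beyond pgoff 0x600 would be clamped to vm_end, and
 * end == 0 returns vm_end directly.
 */
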
/*
 * Called with hugetlb fault mutex held.  Therefore, no more mappings to
 * this folio can be created while executing the routine.
 */
static void hugetlb_unmap_file_folio(struct hstate *h,
				     struct address_space *mapping,
				     struct folio *folio, pgoff_t index)
{
	struct rb_root_cached *root = &mapping->i_mmap;
	struct hugetlb_vma_lock *vma_lock;
	unsigned long pfn = folio_pfn(folio);
	struct vm_area_struct *vma;
	unsigned long v_start;
	unsigned long v_end;
	pgoff_t start, end;

	start = index * pages_per_huge_page(h);
	end = (index + 1) * pages_per_huge_page(h);

	i_mmap_lock_write(mapping);
retry:
	vma_lock = NULL;
	vma_interval_tree_foreach(vma, root, start, end - 1) {
		v_start = vma_offset_start(vma, start);
		v_end = vma_offset_end(vma, end);

		if (!hugetlb_vma_maps_pfn(vma, v_start, pfn))
			continue;

		if (!hugetlb_vma_trylock_write(vma)) {
			vma_lock = vma->vm_private_data;
			/*
			 * If we cannot get the vma lock, we need to drop
			 * i_mmap_rwsem and take the locks in order.  First,
			 * take a ref on the vma_lock structure so that
			 * we can be guaranteed it will not go away when
			 * dropping i_mmap_rwsem.
			 */
			kref_get(&vma_lock->refs);
			break;
		}

		unmap_hugepage_range(vma, v_start, v_end, NULL,
				     ZAP_FLAG_DROP_MARKER);
		hugetlb_vma_unlock_write(vma);
	}

	i_mmap_unlock_write(mapping);

	if (vma_lock) {
		/*
		 * Wait on vma_lock.  We know it is still valid as we have
		 * a reference.  We must 'open code' vma locking as we do
		 * not know if vma_lock is still attached to vma.
		 */
		down_write(&vma_lock->rw_sema);
		i_mmap_lock_write(mapping);

		vma = vma_lock->vma;
		if (!vma) {
			/*
			 * If lock is no longer attached to vma, then just
			 * unlock, drop our reference and retry looking for
			 * other vmas.
			 */
			up_write(&vma_lock->rw_sema);
			kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
			goto retry;
		}

		/*
		 * vma_lock is still attached to vma.  Check to see if vma
		 * still maps page and if so, unmap.
		 */
		v_start = vma_offset_start(vma, start);
		v_end = vma_offset_end(vma, end);
		if (hugetlb_vma_maps_pfn(vma, v_start, pfn))
			unmap_hugepage_range(vma, v_start, v_end, NULL,
					     ZAP_FLAG_DROP_MARKER);

		kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
		hugetlb_vma_unlock_write(vma);

		goto retry;
	}
}

static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
		      zap_flags_t zap_flags)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after start should be
	 * unmapped.  Note, end is exclusive, whereas the interval tree takes
	 * an inclusive "last".
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end - 1 : ULONG_MAX) {
		unsigned long v_start;
		unsigned long v_end;

		if (!hugetlb_vma_trylock_write(vma))
			continue;

		v_start = vma_offset_start(vma, start);
		v_end = vma_offset_end(vma, end);

		unmap_hugepage_range(vma, v_start, v_end, NULL, zap_flags);

		/*
		 * Note that vma lock only exists for shared/non-private
		 * vmas.  Therefore, lock is not held when calling
		 * unmap_hugepage_range for private vmas.
		 */
		hugetlb_vma_unlock_write(vma);
	}
}

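/*
 * Illustrative example of the inclusive-"last" conversion above (made-up
 * numbers): punching pgoffs [0x200, 0x400) passes start = 0x200 and
 * end = 0x400, and the interval tree is walked with last = 0x3ff;
 * truncating everything from pgoff 0x200 onward passes end = 0, and the
 * tree is walked with last = ULONG_MAX.
 */
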
/*
 * Called with hugetlb fault mutex held.
 * Returns true if page was actually removed, false otherwise.
 */
static bool remove_inode_single_folio(struct hstate *h, struct inode *inode,
				      struct address_space *mapping,
				      struct folio *folio, pgoff_t index,
				      bool truncate_op)
{
	bool ret = false;

	/*
	 * If folio is mapped, it was faulted in after being
	 * unmapped in caller, or hugetlb_vmdelete_list() skipped
	 * unmapping it because it failed to grab the lock.  Unmap (again)
	 * while holding the fault mutex.  The mutex will prevent
	 * faults until we finish removing the folio.  Hold folio
	 * lock to guarantee no concurrent migration.
	 */
	folio_lock(folio);
	if (unlikely(folio_mapped(folio)))
		hugetlb_unmap_file_folio(h, mapping, folio, index);

	/*
	 * We must remove the folio from page cache before removing
	 * the region/reserve map (hugetlb_unreserve_pages).  In
	 * rare out of memory conditions, removal of the region/reserve
	 * map could fail.  Correspondingly, the subpool and global
	 * reserve usage count can need to be adjusted.
	 */
	VM_BUG_ON_FOLIO(folio_test_hugetlb_restore_reserve(folio), folio);
	hugetlb_delete_from_page_cache(folio);
	ret = true;
	if (!truncate_op) {
		if (unlikely(hugetlb_unreserve_pages(inode, index,
						     index + 1, 1)))
			hugetlb_fix_reserve_counts(inode);
	}

	folio_unlock(folio);
	return ret;
}

/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserve
 *	maps and global counts.  Page faults can race with truncation.
 *	During faults, hugetlb_no_page() checks i_size before page allocation,
 *	and again after obtaining page table lock.  It will 'back out'
 *	allocations in the truncated range.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserve map
 *	deleted.  The region/reserve maps for ranges without associated
 *	pages are not modified.  Page faults can race with hole punch.
 *	This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t end = lend >> PAGE_SHIFT;
	struct folio_batch fbatch;
	pgoff_t next, index;
	int i, freed = 0;
	bool truncate_op = (lend == LLONG_MAX);

	folio_batch_init(&fbatch);
	next = lstart >> PAGE_SHIFT;
	while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); ++i) {
			struct folio *folio = fbatch.folios[i];
			u32 hash = 0;

			index = folio->index >> huge_page_order(h);
			hash = hugetlb_fault_mutex_hash(mapping, index);
			mutex_lock(&hugetlb_fault_mutex_table[hash]);

			/*
			 * Remove folio that was part of folio_batch.
			 */
			if (remove_inode_single_folio(h, inode, mapping, folio,
						      index, truncate_op))
				freed++;

			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode,
				lstart >> huge_page_shift(h),
				LONG_MAX, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	trace_hugetlbfs_evict_inode(inode);
	remove_inode_hugepages(inode, 0, LLONG_MAX);

	/*
	 * Get the resv_map from the address space embedded in the inode.
	 * This is the address space which points to any resv_map allocated
	 * at inode creation time.  If this is a device special inode,
	 * i_mapping may not point to the original address space.
	 */
	resv_map = (struct resv_map *)(&inode->i_data)->i_private_data;
	/* Only regular and link inodes have associated reserve maps */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}

static void hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_size_write(inode, offset);
	i_mmap_lock_write(mapping);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0,
				      ZAP_FLAG_DROP_MARKER);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
}

static void hugetlbfs_zero_partial_page(struct hstate *h,
					struct address_space *mapping,
					loff_t start,
					loff_t end)
{
	pgoff_t idx = start >> huge_page_shift(h);
	struct folio *folio;

	folio = filemap_lock_hugetlb_folio(h, mapping, idx);
	if (IS_ERR(folio))
		return;

	start = start & ~huge_page_mask(h);
	end = end & ~huge_page_mask(h);
	if (!end)
		end = huge_page_size(h);

	folio_zero_segment(folio, (size_t)start, (size_t)end);

	folio_unlock(folio);
	folio_put(folio);
}

static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * hole_start and hole_end indicate the full pages within the hole.
	 */
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);

	inode_lock(inode);

	/* protected by i_rwsem */
	if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
		inode_unlock(inode);
		return -EPERM;
	}

	i_mmap_lock_write(mapping);

	/* If range starts before first full page, zero partial page. */
	if (offset < hole_start)
		hugetlbfs_zero_partial_page(h, mapping,
				offset, min(offset + len, hole_start));

	/* Unmap users of full pages in the hole. */
	if (hole_end > hole_start) {
		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
			hugetlb_vmdelete_list(&mapping->i_mmap,
					      hole_start >> PAGE_SHIFT,
					      hole_end >> PAGE_SHIFT, 0);
	}

	/* If range extends beyond last full page, zero partial page. */
	if ((offset + len) > hole_end && (offset + len) > hole_start)
		hugetlbfs_zero_partial_page(h, mapping,
				hole_end, offset + len);

	i_mmap_unlock_write(mapping);

	/* Remove full pages from the file. */
	if (hole_end > hole_start)
		remove_inode_hugepages(inode, hole_start, hole_end);

	inode_unlock(inode);

	return 0;
}

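/*
 * Userspace sketch of a hole punch on a 2M-page hugetlbfs file
 * (illustrative only; fd and the byte range are made-up examples):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *
 *	// Punch bytes [6M, 12M): both ends are already 2M aligned, so
 *	// hole_start == 6M and hole_end == 12M, and the three full pages
 *	// in between are unmapped and removed with no partial zeroing.
 *	int punch(int fd)
 *	{
 *		return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *				 6UL << 20, 6UL << 20);
 *	}
 */
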
static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = hugetlbfs_punch_hole(inode, offset, len);
		goto out_nolock;
	}

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;

	inode_lock(inode);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
		error = -EPERM;
		goto out;
	}

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines.
	 */
	vma_init(&pseudo_vma, mm);
	vm_flags_init(&pseudo_vma, VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct folio *folio;
		unsigned long addr;

		cond_resched();

		/*
		 * fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/* mutex taken here, fault path and hole punch */
		hash = hugetlb_fault_mutex_hash(mapping, index);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		folio = filemap_get_folio(mapping, index << huge_page_order(h));
		if (!IS_ERR(folio)) {
			folio_put(folio);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			continue;
		}

		/*
		 * Allocate folio without setting the avoid_reserve argument.
		 * There certainly are no reserves associated with the
		 * pseudo_vma.  However, there could be shared mappings with
		 * reserves for the file at the inode level.  If we fallocate
		 * folios in these areas, we need to consume the reserves
		 * to keep reservation accounting consistent.
		 */
		folio = alloc_hugetlb_folio(&pseudo_vma, addr, false);
		if (IS_ERR(folio)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(folio);
			goto out;
		}
		folio_zero_user(folio, addr);
		__folio_mark_uptodate(folio);
		error = hugetlb_add_to_page_cache(folio, mapping, index);
		if (unlikely(error)) {
			restore_reserve_on_error(h, &pseudo_vma, addr, folio);
			folio_put(folio);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		folio_set_hugetlb_migratable(folio);
		/*
		 * folio_unlock because locked by hugetlb_add_to_page_cache()
		 * folio_put() due to reference from alloc_hugetlb_folio()
		 */
		folio_unlock(folio);
		folio_put(folio);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode_set_ctime_current(inode);
out:
	inode_unlock(inode);

out_nolock:
	trace_hugetlbfs_fallocate(inode, mode, offset, len, error);
	return error;
}

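/*
 * Userspace sketch of the preallocate path above (illustrative only;
 * assumes fd refers to a file on a 2M-page hugetlbfs mount):
 *
 *	#include <fcntl.h>
 *
 *	// Reserve and zero the first four 2M pages without mapping or
 *	// touching them; fails (typically ENOSPC) if the huge page pool
 *	// cannot supply them.
 *	int prealloc(int fd)
 *	{
 *		return fallocate(fd, 0, 0, 8UL << 20);
 *	}
 */
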
static int hugetlbfs_setattr(struct mnt_idmap *idmap,
			     struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

	error = setattr_prepare(idmap, dentry, attr);
	if (error)
		return error;

	trace_hugetlbfs_setattr(inode, dentry, attr);

	if (ia_valid & ATTR_SIZE) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize & ~huge_page_mask(h))
			return -EINVAL;
		/* protected by i_rwsem */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;
		hugetlb_vmtruncate(inode, newsize);
	}

	setattr_copy(idmap, inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_fs_context *ctx)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | ctx->mode;
		inode->i_uid = ctx->uid;
		inode->i_gid = ctx->gid;
		simple_inode_init_ts(inode);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					 struct mnt_idmap *idmap,
					 struct inode *dir,
					 umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map = NULL;

	/*
	 * Reserve maps are only needed for inodes that can have associated
	 * page allocations.
	 */
	if (S_ISREG(mode) || S_ISLNK(mode)) {
		resv_map = resv_map_alloc();
		if (!resv_map)
			return NULL;
	}

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode->i_ino = get_next_ino();
		inode_init_owner(idmap, inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				  &hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		simple_inode_init_ts(inode);
		inode->i_mapping->i_private_data = resv_map;
		info->seals = F_SEAL_SEAL;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
		trace_hugetlbfs_alloc_inode(inode, dir, mode);
	} else {
		if (resv_map)
			kref_put(&resv_map->refs, resv_map_release);
	}

	return inode;
}

/*
 * File creation.  Allocate an inode, and we're done.
 */
static int hugetlbfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
			   struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;

	inode = hugetlbfs_get_inode(dir->i_sb, idmap, dir, mode, dev);
	if (!inode)
		return -ENOSPC;
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	d_instantiate(dentry, inode);
	dget(dentry);	/* Extra count - pin the dentry in core */
	return 0;
}

static struct dentry *hugetlbfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
				      struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(idmap, dir, dentry,
				     mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return ERR_PTR(retval);
}

static int hugetlbfs_create(struct mnt_idmap *idmap,
			    struct inode *dir, struct dentry *dentry,
			    umode_t mode, bool excl)
{
	return hugetlbfs_mknod(idmap, dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_tmpfile(struct mnt_idmap *idmap,
			     struct inode *dir, struct file *file,
			     umode_t mode)
{
	struct inode *inode;

	inode = hugetlbfs_get_inode(dir->i_sb, idmap, dir, mode | S_IFREG, 0);
	if (!inode)
		return -ENOSPC;
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	d_tmpfile(file, inode);
	return finish_open_simple(file, 0);
}

static int hugetlbfs_symlink(struct mnt_idmap *idmap,
			     struct inode *dir, struct dentry *dentry,
			     const char *symname)
{
	const umode_t mode = S_IFLNK|S_IRWXUGO;
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, idmap, dir, mode, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));

	return error;
}

#ifdef CONFIG_MIGRATION
static int hugetlbfs_migrate_folio(struct address_space *mapping,
				   struct folio *dst, struct folio *src,
				   enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, dst, src);
	if (rc)
		return rc;

	if (hugetlb_folio_subpool(src)) {
		hugetlb_set_folio_subpool(dst,
					  hugetlb_folio_subpool(src));
		hugetlb_set_folio_subpool(src, NULL);
	}

	folio_migrate_flags(dst, src);

	return 0;
}
#else
#define hugetlbfs_migrate_folio NULL
#endif

static int hugetlbfs_error_remove_folio(struct address_space *mapping,
					struct folio *folio)
{
	return 0;
}

/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
	struct hugepage_subpool *spool = sbinfo->spool;
	unsigned long hpage_size = huge_page_size(sbinfo->hstate);
	unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
	char mod;

	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, sbinfo->gid));
	if (sbinfo->mode != 0755)
		seq_printf(m, ",mode=%o", sbinfo->mode);
	if (sbinfo->max_inodes != -1)
		seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

	hpage_size /= 1024;
	mod = 'K';
	if (hpage_size >= 1024) {
		hpage_size /= 1024;
		mod = 'M';
	}
	seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
	if (spool) {
		if (spool->max_hpages != -1)
			seq_printf(m, ",size=%llu",
				   (unsigned long long)spool->max_hpages << hpage_shift);
		if (spool->min_hpages != -1)
			seq_printf(m, ",min_size=%llu",
				   (unsigned long long)spool->min_hpages << hpage_shift);
	}
	return 0;
}

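/*
 * Illustrative /proc/mounts line for the made-up example mount shown
 * near hugetlb_fs_parameters (generic flags such as rw/relatime come
 * from the VFS, not from this function; size and min_size are printed
 * back in bytes, pagesize scaled to K/M, mode in octal):
 *
 *	none /mnt/huge hugetlbfs rw,relatime,uid=1000,gid=1000,mode=700,
 *	     pagesize=2M,size=1073741824,min_size=536870912 0 0
 */
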
static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(d_inode(dentry));
	u64 id = huge_encode_dev(dentry->d_sb->s_dev);

	buf->f_fsid = u64_to_fsid(id);
	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits set, just report 0 or -1 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock_irq(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock_irq(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}

static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}


static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = alloc_inode_sb(sb, hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}
	return &p->vfs_inode;
}

static void hugetlbfs_free_inode(struct inode *inode)
{
	trace_hugetlbfs_free_inode(inode);
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
}

static const struct address_space_operations hugetlbfs_aops = {
	.write_begin		= hugetlbfs_write_begin,
	.write_end		= hugetlbfs_write_end,
	.dirty_folio		= noop_dirty_folio,
	.migrate_folio		= hugetlbfs_migrate_folio,
	.error_remove_folio	= hugetlbfs_error_remove_folio,
};


static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = foo;

	inode_init_once(&ei->vfs_inode);
}

static const struct file_operations hugetlbfs_file_operations = {
	.read_iter		= hugetlbfs_read_iter,
	.mmap			= hugetlbfs_file_mmap,
	.fsync			= noop_fsync,
	.get_unmapped_area	= hugetlb_get_unmapped_area,
	.llseek			= default_llseek,
	.fallocate		= hugetlbfs_fallocate,
	.fop_flags		= FOP_HUGE_PAGES,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create		= hugetlbfs_create,
	.lookup		= simple_lookup,
	.link		= simple_link,
	.unlink		= simple_unlink,
	.symlink	= hugetlbfs_symlink,
	.mkdir		= hugetlbfs_mkdir,
	.rmdir		= simple_rmdir,
	.mknod		= hugetlbfs_mknod,
	.rename		= simple_rename,
	.setattr	= hugetlbfs_setattr,
	.tmpfile	= hugetlbfs_tmpfile,
};

static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr	= hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
	.alloc_inode	= hugetlbfs_alloc_inode,
	.free_inode	= hugetlbfs_free_inode,
	.destroy_inode	= hugetlbfs_destroy_inode,
	.evict_inode	= hugetlbfs_evict_inode,
	.statfs		= hugetlbfs_statfs,
	.put_super	= hugetlbfs_put_super,
	.show_options	= hugetlbfs_show_options,
};

/*
 * Convert size option passed from command line to number of huge pages
 * in the pool specified by hstate.  Size option could be in bytes
 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
 */
static long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
			 enum hugetlbfs_size_type val_type)
{
	if (val_type == NO_SIZE)
		return -1;

	if (val_type == SIZE_PERCENT) {
		size_opt <<= huge_page_shift(h);
		size_opt *= h->max_huge_pages;
		do_div(size_opt, 100);
	}

	size_opt >>= huge_page_shift(h);
	return size_opt;
}

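/*
 * Worked examples (illustrative): with 2M pages (huge_page_shift == 21)
 * and a pool of max_huge_pages == 512:
 *
 *	"size=25%" arrives as size_opt == 25 with SIZE_PERCENT:
 *		25 << 21 = 52428800; * 512 = 26843545600;
 *		/ 100 = 268435456; >> 21 = 128 huge pages.
 *	"size=1G" arrives as size_opt == 1073741824 with SIZE_STD:
 *		1073741824 >> 21 = 512 huge pages.
 */
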
/*
 * Parse one mount parameter.
 */
static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;
	struct fs_parse_result result;
	struct hstate *h;
	char *rest;
	unsigned long ps;
	int opt;

	opt = fs_parse(fc, hugetlb_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_uid:
		ctx->uid = result.uid;
		return 0;

	case Opt_gid:
		ctx->gid = result.gid;
		return 0;

	case Opt_mode:
		ctx->mode = result.uint_32 & 01777U;
		return 0;

	case Opt_size:
		/* memparse() will accept a K/M/G without a digit */
		if (!param->string || !isdigit(param->string[0]))
			goto bad_val;
		ctx->max_size_opt = memparse(param->string, &rest);
		ctx->max_val_type = SIZE_STD;
		if (*rest == '%')
			ctx->max_val_type = SIZE_PERCENT;
		return 0;

	case Opt_nr_inodes:
		/* memparse() will accept a K/M/G without a digit */
		if (!param->string || !isdigit(param->string[0]))
			goto bad_val;
		ctx->nr_inodes = memparse(param->string, &rest);
		return 0;

	case Opt_pagesize:
		ps = memparse(param->string, &rest);
		h = size_to_hstate(ps);
		if (!h) {
			pr_err("Unsupported page size %lu MB\n", ps / SZ_1M);
			return -EINVAL;
		}
		ctx->hstate = h;
		return 0;

	case Opt_min_size:
		/* memparse() will accept a K/M/G without a digit */
		if (!param->string || !isdigit(param->string[0]))
			goto bad_val;
		ctx->min_size_opt = memparse(param->string, &rest);
		ctx->min_val_type = SIZE_STD;
		if (*rest == '%')
			ctx->min_val_type = SIZE_PERCENT;
		return 0;

	default:
		return -EINVAL;
	}

bad_val:
	return invalfc(fc, "Bad value '%s' for mount option '%s'\n",
		       param->string, param->key);
}

/*
 * Validate the parsed options.
 */
static int hugetlbfs_validate(struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;

	/*
	 * Use huge page pool size (in hstate) to convert the size
	 * options to number of huge pages.  If NO_SIZE, -1 is returned.
	 */
	ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
						   ctx->max_size_opt,
						   ctx->max_val_type);
	ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
						   ctx->min_size_opt,
						   ctx->min_val_type);

	/*
	 * If max_size was specified, then min_size must be smaller
	 */
	if (ctx->max_val_type > NO_SIZE &&
	    ctx->min_hpages > ctx->max_hpages) {
		pr_err("Minimum size can not be greater than maximum size\n");
		return -EINVAL;
	}

	return 0;
}

static int
hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;
	struct hugetlbfs_sb_info *sbinfo;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->hstate		= ctx->hstate;
	sbinfo->max_inodes	= ctx->nr_inodes;
	sbinfo->free_inodes	= ctx->nr_inodes;
	sbinfo->spool		= NULL;
	sbinfo->uid		= ctx->uid;
	sbinfo->gid		= ctx->gid;
	sbinfo->mode		= ctx->mode;

	/*
	 * Allocate and initialize subpool if maximum or minimum size is
	 * specified.  Any needed reservations (for minimum size) are taken
	 * when the subpool is created.
	 */
	if (ctx->max_hpages != -1 || ctx->min_hpages != -1) {
		sbinfo->spool = hugepage_new_subpool(ctx->hstate,
						     ctx->max_hpages,
						     ctx->min_hpages);
		if (!sbinfo->spool)
			goto out_free;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(ctx->hstate);
	sb->s_blocksize_bits = huge_page_shift(ctx->hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_d_flags = DCACHE_DONTCACHE;
	sb->s_time_gran = 1;

	/*
	 * Due to the special and limited functionality of hugetlbfs, it does
	 * not work well as a stacking filesystem.
	 */
	sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
	sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx));
	if (!sb->s_root)
		goto out_free;
	return 0;
out_free:
	kfree(sbinfo->spool);
	kfree(sbinfo);
	return -ENOMEM;
}

static int hugetlbfs_get_tree(struct fs_context *fc)
{
	int err = hugetlbfs_validate(fc);
	if (err)
		return err;
	return get_tree_nodev(fc, hugetlbfs_fill_super);
}

static void hugetlbfs_fs_context_free(struct fs_context *fc)
{
	kfree(fc->fs_private);
}

static const struct fs_context_operations hugetlbfs_fs_context_ops = {
	.free		= hugetlbfs_fs_context_free,
	.parse_param	= hugetlbfs_parse_param,
	.get_tree	= hugetlbfs_get_tree,
};

static int hugetlbfs_init_fs_context(struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx;

	ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->max_hpages	= -1;			/* No limit on size by default */
	ctx->nr_inodes	= -1;			/* No limit on number of inodes by default */
	ctx->uid	= current_fsuid();
	ctx->gid	= current_fsgid();
	ctx->mode	= 0755;
	ctx->hstate	= &default_hstate;
	ctx->min_hpages	= -1;			/* No default minimum size */
	ctx->max_val_type = NO_SIZE;
	ctx->min_val_type = NO_SIZE;
	fc->fs_private = ctx;
	fc->ops	= &hugetlbfs_fs_context_ops;
	return 0;
}

static struct file_system_type hugetlbfs_fs_type = {
	.name			= "hugetlbfs",
	.init_fs_context	= hugetlbfs_init_fs_context,
	.parameters		= hugetlb_fs_parameters,
	.kill_sb		= kill_litter_super,
	.fs_flags		= FS_ALLOW_IDMAP,
};

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;
	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}

static int get_hstate_idx(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return hstate_index(h);
}

/*
 * Note that size should be aligned to the proper hugepage size by the
 * caller; otherwise hugetlb_reserve_pages reserves one fewer hugepage
 * than intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, int creat_flags,
				int page_size_log)
{
	struct inode *inode;
	struct vfsmount *mnt;
	int hstate_idx;
	struct file *file;

	hstate_idx = get_hstate_idx(page_size_log);
	if (hstate_idx < 0)
		return ERR_PTR(-ENODEV);

	mnt = hugetlbfs_vfsmount[hstate_idx];
	if (!mnt)
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		struct ucounts *ucounts = current_ucounts();

		if (user_shm_lock(size, ucounts)) {
			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is obsolete\n",
				current->comm, current->pid);
			user_shm_unlock(size, ucounts);
		}
		return ERR_PTR(-EPERM);
	}

	file = ERR_PTR(-ENOSPC);
	/* hugetlbfs_vfsmount[] mounts do not use idmapped mounts.  */
	inode = hugetlbfs_get_inode(mnt->mnt_sb, &nop_mnt_idmap, NULL,
				    S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out;
	if (creat_flags == HUGETLB_SHMFS_INODE)
		inode->i_flags |= S_PRIVATE;

	inode->i_size = size;
	clear_nlink(inode);

	if (hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag) < 0)
		file = ERR_PTR(-ENOMEM);
	else
		file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
					 &hugetlbfs_file_operations);
	if (!IS_ERR(file))
		return file;

	iput(inode);
out:
	return file;
}

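/*
 * Userspace sketch of one route into hugetlb_file_setup(): SYSV shared
 * memory with SHM_HUGETLB (illustrative only; len should be a multiple
 * of the huge page size, and the caller needs CAP_IPC_LOCK or membership
 * in the sysctl_hugetlb_shm_group group, as checked by
 * can_do_hugetlb_shm() above):
 *
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *
 *	void *get_huge_shm(size_t len)
 *	{
 *		int id = shmget(IPC_PRIVATE, len,
 *				SHM_HUGETLB | IPC_CREAT | SHM_R | SHM_W);
 *
 *		// shmat() itself returns (void *)-1 on error
 *		return (id < 0) ? (void *)-1 : shmat(id, NULL, 0);
 *	}
 */
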
static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
{
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT);
	if (IS_ERR(fc)) {
		mnt = ERR_CAST(fc);
	} else {
		struct hugetlbfs_fs_context *ctx = fc->fs_private;
		ctx->hstate = h;
		mnt = fc_mount_longterm(fc);
		put_fs_context(fc);
	}
	if (IS_ERR(mnt))
		pr_err("Cannot mount internal hugetlbfs for page size %luK",
		       huge_page_size(h) / SZ_1K);
	return mnt;
}

static int __init init_hugetlbfs_fs(void)
{
	struct vfsmount *mnt;
	struct hstate *h;
	int error;
	int i;

	if (!hugepages_supported()) {
		pr_info("disabling because there are no supported hugepage sizes\n");
		return -ENOTSUPP;
	}

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, SLAB_ACCOUNT, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out_free;

	/* default hstate mount is required */
	mnt = mount_one_hugetlbfs(&default_hstate);
	if (IS_ERR(mnt)) {
		error = PTR_ERR(mnt);
		goto out_unreg;
	}
	hugetlbfs_vfsmount[default_hstate_idx] = mnt;

	/* other hstates are optional */
	i = 0;
	for_each_hstate(h) {
		if (i == default_hstate_idx) {
			i++;
			continue;
		}

		mnt = mount_one_hugetlbfs(h);
		if (IS_ERR(mnt))
			hugetlbfs_vfsmount[i] = NULL;
		else
			hugetlbfs_vfsmount[i] = mnt;
		i++;
	}

	return 0;

 out_unreg:
	(void)unregister_filesystem(&hugetlbfs_fs_type);
 out_free:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
 out:
	return error;
}
fs_initcall(init_hugetlbfs_fs)