/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/fs_parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>
#include <linux/sched/mm.h>

static const struct address_space_operations hugetlbfs_aops;
static const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

struct hugetlbfs_fs_context {
	struct hstate		*hstate;
	unsigned long long	max_size_opt;
	unsigned long long	min_size_opt;
	long			max_hpages;
	long			nr_inodes;
	long			min_hpages;
	enum hugetlbfs_size_type max_val_type;
	enum hugetlbfs_size_type min_val_type;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
};

int sysctl_hugetlb_shm_group;

enum hugetlb_param {
	Opt_gid,
	Opt_min_size,
	Opt_mode,
	Opt_nr_inodes,
	Opt_pagesize,
	Opt_size,
	Opt_uid,
};

static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
	fsparam_u32   ("gid",		Opt_gid),
	fsparam_string("min_size",	Opt_min_size),
	fsparam_u32oct("mode",		Opt_mode),
	fsparam_string("nr_inodes",	Opt_nr_inodes),
	fsparam_string("pagesize",	Opt_pagesize),
	fsparam_string("size",		Opt_size),
	fsparam_u32   ("uid",		Opt_uid),
	{}
};

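/*
 * Illustrative example (not part of the original source): a mount
 * exercising these parameters might look like
 *
 *	mount -t hugetlbfs -o size=2G,pagesize=2M,mode=01777 none /mnt/huge
 *
 * "pagesize" selects the hstate, "size" caps the subpool, and "mode"
 * sets the root directory permissions.
 */
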
/*
 * Mask used when checking the page offset value passed in via system
 * calls.  This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value.  The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
	(((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1)))
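/*
 * e.g. with 4K base pages on a 64-bit arch (PAGE_SHIFT == 12,
 * BITS_PER_LONG == 64) the mask covers the top 13 bits of vm_pgoff:
 * any pgoff >= 2^51 would reach the sign bit of a loff_t once
 * shifted left by PAGE_SHIFT to form a byte offset.
 */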

static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);
	vm_flags_t vm_flags;

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap unwinds (may be important on powerpc
	 * and ia64).
	 */
	vm_flags_set(vma, VM_HUGETLB | VM_DONTEXPAND);
	vma->vm_ops = &hugetlb_vm_ops;

	ret = seal_check_write(info->seals, vma);
	if (ret)
		return ret;

	/*
	 * page based offset in vm_pgoff could be sufficiently large to
	 * overflow a loff_t when converted to byte offset.  This can
	 * only happen on architectures where sizeof(loff_t) ==
	 * sizeof(unsigned long).  So, only check in those instances.
	 */
	if (sizeof(unsigned long) == sizeof(loff_t)) {
		if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
			return -EINVAL;
	}

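	/*
	 * e.g. for a 2MB hstate with 4K base pages,
	 * ~huge_page_mask(h) >> PAGE_SHIFT == 511, so vm_pgoff must be
	 * a multiple of 512 base pages, i.e. 2MB-aligned in the file.
	 */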
	/* must be huge page aligned */
	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* check for overflow */
	if (len < vma_len)
		return -EINVAL;

	inode_lock(inode);
	file_accessed(file);

	ret = -ENOMEM;

	vm_flags = vma->vm_flags;
	/*
	 * for SHM_HUGETLB, the pages are reserved in the shmget() call so skip
	 * reserving here. Note: only for SHM hugetlbfs file, the inode
	 * flag S_PRIVATE is set.
	 */
	if (inode->i_flags & S_PRIVATE)
		vm_flags |= VM_NORESERVE;

	if (!hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vm_flags))
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	inode_unlock(inode);

	return ret;
}

/*
 * Called under mmap_write_lock(mm).
 */

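/*
 * In both helpers below, align_mask = PAGE_MASK & ~huge_page_mask(h)
 * asks vm_unmapped_area() for a huge-page-aligned address: e.g. for a
 * 2MB hstate with 4K base pages the mask is 0x1ff000, forcing the low
 * 21 bits of the returned address to zero.
 */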
static unsigned long
hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info = {};

	info.length = len;
	info.low_limit = current->mm->mmap_base;
	info.high_limit = arch_get_mmap_end(addr, len, flags);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	return vm_unmapped_area(&info);
}

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info = {};

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (unlikely(offset_in_page(addr))) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = current->mm->mmap_base;
		info.high_limit = arch_get_mmap_end(addr, len, flags);
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long
generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (mmap_end - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	/*
	 * Use MMF_TOPDOWN flag as a hint to use topdown routine.
	 * If architectures have special needs, they should define their own
	 * version of hugetlb_get_unmapped_area.
	 */
	if (test_bit(MMF_TOPDOWN, &mm->flags))
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
	return hugetlb_get_unmapped_area_bottomup(file, addr, len,
			pgoff, flags);
}

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags)
{
	return generic_hugetlb_get_unmapped_area(file, addr, len, pgoff, flags);
}
#endif

/*
 * Someone wants to read @bytes from a HWPOISON hugetlb @page from @offset.
 * Returns the maximum number of bytes one can read without touching the 1st raw
 * HWPOISON subpage.
 *
 * The implementation borrows the iteration logic from copy_page_to_iter*.
 */
static size_t adjust_range_hwpoison(struct page *page, size_t offset, size_t bytes)
{
	size_t n = 0;
	size_t res = 0;

	/* First subpage to start the loop. */
	page = nth_page(page, offset / PAGE_SIZE);
	offset %= PAGE_SIZE;
	while (1) {
		if (is_raw_hwpoison_page_in_hugepage(page))
			break;

		/* Safe to read n bytes without touching HWPOISON subpage. */
		n = min(bytes, (size_t)PAGE_SIZE - offset);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page = nth_page(page, 1);
			offset = 0;
		}
	}

	return res;
}

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data. This provides functionality similar to filemap_read().
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct folio *folio;
		size_t nr, copied, want;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the folio */
		folio = filemap_lock_hugetlb_folio(h, mapping, index);
		if (IS_ERR(folio)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			folio_unlock(folio);

			if (!folio_test_hwpoison(folio))
				want = nr;
			else {
				/*
				 * Adjust how many bytes safe to read without
				 * touching the 1st raw HWPOISON subpage after
				 * offset.
				 */
				want = adjust_range_hwpoison(&folio->page, offset, nr);
				if (want == 0) {
					folio_put(folio);
					retval = -EIO;
					break;
				}
			}

			/*
			 * We have the folio, copy it to user space buffer.
			 */
			copied = copy_folio_to_iter(folio, offset, want, to);
			folio_put(folio);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}

static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}

static void hugetlb_delete_from_page_cache(struct folio *folio)
{
	folio_clear_dirty(folio);
	folio_clear_uptodate(folio);
	filemap_remove_folio(folio);
}

/*
 * Called with i_mmap_rwsem held for inode based vma maps.  This makes
 * sure vma (and vm_mm) will not go away.  We also hold the hugetlb fault
 * mutex for the page in the mapping.  So, we can not race with page being
 * faulted into the vma.
 */
static bool hugetlb_vma_maps_page(struct vm_area_struct *vma,
				unsigned long addr, struct page *page)
{
	pte_t *ptep, pte;

	ptep = hugetlb_walk(vma, addr, huge_page_size(hstate_vma(vma)));
	if (!ptep)
		return false;

	pte = huge_ptep_get(ptep);
	if (huge_pte_none(pte) || !pte_present(pte))
		return false;

	if (pte_page(pte) == page)
		return true;

	return false;
}

/*
 * Can vma_offset_start/vma_offset_end overflow on 32-bit arches?
 * No, because the interval tree returns us only those vmas
 * which overlap the truncated area starting at pgoff,
 * and no vma on a 32-bit arch can span beyond the 4GB.
 */
static unsigned long vma_offset_start(struct vm_area_struct *vma, pgoff_t start)
{
	unsigned long offset = 0;

	if (vma->vm_pgoff < start)
		offset = (start - vma->vm_pgoff) << PAGE_SHIFT;

	return vma->vm_start + offset;
}

static unsigned long vma_offset_end(struct vm_area_struct *vma, pgoff_t end)
{
	unsigned long t_end;

	if (!end)
		return vma->vm_end;

	t_end = ((end - vma->vm_pgoff) << PAGE_SHIFT) + vma->vm_start;
	if (t_end > vma->vm_end)
		t_end = vma->vm_end;
	return t_end;
}

/*
 * Called with hugetlb fault mutex held.  Therefore, no more mappings to
 * this folio can be created while executing the routine.
 */
static void hugetlb_unmap_file_folio(struct hstate *h,
					struct address_space *mapping,
					struct folio *folio, pgoff_t index)
{
	struct rb_root_cached *root = &mapping->i_mmap;
	struct hugetlb_vma_lock *vma_lock;
	struct page *page = &folio->page;
	struct vm_area_struct *vma;
	unsigned long v_start;
	unsigned long v_end;
	pgoff_t start, end;

	start = index * pages_per_huge_page(h);
	end = (index + 1) * pages_per_huge_page(h);

	i_mmap_lock_write(mapping);
retry:
	vma_lock = NULL;
	vma_interval_tree_foreach(vma, root, start, end - 1) {
		v_start = vma_offset_start(vma, start);
		v_end = vma_offset_end(vma, end);

		if (!hugetlb_vma_maps_page(vma, v_start, page))
			continue;

		if (!hugetlb_vma_trylock_write(vma)) {
			vma_lock = vma->vm_private_data;
			/*
			 * If we can not get vma lock, we need to drop
			 * immap_sema and take locks in order.  First,
			 * take a ref on the vma_lock structure so that
			 * we can be guaranteed it will not go away when
			 * dropping immap_sema.
			 */
			kref_get(&vma_lock->refs);
			break;
		}

		unmap_hugepage_range(vma, v_start, v_end, NULL,
				     ZAP_FLAG_DROP_MARKER);
		hugetlb_vma_unlock_write(vma);
	}

	i_mmap_unlock_write(mapping);

	if (vma_lock) {
		/*
		 * Wait on vma_lock.  We know it is still valid as we have
		 * a reference.  We must 'open code' vma locking as we do
		 * not know if vma_lock is still attached to vma.
		 */
		down_write(&vma_lock->rw_sema);
		i_mmap_lock_write(mapping);

		vma = vma_lock->vma;
		if (!vma) {
			/*
			 * If lock is no longer attached to vma, then just
			 * unlock, drop our reference and retry looking for
			 * other vmas.
			 */
			up_write(&vma_lock->rw_sema);
			kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
			goto retry;
		}

		/*
		 * vma_lock is still attached to vma.  Check to see if vma
		 * still maps page and if so, unmap.
		 */
		v_start = vma_offset_start(vma, start);
		v_end = vma_offset_end(vma, end);
		if (hugetlb_vma_maps_page(vma, v_start, page))
			unmap_hugepage_range(vma, v_start, v_end, NULL,
					     ZAP_FLAG_DROP_MARKER);

		kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
		hugetlb_vma_unlock_write(vma);

		goto retry;
	}
}

static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
		      zap_flags_t zap_flags)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after start should be
	 * unmapped.  Note, end is exclusive, whereas the interval tree takes
	 * an inclusive "last".
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end - 1 : ULONG_MAX) {
		unsigned long v_start;
		unsigned long v_end;

		if (!hugetlb_vma_trylock_write(vma))
			continue;

		v_start = vma_offset_start(vma, start);
		v_end = vma_offset_end(vma, end);

		unmap_hugepage_range(vma, v_start, v_end, NULL, zap_flags);

		/*
		 * Note that vma lock only exists for shared/non-private
		 * vmas.  Therefore, lock is not held when calling
		 * unmap_hugepage_range for private vmas.
		 */
		hugetlb_vma_unlock_write(vma);
	}
}

/*
 * Called with hugetlb fault mutex held.
 * Returns true if page was actually removed, false otherwise.
 */
static bool remove_inode_single_folio(struct hstate *h, struct inode *inode,
					struct address_space *mapping,
					struct folio *folio, pgoff_t index,
					bool truncate_op)
{
	bool ret = false;

	/*
	 * If folio is mapped, it was faulted in after being
	 * unmapped in caller.  Unmap (again) while holding
	 * the fault mutex.  The mutex will prevent faults
	 * until we finish removing the folio.
	 */
	if (unlikely(folio_mapped(folio)))
		hugetlb_unmap_file_folio(h, mapping, folio, index);

	folio_lock(folio);
	/*
	 * We must remove the folio from page cache before removing
	 * the region/ reserve map (hugetlb_unreserve_pages).  In
	 * rare out of memory conditions, removal of the region/reserve
	 * map could fail.  Correspondingly, the subpool and global
	 * reserve usage count can need to be adjusted.
	 */
	VM_BUG_ON_FOLIO(folio_test_hugetlb_restore_reserve(folio), folio);
	hugetlb_delete_from_page_cache(folio);
	ret = true;
	if (!truncate_op) {
		if (unlikely(hugetlb_unreserve_pages(inode, index,
							index + 1, 1)))
			hugetlb_fix_reserve_counts(inode);
	}

	folio_unlock(folio);
	return ret;
}

/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserve
 *	maps and global counts.  Page faults can race with truncation.
 *	During faults, hugetlb_no_page() checks i_size before page allocation,
 *	and again after obtaining page table lock.  It will 'back out'
 *	allocations in the truncated range.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserve map
 *	deleted.  The region/reserve map for ranges without associated
 *	pages are not modified.  Page faults can race with hole punch.
 *	This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t end = lend >> PAGE_SHIFT;
	struct folio_batch fbatch;
	pgoff_t next, index;
	int i, freed = 0;
	bool truncate_op = (lend == LLONG_MAX);

	folio_batch_init(&fbatch);
	next = lstart >> PAGE_SHIFT;
	while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); ++i) {
			struct folio *folio = fbatch.folios[i];
			u32 hash = 0;

			index = folio->index >> huge_page_order(h);
			hash = hugetlb_fault_mutex_hash(mapping, index);
			mutex_lock(&hugetlb_fault_mutex_table[hash]);

			/*
			 * Remove folio that was part of folio_batch.
			 */
			if (remove_inode_single_folio(h, inode, mapping, folio,
							index, truncate_op))
				freed++;

			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode,
				lstart >> huge_page_shift(h),
				LONG_MAX, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	remove_inode_hugepages(inode, 0, LLONG_MAX);

	/*
	 * Get the resv_map from the address space embedded in the inode.
	 * This is the address space which points to any resv_map allocated
	 * at inode creation time.  If this is a device special inode,
	 * i_mapping may not point to the original address space.
	 */
	resv_map = (struct resv_map *)(&inode->i_data)->i_private_data;
	/* Only regular and link inodes have associated reserve maps */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}

static void hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_size_write(inode, offset);
	i_mmap_lock_write(mapping);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0,
				      ZAP_FLAG_DROP_MARKER);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
}

static void hugetlbfs_zero_partial_page(struct hstate *h,
					struct address_space *mapping,
					loff_t start,
					loff_t end)
{
	pgoff_t idx = start >> huge_page_shift(h);
	struct folio *folio;

	folio = filemap_lock_hugetlb_folio(h, mapping, idx);
	if (IS_ERR(folio))
		return;

	start = start & ~huge_page_mask(h);
	end = end & ~huge_page_mask(h);
	if (!end)
		end = huge_page_size(h);

	folio_zero_segment(folio, (size_t)start, (size_t)end);

	folio_unlock(folio);
	folio_put(folio);
}

static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * hole_start and hole_end indicate the full pages within the hole.
	 */
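	/*
	 * e.g. punching [1MB, 5MB) with 2MB pages: hole_start == 2MB and
	 * hole_end == 4MB, so only [2MB, 4MB) is removed outright; the
	 * partial ranges [1MB, 2MB) and [4MB, 5MB) are zeroed below.
	 */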
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);

	inode_lock(inode);

	/* protected by i_rwsem */
	if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
		inode_unlock(inode);
		return -EPERM;
	}

	i_mmap_lock_write(mapping);

	/* If range starts before first full page, zero partial page. */
	if (offset < hole_start)
		hugetlbfs_zero_partial_page(h, mapping,
				offset, min(offset + len, hole_start));

	/* Unmap users of full pages in the hole. */
	if (hole_end > hole_start) {
		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
			hugetlb_vmdelete_list(&mapping->i_mmap,
					      hole_start >> PAGE_SHIFT,
					      hole_end >> PAGE_SHIFT, 0);
	}

	/* If range extends beyond last full page, zero partial page. */
	if ((offset + len) > hole_end && (offset + len) > hole_start)
		hugetlbfs_zero_partial_page(h, mapping,
				hole_end, offset + len);

	i_mmap_unlock_write(mapping);

	/* Remove full pages from the file. */
	if (hole_end > hole_start)
		remove_inode_hugepages(inode, hole_start, hole_end);

	inode_unlock(inode);

	return 0;
}

static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return hugetlbfs_punch_hole(inode, offset, len);

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
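	/*
	 * e.g. offset == 3MB, len == 2MB with 2MB pages: start == 1 and
	 * end == 3, so huge pages 1 and 2 ([2MB, 6MB) in the file) are
	 * preallocated.
	 */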
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;

	inode_lock(inode);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
		error = -EPERM;
		goto out;
	}

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines.
	 */
	vma_init(&pseudo_vma, mm);
	vm_flags_init(&pseudo_vma, VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct folio *folio;
		unsigned long addr;

		cond_resched();

		/*
		 * fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/* mutex taken here, fault path and hole punch */
		hash = hugetlb_fault_mutex_hash(mapping, index);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		folio = filemap_get_folio(mapping, index << huge_page_order(h));
		if (!IS_ERR(folio)) {
			folio_put(folio);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			continue;
		}

		/*
		 * Allocate folio without setting the avoid_reserve argument.
		 * There certainly are no reserves associated with the
		 * pseudo_vma.  However, there could be shared mappings with
		 * reserves for the file at the inode level.  If we fallocate
		 * folios in these areas, we need to consume the reserves
		 * to keep reservation accounting consistent.
		 */
		folio = alloc_hugetlb_folio(&pseudo_vma, addr, 0);
		if (IS_ERR(folio)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(folio);
			goto out;
		}
		clear_huge_page(&folio->page, addr, pages_per_huge_page(h));
		__folio_mark_uptodate(folio);
		error = hugetlb_add_to_page_cache(folio, mapping, index);
		if (unlikely(error)) {
			restore_reserve_on_error(h, &pseudo_vma, addr, folio);
			folio_put(folio);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		folio_set_hugetlb_migratable(folio);
		/*
		 * folio_unlock because locked by hugetlb_add_to_page_cache()
		 * folio_put() due to reference from alloc_hugetlb_folio()
		 */
		folio_unlock(folio);
		folio_put(folio);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode_set_ctime_current(inode);
out:
	inode_unlock(inode);
	return error;
}

static int hugetlbfs_setattr(struct mnt_idmap *idmap,
			     struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

	error = setattr_prepare(idmap, dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize & ~huge_page_mask(h))
			return -EINVAL;
		/* protected by i_rwsem */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;
		hugetlb_vmtruncate(inode, newsize);
	}

	setattr_copy(idmap, inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_fs_context *ctx)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | ctx->mode;
		inode->i_uid = ctx->uid;
		inode->i_gid = ctx->gid;
		simple_inode_init_ts(inode);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct mnt_idmap *idmap,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map = NULL;

	/*
	 * Reserve maps are only needed for inodes that can have associated
	 * page allocations.
	 */
	if (S_ISREG(mode) || S_ISLNK(mode)) {
		resv_map = resv_map_alloc();
		if (!resv_map)
			return NULL;
	}

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode->i_ino = get_next_ino();
		inode_init_owner(idmap, inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				&hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		simple_inode_init_ts(inode);
		inode->i_mapping->i_private_data = resv_map;
		info->seals = F_SEAL_SEAL;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else {
		if (resv_map)
			kref_put(&resv_map->refs, resv_map_release);
	}

	return inode;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int hugetlbfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
			   struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;

	inode = hugetlbfs_get_inode(dir->i_sb, idmap, dir, mode, dev);
	if (!inode)
		return -ENOSPC;
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	d_instantiate(dentry, inode);
	dget(dentry);	/* Extra count - pin the dentry in core */
	return 0;
}

static int hugetlbfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
			   struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(idmap, dir, dentry,
				     mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct mnt_idmap *idmap,
			    struct inode *dir, struct dentry *dentry,
			    umode_t mode, bool excl)
{
	return hugetlbfs_mknod(idmap, dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_tmpfile(struct mnt_idmap *idmap,
			     struct inode *dir, struct file *file,
			     umode_t mode)
{
	struct inode *inode;

	inode = hugetlbfs_get_inode(dir->i_sb, idmap, dir, mode | S_IFREG, 0);
	if (!inode)
		return -ENOSPC;
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	d_tmpfile(file, inode);
	return finish_open_simple(file, 0);
}

static int hugetlbfs_symlink(struct mnt_idmap *idmap,
			     struct inode *dir, struct dentry *dentry,
			     const char *symname)
{
	const umode_t mode = S_IFLNK|S_IRWXUGO;
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, idmap, dir, mode, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));

	return error;
}

#ifdef CONFIG_MIGRATION
static int hugetlbfs_migrate_folio(struct address_space *mapping,
				struct folio *dst, struct folio *src,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, dst, src);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (hugetlb_folio_subpool(src)) {
		hugetlb_set_folio_subpool(dst,
					hugetlb_folio_subpool(src));
		hugetlb_set_folio_subpool(src, NULL);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);

	return MIGRATEPAGE_SUCCESS;
}
#else
#define hugetlbfs_migrate_folio NULL
#endif

static int hugetlbfs_error_remove_folio(struct address_space *mapping,
				struct folio *folio)
{
	return 0;
}

/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
	struct hugepage_subpool *spool = sbinfo->spool;
	unsigned long hpage_size = huge_page_size(sbinfo->hstate);
	unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
	char mod;

	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, sbinfo->gid));
	if (sbinfo->mode != 0755)
		seq_printf(m, ",mode=%o", sbinfo->mode);
	if (sbinfo->max_inodes != -1)
		seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

	hpage_size /= 1024;
	mod = 'K';
	if (hpage_size >= 1024) {
		hpage_size /= 1024;
		mod = 'M';
	}
	seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
	if (spool) {
		if (spool->max_hpages != -1)
			seq_printf(m, ",size=%llu",
				   (unsigned long long)spool->max_hpages << hpage_shift);
		if (spool->min_hpages != -1)
			seq_printf(m, ",min_size=%llu",
				   (unsigned long long)spool->min_hpages << hpage_shift);
	}
	return 0;
}

static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(d_inode(dentry));
	u64 id = huge_encode_dev(dentry->d_sb->s_dev);

	buf->f_fsid = u64_to_fsid(id);
	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits set, just report 0 or -1 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock_irq(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock_irq(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}

static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}


static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = alloc_inode_sb(sb, hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}
	return &p->vfs_inode;
}

static void hugetlbfs_free_inode(struct inode *inode)
{
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
}

static const struct address_space_operations hugetlbfs_aops = {
	.write_begin		= hugetlbfs_write_begin,
	.write_end		= hugetlbfs_write_end,
	.dirty_folio		= noop_dirty_folio,
	.migrate_folio		= hugetlbfs_migrate_folio,
	.error_remove_folio	= hugetlbfs_error_remove_folio,
};


static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = foo;

	inode_init_once(&ei->vfs_inode);
}

static const struct file_operations hugetlbfs_file_operations = {
	.read_iter		= hugetlbfs_read_iter,
	.mmap			= hugetlbfs_file_mmap,
	.fsync			= noop_fsync,
	.get_unmapped_area	= hugetlb_get_unmapped_area,
	.llseek			= default_llseek,
	.fallocate		= hugetlbfs_fallocate,
	.fop_flags		= FOP_HUGE_PAGES,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create		= hugetlbfs_create,
	.lookup		= simple_lookup,
	.link		= simple_link,
	.unlink		= simple_unlink,
	.symlink	= hugetlbfs_symlink,
	.mkdir		= hugetlbfs_mkdir,
	.rmdir		= simple_rmdir,
	.mknod		= hugetlbfs_mknod,
	.rename		= simple_rename,
	.setattr	= hugetlbfs_setattr,
	.tmpfile	= hugetlbfs_tmpfile,
};

static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr	= hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
	.alloc_inode	= hugetlbfs_alloc_inode,
	.free_inode	= hugetlbfs_free_inode,
	.destroy_inode	= hugetlbfs_destroy_inode,
	.evict_inode	= hugetlbfs_evict_inode,
	.statfs		= hugetlbfs_statfs,
	.put_super	= hugetlbfs_put_super,
	.show_options	= hugetlbfs_show_options,
};

/*
 * Convert size option passed from command line to number of huge pages
 * in the pool specified by hstate.  Size option could be in bytes
 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
 */
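/*
 * e.g. "size=50%" with 2MB huge pages and max_huge_pages == 1024:
 * (50 << 21) * 1024 / 100 >> 21 == 512 huge pages.
 */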
4a25220d | 1344 | static long |
7ca02d0a | 1345 | hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt, |
4a25220d | 1346 | enum hugetlbfs_size_type val_type) |
7ca02d0a MK |
1347 | { |
1348 | if (val_type == NO_SIZE) | |
1349 | return -1; | |
1350 | ||
1351 | if (val_type == SIZE_PERCENT) { | |
1352 | size_opt <<= huge_page_shift(h); | |
1353 | size_opt *= h->max_huge_pages; | |
1354 | do_div(size_opt, 100); | |
1355 | } | |
1356 | ||
1357 | size_opt >>= huge_page_shift(h); | |
1358 | return size_opt; | |
1359 | } | |

/*
 * Parse one mount parameter.
 */
static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;
	struct fs_parse_result result;
	struct hstate *h;
	char *rest;
	unsigned long ps;
	int opt;

	opt = fs_parse(fc, hugetlb_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_uid:
		ctx->uid = make_kuid(current_user_ns(), result.uint_32);
		if (!uid_valid(ctx->uid))
			goto bad_val;
		return 0;

	case Opt_gid:
		ctx->gid = make_kgid(current_user_ns(), result.uint_32);
		if (!gid_valid(ctx->gid))
			goto bad_val;
		return 0;

	case Opt_mode:
		ctx->mode = result.uint_32 & 01777U;
		return 0;

	case Opt_size:
		/* memparse() will accept a K/M/G without a digit */
		if (!param->string || !isdigit(param->string[0]))
			goto bad_val;
		ctx->max_size_opt = memparse(param->string, &rest);
		ctx->max_val_type = SIZE_STD;
		if (*rest == '%')
			ctx->max_val_type = SIZE_PERCENT;
		return 0;

	case Opt_nr_inodes:
		/* memparse() will accept a K/M/G without a digit */
		if (!param->string || !isdigit(param->string[0]))
			goto bad_val;
		ctx->nr_inodes = memparse(param->string, &rest);
		return 0;

	case Opt_pagesize:
		ps = memparse(param->string, &rest);
		h = size_to_hstate(ps);
		if (!h) {
			pr_err("Unsupported page size %lu MB\n", ps / SZ_1M);
			return -EINVAL;
		}
		ctx->hstate = h;
		return 0;

	case Opt_min_size:
		/* memparse() will accept a K/M/G without a digit */
		if (!param->string || !isdigit(param->string[0]))
			goto bad_val;
		ctx->min_size_opt = memparse(param->string, &rest);
		ctx->min_val_type = SIZE_STD;
		if (*rest == '%')
			ctx->min_val_type = SIZE_PERCENT;
		return 0;

	default:
		return -EINVAL;
	}

bad_val:
	return invalfc(fc, "Bad value '%s' for mount option '%s'\n",
		      param->string, param->key);
}
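
/*
 * Example invocation (illustrative): a mount such as
 *
 *	mount -t hugetlbfs -o uid=1000,gid=1000,mode=1770,pagesize=2M,\
 *		size=1G,min_size=512M,nr_inodes=64 none /mnt/huge
 *
 * calls this function once per option.  "size" and "min_size" accept
 * K/M/G suffixes or a trailing '%' of the pool; "pagesize" must name a
 * configured hstate, otherwise the mount fails with -EINVAL.
 */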

/*
 * Validate the parsed options.
 */
static int hugetlbfs_validate(struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;

	/*
	 * Use huge page pool size (in hstate) to convert the size
	 * options to number of huge pages.  If NO_SIZE, -1 is returned.
	 */
	ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
						   ctx->max_size_opt,
						   ctx->max_val_type);
	ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
						   ctx->min_size_opt,
						   ctx->min_val_type);

	/*
	 * If max_size was specified, then min_size must be smaller.
	 */
	if (ctx->max_val_type > NO_SIZE &&
	    ctx->min_hpages > ctx->max_hpages) {
		pr_err("Minimum size cannot be greater than maximum size\n");
		return -EINVAL;
	}

	return 0;
}
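
/*
 * Example (illustrative): "min_size=1G,size=512M" with 2 MiB huge pages
 * parses cleanly but is rejected here, since the minimum (512 pages)
 * exceeds the maximum (256 pages).
 */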

static int
hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;
	struct hugetlbfs_sb_info *sbinfo;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->hstate		= ctx->hstate;
	sbinfo->max_inodes	= ctx->nr_inodes;
	sbinfo->free_inodes	= ctx->nr_inodes;
	sbinfo->spool		= NULL;
	sbinfo->uid		= ctx->uid;
	sbinfo->gid		= ctx->gid;
	sbinfo->mode		= ctx->mode;

	/*
	 * Allocate and initialize subpool if maximum or minimum size is
	 * specified.  Any needed reservations (for minimum size) are taken
	 * when the subpool is created.
	 */
	if (ctx->max_hpages != -1 || ctx->min_hpages != -1) {
		sbinfo->spool = hugepage_new_subpool(ctx->hstate,
						     ctx->max_hpages,
						     ctx->min_hpages);
		if (!sbinfo->spool)
			goto out_free;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(ctx->hstate);
	sb->s_blocksize_bits = huge_page_shift(ctx->hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_time_gran = 1;

	/*
	 * Due to the special and limited functionality of hugetlbfs, it does
	 * not work well as a stacking filesystem.
	 */
	sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
	sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx));
	if (!sb->s_root)
		goto out_free;
	return 0;
out_free:
	kfree(sbinfo->spool);
	kfree(sbinfo);
	return -ENOMEM;
}
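
/*
 * Note (behaviour implied above): with a min_size option, the subpool
 * reserves that many huge pages from the global pool at mount time, so
 * the mount itself fails with -ENOMEM if the pool cannot cover it.  The
 * huge page size also becomes the block size seen by userspace, e.g.
 * "stat -f /mnt/huge" reports a 2097152-byte block size for a 2 MiB
 * hstate.
 */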

static int hugetlbfs_get_tree(struct fs_context *fc)
{
	int err = hugetlbfs_validate(fc);
	if (err)
		return err;
	return get_tree_nodev(fc, hugetlbfs_fill_super);
}

static void hugetlbfs_fs_context_free(struct fs_context *fc)
{
	kfree(fc->fs_private);
}

static const struct fs_context_operations hugetlbfs_fs_context_ops = {
	.free		= hugetlbfs_fs_context_free,
	.parse_param	= hugetlbfs_parse_param,
	.get_tree	= hugetlbfs_get_tree,
};

static int hugetlbfs_init_fs_context(struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx;

	ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->max_hpages	= -1;		/* No limit on size by default */
	ctx->nr_inodes	= -1;		/* No limit on number of inodes by default */
	ctx->uid	= current_fsuid();
	ctx->gid	= current_fsgid();
	ctx->mode	= 0755;
	ctx->hstate	= &default_hstate;
	ctx->min_hpages	= -1;		/* No default minimum size */
	ctx->max_val_type = NO_SIZE;
	ctx->min_val_type = NO_SIZE;
	fc->fs_private = ctx;
	fc->ops = &hugetlbfs_fs_context_ops;
	return 0;
}
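
/*
 * Userspace sketch (hedged; assumes Linux 5.2+ and the syscall(2)
 * fallback, since glibc has no wrappers for these calls): the
 * fs_context operations above are driven by the new mount API.  The
 * equivalent of "mount -t hugetlbfs -o pagesize=2M,size=1G none
 * /mnt/huge" would be roughly:
 *
 *	int fsfd = syscall(SYS_fsopen, "hugetlbfs", 0);
 *	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "pagesize", "2M", 0);
 *	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "size", "1G", 0);
 *	syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
 *	int mntfd = syscall(SYS_fsmount, fsfd, 0, 0);
 *	syscall(SYS_move_mount, mntfd, "", AT_FDCWD, "/mnt/huge",
 *		MOVE_MOUNT_F_EMPTY_PATH);
 *
 * Each FSCONFIG_SET_STRING lands in hugetlbfs_parse_param(), and
 * FSCONFIG_CMD_CREATE ends up in hugetlbfs_get_tree().
 */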

static struct file_system_type hugetlbfs_fs_type = {
	.name			= "hugetlbfs",
	.init_fs_context	= hugetlbfs_init_fs_context,
	.parameters		= hugetlb_fs_parameters,
	.kill_sb		= kill_litter_super,
	.fs_flags		= FS_ALLOW_IDMAP,
};

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;
	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}
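
/*
 * Example (illustrative): sysctl_hugetlb_shm_group is exposed as
 * vm.hugetlb_shm_group, so "sysctl -w vm.hugetlb_shm_group=1001" lets
 * members of group 1001 use SHM_HUGETLB without CAP_IPC_LOCK.
 */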

static int get_hstate_idx(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return hstate_index(h);
}
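
/*
 * Example (illustrative): page_size_log is the log2 page-size hint
 * encoded in the mmap()/shmget() flags, e.g. 21 (MAP_HUGE_2MB) selects
 * the 2 MiB hstate and 30 (MAP_HUGE_1GB) the 1 GiB one; 0 selects the
 * default hstate.
 */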

/*
 * Note that size should be aligned to the proper hugepage size by the
 * caller, otherwise hugetlb_reserve_pages() reserves one fewer huge page
 * than intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, int creat_flags,
				int page_size_log)
{
	struct inode *inode;
	struct vfsmount *mnt;
	int hstate_idx;
	struct file *file;

	hstate_idx = get_hstate_idx(page_size_log);
	if (hstate_idx < 0)
		return ERR_PTR(-ENODEV);

	mnt = hugetlbfs_vfsmount[hstate_idx];
	if (!mnt)
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		struct ucounts *ucounts = current_ucounts();

		if (user_shm_lock(size, ucounts)) {
			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is obsolete\n",
				current->comm, current->pid);
			user_shm_unlock(size, ucounts);
		}
		return ERR_PTR(-EPERM);
	}

	file = ERR_PTR(-ENOSPC);
	/* hugetlbfs_vfsmount[] mounts do not use idmapped mounts. */
	inode = hugetlbfs_get_inode(mnt->mnt_sb, &nop_mnt_idmap, NULL,
				    S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out;
	if (creat_flags == HUGETLB_SHMFS_INODE)
		inode->i_flags |= S_PRIVATE;

	inode->i_size = size;
	clear_nlink(inode);

	if (!hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag))
		file = ERR_PTR(-ENOMEM);
	else
		file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
					&hugetlbfs_file_operations);
	if (!IS_ERR(file))
		return file;

	iput(inode);
out:
	return file;
}
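
/*
 * Example caller path (hedged sketch): the HUGETLB_SHMFS_INODE branch
 * above is reached from the System V shm code when userspace does
 * something like
 *
 *	int id = shmget(IPC_PRIVATE, 4UL << 20,
 *			IPC_CREAT | SHM_HUGETLB | 0600);
 *
 * while mmap(MAP_HUGETLB) reaches this function with creat_flags ==
 * HUGETLB_ANONHUGE_INODE.  Either way, the caller is responsible for
 * the size alignment described in the comment above this function.
 */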

static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
{
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT);
	if (IS_ERR(fc)) {
		mnt = ERR_CAST(fc);
	} else {
		struct hugetlbfs_fs_context *ctx = fc->fs_private;
		ctx->hstate = h;
		mnt = fc_mount(fc);
		put_fs_context(fc);
	}
	if (IS_ERR(mnt))
		pr_err("Cannot mount internal hugetlbfs for page size %luK\n",
		       huge_page_size(h) / SZ_1K);
	return mnt;
}

static int __init init_hugetlbfs_fs(void)
{
	struct vfsmount *mnt;
	struct hstate *h;
	int error;
	int i;

	if (!hugepages_supported()) {
		pr_info("disabling because there are no supported hugepage sizes\n");
		return -ENOTSUPP;
	}

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, SLAB_ACCOUNT, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out_free;

	/* default hstate mount is required */
	mnt = mount_one_hugetlbfs(&default_hstate);
	if (IS_ERR(mnt)) {
		error = PTR_ERR(mnt);
		goto out_unreg;
	}
	hugetlbfs_vfsmount[default_hstate_idx] = mnt;

	/* other hstates are optional */
	i = 0;
	for_each_hstate(h) {
		if (i == default_hstate_idx) {
			i++;
			continue;
		}

		mnt = mount_one_hugetlbfs(h);
		if (IS_ERR(mnt))
			hugetlbfs_vfsmount[i] = NULL;
		else
			hugetlbfs_vfsmount[i] = mnt;
		i++;
	}

	return 0;

out_unreg:
	(void)unregister_filesystem(&hugetlbfs_fs_type);
out_free:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
out:
	return error;
}
fs_initcall(init_hugetlbfs_fs)