/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/fs_parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>
#include <linux/sched/mm.h>

#define CREATE_TRACE_POINTS
#include <trace/events/hugetlbfs.h>

static const struct address_space_operations hugetlbfs_aops;
static const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

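/*
 * How a "size="/"min_size=" mount option was expressed: not given at all,
 * an absolute byte count, or a percentage of the huge page pool.
 */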
enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

struct hugetlbfs_fs_context {
	struct hstate		*hstate;
	unsigned long long	max_size_opt;
	unsigned long long	min_size_opt;
	long			max_hpages;
	long			nr_inodes;
	long			min_hpages;
	enum hugetlbfs_size_type max_val_type;
	enum hugetlbfs_size_type min_val_type;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
};

int sysctl_hugetlb_shm_group;

enum hugetlb_param {
	Opt_gid,
	Opt_min_size,
	Opt_mode,
	Opt_nr_inodes,
	Opt_pagesize,
	Opt_size,
	Opt_uid,
};

static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
	fsparam_gid("gid", Opt_gid),
	fsparam_string("min_size", Opt_min_size),
	fsparam_u32oct("mode", Opt_mode),
	fsparam_string("nr_inodes", Opt_nr_inodes),
	fsparam_string("pagesize", Opt_pagesize),
	fsparam_string("size", Opt_size),
	fsparam_uid("uid", Opt_uid),
	{}
};

/*
 * Mask used when checking the page offset value passed in via system
 * calls.  This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value.  The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
	(((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1)))
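/* e.g. with 4K pages on a 64-bit arch this masks the upper 13 bits of vm_pgoff */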

static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);
	vm_flags_t vm_flags;

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap unwinds (may be important on powerpc
	 * and ia64).
	 */
	vm_flags_set(vma, VM_HUGETLB | VM_DONTEXPAND);
	vma->vm_ops = &hugetlb_vm_ops;

	/*
	 * page based offset in vm_pgoff could be sufficiently large to
	 * overflow a loff_t when converted to byte offset.  This can
	 * only happen on architectures where sizeof(loff_t) ==
	 * sizeof(unsigned long).  So, only check in those instances.
	 */
	if (sizeof(unsigned long) == sizeof(loff_t)) {
		if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
			return -EINVAL;
	}

	/* must be huge page aligned */
	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* check for overflow */
	if (len < vma_len)
		return -EINVAL;

	inode_lock(inode);
	file_accessed(file);

	ret = -ENOMEM;

	vm_flags = vma->vm_flags;
	/*
	 * for SHM_HUGETLB, the pages are reserved in the shmget() call so skip
	 * reserving here. Note: only for SHM hugetlbfs file, the inode
	 * flag S_PRIVATE is set.
	 */
	if (inode->i_flags & S_PRIVATE)
		vm_flags |= VM_NORESERVE;

	if (!hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vm_flags))
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	inode_unlock(inode);

	return ret;
}

/*
 * Called under mmap_write_lock(mm).
 */

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags)
{
	unsigned long addr0 = 0;
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (flags & MAP_FIXED) {
		if (addr & ~huge_page_mask(h))
			return -EINVAL;
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
	}
	if (addr)
		addr0 = ALIGN(addr, huge_page_size(h));

	return mm_get_unmapped_area_vmflags(current->mm, file, addr0, len, pgoff,
					    flags, 0);
}

/*
 * Someone wants to read @bytes from a HWPOISON hugetlb @folio from @offset.
 * Returns the maximum number of bytes one can read without touching the 1st raw
 * HWPOISON page.
 *
 * The implementation borrows the iteration logic from copy_page_to_iter*.
 */
static size_t adjust_range_hwpoison(struct folio *folio, size_t offset,
				    size_t bytes)
{
	struct page *page;
	size_t n = 0;
	size_t res = 0;

	/* First page to start the loop. */
	page = folio_page(folio, offset / PAGE_SIZE);
	offset %= PAGE_SIZE;
	while (1) {
		if (is_raw_hwpoison_page_in_hugepage(page))
			break;

		/* Safe to read n bytes without touching HWPOISON subpage. */
		n = min(bytes, (size_t)PAGE_SIZE - offset);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page = nth_page(page, 1);
			offset = 0;
		}
	}

	return res;
}

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data. This provides functionality similar to filemap_read().
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct folio *folio;
		size_t nr, copied, want;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the folio */
		folio = filemap_lock_hugetlb_folio(h, mapping, index);
		if (IS_ERR(folio)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			folio_unlock(folio);

			if (!folio_test_hwpoison(folio))
				want = nr;
			else {
				/*
				 * Adjust how many bytes safe to read without
				 * touching the 1st raw HWPOISON page after
				 * offset.
				 */
				want = adjust_range_hwpoison(folio, offset, nr);
				if (want == 0) {
					folio_put(folio);
					retval = -EIO;
					break;
				}
			}

			/*
			 * We have the folio, copy it to user space buffer.
			 */
			copied = copy_folio_to_iter(folio, offset, want, to);
			folio_put(folio);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}

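/*
 * hugetlbfs does not support buffered writes (there is no ->write_iter), so
 * generic code should never reach these hooks.  ->write_begin fails outright
 * and ->write_end is a BUG() to catch any unexpected caller.
 */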
static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len,
			struct folio **foliop, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct folio *folio, void *fsdata)
{
	BUG();
	return -EINVAL;
}

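/*
 * Drop a folio from the page cache.  Dirty and uptodate flags are cleared
 * first; hugetlbfs never writes folios back, so the dirty bit is only
 * bookkeeping at this point.
 */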
static void hugetlb_delete_from_page_cache(struct folio *folio)
{
	folio_clear_dirty(folio);
	folio_clear_uptodate(folio);
	filemap_remove_folio(folio);
}

/*
 * Called with i_mmap_rwsem held for inode based vma maps.  This makes
 * sure vma (and vm_mm) will not go away.  We also hold the hugetlb fault
 * mutex for the page in the mapping.  So, we can not race with page being
 * faulted into the vma.
 */
static bool hugetlb_vma_maps_pfn(struct vm_area_struct *vma,
				unsigned long addr, unsigned long pfn)
{
	pte_t *ptep, pte;

	ptep = hugetlb_walk(vma, addr, huge_page_size(hstate_vma(vma)));
	if (!ptep)
		return false;

	pte = huge_ptep_get(vma->vm_mm, addr, ptep);
	if (huge_pte_none(pte) || !pte_present(pte))
		return false;

	if (pte_pfn(pte) == pfn)
		return true;

	return false;
}

/*
 * Can vma_offset_start/vma_offset_end overflow on 32-bit arches?
 * No, because the interval tree returns us only those vmas
 * which overlap the truncated area starting at pgoff,
 * and no vma on a 32-bit arch can span beyond the 4GB.
 */
static unsigned long vma_offset_start(struct vm_area_struct *vma, pgoff_t start)
{
	unsigned long offset = 0;

	if (vma->vm_pgoff < start)
		offset = (start - vma->vm_pgoff) << PAGE_SHIFT;

	return vma->vm_start + offset;
}

static unsigned long vma_offset_end(struct vm_area_struct *vma, pgoff_t end)
{
	unsigned long t_end;

	if (!end)
		return vma->vm_end;

	t_end = ((end - vma->vm_pgoff) << PAGE_SHIFT) + vma->vm_start;
	if (t_end > vma->vm_end)
		t_end = vma->vm_end;
	return t_end;
}

/*
 * Called with hugetlb fault mutex held.  Therefore, no more mappings to
 * this folio can be created while executing the routine.
 */
static void hugetlb_unmap_file_folio(struct hstate *h,
					struct address_space *mapping,
					struct folio *folio, pgoff_t index)
{
	struct rb_root_cached *root = &mapping->i_mmap;
	struct hugetlb_vma_lock *vma_lock;
	unsigned long pfn = folio_pfn(folio);
	struct vm_area_struct *vma;
	unsigned long v_start;
	unsigned long v_end;
	pgoff_t start, end;

	start = index * pages_per_huge_page(h);
	end = (index + 1) * pages_per_huge_page(h);

	i_mmap_lock_write(mapping);
retry:
	vma_lock = NULL;
	vma_interval_tree_foreach(vma, root, start, end - 1) {
		v_start = vma_offset_start(vma, start);
		v_end = vma_offset_end(vma, end);

		if (!hugetlb_vma_maps_pfn(vma, v_start, pfn))
			continue;

		if (!hugetlb_vma_trylock_write(vma)) {
			vma_lock = vma->vm_private_data;
			/*
			 * If we can not get the vma lock, we need to drop
			 * i_mmap_rwsem and take the locks in order.  First,
			 * take a ref on the vma_lock structure so that
			 * we can be guaranteed it will not go away when
			 * dropping i_mmap_rwsem.
			 */
			kref_get(&vma_lock->refs);
			break;
		}

		unmap_hugepage_range(vma, v_start, v_end, NULL,
				     ZAP_FLAG_DROP_MARKER);
		hugetlb_vma_unlock_write(vma);
	}

	i_mmap_unlock_write(mapping);

	if (vma_lock) {
		/*
		 * Wait on vma_lock.  We know it is still valid as we have
		 * a reference.  We must 'open code' vma locking as we do
		 * not know if vma_lock is still attached to vma.
		 */
		down_write(&vma_lock->rw_sema);
		i_mmap_lock_write(mapping);

		vma = vma_lock->vma;
		if (!vma) {
			/*
			 * If lock is no longer attached to vma, then just
			 * unlock, drop our reference and retry looking for
			 * other vmas.
			 */
			up_write(&vma_lock->rw_sema);
			kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
			goto retry;
		}

		/*
		 * vma_lock is still attached to vma.  Check to see if vma
		 * still maps page and if so, unmap.
		 */
		v_start = vma_offset_start(vma, start);
		v_end = vma_offset_end(vma, end);
		if (hugetlb_vma_maps_pfn(vma, v_start, pfn))
			unmap_hugepage_range(vma, v_start, v_end, NULL,
					     ZAP_FLAG_DROP_MARKER);

		kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
		hugetlb_vma_unlock_write(vma);

		goto retry;
	}
}

static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
		      zap_flags_t zap_flags)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after start should be
	 * unmapped.  Note, end is exclusive, whereas the interval tree takes
	 * an inclusive "last".
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end - 1 : ULONG_MAX) {
		unsigned long v_start;
		unsigned long v_end;

		if (!hugetlb_vma_trylock_write(vma))
			continue;

		v_start = vma_offset_start(vma, start);
		v_end = vma_offset_end(vma, end);

		unmap_hugepage_range(vma, v_start, v_end, NULL, zap_flags);

		/*
		 * Note that vma lock only exists for shared/non-private
		 * vmas.  Therefore, lock is not held when calling
		 * unmap_hugepage_range for private vmas.
		 */
		hugetlb_vma_unlock_write(vma);
	}
}

/*
 * Called with hugetlb fault mutex held.
 * Returns true if page was actually removed, false otherwise.
 */
static bool remove_inode_single_folio(struct hstate *h, struct inode *inode,
					struct address_space *mapping,
					struct folio *folio, pgoff_t index,
					bool truncate_op)
{
	bool ret = false;

	/*
	 * If folio is mapped, it was faulted in after being
	 * unmapped in caller.  Unmap (again) while holding
	 * the fault mutex.  The mutex will prevent faults
	 * until we finish removing the folio.
	 */
	if (unlikely(folio_mapped(folio)))
		hugetlb_unmap_file_folio(h, mapping, folio, index);

	folio_lock(folio);
	/*
	 * We must remove the folio from page cache before removing
	 * the region/reserve map (hugetlb_unreserve_pages).  In
	 * rare out of memory conditions, removal of the region/reserve
	 * map could fail.  Correspondingly, the subpool and global
	 * reserve usage count can need to be adjusted.
	 */
	VM_BUG_ON_FOLIO(folio_test_hugetlb_restore_reserve(folio), folio);
	hugetlb_delete_from_page_cache(folio);
	ret = true;
	if (!truncate_op) {
		if (unlikely(hugetlb_unreserve_pages(inode, index,
							index + 1, 1)))
			hugetlb_fix_reserve_counts(inode);
	}

	folio_unlock(folio);
	return ret;
}

/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserve
 *	maps and global counts.  Page faults can race with truncation.
 *	During faults, hugetlb_no_page() checks i_size before page allocation,
 *	and again after obtaining page table lock.  It will 'back out'
 *	allocations in the truncated range.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserve map
 *	deleted.  The region/reserve map for ranges without associated
 *	pages are not modified.  Page faults can race with hole punch.
 *	This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t end = lend >> PAGE_SHIFT;
	struct folio_batch fbatch;
	pgoff_t next, index;
	int i, freed = 0;
	bool truncate_op = (lend == LLONG_MAX);

	folio_batch_init(&fbatch);
	next = lstart >> PAGE_SHIFT;
	while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); ++i) {
			struct folio *folio = fbatch.folios[i];
			u32 hash = 0;

			index = folio->index >> huge_page_order(h);
			hash = hugetlb_fault_mutex_hash(mapping, index);
			mutex_lock(&hugetlb_fault_mutex_table[hash]);

			/*
			 * Remove folio that was part of folio_batch.
			 */
			if (remove_inode_single_folio(h, inode, mapping, folio,
							index, truncate_op))
				freed++;

			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode,
				lstart >> huge_page_shift(h),
				LONG_MAX, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	trace_hugetlbfs_evict_inode(inode);
	remove_inode_hugepages(inode, 0, LLONG_MAX);

	/*
	 * Get the resv_map from the address space embedded in the inode.
	 * This is the address space which points to any resv_map allocated
	 * at inode creation time.  If this is a device special inode,
	 * i_mapping may not point to the original address space.
	 */
	resv_map = (struct resv_map *)(&inode->i_data)->i_private_data;
	/* Only regular and link inodes have associated reserve maps */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}

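/*
 * Truncate the file down to 'offset' (which must be huge page aligned):
 * shrink i_size first so new faults see the smaller size, unmap every vma
 * beyond the new end, then free the folios in the truncated range.
 */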
static void hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_size_write(inode, offset);
	i_mmap_lock_write(mapping);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0,
				      ZAP_FLAG_DROP_MARKER);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
}

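/*
 * Zero the sub-range [start, end) of the huge page that contains 'start'.
 * Used by hole punch for the partially covered pages at either edge of the
 * hole; if no folio is present there, the range is already a hole and
 * needs no zeroing.
 */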
static void hugetlbfs_zero_partial_page(struct hstate *h,
					struct address_space *mapping,
					loff_t start,
					loff_t end)
{
	pgoff_t idx = start >> huge_page_shift(h);
	struct folio *folio;

	folio = filemap_lock_hugetlb_folio(h, mapping, idx);
	if (IS_ERR(folio))
		return;

	start = start & ~huge_page_mask(h);
	end = end & ~huge_page_mask(h);
	if (!end)
		end = huge_page_size(h);

	folio_zero_segment(folio, (size_t)start, (size_t)end);

	folio_unlock(folio);
	folio_put(folio);
}

static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * hole_start and hole_end indicate the full pages within the hole.
	 */
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);

	inode_lock(inode);

	/* protected by i_rwsem */
	if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
		inode_unlock(inode);
		return -EPERM;
	}

	i_mmap_lock_write(mapping);

	/* If range starts before first full page, zero partial page. */
	if (offset < hole_start)
		hugetlbfs_zero_partial_page(h, mapping,
				offset, min(offset + len, hole_start));

	/* Unmap users of full pages in the hole. */
	if (hole_end > hole_start) {
		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
			hugetlb_vmdelete_list(&mapping->i_mmap,
					      hole_start >> PAGE_SHIFT,
					      hole_end >> PAGE_SHIFT, 0);
	}

	/* If range extends beyond last full page, zero partial page. */
	if ((offset + len) > hole_end && (offset + len) > hole_start)
		hugetlbfs_zero_partial_page(h, mapping,
				hole_end, offset + len);

	i_mmap_unlock_write(mapping);

	/* Remove full pages from the file. */
	if (hole_end > hole_start)
		remove_inode_hugepages(inode, hole_start, hole_end);

	inode_unlock(inode);

	return 0;
}

static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = hugetlbfs_punch_hole(inode, offset, len);
		goto out_nolock;
	}

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;

	inode_lock(inode);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
		error = -EPERM;
		goto out;
	}

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines.
	 */
	vma_init(&pseudo_vma, mm);
	vm_flags_init(&pseudo_vma, VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct folio *folio;
		unsigned long addr;

		cond_resched();

		/*
		 * fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/* mutex taken here, fault path and hole punch */
		hash = hugetlb_fault_mutex_hash(mapping, index);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		folio = filemap_get_folio(mapping, index << huge_page_order(h));
		if (!IS_ERR(folio)) {
			folio_put(folio);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			continue;
		}

		/*
		 * Allocate folio without setting the avoid_reserve argument.
		 * There certainly are no reserves associated with the
		 * pseudo_vma.  However, there could be shared mappings with
		 * reserves for the file at the inode level.  If we fallocate
		 * folios in these areas, we need to consume the reserves
		 * to keep reservation accounting consistent.
		 */
		folio = alloc_hugetlb_folio(&pseudo_vma, addr, false);
		if (IS_ERR(folio)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(folio);
			goto out;
		}
		folio_zero_user(folio, addr);
		__folio_mark_uptodate(folio);
		error = hugetlb_add_to_page_cache(folio, mapping, index);
		if (unlikely(error)) {
			restore_reserve_on_error(h, &pseudo_vma, addr, folio);
			folio_put(folio);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		folio_set_hugetlb_migratable(folio);
		/*
		 * folio_unlock because locked by hugetlb_add_to_page_cache()
		 * folio_put() due to reference from alloc_hugetlb_folio()
		 */
		folio_unlock(folio);
		folio_put(folio);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode_set_ctime_current(inode);
out:
	inode_unlock(inode);

out_nolock:
	trace_hugetlbfs_fallocate(inode, mode, offset, len, error);
	return error;
}

static int hugetlbfs_setattr(struct mnt_idmap *idmap,
			     struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

	error = setattr_prepare(idmap, dentry, attr);
	if (error)
		return error;

	trace_hugetlbfs_setattr(inode, dentry, attr);

	if (ia_valid & ATTR_SIZE) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize & ~huge_page_mask(h))
			return -EINVAL;
		/* protected by i_rwsem */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;
		hugetlb_vmtruncate(inode, newsize);
	}

	setattr_copy(idmap, inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_fs_context *ctx)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | ctx->mode;
		inode->i_uid = ctx->uid;
		inode->i_gid = ctx->gid;
		simple_inode_init_ts(inode);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct mnt_idmap *idmap,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map = NULL;

	/*
	 * Reserve maps are only needed for inodes that can have associated
	 * page allocations.
	 */
	if (S_ISREG(mode) || S_ISLNK(mode)) {
		resv_map = resv_map_alloc();
		if (!resv_map)
			return NULL;
	}

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode->i_ino = get_next_ino();
		inode_init_owner(idmap, inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				  &hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		simple_inode_init_ts(inode);
		inode->i_mapping->i_private_data = resv_map;
		info->seals = F_SEAL_SEAL;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
		trace_hugetlbfs_alloc_inode(inode, dir, mode);
	} else {
		if (resv_map)
			kref_put(&resv_map->refs, resv_map_release);
	}

	return inode;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int hugetlbfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
			   struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;

	inode = hugetlbfs_get_inode(dir->i_sb, idmap, dir, mode, dev);
	if (!inode)
		return -ENOSPC;
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	d_instantiate(dentry, inode);
	dget(dentry);	/* Extra count - pin the dentry in core */
	return 0;
}

static struct dentry *hugetlbfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
				      struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(idmap, dir, dentry,
				     mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return ERR_PTR(retval);
}

static int hugetlbfs_create(struct mnt_idmap *idmap,
			    struct inode *dir, struct dentry *dentry,
			    umode_t mode, bool excl)
{
	return hugetlbfs_mknod(idmap, dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_tmpfile(struct mnt_idmap *idmap,
			     struct inode *dir, struct file *file,
			     umode_t mode)
{
	struct inode *inode;

	inode = hugetlbfs_get_inode(dir->i_sb, idmap, dir, mode | S_IFREG, 0);
	if (!inode)
		return -ENOSPC;
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	d_tmpfile(file, inode);
	return finish_open_simple(file, 0);
}

static int hugetlbfs_symlink(struct mnt_idmap *idmap,
			     struct inode *dir, struct dentry *dentry,
			     const char *symname)
{
	const umode_t mode = S_IFLNK | S_IRWXUGO;
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, idmap, dir, mode, 0);
	if (inode) {
		int l = strlen(symname) + 1;

		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));

	return error;
}

#ifdef CONFIG_MIGRATION
static int hugetlbfs_migrate_folio(struct address_space *mapping,
				struct folio *dst, struct folio *src,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, dst, src);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (hugetlb_folio_subpool(src)) {
		hugetlb_set_folio_subpool(dst,
					hugetlb_folio_subpool(src));
		hugetlb_set_folio_subpool(src, NULL);
	}

	folio_migrate_flags(dst, src);

	return MIGRATEPAGE_SUCCESS;
}
#else
#define hugetlbfs_migrate_folio NULL
#endif

static int hugetlbfs_error_remove_folio(struct address_space *mapping,
				struct folio *folio)
{
	return 0;
}

/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
	struct hugepage_subpool *spool = sbinfo->spool;
	unsigned long hpage_size = huge_page_size(sbinfo->hstate);
	unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
	char mod;

	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, sbinfo->gid));
	if (sbinfo->mode != 0755)
		seq_printf(m, ",mode=%o", sbinfo->mode);
	if (sbinfo->max_inodes != -1)
		seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

	hpage_size /= 1024;
	mod = 'K';
	if (hpage_size >= 1024) {
		hpage_size /= 1024;
		mod = 'M';
	}
	seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
	if (spool) {
		if (spool->max_hpages != -1)
			seq_printf(m, ",size=%llu",
				   (unsigned long long)spool->max_hpages << hpage_shift);
		if (spool->min_hpages != -1)
			seq_printf(m, ",min_size=%llu",
				   (unsigned long long)spool->min_hpages << hpage_shift);
	}
	return 0;
}

static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(d_inode(dentry));
	u64 id = huge_encode_dev(dentry->d_sb->s_dev);

	buf->f_fsid = u64_to_fsid(id);
	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/*
		 * If no limits are set, just report 0 or -1 for max/free/used
		 * blocks, like simple_statfs().
		 */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock_irq(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock_irq(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}

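/*
 * Inode limit accounting.  A negative free_inodes value (nr_inodes was not
 * set, so max_inodes/free_inodes stay -1) means "no limit", in which case
 * the counters are left untouched.
 */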
static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}


static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = alloc_inode_sb(sb, hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}
	return &p->vfs_inode;
}

static void hugetlbfs_free_inode(struct inode *inode)
{
	trace_hugetlbfs_free_inode(inode);
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
}

static const struct address_space_operations hugetlbfs_aops = {
	.write_begin		= hugetlbfs_write_begin,
	.write_end		= hugetlbfs_write_end,
	.dirty_folio		= noop_dirty_folio,
	.migrate_folio		= hugetlbfs_migrate_folio,
	.error_remove_folio	= hugetlbfs_error_remove_folio,
};


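/* Slab constructor: runs when a cache object is first created, not on every allocation. */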
static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = foo;

	inode_init_once(&ei->vfs_inode);
}

static const struct file_operations hugetlbfs_file_operations = {
	.read_iter		= hugetlbfs_read_iter,
	.mmap			= hugetlbfs_file_mmap,
	.fsync			= noop_fsync,
	.get_unmapped_area	= hugetlb_get_unmapped_area,
	.llseek			= default_llseek,
	.fallocate		= hugetlbfs_fallocate,
	.fop_flags		= FOP_HUGE_PAGES,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create		= hugetlbfs_create,
	.lookup		= simple_lookup,
	.link		= simple_link,
	.unlink		= simple_unlink,
	.symlink	= hugetlbfs_symlink,
	.mkdir		= hugetlbfs_mkdir,
	.rmdir		= simple_rmdir,
	.mknod		= hugetlbfs_mknod,
	.rename		= simple_rename,
	.setattr	= hugetlbfs_setattr,
	.tmpfile	= hugetlbfs_tmpfile,
};

static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr	= hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
	.alloc_inode	= hugetlbfs_alloc_inode,
	.free_inode	= hugetlbfs_free_inode,
	.destroy_inode	= hugetlbfs_destroy_inode,
	.evict_inode	= hugetlbfs_evict_inode,
	.statfs		= hugetlbfs_statfs,
	.put_super	= hugetlbfs_put_super,
	.show_options	= hugetlbfs_show_options,
};

/*
 * Convert size option passed from command line to number of huge pages
 * in the pool specified by hstate.  Size option could be in bytes
 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
 */
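/* e.g. "size=50%" with 2MB huge pages and a 512-page pool yields 256 pages. */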
static long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
			 enum hugetlbfs_size_type val_type)
{
	if (val_type == NO_SIZE)
		return -1;

	if (val_type == SIZE_PERCENT) {
		size_opt <<= huge_page_shift(h);
		size_opt *= h->max_huge_pages;
		do_div(size_opt, 100);
	}

	size_opt >>= huge_page_shift(h);
	return size_opt;
}

/*
 * Parse one mount parameter.
 */
static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;
	struct fs_parse_result result;
	struct hstate *h;
	char *rest;
	unsigned long ps;
	int opt;

	opt = fs_parse(fc, hugetlb_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_uid:
		ctx->uid = result.uid;
		return 0;

	case Opt_gid:
		ctx->gid = result.gid;
		return 0;

	case Opt_mode:
		ctx->mode = result.uint_32 & 01777U;
		return 0;

	case Opt_size:
		/* memparse() will accept a K/M/G without a digit */
		if (!param->string || !isdigit(param->string[0]))
			goto bad_val;
		ctx->max_size_opt = memparse(param->string, &rest);
		ctx->max_val_type = SIZE_STD;
		if (*rest == '%')
			ctx->max_val_type = SIZE_PERCENT;
		return 0;

	case Opt_nr_inodes:
		/* memparse() will accept a K/M/G without a digit */
		if (!param->string || !isdigit(param->string[0]))
			goto bad_val;
		ctx->nr_inodes = memparse(param->string, &rest);
		return 0;

	case Opt_pagesize:
		ps = memparse(param->string, &rest);
		h = size_to_hstate(ps);
		if (!h) {
			pr_err("Unsupported page size %lu MB\n", ps / SZ_1M);
			return -EINVAL;
		}
		ctx->hstate = h;
		return 0;

	case Opt_min_size:
		/* memparse() will accept a K/M/G without a digit */
		if (!param->string || !isdigit(param->string[0]))
			goto bad_val;
		ctx->min_size_opt = memparse(param->string, &rest);
		ctx->min_val_type = SIZE_STD;
		if (*rest == '%')
			ctx->min_val_type = SIZE_PERCENT;
		return 0;

	default:
		return -EINVAL;
	}

bad_val:
	return invalfc(fc, "Bad value '%s' for mount option '%s'\n",
		       param->string, param->key);
}

/*
 * Validate the parsed options.
 */
static int hugetlbfs_validate(struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;

	/*
	 * Use huge page pool size (in hstate) to convert the size
	 * options to number of huge pages.  If NO_SIZE, -1 is returned.
	 */
	ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
						   ctx->max_size_opt,
						   ctx->max_val_type);
	ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
						   ctx->min_size_opt,
						   ctx->min_val_type);

	/*
	 * If max_size was specified, then min_size must be smaller
	 */
	if (ctx->max_val_type > NO_SIZE &&
	    ctx->min_hpages > ctx->max_hpages) {
		pr_err("Minimum size can not be greater than maximum size\n");
		return -EINVAL;
	}

	return 0;
}

static int
hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;
	struct hugetlbfs_sb_info *sbinfo;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->hstate = ctx->hstate;
	sbinfo->max_inodes = ctx->nr_inodes;
	sbinfo->free_inodes = ctx->nr_inodes;
	sbinfo->spool = NULL;
	sbinfo->uid = ctx->uid;
	sbinfo->gid = ctx->gid;
	sbinfo->mode = ctx->mode;

	/*
	 * Allocate and initialize subpool if maximum or minimum size is
	 * specified.  Any needed reservations (for minimum size) are taken
	 * when the subpool is created.
	 */
	if (ctx->max_hpages != -1 || ctx->min_hpages != -1) {
		sbinfo->spool = hugepage_new_subpool(ctx->hstate,
						     ctx->max_hpages,
						     ctx->min_hpages);
		if (!sbinfo->spool)
			goto out_free;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(ctx->hstate);
	sb->s_blocksize_bits = huge_page_shift(ctx->hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_time_gran = 1;

	/*
	 * Due to the special and limited functionality of hugetlbfs, it does
	 * not work well as a stacking filesystem.
	 */
	sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
	sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx));
	if (!sb->s_root)
		goto out_free;
	return 0;
out_free:
	kfree(sbinfo->spool);
	kfree(sbinfo);
	return -ENOMEM;
}

static int hugetlbfs_get_tree(struct fs_context *fc)
{
	int err = hugetlbfs_validate(fc);

	if (err)
		return err;
	return get_tree_nodev(fc, hugetlbfs_fill_super);
}

static void hugetlbfs_fs_context_free(struct fs_context *fc)
{
	kfree(fc->fs_private);
}

static const struct fs_context_operations hugetlbfs_fs_context_ops = {
	.free		= hugetlbfs_fs_context_free,
	.parse_param	= hugetlbfs_parse_param,
	.get_tree	= hugetlbfs_get_tree,
};

static int hugetlbfs_init_fs_context(struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx;

	ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->max_hpages = -1;		/* No limit on size by default */
	ctx->nr_inodes = -1;		/* No limit on number of inodes by default */
	ctx->uid = current_fsuid();
	ctx->gid = current_fsgid();
	ctx->mode = 0755;
	ctx->hstate = &default_hstate;
	ctx->min_hpages = -1;		/* No default minimum size */
	ctx->max_val_type = NO_SIZE;
	ctx->min_val_type = NO_SIZE;
	fc->fs_private = ctx;
	fc->ops = &hugetlbfs_fs_context_ops;
	return 0;
}

static struct file_system_type hugetlbfs_fs_type = {
	.name			= "hugetlbfs",
	.init_fs_context	= hugetlbfs_init_fs_context,
	.parameters		= hugetlb_fs_parameters,
	.kill_sb		= kill_litter_super,
	.fs_flags		= FS_ALLOW_IDMAP,
};

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

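/*
 * SHM_HUGETLB segments may only be created by tasks with CAP_IPC_LOCK or
 * membership in the group configured via the hugetlb_shm_group sysctl.
 */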
static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;

	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}

static int get_hstate_idx(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return hstate_index(h);
}

/*
 * Note that size should be aligned to proper hugepage size in caller side,
 * otherwise hugetlb_reserve_pages reserves one less hugepages than intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, int creat_flags,
				int page_size_log)
{
	struct inode *inode;
	struct vfsmount *mnt;
	int hstate_idx;
	struct file *file;

	hstate_idx = get_hstate_idx(page_size_log);
	if (hstate_idx < 0)
		return ERR_PTR(-ENODEV);

	mnt = hugetlbfs_vfsmount[hstate_idx];
	if (!mnt)
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		struct ucounts *ucounts = current_ucounts();

		if (user_shm_lock(size, ucounts)) {
			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is obsolete\n",
				     current->comm, current->pid);
			user_shm_unlock(size, ucounts);
		}
		return ERR_PTR(-EPERM);
	}

	file = ERR_PTR(-ENOSPC);
	/* hugetlbfs_vfsmount[] mounts do not use idmapped mounts. */
	inode = hugetlbfs_get_inode(mnt->mnt_sb, &nop_mnt_idmap, NULL,
				    S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out;
	if (creat_flags == HUGETLB_SHMFS_INODE)
		inode->i_flags |= S_PRIVATE;

	inode->i_size = size;
	clear_nlink(inode);

	if (!hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag))
		file = ERR_PTR(-ENOMEM);
	else
		file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
					 &hugetlbfs_file_operations);
	if (!IS_ERR(file))
		return file;

	iput(inode);
out:
	return file;
}

static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
{
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT);
	if (IS_ERR(fc)) {
		mnt = ERR_CAST(fc);
	} else {
		struct hugetlbfs_fs_context *ctx = fc->fs_private;

		ctx->hstate = h;
		mnt = fc_mount(fc);
		put_fs_context(fc);
	}
	if (IS_ERR(mnt))
		pr_err("Cannot mount internal hugetlbfs for page size %luK",
		       huge_page_size(h) / SZ_1K);
	return mnt;
}

static int __init init_hugetlbfs_fs(void)
{
	struct vfsmount *mnt;
	struct hstate *h;
	int error;
	int i;

	if (!hugepages_supported()) {
		pr_info("disabling because there are no supported hugepage sizes\n");
		return -ENOTSUPP;
	}

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, SLAB_ACCOUNT, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out_free;

	/* default hstate mount is required */
	mnt = mount_one_hugetlbfs(&default_hstate);
	if (IS_ERR(mnt)) {
		error = PTR_ERR(mnt);
		goto out_unreg;
	}
	hugetlbfs_vfsmount[default_hstate_idx] = mnt;

	/* other hstates are optional */
	i = 0;
	for_each_hstate(h) {
		if (i == default_hstate_idx) {
			i++;
			continue;
		}

		mnt = mount_one_hugetlbfs(h);
		if (IS_ERR(mnt))
			hugetlbfs_vfsmount[i] = NULL;
		else
			hugetlbfs_vfsmount[i] = mnt;
		i++;
	}

	return 0;

out_unreg:
	(void)unregister_filesystem(&hugetlbfs_fs_type);
out_free:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
out:
	return error;
}
fs_initcall(init_hugetlbfs_fs)