/*
 * hugetlbpage-backed filesystem. Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/fs_parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>
#include <linux/sched/mm.h>

#define CREATE_TRACE_POINTS
#include <trace/events/hugetlbfs.h>

static const struct address_space_operations hugetlbfs_aops;
static const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

struct hugetlbfs_fs_context {
	struct hstate *hstate;
	unsigned long long max_size_opt;
	unsigned long long min_size_opt;
	long max_hpages;
	long nr_inodes;
	long min_hpages;
	enum hugetlbfs_size_type max_val_type;
	enum hugetlbfs_size_type min_val_type;
	kuid_t uid;
	kgid_t gid;
	umode_t mode;
};

int sysctl_hugetlb_shm_group;

enum hugetlb_param {
	Opt_gid,
	Opt_min_size,
	Opt_mode,
	Opt_nr_inodes,
	Opt_pagesize,
	Opt_size,
	Opt_uid,
};

static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
	fsparam_gid ("gid", Opt_gid),
	fsparam_string("min_size", Opt_min_size),
	fsparam_u32oct("mode", Opt_mode),
	fsparam_string("nr_inodes", Opt_nr_inodes),
	fsparam_string("pagesize", Opt_pagesize),
	fsparam_string("size", Opt_size),
	fsparam_uid ("uid", Opt_uid),
	{}
};
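
/*
 * Example usage (illustrative; the mount point and values are arbitrary):
 *
 *   mount -t hugetlbfs -o pagesize=2M,size=1G,min_size=512M,uid=1000,gid=1000,mode=1770,nr_inodes=64 none /mnt/huge
 *
 * "size" and "min_size" accept a byte count (optionally with a K/M/G
 * suffix) or a percentage of the huge page pool, e.g. size=50%.
 */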

/*
 * Mask used when checking the page offset value passed in via system
 * calls. This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value. The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
	(((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1)))
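
/*
 * Worked example (illustrative): on a 64-bit kernel with 4 KB base pages
 * (PAGE_SHIFT == 12, BITS_PER_LONG == 64), PGOFF_LOFFT_MAX covers bits
 * 51..63 of a page offset. A vm_pgoff with any of those bits set would,
 * once shifted left by PAGE_SHIFT to a byte offset, reach the sign bit of
 * loff_t or beyond, which is exactly the overflow the check below rejects.
 */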

static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);
	vm_flags_t vm_flags;

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range. If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap unwinds (may be important on powerpc
	 * and ia64).
	 */
	vm_flags_set(vma, VM_HUGETLB | VM_DONTEXPAND);
	vma->vm_ops = &hugetlb_vm_ops;

	/*
	 * page based offset in vm_pgoff could be sufficiently large to
	 * overflow a loff_t when converted to byte offset. This can
	 * only happen on architectures where sizeof(loff_t) ==
	 * sizeof(unsigned long). So, only check in those instances.
	 */
	if (sizeof(unsigned long) == sizeof(loff_t)) {
		if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
			return -EINVAL;
	}

	/* must be huge page aligned */
	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* check for overflow */
	if (len < vma_len)
		return -EINVAL;

	inode_lock(inode);
	file_accessed(file);

	ret = -ENOMEM;

	vm_flags = vma->vm_flags;
	/*
	 * for SHM_HUGETLB, the pages are reserved in the shmget() call so skip
	 * reserving here. Note: only for SHM hugetlbfs file, the inode
	 * flag S_PRIVATE is set.
	 */
	if (inode->i_flags & S_PRIVATE)
		vm_flags |= VM_NORESERVE;

	if (hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vm_flags) < 0)
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	inode_unlock(inode);

	return ret;
}
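
/*
 * Illustration (not part of the original source): with a 2 MB hstate and
 * 4 KB base pages, ~huge_page_mask(h) >> PAGE_SHIFT == 511, so the
 * "must be huge page aligned" check in hugetlbfs_file_mmap() accepts only
 * vm_pgoff values that are multiples of 512 base pages, i.e. 2 MB aligned
 * file offsets.
 */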

/*
 * Called under mmap_write_lock(mm).
 */

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
			unsigned long len, unsigned long pgoff,
			unsigned long flags)
{
	unsigned long addr0 = 0;
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if ((flags & MAP_FIXED) && (addr & ~huge_page_mask(h)))
		return -EINVAL;
	if (addr)
		addr0 = ALIGN(addr, huge_page_size(h));

	return mm_get_unmapped_area_vmflags(current->mm, file, addr0, len, pgoff,
					flags, 0);
}

/*
 * Someone wants to read @bytes from a HWPOISON hugetlb @folio from @offset.
 * Returns the maximum number of bytes one can read without touching the 1st raw
 * HWPOISON page.
 *
 * The implementation borrows the iteration logic from copy_page_to_iter*.
 */
static size_t adjust_range_hwpoison(struct folio *folio, size_t offset,
				size_t bytes)
{
	struct page *page;
	size_t n = 0;
	size_t res = 0;

	/* First page to start the loop. */
	page = folio_page(folio, offset / PAGE_SIZE);
	offset %= PAGE_SIZE;
	while (1) {
		if (is_raw_hwpoison_page_in_hugepage(page))
			break;

		/* Safe to read n bytes without touching HWPOISON subpage. */
		n = min(bytes, (size_t)PAGE_SIZE - offset);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page = nth_page(page, 1);
			offset = 0;
		}
	}

	return res;
}
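
/*
 * Worked example (illustrative): if base page 2 of the folio is the raw
 * HWPOISON page, offset is 100 bytes into base page 0 and bytes is
 * 3 * PAGE_SIZE, the loop accumulates (PAGE_SIZE - 100) + PAGE_SIZE and
 * stops at the poisoned page, so the caller may safely copy
 * 2 * PAGE_SIZE - 100 bytes.
 */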

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data. This provides functionality similar to filemap_read().
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct folio *folio;
		size_t nr, copied, want;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the folio */
		folio = filemap_lock_hugetlb_folio(h, mapping, index);
		if (IS_ERR(folio)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			folio_unlock(folio);

			if (!folio_test_hwpoison(folio))
				want = nr;
			else {
				/*
				 * Adjust how many bytes safe to read without
				 * touching the 1st raw HWPOISON page after
				 * offset.
				 */
				want = adjust_range_hwpoison(folio, offset, nr);
				if (want == 0) {
					folio_put(folio);
					retval = -EIO;
					break;
				}
			}

			/*
			 * We have the folio, copy it to user space buffer.
			 */
			copied = copy_folio_to_iter(folio, offset, want, to);
			folio_put(folio);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}

static int hugetlbfs_write_begin(const struct kiocb *iocb,
			struct address_space *mapping,
			loff_t pos, unsigned len,
			struct folio **foliop, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(const struct kiocb *iocb,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct folio *folio, void *fsdata)
{
	BUG();
	return -EINVAL;
}

static void hugetlb_delete_from_page_cache(struct folio *folio)
{
	folio_clear_dirty(folio);
	folio_clear_uptodate(folio);
	filemap_remove_folio(folio);
}

/*
 * Called with i_mmap_rwsem held for inode based vma maps. This makes
 * sure vma (and vm_mm) will not go away. We also hold the hugetlb fault
 * mutex for the page in the mapping. So, we can not race with page being
 * faulted into the vma.
 */
static bool hugetlb_vma_maps_pfn(struct vm_area_struct *vma,
				unsigned long addr, unsigned long pfn)
{
	pte_t *ptep, pte;

	ptep = hugetlb_walk(vma, addr, huge_page_size(hstate_vma(vma)));
	if (!ptep)
		return false;

	pte = huge_ptep_get(vma->vm_mm, addr, ptep);
	if (huge_pte_none(pte) || !pte_present(pte))
		return false;

	if (pte_pfn(pte) == pfn)
		return true;

	return false;
}

/*
 * Can vma_offset_start/vma_offset_end overflow on 32-bit arches?
 * No, because the interval tree returns us only those vmas
 * which overlap the truncated area starting at pgoff,
 * and no vma on a 32-bit arch can span beyond the 4GB.
 */
static unsigned long vma_offset_start(struct vm_area_struct *vma, pgoff_t start)
{
	unsigned long offset = 0;

	if (vma->vm_pgoff < start)
		offset = (start - vma->vm_pgoff) << PAGE_SHIFT;

	return vma->vm_start + offset;
}

static unsigned long vma_offset_end(struct vm_area_struct *vma, pgoff_t end)
{
	unsigned long t_end;

	if (!end)
		return vma->vm_end;

	t_end = ((end - vma->vm_pgoff) << PAGE_SHIFT) + vma->vm_start;
	if (t_end > vma->vm_end)
		t_end = vma->vm_end;
	return t_end;
}
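
/*
 * Illustration (addresses are made up): for a vma with vm_start =
 * 0x7f0000000000 and vm_pgoff = 512, a range starting at pgoff 1024 gives
 * vma_offset_start() == vm_start + ((1024 - 512) << PAGE_SHIFT), i.e. the
 * user address of the first affected base page, while vma_offset_end()
 * clamps the corresponding end address to vm_end.
 */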

/*
 * Called with hugetlb fault mutex held. Therefore, no more mappings to
 * this folio can be created while executing the routine.
 */
static void hugetlb_unmap_file_folio(struct hstate *h,
					struct address_space *mapping,
					struct folio *folio, pgoff_t index)
{
	struct rb_root_cached *root = &mapping->i_mmap;
	struct hugetlb_vma_lock *vma_lock;
	unsigned long pfn = folio_pfn(folio);
	struct vm_area_struct *vma;
	unsigned long v_start;
	unsigned long v_end;
	pgoff_t start, end;

	start = index * pages_per_huge_page(h);
	end = (index + 1) * pages_per_huge_page(h);

	i_mmap_lock_write(mapping);
retry:
	vma_lock = NULL;
	vma_interval_tree_foreach(vma, root, start, end - 1) {
		v_start = vma_offset_start(vma, start);
		v_end = vma_offset_end(vma, end);

		if (!hugetlb_vma_maps_pfn(vma, v_start, pfn))
			continue;

		if (!hugetlb_vma_trylock_write(vma)) {
			vma_lock = vma->vm_private_data;
			/*
			 * If we cannot get the vma lock, we need to drop
			 * i_mmap_rwsem and take the locks in order. First,
			 * take a ref on the vma_lock structure so that
			 * we can be guaranteed it will not go away when
			 * dropping i_mmap_rwsem.
			 */
			kref_get(&vma_lock->refs);
			break;
		}

		unmap_hugepage_range(vma, v_start, v_end, NULL,
				ZAP_FLAG_DROP_MARKER);
		hugetlb_vma_unlock_write(vma);
	}

	i_mmap_unlock_write(mapping);

	if (vma_lock) {
		/*
		 * Wait on vma_lock. We know it is still valid as we have
		 * a reference. We must 'open code' vma locking as we do
		 * not know if vma_lock is still attached to vma.
		 */
		down_write(&vma_lock->rw_sema);
		i_mmap_lock_write(mapping);

		vma = vma_lock->vma;
		if (!vma) {
			/*
			 * If lock is no longer attached to vma, then just
			 * unlock, drop our reference and retry looking for
			 * other vmas.
			 */
			up_write(&vma_lock->rw_sema);
			kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
			goto retry;
		}

		/*
		 * vma_lock is still attached to vma. Check to see if vma
		 * still maps page and if so, unmap.
		 */
		v_start = vma_offset_start(vma, start);
		v_end = vma_offset_end(vma, end);
		if (hugetlb_vma_maps_pfn(vma, v_start, pfn))
			unmap_hugepage_range(vma, v_start, v_end, NULL,
					ZAP_FLAG_DROP_MARKER);

		kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
		hugetlb_vma_unlock_write(vma);

		goto retry;
	}
}

static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
			zap_flags_t zap_flags)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after start should be
	 * unmapped. Note, end is exclusive, whereas the interval tree takes
	 * an inclusive "last".
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end - 1 : ULONG_MAX) {
		unsigned long v_start;
		unsigned long v_end;

		if (!hugetlb_vma_trylock_write(vma))
			continue;

		v_start = vma_offset_start(vma, start);
		v_end = vma_offset_end(vma, end);

		unmap_hugepage_range(vma, v_start, v_end, NULL, zap_flags);

		/*
		 * Note that vma lock only exists for shared/non-private
		 * vmas. Therefore, lock is not held when calling
		 * unmap_hugepage_range for private vmas.
		 */
		hugetlb_vma_unlock_write(vma);
	}
}

/*
 * Called with hugetlb fault mutex held.
 * Returns true if page was actually removed, false otherwise.
 */
static bool remove_inode_single_folio(struct hstate *h, struct inode *inode,
					struct address_space *mapping,
					struct folio *folio, pgoff_t index,
					bool truncate_op)
{
	bool ret = false;

	/*
	 * If folio is mapped, it was faulted in after being
	 * unmapped in caller. Unmap (again) while holding
	 * the fault mutex. The mutex will prevent faults
	 * until we finish removing the folio.
	 */
	if (unlikely(folio_mapped(folio)))
		hugetlb_unmap_file_folio(h, mapping, folio, index);

	folio_lock(folio);
	/*
	 * We must remove the folio from page cache before removing
	 * the region/ reserve map (hugetlb_unreserve_pages). In
	 * rare out of memory conditions, removal of the region/reserve
	 * map could fail. Correspondingly, the subpool and global
	 * reserve usage count can need to be adjusted.
	 */
	VM_BUG_ON_FOLIO(folio_test_hugetlb_restore_reserve(folio), folio);
	hugetlb_delete_from_page_cache(folio);
	ret = true;
	if (!truncate_op) {
		if (unlikely(hugetlb_unreserve_pages(inode, index,
							index + 1, 1)))
			hugetlb_fix_reserve_counts(inode);
	}

	folio_unlock(folio);
	return ret;
}

/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch. There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserve
 *	maps and global counts. Page faults can race with truncation.
 *	During faults, hugetlb_no_page() checks i_size before page allocation,
 *	and again after obtaining page table lock. It will 'back out'
 *	allocations in the truncated range.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserve map
 *	deleted. The region/reserve map for ranges without associated
 *	pages are not modified. Page faults can race with hole punch.
 *	This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t end = lend >> PAGE_SHIFT;
	struct folio_batch fbatch;
	pgoff_t next, index;
	int i, freed = 0;
	bool truncate_op = (lend == LLONG_MAX);

	folio_batch_init(&fbatch);
	next = lstart >> PAGE_SHIFT;
	while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); ++i) {
			struct folio *folio = fbatch.folios[i];
			u32 hash = 0;

			index = folio->index >> huge_page_order(h);
			hash = hugetlb_fault_mutex_hash(mapping, index);
			mutex_lock(&hugetlb_fault_mutex_table[hash]);

			/*
			 * Remove folio that was part of folio_batch.
			 */
			if (remove_inode_single_folio(h, inode, mapping, folio,
						index, truncate_op))
				freed++;

			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode,
				lstart >> huge_page_shift(h),
				LONG_MAX, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	trace_hugetlbfs_evict_inode(inode);
	remove_inode_hugepages(inode, 0, LLONG_MAX);

	/*
	 * Get the resv_map from the address space embedded in the inode.
	 * This is the address space which points to any resv_map allocated
	 * at inode creation time. If this is a device special inode,
	 * i_mapping may not point to the original address space.
	 */
	resv_map = (struct resv_map *)(&inode->i_data)->i_private_data;
	/* Only regular and link inodes have associated reserve maps */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}

static void hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_size_write(inode, offset);
	i_mmap_lock_write(mapping);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0,
				      ZAP_FLAG_DROP_MARKER);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
}

static void hugetlbfs_zero_partial_page(struct hstate *h,
					struct address_space *mapping,
					loff_t start,
					loff_t end)
{
	pgoff_t idx = start >> huge_page_shift(h);
	struct folio *folio;

	folio = filemap_lock_hugetlb_folio(h, mapping, idx);
	if (IS_ERR(folio))
		return;

	start = start & ~huge_page_mask(h);
	end = end & ~huge_page_mask(h);
	if (!end)
		end = huge_page_size(h);

	folio_zero_segment(folio, (size_t)start, (size_t)end);

	folio_unlock(folio);
	folio_put(folio);
}

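/*
 * Worked example (illustrative): punching offset = 1 MB, len = 4 MB on a
 * 2 MB hstate gives hole_start = 2 MB and hole_end = 4 MB. The partial
 * ranges [1 MB, 2 MB) and [4 MB, 5 MB) are zeroed in place, and only the
 * fully covered huge page at [2 MB, 4 MB) is unmapped and removed from
 * the file.
 */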
static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * hole_start and hole_end indicate the full pages within the hole.
	 */
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);

	inode_lock(inode);

	/* protected by i_rwsem */
	if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
		inode_unlock(inode);
		return -EPERM;
	}

	i_mmap_lock_write(mapping);

	/* If range starts before first full page, zero partial page. */
	if (offset < hole_start)
		hugetlbfs_zero_partial_page(h, mapping,
				offset, min(offset + len, hole_start));

	/* Unmap users of full pages in the hole. */
	if (hole_end > hole_start) {
		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
			hugetlb_vmdelete_list(&mapping->i_mmap,
					      hole_start >> PAGE_SHIFT,
					      hole_end >> PAGE_SHIFT, 0);
	}

	/* If range extends beyond last full page, zero partial page. */
	if ((offset + len) > hole_end && (offset + len) > hole_start)
		hugetlbfs_zero_partial_page(h, mapping,
				hole_end, offset + len);

	i_mmap_unlock_write(mapping);

	/* Remove full pages from the file. */
	if (hole_end > hole_start)
		remove_inode_hugepages(inode, hole_start, hole_end);

	inode_unlock(inode);

	return 0;
}

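/*
 * Preallocation example (illustrative): fallocate(fd, 0, 3 MB, 3 MB) on a
 * 2 MB hstate computes start = 1 and end = 3, so huge pages are allocated
 * for indices 1 and 2, covering file offsets [2 MB, 6 MB); the requested
 * range is rounded outwards to whole huge pages.
 */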
static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = hugetlbfs_punch_hole(inode, offset, len);
		goto out_nolock;
	}

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;

	inode_lock(inode);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
		error = -EPERM;
		goto out;
	}

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines.
	 */
	vma_init(&pseudo_vma, mm);
	vm_flags_init(&pseudo_vma, VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct folio *folio;
		unsigned long addr;

		cond_resched();

		/*
		 * fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/* mutex taken here, fault path and hole punch */
		hash = hugetlb_fault_mutex_hash(mapping, index);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		folio = filemap_get_folio(mapping, index << huge_page_order(h));
		if (!IS_ERR(folio)) {
			folio_put(folio);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			continue;
		}

		/*
		 * Allocate folio without setting the avoid_reserve argument.
		 * There certainly are no reserves associated with the
		 * pseudo_vma. However, there could be shared mappings with
		 * reserves for the file at the inode level. If we fallocate
		 * folios in these areas, we need to consume the reserves
		 * to keep reservation accounting consistent.
		 */
		folio = alloc_hugetlb_folio(&pseudo_vma, addr, false);
		if (IS_ERR(folio)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(folio);
			goto out;
		}
		folio_zero_user(folio, addr);
		__folio_mark_uptodate(folio);
		error = hugetlb_add_to_page_cache(folio, mapping, index);
		if (unlikely(error)) {
			restore_reserve_on_error(h, &pseudo_vma, addr, folio);
			folio_put(folio);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		folio_set_hugetlb_migratable(folio);
		/*
		 * folio_unlock because locked by hugetlb_add_to_page_cache()
		 * folio_put() due to reference from alloc_hugetlb_folio()
		 */
		folio_unlock(folio);
		folio_put(folio);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode_set_ctime_current(inode);
out:
	inode_unlock(inode);

out_nolock:
	trace_hugetlbfs_fallocate(inode, mode, offset, len, error);
	return error;
}

static int hugetlbfs_setattr(struct mnt_idmap *idmap,
			     struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

	error = setattr_prepare(idmap, dentry, attr);
	if (error)
		return error;

	trace_hugetlbfs_setattr(inode, dentry, attr);

	if (ia_valid & ATTR_SIZE) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize & ~huge_page_mask(h))
			return -EINVAL;
		/* protected by i_rwsem */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;
		hugetlb_vmtruncate(inode, newsize);
	}

	setattr_copy(idmap, inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_fs_context *ctx)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | ctx->mode;
		inode->i_uid = ctx->uid;
		inode->i_gid = ctx->gid;
		simple_inode_init_ts(inode);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct mnt_idmap *idmap,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map = NULL;

	/*
	 * Reserve maps are only needed for inodes that can have associated
	 * page allocations.
	 */
	if (S_ISREG(mode) || S_ISLNK(mode)) {
		resv_map = resv_map_alloc();
		if (!resv_map)
			return NULL;
	}

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode->i_ino = get_next_ino();
		inode_init_owner(idmap, inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				&hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		simple_inode_init_ts(inode);
		inode->i_mapping->i_private_data = resv_map;
		info->seals = F_SEAL_SEAL;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
		trace_hugetlbfs_alloc_inode(inode, dir, mode);
	} else {
		if (resv_map)
			kref_put(&resv_map->refs, resv_map_release);
	}

	return inode;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int hugetlbfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
			   struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;

	inode = hugetlbfs_get_inode(dir->i_sb, idmap, dir, mode, dev);
	if (!inode)
		return -ENOSPC;
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	d_instantiate(dentry, inode);
	dget(dentry);	/* Extra count - pin the dentry in core */
	return 0;
}

static struct dentry *hugetlbfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
				      struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(idmap, dir, dentry,
				     mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return ERR_PTR(retval);
}

static int hugetlbfs_create(struct mnt_idmap *idmap,
			    struct inode *dir, struct dentry *dentry,
			    umode_t mode, bool excl)
{
	return hugetlbfs_mknod(idmap, dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_tmpfile(struct mnt_idmap *idmap,
			     struct inode *dir, struct file *file,
			     umode_t mode)
{
	struct inode *inode;

	inode = hugetlbfs_get_inode(dir->i_sb, idmap, dir, mode | S_IFREG, 0);
	if (!inode)
		return -ENOSPC;
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	d_tmpfile(file, inode);
	return finish_open_simple(file, 0);
}

static int hugetlbfs_symlink(struct mnt_idmap *idmap,
			     struct inode *dir, struct dentry *dentry,
			     const char *symname)
{
	const umode_t mode = S_IFLNK|S_IRWXUGO;
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, idmap, dir, mode, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));

	return error;
}

#ifdef CONFIG_MIGRATION
static int hugetlbfs_migrate_folio(struct address_space *mapping,
				struct folio *dst, struct folio *src,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, dst, src);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (hugetlb_folio_subpool(src)) {
		hugetlb_set_folio_subpool(dst,
					hugetlb_folio_subpool(src));
		hugetlb_set_folio_subpool(src, NULL);
	}

	folio_migrate_flags(dst, src);

	return MIGRATEPAGE_SUCCESS;
}
#else
#define hugetlbfs_migrate_folio NULL
#endif

static int hugetlbfs_error_remove_folio(struct address_space *mapping,
				struct folio *folio)
{
	return 0;
}

/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
	struct hugepage_subpool *spool = sbinfo->spool;
	unsigned long hpage_size = huge_page_size(sbinfo->hstate);
	unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
	char mod;

	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, sbinfo->gid));
	if (sbinfo->mode != 0755)
		seq_printf(m, ",mode=%o", sbinfo->mode);
	if (sbinfo->max_inodes != -1)
		seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

	hpage_size /= 1024;
	mod = 'K';
	if (hpage_size >= 1024) {
		hpage_size /= 1024;
		mod = 'M';
	}
	seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
	if (spool) {
		if (spool->max_hpages != -1)
			seq_printf(m, ",size=%llu",
				   (unsigned long long)spool->max_hpages << hpage_shift);
		if (spool->min_hpages != -1)
			seq_printf(m, ",min_size=%llu",
				   (unsigned long long)spool->min_hpages << hpage_shift);
	}
	return 0;
}
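
/*
 * For example (illustrative), a 2 MB hstate mounted with size=1G is shown
 * in /proc/mounts with ",pagesize=2M,size=1073741824": the page size is
 * printed with a K/M suffix while size and min_size are printed in bytes.
 */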

static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(d_inode(dentry));
	u64 id = huge_encode_dev(dentry->d_sb->s_dev);

	buf->f_fsid = u64_to_fsid(id);
	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits set, just report 0 or -1 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock_irq(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock_irq(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}

static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}


static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = alloc_inode_sb(sb, hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}
	return &p->vfs_inode;
}

static void hugetlbfs_free_inode(struct inode *inode)
{
	trace_hugetlbfs_free_inode(inode);
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
}

static const struct address_space_operations hugetlbfs_aops = {
	.write_begin = hugetlbfs_write_begin,
	.write_end = hugetlbfs_write_end,
	.dirty_folio = noop_dirty_folio,
	.migrate_folio = hugetlbfs_migrate_folio,
	.error_remove_folio = hugetlbfs_error_remove_folio,
};


static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = foo;

	inode_init_once(&ei->vfs_inode);
}

static const struct file_operations hugetlbfs_file_operations = {
	.read_iter = hugetlbfs_read_iter,
	.mmap = hugetlbfs_file_mmap,
	.fsync = noop_fsync,
	.get_unmapped_area = hugetlb_get_unmapped_area,
	.llseek = default_llseek,
	.fallocate = hugetlbfs_fallocate,
	.fop_flags = FOP_HUGE_PAGES,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create = hugetlbfs_create,
	.lookup = simple_lookup,
	.link = simple_link,
	.unlink = simple_unlink,
	.symlink = hugetlbfs_symlink,
	.mkdir = hugetlbfs_mkdir,
	.rmdir = simple_rmdir,
	.mknod = hugetlbfs_mknod,
	.rename = simple_rename,
	.setattr = hugetlbfs_setattr,
	.tmpfile = hugetlbfs_tmpfile,
};

static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr = hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
	.alloc_inode = hugetlbfs_alloc_inode,
	.free_inode = hugetlbfs_free_inode,
	.destroy_inode = hugetlbfs_destroy_inode,
	.evict_inode = hugetlbfs_evict_inode,
	.statfs = hugetlbfs_statfs,
	.put_super = hugetlbfs_put_super,
	.show_options = hugetlbfs_show_options,
};

/*
 * Convert size option passed from command line to number of huge pages
 * in the pool specified by hstate. Size option could be in bytes
 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
 */
static long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
			 enum hugetlbfs_size_type val_type)
{
	if (val_type == NO_SIZE)
		return -1;

	if (val_type == SIZE_PERCENT) {
		size_opt <<= huge_page_shift(h);
		size_opt *= h->max_huge_pages;
		do_div(size_opt, 100);
	}

	size_opt >>= huge_page_shift(h);
	return size_opt;
}
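
/*
 * Worked example (illustrative): with a 2 MB hstate whose pool holds
 * max_huge_pages = 1000, "size=50%" arrives as size_opt = 50 with
 * val_type == SIZE_PERCENT and converts to (50 << 21) * 1000 / 100
 * shifted back down, i.e. 500 huge pages. A byte value such as "size=1G"
 * (SIZE_STD) is simply divided by the huge page size, giving 512 pages.
 */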

/*
 * Parse one mount parameter.
 */
static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;
	struct fs_parse_result result;
	struct hstate *h;
	char *rest;
	unsigned long ps;
	int opt;

	opt = fs_parse(fc, hugetlb_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_uid:
		ctx->uid = result.uid;
		return 0;

	case Opt_gid:
		ctx->gid = result.gid;
		return 0;

	case Opt_mode:
		ctx->mode = result.uint_32 & 01777U;
		return 0;

	case Opt_size:
		/* memparse() will accept a K/M/G without a digit */
		if (!param->string || !isdigit(param->string[0]))
			goto bad_val;
		ctx->max_size_opt = memparse(param->string, &rest);
		ctx->max_val_type = SIZE_STD;
		if (*rest == '%')
			ctx->max_val_type = SIZE_PERCENT;
		return 0;

	case Opt_nr_inodes:
		/* memparse() will accept a K/M/G without a digit */
		if (!param->string || !isdigit(param->string[0]))
			goto bad_val;
		ctx->nr_inodes = memparse(param->string, &rest);
		return 0;

	case Opt_pagesize:
		ps = memparse(param->string, &rest);
		h = size_to_hstate(ps);
		if (!h) {
			pr_err("Unsupported page size %lu MB\n", ps / SZ_1M);
			return -EINVAL;
		}
		ctx->hstate = h;
		return 0;

	case Opt_min_size:
		/* memparse() will accept a K/M/G without a digit */
		if (!param->string || !isdigit(param->string[0]))
			goto bad_val;
		ctx->min_size_opt = memparse(param->string, &rest);
		ctx->min_val_type = SIZE_STD;
		if (*rest == '%')
			ctx->min_val_type = SIZE_PERCENT;
		return 0;

	default:
		return -EINVAL;
	}

bad_val:
	return invalfc(fc, "Bad value '%s' for mount option '%s'\n",
		      param->string, param->key);
}

/*
 * Validate the parsed options.
 */
static int hugetlbfs_validate(struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;

	/*
	 * Use huge page pool size (in hstate) to convert the size
	 * options to number of huge pages. If NO_SIZE, -1 is returned.
	 */
	ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
						   ctx->max_size_opt,
						   ctx->max_val_type);
	ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
						   ctx->min_size_opt,
						   ctx->min_val_type);

	/*
	 * If max_size was specified, then min_size must be smaller
	 */
	if (ctx->max_val_type > NO_SIZE &&
	    ctx->min_hpages > ctx->max_hpages) {
		pr_err("Minimum size can not be greater than maximum size\n");
		return -EINVAL;
	}

	return 0;
}

static int
hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;
	struct hugetlbfs_sb_info *sbinfo;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->hstate = ctx->hstate;
	sbinfo->max_inodes = ctx->nr_inodes;
	sbinfo->free_inodes = ctx->nr_inodes;
	sbinfo->spool = NULL;
	sbinfo->uid = ctx->uid;
	sbinfo->gid = ctx->gid;
	sbinfo->mode = ctx->mode;

	/*
	 * Allocate and initialize subpool if maximum or minimum size is
	 * specified. Any needed reservations (for minimum size) are taken
	 * when the subpool is created.
	 */
	if (ctx->max_hpages != -1 || ctx->min_hpages != -1) {
		sbinfo->spool = hugepage_new_subpool(ctx->hstate,
						     ctx->max_hpages,
						     ctx->min_hpages);
		if (!sbinfo->spool)
			goto out_free;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(ctx->hstate);
	sb->s_blocksize_bits = huge_page_shift(ctx->hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_d_flags = DCACHE_DONTCACHE;
	sb->s_time_gran = 1;

	/*
	 * Due to the special and limited functionality of hugetlbfs, it does
	 * not work well as a stacking filesystem.
	 */
	sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
	sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx));
	if (!sb->s_root)
		goto out_free;
	return 0;
out_free:
	kfree(sbinfo->spool);
	kfree(sbinfo);
	return -ENOMEM;
}

static int hugetlbfs_get_tree(struct fs_context *fc)
{
	int err = hugetlbfs_validate(fc);
	if (err)
		return err;
	return get_tree_nodev(fc, hugetlbfs_fill_super);
}

static void hugetlbfs_fs_context_free(struct fs_context *fc)
{
	kfree(fc->fs_private);
}

static const struct fs_context_operations hugetlbfs_fs_context_ops = {
	.free = hugetlbfs_fs_context_free,
	.parse_param = hugetlbfs_parse_param,
	.get_tree = hugetlbfs_get_tree,
};

static int hugetlbfs_init_fs_context(struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx;

	ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->max_hpages = -1; /* No limit on size by default */
	ctx->nr_inodes = -1; /* No limit on number of inodes by default */
	ctx->uid = current_fsuid();
	ctx->gid = current_fsgid();
	ctx->mode = 0755;
	ctx->hstate = &default_hstate;
	ctx->min_hpages = -1; /* No default minimum size */
	ctx->max_val_type = NO_SIZE;
	ctx->min_val_type = NO_SIZE;
	fc->fs_private = ctx;
	fc->ops = &hugetlbfs_fs_context_ops;
	return 0;
}

static struct file_system_type hugetlbfs_fs_type = {
	.name = "hugetlbfs",
	.init_fs_context = hugetlbfs_init_fs_context,
	.parameters = hugetlb_fs_parameters,
	.kill_sb = kill_litter_super,
	.fs_flags = FS_ALLOW_IDMAP,
};

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;
	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}

static int get_hstate_idx(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return hstate_index(h);
}

/*
 * Note that the caller should align size to the proper hugepage size;
 * otherwise hugetlb_reserve_pages() reserves one fewer hugepage than
 * intended (e.g. size = 3 MB with a 2 MB hstate reserves only one page).
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, int creat_flags,
				int page_size_log)
{
	struct inode *inode;
	struct vfsmount *mnt;
	int hstate_idx;
	struct file *file;

	hstate_idx = get_hstate_idx(page_size_log);
	if (hstate_idx < 0)
		return ERR_PTR(-ENODEV);

	mnt = hugetlbfs_vfsmount[hstate_idx];
	if (!mnt)
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		struct ucounts *ucounts = current_ucounts();

		if (user_shm_lock(size, ucounts)) {
			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is obsolete\n",
				current->comm, current->pid);
			user_shm_unlock(size, ucounts);
		}
		return ERR_PTR(-EPERM);
	}

	file = ERR_PTR(-ENOSPC);
	/* hugetlbfs_vfsmount[] mounts do not use idmapped mounts. */
	inode = hugetlbfs_get_inode(mnt->mnt_sb, &nop_mnt_idmap, NULL,
				S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out;
	if (creat_flags == HUGETLB_SHMFS_INODE)
		inode->i_flags |= S_PRIVATE;

	inode->i_size = size;
	clear_nlink(inode);

	if (hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag) < 0)
		file = ERR_PTR(-ENOMEM);
	else
		file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
					&hugetlbfs_file_operations);
	if (!IS_ERR(file))
		return file;

	iput(inode);
out:
	return file;
}

static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
{
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT);
	if (IS_ERR(fc)) {
		mnt = ERR_CAST(fc);
	} else {
		struct hugetlbfs_fs_context *ctx = fc->fs_private;
		ctx->hstate = h;
		mnt = fc_mount_longterm(fc);
		put_fs_context(fc);
	}
	if (IS_ERR(mnt))
		pr_err("Cannot mount internal hugetlbfs for page size %luK",
		       huge_page_size(h) / SZ_1K);
	return mnt;
}

static int __init init_hugetlbfs_fs(void)
{
	struct vfsmount *mnt;
	struct hstate *h;
	int error;
	int i;

	if (!hugepages_supported()) {
		pr_info("disabling because there are no supported hugepage sizes\n");
		return -ENOTSUPP;
	}

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, SLAB_ACCOUNT, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out_free;

	/* default hstate mount is required */
	mnt = mount_one_hugetlbfs(&default_hstate);
	if (IS_ERR(mnt)) {
		error = PTR_ERR(mnt);
		goto out_unreg;
	}
	hugetlbfs_vfsmount[default_hstate_idx] = mnt;

	/* other hstates are optional */
	i = 0;
	for_each_hstate(h) {
		if (i == default_hstate_idx) {
			i++;
			continue;
		}

		mnt = mount_one_hugetlbfs(h);
		if (IS_ERR(mnt))
			hugetlbfs_vfsmount[i] = NULL;
		else
			hugetlbfs_vfsmount[i] = mnt;
		i++;
	}

	return 0;

out_unreg:
	(void)unregister_filesystem(&hugetlbfs_fs_type);
out_free:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
out:
	return error;
}
fs_initcall(init_hugetlbfs_fs)