/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/fs_parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>
#include <linux/sched/mm.h>

#define CREATE_TRACE_POINTS
#include <trace/events/hugetlbfs.h>

static const struct address_space_operations hugetlbfs_aops;
static const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

struct hugetlbfs_fs_context {
        struct hstate           *hstate;
        unsigned long long      max_size_opt;
        unsigned long long      min_size_opt;
        long                    max_hpages;
        long                    nr_inodes;
        long                    min_hpages;
        enum hugetlbfs_size_type max_val_type;
        enum hugetlbfs_size_type min_val_type;
        kuid_t                  uid;
        kgid_t                  gid;
        umode_t                 mode;
};

int sysctl_hugetlb_shm_group;

enum hugetlb_param {
        Opt_gid,
        Opt_min_size,
        Opt_mode,
        Opt_nr_inodes,
        Opt_pagesize,
        Opt_size,
        Opt_uid,
};

static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
        fsparam_gid   ("gid",           Opt_gid),
        fsparam_string("min_size",      Opt_min_size),
        fsparam_u32oct("mode",          Opt_mode),
        fsparam_string("nr_inodes",     Opt_nr_inodes),
        fsparam_string("pagesize",      Opt_pagesize),
        fsparam_string("size",          Opt_size),
        fsparam_uid   ("uid",           Opt_uid),
        {}
};

/*
 * Mask used when checking the page offset value passed in via system
 * calls.  This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value.  The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
        (((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1)))

static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file_inode(file);
        loff_t len, vma_len;
        int ret;
        struct hstate *h = hstate_file(file);
        vm_flags_t vm_flags;

        /*
         * vma address alignment (but not the pgoff alignment) has
         * already been checked by prepare_hugepage_range.  If you add
         * any error returns here, do so after setting VM_HUGETLB, so
         * is_vm_hugetlb_page tests below unmap_region go the right
         * way when do_mmap unwinds (may be important on powerpc
         * and ia64).
         */
        vm_flags_set(vma, VM_HUGETLB | VM_DONTEXPAND);
        vma->vm_ops = &hugetlb_vm_ops;

        /*
         * page based offset in vm_pgoff could be sufficiently large to
         * overflow a loff_t when converted to byte offset.  This can
         * only happen on architectures where sizeof(loff_t) ==
         * sizeof(unsigned long).  So, only check in those instances.
         */
        if (sizeof(unsigned long) == sizeof(loff_t)) {
                if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
                        return -EINVAL;
        }

        /* must be huge page aligned */
        if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
                return -EINVAL;

        vma_len = (loff_t)(vma->vm_end - vma->vm_start);
        len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
        /* check for overflow */
        if (len < vma_len)
                return -EINVAL;

        inode_lock(inode);
        file_accessed(file);

        ret = -ENOMEM;

        vm_flags = vma->vm_flags;
        /*
         * for SHM_HUGETLB, the pages are reserved in the shmget() call so skip
         * reserving here.  Note: only for SHM hugetlbfs file, the inode
         * flag S_PRIVATE is set.
         */
        if (inode->i_flags & S_PRIVATE)
                vm_flags |= VM_NORESERVE;

        if (!hugetlb_reserve_pages(inode,
                        vma->vm_pgoff >> huge_page_order(h),
                        len >> huge_page_shift(h), vma,
                        vm_flags))
                goto out;

        ret = 0;
        if (vma->vm_flags & VM_WRITE && inode->i_size < len)
                i_size_write(inode, len);
out:
        inode_unlock(inode);

        return ret;
}

/*
 * Called under mmap_write_lock(mm).
 */

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                          unsigned long len, unsigned long pgoff,
                          unsigned long flags)
{
        unsigned long addr0 = 0;
        struct hstate *h = hstate_file(file);

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (flags & MAP_FIXED) {
                if (addr & ~huge_page_mask(h))
                        return -EINVAL;
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
        }
        if (addr)
                addr0 = ALIGN(addr, huge_page_size(h));

        return mm_get_unmapped_area_vmflags(current->mm, file, addr0, len, pgoff,
                                            flags, 0);
}

/*
 * Someone wants to read @bytes from a HWPOISON hugetlb @folio from @offset.
 * Returns the maximum number of bytes one can read without touching the 1st raw
 * HWPOISON page.
 *
 * The implementation borrows the iteration logic from copy_page_to_iter*.
 */
static size_t adjust_range_hwpoison(struct folio *folio, size_t offset,
                size_t bytes)
{
        struct page *page;
        size_t n = 0;
        size_t res = 0;

        /* First page to start the loop. */
        page = folio_page(folio, offset / PAGE_SIZE);
        offset %= PAGE_SIZE;
        while (1) {
                if (is_raw_hwpoison_page_in_hugepage(page))
                        break;

                /* Safe to read n bytes without touching HWPOISON subpage. */
                n = min(bytes, (size_t)PAGE_SIZE - offset);
                res += n;
                bytes -= n;
                if (!bytes || !n)
                        break;
                offset += n;
                if (offset == PAGE_SIZE) {
                        page = nth_page(page, 1);
                        offset = 0;
                }
        }

        return res;
}

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data.  This provides functionality similar to filemap_read().
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct file *file = iocb->ki_filp;
        struct hstate *h = hstate_file(file);
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        unsigned long index = iocb->ki_pos >> huge_page_shift(h);
        unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
        unsigned long end_index;
        loff_t isize;
        ssize_t retval = 0;

        while (iov_iter_count(to)) {
                struct folio *folio;
                size_t nr, copied, want;

                /* nr is the maximum number of bytes to copy from this page */
                nr = huge_page_size(h);
                isize = i_size_read(inode);
                if (!isize)
                        break;
                end_index = (isize - 1) >> huge_page_shift(h);
                if (index > end_index)
                        break;
                if (index == end_index) {
                        nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
                        if (nr <= offset)
                                break;
                }
                nr = nr - offset;

                /* Find the folio */
                folio = filemap_lock_hugetlb_folio(h, mapping, index);
                if (IS_ERR(folio)) {
                        /*
                         * We have a HOLE, zero out the user-buffer for the
                         * length of the hole or request.
                         */
                        copied = iov_iter_zero(nr, to);
                } else {
                        folio_unlock(folio);

                        if (!folio_test_hwpoison(folio))
                                want = nr;
                        else {
                                /*
                                 * Adjust how many bytes are safe to read
                                 * without touching the 1st raw HWPOISON page
                                 * after offset.
                                 */
                                want = adjust_range_hwpoison(folio, offset, nr);
                                if (want == 0) {
                                        folio_put(folio);
                                        retval = -EIO;
                                        break;
                                }
                        }

                        /*
                         * We have the folio, copy it to user space buffer.
                         */
                        copied = copy_folio_to_iter(folio, offset, want, to);
                        folio_put(folio);
                }
                offset += copied;
                retval += copied;
                if (copied != nr && iov_iter_count(to)) {
                        if (!retval)
                                retval = -EFAULT;
                        break;
                }
                index += offset >> huge_page_shift(h);
                offset &= ~huge_page_mask(h);
        }
        iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
        return retval;
}

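/*
 * hugetlbfs does not support buffered writes: pages enter the page
 * cache only through mmap faults or fallocate().  write_begin always
 * fails, and write_end should therefore be unreachable.
 */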
static int hugetlbfs_write_begin(struct file *file,
                        struct address_space *mapping,
                        loff_t pos, unsigned len,
                        struct folio **foliop, void **fsdata)
{
        return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct folio *folio, void *fsdata)
{
        BUG();
        return -EINVAL;
}

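/*
 * Remove a hugetlb folio from the page cache, clearing its dirty and
 * uptodate flags first.
 */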
static void hugetlb_delete_from_page_cache(struct folio *folio)
{
        folio_clear_dirty(folio);
        folio_clear_uptodate(folio);
        filemap_remove_folio(folio);
}

/*
 * Called with i_mmap_rwsem held for inode based vma maps.  This makes
 * sure vma (and vm_mm) will not go away.  We also hold the hugetlb fault
 * mutex for the page in the mapping.  So, we can not race with page being
 * faulted into the vma.
 */
static bool hugetlb_vma_maps_pfn(struct vm_area_struct *vma,
                                unsigned long addr, unsigned long pfn)
{
        pte_t *ptep, pte;

        ptep = hugetlb_walk(vma, addr, huge_page_size(hstate_vma(vma)));
        if (!ptep)
                return false;

        pte = huge_ptep_get(vma->vm_mm, addr, ptep);
        if (huge_pte_none(pte) || !pte_present(pte))
                return false;

        if (pte_pfn(pte) == pfn)
                return true;

        return false;
}

/*
 * Can vma_offset_start/vma_offset_end overflow on 32-bit arches?
 * No, because the interval tree returns us only those vmas
 * which overlap the truncated area starting at pgoff,
 * and no vma on a 32-bit arch can span beyond 4GB.
 */
static unsigned long vma_offset_start(struct vm_area_struct *vma, pgoff_t start)
{
        unsigned long offset = 0;

        if (vma->vm_pgoff < start)
                offset = (start - vma->vm_pgoff) << PAGE_SHIFT;

        return vma->vm_start + offset;
}

static unsigned long vma_offset_end(struct vm_area_struct *vma, pgoff_t end)
{
        unsigned long t_end;

        if (!end)
                return vma->vm_end;

        t_end = ((end - vma->vm_pgoff) << PAGE_SHIFT) + vma->vm_start;
        if (t_end > vma->vm_end)
                t_end = vma->vm_end;
        return t_end;
}

/*
 * Called with hugetlb fault mutex held.  Therefore, no more mappings to
 * this folio can be created while executing the routine.
 */
static void hugetlb_unmap_file_folio(struct hstate *h,
                                        struct address_space *mapping,
                                        struct folio *folio, pgoff_t index)
{
        struct rb_root_cached *root = &mapping->i_mmap;
        struct hugetlb_vma_lock *vma_lock;
        unsigned long pfn = folio_pfn(folio);
        struct vm_area_struct *vma;
        unsigned long v_start;
        unsigned long v_end;
        pgoff_t start, end;

        start = index * pages_per_huge_page(h);
        end = (index + 1) * pages_per_huge_page(h);

        i_mmap_lock_write(mapping);
retry:
        vma_lock = NULL;
        vma_interval_tree_foreach(vma, root, start, end - 1) {
                v_start = vma_offset_start(vma, start);
                v_end = vma_offset_end(vma, end);

                if (!hugetlb_vma_maps_pfn(vma, v_start, pfn))
                        continue;

                if (!hugetlb_vma_trylock_write(vma)) {
                        vma_lock = vma->vm_private_data;
                        /*
                         * If we can not get the vma lock, we need to drop
                         * i_mmap_rwsem and take locks in order.  First,
                         * take a ref on the vma_lock structure so that
                         * we can be guaranteed it will not go away when
                         * dropping i_mmap_rwsem.
                         */
                        kref_get(&vma_lock->refs);
                        break;
                }

                unmap_hugepage_range(vma, v_start, v_end, NULL,
                                     ZAP_FLAG_DROP_MARKER);
                hugetlb_vma_unlock_write(vma);
        }

        i_mmap_unlock_write(mapping);

        if (vma_lock) {
                /*
                 * Wait on vma_lock.  We know it is still valid as we have
                 * a reference.  We must 'open code' vma locking as we do
                 * not know if vma_lock is still attached to vma.
                 */
                down_write(&vma_lock->rw_sema);
                i_mmap_lock_write(mapping);

                vma = vma_lock->vma;
                if (!vma) {
                        /*
                         * If lock is no longer attached to vma, then just
                         * unlock, drop our reference and retry looking for
                         * other vmas.
                         */
                        up_write(&vma_lock->rw_sema);
                        kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
                        goto retry;
                }

                /*
                 * vma_lock is still attached to vma.  Check to see if vma
                 * still maps page and if so, unmap.
                 */
                v_start = vma_offset_start(vma, start);
                v_end = vma_offset_end(vma, end);
                if (hugetlb_vma_maps_pfn(vma, v_start, pfn))
                        unmap_hugepage_range(vma, v_start, v_end, NULL,
                                             ZAP_FLAG_DROP_MARKER);

                kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
                hugetlb_vma_unlock_write(vma);

                goto retry;
        }
}

static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
                      zap_flags_t zap_flags)
{
        struct vm_area_struct *vma;

        /*
         * end == 0 indicates that the entire range after start should be
         * unmapped.  Note, end is exclusive, whereas the interval tree takes
         * an inclusive "last".
         */
        vma_interval_tree_foreach(vma, root, start, end ? end - 1 : ULONG_MAX) {
                unsigned long v_start;
                unsigned long v_end;

                if (!hugetlb_vma_trylock_write(vma))
                        continue;

                v_start = vma_offset_start(vma, start);
                v_end = vma_offset_end(vma, end);

                unmap_hugepage_range(vma, v_start, v_end, NULL, zap_flags);

                /*
                 * Note that vma lock only exists for shared/non-private
                 * vmas.  Therefore, lock is not held when calling
                 * unmap_hugepage_range for private vmas.
                 */
                hugetlb_vma_unlock_write(vma);
        }
}

/*
 * Called with hugetlb fault mutex held.
 * Returns true if page was actually removed, false otherwise.
 */
static bool remove_inode_single_folio(struct hstate *h, struct inode *inode,
                                        struct address_space *mapping,
                                        struct folio *folio, pgoff_t index,
                                        bool truncate_op)
{
        bool ret = false;

        /*
         * If folio is mapped, it was faulted in after being
         * unmapped in caller.  Unmap (again) while holding
         * the fault mutex.  The mutex will prevent faults
         * until we finish removing the folio.
         */
        if (unlikely(folio_mapped(folio)))
                hugetlb_unmap_file_folio(h, mapping, folio, index);

        folio_lock(folio);
        /*
         * We must remove the folio from page cache before removing
         * the region/reserve map (hugetlb_unreserve_pages).  In
         * rare out of memory conditions, removal of the region/reserve
         * map could fail.  Correspondingly, the subpool and global
         * reserve usage count can need to be adjusted.
         */
        VM_BUG_ON_FOLIO(folio_test_hugetlb_restore_reserve(folio), folio);
        hugetlb_delete_from_page_cache(folio);
        ret = true;
        if (!truncate_op) {
                if (unlikely(hugetlb_unreserve_pages(inode, index,
                                                        index + 1, 1)))
                        hugetlb_fix_reserve_counts(inode);
        }

        folio_unlock(folio);
        return ret;
}

/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *      In this case, we first scan the range and release found pages.
 *      After releasing pages, hugetlb_unreserve_pages cleans up region/reserve
 *      maps and global counts.  Page faults can race with truncation.
 *      During faults, hugetlb_no_page() checks i_size before page allocation,
 *      and again after obtaining page table lock.  It will 'back out'
 *      allocations in the truncated range.
 * hole punch is indicated if end is not LLONG_MAX
 *      In the hole punch case we scan the range and release found pages.
 *      Only when releasing a page is the associated region/reserve map
 *      deleted.  The region/reserve map for ranges without associated
 *      pages is not modified.  Page faults can race with hole punch.
 *      This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
                                   loff_t lend)
{
        struct hstate *h = hstate_inode(inode);
        struct address_space *mapping = &inode->i_data;
        const pgoff_t end = lend >> PAGE_SHIFT;
        struct folio_batch fbatch;
        pgoff_t next, index;
        int i, freed = 0;
        bool truncate_op = (lend == LLONG_MAX);

        folio_batch_init(&fbatch);
        next = lstart >> PAGE_SHIFT;
        while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) {
                for (i = 0; i < folio_batch_count(&fbatch); ++i) {
                        struct folio *folio = fbatch.folios[i];
                        u32 hash = 0;

                        index = folio->index >> huge_page_order(h);
                        hash = hugetlb_fault_mutex_hash(mapping, index);
                        mutex_lock(&hugetlb_fault_mutex_table[hash]);

                        /*
                         * Remove folio that was part of folio_batch.
                         */
                        if (remove_inode_single_folio(h, inode, mapping, folio,
                                                        index, truncate_op))
                                freed++;

                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                }
                folio_batch_release(&fbatch);
                cond_resched();
        }

        if (truncate_op)
                (void)hugetlb_unreserve_pages(inode,
                                lstart >> huge_page_shift(h),
                                LONG_MAX, freed);
}

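/*
 * Final inode teardown: release all pages attached to the inode and
 * drop the reserve map allocated at inode creation time, if any.
 */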
static void hugetlbfs_evict_inode(struct inode *inode)
{
        struct resv_map *resv_map;

        trace_hugetlbfs_evict_inode(inode);
        remove_inode_hugepages(inode, 0, LLONG_MAX);

        /*
         * Get the resv_map from the address space embedded in the inode.
         * This is the address space which points to any resv_map allocated
         * at inode creation time.  If this is a device special inode,
         * i_mapping may not point to the original address space.
         */
        resv_map = (struct resv_map *)(&inode->i_data)->i_private_data;
        /* Only regular and link inodes have associated reserve maps */
        if (resv_map)
                resv_map_release(&resv_map->refs);
        clear_inode(inode);
}

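/*
 * Truncate the inode to @offset: update i_size, unmap all user
 * mappings of the truncated range, then remove the corresponding
 * pages from the page cache.
 */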
static void hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
        pgoff_t pgoff;
        struct address_space *mapping = inode->i_mapping;
        struct hstate *h = hstate_inode(inode);

        BUG_ON(offset & ~huge_page_mask(h));
        pgoff = offset >> PAGE_SHIFT;

        i_size_write(inode, offset);
        i_mmap_lock_write(mapping);
        if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
                hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0,
                                      ZAP_FLAG_DROP_MARKER);
        i_mmap_unlock_write(mapping);
        remove_inode_hugepages(inode, offset, LLONG_MAX);
}

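/*
 * Zero the sub-range [start, end) of the single huge page containing
 * it, for hole punch requests that are not huge page aligned.
 */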
static void hugetlbfs_zero_partial_page(struct hstate *h,
                                        struct address_space *mapping,
                                        loff_t start,
                                        loff_t end)
{
        pgoff_t idx = start >> huge_page_shift(h);
        struct folio *folio;

        folio = filemap_lock_hugetlb_folio(h, mapping, idx);
        if (IS_ERR(folio))
                return;

        start = start & ~huge_page_mask(h);
        end = end & ~huge_page_mask(h);
        if (!end)
                end = huge_page_size(h);

        folio_zero_segment(folio, (size_t)start, (size_t)end);

        folio_unlock(folio);
        folio_put(folio);
}

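/*
 * Punch a hole in the file: zero any partial huge pages at the edges
 * of the range, then unmap and remove every full huge page within it.
 */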
static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
        struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
        struct address_space *mapping = inode->i_mapping;
        struct hstate *h = hstate_inode(inode);
        loff_t hpage_size = huge_page_size(h);
        loff_t hole_start, hole_end;

        /*
         * hole_start and hole_end indicate the full pages within the hole.
         */
        hole_start = round_up(offset, hpage_size);
        hole_end = round_down(offset + len, hpage_size);

        inode_lock(inode);

        /* protected by i_rwsem */
        if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
                inode_unlock(inode);
                return -EPERM;
        }

        i_mmap_lock_write(mapping);

        /* If range starts before first full page, zero partial page. */
        if (offset < hole_start)
                hugetlbfs_zero_partial_page(h, mapping,
                                offset, min(offset + len, hole_start));

        /* Unmap users of full pages in the hole. */
        if (hole_end > hole_start) {
                if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
                        hugetlb_vmdelete_list(&mapping->i_mmap,
                                              hole_start >> PAGE_SHIFT,
                                              hole_end >> PAGE_SHIFT, 0);
        }

        /* If range extends beyond last full page, zero partial page. */
        if ((offset + len) > hole_end && (offset + len) > hole_start)
                hugetlbfs_zero_partial_page(h, mapping,
                                hole_end, offset + len);

        i_mmap_unlock_write(mapping);

        /* Remove full pages from the file. */
        if (hole_end > hole_start)
                remove_inode_hugepages(inode, hole_start, hole_end);

        inode_unlock(inode);

        return 0;
}

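/*
 * fallocate(2) support.  Hole punching is delegated to
 * hugetlbfs_punch_hole() above; the default case preallocates huge
 * pages by adding them directly to the page cache, consuming any
 * file-level reserves to keep reservation accounting consistent.
 */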
static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
                                loff_t len)
{
        struct inode *inode = file_inode(file);
        struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
        struct address_space *mapping = inode->i_mapping;
        struct hstate *h = hstate_inode(inode);
        struct vm_area_struct pseudo_vma;
        struct mm_struct *mm = current->mm;
        loff_t hpage_size = huge_page_size(h);
        unsigned long hpage_shift = huge_page_shift(h);
        pgoff_t start, index, end;
        int error;
        u32 hash;

        if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
                return -EOPNOTSUPP;

        if (mode & FALLOC_FL_PUNCH_HOLE) {
                error = hugetlbfs_punch_hole(inode, offset, len);
                goto out_nolock;
        }

        /*
         * Default preallocate case.
         * For this range, start is rounded down and end is rounded up
         * as well as being converted to page offsets.
         */
        start = offset >> hpage_shift;
        end = (offset + len + hpage_size - 1) >> hpage_shift;

        inode_lock(inode);

        /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
        error = inode_newsize_ok(inode, offset + len);
        if (error)
                goto out;

        if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
                error = -EPERM;
                goto out;
        }

        /*
         * Initialize a pseudo vma as this is required by the huge page
         * allocation routines.
         */
        vma_init(&pseudo_vma, mm);
        vm_flags_init(&pseudo_vma, VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
        pseudo_vma.vm_file = file;

        for (index = start; index < end; index++) {
                /*
                 * This is supposed to be the vaddr where the page is being
                 * faulted in, but we have no vaddr here.
                 */
                struct folio *folio;
                unsigned long addr;

                cond_resched();

                /*
                 * fallocate(2) manpage permits EINTR; we may have been
                 * interrupted because we are using up too much memory.
                 */
                if (signal_pending(current)) {
                        error = -EINTR;
                        break;
                }

                /* addr is the offset within the file (zero based) */
                addr = index * hpage_size;

                /* mutex taken here, fault path and hole punch */
                hash = hugetlb_fault_mutex_hash(mapping, index);
                mutex_lock(&hugetlb_fault_mutex_table[hash]);

                /* See if already present in mapping to avoid alloc/free */
                folio = filemap_get_folio(mapping, index << huge_page_order(h));
                if (!IS_ERR(folio)) {
                        folio_put(folio);
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                        continue;
                }

                /*
                 * Allocate folio without setting the avoid_reserve argument.
                 * There certainly are no reserves associated with the
                 * pseudo_vma.  However, there could be shared mappings with
                 * reserves for the file at the inode level.  If we fallocate
                 * folios in these areas, we need to consume the reserves
                 * to keep reservation accounting consistent.
                 */
                folio = alloc_hugetlb_folio(&pseudo_vma, addr, false);
                if (IS_ERR(folio)) {
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                        error = PTR_ERR(folio);
                        goto out;
                }
                folio_zero_user(folio, addr);
                __folio_mark_uptodate(folio);
                error = hugetlb_add_to_page_cache(folio, mapping, index);
                if (unlikely(error)) {
                        restore_reserve_on_error(h, &pseudo_vma, addr, folio);
                        folio_put(folio);
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                        goto out;
                }

                mutex_unlock(&hugetlb_fault_mutex_table[hash]);

                folio_set_hugetlb_migratable(folio);
                /*
                 * folio_unlock because locked by hugetlb_add_to_page_cache()
                 * folio_put() due to reference from alloc_hugetlb_folio()
                 */
                folio_unlock(folio);
                folio_put(folio);
        }

        if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
                i_size_write(inode, offset + len);
        inode_set_ctime_current(inode);
out:
        inode_unlock(inode);

out_nolock:
        trace_hugetlbfs_fallocate(inode, mode, offset, len, error);
        return error;
}

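/*
 * Only ATTR_SIZE needs hugetlbfs-specific handling: a new size must
 * be huge page aligned and respect the F_SEAL_SHRINK/F_SEAL_GROW
 * seals; size changes go through hugetlb_vmtruncate().
 */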
static int hugetlbfs_setattr(struct mnt_idmap *idmap,
                             struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = d_inode(dentry);
        struct hstate *h = hstate_inode(inode);
        int error;
        unsigned int ia_valid = attr->ia_valid;
        struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

        error = setattr_prepare(idmap, dentry, attr);
        if (error)
                return error;

        trace_hugetlbfs_setattr(inode, dentry, attr);

        if (ia_valid & ATTR_SIZE) {
                loff_t oldsize = inode->i_size;
                loff_t newsize = attr->ia_size;

                if (newsize & ~huge_page_mask(h))
                        return -EINVAL;
                /* protected by i_rwsem */
                if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
                    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
                        return -EPERM;
                hugetlb_vmtruncate(inode, newsize);
        }

        setattr_copy(idmap, inode, attr);
        mark_inode_dirty(inode);
        return 0;
}

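/*
 * Allocate the root directory inode for a new mount, using the
 * uid/gid/mode supplied in the mount context.
 */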
static struct inode *hugetlbfs_get_root(struct super_block *sb,
                                        struct hugetlbfs_fs_context *ctx)
{
        struct inode *inode;

        inode = new_inode(sb);
        if (inode) {
                inode->i_ino = get_next_ino();
                inode->i_mode = S_IFDIR | ctx->mode;
                inode->i_uid = ctx->uid;
                inode->i_gid = ctx->gid;
                simple_inode_init_ts(inode);
                inode->i_op = &hugetlbfs_dir_inode_operations;
                inode->i_fop = &simple_dir_operations;
                /* directory inodes start off with i_nlink == 2 (for "." entry) */
                inc_nlink(inode);
                lockdep_annotate_inode_mutex_key(inode);
        }
        return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems.  This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
                                        struct mnt_idmap *idmap,
                                        struct inode *dir,
                                        umode_t mode, dev_t dev)
{
        struct inode *inode;
        struct resv_map *resv_map = NULL;

        /*
         * Reserve maps are only needed for inodes that can have associated
         * page allocations.
         */
        if (S_ISREG(mode) || S_ISLNK(mode)) {
                resv_map = resv_map_alloc();
                if (!resv_map)
                        return NULL;
        }

        inode = new_inode(sb);
        if (inode) {
                struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

                inode->i_ino = get_next_ino();
                inode_init_owner(idmap, inode, dir, mode);
                lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
                                &hugetlbfs_i_mmap_rwsem_key);
                inode->i_mapping->a_ops = &hugetlbfs_aops;
                simple_inode_init_ts(inode);
                inode->i_mapping->i_private_data = resv_map;
                info->seals = F_SEAL_SEAL;
                switch (mode & S_IFMT) {
                default:
                        init_special_inode(inode, mode, dev);
                        break;
                case S_IFREG:
                        inode->i_op = &hugetlbfs_inode_operations;
                        inode->i_fop = &hugetlbfs_file_operations;
                        break;
                case S_IFDIR:
                        inode->i_op = &hugetlbfs_dir_inode_operations;
                        inode->i_fop = &simple_dir_operations;

                        /* directory inodes start off with i_nlink == 2 (for "." entry) */
                        inc_nlink(inode);
                        break;
                case S_IFLNK:
                        inode->i_op = &page_symlink_inode_operations;
                        inode_nohighmem(inode);
                        break;
                }
                lockdep_annotate_inode_mutex_key(inode);
                trace_hugetlbfs_alloc_inode(inode, dir, mode);
        } else {
                if (resv_map)
                        kref_put(&resv_map->refs, resv_map_release);
        }

        return inode;
}

/*
 * File creation.  Allocate an inode, and we're done.
 */
static int hugetlbfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
                           struct dentry *dentry, umode_t mode, dev_t dev)
{
        struct inode *inode;

        inode = hugetlbfs_get_inode(dir->i_sb, idmap, dir, mode, dev);
        if (!inode)
                return -ENOSPC;
        inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
        d_instantiate(dentry, inode);
        dget(dentry);   /* Extra count - pin the dentry in core */
        return 0;
}

static struct dentry *hugetlbfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
                                      struct dentry *dentry, umode_t mode)
{
        int retval = hugetlbfs_mknod(idmap, dir, dentry,
                                     mode | S_IFDIR, 0);
        if (!retval)
                inc_nlink(dir);
        return ERR_PTR(retval);
}

static int hugetlbfs_create(struct mnt_idmap *idmap,
                            struct inode *dir, struct dentry *dentry,
                            umode_t mode, bool excl)
{
        return hugetlbfs_mknod(idmap, dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_tmpfile(struct mnt_idmap *idmap,
                             struct inode *dir, struct file *file,
                             umode_t mode)
{
        struct inode *inode;

        inode = hugetlbfs_get_inode(dir->i_sb, idmap, dir, mode | S_IFREG, 0);
        if (!inode)
                return -ENOSPC;
        inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
        d_tmpfile(file, inode);
        return finish_open_simple(file, 0);
}

static int hugetlbfs_symlink(struct mnt_idmap *idmap,
                             struct inode *dir, struct dentry *dentry,
                             const char *symname)
{
        const umode_t mode = S_IFLNK|S_IRWXUGO;
        struct inode *inode;
        int error = -ENOSPC;

        inode = hugetlbfs_get_inode(dir->i_sb, idmap, dir, mode, 0);
        if (inode) {
                int l = strlen(symname)+1;
                error = page_symlink(inode, symname, l);
                if (!error) {
                        d_instantiate(dentry, inode);
                        dget(dentry);
                } else
                        iput(inode);
        }
        inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));

        return error;
}

#ifdef CONFIG_MIGRATION
static int hugetlbfs_migrate_folio(struct address_space *mapping,
                                struct folio *dst, struct folio *src,
                                enum migrate_mode mode)
{
        int rc;

        rc = migrate_huge_page_move_mapping(mapping, dst, src);
        if (rc != MIGRATEPAGE_SUCCESS)
                return rc;

        if (hugetlb_folio_subpool(src)) {
                hugetlb_set_folio_subpool(dst,
                                        hugetlb_folio_subpool(src));
                hugetlb_set_folio_subpool(src, NULL);
        }

        folio_migrate_flags(dst, src);

        return MIGRATEPAGE_SUCCESS;
}
#else
#define hugetlbfs_migrate_folio NULL
#endif

static int hugetlbfs_error_remove_folio(struct address_space *mapping,
                                struct folio *folio)
{
        return 0;
}

/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
        struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
        struct hugepage_subpool *spool = sbinfo->spool;
        unsigned long hpage_size = huge_page_size(sbinfo->hstate);
        unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
        char mod;

        if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
                seq_printf(m, ",uid=%u",
                           from_kuid_munged(&init_user_ns, sbinfo->uid));
        if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
                seq_printf(m, ",gid=%u",
                           from_kgid_munged(&init_user_ns, sbinfo->gid));
        if (sbinfo->mode != 0755)
                seq_printf(m, ",mode=%o", sbinfo->mode);
        if (sbinfo->max_inodes != -1)
                seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

        hpage_size /= 1024;
        mod = 'K';
        if (hpage_size >= 1024) {
                hpage_size /= 1024;
                mod = 'M';
        }
        seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
        if (spool) {
                if (spool->max_hpages != -1)
                        seq_printf(m, ",size=%llu",
                                   (unsigned long long)spool->max_hpages << hpage_shift);
                if (spool->min_hpages != -1)
                        seq_printf(m, ",min_size=%llu",
                                   (unsigned long long)spool->min_hpages << hpage_shift);
        }
        return 0;
}

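/* Block size is the huge page size; limits come from the subpool. */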
static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
        struct hstate *h = hstate_inode(d_inode(dentry));
        u64 id = huge_encode_dev(dentry->d_sb->s_dev);

        buf->f_fsid = u64_to_fsid(id);
        buf->f_type = HUGETLBFS_MAGIC;
        buf->f_bsize = huge_page_size(h);
        if (sbinfo) {
                spin_lock(&sbinfo->stat_lock);
                /* If no limits set, just report 0 or -1 for max/free/used
                 * blocks, like simple_statfs() */
                if (sbinfo->spool) {
                        long free_pages;

                        spin_lock_irq(&sbinfo->spool->lock);
                        buf->f_blocks = sbinfo->spool->max_hpages;
                        free_pages = sbinfo->spool->max_hpages
                                - sbinfo->spool->used_hpages;
                        buf->f_bavail = buf->f_bfree = free_pages;
                        spin_unlock_irq(&sbinfo->spool->lock);
                        buf->f_files = sbinfo->max_inodes;
                        buf->f_ffree = sbinfo->free_inodes;
                }
                spin_unlock(&sbinfo->stat_lock);
        }
        buf->f_namelen = NAME_MAX;
        return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
        struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

        if (sbi) {
                sb->s_fs_info = NULL;

                if (sbi->spool)
                        hugepage_put_subpool(sbi->spool);

                kfree(sbi);
        }
}

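/*
 * Inode accounting for the nr_inodes mount option.  A negative
 * free_inodes count means "no limit"; otherwise an inode is taken
 * from (or returned to) the pool under stat_lock.
 */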
static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
        if (sbinfo->free_inodes >= 0) {
                spin_lock(&sbinfo->stat_lock);
                if (unlikely(!sbinfo->free_inodes)) {
                        spin_unlock(&sbinfo->stat_lock);
                        return 0;
                }
                sbinfo->free_inodes--;
                spin_unlock(&sbinfo->stat_lock);
        }

        return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
        if (sbinfo->free_inodes >= 0) {
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_inodes++;
                spin_unlock(&sbinfo->stat_lock);
        }
}


static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
        struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
        struct hugetlbfs_inode_info *p;

        if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
                return NULL;
        p = alloc_inode_sb(sb, hugetlbfs_inode_cachep, GFP_KERNEL);
        if (unlikely(!p)) {
                hugetlbfs_inc_free_inodes(sbinfo);
                return NULL;
        }
        return &p->vfs_inode;
}

static void hugetlbfs_free_inode(struct inode *inode)
{
        trace_hugetlbfs_free_inode(inode);
        kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
        hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
}

static const struct address_space_operations hugetlbfs_aops = {
        .write_begin            = hugetlbfs_write_begin,
        .write_end              = hugetlbfs_write_end,
        .dirty_folio            = noop_dirty_folio,
        .migrate_folio          = hugetlbfs_migrate_folio,
        .error_remove_folio     = hugetlbfs_error_remove_folio,
};


static void init_once(void *foo)
{
        struct hugetlbfs_inode_info *ei = foo;

        inode_init_once(&ei->vfs_inode);
}

static const struct file_operations hugetlbfs_file_operations = {
        .read_iter              = hugetlbfs_read_iter,
        .mmap                   = hugetlbfs_file_mmap,
        .fsync                  = noop_fsync,
        .get_unmapped_area      = hugetlb_get_unmapped_area,
        .llseek                 = default_llseek,
        .fallocate              = hugetlbfs_fallocate,
        .fop_flags              = FOP_HUGE_PAGES,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
        .create         = hugetlbfs_create,
        .lookup         = simple_lookup,
        .link           = simple_link,
        .unlink         = simple_unlink,
        .symlink        = hugetlbfs_symlink,
        .mkdir          = hugetlbfs_mkdir,
        .rmdir          = simple_rmdir,
        .mknod          = hugetlbfs_mknod,
        .rename         = simple_rename,
        .setattr        = hugetlbfs_setattr,
        .tmpfile        = hugetlbfs_tmpfile,
};

static const struct inode_operations hugetlbfs_inode_operations = {
        .setattr        = hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
        .alloc_inode    = hugetlbfs_alloc_inode,
        .free_inode     = hugetlbfs_free_inode,
        .destroy_inode  = hugetlbfs_destroy_inode,
        .evict_inode    = hugetlbfs_evict_inode,
        .statfs         = hugetlbfs_statfs,
        .put_super      = hugetlbfs_put_super,
        .show_options   = hugetlbfs_show_options,
};

/*
 * Convert size option passed from command line to number of huge pages
 * in the pool specified by hstate.  Size option could be in bytes
 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
 */
static long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
                         enum hugetlbfs_size_type val_type)
{
        if (val_type == NO_SIZE)
                return -1;

        if (val_type == SIZE_PERCENT) {
                size_opt <<= huge_page_shift(h);
                size_opt *= h->max_huge_pages;
                do_div(size_opt, 100);
        }

        size_opt >>= huge_page_shift(h);
        return size_opt;
}

/*
 * Parse one mount parameter.
 */
static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
        struct hugetlbfs_fs_context *ctx = fc->fs_private;
        struct fs_parse_result result;
        struct hstate *h;
        char *rest;
        unsigned long ps;
        int opt;

        opt = fs_parse(fc, hugetlb_fs_parameters, param, &result);
        if (opt < 0)
                return opt;

        switch (opt) {
        case Opt_uid:
                ctx->uid = result.uid;
                return 0;

        case Opt_gid:
                ctx->gid = result.gid;
                return 0;

        case Opt_mode:
                ctx->mode = result.uint_32 & 01777U;
                return 0;

        case Opt_size:
                /* memparse() will accept a K/M/G without a digit */
                if (!param->string || !isdigit(param->string[0]))
                        goto bad_val;
                ctx->max_size_opt = memparse(param->string, &rest);
                ctx->max_val_type = SIZE_STD;
                if (*rest == '%')
                        ctx->max_val_type = SIZE_PERCENT;
                return 0;

        case Opt_nr_inodes:
                /* memparse() will accept a K/M/G without a digit */
                if (!param->string || !isdigit(param->string[0]))
                        goto bad_val;
                ctx->nr_inodes = memparse(param->string, &rest);
                return 0;

        case Opt_pagesize:
                ps = memparse(param->string, &rest);
                h = size_to_hstate(ps);
                if (!h) {
                        pr_err("Unsupported page size %lu MB\n", ps / SZ_1M);
                        return -EINVAL;
                }
                ctx->hstate = h;
                return 0;

        case Opt_min_size:
                /* memparse() will accept a K/M/G without a digit */
                if (!param->string || !isdigit(param->string[0]))
                        goto bad_val;
                ctx->min_size_opt = memparse(param->string, &rest);
                ctx->min_val_type = SIZE_STD;
                if (*rest == '%')
                        ctx->min_val_type = SIZE_PERCENT;
                return 0;

        default:
                return -EINVAL;
        }

bad_val:
        return invalfc(fc, "Bad value '%s' for mount option '%s'\n",
                       param->string, param->key);
}

/*
 * Validate the parsed options.
 */
static int hugetlbfs_validate(struct fs_context *fc)
{
        struct hugetlbfs_fs_context *ctx = fc->fs_private;

        /*
         * Use huge page pool size (in hstate) to convert the size
         * options to number of huge pages.  If NO_SIZE, -1 is returned.
         */
        ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
                                                   ctx->max_size_opt,
                                                   ctx->max_val_type);
        ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
                                                   ctx->min_size_opt,
                                                   ctx->min_val_type);

        /*
         * If max_size was specified, then min_size must be smaller
         */
        if (ctx->max_val_type > NO_SIZE &&
            ctx->min_hpages > ctx->max_hpages) {
                pr_err("Minimum size can not be greater than maximum size\n");
                return -EINVAL;
        }

        return 0;
}

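/*
 * Fill a superblock from the parsed and validated mount context:
 * copy limits and ownership into the sb info, create a subpool if a
 * maximum or minimum size was given, and allocate the root inode.
 */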
1400 | static int | |
32021982 | 1401 | hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc) |
1da177e4 | 1402 | { |
32021982 | 1403 | struct hugetlbfs_fs_context *ctx = fc->fs_private; |
1da177e4 LT |
1404 | struct hugetlbfs_sb_info *sbinfo; |
1405 | ||
1da177e4 LT |
1406 | sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL); |
1407 | if (!sbinfo) | |
1408 | return -ENOMEM; | |
1409 | sb->s_fs_info = sbinfo; | |
1410 | spin_lock_init(&sbinfo->stat_lock); | |
32021982 DH |
1411 | sbinfo->hstate = ctx->hstate; |
1412 | sbinfo->max_inodes = ctx->nr_inodes; | |
1413 | sbinfo->free_inodes = ctx->nr_inodes; | |
1414 | sbinfo->spool = NULL; | |
1415 | sbinfo->uid = ctx->uid; | |
1416 | sbinfo->gid = ctx->gid; | |
1417 | sbinfo->mode = ctx->mode; | |
4a25220d | 1418 | |
7ca02d0a MK |
1419 | /* |
1420 | * Allocate and initialize subpool if maximum or minimum size is | |
1935ebd3 | 1421 | * specified. Any needed reservations (for minimum size) are taken |
445c8098 | 1422 | * when the subpool is created. |
7ca02d0a | 1423 | */ |
32021982 DH |
1424 | if (ctx->max_hpages != -1 || ctx->min_hpages != -1) { |
1425 | sbinfo->spool = hugepage_new_subpool(ctx->hstate, | |
1426 | ctx->max_hpages, | |
1427 | ctx->min_hpages); | |
90481622 DG |
1428 | if (!sbinfo->spool) |
1429 | goto out_free; | |
1430 | } | |
1da177e4 | 1431 | sb->s_maxbytes = MAX_LFS_FILESIZE; |
32021982 DH |
1432 | sb->s_blocksize = huge_page_size(ctx->hstate); |
1433 | sb->s_blocksize_bits = huge_page_shift(ctx->hstate); | |
1da177e4 LT |
1434 | sb->s_magic = HUGETLBFS_MAGIC; |
1435 | sb->s_op = &hugetlbfs_ops; | |
1436 | sb->s_time_gran = 1; | |
15568299 MK |
1437 | |
1438 | /* | |
1439 | * Due to the special and limited functionality of hugetlbfs, it does | |
1440 | * not work well as a stacking filesystem. | |
1441 | */ | |
1442 | sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH; | |
32021982 | 1443 | sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx)); |
48fde701 | 1444 | if (!sb->s_root) |
1da177e4 | 1445 | goto out_free; |
1da177e4 LT |
1446 | return 0; |
1447 | out_free: | |
6e6870d4 | 1448 | kfree(sbinfo->spool); |
1da177e4 LT |
1449 | kfree(sbinfo); |
1450 | return -ENOMEM; | |
1451 | } | |
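
/*
 * Illustration (assuming a 2MB huge page size): after
 *
 *	mount -t hugetlbfs -o size=512M,min_size=128M none /mnt/huge
 *
 * the superblock set up above reports s_blocksize = 2097152, and the
 * subpool is created with max_hpages = 256 and min_hpages = 64; the 64
 * minimum pages are reserved from the global pool at creation time.
 */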

static int hugetlbfs_get_tree(struct fs_context *fc)
{
	int err = hugetlbfs_validate(fc);

	if (err)
		return err;
	return get_tree_nodev(fc, hugetlbfs_fill_super);
}

static void hugetlbfs_fs_context_free(struct fs_context *fc)
{
	kfree(fc->fs_private);
}

static const struct fs_context_operations hugetlbfs_fs_context_ops = {
	.free		= hugetlbfs_fs_context_free,
	.parse_param	= hugetlbfs_parse_param,
	.get_tree	= hugetlbfs_get_tree,
};

static int hugetlbfs_init_fs_context(struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx;

	ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->max_hpages = -1;	/* No limit on size by default */
	ctx->nr_inodes = -1;	/* No limit on number of inodes by default */
	ctx->uid = current_fsuid();
	ctx->gid = current_fsgid();
	ctx->mode = 0755;
	ctx->hstate = &default_hstate;
	ctx->min_hpages = -1;	/* No default minimum size */
	ctx->max_val_type = NO_SIZE;
	ctx->min_val_type = NO_SIZE;
	fc->fs_private = ctx;
	fc->ops = &hugetlbfs_fs_context_ops;
	return 0;
}

static struct file_system_type hugetlbfs_fs_type = {
	.name			= "hugetlbfs",
	.init_fs_context	= hugetlbfs_init_fs_context,
	.parameters		= hugetlb_fs_parameters,
	.kill_sb		= kill_litter_super,
	.fs_flags		= FS_ALLOW_IDMAP,
};

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;

	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}
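
/*
 * Example (illustrative): with the sysctl vm.hugetlb_shm_group set to,
 * say, gid 1001, a task in group 1001 passes this check without
 * CAP_IPC_LOCK; any other unprivileged task asking for SHM_HUGETLB
 * gets -EPERM from hugetlb_file_setup() below.
 */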

static int get_hstate_idx(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return hstate_index(h);
}
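
/*
 * Example (illustrative): a page_size_log of 0 selects the default
 * hstate.  Callers such as memfd_create() encode an explicit size in
 * their flags, e.g. MFD_HUGE_2MB passes 21 (log2 of 2MB), which maps
 * to the 2MB hstate's index here; an unsupported value makes
 * hstate_sizelog() return NULL and this helper return -1.
 */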

/*
 * Note that the size should be aligned to the proper hugepage size by the
 * caller, otherwise hugetlb_reserve_pages() reserves one fewer huge page
 * than intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, int creat_flags,
				int page_size_log)
{
	struct inode *inode;
	struct vfsmount *mnt;
	int hstate_idx;
	struct file *file;

	hstate_idx = get_hstate_idx(page_size_log);
	if (hstate_idx < 0)
		return ERR_PTR(-ENODEV);

	mnt = hugetlbfs_vfsmount[hstate_idx];
	if (!mnt)
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		struct ucounts *ucounts = current_ucounts();

		if (user_shm_lock(size, ucounts)) {
			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is obsolete\n",
				     current->comm, current->pid);
			user_shm_unlock(size, ucounts);
		}
		return ERR_PTR(-EPERM);
	}

	file = ERR_PTR(-ENOSPC);
	/* hugetlbfs_vfsmount[] mounts do not use idmapped mounts. */
	inode = hugetlbfs_get_inode(mnt->mnt_sb, &nop_mnt_idmap, NULL,
				    S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out;
	if (creat_flags == HUGETLB_SHMFS_INODE)
		inode->i_flags |= S_PRIVATE;

	inode->i_size = size;
	clear_nlink(inode);

	if (!hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag))
		file = ERR_PTR(-ENOMEM);
	else
		file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
					 &hugetlbfs_file_operations);
	if (!IS_ERR(file))
		return file;

	iput(inode);
out:
	return file;
}
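
/*
 * Illustrative caller sketch (simplified from what ipc/shm.c does for
 * SHM_HUGETLB): the size must be rounded up to the huge page size
 * before the call, as the comment above requires:
 *
 *	int page_size_log = (shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK;
 *	struct hstate *hs = hstate_sizelog(page_size_log);
 *	size_t hugesize = ALIGN(size, huge_page_size(hs));
 *
 *	file = hugetlb_file_setup(name, hugesize, acctflag,
 *				  HUGETLB_SHMFS_INODE, page_size_log);
 */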

static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
{
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT);
	if (IS_ERR(fc)) {
		mnt = ERR_CAST(fc);
	} else {
		struct hugetlbfs_fs_context *ctx = fc->fs_private;

		ctx->hstate = h;
		mnt = fc_mount(fc);
		put_fs_context(fc);
	}
	if (IS_ERR(mnt))
		pr_err("Cannot mount internal hugetlbfs for page size %luK\n",
		       huge_page_size(h) / SZ_1K);
	return mnt;
}

static int __init init_hugetlbfs_fs(void)
{
	struct vfsmount *mnt;
	struct hstate *h;
	int error;
	int i;

	if (!hugepages_supported()) {
		pr_info("disabling because there are no supported hugepage sizes\n");
		return -ENOTSUPP;
	}

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, SLAB_ACCOUNT, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out_free;

	/* default hstate mount is required */
	mnt = mount_one_hugetlbfs(&default_hstate);
	if (IS_ERR(mnt)) {
		error = PTR_ERR(mnt);
		goto out_unreg;
	}
	hugetlbfs_vfsmount[default_hstate_idx] = mnt;

	/* other hstates are optional */
	i = 0;
	for_each_hstate(h) {
		if (i == default_hstate_idx) {
			i++;
			continue;
		}

		mnt = mount_one_hugetlbfs(h);
		if (IS_ERR(mnt))
			hugetlbfs_vfsmount[i] = NULL;
		else
			hugetlbfs_vfsmount[i] = mnt;
		i++;
	}

	return 0;

out_unreg:
	(void)unregister_filesystem(&hugetlbfs_fs_type);
out_free:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
out:
	return error;
}
fs_initcall(init_hugetlbfs_fs)