/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/fs_parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>
#include <linux/sched/mm.h>

#define CREATE_TRACE_POINTS
#include <trace/events/hugetlbfs.h>

static const struct address_space_operations hugetlbfs_aops;
static const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

struct hugetlbfs_fs_context {
	struct hstate		*hstate;
	unsigned long long	max_size_opt;
	unsigned long long	min_size_opt;
	long			max_hpages;
	long			nr_inodes;
	long			min_hpages;
	enum hugetlbfs_size_type max_val_type;
	enum hugetlbfs_size_type min_val_type;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
};

int sysctl_hugetlb_shm_group;

enum hugetlb_param {
	Opt_gid,
	Opt_min_size,
	Opt_mode,
	Opt_nr_inodes,
	Opt_pagesize,
	Opt_size,
	Opt_uid,
};

static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
	fsparam_gid   ("gid",		Opt_gid),
	fsparam_string("min_size",	Opt_min_size),
	fsparam_u32oct("mode",		Opt_mode),
	fsparam_string("nr_inodes",	Opt_nr_inodes),
	fsparam_string("pagesize",	Opt_pagesize),
	fsparam_string("size",		Opt_size),
	fsparam_uid   ("uid",		Opt_uid),
	{}
};
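
/*
 * Illustrative mount invocation exercising the options above (a sketch
 * with hypothetical values, not taken from this file):
 *
 *   mount -t hugetlbfs \
 *         -o pagesize=2M,size=1G,min_size=512M,uid=1000,gid=1000,mode=0770,nr_inodes=64 \
 *         none /mnt/huge
 */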

/*
 * Mask used when checking the page offset value passed in via system
 * calls.  This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value.  The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
	(((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1)))
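
/*
 * Worked example (a sketch, assuming BITS_PER_LONG == 64 and
 * PAGE_SHIFT == 12): PGOFF_LOFFT_MAX is 0x1fff << 51, i.e. the top 13
 * bits of an unsigned long.  vm_pgoff << PAGE_SHIFT must fit in the 63
 * value bits of a signed loff_t, so vm_pgoff itself may use at most
 * 63 - 12 = 51 bits; any of the masked bits being set means overflow.
 */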
| 98 | ||
| 99 | static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) | |
| 100 | { | |
| 101 | struct inode *inode = file_inode(file); | |
| 102 | loff_t len, vma_len; | |
| 103 | int ret; | |
| 104 | struct hstate *h = hstate_file(file); | |
| 105 | vm_flags_t vm_flags; | |
| 106 | ||
	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * that the is_vm_hugetlb_page tests below unmap_region go the
	 * right way when do_mmap unwinds (may be important on powerpc
	 * and ia64).
	 */
	vm_flags_set(vma, VM_HUGETLB | VM_DONTEXPAND);
	vma->vm_ops = &hugetlb_vm_ops;

	/*
	 * page based offset in vm_pgoff could be sufficiently large to
	 * overflow a loff_t when converted to byte offset.  This can
	 * only happen on architectures where sizeof(loff_t) ==
	 * sizeof(unsigned long).  So, only check in those instances.
	 */
	if (sizeof(unsigned long) == sizeof(loff_t)) {
		if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
			return -EINVAL;
	}

	/* must be huge page aligned */
	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* check for overflow */
	if (len < vma_len)
		return -EINVAL;

	inode_lock(inode);
	file_accessed(file);

	ret = -ENOMEM;

	vm_flags = vma->vm_flags;
	/*
	 * for SHM_HUGETLB, the pages are reserved in the shmget() call so skip
	 * reserving here. Note: only for SHM hugetlbfs file, the inode
	 * flag S_PRIVATE is set.
	 */
	if (inode->i_flags & S_PRIVATE)
		vm_flags |= VM_NORESERVE;

	if (hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vm_flags) < 0)
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	inode_unlock(inode);

	return ret;
}

/*
 * Called under mmap_write_lock(mm).
 */

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags)
{
	unsigned long addr0 = 0;
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if ((flags & MAP_FIXED) && (addr & ~huge_page_mask(h)))
		return -EINVAL;
	if (addr)
		addr0 = ALIGN(addr, huge_page_size(h));

	return mm_get_unmapped_area_vmflags(current->mm, file, addr0, len, pgoff,
					    flags, 0);
}

/*
 * Someone wants to read @bytes from a HWPOISON hugetlb @folio from @offset.
 * Returns the maximum number of bytes one can read without touching the 1st raw
 * HWPOISON page.
 */
static size_t adjust_range_hwpoison(struct folio *folio, size_t offset,
				    size_t bytes)
{
	struct page *page = folio_page(folio, offset / PAGE_SIZE);
	size_t safe_bytes;

	if (is_raw_hwpoison_page_in_hugepage(page))
		return 0;
	/* Safe to read the remaining bytes in this page. */
	safe_bytes = PAGE_SIZE - (offset % PAGE_SIZE);
	page++;

	/* Check each remaining page as long as we are not done yet. */
	for (; safe_bytes < bytes; safe_bytes += PAGE_SIZE, page++)
		if (is_raw_hwpoison_page_in_hugepage(page))
			break;

	return min(safe_bytes, bytes);
}
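
/*
 * Worked example (a sketch, assuming 4K base pages): a read of
 * bytes = 16384 at offset = 0 from a folio whose raw page at index 3
 * is HWPOISON returns 3 * 4096 = 12288, so the caller copies the three
 * clean pages and stops short of the poisoned one.
 */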

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data. This provides functionality similar to filemap_read().
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct folio *folio;
		size_t nr, copied, want;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the folio */
		folio = filemap_lock_hugetlb_folio(h, mapping, index);
		if (IS_ERR(folio)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			folio_unlock(folio);

			if (!folio_test_hwpoison(folio))
				want = nr;
			else {
				/*
				 * Adjust how many bytes safe to read without
				 * touching the 1st raw HWPOISON page after
				 * offset.
				 */
				want = adjust_range_hwpoison(folio, offset, nr);
				if (want == 0) {
					folio_put(folio);
					retval = -EIO;
					break;
				}
			}

			/*
			 * We have the folio, copy it to user space buffer.
			 */
			copied = copy_folio_to_iter(folio, offset, want, to);
			folio_put(folio);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}

static int hugetlbfs_write_begin(const struct kiocb *iocb,
			struct address_space *mapping,
			loff_t pos, unsigned len,
			struct folio **foliop, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(const struct kiocb *iocb,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct folio *folio, void *fsdata)
{
	BUG();
	return -EINVAL;
}

static void hugetlb_delete_from_page_cache(struct folio *folio)
{
	folio_clear_dirty(folio);
	folio_clear_uptodate(folio);
	filemap_remove_folio(folio);
}

/*
 * Called with i_mmap_rwsem held for inode based vma maps.  This makes
 * sure vma (and vm_mm) will not go away.  We also hold the hugetlb fault
 * mutex for the page in the mapping.  So, we can not race with page being
 * faulted into the vma.
 */
static bool hugetlb_vma_maps_pfn(struct vm_area_struct *vma,
				unsigned long addr, unsigned long pfn)
{
	pte_t *ptep, pte;

	ptep = hugetlb_walk(vma, addr, huge_page_size(hstate_vma(vma)));
	if (!ptep)
		return false;

	pte = huge_ptep_get(vma->vm_mm, addr, ptep);
	if (huge_pte_none(pte) || !pte_present(pte))
		return false;

	if (pte_pfn(pte) == pfn)
		return true;

	return false;
}
/*
 * Can vma_offset_start/vma_offset_end overflow on 32-bit arches?
 * No, because the interval tree returns us only those vmas
 * which overlap the truncated area starting at pgoff,
 * and no vma on a 32-bit arch can span beyond 4GB.
 */
static unsigned long vma_offset_start(struct vm_area_struct *vma, pgoff_t start)
{
	unsigned long offset = 0;

	if (vma->vm_pgoff < start)
		offset = (start - vma->vm_pgoff) << PAGE_SHIFT;

	return vma->vm_start + offset;
}

static unsigned long vma_offset_end(struct vm_area_struct *vma, pgoff_t end)
{
	unsigned long t_end;

	if (!end)
		return vma->vm_end;

	t_end = ((end - vma->vm_pgoff) << PAGE_SHIFT) + vma->vm_start;
	if (t_end > vma->vm_end)
		t_end = vma->vm_end;
	return t_end;
}
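
/*
 * Worked example (a sketch): a vma with vm_pgoff == 10 maps file pages
 * [10, 20).  For a hole covering pgoff [12, 15), vma_offset_start()
 * returns vm_start + 2 * PAGE_SIZE and vma_offset_end() returns
 * vm_start + 5 * PAGE_SIZE, each clamped to the vma's own bounds.
 */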

/*
 * Called with hugetlb fault mutex held.  Therefore, no more mappings to
 * this folio can be created while executing the routine.
 */
static void hugetlb_unmap_file_folio(struct hstate *h,
				     struct address_space *mapping,
				     struct folio *folio, pgoff_t index)
{
	struct rb_root_cached *root = &mapping->i_mmap;
	struct hugetlb_vma_lock *vma_lock;
	unsigned long pfn = folio_pfn(folio);
	struct vm_area_struct *vma;
	unsigned long v_start;
	unsigned long v_end;
	pgoff_t start, end;

	start = index * pages_per_huge_page(h);
	end = (index + 1) * pages_per_huge_page(h);

	i_mmap_lock_write(mapping);
retry:
	vma_lock = NULL;
	vma_interval_tree_foreach(vma, root, start, end - 1) {
		v_start = vma_offset_start(vma, start);
		v_end = vma_offset_end(vma, end);

		if (!hugetlb_vma_maps_pfn(vma, v_start, pfn))
			continue;

		if (!hugetlb_vma_trylock_write(vma)) {
			vma_lock = vma->vm_private_data;
			/*
			 * If we cannot get the vma lock, we need to drop
			 * i_mmap_rwsem and take locks in order.  First,
			 * take a ref on the vma_lock structure so that
			 * we can be guaranteed it will not go away when
			 * dropping i_mmap_rwsem.
			 */
			kref_get(&vma_lock->refs);
			break;
		}

		unmap_hugepage_range(vma, v_start, v_end, NULL,
				     ZAP_FLAG_DROP_MARKER);
		hugetlb_vma_unlock_write(vma);
	}

	i_mmap_unlock_write(mapping);

	if (vma_lock) {
		/*
		 * Wait on vma_lock.  We know it is still valid as we have
		 * a reference.  We must 'open code' vma locking as we do
		 * not know if vma_lock is still attached to vma.
		 */
		down_write(&vma_lock->rw_sema);
		i_mmap_lock_write(mapping);

		vma = vma_lock->vma;
		if (!vma) {
			/*
			 * If lock is no longer attached to vma, then just
			 * unlock, drop our reference and retry looking for
			 * other vmas.
			 */
			up_write(&vma_lock->rw_sema);
			kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
			goto retry;
		}

		/*
		 * vma_lock is still attached to vma.  Check to see if vma
		 * still maps page and if so, unmap.
		 */
		v_start = vma_offset_start(vma, start);
		v_end = vma_offset_end(vma, end);
		if (hugetlb_vma_maps_pfn(vma, v_start, pfn))
			unmap_hugepage_range(vma, v_start, v_end, NULL,
					     ZAP_FLAG_DROP_MARKER);

		kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
		hugetlb_vma_unlock_write(vma);

		goto retry;
	}
}

static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
		      zap_flags_t zap_flags)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after start should be
	 * unmapped.  Note, end is exclusive, whereas the interval tree takes
	 * an inclusive "last".
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end - 1 : ULONG_MAX) {
		unsigned long v_start;
		unsigned long v_end;

		if (!hugetlb_vma_trylock_write(vma))
			continue;

		v_start = vma_offset_start(vma, start);
		v_end = vma_offset_end(vma, end);

		unmap_hugepage_range(vma, v_start, v_end, NULL, zap_flags);

		/*
		 * Note that vma lock only exists for shared/non-private
		 * vmas.  Therefore, lock is not held when calling
		 * unmap_hugepage_range for private vmas.
		 */
		hugetlb_vma_unlock_write(vma);
	}
}

/*
 * Called with hugetlb fault mutex held.
 * Returns true if page was actually removed, false otherwise.
 */
static bool remove_inode_single_folio(struct hstate *h, struct inode *inode,
					struct address_space *mapping,
					struct folio *folio, pgoff_t index,
					bool truncate_op)
{
	bool ret = false;

	/*
	 * If folio is mapped, it was faulted in after being
	 * unmapped in caller, or hugetlb_vmdelete_list() skipped
	 * unmapping it because it failed to grab the lock.  Unmap
	 * (again) while holding the fault mutex.  The mutex will
	 * prevent faults until we finish removing the folio.  Hold
	 * folio lock to guarantee no concurrent migration.
	 */
	folio_lock(folio);
	if (unlikely(folio_mapped(folio)))
		hugetlb_unmap_file_folio(h, mapping, folio, index);

	/*
	 * We must remove the folio from page cache before removing
	 * the region/reserve map (hugetlb_unreserve_pages).  In
	 * rare out of memory conditions, removal of the region/reserve
	 * map could fail.  Correspondingly, the subpool and global
	 * reserve usage count may need to be adjusted.
	 */
	VM_BUG_ON_FOLIO(folio_test_hugetlb_restore_reserve(folio), folio);
	hugetlb_delete_from_page_cache(folio);
	ret = true;
	if (!truncate_op) {
		if (unlikely(hugetlb_unreserve_pages(inode, index,
							index + 1, 1)))
			hugetlb_fix_reserve_counts(inode);
	}

	folio_unlock(folio);
	return ret;
}

/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserve
 *	maps and global counts.  Page faults can race with truncation.
 *	During faults, hugetlb_no_page() checks i_size before page allocation,
 *	and again after obtaining page table lock.  It will 'back out'
 *	allocations in the truncated range.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserve map
 *	deleted.  The region/reserve map for ranges without associated
 *	pages are not modified.  Page faults can race with hole punch.
 *	This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t end = lend >> PAGE_SHIFT;
	struct folio_batch fbatch;
	pgoff_t next, index;
	int i, freed = 0;
	bool truncate_op = (lend == LLONG_MAX);

	folio_batch_init(&fbatch);
	next = lstart >> PAGE_SHIFT;
	while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); ++i) {
			struct folio *folio = fbatch.folios[i];
			u32 hash = 0;

			index = folio->index >> huge_page_order(h);
			hash = hugetlb_fault_mutex_hash(mapping, index);
			mutex_lock(&hugetlb_fault_mutex_table[hash]);

			/*
			 * Remove folio that was part of folio_batch.
			 */
			if (remove_inode_single_folio(h, inode, mapping, folio,
							index, truncate_op))
				freed++;

			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode,
				lstart >> huge_page_shift(h),
				LONG_MAX, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	trace_hugetlbfs_evict_inode(inode);
	remove_inode_hugepages(inode, 0, LLONG_MAX);

	/*
	 * Get the resv_map from the address space embedded in the inode.
	 * This is the address space which points to any resv_map allocated
	 * at inode creation time.  If this is a device special inode,
	 * i_mapping may not point to the original address space.
	 */
	resv_map = (struct resv_map *)(&inode->i_data)->i_private_data;
	/* Only regular and link inodes have associated reserve maps */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}

static void hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_size_write(inode, offset);
	i_mmap_lock_write(mapping);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0,
				      ZAP_FLAG_DROP_MARKER);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
}

static void hugetlbfs_zero_partial_page(struct hstate *h,
					struct address_space *mapping,
					loff_t start,
					loff_t end)
{
	pgoff_t idx = start >> huge_page_shift(h);
	struct folio *folio;

	folio = filemap_lock_hugetlb_folio(h, mapping, idx);
	if (IS_ERR(folio))
		return;

	start = start & ~huge_page_mask(h);
	end = end & ~huge_page_mask(h);
	if (!end)
		end = huge_page_size(h);

	folio_zero_segment(folio, (size_t)start, (size_t)end);

	folio_unlock(folio);
	folio_put(folio);
}

static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * hole_start and hole_end indicate the full pages within the hole.
	 */
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);
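
	/*
	 * Worked example (a sketch, assuming 2M huge pages): punching
	 * offset = 1M, len = 4M yields hole_start = 2M, hole_end = 4M.
	 * Only the full page [2M, 4M) is removed below; [1M, 2M) and
	 * [4M, 5M) are merely zeroed in place.
	 */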

	inode_lock(inode);

	/* protected by i_rwsem */
	if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
		inode_unlock(inode);
		return -EPERM;
	}

	i_mmap_lock_write(mapping);

	/* If range starts before first full page, zero partial page. */
	if (offset < hole_start)
		hugetlbfs_zero_partial_page(h, mapping,
				offset, min(offset + len, hole_start));

	/* Unmap users of full pages in the hole. */
	if (hole_end > hole_start) {
		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
			hugetlb_vmdelete_list(&mapping->i_mmap,
					      hole_start >> PAGE_SHIFT,
					      hole_end >> PAGE_SHIFT, 0);
	}

	/* If range extends beyond last full page, zero partial page. */
	if ((offset + len) > hole_end && (offset + len) > hole_start)
		hugetlbfs_zero_partial_page(h, mapping,
				hole_end, offset + len);

	i_mmap_unlock_write(mapping);

	/* Remove full pages from the file. */
	if (hole_end > hole_start)
		remove_inode_hugepages(inode, hole_start, hole_end);

	inode_unlock(inode);

	return 0;
}

static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = hugetlbfs_punch_hole(inode, offset, len);
		goto out_nolock;
	}

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;
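
	/*
	 * Worked example (a sketch, assuming 2M huge pages): offset = 3M,
	 * len = 3M yields start = 1, end = 3, so huge page indices 1 and 2
	 * (file range [2M, 6M)) are preallocated by the loop below.
	 */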

	inode_lock(inode);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
		error = -EPERM;
		goto out;
	}

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines.
	 */
	vma_init(&pseudo_vma, mm);
	vm_flags_init(&pseudo_vma, VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct folio *folio;
		unsigned long addr;

		cond_resched();

		/*
		 * fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/* mutex taken here, fault path and hole punch */
		hash = hugetlb_fault_mutex_hash(mapping, index);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		folio = filemap_get_folio(mapping, index << huge_page_order(h));
		if (!IS_ERR(folio)) {
			folio_put(folio);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			continue;
		}

		/*
		 * Allocate folio without setting the avoid_reserve argument.
		 * There certainly are no reserves associated with the
		 * pseudo_vma.  However, there could be shared mappings with
		 * reserves for the file at the inode level.  If we fallocate
		 * folios in these areas, we need to consume the reserves
		 * to keep reservation accounting consistent.
		 */
		folio = alloc_hugetlb_folio(&pseudo_vma, addr, false);
		if (IS_ERR(folio)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(folio);
			goto out;
		}
		folio_zero_user(folio, addr);
		__folio_mark_uptodate(folio);
		error = hugetlb_add_to_page_cache(folio, mapping, index);
		if (unlikely(error)) {
			restore_reserve_on_error(h, &pseudo_vma, addr, folio);
			folio_put(folio);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		folio_set_hugetlb_migratable(folio);
		/*
		 * folio_unlock because locked by hugetlb_add_to_page_cache()
		 * folio_put() due to reference from alloc_hugetlb_folio()
		 */
		folio_unlock(folio);
		folio_put(folio);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode_set_ctime_current(inode);
out:
	inode_unlock(inode);

out_nolock:
	trace_hugetlbfs_fallocate(inode, mode, offset, len, error);
	return error;
}

static int hugetlbfs_setattr(struct mnt_idmap *idmap,
			     struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

	error = setattr_prepare(idmap, dentry, attr);
	if (error)
		return error;

	trace_hugetlbfs_setattr(inode, dentry, attr);

	if (ia_valid & ATTR_SIZE) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize & ~huge_page_mask(h))
			return -EINVAL;
		/* protected by i_rwsem */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;
		hugetlb_vmtruncate(inode, newsize);
	}

	setattr_copy(idmap, inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_fs_context *ctx)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | ctx->mode;
		inode->i_uid = ctx->uid;
		inode->i_gid = ctx->gid;
		simple_inode_init_ts(inode);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					 struct mnt_idmap *idmap,
					 struct inode *dir,
					 umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map = NULL;

	/*
	 * Reserve maps are only needed for inodes that can have associated
	 * page allocations.
	 */
	if (S_ISREG(mode) || S_ISLNK(mode)) {
		resv_map = resv_map_alloc();
		if (!resv_map)
			return NULL;
	}

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode->i_ino = get_next_ino();
		inode_init_owner(idmap, inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				  &hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		simple_inode_init_ts(inode);
		inode->i_mapping->i_private_data = resv_map;
		info->seals = F_SEAL_SEAL;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
		trace_hugetlbfs_alloc_inode(inode, dir, mode);
	} else {
		if (resv_map)
			kref_put(&resv_map->refs, resv_map_release);
	}

	return inode;
}

/*
 * File creation.  Allocate an inode, and we're done.
 */
static int hugetlbfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
			   struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;

	inode = hugetlbfs_get_inode(dir->i_sb, idmap, dir, mode, dev);
	if (!inode)
		return -ENOSPC;
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	d_instantiate(dentry, inode);
	dget(dentry);	/* Extra count - pin the dentry in core */
	return 0;
}

static struct dentry *hugetlbfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
				      struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(idmap, dir, dentry,
				     mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return ERR_PTR(retval);
}

static int hugetlbfs_create(struct mnt_idmap *idmap,
			    struct inode *dir, struct dentry *dentry,
			    umode_t mode, bool excl)
{
	return hugetlbfs_mknod(idmap, dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_tmpfile(struct mnt_idmap *idmap,
			     struct inode *dir, struct file *file,
			     umode_t mode)
{
	struct inode *inode;

	inode = hugetlbfs_get_inode(dir->i_sb, idmap, dir, mode | S_IFREG, 0);
	if (!inode)
		return -ENOSPC;
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	d_tmpfile(file, inode);
	return finish_open_simple(file, 0);
}

static int hugetlbfs_symlink(struct mnt_idmap *idmap,
			     struct inode *dir, struct dentry *dentry,
			     const char *symname)
{
	const umode_t mode = S_IFLNK|S_IRWXUGO;
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, idmap, dir, mode, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));

	return error;
}

#ifdef CONFIG_MIGRATION
static int hugetlbfs_migrate_folio(struct address_space *mapping,
				   struct folio *dst, struct folio *src,
				   enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, dst, src);
	if (rc)
		return rc;

	if (hugetlb_folio_subpool(src)) {
		hugetlb_set_folio_subpool(dst,
					hugetlb_folio_subpool(src));
		hugetlb_set_folio_subpool(src, NULL);
	}

	folio_migrate_flags(dst, src);

	return 0;
}
#else
#define hugetlbfs_migrate_folio NULL
#endif

static int hugetlbfs_error_remove_folio(struct address_space *mapping,
				struct folio *folio)
{
	return 0;
}

/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
	struct hugepage_subpool *spool = sbinfo->spool;
	unsigned long hpage_size = huge_page_size(sbinfo->hstate);
	unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
	char mod;

	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, sbinfo->gid));
	if (sbinfo->mode != 0755)
		seq_printf(m, ",mode=%o", sbinfo->mode);
	if (sbinfo->max_inodes != -1)
		seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

	hpage_size /= 1024;
	mod = 'K';
	if (hpage_size >= 1024) {
		hpage_size /= 1024;
		mod = 'M';
	}
	seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
	if (spool) {
		if (spool->max_hpages != -1)
			seq_printf(m, ",size=%llu",
				   (unsigned long long)spool->max_hpages << hpage_shift);
		if (spool->min_hpages != -1)
			seq_printf(m, ",min_size=%llu",
				   (unsigned long long)spool->min_hpages << hpage_shift);
	}
	return 0;
}

static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(d_inode(dentry));
	u64 id = huge_encode_dev(dentry->d_sb->s_dev);

	buf->f_fsid = u64_to_fsid(id);
	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits set, just report 0 or -1 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock_irq(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock_irq(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}

static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}


static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = alloc_inode_sb(sb, hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}
	return &p->vfs_inode;
}

static void hugetlbfs_free_inode(struct inode *inode)
{
	trace_hugetlbfs_free_inode(inode);
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
}

static const struct address_space_operations hugetlbfs_aops = {
	.write_begin		= hugetlbfs_write_begin,
	.write_end		= hugetlbfs_write_end,
	.dirty_folio		= noop_dirty_folio,
	.migrate_folio		= hugetlbfs_migrate_folio,
	.error_remove_folio	= hugetlbfs_error_remove_folio,
};


static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = foo;

	inode_init_once(&ei->vfs_inode);
}

static const struct file_operations hugetlbfs_file_operations = {
	.read_iter		= hugetlbfs_read_iter,
	.mmap			= hugetlbfs_file_mmap,
	.fsync			= noop_fsync,
	.get_unmapped_area	= hugetlb_get_unmapped_area,
	.llseek			= default_llseek,
	.fallocate		= hugetlbfs_fallocate,
	.fop_flags		= FOP_HUGE_PAGES,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create		= hugetlbfs_create,
	.lookup		= simple_lookup,
	.link		= simple_link,
	.unlink		= simple_unlink,
	.symlink	= hugetlbfs_symlink,
	.mkdir		= hugetlbfs_mkdir,
	.rmdir		= simple_rmdir,
	.mknod		= hugetlbfs_mknod,
	.rename		= simple_rename,
	.setattr	= hugetlbfs_setattr,
	.tmpfile	= hugetlbfs_tmpfile,
};

static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr	= hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
	.alloc_inode	= hugetlbfs_alloc_inode,
	.free_inode	= hugetlbfs_free_inode,
	.destroy_inode	= hugetlbfs_destroy_inode,
	.evict_inode	= hugetlbfs_evict_inode,
	.statfs		= hugetlbfs_statfs,
	.put_super	= hugetlbfs_put_super,
	.show_options	= hugetlbfs_show_options,
};

/*
 * Convert size option passed from command line to number of huge pages
 * in the pool specified by hstate.  Size option could be in bytes
 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
 */
static long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
			 enum hugetlbfs_size_type val_type)
{
	if (val_type == NO_SIZE)
		return -1;

	if (val_type == SIZE_PERCENT) {
		size_opt <<= huge_page_shift(h);
		size_opt *= h->max_huge_pages;
		do_div(size_opt, 100);
	}

	size_opt >>= huge_page_shift(h);
	return size_opt;
}
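
/*
 * Worked example (a sketch, assuming 2M huge pages and a pool of 1024
 * pages): "size=50%" arrives as size_opt == 50 with SIZE_PERCENT and
 * converts to 50 * 1024 / 100 = 512 huge pages; "size=1G" arrives as
 * SIZE_STD and converts to 1G >> 21 = 512 as well.
 */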

/*
 * Parse one mount parameter.
 */
static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;
	struct fs_parse_result result;
	struct hstate *h;
	char *rest;
	unsigned long ps;
	int opt;

	opt = fs_parse(fc, hugetlb_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_uid:
		ctx->uid = result.uid;
		return 0;

	case Opt_gid:
		ctx->gid = result.gid;
		return 0;

	case Opt_mode:
		ctx->mode = result.uint_32 & 01777U;
		return 0;

	case Opt_size:
		/* memparse() will accept a K/M/G without a digit */
		if (!param->string || !isdigit(param->string[0]))
			goto bad_val;
		ctx->max_size_opt = memparse(param->string, &rest);
		ctx->max_val_type = SIZE_STD;
		if (*rest == '%')
			ctx->max_val_type = SIZE_PERCENT;
		return 0;

	case Opt_nr_inodes:
		/* memparse() will accept a K/M/G without a digit */
		if (!param->string || !isdigit(param->string[0]))
			goto bad_val;
		ctx->nr_inodes = memparse(param->string, &rest);
		return 0;

	case Opt_pagesize:
		ps = memparse(param->string, &rest);
		h = size_to_hstate(ps);
		if (!h) {
			pr_err("Unsupported page size %lu MB\n", ps / SZ_1M);
			return -EINVAL;
		}
		ctx->hstate = h;
		return 0;

	case Opt_min_size:
		/* memparse() will accept a K/M/G without a digit */
		if (!param->string || !isdigit(param->string[0]))
			goto bad_val;
		ctx->min_size_opt = memparse(param->string, &rest);
		ctx->min_val_type = SIZE_STD;
		if (*rest == '%')
			ctx->min_val_type = SIZE_PERCENT;
		return 0;

	default:
		return -EINVAL;
	}

bad_val:
	return invalfc(fc, "Bad value '%s' for mount option '%s'\n",
		      param->string, param->key);
}

/*
 * Validate the parsed options.
 */
static int hugetlbfs_validate(struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;

	/*
	 * Use huge page pool size (in hstate) to convert the size
	 * options to number of huge pages.  If NO_SIZE, -1 is returned.
	 */
	ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
						   ctx->max_size_opt,
						   ctx->max_val_type);
	ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
						   ctx->min_size_opt,
						   ctx->min_val_type);

	/*
	 * If max_size was specified, then min_size must be smaller
	 */
	if (ctx->max_val_type > NO_SIZE &&
	    ctx->min_hpages > ctx->max_hpages) {
		pr_err("Minimum size can not be greater than maximum size\n");
		return -EINVAL;
	}

	return 0;
}

static int
hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;
	struct hugetlbfs_sb_info *sbinfo;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->hstate		= ctx->hstate;
	sbinfo->max_inodes	= ctx->nr_inodes;
	sbinfo->free_inodes	= ctx->nr_inodes;
	sbinfo->spool		= NULL;
	sbinfo->uid		= ctx->uid;
	sbinfo->gid		= ctx->gid;
	sbinfo->mode		= ctx->mode;

	/*
	 * Allocate and initialize subpool if maximum or minimum size is
	 * specified.  Any needed reservations (for minimum size) are taken
	 * when the subpool is created.
	 */
	if (ctx->max_hpages != -1 || ctx->min_hpages != -1) {
		sbinfo->spool = hugepage_new_subpool(ctx->hstate,
						     ctx->max_hpages,
						     ctx->min_hpages);
		if (!sbinfo->spool)
			goto out_free;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(ctx->hstate);
	sb->s_blocksize_bits = huge_page_shift(ctx->hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_d_flags = DCACHE_DONTCACHE;
	sb->s_time_gran = 1;

	/*
	 * Due to the special and limited functionality of hugetlbfs, it does
	 * not work well as a stacking filesystem.
	 */
	sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
	sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx));
	if (!sb->s_root)
		goto out_free;
	return 0;
out_free:
	kfree(sbinfo->spool);
	kfree(sbinfo);
	return -ENOMEM;
}

static int hugetlbfs_get_tree(struct fs_context *fc)
{
	int err = hugetlbfs_validate(fc);
	if (err)
		return err;
	return get_tree_nodev(fc, hugetlbfs_fill_super);
}

static void hugetlbfs_fs_context_free(struct fs_context *fc)
{
	kfree(fc->fs_private);
}

static const struct fs_context_operations hugetlbfs_fs_context_ops = {
	.free		= hugetlbfs_fs_context_free,
	.parse_param	= hugetlbfs_parse_param,
	.get_tree	= hugetlbfs_get_tree,
};

static int hugetlbfs_init_fs_context(struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx;

	ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->max_hpages	= -1; /* No limit on size by default */
	ctx->nr_inodes	= -1; /* No limit on number of inodes by default */
	ctx->uid	= current_fsuid();
	ctx->gid	= current_fsgid();
	ctx->mode	= 0755;
	ctx->hstate	= &default_hstate;
	ctx->min_hpages	= -1; /* No default minimum size */
	ctx->max_val_type = NO_SIZE;
	ctx->min_val_type = NO_SIZE;
	fc->fs_private = ctx;
	fc->ops	= &hugetlbfs_fs_context_ops;
	return 0;
}

static struct file_system_type hugetlbfs_fs_type = {
	.name			= "hugetlbfs",
	.init_fs_context	= hugetlbfs_init_fs_context,
	.parameters		= hugetlb_fs_parameters,
	.kill_sb		= kill_litter_super,
	.fs_flags		= FS_ALLOW_IDMAP,
};

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;
	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}

static int get_hstate_idx(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return hstate_index(h);
}
/*
 * Note that size should be aligned to the proper hugepage size by the
 * caller; otherwise hugetlb_reserve_pages reserves one fewer huge page
 * than intended.
 */
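/*
 * Worked example (a sketch, assuming 2M huge pages): size = 3M gives
 * size >> huge_page_shift == 1, so only one huge page is reserved even
 * though the file spans two; callers should round size up first.
 */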
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, int creat_flags,
				int page_size_log)
{
	struct inode *inode;
	struct vfsmount *mnt;
	int hstate_idx;
	struct file *file;

	hstate_idx = get_hstate_idx(page_size_log);
	if (hstate_idx < 0)
		return ERR_PTR(-ENODEV);

	mnt = hugetlbfs_vfsmount[hstate_idx];
	if (!mnt)
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		struct ucounts *ucounts = current_ucounts();

		if (user_shm_lock(size, ucounts)) {
			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is obsolete\n",
				current->comm, current->pid);
			user_shm_unlock(size, ucounts);
		}
		return ERR_PTR(-EPERM);
	}

	file = ERR_PTR(-ENOSPC);
	/* hugetlbfs_vfsmount[] mounts do not use idmapped mounts. */
	inode = hugetlbfs_get_inode(mnt->mnt_sb, &nop_mnt_idmap, NULL,
				    S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out;
	if (creat_flags == HUGETLB_SHMFS_INODE)
		inode->i_flags |= S_PRIVATE;

	inode->i_size = size;
	clear_nlink(inode);

	if (hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag) < 0)
		file = ERR_PTR(-ENOMEM);
	else
		file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
					&hugetlbfs_file_operations);
	if (!IS_ERR(file))
		return file;

	iput(inode);
out:
	return file;
}

static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
{
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT);
	if (IS_ERR(fc)) {
		mnt = ERR_CAST(fc);
	} else {
		struct hugetlbfs_fs_context *ctx = fc->fs_private;
		ctx->hstate = h;
		mnt = fc_mount_longterm(fc);
		put_fs_context(fc);
	}
	if (IS_ERR(mnt))
		pr_err("Cannot mount internal hugetlbfs for page size %luK",
		       huge_page_size(h) / SZ_1K);
	return mnt;
}

static int __init init_hugetlbfs_fs(void)
{
	struct vfsmount *mnt;
	struct hstate *h;
	int error;
	int i;

	if (!hugepages_supported()) {
		pr_info("disabling because there are no supported hugepage sizes\n");
		return -ENOTSUPP;
	}

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, SLAB_ACCOUNT, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out_free;

	/* default hstate mount is required */
	mnt = mount_one_hugetlbfs(&default_hstate);
	if (IS_ERR(mnt)) {
		error = PTR_ERR(mnt);
		goto out_unreg;
	}
	hugetlbfs_vfsmount[default_hstate_idx] = mnt;

	/* other hstates are optional */
	i = 0;
	for_each_hstate(h) {
		if (i == default_hstate_idx) {
			i++;
			continue;
		}

		mnt = mount_one_hugetlbfs(h);
		if (IS_ERR(mnt))
			hugetlbfs_vfsmount[i] = NULL;
		else
			hugetlbfs_vfsmount[i] = mnt;
		i++;
	}

	return 0;

 out_unreg:
	(void)unregister_filesystem(&hugetlbfs_fs_type);
 out_free:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
 out:
	return error;
}
fs_initcall(init_hugetlbfs_fs)