mm/shmem.c (thirdparty/linux.git blob, at commit "tmpfs: verify {g,u}id mount options correctly")
1 /*
2 * Resizable virtual memory filesystem for Linux.
3 *
4 * Copyright (C) 2000 Linus Torvalds.
5 * 2000 Transmeta Corp.
6 * 2000-2001 Christoph Rohland
7 * 2000-2001 SAP AG
8 * 2002 Red Hat Inc.
9 * Copyright (C) 2002-2011 Hugh Dickins.
10 * Copyright (C) 2011 Google Inc.
11 * Copyright (C) 2002-2005 VERITAS Software Corporation.
12 * Copyright (C) 2004 Andi Kleen, SuSE Labs
13 *
14 * Extended attribute support for tmpfs:
15 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
16 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
17 *
18 * tiny-shmem:
19 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
20 *
21 * This file is released under the GPL.
22 */
23
24 #include <linux/fs.h>
25 #include <linux/init.h>
26 #include <linux/vfs.h>
27 #include <linux/mount.h>
28 #include <linux/ramfs.h>
29 #include <linux/pagemap.h>
30 #include <linux/file.h>
31 #include <linux/fileattr.h>
32 #include <linux/mm.h>
33 #include <linux/random.h>
34 #include <linux/sched/signal.h>
35 #include <linux/export.h>
36 #include <linux/shmem_fs.h>
37 #include <linux/swap.h>
38 #include <linux/uio.h>
39 #include <linux/hugetlb.h>
40 #include <linux/fs_parser.h>
41 #include <linux/swapfile.h>
42 #include <linux/iversion.h>
43 #include "swap.h"
44
45 static struct vfsmount *shm_mnt;
46
47 #ifdef CONFIG_SHMEM
48 /*
49 * This virtual memory filesystem is heavily based on the ramfs. It
50 * extends ramfs by the ability to use swap and honor resource limits
51 * which makes it a completely usable filesystem.
52 */
53
54 #include <linux/xattr.h>
55 #include <linux/exportfs.h>
56 #include <linux/posix_acl.h>
57 #include <linux/posix_acl_xattr.h>
58 #include <linux/mman.h>
59 #include <linux/string.h>
60 #include <linux/slab.h>
61 #include <linux/backing-dev.h>
62 #include <linux/writeback.h>
63 #include <linux/pagevec.h>
64 #include <linux/percpu_counter.h>
65 #include <linux/falloc.h>
66 #include <linux/splice.h>
67 #include <linux/security.h>
68 #include <linux/swapops.h>
69 #include <linux/mempolicy.h>
70 #include <linux/namei.h>
71 #include <linux/ctype.h>
72 #include <linux/migrate.h>
73 #include <linux/highmem.h>
74 #include <linux/seq_file.h>
75 #include <linux/magic.h>
76 #include <linux/syscalls.h>
77 #include <linux/fcntl.h>
78 #include <uapi/linux/memfd.h>
79 #include <linux/rmap.h>
80 #include <linux/uuid.h>
81 #include <linux/quotaops.h>
82
83 #include <linux/uaccess.h>
84
85 #include "internal.h"
86
87 #define BLOCKS_PER_PAGE (PAGE_SIZE/512)
88 #define VM_ACCT(size) (PAGE_ALIGN(size) >> PAGE_SHIFT)
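/*
 * Illustrative arithmetic for VM_ACCT(), assuming the common PAGE_SIZE of
 * 4096 (PAGE_SHIFT == 12); an editorial sketch, not part of the upstream
 * file:
 *   VM_ACCT(0)    == 0 pages
 *   VM_ACCT(1)    == PAGE_ALIGN(1)    >> 12 == 4096 >> 12 == 1 page
 *   VM_ACCT(4096) == PAGE_ALIGN(4096) >> 12 == 4096 >> 12 == 1 page
 *   VM_ACCT(5000) == PAGE_ALIGN(5000) >> 12 == 8192 >> 12 == 2 pages
 * i.e. sizes are charged against the overcommit limits in whole pages,
 * rounded up.
 */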
89
90 /* Pretend that each entry is of this size in directory's i_size */
91 #define BOGO_DIRENT_SIZE 20
92
93 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
94 #define SHORT_SYMLINK_LEN 128
95
96 /*
97 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
98 * inode->i_private (with i_rwsem making sure that it has only one user at
99 * a time): we would prefer not to enlarge the shmem inode just for that.
100 */
101 struct shmem_falloc {
102 wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
103 pgoff_t start; /* start of range currently being fallocated */
104 pgoff_t next; /* the next page offset to be fallocated */
105 pgoff_t nr_falloced; /* how many new pages have been fallocated */
106 pgoff_t nr_unswapped; /* how often writepage refused to swap out */
107 };
108
109 struct shmem_options {
110 unsigned long long blocks;
111 unsigned long long inodes;
112 struct mempolicy *mpol;
113 kuid_t uid;
114 kgid_t gid;
115 umode_t mode;
116 bool full_inums;
117 int huge;
118 int seen;
119 bool noswap;
120 unsigned short quota_types;
121 struct shmem_quota_limits qlimits;
122 #define SHMEM_SEEN_BLOCKS 1
123 #define SHMEM_SEEN_INODES 2
124 #define SHMEM_SEEN_HUGE 4
125 #define SHMEM_SEEN_INUMS 8
126 #define SHMEM_SEEN_NOSWAP 16
127 #define SHMEM_SEEN_QUOTA 32
128 };
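/*
 * A minimal sketch of how the SHMEM_SEEN_* bits are used by the mount-option
 * parser (editorial illustration, not part of the upstream file): when an
 * option is given explicitly, the parser sets the value and records that
 * fact in ->seen, e.g.
 *
 *   ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
 *   ctx->seen |= SHMEM_SEEN_BLOCKS;
 *
 * so that a later remount can tell an omitted option apart from one that
 * merely equals the default.
 */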
129
130 #ifdef CONFIG_TMPFS
131 static unsigned long shmem_default_max_blocks(void)
132 {
133 return totalram_pages() / 2;
134 }
135
136 static unsigned long shmem_default_max_inodes(void)
137 {
138 unsigned long nr_pages = totalram_pages();
139
140 return min(nr_pages - totalhigh_pages(), nr_pages / 2);
141 }
142 #endif
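/*
 * Worked example for the defaults above (editorial, assuming 4 KiB pages):
 * on a 64-bit machine with 16 GiB of RAM, totalram_pages() is about
 * 4194304, so an unconfigured tmpfs mount is limited to roughly 2097152
 * blocks (8 GiB of data) and, since totalhigh_pages() is 0 there, the same
 * number of inodes.
 */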
143
144 static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
145 struct folio **foliop, enum sgp_type sgp,
146 gfp_t gfp, struct vm_area_struct *vma,
147 vm_fault_t *fault_type);
148
149 static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
150 {
151 return sb->s_fs_info;
152 }
153
154 /*
155 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
156 * for shared memory and for shared anonymous (/dev/zero) mappings
157 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
158 * consistent with the pre-accounting of private mappings ...
159 */
160 static inline int shmem_acct_size(unsigned long flags, loff_t size)
161 {
162 return (flags & VM_NORESERVE) ?
163 0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
164 }
165
166 static inline void shmem_unacct_size(unsigned long flags, loff_t size)
167 {
168 if (!(flags & VM_NORESERVE))
169 vm_unacct_memory(VM_ACCT(size));
170 }
171
172 static inline int shmem_reacct_size(unsigned long flags,
173 loff_t oldsize, loff_t newsize)
174 {
175 if (!(flags & VM_NORESERVE)) {
176 if (VM_ACCT(newsize) > VM_ACCT(oldsize))
177 return security_vm_enough_memory_mm(current->mm,
178 VM_ACCT(newsize) - VM_ACCT(oldsize));
179 else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
180 vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
181 }
182 return 0;
183 }
184
185 /*
186 * ... whereas tmpfs objects are accounted incrementally as
187 * pages are allocated, in order to allow large sparse files.
188 * shmem_get_folio reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
189 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
190 */
191 static inline int shmem_acct_block(unsigned long flags, long pages)
192 {
193 if (!(flags & VM_NORESERVE))
194 return 0;
195
196 return security_vm_enough_memory_mm(current->mm,
197 pages * VM_ACCT(PAGE_SIZE));
198 }
199
200 static inline void shmem_unacct_blocks(unsigned long flags, long pages)
201 {
202 if (flags & VM_NORESERVE)
203 vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
204 }
205
206 static int shmem_inode_acct_block(struct inode *inode, long pages)
207 {
208 struct shmem_inode_info *info = SHMEM_I(inode);
209 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
210 int err = -ENOSPC;
211
212 if (shmem_acct_block(info->flags, pages))
213 return err;
214
215 might_sleep(); /* when quotas */
216 if (sbinfo->max_blocks) {
217 if (percpu_counter_compare(&sbinfo->used_blocks,
218 sbinfo->max_blocks - pages) > 0)
219 goto unacct;
220
221 err = dquot_alloc_block_nodirty(inode, pages);
222 if (err)
223 goto unacct;
224
225 percpu_counter_add(&sbinfo->used_blocks, pages);
226 } else {
227 err = dquot_alloc_block_nodirty(inode, pages);
228 if (err)
229 goto unacct;
230 }
231
232 return 0;
233
234 unacct:
235 shmem_unacct_blocks(info->flags, pages);
236 return err;
237 }
238
239 static void shmem_inode_unacct_blocks(struct inode *inode, long pages)
240 {
241 struct shmem_inode_info *info = SHMEM_I(inode);
242 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
243
244 might_sleep(); /* when quotas */
245 dquot_free_block_nodirty(inode, pages);
246
247 if (sbinfo->max_blocks)
248 percpu_counter_sub(&sbinfo->used_blocks, pages);
249 shmem_unacct_blocks(info->flags, pages);
250 }
251
252 static const struct super_operations shmem_ops;
253 const struct address_space_operations shmem_aops;
254 static const struct file_operations shmem_file_operations;
255 static const struct inode_operations shmem_inode_operations;
256 static const struct inode_operations shmem_dir_inode_operations;
257 static const struct inode_operations shmem_special_inode_operations;
258 static const struct vm_operations_struct shmem_vm_ops;
259 static const struct vm_operations_struct shmem_anon_vm_ops;
260 static struct file_system_type shmem_fs_type;
261
262 bool vma_is_anon_shmem(struct vm_area_struct *vma)
263 {
264 return vma->vm_ops == &shmem_anon_vm_ops;
265 }
266
267 bool vma_is_shmem(struct vm_area_struct *vma)
268 {
269 return vma_is_anon_shmem(vma) || vma->vm_ops == &shmem_vm_ops;
270 }
271
272 static LIST_HEAD(shmem_swaplist);
273 static DEFINE_MUTEX(shmem_swaplist_mutex);
274
275 #ifdef CONFIG_TMPFS_QUOTA
276
277 static int shmem_enable_quotas(struct super_block *sb,
278 unsigned short quota_types)
279 {
280 int type, err = 0;
281
282 sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
283 for (type = 0; type < SHMEM_MAXQUOTAS; type++) {
284 if (!(quota_types & (1 << type)))
285 continue;
286 err = dquot_load_quota_sb(sb, type, QFMT_SHMEM,
287 DQUOT_USAGE_ENABLED |
288 DQUOT_LIMITS_ENABLED);
289 if (err)
290 goto out_err;
291 }
292 return 0;
293
294 out_err:
295 pr_warn("tmpfs: failed to enable quota tracking (type=%d, err=%d)\n",
296 type, err);
297 for (type--; type >= 0; type--)
298 dquot_quota_off(sb, type);
299 return err;
300 }
301
302 static void shmem_disable_quotas(struct super_block *sb)
303 {
304 int type;
305
306 for (type = 0; type < SHMEM_MAXQUOTAS; type++)
307 dquot_quota_off(sb, type);
308 }
309
310 static struct dquot **shmem_get_dquots(struct inode *inode)
311 {
312 return SHMEM_I(inode)->i_dquot;
313 }
314 #endif /* CONFIG_TMPFS_QUOTA */
315
316 /*
317 * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
318 * produces a novel ino for the newly allocated inode.
319 *
320 * It may also be called when making a hard link to permit the space needed by
321 * each dentry. However, in that case, no new inode number is needed since that
322 * internally draws from another pool of inode numbers (currently global
323 * get_next_ino()). This case is indicated by passing NULL as inop.
324 */
325 #define SHMEM_INO_BATCH 1024
326 static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
327 {
328 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
329 ino_t ino;
330
331 if (!(sb->s_flags & SB_KERNMOUNT)) {
332 raw_spin_lock(&sbinfo->stat_lock);
333 if (sbinfo->max_inodes) {
334 if (!sbinfo->free_inodes) {
335 raw_spin_unlock(&sbinfo->stat_lock);
336 return -ENOSPC;
337 }
338 sbinfo->free_inodes--;
339 }
340 if (inop) {
341 ino = sbinfo->next_ino++;
342 if (unlikely(is_zero_ino(ino)))
343 ino = sbinfo->next_ino++;
344 if (unlikely(!sbinfo->full_inums &&
345 ino > UINT_MAX)) {
346 /*
347 * Emulate get_next_ino uint wraparound for
348 * compatibility
349 */
350 if (IS_ENABLED(CONFIG_64BIT))
351 pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
352 __func__, MINOR(sb->s_dev));
353 sbinfo->next_ino = 1;
354 ino = sbinfo->next_ino++;
355 }
356 *inop = ino;
357 }
358 raw_spin_unlock(&sbinfo->stat_lock);
359 } else if (inop) {
360 /*
361 * __shmem_file_setup, one of our callers, is lock-free: it
362 * doesn't hold stat_lock in shmem_reserve_inode since
363 * max_inodes is always 0, and is called from potentially
364 * unknown contexts. As such, use a per-cpu batched allocator
365 * which doesn't require the per-sb stat_lock unless we are at
366 * the batch boundary.
367 *
368 * We don't need to worry about inode{32,64} since SB_KERNMOUNT
369 * shmem mounts are not exposed to userspace, so we don't need
370 * to worry about things like glibc compatibility.
371 */
372 ino_t *next_ino;
373
374 next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
375 ino = *next_ino;
376 if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
377 raw_spin_lock(&sbinfo->stat_lock);
378 ino = sbinfo->next_ino;
379 sbinfo->next_ino += SHMEM_INO_BATCH;
380 raw_spin_unlock(&sbinfo->stat_lock);
381 if (unlikely(is_zero_ino(ino)))
382 ino++;
383 }
384 *inop = ino;
385 *next_ino = ++ino;
386 put_cpu();
387 }
388
389 return 0;
390 }
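/*
 * Rough illustration of the SB_KERNMOUNT path above (editorial sketch, not
 * part of the upstream file): with SHMEM_INO_BATCH == 1024, each CPU hands
 * out inode numbers from a private window and only takes stat_lock when the
 * window is exhausted, so in the common case reserving an inode for a
 * kernel-internal mount (e.g. a memfd backed by shm_mnt) is a lockless
 * per-cpu increment.
 */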
391
392 static void shmem_free_inode(struct super_block *sb)
393 {
394 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
395 if (sbinfo->max_inodes) {
396 raw_spin_lock(&sbinfo->stat_lock);
397 sbinfo->free_inodes++;
398 raw_spin_unlock(&sbinfo->stat_lock);
399 }
400 }
401
402 /**
403 * shmem_recalc_inode - recalculate the block usage of an inode
404 * @inode: inode to recalc
405 * @alloced: the change in number of pages allocated to inode
406 * @swapped: the change in number of pages swapped from inode
407 *
408 * We have to calculate the free blocks since the mm can drop
409 * undirtied hole pages behind our back.
410 *
411 * But normally info->alloced == inode->i_mapping->nrpages + info->swapped
412 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
413 */
414 static void shmem_recalc_inode(struct inode *inode, long alloced, long swapped)
415 {
416 struct shmem_inode_info *info = SHMEM_I(inode);
417 long freed;
418
419 spin_lock(&info->lock);
420 info->alloced += alloced;
421 info->swapped += swapped;
422 freed = info->alloced - info->swapped -
423 READ_ONCE(inode->i_mapping->nrpages);
424 /*
425 * Special case: whereas normally shmem_recalc_inode() is called
426 * after i_mapping->nrpages has already been adjusted (up or down),
427 * shmem_writepage() has to raise swapped before nrpages is lowered -
428 * to stop a racing shmem_recalc_inode() from thinking that a page has
429 * been freed. Compensate here, to avoid the need for a followup call.
430 */
431 if (swapped > 0)
432 freed += swapped;
433 if (freed > 0)
434 info->alloced -= freed;
435 spin_unlock(&info->lock);
436
437 /* The quota case may block */
438 if (freed > 0)
439 shmem_inode_unacct_blocks(inode, freed);
440 }
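/*
 * Worked example for the calculation above (editorial, illustrative only):
 * with info->alloced == 10, info->swapped == 2 and i_mapping->nrpages == 7,
 * freed == 10 - 2 - 7 == 1, meaning one undirtied hole page was dropped by
 * the mm behind our back; alloced is trimmed to 9 and one block is returned
 * via shmem_inode_unacct_blocks().
 */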
441
442 bool shmem_charge(struct inode *inode, long pages)
443 {
444 struct address_space *mapping = inode->i_mapping;
445
446 if (shmem_inode_acct_block(inode, pages))
447 return false;
448
449 /* nrpages adjustment first, then shmem_recalc_inode() when balanced */
450 xa_lock_irq(&mapping->i_pages);
451 mapping->nrpages += pages;
452 xa_unlock_irq(&mapping->i_pages);
453
454 shmem_recalc_inode(inode, pages, 0);
455 return true;
456 }
457
458 void shmem_uncharge(struct inode *inode, long pages)
459 {
460 /* pages argument is currently unused: keep it to help debugging */
461 /* nrpages adjustment done by __filemap_remove_folio() or caller */
462
463 shmem_recalc_inode(inode, 0, 0);
464 }
465
466 /*
467 * Replace item expected in xarray by a new item, while holding xa_lock.
468 */
469 static int shmem_replace_entry(struct address_space *mapping,
470 pgoff_t index, void *expected, void *replacement)
471 {
472 XA_STATE(xas, &mapping->i_pages, index);
473 void *item;
474
475 VM_BUG_ON(!expected);
476 VM_BUG_ON(!replacement);
477 item = xas_load(&xas);
478 if (item != expected)
479 return -ENOENT;
480 xas_store(&xas, replacement);
481 return 0;
482 }
483
484 /*
485 * Sometimes, before we decide whether to proceed or to fail, we must check
486 * that an entry was not already brought back from swap by a racing thread.
487 *
488 * Checking page is not enough: by the time a SwapCache page is locked, it
489 * might be reused, and again be SwapCache, using the same swap as before.
490 */
491 static bool shmem_confirm_swap(struct address_space *mapping,
492 pgoff_t index, swp_entry_t swap)
493 {
494 return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
495 }
496
497 /*
498 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
499 *
500 * SHMEM_HUGE_NEVER:
501 * disables huge pages for the mount;
502 * SHMEM_HUGE_ALWAYS:
503 * enables huge pages for the mount;
504 * SHMEM_HUGE_WITHIN_SIZE:
505 * only allocate huge pages if the page will be fully within i_size,
506 * also respect fadvise()/madvise() hints;
507 * SHMEM_HUGE_ADVISE:
508 * only allocate huge pages if requested with fadvise()/madvise();
509 */
510
511 #define SHMEM_HUGE_NEVER 0
512 #define SHMEM_HUGE_ALWAYS 1
513 #define SHMEM_HUGE_WITHIN_SIZE 2
514 #define SHMEM_HUGE_ADVISE 3
515
516 /*
517 * Special values.
518 * Only can be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
519 *
520 * SHMEM_HUGE_DENY:
521 * disables huge on shm_mnt and all mounts, for emergency use;
522 * SHMEM_HUGE_FORCE:
523 * enables huge on shm_mnt and all mounts, w/o needing option, for testing;
524 *
525 */
526 #define SHMEM_HUGE_DENY (-1)
527 #define SHMEM_HUGE_FORCE (-2)
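/*
 * Usage note (editorial, illustrative only): the per-mount policy comes from
 * the huge= mount option, while deny/force are reachable only via sysfs,
 * e.g.
 *
 *   mount -t tmpfs -o huge=within_size tmpfs /mnt/tmp
 *   echo force > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 */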
528
529 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
530 /* ifdef here to avoid bloating shmem.o when not necessary */
531
532 static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
533
534 bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
535 struct mm_struct *mm, unsigned long vm_flags)
536 {
537 loff_t i_size;
538
539 if (!S_ISREG(inode->i_mode))
540 return false;
541 if (mm && ((vm_flags & VM_NOHUGEPAGE) || test_bit(MMF_DISABLE_THP, &mm->flags)))
542 return false;
543 if (shmem_huge == SHMEM_HUGE_DENY)
544 return false;
545 if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
546 return true;
547
548 switch (SHMEM_SB(inode->i_sb)->huge) {
549 case SHMEM_HUGE_ALWAYS:
550 return true;
551 case SHMEM_HUGE_WITHIN_SIZE:
552 index = round_up(index + 1, HPAGE_PMD_NR);
553 i_size = round_up(i_size_read(inode), PAGE_SIZE);
554 if (i_size >> PAGE_SHIFT >= index)
555 return true;
556 fallthrough;
557 case SHMEM_HUGE_ADVISE:
558 if (mm && (vm_flags & VM_HUGEPAGE))
559 return true;
560 fallthrough;
561 default:
562 return false;
563 }
564 }
565
566 #if defined(CONFIG_SYSFS)
567 static int shmem_parse_huge(const char *str)
568 {
569 if (!strcmp(str, "never"))
570 return SHMEM_HUGE_NEVER;
571 if (!strcmp(str, "always"))
572 return SHMEM_HUGE_ALWAYS;
573 if (!strcmp(str, "within_size"))
574 return SHMEM_HUGE_WITHIN_SIZE;
575 if (!strcmp(str, "advise"))
576 return SHMEM_HUGE_ADVISE;
577 if (!strcmp(str, "deny"))
578 return SHMEM_HUGE_DENY;
579 if (!strcmp(str, "force"))
580 return SHMEM_HUGE_FORCE;
581 return -EINVAL;
582 }
583 #endif
584
585 #if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
586 static const char *shmem_format_huge(int huge)
587 {
588 switch (huge) {
589 case SHMEM_HUGE_NEVER:
590 return "never";
591 case SHMEM_HUGE_ALWAYS:
592 return "always";
593 case SHMEM_HUGE_WITHIN_SIZE:
594 return "within_size";
595 case SHMEM_HUGE_ADVISE:
596 return "advise";
597 case SHMEM_HUGE_DENY:
598 return "deny";
599 case SHMEM_HUGE_FORCE:
600 return "force";
601 default:
602 VM_BUG_ON(1);
603 return "bad_val";
604 }
605 }
606 #endif
607
608 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
609 struct shrink_control *sc, unsigned long nr_to_split)
610 {
611 LIST_HEAD(list), *pos, *next;
612 LIST_HEAD(to_remove);
613 struct inode *inode;
614 struct shmem_inode_info *info;
615 struct folio *folio;
616 unsigned long batch = sc ? sc->nr_to_scan : 128;
617 int split = 0;
618
619 if (list_empty(&sbinfo->shrinklist))
620 return SHRINK_STOP;
621
622 spin_lock(&sbinfo->shrinklist_lock);
623 list_for_each_safe(pos, next, &sbinfo->shrinklist) {
624 info = list_entry(pos, struct shmem_inode_info, shrinklist);
625
626 /* pin the inode */
627 inode = igrab(&info->vfs_inode);
628
629 /* inode is about to be evicted */
630 if (!inode) {
631 list_del_init(&info->shrinklist);
632 goto next;
633 }
634
635 /* Check if there's anything to gain */
636 if (round_up(inode->i_size, PAGE_SIZE) ==
637 round_up(inode->i_size, HPAGE_PMD_SIZE)) {
638 list_move(&info->shrinklist, &to_remove);
639 goto next;
640 }
641
642 list_move(&info->shrinklist, &list);
643 next:
644 sbinfo->shrinklist_len--;
645 if (!--batch)
646 break;
647 }
648 spin_unlock(&sbinfo->shrinklist_lock);
649
650 list_for_each_safe(pos, next, &to_remove) {
651 info = list_entry(pos, struct shmem_inode_info, shrinklist);
652 inode = &info->vfs_inode;
653 list_del_init(&info->shrinklist);
654 iput(inode);
655 }
656
657 list_for_each_safe(pos, next, &list) {
658 int ret;
659 pgoff_t index;
660
661 info = list_entry(pos, struct shmem_inode_info, shrinklist);
662 inode = &info->vfs_inode;
663
664 if (nr_to_split && split >= nr_to_split)
665 goto move_back;
666
667 index = (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT;
668 folio = filemap_get_folio(inode->i_mapping, index);
669 if (IS_ERR(folio))
670 goto drop;
671
672 /* No huge page at the end of the file: nothing to split */
673 if (!folio_test_large(folio)) {
674 folio_put(folio);
675 goto drop;
676 }
677
678 /*
679 * Move the inode on the list back to shrinklist if we failed
680 * to lock the page at this time.
681 *
682 * Waiting for the lock may lead to deadlock in the
683 * reclaim path.
684 */
685 if (!folio_trylock(folio)) {
686 folio_put(folio);
687 goto move_back;
688 }
689
690 ret = split_folio(folio);
691 folio_unlock(folio);
692 folio_put(folio);
693
694 /* If split failed move the inode on the list back to shrinklist */
695 if (ret)
696 goto move_back;
697
698 split++;
699 drop:
700 list_del_init(&info->shrinklist);
701 goto put;
702 move_back:
703 /*
704 * Make sure the inode is either on the global list or deleted
705 * from any local list before iput() since it could be deleted
706 * in another thread once we put the inode (then the local list
707 * is corrupted).
708 */
709 spin_lock(&sbinfo->shrinklist_lock);
710 list_move(&info->shrinklist, &sbinfo->shrinklist);
711 sbinfo->shrinklist_len++;
712 spin_unlock(&sbinfo->shrinklist_lock);
713 put:
714 iput(inode);
715 }
716
717 return split;
718 }
719
720 static long shmem_unused_huge_scan(struct super_block *sb,
721 struct shrink_control *sc)
722 {
723 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
724
725 if (!READ_ONCE(sbinfo->shrinklist_len))
726 return SHRINK_STOP;
727
728 return shmem_unused_huge_shrink(sbinfo, sc, 0);
729 }
730
731 static long shmem_unused_huge_count(struct super_block *sb,
732 struct shrink_control *sc)
733 {
734 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
735 return READ_ONCE(sbinfo->shrinklist_len);
736 }
737 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
738
739 #define shmem_huge SHMEM_HUGE_DENY
740
741 bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
742 struct mm_struct *mm, unsigned long vm_flags)
743 {
744 return false;
745 }
746
747 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
748 struct shrink_control *sc, unsigned long nr_to_split)
749 {
750 return 0;
751 }
752 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
753
754 /*
755 * Like filemap_add_folio, but error if expected item has gone.
756 */
757 static int shmem_add_to_page_cache(struct folio *folio,
758 struct address_space *mapping,
759 pgoff_t index, void *expected, gfp_t gfp,
760 struct mm_struct *charge_mm)
761 {
762 XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
763 long nr = folio_nr_pages(folio);
764 int error;
765
766 VM_BUG_ON_FOLIO(index != round_down(index, nr), folio);
767 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
768 VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
769 VM_BUG_ON(expected && folio_test_large(folio));
770
771 folio_ref_add(folio, nr);
772 folio->mapping = mapping;
773 folio->index = index;
774
775 if (!folio_test_swapcache(folio)) {
776 error = mem_cgroup_charge(folio, charge_mm, gfp);
777 if (error) {
778 if (folio_test_pmd_mappable(folio)) {
779 count_vm_event(THP_FILE_FALLBACK);
780 count_vm_event(THP_FILE_FALLBACK_CHARGE);
781 }
782 goto error;
783 }
784 }
785 folio_throttle_swaprate(folio, gfp);
786
787 do {
788 xas_lock_irq(&xas);
789 if (expected != xas_find_conflict(&xas)) {
790 xas_set_err(&xas, -EEXIST);
791 goto unlock;
792 }
793 if (expected && xas_find_conflict(&xas)) {
794 xas_set_err(&xas, -EEXIST);
795 goto unlock;
796 }
797 xas_store(&xas, folio);
798 if (xas_error(&xas))
799 goto unlock;
800 if (folio_test_pmd_mappable(folio)) {
801 count_vm_event(THP_FILE_ALLOC);
802 __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
803 }
804 mapping->nrpages += nr;
805 __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
806 __lruvec_stat_mod_folio(folio, NR_SHMEM, nr);
807 unlock:
808 xas_unlock_irq(&xas);
809 } while (xas_nomem(&xas, gfp));
810
811 if (xas_error(&xas)) {
812 error = xas_error(&xas);
813 goto error;
814 }
815
816 return 0;
817 error:
818 folio->mapping = NULL;
819 folio_ref_sub(folio, nr);
820 return error;
821 }
822
823 /*
824 * Like delete_from_page_cache, but substitutes swap for @folio.
825 */
826 static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
827 {
828 struct address_space *mapping = folio->mapping;
829 long nr = folio_nr_pages(folio);
830 int error;
831
832 xa_lock_irq(&mapping->i_pages);
833 error = shmem_replace_entry(mapping, folio->index, folio, radswap);
834 folio->mapping = NULL;
835 mapping->nrpages -= nr;
836 __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
837 __lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
838 xa_unlock_irq(&mapping->i_pages);
839 folio_put(folio);
840 BUG_ON(error);
841 }
842
843 /*
844 * Remove swap entry from page cache, free the swap and its page cache.
845 */
846 static int shmem_free_swap(struct address_space *mapping,
847 pgoff_t index, void *radswap)
848 {
849 void *old;
850
851 old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
852 if (old != radswap)
853 return -ENOENT;
854 free_swap_and_cache(radix_to_swp_entry(radswap));
855 return 0;
856 }
857
858 /*
859 * Determine (in bytes) how many of the shmem object's pages mapped by the
860 * given offsets are swapped out.
861 *
862 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
863 * as long as the inode doesn't go away and racy results are not a problem.
864 */
865 unsigned long shmem_partial_swap_usage(struct address_space *mapping,
866 pgoff_t start, pgoff_t end)
867 {
868 XA_STATE(xas, &mapping->i_pages, start);
869 struct page *page;
870 unsigned long swapped = 0;
871
872 rcu_read_lock();
873 xas_for_each(&xas, page, end - 1) {
874 if (xas_retry(&xas, page))
875 continue;
876 if (xa_is_value(page))
877 swapped++;
878
879 if (need_resched()) {
880 xas_pause(&xas);
881 cond_resched_rcu();
882 }
883 }
884
885 rcu_read_unlock();
886
887 return swapped << PAGE_SHIFT;
888 }
889
890 /*
891 * Determine (in bytes) how many of the shmem object's pages mapped by the
892  * given vma are swapped out.
893 *
894 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
895 * as long as the inode doesn't go away and racy results are not a problem.
896 */
897 unsigned long shmem_swap_usage(struct vm_area_struct *vma)
898 {
899 struct inode *inode = file_inode(vma->vm_file);
900 struct shmem_inode_info *info = SHMEM_I(inode);
901 struct address_space *mapping = inode->i_mapping;
902 unsigned long swapped;
903
904 /* Be careful as we don't hold info->lock */
905 swapped = READ_ONCE(info->swapped);
906
907 /*
908 * The easier cases are when the shmem object has nothing in swap, or
909 * the vma maps it whole. Then we can simply use the stats that we
910 * already track.
911 */
912 if (!swapped)
913 return 0;
914
915 if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
916 return swapped << PAGE_SHIFT;
917
918 /* Here comes the more involved part */
919 return shmem_partial_swap_usage(mapping, vma->vm_pgoff,
920 vma->vm_pgoff + vma_pages(vma));
921 }
922
923 /*
924  * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
925 */
926 void shmem_unlock_mapping(struct address_space *mapping)
927 {
928 struct folio_batch fbatch;
929 pgoff_t index = 0;
930
931 folio_batch_init(&fbatch);
932 /*
933 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
934 */
935 while (!mapping_unevictable(mapping) &&
936 filemap_get_folios(mapping, &index, ~0UL, &fbatch)) {
937 check_move_unevictable_folios(&fbatch);
938 folio_batch_release(&fbatch);
939 cond_resched();
940 }
941 }
942
943 static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
944 {
945 struct folio *folio;
946
947 /*
948 * At first avoid shmem_get_folio(,,,SGP_READ): that fails
949 * beyond i_size, and reports fallocated folios as holes.
950 */
951 folio = filemap_get_entry(inode->i_mapping, index);
952 if (!folio)
953 return folio;
954 if (!xa_is_value(folio)) {
955 folio_lock(folio);
956 if (folio->mapping == inode->i_mapping)
957 return folio;
958 /* The folio has been swapped out */
959 folio_unlock(folio);
960 folio_put(folio);
961 }
962 /*
963 * But read a folio back from swap if any of it is within i_size
964 * (although in some cases this is just a waste of time).
965 */
966 folio = NULL;
967 shmem_get_folio(inode, index, &folio, SGP_READ);
968 return folio;
969 }
970
971 /*
972 * Remove range of pages and swap entries from page cache, and free them.
973 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
974 */
975 static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
976 bool unfalloc)
977 {
978 struct address_space *mapping = inode->i_mapping;
979 struct shmem_inode_info *info = SHMEM_I(inode);
980 pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
981 pgoff_t end = (lend + 1) >> PAGE_SHIFT;
982 struct folio_batch fbatch;
983 pgoff_t indices[PAGEVEC_SIZE];
984 struct folio *folio;
985 bool same_folio;
986 long nr_swaps_freed = 0;
987 pgoff_t index;
988 int i;
989
990 if (lend == -1)
991 end = -1; /* unsigned, so actually very big */
992
993 if (info->fallocend > start && info->fallocend <= end && !unfalloc)
994 info->fallocend = start;
995
996 folio_batch_init(&fbatch);
997 index = start;
998 while (index < end && find_lock_entries(mapping, &index, end - 1,
999 &fbatch, indices)) {
1000 for (i = 0; i < folio_batch_count(&fbatch); i++) {
1001 folio = fbatch.folios[i];
1002
1003 if (xa_is_value(folio)) {
1004 if (unfalloc)
1005 continue;
1006 nr_swaps_freed += !shmem_free_swap(mapping,
1007 indices[i], folio);
1008 continue;
1009 }
1010
1011 if (!unfalloc || !folio_test_uptodate(folio))
1012 truncate_inode_folio(mapping, folio);
1013 folio_unlock(folio);
1014 }
1015 folio_batch_remove_exceptionals(&fbatch);
1016 folio_batch_release(&fbatch);
1017 cond_resched();
1018 }
1019
1020 /*
1021 * When undoing a failed fallocate, we want none of the partial folio
1022 * zeroing and splitting below, but shall want to truncate the whole
1023 * folio when !uptodate indicates that it was added by this fallocate,
1024 * even when [lstart, lend] covers only a part of the folio.
1025 */
1026 if (unfalloc)
1027 goto whole_folios;
1028
1029 same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
1030 folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
1031 if (folio) {
1032 same_folio = lend < folio_pos(folio) + folio_size(folio);
1033 folio_mark_dirty(folio);
1034 if (!truncate_inode_partial_folio(folio, lstart, lend)) {
1035 start = folio->index + folio_nr_pages(folio);
1036 if (same_folio)
1037 end = folio->index;
1038 }
1039 folio_unlock(folio);
1040 folio_put(folio);
1041 folio = NULL;
1042 }
1043
1044 if (!same_folio)
1045 folio = shmem_get_partial_folio(inode, lend >> PAGE_SHIFT);
1046 if (folio) {
1047 folio_mark_dirty(folio);
1048 if (!truncate_inode_partial_folio(folio, lstart, lend))
1049 end = folio->index;
1050 folio_unlock(folio);
1051 folio_put(folio);
1052 }
1053
1054 whole_folios:
1055
1056 index = start;
1057 while (index < end) {
1058 cond_resched();
1059
1060 if (!find_get_entries(mapping, &index, end - 1, &fbatch,
1061 indices)) {
1062 /* If all gone or hole-punch or unfalloc, we're done */
1063 if (index == start || end != -1)
1064 break;
1065 /* But if truncating, restart to make sure all gone */
1066 index = start;
1067 continue;
1068 }
1069 for (i = 0; i < folio_batch_count(&fbatch); i++) {
1070 folio = fbatch.folios[i];
1071
1072 if (xa_is_value(folio)) {
1073 if (unfalloc)
1074 continue;
1075 if (shmem_free_swap(mapping, indices[i], folio)) {
1076 /* Swap was replaced by page: retry */
1077 index = indices[i];
1078 break;
1079 }
1080 nr_swaps_freed++;
1081 continue;
1082 }
1083
1084 folio_lock(folio);
1085
1086 if (!unfalloc || !folio_test_uptodate(folio)) {
1087 if (folio_mapping(folio) != mapping) {
1088 /* Page was replaced by swap: retry */
1089 folio_unlock(folio);
1090 index = indices[i];
1091 break;
1092 }
1093 VM_BUG_ON_FOLIO(folio_test_writeback(folio),
1094 folio);
1095 truncate_inode_folio(mapping, folio);
1096 }
1097 folio_unlock(folio);
1098 }
1099 folio_batch_remove_exceptionals(&fbatch);
1100 folio_batch_release(&fbatch);
1101 }
1102
1103 shmem_recalc_inode(inode, 0, -nr_swaps_freed);
1104 }
1105
1106 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
1107 {
1108 shmem_undo_range(inode, lstart, lend, false);
1109 inode->i_ctime = inode->i_mtime = current_time(inode);
1110 inode_inc_iversion(inode);
1111 }
1112 EXPORT_SYMBOL_GPL(shmem_truncate_range);
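/*
 * Editorial usage sketch (illustrative only): in-kernel callers use the
 * exported helper to drop page cache and swap for an inclusive byte range,
 * with -1 standing for end-of-file, e.g. shrinking a file down to newsize:
 *
 *   shmem_truncate_range(inode, newsize, (loff_t)-1);
 *
 * which is what shmem_setattr() below does on truncation.
 */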
1113
1114 static int shmem_getattr(struct mnt_idmap *idmap,
1115 const struct path *path, struct kstat *stat,
1116 u32 request_mask, unsigned int query_flags)
1117 {
1118 struct inode *inode = path->dentry->d_inode;
1119 struct shmem_inode_info *info = SHMEM_I(inode);
1120
1121 if (info->alloced - info->swapped != inode->i_mapping->nrpages)
1122 shmem_recalc_inode(inode, 0, 0);
1123
1124 if (info->fsflags & FS_APPEND_FL)
1125 stat->attributes |= STATX_ATTR_APPEND;
1126 if (info->fsflags & FS_IMMUTABLE_FL)
1127 stat->attributes |= STATX_ATTR_IMMUTABLE;
1128 if (info->fsflags & FS_NODUMP_FL)
1129 stat->attributes |= STATX_ATTR_NODUMP;
1130 stat->attributes_mask |= (STATX_ATTR_APPEND |
1131 STATX_ATTR_IMMUTABLE |
1132 STATX_ATTR_NODUMP);
1133 generic_fillattr(idmap, inode, stat);
1134
1135 if (shmem_is_huge(inode, 0, false, NULL, 0))
1136 stat->blksize = HPAGE_PMD_SIZE;
1137
1138 if (request_mask & STATX_BTIME) {
1139 stat->result_mask |= STATX_BTIME;
1140 stat->btime.tv_sec = info->i_crtime.tv_sec;
1141 stat->btime.tv_nsec = info->i_crtime.tv_nsec;
1142 }
1143
1144 return 0;
1145 }
1146
1147 static int shmem_setattr(struct mnt_idmap *idmap,
1148 struct dentry *dentry, struct iattr *attr)
1149 {
1150 struct inode *inode = d_inode(dentry);
1151 struct shmem_inode_info *info = SHMEM_I(inode);
1152 int error;
1153 bool update_mtime = false;
1154 bool update_ctime = true;
1155
1156 error = setattr_prepare(idmap, dentry, attr);
1157 if (error)
1158 return error;
1159
1160 if ((info->seals & F_SEAL_EXEC) && (attr->ia_valid & ATTR_MODE)) {
1161 if ((inode->i_mode ^ attr->ia_mode) & 0111) {
1162 return -EPERM;
1163 }
1164 }
1165
1166 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
1167 loff_t oldsize = inode->i_size;
1168 loff_t newsize = attr->ia_size;
1169
1170 /* protected by i_rwsem */
1171 if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
1172 (newsize > oldsize && (info->seals & F_SEAL_GROW)))
1173 return -EPERM;
1174
1175 if (newsize != oldsize) {
1176 error = shmem_reacct_size(SHMEM_I(inode)->flags,
1177 oldsize, newsize);
1178 if (error)
1179 return error;
1180 i_size_write(inode, newsize);
1181 update_mtime = true;
1182 } else {
1183 update_ctime = false;
1184 }
1185 if (newsize <= oldsize) {
1186 loff_t holebegin = round_up(newsize, PAGE_SIZE);
1187 if (oldsize > holebegin)
1188 unmap_mapping_range(inode->i_mapping,
1189 holebegin, 0, 1);
1190 if (info->alloced)
1191 shmem_truncate_range(inode,
1192 newsize, (loff_t)-1);
1193 /* unmap again to remove racily COWed private pages */
1194 if (oldsize > holebegin)
1195 unmap_mapping_range(inode->i_mapping,
1196 holebegin, 0, 1);
1197 }
1198 }
1199
1200 if (is_quota_modification(idmap, inode, attr)) {
1201 error = dquot_initialize(inode);
1202 if (error)
1203 return error;
1204 }
1205
1206 /* Transfer quota accounting */
1207 if (i_uid_needs_update(idmap, attr, inode) ||
1208 i_gid_needs_update(idmap, attr, inode)) {
1209 error = dquot_transfer(idmap, inode, attr);
1210
1211 if (error)
1212 return error;
1213 }
1214
1215 setattr_copy(idmap, inode, attr);
1216 if (attr->ia_valid & ATTR_MODE)
1217 error = posix_acl_chmod(idmap, dentry, inode->i_mode);
1218 if (!error && update_ctime) {
1219 inode->i_ctime = current_time(inode);
1220 if (update_mtime)
1221 inode->i_mtime = inode->i_ctime;
1222 inode_inc_iversion(inode);
1223 }
1224 return error;
1225 }
1226
1227 static void shmem_evict_inode(struct inode *inode)
1228 {
1229 struct shmem_inode_info *info = SHMEM_I(inode);
1230 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1231
1232 if (shmem_mapping(inode->i_mapping)) {
1233 shmem_unacct_size(info->flags, inode->i_size);
1234 inode->i_size = 0;
1235 mapping_set_exiting(inode->i_mapping);
1236 shmem_truncate_range(inode, 0, (loff_t)-1);
1237 if (!list_empty(&info->shrinklist)) {
1238 spin_lock(&sbinfo->shrinklist_lock);
1239 if (!list_empty(&info->shrinklist)) {
1240 list_del_init(&info->shrinklist);
1241 sbinfo->shrinklist_len--;
1242 }
1243 spin_unlock(&sbinfo->shrinklist_lock);
1244 }
1245 while (!list_empty(&info->swaplist)) {
1246 /* Wait while shmem_unuse() is scanning this inode... */
1247 wait_var_event(&info->stop_eviction,
1248 !atomic_read(&info->stop_eviction));
1249 mutex_lock(&shmem_swaplist_mutex);
1250 /* ...but beware of the race if we peeked too early */
1251 if (!atomic_read(&info->stop_eviction))
1252 list_del_init(&info->swaplist);
1253 mutex_unlock(&shmem_swaplist_mutex);
1254 }
1255 }
1256
1257 simple_xattrs_free(&info->xattrs);
1258 WARN_ON(inode->i_blocks);
1259 shmem_free_inode(inode->i_sb);
1260 clear_inode(inode);
1261 #ifdef CONFIG_TMPFS_QUOTA
1262 dquot_free_inode(inode);
1263 dquot_drop(inode);
1264 #endif
1265 }
1266
1267 static int shmem_find_swap_entries(struct address_space *mapping,
1268 pgoff_t start, struct folio_batch *fbatch,
1269 pgoff_t *indices, unsigned int type)
1270 {
1271 XA_STATE(xas, &mapping->i_pages, start);
1272 struct folio *folio;
1273 swp_entry_t entry;
1274
1275 rcu_read_lock();
1276 xas_for_each(&xas, folio, ULONG_MAX) {
1277 if (xas_retry(&xas, folio))
1278 continue;
1279
1280 if (!xa_is_value(folio))
1281 continue;
1282
1283 entry = radix_to_swp_entry(folio);
1284 /*
1285 * swapin error entries can be found in the mapping. But they're
1286 * deliberately ignored here as we've done everything we can do.
1287 */
1288 if (swp_type(entry) != type)
1289 continue;
1290
1291 indices[folio_batch_count(fbatch)] = xas.xa_index;
1292 if (!folio_batch_add(fbatch, folio))
1293 break;
1294
1295 if (need_resched()) {
1296 xas_pause(&xas);
1297 cond_resched_rcu();
1298 }
1299 }
1300 rcu_read_unlock();
1301
1302 return xas.xa_index;
1303 }
1304
1305 /*
1306 * Move the swapped pages for an inode to page cache. Returns the count
1307 * of pages swapped in, or the error in case of failure.
1308 */
1309 static int shmem_unuse_swap_entries(struct inode *inode,
1310 struct folio_batch *fbatch, pgoff_t *indices)
1311 {
1312 int i = 0;
1313 int ret = 0;
1314 int error = 0;
1315 struct address_space *mapping = inode->i_mapping;
1316
1317 for (i = 0; i < folio_batch_count(fbatch); i++) {
1318 struct folio *folio = fbatch->folios[i];
1319
1320 if (!xa_is_value(folio))
1321 continue;
1322 error = shmem_swapin_folio(inode, indices[i],
1323 &folio, SGP_CACHE,
1324 mapping_gfp_mask(mapping),
1325 NULL, NULL);
1326 if (error == 0) {
1327 folio_unlock(folio);
1328 folio_put(folio);
1329 ret++;
1330 }
1331 if (error == -ENOMEM)
1332 break;
1333 error = 0;
1334 }
1335 return error ? error : ret;
1336 }
1337
1338 /*
1339 * If swap found in inode, free it and move page from swapcache to filecache.
1340 */
1341 static int shmem_unuse_inode(struct inode *inode, unsigned int type)
1342 {
1343 struct address_space *mapping = inode->i_mapping;
1344 pgoff_t start = 0;
1345 struct folio_batch fbatch;
1346 pgoff_t indices[PAGEVEC_SIZE];
1347 int ret = 0;
1348
1349 do {
1350 folio_batch_init(&fbatch);
1351 shmem_find_swap_entries(mapping, start, &fbatch, indices, type);
1352 if (folio_batch_count(&fbatch) == 0) {
1353 ret = 0;
1354 break;
1355 }
1356
1357 ret = shmem_unuse_swap_entries(inode, &fbatch, indices);
1358 if (ret < 0)
1359 break;
1360
1361 start = indices[folio_batch_count(&fbatch) - 1];
1362 } while (true);
1363
1364 return ret;
1365 }
1366
1367 /*
1368 * Read all the shared memory data that resides in the swap
1369 * device 'type' back into memory, so the swap device can be
1370 * unused.
1371 */
1372 int shmem_unuse(unsigned int type)
1373 {
1374 struct shmem_inode_info *info, *next;
1375 int error = 0;
1376
1377 if (list_empty(&shmem_swaplist))
1378 return 0;
1379
1380 mutex_lock(&shmem_swaplist_mutex);
1381 list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
1382 if (!info->swapped) {
1383 list_del_init(&info->swaplist);
1384 continue;
1385 }
1386 /*
1387 * Drop the swaplist mutex while searching the inode for swap;
1388 * but before doing so, make sure shmem_evict_inode() will not
1389 * remove placeholder inode from swaplist, nor let it be freed
1390 * (igrab() would protect from unlink, but not from unmount).
1391 */
1392 atomic_inc(&info->stop_eviction);
1393 mutex_unlock(&shmem_swaplist_mutex);
1394
1395 error = shmem_unuse_inode(&info->vfs_inode, type);
1396 cond_resched();
1397
1398 mutex_lock(&shmem_swaplist_mutex);
1399 next = list_next_entry(info, swaplist);
1400 if (!info->swapped)
1401 list_del_init(&info->swaplist);
1402 if (atomic_dec_and_test(&info->stop_eviction))
1403 wake_up_var(&info->stop_eviction);
1404 if (error)
1405 break;
1406 }
1407 mutex_unlock(&shmem_swaplist_mutex);
1408
1409 return error;
1410 }
1411
1412 /*
1413 * Move the page from the page cache to the swap cache.
1414 */
1415 static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1416 {
1417 struct folio *folio = page_folio(page);
1418 struct address_space *mapping = folio->mapping;
1419 struct inode *inode = mapping->host;
1420 struct shmem_inode_info *info = SHMEM_I(inode);
1421 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1422 swp_entry_t swap;
1423 pgoff_t index;
1424
1425 /*
1426 * Our capabilities prevent regular writeback or sync from ever calling
1427 * shmem_writepage; but a stacking filesystem might use ->writepage of
1428 * its underlying filesystem, in which case tmpfs should write out to
1429 * swap only in response to memory pressure, and not for the writeback
1430 * threads or sync.
1431 */
1432 if (WARN_ON_ONCE(!wbc->for_reclaim))
1433 goto redirty;
1434
1435 if (WARN_ON_ONCE((info->flags & VM_LOCKED) || sbinfo->noswap))
1436 goto redirty;
1437
1438 if (!total_swap_pages)
1439 goto redirty;
1440
1441 /*
1442 * If /sys/kernel/mm/transparent_hugepage/shmem_enabled is "always" or
1443 * "force", drivers/gpu/drm/i915/gem/i915_gem_shmem.c gets huge pages,
1444 * and its shmem_writeback() needs them to be split when swapping.
1445 */
1446 if (folio_test_large(folio)) {
1447 /* Ensure the subpages are still dirty */
1448 folio_test_set_dirty(folio);
1449 if (split_huge_page(page) < 0)
1450 goto redirty;
1451 folio = page_folio(page);
1452 folio_clear_dirty(folio);
1453 }
1454
1455 index = folio->index;
1456
1457 /*
1458 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
1459 * value into swapfile.c, the only way we can correctly account for a
1460 * fallocated folio arriving here is now to initialize it and write it.
1461 *
1462 * That's okay for a folio already fallocated earlier, but if we have
1463 * not yet completed the fallocation, then (a) we want to keep track
1464 * of this folio in case we have to undo it, and (b) it may not be a
1465 * good idea to continue anyway, once we're pushing into swap. So
1466 * reactivate the folio, and let shmem_fallocate() quit when too many.
1467 */
1468 if (!folio_test_uptodate(folio)) {
1469 if (inode->i_private) {
1470 struct shmem_falloc *shmem_falloc;
1471 spin_lock(&inode->i_lock);
1472 shmem_falloc = inode->i_private;
1473 if (shmem_falloc &&
1474 !shmem_falloc->waitq &&
1475 index >= shmem_falloc->start &&
1476 index < shmem_falloc->next)
1477 shmem_falloc->nr_unswapped++;
1478 else
1479 shmem_falloc = NULL;
1480 spin_unlock(&inode->i_lock);
1481 if (shmem_falloc)
1482 goto redirty;
1483 }
1484 folio_zero_range(folio, 0, folio_size(folio));
1485 flush_dcache_folio(folio);
1486 folio_mark_uptodate(folio);
1487 }
1488
1489 swap = folio_alloc_swap(folio);
1490 if (!swap.val)
1491 goto redirty;
1492
1493 /*
1494 * Add inode to shmem_unuse()'s list of swapped-out inodes,
1495 * if it's not already there. Do it now before the folio is
1496 * moved to swap cache, when its pagelock no longer protects
1497 * the inode from eviction. But don't unlock the mutex until
1498 * we've incremented swapped, because shmem_unuse_inode() will
1499 * prune a !swapped inode from the swaplist under this mutex.
1500 */
1501 mutex_lock(&shmem_swaplist_mutex);
1502 if (list_empty(&info->swaplist))
1503 list_add(&info->swaplist, &shmem_swaplist);
1504
1505 if (add_to_swap_cache(folio, swap,
1506 __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
1507 NULL) == 0) {
1508 shmem_recalc_inode(inode, 0, 1);
1509 swap_shmem_alloc(swap);
1510 shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap));
1511
1512 mutex_unlock(&shmem_swaplist_mutex);
1513 BUG_ON(folio_mapped(folio));
1514 swap_writepage(&folio->page, wbc);
1515 return 0;
1516 }
1517
1518 mutex_unlock(&shmem_swaplist_mutex);
1519 put_swap_folio(folio, swap);
1520 redirty:
1521 folio_mark_dirty(folio);
1522 if (wbc->for_reclaim)
1523 return AOP_WRITEPAGE_ACTIVATE; /* Return with folio locked */
1524 folio_unlock(folio);
1525 return 0;
1526 }
1527
1528 #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
1529 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1530 {
1531 char buffer[64];
1532
1533 if (!mpol || mpol->mode == MPOL_DEFAULT)
1534 return; /* show nothing */
1535
1536 mpol_to_str(buffer, sizeof(buffer), mpol);
1537
1538 seq_printf(seq, ",mpol=%s", buffer);
1539 }
1540
1541 static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1542 {
1543 struct mempolicy *mpol = NULL;
1544 if (sbinfo->mpol) {
1545 raw_spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */
1546 mpol = sbinfo->mpol;
1547 mpol_get(mpol);
1548 raw_spin_unlock(&sbinfo->stat_lock);
1549 }
1550 return mpol;
1551 }
1552 #else /* !CONFIG_NUMA || !CONFIG_TMPFS */
1553 static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1554 {
1555 }
1556 static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1557 {
1558 return NULL;
1559 }
1560 #endif /* CONFIG_NUMA && CONFIG_TMPFS */
1561 #ifndef CONFIG_NUMA
1562 #define vm_policy vm_private_data
1563 #endif
1564
1565 static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
1566 struct shmem_inode_info *info, pgoff_t index)
1567 {
1568 /* Create a pseudo vma that just contains the policy */
1569 vma_init(vma, NULL);
1570 /* Bias interleave by inode number to distribute better across nodes */
1571 vma->vm_pgoff = index + info->vfs_inode.i_ino;
1572 vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
1573 }
1574
1575 static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
1576 {
1577 /* Drop reference taken by mpol_shared_policy_lookup() */
1578 mpol_cond_put(vma->vm_policy);
1579 }
1580
1581 static struct folio *shmem_swapin(swp_entry_t swap, gfp_t gfp,
1582 struct shmem_inode_info *info, pgoff_t index)
1583 {
1584 struct vm_area_struct pvma;
1585 struct page *page;
1586 struct vm_fault vmf = {
1587 .vma = &pvma,
1588 };
1589
1590 shmem_pseudo_vma_init(&pvma, info, index);
1591 page = swap_cluster_readahead(swap, gfp, &vmf);
1592 shmem_pseudo_vma_destroy(&pvma);
1593
1594 if (!page)
1595 return NULL;
1596 return page_folio(page);
1597 }
1598
1599 /*
1600 * Make sure huge_gfp is always more limited than limit_gfp.
1601 * Some of the flags set permissions, while others set limitations.
1602 */
1603 static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
1604 {
1605 gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
1606 gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY;
1607 gfp_t zoneflags = limit_gfp & GFP_ZONEMASK;
1608 gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK);
1609
1610 /* Allow allocations only from the originally specified zones. */
1611 result |= zoneflags;
1612
1613 /*
1614 * Minimize the result gfp by taking the union with the deny flags,
1615 * and the intersection of the allow flags.
1616 */
1617 result |= (limit_gfp & denyflags);
1618 result |= (huge_gfp & limit_gfp) & allowflags;
1619
1620 return result;
1621 }
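/*
 * Worked example for limit_gfp_mask() (editorial, illustrative only): if
 * huge_gfp asks for __GFP_FS but limit_gfp does not allow it, the
 * intersection over allowflags drops __GFP_FS from the result; deny flags
 * such as __GFP_NOWARN or __GFP_NORETRY present in limit_gfp are carried
 * over; and the zone bits always come from limit_gfp, so a DMA32-limited
 * caller cannot be widened by the huge mask.
 */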
1622
1623 static struct folio *shmem_alloc_hugefolio(gfp_t gfp,
1624 struct shmem_inode_info *info, pgoff_t index)
1625 {
1626 struct vm_area_struct pvma;
1627 struct address_space *mapping = info->vfs_inode.i_mapping;
1628 pgoff_t hindex;
1629 struct folio *folio;
1630
1631 hindex = round_down(index, HPAGE_PMD_NR);
1632 if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1,
1633 XA_PRESENT))
1634 return NULL;
1635
1636 shmem_pseudo_vma_init(&pvma, info, hindex);
1637 folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, &pvma, 0, true);
1638 shmem_pseudo_vma_destroy(&pvma);
1639 if (!folio)
1640 count_vm_event(THP_FILE_FALLBACK);
1641 return folio;
1642 }
1643
1644 static struct folio *shmem_alloc_folio(gfp_t gfp,
1645 struct shmem_inode_info *info, pgoff_t index)
1646 {
1647 struct vm_area_struct pvma;
1648 struct folio *folio;
1649
1650 shmem_pseudo_vma_init(&pvma, info, index);
1651 folio = vma_alloc_folio(gfp, 0, &pvma, 0, false);
1652 shmem_pseudo_vma_destroy(&pvma);
1653
1654 return folio;
1655 }
1656
1657 static struct folio *shmem_alloc_and_acct_folio(gfp_t gfp, struct inode *inode,
1658 pgoff_t index, bool huge)
1659 {
1660 struct shmem_inode_info *info = SHMEM_I(inode);
1661 struct folio *folio;
1662 int nr;
1663 int err;
1664
1665 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1666 huge = false;
1667 nr = huge ? HPAGE_PMD_NR : 1;
1668
1669 err = shmem_inode_acct_block(inode, nr);
1670 if (err)
1671 goto failed;
1672
1673 if (huge)
1674 folio = shmem_alloc_hugefolio(gfp, info, index);
1675 else
1676 folio = shmem_alloc_folio(gfp, info, index);
1677 if (folio) {
1678 __folio_set_locked(folio);
1679 __folio_set_swapbacked(folio);
1680 return folio;
1681 }
1682
1683 err = -ENOMEM;
1684 shmem_inode_unacct_blocks(inode, nr);
1685 failed:
1686 return ERR_PTR(err);
1687 }
1688
1689 /*
1690 * When a page is moved from swapcache to shmem filecache (either by the
1691 * usual swapin of shmem_get_folio_gfp(), or by the less common swapoff of
1692 * shmem_unuse_inode()), it may have been read in earlier from swap, in
1693 * ignorance of the mapping it belongs to. If that mapping has special
1694 * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
1695 * we may need to copy to a suitable page before moving to filecache.
1696 *
1697 * In a future release, this may well be extended to respect cpuset and
1698 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
1699 * but for now it is a simple matter of zone.
1700 */
1701 static bool shmem_should_replace_folio(struct folio *folio, gfp_t gfp)
1702 {
1703 return folio_zonenum(folio) > gfp_zone(gfp);
1704 }
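/*
 * Editorial example (illustrative only): for a mapping restricted to the
 * low 4GB (a gfp mask selecting ZONE_DMA32, as with the gma500 case noted
 * above), a folio that was swapped in to ZONE_NORMAL has folio_zonenum()
 * greater than gfp_zone(gfp), so shmem_should_replace_folio() returns true
 * and shmem_replace_folio() copies it into a suitably placed folio first.
 */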
1705
1706 static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
1707 struct shmem_inode_info *info, pgoff_t index)
1708 {
1709 struct folio *old, *new;
1710 struct address_space *swap_mapping;
1711 swp_entry_t entry;
1712 pgoff_t swap_index;
1713 int error;
1714
1715 old = *foliop;
1716 entry = folio_swap_entry(old);
1717 swap_index = swp_offset(entry);
1718 swap_mapping = swap_address_space(entry);
1719
1720 /*
1721 * We have arrived here because our zones are constrained, so don't
1722 * limit chance of success by further cpuset and node constraints.
1723 */
1724 gfp &= ~GFP_CONSTRAINT_MASK;
1725 VM_BUG_ON_FOLIO(folio_test_large(old), old);
1726 new = shmem_alloc_folio(gfp, info, index);
1727 if (!new)
1728 return -ENOMEM;
1729
1730 folio_get(new);
1731 folio_copy(new, old);
1732 flush_dcache_folio(new);
1733
1734 __folio_set_locked(new);
1735 __folio_set_swapbacked(new);
1736 folio_mark_uptodate(new);
1737 folio_set_swap_entry(new, entry);
1738 folio_set_swapcache(new);
1739
1740 /*
1741 * Our caller will very soon move newpage out of swapcache, but it's
1742 * a nice clean interface for us to replace oldpage by newpage there.
1743 */
1744 xa_lock_irq(&swap_mapping->i_pages);
1745 error = shmem_replace_entry(swap_mapping, swap_index, old, new);
1746 if (!error) {
1747 mem_cgroup_migrate(old, new);
1748 __lruvec_stat_mod_folio(new, NR_FILE_PAGES, 1);
1749 __lruvec_stat_mod_folio(new, NR_SHMEM, 1);
1750 __lruvec_stat_mod_folio(old, NR_FILE_PAGES, -1);
1751 __lruvec_stat_mod_folio(old, NR_SHMEM, -1);
1752 }
1753 xa_unlock_irq(&swap_mapping->i_pages);
1754
1755 if (unlikely(error)) {
1756 /*
1757 * Is this possible? I think not, now that our callers check
1758 * both PageSwapCache and page_private after getting page lock;
1759 * but be defensive. Reverse old to newpage for clear and free.
1760 */
1761 old = new;
1762 } else {
1763 folio_add_lru(new);
1764 *foliop = new;
1765 }
1766
1767 folio_clear_swapcache(old);
1768 old->private = NULL;
1769
1770 folio_unlock(old);
1771 folio_put_refs(old, 2);
1772 return error;
1773 }
1774
1775 static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
1776 struct folio *folio, swp_entry_t swap)
1777 {
1778 struct address_space *mapping = inode->i_mapping;
1779 swp_entry_t swapin_error;
1780 void *old;
1781
1782 swapin_error = make_swapin_error_entry();
1783 old = xa_cmpxchg_irq(&mapping->i_pages, index,
1784 swp_to_radix_entry(swap),
1785 swp_to_radix_entry(swapin_error), 0);
1786 if (old != swp_to_radix_entry(swap))
1787 return;
1788
1789 folio_wait_writeback(folio);
1790 delete_from_swap_cache(folio);
1791 /*
1792 * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks
1793 * won't be 0 when inode is released and thus trigger WARN_ON(i_blocks)
1794 * in shmem_evict_inode().
1795 */
1796 shmem_recalc_inode(inode, -1, -1);
1797 swap_free(swap);
1798 }
1799
1800 /*
1801 * Swap in the folio pointed to by *foliop.
1802 * Caller has to make sure that *foliop contains a valid swapped folio.
1803 * Returns 0 and the folio in foliop if success. On failure, returns the
1804 * error code and NULL in *foliop.
1805 */
1806 static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
1807 struct folio **foliop, enum sgp_type sgp,
1808 gfp_t gfp, struct vm_area_struct *vma,
1809 vm_fault_t *fault_type)
1810 {
1811 struct address_space *mapping = inode->i_mapping;
1812 struct shmem_inode_info *info = SHMEM_I(inode);
1813 struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL;
1814 struct swap_info_struct *si;
1815 struct folio *folio = NULL;
1816 swp_entry_t swap;
1817 int error;
1818
1819 VM_BUG_ON(!*foliop || !xa_is_value(*foliop));
1820 swap = radix_to_swp_entry(*foliop);
1821 *foliop = NULL;
1822
1823 if (is_swapin_error_entry(swap))
1824 return -EIO;
1825
1826 si = get_swap_device(swap);
1827 if (!si) {
1828 if (!shmem_confirm_swap(mapping, index, swap))
1829 return -EEXIST;
1830 else
1831 return -EINVAL;
1832 }
1833
1834 /* Look it up and read it in.. */
1835 folio = swap_cache_get_folio(swap, NULL, 0);
1836 if (!folio) {
1837 /* Or update major stats only when swapin succeeds?? */
1838 if (fault_type) {
1839 *fault_type |= VM_FAULT_MAJOR;
1840 count_vm_event(PGMAJFAULT);
1841 count_memcg_event_mm(charge_mm, PGMAJFAULT);
1842 }
1843 /* Here we actually start the io */
1844 folio = shmem_swapin(swap, gfp, info, index);
1845 if (!folio) {
1846 error = -ENOMEM;
1847 goto failed;
1848 }
1849 }
1850
1851 /* We have to do this with folio locked to prevent races */
1852 folio_lock(folio);
1853 if (!folio_test_swapcache(folio) ||
1854 folio_swap_entry(folio).val != swap.val ||
1855 !shmem_confirm_swap(mapping, index, swap)) {
1856 error = -EEXIST;
1857 goto unlock;
1858 }
1859 if (!folio_test_uptodate(folio)) {
1860 error = -EIO;
1861 goto failed;
1862 }
1863 folio_wait_writeback(folio);
1864
1865 /*
1866 * Some architectures may have to restore extra metadata to the
1867 * folio after reading from swap.
1868 */
1869 arch_swap_restore(swap, folio);
1870
1871 if (shmem_should_replace_folio(folio, gfp)) {
1872 error = shmem_replace_folio(&folio, gfp, info, index);
1873 if (error)
1874 goto failed;
1875 }
1876
1877 error = shmem_add_to_page_cache(folio, mapping, index,
1878 swp_to_radix_entry(swap), gfp,
1879 charge_mm);
1880 if (error)
1881 goto failed;
1882
1883 shmem_recalc_inode(inode, 0, -1);
1884
1885 if (sgp == SGP_WRITE)
1886 folio_mark_accessed(folio);
1887
1888 delete_from_swap_cache(folio);
1889 folio_mark_dirty(folio);
1890 swap_free(swap);
1891 put_swap_device(si);
1892
1893 *foliop = folio;
1894 return 0;
1895 failed:
1896 if (!shmem_confirm_swap(mapping, index, swap))
1897 error = -EEXIST;
1898 if (error == -EIO)
1899 shmem_set_folio_swapin_error(inode, index, folio, swap);
1900 unlock:
1901 if (folio) {
1902 folio_unlock(folio);
1903 folio_put(folio);
1904 }
1905 put_swap_device(si);
1906
1907 return error;
1908 }
1909
1910 /*
1911 * shmem_get_folio_gfp - find page in cache, or get from swap, or allocate
1912 *
1913 * If we allocate a new one we do not mark it dirty. That's up to the
1914 * vm. If we swap it in we mark it dirty, since we also free the swap
1915 * entry: a page cannot live in both the swap and page cache.
1916 *
1917 * vma, vmf, and fault_type are only supplied by shmem_fault:
1918 * otherwise they are NULL.
1919 */
1920 static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
1921 struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
1922 struct vm_area_struct *vma, struct vm_fault *vmf,
1923 vm_fault_t *fault_type)
1924 {
1925 struct address_space *mapping = inode->i_mapping;
1926 struct shmem_inode_info *info = SHMEM_I(inode);
1927 struct shmem_sb_info *sbinfo;
1928 struct mm_struct *charge_mm;
1929 struct folio *folio;
1930 pgoff_t hindex;
1931 gfp_t huge_gfp;
1932 int error;
1933 int once = 0;
1934 int alloced = 0;
1935
1936 if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
1937 return -EFBIG;
1938 repeat:
1939 if (sgp <= SGP_CACHE &&
1940 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1941 return -EINVAL;
1942 }
1943
1944 sbinfo = SHMEM_SB(inode->i_sb);
1945 charge_mm = vma ? vma->vm_mm : NULL;
1946
1947 folio = filemap_get_entry(mapping, index);
1948 if (folio && vma && userfaultfd_minor(vma)) {
1949 if (!xa_is_value(folio))
1950 folio_put(folio);
1951 *fault_type = handle_userfault(vmf, VM_UFFD_MINOR);
1952 return 0;
1953 }
1954
1955 if (xa_is_value(folio)) {
1956 error = shmem_swapin_folio(inode, index, &folio,
1957 sgp, gfp, vma, fault_type);
1958 if (error == -EEXIST)
1959 goto repeat;
1960
1961 *foliop = folio;
1962 return error;
1963 }
1964
1965 if (folio) {
1966 folio_lock(folio);
1967
1968 /* Has the folio been truncated or swapped out? */
1969 if (unlikely(folio->mapping != mapping)) {
1970 folio_unlock(folio);
1971 folio_put(folio);
1972 goto repeat;
1973 }
1974 if (sgp == SGP_WRITE)
1975 folio_mark_accessed(folio);
1976 if (folio_test_uptodate(folio))
1977 goto out;
1978 /* fallocated folio */
1979 if (sgp != SGP_READ)
1980 goto clear;
1981 folio_unlock(folio);
1982 folio_put(folio);
1983 }
1984
1985 /*
1986 * SGP_READ: succeed on hole, with NULL folio, letting caller zero.
1987 * SGP_NOALLOC: fail on hole, with NULL folio, letting caller fail.
1988 */
1989 *foliop = NULL;
1990 if (sgp == SGP_READ)
1991 return 0;
1992 if (sgp == SGP_NOALLOC)
1993 return -ENOENT;
1994
1995 /*
1996 * Fast cache lookup and swap lookup did not find it: allocate.
1997 */
1998
1999 if (vma && userfaultfd_missing(vma)) {
2000 *fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
2001 return 0;
2002 }
2003
2004 if (!shmem_is_huge(inode, index, false,
2005 vma ? vma->vm_mm : NULL, vma ? vma->vm_flags : 0))
2006 goto alloc_nohuge;
2007
2008 huge_gfp = vma_thp_gfp_mask(vma);
2009 huge_gfp = limit_gfp_mask(huge_gfp, gfp);
2010 folio = shmem_alloc_and_acct_folio(huge_gfp, inode, index, true);
2011 if (IS_ERR(folio)) {
2012 alloc_nohuge:
2013 folio = shmem_alloc_and_acct_folio(gfp, inode, index, false);
2014 }
2015 if (IS_ERR(folio)) {
2016 int retry = 5;
2017
2018 error = PTR_ERR(folio);
2019 folio = NULL;
2020 if (error != -ENOSPC)
2021 goto unlock;
2022 /*
2023 * Try to reclaim some space by splitting a large folio
2024 * beyond i_size on the filesystem.
2025 */
2026 while (retry--) {
2027 int ret;
2028
2029 ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
2030 if (ret == SHRINK_STOP)
2031 break;
2032 if (ret)
2033 goto alloc_nohuge;
2034 }
2035 goto unlock;
2036 }
2037
2038 hindex = round_down(index, folio_nr_pages(folio));
2039
2040 if (sgp == SGP_WRITE)
2041 __folio_set_referenced(folio);
2042
2043 error = shmem_add_to_page_cache(folio, mapping, hindex,
2044 NULL, gfp & GFP_RECLAIM_MASK,
2045 charge_mm);
2046 if (error)
2047 goto unacct;
2048
2049 folio_add_lru(folio);
2050 shmem_recalc_inode(inode, folio_nr_pages(folio), 0);
2051 alloced = true;
2052
2053 if (folio_test_pmd_mappable(folio) &&
2054 DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
2055 folio_next_index(folio) - 1) {
2056 /*
2057 * Part of the large folio is beyond i_size: subject
2058 * to shrink under memory pressure.
2059 */
2060 spin_lock(&sbinfo->shrinklist_lock);
2061 /*
2062 * _careful to defend against unlocked access to
2063 * ->shrink_list in shmem_unused_huge_shrink()
2064 */
2065 if (list_empty_careful(&info->shrinklist)) {
2066 list_add_tail(&info->shrinklist,
2067 &sbinfo->shrinklist);
2068 sbinfo->shrinklist_len++;
2069 }
2070 spin_unlock(&sbinfo->shrinklist_lock);
2071 }
2072
2073 /*
2074 * Let SGP_FALLOC use the SGP_WRITE optimization on a new folio.
2075 */
2076 if (sgp == SGP_FALLOC)
2077 sgp = SGP_WRITE;
2078 clear:
2079 /*
2080 * Let SGP_WRITE caller clear ends if write does not fill folio;
2081 * but SGP_FALLOC on a folio fallocated earlier must initialize
2082 * it now, lest undo on failure cancel our earlier guarantee.
2083 */
2084 if (sgp != SGP_WRITE && !folio_test_uptodate(folio)) {
2085 long i, n = folio_nr_pages(folio);
2086
2087 for (i = 0; i < n; i++)
2088 clear_highpage(folio_page(folio, i));
2089 flush_dcache_folio(folio);
2090 folio_mark_uptodate(folio);
2091 }
2092
2093 /* Perhaps the file has been truncated since we checked */
2094 if (sgp <= SGP_CACHE &&
2095 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
2096 if (alloced) {
2097 folio_clear_dirty(folio);
2098 filemap_remove_folio(folio);
2099 shmem_recalc_inode(inode, 0, 0);
2100 }
2101 error = -EINVAL;
2102 goto unlock;
2103 }
2104 out:
2105 *foliop = folio;
2106 return 0;
2107
2108 /*
2109 * Error recovery.
2110 */
2111 unacct:
2112 shmem_inode_unacct_blocks(inode, folio_nr_pages(folio));
2113
2114 if (folio_test_large(folio)) {
2115 folio_unlock(folio);
2116 folio_put(folio);
2117 goto alloc_nohuge;
2118 }
2119 unlock:
2120 if (folio) {
2121 folio_unlock(folio);
2122 folio_put(folio);
2123 }
2124 if (error == -ENOSPC && !once++) {
2125 shmem_recalc_inode(inode, 0, 0);
2126 goto repeat;
2127 }
2128 if (error == -EEXIST)
2129 goto repeat;
2130 return error;
2131 }
2132
2133 int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop,
2134 enum sgp_type sgp)
2135 {
2136 return shmem_get_folio_gfp(inode, index, foliop, sgp,
2137 mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
2138 }
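/*
 * Illustrative only (no such caller in this file): a typical in-kernel user
 * of shmem_get_folio() looks up the folio backing a byte offset, remembering
 * that the folio comes back locked and that SGP_READ reports a hole by
 * returning 0 with a NULL folio:
 *
 *	struct folio *folio;
 *	int err = shmem_get_folio(inode, pos >> PAGE_SHIFT, &folio, SGP_READ);
 *
 *	if (err)
 *		return err;
 *	if (!folio)
 *		return 0;	(hole: caller treats the range as zeroes)
 *	folio_unlock(folio);
 *	...
 *	folio_put(folio);
 *
 * SGP_CACHE/SGP_WRITE would allocate and clear a new folio instead of
 * reporting the hole.
 */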
2139
2140 /*
2141 * This is like autoremove_wake_function, but it removes the wait queue
2142 * entry unconditionally - even if something else had already woken the
2143 * target.
2144 */
2145 static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
2146 {
2147 int ret = default_wake_function(wait, mode, sync, key);
2148 list_del_init(&wait->entry);
2149 return ret;
2150 }
2151
2152 static vm_fault_t shmem_fault(struct vm_fault *vmf)
2153 {
2154 struct vm_area_struct *vma = vmf->vma;
2155 struct inode *inode = file_inode(vma->vm_file);
2156 gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
2157 struct folio *folio = NULL;
2158 int err;
2159 vm_fault_t ret = VM_FAULT_LOCKED;
2160
2161 /*
2162 * Trinity finds that probing a hole which tmpfs is punching can
2163 * prevent the hole-punch from ever completing: which in turn
2164 * locks writers out with its hold on i_rwsem. So refrain from
2165 * faulting pages into the hole while it's being punched. Although
2166 * shmem_undo_range() does remove the additions, it may be unable to
2167 * keep up, as each new page needs its own unmap_mapping_range() call,
2168 * and the i_mmap tree grows ever slower to scan if new vmas are added.
2169 *
2170 * It does not matter if we sometimes reach this check just before the
2171 * hole-punch begins, so that one fault then races with the punch:
2172 * we just need to make racing faults a rare case.
2173 *
2174 * The implementation below would be much simpler if we just used a
2175 * standard mutex or completion: but we cannot take i_rwsem in fault,
2176 * and bloating every shmem inode for this unlikely case would be sad.
2177 */
2178 if (unlikely(inode->i_private)) {
2179 struct shmem_falloc *shmem_falloc;
2180
2181 spin_lock(&inode->i_lock);
2182 shmem_falloc = inode->i_private;
2183 if (shmem_falloc &&
2184 shmem_falloc->waitq &&
2185 vmf->pgoff >= shmem_falloc->start &&
2186 vmf->pgoff < shmem_falloc->next) {
2187 struct file *fpin;
2188 wait_queue_head_t *shmem_falloc_waitq;
2189 DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
2190
2191 ret = VM_FAULT_NOPAGE;
2192 fpin = maybe_unlock_mmap_for_io(vmf, NULL);
2193 if (fpin)
2194 ret = VM_FAULT_RETRY;
2195
2196 shmem_falloc_waitq = shmem_falloc->waitq;
2197 prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
2198 TASK_UNINTERRUPTIBLE);
2199 spin_unlock(&inode->i_lock);
2200 schedule();
2201
2202 /*
2203 * shmem_falloc_waitq points into the shmem_fallocate()
2204 * stack of the hole-punching task: shmem_falloc_waitq
2205 * is usually invalid by the time we reach here, but
2206 * finish_wait() does not dereference it in that case;
2207 * though i_lock needed lest racing with wake_up_all().
2208 */
2209 spin_lock(&inode->i_lock);
2210 finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
2211 spin_unlock(&inode->i_lock);
2212
2213 if (fpin)
2214 fput(fpin);
2215 return ret;
2216 }
2217 spin_unlock(&inode->i_lock);
2218 }
2219
2220 err = shmem_get_folio_gfp(inode, vmf->pgoff, &folio, SGP_CACHE,
2221 gfp, vma, vmf, &ret);
2222 if (err)
2223 return vmf_error(err);
2224 if (folio)
2225 vmf->page = folio_file_page(folio, vmf->pgoff);
2226 return ret;
2227 }
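/*
 * Summary of the hole-punch handshake used above (its other half is in
 * shmem_fallocate() below): the punching task publishes a shmem_falloc
 * with a waitq in inode->i_private before truncating, and wake_up_all()s
 * it when done; shmem_fault() checks i_private under i_lock and, if the
 * faulting pgoff lies in [start, next), sleeps on that waitq instead of
 * instantiating a page, returning VM_FAULT_RETRY (mmap_lock dropped) or
 * VM_FAULT_NOPAGE so the access is simply retried afterwards.
 */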
2228
2229 unsigned long shmem_get_unmapped_area(struct file *file,
2230 unsigned long uaddr, unsigned long len,
2231 unsigned long pgoff, unsigned long flags)
2232 {
2233 unsigned long (*get_area)(struct file *,
2234 unsigned long, unsigned long, unsigned long, unsigned long);
2235 unsigned long addr;
2236 unsigned long offset;
2237 unsigned long inflated_len;
2238 unsigned long inflated_addr;
2239 unsigned long inflated_offset;
2240
2241 if (len > TASK_SIZE)
2242 return -ENOMEM;
2243
2244 get_area = current->mm->get_unmapped_area;
2245 addr = get_area(file, uaddr, len, pgoff, flags);
2246
2247 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
2248 return addr;
2249 if (IS_ERR_VALUE(addr))
2250 return addr;
2251 if (addr & ~PAGE_MASK)
2252 return addr;
2253 if (addr > TASK_SIZE - len)
2254 return addr;
2255
2256 if (shmem_huge == SHMEM_HUGE_DENY)
2257 return addr;
2258 if (len < HPAGE_PMD_SIZE)
2259 return addr;
2260 if (flags & MAP_FIXED)
2261 return addr;
2262 /*
2263 * Our priority is to support MAP_SHARED mapped hugely;
2264 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
2265 * But if caller specified an address hint and we allocated area there
2266 * successfully, respect that as before.
2267 */
2268 if (uaddr == addr)
2269 return addr;
2270
2271 if (shmem_huge != SHMEM_HUGE_FORCE) {
2272 struct super_block *sb;
2273
2274 if (file) {
2275 VM_BUG_ON(file->f_op != &shmem_file_operations);
2276 sb = file_inode(file)->i_sb;
2277 } else {
2278 /*
2279 * Called directly from mm/mmap.c, or drivers/char/mem.c
2280 * for "/dev/zero", to create a shared anonymous object.
2281 */
2282 if (IS_ERR(shm_mnt))
2283 return addr;
2284 sb = shm_mnt->mnt_sb;
2285 }
2286 if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
2287 return addr;
2288 }
2289
2290 offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
2291 if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
2292 return addr;
2293 if ((addr & (HPAGE_PMD_SIZE-1)) == offset)
2294 return addr;
2295
2296 inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
2297 if (inflated_len > TASK_SIZE)
2298 return addr;
2299 if (inflated_len < len)
2300 return addr;
2301
2302 inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags);
2303 if (IS_ERR_VALUE(inflated_addr))
2304 return addr;
2305 if (inflated_addr & ~PAGE_MASK)
2306 return addr;
2307
2308 inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
2309 inflated_addr += offset - inflated_offset;
2310 if (inflated_offset > offset)
2311 inflated_addr += HPAGE_PMD_SIZE;
2312
2313 if (inflated_addr > TASK_SIZE - len)
2314 return addr;
2315 return inflated_addr;
2316 }
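/*
 * Worked example of the inflation arithmetic above, with illustrative
 * numbers and HPAGE_PMD_SIZE == 2MB: for pgoff 0x120,
 *
 *	offset = (0x120 << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1) = 0x120000
 *
 * Suppose the over-sized request returns inflated_addr = 0x7f1234500000,
 * so inflated_offset = 0x100000.  Advancing by offset - inflated_offset
 * gives 0x7f1234520000, which is congruent to offset modulo 2MB: file
 * offsets that are PMD-aligned now land at PMD-aligned addresses, so the
 * mapping is eligible for huge faults without further adjustment.
 */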
2317
2318 #ifdef CONFIG_NUMA
2319 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
2320 {
2321 struct inode *inode = file_inode(vma->vm_file);
2322 return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
2323 }
2324
2325 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2326 unsigned long addr)
2327 {
2328 struct inode *inode = file_inode(vma->vm_file);
2329 pgoff_t index;
2330
2331 index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2332 return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
2333 }
2334 #endif
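/*
 * The lookup above keys the shared policy by file page index, so every
 * mapping of the same offset sees the same policy: e.g. (illustrative)
 * with vm_start 0x7f0000000000 and vm_pgoff 0x10, addr 0x7f0000003000
 * yields index (0x3000 >> PAGE_SHIFT) + 0x10 = 0x13.
 */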
2335
2336 int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
2337 {
2338 struct inode *inode = file_inode(file);
2339 struct shmem_inode_info *info = SHMEM_I(inode);
2340 int retval = -ENOMEM;
2341
2342 /*
2343 * What serializes the accesses to info->flags?
2344 * ipc_lock_object() when called from shmctl_do_lock(),
2345 * no serialization needed when called from shm_destroy().
2346 */
2347 if (lock && !(info->flags & VM_LOCKED)) {
2348 if (!user_shm_lock(inode->i_size, ucounts))
2349 goto out_nomem;
2350 info->flags |= VM_LOCKED;
2351 mapping_set_unevictable(file->f_mapping);
2352 }
2353 if (!lock && (info->flags & VM_LOCKED) && ucounts) {
2354 user_shm_unlock(inode->i_size, ucounts);
2355 info->flags &= ~VM_LOCKED;
2356 mapping_clear_unevictable(file->f_mapping);
2357 }
2358 retval = 0;
2359
2360 out_nomem:
2361 return retval;
2362 }
2363
2364 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
2365 {
2366 struct inode *inode = file_inode(file);
2367 struct shmem_inode_info *info = SHMEM_I(inode);
2368 int ret;
2369
2370 ret = seal_check_future_write(info->seals, vma);
2371 if (ret)
2372 return ret;
2373
2374 /* arm64 - allow memory tagging on RAM-based files */
2375 vm_flags_set(vma, VM_MTE_ALLOWED);
2376
2377 file_accessed(file);
2378 /* This is anonymous shared memory if it is unlinked at the time of mmap */
2379 if (inode->i_nlink)
2380 vma->vm_ops = &shmem_vm_ops;
2381 else
2382 vma->vm_ops = &shmem_anon_vm_ops;
2383 return 0;
2384 }
2385
2386 #ifdef CONFIG_TMPFS_XATTR
2387 static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
2388
2389 /*
2390 * chattr's fsflags are unrelated to extended attributes,
2391 * but tmpfs has chosen to enable them under the same config option.
2392 */
2393 static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
2394 {
2395 unsigned int i_flags = 0;
2396
2397 if (fsflags & FS_NOATIME_FL)
2398 i_flags |= S_NOATIME;
2399 if (fsflags & FS_APPEND_FL)
2400 i_flags |= S_APPEND;
2401 if (fsflags & FS_IMMUTABLE_FL)
2402 i_flags |= S_IMMUTABLE;
2403 /*
2404 * But FS_NODUMP_FL does not require any action in i_flags.
2405 */
2406 inode_set_flags(inode, i_flags, S_NOATIME | S_APPEND | S_IMMUTABLE);
2407 }
2408 #else
2409 static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
2410 {
2411 }
2412 #define shmem_initxattrs NULL
2413 #endif
2414
2415 static struct offset_ctx *shmem_get_offset_ctx(struct inode *inode)
2416 {
2417 return &SHMEM_I(inode)->dir_offsets;
2418 }
2419
2420 static struct inode *__shmem_get_inode(struct mnt_idmap *idmap,
2421 struct super_block *sb,
2422 struct inode *dir, umode_t mode,
2423 dev_t dev, unsigned long flags)
2424 {
2425 struct inode *inode;
2426 struct shmem_inode_info *info;
2427 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2428 ino_t ino;
2429 int err;
2430
2431 err = shmem_reserve_inode(sb, &ino);
2432 if (err)
2433 return ERR_PTR(err);
2434
2435 inode = new_inode(sb);
2438 if (!inode) {
2439 shmem_free_inode(sb);
2440 return ERR_PTR(-ENOSPC);
2441 }
2442
2443 inode->i_ino = ino;
2444 inode_init_owner(idmap, inode, dir, mode);
2445 inode->i_blocks = 0;
2446 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
2447 inode->i_generation = get_random_u32();
2448 info = SHMEM_I(inode);
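/*
 * SHMEM_I() is a container_of(): the shmem-private fields sit in front of
 * the embedded vfs_inode, so this memset zeroes exactly those fields while
 * leaving the struct inode that new_inode() just initialized untouched.
 */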
2449 memset(info, 0, (char *)inode - (char *)info);
2450 spin_lock_init(&info->lock);
2451 atomic_set(&info->stop_eviction, 0);
2452 info->seals = F_SEAL_SEAL;
2453 info->flags = flags & VM_NORESERVE;
2454 info->i_crtime = inode->i_mtime;
2455 info->fsflags = (dir == NULL) ? 0 :
2456 SHMEM_I(dir)->fsflags & SHMEM_FL_INHERITED;
2457 if (info->fsflags)
2458 shmem_set_inode_flags(inode, info->fsflags);
2459 INIT_LIST_HEAD(&info->shrinklist);
2460 INIT_LIST_HEAD(&info->swaplist);
2462 if (sbinfo->noswap)
2463 mapping_set_unevictable(inode->i_mapping);
2464 simple_xattrs_init(&info->xattrs);
2465 cache_no_acl(inode);
2466 mapping_set_large_folios(inode->i_mapping);
2467
2468 switch (mode & S_IFMT) {
2469 default:
2470 inode->i_op = &shmem_special_inode_operations;
2471 init_special_inode(inode, mode, dev);
2472 break;
2473 case S_IFREG:
2474 inode->i_mapping->a_ops = &shmem_aops;
2475 inode->i_op = &shmem_inode_operations;
2476 inode->i_fop = &shmem_file_operations;
2477 mpol_shared_policy_init(&info->policy,
2478 shmem_get_sbmpol(sbinfo));
2479 break;
2480 case S_IFDIR:
2481 inc_nlink(inode);
2482 /* Some things misbehave if size == 0 on a directory */
2483 inode->i_size = 2 * BOGO_DIRENT_SIZE;
2484 inode->i_op = &shmem_dir_inode_operations;
2485 inode->i_fop = &simple_offset_dir_operations;
2486 simple_offset_init(shmem_get_offset_ctx(inode));
2487 break;
2488 case S_IFLNK:
2489 /*
2490 * Must not load anything in the rbtree,
2491 * mpol_free_shared_policy will not be called.
2492 */
2493 mpol_shared_policy_init(&info->policy, NULL);
2494 break;
2495 }
2496
2497 lockdep_annotate_inode_mutex_key(inode);
2498 return inode;
2499 }
2500
2501 #ifdef CONFIG_TMPFS_QUOTA
2502 static struct inode *shmem_get_inode(struct mnt_idmap *idmap,
2503 struct super_block *sb, struct inode *dir,
2504 umode_t mode, dev_t dev, unsigned long flags)
2505 {
2506 int err;
2507 struct inode *inode;
2508
2509 inode = __shmem_get_inode(idmap, sb, dir, mode, dev, flags);
2510 if (IS_ERR(inode))
2511 return inode;
2512
2513 err = dquot_initialize(inode);
2514 if (err)
2515 goto errout;
2516
2517 err = dquot_alloc_inode(inode);
2518 if (err) {
2519 dquot_drop(inode);
2520 goto errout;
2521 }
2522 return inode;
2523
2524 errout:
2525 inode->i_flags |= S_NOQUOTA;
2526 iput(inode);
2527 return ERR_PTR(err);
2528 }
2529 #else
2530 static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap,
2531 struct super_block *sb, struct inode *dir,
2532 umode_t mode, dev_t dev, unsigned long flags)
2533 {
2534 return __shmem_get_inode(idmap, sb, dir, mode, dev, flags);
2535 }
2536 #endif /* CONFIG_TMPFS_QUOTA */
2537
2538 #ifdef CONFIG_USERFAULTFD
2539 int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
2540 struct vm_area_struct *dst_vma,
2541 unsigned long dst_addr,
2542 unsigned long src_addr,
2543 uffd_flags_t flags,
2544 struct folio **foliop)
2545 {
2546 struct inode *inode = file_inode(dst_vma->vm_file);
2547 struct shmem_inode_info *info = SHMEM_I(inode);
2548 struct address_space *mapping = inode->i_mapping;
2549 gfp_t gfp = mapping_gfp_mask(mapping);
2550 pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
2551 void *page_kaddr;
2552 struct folio *folio;
2553 int ret;
2554 pgoff_t max_off;
2555
2556 if (shmem_inode_acct_block(inode, 1)) {
2557 /*
2558 * We may have got a page, returned -ENOENT triggering a retry,
2559 * and now we find ourselves with -ENOMEM. Release the page, to
2560 * avoid a BUG_ON in our caller.
2561 */
2562 if (unlikely(*foliop)) {
2563 folio_put(*foliop);
2564 *foliop = NULL;
2565 }
2566 return -ENOMEM;
2567 }
2568
2569 if (!*foliop) {
2570 ret = -ENOMEM;
2571 folio = shmem_alloc_folio(gfp, info, pgoff);
2572 if (!folio)
2573 goto out_unacct_blocks;
2574
2575 if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) {
2576 page_kaddr = kmap_local_folio(folio, 0);
2577 /*
2578 * The read mmap_lock is held here. Despite the
2579 * mmap_lock being read recursive a deadlock is still
2580 * possible if a writer has taken a lock. For example:
2581 *
2582 * process A thread 1 takes read lock on own mmap_lock
2583 * process A thread 2 calls mmap, blocks taking write lock
2584 * process B thread 1 takes page fault, read lock on own mmap lock
2585 * process B thread 2 calls mmap, blocks taking write lock
2586 * process A thread 1 blocks taking read lock on process B
2587 * process B thread 1 blocks taking read lock on process A
2588 *
2589 * Disable page faults to prevent potential deadlock
2590 * and retry the copy outside the mmap_lock.
2591 */
2592 pagefault_disable();
2593 ret = copy_from_user(page_kaddr,
2594 (const void __user *)src_addr,
2595 PAGE_SIZE);
2596 pagefault_enable();
2597 kunmap_local(page_kaddr);
2598
2599 /* fallback to copy_from_user outside mmap_lock */
2600 if (unlikely(ret)) {
2601 *foliop = folio;
2602 ret = -ENOENT;
2603 /* don't free the page */
2604 goto out_unacct_blocks;
2605 }
2606
2607 flush_dcache_folio(folio);
2608 } else { /* ZEROPAGE */
2609 clear_user_highpage(&folio->page, dst_addr);
2610 }
2611 } else {
2612 folio = *foliop;
2613 VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
2614 *foliop = NULL;
2615 }
2616
2617 VM_BUG_ON(folio_test_locked(folio));
2618 VM_BUG_ON(folio_test_swapbacked(folio));
2619 __folio_set_locked(folio);
2620 __folio_set_swapbacked(folio);
2621 __folio_mark_uptodate(folio);
2622
2623 ret = -EFAULT;
2624 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2625 if (unlikely(pgoff >= max_off))
2626 goto out_release;
2627
2628 ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL,
2629 gfp & GFP_RECLAIM_MASK, dst_vma->vm_mm);
2630 if (ret)
2631 goto out_release;
2632
2633 ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
2634 &folio->page, true, flags);
2635 if (ret)
2636 goto out_delete_from_cache;
2637
2638 shmem_recalc_inode(inode, 1, 0);
2639 folio_unlock(folio);
2640 return 0;
2641 out_delete_from_cache:
2642 filemap_remove_folio(folio);
2643 out_release:
2644 folio_unlock(folio);
2645 folio_put(folio);
2646 out_unacct_blocks:
2647 shmem_inode_unacct_blocks(inode, 1);
2648 return ret;
2649 }
2650 #endif /* CONFIG_USERFAULTFD */
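/*
 * Sketch of the retry protocol above (the caller side is mfill_atomic()
 * in mm/userfaultfd.c): the first attempt copies with page faults
 * disabled; if copy_from_user() cannot finish, the folio is handed back
 * in *foliop together with -ENOENT, the caller drops mmap_lock, completes
 * the copy where faulting is safe, and calls shmem_mfill_atomic_pte()
 * again with *foliop still set, which then takes the branch that reuses
 * the already-populated folio instead of allocating a new one.
 */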
2651
2652 #ifdef CONFIG_TMPFS
2653 static const struct inode_operations shmem_symlink_inode_operations;
2654 static const struct inode_operations shmem_short_symlink_operations;
2655
2656 static int
2657 shmem_write_begin(struct file *file, struct address_space *mapping,
2658 loff_t pos, unsigned len,
2659 struct page **pagep, void **fsdata)
2660 {
2661 struct inode *inode = mapping->host;
2662 struct shmem_inode_info *info = SHMEM_I(inode);
2663 pgoff_t index = pos >> PAGE_SHIFT;
2664 struct folio *folio;
2665 int ret = 0;
2666
2667 /* i_rwsem is held by caller */
2668 if (unlikely(info->seals & (F_SEAL_GROW |
2669 F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
2670 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
2671 return -EPERM;
2672 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
2673 return -EPERM;
2674 }
2675
2676 ret = shmem_get_folio(inode, index, &folio, SGP_WRITE);
2677
2678 if (ret)
2679 return ret;
2680
2681 *pagep = folio_file_page(folio, index);
2682 if (PageHWPoison(*pagep)) {
2683 folio_unlock(folio);
2684 folio_put(folio);
2685 *pagep = NULL;
2686 return -EIO;
2687 }
2688
2689 return 0;
2690 }
2691
2692 static int
2693 shmem_write_end(struct file *file, struct address_space *mapping,
2694 loff_t pos, unsigned len, unsigned copied,
2695 struct page *page, void *fsdata)
2696 {
2697 struct folio *folio = page_folio(page);
2698 struct inode *inode = mapping->host;
2699
2700 if (pos + copied > inode->i_size)
2701 i_size_write(inode, pos + copied);
2702
2703 if (!folio_test_uptodate(folio)) {
2704 if (copied < folio_size(folio)) {
2705 size_t from = offset_in_folio(folio, pos);
2706 folio_zero_segments(folio, 0, from,
2707 from + copied, folio_size(folio));
2708 }
2709 folio_mark_uptodate(folio);
2710 }
2711 folio_mark_dirty(folio);
2712 folio_unlock(folio);
2713 folio_put(folio);
2714
2715 return copied;
2716 }
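/*
 * Caller-side sketch (generic_perform_write(), simplified): for each chunk
 *
 *	status = a_ops->write_begin(file, mapping, pos, bytes, &page, &fsdata);
 *	copied = copy_page_from_iter_atomic(page, offset, bytes, i);
 *	status = a_ops->write_end(file, mapping, pos, bytes, copied, page, fsdata);
 *
 * so shmem_write_end() must tolerate copied < len: a not-yet-uptodate folio
 * only becomes uptodate after its uncopied ends have been zeroed, and the
 * folio is always unlocked and released here, whatever was copied.
 */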
2717
2718 static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
2719 {
2720 struct file *file = iocb->ki_filp;
2721 struct inode *inode = file_inode(file);
2722 struct address_space *mapping = inode->i_mapping;
2723 pgoff_t index;
2724 unsigned long offset;
2725 int error = 0;
2726 ssize_t retval = 0;
2727 loff_t *ppos = &iocb->ki_pos;
2728
2729 index = *ppos >> PAGE_SHIFT;
2730 offset = *ppos & ~PAGE_MASK;
2731
2732 for (;;) {
2733 struct folio *folio = NULL;
2734 struct page *page = NULL;
2735 pgoff_t end_index;
2736 unsigned long nr, ret;
2737 loff_t i_size = i_size_read(inode);
2738
2739 end_index = i_size >> PAGE_SHIFT;
2740 if (index > end_index)
2741 break;
2742 if (index == end_index) {
2743 nr = i_size & ~PAGE_MASK;
2744 if (nr <= offset)
2745 break;
2746 }
2747
2748 error = shmem_get_folio(inode, index, &folio, SGP_READ);
2749 if (error) {
2750 if (error == -EINVAL)
2751 error = 0;
2752 break;
2753 }
2754 if (folio) {
2755 folio_unlock(folio);
2756
2757 page = folio_file_page(folio, index);
2758 if (PageHWPoison(page)) {
2759 folio_put(folio);
2760 error = -EIO;
2761 break;
2762 }
2763 }
2764
2765 /*
2766 * We must re-evaluate i_size after getting the folio, since reads
2767 * (unlike writes) are called without i_rwsem protection against truncate.
2768 */
2769 nr = PAGE_SIZE;
2770 i_size = i_size_read(inode);
2771 end_index = i_size >> PAGE_SHIFT;
2772 if (index == end_index) {
2773 nr = i_size & ~PAGE_MASK;
2774 if (nr <= offset) {
2775 if (folio)
2776 folio_put(folio);
2777 break;
2778 }
2779 }
2780 nr -= offset;
2781
2782 if (folio) {
2783 /*
2784 * If users can be writing to this page using arbitrary
2785 * virtual addresses, take care about potential aliasing
2786 * before reading the page on the kernel side.
2787 */
2788 if (mapping_writably_mapped(mapping))
2789 flush_dcache_page(page);
2790 /*
2791 * Mark the page accessed if we read the beginning.
2792 */
2793 if (!offset)
2794 folio_mark_accessed(folio);
2795 /*
2796 * Ok, we have the page, and it's up-to-date, so
2797 * now we can copy it to user space...
2798 */
2799 ret = copy_page_to_iter(page, offset, nr, to);
2800 folio_put(folio);
2801
2802 } else if (user_backed_iter(to)) {
2803 /*
2804 * Copy to user tends to be so well optimized, but
2805 * clear_user() not so much, that it is noticeably
2806 * faster to copy the zero page instead of clearing.
2807 */
2808 ret = copy_page_to_iter(ZERO_PAGE(0), offset, nr, to);
2809 } else {
2810 /*
2811 * But submitting the same page twice in a row to
2812 * splice() - or others? - can result in confusion:
2813 * so don't attempt that optimization on pipes etc.
2814 */
2815 ret = iov_iter_zero(nr, to);
2816 }
2817
2818 retval += ret;
2819 offset += ret;
2820 index += offset >> PAGE_SHIFT;
2821 offset &= ~PAGE_MASK;
2822
2823 if (!iov_iter_count(to))
2824 break;
2825 if (ret < nr) {
2826 error = -EFAULT;
2827 break;
2828 }
2829 cond_resched();
2830 }
2831
2832 *ppos = ((loff_t) index << PAGE_SHIFT) + offset;
2833 file_accessed(file);
2834 return retval ? retval : error;
2835 }
2836
2837 static bool zero_pipe_buf_get(struct pipe_inode_info *pipe,
2838 struct pipe_buffer *buf)
2839 {
2840 return true;
2841 }
2842
2843 static void zero_pipe_buf_release(struct pipe_inode_info *pipe,
2844 struct pipe_buffer *buf)
2845 {
2846 }
2847
2848 static bool zero_pipe_buf_try_steal(struct pipe_inode_info *pipe,
2849 struct pipe_buffer *buf)
2850 {
2851 return false;
2852 }
2853
2854 static const struct pipe_buf_operations zero_pipe_buf_ops = {
2855 .release = zero_pipe_buf_release,
2856 .try_steal = zero_pipe_buf_try_steal,
2857 .get = zero_pipe_buf_get,
2858 };
2859
2860 static size_t splice_zeropage_into_pipe(struct pipe_inode_info *pipe,
2861 loff_t fpos, size_t size)
2862 {
2863 size_t offset = fpos & ~PAGE_MASK;
2864
2865 size = min_t(size_t, size, PAGE_SIZE - offset);
2866
2867 if (!pipe_full(pipe->head, pipe->tail, pipe->max_usage)) {
2868 struct pipe_buffer *buf = pipe_head_buf(pipe);
2869
2870 *buf = (struct pipe_buffer) {
2871 .ops = &zero_pipe_buf_ops,
2872 .page = ZERO_PAGE(0),
2873 .offset = offset,
2874 .len = size,
2875 };
2876 pipe->head++;
2877 }
2878
2879 return size;
2880 }
2881
2882 static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
2883 struct pipe_inode_info *pipe,
2884 size_t len, unsigned int flags)
2885 {
2886 struct inode *inode = file_inode(in);
2887 struct address_space *mapping = inode->i_mapping;
2888 struct folio *folio = NULL;
2889 size_t total_spliced = 0, used, npages, n, part;
2890 loff_t isize;
2891 int error = 0;
2892
2893 /* Work out how much data we can actually add into the pipe */
2894 used = pipe_occupancy(pipe->head, pipe->tail);
2895 npages = max_t(ssize_t, pipe->max_usage - used, 0);
2896 len = min_t(size_t, len, npages * PAGE_SIZE);
2897
2898 do {
2899 if (*ppos >= i_size_read(inode))
2900 break;
2901
2902 error = shmem_get_folio(inode, *ppos / PAGE_SIZE, &folio,
2903 SGP_READ);
2904 if (error) {
2905 if (error == -EINVAL)
2906 error = 0;
2907 break;
2908 }
2909 if (folio) {
2910 folio_unlock(folio);
2911
2912 if (folio_test_hwpoison(folio) ||
2913 (folio_test_large(folio) &&
2914 folio_test_has_hwpoisoned(folio))) {
2915 error = -EIO;
2916 break;
2917 }
2918 }
2919
2920 /*
2921 * i_size must be checked after we know the pages are Uptodate.
2922 *
2923 * Checking i_size only after the folio is known uptodate lets us
2924 * calculate the correct value for "part", which means the zero-filled
2925 * part of the page is not copied back to userspace (unless
2926 * another truncate extends the file - this is desired though).
2927 */
2928 isize = i_size_read(inode);
2929 if (unlikely(*ppos >= isize))
2930 break;
2931 part = min_t(loff_t, isize - *ppos, len);
2932
2933 if (folio) {
2934 /*
2935 * If users can be writing to this page using arbitrary
2936 * virtual addresses, take care about potential aliasing
2937 * before reading the page on the kernel side.
2938 */
2939 if (mapping_writably_mapped(mapping))
2940 flush_dcache_folio(folio);
2941 folio_mark_accessed(folio);
2942 /*
2943 * Ok, we have the page, and it's up-to-date, so we can
2944 * now splice it into the pipe.
2945 */
2946 n = splice_folio_into_pipe(pipe, folio, *ppos, part);
2947 folio_put(folio);
2948 folio = NULL;
2949 } else {
2950 n = splice_zeropage_into_pipe(pipe, *ppos, part);
2951 }
2952
2953 if (!n)
2954 break;
2955 len -= n;
2956 total_spliced += n;
2957 *ppos += n;
2958 in->f_ra.prev_pos = *ppos;
2959 if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
2960 break;
2961
2962 cond_resched();
2963 } while (len);
2964
2965 if (folio)
2966 folio_put(folio);
2967
2968 file_accessed(in);
2969 return total_spliced ? total_spliced : error;
2970 }
2971
2972 static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
2973 {
2974 struct address_space *mapping = file->f_mapping;
2975 struct inode *inode = mapping->host;
2976
2977 if (whence != SEEK_DATA && whence != SEEK_HOLE)
2978 return generic_file_llseek_size(file, offset, whence,
2979 MAX_LFS_FILESIZE, i_size_read(inode));
2980 if (offset < 0)
2981 return -ENXIO;
2982
2983 inode_lock(inode);
2984 /* We're holding i_rwsem so we can access i_size directly */
2985 offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence);
2986 if (offset >= 0)
2987 offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
2988 inode_unlock(inode);
2989 return offset;
2990 }
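/*
 * Userspace view of the above (illustrative): on a sparse tmpfs file,
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);
 *	off_t hole = lseek(fd, data, SEEK_HOLE);
 *
 * hops from one populated extent to the next.  "Data" here is any offset
 * backed by a page or a swap entry; offsets at or beyond end of file give
 * -ENXIO, as lseek(2) specifies.
 */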
2991
2992 static long shmem_fallocate(struct file *file, int mode, loff_t offset,
2993 loff_t len)
2994 {
2995 struct inode *inode = file_inode(file);
2996 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2997 struct shmem_inode_info *info = SHMEM_I(inode);
2998 struct shmem_falloc shmem_falloc;
2999 pgoff_t start, index, end, undo_fallocend;
3000 int error;
3001
3002 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
3003 return -EOPNOTSUPP;
3004
3005 inode_lock(inode);
3006
3007 if (mode & FALLOC_FL_PUNCH_HOLE) {
3008 struct address_space *mapping = file->f_mapping;
3009 loff_t unmap_start = round_up(offset, PAGE_SIZE);
3010 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
3011 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
3012
3013 /* protected by i_rwsem */
3014 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
3015 error = -EPERM;
3016 goto out;
3017 }
3018
3019 shmem_falloc.waitq = &shmem_falloc_waitq;
3020 shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT;
3021 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
3022 spin_lock(&inode->i_lock);
3023 inode->i_private = &shmem_falloc;
3024 spin_unlock(&inode->i_lock);
3025
3026 if ((u64)unmap_end > (u64)unmap_start)
3027 unmap_mapping_range(mapping, unmap_start,
3028 1 + unmap_end - unmap_start, 0);
3029 shmem_truncate_range(inode, offset, offset + len - 1);
3030 /* No need to unmap again: hole-punching leaves COWed pages */
3031
3032 spin_lock(&inode->i_lock);
3033 inode->i_private = NULL;
3034 wake_up_all(&shmem_falloc_waitq);
3035 WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
3036 spin_unlock(&inode->i_lock);
3037 error = 0;
3038 goto out;
3039 }
3040
3041 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
3042 error = inode_newsize_ok(inode, offset + len);
3043 if (error)
3044 goto out;
3045
3046 if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
3047 error = -EPERM;
3048 goto out;
3049 }
3050
3051 start = offset >> PAGE_SHIFT;
3052 end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
3053 /* Try to avoid a swapstorm if len is impossible to satisfy */
3054 if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
3055 error = -ENOSPC;
3056 goto out;
3057 }
3058
3059 shmem_falloc.waitq = NULL;
3060 shmem_falloc.start = start;
3061 shmem_falloc.next = start;
3062 shmem_falloc.nr_falloced = 0;
3063 shmem_falloc.nr_unswapped = 0;
3064 spin_lock(&inode->i_lock);
3065 inode->i_private = &shmem_falloc;
3066 spin_unlock(&inode->i_lock);
3067
3068 /*
3069 * info->fallocend is only relevant when huge pages might be
3070 * involved: to prevent split_huge_page() freeing fallocated
3071 * pages when FALLOC_FL_KEEP_SIZE committed beyond i_size.
3072 */
3073 undo_fallocend = info->fallocend;
3074 if (info->fallocend < end)
3075 info->fallocend = end;
3076
3077 for (index = start; index < end; ) {
3078 struct folio *folio;
3079
3080 /*
3081 * Good, the fallocate(2) manpage permits EINTR: we may have
3082 * been interrupted because we are using up too much memory.
3083 */
3084 if (signal_pending(current))
3085 error = -EINTR;
3086 else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
3087 error = -ENOMEM;
3088 else
3089 error = shmem_get_folio(inode, index, &folio,
3090 SGP_FALLOC);
3091 if (error) {
3092 info->fallocend = undo_fallocend;
3093 /* Remove the !uptodate folios we added */
3094 if (index > start) {
3095 shmem_undo_range(inode,
3096 (loff_t)start << PAGE_SHIFT,
3097 ((loff_t)index << PAGE_SHIFT) - 1, true);
3098 }
3099 goto undone;
3100 }
3101
3102 /*
3103 * Here is a more important optimization than it appears:
3104 * a second SGP_FALLOC on the same large folio will clear it,
3105 * making it uptodate and un-undoable if we fail later.
3106 */
3107 index = folio_next_index(folio);
3108 /* Beware 32-bit wraparound */
3109 if (!index)
3110 index--;
3111
3112 /*
3113 * Inform shmem_writepage() how far we have reached.
3114 * No need for lock or barrier: we have the page lock.
3115 */
3116 if (!folio_test_uptodate(folio))
3117 shmem_falloc.nr_falloced += index - shmem_falloc.next;
3118 shmem_falloc.next = index;
3119
3120 /*
3121 * If !uptodate, leave it that way so that freeable folios
3122 * can be recognized if we need to rollback on error later.
3123 * But mark it dirty so that memory pressure will swap rather
3124 * than free the folios we are allocating (and SGP_CACHE folios
3125 * might still be clean: we now need to mark those dirty too).
3126 */
3127 folio_mark_dirty(folio);
3128 folio_unlock(folio);
3129 folio_put(folio);
3130 cond_resched();
3131 }
3132
3133 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
3134 i_size_write(inode, offset + len);
3135 undone:
3136 spin_lock(&inode->i_lock);
3137 inode->i_private = NULL;
3138 spin_unlock(&inode->i_lock);
3139 out:
3140 if (!error)
3141 file_modified(file);
3142 inode_unlock(inode);
3143 return error;
3144 }
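/*
 * Userspace view (illustrative) of the two modes handled above:
 *
 *	fallocate(fd, 0, offset, len);                    preallocate, may grow i_size
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, offset, len);  preallocate beyond i_size
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, offset, len);
 *
 * The VFS insists that PUNCH_HOLE is combined with KEEP_SIZE, which is why
 * the mode check at the top of shmem_fallocate() only needs to admit those
 * two flags.
 */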
3145
3146 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
3147 {
3148 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
3149
3150 buf->f_type = TMPFS_MAGIC;
3151 buf->f_bsize = PAGE_SIZE;
3152 buf->f_namelen = NAME_MAX;
3153 if (sbinfo->max_blocks) {
3154 buf->f_blocks = sbinfo->max_blocks;
3155 buf->f_bavail =
3156 buf->f_bfree = sbinfo->max_blocks -
3157 percpu_counter_sum(&sbinfo->used_blocks);
3158 }
3159 if (sbinfo->max_inodes) {
3160 buf->f_files = sbinfo->max_inodes;
3161 buf->f_ffree = sbinfo->free_inodes;
3162 }
3163 /* else leave those fields 0 like simple_statfs */
3164
3165 buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b);
3166
3167 return 0;
3168 }
3169
3170 /*
3171 * File creation. Allocate an inode, and we're done..
3172 */
3173 static int
3174 shmem_mknod(struct mnt_idmap *idmap, struct inode *dir,
3175 struct dentry *dentry, umode_t mode, dev_t dev)
3176 {
3177 struct inode *inode;
3178 int error;
3179
3180 inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, dev, VM_NORESERVE);
3181
3182 if (IS_ERR(inode))
3183 return PTR_ERR(inode);
3184
3185 error = simple_acl_create(dir, inode);
3186 if (error)
3187 goto out_iput;
3188 error = security_inode_init_security(inode, dir,
3189 &dentry->d_name,
3190 shmem_initxattrs, NULL);
3191 if (error && error != -EOPNOTSUPP)
3192 goto out_iput;
3193
3194 error = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
3195 if (error)
3196 goto out_iput;
3197
3198 dir->i_size += BOGO_DIRENT_SIZE;
3199 dir->i_ctime = dir->i_mtime = current_time(dir);
3200 inode_inc_iversion(dir);
3201 d_instantiate(dentry, inode);
3202 dget(dentry); /* Extra count - pin the dentry in core */
3203 return error;
3204
3205 out_iput:
3206 iput(inode);
3207 return error;
3208 }
3209
3210 static int
3211 shmem_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
3212 struct file *file, umode_t mode)
3213 {
3214 struct inode *inode;
3215 int error;
3216
3217 inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, 0, VM_NORESERVE);
3218
3219 if (IS_ERR(inode)) {
3220 error = PTR_ERR(inode);
3221 goto err_out;
3222 }
3223
3224 error = security_inode_init_security(inode, dir,
3225 NULL,
3226 shmem_initxattrs, NULL);
3227 if (error && error != -EOPNOTSUPP)
3228 goto out_iput;
3229 error = simple_acl_create(dir, inode);
3230 if (error)
3231 goto out_iput;
3232 d_tmpfile(file, inode);
3233
3234 err_out:
3235 return finish_open_simple(file, error);
3236 out_iput:
3237 iput(inode);
3238 return error;
3239 }
3240
3241 static int shmem_mkdir(struct mnt_idmap *idmap, struct inode *dir,
3242 struct dentry *dentry, umode_t mode)
3243 {
3244 int error;
3245
3246 error = shmem_mknod(idmap, dir, dentry, mode | S_IFDIR, 0);
3247 if (error)
3248 return error;
3249 inc_nlink(dir);
3250 return 0;
3251 }
3252
3253 static int shmem_create(struct mnt_idmap *idmap, struct inode *dir,
3254 struct dentry *dentry, umode_t mode, bool excl)
3255 {
3256 return shmem_mknod(idmap, dir, dentry, mode | S_IFREG, 0);
3257 }
3258
3259 /*
3260 * Link a file..
3261 */
3262 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
3263 {
3264 struct inode *inode = d_inode(old_dentry);
3265 int ret = 0;
3266
3267 /*
3268 * No ordinary (disk based) filesystem counts links as inodes;
3269 * but each new link needs a new dentry, pinning lowmem, and
3270 * tmpfs dentries cannot be pruned until they are unlinked.
3271 * But if an O_TMPFILE file is linked into the tmpfs, the
3272 * first link must skip that, to get the accounting right.
3273 */
3274 if (inode->i_nlink) {
3275 ret = shmem_reserve_inode(inode->i_sb, NULL);
3276 if (ret)
3277 goto out;
3278 }
3279
3280 ret = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
3281 if (ret) {
3282 if (inode->i_nlink)
3283 shmem_free_inode(inode->i_sb);
3284 goto out;
3285 }
3286
3287 dir->i_size += BOGO_DIRENT_SIZE;
3288 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
3289 inode_inc_iversion(dir);
3290 inc_nlink(inode);
3291 ihold(inode); /* New dentry reference */
3292 dget(dentry); /* Extra pinning count for the created dentry */
3293 d_instantiate(dentry, inode);
3294 out:
3295 return ret;
3296 }
3297
3298 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
3299 {
3300 struct inode *inode = d_inode(dentry);
3301
3302 if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
3303 shmem_free_inode(inode->i_sb);
3304
3305 simple_offset_remove(shmem_get_offset_ctx(dir), dentry);
3306
3307 dir->i_size -= BOGO_DIRENT_SIZE;
3308 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
3309 inode_inc_iversion(dir);
3310 drop_nlink(inode);
3311 dput(dentry); /* Undo the count from "create" - this does all the work */
3312 return 0;
3313 }
3314
3315 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
3316 {
3317 if (!simple_empty(dentry))
3318 return -ENOTEMPTY;
3319
3320 drop_nlink(d_inode(dentry));
3321 drop_nlink(dir);
3322 return shmem_unlink(dir, dentry);
3323 }
3324
3325 static int shmem_whiteout(struct mnt_idmap *idmap,
3326 struct inode *old_dir, struct dentry *old_dentry)
3327 {
3328 struct dentry *whiteout;
3329 int error;
3330
3331 whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
3332 if (!whiteout)
3333 return -ENOMEM;
3334
3335 error = shmem_mknod(idmap, old_dir, whiteout,
3336 S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
3337 dput(whiteout);
3338 if (error)
3339 return error;
3340
3341 /*
3342 * Cheat and hash the whiteout while the old dentry is still in
3343 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
3344 *
3345 * d_lookup() will consistently find one of them at this point,
3346 * not sure which one, but that isn't even important.
3347 */
3348 d_rehash(whiteout);
3349 return 0;
3350 }
3351
3352 /*
3353 * The VFS layer already does all the dentry stuff for rename,
3354 * we just have to decrement the usage count for the target if
3355 * it exists so that the VFS layer correctly frees it when it
3356 * gets overwritten.
3357 */
3358 static int shmem_rename2(struct mnt_idmap *idmap,
3359 struct inode *old_dir, struct dentry *old_dentry,
3360 struct inode *new_dir, struct dentry *new_dentry,
3361 unsigned int flags)
3362 {
3363 struct inode *inode = d_inode(old_dentry);
3364 int they_are_dirs = S_ISDIR(inode->i_mode);
3365 int error;
3366
3367 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
3368 return -EINVAL;
3369
3370 if (flags & RENAME_EXCHANGE)
3371 return simple_offset_rename_exchange(old_dir, old_dentry,
3372 new_dir, new_dentry);
3373
3374 if (!simple_empty(new_dentry))
3375 return -ENOTEMPTY;
3376
3377 if (flags & RENAME_WHITEOUT) {
3378 error = shmem_whiteout(idmap, old_dir, old_dentry);
3379 if (error)
3380 return error;
3381 }
3382
3383 simple_offset_remove(shmem_get_offset_ctx(old_dir), old_dentry);
3384 error = simple_offset_add(shmem_get_offset_ctx(new_dir), old_dentry);
3385 if (error)
3386 return error;
3387
3388 if (d_really_is_positive(new_dentry)) {
3389 (void) shmem_unlink(new_dir, new_dentry);
3390 if (they_are_dirs) {
3391 drop_nlink(d_inode(new_dentry));
3392 drop_nlink(old_dir);
3393 }
3394 } else if (they_are_dirs) {
3395 drop_nlink(old_dir);
3396 inc_nlink(new_dir);
3397 }
3398
3399 old_dir->i_size -= BOGO_DIRENT_SIZE;
3400 new_dir->i_size += BOGO_DIRENT_SIZE;
3401 old_dir->i_ctime = old_dir->i_mtime =
3402 new_dir->i_ctime = new_dir->i_mtime =
3403 inode->i_ctime = current_time(old_dir);
3404 inode_inc_iversion(old_dir);
3405 inode_inc_iversion(new_dir);
3406 return 0;
3407 }
3408
3409 static int shmem_symlink(struct mnt_idmap *idmap, struct inode *dir,
3410 struct dentry *dentry, const char *symname)
3411 {
3412 int error;
3413 int len;
3414 struct inode *inode;
3415 struct folio *folio;
3416
3417 len = strlen(symname) + 1;
3418 if (len > PAGE_SIZE)
3419 return -ENAMETOOLONG;
3420
3421 inode = shmem_get_inode(idmap, dir->i_sb, dir, S_IFLNK | 0777, 0,
3422 VM_NORESERVE);
3423
3424 if (IS_ERR(inode))
3425 return PTR_ERR(inode);
3426
3427 error = security_inode_init_security(inode, dir, &dentry->d_name,
3428 shmem_initxattrs, NULL);
3429 if (error && error != -EOPNOTSUPP)
3430 goto out_iput;
3431
3432 error = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
3433 if (error)
3434 goto out_iput;
3435
3436 inode->i_size = len-1;
3437 if (len <= SHORT_SYMLINK_LEN) {
3438 inode->i_link = kmemdup(symname, len, GFP_KERNEL);
3439 if (!inode->i_link) {
3440 error = -ENOMEM;
3441 goto out_remove_offset;
3442 }
3443 inode->i_op = &shmem_short_symlink_operations;
3444 } else {
3445 inode_nohighmem(inode);
3446 error = shmem_get_folio(inode, 0, &folio, SGP_WRITE);
3447 if (error)
3448 goto out_remove_offset;
3449 inode->i_mapping->a_ops = &shmem_aops;
3450 inode->i_op = &shmem_symlink_inode_operations;
3451 memcpy(folio_address(folio), symname, len);
3452 folio_mark_uptodate(folio);
3453 folio_mark_dirty(folio);
3454 folio_unlock(folio);
3455 folio_put(folio);
3456 }
3457 dir->i_size += BOGO_DIRENT_SIZE;
3458 dir->i_ctime = dir->i_mtime = current_time(dir);
3459 inode_inc_iversion(dir);
3460 d_instantiate(dentry, inode);
3461 dget(dentry);
3462 return 0;
3463
3464 out_remove_offset:
3465 simple_offset_remove(shmem_get_offset_ctx(dir), dentry);
3466 out_iput:
3467 iput(inode);
3468 return error;
3469 }
3470
3471 static void shmem_put_link(void *arg)
3472 {
3473 folio_mark_accessed(arg);
3474 folio_put(arg);
3475 }
3476
3477 static const char *shmem_get_link(struct dentry *dentry,
3478 struct inode *inode,
3479 struct delayed_call *done)
3480 {
3481 struct folio *folio = NULL;
3482 int error;
3483
3484 if (!dentry) {
3485 folio = filemap_get_folio(inode->i_mapping, 0);
3486 if (IS_ERR(folio))
3487 return ERR_PTR(-ECHILD);
3488 if (PageHWPoison(folio_page(folio, 0)) ||
3489 !folio_test_uptodate(folio)) {
3490 folio_put(folio);
3491 return ERR_PTR(-ECHILD);
3492 }
3493 } else {
3494 error = shmem_get_folio(inode, 0, &folio, SGP_READ);
3495 if (error)
3496 return ERR_PTR(error);
3497 if (!folio)
3498 return ERR_PTR(-ECHILD);
3499 if (PageHWPoison(folio_page(folio, 0))) {
3500 folio_unlock(folio);
3501 folio_put(folio);
3502 return ERR_PTR(-ECHILD);
3503 }
3504 folio_unlock(folio);
3505 }
3506 set_delayed_call(done, shmem_put_link, folio);
3507 return folio_address(folio);
3508 }
3509
3510 #ifdef CONFIG_TMPFS_XATTR
3511
3512 static int shmem_fileattr_get(struct dentry *dentry, struct fileattr *fa)
3513 {
3514 struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3515
3516 fileattr_fill_flags(fa, info->fsflags & SHMEM_FL_USER_VISIBLE);
3517
3518 return 0;
3519 }
3520
3521 static int shmem_fileattr_set(struct mnt_idmap *idmap,
3522 struct dentry *dentry, struct fileattr *fa)
3523 {
3524 struct inode *inode = d_inode(dentry);
3525 struct shmem_inode_info *info = SHMEM_I(inode);
3526
3527 if (fileattr_has_fsx(fa))
3528 return -EOPNOTSUPP;
3529 if (fa->flags & ~SHMEM_FL_USER_MODIFIABLE)
3530 return -EOPNOTSUPP;
3531
3532 info->fsflags = (info->fsflags & ~SHMEM_FL_USER_MODIFIABLE) |
3533 (fa->flags & SHMEM_FL_USER_MODIFIABLE);
3534
3535 shmem_set_inode_flags(inode, info->fsflags);
3536 inode->i_ctime = current_time(inode);
3537 inode_inc_iversion(inode);
3538 return 0;
3539 }
3540
3541 /*
3542 * Superblocks without xattr inode operations may get some security.* xattr
3543 * support from the LSM "for free". As soon as we have any other xattrs
3544 * like ACLs, we also need to implement the security.* handlers at
3545 * filesystem level, though.
3546 */
3547
3548 /*
3549 * Callback for security_inode_init_security() for acquiring xattrs.
3550 */
3551 static int shmem_initxattrs(struct inode *inode,
3552 const struct xattr *xattr_array,
3553 void *fs_info)
3554 {
3555 struct shmem_inode_info *info = SHMEM_I(inode);
3556 const struct xattr *xattr;
3557 struct simple_xattr *new_xattr;
3558 size_t len;
3559
3560 for (xattr = xattr_array; xattr->name != NULL; xattr++) {
3561 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
3562 if (!new_xattr)
3563 return -ENOMEM;
3564
3565 len = strlen(xattr->name) + 1;
3566 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
3567 GFP_KERNEL);
3568 if (!new_xattr->name) {
3569 kvfree(new_xattr);
3570 return -ENOMEM;
3571 }
3572
3573 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
3574 XATTR_SECURITY_PREFIX_LEN);
3575 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
3576 xattr->name, len);
3577
3578 simple_xattr_add(&info->xattrs, new_xattr);
3579 }
3580
3581 return 0;
3582 }
3583
3584 static int shmem_xattr_handler_get(const struct xattr_handler *handler,
3585 struct dentry *unused, struct inode *inode,
3586 const char *name, void *buffer, size_t size)
3587 {
3588 struct shmem_inode_info *info = SHMEM_I(inode);
3589
3590 name = xattr_full_name(handler, name);
3591 return simple_xattr_get(&info->xattrs, name, buffer, size);
3592 }
3593
3594 static int shmem_xattr_handler_set(const struct xattr_handler *handler,
3595 struct mnt_idmap *idmap,
3596 struct dentry *unused, struct inode *inode,
3597 const char *name, const void *value,
3598 size_t size, int flags)
3599 {
3600 struct shmem_inode_info *info = SHMEM_I(inode);
3601 int err;
3602
3603 name = xattr_full_name(handler, name);
3604 err = simple_xattr_set(&info->xattrs, name, value, size, flags, NULL);
3605 if (!err) {
3606 inode->i_ctime = current_time(inode);
3607 inode_inc_iversion(inode);
3608 }
3609 return err;
3610 }
3611
3612 static const struct xattr_handler shmem_security_xattr_handler = {
3613 .prefix = XATTR_SECURITY_PREFIX,
3614 .get = shmem_xattr_handler_get,
3615 .set = shmem_xattr_handler_set,
3616 };
3617
3618 static const struct xattr_handler shmem_trusted_xattr_handler = {
3619 .prefix = XATTR_TRUSTED_PREFIX,
3620 .get = shmem_xattr_handler_get,
3621 .set = shmem_xattr_handler_set,
3622 };
3623
3624 static const struct xattr_handler *shmem_xattr_handlers[] = {
3625 &shmem_security_xattr_handler,
3626 &shmem_trusted_xattr_handler,
3627 NULL
3628 };
3629
3630 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
3631 {
3632 struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3633 return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
3634 }
3635 #endif /* CONFIG_TMPFS_XATTR */
3636
3637 static const struct inode_operations shmem_short_symlink_operations = {
3638 .getattr = shmem_getattr,
3639 .setattr = shmem_setattr,
3640 .get_link = simple_get_link,
3641 #ifdef CONFIG_TMPFS_XATTR
3642 .listxattr = shmem_listxattr,
3643 #endif
3644 };
3645
3646 static const struct inode_operations shmem_symlink_inode_operations = {
3647 .getattr = shmem_getattr,
3648 .setattr = shmem_setattr,
3649 .get_link = shmem_get_link,
3650 #ifdef CONFIG_TMPFS_XATTR
3651 .listxattr = shmem_listxattr,
3652 #endif
3653 };
3654
3655 static struct dentry *shmem_get_parent(struct dentry *child)
3656 {
3657 return ERR_PTR(-ESTALE);
3658 }
3659
3660 static int shmem_match(struct inode *ino, void *vfh)
3661 {
3662 __u32 *fh = vfh;
3663 __u64 inum = fh[2];
3664 inum = (inum << 32) | fh[1];
3665 return ino->i_ino == inum && fh[0] == ino->i_generation;
3666 }
3667
3668 /* Find any alias of inode, but prefer a hashed alias */
3669 static struct dentry *shmem_find_alias(struct inode *inode)
3670 {
3671 struct dentry *alias = d_find_alias(inode);
3672
3673 return alias ?: d_find_any_alias(inode);
3674 }
3675
3676
3677 static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
3678 struct fid *fid, int fh_len, int fh_type)
3679 {
3680 struct inode *inode;
3681 struct dentry *dentry = NULL;
3682 u64 inum;
3683
3684 if (fh_len < 3)
3685 return NULL;
3686
3687 inum = fid->raw[2];
3688 inum = (inum << 32) | fid->raw[1];
3689
3690 inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
3691 shmem_match, fid->raw);
3692 if (inode) {
3693 dentry = shmem_find_alias(inode);
3694 iput(inode);
3695 }
3696
3697 return dentry;
3698 }
3699
3700 static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
3701 struct inode *parent)
3702 {
3703 if (*len < 3) {
3704 *len = 3;
3705 return FILEID_INVALID;
3706 }
3707
3708 if (inode_unhashed(inode)) {
3709 /* Unfortunately insert_inode_hash is not idempotent,
3710 * so as we hash inodes here rather than at creation
3711 * time, we need a lock to ensure we only try
3712 * to do it once
3713 */
3714 static DEFINE_SPINLOCK(lock);
3715 spin_lock(&lock);
3716 if (inode_unhashed(inode))
3717 __insert_inode_hash(inode,
3718 inode->i_ino + inode->i_generation);
3719 spin_unlock(&lock);
3720 }
3721
3722 fh[0] = inode->i_generation;
3723 fh[1] = inode->i_ino;
3724 fh[2] = ((__u64)inode->i_ino) >> 32;
3725
3726 *len = 3;
3727 return 1;
3728 }
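/*
 * Illustrative layout of the 3-word handle built above, for an inode with
 * i_ino 0x123456789 and i_generation 0xdeadbeef:
 *
 *	fh[0] = 0xdeadbeef	(i_generation)
 *	fh[1] = 0x23456789	(low 32 bits of i_ino)
 *	fh[2] = 0x00000001	(high 32 bits of i_ino)
 *
 * shmem_fh_to_dentry()/shmem_match() recombine inum as
 * ((u64)fh[2] << 32) | fh[1], and look the inode up under the hash key
 * i_ino + i_generation used by the __insert_inode_hash() above.
 */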
3729
3730 static const struct export_operations shmem_export_ops = {
3731 .get_parent = shmem_get_parent,
3732 .encode_fh = shmem_encode_fh,
3733 .fh_to_dentry = shmem_fh_to_dentry,
3734 };
3735
3736 enum shmem_param {
3737 Opt_gid,
3738 Opt_huge,
3739 Opt_mode,
3740 Opt_mpol,
3741 Opt_nr_blocks,
3742 Opt_nr_inodes,
3743 Opt_size,
3744 Opt_uid,
3745 Opt_inode32,
3746 Opt_inode64,
3747 Opt_noswap,
3748 Opt_quota,
3749 Opt_usrquota,
3750 Opt_grpquota,
3751 Opt_usrquota_block_hardlimit,
3752 Opt_usrquota_inode_hardlimit,
3753 Opt_grpquota_block_hardlimit,
3754 Opt_grpquota_inode_hardlimit,
3755 };
3756
3757 static const struct constant_table shmem_param_enums_huge[] = {
3758 {"never", SHMEM_HUGE_NEVER },
3759 {"always", SHMEM_HUGE_ALWAYS },
3760 {"within_size", SHMEM_HUGE_WITHIN_SIZE },
3761 {"advise", SHMEM_HUGE_ADVISE },
3762 {}
3763 };
3764
3765 const struct fs_parameter_spec shmem_fs_parameters[] = {
3766 fsparam_u32 ("gid", Opt_gid),
3767 fsparam_enum ("huge", Opt_huge, shmem_param_enums_huge),
3768 fsparam_u32oct("mode", Opt_mode),
3769 fsparam_string("mpol", Opt_mpol),
3770 fsparam_string("nr_blocks", Opt_nr_blocks),
3771 fsparam_string("nr_inodes", Opt_nr_inodes),
3772 fsparam_string("size", Opt_size),
3773 fsparam_u32 ("uid", Opt_uid),
3774 fsparam_flag ("inode32", Opt_inode32),
3775 fsparam_flag ("inode64", Opt_inode64),
3776 fsparam_flag ("noswap", Opt_noswap),
3777 #ifdef CONFIG_TMPFS_QUOTA
3778 fsparam_flag ("quota", Opt_quota),
3779 fsparam_flag ("usrquota", Opt_usrquota),
3780 fsparam_flag ("grpquota", Opt_grpquota),
3781 fsparam_string("usrquota_block_hardlimit", Opt_usrquota_block_hardlimit),
3782 fsparam_string("usrquota_inode_hardlimit", Opt_usrquota_inode_hardlimit),
3783 fsparam_string("grpquota_block_hardlimit", Opt_grpquota_block_hardlimit),
3784 fsparam_string("grpquota_inode_hardlimit", Opt_grpquota_inode_hardlimit),
3785 #endif
3786 {}
3787 };
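/*
 * Example (illustrative values) of a mount exercising the table above:
 *
 *	mount -t tmpfs -o size=50%,nr_inodes=1m,mode=1777,uid=1000,gid=1000,huge=within_size tmpfs /mnt
 *
 * uid= and gid= take numeric ids: they are resolved in the caller's user
 * namespace and must be representable in the filesystem's idmapping, as
 * checked in shmem_parse_one() below.
 */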
3788
3789 static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
3790 {
3791 struct shmem_options *ctx = fc->fs_private;
3792 struct fs_parse_result result;
3793 unsigned long long size;
3794 char *rest;
3795 int opt;
3796 kuid_t kuid;
3797 kgid_t kgid;
3798
3799 opt = fs_parse(fc, shmem_fs_parameters, param, &result);
3800 if (opt < 0)
3801 return opt;
3802
3803 switch (opt) {
3804 case Opt_size:
3805 size = memparse(param->string, &rest);
3806 if (*rest == '%') {
3807 size <<= PAGE_SHIFT;
3808 size *= totalram_pages();
3809 do_div(size, 100);
3810 rest++;
3811 }
3812 if (*rest)
3813 goto bad_value;
3814 ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
3815 ctx->seen |= SHMEM_SEEN_BLOCKS;
3816 break;
3817 case Opt_nr_blocks:
3818 ctx->blocks = memparse(param->string, &rest);
3819 if (*rest || ctx->blocks > S64_MAX)
3820 goto bad_value;
3821 ctx->seen |= SHMEM_SEEN_BLOCKS;
3822 break;
3823 case Opt_nr_inodes:
3824 ctx->inodes = memparse(param->string, &rest);
3825 if (*rest)
3826 goto bad_value;
3827 ctx->seen |= SHMEM_SEEN_INODES;
3828 break;
3829 case Opt_mode:
3830 ctx->mode = result.uint_32 & 07777;
3831 break;
3832 case Opt_uid:
3833 kuid = make_kuid(current_user_ns(), result.uint_32);
3834 if (!uid_valid(kuid))
3835 goto bad_value;
3836
3837 /*
3838 * The requested uid must be representable in the
3839 * filesystem's idmapping.
3840 */
3841 if (!kuid_has_mapping(fc->user_ns, kuid))
3842 goto bad_value;
3843
3844 ctx->uid = kuid;
3845 break;
3846 case Opt_gid:
3847 kgid = make_kgid(current_user_ns(), result.uint_32);
3848 if (!gid_valid(kgid))
3849 goto bad_value;
3850
3851 /*
3852 * The requested gid must be representable in the
3853 * filesystem's idmapping.
3854 */
3855 if (!kgid_has_mapping(fc->user_ns, kgid))
3856 goto bad_value;
3857
3858 ctx->gid = kgid;
3859 break;
3860 case Opt_huge:
3861 ctx->huge = result.uint_32;
3862 if (ctx->huge != SHMEM_HUGE_NEVER &&
3863 !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
3864 has_transparent_hugepage()))
3865 goto unsupported_parameter;
3866 ctx->seen |= SHMEM_SEEN_HUGE;
3867 break;
3868 case Opt_mpol:
3869 if (IS_ENABLED(CONFIG_NUMA)) {
3870 mpol_put(ctx->mpol);
3871 ctx->mpol = NULL;
3872 if (mpol_parse_str(param->string, &ctx->mpol))
3873 goto bad_value;
3874 break;
3875 }
3876 goto unsupported_parameter;
3877 case Opt_inode32:
3878 ctx->full_inums = false;
3879 ctx->seen |= SHMEM_SEEN_INUMS;
3880 break;
3881 case Opt_inode64:
3882 if (sizeof(ino_t) < 8) {
3883 return invalfc(fc,
3884 "Cannot use inode64 with <64bit inums in kernel\n");
3885 }
3886 ctx->full_inums = true;
3887 ctx->seen |= SHMEM_SEEN_INUMS;
3888 break;
3889 case Opt_noswap:
3890 if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN)) {
3891 return invalfc(fc,
3892 "Turning off swap in unprivileged tmpfs mounts unsupported");
3893 }
3894 ctx->noswap = true;
3895 ctx->seen |= SHMEM_SEEN_NOSWAP;
3896 break;
3897 case Opt_quota:
3898 if (fc->user_ns != &init_user_ns)
3899 return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
3900 ctx->seen |= SHMEM_SEEN_QUOTA;
3901 ctx->quota_types |= (QTYPE_MASK_USR | QTYPE_MASK_GRP);
3902 break;
3903 case Opt_usrquota:
3904 if (fc->user_ns != &init_user_ns)
3905 return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
3906 ctx->seen |= SHMEM_SEEN_QUOTA;
3907 ctx->quota_types |= QTYPE_MASK_USR;
3908 break;
3909 case Opt_grpquota:
3910 if (fc->user_ns != &init_user_ns)
3911 return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
3912 ctx->seen |= SHMEM_SEEN_QUOTA;
3913 ctx->quota_types |= QTYPE_MASK_GRP;
3914 break;
3915 case Opt_usrquota_block_hardlimit:
3916 size = memparse(param->string, &rest);
3917 if (*rest || !size)
3918 goto bad_value;
3919 if (size > SHMEM_QUOTA_MAX_SPC_LIMIT)
3920 return invalfc(fc,
3921 "User quota block hardlimit too large.");
3922 ctx->qlimits.usrquota_bhardlimit = size;
3923 break;
3924 case Opt_grpquota_block_hardlimit:
3925 size = memparse(param->string, &rest);
3926 if (*rest || !size)
3927 goto bad_value;
3928 if (size > SHMEM_QUOTA_MAX_SPC_LIMIT)
3929 return invalfc(fc,
3930 "Group quota block hardlimit too large.");
3931 ctx->qlimits.grpquota_bhardlimit = size;
3932 break;
3933 case Opt_usrquota_inode_hardlimit:
3934 size = memparse(param->string, &rest);
3935 if (*rest || !size)
3936 goto bad_value;
3937 if (size > SHMEM_QUOTA_MAX_INO_LIMIT)
3938 return invalfc(fc,
3939 "User quota inode hardlimit too large.");
3940 ctx->qlimits.usrquota_ihardlimit = size;
3941 break;
3942 case Opt_grpquota_inode_hardlimit:
3943 size = memparse(param->string, &rest);
3944 if (*rest || !size)
3945 goto bad_value;
3946 if (size > SHMEM_QUOTA_MAX_INO_LIMIT)
3947 return invalfc(fc,
3948 "Group quota inode hardlimit too large.");
3949 ctx->qlimits.grpquota_ihardlimit = size;
3950 break;
3951 }
3952 return 0;
3953
3954 unsupported_parameter:
3955 return invalfc(fc, "Unsupported parameter '%s'", param->key);
3956 bad_value:
3957 return invalfc(fc, "Bad value for '%s'", param->key);
3958 }
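
/*
 * A standalone sketch of the "size=N%" arithmetic in shmem_parse_one() above:
 * the percentage is scaled by the page size and the number of RAM pages,
 * divided by 100, and converted back to a block count. The page size and the
 * totalram figure are made-up example values, not read from a live kernel.
 */
#include <stdio.h>

#define EXAMPLE_PAGE_SHIFT	12
#define EXAMPLE_PAGE_SIZE	(1UL << EXAMPLE_PAGE_SHIFT)

int main(void)
{
	unsigned long long totalram_pages = 4ULL << 20;	/* 16 GiB in 4 KiB pages */
	unsigned long long pct = 50;			/* from "size=50%" */
	unsigned long long size, blocks;

	size = pct << EXAMPLE_PAGE_SHIFT;
	size *= totalram_pages;
	size /= 100;					/* do_div() in the kernel */
	blocks = (size + EXAMPLE_PAGE_SIZE - 1) / EXAMPLE_PAGE_SIZE;	/* DIV_ROUND_UP */

	printf("size=50%% -> %llu blocks (%llu MiB)\n",
	       blocks, (blocks * EXAMPLE_PAGE_SIZE) >> 20);
	return 0;
}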
3959
3960 static int shmem_parse_options(struct fs_context *fc, void *data)
3961 {
3962 char *options = data;
3963
3964 if (options) {
3965 int err = security_sb_eat_lsm_opts(options, &fc->security);
3966 if (err)
3967 return err;
3968 }
3969
3970 while (options != NULL) {
3971 char *this_char = options;
3972 for (;;) {
3973 /*
3974 * NUL-terminate this option: unfortunately,
3975 * mount options form a comma-separated list,
3976 * but mpol's nodelist may also contain commas.
3977 */
3978 options = strchr(options, ',');
3979 if (options == NULL)
3980 break;
3981 options++;
3982 if (!isdigit(*options)) {
3983 options[-1] = '\0';
3984 break;
3985 }
3986 }
3987 if (*this_char) {
3988 char *value = strchr(this_char, '=');
3989 size_t len = 0;
3990 int err;
3991
3992 if (value) {
3993 *value++ = '\0';
3994 len = strlen(value);
3995 }
3996 err = vfs_parse_fs_string(fc, this_char, value, len);
3997 if (err < 0)
3998 return err;
3999 }
4000 }
4001 return 0;
4002 }
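
/*
 * The loop above splits a monolithic option string on commas but keeps a
 * comma that is followed by a digit, so an mpol nodelist such as
 * "mpol=bind:0-2,5" survives intact. A userspace rerun of that exact loop
 * on a made-up option string:
 */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	char opts[] = "size=1g,mpol=bind:0-2,5,huge=always";
	char *options = opts;

	while (options != NULL) {
		char *this_char = options;

		for (;;) {
			options = strchr(options, ',');
			if (options == NULL)
				break;
			options++;
			if (!isdigit((unsigned char)*options)) {
				options[-1] = '\0';
				break;
			}
		}
		if (*this_char)	/* prints size=1g, mpol=bind:0-2,5, huge=always */
			printf("option: %s\n", this_char);
	}
	return 0;
}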
4003
4004 /*
4005 * Reconfigure a shmem filesystem.
4006 *
4007 * Note that we disallow change from limited->unlimited blocks/inodes while any
4008 * are in use; but we must separately disallow unlimited->limited, because in
4009 * that case we have no record of how much is already in use.
4010 */
4011 static int shmem_reconfigure(struct fs_context *fc)
4012 {
4013 struct shmem_options *ctx = fc->fs_private;
4014 struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
4015 unsigned long inodes;
4016 struct mempolicy *mpol = NULL;
4017 const char *err;
4018
4019 raw_spin_lock(&sbinfo->stat_lock);
4020 inodes = sbinfo->max_inodes - sbinfo->free_inodes;
4021
4022 if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
4023 if (!sbinfo->max_blocks) {
4024 err = "Cannot retroactively limit size";
4025 goto out;
4026 }
4027 if (percpu_counter_compare(&sbinfo->used_blocks,
4028 ctx->blocks) > 0) {
4029 err = "Too small a size for current use";
4030 goto out;
4031 }
4032 }
4033 if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
4034 if (!sbinfo->max_inodes) {
4035 err = "Cannot retroactively limit inodes";
4036 goto out;
4037 }
4038 if (ctx->inodes < inodes) {
4039 err = "Too few inodes for current use";
4040 goto out;
4041 }
4042 }
4043
4044 if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums &&
4045 sbinfo->next_ino > UINT_MAX) {
4046 err = "Current inum too high to switch to 32-bit inums";
4047 goto out;
4048 }
4049 if ((ctx->seen & SHMEM_SEEN_NOSWAP) && ctx->noswap && !sbinfo->noswap) {
4050 err = "Cannot disable swap on remount";
4051 goto out;
4052 }
4053 if (!(ctx->seen & SHMEM_SEEN_NOSWAP) && !ctx->noswap && sbinfo->noswap) {
4054 err = "Cannot enable swap on remount if it was disabled on first mount";
4055 goto out;
4056 }
4057
4058 if (ctx->seen & SHMEM_SEEN_QUOTA &&
4059 !sb_any_quota_loaded(fc->root->d_sb)) {
4060 err = "Cannot enable quota on remount";
4061 goto out;
4062 }
4063
4064 #ifdef CONFIG_TMPFS_QUOTA
4065 #define CHANGED_LIMIT(name) \
4066 (ctx->qlimits.name## hardlimit && \
4067 (ctx->qlimits.name## hardlimit != sbinfo->qlimits.name## hardlimit))
4068
4069 if (CHANGED_LIMIT(usrquota_b) || CHANGED_LIMIT(usrquota_i) ||
4070 CHANGED_LIMIT(grpquota_b) || CHANGED_LIMIT(grpquota_i)) {
4071 err = "Cannot change global quota limit on remount";
4072 goto out;
4073 }
4074 #endif /* CONFIG_TMPFS_QUOTA */
4075
4076 if (ctx->seen & SHMEM_SEEN_HUGE)
4077 sbinfo->huge = ctx->huge;
4078 if (ctx->seen & SHMEM_SEEN_INUMS)
4079 sbinfo->full_inums = ctx->full_inums;
4080 if (ctx->seen & SHMEM_SEEN_BLOCKS)
4081 sbinfo->max_blocks = ctx->blocks;
4082 if (ctx->seen & SHMEM_SEEN_INODES) {
4083 sbinfo->max_inodes = ctx->inodes;
4084 sbinfo->free_inodes = ctx->inodes - inodes;
4085 }
4086
4087 /*
4088 * Preserve previous mempolicy unless mpol remount option was specified.
4089 */
4090 if (ctx->mpol) {
4091 mpol = sbinfo->mpol;
4092 sbinfo->mpol = ctx->mpol; /* transfers initial ref */
4093 ctx->mpol = NULL;
4094 }
4095
4096 if (ctx->noswap)
4097 sbinfo->noswap = true;
4098
4099 raw_spin_unlock(&sbinfo->stat_lock);
4100 mpol_put(mpol);
4101 return 0;
4102 out:
4103 raw_spin_unlock(&sbinfo->stat_lock);
4104 return invalfc(fc, "%s", err);
4105 }
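
/*
 * From userspace, shmem_reconfigure() is reached by reconfiguring an existing
 * tmpfs superblock, for instance via fspick()/fsconfig(). A hedged sketch,
 * assuming kernel headers >= 5.2 and an existing tmpfs mount at the example
 * path /mnt; growing the limits succeeds, while the retroactive changes
 * rejected above fail with the error strings shown.
 */
#define _GNU_SOURCE
#include <fcntl.h>		/* AT_FDCWD */
#include <linux/mount.h>	/* FSPICK_CLOEXEC, FSCONFIG_* */
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	int fd = syscall(SYS_fspick, AT_FDCWD, "/mnt", FSPICK_CLOEXEC);

	if (fd < 0)
		return 1;

	if (syscall(SYS_fsconfig, fd, FSCONFIG_SET_STRING, "size", "2g", 0) < 0 ||
	    syscall(SYS_fsconfig, fd, FSCONFIG_SET_STRING, "nr_inodes", "100000", 0) < 0)
		return 1;

	return syscall(SYS_fsconfig, fd, FSCONFIG_CMD_RECONFIGURE, NULL, NULL, 0) < 0;
}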
4106
4107 static int shmem_show_options(struct seq_file *seq, struct dentry *root)
4108 {
4109 struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
4110 struct mempolicy *mpol;
4111
4112 if (sbinfo->max_blocks != shmem_default_max_blocks())
4113 seq_printf(seq, ",size=%luk",
4114 sbinfo->max_blocks << (PAGE_SHIFT - 10));
4115 if (sbinfo->max_inodes != shmem_default_max_inodes())
4116 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
4117 if (sbinfo->mode != (0777 | S_ISVTX))
4118 seq_printf(seq, ",mode=%03ho", sbinfo->mode);
4119 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
4120 seq_printf(seq, ",uid=%u",
4121 from_kuid_munged(&init_user_ns, sbinfo->uid));
4122 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
4123 seq_printf(seq, ",gid=%u",
4124 from_kgid_munged(&init_user_ns, sbinfo->gid));
4125
4126 /*
4127 * Showing inode{64,32} might be useful even if it's the system default,
4128 * since then people don't have to resort to checking both here and
4129 * /proc/config.gz to confirm 64-bit inums were successfully applied
4130 * (which may not even exist if IKCONFIG_PROC isn't enabled).
4131 *
4132 * We hide it when inode64 isn't the default and we are using 32-bit
4133 * inodes, since that probably just means the feature isn't even under
4134 * consideration.
4135 *
4136 * As such:
4137 *
4138  *                    +-----------------+-----------------+
4139  *                    | TMPFS_INODE64=y | TMPFS_INODE64=n |
4140  * +------------------+-----------------+-----------------+
4141  * | full_inums=true  | show            | show            |
4142  * | full_inums=false | show            | hide            |
4143  * +------------------+-----------------+-----------------+
4144 *
4145 */
4146 if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums)
4147 seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32));
4148 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4149 /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
4150 if (sbinfo->huge)
4151 seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
4152 #endif
4153 mpol = shmem_get_sbmpol(sbinfo);
4154 shmem_show_mpol(seq, mpol);
4155 mpol_put(mpol);
4156 if (sbinfo->noswap)
4157 seq_printf(seq, ",noswap");
4158 return 0;
4159 }
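
/*
 * The options emitted by shmem_show_options() surface in /proc/self/mounts.
 * A small userspace sketch that prints them for every tmpfs instance, using
 * the standard getmntent() interface:
 */
#include <mntent.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct mntent *m;
	FILE *f = setmntent("/proc/self/mounts", "r");

	if (!f)
		return 1;
	while ((m = getmntent(f)) != NULL)
		if (strcmp(m->mnt_type, "tmpfs") == 0)
			printf("%s: %s\n", m->mnt_dir, m->mnt_opts);
	endmntent(f);
	return 0;
}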
4160
4161 #endif /* CONFIG_TMPFS */
4162
4163 static void shmem_put_super(struct super_block *sb)
4164 {
4165 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
4166
4167 #ifdef CONFIG_TMPFS_QUOTA
4168 shmem_disable_quotas(sb);
4169 #endif
4170 free_percpu(sbinfo->ino_batch);
4171 percpu_counter_destroy(&sbinfo->used_blocks);
4172 mpol_put(sbinfo->mpol);
4173 kfree(sbinfo);
4174 sb->s_fs_info = NULL;
4175 }
4176
4177 static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
4178 {
4179 struct shmem_options *ctx = fc->fs_private;
4180 struct inode *inode;
4181 struct shmem_sb_info *sbinfo;
4182 int error = -ENOMEM;
4183
4184 /* Round up to L1_CACHE_BYTES to resist false sharing */
4185 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
4186 L1_CACHE_BYTES), GFP_KERNEL);
4187 if (!sbinfo)
4188 return error;
4189
4190 sb->s_fs_info = sbinfo;
4191
4192 #ifdef CONFIG_TMPFS
4193 /*
4194 * By default we allow only half of the physical RAM per
4195 * tmpfs instance, limiting inodes to one per page of lowmem;
4196 * but the internal instance is left unlimited.
4197 */
4198 if (!(sb->s_flags & SB_KERNMOUNT)) {
4199 if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
4200 ctx->blocks = shmem_default_max_blocks();
4201 if (!(ctx->seen & SHMEM_SEEN_INODES))
4202 ctx->inodes = shmem_default_max_inodes();
4203 if (!(ctx->seen & SHMEM_SEEN_INUMS))
4204 ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64);
4205 sbinfo->noswap = ctx->noswap;
4206 } else {
4207 sb->s_flags |= SB_NOUSER;
4208 }
4209 sb->s_export_op = &shmem_export_ops;
4210 sb->s_flags |= SB_NOSEC | SB_I_VERSION;
4211 #else
4212 sb->s_flags |= SB_NOUSER;
4213 #endif
4214 sbinfo->max_blocks = ctx->blocks;
4215 sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes;
4216 if (sb->s_flags & SB_KERNMOUNT) {
4217 sbinfo->ino_batch = alloc_percpu(ino_t);
4218 if (!sbinfo->ino_batch)
4219 goto failed;
4220 }
4221 sbinfo->uid = ctx->uid;
4222 sbinfo->gid = ctx->gid;
4223 sbinfo->full_inums = ctx->full_inums;
4224 sbinfo->mode = ctx->mode;
4225 sbinfo->huge = ctx->huge;
4226 sbinfo->mpol = ctx->mpol;
4227 ctx->mpol = NULL;
4228
4229 raw_spin_lock_init(&sbinfo->stat_lock);
4230 if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
4231 goto failed;
4232 spin_lock_init(&sbinfo->shrinklist_lock);
4233 INIT_LIST_HEAD(&sbinfo->shrinklist);
4234
4235 sb->s_maxbytes = MAX_LFS_FILESIZE;
4236 sb->s_blocksize = PAGE_SIZE;
4237 sb->s_blocksize_bits = PAGE_SHIFT;
4238 sb->s_magic = TMPFS_MAGIC;
4239 sb->s_op = &shmem_ops;
4240 sb->s_time_gran = 1;
4241 #ifdef CONFIG_TMPFS_XATTR
4242 sb->s_xattr = shmem_xattr_handlers;
4243 #endif
4244 #ifdef CONFIG_TMPFS_POSIX_ACL
4245 sb->s_flags |= SB_POSIXACL;
4246 #endif
4247 uuid_gen(&sb->s_uuid);
4248
4249 #ifdef CONFIG_TMPFS_QUOTA
4250 if (ctx->seen & SHMEM_SEEN_QUOTA) {
4251 sb->dq_op = &shmem_quota_operations;
4252 sb->s_qcop = &dquot_quotactl_sysfile_ops;
4253 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
4254
4255 /* Copy the default limits from ctx into sbinfo */
4256 memcpy(&sbinfo->qlimits, &ctx->qlimits,
4257 sizeof(struct shmem_quota_limits));
4258
4259 if (shmem_enable_quotas(sb, ctx->quota_types))
4260 goto failed;
4261 }
4262 #endif /* CONFIG_TMPFS_QUOTA */
4263
4264 inode = shmem_get_inode(&nop_mnt_idmap, sb, NULL, S_IFDIR | sbinfo->mode, 0,
4265 VM_NORESERVE);
4266 if (IS_ERR(inode)) {
4267 error = PTR_ERR(inode);
4268 goto failed;
4269 }
4270 inode->i_uid = sbinfo->uid;
4271 inode->i_gid = sbinfo->gid;
4272 sb->s_root = d_make_root(inode);
4273 if (!sb->s_root)
4274 goto failed;
4275 return 0;
4276
4277 failed:
4278 shmem_put_super(sb);
4279 return error;
4280 }
4281
4282 static int shmem_get_tree(struct fs_context *fc)
4283 {
4284 return get_tree_nodev(fc, shmem_fill_super);
4285 }
4286
4287 static void shmem_free_fc(struct fs_context *fc)
4288 {
4289 struct shmem_options *ctx = fc->fs_private;
4290
4291 if (ctx) {
4292 mpol_put(ctx->mpol);
4293 kfree(ctx);
4294 }
4295 }
4296
4297 static const struct fs_context_operations shmem_fs_context_ops = {
4298 .free = shmem_free_fc,
4299 .get_tree = shmem_get_tree,
4300 #ifdef CONFIG_TMPFS
4301 .parse_monolithic = shmem_parse_options,
4302 .parse_param = shmem_parse_one,
4303 .reconfigure = shmem_reconfigure,
4304 #endif
4305 };
4306
4307 static struct kmem_cache *shmem_inode_cachep;
4308
4309 static struct inode *shmem_alloc_inode(struct super_block *sb)
4310 {
4311 struct shmem_inode_info *info;
4312 info = alloc_inode_sb(sb, shmem_inode_cachep, GFP_KERNEL);
4313 if (!info)
4314 return NULL;
4315 return &info->vfs_inode;
4316 }
4317
4318 static void shmem_free_in_core_inode(struct inode *inode)
4319 {
4320 if (S_ISLNK(inode->i_mode))
4321 kfree(inode->i_link);
4322 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
4323 }
4324
4325 static void shmem_destroy_inode(struct inode *inode)
4326 {
4327 if (S_ISREG(inode->i_mode))
4328 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
4329 if (S_ISDIR(inode->i_mode))
4330 simple_offset_destroy(shmem_get_offset_ctx(inode));
4331 }
4332
4333 static void shmem_init_inode(void *foo)
4334 {
4335 struct shmem_inode_info *info = foo;
4336 inode_init_once(&info->vfs_inode);
4337 }
4338
4339 static void shmem_init_inodecache(void)
4340 {
4341 shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
4342 sizeof(struct shmem_inode_info),
4343 0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
4344 }
4345
4346 static void shmem_destroy_inodecache(void)
4347 {
4348 kmem_cache_destroy(shmem_inode_cachep);
4349 }
4350
4351 /* Keep the page in page cache instead of truncating it */
4352 static int shmem_error_remove_page(struct address_space *mapping,
4353 struct page *page)
4354 {
4355 return 0;
4356 }
4357
4358 const struct address_space_operations shmem_aops = {
4359 .writepage = shmem_writepage,
4360 .dirty_folio = noop_dirty_folio,
4361 #ifdef CONFIG_TMPFS
4362 .write_begin = shmem_write_begin,
4363 .write_end = shmem_write_end,
4364 #endif
4365 #ifdef CONFIG_MIGRATION
4366 .migrate_folio = migrate_folio,
4367 #endif
4368 .error_remove_page = shmem_error_remove_page,
4369 };
4370 EXPORT_SYMBOL(shmem_aops);
4371
4372 static const struct file_operations shmem_file_operations = {
4373 .mmap = shmem_mmap,
4374 .open = generic_file_open,
4375 .get_unmapped_area = shmem_get_unmapped_area,
4376 #ifdef CONFIG_TMPFS
4377 .llseek = shmem_file_llseek,
4378 .read_iter = shmem_file_read_iter,
4379 .write_iter = generic_file_write_iter,
4380 .fsync = noop_fsync,
4381 .splice_read = shmem_file_splice_read,
4382 .splice_write = iter_file_splice_write,
4383 .fallocate = shmem_fallocate,
4384 #endif
4385 };
4386
4387 static const struct inode_operations shmem_inode_operations = {
4388 .getattr = shmem_getattr,
4389 .setattr = shmem_setattr,
4390 #ifdef CONFIG_TMPFS_XATTR
4391 .listxattr = shmem_listxattr,
4392 .set_acl = simple_set_acl,
4393 .fileattr_get = shmem_fileattr_get,
4394 .fileattr_set = shmem_fileattr_set,
4395 #endif
4396 };
4397
4398 static const struct inode_operations shmem_dir_inode_operations = {
4399 #ifdef CONFIG_TMPFS
4400 .getattr = shmem_getattr,
4401 .create = shmem_create,
4402 .lookup = simple_lookup,
4403 .link = shmem_link,
4404 .unlink = shmem_unlink,
4405 .symlink = shmem_symlink,
4406 .mkdir = shmem_mkdir,
4407 .rmdir = shmem_rmdir,
4408 .mknod = shmem_mknod,
4409 .rename = shmem_rename2,
4410 .tmpfile = shmem_tmpfile,
4411 .get_offset_ctx = shmem_get_offset_ctx,
4412 #endif
4413 #ifdef CONFIG_TMPFS_XATTR
4414 .listxattr = shmem_listxattr,
4415 .fileattr_get = shmem_fileattr_get,
4416 .fileattr_set = shmem_fileattr_set,
4417 #endif
4418 #ifdef CONFIG_TMPFS_POSIX_ACL
4419 .setattr = shmem_setattr,
4420 .set_acl = simple_set_acl,
4421 #endif
4422 };
4423
4424 static const struct inode_operations shmem_special_inode_operations = {
4425 .getattr = shmem_getattr,
4426 #ifdef CONFIG_TMPFS_XATTR
4427 .listxattr = shmem_listxattr,
4428 #endif
4429 #ifdef CONFIG_TMPFS_POSIX_ACL
4430 .setattr = shmem_setattr,
4431 .set_acl = simple_set_acl,
4432 #endif
4433 };
4434
4435 static const struct super_operations shmem_ops = {
4436 .alloc_inode = shmem_alloc_inode,
4437 .free_inode = shmem_free_in_core_inode,
4438 .destroy_inode = shmem_destroy_inode,
4439 #ifdef CONFIG_TMPFS
4440 .statfs = shmem_statfs,
4441 .show_options = shmem_show_options,
4442 #endif
4443 #ifdef CONFIG_TMPFS_QUOTA
4444 .get_dquots = shmem_get_dquots,
4445 #endif
4446 .evict_inode = shmem_evict_inode,
4447 .drop_inode = generic_delete_inode,
4448 .put_super = shmem_put_super,
4449 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4450 .nr_cached_objects = shmem_unused_huge_count,
4451 .free_cached_objects = shmem_unused_huge_scan,
4452 #endif
4453 };
4454
4455 static const struct vm_operations_struct shmem_vm_ops = {
4456 .fault = shmem_fault,
4457 .map_pages = filemap_map_pages,
4458 #ifdef CONFIG_NUMA
4459 .set_policy = shmem_set_policy,
4460 .get_policy = shmem_get_policy,
4461 #endif
4462 };
4463
4464 static const struct vm_operations_struct shmem_anon_vm_ops = {
4465 .fault = shmem_fault,
4466 .map_pages = filemap_map_pages,
4467 #ifdef CONFIG_NUMA
4468 .set_policy = shmem_set_policy,
4469 .get_policy = shmem_get_policy,
4470 #endif
4471 };
4472
4473 int shmem_init_fs_context(struct fs_context *fc)
4474 {
4475 struct shmem_options *ctx;
4476
4477 ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
4478 if (!ctx)
4479 return -ENOMEM;
4480
4481 ctx->mode = 0777 | S_ISVTX;
4482 ctx->uid = current_fsuid();
4483 ctx->gid = current_fsgid();
4484
4485 fc->fs_private = ctx;
4486 fc->ops = &shmem_fs_context_ops;
4487 return 0;
4488 }
4489
4490 static struct file_system_type shmem_fs_type = {
4491 .owner = THIS_MODULE,
4492 .name = "tmpfs",
4493 .init_fs_context = shmem_init_fs_context,
4494 #ifdef CONFIG_TMPFS
4495 .parameters = shmem_fs_parameters,
4496 #endif
4497 .kill_sb = kill_litter_super,
4498 #ifdef CONFIG_SHMEM
4499 .fs_flags = FS_USERNS_MOUNT | FS_ALLOW_IDMAP,
4500 #else
4501 .fs_flags = FS_USERNS_MOUNT,
4502 #endif
4503 };
4504
4505 void __init shmem_init(void)
4506 {
4507 int error;
4508
4509 shmem_init_inodecache();
4510
4511 #ifdef CONFIG_TMPFS_QUOTA
4512 error = register_quota_format(&shmem_quota_format);
4513 if (error < 0) {
4514 pr_err("Could not register quota format\n");
4515 goto out3;
4516 }
4517 #endif
4518
4519 error = register_filesystem(&shmem_fs_type);
4520 if (error) {
4521 pr_err("Could not register tmpfs\n");
4522 goto out2;
4523 }
4524
4525 shm_mnt = kern_mount(&shmem_fs_type);
4526 if (IS_ERR(shm_mnt)) {
4527 error = PTR_ERR(shm_mnt);
4528 pr_err("Could not kern_mount tmpfs\n");
4529 goto out1;
4530 }
4531
4532 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4533 if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
4534 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
4535 else
4536 shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */
4537 #endif
4538 return;
4539
4540 out1:
4541 unregister_filesystem(&shmem_fs_type);
4542 out2:
4543 #ifdef CONFIG_TMPFS_QUOTA
4544 unregister_quota_format(&shmem_quota_format);
4545 out3:
4546 #endif
4547 shmem_destroy_inodecache();
4548 shm_mnt = ERR_PTR(error);
4549 }
4550
4551 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
4552 static ssize_t shmem_enabled_show(struct kobject *kobj,
4553 struct kobj_attribute *attr, char *buf)
4554 {
4555 static const int values[] = {
4556 SHMEM_HUGE_ALWAYS,
4557 SHMEM_HUGE_WITHIN_SIZE,
4558 SHMEM_HUGE_ADVISE,
4559 SHMEM_HUGE_NEVER,
4560 SHMEM_HUGE_DENY,
4561 SHMEM_HUGE_FORCE,
4562 };
4563 int len = 0;
4564 int i;
4565
4566 for (i = 0; i < ARRAY_SIZE(values); i++) {
4567 len += sysfs_emit_at(buf, len,
4568 shmem_huge == values[i] ? "%s[%s]" : "%s%s",
4569 i ? " " : "",
4570 shmem_format_huge(values[i]));
4571 }
4572
4573 len += sysfs_emit_at(buf, len, "\n");
4574
4575 return len;
4576 }
4577
4578 static ssize_t shmem_enabled_store(struct kobject *kobj,
4579 struct kobj_attribute *attr, const char *buf, size_t count)
4580 {
4581 char tmp[16];
4582 int huge;
4583
4584 if (count + 1 > sizeof(tmp))
4585 return -EINVAL;
4586 memcpy(tmp, buf, count);
4587 tmp[count] = '\0';
4588 if (count && tmp[count - 1] == '\n')
4589 tmp[count - 1] = '\0';
4590
4591 huge = shmem_parse_huge(tmp);
4592 if (huge == -EINVAL)
4593 return -EINVAL;
4594 if (!has_transparent_hugepage() &&
4595 huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
4596 return -EINVAL;
4597
4598 shmem_huge = huge;
4599 if (shmem_huge > SHMEM_HUGE_DENY)
4600 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
4601 return count;
4602 }
4603
4604 struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled);
4605 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
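
/*
 * shmem_enabled_show()/shmem_enabled_store() back the sysfs file
 * /sys/kernel/mm/transparent_hugepage/shmem_enabled. A userspace sketch that
 * reads the current selection and then switches it (writing needs root, and
 * the file only exists with CONFIG_TRANSPARENT_HUGEPAGE and CONFIG_SYSFS):
 */
#include <stdio.h>

#define SHMEM_ENABLED "/sys/kernel/mm/transparent_hugepage/shmem_enabled"

int main(void)
{
	char line[128];
	FILE *f = fopen(SHMEM_ENABLED, "r");

	if (!f)
		return 1;
	if (fgets(line, sizeof(line), f))
		printf("current: %s", line);	/* e.g. "always within_size advise [never] deny force" */
	fclose(f);

	f = fopen(SHMEM_ENABLED, "w");
	if (!f)
		return 1;
	fputs("within_size\n", f);		/* parsed by shmem_parse_huge() */
	fclose(f);
	return 0;
}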
4606
4607 #else /* !CONFIG_SHMEM */
4608
4609 /*
4610 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
4611 *
4612 * This is intended for small systems where the benefits of the full
4613 * shmem code (swap-backed and resource-limited) are outweighed by
4614 * their complexity. On systems without swap this code should be
4615 * effectively equivalent, but much lighter weight.
4616 */
4617
4618 static struct file_system_type shmem_fs_type = {
4619 .name = "tmpfs",
4620 .init_fs_context = ramfs_init_fs_context,
4621 .parameters = ramfs_fs_parameters,
4622 .kill_sb = ramfs_kill_sb,
4623 .fs_flags = FS_USERNS_MOUNT,
4624 };
4625
4626 void __init shmem_init(void)
4627 {
4628 BUG_ON(register_filesystem(&shmem_fs_type) != 0);
4629
4630 shm_mnt = kern_mount(&shmem_fs_type);
4631 BUG_ON(IS_ERR(shm_mnt));
4632 }
4633
4634 int shmem_unuse(unsigned int type)
4635 {
4636 return 0;
4637 }
4638
4639 int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
4640 {
4641 return 0;
4642 }
4643
4644 void shmem_unlock_mapping(struct address_space *mapping)
4645 {
4646 }
4647
4648 #ifdef CONFIG_MMU
4649 unsigned long shmem_get_unmapped_area(struct file *file,
4650 unsigned long addr, unsigned long len,
4651 unsigned long pgoff, unsigned long flags)
4652 {
4653 return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
4654 }
4655 #endif
4656
4657 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
4658 {
4659 truncate_inode_pages_range(inode->i_mapping, lstart, lend);
4660 }
4661 EXPORT_SYMBOL_GPL(shmem_truncate_range);
4662
4663 #define shmem_vm_ops generic_file_vm_ops
4664 #define shmem_anon_vm_ops generic_file_vm_ops
4665 #define shmem_file_operations ramfs_file_operations
4666 #define shmem_acct_size(flags, size) 0
4667 #define shmem_unacct_size(flags, size) do {} while (0)
4668
4669 static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap, struct super_block *sb, struct inode *dir,
4670 umode_t mode, dev_t dev, unsigned long flags)
4671 {
4672 struct inode *inode = ramfs_get_inode(sb, dir, mode, dev);
4673 return inode ? inode : ERR_PTR(-ENOSPC);
4674 }
4675
4676 #endif /* CONFIG_SHMEM */
4677
4678 /* common code */
4679
4680 static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
4681 unsigned long flags, unsigned int i_flags)
4682 {
4683 struct inode *inode;
4684 struct file *res;
4685
4686 if (IS_ERR(mnt))
4687 return ERR_CAST(mnt);
4688
4689 if (size < 0 || size > MAX_LFS_FILESIZE)
4690 return ERR_PTR(-EINVAL);
4691
4692 if (shmem_acct_size(flags, size))
4693 return ERR_PTR(-ENOMEM);
4694
4695 if (is_idmapped_mnt(mnt))
4696 return ERR_PTR(-EINVAL);
4697
4698 inode = shmem_get_inode(&nop_mnt_idmap, mnt->mnt_sb, NULL,
4699 S_IFREG | S_IRWXUGO, 0, flags);
4700
4701 if (IS_ERR(inode)) {
4702 shmem_unacct_size(flags, size);
4703 return ERR_CAST(inode);
4704 }
4705 inode->i_flags |= i_flags;
4706 inode->i_size = size;
4707 clear_nlink(inode); /* It is unlinked */
4708 res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
4709 if (!IS_ERR(res))
4710 res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
4711 &shmem_file_operations);
4712 if (IS_ERR(res))
4713 iput(inode);
4714 return res;
4715 }
4716
4717 /**
4718 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
4719 * kernel internal. There will be NO LSM permission checks against the
4720 * underlying inode. So users of this interface must do LSM checks at a
4721 * higher layer. The users are the big_key and shm implementations. LSM
4722 * checks are provided at the key or shm level rather than the inode.
4723 * @name: name for dentry (to be seen in /proc/<pid>/maps)
4724 * @size: size to be set for the file
4725 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4726 */
4727 struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
4728 {
4729 return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
4730 }
4731
4732 /**
4733 * shmem_file_setup - get an unlinked file living in tmpfs
4734 * @name: name for dentry (to be seen in /proc/<pid>/maps)
4735 * @size: size to be set for the file
4736 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4737 */
4738 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
4739 {
4740 return __shmem_file_setup(shm_mnt, name, size, flags, 0);
4741 }
4742 EXPORT_SYMBOL_GPL(shmem_file_setup);
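
/*
 * A hedged kernel-side sketch of shmem_file_setup() in use: a hypothetical
 * caller (example_fill_buffer() is not a real kernel function) creates an
 * unlinked tmpfs file one page long and writes into it through the normal
 * kernel_write() path.
 */
#include <linux/err.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/shmem_fs.h>

static int example_fill_buffer(void)
{
	static const char msg[] = "hello";
	struct file *filp;
	loff_t pos = 0;
	ssize_t ret;

	filp = shmem_file_setup("example-buf", PAGE_SIZE, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	ret = kernel_write(filp, msg, sizeof(msg), &pos);
	fput(filp);
	return ret < 0 ? ret : 0;
}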
4743
4744 /**
4745 * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
4746 * @mnt: the tmpfs mount where the file will be created
4747 * @name: name for dentry (to be seen in /proc/<pid>/maps)
4748 * @size: size to be set for the file
4749 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4750 */
4751 struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
4752 loff_t size, unsigned long flags)
4753 {
4754 return __shmem_file_setup(mnt, name, size, flags, 0);
4755 }
4756 EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
4757
4758 /**
4759 * shmem_zero_setup - setup a shared anonymous mapping
4760 * @vma: the vma to be mmapped is prepared by do_mmap
4761 */
4762 int shmem_zero_setup(struct vm_area_struct *vma)
4763 {
4764 struct file *file;
4765 loff_t size = vma->vm_end - vma->vm_start;
4766
4767 /*
4768 * Cloning a new file under mmap_lock leads to a lock ordering conflict
4769 * between XFS directory reading and selinux: since this file is only
4770 * accessible to the user through its mapping, use S_PRIVATE flag to
4771 * bypass file security, in the same way as shmem_kernel_file_setup().
4772 */
4773 file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
4774 if (IS_ERR(file))
4775 return PTR_ERR(file);
4776
4777 if (vma->vm_file)
4778 fput(vma->vm_file);
4779 vma->vm_file = file;
4780 vma->vm_ops = &shmem_anon_vm_ops;
4781
4782 return 0;
4783 }
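
/*
 * shmem_zero_setup() is what do_mmap() uses to back a shared anonymous
 * mapping, so any MAP_SHARED | MAP_ANONYMOUS mmap() from userspace exercises
 * it. A runnable sketch:
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 1 << 20;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_SHARED | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	strcpy(p, "backed by a shmem object");	/* faults go through shmem_fault() */
	printf("%s\n", p);
	munmap(p, len);
	return 0;
}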
4784
4785 /**
4786 * shmem_read_folio_gfp - read into page cache, using specified page allocation flags.
4787 * @mapping: the folio's address_space
4788 * @index: the folio index
4789 * @gfp: the page allocator flags to use if allocating
4790 *
4791 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
4792 * with any new page allocations done using the specified allocation flags.
4793 * But read_cache_page_gfp() uses the ->read_folio() method, which does not
4794 * suit tmpfs, since it may have pages in swapcache, and needs to find those
4795 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
4796 *
4797 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
4798 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
4799 */
4800 struct folio *shmem_read_folio_gfp(struct address_space *mapping,
4801 pgoff_t index, gfp_t gfp)
4802 {
4803 #ifdef CONFIG_SHMEM
4804 struct inode *inode = mapping->host;
4805 struct folio *folio;
4806 int error;
4807
4808 BUG_ON(!shmem_mapping(mapping));
4809 error = shmem_get_folio_gfp(inode, index, &folio, SGP_CACHE,
4810 gfp, NULL, NULL, NULL);
4811 if (error)
4812 return ERR_PTR(error);
4813
4814 folio_unlock(folio);
4815 return folio;
4816 #else
4817 /*
4818 * The tiny !SHMEM case uses ramfs without swap
4819 */
4820 return mapping_read_folio_gfp(mapping, index, gfp);
4821 #endif
4822 }
4823 EXPORT_SYMBOL_GPL(shmem_read_folio_gfp);
4824
4825 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4826 pgoff_t index, gfp_t gfp)
4827 {
4828 struct folio *folio = shmem_read_folio_gfp(mapping, index, gfp);
4829 struct page *page;
4830
4831 if (IS_ERR(folio))
4832 return &folio->page;
4833
4834 page = folio_file_page(folio, index);
4835 if (PageHWPoison(page)) {
4836 folio_put(folio);
4837 return ERR_PTR(-EIO);
4838 }
4839
4840 return page;
4841 }
4842 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
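
/*
 * A hedged sketch of the driver-side pattern mentioned in the comment above:
 * i915/ttm-style code pulls object pages from a shmem-backed address_space
 * with a relaxed gfp mask so an allocation failure is reported rather than
 * triggering the OOM killer. example_get_obj_page() and obj_filp are
 * illustrative names, not kernel interfaces.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>

static struct page *example_get_obj_page(struct file *obj_filp, pgoff_t index)
{
	struct address_space *mapping = obj_filp->f_mapping;
	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;

	return shmem_read_mapping_page_gfp(mapping, index, gfp);
}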