/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/fileattr.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/fs_parser.h>
#include <linux/swapfile.h>
#include <linux/iversion.h>
#include "swap.h"

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>
#include <linux/userfaultfd_k.h>
#include <linux/rmap.h>
#include <linux/uuid.h>

#include <linux/uaccess.h>

#include "internal.h"

#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_rwsem making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};

struct shmem_options {
	unsigned long long blocks;
	unsigned long long inodes;
	struct mempolicy *mpol;
	kuid_t uid;
	kgid_t gid;
	umode_t mode;
	bool full_inums;
	int huge;
	int seen;
#define SHMEM_SEEN_BLOCKS 1
#define SHMEM_SEEN_INODES 2
#define SHMEM_SEEN_HUGE 4
#define SHMEM_SEEN_INUMS 8
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages() / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	unsigned long nr_pages = totalram_pages();

	return min(nr_pages - totalhigh_pages(), nr_pages / 2);
}
#endif

static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
			      struct folio **foliop, enum sgp_type sgp,
			      gfp_t gfp, struct vm_area_struct *vma,
			      vm_fault_t *fault_type);

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

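/*
 * Re-account a size change on an object that was accounted up front
 * (VM_NORESERVE clear): charge or release the difference between the
 * old and new size.
 */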
static inline int shmem_reacct_size(unsigned long flags,
		loff_t oldsize, loff_t newsize)
{
	if (!(flags & VM_NORESERVE)) {
		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
			return security_vm_enough_memory_mm(current->mm,
					VM_ACCT(newsize) - VM_ACCT(oldsize));
		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
	}
	return 0;
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_get_folio reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags, long pages)
{
	if (!(flags & VM_NORESERVE))
		return 0;

	return security_vm_enough_memory_mm(current->mm,
			pages * VM_ACCT(PAGE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}

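/*
 * Try to account @pages new blocks for @inode: charge the memory overcommit
 * accounting where needed, and the superblock's used_blocks counter when a
 * max_blocks limit is set. Returns true on success, false if a limit is hit.
 */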
static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (shmem_acct_block(info->flags, pages))
		return false;

	if (sbinfo->max_blocks) {
		if (percpu_counter_compare(&sbinfo->used_blocks,
					   sbinfo->max_blocks - pages) > 0)
			goto unacct;
		percpu_counter_add(&sbinfo->used_blocks, pages);
	}

	return true;

unacct:
	shmem_unacct_blocks(info->flags, pages);
	return false;
}

static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (sbinfo->max_blocks)
		percpu_counter_sub(&sbinfo->used_blocks, pages);
	shmem_unacct_blocks(info->flags, pages);
}

static const struct super_operations shmem_ops;
const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static const struct vm_operations_struct shmem_anon_vm_ops;
static struct file_system_type shmem_fs_type;

bool vma_is_anon_shmem(struct vm_area_struct *vma)
{
	return vma->vm_ops == &shmem_anon_vm_ops;
}

bool vma_is_shmem(struct vm_area_struct *vma)
{
	return vma_is_anon_shmem(vma) || vma->vm_ops == &shmem_vm_ops;
}

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

/*
 * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
 * produces a novel ino for the newly allocated inode.
 *
 * It may also be called when making a hard link to permit the space needed by
 * each dentry. However, in that case, no new inode number is needed since that
 * internally draws from another pool of inode numbers (currently global
 * get_next_ino()). This case is indicated by passing NULL as inop.
 */
#define SHMEM_INO_BATCH 1024
static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	ino_t ino;

	if (!(sb->s_flags & SB_KERNMOUNT)) {
		raw_spin_lock(&sbinfo->stat_lock);
		if (sbinfo->max_inodes) {
			if (!sbinfo->free_inodes) {
				raw_spin_unlock(&sbinfo->stat_lock);
				return -ENOSPC;
			}
			sbinfo->free_inodes--;
		}
		if (inop) {
			ino = sbinfo->next_ino++;
			if (unlikely(is_zero_ino(ino)))
				ino = sbinfo->next_ino++;
			if (unlikely(!sbinfo->full_inums &&
				     ino > UINT_MAX)) {
				/*
				 * Emulate get_next_ino uint wraparound for
				 * compatibility
				 */
				if (IS_ENABLED(CONFIG_64BIT))
					pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
						__func__, MINOR(sb->s_dev));
				sbinfo->next_ino = 1;
				ino = sbinfo->next_ino++;
			}
			*inop = ino;
		}
		raw_spin_unlock(&sbinfo->stat_lock);
	} else if (inop) {
		/*
		 * __shmem_file_setup, one of our callers, is lock-free: it
		 * doesn't hold stat_lock in shmem_reserve_inode since
		 * max_inodes is always 0, and is called from potentially
		 * unknown contexts. As such, use a per-cpu batched allocator
		 * which doesn't require the per-sb stat_lock unless we are at
		 * the batch boundary.
		 *
		 * We don't need to worry about inode{32,64} since SB_KERNMOUNT
		 * shmem mounts are not exposed to userspace, so we don't need
		 * to worry about things like glibc compatibility.
		 */
		ino_t *next_ino;

		next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
		ino = *next_ino;
		if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
			raw_spin_lock(&sbinfo->stat_lock);
			ino = sbinfo->next_ino;
			sbinfo->next_ino += SHMEM_INO_BATCH;
			raw_spin_unlock(&sbinfo->stat_lock);
			if (unlikely(is_zero_ino(ino)))
				ino++;
		}
		*inop = ino;
		*next_ino = ++ino;
		put_cpu();
	}

	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		raw_spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		raw_spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_inode_unacct_blocks(inode, freed);
	}
}

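/*
 * shmem_charge() and shmem_uncharge() adjust an inode's block accounting when
 * pages are added to or removed from its page cache outside shmem's own
 * allocation paths (for example when khugepaged collapses a range into a
 * huge page).
 */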
bool shmem_charge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	if (!shmem_inode_acct_block(inode, pages))
		return false;

	/* nrpages adjustment first, then shmem_recalc_inode() when balanced */
	inode->i_mapping->nrpages += pages;

	spin_lock_irqsave(&info->lock, flags);
	info->alloced += pages;
	inode->i_blocks += pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	return true;
}

void shmem_uncharge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	/* nrpages adjustment done by __filemap_remove_folio() or caller */

	spin_lock_irqsave(&info->lock, flags);
	info->alloced -= pages;
	inode->i_blocks -= pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	shmem_inode_unacct_blocks(inode, pages);
}

/*
 * Replace item expected in xarray by a new item, while holding xa_lock.
 */
static int shmem_replace_entry(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	XA_STATE(xas, &mapping->i_pages, index);
	void *item;

	VM_BUG_ON(!expected);
	VM_BUG_ON(!replacement);
	item = xas_load(&xas);
	if (item != expected)
		return -ENOENT;
	xas_store(&xas, replacement);
	return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
}

/*
 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 *
 * SHMEM_HUGE_NEVER:
 *	disables huge pages for the mount;
 * SHMEM_HUGE_ALWAYS:
 *	enables huge pages for the mount;
 * SHMEM_HUGE_WITHIN_SIZE:
 *	only allocate huge pages if the page will be fully within i_size,
 *	also respect fadvise()/madvise() hints;
 * SHMEM_HUGE_ADVISE:
 *	only allocate huge pages if requested with fadvise()/madvise();
 */

#define SHMEM_HUGE_NEVER	0
#define SHMEM_HUGE_ALWAYS	1
#define SHMEM_HUGE_WITHIN_SIZE	2
#define SHMEM_HUGE_ADVISE	3

/*
 * Special values.
 * Only can be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 *
 * SHMEM_HUGE_DENY:
 *	disables huge on shm_mnt and all mounts, for emergency use;
 * SHMEM_HUGE_FORCE:
 *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 *
 */
#define SHMEM_HUGE_DENY		(-1)
#define SHMEM_HUGE_FORCE	(-2)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* ifdef here to avoid bloating shmem.o when not necessary */

static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;

bool shmem_is_huge(struct vm_area_struct *vma, struct inode *inode,
		   pgoff_t index, bool shmem_huge_force)
{
	loff_t i_size;

	if (!S_ISREG(inode->i_mode))
		return false;
	if (vma && ((vma->vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)))
		return false;
	if (shmem_huge_force)
		return true;
	if (shmem_huge == SHMEM_HUGE_FORCE)
		return true;
	if (shmem_huge == SHMEM_HUGE_DENY)
		return false;

	switch (SHMEM_SB(inode->i_sb)->huge) {
	case SHMEM_HUGE_ALWAYS:
		return true;
	case SHMEM_HUGE_WITHIN_SIZE:
		index = round_up(index + 1, HPAGE_PMD_NR);
		i_size = round_up(i_size_read(inode), PAGE_SIZE);
		if (i_size >> PAGE_SHIFT >= index)
			return true;
		fallthrough;
	case SHMEM_HUGE_ADVISE:
		if (vma && (vma->vm_flags & VM_HUGEPAGE))
			return true;
		fallthrough;
	default:
		return false;
	}
}

#if defined(CONFIG_SYSFS)
static int shmem_parse_huge(const char *str)
{
	if (!strcmp(str, "never"))
		return SHMEM_HUGE_NEVER;
	if (!strcmp(str, "always"))
		return SHMEM_HUGE_ALWAYS;
	if (!strcmp(str, "within_size"))
		return SHMEM_HUGE_WITHIN_SIZE;
	if (!strcmp(str, "advise"))
		return SHMEM_HUGE_ADVISE;
	if (!strcmp(str, "deny"))
		return SHMEM_HUGE_DENY;
	if (!strcmp(str, "force"))
		return SHMEM_HUGE_FORCE;
	return -EINVAL;
}
#endif

#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
static const char *shmem_format_huge(int huge)
{
	switch (huge) {
	case SHMEM_HUGE_NEVER:
		return "never";
	case SHMEM_HUGE_ALWAYS:
		return "always";
	case SHMEM_HUGE_WITHIN_SIZE:
		return "within_size";
	case SHMEM_HUGE_ADVISE:
		return "advise";
	case SHMEM_HUGE_DENY:
		return "deny";
	case SHMEM_HUGE_FORCE:
		return "force";
	default:
		VM_BUG_ON(1);
		return "bad_val";
	}
}
#endif

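/*
 * Shrinker work for huge tmpfs: walk the superblock's shrinklist of inodes
 * whose last huge folio extends beyond i_size, and try to split those folios
 * so that the tail pages beyond EOF can be freed.
 */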
static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	LIST_HEAD(list), *pos, *next;
	LIST_HEAD(to_remove);
	struct inode *inode;
	struct shmem_inode_info *info;
	struct folio *folio;
	unsigned long batch = sc ? sc->nr_to_scan : 128;
	int split = 0;

	if (list_empty(&sbinfo->shrinklist))
		return SHRINK_STOP;

	spin_lock(&sbinfo->shrinklist_lock);
	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);

		/* pin the inode */
		inode = igrab(&info->vfs_inode);

		/* inode is about to be evicted */
		if (!inode) {
			list_del_init(&info->shrinklist);
			goto next;
		}

		/* Check if there's anything to gain */
		if (round_up(inode->i_size, PAGE_SIZE) ==
				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
			list_move(&info->shrinklist, &to_remove);
			goto next;
		}

		list_move(&info->shrinklist, &list);
next:
		sbinfo->shrinklist_len--;
		if (!--batch)
			break;
	}
	spin_unlock(&sbinfo->shrinklist_lock);

	list_for_each_safe(pos, next, &to_remove) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;
		list_del_init(&info->shrinklist);
		iput(inode);
	}

	list_for_each_safe(pos, next, &list) {
		int ret;
		pgoff_t index;

		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;

		if (nr_to_split && split >= nr_to_split)
			goto move_back;

		index = (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT;
		folio = filemap_get_folio(inode->i_mapping, index);
		if (!folio)
			goto drop;

		/* No huge page at the end of the file: nothing to split */
		if (!folio_test_large(folio)) {
			folio_put(folio);
			goto drop;
		}

		/*
		 * Move the inode on the list back to shrinklist if we failed
		 * to lock the page at this time.
		 *
		 * Waiting for the lock may lead to deadlock in the
		 * reclaim path.
		 */
		if (!folio_trylock(folio)) {
			folio_put(folio);
			goto move_back;
		}

		ret = split_folio(folio);
		folio_unlock(folio);
		folio_put(folio);

		/* If split failed move the inode on the list back to shrinklist */
		if (ret)
			goto move_back;

		split++;
drop:
		list_del_init(&info->shrinklist);
		goto put;
move_back:
		/*
		 * Make sure the inode is either on the global list or deleted
		 * from any local list before iput() since it could be deleted
		 * in another thread once we put the inode (then the local list
		 * is corrupted).
		 */
		spin_lock(&sbinfo->shrinklist_lock);
		list_move(&info->shrinklist, &sbinfo->shrinklist);
		sbinfo->shrinklist_len++;
		spin_unlock(&sbinfo->shrinklist_lock);
put:
		iput(inode);
	}

	return split;
}

static long shmem_unused_huge_scan(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (!READ_ONCE(sbinfo->shrinklist_len))
		return SHRINK_STOP;

	return shmem_unused_huge_shrink(sbinfo, sc, 0);
}

static long shmem_unused_huge_count(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	return READ_ONCE(sbinfo->shrinklist_len);
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */

#define shmem_huge SHMEM_HUGE_DENY

bool shmem_is_huge(struct vm_area_struct *vma, struct inode *inode,
		   pgoff_t index, bool shmem_huge_force)
{
	return false;
}

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Like filemap_add_folio, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct folio *folio,
				   struct address_space *mapping,
				   pgoff_t index, void *expected, gfp_t gfp,
				   struct mm_struct *charge_mm)
{
	XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
	long nr = folio_nr_pages(folio);
	int error;

	VM_BUG_ON_FOLIO(index != round_down(index, nr), folio);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
	VM_BUG_ON(expected && folio_test_large(folio));

	folio_ref_add(folio, nr);
	folio->mapping = mapping;
	folio->index = index;

	if (!folio_test_swapcache(folio)) {
		error = mem_cgroup_charge(folio, charge_mm, gfp);
		if (error) {
			if (folio_test_pmd_mappable(folio)) {
				count_vm_event(THP_FILE_FALLBACK);
				count_vm_event(THP_FILE_FALLBACK_CHARGE);
			}
			goto error;
		}
	}
	folio_throttle_swaprate(folio, gfp);

	do {
		xas_lock_irq(&xas);
		if (expected != xas_find_conflict(&xas)) {
			xas_set_err(&xas, -EEXIST);
			goto unlock;
		}
		if (expected && xas_find_conflict(&xas)) {
			xas_set_err(&xas, -EEXIST);
			goto unlock;
		}
		xas_store(&xas, folio);
		if (xas_error(&xas))
			goto unlock;
		if (folio_test_pmd_mappable(folio)) {
			count_vm_event(THP_FILE_ALLOC);
			__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
		}
		mapping->nrpages += nr;
		__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
		__lruvec_stat_mod_folio(folio, NR_SHMEM, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (xas_error(&xas)) {
		error = xas_error(&xas);
		goto error;
	}

	return 0;
error:
	folio->mapping = NULL;
	folio_ref_sub(folio, nr);
	return error;
}

/*
 * Like delete_from_page_cache, but substitutes swap for @folio.
 */
static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
{
	struct address_space *mapping = folio->mapping;
	long nr = folio_nr_pages(folio);
	int error;

	xa_lock_irq(&mapping->i_pages);
	error = shmem_replace_entry(mapping, folio->index, folio, radswap);
	folio->mapping = NULL;
	mapping->nrpages -= nr;
	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
	__lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
	xa_unlock_irq(&mapping->i_pages);
	folio_put(folio);
	BUG_ON(error);
}

/*
 * Remove swap entry from page cache, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	void *old;

	old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
	if (old != radswap)
		return -ENOENT;
	free_swap_and_cache(radix_to_swp_entry(radswap));
	return 0;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given offsets are swapped out.
 *
 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_partial_swap_usage(struct address_space *mapping,
						pgoff_t start, pgoff_t end)
{
	XA_STATE(xas, &mapping->i_pages, start);
	struct page *page;
	unsigned long swapped = 0;

	rcu_read_lock();
	xas_for_each(&xas, page, end - 1) {
		if (xas_retry(&xas, page))
			continue;
		if (xa_is_value(page))
			swapped++;

		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}

	rcu_read_unlock();

	return swapped << PAGE_SHIFT;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given vma is swapped out.
 *
 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_swap_usage(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long swapped;

	/* Be careful as we don't hold info->lock */
	swapped = READ_ONCE(info->swapped);

	/*
	 * The easier cases are when the shmem object has nothing in swap, or
	 * the vma maps it whole. Then we can simply use the stats that we
	 * already track.
	 */
	if (!swapped)
		return 0;

	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
		return swapped << PAGE_SHIFT;

	/* Here comes the more involved part */
	return shmem_partial_swap_usage(mapping, vma->vm_pgoff,
					vma->vm_pgoff + vma_pages(vma));
}

/*
 * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct folio_batch fbatch;
	pgoff_t index = 0;

	folio_batch_init(&fbatch);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping) &&
	       filemap_get_folios(mapping, &index, ~0UL, &fbatch)) {
		check_move_unevictable_folios(&fbatch);
		folio_batch_release(&fbatch);
		cond_resched();
	}
}

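/*
 * Find the folio covering @index for partial truncation, reading it back
 * from swap when any part of it may still lie within i_size.
 */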
static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
{
	struct folio *folio;

	/*
	 * At first avoid shmem_get_folio(,,,SGP_READ): that fails
	 * beyond i_size, and reports fallocated pages as holes.
	 */
	folio = __filemap_get_folio(inode->i_mapping, index,
					FGP_ENTRY | FGP_LOCK, 0);
	if (!xa_is_value(folio))
		return folio;
	/*
	 * But read a page back from swap if any of it is within i_size
	 * (although in some cases this is just a waste of time).
	 */
	folio = NULL;
	shmem_get_folio(inode, index, &folio, SGP_READ);
	return folio;
}

/*
 * Remove range of pages and swap entries from page cache, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
								 bool unfalloc)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
	struct folio_batch fbatch;
	pgoff_t indices[PAGEVEC_SIZE];
	struct folio *folio;
	bool same_folio;
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	if (lend == -1)
		end = -1;	/* unsigned, so actually very big */

	if (info->fallocend > start && info->fallocend <= end && !unfalloc)
		info->fallocend = start;

	folio_batch_init(&fbatch);
	index = start;
	while (index < end && find_lock_entries(mapping, &index, end - 1,
			&fbatch, indices)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			folio = fbatch.folios[i];

			if (xa_is_value(folio)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
							indices[i], folio);
				continue;
			}

			if (!unfalloc || !folio_test_uptodate(folio))
				truncate_inode_folio(mapping, folio);
			folio_unlock(folio);
		}
		folio_batch_remove_exceptionals(&fbatch);
		folio_batch_release(&fbatch);
		cond_resched();
	}

	/*
	 * When undoing a failed fallocate, we want none of the partial folio
	 * zeroing and splitting below, but shall want to truncate the whole
	 * folio when !uptodate indicates that it was added by this fallocate,
	 * even when [lstart, lend] covers only a part of the folio.
	 */
	if (unfalloc)
		goto whole_folios;

	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
	folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
	if (folio) {
		same_folio = lend < folio_pos(folio) + folio_size(folio);
		folio_mark_dirty(folio);
		if (!truncate_inode_partial_folio(folio, lstart, lend)) {
			start = folio->index + folio_nr_pages(folio);
			if (same_folio)
				end = folio->index;
		}
		folio_unlock(folio);
		folio_put(folio);
		folio = NULL;
	}

	if (!same_folio)
		folio = shmem_get_partial_folio(inode, lend >> PAGE_SHIFT);
	if (folio) {
		folio_mark_dirty(folio);
		if (!truncate_inode_partial_folio(folio, lstart, lend))
			end = folio->index;
		folio_unlock(folio);
		folio_put(folio);
	}

whole_folios:

	index = start;
	while (index < end) {
		cond_resched();

		if (!find_get_entries(mapping, &index, end - 1, &fbatch,
				indices)) {
			/* If all gone or hole-punch or unfalloc, we're done */
			if (index == start || end != -1)
				break;
			/* But if truncating, restart to make sure all gone */
			index = start;
			continue;
		}
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			folio = fbatch.folios[i];

			if (xa_is_value(folio)) {
				if (unfalloc)
					continue;
				if (shmem_free_swap(mapping, indices[i], folio)) {
					/* Swap was replaced by page: retry */
					index = indices[i];
					break;
				}
				nr_swaps_freed++;
				continue;
			}

			folio_lock(folio);

			if (!unfalloc || !folio_test_uptodate(folio)) {
				if (folio_mapping(folio) != mapping) {
					/* Page was replaced by swap: retry */
					folio_unlock(folio);
					index = indices[i];
					break;
				}
				VM_BUG_ON_FOLIO(folio_test_writeback(folio),
						folio);
				truncate_inode_folio(mapping, folio);
			}
			folio_unlock(folio);
		}
		folio_batch_remove_exceptionals(&fbatch);
		folio_batch_release(&fbatch);
	}

	spin_lock_irq(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
	spin_unlock_irq(&info->lock);
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	shmem_undo_range(inode, lstart, lend, false);
	inode->i_ctime = inode->i_mtime = current_time(inode);
	inode_inc_iversion(inode);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

static int shmem_getattr(struct mnt_idmap *idmap,
			 const struct path *path, struct kstat *stat,
			 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = path->dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
		spin_lock_irq(&info->lock);
		shmem_recalc_inode(inode);
		spin_unlock_irq(&info->lock);
	}
	if (info->fsflags & FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (info->fsflags & FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (info->fsflags & FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;
	stat->attributes_mask |= (STATX_ATTR_APPEND |
			STATX_ATTR_IMMUTABLE |
			STATX_ATTR_NODUMP);
	generic_fillattr(&nop_mnt_idmap, inode, stat);

	if (shmem_is_huge(NULL, inode, 0, false))
		stat->blksize = HPAGE_PMD_SIZE;

	if (request_mask & STATX_BTIME) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = info->i_crtime.tv_sec;
		stat->btime.tv_nsec = info->i_crtime.tv_nsec;
	}

	return 0;
}

static int shmem_setattr(struct mnt_idmap *idmap,
			 struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct shmem_inode_info *info = SHMEM_I(inode);
	int error;
	bool update_mtime = false;
	bool update_ctime = true;

	error = setattr_prepare(&nop_mnt_idmap, dentry, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		/* protected by i_rwsem */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;

		if (newsize != oldsize) {
			error = shmem_reacct_size(SHMEM_I(inode)->flags,
					oldsize, newsize);
			if (error)
				return error;
			i_size_write(inode, newsize);
			update_mtime = true;
		} else {
			update_ctime = false;
		}
		if (newsize <= oldsize) {
			loff_t holebegin = round_up(newsize, PAGE_SIZE);
			if (oldsize > holebegin)
				unmap_mapping_range(inode->i_mapping,
							holebegin, 0, 1);
			if (info->alloced)
				shmem_truncate_range(inode,
							newsize, (loff_t)-1);
			/* unmap again to remove racily COWed private pages */
			if (oldsize > holebegin)
				unmap_mapping_range(inode->i_mapping,
							holebegin, 0, 1);
		}
	}

	setattr_copy(&nop_mnt_idmap, inode, attr);
	if (attr->ia_valid & ATTR_MODE)
		error = posix_acl_chmod(&nop_mnt_idmap, dentry, inode->i_mode);
	if (!error && update_ctime) {
		inode->i_ctime = current_time(inode);
		if (update_mtime)
			inode->i_mtime = inode->i_ctime;
		inode_inc_iversion(inode);
	}
	return error;
}

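/*
 * Final teardown of a shmem inode: release its size accounting, truncate
 * whatever it still holds, drop it from the huge-page shrinklist and the
 * swaplist, free its xattrs and return the reserved inode.
 */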
static void shmem_evict_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (shmem_mapping(inode->i_mapping)) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		mapping_set_exiting(inode->i_mapping);
		shmem_truncate_range(inode, 0, (loff_t)-1);
		if (!list_empty(&info->shrinklist)) {
			spin_lock(&sbinfo->shrinklist_lock);
			if (!list_empty(&info->shrinklist)) {
				list_del_init(&info->shrinklist);
				sbinfo->shrinklist_len--;
			}
			spin_unlock(&sbinfo->shrinklist_lock);
		}
		while (!list_empty(&info->swaplist)) {
			/* Wait while shmem_unuse() is scanning this inode... */
			wait_var_event(&info->stop_eviction,
				       !atomic_read(&info->stop_eviction));
			mutex_lock(&shmem_swaplist_mutex);
			/* ...but beware of the race if we peeked too early */
			if (!atomic_read(&info->stop_eviction))
				list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	}

	simple_xattrs_free(&info->xattrs);
	WARN_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	clear_inode(inode);
}

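/*
 * Scan the page cache of @mapping from @start for swap entries belonging to
 * swap device @type, filling @fbatch and the matching @indices. Returns the
 * index at which the scan stopped.
 */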
static int shmem_find_swap_entries(struct address_space *mapping,
				   pgoff_t start, struct folio_batch *fbatch,
				   pgoff_t *indices, unsigned int type)
{
	XA_STATE(xas, &mapping->i_pages, start);
	struct folio *folio;
	swp_entry_t entry;

	rcu_read_lock();
	xas_for_each(&xas, folio, ULONG_MAX) {
		if (xas_retry(&xas, folio))
			continue;

		if (!xa_is_value(folio))
			continue;

		entry = radix_to_swp_entry(folio);
		/*
		 * swapin error entries can be found in the mapping. But they're
		 * deliberately ignored here as we've done everything we can do.
		 */
		if (swp_type(entry) != type)
			continue;

		indices[folio_batch_count(fbatch)] = xas.xa_index;
		if (!folio_batch_add(fbatch, folio))
			break;

		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}
	rcu_read_unlock();

	return xas.xa_index;
}

/*
 * Move the swapped pages for an inode to page cache. Returns the count
 * of pages swapped in, or the error in case of failure.
 */
static int shmem_unuse_swap_entries(struct inode *inode,
		struct folio_batch *fbatch, pgoff_t *indices)
{
	int i = 0;
	int ret = 0;
	int error = 0;
	struct address_space *mapping = inode->i_mapping;

	for (i = 0; i < folio_batch_count(fbatch); i++) {
		struct folio *folio = fbatch->folios[i];

		if (!xa_is_value(folio))
			continue;
		error = shmem_swapin_folio(inode, indices[i],
					  &folio, SGP_CACHE,
					  mapping_gfp_mask(mapping),
					  NULL, NULL);
		if (error == 0) {
			folio_unlock(folio);
			folio_put(folio);
			ret++;
		}
		if (error == -ENOMEM)
			break;
		error = 0;
	}
	return error ? error : ret;
}

/*
 * If swap found in inode, free it and move page from swapcache to filecache.
 */
static int shmem_unuse_inode(struct inode *inode, unsigned int type)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t start = 0;
	struct folio_batch fbatch;
	pgoff_t indices[PAGEVEC_SIZE];
	int ret = 0;

	do {
		folio_batch_init(&fbatch);
		shmem_find_swap_entries(mapping, start, &fbatch, indices, type);
		if (folio_batch_count(&fbatch) == 0) {
			ret = 0;
			break;
		}

		ret = shmem_unuse_swap_entries(inode, &fbatch, indices);
		if (ret < 0)
			break;

		start = indices[folio_batch_count(&fbatch) - 1];
	} while (true);

	return ret;
}

/*
 * Read all the shared memory data that resides in the swap
 * device 'type' back into memory, so the swap device can be
 * unused.
 */
int shmem_unuse(unsigned int type)
{
	struct shmem_inode_info *info, *next;
	int error = 0;

	if (list_empty(&shmem_swaplist))
		return 0;

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
		if (!info->swapped) {
			list_del_init(&info->swaplist);
			continue;
		}
		/*
		 * Drop the swaplist mutex while searching the inode for swap;
		 * but before doing so, make sure shmem_evict_inode() will not
		 * remove placeholder inode from swaplist, nor let it be freed
		 * (igrab() would protect from unlink, but not from unmount).
		 */
		atomic_inc(&info->stop_eviction);
		mutex_unlock(&shmem_swaplist_mutex);

		error = shmem_unuse_inode(&info->vfs_inode, type);
		cond_resched();

		mutex_lock(&shmem_swaplist_mutex);
		next = list_next_entry(info, swaplist);
		if (!info->swapped)
			list_del_init(&info->swaplist);
		if (atomic_dec_and_test(&info->stop_eviction))
			wake_up_var(&info->stop_eviction);
		if (error)
			break;
	}
	mutex_unlock(&shmem_swaplist_mutex);

	return error;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	struct shmem_inode_info *info;
	struct address_space *mapping;
	struct inode *inode;
	swp_entry_t swap;
	pgoff_t index;

	/*
	 * If /sys/kernel/mm/transparent_hugepage/shmem_enabled is "always" or
	 * "force", drivers/gpu/drm/i915/gem/i915_gem_shmem.c gets huge pages,
	 * and its shmem_writeback() needs them to be split when swapping.
	 */
	if (folio_test_large(folio)) {
		/* Ensure the subpages are still dirty */
		folio_test_set_dirty(folio);
		if (split_huge_page(page) < 0)
			goto redirty;
		folio = page_folio(page);
		folio_clear_dirty(folio);
	}

	BUG_ON(!folio_test_locked(folio));
	mapping = folio->mapping;
	index = folio->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * Our capabilities prevent regular writeback or sync from ever calling
	 * shmem_writepage; but a stacking filesystem might use ->writepage of
	 * its underlying filesystem, in which case tmpfs should write out to
	 * swap only in response to memory pressure, and not for the writeback
	 * threads or sync.
	 */
	if (!wbc->for_reclaim) {
		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
		goto redirty;
	}

	/*
	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
	 * value into swapfile.c, the only way we can correctly account for a
	 * fallocated folio arriving here is now to initialize it and write it.
	 *
	 * That's okay for a folio already fallocated earlier, but if we have
	 * not yet completed the fallocation, then (a) we want to keep track
	 * of this folio in case we have to undo it, and (b) it may not be a
	 * good idea to continue anyway, once we're pushing into swap. So
	 * reactivate the folio, and let shmem_fallocate() quit when too many.
	 */
	if (!folio_test_uptodate(folio)) {
		if (inode->i_private) {
			struct shmem_falloc *shmem_falloc;
			spin_lock(&inode->i_lock);
			shmem_falloc = inode->i_private;
			if (shmem_falloc &&
			    !shmem_falloc->waitq &&
			    index >= shmem_falloc->start &&
			    index < shmem_falloc->next)
				shmem_falloc->nr_unswapped++;
			else
				shmem_falloc = NULL;
			spin_unlock(&inode->i_lock);
			if (shmem_falloc)
				goto redirty;
		}
		folio_zero_range(folio, 0, folio_size(folio));
		flush_dcache_folio(folio);
		folio_mark_uptodate(folio);
	}

	swap = folio_alloc_swap(folio);
	if (!swap.val)
		goto redirty;

	/*
	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
	 * if it's not already there. Do it now before the folio is
	 * moved to swap cache, when its pagelock no longer protects
	 * the inode from eviction. But don't unlock the mutex until
	 * we've incremented swapped, because shmem_unuse_inode() will
	 * prune a !swapped inode from the swaplist under this mutex.
	 */
	mutex_lock(&shmem_swaplist_mutex);
	if (list_empty(&info->swaplist))
		list_add(&info->swaplist, &shmem_swaplist);

	if (add_to_swap_cache(folio, swap,
			__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
			NULL) == 0) {
		spin_lock_irq(&info->lock);
		shmem_recalc_inode(inode);
		info->swapped++;
		spin_unlock_irq(&info->lock);

		swap_shmem_alloc(swap);
		shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap));

		mutex_unlock(&shmem_swaplist_mutex);
		BUG_ON(folio_mapped(folio));
		swap_writepage(&folio->page, wbc);
		return 0;
	}

	mutex_unlock(&shmem_swaplist_mutex);
	put_swap_folio(folio, swap);
redirty:
	folio_mark_dirty(folio);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with folio locked */
	folio_unlock(folio);
	return 0;
}

#if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		raw_spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		raw_spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#else /* !CONFIG_NUMA || !CONFIG_TMPFS */
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
}
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif /* CONFIG_NUMA && CONFIG_TMPFS */
#ifndef CONFIG_NUMA
#define vm_policy vm_private_data
#endif

static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
		struct shmem_inode_info *info, pgoff_t index)
{
	/* Create a pseudo vma that just contains the policy */
	vma_init(vma, NULL);
	/* Bias interleave by inode number to distribute better across nodes */
	vma->vm_pgoff = index + info->vfs_inode.i_ino;
	vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
}

static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
{
	/* Drop reference taken by mpol_shared_policy_lookup() */
	mpol_cond_put(vma->vm_policy);
}

static struct folio *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct page *page;
	struct vm_fault vmf = {
		.vma = &pvma,
	};

	shmem_pseudo_vma_init(&pvma, info, index);
	page = swap_cluster_readahead(swap, gfp, &vmf);
	shmem_pseudo_vma_destroy(&pvma);

	if (!page)
		return NULL;
	return page_folio(page);
}

/*
 * Make sure huge_gfp is always more limited than limit_gfp.
 * Some of the flags set permissions, while others set limitations.
 */
static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
{
	gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
	gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY;
	gfp_t zoneflags = limit_gfp & GFP_ZONEMASK;
	gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK);

	/* Allow allocations only from the originally specified zones. */
	result |= zoneflags;

	/*
	 * Minimize the result gfp by taking the union with the deny flags,
	 * and the intersection of the allow flags.
	 */
	result |= (limit_gfp & denyflags);
	result |= (huge_gfp & limit_gfp) & allowflags;

	return result;
}

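/*
 * Allocate a PMD-sized folio for @index, unless something already occupies
 * the hugepage-aligned range in the page cache.
 */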
static struct folio *shmem_alloc_hugefolio(gfp_t gfp,
		struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct address_space *mapping = info->vfs_inode.i_mapping;
	pgoff_t hindex;
	struct folio *folio;

	hindex = round_down(index, HPAGE_PMD_NR);
	if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1,
								XA_PRESENT))
		return NULL;

	shmem_pseudo_vma_init(&pvma, info, hindex);
	folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, &pvma, 0, true);
	shmem_pseudo_vma_destroy(&pvma);
	if (!folio)
		count_vm_event(THP_FILE_FALLBACK);
	return folio;
}

static struct folio *shmem_alloc_folio(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct folio *folio;

	shmem_pseudo_vma_init(&pvma, info, index);
	folio = vma_alloc_folio(gfp, 0, &pvma, 0, false);
	shmem_pseudo_vma_destroy(&pvma);

	return folio;
}

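/*
 * Allocate an order-0 or (if @huge) PMD-sized folio for @index, charging the
 * blocks to @inode first. Returns the locked, swapbacked folio on success,
 * or an ERR_PTR on failure.
 */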
b1d0ec3a 1577static struct folio *shmem_alloc_and_acct_folio(gfp_t gfp, struct inode *inode,
800d8c63
KS
1578 pgoff_t index, bool huge)
1579{
0f079694 1580 struct shmem_inode_info *info = SHMEM_I(inode);
72827e5c 1581 struct folio *folio;
800d8c63
KS
1582 int nr;
1583 int err = -ENOSPC;
52cd3b07 1584
396bcc52 1585 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
800d8c63
KS
1586 huge = false;
1587 nr = huge ? HPAGE_PMD_NR : 1;
1588
0f079694 1589 if (!shmem_inode_acct_block(inode, nr))
800d8c63 1590 goto failed;
800d8c63
KS
1591
1592 if (huge)
72827e5c 1593 folio = shmem_alloc_hugefolio(gfp, info, index);
800d8c63 1594 else
72827e5c
MWO
1595 folio = shmem_alloc_folio(gfp, info, index);
1596 if (folio) {
1597 __folio_set_locked(folio);
1598 __folio_set_swapbacked(folio);
b1d0ec3a 1599 return folio;
75edd345 1600 }
18a2f371 1601
800d8c63 1602 err = -ENOMEM;
0f079694 1603 shmem_inode_unacct_blocks(inode, nr);
800d8c63
KS
1604failed:
1605 return ERR_PTR(err);
1da177e4 1606}
71fe804b 1607
bde05d1c
HD
1608/*
1609 * When a page is moved from swapcache to shmem filecache (either by the
fc26babb 1610 * usual swapin of shmem_get_folio_gfp(), or by the less common swapoff of
bde05d1c
HD
1611 * shmem_unuse_inode()), it may have been read in earlier from swap, in
1612 * ignorance of the mapping it belongs to. If that mapping has special
1613 * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
1614 * we may need to copy to a suitable page before moving to filecache.
1615 *
1616 * In a future release, this may well be extended to respect cpuset and
1617 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
1618 * but for now it is a simple matter of zone.
1619 */
069d849c 1620static bool shmem_should_replace_folio(struct folio *folio, gfp_t gfp)
bde05d1c 1621{
069d849c 1622 return folio_zonenum(folio) > gfp_zone(gfp);
bde05d1c
HD
1623}
1624
0d698e25 1625static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
bde05d1c
HD
1626 struct shmem_inode_info *info, pgoff_t index)
1627{
d21bba2b 1628 struct folio *old, *new;
bde05d1c 1629 struct address_space *swap_mapping;
c1cb20d4 1630 swp_entry_t entry;
bde05d1c
HD
1631 pgoff_t swap_index;
1632 int error;
1633
0d698e25 1634 old = *foliop;
907ea17e 1635 entry = folio_swap_entry(old);
c1cb20d4 1636 swap_index = swp_offset(entry);
907ea17e 1637 swap_mapping = swap_address_space(entry);
bde05d1c
HD
1638
1639 /*
1640 * We have arrived here because our zones are constrained, so don't
1641 * limit chance of success by further cpuset and node constraints.
1642 */
1643 gfp &= ~GFP_CONSTRAINT_MASK;
907ea17e
MWO
1644 VM_BUG_ON_FOLIO(folio_test_large(old), old);
1645 new = shmem_alloc_folio(gfp, info, index);
1646 if (!new)
bde05d1c 1647 return -ENOMEM;
bde05d1c 1648
907ea17e
MWO
1649 folio_get(new);
1650 folio_copy(new, old);
1651 flush_dcache_folio(new);
bde05d1c 1652
907ea17e
MWO
1653 __folio_set_locked(new);
1654 __folio_set_swapbacked(new);
1655 folio_mark_uptodate(new);
1656 folio_set_swap_entry(new, entry);
1657 folio_set_swapcache(new);
bde05d1c
HD
1658
1659 /*
1660 * Our caller will very soon move the new folio out of swapcache, but it's
1661 * a nice clean interface for us to replace the old folio by the new one there.
1662 */
b93b0163 1663 xa_lock_irq(&swap_mapping->i_pages);
907ea17e 1664 error = shmem_replace_entry(swap_mapping, swap_index, old, new);
0142ef6c 1665 if (!error) {
d21bba2b 1666 mem_cgroup_migrate(old, new);
907ea17e
MWO
1667 __lruvec_stat_mod_folio(new, NR_FILE_PAGES, 1);
1668 __lruvec_stat_mod_folio(new, NR_SHMEM, 1);
1669 __lruvec_stat_mod_folio(old, NR_FILE_PAGES, -1);
1670 __lruvec_stat_mod_folio(old, NR_SHMEM, -1);
0142ef6c 1671 }
b93b0163 1672 xa_unlock_irq(&swap_mapping->i_pages);
bde05d1c 1673
0142ef6c
HD
1674 if (unlikely(error)) {
1675 /*
1676 * Is this possible? I think not, now that our callers check
1677 * both the swapcache flag and folio->private after getting the folio lock;
1678 * but be defensive: swap roles, so it is the unused new folio we clear and free.
1679 */
907ea17e 1680 old = new;
0142ef6c 1681 } else {
907ea17e 1682 folio_add_lru(new);
0d698e25 1683 *foliop = new;
0142ef6c 1684 }
bde05d1c 1685
907ea17e
MWO
1686 folio_clear_swapcache(old);
1687 old->private = NULL;
bde05d1c 1688
907ea17e
MWO
1689 folio_unlock(old);
1690 folio_put_refs(old, 2);
0142ef6c 1691 return error;
bde05d1c
HD
1692}
1693
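/*
 * A swapin read failed: replace the swap entry in the page cache with a
 * swapin-error entry, so that later accesses fail with -EIO instead of
 * retrying, and drop the folio's swap slot and block accounting.
 */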
6cec2b95
ML
1694static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
1695 struct folio *folio, swp_entry_t swap)
1696{
1697 struct address_space *mapping = inode->i_mapping;
1698 struct shmem_inode_info *info = SHMEM_I(inode);
1699 swp_entry_t swapin_error;
1700 void *old;
1701
15520a3f 1702 swapin_error = make_swapin_error_entry();
6cec2b95
ML
1703 old = xa_cmpxchg_irq(&mapping->i_pages, index,
1704 swp_to_radix_entry(swap),
1705 swp_to_radix_entry(swapin_error), 0);
1706 if (old != swp_to_radix_entry(swap))
1707 return;
1708
1709 folio_wait_writeback(folio);
75fa68a5 1710 delete_from_swap_cache(folio);
6cec2b95
ML
1711 spin_lock_irq(&info->lock);
1712 /*
1713 * Don't treat a swapin error folio as alloced. Otherwise inode->i_blocks won't
1714 * be 0 when the inode is released, which would trigger the WARN_ON(inode->i_blocks)
1715 * in shmem_evict_inode().
1716 */
1717 info->alloced--;
1718 info->swapped--;
1719 shmem_recalc_inode(inode);
1720 spin_unlock_irq(&info->lock);
1721 swap_free(swap);
1722}
1723
c5bf121e 1724/*
833de10f
ML
1725 * Swap in the folio pointed to by *foliop.
1726 * Caller has to make sure that *foliop contains a valid swapped folio.
1727 * Returns 0 with the folio in *foliop on success. On failure, returns the
1728 * error code and sets *foliop to NULL.
c5bf121e 1729 */
da08e9b7
MWO
1730static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
1731 struct folio **foliop, enum sgp_type sgp,
c5bf121e
VRP
1732 gfp_t gfp, struct vm_area_struct *vma,
1733 vm_fault_t *fault_type)
1734{
1735 struct address_space *mapping = inode->i_mapping;
1736 struct shmem_inode_info *info = SHMEM_I(inode);
04f94e3f 1737 struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL;
da08e9b7 1738 struct folio *folio = NULL;
c5bf121e
VRP
1739 swp_entry_t swap;
1740 int error;
1741
da08e9b7
MWO
1742 VM_BUG_ON(!*foliop || !xa_is_value(*foliop));
1743 swap = radix_to_swp_entry(*foliop);
1744 *foliop = NULL;
c5bf121e 1745
6cec2b95
ML
1746 if (is_swapin_error_entry(swap))
1747 return -EIO;
1748
c5bf121e 1749 /* Look it up and read it in.. */
5739a81c
MWO
1750 folio = swap_cache_get_folio(swap, NULL, 0);
1751 if (!folio) {
c5bf121e
VRP
1752 /* Or update major stats only when swapin succeeds?? */
1753 if (fault_type) {
1754 *fault_type |= VM_FAULT_MAJOR;
1755 count_vm_event(PGMAJFAULT);
1756 count_memcg_event_mm(charge_mm, PGMAJFAULT);
1757 }
1758 /* Here we actually start the I/O */
5739a81c
MWO
1759 folio = shmem_swapin(swap, gfp, info, index);
1760 if (!folio) {
c5bf121e
VRP
1761 error = -ENOMEM;
1762 goto failed;
1763 }
1764 }
1765
833de10f 1766 /* We have to do this with folio locked to prevent races */
da08e9b7
MWO
1767 folio_lock(folio);
1768 if (!folio_test_swapcache(folio) ||
1769 folio_swap_entry(folio).val != swap.val ||
c5bf121e
VRP
1770 !shmem_confirm_swap(mapping, index, swap)) {
1771 error = -EEXIST;
1772 goto unlock;
1773 }
da08e9b7 1774 if (!folio_test_uptodate(folio)) {
c5bf121e
VRP
1775 error = -EIO;
1776 goto failed;
1777 }
da08e9b7 1778 folio_wait_writeback(folio);
c5bf121e 1779
8a84802e
SP
1780 /*
1781 * Some architectures may have to restore extra metadata to the
da08e9b7 1782 * folio after reading from swap.
8a84802e 1783 */
da08e9b7 1784 arch_swap_restore(swap, folio);
8a84802e 1785
069d849c 1786 if (shmem_should_replace_folio(folio, gfp)) {
0d698e25 1787 error = shmem_replace_folio(&folio, gfp, info, index);
c5bf121e
VRP
1788 if (error)
1789 goto failed;
1790 }
1791
b7dd44a1 1792 error = shmem_add_to_page_cache(folio, mapping, index,
3fea5a49
JW
1793 swp_to_radix_entry(swap), gfp,
1794 charge_mm);
1795 if (error)
14235ab3 1796 goto failed;
c5bf121e
VRP
1797
1798 spin_lock_irq(&info->lock);
1799 info->swapped--;
1800 shmem_recalc_inode(inode);
1801 spin_unlock_irq(&info->lock);
1802
1803 if (sgp == SGP_WRITE)
da08e9b7 1804 folio_mark_accessed(folio);
c5bf121e 1805
75fa68a5 1806 delete_from_swap_cache(folio);
da08e9b7 1807 folio_mark_dirty(folio);
c5bf121e
VRP
1808 swap_free(swap);
1809
da08e9b7 1810 *foliop = folio;
c5bf121e
VRP
1811 return 0;
1812failed:
1813 if (!shmem_confirm_swap(mapping, index, swap))
1814 error = -EEXIST;
6cec2b95
ML
1815 if (error == -EIO)
1816 shmem_set_folio_swapin_error(inode, index, folio, swap);
c5bf121e 1817unlock:
da08e9b7
MWO
1818 if (folio) {
1819 folio_unlock(folio);
1820 folio_put(folio);
c5bf121e
VRP
1821 }
1822
1823 return error;
1824}
1825
1da177e4 1826/*
fc26babb 1827 * shmem_get_folio_gfp - find folio in cache, or get from swap, or allocate
1da177e4
LT
1828 *
1829 * If we allocate a new one we do not mark it dirty. That's up to the
1830 * vm. If we swap it in we mark it dirty, since we also free the swap
9e18eb29
ALC
1831 * entry: a page cannot live in both swap and the page cache.
1832 *
c949b097 1833 * vma, vmf, and fault_type are only supplied by shmem_fault:
9e18eb29 1834 * otherwise they are NULL.
1da177e4 1835 */
fc26babb
MWO
1836static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
1837 struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
1838 struct vm_area_struct *vma, struct vm_fault *vmf,
1839 vm_fault_t *fault_type)
1da177e4
LT
1840{
1841 struct address_space *mapping = inode->i_mapping;
23f919d4 1842 struct shmem_inode_info *info = SHMEM_I(inode);
1da177e4 1843 struct shmem_sb_info *sbinfo;
9e18eb29 1844 struct mm_struct *charge_mm;
b7dd44a1 1845 struct folio *folio;
6fe7d712 1846 pgoff_t hindex;
164cc4fe 1847 gfp_t huge_gfp;
1da177e4 1848 int error;
54af6042 1849 int once = 0;
1635f6a7 1850 int alloced = 0;
1da177e4 1851
09cbfeaf 1852 if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
1da177e4 1853 return -EFBIG;
1da177e4 1854repeat:
c5bf121e
VRP
1855 if (sgp <= SGP_CACHE &&
1856 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1857 return -EINVAL;
1858 }
1859
1860 sbinfo = SHMEM_SB(inode->i_sb);
04f94e3f 1861 charge_mm = vma ? vma->vm_mm : NULL;
c5bf121e 1862
b1d0ec3a
MWO
1863 folio = __filemap_get_folio(mapping, index, FGP_ENTRY | FGP_LOCK, 0);
1864 if (folio && vma && userfaultfd_minor(vma)) {
1865 if (!xa_is_value(folio)) {
1866 folio_unlock(folio);
1867 folio_put(folio);
c949b097
AR
1868 }
1869 *fault_type = handle_userfault(vmf, VM_UFFD_MINOR);
1870 return 0;
1871 }
1872
b1d0ec3a 1873 if (xa_is_value(folio)) {
da08e9b7 1874 error = shmem_swapin_folio(inode, index, &folio,
c5bf121e
VRP
1875 sgp, gfp, vma, fault_type);
1876 if (error == -EEXIST)
1877 goto repeat;
54af6042 1878
fc26babb 1879 *foliop = folio;
c5bf121e 1880 return error;
54af6042
HD
1881 }
1882
b1d0ec3a 1883 if (folio) {
acdd9f8e 1884 if (sgp == SGP_WRITE)
b1d0ec3a
MWO
1885 folio_mark_accessed(folio);
1886 if (folio_test_uptodate(folio))
acdd9f8e 1887 goto out;
fc26babb 1888 /* fallocated folio */
1635f6a7
HD
1889 if (sgp != SGP_READ)
1890 goto clear;
b1d0ec3a
MWO
1891 folio_unlock(folio);
1892 folio_put(folio);
1635f6a7 1893 }
27ab7006
HD
1894
1895 /*
fc26babb
MWO
1896 * SGP_READ: succeed on hole, with NULL folio, letting caller zero.
1897 * SGP_NOALLOC: fail on hole, with NULL folio, letting caller fail.
acdd9f8e 1898 */
fc26babb 1899 *foliop = NULL;
acdd9f8e
HD
1900 if (sgp == SGP_READ)
1901 return 0;
1902 if (sgp == SGP_NOALLOC)
1903 return -ENOENT;
1904
1905 /*
1906 * Fast cache lookup and swap lookup did not find it: allocate.
27ab7006 1907 */
54af6042 1908
c5bf121e
VRP
1909 if (vma && userfaultfd_missing(vma)) {
1910 *fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
1911 return 0;
1912 }
cfda0526 1913
7c6c6cc4 1914 if (!shmem_is_huge(vma, inode, index, false))
c5bf121e 1915 goto alloc_nohuge;
1da177e4 1916
164cc4fe 1917 huge_gfp = vma_thp_gfp_mask(vma);
78cc8cdc 1918 huge_gfp = limit_gfp_mask(huge_gfp, gfp);
b1d0ec3a
MWO
1919 folio = shmem_alloc_and_acct_folio(huge_gfp, inode, index, true);
1920 if (IS_ERR(folio)) {
c5bf121e 1921alloc_nohuge:
b1d0ec3a 1922 folio = shmem_alloc_and_acct_folio(gfp, inode, index, false);
c5bf121e 1923 }
b1d0ec3a 1924 if (IS_ERR(folio)) {
c5bf121e 1925 int retry = 5;
800d8c63 1926
b1d0ec3a
MWO
1927 error = PTR_ERR(folio);
1928 folio = NULL;
c5bf121e
VRP
1929 if (error != -ENOSPC)
1930 goto unlock;
1931 /*
fc26babb 1932 * Try to reclaim some space by splitting a large folio
c5bf121e
VRP
1933 * beyond i_size on the filesystem.
1934 */
1935 while (retry--) {
1936 int ret;
66d2f4d2 1937
c5bf121e
VRP
1938 ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
1939 if (ret == SHRINK_STOP)
1940 break;
1941 if (ret)
1942 goto alloc_nohuge;
b065b432 1943 }
c5bf121e
VRP
1944 goto unlock;
1945 }
54af6042 1946
b1d0ec3a 1947 hindex = round_down(index, folio_nr_pages(folio));
54af6042 1948
c5bf121e 1949 if (sgp == SGP_WRITE)
b1d0ec3a 1950 __folio_set_referenced(folio);
c5bf121e 1951
b7dd44a1 1952 error = shmem_add_to_page_cache(folio, mapping, hindex,
3fea5a49
JW
1953 NULL, gfp & GFP_RECLAIM_MASK,
1954 charge_mm);
1955 if (error)
c5bf121e 1956 goto unacct;
b1d0ec3a 1957 folio_add_lru(folio);
779750d2 1958
c5bf121e 1959 spin_lock_irq(&info->lock);
b1d0ec3a 1960 info->alloced += folio_nr_pages(folio);
fa020a2b 1961 inode->i_blocks += (blkcnt_t)BLOCKS_PER_PAGE << folio_order(folio);
c5bf121e
VRP
1962 shmem_recalc_inode(inode);
1963 spin_unlock_irq(&info->lock);
1964 alloced = true;
1965
b1d0ec3a 1966 if (folio_test_pmd_mappable(folio) &&
c5bf121e 1967 DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
fc26babb 1968 folio_next_index(folio) - 1) {
ec9516fb 1969 /*
fc26babb 1970 * Part of the large folio is beyond i_size: subject
c5bf121e 1971 * to shrink under memory pressure.
1635f6a7 1972 */
c5bf121e 1973 spin_lock(&sbinfo->shrinklist_lock);
1635f6a7 1974 /*
c5bf121e
VRP
1975 * _careful to defend against unlocked access to
1976 * ->shrink_list in shmem_unused_huge_shrink()
ec9516fb 1977 */
c5bf121e
VRP
1978 if (list_empty_careful(&info->shrinklist)) {
1979 list_add_tail(&info->shrinklist,
1980 &sbinfo->shrinklist);
1981 sbinfo->shrinklist_len++;
1982 }
1983 spin_unlock(&sbinfo->shrinklist_lock);
1984 }
800d8c63 1985
c5bf121e 1986 /*
fc26babb 1987 * Let SGP_FALLOC use the SGP_WRITE optimization on a new folio.
c5bf121e
VRP
1988 */
1989 if (sgp == SGP_FALLOC)
1990 sgp = SGP_WRITE;
1991clear:
1992 /*
fc26babb
MWO
1993 * Let SGP_WRITE caller clear ends if write does not fill folio;
1994 * but SGP_FALLOC on a folio fallocated earlier must initialize
c5bf121e
VRP
1995 * it now, lest undo on failure cancel our earlier guarantee.
1996 */
b1d0ec3a
MWO
1997 if (sgp != SGP_WRITE && !folio_test_uptodate(folio)) {
1998 long i, n = folio_nr_pages(folio);
c5bf121e 1999
b1d0ec3a
MWO
2000 for (i = 0; i < n; i++)
2001 clear_highpage(folio_page(folio, i));
2002 flush_dcache_folio(folio);
2003 folio_mark_uptodate(folio);
1da177e4 2004 }
bde05d1c 2005
54af6042 2006 /* Perhaps the file has been truncated since we checked */
75edd345 2007 if (sgp <= SGP_CACHE &&
09cbfeaf 2008 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
267a4c76 2009 if (alloced) {
b1d0ec3a
MWO
2010 folio_clear_dirty(folio);
2011 filemap_remove_folio(folio);
4595ef88 2012 spin_lock_irq(&info->lock);
267a4c76 2013 shmem_recalc_inode(inode);
4595ef88 2014 spin_unlock_irq(&info->lock);
267a4c76 2015 }
54af6042 2016 error = -EINVAL;
267a4c76 2017 goto unlock;
e83c32e8 2018 }
63ec1973 2019out:
fc26babb 2020 *foliop = folio;
54af6042 2021 return 0;
1da177e4 2022
59a16ead 2023 /*
54af6042 2024 * Error recovery.
59a16ead 2025 */
54af6042 2026unacct:
b1d0ec3a 2027 shmem_inode_unacct_blocks(inode, folio_nr_pages(folio));
800d8c63 2028
b1d0ec3a
MWO
2029 if (folio_test_large(folio)) {
2030 folio_unlock(folio);
2031 folio_put(folio);
800d8c63
KS
2032 goto alloc_nohuge;
2033 }
d1899228 2034unlock:
b1d0ec3a
MWO
2035 if (folio) {
2036 folio_unlock(folio);
2037 folio_put(folio);
54af6042
HD
2038 }
2039 if (error == -ENOSPC && !once++) {
4595ef88 2040 spin_lock_irq(&info->lock);
54af6042 2041 shmem_recalc_inode(inode);
4595ef88 2042 spin_unlock_irq(&info->lock);
27ab7006 2043 goto repeat;
ff36b801 2044 }
7f4446ee 2045 if (error == -EEXIST)
54af6042
HD
2046 goto repeat;
2047 return error;
1da177e4
LT
2048}
2049
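/*
 * shmem_get_folio - convenience wrapper for callers without fault context.
 * Looks up (or, depending on @sgp, allocates) the folio at @index, using
 * the mapping's gfp mask.  Callers in this file use it roughly like:
 *
 *	struct folio *folio;
 *	int error = shmem_get_folio(inode, index, &folio, SGP_WRITE);
 *	if (!error && folio) {
 *		... use the locked folio ...
 *		folio_unlock(folio);
 *		folio_put(folio);
 *	}
 *
 * (a sketch of the common pattern, not a verbatim caller).
 */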
4e1fc793
MWO
2050int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop,
2051 enum sgp_type sgp)
2052{
2053 return shmem_get_folio_gfp(inode, index, foliop, sgp,
2054 mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
2055}
2056
10d20bd2
LT
2057/*
2058 * This is like autoremove_wake_function, but it removes the wait queue
2059 * entry unconditionally - even if something else had already woken the
2060 * target.
2061 */
ac6424b9 2062static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
10d20bd2
LT
2063{
2064 int ret = default_wake_function(wait, mode, sync, key);
2055da97 2065 list_del_init(&wait->entry);
10d20bd2
LT
2066 return ret;
2067}
2068
20acce67 2069static vm_fault_t shmem_fault(struct vm_fault *vmf)
1da177e4 2070{
11bac800 2071 struct vm_area_struct *vma = vmf->vma;
496ad9aa 2072 struct inode *inode = file_inode(vma->vm_file);
9e18eb29 2073 gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
68a54100 2074 struct folio *folio = NULL;
20acce67
SJ
2075 int err;
2076 vm_fault_t ret = VM_FAULT_LOCKED;
1da177e4 2077
f00cdc6d
HD
2078 /*
2079 * Trinity finds that probing a hole which tmpfs is punching can
2080 * prevent the hole-punch from ever completing: which in turn
9608703e 2081 * locks writers out with its hold on i_rwsem. So refrain from
8e205f77
HD
2082 * faulting pages into the hole while it's being punched. Although
2083 * shmem_undo_range() does remove the additions, it may be unable to
2084 * keep up, as each new page needs its own unmap_mapping_range() call,
2085 * and the i_mmap tree grows ever slower to scan if new vmas are added.
2086 *
2087 * It does not matter if we sometimes reach this check just before the
2088 * hole-punch begins, so that one fault then races with the punch:
2089 * we just need to make racing faults a rare case.
2090 *
2091 * The implementation below would be much simpler if we just used a
9608703e 2092 * standard mutex or completion: but we cannot take i_rwsem in fault,
8e205f77 2093 * and bloating every shmem inode for this unlikely case would be sad.
f00cdc6d
HD
2094 */
2095 if (unlikely(inode->i_private)) {
2096 struct shmem_falloc *shmem_falloc;
2097
2098 spin_lock(&inode->i_lock);
2099 shmem_falloc = inode->i_private;
8e205f77
HD
2100 if (shmem_falloc &&
2101 shmem_falloc->waitq &&
2102 vmf->pgoff >= shmem_falloc->start &&
2103 vmf->pgoff < shmem_falloc->next) {
8897c1b1 2104 struct file *fpin;
8e205f77 2105 wait_queue_head_t *shmem_falloc_waitq;
10d20bd2 2106 DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
8e205f77
HD
2107
2108 ret = VM_FAULT_NOPAGE;
8897c1b1
KS
2109 fpin = maybe_unlock_mmap_for_io(vmf, NULL);
2110 if (fpin)
8e205f77 2111 ret = VM_FAULT_RETRY;
8e205f77
HD
2112
2113 shmem_falloc_waitq = shmem_falloc->waitq;
2114 prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
2115 TASK_UNINTERRUPTIBLE);
2116 spin_unlock(&inode->i_lock);
2117 schedule();
2118
2119 /*
2120 * shmem_falloc_waitq points into the shmem_fallocate()
2121 * stack of the hole-punching task: shmem_falloc_waitq
2122 * is usually invalid by the time we reach here, but
2123 * finish_wait() does not dereference it in that case;
2124 * though i_lock is still needed lest we race with wake_up_all().
2125 */
2126 spin_lock(&inode->i_lock);
2127 finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
2128 spin_unlock(&inode->i_lock);
8897c1b1
KS
2129
2130 if (fpin)
2131 fput(fpin);
8e205f77 2132 return ret;
f00cdc6d 2133 }
8e205f77 2134 spin_unlock(&inode->i_lock);
f00cdc6d
HD
2135 }
2136
68a54100 2137 err = shmem_get_folio_gfp(inode, vmf->pgoff, &folio, SGP_CACHE,
cfda0526 2138 gfp, vma, vmf, &ret);
20acce67
SJ
2139 if (err)
2140 return vmf_error(err);
68a54100
MWO
2141 if (folio)
2142 vmf->page = folio_file_page(folio, vmf->pgoff);
68da9f05 2143 return ret;
1da177e4
LT
2144}
2145
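/*
 * For mappings that may use huge pages, try to return a PMD-aligned
 * address: if the normal search succeeds but the result is not suitably
 * aligned, repeat it with the length inflated by up to one huge page and
 * shift the result to the required offset.
 */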
c01d5b30
HD
2146unsigned long shmem_get_unmapped_area(struct file *file,
2147 unsigned long uaddr, unsigned long len,
2148 unsigned long pgoff, unsigned long flags)
2149{
2150 unsigned long (*get_area)(struct file *,
2151 unsigned long, unsigned long, unsigned long, unsigned long);
2152 unsigned long addr;
2153 unsigned long offset;
2154 unsigned long inflated_len;
2155 unsigned long inflated_addr;
2156 unsigned long inflated_offset;
2157
2158 if (len > TASK_SIZE)
2159 return -ENOMEM;
2160
2161 get_area = current->mm->get_unmapped_area;
2162 addr = get_area(file, uaddr, len, pgoff, flags);
2163
396bcc52 2164 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
c01d5b30
HD
2165 return addr;
2166 if (IS_ERR_VALUE(addr))
2167 return addr;
2168 if (addr & ~PAGE_MASK)
2169 return addr;
2170 if (addr > TASK_SIZE - len)
2171 return addr;
2172
2173 if (shmem_huge == SHMEM_HUGE_DENY)
2174 return addr;
2175 if (len < HPAGE_PMD_SIZE)
2176 return addr;
2177 if (flags & MAP_FIXED)
2178 return addr;
2179 /*
2180 * Our priority is to support MAP_SHARED mapped hugely;
2181 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
99158997
KS
2182 * But if the caller specified an address hint and we allocated the area there
2183 * successfully, respect that as before.
c01d5b30 2184 */
99158997 2185 if (uaddr == addr)
c01d5b30
HD
2186 return addr;
2187
2188 if (shmem_huge != SHMEM_HUGE_FORCE) {
2189 struct super_block *sb;
2190
2191 if (file) {
2192 VM_BUG_ON(file->f_op != &shmem_file_operations);
2193 sb = file_inode(file)->i_sb;
2194 } else {
2195 /*
2196 * Called directly from mm/mmap.c, or drivers/char/mem.c
2197 * for "/dev/zero", to create a shared anonymous object.
2198 */
2199 if (IS_ERR(shm_mnt))
2200 return addr;
2201 sb = shm_mnt->mnt_sb;
2202 }
3089bf61 2203 if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
c01d5b30
HD
2204 return addr;
2205 }
2206
2207 offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
2208 if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
2209 return addr;
2210 if ((addr & (HPAGE_PMD_SIZE-1)) == offset)
2211 return addr;
2212
2213 inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
2214 if (inflated_len > TASK_SIZE)
2215 return addr;
2216 if (inflated_len < len)
2217 return addr;
2218
99158997 2219 inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags);
c01d5b30
HD
2220 if (IS_ERR_VALUE(inflated_addr))
2221 return addr;
2222 if (inflated_addr & ~PAGE_MASK)
2223 return addr;
2224
2225 inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
2226 inflated_addr += offset - inflated_offset;
2227 if (inflated_offset > offset)
2228 inflated_addr += HPAGE_PMD_SIZE;
2229
2230 if (inflated_addr > TASK_SIZE - len)
2231 return addr;
2232 return inflated_addr;
2233}
2234
1da177e4 2235#ifdef CONFIG_NUMA
41ffe5d5 2236static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
1da177e4 2237{
496ad9aa 2238 struct inode *inode = file_inode(vma->vm_file);
41ffe5d5 2239 return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
1da177e4
LT
2240}
2241
d8dc74f2
AB
2242static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2243 unsigned long addr)
1da177e4 2244{
496ad9aa 2245 struct inode *inode = file_inode(vma->vm_file);
41ffe5d5 2246 pgoff_t index;
1da177e4 2247
41ffe5d5
HD
2248 index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2249 return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
1da177e4
LT
2250}
2251#endif
2252
d7c9e99a 2253int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
1da177e4 2254{
496ad9aa 2255 struct inode *inode = file_inode(file);
1da177e4
LT
2256 struct shmem_inode_info *info = SHMEM_I(inode);
2257 int retval = -ENOMEM;
2258
ea0dfeb4
HD
2259 /*
2260 * What serializes the accesses to info->flags?
2261 * ipc_lock_object() when called from shmctl_do_lock(),
2262 * no serialization needed when called from shm_destroy().
2263 */
1da177e4 2264 if (lock && !(info->flags & VM_LOCKED)) {
d7c9e99a 2265 if (!user_shm_lock(inode->i_size, ucounts))
1da177e4
LT
2266 goto out_nomem;
2267 info->flags |= VM_LOCKED;
89e004ea 2268 mapping_set_unevictable(file->f_mapping);
1da177e4 2269 }
d7c9e99a
AG
2270 if (!lock && (info->flags & VM_LOCKED) && ucounts) {
2271 user_shm_unlock(inode->i_size, ucounts);
1da177e4 2272 info->flags &= ~VM_LOCKED;
89e004ea 2273 mapping_clear_unevictable(file->f_mapping);
1da177e4
LT
2274 }
2275 retval = 0;
89e004ea 2276
1da177e4 2277out_nomem:
1da177e4
LT
2278 return retval;
2279}
2280
9b83a6a8 2281static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
1da177e4 2282{
d09e8ca6
PT
2283 struct inode *inode = file_inode(file);
2284 struct shmem_inode_info *info = SHMEM_I(inode);
22247efd 2285 int ret;
ab3948f5 2286
22247efd
PX
2287 ret = seal_check_future_write(info->seals, vma);
2288 if (ret)
2289 return ret;
ab3948f5 2290
51b0bff2
CM
2291 /* arm64 - allow memory tagging on RAM-based files */
2292 vma->vm_flags |= VM_MTE_ALLOWED;
2293
1da177e4 2294 file_accessed(file);
d09e8ca6
PT
2295 /* This is anonymous shared memory if it is unlinked at the time of mmap */
2296 if (inode->i_nlink)
2297 vma->vm_ops = &shmem_vm_ops;
2298 else
2299 vma->vm_ops = &shmem_anon_vm_ops;
1da177e4
LT
2300 return 0;
2301}
2302
cb241339
HD
2303#ifdef CONFIG_TMPFS_XATTR
2304static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
2305
2306/*
2307 * chattr's fsflags are unrelated to extended attributes,
2308 * but tmpfs has chosen to enable them under the same config option.
2309 */
2310static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
2311{
2312 unsigned int i_flags = 0;
2313
2314 if (fsflags & FS_NOATIME_FL)
2315 i_flags |= S_NOATIME;
2316 if (fsflags & FS_APPEND_FL)
2317 i_flags |= S_APPEND;
2318 if (fsflags & FS_IMMUTABLE_FL)
2319 i_flags |= S_IMMUTABLE;
2320 /*
2321 * But FS_NODUMP_FL does not require any action in i_flags.
2322 */
2323 inode_set_flags(inode, i_flags, S_NOATIME | S_APPEND | S_IMMUTABLE);
2324}
2325#else
2326static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
e408e695 2327{
e408e695 2328}
cb241339
HD
2329#define shmem_initxattrs NULL
2330#endif
e408e695
TT
2331
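/*
 * Allocate and initialise a new tmpfs inode: reserve an inode number
 * against the superblock limits, then set up the shmem_inode_info and
 * the inode/file operations according to the file type in @mode.
 */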
2332static struct inode *shmem_get_inode(struct super_block *sb, struct inode *dir,
09208d15 2333 umode_t mode, dev_t dev, unsigned long flags)
1da177e4
LT
2334{
2335 struct inode *inode;
2336 struct shmem_inode_info *info;
2337 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
e809d5f0 2338 ino_t ino;
1da177e4 2339
e809d5f0 2340 if (shmem_reserve_inode(sb, &ino))
5b04c689 2341 return NULL;
1da177e4
LT
2342
2343 inode = new_inode(sb);
2344 if (inode) {
e809d5f0 2345 inode->i_ino = ino;
21cb47be 2346 inode_init_owner(&init_user_ns, inode, dir, mode);
1da177e4 2347 inode->i_blocks = 0;
078cd827 2348 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
a251c17a 2349 inode->i_generation = get_random_u32();
1da177e4
LT
2350 info = SHMEM_I(inode);
2351 memset(info, 0, (char *)inode - (char *)info);
2352 spin_lock_init(&info->lock);
af53d3e9 2353 atomic_set(&info->stop_eviction, 0);
40e041a2 2354 info->seals = F_SEAL_SEAL;
0b0a0806 2355 info->flags = flags & VM_NORESERVE;
f7cd16a5 2356 info->i_crtime = inode->i_mtime;
e408e695
TT
2357 info->fsflags = (dir == NULL) ? 0 :
2358 SHMEM_I(dir)->fsflags & SHMEM_FL_INHERITED;
cb241339
HD
2359 if (info->fsflags)
2360 shmem_set_inode_flags(inode, info->fsflags);
779750d2 2361 INIT_LIST_HEAD(&info->shrinklist);
1da177e4 2362 INIT_LIST_HEAD(&info->swaplist);
38f38657 2363 simple_xattrs_init(&info->xattrs);
72c04902 2364 cache_no_acl(inode);
ff36da69 2365 mapping_set_large_folios(inode->i_mapping);
1da177e4
LT
2366
2367 switch (mode & S_IFMT) {
2368 default:
39f0247d 2369 inode->i_op = &shmem_special_inode_operations;
1da177e4
LT
2370 init_special_inode(inode, mode, dev);
2371 break;
2372 case S_IFREG:
14fcc23f 2373 inode->i_mapping->a_ops = &shmem_aops;
1da177e4
LT
2374 inode->i_op = &shmem_inode_operations;
2375 inode->i_fop = &shmem_file_operations;
71fe804b
LS
2376 mpol_shared_policy_init(&info->policy,
2377 shmem_get_sbmpol(sbinfo));
1da177e4
LT
2378 break;
2379 case S_IFDIR:
d8c76e6f 2380 inc_nlink(inode);
1da177e4
LT
2381 /* Some things misbehave if size == 0 on a directory */
2382 inode->i_size = 2 * BOGO_DIRENT_SIZE;
2383 inode->i_op = &shmem_dir_inode_operations;
2384 inode->i_fop = &simple_dir_operations;
2385 break;
2386 case S_IFLNK:
2387 /*
2388 * Must not load anything in the rbtree,
2389 * mpol_free_shared_policy will not be called.
2390 */
71fe804b 2391 mpol_shared_policy_init(&info->policy, NULL);
1da177e4
LT
2392 break;
2393 }
b45d71fb
JFG
2394
2395 lockdep_annotate_inode_mutex_key(inode);
5b04c689
PE
2396 } else
2397 shmem_free_inode(sb);
1da177e4
LT
2398 return inode;
2399}
2400
3460f6e5
AR
2401#ifdef CONFIG_USERFAULTFD
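/*
 * Service a userfaultfd UFFDIO_COPY or UFFDIO_ZEROPAGE request on shmem:
 * allocate a folio, fill it from the user buffer at @src_addr (or zero it),
 * add it to the page cache at @pgoff, and install the pte in @dst_vma.
 */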
2402int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
2403 pmd_t *dst_pmd,
2404 struct vm_area_struct *dst_vma,
2405 unsigned long dst_addr,
2406 unsigned long src_addr,
8ee79edf 2407 bool zeropage, bool wp_copy,
3460f6e5 2408 struct page **pagep)
4c27fe4c
MR
2409{
2410 struct inode *inode = file_inode(dst_vma->vm_file);
2411 struct shmem_inode_info *info = SHMEM_I(inode);
4c27fe4c
MR
2412 struct address_space *mapping = inode->i_mapping;
2413 gfp_t gfp = mapping_gfp_mask(mapping);
2414 pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
4c27fe4c 2415 void *page_kaddr;
b7dd44a1 2416 struct folio *folio;
4c27fe4c 2417 int ret;
3460f6e5 2418 pgoff_t max_off;
4c27fe4c 2419
7ed9d238
AR
2420 if (!shmem_inode_acct_block(inode, 1)) {
2421 /*
2422 * We may have got a page, returned -ENOENT triggering a retry,
2423 * and now we find ourselves with -ENOMEM. Release the page, to
2424 * avoid a BUG_ON in our caller.
2425 */
2426 if (unlikely(*pagep)) {
2427 put_page(*pagep);
2428 *pagep = NULL;
2429 }
7d64ae3a 2430 return -ENOMEM;
7ed9d238 2431 }
4c27fe4c 2432
cb658a45 2433 if (!*pagep) {
7d64ae3a 2434 ret = -ENOMEM;
7a7256d5
MWO
2435 folio = shmem_alloc_folio(gfp, info, pgoff);
2436 if (!folio)
0f079694 2437 goto out_unacct_blocks;
4c27fe4c 2438
3460f6e5 2439 if (!zeropage) { /* COPY */
7a7256d5 2440 page_kaddr = kmap_local_folio(folio, 0);
5dc21f0c
IW
2441 /*
2442 * The read mmap_lock is held here. Despite the
2443 * mmap_lock being read-recursive, a deadlock is still
2444 * possible if a writer has taken a lock. For example:
2445 *
2446 * process A thread 1 takes read lock on own mmap_lock
2447 * process A thread 2 calls mmap, blocks taking write lock
2448 * process B thread 1 takes page fault, read lock on own mmap lock
2449 * process B thread 2 calls mmap, blocks taking write lock
2450 * process A thread 1 blocks taking read lock on process B
2451 * process B thread 1 blocks taking read lock on process A
2452 *
2453 * Disable page faults to prevent potential deadlock
2454 * and retry the copy outside the mmap_lock.
2455 */
2456 pagefault_disable();
8d103963
MR
2457 ret = copy_from_user(page_kaddr,
2458 (const void __user *)src_addr,
2459 PAGE_SIZE);
5dc21f0c 2460 pagefault_enable();
7a7256d5 2461 kunmap_local(page_kaddr);
8d103963 2462
c1e8d7c6 2463 /* fallback to copy_from_user outside mmap_lock */
8d103963 2464 if (unlikely(ret)) {
7a7256d5 2465 *pagep = &folio->page;
7d64ae3a 2466 ret = -ENOENT;
8d103963 2467 /* don't free the page */
7d64ae3a 2468 goto out_unacct_blocks;
8d103963 2469 }
19b482c2 2470
7a7256d5 2471 flush_dcache_folio(folio);
3460f6e5 2472 } else { /* ZEROPAGE */
7a7256d5 2473 clear_user_highpage(&folio->page, dst_addr);
4c27fe4c
MR
2474 }
2475 } else {
7a7256d5
MWO
2476 folio = page_folio(*pagep);
2477 VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
4c27fe4c
MR
2478 *pagep = NULL;
2479 }
2480
7a7256d5
MWO
2481 VM_BUG_ON(folio_test_locked(folio));
2482 VM_BUG_ON(folio_test_swapbacked(folio));
2483 __folio_set_locked(folio);
2484 __folio_set_swapbacked(folio);
2485 __folio_mark_uptodate(folio);
9cc90c66 2486
e2a50c1f 2487 ret = -EFAULT;
e2a50c1f 2488 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3460f6e5 2489 if (unlikely(pgoff >= max_off))
e2a50c1f
AA
2490 goto out_release;
2491
b7dd44a1 2492 ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL,
3fea5a49 2493 gfp & GFP_RECLAIM_MASK, dst_mm);
4c27fe4c 2494 if (ret)
3fea5a49 2495 goto out_release;
4c27fe4c 2496
7d64ae3a 2497 ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
7a7256d5 2498 &folio->page, true, wp_copy);
7d64ae3a
AR
2499 if (ret)
2500 goto out_delete_from_cache;
4c27fe4c 2501
94b7cc01 2502 spin_lock_irq(&info->lock);
4c27fe4c
MR
2503 info->alloced++;
2504 inode->i_blocks += BLOCKS_PER_PAGE;
2505 shmem_recalc_inode(inode);
94b7cc01 2506 spin_unlock_irq(&info->lock);
4c27fe4c 2507
7a7256d5 2508 folio_unlock(folio);
7d64ae3a
AR
2509 return 0;
2510out_delete_from_cache:
7a7256d5 2511 filemap_remove_folio(folio);
4c27fe4c 2512out_release:
7a7256d5
MWO
2513 folio_unlock(folio);
2514 folio_put(folio);
4c27fe4c 2515out_unacct_blocks:
0f079694 2516 shmem_inode_unacct_blocks(inode, 1);
7d64ae3a 2517 return ret;
8d103963 2518}
3460f6e5 2519#endif /* CONFIG_USERFAULTFD */
8d103963 2520
1da177e4 2521#ifdef CONFIG_TMPFS
92e1d5be 2522static const struct inode_operations shmem_symlink_inode_operations;
69f07ec9 2523static const struct inode_operations shmem_short_symlink_operations;
1da177e4 2524
1da177e4 2525static int
800d15a5 2526shmem_write_begin(struct file *file, struct address_space *mapping,
9d6b0cd7 2527 loff_t pos, unsigned len,
800d15a5 2528 struct page **pagep, void **fsdata)
1da177e4 2529{
800d15a5 2530 struct inode *inode = mapping->host;
40e041a2 2531 struct shmem_inode_info *info = SHMEM_I(inode);
09cbfeaf 2532 pgoff_t index = pos >> PAGE_SHIFT;
eff1f906 2533 struct folio *folio;
a7605426 2534 int ret = 0;
40e041a2 2535
9608703e 2536 /* i_rwsem is held by caller */
ab3948f5
JFG
2537 if (unlikely(info->seals & (F_SEAL_GROW |
2538 F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
2539 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
40e041a2
DR
2540 return -EPERM;
2541 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
2542 return -EPERM;
2543 }
2544
eff1f906 2545 ret = shmem_get_folio(inode, index, &folio, SGP_WRITE);
a7605426
YS
2546
2547 if (ret)
2548 return ret;
2549
eff1f906 2550 *pagep = folio_file_page(folio, index);
a7605426 2551 if (PageHWPoison(*pagep)) {
eff1f906
MWO
2552 folio_unlock(folio);
2553 folio_put(folio);
a7605426
YS
2554 *pagep = NULL;
2555 return -EIO;
2556 }
2557
2558 return 0;
800d15a5
NP
2559}
2560
2561static int
2562shmem_write_end(struct file *file, struct address_space *mapping,
2563 loff_t pos, unsigned len, unsigned copied,
2564 struct page *page, void *fsdata)
2565{
2566 struct inode *inode = mapping->host;
2567
d3602444
HD
2568 if (pos + copied > inode->i_size)
2569 i_size_write(inode, pos + copied);
2570
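/*
 * A folio that was not previously uptodate must have the parts the
 * write did not cover zeroed (including the other subpages of a
 * huge page) before it can be marked uptodate.
 */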
ec9516fb 2571 if (!PageUptodate(page)) {
800d8c63
KS
2572 struct page *head = compound_head(page);
2573 if (PageTransCompound(page)) {
2574 int i;
2575
2576 for (i = 0; i < HPAGE_PMD_NR; i++) {
2577 if (head + i == page)
2578 continue;
2579 clear_highpage(head + i);
2580 flush_dcache_page(head + i);
2581 }
2582 }
09cbfeaf
KS
2583 if (copied < PAGE_SIZE) {
2584 unsigned from = pos & (PAGE_SIZE - 1);
ec9516fb 2585 zero_user_segments(page, 0, from,
09cbfeaf 2586 from + copied, PAGE_SIZE);
ec9516fb 2587 }
800d8c63 2588 SetPageUptodate(head);
ec9516fb 2589 }
800d15a5 2590 set_page_dirty(page);
6746aff7 2591 unlock_page(page);
09cbfeaf 2592 put_page(page);
800d15a5 2593
800d15a5 2594 return copied;
1da177e4
LT
2595}
2596
2ba5bbed 2597static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
1da177e4 2598{
6e58e79d
AV
2599 struct file *file = iocb->ki_filp;
2600 struct inode *inode = file_inode(file);
1da177e4 2601 struct address_space *mapping = inode->i_mapping;
41ffe5d5
HD
2602 pgoff_t index;
2603 unsigned long offset;
f7c1d074 2604 int error = 0;
cb66a7a1 2605 ssize_t retval = 0;
6e58e79d 2606 loff_t *ppos = &iocb->ki_pos;
a0ee5ec5 2607
09cbfeaf
KS
2608 index = *ppos >> PAGE_SHIFT;
2609 offset = *ppos & ~PAGE_MASK;
1da177e4
LT
2610
2611 for (;;) {
4601e2fc 2612 struct folio *folio = NULL;
1da177e4 2613 struct page *page = NULL;
41ffe5d5
HD
2614 pgoff_t end_index;
2615 unsigned long nr, ret;
1da177e4
LT
2616 loff_t i_size = i_size_read(inode);
2617
09cbfeaf 2618 end_index = i_size >> PAGE_SHIFT;
1da177e4
LT
2619 if (index > end_index)
2620 break;
2621 if (index == end_index) {
09cbfeaf 2622 nr = i_size & ~PAGE_MASK;
1da177e4
LT
2623 if (nr <= offset)
2624 break;
2625 }
2626
4601e2fc 2627 error = shmem_get_folio(inode, index, &folio, SGP_READ);
6e58e79d
AV
2628 if (error) {
2629 if (error == -EINVAL)
2630 error = 0;
1da177e4
LT
2631 break;
2632 }
4601e2fc
MWO
2633 if (folio) {
2634 folio_unlock(folio);
a7605426 2635
4601e2fc 2636 page = folio_file_page(folio, index);
a7605426 2637 if (PageHWPoison(page)) {
4601e2fc 2638 folio_put(folio);
a7605426
YS
2639 error = -EIO;
2640 break;
2641 }
75edd345 2642 }
1da177e4
LT
2643
2644 /*
2645 * We must evaluate after, since reads (unlike writes)
9608703e 2646 * are called without i_rwsem protection against truncate
1da177e4 2647 */
09cbfeaf 2648 nr = PAGE_SIZE;
1da177e4 2649 i_size = i_size_read(inode);
09cbfeaf 2650 end_index = i_size >> PAGE_SHIFT;
1da177e4 2651 if (index == end_index) {
09cbfeaf 2652 nr = i_size & ~PAGE_MASK;
1da177e4 2653 if (nr <= offset) {
4601e2fc
MWO
2654 if (folio)
2655 folio_put(folio);
1da177e4
LT
2656 break;
2657 }
2658 }
2659 nr -= offset;
2660
4601e2fc 2661 if (folio) {
1da177e4
LT
2662 /*
2663 * If users can be writing to this page using arbitrary
2664 * virtual addresses, take care about potential aliasing
2665 * before reading the page on the kernel side.
2666 */
2667 if (mapping_writably_mapped(mapping))
2668 flush_dcache_page(page);
2669 /*
2670 * Mark the page accessed if we read the beginning.
2671 */
2672 if (!offset)
4601e2fc 2673 folio_mark_accessed(folio);
1bdec44b
HD
2674 /*
2675 * Ok, we have the page, and it's up-to-date, so
2676 * now we can copy it to user space...
2677 */
2678 ret = copy_page_to_iter(page, offset, nr, to);
4601e2fc 2679 folio_put(folio);
1bdec44b 2680
fcb14cb1 2681 } else if (user_backed_iter(to)) {
1bdec44b
HD
2682 /*
2683 * Copy to user tends to be so well optimized, but
2684 * clear_user() not so much, that it is noticeably
2685 * faster to copy the zero page instead of clearing.
2686 */
2687 ret = copy_page_to_iter(ZERO_PAGE(0), offset, nr, to);
b5810039 2688 } else {
1bdec44b
HD
2689 /*
2690 * But submitting the same page twice in a row to
2691 * splice() - or others? - can result in confusion:
2692 * so don't attempt that optimization on pipes etc.
2693 */
2694 ret = iov_iter_zero(nr, to);
b5810039 2695 }
1da177e4 2696
6e58e79d 2697 retval += ret;
1da177e4 2698 offset += ret;
09cbfeaf
KS
2699 index += offset >> PAGE_SHIFT;
2700 offset &= ~PAGE_MASK;
1da177e4 2701
2ba5bbed 2702 if (!iov_iter_count(to))
1da177e4 2703 break;
6e58e79d
AV
2704 if (ret < nr) {
2705 error = -EFAULT;
2706 break;
2707 }
1da177e4
LT
2708 cond_resched();
2709 }
2710
09cbfeaf 2711 *ppos = ((loff_t) index << PAGE_SHIFT) + offset;
6e58e79d
AV
2712 file_accessed(file);
2713 return retval ? retval : error;
1da177e4
LT
2714}
2715
965c8e59 2716static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
220f2ac9
HD
2717{
2718 struct address_space *mapping = file->f_mapping;
2719 struct inode *inode = mapping->host;
220f2ac9 2720
965c8e59
AM
2721 if (whence != SEEK_DATA && whence != SEEK_HOLE)
2722 return generic_file_llseek_size(file, offset, whence,
220f2ac9 2723 MAX_LFS_FILESIZE, i_size_read(inode));
41139aa4
MWO
2724 if (offset < 0)
2725 return -ENXIO;
2726
5955102c 2727 inode_lock(inode);
9608703e 2728 /* We're holding i_rwsem so we can access i_size directly */
41139aa4 2729 offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence);
387aae6f
HD
2730 if (offset >= 0)
2731 offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
5955102c 2732 inode_unlock(inode);
220f2ac9
HD
2733 return offset;
2734}
2735
83e4fa9c
HD
2736static long shmem_fallocate(struct file *file, int mode, loff_t offset,
2737 loff_t len)
2738{
496ad9aa 2739 struct inode *inode = file_inode(file);
e2d12e22 2740 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
40e041a2 2741 struct shmem_inode_info *info = SHMEM_I(inode);
1aac1400 2742 struct shmem_falloc shmem_falloc;
d144bf62 2743 pgoff_t start, index, end, undo_fallocend;
e2d12e22 2744 int error;
83e4fa9c 2745
13ace4d0
HD
2746 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2747 return -EOPNOTSUPP;
2748
5955102c 2749 inode_lock(inode);
83e4fa9c
HD
2750
2751 if (mode & FALLOC_FL_PUNCH_HOLE) {
2752 struct address_space *mapping = file->f_mapping;
2753 loff_t unmap_start = round_up(offset, PAGE_SIZE);
2754 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
8e205f77 2755 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
83e4fa9c 2756
9608703e 2757 /* protected by i_rwsem */
ab3948f5 2758 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
40e041a2
DR
2759 error = -EPERM;
2760 goto out;
2761 }
2762
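/*
 * Publish the range being punched via inode->i_private, so that
 * concurrent faults into the hole can wait on shmem_falloc_waitq
 * until the punch completes (see shmem_fault()).
 */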
8e205f77 2763 shmem_falloc.waitq = &shmem_falloc_waitq;
aa71ecd8 2764 shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT;
f00cdc6d
HD
2765 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
2766 spin_lock(&inode->i_lock);
2767 inode->i_private = &shmem_falloc;
2768 spin_unlock(&inode->i_lock);
2769
83e4fa9c
HD
2770 if ((u64)unmap_end > (u64)unmap_start)
2771 unmap_mapping_range(mapping, unmap_start,
2772 1 + unmap_end - unmap_start, 0);
2773 shmem_truncate_range(inode, offset, offset + len - 1);
2774 /* No need to unmap again: hole-punching leaves COWed pages */
8e205f77
HD
2775
2776 spin_lock(&inode->i_lock);
2777 inode->i_private = NULL;
2778 wake_up_all(&shmem_falloc_waitq);
2055da97 2779 WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
8e205f77 2780 spin_unlock(&inode->i_lock);
83e4fa9c 2781 error = 0;
8e205f77 2782 goto out;
e2d12e22
HD
2783 }
2784
2785 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
2786 error = inode_newsize_ok(inode, offset + len);
2787 if (error)
2788 goto out;
2789
40e041a2
DR
2790 if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
2791 error = -EPERM;
2792 goto out;
2793 }
2794
09cbfeaf
KS
2795 start = offset >> PAGE_SHIFT;
2796 end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
e2d12e22
HD
2797 /* Try to avoid a swapstorm if len is impossible to satisfy */
2798 if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
2799 error = -ENOSPC;
2800 goto out;
83e4fa9c
HD
2801 }
2802
8e205f77 2803 shmem_falloc.waitq = NULL;
1aac1400
HD
2804 shmem_falloc.start = start;
2805 shmem_falloc.next = start;
2806 shmem_falloc.nr_falloced = 0;
2807 shmem_falloc.nr_unswapped = 0;
2808 spin_lock(&inode->i_lock);
2809 inode->i_private = &shmem_falloc;
2810 spin_unlock(&inode->i_lock);
2811
d144bf62
HD
2812 /*
2813 * info->fallocend is only relevant when huge pages might be
2814 * involved: to prevent split_huge_page() from freeing fallocated
2815 * pages that FALLOC_FL_KEEP_SIZE committed beyond i_size.
2816 */
2817 undo_fallocend = info->fallocend;
2818 if (info->fallocend < end)
2819 info->fallocend = end;
2820
050dcb5c 2821 for (index = start; index < end; ) {
b0802b22 2822 struct folio *folio;
e2d12e22
HD
2823
2824 /*
2825 * Good, the fallocate(2) manpage permits EINTR: we may have
2826 * been interrupted because we are using up too much memory.
2827 */
2828 if (signal_pending(current))
2829 error = -EINTR;
1aac1400
HD
2830 else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
2831 error = -ENOMEM;
e2d12e22 2832 else
b0802b22
MWO
2833 error = shmem_get_folio(inode, index, &folio,
2834 SGP_FALLOC);
e2d12e22 2835 if (error) {
d144bf62 2836 info->fallocend = undo_fallocend;
b0802b22 2837 /* Remove the !uptodate folios we added */
7f556567
HD
2838 if (index > start) {
2839 shmem_undo_range(inode,
2840 (loff_t)start << PAGE_SHIFT,
2841 ((loff_t)index << PAGE_SHIFT) - 1, true);
2842 }
1aac1400 2843 goto undone;
e2d12e22
HD
2844 }
2845
050dcb5c
HD
2846 /*
2847 * Here is a more important optimization than it appears:
b0802b22
MWO
2848 * a second SGP_FALLOC on the same large folio will clear it,
2849 * making it uptodate and un-undoable if we fail later.
050dcb5c 2850 */
b0802b22
MWO
2851 index = folio_next_index(folio);
2852 /* Beware 32-bit wraparound */
2853 if (!index)
2854 index--;
050dcb5c 2855
1aac1400
HD
2856 /*
2857 * Inform shmem_writepage() how far we have reached.
2858 * No need for lock or barrier: we have the page lock.
2859 */
b0802b22 2860 if (!folio_test_uptodate(folio))
050dcb5c
HD
2861 shmem_falloc.nr_falloced += index - shmem_falloc.next;
2862 shmem_falloc.next = index;
1aac1400 2863
e2d12e22 2864 /*
b0802b22 2865 * If !uptodate, leave it that way so that freeable folios
1635f6a7 2866 * can be recognized if we need to roll back on error later.
b0802b22
MWO
2867 * But mark it dirty so that memory pressure will swap rather
2868 * than free the folios we are allocating (and SGP_CACHE folios
e2d12e22
HD
2869 * might still be clean: we now need to mark those dirty too).
2870 */
b0802b22
MWO
2871 folio_mark_dirty(folio);
2872 folio_unlock(folio);
2873 folio_put(folio);
e2d12e22
HD
2874 cond_resched();
2875 }
2876
2877 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
2878 i_size_write(inode, offset + len);
1aac1400
HD
2879undone:
2880 spin_lock(&inode->i_lock);
2881 inode->i_private = NULL;
2882 spin_unlock(&inode->i_lock);
e2d12e22 2883out:
15f242bb
HD
2884 if (!error)
2885 file_modified(file);
5955102c 2886 inode_unlock(inode);
83e4fa9c
HD
2887 return error;
2888}
2889
726c3342 2890static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
1da177e4 2891{
726c3342 2892 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
1da177e4
LT
2893
2894 buf->f_type = TMPFS_MAGIC;
09cbfeaf 2895 buf->f_bsize = PAGE_SIZE;
1da177e4 2896 buf->f_namelen = NAME_MAX;
0edd73b3 2897 if (sbinfo->max_blocks) {
1da177e4 2898 buf->f_blocks = sbinfo->max_blocks;
41ffe5d5
HD
2899 buf->f_bavail =
2900 buf->f_bfree = sbinfo->max_blocks -
2901 percpu_counter_sum(&sbinfo->used_blocks);
0edd73b3
HD
2902 }
2903 if (sbinfo->max_inodes) {
1da177e4
LT
2904 buf->f_files = sbinfo->max_inodes;
2905 buf->f_ffree = sbinfo->free_inodes;
1da177e4
LT
2906 }
2907 /* else leave those fields 0 like simple_statfs */
59cda49e
AG
2908
2909 buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b);
2910
1da177e4
LT
2911 return 0;
2912}
2913
2914/*
2915 * File creation. Allocate an inode, and we're done..
2916 */
2917static int
5ebb29be 2918shmem_mknod(struct mnt_idmap *idmap, struct inode *dir,
549c7297 2919 struct dentry *dentry, umode_t mode, dev_t dev)
1da177e4 2920{
0b0a0806 2921 struct inode *inode;
1da177e4
LT
2922 int error = -ENOSPC;
2923
454abafe 2924 inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
1da177e4 2925 if (inode) {
feda821e
CH
2926 error = simple_acl_create(dir, inode);
2927 if (error)
2928 goto out_iput;
2a7dba39 2929 error = security_inode_init_security(inode, dir,
9d8f13ba 2930 &dentry->d_name,
6d9d88d0 2931 shmem_initxattrs, NULL);
feda821e
CH
2932 if (error && error != -EOPNOTSUPP)
2933 goto out_iput;
37ec43cd 2934
718deb6b 2935 error = 0;
1da177e4 2936 dir->i_size += BOGO_DIRENT_SIZE;
078cd827 2937 dir->i_ctime = dir->i_mtime = current_time(dir);
36f05cab 2938 inode_inc_iversion(dir);
1da177e4
LT
2939 d_instantiate(dentry, inode);
2940 dget(dentry); /* Extra count - pin the dentry in core */
1da177e4
LT
2941 }
2942 return error;
feda821e
CH
2943out_iput:
2944 iput(inode);
2945 return error;
1da177e4
LT
2946}
2947
60545d0d 2948static int
011e2b71 2949shmem_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
863f144f 2950 struct file *file, umode_t mode)
60545d0d
AV
2951{
2952 struct inode *inode;
2953 int error = -ENOSPC;
2954
2955 inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
2956 if (inode) {
2957 error = security_inode_init_security(inode, dir,
2958 NULL,
2959 shmem_initxattrs, NULL);
feda821e
CH
2960 if (error && error != -EOPNOTSUPP)
2961 goto out_iput;
2962 error = simple_acl_create(dir, inode);
2963 if (error)
2964 goto out_iput;
863f144f 2965 d_tmpfile(file, inode);
60545d0d 2966 }
863f144f 2967 return finish_open_simple(file, error);
feda821e
CH
2968out_iput:
2969 iput(inode);
2970 return error;
60545d0d
AV
2971}
2972
c54bd91e 2973static int shmem_mkdir(struct mnt_idmap *idmap, struct inode *dir,
549c7297 2974 struct dentry *dentry, umode_t mode)
1da177e4
LT
2975{
2976 int error;
2977
5ebb29be 2978 if ((error = shmem_mknod(&nop_mnt_idmap, dir, dentry,
549c7297 2979 mode | S_IFDIR, 0)))
1da177e4 2980 return error;
d8c76e6f 2981 inc_nlink(dir);
1da177e4
LT
2982 return 0;
2983}
2984
6c960e68 2985static int shmem_create(struct mnt_idmap *idmap, struct inode *dir,
549c7297 2986 struct dentry *dentry, umode_t mode, bool excl)
1da177e4 2987{
5ebb29be 2988 return shmem_mknod(&nop_mnt_idmap, dir, dentry, mode | S_IFREG, 0);
1da177e4
LT
2989}
2990
2991/*
2992 * Link a file..
2993 */
2994static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
2995{
75c3cfa8 2996 struct inode *inode = d_inode(old_dentry);
29b00e60 2997 int ret = 0;
1da177e4
LT
2998
2999 /*
3000 * No ordinary (disk based) filesystem counts links as inodes;
3001 * but each new link needs a new dentry, pinning lowmem, and
3002 * tmpfs dentries cannot be pruned until they are unlinked.
1062af92
DW
3003 * But if an O_TMPFILE file is linked into the tmpfs, the
3004 * first link must skip that, to get the accounting right.
1da177e4 3005 */
1062af92 3006 if (inode->i_nlink) {
e809d5f0 3007 ret = shmem_reserve_inode(inode->i_sb, NULL);
1062af92
DW
3008 if (ret)
3009 goto out;
3010 }
1da177e4
LT
3011
3012 dir->i_size += BOGO_DIRENT_SIZE;
078cd827 3013 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
36f05cab 3014 inode_inc_iversion(dir);
d8c76e6f 3015 inc_nlink(inode);
7de9c6ee 3016 ihold(inode); /* New dentry reference */
1da177e4
LT
3017 dget(dentry); /* Extra pinning count for the created dentry */
3018 d_instantiate(dentry, inode);
5b04c689
PE
3019out:
3020 return ret;
1da177e4
LT
3021}
3022
3023static int shmem_unlink(struct inode *dir, struct dentry *dentry)
3024{
75c3cfa8 3025 struct inode *inode = d_inode(dentry);
1da177e4 3026
5b04c689
PE
3027 if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
3028 shmem_free_inode(inode->i_sb);
1da177e4
LT
3029
3030 dir->i_size -= BOGO_DIRENT_SIZE;
078cd827 3031 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
36f05cab 3032 inode_inc_iversion(dir);
9a53c3a7 3033 drop_nlink(inode);
1da177e4
LT
3034 dput(dentry); /* Undo the count from "create" - this does all the work */
3035 return 0;
3036}
3037
3038static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
3039{
3040 if (!simple_empty(dentry))
3041 return -ENOTEMPTY;
3042
75c3cfa8 3043 drop_nlink(d_inode(dentry));
9a53c3a7 3044 drop_nlink(dir);
1da177e4
LT
3045 return shmem_unlink(dir, dentry);
3046}
3047
e18275ae 3048static int shmem_whiteout(struct mnt_idmap *idmap,
549c7297 3049 struct inode *old_dir, struct dentry *old_dentry)
46fdb794
MS
3050{
3051 struct dentry *whiteout;
3052 int error;
3053
3054 whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
3055 if (!whiteout)
3056 return -ENOMEM;
3057
5ebb29be 3058 error = shmem_mknod(&nop_mnt_idmap, old_dir, whiteout,
46fdb794
MS
3059 S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
3060 dput(whiteout);
3061 if (error)
3062 return error;
3063
3064 /*
3065 * Cheat and hash the whiteout while the old dentry is still in
3066 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
3067 *
3068 * d_lookup() will consistently find one of them at this point,
3069 * not sure which one, but that isn't even important.
3070 */
3071 d_rehash(whiteout);
3072 return 0;
3073}
3074
1da177e4
LT
3075/*
3076 * The VFS layer already does all the dentry stuff for rename;
3077 * we just have to decrement the usage count for the target if
3078 * it exists, so that the VFS layer correctly frees it when it
3079 * gets overwritten.
3080 */
e18275ae 3081static int shmem_rename2(struct mnt_idmap *idmap,
549c7297
CB
3082 struct inode *old_dir, struct dentry *old_dentry,
3083 struct inode *new_dir, struct dentry *new_dentry,
3084 unsigned int flags)
1da177e4 3085{
75c3cfa8 3086 struct inode *inode = d_inode(old_dentry);
1da177e4
LT
3087 int they_are_dirs = S_ISDIR(inode->i_mode);
3088
46fdb794 3089 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
3b69ff51
MS
3090 return -EINVAL;
3091
37456771 3092 if (flags & RENAME_EXCHANGE)
6429e463 3093 return simple_rename_exchange(old_dir, old_dentry, new_dir, new_dentry);
37456771 3094
1da177e4
LT
3095 if (!simple_empty(new_dentry))
3096 return -ENOTEMPTY;
3097
46fdb794
MS
3098 if (flags & RENAME_WHITEOUT) {
3099 int error;
3100
e18275ae 3101 error = shmem_whiteout(&nop_mnt_idmap, old_dir, old_dentry);
46fdb794
MS
3102 if (error)
3103 return error;
3104 }
3105
75c3cfa8 3106 if (d_really_is_positive(new_dentry)) {
1da177e4 3107 (void) shmem_unlink(new_dir, new_dentry);
b928095b 3108 if (they_are_dirs) {
75c3cfa8 3109 drop_nlink(d_inode(new_dentry));
9a53c3a7 3110 drop_nlink(old_dir);
b928095b 3111 }
1da177e4 3112 } else if (they_are_dirs) {
9a53c3a7 3113 drop_nlink(old_dir);
d8c76e6f 3114 inc_nlink(new_dir);
1da177e4
LT
3115 }
3116
3117 old_dir->i_size -= BOGO_DIRENT_SIZE;
3118 new_dir->i_size += BOGO_DIRENT_SIZE;
3119 old_dir->i_ctime = old_dir->i_mtime =
3120 new_dir->i_ctime = new_dir->i_mtime =
078cd827 3121 inode->i_ctime = current_time(old_dir);
36f05cab
JL
3122 inode_inc_iversion(old_dir);
3123 inode_inc_iversion(new_dir);
1da177e4
LT
3124 return 0;
3125}
3126
7a77db95 3127static int shmem_symlink(struct mnt_idmap *idmap, struct inode *dir,
549c7297 3128 struct dentry *dentry, const char *symname)
1da177e4
LT
3129{
3130 int error;
3131 int len;
3132 struct inode *inode;
7ad0414b 3133 struct folio *folio;
1da177e4
LT
3134
3135 len = strlen(symname) + 1;
09cbfeaf 3136 if (len > PAGE_SIZE)
1da177e4
LT
3137 return -ENAMETOOLONG;
3138
0825a6f9
JP
3139 inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0,
3140 VM_NORESERVE);
1da177e4
LT
3141 if (!inode)
3142 return -ENOSPC;
3143
9d8f13ba 3144 error = security_inode_init_security(inode, dir, &dentry->d_name,
6d9d88d0 3145 shmem_initxattrs, NULL);
343c3d7f
MN
3146 if (error && error != -EOPNOTSUPP) {
3147 iput(inode);
3148 return error;
570bc1c2
SS
3149 }
3150
1da177e4 3151 inode->i_size = len-1;
69f07ec9 3152 if (len <= SHORT_SYMLINK_LEN) {
3ed47db3
AV
3153 inode->i_link = kmemdup(symname, len, GFP_KERNEL);
3154 if (!inode->i_link) {
69f07ec9
HD
3155 iput(inode);
3156 return -ENOMEM;
3157 }
3158 inode->i_op = &shmem_short_symlink_operations;
1da177e4 3159 } else {
e8ecde25 3160 inode_nohighmem(inode);
7ad0414b 3161 error = shmem_get_folio(inode, 0, &folio, SGP_WRITE);
1da177e4
LT
3162 if (error) {
3163 iput(inode);
3164 return error;
3165 }
14fcc23f 3166 inode->i_mapping->a_ops = &shmem_aops;
1da177e4 3167 inode->i_op = &shmem_symlink_inode_operations;
7ad0414b
MWO
3168 memcpy(folio_address(folio), symname, len);
3169 folio_mark_uptodate(folio);
3170 folio_mark_dirty(folio);
3171 folio_unlock(folio);
3172 folio_put(folio);
1da177e4 3173 }
1da177e4 3174 dir->i_size += BOGO_DIRENT_SIZE;
078cd827 3175 dir->i_ctime = dir->i_mtime = current_time(dir);
36f05cab 3176 inode_inc_iversion(dir);
1da177e4
LT
3177 d_instantiate(dentry, inode);
3178 dget(dentry);
3179 return 0;
3180}
3181
fceef393 3182static void shmem_put_link(void *arg)
1da177e4 3183{
e4b57722
MWO
3184 folio_mark_accessed(arg);
3185 folio_put(arg);
1da177e4
LT
3186}
3187
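/*
 * In RCU-walk mode (dentry == NULL) we must not sleep, so the link can
 * only be served from a folio already present and uptodate in the page
 * cache; otherwise return -ECHILD so the lookup retries in ref-walk mode.
 */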
6b255391 3188static const char *shmem_get_link(struct dentry *dentry,
fceef393
AV
3189 struct inode *inode,
3190 struct delayed_call *done)
1da177e4 3191{
e4b57722 3192 struct folio *folio = NULL;
6b255391 3193 int error;
e4b57722 3194
6a6c9904 3195 if (!dentry) {
e4b57722
MWO
3196 folio = filemap_get_folio(inode->i_mapping, 0);
3197 if (!folio)
6a6c9904 3198 return ERR_PTR(-ECHILD);
7459c149 3199 if (PageHWPoison(folio_page(folio, 0)) ||
e4b57722
MWO
3200 !folio_test_uptodate(folio)) {
3201 folio_put(folio);
6a6c9904
AV
3202 return ERR_PTR(-ECHILD);
3203 }
3204 } else {
e4b57722 3205 error = shmem_get_folio(inode, 0, &folio, SGP_READ);
6a6c9904
AV
3206 if (error)
3207 return ERR_PTR(error);
e4b57722 3208 if (!folio)
a7605426 3209 return ERR_PTR(-ECHILD);
7459c149 3210 if (PageHWPoison(folio_page(folio, 0))) {
e4b57722
MWO
3211 folio_unlock(folio);
3212 folio_put(folio);
a7605426
YS
3213 return ERR_PTR(-ECHILD);
3214 }
e4b57722 3215 folio_unlock(folio);
6a6c9904 3216 }
e4b57722
MWO
3217 set_delayed_call(done, shmem_put_link, folio);
3218 return folio_address(folio);
1da177e4
LT
3219}
3220
b09e0fa4 3221#ifdef CONFIG_TMPFS_XATTR
e408e695
TT
3222
3223static int shmem_fileattr_get(struct dentry *dentry, struct fileattr *fa)
3224{
3225 struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3226
3227 fileattr_fill_flags(fa, info->fsflags & SHMEM_FL_USER_VISIBLE);
3228
3229 return 0;
3230}
3231
3232static int shmem_fileattr_set(struct user_namespace *mnt_userns,
3233 struct dentry *dentry, struct fileattr *fa)
3234{
3235 struct inode *inode = d_inode(dentry);
3236 struct shmem_inode_info *info = SHMEM_I(inode);
3237
3238 if (fileattr_has_fsx(fa))
3239 return -EOPNOTSUPP;
cb241339
HD
3240 if (fa->flags & ~SHMEM_FL_USER_MODIFIABLE)
3241 return -EOPNOTSUPP;
e408e695
TT
3242
3243 info->fsflags = (info->fsflags & ~SHMEM_FL_USER_MODIFIABLE) |
3244 (fa->flags & SHMEM_FL_USER_MODIFIABLE);
3245
cb241339 3246 shmem_set_inode_flags(inode, info->fsflags);
e408e695 3247 inode->i_ctime = current_time(inode);
36f05cab 3248 inode_inc_iversion(inode);
e408e695
TT
3249 return 0;
3250}
3251
46711810 3252/*
b09e0fa4
EP
3253 * Superblocks without xattr inode operations may get some security.* xattr
3254 * support from the LSM "for free". As soon as we have any other xattrs
39f0247d
AG
3255 * like ACLs, we also need to implement the security.* handlers at
3256 * filesystem level, though.
3257 */
3258
6d9d88d0
JS
3259/*
3260 * Callback for security_inode_init_security() for acquiring xattrs.
3261 */
3262static int shmem_initxattrs(struct inode *inode,
3263 const struct xattr *xattr_array,
3264 void *fs_info)
3265{
3266 struct shmem_inode_info *info = SHMEM_I(inode);
3267 const struct xattr *xattr;
38f38657 3268 struct simple_xattr *new_xattr;
6d9d88d0
JS
3269 size_t len;
3270
3271 for (xattr = xattr_array; xattr->name != NULL; xattr++) {
38f38657 3272 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
6d9d88d0
JS
3273 if (!new_xattr)
3274 return -ENOMEM;
3275
3276 len = strlen(xattr->name) + 1;
3277 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
3278 GFP_KERNEL);
3279 if (!new_xattr->name) {
3bef735a 3280 kvfree(new_xattr);
6d9d88d0
JS
3281 return -ENOMEM;
3282 }
3283
3284 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
3285 XATTR_SECURITY_PREFIX_LEN);
3286 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
3287 xattr->name, len);
3288
3b4c7bc0 3289 simple_xattr_add(&info->xattrs, new_xattr);
6d9d88d0
JS
3290 }
3291
3292 return 0;
3293}
3294
aa7c5241 3295static int shmem_xattr_handler_get(const struct xattr_handler *handler,
b296821a
AV
3296 struct dentry *unused, struct inode *inode,
3297 const char *name, void *buffer, size_t size)
b09e0fa4 3298{
b296821a 3299 struct shmem_inode_info *info = SHMEM_I(inode);
b09e0fa4 3300
aa7c5241 3301 name = xattr_full_name(handler, name);
38f38657 3302 return simple_xattr_get(&info->xattrs, name, buffer, size);
b09e0fa4
EP
3303}
3304
aa7c5241 3305static int shmem_xattr_handler_set(const struct xattr_handler *handler,
e65ce2a5 3306 struct user_namespace *mnt_userns,
59301226
AV
3307 struct dentry *unused, struct inode *inode,
3308 const char *name, const void *value,
3309 size_t size, int flags)
b09e0fa4 3310{
59301226 3311 struct shmem_inode_info *info = SHMEM_I(inode);
36f05cab 3312 int err;
b09e0fa4 3313
aa7c5241 3314 name = xattr_full_name(handler, name);
36f05cab
JL
3315 err = simple_xattr_set(&info->xattrs, name, value, size, flags, NULL);
3316 if (!err) {
3317 inode->i_ctime = current_time(inode);
3318 inode_inc_iversion(inode);
3319 }
3320 return err;
b09e0fa4
EP
3321}
3322
aa7c5241
AG
3323static const struct xattr_handler shmem_security_xattr_handler = {
3324 .prefix = XATTR_SECURITY_PREFIX,
3325 .get = shmem_xattr_handler_get,
3326 .set = shmem_xattr_handler_set,
3327};
b09e0fa4 3328
aa7c5241
AG
3329static const struct xattr_handler shmem_trusted_xattr_handler = {
3330 .prefix = XATTR_TRUSTED_PREFIX,
3331 .get = shmem_xattr_handler_get,
3332 .set = shmem_xattr_handler_set,
3333};
b09e0fa4 3334
aa7c5241
AG
3335static const struct xattr_handler *shmem_xattr_handlers[] = {
3336#ifdef CONFIG_TMPFS_POSIX_ACL
3337 &posix_acl_access_xattr_handler,
3338 &posix_acl_default_xattr_handler,
3339#endif
3340 &shmem_security_xattr_handler,
3341 &shmem_trusted_xattr_handler,
3342 NULL
3343};
b09e0fa4
EP
3344
3345static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
3346{
75c3cfa8 3347 struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
786534b9 3348 return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
b09e0fa4
EP
3349}
3350#endif /* CONFIG_TMPFS_XATTR */
3351
69f07ec9 3352static const struct inode_operations shmem_short_symlink_operations = {
f7cd16a5 3353 .getattr = shmem_getattr,
6b255391 3354 .get_link = simple_get_link,
b09e0fa4 3355#ifdef CONFIG_TMPFS_XATTR
b09e0fa4 3356 .listxattr = shmem_listxattr,
b09e0fa4
EP
3357#endif
3358};
3359
3360static const struct inode_operations shmem_symlink_inode_operations = {
f7cd16a5 3361 .getattr = shmem_getattr,
6b255391 3362 .get_link = shmem_get_link,
b09e0fa4 3363#ifdef CONFIG_TMPFS_XATTR
b09e0fa4 3364 .listxattr = shmem_listxattr,
39f0247d 3365#endif
b09e0fa4 3366};
39f0247d 3367
91828a40
DG
3368static struct dentry *shmem_get_parent(struct dentry *child)
3369{
3370 return ERR_PTR(-ESTALE);
3371}
3372
3373static int shmem_match(struct inode *ino, void *vfh)
3374{
3375 __u32 *fh = vfh;
3376 __u64 inum = fh[2];
3377 inum = (inum << 32) | fh[1];
3378 return ino->i_ino == inum && fh[0] == ino->i_generation;
3379}
3380
12ba780d
AG
3381/* Find any alias of inode, but prefer a hashed alias */
3382static struct dentry *shmem_find_alias(struct inode *inode)
3383{
3384 struct dentry *alias = d_find_alias(inode);
3385
3386 return alias ?: d_find_any_alias(inode);
3387}
3388
3389
480b116c
CH
3390static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
3391 struct fid *fid, int fh_len, int fh_type)
91828a40 3392{
91828a40 3393 struct inode *inode;
480b116c 3394 struct dentry *dentry = NULL;
35c2a7f4 3395 u64 inum;
480b116c
CH
3396
3397 if (fh_len < 3)
3398 return NULL;
91828a40 3399
35c2a7f4
HD
3400 inum = fid->raw[2];
3401 inum = (inum << 32) | fid->raw[1];
3402
480b116c
CH
3403 inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
3404 shmem_match, fid->raw);
91828a40 3405 if (inode) {
12ba780d 3406 dentry = shmem_find_alias(inode);
91828a40
DG
3407 iput(inode);
3408 }
3409
480b116c 3410 return dentry;
91828a40
DG
3411}
3412
b0b0382b
AV
3413static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
3414 struct inode *parent)
91828a40 3415{
5fe0c237
AK
3416 if (*len < 3) {
3417 *len = 3;
94e07a75 3418 return FILEID_INVALID;
5fe0c237 3419 }
91828a40 3420
1d3382cb 3421 if (inode_unhashed(inode)) {
91828a40
DG
3422 /* Unfortunately, insert_inode_hash is not idempotent,
3423 * so, as we hash inodes here rather than at creation
3424 * time, we need a lock to ensure we only try
3425 * to do it once.
3426 */
3427 static DEFINE_SPINLOCK(lock);
3428 spin_lock(&lock);
1d3382cb 3429 if (inode_unhashed(inode))
91828a40
DG
3430 __insert_inode_hash(inode,
3431 inode->i_ino + inode->i_generation);
3432 spin_unlock(&lock);
3433 }
3434
3435 fh[0] = inode->i_generation;
3436 fh[1] = inode->i_ino;
3437 fh[2] = ((__u64)inode->i_ino) >> 32;
3438
3439 *len = 3;
3440 return 1;
3441}
3442
39655164 3443static const struct export_operations shmem_export_ops = {
91828a40 3444 .get_parent = shmem_get_parent,
91828a40 3445 .encode_fh = shmem_encode_fh,
480b116c 3446 .fh_to_dentry = shmem_fh_to_dentry,
91828a40
DG
3447};
3448
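For reference, the three-word NFS file handle produced and consumed above, summarized as read from shmem_encode_fh() and shmem_match():

/*
 * Handle layout:
 *   fh[0] = inode->i_generation
 *   fh[1] = low 32 bits of inode->i_ino
 *   fh[2] = high 32 bits of inode->i_ino
 *
 * shmem_match() rebuilds the inum as ((u64)fh[2] << 32) | fh[1] and also
 * requires fh[0] to equal i_generation, so a handle whose generation no
 * longer matches the inode is not resolved.
 */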
626c3920
AV
3449enum shmem_param {
3450 Opt_gid,
3451 Opt_huge,
3452 Opt_mode,
3453 Opt_mpol,
3454 Opt_nr_blocks,
3455 Opt_nr_inodes,
3456 Opt_size,
3457 Opt_uid,
ea3271f7
CD
3458 Opt_inode32,
3459 Opt_inode64,
626c3920
AV
3460};
3461
5eede625 3462static const struct constant_table shmem_param_enums_huge[] = {
2710c957
AV
3463 {"never", SHMEM_HUGE_NEVER },
3464 {"always", SHMEM_HUGE_ALWAYS },
3465 {"within_size", SHMEM_HUGE_WITHIN_SIZE },
3466 {"advise", SHMEM_HUGE_ADVISE },
2710c957
AV
3467 {}
3468};
3469
d7167b14 3470const struct fs_parameter_spec shmem_fs_parameters[] = {
626c3920 3471 fsparam_u32 ("gid", Opt_gid),
2710c957 3472 fsparam_enum ("huge", Opt_huge, shmem_param_enums_huge),
626c3920
AV
3473 fsparam_u32oct("mode", Opt_mode),
3474 fsparam_string("mpol", Opt_mpol),
3475 fsparam_string("nr_blocks", Opt_nr_blocks),
3476 fsparam_string("nr_inodes", Opt_nr_inodes),
3477 fsparam_string("size", Opt_size),
3478 fsparam_u32 ("uid", Opt_uid),
ea3271f7
CD
3479 fsparam_flag ("inode32", Opt_inode32),
3480 fsparam_flag ("inode64", Opt_inode64),
626c3920
AV
3481 {}
3482};
3483
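For orientation, a few illustrative option strings these parameters accept; the mount points and values below are examples rather than recommendations, and (as the parser comment further down notes) an mpol nodelist may itself contain commas:

/*
 * Illustrative tmpfs mounts:
 *
 *   mount -t tmpfs -o size=512m,nr_inodes=10k,mode=1777 tmpfs /mnt/scratch
 *   mount -t tmpfs -o size=50%,huge=within_size,inode64 tmpfs /mnt/big
 *   mount -t tmpfs -o uid=1000,gid=1000,mpol=interleave:0,1 tmpfs /mnt/numa
 */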
f3235626 3484static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
1da177e4 3485{
f3235626 3486 struct shmem_options *ctx = fc->fs_private;
626c3920
AV
3487 struct fs_parse_result result;
3488 unsigned long long size;
e04dc423 3489 char *rest;
626c3920
AV
3490 int opt;
3491
d7167b14 3492 opt = fs_parse(fc, shmem_fs_parameters, param, &result);
f3235626 3493 if (opt < 0)
626c3920 3494 return opt;
1da177e4 3495
626c3920
AV
3496 switch (opt) {
3497 case Opt_size:
3498 size = memparse(param->string, &rest);
e04dc423
AV
3499 if (*rest == '%') {
3500 size <<= PAGE_SHIFT;
3501 size *= totalram_pages();
3502 do_div(size, 100);
3503 rest++;
3504 }
3505 if (*rest)
626c3920 3506 goto bad_value;
e04dc423
AV
3507 ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
3508 ctx->seen |= SHMEM_SEEN_BLOCKS;
626c3920
AV
3509 break;
3510 case Opt_nr_blocks:
3511 ctx->blocks = memparse(param->string, &rest);
0c98c8e1 3512 if (*rest || ctx->blocks > S64_MAX)
626c3920 3513 goto bad_value;
e04dc423 3514 ctx->seen |= SHMEM_SEEN_BLOCKS;
626c3920
AV
3515 break;
3516 case Opt_nr_inodes:
3517 ctx->inodes = memparse(param->string, &rest);
e04dc423 3518 if (*rest)
626c3920 3519 goto bad_value;
e04dc423 3520 ctx->seen |= SHMEM_SEEN_INODES;
626c3920
AV
3521 break;
3522 case Opt_mode:
3523 ctx->mode = result.uint_32 & 07777;
3524 break;
3525 case Opt_uid:
3526 ctx->uid = make_kuid(current_user_ns(), result.uint_32);
e04dc423 3527 if (!uid_valid(ctx->uid))
626c3920
AV
3528 goto bad_value;
3529 break;
3530 case Opt_gid:
3531 ctx->gid = make_kgid(current_user_ns(), result.uint_32);
e04dc423 3532 if (!gid_valid(ctx->gid))
626c3920
AV
3533 goto bad_value;
3534 break;
3535 case Opt_huge:
3536 ctx->huge = result.uint_32;
3537 if (ctx->huge != SHMEM_HUGE_NEVER &&
396bcc52 3538 !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
626c3920
AV
3539 has_transparent_hugepage()))
3540 goto unsupported_parameter;
e04dc423 3541 ctx->seen |= SHMEM_SEEN_HUGE;
626c3920
AV
3542 break;
3543 case Opt_mpol:
3544 if (IS_ENABLED(CONFIG_NUMA)) {
3545 mpol_put(ctx->mpol);
3546 ctx->mpol = NULL;
3547 if (mpol_parse_str(param->string, &ctx->mpol))
3548 goto bad_value;
3549 break;
3550 }
3551 goto unsupported_parameter;
ea3271f7
CD
3552 case Opt_inode32:
3553 ctx->full_inums = false;
3554 ctx->seen |= SHMEM_SEEN_INUMS;
3555 break;
3556 case Opt_inode64:
3557 if (sizeof(ino_t) < 8) {
3558 return invalfc(fc,
3559 "Cannot use inode64 with <64bit inums in kernel\n");
3560 }
3561 ctx->full_inums = true;
3562 ctx->seen |= SHMEM_SEEN_INUMS;
3563 break;
e04dc423
AV
3564 }
3565 return 0;
3566
626c3920 3567unsupported_parameter:
f35aa2bc 3568 return invalfc(fc, "Unsupported parameter '%s'", param->key);
626c3920 3569bad_value:
f35aa2bc 3570 return invalfc(fc, "Bad value for '%s'", param->key);
e04dc423
AV
3571}
3572
f3235626 3573static int shmem_parse_options(struct fs_context *fc, void *data)
e04dc423 3574{
f3235626
DH
3575 char *options = data;
3576
33f37c64
AV
3577 if (options) {
3578 int err = security_sb_eat_lsm_opts(options, &fc->security);
3579 if (err)
3580 return err;
3581 }
3582
b00dc3ad 3583 while (options != NULL) {
626c3920 3584 char *this_char = options;
b00dc3ad
HD
3585 for (;;) {
3586 /*
3587 * NUL-terminate this option: unfortunately,
3588 * mount options form a comma-separated list,
3589 * but mpol's nodelist may also contain commas.
3590 */
3591 options = strchr(options, ',');
3592 if (options == NULL)
3593 break;
3594 options++;
3595 if (!isdigit(*options)) {
3596 options[-1] = '\0';
3597 break;
3598 }
3599 }
626c3920 3600 if (*this_char) {
68d68ff6 3601 char *value = strchr(this_char, '=');
f3235626 3602 size_t len = 0;
626c3920
AV
3603 int err;
3604
3605 if (value) {
3606 *value++ = '\0';
f3235626 3607 len = strlen(value);
626c3920 3608 }
f3235626
DH
3609 err = vfs_parse_fs_string(fc, this_char, value, len);
3610 if (err < 0)
3611 return err;
1da177e4 3612 }
1da177e4
LT
3613 }
3614 return 0;
1da177e4
LT
3615}
3616
f3235626
DH
3617/*
3618 * Reconfigure a shmem filesystem.
3619 *
3620 * Note that we disallow change from limited->unlimited blocks/inodes while any
3621 * are in use; but we must separately disallow unlimited->limited, because in
3622 * that case we have no record of how much is already in use.
3623 */
3624static int shmem_reconfigure(struct fs_context *fc)
1da177e4 3625{
f3235626
DH
3626 struct shmem_options *ctx = fc->fs_private;
3627 struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
0edd73b3 3628 unsigned long inodes;
bf11b9a8 3629 struct mempolicy *mpol = NULL;
f3235626 3630 const char *err;
1da177e4 3631
bf11b9a8 3632 raw_spin_lock(&sbinfo->stat_lock);
0edd73b3 3633 inodes = sbinfo->max_inodes - sbinfo->free_inodes;
0c98c8e1 3634
f3235626
DH
3635 if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
3636 if (!sbinfo->max_blocks) {
3637 err = "Cannot retroactively limit size";
0b5071dd 3638 goto out;
f3235626 3639 }
0b5071dd 3640 if (percpu_counter_compare(&sbinfo->used_blocks,
f3235626
DH
3641 ctx->blocks) > 0) {
3642 err = "Too small a size for current use";
0b5071dd 3643 goto out;
f3235626 3644 }
0b5071dd 3645 }
f3235626
DH
3646 if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
3647 if (!sbinfo->max_inodes) {
3648 err = "Cannot retroactively limit inodes";
0b5071dd 3649 goto out;
f3235626
DH
3650 }
3651 if (ctx->inodes < inodes) {
3652 err = "Too few inodes for current use";
0b5071dd 3653 goto out;
f3235626 3654 }
0b5071dd 3655 }
0edd73b3 3656
ea3271f7
CD
3657 if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums &&
3658 sbinfo->next_ino > UINT_MAX) {
3659 err = "Current inum too high to switch to 32-bit inums";
3660 goto out;
3661 }
3662
f3235626
DH
3663 if (ctx->seen & SHMEM_SEEN_HUGE)
3664 sbinfo->huge = ctx->huge;
ea3271f7
CD
3665 if (ctx->seen & SHMEM_SEEN_INUMS)
3666 sbinfo->full_inums = ctx->full_inums;
f3235626
DH
3667 if (ctx->seen & SHMEM_SEEN_BLOCKS)
3668 sbinfo->max_blocks = ctx->blocks;
3669 if (ctx->seen & SHMEM_SEEN_INODES) {
3670 sbinfo->max_inodes = ctx->inodes;
3671 sbinfo->free_inodes = ctx->inodes - inodes;
0b5071dd 3672 }
71fe804b 3673
5f00110f
GT
3674 /*
3675 * Preserve previous mempolicy unless mpol remount option was specified.
3676 */
f3235626 3677 if (ctx->mpol) {
bf11b9a8 3678 mpol = sbinfo->mpol;
f3235626
DH
3679 sbinfo->mpol = ctx->mpol; /* transfers initial ref */
3680 ctx->mpol = NULL;
5f00110f 3681 }
bf11b9a8
SAS
3682 raw_spin_unlock(&sbinfo->stat_lock);
3683 mpol_put(mpol);
f3235626 3684 return 0;
0edd73b3 3685out:
bf11b9a8 3686 raw_spin_unlock(&sbinfo->stat_lock);
f35aa2bc 3687 return invalfc(fc, "%s", err);
1da177e4 3688}
680d794b 3689
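To make the rule above concrete, a sketch of remounts measured against the checks in shmem_reconfigure(); the sizes and mount point are illustrative:

/*
 * Illustrative remounts:
 *
 *   mount -t tmpfs -o size=1g tmpfs /mnt/t
 *   mount -o remount,size=2g /mnt/t   - accepted: raising a finite limit
 *   mount -o remount,size=4k /mnt/t   - rejected with "Too small a size for
 *                                       current use" once usage exceeds it
 *
 * A tmpfs mounted without a limit (size=0) cannot later be given one;
 * that is the "Cannot retroactively limit size" case.
 */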
34c80b1d 3690static int shmem_show_options(struct seq_file *seq, struct dentry *root)
680d794b 3691{
34c80b1d 3692 struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
680d794b
AM
3693
3694 if (sbinfo->max_blocks != shmem_default_max_blocks())
3695 seq_printf(seq, ",size=%luk",
09cbfeaf 3696 sbinfo->max_blocks << (PAGE_SHIFT - 10));
680d794b
AM
3697 if (sbinfo->max_inodes != shmem_default_max_inodes())
3698 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
0825a6f9 3699 if (sbinfo->mode != (0777 | S_ISVTX))
09208d15 3700 seq_printf(seq, ",mode=%03ho", sbinfo->mode);
8751e039
EB
3701 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
3702 seq_printf(seq, ",uid=%u",
3703 from_kuid_munged(&init_user_ns, sbinfo->uid));
3704 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
3705 seq_printf(seq, ",gid=%u",
3706 from_kgid_munged(&init_user_ns, sbinfo->gid));
ea3271f7
CD
3707
3708 /*
3709 * Showing inode{64,32} might be useful even if it's the system default,
3710 * since then people don't have to resort to checking both here and
3711 * /proc/config.gz to confirm 64-bit inums were successfully applied
3712 * (which may not even exist if IKCONFIG_PROC isn't enabled).
3713 *
3714 * We hide it when inode64 isn't the default and we are using 32-bit
3715 * inodes, since that probably just means the feature isn't even under
3716 * consideration.
3717 *
3718 * As such:
3719 *
3720 * +------------------+-----------------+-----------------+
3721 * |                  | TMPFS_INODE64=y | TMPFS_INODE64=n |
3722 * +------------------+-----------------+-----------------+
3723 * | full_inums=true  | show            | show            |
3724 * | full_inums=false | show            | hide            |
3725 * +------------------+-----------------+-----------------+
3726 *
3727 */
3728 if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums)
3729 seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32));
396bcc52 3730#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5a6e75f8
KS
3731 /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
3732 if (sbinfo->huge)
3733 seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
3734#endif
71fe804b 3735 shmem_show_mpol(seq, sbinfo->mpol);
680d794b
AM
3736 return 0;
3737}
9183df25 3738
680d794b 3739#endif /* CONFIG_TMPFS */
1da177e4
LT
3740
3741static void shmem_put_super(struct super_block *sb)
3742{
602586a8
HD
3743 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3744
e809d5f0 3745 free_percpu(sbinfo->ino_batch);
602586a8 3746 percpu_counter_destroy(&sbinfo->used_blocks);
49cd0a5c 3747 mpol_put(sbinfo->mpol);
602586a8 3748 kfree(sbinfo);
1da177e4
LT
3749 sb->s_fs_info = NULL;
3750}
3751
f3235626 3752static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
1da177e4 3753{
f3235626 3754 struct shmem_options *ctx = fc->fs_private;
1da177e4 3755 struct inode *inode;
0edd73b3 3756 struct shmem_sb_info *sbinfo;
680d794b
AM
3757
3758 /* Round up to L1_CACHE_BYTES to resist false sharing */
425fbf04 3759 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
680d794b
AM
3760 L1_CACHE_BYTES), GFP_KERNEL);
3761 if (!sbinfo)
3762 return -ENOMEM;
3763
680d794b 3764 sb->s_fs_info = sbinfo;
1da177e4 3765
0edd73b3 3766#ifdef CONFIG_TMPFS
1da177e4
LT
3767 /*
3768 * By default we only allow half of the physical RAM per
3769 * tmpfs instance, limiting inodes to one per page of lowmem;
3770 * but the internal instance is left unlimited.
3771 */
1751e8a6 3772 if (!(sb->s_flags & SB_KERNMOUNT)) {
f3235626
DH
3773 if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
3774 ctx->blocks = shmem_default_max_blocks();
3775 if (!(ctx->seen & SHMEM_SEEN_INODES))
3776 ctx->inodes = shmem_default_max_inodes();
ea3271f7
CD
3777 if (!(ctx->seen & SHMEM_SEEN_INUMS))
3778 ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64);
ca4e0519 3779 } else {
1751e8a6 3780 sb->s_flags |= SB_NOUSER;
1da177e4 3781 }
91828a40 3782 sb->s_export_op = &shmem_export_ops;
36f05cab 3783 sb->s_flags |= SB_NOSEC | SB_I_VERSION;
1da177e4 3784#else
1751e8a6 3785 sb->s_flags |= SB_NOUSER;
1da177e4 3786#endif
f3235626
DH
3787 sbinfo->max_blocks = ctx->blocks;
3788 sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes;
e809d5f0
CD
3789 if (sb->s_flags & SB_KERNMOUNT) {
3790 sbinfo->ino_batch = alloc_percpu(ino_t);
3791 if (!sbinfo->ino_batch)
3792 goto failed;
3793 }
f3235626
DH
3794 sbinfo->uid = ctx->uid;
3795 sbinfo->gid = ctx->gid;
ea3271f7 3796 sbinfo->full_inums = ctx->full_inums;
f3235626
DH
3797 sbinfo->mode = ctx->mode;
3798 sbinfo->huge = ctx->huge;
3799 sbinfo->mpol = ctx->mpol;
3800 ctx->mpol = NULL;
1da177e4 3801
bf11b9a8 3802 raw_spin_lock_init(&sbinfo->stat_lock);
908c7f19 3803 if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
602586a8 3804 goto failed;
779750d2
KS
3805 spin_lock_init(&sbinfo->shrinklist_lock);
3806 INIT_LIST_HEAD(&sbinfo->shrinklist);
0edd73b3 3807
285b2c4f 3808 sb->s_maxbytes = MAX_LFS_FILESIZE;
09cbfeaf
KS
3809 sb->s_blocksize = PAGE_SIZE;
3810 sb->s_blocksize_bits = PAGE_SHIFT;
1da177e4
LT
3811 sb->s_magic = TMPFS_MAGIC;
3812 sb->s_op = &shmem_ops;
cfd95a9c 3813 sb->s_time_gran = 1;
b09e0fa4 3814#ifdef CONFIG_TMPFS_XATTR
39f0247d 3815 sb->s_xattr = shmem_xattr_handlers;
b09e0fa4
EP
3816#endif
3817#ifdef CONFIG_TMPFS_POSIX_ACL
1751e8a6 3818 sb->s_flags |= SB_POSIXACL;
39f0247d 3819#endif
2b4db796 3820 uuid_gen(&sb->s_uuid);
0edd73b3 3821
454abafe 3822 inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
1da177e4
LT
3823 if (!inode)
3824 goto failed;
680d794b
AM
3825 inode->i_uid = sbinfo->uid;
3826 inode->i_gid = sbinfo->gid;
318ceed0
AV
3827 sb->s_root = d_make_root(inode);
3828 if (!sb->s_root)
48fde701 3829 goto failed;
1da177e4
LT
3830 return 0;
3831
1da177e4
LT
3832failed:
3833 shmem_put_super(sb);
f2b346e4 3834 return -ENOMEM;
1da177e4
LT
3835}
3836
f3235626
DH
3837static int shmem_get_tree(struct fs_context *fc)
3838{
3839 return get_tree_nodev(fc, shmem_fill_super);
3840}
3841
3842static void shmem_free_fc(struct fs_context *fc)
3843{
3844 struct shmem_options *ctx = fc->fs_private;
3845
3846 if (ctx) {
3847 mpol_put(ctx->mpol);
3848 kfree(ctx);
3849 }
3850}
3851
3852static const struct fs_context_operations shmem_fs_context_ops = {
3853 .free = shmem_free_fc,
3854 .get_tree = shmem_get_tree,
3855#ifdef CONFIG_TMPFS
3856 .parse_monolithic = shmem_parse_options,
3857 .parse_param = shmem_parse_one,
3858 .reconfigure = shmem_reconfigure,
3859#endif
3860};
3861
fcc234f8 3862static struct kmem_cache *shmem_inode_cachep;
1da177e4
LT
3863
3864static struct inode *shmem_alloc_inode(struct super_block *sb)
3865{
41ffe5d5 3866 struct shmem_inode_info *info;
fd60b288 3867 info = alloc_inode_sb(sb, shmem_inode_cachep, GFP_KERNEL);
41ffe5d5 3868 if (!info)
1da177e4 3869 return NULL;
41ffe5d5 3870 return &info->vfs_inode;
1da177e4
LT
3871}
3872
74b1da56 3873static void shmem_free_in_core_inode(struct inode *inode)
fa0d7e3d 3874{
84e710da
AV
3875 if (S_ISLNK(inode->i_mode))
3876 kfree(inode->i_link);
fa0d7e3d
NP
3877 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
3878}
3879
1da177e4
LT
3880static void shmem_destroy_inode(struct inode *inode)
3881{
09208d15 3882 if (S_ISREG(inode->i_mode))
1da177e4 3883 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
1da177e4
LT
3884}
3885
41ffe5d5 3886static void shmem_init_inode(void *foo)
1da177e4 3887{
41ffe5d5
HD
3888 struct shmem_inode_info *info = foo;
3889 inode_init_once(&info->vfs_inode);
1da177e4
LT
3890}
3891
9a8ec03e 3892static void shmem_init_inodecache(void)
1da177e4
LT
3893{
3894 shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
3895 sizeof(struct shmem_inode_info),
5d097056 3896 0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
1da177e4
LT
3897}
3898
41ffe5d5 3899static void shmem_destroy_inodecache(void)
1da177e4 3900{
1a1d92c1 3901 kmem_cache_destroy(shmem_inode_cachep);
1da177e4
LT
3902}
3903
a7605426
YS
3904/* Keep the page in page cache instead of truncating it */
3905static int shmem_error_remove_page(struct address_space *mapping,
3906 struct page *page)
3907{
3908 return 0;
3909}
3910
30e6a51d 3911const struct address_space_operations shmem_aops = {
1da177e4 3912 .writepage = shmem_writepage,
46de8b97 3913 .dirty_folio = noop_dirty_folio,
1da177e4 3914#ifdef CONFIG_TMPFS
800d15a5
NP
3915 .write_begin = shmem_write_begin,
3916 .write_end = shmem_write_end,
1da177e4 3917#endif
1c93923c 3918#ifdef CONFIG_MIGRATION
54184650 3919 .migrate_folio = migrate_folio,
1c93923c 3920#endif
a7605426 3921 .error_remove_page = shmem_error_remove_page,
1da177e4 3922};
30e6a51d 3923EXPORT_SYMBOL(shmem_aops);
1da177e4 3924
15ad7cdc 3925static const struct file_operations shmem_file_operations = {
1da177e4 3926 .mmap = shmem_mmap,
a5454f95 3927 .open = generic_file_open,
c01d5b30 3928 .get_unmapped_area = shmem_get_unmapped_area,
1da177e4 3929#ifdef CONFIG_TMPFS
220f2ac9 3930 .llseek = shmem_file_llseek,
2ba5bbed 3931 .read_iter = shmem_file_read_iter,
8174202b 3932 .write_iter = generic_file_write_iter,
1b061d92 3933 .fsync = noop_fsync,
82c156f8 3934 .splice_read = generic_file_splice_read,
f6cb85d0 3935 .splice_write = iter_file_splice_write,
83e4fa9c 3936 .fallocate = shmem_fallocate,
1da177e4
LT
3937#endif
3938};
3939
92e1d5be 3940static const struct inode_operations shmem_inode_operations = {
44a30220 3941 .getattr = shmem_getattr,
94c1e62d 3942 .setattr = shmem_setattr,
b09e0fa4 3943#ifdef CONFIG_TMPFS_XATTR
b09e0fa4 3944 .listxattr = shmem_listxattr,
feda821e 3945 .set_acl = simple_set_acl,
e408e695
TT
3946 .fileattr_get = shmem_fileattr_get,
3947 .fileattr_set = shmem_fileattr_set,
b09e0fa4 3948#endif
1da177e4
LT
3949};
3950
92e1d5be 3951static const struct inode_operations shmem_dir_inode_operations = {
1da177e4 3952#ifdef CONFIG_TMPFS
f7cd16a5 3953 .getattr = shmem_getattr,
1da177e4
LT
3954 .create = shmem_create,
3955 .lookup = simple_lookup,
3956 .link = shmem_link,
3957 .unlink = shmem_unlink,
3958 .symlink = shmem_symlink,
3959 .mkdir = shmem_mkdir,
3960 .rmdir = shmem_rmdir,
3961 .mknod = shmem_mknod,
2773bf00 3962 .rename = shmem_rename2,
60545d0d 3963 .tmpfile = shmem_tmpfile,
1da177e4 3964#endif
b09e0fa4 3965#ifdef CONFIG_TMPFS_XATTR
b09e0fa4 3966 .listxattr = shmem_listxattr,
e408e695
TT
3967 .fileattr_get = shmem_fileattr_get,
3968 .fileattr_set = shmem_fileattr_set,
b09e0fa4 3969#endif
39f0247d 3970#ifdef CONFIG_TMPFS_POSIX_ACL
94c1e62d 3971 .setattr = shmem_setattr,
feda821e 3972 .set_acl = simple_set_acl,
39f0247d
AG
3973#endif
3974};
3975
92e1d5be 3976static const struct inode_operations shmem_special_inode_operations = {
f7cd16a5 3977 .getattr = shmem_getattr,
b09e0fa4 3978#ifdef CONFIG_TMPFS_XATTR
b09e0fa4 3979 .listxattr = shmem_listxattr,
b09e0fa4 3980#endif
39f0247d 3981#ifdef CONFIG_TMPFS_POSIX_ACL
94c1e62d 3982 .setattr = shmem_setattr,
feda821e 3983 .set_acl = simple_set_acl,
39f0247d 3984#endif
1da177e4
LT
3985};
3986
759b9775 3987static const struct super_operations shmem_ops = {
1da177e4 3988 .alloc_inode = shmem_alloc_inode,
74b1da56 3989 .free_inode = shmem_free_in_core_inode,
1da177e4
LT
3990 .destroy_inode = shmem_destroy_inode,
3991#ifdef CONFIG_TMPFS
3992 .statfs = shmem_statfs,
680d794b 3993 .show_options = shmem_show_options,
1da177e4 3994#endif
1f895f75 3995 .evict_inode = shmem_evict_inode,
1da177e4
LT
3996 .drop_inode = generic_delete_inode,
3997 .put_super = shmem_put_super,
396bcc52 3998#ifdef CONFIG_TRANSPARENT_HUGEPAGE
779750d2
KS
3999 .nr_cached_objects = shmem_unused_huge_count,
4000 .free_cached_objects = shmem_unused_huge_scan,
4001#endif
1da177e4
LT
4002};
4003
f0f37e2f 4004static const struct vm_operations_struct shmem_vm_ops = {
54cb8821 4005 .fault = shmem_fault,
d7c17551 4006 .map_pages = filemap_map_pages,
1da177e4
LT
4007#ifdef CONFIG_NUMA
4008 .set_policy = shmem_set_policy,
4009 .get_policy = shmem_get_policy,
4010#endif
4011};
4012
d09e8ca6
PT
4013static const struct vm_operations_struct shmem_anon_vm_ops = {
4014 .fault = shmem_fault,
4015 .map_pages = filemap_map_pages,
4016#ifdef CONFIG_NUMA
4017 .set_policy = shmem_set_policy,
4018 .get_policy = shmem_get_policy,
4019#endif
4020};
4021
f3235626 4022int shmem_init_fs_context(struct fs_context *fc)
1da177e4 4023{
f3235626
DH
4024 struct shmem_options *ctx;
4025
4026 ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
4027 if (!ctx)
4028 return -ENOMEM;
4029
4030 ctx->mode = 0777 | S_ISVTX;
4031 ctx->uid = current_fsuid();
4032 ctx->gid = current_fsgid();
4033
4034 fc->fs_private = ctx;
4035 fc->ops = &shmem_fs_context_ops;
4036 return 0;
1da177e4
LT
4037}
4038
41ffe5d5 4039static struct file_system_type shmem_fs_type = {
1da177e4
LT
4040 .owner = THIS_MODULE,
4041 .name = "tmpfs",
f3235626
DH
4042 .init_fs_context = shmem_init_fs_context,
4043#ifdef CONFIG_TMPFS
d7167b14 4044 .parameters = shmem_fs_parameters,
f3235626 4045#endif
1da177e4 4046 .kill_sb = kill_litter_super,
ff36da69 4047 .fs_flags = FS_USERNS_MOUNT,
1da177e4 4048};
1da177e4 4049
9096bbe9 4050void __init shmem_init(void)
1da177e4
LT
4051{
4052 int error;
4053
9a8ec03e 4054 shmem_init_inodecache();
1da177e4 4055
41ffe5d5 4056 error = register_filesystem(&shmem_fs_type);
1da177e4 4057 if (error) {
1170532b 4058 pr_err("Could not register tmpfs\n");
1da177e4
LT
4059 goto out2;
4060 }
95dc112a 4061
ca4e0519 4062 shm_mnt = kern_mount(&shmem_fs_type);
1da177e4
LT
4063 if (IS_ERR(shm_mnt)) {
4064 error = PTR_ERR(shm_mnt);
1170532b 4065 pr_err("Could not kern_mount tmpfs\n");
1da177e4
LT
4066 goto out1;
4067 }
5a6e75f8 4068
396bcc52 4069#ifdef CONFIG_TRANSPARENT_HUGEPAGE
435c0b87 4070 if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
5a6e75f8
KS
4071 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
4072 else
5e6e5a12 4073 shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */
5a6e75f8 4074#endif
9096bbe9 4075 return;
1da177e4
LT
4076
4077out1:
41ffe5d5 4078 unregister_filesystem(&shmem_fs_type);
1da177e4 4079out2:
41ffe5d5 4080 shmem_destroy_inodecache();
1da177e4 4081 shm_mnt = ERR_PTR(error);
1da177e4 4082}
853ac43a 4083
396bcc52 4084#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
5a6e75f8 4085static ssize_t shmem_enabled_show(struct kobject *kobj,
79d4d38a 4086 struct kobj_attribute *attr, char *buf)
5a6e75f8 4087{
26083eb6 4088 static const int values[] = {
5a6e75f8
KS
4089 SHMEM_HUGE_ALWAYS,
4090 SHMEM_HUGE_WITHIN_SIZE,
4091 SHMEM_HUGE_ADVISE,
4092 SHMEM_HUGE_NEVER,
4093 SHMEM_HUGE_DENY,
4094 SHMEM_HUGE_FORCE,
4095 };
79d4d38a
JP
4096 int len = 0;
4097 int i;
5a6e75f8 4098
79d4d38a
JP
4099 for (i = 0; i < ARRAY_SIZE(values); i++) {
4100 len += sysfs_emit_at(buf, len,
4101 shmem_huge == values[i] ? "%s[%s]" : "%s%s",
4102 i ? " " : "",
4103 shmem_format_huge(values[i]));
5a6e75f8 4104 }
79d4d38a
JP
4105
4106 len += sysfs_emit_at(buf, len, "\n");
4107
4108 return len;
5a6e75f8
KS
4109}
4110
4111static ssize_t shmem_enabled_store(struct kobject *kobj,
4112 struct kobj_attribute *attr, const char *buf, size_t count)
4113{
4114 char tmp[16];
4115 int huge;
4116
4117 if (count + 1 > sizeof(tmp))
4118 return -EINVAL;
4119 memcpy(tmp, buf, count);
4120 tmp[count] = '\0';
4121 if (count && tmp[count - 1] == '\n')
4122 tmp[count - 1] = '\0';
4123
4124 huge = shmem_parse_huge(tmp);
4125 if (huge == -EINVAL)
4126 return -EINVAL;
4127 if (!has_transparent_hugepage() &&
4128 huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
4129 return -EINVAL;
4130
4131 shmem_huge = huge;
435c0b87 4132 if (shmem_huge > SHMEM_HUGE_DENY)
5a6e75f8
KS
4133 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
4134 return count;
4135}
4136
4bfa8ada 4137struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled);
396bcc52 4138#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
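A brief usage note for the attribute above; the sysfs path is the conventional THP location from the admin documentation, not something defined in this file:

/*
 * Runtime control sketch:
 *
 *   cat /sys/kernel/mm/transparent_hugepage/shmem_enabled
 *     always within_size advise [never] deny force
 *   echo within_size > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 *
 * "deny" and "force" override per-mount "huge=" settings (off and on,
 * respectively), per the THP admin documentation.
 */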
f3f0e1d2 4139
853ac43a
MM
4140#else /* !CONFIG_SHMEM */
4141
4142/*
4143 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
4144 *
4145 * This is intended for small systems where the benefits of the full
4146 * shmem code (swap-backed and resource-limited) are outweighed by
4147 * its complexity. On systems without swap this code should be
4148 * effectively equivalent, but much lighter weight.
4149 */
4150
41ffe5d5 4151static struct file_system_type shmem_fs_type = {
853ac43a 4152 .name = "tmpfs",
f3235626 4153 .init_fs_context = ramfs_init_fs_context,
d7167b14 4154 .parameters = ramfs_fs_parameters,
853ac43a 4155 .kill_sb = kill_litter_super,
2b8576cb 4156 .fs_flags = FS_USERNS_MOUNT,
853ac43a
MM
4157};
4158
9096bbe9 4159void __init shmem_init(void)
853ac43a 4160{
41ffe5d5 4161 BUG_ON(register_filesystem(&shmem_fs_type) != 0);
853ac43a 4162
41ffe5d5 4163 shm_mnt = kern_mount(&shmem_fs_type);
853ac43a 4164 BUG_ON(IS_ERR(shm_mnt));
853ac43a
MM
4165}
4166
10a9c496 4167int shmem_unuse(unsigned int type)
853ac43a
MM
4168{
4169 return 0;
4170}
4171
d7c9e99a 4172int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
3f96b79a
HD
4173{
4174 return 0;
4175}
4176
24513264
HD
4177void shmem_unlock_mapping(struct address_space *mapping)
4178{
4179}
4180
c01d5b30
HD
4181#ifdef CONFIG_MMU
4182unsigned long shmem_get_unmapped_area(struct file *file,
4183 unsigned long addr, unsigned long len,
4184 unsigned long pgoff, unsigned long flags)
4185{
4186 return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
4187}
4188#endif
4189
41ffe5d5 4190void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
94c1e62d 4191{
41ffe5d5 4192 truncate_inode_pages_range(inode->i_mapping, lstart, lend);
94c1e62d
HD
4193}
4194EXPORT_SYMBOL_GPL(shmem_truncate_range);
4195
0b0a0806 4196#define shmem_vm_ops generic_file_vm_ops
d09e8ca6 4197#define shmem_anon_vm_ops generic_file_vm_ops
0b0a0806 4198#define shmem_file_operations ramfs_file_operations
454abafe 4199#define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev)
0b0a0806
HD
4200#define shmem_acct_size(flags, size) 0
4201#define shmem_unacct_size(flags, size) do {} while (0)
853ac43a
MM
4202
4203#endif /* CONFIG_SHMEM */
4204
4205/* common code */
1da177e4 4206
703321b6 4207static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
c7277090 4208 unsigned long flags, unsigned int i_flags)
1da177e4 4209{
1da177e4 4210 struct inode *inode;
93dec2da 4211 struct file *res;
1da177e4 4212
703321b6
MA
4213 if (IS_ERR(mnt))
4214 return ERR_CAST(mnt);
1da177e4 4215
285b2c4f 4216 if (size < 0 || size > MAX_LFS_FILESIZE)
1da177e4
LT
4217 return ERR_PTR(-EINVAL);
4218
4219 if (shmem_acct_size(flags, size))
4220 return ERR_PTR(-ENOMEM);
4221
93dec2da
AV
4222 inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0,
4223 flags);
dac2d1f6
AV
4224 if (unlikely(!inode)) {
4225 shmem_unacct_size(flags, size);
4226 return ERR_PTR(-ENOSPC);
4227 }
c7277090 4228 inode->i_flags |= i_flags;
1da177e4 4229 inode->i_size = size;
6d6b77f1 4230 clear_nlink(inode); /* It is unlinked */
26567cdb 4231 res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
93dec2da
AV
4232 if (!IS_ERR(res))
4233 res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
4234 &shmem_file_operations);
26567cdb 4235 if (IS_ERR(res))
93dec2da 4236 iput(inode);
6b4d0b27 4237 return res;
1da177e4 4238}
c7277090
EP
4239
4240/**
4241 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
4242 * kernel internal. There will be NO LSM permission checks against the
4243 * underlying inode. So users of this interface must do LSM checks at a
e1832f29
SS
4244 * higher layer. The users are the big_key and shm implementations. LSM
4245 * checks are provided at the key or shm level rather than the inode.
c7277090
EP
4246 * @name: name for dentry (to be seen in /proc/<pid>/maps)
4247 * @size: size to be set for the file
4248 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4249 */
4250struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
4251{
703321b6 4252 return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
c7277090
EP
4253}
4254
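A hedged sketch of a kernel-internal consumer of this interface; shmem_sketch_stash_blob() and its name string are hypothetical, but the pattern (create, size, fill via kernel_write(), keep the file around) matches the kind of use described above:

/* Hypothetical helper for illustration: stash @len bytes in an unlinked
 * tmpfs file.  S_PRIVATE is applied internally, so LSM checks are the
 * caller's responsibility, as the comment above explains.
 */
static struct file *shmem_sketch_stash_blob(const void *data, size_t len)
{
	struct file *file = shmem_kernel_file_setup("sketch-blob", len, 0);
	loff_t pos = 0;
	ssize_t written;

	if (IS_ERR(file))
		return file;

	written = kernel_write(file, data, len, &pos);
	if (written < 0 || (size_t)written != len) {
		fput(file);
		return ERR_PTR(written < 0 ? written : -EIO);
	}
	return file;
}
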
4255/**
4256 * shmem_file_setup - get an unlinked file living in tmpfs
4257 * @name: name for dentry (to be seen in /proc/<pid>/maps)
4258 * @size: size to be set for the file
4259 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4260 */
4261struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
4262{
703321b6 4263 return __shmem_file_setup(shm_mnt, name, size, flags, 0);
c7277090 4264}
395e0ddc 4265EXPORT_SYMBOL_GPL(shmem_file_setup);
1da177e4 4266
703321b6
MA
4267/**
4268 * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
4269 * @mnt: the tmpfs mount where the file will be created
4270 * @name: name for dentry (to be seen in /proc/<pid>/maps)
4271 * @size: size to be set for the file
4272 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4273 */
4274struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
4275 loff_t size, unsigned long flags)
4276{
4277 return __shmem_file_setup(mnt, name, size, flags, 0);
4278}
4279EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
4280
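Unlike the kernel-internal variant further up, these two setups go through the normal security checks; a sketch of a caller that prefers its own tmpfs mount when one is available (the mount pointer, object name and use of VM_NORESERVE are illustrative):

/* Hypothetical example: subsystems that want their own mount options
 * (e.g. a specific "huge=" policy) create files on a private tmpfs
 * mount and fall back to the system-wide shm_mnt otherwise.
 */
static struct file *sketch_create_backing(struct vfsmount *sketch_mnt,
					  loff_t size)
{
	if (sketch_mnt)
		return shmem_file_setup_with_mnt(sketch_mnt, "sketch-obj",
						 size, VM_NORESERVE);
	return shmem_file_setup("sketch-obj", size, VM_NORESERVE);
}
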
46711810 4281/**
1da177e4 4282 * shmem_zero_setup - setup a shared anonymous mapping
45e55300 4283 * @vma: the vma to be mmapped, as prepared by do_mmap
1da177e4
LT
4284 */
4285int shmem_zero_setup(struct vm_area_struct *vma)
4286{
4287 struct file *file;
4288 loff_t size = vma->vm_end - vma->vm_start;
4289
66fc1303 4290 /*
c1e8d7c6 4291 * Cloning a new file under mmap_lock leads to a lock ordering conflict
66fc1303
HD
4292 * between XFS directory reading and selinux: since this file is only
4293 * accessible to the user through its mapping, use S_PRIVATE flag to
4294 * bypass file security, in the same way as shmem_kernel_file_setup().
4295 */
703321b6 4296 file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
1da177e4
LT
4297 if (IS_ERR(file))
4298 return PTR_ERR(file);
4299
4300 if (vma->vm_file)
4301 fput(vma->vm_file);
4302 vma->vm_file = file;
d09e8ca6 4303 vma->vm_ops = &shmem_anon_vm_ops;
f3f0e1d2 4304
1da177e4
LT
4305 return 0;
4306}
d9d90e5e
HD
4307
4308/**
4309 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
4310 * @mapping: the page's address_space
4311 * @index: the page index
4312 * @gfp: the page allocator flags to use if allocating
4313 *
4314 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
4315 * with any new page allocations done using the specified allocation flags.
7e0a1265 4316 * But read_cache_page_gfp() uses the ->read_folio() method, which does not
d9d90e5e
HD
4317 * suit tmpfs, since it may have pages in swapcache, and needs to find those
4318 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
4319 *
68da9f05
HD
4320 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
4321 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
d9d90e5e
HD
4322 */
4323struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4324 pgoff_t index, gfp_t gfp)
4325{
68da9f05
HD
4326#ifdef CONFIG_SHMEM
4327 struct inode *inode = mapping->host;
a3a9c397 4328 struct folio *folio;
9276aad6 4329 struct page *page;
68da9f05
HD
4330 int error;
4331
30e6a51d 4332 BUG_ON(!shmem_mapping(mapping));
a3a9c397 4333 error = shmem_get_folio_gfp(inode, index, &folio, SGP_CACHE,
cfda0526 4334 gfp, NULL, NULL, NULL);
68da9f05 4335 if (error)
a7605426
YS
4336 return ERR_PTR(error);
4337
a3a9c397
MWO
4338 folio_unlock(folio);
4339 page = folio_file_page(folio, index);
a7605426 4340 if (PageHWPoison(page)) {
a3a9c397 4341 folio_put(folio);
a7605426
YS
4342 return ERR_PTR(-EIO);
4343 }
4344
68da9f05
HD
4345 return page;
4346#else
4347 /*
4348 * The tiny !SHMEM case uses ramfs without swap
4349 */
d9d90e5e 4350 return read_cache_page_gfp(mapping, index, gfp);
68da9f05 4351#endif
d9d90e5e
HD
4352}
4353EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
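Finally, a hedged sketch of the call pattern the comment above recommends, mixing __GFP_NORETRY | __GFP_NOWARN into the mapping's gfp mask; the helper name and loop are illustrative, not taken from any driver:

/* Hypothetical example: take a reference on the first @nr pages of a
 * tmpfs-backed object, preferring allocation failure over OOMing.
 */
static int sketch_pin_pages(struct address_space *mapping,
			    struct page **pages, pgoff_t nr)
{
	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;
	pgoff_t i;

	for (i = 0; i < nr; i++) {
		struct page *page = shmem_read_mapping_page_gfp(mapping, i, gfp);

		if (IS_ERR(page)) {
			while (i--)
				put_page(pages[i]);
			return PTR_ERR(page);
		}
		pages[i] = page;
	}
	return 0;
}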