// SPDX-License-Identifier: GPL-2.0-only
/*
 *	fs/libfs.c
 *	Library for filesystems writers.
 */

#include <linux/blkdev.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/cred.h>
#include <linux/mount.h>
#include <linux/vfs.h>
#include <linux/quotaops.h>
#include <linux/mutex.h>
#include <linux/namei.h>
#include <linux/exportfs.h>
#include <linux/iversion.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h> /* sync_mapping_buffers */
#include <linux/fs_context.h>
#include <linux/pseudo_fs.h>
#include <linux/fsnotify.h>
#include <linux/unicode.h>
#include <linux/fscrypt.h>
#include <linux/pidfs.h>

#include <linux/uaccess.h>

#include "internal.h"

int simple_getattr(struct mnt_idmap *idmap, const struct path *path,
		   struct kstat *stat, u32 request_mask,
		   unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
	stat->blocks = inode->i_mapping->nrpages << (PAGE_SHIFT - 9);
	return 0;
}
EXPORT_SYMBOL(simple_getattr);

int simple_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	u64 id = huge_encode_dev(dentry->d_sb->s_dev);

	buf->f_fsid = u64_to_fsid(id);
	buf->f_type = dentry->d_sb->s_magic;
	buf->f_bsize = PAGE_SIZE;
	buf->f_namelen = NAME_MAX;
	return 0;
}
EXPORT_SYMBOL(simple_statfs);

/*
 * Retaining negative dentries for an in-memory filesystem just wastes
 * memory and lookup time: arrange for them to be deleted immediately.
 */
int always_delete_dentry(const struct dentry *dentry)
{
	return 1;
}
EXPORT_SYMBOL(always_delete_dentry);

const struct dentry_operations simple_dentry_operations = {
	.d_delete = always_delete_dentry,
};
EXPORT_SYMBOL(simple_dentry_operations);

/*
 * Lookup the data. This is trivial - if the dentry didn't already
 * exist, we know it is negative. Set d_op to delete negative dentries.
 */
struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);
	if (!dentry->d_sb->s_d_op)
		d_set_d_op(dentry, &simple_dentry_operations);
	d_add(dentry, NULL);
	return NULL;
}
EXPORT_SYMBOL(simple_lookup);

int dcache_dir_open(struct inode *inode, struct file *file)
{
	file->private_data = d_alloc_cursor(file->f_path.dentry);

	return file->private_data ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(dcache_dir_open);

int dcache_dir_close(struct inode *inode, struct file *file)
{
	dput(file->private_data);
	return 0;
}
EXPORT_SYMBOL(dcache_dir_close);

/* parent is locked at least shared */
/*
 * Returns an element of siblings' list.
 * We are looking for <count>th positive after <p>; if
 * found, dentry is grabbed and returned to caller.
 * If no such element exists, NULL is returned.
 */
static struct dentry *scan_positives(struct dentry *cursor,
					struct hlist_node **p,
					loff_t count,
					struct dentry *last)
{
	struct dentry *dentry = cursor->d_parent, *found = NULL;

	spin_lock(&dentry->d_lock);
	while (*p) {
		struct dentry *d = hlist_entry(*p, struct dentry, d_sib);
		p = &d->d_sib.next;
		// we must at least skip cursors, to avoid livelocks
		if (d->d_flags & DCACHE_DENTRY_CURSOR)
			continue;
		if (simple_positive(d) && !--count) {
			spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
			if (simple_positive(d))
				found = dget_dlock(d);
			spin_unlock(&d->d_lock);
			if (likely(found))
				break;
			count = 1;
		}
		if (need_resched()) {
			if (!hlist_unhashed(&cursor->d_sib))
				__hlist_del(&cursor->d_sib);
			hlist_add_behind(&cursor->d_sib, &d->d_sib);
			p = &cursor->d_sib.next;
			spin_unlock(&dentry->d_lock);
			cond_resched();
			spin_lock(&dentry->d_lock);
		}
	}
	spin_unlock(&dentry->d_lock);
	dput(last);
	return found;
}

loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
{
	struct dentry *dentry = file->f_path.dentry;
	switch (whence) {
		case 1:
			offset += file->f_pos;
			fallthrough;
		case 0:
			if (offset >= 0)
				break;
			fallthrough;
		default:
			return -EINVAL;
	}
	if (offset != file->f_pos) {
		struct dentry *cursor = file->private_data;
		struct dentry *to = NULL;

		inode_lock_shared(dentry->d_inode);

		if (offset > 2)
			to = scan_positives(cursor, &dentry->d_children.first,
					    offset - 2, NULL);
		spin_lock(&dentry->d_lock);
		hlist_del_init(&cursor->d_sib);
		if (to)
			hlist_add_behind(&cursor->d_sib, &to->d_sib);
		spin_unlock(&dentry->d_lock);
		dput(to);

		file->f_pos = offset;

		inode_unlock_shared(dentry->d_inode);
	}
	return offset;
}
EXPORT_SYMBOL(dcache_dir_lseek);

/*
 * Directory is locked and all positive dentries in it are safe, since
 * for ramfs-type trees they can't go away without unlink() or rmdir(),
 * both impossible due to the lock on directory.
 */

int dcache_readdir(struct file *file, struct dir_context *ctx)
{
	struct dentry *dentry = file->f_path.dentry;
	struct dentry *cursor = file->private_data;
	struct dentry *next = NULL;
	struct hlist_node **p;

	if (!dir_emit_dots(file, ctx))
		return 0;

	if (ctx->pos == 2)
		p = &dentry->d_children.first;
	else
		p = &cursor->d_sib.next;

	while ((next = scan_positives(cursor, p, 1, next)) != NULL) {
		if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
			      d_inode(next)->i_ino,
			      fs_umode_to_dtype(d_inode(next)->i_mode)))
			break;
		ctx->pos++;
		p = &next->d_sib.next;
	}
	spin_lock(&dentry->d_lock);
	hlist_del_init(&cursor->d_sib);
	if (next)
		hlist_add_before(&cursor->d_sib, &next->d_sib);
	spin_unlock(&dentry->d_lock);
	dput(next);

	return 0;
}
EXPORT_SYMBOL(dcache_readdir);

ssize_t generic_read_dir(struct file *filp, char __user *buf, size_t siz, loff_t *ppos)
{
	return -EISDIR;
}
EXPORT_SYMBOL(generic_read_dir);

const struct file_operations simple_dir_operations = {
	.open		= dcache_dir_open,
	.release	= dcache_dir_close,
	.llseek		= dcache_dir_lseek,
	.read		= generic_read_dir,
	.iterate_shared	= dcache_readdir,
	.fsync		= noop_fsync,
};
EXPORT_SYMBOL(simple_dir_operations);

const struct inode_operations simple_dir_inode_operations = {
	.lookup		= simple_lookup,
};
EXPORT_SYMBOL(simple_dir_inode_operations);
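
/*
 * Illustrative sketch (not part of libfs, kept under #if 0): a ramfs-style
 * in-memory filesystem typically wires a directory inode to the helpers
 * above roughly like this. The helper name and mode are hypothetical,
 * shown only for context.
 */
#if 0
static void example_init_dir_inode(struct inode *inode)
{
	inode->i_mode = S_IFDIR | 0755;
	inode->i_op = &simple_dir_inode_operations;	/* dcache-only lookups */
	inode->i_fop = &simple_dir_operations;		/* readdir walks d_children */
	inc_nlink(inode);				/* extra link for "." */
}
#endif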

static void offset_set(struct dentry *dentry, u32 offset)
{
	dentry->d_fsdata = (void *)((uintptr_t)(offset));
}

static u32 dentry2offset(struct dentry *dentry)
{
	return (u32)((uintptr_t)(dentry->d_fsdata));
}

static struct lock_class_key simple_offset_xa_lock;

/**
 * simple_offset_init - initialize an offset_ctx
 * @octx: directory offset map to be initialized
 *
 */
void simple_offset_init(struct offset_ctx *octx)
{
	xa_init_flags(&octx->xa, XA_FLAGS_ALLOC1);
	lockdep_set_class(&octx->xa.xa_lock, &simple_offset_xa_lock);

	/* 0 is '.', 1 is '..', so always start with offset 2 */
	octx->next_offset = 2;
}

/**
 * simple_offset_add - Add an entry to a directory's offset map
 * @octx: directory offset ctx to be updated
 * @dentry: new dentry being added
 *
 * Returns zero on success. @octx and the dentry offset are updated.
 * Otherwise, a negative errno value is returned.
 */
int simple_offset_add(struct offset_ctx *octx, struct dentry *dentry)
{
	static const struct xa_limit limit = XA_LIMIT(2, U32_MAX);
	u32 offset;
	int ret;

	if (dentry2offset(dentry) != 0)
		return -EBUSY;

	ret = xa_alloc_cyclic(&octx->xa, &offset, dentry, limit,
			      &octx->next_offset, GFP_KERNEL);
	if (ret < 0)
		return ret;

	offset_set(dentry, offset);
	return 0;
}

/**
 * simple_offset_remove - Remove an entry from a directory's offset map
 * @octx: directory offset ctx to be updated
 * @dentry: dentry being removed
 *
 */
void simple_offset_remove(struct offset_ctx *octx, struct dentry *dentry)
{
	u32 offset;

	offset = dentry2offset(dentry);
	if (offset == 0)
		return;

	xa_erase(&octx->xa, offset);
	offset_set(dentry, 0);
}

/**
 * simple_offset_rename_exchange - exchange rename with directory offsets
 * @old_dir: parent of dentry being moved
 * @old_dentry: dentry being moved
 * @new_dir: destination parent
 * @new_dentry: destination dentry
 *
 * Returns zero on success. Otherwise a negative errno is returned and the
 * rename is rolled back.
 */
int simple_offset_rename_exchange(struct inode *old_dir,
				  struct dentry *old_dentry,
				  struct inode *new_dir,
				  struct dentry *new_dentry)
{
	struct offset_ctx *old_ctx = old_dir->i_op->get_offset_ctx(old_dir);
	struct offset_ctx *new_ctx = new_dir->i_op->get_offset_ctx(new_dir);
	u32 old_index = dentry2offset(old_dentry);
	u32 new_index = dentry2offset(new_dentry);
	int ret;

	simple_offset_remove(old_ctx, old_dentry);
	simple_offset_remove(new_ctx, new_dentry);

	ret = simple_offset_add(new_ctx, old_dentry);
	if (ret)
		goto out_restore;

	ret = simple_offset_add(old_ctx, new_dentry);
	if (ret) {
		simple_offset_remove(new_ctx, old_dentry);
		goto out_restore;
	}

	ret = simple_rename_exchange(old_dir, old_dentry, new_dir, new_dentry);
	if (ret) {
		simple_offset_remove(new_ctx, old_dentry);
		simple_offset_remove(old_ctx, new_dentry);
		goto out_restore;
	}
	return 0;

out_restore:
	offset_set(old_dentry, old_index);
	xa_store(&old_ctx->xa, old_index, old_dentry, GFP_KERNEL);
	offset_set(new_dentry, new_index);
	xa_store(&new_ctx->xa, new_index, new_dentry, GFP_KERNEL);
	return ret;
}

/**
 * simple_offset_destroy - Release offset map
 * @octx: directory offset ctx that is about to be destroyed
 *
 * During fs teardown (e.g. umount), a directory's offset map might still
 * contain entries. xa_destroy() cleans out anything that remains.
 */
void simple_offset_destroy(struct offset_ctx *octx)
{
	xa_destroy(&octx->xa);
}

/**
 * offset_dir_llseek - Advance the read position of a directory descriptor
 * @file: an open directory whose position is to be updated
 * @offset: a byte offset
 * @whence: enumerator describing the starting position for this update
 *
 * SEEK_END, SEEK_DATA, and SEEK_HOLE are not supported for directories.
 *
 * Returns the updated read position if successful; otherwise a
 * negative errno is returned and the read position remains unchanged.
 */
static loff_t offset_dir_llseek(struct file *file, loff_t offset, int whence)
{
	switch (whence) {
	case SEEK_CUR:
		offset += file->f_pos;
		fallthrough;
	case SEEK_SET:
		if (offset >= 0)
			break;
		fallthrough;
	default:
		return -EINVAL;
	}

	/* In this case, ->private_data is protected by f_pos_lock */
	file->private_data = NULL;
	return vfs_setpos(file, offset, U32_MAX);
}

static struct dentry *offset_find_next(struct xa_state *xas)
{
	struct dentry *child, *found = NULL;

	rcu_read_lock();
	child = xas_next_entry(xas, U32_MAX);
	if (!child)
		goto out;
	spin_lock(&child->d_lock);
	if (simple_positive(child))
		found = dget_dlock(child);
	spin_unlock(&child->d_lock);
out:
	rcu_read_unlock();
	return found;
}

static bool offset_dir_emit(struct dir_context *ctx, struct dentry *dentry)
{
	u32 offset = dentry2offset(dentry);
	struct inode *inode = d_inode(dentry);

	return ctx->actor(ctx, dentry->d_name.name, dentry->d_name.len, offset,
			  inode->i_ino, fs_umode_to_dtype(inode->i_mode));
}

static void *offset_iterate_dir(struct inode *inode, struct dir_context *ctx)
{
	struct offset_ctx *so_ctx = inode->i_op->get_offset_ctx(inode);
	XA_STATE(xas, &so_ctx->xa, ctx->pos);
	struct dentry *dentry;

	while (true) {
		dentry = offset_find_next(&xas);
		if (!dentry)
			return ERR_PTR(-ENOENT);

		if (!offset_dir_emit(ctx, dentry)) {
			dput(dentry);
			break;
		}

		dput(dentry);
		ctx->pos = xas.xa_index + 1;
	}
	return NULL;
}

/**
 * offset_readdir - Emit entries starting at offset @ctx->pos
 * @file: an open directory to iterate over
 * @ctx: directory iteration context
 *
 * Caller must hold @file's i_rwsem to prevent insertion or removal of
 * entries during this call.
 *
 * On entry, @ctx->pos contains an offset that represents the first entry
 * to be read from the directory.
 *
 * The operation continues until there are no more entries to read, or
 * until the ctx->actor indicates there is no more space in the caller's
 * output buffer.
 *
 * On return, @ctx->pos contains an offset that will read the next entry
 * in this directory when offset_readdir() is called again with @ctx.
 *
 * Return values:
 *   %0 - Complete
 */
static int offset_readdir(struct file *file, struct dir_context *ctx)
{
	struct dentry *dir = file->f_path.dentry;

	lockdep_assert_held(&d_inode(dir)->i_rwsem);

	if (!dir_emit_dots(file, ctx))
		return 0;

	/* In this case, ->private_data is protected by f_pos_lock */
	if (ctx->pos == 2)
		file->private_data = NULL;
	else if (file->private_data == ERR_PTR(-ENOENT))
		return 0;
	file->private_data = offset_iterate_dir(d_inode(dir), ctx);
	return 0;
}

const struct file_operations simple_offset_dir_operations = {
	.llseek		= offset_dir_llseek,
	.iterate_shared	= offset_readdir,
	.read		= generic_read_dir,
	.fsync		= noop_fsync,
};

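/*
 * Illustrative sketch (not part of libfs, kept under #if 0): how a
 * filesystem that wants stable directory offsets plugs into the helpers
 * above. The "example_*" names and the i_private layout are hypothetical;
 * tmpfs is the in-tree user of this machinery.
 */
#if 0
struct example_dir_info {
	struct offset_ctx	octx;	/* per-directory offset map */
};

static struct offset_ctx *example_get_offset_ctx(struct inode *inode)
{
	struct example_dir_info *info = inode->i_private;

	return &info->octx;
}

/*
 * Wiring:
 *  - directory i_op provides .get_offset_ctx = example_get_offset_ctx
 *  - directory i_fop is &simple_offset_dir_operations
 *  - ->mkdir() calls simple_offset_init() on the new directory's ctx
 *  - ->create()/->mkdir()/->symlink() call simple_offset_add()
 *  - ->unlink()/->rmdir() call simple_offset_remove()
 *  - inode eviction calls simple_offset_destroy()
 */
#endif
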
static struct dentry *find_next_child(struct dentry *parent, struct dentry *prev)
{
	struct dentry *child = NULL, *d;

	spin_lock(&parent->d_lock);
	d = prev ? d_next_sibling(prev) : d_first_child(parent);
	hlist_for_each_entry_from(d, d_sib) {
		if (simple_positive(d)) {
			spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
			if (simple_positive(d))
				child = dget_dlock(d);
			spin_unlock(&d->d_lock);
			if (likely(child))
				break;
		}
	}
	spin_unlock(&parent->d_lock);
	dput(prev);
	return child;
}

void simple_recursive_removal(struct dentry *dentry,
			      void (*callback)(struct dentry *))
{
	struct dentry *this = dget(dentry);
	while (true) {
		struct dentry *victim = NULL, *child;
		struct inode *inode = this->d_inode;

		inode_lock(inode);
		if (d_is_dir(this))
			inode->i_flags |= S_DEAD;
		while ((child = find_next_child(this, victim)) == NULL) {
			// kill and ascend
			// update metadata while it's still locked
			inode_set_ctime_current(inode);
			clear_nlink(inode);
			inode_unlock(inode);
			victim = this;
			this = this->d_parent;
			inode = this->d_inode;
			inode_lock(inode);
			if (simple_positive(victim)) {
				d_invalidate(victim);	// avoid lost mounts
				if (d_is_dir(victim))
					fsnotify_rmdir(inode, victim);
				else
					fsnotify_unlink(inode, victim);
				if (callback)
					callback(victim);
				dput(victim);		// unpin it
			}
			if (victim == dentry) {
				inode_set_mtime_to_ts(inode,
						      inode_set_ctime_current(inode));
				if (d_is_dir(dentry))
					drop_nlink(inode);
				inode_unlock(inode);
				dput(dentry);
				return;
			}
		}
		inode_unlock(inode);
		this = child;
	}
}
EXPORT_SYMBOL(simple_recursive_removal);
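
/*
 * Illustrative sketch (not part of libfs, kept under #if 0): a pseudo
 * filesystem can tear down an entire subtree with one call; debugfs uses
 * this helper for debugfs_remove(). The optional callback runs once per
 * victim for any per-node bookkeeping the caller needs.
 */
#if 0
	/* remove @dentry and everything below it, no per-node callback */
	simple_recursive_removal(dentry, NULL);
#endif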

static const struct super_operations simple_super_operations = {
	.statfs		= simple_statfs,
};

static int pseudo_fs_fill_super(struct super_block *s, struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = fc->fs_private;
	struct inode *root;

	s->s_maxbytes = MAX_LFS_FILESIZE;
	s->s_blocksize = PAGE_SIZE;
	s->s_blocksize_bits = PAGE_SHIFT;
	s->s_magic = ctx->magic;
	s->s_op = ctx->ops ?: &simple_super_operations;
	s->s_xattr = ctx->xattr;
	s->s_time_gran = 1;
	root = new_inode(s);
	if (!root)
		return -ENOMEM;

	/*
	 * since this is the first inode, make it number 1. New inodes created
	 * after this must take care not to collide with it (by passing
	 * max_reserved of 1 to iunique).
	 */
	root->i_ino = 1;
	root->i_mode = S_IFDIR | S_IRUSR | S_IWUSR;
	simple_inode_init_ts(root);
	s->s_root = d_make_root(root);
	if (!s->s_root)
		return -ENOMEM;
	s->s_d_op = ctx->dops;
	return 0;
}

static int pseudo_fs_get_tree(struct fs_context *fc)
{
	return get_tree_nodev(fc, pseudo_fs_fill_super);
}

static void pseudo_fs_free(struct fs_context *fc)
{
	kfree(fc->fs_private);
}

static const struct fs_context_operations pseudo_fs_context_ops = {
	.free		= pseudo_fs_free,
	.get_tree	= pseudo_fs_get_tree,
};

/*
 * Common helper for pseudo-filesystems (sockfs, pipefs, bdev - stuff that
 * will never be mountable)
 */
struct pseudo_fs_context *init_pseudo(struct fs_context *fc,
				      unsigned long magic)
{
	struct pseudo_fs_context *ctx;

	ctx = kzalloc(sizeof(struct pseudo_fs_context), GFP_KERNEL);
	if (likely(ctx)) {
		ctx->magic = magic;
		fc->fs_private = ctx;
		fc->ops = &pseudo_fs_context_ops;
		fc->sb_flags |= SB_NOUSER;
		fc->global = true;
	}
	return ctx;
}
EXPORT_SYMBOL(init_pseudo);
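
/*
 * Illustrative sketch (not part of libfs, kept under #if 0): a typical
 * pseudo filesystem (pipefs, sockfs, ...) only needs an init_fs_context
 * hook built on init_pseudo(), and is mounted internally via kern_mount().
 * The "examplefs" name and magic value are hypothetical placeholders.
 */
#if 0
#define EXAMPLEFS_MAGIC	0x4578616d	/* hypothetical magic */

static int examplefs_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, EXAMPLEFS_MAGIC);

	if (!ctx)
		return -ENOMEM;
	ctx->dops = &simple_dentry_operations;	/* optional: drop negatives */
	return 0;
}

static struct file_system_type example_fs_type = {
	.name			= "examplefs",
	.init_fs_context	= examplefs_init_fs_context,
	.kill_sb		= kill_anon_super,
};
#endif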

int simple_open(struct inode *inode, struct file *file)
{
	if (inode->i_private)
		file->private_data = inode->i_private;
	return 0;
}
EXPORT_SYMBOL(simple_open);

int simple_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(old_dentry);

	inode_set_mtime_to_ts(dir,
			      inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
	inc_nlink(inode);
	ihold(inode);
	dget(dentry);
	d_instantiate(dentry, inode);
	return 0;
}
EXPORT_SYMBOL(simple_link);

int simple_empty(struct dentry *dentry)
{
	struct dentry *child;
	int ret = 0;

	spin_lock(&dentry->d_lock);
	hlist_for_each_entry(child, &dentry->d_children, d_sib) {
		spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
		if (simple_positive(child)) {
			spin_unlock(&child->d_lock);
			goto out;
		}
		spin_unlock(&child->d_lock);
	}
	ret = 1;
out:
	spin_unlock(&dentry->d_lock);
	return ret;
}
EXPORT_SYMBOL(simple_empty);

int simple_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	inode_set_mtime_to_ts(dir,
			      inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
	drop_nlink(inode);
	dput(dentry);
	return 0;
}
EXPORT_SYMBOL(simple_unlink);

int simple_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	drop_nlink(d_inode(dentry));
	simple_unlink(dir, dentry);
	drop_nlink(dir);
	return 0;
}
EXPORT_SYMBOL(simple_rmdir);

/**
 * simple_rename_timestamp - update the various inode timestamps for rename
 * @old_dir: old parent directory
 * @old_dentry: dentry that is being renamed
 * @new_dir: new parent directory
 * @new_dentry: target for rename
 *
 * POSIX mandates that the old and new parent directories have their ctime and
 * mtime updated, and that inodes of @old_dentry and @new_dentry (if any), have
 * their ctime updated.
 */
void simple_rename_timestamp(struct inode *old_dir, struct dentry *old_dentry,
			     struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *newino = d_inode(new_dentry);

	inode_set_mtime_to_ts(old_dir, inode_set_ctime_current(old_dir));
	if (new_dir != old_dir)
		inode_set_mtime_to_ts(new_dir,
				      inode_set_ctime_current(new_dir));
	inode_set_ctime_current(d_inode(old_dentry));
	if (newino)
		inode_set_ctime_current(newino);
}
EXPORT_SYMBOL_GPL(simple_rename_timestamp);

int simple_rename_exchange(struct inode *old_dir, struct dentry *old_dentry,
			   struct inode *new_dir, struct dentry *new_dentry)
{
	bool old_is_dir = d_is_dir(old_dentry);
	bool new_is_dir = d_is_dir(new_dentry);

	if (old_dir != new_dir && old_is_dir != new_is_dir) {
		if (old_is_dir) {
			drop_nlink(old_dir);
			inc_nlink(new_dir);
		} else {
			drop_nlink(new_dir);
			inc_nlink(old_dir);
		}
	}
	simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
	return 0;
}
EXPORT_SYMBOL_GPL(simple_rename_exchange);

int simple_rename(struct mnt_idmap *idmap, struct inode *old_dir,
		  struct dentry *old_dentry, struct inode *new_dir,
		  struct dentry *new_dentry, unsigned int flags)
{
	int they_are_dirs = d_is_dir(old_dentry);

	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
		return -EINVAL;

	if (flags & RENAME_EXCHANGE)
		return simple_rename_exchange(old_dir, old_dentry, new_dir, new_dentry);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (d_really_is_positive(new_dentry)) {
		simple_unlink(new_dir, new_dentry);
		if (they_are_dirs) {
			drop_nlink(d_inode(new_dentry));
			drop_nlink(old_dir);
		}
	} else if (they_are_dirs) {
		drop_nlink(old_dir);
		inc_nlink(new_dir);
	}

	simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
	return 0;
}
EXPORT_SYMBOL(simple_rename);

/**
 * simple_setattr - setattr for simple filesystem
 * @idmap: idmap of the target mount
 * @dentry: dentry
 * @iattr: iattr structure
 *
 * Returns 0 on success, -error on failure.
 *
 * simple_setattr is a simple ->setattr implementation without a proper
 * implementation of size changes.
 *
 * It can either be used for in-memory filesystems or special files
 * on simple regular filesystems. Anything that needs to change on-disk
 * or wire state on size changes needs its own setattr method.
 */
int simple_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		   struct iattr *iattr)
{
	struct inode *inode = d_inode(dentry);
	int error;

	error = setattr_prepare(idmap, dentry, iattr);
	if (error)
		return error;

	if (iattr->ia_valid & ATTR_SIZE)
		truncate_setsize(inode, iattr->ia_size);
	setattr_copy(idmap, inode, iattr);
	mark_inode_dirty(inode);
	return 0;
}
EXPORT_SYMBOL(simple_setattr);

static int simple_read_folio(struct file *file, struct folio *folio)
{
	folio_zero_range(folio, 0, folio_size(folio));
	flush_dcache_folio(folio);
	folio_mark_uptodate(folio);
	folio_unlock(folio);
	return 0;
}

int simple_write_begin(struct file *file, struct address_space *mapping,
		       loff_t pos, unsigned len,
		       struct page **pagep, void **fsdata)
{
	struct folio *folio;

	folio = __filemap_get_folio(mapping, pos / PAGE_SIZE, FGP_WRITEBEGIN,
				    mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	*pagep = &folio->page;

	if (!folio_test_uptodate(folio) && (len != folio_size(folio))) {
		size_t from = offset_in_folio(folio, pos);

		folio_zero_segments(folio, 0, from,
				    from + len, folio_size(folio));
	}
	return 0;
}
EXPORT_SYMBOL(simple_write_begin);

/**
 * simple_write_end - .write_end helper for non-block-device FSes
 * @file: See .write_end of address_space_operations
 * @mapping:		"
 * @pos:		"
 * @len:		"
 * @copied:		"
 * @page:		"
 * @fsdata:		"
 *
 * simple_write_end does the minimum needed for updating a page after writing is
 * done. It has the same API signature as the .write_end of
 * address_space_operations vector. So it can just be set onto .write_end for
 * FSes that don't need any other processing. i_mutex is assumed to be held.
 * Block based filesystems should use generic_write_end().
 * NOTE: Even though i_size might get updated by this function, mark_inode_dirty
 * is not called, so a filesystem that actually does store data in .write_inode
 * should extend on what's done here with a call to mark_inode_dirty() in the
 * case that i_size has changed.
 *
 * Use *ONLY* with simple_read_folio()
 */
static int simple_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct folio *folio = page_folio(page);
	struct inode *inode = folio->mapping->host;
	loff_t last_pos = pos + copied;

	/* zero the stale part of the folio if we did a short copy */
	if (!folio_test_uptodate(folio)) {
		if (copied < len) {
			size_t from = offset_in_folio(folio, pos);

			folio_zero_range(folio, from + copied, len - copied);
		}
		folio_mark_uptodate(folio);
	}
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold the i_mutex.
	 */
	if (last_pos > inode->i_size)
		i_size_write(inode, last_pos);

	folio_mark_dirty(folio);
	folio_unlock(folio);
	folio_put(folio);

	return copied;
}

/*
 * Provides ramfs-style behavior: data in the pagecache, but no writeback.
 */
const struct address_space_operations ram_aops = {
	.read_folio	= simple_read_folio,
	.write_begin	= simple_write_begin,
	.write_end	= simple_write_end,
	.dirty_folio	= noop_dirty_folio,
};
EXPORT_SYMBOL(ram_aops);

/*
 * the inodes created here are not hashed. If you use iunique to generate
 * unique inode values later for this filesystem, then you must take care
 * to pass it an appropriate max_reserved value to avoid collisions.
 */
int simple_fill_super(struct super_block *s, unsigned long magic,
		      const struct tree_descr *files)
{
	struct inode *inode;
	struct dentry *dentry;
	int i;

	s->s_blocksize = PAGE_SIZE;
	s->s_blocksize_bits = PAGE_SHIFT;
	s->s_magic = magic;
	s->s_op = &simple_super_operations;
	s->s_time_gran = 1;

	inode = new_inode(s);
	if (!inode)
		return -ENOMEM;
	/*
	 * because the root inode is 1, the files array must not contain an
	 * entry at index 1
	 */
	inode->i_ino = 1;
	inode->i_mode = S_IFDIR | 0755;
	simple_inode_init_ts(inode);
	inode->i_op = &simple_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;
	set_nlink(inode, 2);
	s->s_root = d_make_root(inode);
	if (!s->s_root)
		return -ENOMEM;
	for (i = 0; !files->name || files->name[0]; i++, files++) {
		if (!files->name)
			continue;

		/* warn if it tries to conflict with the root inode */
		if (unlikely(i == 1))
			printk(KERN_WARNING "%s: %s passed in a files array "
			       "with an index of 1!\n", __func__,
			       s->s_type->name);

		dentry = d_alloc_name(s->s_root, files->name);
		if (!dentry)
			return -ENOMEM;
		inode = new_inode(s);
		if (!inode) {
			dput(dentry);
			return -ENOMEM;
		}
		inode->i_mode = S_IFREG | files->mode;
		simple_inode_init_ts(inode);
		inode->i_fop = files->ops;
		inode->i_ino = i;
		d_add(dentry, inode);
	}
	return 0;
}
EXPORT_SYMBOL(simple_fill_super);
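
/*
 * Illustrative sketch (not part of libfs, kept under #if 0):
 * simple_fill_super() consumes a tree_descr array terminated by an entry
 * whose name is ""; index 1 is reserved for the root inode, which is why
 * in-tree users (e.g. binfmt_misc) start at [2]. The names, magic value
 * and file_operations below are hypothetical.
 */
#if 0
static const struct tree_descr example_files[] = {
	[2] = { "status", &example_status_fops, 0444 },
	[3] = { "control", &example_ctl_fops, 0644 },
	/* last one */ { "" }
};

static int example_fill_super(struct super_block *sb, struct fs_context *fc)
{
	return simple_fill_super(sb, 0x4578616d /* hypothetical magic */,
				 example_files);
}
#endif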

static DEFINE_SPINLOCK(pin_fs_lock);

int simple_pin_fs(struct file_system_type *type, struct vfsmount **mount, int *count)
{
	struct vfsmount *mnt = NULL;
	spin_lock(&pin_fs_lock);
	if (unlikely(!*mount)) {
		spin_unlock(&pin_fs_lock);
		mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL);
		if (IS_ERR(mnt))
			return PTR_ERR(mnt);
		spin_lock(&pin_fs_lock);
		if (!*mount)
			*mount = mnt;
	}
	mntget(*mount);
	++*count;
	spin_unlock(&pin_fs_lock);
	mntput(mnt);
	return 0;
}
EXPORT_SYMBOL(simple_pin_fs);

void simple_release_fs(struct vfsmount **mount, int *count)
{
	struct vfsmount *mnt;
	spin_lock(&pin_fs_lock);
	mnt = *mount;
	if (!--*count)
		*mount = NULL;
	spin_unlock(&pin_fs_lock);
	mntput(mnt);
}
EXPORT_SYMBOL(simple_release_fs);

/**
 * simple_read_from_buffer - copy data from the buffer to user space
 * @to: the user space buffer to read to
 * @count: the maximum number of bytes to read
 * @ppos: the current position in the buffer
 * @from: the buffer to read from
 * @available: the size of the buffer
 *
 * The simple_read_from_buffer() function reads up to @count bytes from the
 * buffer @from at offset @ppos into the user space address starting at @to.
 *
 * On success, the number of bytes read is returned and the offset @ppos is
 * advanced by this number, or negative value is returned on error.
 **/
ssize_t simple_read_from_buffer(void __user *to, size_t count, loff_t *ppos,
				const void *from, size_t available)
{
	loff_t pos = *ppos;
	size_t ret;

	if (pos < 0)
		return -EINVAL;
	if (pos >= available || !count)
		return 0;
	if (count > available - pos)
		count = available - pos;
	ret = copy_to_user(to, from + pos, count);
	if (ret == count)
		return -EFAULT;
	count -= ret;
	*ppos = pos + count;
	return count;
}
EXPORT_SYMBOL(simple_read_from_buffer);

/**
 * simple_write_to_buffer - copy data from user space to the buffer
 * @to: the buffer to write to
 * @available: the size of the buffer
 * @ppos: the current position in the buffer
 * @from: the user space buffer to read from
 * @count: the maximum number of bytes to read
 *
 * The simple_write_to_buffer() function reads up to @count bytes from the user
 * space address starting at @from into the buffer @to at offset @ppos.
 *
 * On success, the number of bytes written is returned and the offset @ppos is
 * advanced by this number, or negative value is returned on error.
 **/
ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
			       const void __user *from, size_t count)
{
	loff_t pos = *ppos;
	size_t res;

	if (pos < 0)
		return -EINVAL;
	if (pos >= available || !count)
		return 0;
	if (count > available - pos)
		count = available - pos;
	res = copy_from_user(to + pos, from, count);
	if (res == count)
		return -EFAULT;
	count -= res;
	*ppos = pos + count;
	return count;
}
EXPORT_SYMBOL(simple_write_to_buffer);
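
/*
 * Illustrative sketch (not part of libfs, kept under #if 0): the usual way
 * a small pseudo file implements ->read on top of simple_read_from_buffer(),
 * as countless debugfs/procfs-style files do. The names and message are
 * hypothetical.
 */
#if 0
static ssize_t example_status_read(struct file *file, char __user *buf,
				   size_t count, loff_t *ppos)
{
	static const char msg[] = "example: everything is fine\n";

	/* handles offset bookkeeping, short reads and -EFAULT for us */
	return simple_read_from_buffer(buf, count, ppos, msg, sizeof(msg) - 1);
}

static const struct file_operations example_status_fops = {
	.read	= example_status_read,
	.llseek	= default_llseek,
};
#endif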

/**
 * memory_read_from_buffer - copy data from the buffer
 * @to: the kernel space buffer to read to
 * @count: the maximum number of bytes to read
 * @ppos: the current position in the buffer
 * @from: the buffer to read from
 * @available: the size of the buffer
 *
 * The memory_read_from_buffer() function reads up to @count bytes from the
 * buffer @from at offset @ppos into the kernel space address starting at @to.
 *
 * On success, the number of bytes read is returned and the offset @ppos is
 * advanced by this number, or negative value is returned on error.
 **/
ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
				const void *from, size_t available)
{
	loff_t pos = *ppos;

	if (pos < 0)
		return -EINVAL;
	if (pos >= available)
		return 0;
	if (count > available - pos)
		count = available - pos;
	memcpy(to, from + pos, count);
	*ppos = pos + count;

	return count;
}
EXPORT_SYMBOL(memory_read_from_buffer);

/*
 * Transaction based IO.
 * The file expects a single write which triggers the transaction, and then
 * possibly a read which collects the result - which is stored in a
 * file-local buffer.
 */

void simple_transaction_set(struct file *file, size_t n)
{
	struct simple_transaction_argresp *ar = file->private_data;

	BUG_ON(n > SIMPLE_TRANSACTION_LIMIT);

	/*
	 * The barrier ensures that ar->size will really remain zero until
	 * ar->data is ready for reading.
	 */
	smp_mb();
	ar->size = n;
}
EXPORT_SYMBOL(simple_transaction_set);

char *simple_transaction_get(struct file *file, const char __user *buf, size_t size)
{
	struct simple_transaction_argresp *ar;
	static DEFINE_SPINLOCK(simple_transaction_lock);

	if (size > SIMPLE_TRANSACTION_LIMIT - 1)
		return ERR_PTR(-EFBIG);

	ar = (struct simple_transaction_argresp *)get_zeroed_page(GFP_KERNEL);
	if (!ar)
		return ERR_PTR(-ENOMEM);

	spin_lock(&simple_transaction_lock);

	/* only one write allowed per open */
	if (file->private_data) {
		spin_unlock(&simple_transaction_lock);
		free_page((unsigned long)ar);
		return ERR_PTR(-EBUSY);
	}

	file->private_data = ar;

	spin_unlock(&simple_transaction_lock);

	if (copy_from_user(ar->data, buf, size))
		return ERR_PTR(-EFAULT);

	return ar->data;
}
EXPORT_SYMBOL(simple_transaction_get);

ssize_t simple_transaction_read(struct file *file, char __user *buf, size_t size, loff_t *pos)
{
	struct simple_transaction_argresp *ar = file->private_data;

	if (!ar)
		return 0;
	return simple_read_from_buffer(buf, size, pos, ar->data, ar->size);
}
EXPORT_SYMBOL(simple_transaction_read);

int simple_transaction_release(struct inode *inode, struct file *file)
{
	free_page((unsigned long)file->private_data);
	return 0;
}
EXPORT_SYMBOL(simple_transaction_release);

/* Simple attribute files */

struct simple_attr {
	int (*get)(void *, u64 *);
	int (*set)(void *, u64);
	char get_buf[24];	/* enough to store a u64 and "\n\0" */
	char set_buf[24];
	void *data;
	const char *fmt;	/* format for read operation */
	struct mutex mutex;	/* protects access to these buffers */
};

/* simple_attr_open is called by an actual attribute open file operation
 * to set the attribute specific access operations. */
int simple_attr_open(struct inode *inode, struct file *file,
		     int (*get)(void *, u64 *), int (*set)(void *, u64),
		     const char *fmt)
{
	struct simple_attr *attr;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->get = get;
	attr->set = set;
	attr->data = inode->i_private;
	attr->fmt = fmt;
	mutex_init(&attr->mutex);

	file->private_data = attr;

	return nonseekable_open(inode, file);
}
EXPORT_SYMBOL_GPL(simple_attr_open);

int simple_attr_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}
EXPORT_SYMBOL_GPL(simple_attr_release);	/* GPL-only?  This?  Really? */

/* read from the buffer that is filled with the get function */
ssize_t simple_attr_read(struct file *file, char __user *buf,
			 size_t len, loff_t *ppos)
{
	struct simple_attr *attr;
	size_t size;
	ssize_t ret;

	attr = file->private_data;

	if (!attr->get)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	if (*ppos && attr->get_buf[0]) {
		/* continued read */
		size = strlen(attr->get_buf);
	} else {
		/* first read */
		u64 val;
		ret = attr->get(attr->data, &val);
		if (ret)
			goto out;

		size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
				 attr->fmt, (unsigned long long)val);
	}

	ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(simple_attr_read);

/* interpret the buffer as a number to call the set function with */
static ssize_t simple_attr_write_xsigned(struct file *file, const char __user *buf,
					 size_t len, loff_t *ppos, bool is_signed)
{
	struct simple_attr *attr;
	unsigned long long val;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->set)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	ret = -EFAULT;
	size = min(sizeof(attr->set_buf) - 1, len);
	if (copy_from_user(attr->set_buf, buf, size))
		goto out;

	attr->set_buf[size] = '\0';
	if (is_signed)
		ret = kstrtoll(attr->set_buf, 0, &val);
	else
		ret = kstrtoull(attr->set_buf, 0, &val);
	if (ret)
		goto out;
	ret = attr->set(attr->data, val);
	if (ret == 0)
		ret = len; /* on success, claim we got the whole input */
out:
	mutex_unlock(&attr->mutex);
	return ret;
}

ssize_t simple_attr_write(struct file *file, const char __user *buf,
			  size_t len, loff_t *ppos)
{
	return simple_attr_write_xsigned(file, buf, len, ppos, false);
}
EXPORT_SYMBOL_GPL(simple_attr_write);

ssize_t simple_attr_write_signed(struct file *file, const char __user *buf,
				 size_t len, loff_t *ppos)
{
	return simple_attr_write_xsigned(file, buf, len, ppos, true);
}
EXPORT_SYMBOL_GPL(simple_attr_write_signed);
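
/*
 * Illustrative sketch (not part of libfs, kept under #if 0): most callers
 * reach the simple_attr_* helpers through the DEFINE_SIMPLE_ATTRIBUTE()
 * macro in <linux/fs.h>, debugfs being the classic user. The example_*
 * names are hypothetical.
 */
#if 0
static int example_delay_get(void *data, u64 *val)
{
	*val = *(u64 *)data;
	return 0;
}

static int example_delay_set(void *data, u64 val)
{
	*(u64 *)data = val;
	return 0;
}

/* generates file_operations wired to simple_attr_{open,read,write,release} */
DEFINE_SIMPLE_ATTRIBUTE(example_delay_fops, example_delay_get,
			example_delay_set, "%llu\n");
#endif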

/**
 * generic_encode_ino32_fh - generic export_operations->encode_fh function
 * @inode:   the object to encode
 * @fh:      where to store the file handle fragment
 * @max_len: maximum length to store there (in 4 byte units)
 * @parent:  parent directory inode, if wanted
 *
 * This generic encode_fh function assumes that the 32 bit inode number
 * is suitable for locating an inode, and that the generation number
 * can be used to check that it is still valid. It places them in the
 * filehandle fragment where export_decode_fh expects to find them.
 */
int generic_encode_ino32_fh(struct inode *inode, __u32 *fh, int *max_len,
			    struct inode *parent)
{
	struct fid *fid = (void *)fh;
	int len = *max_len;
	int type = FILEID_INO32_GEN;

	if (parent && (len < 4)) {
		*max_len = 4;
		return FILEID_INVALID;
	} else if (len < 2) {
		*max_len = 2;
		return FILEID_INVALID;
	}

	len = 2;
	fid->i32.ino = inode->i_ino;
	fid->i32.gen = inode->i_generation;
	if (parent) {
		fid->i32.parent_ino = parent->i_ino;
		fid->i32.parent_gen = parent->i_generation;
		len = 4;
		type = FILEID_INO32_GEN_PARENT;
	}
	*max_len = len;
	return type;
}
EXPORT_SYMBOL_GPL(generic_encode_ino32_fh);

/**
 * generic_fh_to_dentry - generic helper for the fh_to_dentry export operation
 * @sb:		filesystem to do the file handle conversion on
 * @fid:	file handle to convert
 * @fh_len:	length of the file handle in bytes
 * @fh_type:	type of file handle
 * @get_inode:	filesystem callback to retrieve inode
 *
 * This function decodes @fid as long as it has one of the well-known
 * Linux filehandle types and calls @get_inode on it to retrieve the
 * inode for the object specified in the file handle.
 */
struct dentry *generic_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type, struct inode *(*get_inode)
			(struct super_block *sb, u64 ino, u32 gen))
{
	struct inode *inode = NULL;

	if (fh_len < 2)
		return NULL;

	switch (fh_type) {
	case FILEID_INO32_GEN:
	case FILEID_INO32_GEN_PARENT:
		inode = get_inode(sb, fid->i32.ino, fid->i32.gen);
		break;
	}

	return d_obtain_alias(inode);
}
EXPORT_SYMBOL_GPL(generic_fh_to_dentry);

/**
 * generic_fh_to_parent - generic helper for the fh_to_parent export operation
 * @sb:		filesystem to do the file handle conversion on
 * @fid:	file handle to convert
 * @fh_len:	length of the file handle in bytes
 * @fh_type:	type of file handle
 * @get_inode:	filesystem callback to retrieve inode
 *
 * This function decodes @fid as long as it has one of the well-known
 * Linux filehandle types and calls @get_inode on it to retrieve the
 * inode for the _parent_ object specified in the file handle if it
 * is specified in the file handle, or NULL otherwise.
 */
struct dentry *generic_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type, struct inode *(*get_inode)
			(struct super_block *sb, u64 ino, u32 gen))
{
	struct inode *inode = NULL;

	if (fh_len <= 2)
		return NULL;

	switch (fh_type) {
	case FILEID_INO32_GEN_PARENT:
		inode = get_inode(sb, fid->i32.parent_ino,
				  (fh_len > 3 ? fid->i32.parent_gen : 0));
		break;
	}

	return d_obtain_alias(inode);
}
EXPORT_SYMBOL_GPL(generic_fh_to_parent);
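
/*
 * Illustrative sketch (not part of libfs, kept under #if 0): a filesystem
 * that can look up inodes by (ino, generation) can support NFS export by
 * wiring the three helpers above into its export_operations.
 * example_nfs_get_inode() and example_iget() are hypothetical per-fs
 * callbacks.
 */
#if 0
static struct inode *example_nfs_get_inode(struct super_block *sb,
					   u64 ino, u32 gen)
{
	struct inode *inode = example_iget(sb, ino);	/* fs-specific lookup */

	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (gen && inode->i_generation != gen) {
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;
}

static struct dentry *example_fh_to_dentry(struct super_block *sb,
					   struct fid *fid, int fh_len,
					   int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    example_nfs_get_inode);
}

static const struct export_operations example_export_ops = {
	.encode_fh	= generic_encode_ino32_fh,
	.fh_to_dentry	= example_fh_to_dentry,
};
#endif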

/**
 * __generic_file_fsync - generic fsync implementation for simple filesystems
 *
 * @file:	file to synchronize
 * @start:	start offset in bytes
 * @end:	end offset in bytes (inclusive)
 * @datasync:	only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure.
 */
int __generic_file_fsync(struct file *file, loff_t start, loff_t end,
			 int datasync)
{
	struct inode *inode = file->f_mapping->host;
	int err;
	int ret;

	err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	inode_lock(inode);
	ret = sync_mapping_buffers(inode->i_mapping);
	if (!(inode->i_state & I_DIRTY_ALL))
		goto out;
	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		goto out;

	err = sync_inode_metadata(inode, 1);
	if (ret == 0)
		ret = err;

out:
	inode_unlock(inode);
	/* check and advance again to catch errors after syncing out buffers */
	err = file_check_and_advance_wb_err(file);
	if (ret == 0)
		ret = err;
	return ret;
}
EXPORT_SYMBOL(__generic_file_fsync);

/**
 * generic_file_fsync - generic fsync implementation for simple filesystems
 *			with flush
 * @file:	file to synchronize
 * @start:	start offset in bytes
 * @end:	end offset in bytes (inclusive)
 * @datasync:	only synchronize essential metadata if true
 *
 */

int generic_file_fsync(struct file *file, loff_t start, loff_t end,
		       int datasync)
{
	struct inode *inode = file->f_mapping->host;
	int err;

	err = __generic_file_fsync(file, start, end, datasync);
	if (err)
		return err;
	return blkdev_issue_flush(inode->i_sb->s_bdev);
}
EXPORT_SYMBOL(generic_file_fsync);

/**
 * generic_check_addressable - Check addressability of file system
 * @blocksize_bits:	log of file system block size
 * @num_blocks:		number of blocks in file system
 *
 * Determine whether a file system with @num_blocks blocks (and a
 * block size of 2**@blocksize_bits) is addressable by the sector_t
 * and page cache of the system. Return 0 if so and -EFBIG otherwise.
 */
int generic_check_addressable(unsigned blocksize_bits, u64 num_blocks)
{
	u64 last_fs_block = num_blocks - 1;
	u64 last_fs_page =
		last_fs_block >> (PAGE_SHIFT - blocksize_bits);

	if (unlikely(num_blocks == 0))
		return 0;

	if ((blocksize_bits < 9) || (blocksize_bits > PAGE_SHIFT))
		return -EINVAL;

	if ((last_fs_block > (sector_t)(~0ULL) >> (blocksize_bits - 9)) ||
	    (last_fs_page > (pgoff_t)(~0ULL))) {
		return -EFBIG;
	}
	return 0;
}
EXPORT_SYMBOL(generic_check_addressable);

/*
 * No-op implementation of ->fsync for in-memory filesystems.
 */
int noop_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	return 0;
}
EXPORT_SYMBOL(noop_fsync);

ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	/*
	 * iomap based filesystems support direct I/O without need for
	 * this callback. However, it still needs to be set in
	 * inode->a_ops so that open/fcntl know that direct I/O is
	 * generally supported.
	 */
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(noop_direct_IO);

/* Because kfree isn't assignment-compatible with void(void*) ;-/ */
void kfree_link(void *p)
{
	kfree(p);
}
EXPORT_SYMBOL(kfree_link);

struct inode *alloc_anon_inode(struct super_block *s)
{
	static const struct address_space_operations anon_aops = {
		.dirty_folio	= noop_dirty_folio,
	};
	struct inode *inode = new_inode_pseudo(s);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	inode->i_ino = get_next_ino();
	inode->i_mapping->a_ops = &anon_aops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because mark_inode_dirty() will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_flags |= S_PRIVATE;
	simple_inode_init_ts(inode);
	return inode;
}
EXPORT_SYMBOL(alloc_anon_inode);

/**
 * simple_nosetlease - generic helper for prohibiting leases
 * @filp:	file pointer
 * @arg:	type of lease to obtain
 * @flp:	new lease supplied for insertion
 * @priv:	private data for lm_setup operation
 *
 * Generic helper for filesystems that do not wish to allow leases to be set.
 * All arguments are ignored and it just returns -EINVAL.
 */
int
simple_nosetlease(struct file *filp, int arg, struct file_lock **flp,
		  void **priv)
{
	return -EINVAL;
}
EXPORT_SYMBOL(simple_nosetlease);

/**
 * simple_get_link - generic helper to get the target of "fast" symlinks
 * @dentry: not used here
 * @inode: the symlink inode
 * @done: not used here
 *
 * Generic helper for filesystems to use for symlink inodes where a pointer to
 * the symlink target is stored in ->i_link. NOTE: this isn't normally called,
 * since as an optimization the path lookup code uses any non-NULL ->i_link
 * directly, without calling ->get_link(). But ->get_link() still must be set,
 * to mark the inode_operations as being for a symlink.
 *
 * Return: the symlink target
 */
const char *simple_get_link(struct dentry *dentry, struct inode *inode,
			    struct delayed_call *done)
{
	return inode->i_link;
}
EXPORT_SYMBOL(simple_get_link);

const struct inode_operations simple_symlink_inode_operations = {
	.get_link = simple_get_link,
};
EXPORT_SYMBOL(simple_symlink_inode_operations);
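
/*
 * Illustrative sketch (not part of libfs, kept under #if 0): an in-memory
 * filesystem makes a "fast" symlink by pointing ->i_link at the target
 * string and using simple_symlink_inode_operations. The helper name is
 * hypothetical; the eviction path must free ->i_link (e.g. via kfree_link
 * or a plain kfree).
 */
#if 0
static int example_init_symlink(struct inode *inode, const char *target)
{
	inode->i_link = kstrdup(target, GFP_KERNEL);
	if (!inode->i_link)
		return -ENOMEM;
	inode->i_mode = S_IFLNK | 0777;
	inode->i_op = &simple_symlink_inode_operations;
	return 0;
}
#endif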
fbabfd0f
EB
1616
1617/*
1618 * Operations for a permanently empty directory.
1619 */
1620static struct dentry *empty_dir_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
1621{
1622 return ERR_PTR(-ENOENT);
1623}
1624
b74d24f7 1625static int empty_dir_getattr(struct mnt_idmap *idmap,
549c7297 1626 const struct path *path, struct kstat *stat,
a528d35e 1627 u32 request_mask, unsigned int query_flags)
fbabfd0f 1628{
a528d35e 1629 struct inode *inode = d_inode(path->dentry);
0d72b928 1630 generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
fbabfd0f
EB
1631 return 0;
1632}
1633
c1632a0f 1634static int empty_dir_setattr(struct mnt_idmap *idmap,
549c7297 1635 struct dentry *dentry, struct iattr *attr)
fbabfd0f
EB
1636{
1637 return -EPERM;
1638}
1639
fbabfd0f
EB
1640static ssize_t empty_dir_listxattr(struct dentry *dentry, char *list, size_t size)
1641{
1642 return -EOPNOTSUPP;
1643}
1644
1645static const struct inode_operations empty_dir_inode_operations = {
1646 .lookup = empty_dir_lookup,
1647 .permission = generic_permission,
1648 .setattr = empty_dir_setattr,
1649 .getattr = empty_dir_getattr,
fbabfd0f
EB
1650 .listxattr = empty_dir_listxattr,
1651};
1652
1653static loff_t empty_dir_llseek(struct file *file, loff_t offset, int whence)
1654{
1655 /* An empty directory has two entries . and .. at offsets 0 and 1 */
1656 return generic_file_llseek_size(file, offset, whence, 2, 2);
1657}
1658
1659static int empty_dir_readdir(struct file *file, struct dir_context *ctx)
1660{
1661 dir_emit_dots(file, ctx);
1662 return 0;
1663}
1664
1665static const struct file_operations empty_dir_operations = {
1666 .llseek = empty_dir_llseek,
1667 .read = generic_read_dir,
c51da20c 1668 .iterate_shared = empty_dir_readdir,
fbabfd0f
EB
1669 .fsync = noop_fsync,
1670};
1671
1672
1673void make_empty_dir_inode(struct inode *inode)
1674{
1675 set_nlink(inode, 2);
1676 inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
1677 inode->i_uid = GLOBAL_ROOT_UID;
1678 inode->i_gid = GLOBAL_ROOT_GID;
1679 inode->i_rdev = 0;
4b75de86 1680 inode->i_size = 0;
fbabfd0f
EB
1681 inode->i_blkbits = PAGE_SHIFT;
1682 inode->i_blocks = 0;
1683
1684 inode->i_op = &empty_dir_inode_operations;
f5c24438 1685 inode->i_opflags &= ~IOP_XATTR;
fbabfd0f
EB
1686 inode->i_fop = &empty_dir_operations;
1687}
1688
1689bool is_empty_dir_inode(struct inode *inode)
1690{
1691 return (inode->i_fop == &empty_dir_operations) &&
1692 (inode->i_op == &empty_dir_inode_operations);
1693}
c843843e 1694
5298d4bf 1695#if IS_ENABLED(CONFIG_UNICODE)
c843843e
DR
1696/**
1697 * generic_ci_d_compare - generic d_compare implementation for casefolding filesystems
1698 * @dentry: dentry whose name we are checking against
1699 * @len: len of name of dentry
1700 * @str: str pointer to name of dentry
1701 * @name: Name to compare against
1702 *
1703 * Return: 0 if names match, 1 if mismatch, or -ERRNO
1704 */
794c43f7
EB
1705static int generic_ci_d_compare(const struct dentry *dentry, unsigned int len,
1706 const char *str, const struct qstr *name)
c843843e
DR
1707{
1708 const struct dentry *parent = READ_ONCE(dentry->d_parent);
1709 const struct inode *dir = READ_ONCE(parent->d_inode);
1710 const struct super_block *sb = dentry->d_sb;
1711 const struct unicode_map *um = sb->s_encoding;
1712 struct qstr qstr = QSTR_INIT(str, len);
1713 char strbuf[DNAME_INLINE_LEN];
1714 int ret;
1715
af494af3 1716 if (!dir || !IS_CASEFOLDED(dir))
c843843e
DR
1717 goto fallback;
1718 /*
1719 * If the dentry name is stored in-line, then it may be concurrently
1720 * modified by a rename. If this happens, the VFS will eventually retry
1721 * the lookup, so it doesn't matter what ->d_compare() returns.
1722 * However, it's unsafe to call utf8_strncasecmp() with an unstable
1723 * string. Therefore, we have to copy the name into a temporary buffer.
1724 */
1725 if (len <= DNAME_INLINE_LEN - 1) {
1726 memcpy(strbuf, str, len);
1727 strbuf[len] = 0;
1728 qstr.name = strbuf;
1729 /* prevent compiler from optimizing out the temporary buffer */
1730 barrier();
1731 }
1732 ret = utf8_strncasecmp(um, name, &qstr);
1733 if (ret >= 0)
1734 return ret;
1735
1736 if (sb_has_strict_encoding(sb))
1737 return -EINVAL;
1738fallback:
1739 if (len != name->len)
1740 return 1;
1741 return !!memcmp(str, name->name, len);
1742}
c843843e
DR
1743
1744/**
1745 * generic_ci_d_hash - generic d_hash implementation for casefolding filesystems
1746 * @dentry: dentry of the parent directory
1747 * @str: qstr of name whose hash we should fill in
1748 *
1749 * Return: 0 if hash was successful or unchanged, and -EINVAL on error
1750 */
794c43f7 1751static int generic_ci_d_hash(const struct dentry *dentry, struct qstr *str)
c843843e
DR
1752{
1753 const struct inode *dir = READ_ONCE(dentry->d_inode);
1754 struct super_block *sb = dentry->d_sb;
1755 const struct unicode_map *um = sb->s_encoding;
1756 int ret = 0;
1757
af494af3 1758 if (!dir || !IS_CASEFOLDED(dir))
c843843e
DR
1759 return 0;
1760
1761 ret = utf8_casefold_hash(um, dentry, str);
1762 if (ret < 0 && sb_has_strict_encoding(sb))
1763 return -EINVAL;
1764 return 0;
1765}
608af703
DR
1766
1767static const struct dentry_operations generic_ci_dentry_ops = {
1768 .d_hash = generic_ci_d_hash,
1769 .d_compare = generic_ci_d_compare,
1770};
1771#endif
1772
1773#ifdef CONFIG_FS_ENCRYPTION
1774static const struct dentry_operations generic_encrypted_dentry_ops = {
1775 .d_revalidate = fscrypt_d_revalidate,
1776};
1777#endif
1778
5298d4bf 1779#if defined(CONFIG_FS_ENCRYPTION) && IS_ENABLED(CONFIG_UNICODE)
608af703
DR
1780static const struct dentry_operations generic_encrypted_ci_dentry_ops = {
1781 .d_hash = generic_ci_d_hash,
1782 .d_compare = generic_ci_d_compare,
1783 .d_revalidate = fscrypt_d_revalidate,
1784};
1785#endif
1786
1787/**
1788 * generic_set_encrypted_ci_d_ops - helper for setting d_ops for given dentry
1789 * @dentry: dentry to set ops on
1790 *
1791 * Casefolded directories need d_hash and d_compare set, so that the dentries
1792 * contained in them are handled case-insensitively. Note that these operations
1793 * are needed on the parent directory rather than on the dentries in it, and
1794 * while the casefolding flag can be toggled on and off on an empty directory,
1795 * dentry_operations can't be changed later. As a result, if the filesystem has
1796 * casefolding support enabled at all, we have to give all dentries the
1797 * casefolding operations even if their inode doesn't have the casefolding flag
1798 * currently (and thus the casefolding ops would be no-ops for now).
1799 *
1800 * Encryption works differently: the only dentry operation it needs is
1801 * d_revalidate, and only on dentries that have the no-key name flag.
1802 * The no-key flag can't be set "later", so we don't have to worry about that.
1803 *
1804 * Finally, to maximize compatibility with overlayfs (which isn't compatible
1805 * with certain dentry operations) and to avoid taking an unnecessary
1806 * performance hit, we use custom dentry_operations for each possible
1807 * combination rather than always installing all operations.
1808 */
1809void generic_set_encrypted_ci_d_ops(struct dentry *dentry)
1810{
1811#ifdef CONFIG_FS_ENCRYPTION
1812 bool needs_encrypt_ops = dentry->d_flags & DCACHE_NOKEY_NAME;
1813#endif
5298d4bf 1814#if IS_ENABLED(CONFIG_UNICODE)
608af703
DR
1815 bool needs_ci_ops = dentry->d_sb->s_encoding;
1816#endif
5298d4bf 1817#if defined(CONFIG_FS_ENCRYPTION) && IS_ENABLED(CONFIG_UNICODE)
608af703
DR
1818 if (needs_encrypt_ops && needs_ci_ops) {
1819 d_set_d_op(dentry, &generic_encrypted_ci_dentry_ops);
1820 return;
1821 }
c843843e 1822#endif
608af703
DR
1823#ifdef CONFIG_FS_ENCRYPTION
1824 if (needs_encrypt_ops) {
1825 d_set_d_op(dentry, &generic_encrypted_dentry_ops);
1826 return;
1827 }
1828#endif
5298d4bf 1829#if IS_ENABLED(CONFIG_UNICODE)
608af703
DR
1830 if (needs_ci_ops) {
1831 d_set_d_op(dentry, &generic_ci_dentry_ops);
1832 return;
1833 }
1834#endif
1835}
1836EXPORT_SYMBOL(generic_set_encrypted_ci_d_ops);
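
/*
 * Illustrative sketch, not part of libfs.c: a filesystem supporting
 * casefolding and/or fscrypt calls the helper at the top of its ->lookup(),
 * before the dentry is hashed, so the matching d_hash/d_compare/d_revalidate
 * combination above gets installed. example_lookup() is a hypothetical name;
 * the directory search itself is elided.
 */
static struct dentry *example_lookup(struct inode *dir, struct dentry *dentry,
				     unsigned int flags)
{
	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	generic_set_encrypted_ci_d_ops(dentry);

	/* ... fs-specific directory search would provide the inode here ... */
	return d_splice_alias(NULL, dentry);	/* NULL keeps the sketch self-contained */
}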
5ca14835
AM
1837
1838/**
1839 * inode_maybe_inc_iversion - increments i_version
1840 * @inode: inode with the i_version that should be updated
1841 * @force: increment the counter even if it's not necessary?
1842 *
1843 * Every time the inode is modified, the i_version field must be seen to have
1844 * changed by any observer.
1845 *
1846 * If "force" is set or the QUERIED flag is set, then ensure that we increment
1847 * the value, and clear the queried flag.
1848 *
1849 * In the common case where neither is set, we can return "false" without
1850 * updating i_version.
1851 *
1852 * If this function returns false, and no other metadata has changed, then we
1853 * can avoid logging the metadata.
1854 */
1855bool inode_maybe_inc_iversion(struct inode *inode, bool force)
1856{
1857 u64 cur, new;
1858
1859 /*
1860 * The i_version field is not strictly ordered with any other inode
1861 * information, but the legacy inode_inc_iversion code used a spinlock
1862 * to serialize increments.
1863 *
1864 * Here, we add full memory barriers to ensure that any de-facto
1865 * ordering with other info is preserved.
1866 *
1867 * This barrier pairs with the barrier in inode_query_iversion()
1868 */
1869 smp_mb();
1870 cur = inode_peek_iversion_raw(inode);
1871 do {
1872 /* If flag is clear then we needn't do anything */
1873 if (!force && !(cur & I_VERSION_QUERIED))
1874 return false;
1875
1876 /* Since lowest bit is flag, add 2 to avoid it */
1877 new = (cur & ~I_VERSION_QUERIED) + I_VERSION_INCREMENT;
1878 } while (!atomic64_try_cmpxchg(&inode->i_version, &cur, new));
1879 return true;
1880}
1881EXPORT_SYMBOL(inode_maybe_inc_iversion);
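
/*
 * Illustrative sketch, not part of libfs.c: a write path that has nothing to
 * log besides i_version can use the return value to skip dirtying the inode
 * when no observer has queried the counter. example_post_write() is a
 * hypothetical name.
 */
static void example_post_write(struct inode *inode)
{
	/*
	 * Only a queried (or forced) counter needs to reach stable storage;
	 * when this returns false and no other metadata changed, the update
	 * can be skipped entirely.
	 */
	if (inode_maybe_inc_iversion(inode, false))
		mark_inode_dirty_sync(inode);
}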
c5bc1b3f
JL
1882
1883/**
1884 * inode_query_iversion - read i_version for later use
1885 * @inode: inode from which i_version should be read
1886 *
1887 * Read the inode i_version counter. This should be used by callers that wish
1888 * to store the returned i_version for later comparison. This will guarantee
1889 * that a later query of the i_version will result in a different value if
1890 * anything has changed.
1891 *
1892 * In this implementation, we fetch the current value, set the QUERIED flag and
1893 * then try to swap it into place with a cmpxchg, if it wasn't already set. If
1894 * that fails, we try again with the newly fetched value from the cmpxchg.
1895 */
1896u64 inode_query_iversion(struct inode *inode)
1897{
1898 u64 cur, new;
1899
1900 cur = inode_peek_iversion_raw(inode);
1901 do {
1902 /* If flag is already set, then no need to swap */
1903 if (cur & I_VERSION_QUERIED) {
1904 /*
1905 * This barrier (and the implicit barrier in the
1906 * cmpxchg below) pairs with the barrier in
1907 * inode_maybe_inc_iversion().
1908 */
1909 smp_mb();
1910 break;
1911 }
1912
1913 new = cur | I_VERSION_QUERIED;
1914 } while (!atomic64_try_cmpxchg(&inode->i_version, &cur, new));
1915 return cur >> I_VERSION_QUERIED_SHIFT;
1916}
1917EXPORT_SYMBOL(inode_query_iversion);
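
/*
 * Illustrative sketch, not part of libfs.c: callers that want to detect later
 * changes must go through inode_query_iversion() (as generic_fillattr() does
 * for STATX_CHANGE_COOKIE) so the QUERIED flag forces the next modification
 * to bump the counter. example_fill_change_cookie() is a hypothetical name.
 */
static void example_fill_change_cookie(struct inode *inode, struct kstat *stat)
{
	if (IS_I_VERSION(inode)) {
		stat->change_cookie = inode_query_iversion(inode);
		stat->result_mask |= STATX_CHANGE_COOKIE;
	}
}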
44fff0fa
CH
1918
1919ssize_t direct_write_fallback(struct kiocb *iocb, struct iov_iter *iter,
1920 ssize_t direct_written, ssize_t buffered_written)
1921{
1922 struct address_space *mapping = iocb->ki_filp->f_mapping;
1923 loff_t pos = iocb->ki_pos - buffered_written;
1924 loff_t end = iocb->ki_pos - 1;
1925 int err;
1926
1927 /*
1928 * If the buffered write fallback returned an error, we want to return
1929 * the number of bytes which were written by direct I/O, or the error
1930 * code if that was zero.
1931 *
1932 * Note that this differs from normal direct-io semantics, which will
1933 * return -EFOO even if some bytes were written.
1934 */
1935 if (unlikely(buffered_written < 0)) {
1936 if (direct_written)
1937 return direct_written;
1938 return buffered_written;
1939 }
1940
1941 /*
1942 * We need to ensure that the page cache pages are written to disk and
1943 * invalidated to preserve the expected O_DIRECT semantics.
1944 */
1945 err = filemap_write_and_wait_range(mapping, pos, end);
1946 if (err < 0) {
1947 /*
1948 * We don't know how much we wrote, so just return the number of
1949 * bytes which were direct-written.
1950 */
8287474a 1951 iocb->ki_pos -= buffered_written;
44fff0fa
CH
1952 if (direct_written)
1953 return direct_written;
1954 return err;
1955 }
1956 invalidate_mapping_pages(mapping, pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
1957 return direct_written + buffered_written;
1958}
1959EXPORT_SYMBOL_GPL(direct_write_fallback);
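
/*
 * Illustrative sketch, not part of libfs.c: a ->write_iter() that tries
 * direct I/O first and completes any remainder through the page cache, then
 * lets direct_write_fallback() reconcile the two byte counts and uphold the
 * O_DIRECT cache-invalidation rules. The example_* helpers are hypothetical
 * stand-ins for fs-specific code.
 */
static ssize_t example_direct_write(struct kiocb *iocb, struct iov_iter *from);
static ssize_t example_buffered_write(struct kiocb *iocb, struct iov_iter *from);

static ssize_t example_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t direct_written, buffered_written;

	direct_written = example_direct_write(iocb, from);
	if (direct_written < 0 || !iov_iter_count(from))
		return direct_written;

	buffered_written = example_buffered_write(iocb, from);
	return direct_write_fallback(iocb, from, direct_written,
				     buffered_written);
}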
077c212f
JL
1960
1961/**
1962 * simple_inode_init_ts - initialize the timestamps for a new inode
1963 * @inode: inode to be initialized
1964 *
1965 * When a new inode is created, most filesystems set the timestamps to the
1966 * current time. Add a helper to do this.
1967 */
1968struct timespec64 simple_inode_init_ts(struct inode *inode)
1969{
1970 struct timespec64 ts = inode_set_ctime_current(inode);
1971
1972 inode_set_atime_to_ts(inode, ts);
1973 inode_set_mtime_to_ts(inode, ts);
1974 return ts;
1975}
1976EXPORT_SYMBOL(simple_inode_init_ts);
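
/*
 * Illustrative sketch, not part of libfs.c: a small in-memory filesystem
 * creating an inode stamps atime, mtime and ctime in one call.
 * example_new_inode() is a hypothetical name.
 */
static struct inode *example_new_inode(struct super_block *sb, umode_t mode)
{
	struct inode *inode = new_inode(sb);

	if (!inode)
		return NULL;
	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	simple_inode_init_ts(inode);
	return inode;
}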
07fd7c32
CB
1977
1978static inline struct dentry *get_stashed_dentry(struct dentry *stashed)
1979{
1980 struct dentry *dentry;
1981
1982 guard(rcu)();
1983 dentry = READ_ONCE(stashed);
1984 if (!dentry)
1985 return NULL;
1986 if (!lockref_get_not_dead(&dentry->d_lockref))
1987 return NULL;
1988 return dentry;
1989}
1990
2558e3b2
CB
1991static struct dentry *prepare_anon_dentry(struct dentry **stashed,
1992 unsigned long ino,
159a0d9f
CB
1993 struct super_block *sb,
1994 const struct file_operations *fops,
1995 const struct inode_operations *iops,
1996 void *data)
07fd7c32
CB
1997{
1998 struct dentry *dentry;
1999 struct inode *inode;
2000
2001 dentry = d_alloc_anon(sb);
2002 if (!dentry)
2003 return ERR_PTR(-ENOMEM);
2004
2005 inode = new_inode_pseudo(sb);
2006 if (!inode) {
2007 dput(dentry);
2008 return ERR_PTR(-ENOMEM);
2009 }
2010
2011 inode->i_ino = ino;
2012 inode->i_flags |= S_IMMUTABLE;
b28ddcc3
CB
2013 if (is_pidfs_sb(sb))
2014 inode->i_flags |= S_PRIVATE;
07fd7c32 2015 inode->i_mode = S_IFREG | S_IRUGO;
b28ddcc3
CB
2016 if (iops)
2017 inode->i_op = iops;
2018 if (fops)
2019 inode->i_fop = fops;
07fd7c32
CB
2020 inode->i_private = data;
2021 simple_inode_init_ts(inode);
2022
2558e3b2
CB
2023 /* Store address of location where dentry's supposed to be stashed. */
2024 dentry->d_fsdata = stashed;
2025
07fd7c32
CB
2026 /* @data is now owned by the fs */
2027 d_instantiate(dentry, inode);
159a0d9f
CB
2028 return dentry;
2029}
07fd7c32 2030
159a0d9f
CB
2031static struct dentry *stash_dentry(struct dentry **stashed,
2032 struct dentry *dentry)
2033{
2034 guard(rcu)();
2035 for (;;) {
2036 struct dentry *old;
07fd7c32 2037
159a0d9f
CB
2038 /* Assume any old dentry was cleared out. */
2039 old = cmpxchg(stashed, NULL, dentry);
2040 if (likely(!old))
2041 return dentry;
2042
2043 /* Check if somebody else installed a reusable dentry. */
2044 if (lockref_get_not_dead(&old->d_lockref))
2045 return old;
2046
2047 /* There's an old dead dentry there, try to take it over. */
2048 if (likely(try_cmpxchg(stashed, &old, dentry)))
2049 return dentry;
2050 }
07fd7c32
CB
2051}
2052
2053/**
2054 * path_from_stashed - create path from stashed or new dentry
2055 * @stashed: where to retrieve or stash dentry
2056 * @ino: inode number to use
2057 * @mnt: mnt of the filesystems to use
b28ddcc3 2058 * @iops: inode operations to use
07fd7c32
CB
2059 * @fops: file operations to use
2060 * @data: data to store in inode->i_private
2061 * @path: path to create
2062 *
2063 * The function tries to retrieve a stashed dentry from @stashed. If the dentry
2064 * is still valid then it will be reused. If it cannot be reused, the function
159a0d9f
CB
2065 * will allocate a new dentry and inode. It will then check again whether it
2066 * can reuse an existing dentry in case one has been added in the meantime or
2067 * update @stashed with the newly added dentry.
07fd7c32
CB
2068 *
2069 * Special-purpose helper for nsfs and pidfs.
2070 *
2071 * Return: If 0 or an error is returned, the caller can be sure that @data must
159a0d9f 2072 * be cleaned up. If 1 is returned, @data is owned by the filesystem.
07fd7c32
CB
2073 */
2074int path_from_stashed(struct dentry **stashed, unsigned long ino,
2075 struct vfsmount *mnt, const struct file_operations *fops,
b28ddcc3
CB
2076 const struct inode_operations *iops, void *data,
2077 struct path *path)
07fd7c32
CB
2078{
2079 struct dentry *dentry;
2080 int ret = 0;
2081
159a0d9f
CB
2082 /* See if dentry can be reused. */
2083 path->dentry = get_stashed_dentry(*stashed);
2084 if (path->dentry)
07fd7c32
CB
2085 goto out_path;
2086
159a0d9f 2087 /* Allocate a new dentry. */
2558e3b2 2088 dentry = prepare_anon_dentry(stashed, ino, mnt->mnt_sb, fops, iops, data);
07fd7c32
CB
2089 if (IS_ERR(dentry))
2090 return PTR_ERR(dentry);
159a0d9f
CB
2091
2092 /* Added a new dentry. @data is now owned by the filesystem. */
2093 path->dentry = stash_dentry(stashed, dentry);
2094 if (path->dentry != dentry)
2095 dput(dentry);
07fd7c32
CB
2096 ret = 1;
2097
2098out_path:
2558e3b2
CB
2099 WARN_ON_ONCE(path->dentry->d_fsdata != stashed);
2100 WARN_ON_ONCE(d_inode(path->dentry)->i_private != data);
07fd7c32
CB
2101 path->mnt = mntget(mnt);
2102 return ret;
2103}
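
/*
 * Illustrative sketch, not part of libfs.c: roughly how an nsfs/pidfs-style
 * caller drives the helper. The stash slot lives in the object being exposed;
 * struct example_obj, example_fops and example_put() are hypothetical.
 */
struct example_obj {
	struct dentry *stashed;		/* slot read/written by path_from_stashed() */
	unsigned long ino;
};

static const struct file_operations example_fops = { };
static void example_put(struct example_obj *obj);	/* drop the caller's reference */

static int example_open_stashed(struct vfsmount *mnt, struct example_obj *obj,
				struct path *path)
{
	int ret;

	ret = path_from_stashed(&obj->stashed, obj->ino, mnt, &example_fops,
				NULL, obj, path);
	if (ret <= 0) {
		/* Reused an existing dentry or failed: @obj was not consumed. */
		example_put(obj);
		return ret;
	}
	/* ret == 1: a new dentry was instantiated, @obj is now owned by the fs. */
	return 0;
}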
2558e3b2
CB
2104
2105void stashed_dentry_prune(struct dentry *dentry)
2106{
2107 struct dentry **stashed = dentry->d_fsdata;
2108 struct inode *inode = d_inode(dentry);
2109
2110 if (WARN_ON_ONCE(!stashed))
2111 return;
2112
2113 if (!inode)
2114 return;
2115
2116 /*
2117 * Only replace our own @dentry as someone else might've
2118 * already cleared out @dentry and stashed their own
2119 * dentry in there.
2120 */
2121 cmpxchg(stashed, dentry, NULL);
2122}
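
/*
 * Illustrative sketch, not part of libfs.c: a stashing filesystem wires the
 * prune hook into its dentry_operations so a dying dentry clears itself out
 * of the stash slot instead of leaving a stale pointer behind.
 * example_stashed_dentry_ops is a hypothetical name.
 */
static const struct dentry_operations example_stashed_dentry_ops = {
	.d_prune	= stashed_dentry_prune,
	.d_delete	= always_delete_dentry,
};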