/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"
#include "dev_uring_i.h"

#include <linux/dax.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/statfs.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/pid_namespace.h>
#include <uapi/linux/magic.h>
MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
MODULE_DESCRIPTION("Filesystem in Userspace");
MODULE_LICENSE("GPL");
static struct kmem_cache *fuse_inode_cachep;
struct list_head fuse_conn_list;
DEFINE_MUTEX(fuse_mutex);

static int set_global_limit(const char *val, const struct kernel_param *kp);

unsigned int fuse_max_pages_limit = 256;
/* default is no timeout */
unsigned int fuse_default_req_timeout;
unsigned int fuse_max_req_timeout;

unsigned int max_user_bgreq;
module_param_call(max_user_bgreq, set_global_limit, param_get_uint,
		  &max_user_bgreq, 0644);
__MODULE_PARM_TYPE(max_user_bgreq, "uint");
MODULE_PARM_DESC(max_user_bgreq,
 "Global limit for the maximum number of backgrounded requests an "
 "unprivileged user can set");

unsigned int max_user_congthresh;
module_param_call(max_user_congthresh, set_global_limit, param_get_uint,
		  &max_user_congthresh, 0644);
__MODULE_PARM_TYPE(max_user_congthresh, "uint");
MODULE_PARM_DESC(max_user_congthresh,
 "Global limit for the maximum congestion threshold an "
 "unprivileged user can set");

#define FUSE_DEFAULT_BLKSIZE 512

/** Maximum number of outstanding background requests */
#define FUSE_DEFAULT_MAX_BACKGROUND 12

/** Congestion starts at 75% of maximum */
#define FUSE_DEFAULT_CONGESTION_THRESHOLD (FUSE_DEFAULT_MAX_BACKGROUND * 3 / 4)

static struct file_system_type fuseblk_fs_type;
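
/*
 * Allocate a forget link used to queue a FORGET request for a nodeid
 * once the kernel drops its last reference to the inode.
 */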
struct fuse_forget_link *fuse_alloc_forget(void)
{
	return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL_ACCOUNT);
}
static struct fuse_submount_lookup *fuse_alloc_submount_lookup(void)
{
	struct fuse_submount_lookup *sl;

	sl = kzalloc(sizeof(struct fuse_submount_lookup), GFP_KERNEL_ACCOUNT);
	if (!sl)
		return NULL;
	sl->forget = fuse_alloc_forget();
	if (!sl->forget)
		goto out_free;

	return sl;

out_free:
	kfree(sl);
	return NULL;
}
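
/* Allocate and initialize a fuse_inode from the dedicated slab cache. */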
static struct inode *fuse_alloc_inode(struct super_block *sb)
{
	struct fuse_inode *fi;

	fi = alloc_inode_sb(sb, fuse_inode_cachep, GFP_KERNEL);
	if (!fi)
		return NULL;

	fi->i_time = 0;
	fi->inval_mask = ~0;
	fi->nodeid = 0;
	fi->nlookup = 0;
	fi->attr_version = 0;
	fi->orig_ino = 0;
	fi->state = 0;
	fi->submount_lookup = NULL;
	mutex_init(&fi->mutex);
	spin_lock_init(&fi->lock);
	fi->forget = fuse_alloc_forget();
	if (!fi->forget)
		goto out_free;

	if (IS_ENABLED(CONFIG_FUSE_DAX) && !fuse_dax_inode_alloc(sb, fi))
		goto out_free_forget;

	if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
		fuse_inode_backing_set(fi, NULL);

	return &fi->inode;

out_free_forget:
	kfree(fi->forget);
out_free:
	kmem_cache_free(fuse_inode_cachep, fi);
	return NULL;
}
static void fuse_free_inode(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	mutex_destroy(&fi->mutex);
	kfree(fi->forget);
#ifdef CONFIG_FUSE_DAX
	kfree(fi->dax);
#endif
	if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
		fuse_backing_put(fuse_inode_backing(fi));

	kmem_cache_free(fuse_inode_cachep, fi);
}
static void fuse_cleanup_submount_lookup(struct fuse_conn *fc,
					 struct fuse_submount_lookup *sl)
{
	if (!refcount_dec_and_test(&sl->count))
		return;

	fuse_queue_forget(fc, sl->forget, sl->nodeid, 1);
	sl->forget = NULL;
	kfree(sl);
}
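
/*
 * Called by the VFS when an inode is evicted: drop cached pages and,
 * while the superblock is still active, queue FORGET requests so the
 * server can release its references to the node.
 */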
static void fuse_evict_inode(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	/* Will write inode on close/munmap and in all other dirtiers */
	WARN_ON(inode->i_state & I_DIRTY_INODE);

	if (FUSE_IS_DAX(inode))
		dax_break_layout_final(inode);

	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);
	if (inode->i_sb->s_flags & SB_ACTIVE) {
		struct fuse_conn *fc = get_fuse_conn(inode);

		if (FUSE_IS_DAX(inode))
			fuse_dax_inode_cleanup(inode);
		if (fi->nlookup) {
			fuse_queue_forget(fc, fi->forget, fi->nodeid,
					  fi->nlookup);
			fi->forget = NULL;
		}

		if (fi->submount_lookup) {
			fuse_cleanup_submount_lookup(fc, fi->submount_lookup);
			fi->submount_lookup = NULL;
		}
		/*
		 * Evict of non-deleted inode may race with outstanding
		 * LOOKUP/READDIRPLUS requests and result in inconsistency when
		 * the request finishes.  Deal with that here by bumping a
		 * counter that can be compared to the starting value.
		 */
		if (inode->i_nlink > 0)
			atomic64_inc(&fc->evict_ctr);
	}
	if (S_ISREG(inode->i_mode) && !fuse_is_bad(inode)) {
		WARN_ON(fi->iocachectr != 0);
		WARN_ON(!list_empty(&fi->write_files));
		WARN_ON(!list_empty(&fi->queued_writes));
	}
}
static int fuse_reconfigure(struct fs_context *fsc)
{
	struct super_block *sb = fsc->root->d_sb;

	sync_filesystem(sb);
	if (fsc->sb_flags & SB_MANDLOCK)
		return -EINVAL;

	return 0;
}
/*
 * ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down
 * so that it will fit.
 */
static ino_t fuse_squash_ino(u64 ino64)
{
	ino_t ino = (ino_t) ino64;
	if (sizeof(ino_t) < sizeof(u64))
		ino ^= ino64 >> (sizeof(u64) - sizeof(ino_t)) * 8;
	return ino;
}
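
/*
 * Update the cached attributes from a reply's fuse_attr.  Caller must
 * hold fi->lock; cache_mask marks attributes whose locally cached
 * values take precedence over the server-supplied ones.
 */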
void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
				   struct fuse_statx *sx,
				   u64 attr_valid, u32 cache_mask,
				   u64 evict_ctr)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	lockdep_assert_held(&fi->lock);

	/*
	 * Clear basic stats from invalid mask.
	 *
	 * Don't do this if this is coming from a fuse_iget() call and there
	 * might have been a racing evict which would've invalidated the result
	 * if the attr_version would've been preserved.
	 *
	 * !evict_ctr -> this is create
	 * fi->attr_version != 0 -> this is not a new inode
	 * evict_ctr == fuse_get_evict_ctr() -> no evicts while during request
	 */
	if (!evict_ctr || fi->attr_version || evict_ctr == fuse_get_evict_ctr(fc))
		set_mask_bits(&fi->inval_mask, STATX_BASIC_STATS, 0);

	fi->attr_version = atomic64_inc_return(&fc->attr_version);
	fi->i_time = attr_valid;

	inode->i_ino     = fuse_squash_ino(attr->ino);
	inode->i_mode    = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
	set_nlink(inode, attr->nlink);
	inode->i_uid     = make_kuid(fc->user_ns, attr->uid);
	inode->i_gid     = make_kgid(fc->user_ns, attr->gid);
	inode->i_blocks  = attr->blocks;

	/* Sanitize nsecs */
	attr->atimensec = min_t(u32, attr->atimensec, NSEC_PER_SEC - 1);
	attr->mtimensec = min_t(u32, attr->mtimensec, NSEC_PER_SEC - 1);
	attr->ctimensec = min_t(u32, attr->ctimensec, NSEC_PER_SEC - 1);

	inode_set_atime(inode, attr->atime, attr->atimensec);
	/* mtime from server may be stale due to local buffered write */
	if (!(cache_mask & STATX_MTIME)) {
		inode_set_mtime(inode, attr->mtime, attr->mtimensec);
	}
	if (!(cache_mask & STATX_CTIME)) {
		inode_set_ctime(inode, attr->ctime, attr->ctimensec);
	}
	if (sx) {
		/* Sanitize nsecs */
		sx->btime.tv_nsec =
			min_t(u32, sx->btime.tv_nsec, NSEC_PER_SEC - 1);

		/*
		 * Btime has been queried, cache is valid (whether or not btime
		 * is available) so clear STATX_BTIME from inval_mask.
		 *
		 * Availability of the btime attribute is indicated in
		 * FUSE_I_BTIME
		 */
		set_mask_bits(&fi->inval_mask, STATX_BTIME, 0);
		if (sx->mask & STATX_BTIME) {
			set_bit(FUSE_I_BTIME, &fi->state);
			fi->i_btime.tv_sec = sx->btime.tv_sec;
			fi->i_btime.tv_nsec = sx->btime.tv_nsec;
		}
	}

	if (attr->blksize != 0)
		inode->i_blkbits = ilog2(attr->blksize);
	else
		inode->i_blkbits = inode->i_sb->s_blocksize_bits;

	/*
	 * Don't set the sticky bit in i_mode, unless we want the VFS
	 * to check permissions.  This prevents failures due to the
	 * check in may_delete().
	 */
	fi->orig_i_mode = inode->i_mode;
	if (!fc->default_permissions)
		inode->i_mode &= ~S_ISVTX;

	fi->orig_ino = attr->ino;

	/*
	 * We are refreshing inode data and it is possible that another
	 * client set suid/sgid or security.capability xattr. So clear
	 * S_NOSEC. Ideally, we could have cleared it only if suid/sgid
	 * was set or if security.capability xattr was set. But we don't
	 * know if security.capability has been set or not. So clear it
	 * anyway. It's less efficient but should be safe.
	 */
	inode->i_flags &= ~S_NOSEC;
}
u32 fuse_get_cache_mask(struct inode *inode)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (!fc->writeback_cache || !S_ISREG(inode->i_mode))
		return 0;

	return STATX_MTIME | STATX_CTIME | STATX_SIZE;
}
static void fuse_change_attributes_i(struct inode *inode, struct fuse_attr *attr,
				     struct fuse_statx *sx, u64 attr_valid,
				     u64 attr_version, u64 evict_ctr)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	u32 cache_mask;
	loff_t oldsize;
	struct timespec64 old_mtime;

	spin_lock(&fi->lock);
	/*
	 * In case of writeback_cache enabled, writes update mtime, ctime and
	 * may update i_size.  In these cases trust the cached value in the
	 * inode.
	 */
	cache_mask = fuse_get_cache_mask(inode);
	if (cache_mask & STATX_SIZE)
		attr->size = i_size_read(inode);

	if (cache_mask & STATX_MTIME) {
		attr->mtime = inode_get_mtime_sec(inode);
		attr->mtimensec = inode_get_mtime_nsec(inode);
	}
	if (cache_mask & STATX_CTIME) {
		attr->ctime = inode_get_ctime_sec(inode);
		attr->ctimensec = inode_get_ctime_nsec(inode);
	}

	if ((attr_version != 0 && fi->attr_version > attr_version) ||
	    test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
		spin_unlock(&fi->lock);
		return;
	}

	old_mtime = inode_get_mtime(inode);
	fuse_change_attributes_common(inode, attr, sx, attr_valid, cache_mask,
				      evict_ctr);

	oldsize = inode->i_size;
	/*
	 * In case of writeback_cache enabled, the cached writes beyond EOF
	 * extend local i_size without keeping userspace server in sync. So,
	 * attr->size coming from server can be stale. We cannot trust it.
	 */
	if (!(cache_mask & STATX_SIZE))
		i_size_write(inode, attr->size);
	spin_unlock(&fi->lock);

	if (!cache_mask && S_ISREG(inode->i_mode)) {
		bool inval = false;

		if (oldsize != attr->size) {
			truncate_pagecache(inode, attr->size);
			if (!fc->explicit_inval_data)
				inval = true;
		} else if (fc->auto_inval_data) {
			struct timespec64 new_mtime = {
				.tv_sec = attr->mtime,
				.tv_nsec = attr->mtimensec,
			};

			/*
			 * Auto inval mode also checks and invalidates if mtime
			 * has changed.
			 */
			if (!timespec64_equal(&old_mtime, &new_mtime))
				inval = true;
		}

		if (inval)
			invalidate_inode_pages2(inode->i_mapping);
	}

	if (IS_ENABLED(CONFIG_FUSE_DAX))
		fuse_dax_dontcache(inode, attr->flags);
}
void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
			    struct fuse_statx *sx, u64 attr_valid,
			    u64 attr_version)
{
	fuse_change_attributes_i(inode, attr, sx, attr_valid, attr_version, 0);
}
static void fuse_init_submount_lookup(struct fuse_submount_lookup *sl,
				      u64 nodeid)
{
	sl->nodeid = nodeid;
	refcount_set(&sl->count, 1);
}
static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr,
			    struct fuse_conn *fc)
{
	inode->i_mode = attr->mode & S_IFMT;
	inode->i_size = attr->size;
	inode_set_mtime(inode, attr->mtime, attr->mtimensec);
	inode_set_ctime(inode, attr->ctime, attr->ctimensec);
	if (S_ISREG(inode->i_mode)) {
		fuse_init_common(inode);
		fuse_init_file_inode(inode, attr->flags);
	} else if (S_ISDIR(inode->i_mode))
		fuse_init_dir(inode);
	else if (S_ISLNK(inode->i_mode))
		fuse_init_symlink(inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
		 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		fuse_init_common(inode);
		init_special_inode(inode, inode->i_mode,
				   new_decode_dev(attr->rdev));
	} else
		BUG();
	/*
	 * Ensure that we don't cache acls for daemons without FUSE_POSIX_ACL
	 * so they see the exact same behavior as before.
	 */
	if (!fc->posix_acl)
		inode->i_acl = inode->i_default_acl = ACL_DONT_CACHE;
}
static int fuse_inode_eq(struct inode *inode, void *_nodeidp)
{
	u64 nodeid = *(u64 *) _nodeidp;
	if (get_node_id(inode) == nodeid)
		return 1;
	else
		return 0;
}
static int fuse_inode_set(struct inode *inode, void *_nodeidp)
{
	u64 nodeid = *(u64 *) _nodeidp;
	get_fuse_inode(inode)->nodeid = nodeid;
	return 0;
}
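
/*
 * Look up or create the inode for a nodeid, initializing it on first
 * use and refreshing the cached attributes.  Each successful call bumps
 * nlookup, which must eventually be balanced by a FORGET.
 */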
struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
			int generation, struct fuse_attr *attr,
			u64 attr_valid, u64 attr_version,
			u64 evict_ctr)
{
	struct inode *inode;
	struct fuse_inode *fi;
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	/*
	 * Auto mount points get their node id from the submount root, which is
	 * not a unique identifier within this filesystem.
	 *
	 * To avoid conflicts, do not place submount points into the inode hash
	 * table.
	 */
	if (fc->auto_submounts && (attr->flags & FUSE_ATTR_SUBMOUNT) &&
	    S_ISDIR(attr->mode)) {
		struct fuse_inode *fi;

		inode = new_inode(sb);
		if (!inode)
			return NULL;

		fuse_init_inode(inode, attr, fc);
		fi = get_fuse_inode(inode);
		fi->nodeid = nodeid;
		fi->submount_lookup = fuse_alloc_submount_lookup();
		if (!fi->submount_lookup) {
			iput(inode);
			return NULL;
		}
		/* Sets nlookup = 1 on fi->submount_lookup->nlookup */
		fuse_init_submount_lookup(fi->submount_lookup, nodeid);
		inode->i_flags |= S_AUTOMOUNT;
		goto done;
	}

retry:
	inode = iget5_locked(sb, nodeid, fuse_inode_eq, fuse_inode_set, &nodeid);
	if (!inode)
		return NULL;

	if ((inode->i_state & I_NEW)) {
		inode->i_flags |= S_NOATIME;
		if (!fc->writeback_cache || !S_ISREG(attr->mode))
			inode->i_flags |= S_NOCMTIME;
		inode->i_generation = generation;
		fuse_init_inode(inode, attr, fc);
		unlock_new_inode(inode);
	} else if (fuse_stale_inode(inode, generation, attr)) {
		/* nodeid was reused, any I/O on the old inode should fail */
		fuse_make_bad(inode);
		if (inode != d_inode(sb->s_root)) {
			remove_inode_hash(inode);
			iput(inode);
			goto retry;
		}
	}
	fi = get_fuse_inode(inode);
	spin_lock(&fi->lock);
	fi->nlookup++;
	spin_unlock(&fi->lock);
done:
	fuse_change_attributes_i(inode, attr, NULL, attr_valid, attr_version,
				 evict_ctr);
	return inode;
}
struct inode *fuse_ilookup(struct fuse_conn *fc, u64 nodeid,
			   struct fuse_mount **fm)
{
	struct fuse_mount *fm_iter;
	struct inode *inode;

	WARN_ON(!rwsem_is_locked(&fc->killsb));
	list_for_each_entry(fm_iter, &fc->mounts, fc_entry) {
		if (!fm_iter->sb)
			continue;

		inode = ilookup5(fm_iter->sb, nodeid, fuse_inode_eq, &nodeid);
		if (inode) {
			if (fm)
				*fm = fm_iter;
			return inode;
		}
	}

	return NULL;
}
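
/*
 * Server-initiated attribute/data invalidation for a nodeid.  A negative
 * offset invalidates attributes only; otherwise the given byte range of
 * the page cache is invalidated as well.
 */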
int fuse_reverse_inval_inode(struct fuse_conn *fc, u64 nodeid,
			     loff_t offset, loff_t len)
{
	struct fuse_inode *fi;
	struct inode *inode;
	pgoff_t pg_start;
	pgoff_t pg_end;

	inode = fuse_ilookup(fc, nodeid, NULL);
	if (!inode)
		return -ENOENT;

	fi = get_fuse_inode(inode);
	spin_lock(&fi->lock);
	fi->attr_version = atomic64_inc_return(&fc->attr_version);
	spin_unlock(&fi->lock);

	fuse_invalidate_attr(inode);
	forget_all_cached_acls(inode);
	if (offset >= 0) {
		pg_start = offset >> PAGE_SHIFT;
		if (len <= 0)
			pg_end = -1;
		else
			pg_end = (offset + len - 1) >> PAGE_SHIFT;
		invalidate_inode_pages2_range(inode->i_mapping,
					      pg_start, pg_end);
	}
	iput(inode);
	return 0;
}
bool fuse_lock_inode(struct inode *inode)
{
	bool locked = false;

	if (!get_fuse_conn(inode)->parallel_dirops) {
		mutex_lock(&get_fuse_inode(inode)->mutex);
		locked = true;
	}

	return locked;
}

void fuse_unlock_inode(struct inode *inode, bool locked)
{
	if (locked)
		mutex_unlock(&get_fuse_inode(inode)->mutex);
}
static void fuse_umount_begin(struct super_block *sb)
{
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	if (fc->no_force_umount)
		return;

	fuse_abort_conn(fc);

	// Only retire block-device-based superblocks.
	if (sb->s_bdev != NULL)
		retire_super(sb);
}
static void fuse_send_destroy(struct fuse_mount *fm)
{
	if (fm->fc->conn_init) {
		FUSE_ARGS(args);

		args.opcode = FUSE_DESTROY;
		args.force = true;
		args.nocreds = true;
		fuse_simple_request(fm, &args);
	}
}
static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr)
{
	stbuf->f_type    = FUSE_SUPER_MAGIC;
	stbuf->f_bsize   = attr->bsize;
	stbuf->f_frsize  = attr->frsize;
	stbuf->f_blocks  = attr->blocks;
	stbuf->f_bfree   = attr->bfree;
	stbuf->f_bavail  = attr->bavail;
	stbuf->f_files   = attr->files;
	stbuf->f_ffree   = attr->ffree;
	stbuf->f_namelen = attr->namelen;
	/* fsid is left zero */
}
static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	FUSE_ARGS(args);
	struct fuse_statfs_out outarg;
	int err;

	if (!fuse_allow_current_process(fm->fc)) {
		buf->f_type = FUSE_SUPER_MAGIC;
		return 0;
	}

	memset(&outarg, 0, sizeof(outarg));
	args.in_numargs = 0;
	args.opcode = FUSE_STATFS;
	args.nodeid = get_node_id(d_inode(dentry));
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);
	if (!err)
		convert_fuse_statfs(buf, &outarg.st);
	return err;
}
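
/*
 * Sync buckets count in-flight writes.  syncfs swaps in a fresh bucket
 * and waits for the old bucket's count to drain to zero, guaranteeing
 * that all writes issued before the syncfs have been processed.
 */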
static struct fuse_sync_bucket *fuse_sync_bucket_alloc(void)
{
	struct fuse_sync_bucket *bucket;

	bucket = kzalloc(sizeof(*bucket), GFP_KERNEL | __GFP_NOFAIL);
	if (bucket) {
		init_waitqueue_head(&bucket->waitq);
		/* Initial active count */
		atomic_set(&bucket->count, 1);
	}
	return bucket;
}
static void fuse_sync_fs_writes(struct fuse_conn *fc)
{
	struct fuse_sync_bucket *bucket, *new_bucket;
	int count;

	new_bucket = fuse_sync_bucket_alloc();
	spin_lock(&fc->lock);
	bucket = rcu_dereference_protected(fc->curr_bucket, 1);
	count = atomic_read(&bucket->count);
	WARN_ON(count < 1);
	/* No outstanding writes? */
	if (count == 1) {
		spin_unlock(&fc->lock);
		kfree(new_bucket);
		return;
	}

	/*
	 * Completion of new bucket depends on completion of this bucket, so add
	 * one more count.
	 */
	atomic_inc(&new_bucket->count);
	rcu_assign_pointer(fc->curr_bucket, new_bucket);
	spin_unlock(&fc->lock);
	/*
	 * Drop initial active count.  At this point if all writes in this and
	 * ancestor buckets complete, the count will go to zero and this task
	 * will be woken up.
	 */
	atomic_dec(&bucket->count);

	wait_event(bucket->waitq, atomic_read(&bucket->count) == 0);

	/* Drop temp count on descendant bucket */
	fuse_sync_bucket_dec(new_bucket);
	kfree_rcu(bucket, rcu);
}
static int fuse_sync_fs(struct super_block *sb, int wait)
{
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	struct fuse_conn *fc = fm->fc;
	struct fuse_syncfs_in inarg;
	FUSE_ARGS(args);
	int err;

	/*
	 * Userspace cannot handle the wait == 0 case.  Avoid a
	 * gratuitous roundtrip.
	 */
	if (!wait)
		return 0;

	/* The filesystem is being unmounted.  Nothing to do. */
	if (!sb->s_root)
		return 0;

	if (!fc->sync_fs)
		return 0;

	fuse_sync_fs_writes(fc);

	memset(&inarg, 0, sizeof(inarg));
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.opcode = FUSE_SYNCFS;
	args.nodeid = get_node_id(sb->s_root->d_inode);
	args.out_numargs = 0;

	err = fuse_simple_request(fm, &args);
	if (err == -ENOSYS) {
		fc->sync_fs = 0;
		err = 0;
	}

	return err;
}
enum {
	OPT_SOURCE,
	OPT_SUBTYPE,
	OPT_FD,
	OPT_ROOTMODE,
	OPT_USER_ID,
	OPT_GROUP_ID,
	OPT_DEFAULT_PERMISSIONS,
	OPT_ALLOW_OTHER,
	OPT_MAX_READ,
	OPT_BLKSIZE,
	OPT_ERR
};

static const struct fs_parameter_spec fuse_fs_parameters[] = {
	fsparam_string	("source",		OPT_SOURCE),
	fsparam_u32	("fd",			OPT_FD),
	fsparam_u32oct	("rootmode",		OPT_ROOTMODE),
	fsparam_uid	("user_id",		OPT_USER_ID),
	fsparam_gid	("group_id",		OPT_GROUP_ID),
	fsparam_flag	("default_permissions",	OPT_DEFAULT_PERMISSIONS),
	fsparam_flag	("allow_other",		OPT_ALLOW_OTHER),
	fsparam_u32	("max_read",		OPT_MAX_READ),
	fsparam_u32	("blksize",		OPT_BLKSIZE),
	fsparam_string	("subtype",		OPT_SUBTYPE),
	{}
};
static int fuse_parse_param(struct fs_context *fsc, struct fs_parameter *param)
{
	struct fs_parse_result result;
	struct fuse_fs_context *ctx = fsc->fs_private;
	int opt;
	kuid_t kuid;
	kgid_t kgid;

	if (fsc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
		/*
		 * Ignore options coming from mount(MS_REMOUNT) for backward
		 * compatibility.
		 */
		if (fsc->oldapi)
			return 0;

		return invalfc(fsc, "No changes allowed in reconfigure");
	}

	opt = fs_parse(fsc, fuse_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case OPT_SOURCE:
		if (fsc->source)
			return invalfc(fsc, "Multiple sources specified");
		fsc->source = param->string;
		param->string = NULL;
		break;

	case OPT_SUBTYPE:
		if (ctx->subtype)
			return invalfc(fsc, "Multiple subtypes specified");
		ctx->subtype = param->string;
		param->string = NULL;
		break;

	case OPT_FD:
		ctx->fd = result.uint_32;
		ctx->fd_present = true;
		break;

	case OPT_ROOTMODE:
		if (!fuse_valid_type(result.uint_32))
			return invalfc(fsc, "Invalid rootmode");
		ctx->rootmode = result.uint_32;
		ctx->rootmode_present = true;
		break;

	case OPT_USER_ID:
		kuid = result.uid;
		/*
		 * The requested uid must be representable in the
		 * filesystem's idmapping.
		 */
		if (!kuid_has_mapping(fsc->user_ns, kuid))
			return invalfc(fsc, "Invalid user_id");
		ctx->user_id = kuid;
		ctx->user_id_present = true;
		break;

	case OPT_GROUP_ID:
		kgid = result.gid;
		/*
		 * The requested gid must be representable in the
		 * filesystem's idmapping.
		 */
		if (!kgid_has_mapping(fsc->user_ns, kgid))
			return invalfc(fsc, "Invalid group_id");
		ctx->group_id = kgid;
		ctx->group_id_present = true;
		break;

	case OPT_DEFAULT_PERMISSIONS:
		ctx->default_permissions = true;
		break;

	case OPT_ALLOW_OTHER:
		ctx->allow_other = true;
		break;

	case OPT_MAX_READ:
		ctx->max_read = result.uint_32;
		break;

	case OPT_BLKSIZE:
		if (!ctx->is_bdev)
			return invalfc(fsc, "blksize only supported for fuseblk");
		ctx->blksize = result.uint_32;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
static void fuse_free_fsc(struct fs_context *fsc)
{
	struct fuse_fs_context *ctx = fsc->fs_private;

	if (ctx) {
		kfree(ctx->subtype);
		kfree(ctx);
	}
}
static int fuse_show_options(struct seq_file *m, struct dentry *root)
{
	struct super_block *sb = root->d_sb;
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	if (fc->legacy_opts_show) {
		seq_printf(m, ",user_id=%u",
			   from_kuid_munged(fc->user_ns, fc->user_id));
		seq_printf(m, ",group_id=%u",
			   from_kgid_munged(fc->user_ns, fc->group_id));
		if (fc->default_permissions)
			seq_puts(m, ",default_permissions");
		if (fc->allow_other)
			seq_puts(m, ",allow_other");
		if (fc->max_read != ~0)
			seq_printf(m, ",max_read=%u", fc->max_read);
		if (sb->s_bdev && sb->s_blocksize != FUSE_DEFAULT_BLKSIZE)
			seq_printf(m, ",blksize=%lu", sb->s_blocksize);
	}
#ifdef CONFIG_FUSE_DAX
	if (fc->dax_mode == FUSE_DAX_ALWAYS)
		seq_puts(m, ",dax=always");
	else if (fc->dax_mode == FUSE_DAX_NEVER)
		seq_puts(m, ",dax=never");
	else if (fc->dax_mode == FUSE_DAX_INODE_USER)
		seq_puts(m, ",dax=inode");
#endif

	return 0;
}
static void fuse_iqueue_init(struct fuse_iqueue *fiq,
			     const struct fuse_iqueue_ops *ops,
			     void *priv)
{
	memset(fiq, 0, sizeof(struct fuse_iqueue));
	spin_lock_init(&fiq->lock);
	init_waitqueue_head(&fiq->waitq);
	INIT_LIST_HEAD(&fiq->pending);
	INIT_LIST_HEAD(&fiq->interrupts);
	fiq->forget_list_tail = &fiq->forget_list_head;
	fiq->connected = 1;
	fiq->ops = ops;
	fiq->priv = priv;
}
void fuse_pqueue_init(struct fuse_pqueue *fpq)
{
	unsigned int i;

	spin_lock_init(&fpq->lock);
	for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
		INIT_LIST_HEAD(&fpq->processing[i]);
	INIT_LIST_HEAD(&fpq->io);
	fpq->connected = 1;
}
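
/* Initialize a freshly allocated fuse_conn and attach the first mount. */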
void fuse_conn_init(struct fuse_conn *fc, struct fuse_mount *fm,
		    struct user_namespace *user_ns,
		    const struct fuse_iqueue_ops *fiq_ops, void *fiq_priv)
{
	memset(fc, 0, sizeof(*fc));
	spin_lock_init(&fc->lock);
	spin_lock_init(&fc->bg_lock);
	init_rwsem(&fc->killsb);
	refcount_set(&fc->count, 1);
	atomic_set(&fc->dev_count, 1);
	atomic_set(&fc->epoch, 1);
	init_waitqueue_head(&fc->blocked_waitq);
	fuse_iqueue_init(&fc->iq, fiq_ops, fiq_priv);
	INIT_LIST_HEAD(&fc->bg_queue);
	INIT_LIST_HEAD(&fc->entry);
	INIT_LIST_HEAD(&fc->devices);
	atomic_set(&fc->num_waiting, 0);
	fc->max_background = FUSE_DEFAULT_MAX_BACKGROUND;
	fc->congestion_threshold = FUSE_DEFAULT_CONGESTION_THRESHOLD;
	atomic64_set(&fc->khctr, 0);
	fc->polled_files = RB_ROOT;
	fc->blocked = 0;
	fc->initialized = 0;
	fc->connected = 1;
	atomic64_set(&fc->attr_version, 1);
	atomic64_set(&fc->evict_ctr, 1);
	get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
	fc->pid_ns = get_pid_ns(task_active_pid_ns(current));
	fc->user_ns = get_user_ns(user_ns);
	fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ;
	fc->max_pages_limit = fuse_max_pages_limit;
	fc->name_max = FUSE_NAME_LOW_MAX;
	fc->timeout.req_timeout = 0;

	if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
		fuse_backing_files_init(fc);

	INIT_LIST_HEAD(&fc->mounts);
	list_add(&fm->fc_entry, &fc->mounts);
	fm->fc = fc;
}
EXPORT_SYMBOL_GPL(fuse_conn_init);
static void delayed_release(struct rcu_head *p)
{
	struct fuse_conn *fc = container_of(p, struct fuse_conn, rcu);

	fuse_uring_destruct(fc);

	put_user_ns(fc->user_ns);
	fc->release(fc);
}
void fuse_conn_put(struct fuse_conn *fc)
{
	if (refcount_dec_and_test(&fc->count)) {
		struct fuse_iqueue *fiq = &fc->iq;
		struct fuse_sync_bucket *bucket;

		if (IS_ENABLED(CONFIG_FUSE_DAX))
			fuse_dax_conn_free(fc);
		if (fc->timeout.req_timeout)
			cancel_delayed_work_sync(&fc->timeout.work);
		if (fiq->ops->release)
			fiq->ops->release(fiq);
		put_pid_ns(fc->pid_ns);
		bucket = rcu_dereference_protected(fc->curr_bucket, 1);
		if (bucket) {
			WARN_ON(atomic_read(&bucket->count) != 1);
			kfree(bucket);
		}
		if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
			fuse_backing_files_free(fc);
		call_rcu(&fc->rcu, delayed_release);
	}
}
EXPORT_SYMBOL_GPL(fuse_conn_put);
struct fuse_conn *fuse_conn_get(struct fuse_conn *fc)
{
	refcount_inc(&fc->count);
	return fc;
}
EXPORT_SYMBOL_GPL(fuse_conn_get);
static struct inode *fuse_get_root_inode(struct super_block *sb, unsigned int mode)
{
	struct fuse_attr attr;
	memset(&attr, 0, sizeof(attr));

	attr.mode = mode;
	attr.ino = FUSE_ROOT_ID;
	attr.nlink = 1;
	return fuse_iget(sb, FUSE_ROOT_ID, 0, &attr, 0, 0, 0);
}

struct fuse_inode_handle {
	u64 nodeid;
	u32 generation;
};
static struct dentry *fuse_get_dentry(struct super_block *sb,
				      struct fuse_inode_handle *handle)
{
	struct fuse_conn *fc = get_fuse_conn_super(sb);
	struct inode *inode;
	struct dentry *entry;
	int err = -ESTALE;

	if (handle->nodeid == 0)
		goto out_err;

	inode = ilookup5(sb, handle->nodeid, fuse_inode_eq, &handle->nodeid);
	if (!inode) {
		struct fuse_entry_out outarg;
		const struct qstr name = QSTR_INIT(".", 1);

		if (!fc->export_support)
			goto out_err;

		err = fuse_lookup_name(sb, handle->nodeid, &name, &outarg,
				       &inode);
		if (err && err != -ENOENT)
			goto out_err;
		if (err || !inode) {
			err = -ESTALE;
			goto out_err;
		}
		err = -EIO;
		if (get_node_id(inode) != handle->nodeid)
			goto out_iput;
	}
	err = -ESTALE;
	if (inode->i_generation != handle->generation)
		goto out_iput;

	entry = d_obtain_alias(inode);
	if (!IS_ERR(entry) && get_node_id(inode) != FUSE_ROOT_ID)
		fuse_invalidate_entry_cache(entry);

	return entry;

 out_iput:
	iput(inode);
 out_err:
	return ERR_PTR(err);
}
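
/*
 * File handle layout: fh[0..1] carry the 64-bit nodeid (high word
 * first), fh[2] the generation; with a parent, fh[3..5] repeat the
 * same triple for the parent inode.
 */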
static int fuse_encode_fh(struct inode *inode, u32 *fh, int *max_len,
			  struct inode *parent)
{
	int len = parent ? 6 : 3;
	u64 nodeid;
	u32 generation;

	if (*max_len < len) {
		*max_len = len;
		return FILEID_INVALID;
	}

	nodeid = get_fuse_inode(inode)->nodeid;
	generation = inode->i_generation;

	fh[0] = (u32)(nodeid >> 32);
	fh[1] = (u32)(nodeid & 0xffffffff);
	fh[2] = generation;

	if (parent) {
		nodeid = get_fuse_inode(parent)->nodeid;
		generation = parent->i_generation;

		fh[3] = (u32)(nodeid >> 32);
		fh[4] = (u32)(nodeid & 0xffffffff);
		fh[5] = generation;
	}

	*max_len = len;
	return parent ? FILEID_INO64_GEN_PARENT : FILEID_INO64_GEN;
}
static struct dentry *fuse_fh_to_dentry(struct super_block *sb,
					struct fid *fid, int fh_len,
					int fh_type)
{
	struct fuse_inode_handle handle;

	if ((fh_type != FILEID_INO64_GEN &&
	     fh_type != FILEID_INO64_GEN_PARENT) || fh_len < 3)
		return NULL;

	handle.nodeid = (u64) fid->raw[0] << 32;
	handle.nodeid |= (u64) fid->raw[1];
	handle.generation = fid->raw[2];
	return fuse_get_dentry(sb, &handle);
}
static struct dentry *fuse_fh_to_parent(struct super_block *sb,
					struct fid *fid, int fh_len,
					int fh_type)
{
	struct fuse_inode_handle parent;

	if (fh_type != FILEID_INO64_GEN_PARENT || fh_len < 6)
		return NULL;

	parent.nodeid = (u64) fid->raw[3] << 32;
	parent.nodeid |= (u64) fid->raw[4];
	parent.generation = fid->raw[5];
	return fuse_get_dentry(sb, &parent);
}
static struct dentry *fuse_get_parent(struct dentry *child)
{
	struct inode *child_inode = d_inode(child);
	struct fuse_conn *fc = get_fuse_conn(child_inode);
	struct inode *inode;
	struct dentry *parent;
	struct fuse_entry_out outarg;
	int err;

	if (!fc->export_support)
		return ERR_PTR(-ESTALE);

	err = fuse_lookup_name(child_inode->i_sb, get_node_id(child_inode),
			       &dotdot_name, &outarg, &inode);
	if (err) {
		if (err == -ENOENT)
			return ERR_PTR(-ESTALE);
		return ERR_PTR(err);
	}

	parent = d_obtain_alias(inode);
	if (!IS_ERR(parent) && get_node_id(inode) != FUSE_ROOT_ID)
		fuse_invalidate_entry_cache(parent);

	return parent;
}
/* only for fid encoding; no support for file handle */
static const struct export_operations fuse_export_fid_operations = {
	.encode_fh	= fuse_encode_fh,
};

static const struct export_operations fuse_export_operations = {
	.fh_to_dentry	= fuse_fh_to_dentry,
	.fh_to_parent	= fuse_fh_to_parent,
	.encode_fh	= fuse_encode_fh,
	.get_parent	= fuse_get_parent,
};
static const struct super_operations fuse_super_operations = {
	.alloc_inode	= fuse_alloc_inode,
	.free_inode	= fuse_free_inode,
	.evict_inode	= fuse_evict_inode,
	.write_inode	= fuse_write_inode,
	.drop_inode	= generic_delete_inode,
	.umount_begin	= fuse_umount_begin,
	.statfs		= fuse_statfs,
	.sync_fs	= fuse_sync_fs,
	.show_options	= fuse_show_options,
};
static void sanitize_global_limit(unsigned int *limit)
{
	/*
	 * The default maximum number of async requests is calculated to consume
	 * 1/2^13 of the total memory, assuming 392 bytes per request.
	 */
	if (*limit == 0)
		*limit = ((totalram_pages() << PAGE_SHIFT) >> 13) / 392;

	if (*limit >= 1 << 16)
		*limit = (1 << 16) - 1;
}
static int set_global_limit(const char *val, const struct kernel_param *kp)
{
	int rv;

	rv = param_set_uint(val, kp);
	if (rv)
		return rv;

	sanitize_global_limit((unsigned int *)kp->arg);

	return 0;
}
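
/*
 * Apply the background request and congestion limits negotiated in
 * FUSE_INIT, clamping values from unprivileged servers to the global
 * module limits.
 */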
static void process_init_limits(struct fuse_conn *fc, struct fuse_init_out *arg)
{
	int cap_sys_admin = capable(CAP_SYS_ADMIN);

	if (arg->minor < 13)
		return;

	sanitize_global_limit(&max_user_bgreq);
	sanitize_global_limit(&max_user_congthresh);

	spin_lock(&fc->bg_lock);
	if (arg->max_background) {
		fc->max_background = arg->max_background;

		if (!cap_sys_admin && fc->max_background > max_user_bgreq)
			fc->max_background = max_user_bgreq;
	}
	if (arg->congestion_threshold) {
		fc->congestion_threshold = arg->congestion_threshold;

		if (!cap_sys_admin &&
		    fc->congestion_threshold > max_user_congthresh)
			fc->congestion_threshold = max_user_congthresh;
	}
	spin_unlock(&fc->bg_lock);
}
static void set_request_timeout(struct fuse_conn *fc, unsigned int timeout)
{
	fc->timeout.req_timeout = secs_to_jiffies(timeout);
	INIT_DELAYED_WORK(&fc->timeout.work, fuse_check_timeout);
	queue_delayed_work(system_wq, &fc->timeout.work,
			   fuse_timeout_timer_freq);
}
static void init_server_timeout(struct fuse_conn *fc, unsigned int timeout)
{
	if (!timeout && !fuse_max_req_timeout && !fuse_default_req_timeout)
		return;

	if (!timeout)
		timeout = fuse_default_req_timeout;

	if (fuse_max_req_timeout) {
		if (timeout)
			timeout = min(fuse_max_req_timeout, timeout);
		else
			timeout = fuse_max_req_timeout;
	}

	timeout = max(FUSE_TIMEOUT_TIMER_FREQ, timeout);

	set_request_timeout(fc, timeout);
}
struct fuse_init_args {
	struct fuse_args args;
	struct fuse_init_in in;
	struct fuse_init_out out;
};
static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
			       int error)
{
	struct fuse_conn *fc = fm->fc;
	struct fuse_init_args *ia = container_of(args, typeof(*ia), args);
	struct fuse_init_out *arg = &ia->out;
	bool ok = true;

	if (error || arg->major != FUSE_KERNEL_VERSION)
		ok = false;
	else {
		unsigned long ra_pages;
		unsigned int timeout = 0;

		process_init_limits(fc, arg);

		if (arg->minor >= 6) {
			u64 flags = arg->flags;

			if (flags & FUSE_INIT_EXT)
				flags |= (u64) arg->flags2 << 32;

			ra_pages = arg->max_readahead / PAGE_SIZE;
			if (flags & FUSE_ASYNC_READ)
				fc->async_read = 1;
			if (!(flags & FUSE_POSIX_LOCKS))
				fc->no_lock = 1;
			if (arg->minor >= 17) {
				if (!(flags & FUSE_FLOCK_LOCKS))
					fc->no_flock = 1;
			} else {
				if (!(flags & FUSE_POSIX_LOCKS))
					fc->no_flock = 1;
			}
			if (flags & FUSE_ATOMIC_O_TRUNC)
				fc->atomic_o_trunc = 1;
			if (arg->minor >= 9) {
				/* LOOKUP has dependency on proto version */
				if (flags & FUSE_EXPORT_SUPPORT)
					fc->export_support = 1;
			}
			if (flags & FUSE_BIG_WRITES)
				fc->big_writes = 1;
			if (flags & FUSE_DONT_MASK)
				fc->dont_mask = 1;
			if (flags & FUSE_AUTO_INVAL_DATA)
				fc->auto_inval_data = 1;
			else if (flags & FUSE_EXPLICIT_INVAL_DATA)
				fc->explicit_inval_data = 1;
			if (flags & FUSE_DO_READDIRPLUS) {
				fc->do_readdirplus = 1;
				if (flags & FUSE_READDIRPLUS_AUTO)
					fc->readdirplus_auto = 1;
			}
			if (flags & FUSE_ASYNC_DIO)
				fc->async_dio = 1;
			if (flags & FUSE_WRITEBACK_CACHE)
				fc->writeback_cache = 1;
			if (flags & FUSE_PARALLEL_DIROPS)
				fc->parallel_dirops = 1;
			if (flags & FUSE_HANDLE_KILLPRIV)
				fc->handle_killpriv = 1;
			if (arg->time_gran && arg->time_gran <= 1000000000)
				fm->sb->s_time_gran = arg->time_gran;
			if ((flags & FUSE_POSIX_ACL)) {
				fc->default_permissions = 1;
				fc->posix_acl = 1;
			}
			if (flags & FUSE_CACHE_SYMLINKS)
				fc->cache_symlinks = 1;
			if (flags & FUSE_ABORT_ERROR)
				fc->abort_err = 1;
			if (flags & FUSE_MAX_PAGES) {
				fc->max_pages =
					min_t(unsigned int, fc->max_pages_limit,
					max_t(unsigned int, arg->max_pages, 1));

				/*
				 * PATH_MAX file names might need two pages for
				 * ops like rename
				 */
				if (fc->max_pages > 1)
					fc->name_max = FUSE_NAME_MAX;
			}
			if (IS_ENABLED(CONFIG_FUSE_DAX)) {
				if (flags & FUSE_MAP_ALIGNMENT &&
				    !fuse_dax_check_alignment(fc, arg->map_alignment)) {
					ok = false;
				}
				if (flags & FUSE_HAS_INODE_DAX)
					fc->inode_dax = 1;
			}
			if (flags & FUSE_HANDLE_KILLPRIV_V2) {
				fc->handle_killpriv_v2 = 1;
				fm->sb->s_flags |= SB_NOSEC;
			}
			if (flags & FUSE_SETXATTR_EXT)
				fc->setxattr_ext = 1;
			if (flags & FUSE_SECURITY_CTX)
				fc->init_security = 1;
			if (flags & FUSE_CREATE_SUPP_GROUP)
				fc->create_supp_group = 1;
			if (flags & FUSE_DIRECT_IO_ALLOW_MMAP)
				fc->direct_io_allow_mmap = 1;
			/*
			 * max_stack_depth is the max stack depth of FUSE fs,
			 * so it has to be at least 1 to support passthrough
			 * to backing files.
			 *
			 * with max_stack_depth > 1, the backing files can be
			 * on a stacked fs (e.g. overlayfs) themselves and with
			 * max_stack_depth == 1, FUSE fs can be stacked as the
			 * underlying fs of a stacked fs (e.g. overlayfs).
			 *
			 * Also don't allow the combination of FUSE_PASSTHROUGH
			 * and FUSE_WRITEBACK_CACHE, current design doesn't handle
			 * them together.
			 */
			if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH) &&
			    (flags & FUSE_PASSTHROUGH) &&
			    arg->max_stack_depth > 0 &&
			    arg->max_stack_depth <= FILESYSTEM_MAX_STACK_DEPTH &&
			    !(flags & FUSE_WRITEBACK_CACHE)) {
				fc->passthrough = 1;
				fc->max_stack_depth = arg->max_stack_depth;
				fm->sb->s_stack_depth = arg->max_stack_depth;
			}
			if (flags & FUSE_NO_EXPORT_SUPPORT)
				fm->sb->s_export_op = &fuse_export_fid_operations;
			if (flags & FUSE_ALLOW_IDMAP) {
				if (fc->default_permissions)
					fm->sb->s_iflags &= ~SB_I_NOIDMAP;
				else
					ok = false;
			}
			if (flags & FUSE_OVER_IO_URING && fuse_uring_enabled())
				fc->io_uring = 1;

			if (flags & FUSE_REQUEST_TIMEOUT)
				timeout = arg->request_timeout;
		} else {
			ra_pages = fc->max_read / PAGE_SIZE;
			fc->no_lock = 1;
			fc->no_flock = 1;
		}

		init_server_timeout(fc, timeout);

		fm->sb->s_bdi->ra_pages =
				min(fm->sb->s_bdi->ra_pages, ra_pages);
		fc->minor = arg->minor;
		fc->max_write = arg->minor < 5 ? 4096 : arg->max_write;
		fc->max_write = max_t(unsigned, 4096, fc->max_write);
		fc->conn_init = 1;
	}
	kfree(ia);

	if (!ok) {
		fc->conn_init = 0;
		fc->conn_error = 1;
	}

	fuse_set_initialized(fc);
	wake_up_all(&fc->blocked_waitq);
}
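
/*
 * Send FUSE_INIT advertising every feature the kernel supports; the
 * reply is handled asynchronously by process_init_reply().
 */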
void fuse_send_init(struct fuse_mount *fm)
{
	struct fuse_init_args *ia;
	u64 flags;

	ia = kzalloc(sizeof(*ia), GFP_KERNEL | __GFP_NOFAIL);

	ia->in.major = FUSE_KERNEL_VERSION;
	ia->in.minor = FUSE_KERNEL_MINOR_VERSION;
	ia->in.max_readahead = fm->sb->s_bdi->ra_pages * PAGE_SIZE;
	flags =
		FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
		FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
		FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
		FUSE_FLOCK_LOCKS | FUSE_HAS_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
		FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
		FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT |
		FUSE_PARALLEL_DIROPS | FUSE_HANDLE_KILLPRIV | FUSE_POSIX_ACL |
		FUSE_ABORT_ERROR | FUSE_MAX_PAGES | FUSE_CACHE_SYMLINKS |
		FUSE_NO_OPENDIR_SUPPORT | FUSE_EXPLICIT_INVAL_DATA |
		FUSE_HANDLE_KILLPRIV_V2 | FUSE_SETXATTR_EXT | FUSE_INIT_EXT |
		FUSE_SECURITY_CTX | FUSE_CREATE_SUPP_GROUP |
		FUSE_HAS_EXPIRE_ONLY | FUSE_DIRECT_IO_ALLOW_MMAP |
		FUSE_NO_EXPORT_SUPPORT | FUSE_HAS_RESEND | FUSE_ALLOW_IDMAP |
		FUSE_REQUEST_TIMEOUT;
#ifdef CONFIG_FUSE_DAX
	if (fm->fc->dax)
		flags |= FUSE_MAP_ALIGNMENT;
	if (fuse_is_inode_dax_mode(fm->fc->dax_mode))
		flags |= FUSE_HAS_INODE_DAX;
#endif
	if (fm->fc->auto_submounts)
		flags |= FUSE_SUBMOUNTS;
	if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
		flags |= FUSE_PASSTHROUGH;

	/*
	 * This is just an information flag for fuse server. No need to check
	 * the reply - server is either sending IORING_OP_URING_CMD or not.
	 */
	if (fuse_uring_enabled())
		flags |= FUSE_OVER_IO_URING;

	ia->in.flags = flags;
	ia->in.flags2 = flags >> 32;

	ia->args.opcode = FUSE_INIT;
	ia->args.in_numargs = 1;
	ia->args.in_args[0].size = sizeof(ia->in);
	ia->args.in_args[0].value = &ia->in;
	ia->args.out_numargs = 1;
	/* Variable length argument used for backward compatibility
	   with interface version < 7.5.  Rest of init_out is zeroed
	   by do_get_request(), so a short reply is not a problem */
	ia->args.out_argvar = true;
	ia->args.out_args[0].size = sizeof(ia->out);
	ia->args.out_args[0].value = &ia->out;
	ia->args.force = true;
	ia->args.nocreds = true;
	ia->args.end = process_init_reply;

	if (fuse_simple_background(fm, &ia->args, GFP_KERNEL) != 0)
		process_init_reply(fm, &ia->args, -ENOTCONN);
}
EXPORT_SYMBOL_GPL(fuse_send_init);
void fuse_free_conn(struct fuse_conn *fc)
{
	WARN_ON(!list_empty(&fc->devices));
	kfree(fc);
}
EXPORT_SYMBOL_GPL(fuse_free_conn);
static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
{
	int err;
	char *suffix = "";

	if (sb->s_bdev) {
		suffix = "-fuseblk";
		/*
		 * sb->s_bdi points to blkdev's bdi however we want to redirect
		 * it to our private bdi...
		 */
		bdi_put(sb->s_bdi);
		sb->s_bdi = &noop_backing_dev_info;
	}
	err = super_setup_bdi_name(sb, "%u:%u%s", MAJOR(fc->dev),
				   MINOR(fc->dev), suffix);
	if (err)
		return err;

	/* fuse does its own writeback accounting */
	sb->s_bdi->capabilities &= ~BDI_CAP_WRITEBACK_ACCT;
	sb->s_bdi->capabilities |= BDI_CAP_STRICTLIMIT;

	/*
	 * For a single fuse filesystem use max 1% of dirty +
	 * writeback threshold.
	 *
	 * This gives about 1M of write buffer for memory maps on a
	 * machine with 1G and 10% dirty_ratio, which should be more
	 * than enough.
	 *
	 * Privileged users can raise it by writing to
	 *
	 *    /sys/class/bdi/<bdi>/max_ratio
	 */
	bdi_set_max_ratio(sb->s_bdi, 1);

	return 0;
}
struct fuse_dev *fuse_dev_alloc(void)
{
	struct fuse_dev *fud;
	struct list_head *pq;

	fud = kzalloc(sizeof(struct fuse_dev), GFP_KERNEL);
	if (!fud)
		return NULL;

	pq = kcalloc(FUSE_PQ_HASH_SIZE, sizeof(struct list_head), GFP_KERNEL);
	if (!pq) {
		kfree(fud);
		return NULL;
	}

	fud->pq.processing = pq;
	fuse_pqueue_init(&fud->pq);

	return fud;
}
EXPORT_SYMBOL_GPL(fuse_dev_alloc);
void fuse_dev_install(struct fuse_dev *fud, struct fuse_conn *fc)
{
	fud->fc = fuse_conn_get(fc);
	spin_lock(&fc->lock);
	list_add_tail(&fud->entry, &fc->devices);
	spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_dev_install);
struct fuse_dev *fuse_dev_alloc_install(struct fuse_conn *fc)
{
	struct fuse_dev *fud;

	fud = fuse_dev_alloc();
	if (!fud)
		return NULL;

	fuse_dev_install(fud, fc);
	return fud;
}
EXPORT_SYMBOL_GPL(fuse_dev_alloc_install);
void fuse_dev_free(struct fuse_dev *fud)
{
	struct fuse_conn *fc = fud->fc;

	if (fc) {
		spin_lock(&fc->lock);
		list_del(&fud->entry);
		spin_unlock(&fc->lock);

		fuse_conn_put(fc);
	}
	kfree(fud->pq.processing);
	kfree(fud);
}
EXPORT_SYMBOL_GPL(fuse_dev_free);
static void fuse_fill_attr_from_inode(struct fuse_attr *attr,
				      const struct fuse_inode *fi)
{
	struct timespec64 atime = inode_get_atime(&fi->inode);
	struct timespec64 mtime = inode_get_mtime(&fi->inode);
	struct timespec64 ctime = inode_get_ctime(&fi->inode);

	*attr = (struct fuse_attr){
		.ino		= fi->inode.i_ino,
		.size		= fi->inode.i_size,
		.blocks		= fi->inode.i_blocks,
		.atime		= atime.tv_sec,
		.mtime		= mtime.tv_sec,
		.ctime		= ctime.tv_sec,
		.atimensec	= atime.tv_nsec,
		.mtimensec	= mtime.tv_nsec,
		.ctimensec	= ctime.tv_nsec,
		.mode		= fi->inode.i_mode,
		.nlink		= fi->inode.i_nlink,
		.uid		= __kuid_val(fi->inode.i_uid),
		.gid		= __kgid_val(fi->inode.i_gid),
		.rdev		= fi->inode.i_rdev,
		.blksize	= 1u << fi->inode.i_blkbits,
	};
}
static void fuse_sb_defaults(struct super_block *sb)
{
	sb->s_magic = FUSE_SUPER_MAGIC;
	sb->s_op = &fuse_super_operations;
	sb->s_xattr = fuse_xattr_handlers;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_time_gran = 1;
	sb->s_export_op = &fuse_export_operations;
	sb->s_iflags |= SB_I_IMA_UNVERIFIABLE_SIGNATURE;
	sb->s_iflags |= SB_I_NOIDMAP;
	if (sb->s_user_ns != &init_user_ns)
		sb->s_iflags |= SB_I_UNTRUSTED_MOUNTER;
	sb->s_flags &= ~(SB_NOSEC | SB_I_VERSION);
}
static int fuse_fill_super_submount(struct super_block *sb,
				    struct fuse_inode *parent_fi)
{
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	struct super_block *parent_sb = parent_fi->inode.i_sb;
	struct fuse_attr root_attr;
	struct inode *root;
	struct fuse_submount_lookup *sl;
	struct fuse_inode *fi;

	fuse_sb_defaults(sb);
	fm->sb = sb;

	WARN_ON(sb->s_bdi != &noop_backing_dev_info);
	sb->s_bdi = bdi_get(parent_sb->s_bdi);

	sb->s_xattr = parent_sb->s_xattr;
	sb->s_export_op = parent_sb->s_export_op;
	sb->s_time_gran = parent_sb->s_time_gran;
	sb->s_blocksize = parent_sb->s_blocksize;
	sb->s_blocksize_bits = parent_sb->s_blocksize_bits;
	sb->s_subtype = kstrdup(parent_sb->s_subtype, GFP_KERNEL);
	if (parent_sb->s_subtype && !sb->s_subtype)
		return -ENOMEM;

	fuse_fill_attr_from_inode(&root_attr, parent_fi);
	root = fuse_iget(sb, parent_fi->nodeid, 0, &root_attr, 0, 0,
			 fuse_get_evict_ctr(fm->fc));
	/*
	 * This inode is just a duplicate, so it is not looked up and
	 * its nlookup should not be incremented.  fuse_iget() does
	 * that, though, so undo it here.
	 */
	fi = get_fuse_inode(root);
	fi->nlookup--;

	sb->s_d_op = &fuse_dentry_operations;
	sb->s_root = d_make_root(root);
	if (!sb->s_root)
		return -ENOMEM;

	/*
	 * Grab the parent's submount_lookup pointer and take a
	 * reference on the shared nlookup from the parent.  This is to
	 * prevent the last forget for this nodeid from getting
	 * triggered until all users have finished with it.
	 */
	sl = parent_fi->submount_lookup;
	WARN_ON(!sl);
	if (sl) {
		refcount_inc(&sl->count);
		fi->submount_lookup = sl;
	}

	return 0;
}
/* Filesystem context private data holds the FUSE inode of the mount point */
static int fuse_get_tree_submount(struct fs_context *fsc)
{
	struct fuse_mount *fm;
	struct fuse_inode *mp_fi = fsc->fs_private;
	struct fuse_conn *fc = get_fuse_conn(&mp_fi->inode);
	struct super_block *sb;
	int err;

	fm = kzalloc(sizeof(struct fuse_mount), GFP_KERNEL);
	if (!fm)
		return -ENOMEM;

	fm->fc = fuse_conn_get(fc);
	fsc->s_fs_info = fm;
	sb = sget_fc(fsc, NULL, set_anon_super_fc);
	if (fsc->s_fs_info)
		fuse_mount_destroy(fm);
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	/* Initialize superblock, making @mp_fi its root */
	err = fuse_fill_super_submount(sb, mp_fi);
	if (err) {
		deactivate_locked_super(sb);
		return err;
	}

	down_write(&fc->killsb);
	list_add_tail(&fm->fc_entry, &fc->mounts);
	up_write(&fc->killsb);

	sb->s_flags |= SB_ACTIVE;
	fsc->root = dget(sb->s_root);

	return 0;
}
static const struct fs_context_operations fuse_context_submount_ops = {
	.get_tree	= fuse_get_tree_submount,
};

int fuse_init_fs_context_submount(struct fs_context *fsc)
{
	fsc->ops = &fuse_context_submount_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(fuse_init_fs_context_submount);
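
/*
 * Common superblock setup shared by fuse and fuseblk: allocates the
 * device, bdi and root dentry, then publishes the connection under
 * fuse_mutex.
 */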
int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx)
{
	struct fuse_dev *fud = NULL;
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	struct fuse_conn *fc = fm->fc;
	struct inode *root;
	struct dentry *root_dentry;
	int err;

	err = -EINVAL;
	if (sb->s_flags & SB_MANDLOCK)
		goto err;

	rcu_assign_pointer(fc->curr_bucket, fuse_sync_bucket_alloc());
	fuse_sb_defaults(sb);

	if (ctx->is_bdev) {
#ifdef CONFIG_BLOCK
		err = -EINVAL;
		if (!sb_set_blocksize(sb, ctx->blksize))
			goto err;
#endif
	} else {
		sb->s_blocksize = PAGE_SIZE;
		sb->s_blocksize_bits = PAGE_SHIFT;
	}

	sb->s_subtype = ctx->subtype;
	ctx->subtype = NULL;
	if (IS_ENABLED(CONFIG_FUSE_DAX)) {
		err = fuse_dax_conn_alloc(fc, ctx->dax_mode, ctx->dax_dev);
		if (err)
			goto err;
	}

	if (ctx->fudptr) {
		err = -ENOMEM;
		fud = fuse_dev_alloc_install(fc);
		if (!fud)
			goto err_free_dax;
	}

	fc->dev = sb->s_dev;
	fm->sb = sb;
	err = fuse_bdi_init(fc, sb);
	if (err)
		goto err_dev_free;

	/* Handle umasking inside the fuse code */
	if (sb->s_flags & SB_POSIXACL)
		fc->dont_mask = 1;
	sb->s_flags |= SB_POSIXACL;

	fc->default_permissions = ctx->default_permissions;
	fc->allow_other = ctx->allow_other;
	fc->user_id = ctx->user_id;
	fc->group_id = ctx->group_id;
	fc->legacy_opts_show = ctx->legacy_opts_show;
	fc->max_read = max_t(unsigned int, 4096, ctx->max_read);
	fc->destroy = ctx->destroy;
	fc->no_control = ctx->no_control;
	fc->no_force_umount = ctx->no_force_umount;

	err = -ENOMEM;
	root = fuse_get_root_inode(sb, ctx->rootmode);
	sb->s_d_op = &fuse_root_dentry_operations;
	root_dentry = d_make_root(root);
	if (!root_dentry)
		goto err_dev_free;
	/* Root dentry doesn't have .d_revalidate */
	sb->s_d_op = &fuse_dentry_operations;

	mutex_lock(&fuse_mutex);
	err = -EINVAL;
	if (ctx->fudptr && *ctx->fudptr)
		goto err_unlock;

	err = fuse_ctl_add_conn(fc);
	if (err)
		goto err_unlock;

	list_add_tail(&fc->entry, &fuse_conn_list);
	sb->s_root = root_dentry;
	if (ctx->fudptr)
		*ctx->fudptr = fud;
	mutex_unlock(&fuse_mutex);
	return 0;

 err_unlock:
	mutex_unlock(&fuse_mutex);
	dput(root_dentry);
 err_dev_free:
	if (fud)
		fuse_dev_free(fud);
 err_free_dax:
	if (IS_ENABLED(CONFIG_FUSE_DAX))
		fuse_dax_conn_free(fc);
 err:
	return err;
}
EXPORT_SYMBOL_GPL(fuse_fill_super_common);
static int fuse_fill_super(struct super_block *sb, struct fs_context *fsc)
{
	struct fuse_fs_context *ctx = fsc->fs_private;
	int err;

	if (!ctx->file || !ctx->rootmode_present ||
	    !ctx->user_id_present || !ctx->group_id_present)
		return -EINVAL;

	/*
	 * Require mount to happen from the same user namespace which
	 * opened /dev/fuse to prevent potential attacks.
	 */
	if ((ctx->file->f_op != &fuse_dev_operations) ||
	    (ctx->file->f_cred->user_ns != sb->s_user_ns))
		return -EINVAL;
	ctx->fudptr = &ctx->file->private_data;

	err = fuse_fill_super_common(sb, ctx);
	if (err)
		return err;
	/* file->private_data shall be visible on all CPUs after this */
	smp_mb();
	fuse_send_init(get_fuse_mount_super(sb));
	return 0;
}
/*
 * This is the path where user supplied an already initialized fuse dev.  In
 * this case never create a new super if the old one is gone.
 */
static int fuse_set_no_super(struct super_block *sb, struct fs_context *fsc)
{
	return -ENOTCONN;
}

static int fuse_test_super(struct super_block *sb, struct fs_context *fsc)
{
	return fsc->sget_key == get_fuse_conn_super(sb);
}
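
/*
 * Allocate the connection and mount, then create the superblock either
 * on a block device (fuseblk), on an already initialized /dev/fuse fd
 * (shared superblock), or on a fresh anonymous superblock.
 */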
static int fuse_get_tree(struct fs_context *fsc)
{
	struct fuse_fs_context *ctx = fsc->fs_private;
	struct fuse_dev *fud;
	struct fuse_conn *fc;
	struct fuse_mount *fm;
	struct super_block *sb;
	int err;

	fc = kmalloc(sizeof(*fc), GFP_KERNEL);
	if (!fc)
		return -ENOMEM;

	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm) {
		kfree(fc);
		return -ENOMEM;
	}

	fuse_conn_init(fc, fm, fsc->user_ns, &fuse_dev_fiq_ops, NULL);
	fc->release = fuse_free_conn;

	fsc->s_fs_info = fm;

	if (ctx->fd_present)
		ctx->file = fget(ctx->fd);

	if (IS_ENABLED(CONFIG_BLOCK) && ctx->is_bdev) {
		err = get_tree_bdev(fsc, fuse_fill_super);
		goto out;
	}
	/*
	 * While block dev mount can be initialized with a dummy device fd
	 * (found by device name), normal fuse mounts can't
	 */
	err = -EINVAL;
	if (!ctx->file)
		goto out;

	/*
	 * Allow creating a fuse mount with an already initialized fuse
	 * connection
	 */
	fud = READ_ONCE(ctx->file->private_data);
	if (ctx->file->f_op == &fuse_dev_operations && fud) {
		fsc->sget_key = fud->fc;
		sb = sget_fc(fsc, fuse_test_super, fuse_set_no_super);
		err = PTR_ERR_OR_ZERO(sb);
		if (!IS_ERR(sb))
			fsc->root = dget(sb->s_root);
	} else {
		err = get_tree_nodev(fsc, fuse_fill_super);
	}
out:
	if (fsc->s_fs_info)
		fuse_mount_destroy(fm);
	if (ctx->file)
		fput(ctx->file);
	return err;
}
static const struct fs_context_operations fuse_context_ops = {
	.free		= fuse_free_fsc,
	.parse_param	= fuse_parse_param,
	.reconfigure	= fuse_reconfigure,
	.get_tree	= fuse_get_tree,
};
/*
 * Set up the filesystem mount context.
 */
static int fuse_init_fs_context(struct fs_context *fsc)
{
	struct fuse_fs_context *ctx;

	ctx = kzalloc(sizeof(struct fuse_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->max_read = ~0;
	ctx->blksize = FUSE_DEFAULT_BLKSIZE;
	ctx->legacy_opts_show = true;

#ifdef CONFIG_BLOCK
	if (fsc->fs_type == &fuseblk_fs_type) {
		ctx->is_bdev = true;
		ctx->destroy = true;
	}
#endif

	fsc->fs_private = ctx;
	fsc->ops = &fuse_context_ops;
	return 0;
}
bool fuse_mount_remove(struct fuse_mount *fm)
{
	struct fuse_conn *fc = fm->fc;
	bool last = false;

	down_write(&fc->killsb);
	list_del_init(&fm->fc_entry);
	if (list_empty(&fc->mounts))
		last = true;
	up_write(&fc->killsb);

	return last;
}
EXPORT_SYMBOL_GPL(fuse_mount_remove);
void fuse_conn_destroy(struct fuse_mount *fm)
{
	struct fuse_conn *fc = fm->fc;

	if (fc->destroy)
		fuse_send_destroy(fm);

	fuse_abort_conn(fc);
	fuse_wait_aborted(fc);

	if (!list_empty(&fc->entry)) {
		mutex_lock(&fuse_mutex);
		list_del(&fc->entry);
		fuse_ctl_remove_conn(fc);
		mutex_unlock(&fuse_mutex);
	}
}
EXPORT_SYMBOL_GPL(fuse_conn_destroy);
static void fuse_sb_destroy(struct super_block *sb)
{
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	bool last;

	if (sb->s_root) {
		last = fuse_mount_remove(fm);
		if (last)
			fuse_conn_destroy(fm);
	}
}
void fuse_mount_destroy(struct fuse_mount *fm)
{
	fuse_conn_put(fm->fc);
	kfree_rcu(fm, rcu);
}
EXPORT_SYMBOL(fuse_mount_destroy);
static void fuse_kill_sb_anon(struct super_block *sb)
{
	fuse_sb_destroy(sb);
	kill_anon_super(sb);
	fuse_mount_destroy(get_fuse_mount_super(sb));
}

static struct file_system_type fuse_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "fuse",
	.fs_flags	= FS_HAS_SUBTYPE | FS_USERNS_MOUNT | FS_ALLOW_IDMAP,
	.init_fs_context = fuse_init_fs_context,
	.parameters	= fuse_fs_parameters,
	.kill_sb	= fuse_kill_sb_anon,
};
MODULE_ALIAS_FS("fuse");
#ifdef CONFIG_BLOCK
static void fuse_kill_sb_blk(struct super_block *sb)
{
	fuse_sb_destroy(sb);
	kill_block_super(sb);
	fuse_mount_destroy(get_fuse_mount_super(sb));
}

static struct file_system_type fuseblk_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "fuseblk",
	.init_fs_context = fuse_init_fs_context,
	.parameters	= fuse_fs_parameters,
	.kill_sb	= fuse_kill_sb_blk,
	.fs_flags	= FS_REQUIRES_DEV | FS_HAS_SUBTYPE | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("fuseblk");

static inline int register_fuseblk(void)
{
	return register_filesystem(&fuseblk_fs_type);
}

static inline void unregister_fuseblk(void)
{
	unregister_filesystem(&fuseblk_fs_type);
}
#else
static inline int register_fuseblk(void)
{
	return 0;
}

static inline void unregister_fuseblk(void)
{
}
#endif
static void fuse_inode_init_once(void *foo)
{
	struct inode *inode = foo;

	inode_init_once(inode);
}
static int __init fuse_fs_init(void)
{
	int err;

	fuse_inode_cachep = kmem_cache_create("fuse_inode",
			sizeof(struct fuse_inode), 0,
			SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT|SLAB_RECLAIM_ACCOUNT,
			fuse_inode_init_once);
	err = -ENOMEM;
	if (!fuse_inode_cachep)
		goto out;

	err = register_fuseblk();
	if (err)
		goto out2;

	err = register_filesystem(&fuse_fs_type);
	if (err)
		goto out3;

	err = fuse_sysctl_register();
	if (err)
		goto out4;

	return 0;

 out4:
	unregister_filesystem(&fuse_fs_type);
 out3:
	unregister_fuseblk();
 out2:
	kmem_cache_destroy(fuse_inode_cachep);
 out:
	return err;
}
static void fuse_fs_cleanup(void)
{
	fuse_sysctl_unregister();
	unregister_filesystem(&fuse_fs_type);
	unregister_fuseblk();

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(fuse_inode_cachep);
}
static struct kobject *fuse_kobj;

static int fuse_sysfs_init(void)
{
	int err;

	fuse_kobj = kobject_create_and_add("fuse", fs_kobj);
	if (!fuse_kobj) {
		err = -ENOMEM;
		goto out_err;
	}

	err = sysfs_create_mount_point(fuse_kobj, "connections");
	if (err)
		goto out_fuse_unregister;

	return 0;

 out_fuse_unregister:
	kobject_put(fuse_kobj);
 out_err:
	return err;
}

static void fuse_sysfs_cleanup(void)
{
	sysfs_remove_mount_point(fuse_kobj, "connections");
	kobject_put(fuse_kobj);
}
static int __init fuse_init(void)
{
	int res;

	pr_info("init (API version %i.%i)\n",
		FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION);

	INIT_LIST_HEAD(&fuse_conn_list);
	res = fuse_fs_init();
	if (res)
		goto err;

	res = fuse_dev_init();
	if (res)
		goto err_fs_cleanup;

	res = fuse_sysfs_init();
	if (res)
		goto err_dev_cleanup;

	res = fuse_ctl_init();
	if (res)
		goto err_sysfs_cleanup;

	sanitize_global_limit(&max_user_bgreq);
	sanitize_global_limit(&max_user_congthresh);

	return 0;

 err_sysfs_cleanup:
	fuse_sysfs_cleanup();
 err_dev_cleanup:
	fuse_dev_cleanup();
 err_fs_cleanup:
	fuse_fs_cleanup();
 err:
	return res;
}

static void __exit fuse_exit(void)
{
	pr_debug("exit\n");

	fuse_ctl_cleanup();
	fuse_sysfs_cleanup();
	fuse_fs_cleanup();
	fuse_dev_cleanup();
}

module_init(fuse_init);
module_exit(fuse_exit);