/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/statfs.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/pid_namespace.h>
#include <uapi/linux/magic.h>
MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
MODULE_DESCRIPTION("Filesystem in Userspace");
MODULE_LICENSE("GPL");

static struct kmem_cache *fuse_inode_cachep;
struct list_head fuse_conn_list;
DEFINE_MUTEX(fuse_mutex);

static int set_global_limit(const char *val, const struct kernel_param *kp);
unsigned max_user_bgreq;
module_param_call(max_user_bgreq, set_global_limit, param_get_uint,
                  &max_user_bgreq, 0644);
__MODULE_PARM_TYPE(max_user_bgreq, "uint");
MODULE_PARM_DESC(max_user_bgreq,
 "Global limit for the maximum number of backgrounded requests an "
 "unprivileged user can set");

unsigned max_user_congthresh;
module_param_call(max_user_congthresh, set_global_limit, param_get_uint,
                  &max_user_congthresh, 0644);
__MODULE_PARM_TYPE(max_user_congthresh, "uint");
MODULE_PARM_DESC(max_user_congthresh,
 "Global limit for the maximum congestion threshold an "
 "unprivileged user can set");
#define FUSE_DEFAULT_BLKSIZE 512

/** Maximum number of outstanding background requests */
#define FUSE_DEFAULT_MAX_BACKGROUND 12

/** Congestion starts at 75% of maximum */
#define FUSE_DEFAULT_CONGESTION_THRESHOLD (FUSE_DEFAULT_MAX_BACKGROUND * 3 / 4)
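
/* With the default max_background of 12, congestion starts at 12 * 3 / 4 = 9. */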
#ifdef CONFIG_BLOCK
static struct file_system_type fuseblk_fs_type;
#endif

struct fuse_forget_link *fuse_alloc_forget(void)
{
        return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL_ACCOUNT);
}
static struct fuse_submount_lookup *fuse_alloc_submount_lookup(void)
{
        struct fuse_submount_lookup *sl;

        sl = kzalloc(sizeof(struct fuse_submount_lookup), GFP_KERNEL_ACCOUNT);
        if (!sl)
                return NULL;
        sl->forget = fuse_alloc_forget();
        if (!sl->forget)
                goto out_free;

        return sl;

out_free:
        kfree(sl);
        return NULL;
}
static struct inode *fuse_alloc_inode(struct super_block *sb)
{
        struct fuse_inode *fi;

        fi = alloc_inode_sb(sb, fuse_inode_cachep, GFP_KERNEL);
        if (!fi)
                return NULL;

        fi->i_time = 0;
        fi->inval_mask = ~0;
        fi->nodeid = 0;
        fi->nlookup = 0;
        fi->attr_version = 0;
        fi->orig_ino = 0;
        fi->state = 0;
        fi->submount_lookup = NULL;
        mutex_init(&fi->mutex);
        spin_lock_init(&fi->lock);
        fi->forget = fuse_alloc_forget();
        if (!fi->forget)
                goto out_free;

        if (IS_ENABLED(CONFIG_FUSE_DAX) && !fuse_dax_inode_alloc(sb, fi))
                goto out_free_forget;

        if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
                fuse_inode_backing_set(fi, NULL);

        return &fi->inode;

out_free_forget:
        kfree(fi->forget);
out_free:
        kmem_cache_free(fuse_inode_cachep, fi);
        return NULL;
}
static void fuse_free_inode(struct inode *inode)
{
        struct fuse_inode *fi = get_fuse_inode(inode);

        mutex_destroy(&fi->mutex);
        kfree(fi->forget);
#ifdef CONFIG_FUSE_DAX
        kfree(fi->dax);
#endif
        if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
                fuse_backing_put(fuse_inode_backing(fi));

        kmem_cache_free(fuse_inode_cachep, fi);
}
static void fuse_cleanup_submount_lookup(struct fuse_conn *fc,
                                         struct fuse_submount_lookup *sl)
{
        if (!refcount_dec_and_test(&sl->count))
                return;

        fuse_queue_forget(fc, sl->forget, sl->nodeid, 1);
        sl->forget = NULL;
        kfree(sl);
}
static void fuse_evict_inode(struct inode *inode)
{
        struct fuse_inode *fi = get_fuse_inode(inode);

        /* Will write inode on close/munmap and in all other dirtiers */
        WARN_ON(inode->i_state & I_DIRTY_INODE);

        truncate_inode_pages_final(&inode->i_data);
        clear_inode(inode);
        if (inode->i_sb->s_flags & SB_ACTIVE) {
                struct fuse_conn *fc = get_fuse_conn(inode);

                if (FUSE_IS_DAX(inode))
                        fuse_dax_inode_cleanup(inode);
                if (fi->nlookup) {
                        fuse_queue_forget(fc, fi->forget, fi->nodeid,
                                          fi->nlookup);
                        fi->forget = NULL;
                }

                if (fi->submount_lookup) {
                        fuse_cleanup_submount_lookup(fc, fi->submount_lookup);
                        fi->submount_lookup = NULL;
                }
        }
        if (S_ISREG(inode->i_mode) && !fuse_is_bad(inode)) {
                WARN_ON(!list_empty(&fi->write_files));
                WARN_ON(!list_empty(&fi->queued_writes));
        }
}
static int fuse_reconfigure(struct fs_context *fsc)
{
        struct super_block *sb = fsc->root->d_sb;

        sync_filesystem(sb);
        if (fsc->sb_flags & SB_MANDLOCK)
                return -EINVAL;

        return 0;
}
/*
 * ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down
 * so that it will fit.
 */
static ino_t fuse_squash_ino(u64 ino64)
{
        ino_t ino = (ino_t) ino64;
        if (sizeof(ino_t) < sizeof(u64))
                ino ^= ino64 >> (sizeof(u64) - sizeof(ino_t)) * 8;
        return ino;
}
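
/*
 * Worked example (illustrative): on a 32-bit arch with a 4-byte ino_t,
 * ino64 = 0x123456789abcdef0 is folded as
 * (ino_t)0x9abcdef0 ^ (ino64 >> 32) = 0x9abcdef0 ^ 0x12345678 = 0x88888888,
 * so both halves of the 64-bit nodeid contribute to the squashed inode
 * number instead of simply truncating the top half.
 */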
void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
                                   struct fuse_statx *sx,
                                   u64 attr_valid, u32 cache_mask)
{
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_inode *fi = get_fuse_inode(inode);

        lockdep_assert_held(&fi->lock);

        fi->attr_version = atomic64_inc_return(&fc->attr_version);
        fi->i_time = attr_valid;
        /* Clear basic stats from invalid mask */
        set_mask_bits(&fi->inval_mask, STATX_BASIC_STATS, 0);

        inode->i_ino     = fuse_squash_ino(attr->ino);
        inode->i_mode    = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
        set_nlink(inode, attr->nlink);
        inode->i_uid     = make_kuid(fc->user_ns, attr->uid);
        inode->i_gid     = make_kgid(fc->user_ns, attr->gid);
        inode->i_blocks  = attr->blocks;

        /* Sanitize nsecs */
        attr->atimensec = min_t(u32, attr->atimensec, NSEC_PER_SEC - 1);
        attr->mtimensec = min_t(u32, attr->mtimensec, NSEC_PER_SEC - 1);
        attr->ctimensec = min_t(u32, attr->ctimensec, NSEC_PER_SEC - 1);

        inode_set_atime(inode, attr->atime, attr->atimensec);
        /* mtime from server may be stale due to local buffered write */
        if (!(cache_mask & STATX_MTIME)) {
                inode_set_mtime(inode, attr->mtime, attr->mtimensec);
        }
        if (!(cache_mask & STATX_CTIME)) {
                inode_set_ctime(inode, attr->ctime, attr->ctimensec);
        }

        if (sx) {
                /* Sanitize nsecs */
                sx->btime.tv_nsec =
                        min_t(u32, sx->btime.tv_nsec, NSEC_PER_SEC - 1);

                /*
                 * Btime has been queried, so the cache is valid (whether or
                 * not btime itself is available); clear STATX_BTIME from
                 * inval_mask.
                 *
                 * Availability of the btime attribute is indicated in
                 * FUSE_I_BTIME.
                 */
                set_mask_bits(&fi->inval_mask, STATX_BTIME, 0);
                if (sx->mask & STATX_BTIME) {
                        set_bit(FUSE_I_BTIME, &fi->state);
                        fi->i_btime.tv_sec = sx->btime.tv_sec;
                        fi->i_btime.tv_nsec = sx->btime.tv_nsec;
                }
        }

        if (attr->blksize != 0)
                inode->i_blkbits = ilog2(attr->blksize);
        else
                inode->i_blkbits = inode->i_sb->s_blocksize_bits;

        /*
         * Don't set the sticky bit in i_mode, unless we want the VFS
         * to check permissions.  This prevents failures due to the
         * check in may_delete().
         */
        fi->orig_i_mode = inode->i_mode;
        if (!fc->default_permissions)
                inode->i_mode &= ~S_ISVTX;

        fi->orig_ino = attr->ino;

        /*
         * We are refreshing inode data and it is possible that another
         * client set suid/sgid or security.capability xattr.  So clear
         * S_NOSEC.  Ideally, we could have cleared it only if suid/sgid
         * was set or if security.capability xattr was set.  But we don't
         * know whether security.capability has been set or not, so clear
         * it anyway.  It's less efficient but should be safe.
         */
        inode->i_flags &= ~S_NOSEC;
}
u32 fuse_get_cache_mask(struct inode *inode)
{
        struct fuse_conn *fc = get_fuse_conn(inode);

        if (!fc->writeback_cache || !S_ISREG(inode->i_mode))
                return 0;

        return STATX_MTIME | STATX_CTIME | STATX_SIZE;
}
void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
                            struct fuse_statx *sx,
                            u64 attr_valid, u64 attr_version)
{
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_inode *fi = get_fuse_inode(inode);
        u32 cache_mask;
        loff_t oldsize;
        struct timespec64 old_mtime;

        spin_lock(&fi->lock);
        /*
         * In case of writeback_cache enabled, writes update mtime, ctime and
         * may update i_size.  In these cases trust the cached value in the
         * inode.
         */
        cache_mask = fuse_get_cache_mask(inode);
        if (cache_mask & STATX_SIZE)
                attr->size = i_size_read(inode);

        if (cache_mask & STATX_MTIME) {
                attr->mtime = inode_get_mtime_sec(inode);
                attr->mtimensec = inode_get_mtime_nsec(inode);
        }
        if (cache_mask & STATX_CTIME) {
                attr->ctime = inode_get_ctime_sec(inode);
                attr->ctimensec = inode_get_ctime_nsec(inode);
        }

        if ((attr_version != 0 && fi->attr_version > attr_version) ||
            test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
                spin_unlock(&fi->lock);
                return;
        }

        old_mtime = inode_get_mtime(inode);
        fuse_change_attributes_common(inode, attr, sx, attr_valid, cache_mask);

        oldsize = inode->i_size;
        /*
         * In case of writeback_cache enabled, the cached writes beyond EOF
         * extend local i_size without keeping userspace server in sync. So,
         * attr->size coming from server can be stale. We cannot trust it.
         */
        if (!(cache_mask & STATX_SIZE))
                i_size_write(inode, attr->size);
        spin_unlock(&fi->lock);

        if (!cache_mask && S_ISREG(inode->i_mode)) {
                bool inval = false;

                if (oldsize != attr->size) {
                        truncate_pagecache(inode, attr->size);
                        if (!fc->explicit_inval_data)
                                inval = true;
                } else if (fc->auto_inval_data) {
                        struct timespec64 new_mtime = {
                                .tv_sec = attr->mtime,
                                .tv_nsec = attr->mtimensec,
                        };

                        /*
                         * Auto inval mode also checks and invalidates if mtime
                         * has changed.
                         */
                        if (!timespec64_equal(&old_mtime, &new_mtime))
                                inval = true;
                }

                if (inval)
                        invalidate_inode_pages2(inode->i_mapping);
        }

        if (IS_ENABLED(CONFIG_FUSE_DAX))
                fuse_dax_dontcache(inode, attr->flags);
}
static void fuse_init_submount_lookup(struct fuse_submount_lookup *sl,
                                      u64 nodeid)
{
        sl->nodeid = nodeid;
        refcount_set(&sl->count, 1);
}
static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr,
                            struct fuse_conn *fc)
{
        inode->i_mode = attr->mode & S_IFMT;
        inode->i_size = attr->size;
        inode_set_mtime(inode, attr->mtime, attr->mtimensec);
        inode_set_ctime(inode, attr->ctime, attr->ctimensec);
        if (S_ISREG(inode->i_mode)) {
                fuse_init_common(inode);
                fuse_init_file_inode(inode, attr->flags);
        } else if (S_ISDIR(inode->i_mode))
                fuse_init_dir(inode);
        else if (S_ISLNK(inode->i_mode))
                fuse_init_symlink(inode);
        else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
                 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
                fuse_init_common(inode);
                init_special_inode(inode, inode->i_mode,
                                   new_decode_dev(attr->rdev));
        } else
                BUG();
        /*
         * Ensure that we don't cache acls for daemons without FUSE_POSIX_ACL
         * so they see the exact same behavior as before.
         */
        if (!fc->posix_acl)
                inode->i_acl = inode->i_default_acl = ACL_DONT_CACHE;
}
static int fuse_inode_eq(struct inode *inode, void *_nodeidp)
{
        u64 nodeid = *(u64 *) _nodeidp;
        if (get_node_id(inode) == nodeid)
                return 1;
        else
                return 0;
}

static int fuse_inode_set(struct inode *inode, void *_nodeidp)
{
        u64 nodeid = *(u64 *) _nodeidp;
        get_fuse_inode(inode)->nodeid = nodeid;
        return 0;
}
struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
                        int generation, struct fuse_attr *attr,
                        u64 attr_valid, u64 attr_version)
{
        struct inode *inode;
        struct fuse_inode *fi;
        struct fuse_conn *fc = get_fuse_conn_super(sb);

        /*
         * Auto mount points get their node id from the submount root, which is
         * not a unique identifier within this filesystem.
         *
         * To avoid conflicts, do not place submount points into the inode hash
         * table.
         */
        if (fc->auto_submounts && (attr->flags & FUSE_ATTR_SUBMOUNT) &&
            S_ISDIR(attr->mode)) {
                struct fuse_inode *fi;

                inode = new_inode(sb);
                if (!inode)
                        return NULL;

                fuse_init_inode(inode, attr, fc);
                fi = get_fuse_inode(inode);
                fi->nodeid = nodeid;
                fi->submount_lookup = fuse_alloc_submount_lookup();
                if (!fi->submount_lookup) {
                        iput(inode);
                        return NULL;
                }
                /* Sets nlookup = 1 on fi->submount_lookup->nlookup */
                fuse_init_submount_lookup(fi->submount_lookup, nodeid);
                inode->i_flags |= S_AUTOMOUNT;
                goto done;
        }

retry:
        inode = iget5_locked(sb, nodeid, fuse_inode_eq, fuse_inode_set, &nodeid);
        if (!inode)
                return NULL;

        if ((inode->i_state & I_NEW)) {
                inode->i_flags |= S_NOATIME;
                if (!fc->writeback_cache || !S_ISREG(attr->mode))
                        inode->i_flags |= S_NOCMTIME;
                inode->i_generation = generation;
                fuse_init_inode(inode, attr, fc);
                unlock_new_inode(inode);
        } else if (fuse_stale_inode(inode, generation, attr)) {
                /* nodeid was reused, any I/O on the old inode should fail */
                fuse_make_bad(inode);
                iput(inode);
                goto retry;
        }
        fi = get_fuse_inode(inode);
        spin_lock(&fi->lock);
        fi->nlookup++;
        spin_unlock(&fi->lock);
done:
        fuse_change_attributes(inode, attr, NULL, attr_valid, attr_version);

        return inode;
}
struct inode *fuse_ilookup(struct fuse_conn *fc, u64 nodeid,
                           struct fuse_mount **fm)
{
        struct fuse_mount *fm_iter;
        struct inode *inode;

        WARN_ON(!rwsem_is_locked(&fc->killsb));
        list_for_each_entry(fm_iter, &fc->mounts, fc_entry) {
                if (!fm_iter->sb)
                        continue;

                inode = ilookup5(fm_iter->sb, nodeid, fuse_inode_eq, &nodeid);
                if (inode) {
                        if (fm)
                                *fm = fm_iter;
                        return inode;
                }
        }

        return NULL;
}
int fuse_reverse_inval_inode(struct fuse_conn *fc, u64 nodeid,
                             loff_t offset, loff_t len)
{
        struct fuse_inode *fi;
        struct inode *inode;
        pgoff_t pg_start;
        pgoff_t pg_end;

        inode = fuse_ilookup(fc, nodeid, NULL);
        if (!inode)
                return -ENOENT;

        fi = get_fuse_inode(inode);
        spin_lock(&fi->lock);
        fi->attr_version = atomic64_inc_return(&fc->attr_version);
        spin_unlock(&fi->lock);

        fuse_invalidate_attr(inode);
        forget_all_cached_acls(inode);
        if (offset >= 0) {
                pg_start = offset >> PAGE_SHIFT;
                if (len <= 0)
                        pg_end = -1;
                else
                        pg_end = (offset + len - 1) >> PAGE_SHIFT;
                invalidate_inode_pages2_range(inode->i_mapping,
                                              pg_start, pg_end);
        }
        iput(inode);
        return 0;
}
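
/*
 * Example (illustrative, assuming 4 KiB pages): offset = 5000, len = 3000
 * invalidates pg_start = 5000 >> 12 = 1 through
 * pg_end = (5000 + 3000 - 1) >> 12 = 1, i.e. just the second page of the
 * mapping; len <= 0 means "to the end of the file" (pg_end = -1).
 */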
bool fuse_lock_inode(struct inode *inode)
{
        bool locked = false;

        if (!get_fuse_conn(inode)->parallel_dirops) {
                mutex_lock(&get_fuse_inode(inode)->mutex);
                locked = true;
        }

        return locked;
}

void fuse_unlock_inode(struct inode *inode, bool locked)
{
        if (locked)
                mutex_unlock(&get_fuse_inode(inode)->mutex);
}
static void fuse_umount_begin(struct super_block *sb)
{
        struct fuse_conn *fc = get_fuse_conn_super(sb);

        if (fc->no_force_umount)
                return;

        fuse_abort_conn(fc);

        // Only retire block-device-based superblocks.
        if (sb->s_bdev != NULL)
                retire_super(sb);
}
static void fuse_send_destroy(struct fuse_mount *fm)
{
        if (fm->fc->conn_init) {
                FUSE_ARGS(args);

                args.opcode = FUSE_DESTROY;
                args.force = true;
                args.nocreds = true;
                fuse_simple_request(fm, &args);
        }
}
static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr)
{
        stbuf->f_type    = FUSE_SUPER_MAGIC;
        stbuf->f_bsize   = attr->bsize;
        stbuf->f_frsize  = attr->frsize;
        stbuf->f_blocks  = attr->blocks;
        stbuf->f_bfree   = attr->bfree;
        stbuf->f_bavail  = attr->bavail;
        stbuf->f_files   = attr->files;
        stbuf->f_ffree   = attr->ffree;
        stbuf->f_namelen = attr->namelen;
        /* fsid is left zero */
}
static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct super_block *sb = dentry->d_sb;
        struct fuse_mount *fm = get_fuse_mount_super(sb);
        FUSE_ARGS(args);
        struct fuse_statfs_out outarg;
        int err;

        if (!fuse_allow_current_process(fm->fc)) {
                buf->f_type = FUSE_SUPER_MAGIC;
                return 0;
        }

        memset(&outarg, 0, sizeof(outarg));
        args.in_numargs = 0;
        args.opcode = FUSE_STATFS;
        args.nodeid = get_node_id(d_inode(dentry));
        args.out_numargs = 1;
        args.out_args[0].size = sizeof(outarg);
        args.out_args[0].value = &outarg;
        err = fuse_simple_request(fm, &args);
        if (!err)
                convert_fuse_statfs(buf, &outarg.st);
        return err;
}
static struct fuse_sync_bucket *fuse_sync_bucket_alloc(void)
{
        struct fuse_sync_bucket *bucket;

        bucket = kzalloc(sizeof(*bucket), GFP_KERNEL | __GFP_NOFAIL);
        if (bucket) {
                init_waitqueue_head(&bucket->waitq);
                /* Initial active count */
                atomic_set(&bucket->count, 1);
        }
        return bucket;
}
static void fuse_sync_fs_writes(struct fuse_conn *fc)
{
        struct fuse_sync_bucket *bucket, *new_bucket;
        int count;

        new_bucket = fuse_sync_bucket_alloc();
        spin_lock(&fc->lock);
        bucket = rcu_dereference_protected(fc->curr_bucket, 1);
        count = atomic_read(&bucket->count);
        WARN_ON(count < 1);
        /* No outstanding writes? */
        if (count == 1) {
                spin_unlock(&fc->lock);
                kfree(new_bucket);
                return;
        }

        /*
         * Completion of new bucket depends on completion of this bucket, so add
         * one more count.
         */
        atomic_inc(&new_bucket->count);
        rcu_assign_pointer(fc->curr_bucket, new_bucket);
        spin_unlock(&fc->lock);
        /*
         * Drop initial active count.  At this point if all writes in this and
         * ancestor buckets complete, the count will go to zero and this task
         * will be woken up.
         */
        atomic_dec(&bucket->count);

        wait_event(bucket->waitq, atomic_read(&bucket->count) == 0);

        /* Drop temp count on descendant bucket */
        fuse_sync_bucket_dec(new_bucket);
        kfree_rcu(bucket, rcu);
}
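
/*
 * Lifecycle sketch (illustrative): every write in flight holds a count on the
 * bucket that was current when the write started. fuse_sync_fs_writes() swaps
 * in a fresh bucket, drops the old bucket's initial count, and sleeps until
 * every write accounted to the old bucket (and, transitively, its ancestors)
 * has completed and the count reaches zero.
 */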
static int fuse_sync_fs(struct super_block *sb, int wait)
{
        struct fuse_mount *fm = get_fuse_mount_super(sb);
        struct fuse_conn *fc = fm->fc;
        struct fuse_syncfs_in inarg;
        FUSE_ARGS(args);
        int err;

        /*
         * Userspace cannot handle the wait == 0 case.  Avoid a
         * gratuitous roundtrip.
         */
        if (!wait)
                return 0;

        /* The filesystem is being unmounted.  Nothing to do. */
        if (!sb->s_root)
                return 0;

        if (!fc->sync_fs)
                return 0;

        fuse_sync_fs_writes(fc);

        memset(&inarg, 0, sizeof(inarg));
        args.in_numargs = 1;
        args.in_args[0].size = sizeof(inarg);
        args.in_args[0].value = &inarg;
        args.opcode = FUSE_SYNCFS;
        args.nodeid = get_node_id(sb->s_root->d_inode);
        args.out_numargs = 0;

        err = fuse_simple_request(fm, &args);
        if (err == -ENOSYS) {
                fc->sync_fs = 0;
                err = 0;
        }

        return err;
}
enum {
        OPT_SOURCE,
        OPT_SUBTYPE,
        OPT_FD,
        OPT_ROOTMODE,
        OPT_USER_ID,
        OPT_GROUP_ID,
        OPT_DEFAULT_PERMISSIONS,
        OPT_ALLOW_OTHER,
        OPT_MAX_READ,
        OPT_BLKSIZE,
        OPT_ERR
};

static const struct fs_parameter_spec fuse_fs_parameters[] = {
        fsparam_string  ("source",              OPT_SOURCE),
        fsparam_u32     ("fd",                  OPT_FD),
        fsparam_u32oct  ("rootmode",            OPT_ROOTMODE),
        fsparam_u32     ("user_id",             OPT_USER_ID),
        fsparam_u32     ("group_id",            OPT_GROUP_ID),
        fsparam_flag    ("default_permissions", OPT_DEFAULT_PERMISSIONS),
        fsparam_flag    ("allow_other",         OPT_ALLOW_OTHER),
        fsparam_u32     ("max_read",            OPT_MAX_READ),
        fsparam_u32     ("blksize",             OPT_BLKSIZE),
        fsparam_string  ("subtype",             OPT_SUBTYPE),
        {}
};
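
/*
 * Example (illustrative; the exact option string depends on the libfuse
 * version doing the mount): a typical unprivileged fuse mount ends up
 * roughly equivalent to
 *
 *     mount -t fuse -o fd=3,rootmode=40000,user_id=1000,group_id=1000 \
 *           myfs /mnt/myfs
 *
 * where fd refers to an already opened /dev/fuse descriptor and rootmode
 * is the octal file mode of the filesystem root (40000 = S_IFDIR).
 */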
static int fuse_parse_param(struct fs_context *fsc, struct fs_parameter *param)
{
        struct fs_parse_result result;
        struct fuse_fs_context *ctx = fsc->fs_private;
        int opt;

        if (fsc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
                /*
                 * Ignore options coming from mount(MS_REMOUNT) for backward
                 * compatibility.
                 */
                if (fsc->oldapi)
                        return 0;

                return invalfc(fsc, "No changes allowed in reconfigure");
        }

        opt = fs_parse(fsc, fuse_fs_parameters, param, &result);
        if (opt < 0)
                return opt;

        switch (opt) {
        case OPT_SOURCE:
                if (fsc->source)
                        return invalfc(fsc, "Multiple sources specified");
                fsc->source = param->string;
                param->string = NULL;
                break;

        case OPT_SUBTYPE:
                if (ctx->subtype)
                        return invalfc(fsc, "Multiple subtypes specified");
                ctx->subtype = param->string;
                param->string = NULL;
                break;

        case OPT_FD:
                ctx->fd = result.uint_32;
                ctx->fd_present = true;
                break;

        case OPT_ROOTMODE:
                if (!fuse_valid_type(result.uint_32))
                        return invalfc(fsc, "Invalid rootmode");
                ctx->rootmode = result.uint_32;
                ctx->rootmode_present = true;
                break;

        case OPT_USER_ID:
                ctx->user_id = make_kuid(fsc->user_ns, result.uint_32);
                if (!uid_valid(ctx->user_id))
                        return invalfc(fsc, "Invalid user_id");
                ctx->user_id_present = true;
                break;

        case OPT_GROUP_ID:
                ctx->group_id = make_kgid(fsc->user_ns, result.uint_32);
                if (!gid_valid(ctx->group_id))
                        return invalfc(fsc, "Invalid group_id");
                ctx->group_id_present = true;
                break;

        case OPT_DEFAULT_PERMISSIONS:
                ctx->default_permissions = true;
                break;

        case OPT_ALLOW_OTHER:
                ctx->allow_other = true;
                break;

        case OPT_MAX_READ:
                ctx->max_read = result.uint_32;
                break;

        case OPT_BLKSIZE:
                if (!ctx->is_bdev)
                        return invalfc(fsc, "blksize only supported for fuseblk");
                ctx->blksize = result.uint_32;
                break;

        default:
                return -EINVAL;
        }

        return 0;
}
static void fuse_free_fsc(struct fs_context *fsc)
{
        struct fuse_fs_context *ctx = fsc->fs_private;

        if (ctx) {
                kfree(ctx->subtype);
                kfree(ctx);
        }
}
static int fuse_show_options(struct seq_file *m, struct dentry *root)
{
        struct super_block *sb = root->d_sb;
        struct fuse_conn *fc = get_fuse_conn_super(sb);

        if (fc->legacy_opts_show) {
                seq_printf(m, ",user_id=%u",
                           from_kuid_munged(fc->user_ns, fc->user_id));
                seq_printf(m, ",group_id=%u",
                           from_kgid_munged(fc->user_ns, fc->group_id));
                if (fc->default_permissions)
                        seq_puts(m, ",default_permissions");
                if (fc->allow_other)
                        seq_puts(m, ",allow_other");
                if (fc->max_read != ~0)
                        seq_printf(m, ",max_read=%u", fc->max_read);
                if (sb->s_bdev && sb->s_blocksize != FUSE_DEFAULT_BLKSIZE)
                        seq_printf(m, ",blksize=%lu", sb->s_blocksize);
        }
#ifdef CONFIG_FUSE_DAX
        if (fc->dax_mode == FUSE_DAX_ALWAYS)
                seq_puts(m, ",dax=always");
        else if (fc->dax_mode == FUSE_DAX_NEVER)
                seq_puts(m, ",dax=never");
        else if (fc->dax_mode == FUSE_DAX_INODE_USER)
                seq_puts(m, ",dax=inode");
#endif

        return 0;
}
static void fuse_iqueue_init(struct fuse_iqueue *fiq,
                             const struct fuse_iqueue_ops *ops,
                             void *priv)
{
        memset(fiq, 0, sizeof(struct fuse_iqueue));
        spin_lock_init(&fiq->lock);
        init_waitqueue_head(&fiq->waitq);
        INIT_LIST_HEAD(&fiq->pending);
        INIT_LIST_HEAD(&fiq->interrupts);
        fiq->forget_list_tail = &fiq->forget_list_head;
        fiq->connected = 1;
        fiq->ops = ops;
        fiq->priv = priv;
}
static void fuse_pqueue_init(struct fuse_pqueue *fpq)
{
        unsigned int i;

        spin_lock_init(&fpq->lock);
        for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
                INIT_LIST_HEAD(&fpq->processing[i]);
        INIT_LIST_HEAD(&fpq->io);
        fpq->connected = 1;
}
void fuse_conn_init(struct fuse_conn *fc, struct fuse_mount *fm,
                    struct user_namespace *user_ns,
                    const struct fuse_iqueue_ops *fiq_ops, void *fiq_priv)
{
        memset(fc, 0, sizeof(*fc));
        spin_lock_init(&fc->lock);
        spin_lock_init(&fc->bg_lock);
        init_rwsem(&fc->killsb);
        refcount_set(&fc->count, 1);
        atomic_set(&fc->dev_count, 1);
        init_waitqueue_head(&fc->blocked_waitq);
        fuse_iqueue_init(&fc->iq, fiq_ops, fiq_priv);
        INIT_LIST_HEAD(&fc->bg_queue);
        INIT_LIST_HEAD(&fc->entry);
        INIT_LIST_HEAD(&fc->devices);
        atomic_set(&fc->num_waiting, 0);
        fc->max_background = FUSE_DEFAULT_MAX_BACKGROUND;
        fc->congestion_threshold = FUSE_DEFAULT_CONGESTION_THRESHOLD;
        atomic64_set(&fc->khctr, 0);
        fc->polled_files = RB_ROOT;
        fc->blocked = 0;
        fc->initialized = 0;
        fc->connected = 1;
        atomic64_set(&fc->attr_version, 1);
        get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
        fc->pid_ns = get_pid_ns(task_active_pid_ns(current));
        fc->user_ns = get_user_ns(user_ns);
        fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ;
        fc->max_pages_limit = FUSE_MAX_MAX_PAGES;

        INIT_LIST_HEAD(&fc->mounts);
        list_add(&fm->fc_entry, &fc->mounts);
        fm->fc = fc;
}
EXPORT_SYMBOL_GPL(fuse_conn_init);
void fuse_conn_put(struct fuse_conn *fc)
{
        if (refcount_dec_and_test(&fc->count)) {
                struct fuse_iqueue *fiq = &fc->iq;
                struct fuse_sync_bucket *bucket;

                if (IS_ENABLED(CONFIG_FUSE_DAX))
                        fuse_dax_conn_free(fc);
                if (fiq->ops->release)
                        fiq->ops->release(fiq);
                put_pid_ns(fc->pid_ns);
                put_user_ns(fc->user_ns);
                bucket = rcu_dereference_protected(fc->curr_bucket, 1);
                if (bucket) {
                        WARN_ON(atomic_read(&bucket->count) != 1);
                        kfree(bucket);
                }
                fc->release(fc);
        }
}
EXPORT_SYMBOL_GPL(fuse_conn_put);

struct fuse_conn *fuse_conn_get(struct fuse_conn *fc)
{
        refcount_inc(&fc->count);
        return fc;
}
EXPORT_SYMBOL_GPL(fuse_conn_get);
static struct inode *fuse_get_root_inode(struct super_block *sb, unsigned mode)
{
        struct fuse_attr attr;
        memset(&attr, 0, sizeof(attr));

        attr.mode = mode;
        attr.ino = FUSE_ROOT_ID;
        attr.nlink = 1;
        return fuse_iget(sb, 1, 0, &attr, 0, 0);
}
struct fuse_inode_handle {
        u64 nodeid;
        u32 generation;
};
static struct dentry *fuse_get_dentry(struct super_block *sb,
                                      struct fuse_inode_handle *handle)
{
        struct fuse_conn *fc = get_fuse_conn_super(sb);
        struct inode *inode;
        struct dentry *entry;
        int err = -ESTALE;

        if (handle->nodeid == 0)
                goto out_err;

        inode = ilookup5(sb, handle->nodeid, fuse_inode_eq, &handle->nodeid);
        if (!inode) {
                struct fuse_entry_out outarg;
                const struct qstr name = QSTR_INIT(".", 1);

                if (!fc->export_support)
                        goto out_err;

                err = fuse_lookup_name(sb, handle->nodeid, &name, &outarg,
                                       &inode);
                if (err && err != -ENOENT)
                        goto out_err;
                if (err || !inode) {
                        err = -ESTALE;
                        goto out_err;
                }
                err = -EIO;
                if (get_node_id(inode) != handle->nodeid)
                        goto out_iput;
        }
        err = -ESTALE;
        if (inode->i_generation != handle->generation)
                goto out_iput;

        entry = d_obtain_alias(inode);
        if (!IS_ERR(entry) && get_node_id(inode) != FUSE_ROOT_ID)
                fuse_invalidate_entry_cache(entry);

        return entry;

 out_iput:
        iput(inode);
 out_err:
        return ERR_PTR(err);
}
static int fuse_encode_fh(struct inode *inode, u32 *fh, int *max_len,
                          struct inode *parent)
{
        int len = parent ? 6 : 3;
        u64 nodeid;
        u32 generation;

        if (*max_len < len) {
                *max_len = len;
                return FILEID_INVALID;
        }

        nodeid = get_fuse_inode(inode)->nodeid;
        generation = inode->i_generation;

        fh[0] = (u32)(nodeid >> 32);
        fh[1] = (u32)(nodeid & 0xffffffff);
        fh[2] = generation;

        if (parent) {
                nodeid = get_fuse_inode(parent)->nodeid;
                generation = parent->i_generation;

                fh[3] = (u32)(nodeid >> 32);
                fh[4] = (u32)(nodeid & 0xffffffff);
                fh[5] = generation;
        }

        *max_len = len;
        return parent ? FILEID_INO64_GEN_PARENT : FILEID_INO64_GEN;
}
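
/*
 * Resulting handle layout (for reference): fh[0]/fh[1] carry the high and low
 * 32 bits of the nodeid and fh[2] the generation; fh[3..5] repeat the same
 * triple for the parent when FILEID_INO64_GEN_PARENT is returned. The decode
 * helpers below reassemble the handle with exactly this layout.
 */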
static struct dentry *fuse_fh_to_dentry(struct super_block *sb,
                                        struct fid *fid, int fh_len, int fh_type)
{
        struct fuse_inode_handle handle;

        if ((fh_type != FILEID_INO64_GEN &&
             fh_type != FILEID_INO64_GEN_PARENT) || fh_len < 3)
                return NULL;

        handle.nodeid = (u64) fid->raw[0] << 32;
        handle.nodeid |= (u64) fid->raw[1];
        handle.generation = fid->raw[2];
        return fuse_get_dentry(sb, &handle);
}
static struct dentry *fuse_fh_to_parent(struct super_block *sb,
                                        struct fid *fid, int fh_len, int fh_type)
{
        struct fuse_inode_handle parent;

        if (fh_type != FILEID_INO64_GEN_PARENT || fh_len < 6)
                return NULL;

        parent.nodeid = (u64) fid->raw[3] << 32;
        parent.nodeid |= (u64) fid->raw[4];
        parent.generation = fid->raw[5];
        return fuse_get_dentry(sb, &parent);
}
static struct dentry *fuse_get_parent(struct dentry *child)
{
        struct inode *child_inode = d_inode(child);
        struct fuse_conn *fc = get_fuse_conn(child_inode);
        struct inode *inode;
        struct dentry *parent;
        struct fuse_entry_out outarg;
        int err;

        if (!fc->export_support)
                return ERR_PTR(-ESTALE);

        err = fuse_lookup_name(child_inode->i_sb, get_node_id(child_inode),
                               &dotdot_name, &outarg, &inode);
        if (err) {
                if (err == -ENOENT)
                        return ERR_PTR(-ESTALE);
                return ERR_PTR(err);
        }

        parent = d_obtain_alias(inode);
        if (!IS_ERR(parent) && get_node_id(inode) != FUSE_ROOT_ID)
                fuse_invalidate_entry_cache(parent);

        return parent;
}
static const struct export_operations fuse_export_operations = {
        .fh_to_dentry   = fuse_fh_to_dentry,
        .fh_to_parent   = fuse_fh_to_parent,
        .encode_fh      = fuse_encode_fh,
        .get_parent     = fuse_get_parent,
};

static const struct super_operations fuse_super_operations = {
        .alloc_inode    = fuse_alloc_inode,
        .free_inode     = fuse_free_inode,
        .evict_inode    = fuse_evict_inode,
        .write_inode    = fuse_write_inode,
        .drop_inode     = generic_delete_inode,
        .umount_begin   = fuse_umount_begin,
        .statfs         = fuse_statfs,
        .sync_fs        = fuse_sync_fs,
        .show_options   = fuse_show_options,
};
static void sanitize_global_limit(unsigned *limit)
{
        /*
         * The default maximum number of async requests is calculated to consume
         * 1/2^13 of the total memory, assuming 392 bytes per request.
         */
        if (*limit == 0)
                *limit = ((totalram_pages() << PAGE_SHIFT) >> 13) / 392;

        if (*limit >= 1 << 16)
                *limit = (1 << 16) - 1;
}
static int set_global_limit(const char *val, const struct kernel_param *kp)
{
        int rv;

        rv = param_set_uint(val, kp);
        if (rv)
                return rv;

        sanitize_global_limit((unsigned *)kp->arg);

        return 0;
}
static void process_init_limits(struct fuse_conn *fc, struct fuse_init_out *arg)
{
        int cap_sys_admin = capable(CAP_SYS_ADMIN);

        if (arg->minor < 13)
                return;

        sanitize_global_limit(&max_user_bgreq);
        sanitize_global_limit(&max_user_congthresh);

        spin_lock(&fc->bg_lock);
        if (arg->max_background) {
                fc->max_background = arg->max_background;

                if (!cap_sys_admin && fc->max_background > max_user_bgreq)
                        fc->max_background = max_user_bgreq;
        }
        if (arg->congestion_threshold) {
                fc->congestion_threshold = arg->congestion_threshold;

                if (!cap_sys_admin &&
                    fc->congestion_threshold > max_user_congthresh)
                        fc->congestion_threshold = max_user_congthresh;
        }
        spin_unlock(&fc->bg_lock);
}
struct fuse_init_args {
        struct fuse_args args;
        struct fuse_init_in in;
        struct fuse_init_out out;
};
static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
                               int error)
{
        struct fuse_conn *fc = fm->fc;
        struct fuse_init_args *ia = container_of(args, typeof(*ia), args);
        struct fuse_init_out *arg = &ia->out;
        bool ok = true;

        if (error || arg->major != FUSE_KERNEL_VERSION)
                ok = false;
        else {
                unsigned long ra_pages;

                process_init_limits(fc, arg);

                if (arg->minor >= 6) {
                        u64 flags = arg->flags;

                        if (flags & FUSE_INIT_EXT)
                                flags |= (u64) arg->flags2 << 32;

                        ra_pages = arg->max_readahead / PAGE_SIZE;
                        if (flags & FUSE_ASYNC_READ)
                                fc->async_read = 1;
                        if (!(flags & FUSE_POSIX_LOCKS))
                                fc->no_lock = 1;
                        if (arg->minor >= 17) {
                                if (!(flags & FUSE_FLOCK_LOCKS))
                                        fc->no_flock = 1;
                        } else {
                                if (!(flags & FUSE_POSIX_LOCKS))
                                        fc->no_flock = 1;
                        }
                        if (flags & FUSE_ATOMIC_O_TRUNC)
                                fc->atomic_o_trunc = 1;
                        if (arg->minor >= 9) {
                                /* LOOKUP has dependency on proto version */
                                if (flags & FUSE_EXPORT_SUPPORT)
                                        fc->export_support = 1;
                        }
                        if (flags & FUSE_BIG_WRITES)
                                fc->big_writes = 1;
                        if (flags & FUSE_DONT_MASK)
                                fc->dont_mask = 1;
                        if (flags & FUSE_AUTO_INVAL_DATA)
                                fc->auto_inval_data = 1;
                        else if (flags & FUSE_EXPLICIT_INVAL_DATA)
                                fc->explicit_inval_data = 1;
                        if (flags & FUSE_DO_READDIRPLUS) {
                                fc->do_readdirplus = 1;
                                if (flags & FUSE_READDIRPLUS_AUTO)
                                        fc->readdirplus_auto = 1;
                        }
                        if (flags & FUSE_ASYNC_DIO)
                                fc->async_dio = 1;
                        if (flags & FUSE_WRITEBACK_CACHE)
                                fc->writeback_cache = 1;
                        if (flags & FUSE_PARALLEL_DIROPS)
                                fc->parallel_dirops = 1;
                        if (flags & FUSE_HANDLE_KILLPRIV)
                                fc->handle_killpriv = 1;
                        if (arg->time_gran && arg->time_gran <= 1000000000)
                                fm->sb->s_time_gran = arg->time_gran;
                        if ((flags & FUSE_POSIX_ACL)) {
                                fc->default_permissions = 1;
                                fc->posix_acl = 1;
                        }
                        if (flags & FUSE_CACHE_SYMLINKS)
                                fc->cache_symlinks = 1;
                        if (flags & FUSE_ABORT_ERROR)
                                fc->abort_err = 1;
                        if (flags & FUSE_MAX_PAGES) {
                                fc->max_pages =
                                        min_t(unsigned int, fc->max_pages_limit,
                                        max_t(unsigned int, arg->max_pages, 1));
                        }
                        if (IS_ENABLED(CONFIG_FUSE_DAX)) {
                                if (flags & FUSE_MAP_ALIGNMENT &&
                                    !fuse_dax_check_alignment(fc, arg->map_alignment)) {
                                        ok = false;
                                }
                                if (flags & FUSE_HAS_INODE_DAX)
                                        fc->inode_dax = 1;
                        }
                        if (flags & FUSE_HANDLE_KILLPRIV_V2) {
                                fc->handle_killpriv_v2 = 1;
                                fm->sb->s_flags |= SB_NOSEC;
                        }
                        if (flags & FUSE_SETXATTR_EXT)
                                fc->setxattr_ext = 1;
                        if (flags & FUSE_SECURITY_CTX)
                                fc->init_security = 1;
                        if (flags & FUSE_CREATE_SUPP_GROUP)
                                fc->create_supp_group = 1;
                        if (flags & FUSE_DIRECT_IO_ALLOW_MMAP)
                                fc->direct_io_allow_mmap = 1;
                        /*
                         * max_stack_depth is the max stack depth of FUSE fs,
                         * so it has to be at least 1 to support passthrough
                         * to backing files.
                         *
                         * With max_stack_depth > 1, the backing files can be
                         * on a stacked fs (e.g. overlayfs) themselves and with
                         * max_stack_depth == 1, FUSE fs can be stacked as the
                         * underlying fs of a stacked fs (e.g. overlayfs).
                         */
                        if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH) &&
                            (flags & FUSE_PASSTHROUGH) &&
                            arg->max_stack_depth > 0 &&
                            arg->max_stack_depth <= FILESYSTEM_MAX_STACK_DEPTH) {
                                fc->passthrough = 1;
                                fc->max_stack_depth = arg->max_stack_depth;
                                fm->sb->s_stack_depth = arg->max_stack_depth;
                        }
                } else {
                        ra_pages = fc->max_read / PAGE_SIZE;
                        fc->no_lock = 1;
                        fc->no_flock = 1;
                }

                fm->sb->s_bdi->ra_pages =
                                min(fm->sb->s_bdi->ra_pages, ra_pages);
                fc->minor = arg->minor;
                fc->max_write = arg->minor < 5 ? 4096 : arg->max_write;
                fc->max_write = max_t(unsigned, 4096, fc->max_write);
                fc->conn_init = 1;
        }
        kfree(ia);

        if (!ok) {
                fc->conn_init = 0;
                fc->conn_error = 1;
        }

        fuse_set_initialized(fc);
        wake_up_all(&fc->blocked_waitq);
}
void fuse_send_init(struct fuse_mount *fm)
{
        struct fuse_init_args *ia;
        u64 flags;

        ia = kzalloc(sizeof(*ia), GFP_KERNEL | __GFP_NOFAIL);

        ia->in.major = FUSE_KERNEL_VERSION;
        ia->in.minor = FUSE_KERNEL_MINOR_VERSION;
        ia->in.max_readahead = fm->sb->s_bdi->ra_pages * PAGE_SIZE;
        flags =
                FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
                FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
                FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
                FUSE_FLOCK_LOCKS | FUSE_HAS_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
                FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
                FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT |
                FUSE_PARALLEL_DIROPS | FUSE_HANDLE_KILLPRIV | FUSE_POSIX_ACL |
                FUSE_ABORT_ERROR | FUSE_MAX_PAGES | FUSE_CACHE_SYMLINKS |
                FUSE_NO_OPENDIR_SUPPORT | FUSE_EXPLICIT_INVAL_DATA |
                FUSE_HANDLE_KILLPRIV_V2 | FUSE_SETXATTR_EXT | FUSE_INIT_EXT |
                FUSE_SECURITY_CTX | FUSE_CREATE_SUPP_GROUP |
                FUSE_HAS_EXPIRE_ONLY | FUSE_DIRECT_IO_ALLOW_MMAP;
#ifdef CONFIG_FUSE_DAX
        if (fm->fc->dax)
                flags |= FUSE_MAP_ALIGNMENT;
        if (fuse_is_inode_dax_mode(fm->fc->dax_mode))
                flags |= FUSE_HAS_INODE_DAX;
#endif
        if (fm->fc->auto_submounts)
                flags |= FUSE_SUBMOUNTS;
        if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
                flags |= FUSE_PASSTHROUGH;

        ia->in.flags = flags;
        ia->in.flags2 = flags >> 32;

        ia->args.opcode = FUSE_INIT;
        ia->args.in_numargs = 1;
        ia->args.in_args[0].size = sizeof(ia->in);
        ia->args.in_args[0].value = &ia->in;
        ia->args.out_numargs = 1;
        /* Variable length argument used for backward compatibility
           with interface version < 7.5.  Rest of init_out is zeroed
           by do_get_request(), so a short reply is not a problem */
        ia->args.out_argvar = true;
        ia->args.out_args[0].size = sizeof(ia->out);
        ia->args.out_args[0].value = &ia->out;
        ia->args.force = true;
        ia->args.nocreds = true;
        ia->args.end = process_init_reply;

        if (fuse_simple_background(fm, &ia->args, GFP_KERNEL) != 0)
                process_init_reply(fm, &ia->args, -ENOTCONN);
}
EXPORT_SYMBOL_GPL(fuse_send_init);
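
/*
 * Negotiation note (illustrative): the daemon answers FUSE_INIT with the
 * subset of the advertised capabilities it actually implements. For example,
 * a server that wants kernel-side caching of dirty pages sets
 * FUSE_WRITEBACK_CACHE in fuse_init_out.flags, which process_init_reply()
 * above turns into fc->writeback_cache = 1.
 */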
void fuse_free_conn(struct fuse_conn *fc)
{
        WARN_ON(!list_empty(&fc->devices));
        kfree(fc);
}
EXPORT_SYMBOL_GPL(fuse_free_conn);
static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
{
        int err;
        char *suffix = "";

        if (sb->s_bdev) {
                suffix = "-fuseblk";
                /*
                 * sb->s_bdi points to blkdev's bdi however we want to redirect
                 * it to our private bdi...
                 */
                bdi_put(sb->s_bdi);
                sb->s_bdi = &noop_backing_dev_info;
        }
        err = super_setup_bdi_name(sb, "%u:%u%s", MAJOR(fc->dev),
                                   MINOR(fc->dev), suffix);
        if (err)
                return err;

        /* fuse does its own writeback accounting */
        sb->s_bdi->capabilities &= ~BDI_CAP_WRITEBACK_ACCT;
        sb->s_bdi->capabilities |= BDI_CAP_STRICTLIMIT;

        /*
         * For a single fuse filesystem use max 1% of dirty +
         * writeback threshold.
         *
         * This gives about 1M of write buffer for memory maps on a
         * machine with 1G and 10% dirty_ratio, which should be more
         * than enough.
         *
         * Privileged users can raise it by writing to
         *
         *    /sys/class/bdi/<bdi>/max_ratio
         */
        bdi_set_max_ratio(sb->s_bdi, 1);

        return 0;
}
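
/*
 * Worked example (illustrative): with 1 GiB of RAM and dirty_ratio = 10%,
 * the global dirty threshold is ~100 MiB, so the 1% max_ratio set above
 * gives this filesystem roughly 1 MiB of write buffer, matching the
 * comment in fuse_bdi_init().
 */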
struct fuse_dev *fuse_dev_alloc(void)
{
        struct fuse_dev *fud;
        struct list_head *pq;

        fud = kzalloc(sizeof(struct fuse_dev), GFP_KERNEL);
        if (!fud)
                return NULL;

        pq = kcalloc(FUSE_PQ_HASH_SIZE, sizeof(struct list_head), GFP_KERNEL);
        if (!pq) {
                kfree(fud);
                return NULL;
        }

        fud->pq.processing = pq;
        fuse_pqueue_init(&fud->pq);

        return fud;
}
EXPORT_SYMBOL_GPL(fuse_dev_alloc);
void fuse_dev_install(struct fuse_dev *fud, struct fuse_conn *fc)
{
        fud->fc = fuse_conn_get(fc);
        spin_lock(&fc->lock);
        list_add_tail(&fud->entry, &fc->devices);
        spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_dev_install);

struct fuse_dev *fuse_dev_alloc_install(struct fuse_conn *fc)
{
        struct fuse_dev *fud;

        fud = fuse_dev_alloc();
        if (!fud)
                return NULL;

        fuse_dev_install(fud, fc);
        return fud;
}
EXPORT_SYMBOL_GPL(fuse_dev_alloc_install);
void fuse_dev_free(struct fuse_dev *fud)
{
        struct fuse_conn *fc = fud->fc;

        if (fc) {
                spin_lock(&fc->lock);
                list_del(&fud->entry);
                spin_unlock(&fc->lock);

                fuse_conn_put(fc);
        }
        kfree(fud->pq.processing);
        kfree(fud);
}
EXPORT_SYMBOL_GPL(fuse_dev_free);
static void fuse_fill_attr_from_inode(struct fuse_attr *attr,
                                      const struct fuse_inode *fi)
{
        struct timespec64 atime = inode_get_atime(&fi->inode);
        struct timespec64 mtime = inode_get_mtime(&fi->inode);
        struct timespec64 ctime = inode_get_ctime(&fi->inode);

        *attr = (struct fuse_attr){
                .ino            = fi->inode.i_ino,
                .size           = fi->inode.i_size,
                .blocks         = fi->inode.i_blocks,
                .atime          = atime.tv_sec,
                .mtime          = mtime.tv_sec,
                .ctime          = ctime.tv_sec,
                .atimensec      = atime.tv_nsec,
                .mtimensec      = mtime.tv_nsec,
                .ctimensec      = ctime.tv_nsec,
                .mode           = fi->inode.i_mode,
                .nlink          = fi->inode.i_nlink,
                .uid            = fi->inode.i_uid.val,
                .gid            = fi->inode.i_gid.val,
                .rdev           = fi->inode.i_rdev,
                .blksize        = 1u << fi->inode.i_blkbits,
        };
}
static void fuse_sb_defaults(struct super_block *sb)
{
        sb->s_magic = FUSE_SUPER_MAGIC;
        sb->s_op = &fuse_super_operations;
        sb->s_xattr = fuse_xattr_handlers;
        sb->s_maxbytes = MAX_LFS_FILESIZE;
        sb->s_time_gran = 1;
        sb->s_export_op = &fuse_export_operations;
        sb->s_iflags |= SB_I_IMA_UNVERIFIABLE_SIGNATURE;
        if (sb->s_user_ns != &init_user_ns)
                sb->s_iflags |= SB_I_UNTRUSTED_MOUNTER;
        sb->s_flags &= ~(SB_NOSEC | SB_I_VERSION);
}
static int fuse_fill_super_submount(struct super_block *sb,
                                    struct fuse_inode *parent_fi)
{
        struct fuse_mount *fm = get_fuse_mount_super(sb);
        struct super_block *parent_sb = parent_fi->inode.i_sb;
        struct fuse_attr root_attr;
        struct inode *root;
        struct fuse_submount_lookup *sl;
        struct fuse_inode *fi;

        fuse_sb_defaults(sb);
        fm->sb = sb;

        WARN_ON(sb->s_bdi != &noop_backing_dev_info);
        sb->s_bdi = bdi_get(parent_sb->s_bdi);

        sb->s_xattr = parent_sb->s_xattr;
        sb->s_time_gran = parent_sb->s_time_gran;
        sb->s_blocksize = parent_sb->s_blocksize;
        sb->s_blocksize_bits = parent_sb->s_blocksize_bits;
        sb->s_subtype = kstrdup(parent_sb->s_subtype, GFP_KERNEL);
        if (parent_sb->s_subtype && !sb->s_subtype)
                return -ENOMEM;

        fuse_fill_attr_from_inode(&root_attr, parent_fi);
        root = fuse_iget(sb, parent_fi->nodeid, 0, &root_attr, 0, 0);
        /*
         * This inode is just a duplicate, so it is not looked up and
         * its nlookup should not be incremented.  fuse_iget() does
         * that, though, so undo it here.
         */
        fi = get_fuse_inode(root);
        fi->nlookup--;

        sb->s_d_op = &fuse_dentry_operations;
        sb->s_root = d_make_root(root);
        if (!sb->s_root)
                return -ENOMEM;

        /*
         * Grab the parent's submount_lookup pointer and take a
         * reference on the shared nlookup from the parent.  This is to
         * prevent the last forget for this nodeid from getting
         * triggered until all users have finished with it.
         */
        sl = parent_fi->submount_lookup;
        WARN_ON(!sl);
        if (sl) {
                refcount_inc(&sl->count);
                fi->submount_lookup = sl;
        }

        return 0;
}
/* Filesystem context private data holds the FUSE inode of the mount point */
static int fuse_get_tree_submount(struct fs_context *fsc)
{
        struct fuse_mount *fm;
        struct fuse_inode *mp_fi = fsc->fs_private;
        struct fuse_conn *fc = get_fuse_conn(&mp_fi->inode);
        struct super_block *sb;
        int err;

        fm = kzalloc(sizeof(struct fuse_mount), GFP_KERNEL);
        if (!fm)
                return -ENOMEM;

        fm->fc = fuse_conn_get(fc);
        fsc->s_fs_info = fm;
        sb = sget_fc(fsc, NULL, set_anon_super_fc);
        if (fsc->s_fs_info)
                fuse_mount_destroy(fm);
        if (IS_ERR(sb))
                return PTR_ERR(sb);

        /* Initialize superblock, making @mp_fi its root */
        err = fuse_fill_super_submount(sb, mp_fi);
        if (err) {
                deactivate_locked_super(sb);
                return err;
        }

        down_write(&fc->killsb);
        list_add_tail(&fm->fc_entry, &fc->mounts);
        up_write(&fc->killsb);

        sb->s_flags |= SB_ACTIVE;
        fsc->root = dget(sb->s_root);

        return 0;
}
static const struct fs_context_operations fuse_context_submount_ops = {
        .get_tree       = fuse_get_tree_submount,
};

int fuse_init_fs_context_submount(struct fs_context *fsc)
{
        fsc->ops = &fuse_context_submount_ops;
        return 0;
}
EXPORT_SYMBOL_GPL(fuse_init_fs_context_submount);
int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx)
{
        struct fuse_dev *fud = NULL;
        struct fuse_mount *fm = get_fuse_mount_super(sb);
        struct fuse_conn *fc = fm->fc;
        struct inode *root;
        struct dentry *root_dentry;
        int err;

        err = -EINVAL;
        if (sb->s_flags & SB_MANDLOCK)
                goto err;

        rcu_assign_pointer(fc->curr_bucket, fuse_sync_bucket_alloc());
        fuse_sb_defaults(sb);

        if (ctx->is_bdev) {
#ifdef CONFIG_BLOCK
                err = -EINVAL;
                if (!sb_set_blocksize(sb, ctx->blksize))
                        goto err;
#endif
        } else {
                sb->s_blocksize = PAGE_SIZE;
                sb->s_blocksize_bits = PAGE_SHIFT;
        }

        sb->s_subtype = ctx->subtype;
        ctx->subtype = NULL;
        if (IS_ENABLED(CONFIG_FUSE_DAX)) {
                err = fuse_dax_conn_alloc(fc, ctx->dax_mode, ctx->dax_dev);
                if (err)
                        goto err;
        }

        if (ctx->fudptr) {
                err = -ENOMEM;
                fud = fuse_dev_alloc_install(fc);
                if (!fud)
                        goto err_free_dax;
        }

        fc->dev = sb->s_dev;
        fm->sb = sb;
        err = fuse_bdi_init(fc, sb);
        if (err)
                goto err_dev_free;

        /* Handle umasking inside the fuse code */
        if (sb->s_flags & SB_POSIXACL)
                fc->dont_mask = 1;
        sb->s_flags |= SB_POSIXACL;

        fc->default_permissions = ctx->default_permissions;
        fc->allow_other = ctx->allow_other;
        fc->user_id = ctx->user_id;
        fc->group_id = ctx->group_id;
        fc->legacy_opts_show = ctx->legacy_opts_show;
        fc->max_read = max_t(unsigned int, 4096, ctx->max_read);
        fc->destroy = ctx->destroy;
        fc->no_control = ctx->no_control;
        fc->no_force_umount = ctx->no_force_umount;

        err = -ENOMEM;
        root = fuse_get_root_inode(sb, ctx->rootmode);
        sb->s_d_op = &fuse_root_dentry_operations;
        root_dentry = d_make_root(root);
        if (!root_dentry)
                goto err_dev_free;
        /* Root dentry doesn't have .d_revalidate */
        sb->s_d_op = &fuse_dentry_operations;

        mutex_lock(&fuse_mutex);
        err = -EINVAL;
        if (ctx->fudptr && *ctx->fudptr)
                goto err_unlock;

        err = fuse_ctl_add_conn(fc);
        if (err)
                goto err_unlock;

        list_add_tail(&fc->entry, &fuse_conn_list);
        sb->s_root = root_dentry;
        if (ctx->fudptr)
                *ctx->fudptr = fud;
        mutex_unlock(&fuse_mutex);
        return 0;

 err_unlock:
        mutex_unlock(&fuse_mutex);
        dput(root_dentry);
 err_dev_free:
        if (fud)
                fuse_dev_free(fud);
 err_free_dax:
        if (IS_ENABLED(CONFIG_FUSE_DAX))
                fuse_dax_conn_free(fc);
 err:
        return err;
}
EXPORT_SYMBOL_GPL(fuse_fill_super_common);
static int fuse_fill_super(struct super_block *sb, struct fs_context *fsc)
{
        struct fuse_fs_context *ctx = fsc->fs_private;
        int err;

        if (!ctx->file || !ctx->rootmode_present ||
            !ctx->user_id_present || !ctx->group_id_present)
                return -EINVAL;

        /*
         * Require mount to happen from the same user namespace which
         * opened /dev/fuse to prevent potential attacks.
         */
        if ((ctx->file->f_op != &fuse_dev_operations) ||
            (ctx->file->f_cred->user_ns != sb->s_user_ns))
                return -EINVAL;
        ctx->fudptr = &ctx->file->private_data;

        err = fuse_fill_super_common(sb, ctx);
        if (err)
                return err;
        /* file->private_data shall be visible on all CPUs after this */
        smp_mb();
        fuse_send_init(get_fuse_mount_super(sb));
        return 0;
}
/*
 * This is the path where the user supplied an already initialized fuse dev.
 * In this case never create a new super if the old one is gone.
 */
static int fuse_set_no_super(struct super_block *sb, struct fs_context *fsc)
{
        return -ENOTCONN;
}

static int fuse_test_super(struct super_block *sb, struct fs_context *fsc)
{
        return fsc->sget_key == get_fuse_conn_super(sb);
}
static int fuse_get_tree(struct fs_context *fsc)
{
        struct fuse_fs_context *ctx = fsc->fs_private;
        struct fuse_dev *fud;
        struct fuse_conn *fc;
        struct fuse_mount *fm;
        struct super_block *sb;
        int err;

        fc = kmalloc(sizeof(*fc), GFP_KERNEL);
        if (!fc)
                return -ENOMEM;

        fm = kzalloc(sizeof(*fm), GFP_KERNEL);
        if (!fm) {
                kfree(fc);
                return -ENOMEM;
        }

        fuse_conn_init(fc, fm, fsc->user_ns, &fuse_dev_fiq_ops, NULL);
        fc->release = fuse_free_conn;

        fsc->s_fs_info = fm;

        if (ctx->fd_present)
                ctx->file = fget(ctx->fd);

        if (IS_ENABLED(CONFIG_BLOCK) && ctx->is_bdev) {
                err = get_tree_bdev(fsc, fuse_fill_super);
                goto out;
        }
        /*
         * While block dev mount can be initialized with a dummy device fd
         * (found by device name), normal fuse mounts can't
         */
        err = -EINVAL;
        if (!ctx->file)
                goto out;

        /*
         * Allow creating a fuse mount with an already initialized fuse
         * connection
         */
        fud = READ_ONCE(ctx->file->private_data);
        if (ctx->file->f_op == &fuse_dev_operations && fud) {
                fsc->sget_key = fud->fc;
                sb = sget_fc(fsc, fuse_test_super, fuse_set_no_super);
                err = PTR_ERR_OR_ZERO(sb);
                if (!IS_ERR(sb))
                        fsc->root = dget(sb->s_root);
        } else {
                err = get_tree_nodev(fsc, fuse_fill_super);
        }
out:
        if (fsc->s_fs_info)
                fuse_mount_destroy(fm);
        if (ctx->file)
                fput(ctx->file);
        return err;
}
static const struct fs_context_operations fuse_context_ops = {
        .free           = fuse_free_fsc,
        .parse_param    = fuse_parse_param,
        .reconfigure    = fuse_reconfigure,
        .get_tree       = fuse_get_tree,
};

/*
 * Set up the filesystem mount context.
 */
static int fuse_init_fs_context(struct fs_context *fsc)
{
        struct fuse_fs_context *ctx;

        ctx = kzalloc(sizeof(struct fuse_fs_context), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        ctx->max_read = ~0;
        ctx->blksize = FUSE_DEFAULT_BLKSIZE;
        ctx->legacy_opts_show = true;

#ifdef CONFIG_BLOCK
        if (fsc->fs_type == &fuseblk_fs_type) {
                ctx->is_bdev = true;
                ctx->destroy = true;
        }
#endif

        fsc->fs_private = ctx;
        fsc->ops = &fuse_context_ops;
        return 0;
}
bool fuse_mount_remove(struct fuse_mount *fm)
{
        struct fuse_conn *fc = fm->fc;
        bool last = false;

        down_write(&fc->killsb);
        list_del_init(&fm->fc_entry);
        if (list_empty(&fc->mounts))
                last = true;
        up_write(&fc->killsb);

        return last;
}
EXPORT_SYMBOL_GPL(fuse_mount_remove);
void fuse_conn_destroy(struct fuse_mount *fm)
{
        struct fuse_conn *fc = fm->fc;

        if (fc->destroy)
                fuse_send_destroy(fm);

        fuse_abort_conn(fc);
        fuse_wait_aborted(fc);

        if (!list_empty(&fc->entry)) {
                mutex_lock(&fuse_mutex);
                list_del(&fc->entry);
                fuse_ctl_remove_conn(fc);
                mutex_unlock(&fuse_mutex);
        }
}
EXPORT_SYMBOL_GPL(fuse_conn_destroy);
static void fuse_sb_destroy(struct super_block *sb)
{
        struct fuse_mount *fm = get_fuse_mount_super(sb);
        bool last;

        if (sb->s_root) {
                last = fuse_mount_remove(fm);
                if (last)
                        fuse_conn_destroy(fm);
        }
}
void fuse_mount_destroy(struct fuse_mount *fm)
{
        fuse_conn_put(fm->fc);
        kfree_rcu(fm, rcu);
}
EXPORT_SYMBOL(fuse_mount_destroy);
static void fuse_kill_sb_anon(struct super_block *sb)
{
        fuse_sb_destroy(sb);
        kill_anon_super(sb);
        fuse_mount_destroy(get_fuse_mount_super(sb));
}

static struct file_system_type fuse_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "fuse",
        .fs_flags       = FS_HAS_SUBTYPE | FS_USERNS_MOUNT,
        .init_fs_context = fuse_init_fs_context,
        .parameters     = fuse_fs_parameters,
        .kill_sb        = fuse_kill_sb_anon,
};
MODULE_ALIAS_FS("fuse");
#ifdef CONFIG_BLOCK
static void fuse_kill_sb_blk(struct super_block *sb)
{
        fuse_sb_destroy(sb);
        kill_block_super(sb);
        fuse_mount_destroy(get_fuse_mount_super(sb));
}

static struct file_system_type fuseblk_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "fuseblk",
        .init_fs_context = fuse_init_fs_context,
        .parameters     = fuse_fs_parameters,
        .kill_sb        = fuse_kill_sb_blk,
        .fs_flags       = FS_REQUIRES_DEV | FS_HAS_SUBTYPE,
};
MODULE_ALIAS_FS("fuseblk");
static inline int register_fuseblk(void)
{
        return register_filesystem(&fuseblk_fs_type);
}

static inline void unregister_fuseblk(void)
{
        unregister_filesystem(&fuseblk_fs_type);
}
#else
static inline int register_fuseblk(void)
{
        return 0;
}

static inline void unregister_fuseblk(void)
{
}
#endif
static void fuse_inode_init_once(void *foo)
{
        struct inode *inode = foo;

        inode_init_once(inode);
}
static int __init fuse_fs_init(void)
{
        int err;

        fuse_inode_cachep = kmem_cache_create("fuse_inode",
                        sizeof(struct fuse_inode), 0,
                        SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT|SLAB_RECLAIM_ACCOUNT,
                        fuse_inode_init_once);
        err = -ENOMEM;
        if (!fuse_inode_cachep)
                goto out;

        err = register_fuseblk();
        if (err)
                goto out2;

        err = register_filesystem(&fuse_fs_type);
        if (err)
                goto out3;

        return 0;

 out3:
        unregister_fuseblk();
 out2:
        kmem_cache_destroy(fuse_inode_cachep);
 out:
        return err;
}
static void fuse_fs_cleanup(void)
{
        unregister_filesystem(&fuse_fs_type);
        unregister_fuseblk();

        /*
         * Make sure all delayed rcu free inodes are flushed before we
         * destroy cache.
         */
        rcu_barrier();
        kmem_cache_destroy(fuse_inode_cachep);
}
static struct kobject *fuse_kobj;

static int fuse_sysfs_init(void)
{
        int err;

        fuse_kobj = kobject_create_and_add("fuse", fs_kobj);
        if (!fuse_kobj) {
                err = -ENOMEM;
                goto out_err;
        }

        err = sysfs_create_mount_point(fuse_kobj, "connections");
        if (err)
                goto out_fuse_unregister;

        return 0;

 out_fuse_unregister:
        kobject_put(fuse_kobj);
 out_err:
        return err;
}

static void fuse_sysfs_cleanup(void)
{
        sysfs_remove_mount_point(fuse_kobj, "connections");
        kobject_put(fuse_kobj);
}
static int __init fuse_init(void)
{
        int res;

        pr_info("init (API version %i.%i)\n",
                FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION);

        INIT_LIST_HEAD(&fuse_conn_list);
        res = fuse_fs_init();
        if (res)
                goto err;

        res = fuse_dev_init();
        if (res)
                goto err_fs_cleanup;

        res = fuse_sysfs_init();
        if (res)
                goto err_dev_cleanup;

        res = fuse_ctl_init();
        if (res)
                goto err_sysfs_cleanup;

        sanitize_global_limit(&max_user_bgreq);
        sanitize_global_limit(&max_user_congthresh);

        return 0;

 err_sysfs_cleanup:
        fuse_sysfs_cleanup();
 err_dev_cleanup:
        fuse_dev_cleanup();
 err_fs_cleanup:
        fuse_fs_cleanup();
 err:
        return res;
}

static void __exit fuse_exit(void)
{
        pr_debug("exit\n");

        fuse_ctl_cleanup();
        fuse_sysfs_cleanup();
        fuse_fs_cleanup();
        fuse_dev_cleanup();
}

module_init(fuse_init);
module_exit(fuse_exit);