// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/fsnotify.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <linux/xattr.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include <linux/btrfs.h>
#include <linux/uaccess.h>
#include <linux/iversion.h>
#include <linux/fileattr.h>
#include <linux/fsverity.h>
#include <linux/sched/xacct.h>
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "compression.h"
#include "space-info.h"
#include "delalloc-space.h"
#include "block-group.h"

/* If we have a 32-bit userspace and 64-bit kernel, then the UAPI
 * structures are incorrect, as the timespec structure from userspace
 * is 4 bytes too small. We define these alternatives here to teach
 * the kernel about the 32-bit struct packing.
 */
struct btrfs_ioctl_timespec_32 {
	__u64 sec;
	__u32 nsec;
} __attribute__ ((__packed__));

struct btrfs_ioctl_received_subvol_args_32 {
	char	uuid[BTRFS_UUID_SIZE];	/* in */
	__u64	stransid;		/* in */
	__u64	rtransid;		/* out */
	struct btrfs_ioctl_timespec_32 stime; /* in */
	struct btrfs_ioctl_timespec_32 rtime; /* out */
	__u64	reserved[16];		/* in */
} __attribute__ ((__packed__));

#define BTRFS_IOC_SET_RECEIVED_SUBVOL_32 _IOWR(BTRFS_IOCTL_MAGIC, 37, \
				struct btrfs_ioctl_received_subvol_args_32)
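/*
 * The *_32 ioctl numbers differ from the native ones only in the struct size
 * encoded into the request number, so a 64-bit kernel can recognize requests
 * issued by a 32-bit process.  Rough sketch only (the real dispatch lives
 * elsewhere in this file): a compat handler copies in the packed 32-bit
 * struct and widens the timespec fields before reusing the native path:
 *
 *	struct btrfs_ioctl_received_subvol_args_32 args32;
 *	if (copy_from_user(&args32, argp, sizeof(args32)))
 *		return -EFAULT;
 *	// widen args32.stime/rtime into the native 64-bit layout ...
 */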
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
struct btrfs_ioctl_send_args_32 {
	__s64 send_fd;			/* in */
	__u64 clone_sources_count;	/* in */
	compat_uptr_t clone_sources;	/* in */
	__u64 parent_root;		/* in */
	__u32 version;			/* in */
	__u8  reserved[28];		/* in */
} __attribute__ ((__packed__));

#define BTRFS_IOC_SEND_32 _IOW(BTRFS_IOCTL_MAGIC, 38, \
			       struct btrfs_ioctl_send_args_32)

struct btrfs_ioctl_encoded_io_args_32 {
	compat_ulong_t iovcnt;
	__u64 unencoded_offset;
};

#define BTRFS_IOC_ENCODED_READ_32 _IOR(BTRFS_IOCTL_MAGIC, 64, \
				       struct btrfs_ioctl_encoded_io_args_32)
#define BTRFS_IOC_ENCODED_WRITE_32 _IOW(BTRFS_IOCTL_MAGIC, 64, \
					struct btrfs_ioctl_encoded_io_args_32)
#endif
/* Mask out flags that are inappropriate for the given type of inode. */
static unsigned int btrfs_mask_fsflags_for_type(struct inode *inode,
		unsigned int flags)
{
	if (S_ISDIR(inode->i_mode))
		return flags;
	else if (S_ISREG(inode->i_mode))
		return flags & ~FS_DIRSYNC_FL;
	else
		return flags & (FS_NODUMP_FL | FS_NOATIME_FL);
}
/*
 * Export internal inode flags to the format expected by the FS_IOC_GETFLAGS
 * ioctl.
 */
static unsigned int btrfs_inode_flags_to_fsflags(struct btrfs_inode *binode)
{
	unsigned int iflags = 0;
	u32 flags = binode->flags;
	u32 ro_flags = binode->ro_flags;

	if (flags & BTRFS_INODE_SYNC)
		iflags |= FS_SYNC_FL;
	if (flags & BTRFS_INODE_IMMUTABLE)
		iflags |= FS_IMMUTABLE_FL;
	if (flags & BTRFS_INODE_APPEND)
		iflags |= FS_APPEND_FL;
	if (flags & BTRFS_INODE_NODUMP)
		iflags |= FS_NODUMP_FL;
	if (flags & BTRFS_INODE_NOATIME)
		iflags |= FS_NOATIME_FL;
	if (flags & BTRFS_INODE_DIRSYNC)
		iflags |= FS_DIRSYNC_FL;
	if (flags & BTRFS_INODE_NODATACOW)
		iflags |= FS_NOCOW_FL;
	if (ro_flags & BTRFS_INODE_RO_VERITY)
		iflags |= FS_VERITY_FL;

	if (flags & BTRFS_INODE_NOCOMPRESS)
		iflags |= FS_NOCOMP_FL;
	else if (flags & BTRFS_INODE_COMPRESS)
		iflags |= FS_COMPR_FL;

	return iflags;
}
/*
 * Update inode->i_flags based on the btrfs internal flags.
 */
void btrfs_sync_inode_flags_to_i_flags(struct inode *inode)
{
	struct btrfs_inode *binode = BTRFS_I(inode);
	unsigned int new_fl = 0;

	if (binode->flags & BTRFS_INODE_SYNC)
		new_fl |= S_SYNC;
	if (binode->flags & BTRFS_INODE_IMMUTABLE)
		new_fl |= S_IMMUTABLE;
	if (binode->flags & BTRFS_INODE_APPEND)
		new_fl |= S_APPEND;
	if (binode->flags & BTRFS_INODE_NOATIME)
		new_fl |= S_NOATIME;
	if (binode->flags & BTRFS_INODE_DIRSYNC)
		new_fl |= S_DIRSYNC;
	if (binode->ro_flags & BTRFS_INODE_RO_VERITY)
		new_fl |= S_VERITY;

	set_mask_bits(&inode->i_flags,
		      S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | S_DIRSYNC |
		      S_VERITY, new_fl);
}
/*
 * Check if @flags are a supported and valid set of FS_*_FL flags and that
 * the old and new flags are not conflicting
 */
static int check_fsflags(unsigned int old_flags, unsigned int flags)
{
	if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
		      FS_NOATIME_FL | FS_NODUMP_FL | \
		      FS_SYNC_FL | FS_DIRSYNC_FL | \
		      FS_NOCOMP_FL | FS_COMPR_FL |
		      FS_NOCOW_FL))
		return -EOPNOTSUPP;

	/* COMPR and NOCOMP on new/old are valid */
	if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL))
		return -EINVAL;

	if ((flags & FS_COMPR_FL) && (flags & FS_NOCOW_FL))
		return -EINVAL;

	/* NOCOW and compression options are mutually exclusive */
	if ((old_flags & FS_NOCOW_FL) && (flags & (FS_COMPR_FL | FS_NOCOMP_FL)))
		return -EINVAL;
	if ((flags & FS_NOCOW_FL) && (old_flags & (FS_COMPR_FL | FS_NOCOMP_FL)))
		return -EINVAL;

	return 0;
}
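/*
 * Check whether the requested flags are supported on this particular
 * filesystem.  Zoned filesystems always require COW, so enabling NOCOW
 * is refused there.
 */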
static int check_fsflags_compatible(struct btrfs_fs_info *fs_info,
				    unsigned int flags)
{
	if (btrfs_is_zoned(fs_info) && (flags & FS_NOCOW_FL))
		return -EPERM;

	return 0;
}
/*
 * Set flags/xflags from the internal inode flags. The remaining items of
 * fsxattr are zeroed.
 */
int btrfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
{
	struct btrfs_inode *binode = BTRFS_I(d_inode(dentry));

	fileattr_fill_flags(fa, btrfs_inode_flags_to_fsflags(binode));
	return 0;
}
int btrfs_fileattr_set(struct user_namespace *mnt_userns,
		       struct dentry *dentry, struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_inode *binode = BTRFS_I(inode);
	struct btrfs_root *root = binode->root;
	struct btrfs_trans_handle *trans;
	unsigned int fsflags, old_fsflags;
	int ret;
	const char *comp = NULL;
	u32 binode_flags;

	if (btrfs_root_readonly(root))
		return -EROFS;

	if (fileattr_has_fsx(fa))
		return -EOPNOTSUPP;

	fsflags = btrfs_mask_fsflags_for_type(inode, fa->flags);
	old_fsflags = btrfs_inode_flags_to_fsflags(binode);
	ret = check_fsflags(old_fsflags, fsflags);
	if (ret)
		return ret;

	ret = check_fsflags_compatible(fs_info, fsflags);
	if (ret)
		return ret;

	binode_flags = binode->flags;
	if (fsflags & FS_SYNC_FL)
		binode_flags |= BTRFS_INODE_SYNC;
	else
		binode_flags &= ~BTRFS_INODE_SYNC;
	if (fsflags & FS_IMMUTABLE_FL)
		binode_flags |= BTRFS_INODE_IMMUTABLE;
	else
		binode_flags &= ~BTRFS_INODE_IMMUTABLE;
	if (fsflags & FS_APPEND_FL)
		binode_flags |= BTRFS_INODE_APPEND;
	else
		binode_flags &= ~BTRFS_INODE_APPEND;
	if (fsflags & FS_NODUMP_FL)
		binode_flags |= BTRFS_INODE_NODUMP;
	else
		binode_flags &= ~BTRFS_INODE_NODUMP;
	if (fsflags & FS_NOATIME_FL)
		binode_flags |= BTRFS_INODE_NOATIME;
	else
		binode_flags &= ~BTRFS_INODE_NOATIME;

	/* If coming from FS_IOC_FSSETXATTR then skip unconverted flags */
	if (!fa->flags_valid) {
		/* 1 item for the inode */
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		goto update_flags;
	}

	if (fsflags & FS_DIRSYNC_FL)
		binode_flags |= BTRFS_INODE_DIRSYNC;
	else
		binode_flags &= ~BTRFS_INODE_DIRSYNC;
	if (fsflags & FS_NOCOW_FL) {
		if (S_ISREG(inode->i_mode)) {
			/*
			 * It's safe to turn csums off here, no extents exist.
			 * Otherwise we want the flag to reflect the real COW
			 * status of the file and will not set it.
			 */
			if (inode->i_size == 0)
				binode_flags |= BTRFS_INODE_NODATACOW |
						BTRFS_INODE_NODATASUM;
		} else {
			binode_flags |= BTRFS_INODE_NODATACOW;
		}
	} else {
		/*
		 * Revert back under same assumptions as above
		 */
		if (S_ISREG(inode->i_mode)) {
			if (inode->i_size == 0)
				binode_flags &= ~(BTRFS_INODE_NODATACOW |
						  BTRFS_INODE_NODATASUM);
		} else {
			binode_flags &= ~BTRFS_INODE_NODATACOW;
		}
	}

	/*
	 * The COMPRESS flag can only be changed by users, while the NOCOMPRESS
	 * flag may be changed automatically if compression code won't make
	 * things smaller.
	 */
	if (fsflags & FS_NOCOMP_FL) {
		binode_flags &= ~BTRFS_INODE_COMPRESS;
		binode_flags |= BTRFS_INODE_NOCOMPRESS;
	} else if (fsflags & FS_COMPR_FL) {

		if (IS_SWAPFILE(inode))
			return -ETXTBSY;

		binode_flags |= BTRFS_INODE_COMPRESS;
		binode_flags &= ~BTRFS_INODE_NOCOMPRESS;

		comp = btrfs_compress_type2str(fs_info->compress_type);
		if (!comp || comp[0] == 0)
			comp = btrfs_compress_type2str(BTRFS_COMPRESS_ZLIB);
	} else {
		binode_flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
	}

	/*
	 * 1 for inode item
	 * 2 for properties
	 */
	trans = btrfs_start_transaction(root, 3);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	if (comp) {
		ret = btrfs_set_prop(trans, inode, "btrfs.compression", comp,
				     strlen(comp), 0);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out_end_trans;
		}
	} else {
		ret = btrfs_set_prop(trans, inode, "btrfs.compression", NULL,
				     0, 0);
		if (ret && ret != -ENODATA) {
			btrfs_abort_transaction(trans, ret);
			goto out_end_trans;
		}
	}

update_flags:
	binode->flags = binode_flags;
	btrfs_sync_inode_flags_to_i_flags(inode);
	inode_inc_iversion(inode);
	inode->i_ctime = current_time(inode);
	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));

out_end_trans:
	btrfs_end_transaction(trans);
	return ret;
}
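/*
 * Exclusive operations (balance, device add, resize, ...) are serialized
 * through fs_info->exclusive_operation.  A typical caller claims the slot,
 * does the work and releases it, roughly:
 *
 *	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_RESIZE))
 *		return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
 *	... do the resize ...
 *	btrfs_exclop_finish(fs_info);
 */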
/*
 * Start exclusive operation @type, return true on success
 */
bool btrfs_exclop_start(struct btrfs_fs_info *fs_info,
			enum btrfs_exclusive_operation type)
{
	bool ret = false;

	spin_lock(&fs_info->super_lock);
	if (fs_info->exclusive_operation == BTRFS_EXCLOP_NONE) {
		fs_info->exclusive_operation = type;
		ret = true;
	}
	spin_unlock(&fs_info->super_lock);

	return ret;
}

/*
 * Conditionally allow to enter the exclusive operation in case it's compatible
 * with the running one.  This must be paired with btrfs_exclop_start_unlock and
 * btrfs_exclop_finish.
 *
 * Compatibility:
 * - the same type is already running
 * - when trying to add a device and balance has been paused
 * - not BTRFS_EXCLOP_NONE - this is intentionally incompatible and the caller
 *   must check the condition first that would allow none -> @type
 */
bool btrfs_exclop_start_try_lock(struct btrfs_fs_info *fs_info,
				 enum btrfs_exclusive_operation type)
{
	spin_lock(&fs_info->super_lock);
	if (fs_info->exclusive_operation == type ||
	    (fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED &&
	     type == BTRFS_EXCLOP_DEV_ADD))
		return true;

	spin_unlock(&fs_info->super_lock);
	return false;
}

void btrfs_exclop_start_unlock(struct btrfs_fs_info *fs_info)
{
	spin_unlock(&fs_info->super_lock);
}

void btrfs_exclop_finish(struct btrfs_fs_info *fs_info)
{
	spin_lock(&fs_info->super_lock);
	WRITE_ONCE(fs_info->exclusive_operation, BTRFS_EXCLOP_NONE);
	spin_unlock(&fs_info->super_lock);
	sysfs_notify(&fs_info->fs_devices->fsid_kobj, NULL, "exclusive_operation");
}
void btrfs_exclop_balance(struct btrfs_fs_info *fs_info,
			  enum btrfs_exclusive_operation op)
{
	switch (op) {
	case BTRFS_EXCLOP_BALANCE_PAUSED:
		spin_lock(&fs_info->super_lock);
		ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE ||
		       fs_info->exclusive_operation == BTRFS_EXCLOP_DEV_ADD);
		fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE_PAUSED;
		spin_unlock(&fs_info->super_lock);
		break;
	case BTRFS_EXCLOP_BALANCE:
		spin_lock(&fs_info->super_lock);
		ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED);
		fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE;
		spin_unlock(&fs_info->super_lock);
		break;
	default:
		btrfs_warn(fs_info,
			"invalid exclop balance operation %d requested", op);
	}
}
static int btrfs_ioctl_getversion(struct inode *inode, int __user *arg)
{
	return put_user(inode->i_generation, arg);
}
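/*
 * FITRIM handler: validate the request, compute the smallest discard
 * granularity across all discard-capable devices and hand the range to
 * btrfs_trim_fs().  The trimmed range is copied back to user space.
 */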
static noinline int btrfs_ioctl_fitrim(struct btrfs_fs_info *fs_info,
					void __user *arg)
{
	struct btrfs_device *device;
	struct fstrim_range range;
	u64 minlen = ULLONG_MAX;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * btrfs_trim_block_group() depends on space cache, which is not
	 * available in zoned filesystem. So, disallow fitrim on a zoned
	 * filesystem for now.
	 */
	if (btrfs_is_zoned(fs_info))
		return -EOPNOTSUPP;

	/*
	 * If the fs is mounted with nologreplay, which requires it to be
	 * mounted in RO mode as well, we can not allow discard on free space
	 * inside block groups, because log trees refer to extents that are not
	 * pinned in a block group's free space cache (pinning the extents is
	 * precisely the first phase of replaying a log tree).
	 */
	if (btrfs_test_opt(fs_info, NOLOGREPLAY))
		return -EROFS;

	list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
				dev_list) {
		if (!device->bdev || !bdev_max_discard_sectors(device->bdev))
			continue;
		minlen = min_t(u64, bdev_discard_granularity(device->bdev),
			       minlen);
	}

	if (copy_from_user(&range, arg, sizeof(range)))
		return -EFAULT;

	/*
	 * NOTE: Don't truncate the range using super->total_bytes. Bytenr of
	 * block group is in the logical address space, which can be any
	 * sectorsize aligned bytenr in the range [0, U64_MAX].
	 */
	if (range.len < fs_info->sb->s_blocksize)
		return -EINVAL;

	range.minlen = max(range.minlen, minlen);
	ret = btrfs_trim_fs(fs_info, &range);
	if (ret < 0)
		return ret;

	if (copy_to_user(arg, &range, sizeof(range)))
		return -EFAULT;

	return 0;
}
int __pure btrfs_is_empty_uuid(u8 *uuid)
{
	int i;

	for (i = 0; i < BTRFS_UUID_SIZE; i++) {
		if (uuid[i])
			return 0;
	}
	return 1;
}
/*
 * Calculate the number of transaction items to reserve for creating a subvolume
 * or snapshot, not including the inode, directory entries, or parent directory.
 */
static unsigned int create_subvol_num_items(struct btrfs_qgroup_inherit *inherit)
{
	/*
	 * 1 to add root block
	 * 1 to add root item
	 * 1 to add root ref
	 * 1 to add root backref
	 * 1 to add UUID item
	 * 1 to add qgroup info
	 * 1 to add qgroup limit
	 *
	 * Ideally the last two would only be accounted if qgroups are enabled,
	 * but that can change between now and the time we would insert them.
	 */
	unsigned int num_items = 7;

	if (inherit) {
		/* 2 to add qgroup relations for each inherited qgroup */
		num_items += 2 * inherit->num_qgroups;
	}
	return num_items;
}
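/*
 * Create a new subvolume: allocate its root tree block, fill and insert the
 * root item, register the new root in the uuid tree and create the directory
 * inode that anchors it at @dir/@dentry.
 */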
static noinline int create_subvol(struct user_namespace *mnt_userns,
				  struct inode *dir, struct dentry *dentry,
				  struct btrfs_qgroup_inherit *inherit)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	struct btrfs_root_item *root_item;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_root *new_root;
	struct btrfs_block_rsv block_rsv;
	struct timespec64 cur_time = current_time(dir);
	struct btrfs_new_inode_args new_inode_args = {
		.dir = dir,
		.dentry = dentry,
		.subvol = true,
	};
	unsigned int trans_num_items;
	int ret;
	u64 objectid;
	u64 anon_dev = 0;

	root_item = kzalloc(sizeof(*root_item), GFP_KERNEL);
	if (!root_item)
		return -ENOMEM;

	ret = btrfs_get_free_objectid(fs_info->tree_root, &objectid);
	if (ret)
		goto out_root_item;

	/*
	 * Don't create subvolume whose level is not zero. Or qgroup will be
	 * screwed up since it assumes subvolume qgroup's level to be 0.
	 */
	if (btrfs_qgroup_level(objectid)) {
		ret = -ENOSPC;
		goto out_root_item;
	}

	ret = get_anon_bdev(&anon_dev);
	if (ret < 0)
		goto out_root_item;

	new_inode_args.inode = btrfs_new_subvol_inode(mnt_userns, dir);
	if (!new_inode_args.inode) {
		ret = -ENOMEM;
		goto out_anon_dev;
	}
	ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
	if (ret)
		goto out_inode;
	trans_num_items += create_subvol_num_items(inherit);

	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
	ret = btrfs_subvolume_reserve_metadata(root, &block_rsv,
					       trans_num_items, false);
	if (ret)
		goto out_new_inode_args;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		btrfs_subvolume_release_metadata(root, &block_rsv);
		goto out_new_inode_args;
	}
	trans->block_rsv = &block_rsv;
	trans->bytes_reserved = block_rsv.size;

	ret = btrfs_qgroup_inherit(trans, 0, objectid, inherit);
	if (ret)
		goto out;

	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0,
				      BTRFS_NESTING_NORMAL);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		goto out;
	}

	btrfs_mark_buffer_dirty(leaf);

	inode_item = &root_item->inode;
	btrfs_set_stack_inode_generation(inode_item, 1);
	btrfs_set_stack_inode_size(inode_item, 3);
	btrfs_set_stack_inode_nlink(inode_item, 1);
	btrfs_set_stack_inode_nbytes(inode_item,
				     fs_info->nodesize);
	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

	btrfs_set_root_flags(root_item, 0);
	btrfs_set_root_limit(root_item, 0);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_INODE_ROOT_ITEM_INIT);

	btrfs_set_root_bytenr(root_item, leaf->start);
	btrfs_set_root_generation(root_item, trans->transid);
	btrfs_set_root_level(root_item, 0);
	btrfs_set_root_refs(root_item, 1);
	btrfs_set_root_used(root_item, leaf->len);
	btrfs_set_root_last_snapshot(root_item, 0);

	btrfs_set_root_generation_v2(root_item,
				     btrfs_root_generation(root_item));
	generate_random_guid(root_item->uuid);
	btrfs_set_stack_timespec_sec(&root_item->otime, cur_time.tv_sec);
	btrfs_set_stack_timespec_nsec(&root_item->otime, cur_time.tv_nsec);
	root_item->ctime = root_item->otime;
	btrfs_set_root_ctransid(root_item, trans->transid);
	btrfs_set_root_otransid(root_item, trans->transid);

	btrfs_tree_unlock(leaf);

	btrfs_set_root_dirid(root_item, BTRFS_FIRST_FREE_OBJECTID);

	key.objectid = objectid;
	key.offset = 0;
	key.type = BTRFS_ROOT_ITEM_KEY;
	ret = btrfs_insert_root(trans, fs_info->tree_root, &key,
				root_item);
	if (ret) {
		/*
		 * Since we don't abort the transaction in this case, free the
		 * tree block so that we don't leak space and leave the
		 * filesystem in an inconsistent state (an extent item in the
		 * extent tree with a backreference for a root that does not
		 * exists).
		 */
		btrfs_tree_lock(leaf);
		btrfs_clean_tree_block(leaf);
		btrfs_tree_unlock(leaf);
		btrfs_free_tree_block(trans, objectid, leaf, 0, 1);
		free_extent_buffer(leaf);
		goto out;
	}

	free_extent_buffer(leaf);

	new_root = btrfs_get_new_fs_root(fs_info, objectid, anon_dev);
	if (IS_ERR(new_root)) {
		ret = PTR_ERR(new_root);
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	/* anon_dev is owned by new_root now. */
	anon_dev = 0;
	BTRFS_I(new_inode_args.inode)->root = new_root;
	/* ... and new_root is owned by new_inode_args.inode now. */

	ret = btrfs_record_root_in_trans(trans, new_root);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = btrfs_uuid_tree_add(trans, root_item->uuid,
				  BTRFS_UUID_KEY_SUBVOL, objectid);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = btrfs_create_new_inode(trans, &new_inode_args);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	d_instantiate_new(dentry, new_inode_args.inode);
	new_inode_args.inode = NULL;

out:
	trans->block_rsv = NULL;
	trans->bytes_reserved = 0;
	btrfs_subvolume_release_metadata(root, &block_rsv);

	btrfs_end_transaction(trans);
	if (!ret)
		ret = btrfs_commit_transaction(trans);
out_new_inode_args:
	btrfs_new_inode_args_destroy(&new_inode_args);
out_inode:
	iput(new_inode_args.inode);
out_anon_dev:
	if (anon_dev)
		free_anon_bdev(anon_dev);
out_root_item:
	kfree(root_item);
	return ret;
}
static int create_snapshot(struct btrfs_root *root, struct inode *dir,
			   struct dentry *dentry, bool readonly,
			   struct btrfs_qgroup_inherit *inherit)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct inode *inode;
	struct btrfs_pending_snapshot *pending_snapshot;
	unsigned int trans_num_items;
	struct btrfs_trans_handle *trans;
	int ret;

	/* We do not support snapshotting right now. */
	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
		btrfs_warn(fs_info,
			   "extent tree v2 doesn't support snapshotting yet");
		return -EOPNOTSUPP;
	}

	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		return -EINVAL;

	if (atomic_read(&root->nr_swapfiles)) {
		btrfs_warn(fs_info,
			   "cannot snapshot subvolume with active swapfile");
		return -ETXTBSY;
	}

	pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_KERNEL);
	if (!pending_snapshot)
		return -ENOMEM;

	ret = get_anon_bdev(&pending_snapshot->anon_dev);
	if (ret < 0)
		goto free_pending;
	pending_snapshot->root_item = kzalloc(sizeof(struct btrfs_root_item),
					      GFP_KERNEL);
	pending_snapshot->path = btrfs_alloc_path();
	if (!pending_snapshot->root_item || !pending_snapshot->path) {
		ret = -ENOMEM;
		goto free_pending;
	}

	btrfs_init_block_rsv(&pending_snapshot->block_rsv,
			     BTRFS_BLOCK_RSV_TEMP);
	/*
	 * 1 to add dir item
	 * 1 to add dir index
	 * 1 to update parent inode item
	 */
	trans_num_items = create_subvol_num_items(inherit) + 3;
	ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root,
					       &pending_snapshot->block_rsv,
					       trans_num_items, false);
	if (ret)
		goto free_pending;

	pending_snapshot->dentry = dentry;
	pending_snapshot->root = root;
	pending_snapshot->readonly = readonly;
	pending_snapshot->dir = dir;
	pending_snapshot->inherit = inherit;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto fail;
	}

	trans->pending_snapshot = pending_snapshot;

	ret = btrfs_commit_transaction(trans);
	if (ret)
		goto fail;

	ret = pending_snapshot->error;
	if (ret)
		goto fail;

	ret = btrfs_orphan_cleanup(pending_snapshot->snap);
	if (ret)
		goto fail;

	inode = btrfs_lookup_dentry(d_inode(dentry->d_parent), dentry);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto fail;
	}

	d_instantiate(dentry, inode);
	ret = 0;
	pending_snapshot->anon_dev = 0;
fail:
	/* Prevent double freeing of anon_dev */
	if (ret && pending_snapshot->snap)
		pending_snapshot->snap->anon_dev = 0;
	btrfs_put_root(pending_snapshot->snap);
	btrfs_subvolume_release_metadata(root, &pending_snapshot->block_rsv);
free_pending:
	if (pending_snapshot->anon_dev)
		free_anon_bdev(pending_snapshot->anon_dev);
	kfree(pending_snapshot->root_item);
	btrfs_free_path(pending_snapshot->path);
	kfree(pending_snapshot);

	return ret;
}
/* copy of may_delete in fs/namei.c()
 *	Check whether we can remove a link victim from directory dir, check
 *  whether the type of victim is right.
 *  1. We can't do it if dir is read-only (done in permission())
 *  2. We should have write and exec permissions on dir
 *  3. We can't remove anything from append-only dir
 *  4. We can't do anything with immutable dir (done in permission())
 *  5. If the sticky bit on dir is set we should either
 *	a. be owner of dir, or
 *	b. be owner of victim, or
 *	c. have CAP_FOWNER capability
 *  6. If the victim is append-only or immutable we can't do anything with
 *     links pointing to it.
 *  7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
 *  8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
 *  9. We can't remove a root or mountpoint.
 * 10. We don't allow removal of NFS sillyrenamed files; it's handled by
 *     nfs_async_unlink().
 */
static int btrfs_may_delete(struct user_namespace *mnt_userns,
			    struct inode *dir, struct dentry *victim, int isdir)
{
	int error;

	if (d_really_is_negative(victim))
		return -ENOENT;

	BUG_ON(d_inode(victim->d_parent) != dir);
	audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);

	error = inode_permission(mnt_userns, dir, MAY_WRITE | MAY_EXEC);
	if (error)
		return error;
	if (IS_APPEND(dir))
		return -EPERM;
	if (check_sticky(mnt_userns, dir, d_inode(victim)) ||
	    IS_APPEND(d_inode(victim)) || IS_IMMUTABLE(d_inode(victim)) ||
	    IS_SWAPFILE(d_inode(victim)))
		return -EPERM;
	if (isdir) {
		if (!d_is_dir(victim))
			return -ENOTDIR;
		if (IS_ROOT(victim))
			return -EBUSY;
	} else if (d_is_dir(victim))
		return -EISDIR;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	if (victim->d_flags & DCACHE_NFSFS_RENAMED)
		return -EBUSY;
	return 0;
}
/* copy of may_create in fs/namei.c() */
static inline int btrfs_may_create(struct user_namespace *mnt_userns,
				   struct inode *dir, struct dentry *child)
{
	if (d_really_is_positive(child))
		return -EEXIST;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	if (!fsuidgid_has_mapping(dir->i_sb, mnt_userns))
		return -EOVERFLOW;
	return inode_permission(mnt_userns, dir, MAY_WRITE | MAY_EXEC);
}
/*
 * Create a new subvolume below @parent.  This is largely modeled after
 * sys_mkdirat and vfs_mkdir, but we only do a single component lookup
 * inside this filesystem so it's quite a bit simpler.
 */
static noinline int btrfs_mksubvol(const struct path *parent,
				   struct user_namespace *mnt_userns,
				   const char *name, int namelen,
				   struct btrfs_root *snap_src,
				   bool readonly,
				   struct btrfs_qgroup_inherit *inherit)
{
	struct inode *dir = d_inode(parent->dentry);
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct dentry *dentry;
	int error;

	error = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
	if (error == -EINTR)
		return error;

	dentry = lookup_one(mnt_userns, name, parent->dentry, namelen);
	error = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		goto out_unlock;

	error = btrfs_may_create(mnt_userns, dir, dentry);
	if (error)
		goto out_dput;

	/*
	 * even if this name doesn't exist, we may get hash collisions.
	 * check for them now when we can safely fail
	 */
	error = btrfs_check_dir_item_collision(BTRFS_I(dir)->root,
					       dir->i_ino, name, namelen);
	if (error)
		goto out_dput;

	down_read(&fs_info->subvol_sem);

	if (btrfs_root_refs(&BTRFS_I(dir)->root->root_item) == 0)
		goto out_up_read;

	if (snap_src)
		error = create_snapshot(snap_src, dir, dentry, readonly, inherit);
	else
		error = create_subvol(mnt_userns, dir, dentry, inherit);

	if (!error)
		fsnotify_mkdir(dir, dentry);
out_up_read:
	up_read(&fs_info->subvol_sem);
out_dput:
	dput(dentry);
out_unlock:
	btrfs_inode_unlock(dir, 0);
	return error;
}
static noinline int btrfs_mksnapshot(const struct path *parent,
				     struct user_namespace *mnt_userns,
				     const char *name, int namelen,
				     struct btrfs_root *root,
				     bool readonly,
				     struct btrfs_qgroup_inherit *inherit)
{
	int ret;
	bool snapshot_force_cow = false;

	/*
	 * Force new buffered writes to reserve space even when NOCOW is
	 * possible. This is to avoid later writeback (running dealloc) to
	 * fallback to COW mode and unexpectedly fail with ENOSPC.
	 */
	btrfs_drew_read_lock(&root->snapshot_lock);

	ret = btrfs_start_delalloc_snapshot(root, false);
	if (ret)
		goto out;

	/*
	 * All previous writes have started writeback in NOCOW mode, so now
	 * we force future writes to fallback to COW mode during snapshot
	 * creation.
	 */
	atomic_inc(&root->snapshot_force_cow);
	snapshot_force_cow = true;

	btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);

	ret = btrfs_mksubvol(parent, mnt_userns, name, namelen,
			     root, readonly, inherit);
out:
	if (snapshot_force_cow)
		atomic_dec(&root->snapshot_force_cow);
	btrfs_drew_read_unlock(&root->snapshot_lock);
	return ret;
}
/*
 * Defrag specific helper to get an extent map.
 *
 * Differences between this and btrfs_get_extent() are:
 *
 * - No extent_map will be added to inode->extent_tree
 *   To reduce memory usage in the long run.
 *
 * - Extra optimization to skip file extents older than @newer_than
 *   By using btrfs_search_forward() we can skip entire file ranges that
 *   have extents created in past transactions, because btrfs_search_forward()
 *   will not visit leaves and nodes with a generation smaller than given
 *   minimal generation threshold (@newer_than).
 *
 * Return valid em if we find a file extent matching the requirement.
 * Return NULL if we can not find a file extent matching the requirement.
 *
 * Return ERR_PTR() for error.
 */
static struct extent_map *defrag_get_extent(struct btrfs_inode *inode,
					    u64 start, u64 newer_than)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path path = { 0 };
	struct extent_map *em;
	struct btrfs_key key;
	u64 ino = btrfs_ino(inode);
	int ret;

	em = alloc_extent_map();
	if (!em) {
		ret = -ENOMEM;
		goto err;
	}

	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = start;

	if (newer_than) {
		ret = btrfs_search_forward(root, &key, &path, newer_than);
		if (ret < 0)
			goto err;
		/* Can't find anything newer */
		if (ret > 0)
			goto not_found;
	} else {
		ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
		if (ret < 0)
			goto err;
	}
	if (path.slots[0] >= btrfs_header_nritems(path.nodes[0])) {
		/*
		 * If btrfs_search_slot() makes path to point beyond nritems,
		 * we should not have an empty leaf, as this inode must at
		 * least have its INODE_ITEM.
		 */
		ASSERT(btrfs_header_nritems(path.nodes[0]));
		path.slots[0] = btrfs_header_nritems(path.nodes[0]) - 1;
	}
	btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
	/* Perfect match, no need to go one slot back */
	if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY &&
	    key.offset == start)
		goto iterate;

	/* We didn't find a perfect match, needs to go one slot back */
	if (path.slots[0] > 0) {
		btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
		if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
			path.slots[0]--;
	}

iterate:
	/* Iterate through the path to find a file extent covering @start */
	while (true) {
		u64 extent_end;

		if (path.slots[0] >= btrfs_header_nritems(path.nodes[0]))
			goto next;

		btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);

		/*
		 * We may go one slot back to INODE_REF/XATTR item, then
		 * need to go forward until we reach an EXTENT_DATA.
		 * But we should still has the correct ino as key.objectid.
		 */
		if (WARN_ON(key.objectid < ino) || key.type < BTRFS_EXTENT_DATA_KEY)
			goto next;

		/* It's beyond our target range, definitely not extent found */
		if (key.objectid > ino || key.type > BTRFS_EXTENT_DATA_KEY)
			goto not_found;

		/*
		 *	|	|<- File extent ->|
		 *	\- start
		 *
		 * This means there is a hole between start and key.offset.
		 */
		if (key.offset > start) {
			em->start = start;
			em->orig_start = start;
			em->block_start = EXTENT_MAP_HOLE;
			em->len = key.offset - start;
			break;
		}

		fi = btrfs_item_ptr(path.nodes[0], path.slots[0],
				    struct btrfs_file_extent_item);
		extent_end = btrfs_file_extent_end(&path);

		/*
		 *	|<- file extent ->|	|
		 *				\- start
		 *
		 * We haven't reached start, search next slot.
		 */
		if (extent_end <= start)
			goto next;

		/* Now this extent covers @start, convert it to em */
		btrfs_extent_item_to_extent_map(inode, &path, fi, false, em);
		break;
next:
		ret = btrfs_next_item(root, &path);
		if (ret < 0)
			goto err;
		if (ret > 0)
			goto not_found;
	}
	btrfs_release_path(&path);
	return em;

not_found:
	btrfs_release_path(&path);
	free_extent_map(em);
	return NULL;

err:
	btrfs_release_path(&path);
	free_extent_map(em);
	return ERR_PTR(ret);
}
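/*
 * Look up the extent map covering @start for defrag.  The cached extent map
 * tree is tried first without locking the range; only if that misses (or the
 * cached entry is a merged one that cannot be trusted) is the range locked
 * and defrag_get_extent() used to rebuild the mapping from the on-disk items.
 */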
static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
					       u64 newer_than, bool locked)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em;
	const u32 sectorsize = BTRFS_I(inode)->root->fs_info->sectorsize;

	/*
	 * hopefully we have this extent in the tree already, try without
	 * the full extent lock
	 */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, sectorsize);
	read_unlock(&em_tree->lock);

	/*
	 * We can get a merged extent, in that case, we need to re-search
	 * tree to get the original em for defrag.
	 *
	 * If @newer_than is 0 or em::generation < newer_than, we can trust
	 * this em, as either we don't care about the generation, or the
	 * merged extent map will be rejected anyway.
	 */
	if (em && test_bit(EXTENT_FLAG_MERGED, &em->flags) &&
	    newer_than && em->generation >= newer_than) {
		free_extent_map(em);
		em = NULL;
	}

	if (!em) {
		struct extent_state *cached = NULL;
		u64 end = start + sectorsize - 1;

		/* get the big lock and read metadata off disk */
		if (!locked)
			lock_extent_bits(io_tree, start, end, &cached);
		em = defrag_get_extent(BTRFS_I(inode), start, newer_than);
		if (!locked)
			unlock_extent_cached(io_tree, start, end, &cached);

		if (IS_ERR(em))
			return NULL;
	}

	return em;
}
static u32 get_extent_max_capacity(const struct btrfs_fs_info *fs_info,
				   const struct extent_map *em)
{
	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
		return BTRFS_MAX_COMPRESSED;
	return fs_info->max_extent_size;
}
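/*
 * Decide whether the extent after @em would also be a defrag target.  If the
 * neighbour is missing, preallocated, already at max capacity, too old or
 * large enough on its own, defragging the current extent would not reduce
 * the total number of extents, so it is skipped.
 */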
static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em,
				     u32 extent_thresh, u64 newer_than, bool locked)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map *next;
	bool ret = false;

	/* this is the last extent */
	if (em->start + em->len >= i_size_read(inode))
		return false;

	/*
	 * Here we need to pass @newer_then when checking the next extent, or
	 * we will hit a case we mark current extent for defrag, but the next
	 * one will not be a target.
	 * This will just cause extra IO without really reducing the fragments.
	 */
	next = defrag_lookup_extent(inode, em->start + em->len, newer_than, locked);
	/* No more em or hole */
	if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
		goto out;
	if (test_bit(EXTENT_FLAG_PREALLOC, &next->flags))
		goto out;
	/*
	 * If the next extent is at its max capacity, defragging current extent
	 * makes no sense, as the total number of extents won't change.
	 */
	if (next->len >= get_extent_max_capacity(fs_info, em))
		goto out;
	/* Skip older extent */
	if (next->generation < newer_than)
		goto out;
	/* Also check extent size */
	if (next->len >= extent_thresh)
		goto out;

	ret = true;
out:
	free_extent_map(next);
	return ret;
}
/*
 * Prepare one page to be defragged.
 *
 * This will ensure:
 *
 * - Returned page is locked and has been set up properly.
 * - No ordered extent exists in the page.
 * - The page is uptodate.
 *
 * NOTE: Caller should also wait for page writeback after the cluster is
 * prepared, here we don't do writeback wait for each page.
 */
static struct page *defrag_prepare_one_page(struct btrfs_inode *inode,
					    pgoff_t index)
{
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	gfp_t mask = btrfs_alloc_write_mask(mapping);
	u64 page_start = (u64)index << PAGE_SHIFT;
	u64 page_end = page_start + PAGE_SIZE - 1;
	struct extent_state *cached_state = NULL;
	struct page *page;
	int ret;

again:
	page = find_or_create_page(mapping, index, mask);
	if (!page)
		return ERR_PTR(-ENOMEM);

	/*
	 * Since we can defragment files opened read-only, we can encounter
	 * transparent huge pages here (see CONFIG_READ_ONLY_THP_FOR_FS). We
	 * can't do I/O using huge pages yet, so return an error for now.
	 * Filesystem transparent huge pages are typically only used for
	 * executables that explicitly enable them, so this isn't very
	 * common.
	 */
	if (PageCompound(page)) {
		unlock_page(page);
		put_page(page);
		return ERR_PTR(-ETXTBSY);
	}

	ret = set_page_extent_mapped(page);
	if (ret < 0) {
		unlock_page(page);
		put_page(page);
		return ERR_PTR(ret);
	}

	/* Wait for any existing ordered extent in the range */
	while (1) {
		struct btrfs_ordered_extent *ordered;

		lock_extent_bits(&inode->io_tree, page_start, page_end, &cached_state);
		ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
		unlock_extent_cached(&inode->io_tree, page_start, page_end,
				     &cached_state);
		if (!ordered)
			break;

		unlock_page(page);
		btrfs_start_ordered_extent(ordered, 1);
		btrfs_put_ordered_extent(ordered);
		lock_page(page);
		/*
		 * We unlocked the page above, so we need check if it was
		 * released or not.
		 */
		if (page->mapping != mapping || !PagePrivate(page)) {
			unlock_page(page);
			put_page(page);
			goto again;
		}
	}

	/*
	 * Now the page range has no ordered extent any more. Read the page to
	 * make it uptodate.
	 */
	if (!PageUptodate(page)) {
		btrfs_read_folio(NULL, page_folio(page));
		lock_page(page);
		if (page->mapping != mapping || !PagePrivate(page)) {
			unlock_page(page);
			put_page(page);
			goto again;
		}
		if (!PageUptodate(page)) {
			unlock_page(page);
			put_page(page);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}
struct defrag_target_range {
	struct list_head list;
	u64 start;
	u64 len;
};
/*
 * Collect all valid target extents.
 *
 * @start:	   file offset to lookup
 * @len:	   length to lookup
 * @extent_thresh: file extent size threshold, any extent size >= this value
 *		   will be ignored
 * @newer_than:    only defrag extents newer than this value
 * @do_compress:   whether the defrag is doing compression
 *		   if true, @extent_thresh will be ignored and all regular
 *		   file extents meeting @newer_than will be targets.
 * @locked:	   if the range has already held extent lock
 * @target_list:   list of targets file extents
 */
static int defrag_collect_targets(struct btrfs_inode *inode,
				  u64 start, u64 len, u32 extent_thresh,
				  u64 newer_than, bool do_compress,
				  bool locked, struct list_head *target_list,
				  u64 *last_scanned_ret)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	bool last_is_target = false;
	u64 cur = start;
	int ret = 0;

	while (cur < start + len) {
		struct extent_map *em;
		struct defrag_target_range *new;
		bool next_mergeable = true;
		u64 range_len;

		last_is_target = false;
		em = defrag_lookup_extent(&inode->vfs_inode, cur,
					  newer_than, locked);
		if (!em)
			break;

		/*
		 * If the file extent is an inlined one, we may still want to
		 * defrag it (fallthrough) if it will cause a regular extent.
		 * This is for users who want to convert inline extents to
		 * regular ones through max_inline= mount option.
		 */
		if (em->block_start == EXTENT_MAP_INLINE &&
		    em->len <= inode->root->fs_info->max_inline)
			goto next;

		/* Skip hole/delalloc/preallocated extents */
		if (em->block_start == EXTENT_MAP_HOLE ||
		    em->block_start == EXTENT_MAP_DELALLOC ||
		    test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			goto next;

		/* Skip older extent */
		if (em->generation < newer_than)
			goto next;

		/* This em is under writeback, no need to defrag */
		if (em->generation == (u64)-1)
			goto next;

		/*
		 * Our start offset might be in the middle of an existing extent
		 * map, so take that into account.
		 */
		range_len = em->len - (cur - em->start);
		/*
		 * If this range of the extent map is already flagged for delalloc,
		 * skip it, because:
		 *
		 * 1) We could deadlock later, when trying to reserve space for
		 *    delalloc, because in case we can't immediately reserve space
		 *    the flusher can start delalloc and wait for the respective
		 *    ordered extents to complete. The deadlock would happen
		 *    because we do the space reservation while holding the range
		 *    locked, and starting writeback, or finishing an ordered
		 *    extent, requires locking the range;
		 *
		 * 2) If there's delalloc there, it means there's dirty pages for
		 *    which writeback has not started yet (we clean the delalloc
		 *    flag when starting writeback and after creating an ordered
		 *    extent). If we mark pages in an adjacent range for defrag,
		 *    then we will have a larger contiguous range for delalloc,
		 *    very likely resulting in a larger extent after writeback is
		 *    triggered (except in a case of free space fragmentation).
		 */
		if (test_range_bit(&inode->io_tree, cur, cur + range_len - 1,
				   EXTENT_DELALLOC, 0, NULL))
			goto next;

		/*
		 * For do_compress case, we want to compress all valid file
		 * extents, thus no @extent_thresh or mergeable check.
		 */
		if (do_compress)
			goto add;

		/* Skip too large extent */
		if (range_len >= extent_thresh)
			goto next;

		/*
		 * Skip extents already at its max capacity, this is mostly for
		 * compressed extents, which max cap is only 128K.
		 */
		if (em->len >= get_extent_max_capacity(fs_info, em))
			goto next;

		/*
		 * Normally there are no more extents after an inline one, thus
		 * @next_mergeable will normally be false and not defragged.
		 * So if an inline extent passed all above checks, just add it
		 * for defrag, and be converted to regular extents.
		 */
		if (em->block_start == EXTENT_MAP_INLINE)
			goto add;

		next_mergeable = defrag_check_next_extent(&inode->vfs_inode, em,
						extent_thresh, newer_than, locked);
		if (!next_mergeable) {
			struct defrag_target_range *last;

			/* Empty target list, no way to merge with last entry */
			if (list_empty(target_list))
				goto next;
			last = list_entry(target_list->prev,
					  struct defrag_target_range, list);
			/* Not mergeable with last entry */
			if (last->start + last->len != cur)
				goto next;

			/* Mergeable, fall through to add it to @target_list. */
		}

add:
		last_is_target = true;
		range_len = min(extent_map_end(em), start + len) - cur;
		/*
		 * This one is a good target, check if it can be merged into
		 * last range of the target list.
		 */
		if (!list_empty(target_list)) {
			struct defrag_target_range *last;

			last = list_entry(target_list->prev,
					  struct defrag_target_range, list);
			ASSERT(last->start + last->len <= cur);
			if (last->start + last->len == cur) {
				/* Mergeable, enlarge the last entry */
				last->len += range_len;
				goto next;
			}
			/* Fall through to allocate a new entry */
		}

		/* Allocate new defrag_target_range */
		new = kmalloc(sizeof(*new), GFP_NOFS);
		if (!new) {
			free_extent_map(em);
			ret = -ENOMEM;
			break;
		}
		new->start = cur;
		new->len = range_len;
		list_add_tail(&new->list, target_list);

next:
		cur = extent_map_end(em);
		free_extent_map(em);
	}
	if (ret < 0) {
		struct defrag_target_range *entry;
		struct defrag_target_range *tmp;

		list_for_each_entry_safe(entry, tmp, target_list, list) {
			list_del_init(&entry->list);
			kfree(entry);
		}
	}
	if (!ret && last_scanned_ret) {
		/*
		 * If the last extent is not a target, the caller can skip to
		 * the end of that extent.
		 * Otherwise, we can only go the end of the specified range.
		 */
		if (!last_is_target)
			*last_scanned_ret = max(cur, *last_scanned_ret);
		else
			*last_scanned_ret = max(start + len, *last_scanned_ret);
	}
	return ret;
}
#define CLUSTER_SIZE	(SZ_256K)
static_assert(IS_ALIGNED(CLUSTER_SIZE, PAGE_SIZE));
/*
 * Defrag one contiguous target range.
 *
 * @inode:	target inode
 * @target:	target range to defrag
 * @pages:	locked pages covering the defrag range
 * @nr_pages:	number of locked pages
 *
 * Caller should ensure:
 *
 * - Pages are prepared
 *   Pages should be locked, no ordered extent in the pages range,
 *   no writeback.
 *
 * - Extent bits are locked
 */
static int defrag_one_locked_target(struct btrfs_inode *inode,
				    struct defrag_target_range *target,
				    struct page **pages, int nr_pages,
				    struct extent_state **cached_state)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_changeset *data_reserved = NULL;
	const u64 start = target->start;
	const u64 len = target->len;
	unsigned long last_index = (start + len - 1) >> PAGE_SHIFT;
	unsigned long start_index = start >> PAGE_SHIFT;
	unsigned long first_index = page_index(pages[0]);
	int ret = 0;
	int i;

	ASSERT(last_index - first_index + 1 <= nr_pages);

	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, start, len);
	if (ret < 0)
		return ret;
	clear_extent_bit(&inode->io_tree, start, start + len - 1,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
			 EXTENT_DEFRAG, 0, 0, cached_state);
	set_extent_defrag(&inode->io_tree, start, start + len - 1, cached_state);

	/* Update the page status */
	for (i = start_index - first_index; i <= last_index - first_index; i++) {
		ClearPageChecked(pages[i]);
		btrfs_page_clamp_set_dirty(fs_info, pages[i], start, len);
	}
	btrfs_delalloc_release_extents(inode, len);
	extent_changeset_free(data_reserved);

	return ret;
}
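/*
 * Defrag one cluster-sized range: prepare and lock all pages, lock the extent
 * range, re-collect the target sub-ranges under the lock and then mark each
 * target dirty and tagged with EXTENT_DEFRAG so writeback rewrites it.
 */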
static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
			    u32 extent_thresh, u64 newer_than, bool do_compress,
			    u64 *last_scanned_ret)
{
	struct extent_state *cached_state = NULL;
	struct defrag_target_range *entry;
	struct defrag_target_range *tmp;
	LIST_HEAD(target_list);
	struct page **pages;
	const u32 sectorsize = inode->root->fs_info->sectorsize;
	u64 last_index = (start + len - 1) >> PAGE_SHIFT;
	u64 start_index = start >> PAGE_SHIFT;
	unsigned int nr_pages = last_index - start_index + 1;
	int ret = 0;
	int i;

	ASSERT(nr_pages <= CLUSTER_SIZE / PAGE_SIZE);
	ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(len, sectorsize));

	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
	if (!pages)
		return -ENOMEM;

	/* Prepare all pages */
	for (i = 0; i < nr_pages; i++) {
		pages[i] = defrag_prepare_one_page(inode, start_index + i);
		if (IS_ERR(pages[i])) {
			ret = PTR_ERR(pages[i]);
			pages[i] = NULL;
			goto free_pages;
		}
	}
	for (i = 0; i < nr_pages; i++)
		wait_on_page_writeback(pages[i]);

	/* Lock the pages range */
	lock_extent_bits(&inode->io_tree, start_index << PAGE_SHIFT,
			 (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
			 &cached_state);
	/*
	 * Now we have a consistent view about the extent map, re-check
	 * which range really needs to be defragged.
	 *
	 * And this time we have extent locked already, pass @locked = true
	 * so that we won't relock the extent range and cause deadlock.
	 */
	ret = defrag_collect_targets(inode, start, len, extent_thresh,
				     newer_than, do_compress, true,
				     &target_list, last_scanned_ret);
	if (ret < 0)
		goto unlock_extent;

	list_for_each_entry(entry, &target_list, list) {
		ret = defrag_one_locked_target(inode, entry, pages, nr_pages,
					       &cached_state);
		if (ret < 0)
			break;
	}

	list_for_each_entry_safe(entry, tmp, &target_list, list) {
		list_del_init(&entry->list);
		kfree(entry);
	}
unlock_extent:
	unlock_extent_cached(&inode->io_tree, start_index << PAGE_SHIFT,
			     (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
			     &cached_state);
free_pages:
	for (i = 0; i < nr_pages; i++) {
		if (pages[i]) {
			unlock_page(pages[i]);
			put_page(pages[i]);
		}
	}
	kfree(pages);
	return ret;
}
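/*
 * Walk one ~256K cluster: collect candidate ranges without holding the extent
 * lock, apply the max_sectors budget, kick off readahead and feed each range
 * to defrag_one_range(), accumulating the number of sectors defragged.
 */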
static int defrag_one_cluster(struct btrfs_inode *inode,
			      struct file_ra_state *ra,
			      u64 start, u32 len, u32 extent_thresh,
			      u64 newer_than, bool do_compress,
			      unsigned long *sectors_defragged,
			      unsigned long max_sectors,
			      u64 *last_scanned_ret)
{
	const u32 sectorsize = inode->root->fs_info->sectorsize;
	struct defrag_target_range *entry;
	struct defrag_target_range *tmp;
	LIST_HEAD(target_list);
	int ret;

	ret = defrag_collect_targets(inode, start, len, extent_thresh,
				     newer_than, do_compress, false,
				     &target_list, NULL);
	if (ret < 0)
		goto out;

	list_for_each_entry(entry, &target_list, list) {
		u32 range_len = entry->len;

		/* Reached or beyond the limit */
		if (max_sectors && *sectors_defragged >= max_sectors) {
			ret = 1;
			break;
		}

		if (max_sectors)
			range_len = min_t(u32, range_len,
				(max_sectors - *sectors_defragged) * sectorsize);

		/*
		 * If defrag_one_range() has updated last_scanned_ret,
		 * our range may already be invalid (e.g. hole punched).
		 * Skip if our range is before last_scanned_ret, as there is
		 * no need to defrag the range anymore.
		 */
		if (entry->start + range_len <= *last_scanned_ret)
			continue;

		if (ra)
			page_cache_sync_readahead(inode->vfs_inode.i_mapping,
				ra, NULL, entry->start >> PAGE_SHIFT,
				((entry->start + range_len - 1) >> PAGE_SHIFT) -
				(entry->start >> PAGE_SHIFT) + 1);
		/*
		 * Here we may not defrag any range if holes are punched before
		 * we locked the pages.
		 * But that's fine, it only affects the @sectors_defragged
		 * accounting.
		 */
		ret = defrag_one_range(inode, entry->start, range_len,
				       extent_thresh, newer_than, do_compress,
				       last_scanned_ret);
		if (ret < 0)
			break;
		*sectors_defragged += range_len >>
				      inode->root->fs_info->sectorsize_bits;
	}
out:
	list_for_each_entry_safe(entry, tmp, &target_list, list) {
		list_del_init(&entry->list);
		kfree(entry);
	}
	if (ret >= 0)
		*last_scanned_ret = max(*last_scanned_ret, start + len);
	return ret;
}
/*
 * Entry point to file defragmentation.
 *
 * @inode:	   inode to be defragged
 * @ra:		   readahead state (can be NULL)
 * @range:	   defrag options including range and flags
 * @newer_than:	   minimum transid to defrag
 * @max_to_defrag: max number of sectors to be defragged, if 0, the whole inode
 *		   will be defragged.
 *
 * Return <0 for error.
 * Return >=0 for the number of sectors defragged, and range->start will be updated
 * to indicate the file offset where next defrag should be started at.
 * (Mostly for autodefrag, which sets @max_to_defrag thus we may exit early without
 *  defragging all the range).
 */
int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
		      struct btrfs_ioctl_defrag_range_args *range,
		      u64 newer_than, unsigned long max_to_defrag)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	unsigned long sectors_defragged = 0;
	u64 isize = i_size_read(inode);
	u64 cur;
	u64 last_byte;
	bool do_compress = range->flags & BTRFS_DEFRAG_RANGE_COMPRESS;
	bool ra_allocated = false;
	int compress_type = BTRFS_COMPRESS_ZLIB;
	int ret = 0;
	u32 extent_thresh = range->extent_thresh;
	pgoff_t start_index;

	if (isize == 0)
		return 0;

	if (range->start >= isize)
		return -EINVAL;

	if (do_compress) {
		if (range->compress_type >= BTRFS_NR_COMPRESS_TYPES)
			return -EINVAL;
		if (range->compress_type)
			compress_type = range->compress_type;
	}

	if (extent_thresh == 0)
		extent_thresh = SZ_256K;

	if (range->start + range->len > range->start) {
		/* Got a specific range */
		last_byte = min(isize, range->start + range->len);
	} else {
		/* Defrag until file end */
		last_byte = isize;
	}

	/* Align the range */
	cur = round_down(range->start, fs_info->sectorsize);
	last_byte = round_up(last_byte, fs_info->sectorsize) - 1;

	/*
	 * If we were not given a ra, allocate a readahead context. As
	 * readahead is just an optimization, defrag will work without it so
	 * we don't error out.
	 */
	if (!ra) {
		ra_allocated = true;
		ra = kzalloc(sizeof(*ra), GFP_KERNEL);
		if (ra)
			file_ra_state_init(ra, inode->i_mapping);
	}

	/*
	 * Make writeback start from the beginning of the range, so that the
	 * defrag range can be written sequentially.
	 */
	start_index = cur >> PAGE_SHIFT;
	if (start_index < inode->i_mapping->writeback_index)
		inode->i_mapping->writeback_index = start_index;

	while (cur < last_byte) {
		const unsigned long prev_sectors_defragged = sectors_defragged;
		u64 last_scanned = cur;
		u64 cluster_end;

		if (btrfs_defrag_cancelled(fs_info)) {
			ret = -EAGAIN;
			break;
		}

		/* We want the cluster end at page boundary when possible */
		cluster_end = (((cur >> PAGE_SHIFT) +
			       (SZ_256K >> PAGE_SHIFT)) << PAGE_SHIFT) - 1;
		cluster_end = min(cluster_end, last_byte);

		btrfs_inode_lock(inode, 0);
		if (IS_SWAPFILE(inode)) {
			ret = -ETXTBSY;
			btrfs_inode_unlock(inode, 0);
			break;
		}
		if (!(inode->i_sb->s_flags & SB_ACTIVE)) {
			btrfs_inode_unlock(inode, 0);
			break;
		}
		if (do_compress)
			BTRFS_I(inode)->defrag_compress = compress_type;
		ret = defrag_one_cluster(BTRFS_I(inode), ra, cur,
				cluster_end + 1 - cur, extent_thresh,
				newer_than, do_compress, &sectors_defragged,
				max_to_defrag, &last_scanned);

		if (sectors_defragged > prev_sectors_defragged)
			balance_dirty_pages_ratelimited(inode->i_mapping);

		btrfs_inode_unlock(inode, 0);
		if (ret < 0)
			break;
		cur = max(cluster_end + 1, last_scanned);
		if (ret > 0) {
			ret = 0;
			break;
		}
		cond_resched();
	}

	if (ra_allocated)
		kfree(ra);
	/*
	 * Update range.start for autodefrag, this will indicate where to start
	 * in next run.
	 */
	range->start = cur;
	if (sectors_defragged) {
		/*
		 * We have defragged some sectors, for compression case they
		 * need to be written back immediately.
		 */
		if (range->flags & BTRFS_DEFRAG_RANGE_START_IO) {
			filemap_flush(inode->i_mapping);
			if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
				     &BTRFS_I(inode)->runtime_flags))
				filemap_flush(inode->i_mapping);
		}
		if (range->compress_type == BTRFS_COMPRESS_LZO)
			btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
		else if (range->compress_type == BTRFS_COMPRESS_ZSTD)
			btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
		ret = sectors_defragged;
	}
	if (do_compress) {
		btrfs_inode_lock(inode, 0);
		BTRFS_I(inode)->defrag_compress = BTRFS_COMPRESS_NONE;
		btrfs_inode_unlock(inode, 0);
	}
	return ret;
}
/*
 * Try to start exclusive operation @type or cancel it if it's running.
 *
 * Return:
 *   0        - normal mode, newly claimed op started
 *   >0       - normal mode, something else is running,
 *              return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS to user space
 *   ECANCELED - cancel mode, successful cancel
 *   ENOTCONN  - cancel mode, operation not running anymore
 */
static int exclop_start_or_cancel_reloc(struct btrfs_fs_info *fs_info,
			enum btrfs_exclusive_operation type, bool cancel)
{
	if (!cancel) {
		/* Start normal op */
		if (!btrfs_exclop_start(fs_info, type))
			return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
		/* Exclusive operation is now claimed */
		return 0;
	}

	/* Cancel running op */
	if (btrfs_exclop_start_try_lock(fs_info, type)) {
		/*
		 * This blocks any exclop finish from setting it to NONE, so we
		 * request cancellation. Either it runs and we will wait for it,
		 * or it has finished and no waiting will happen.
		 */
		atomic_inc(&fs_info->reloc_cancel_req);
		btrfs_exclop_start_unlock(fs_info);

		if (test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags))
			wait_on_bit(&fs_info->flags, BTRFS_FS_RELOC_RUNNING,
				    TASK_INTERRUPTIBLE);

		return -ECANCELED;
	}

	/* Something else is running or none */
	return -ENOTCONN;
}
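/*
 * BTRFS_IOC_RESIZE handler.  The size string in vol_args->name accepts an
 * optional "devid:" prefix and either "max", an absolute size, or a +/-
 * relative adjustment, e.g. "2:-4g" shrinks devid 2 by 4GiB; sizes are
 * rounded down to the sector size.
 */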
static noinline int btrfs_ioctl_resize(struct file *file,
					void __user *arg)
{
	BTRFS_DEV_LOOKUP_ARGS(args);
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	u64 new_size;
	u64 old_size;
	u64 devid = 1;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ioctl_vol_args *vol_args;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device = NULL;
	char *sizestr;
	char *retptr;
	char *devstr = NULL;
	int ret = 0;
	int mod = 0;
	bool cancel;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	/*
	 * Read the arguments before checking exclusivity to be able to
	 * distinguish regular resize and cancel
	 */
	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args)) {
		ret = PTR_ERR(vol_args);
		goto out_drop;
	}
	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
	sizestr = vol_args->name;
	cancel = (strcmp("cancel", sizestr) == 0);
	ret = exclop_start_or_cancel_reloc(fs_info, BTRFS_EXCLOP_RESIZE, cancel);
	if (ret)
		goto out_free;
	/* Exclusive operation is now claimed */

	devstr = strchr(sizestr, ':');
	if (devstr) {
		sizestr = devstr + 1;
		*devstr = '\0';
		devstr = vol_args->name;
		ret = kstrtoull(devstr, 10, &devid);
		if (ret)
			goto out_finish;
		if (!devid) {
			ret = -EINVAL;
			goto out_finish;
		}
		btrfs_info(fs_info, "resizing devid %llu", devid);
	}

	args.devid = devid;
	device = btrfs_find_device(fs_info->fs_devices, &args);
	if (!device) {
		btrfs_info(fs_info, "resizer unable to find device %llu",
			   devid);
		ret = -ENODEV;
		goto out_finish;
	}

	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		btrfs_info(fs_info,
			   "resizer unable to apply on readonly device %llu",
			   devid);
		ret = -EPERM;
		goto out_finish;
	}

	if (!strcmp(sizestr, "max"))
		new_size = bdev_nr_bytes(device->bdev);
	else {
		if (sizestr[0] == '-') {
			mod = -1;
			sizestr++;
		} else if (sizestr[0] == '+') {
			mod = 1;
			sizestr++;
		}
		new_size = memparse(sizestr, &retptr);
		if (*retptr != '\0' || new_size == 0) {
			ret = -EINVAL;
			goto out_finish;
		}
	}

	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -EPERM;
		goto out_finish;
	}

	old_size = btrfs_device_get_total_bytes(device);

	if (mod < 0) {
		if (new_size > old_size) {
			ret = -EINVAL;
			goto out_finish;
		}
		new_size = old_size - new_size;
	} else if (mod > 0) {
		if (new_size > ULLONG_MAX - old_size) {
			ret = -ERANGE;
			goto out_finish;
		}
		new_size = old_size + new_size;
	}

	if (new_size < SZ_256M) {
		ret = -EINVAL;
		goto out_finish;
	}
	if (new_size > bdev_nr_bytes(device->bdev)) {
		ret = -EFBIG;
		goto out_finish;
	}

	new_size = round_down(new_size, fs_info->sectorsize);

	if (new_size > old_size) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			goto out_finish;
		}
		ret = btrfs_grow_device(trans, device, new_size);
		btrfs_commit_transaction(trans);
	} else if (new_size < old_size) {
		ret = btrfs_shrink_device(device, new_size);
	} /* equal, nothing need to do */

	if (ret == 0 && new_size != old_size)
		btrfs_info_in_rcu(fs_info,
			"resize device %s (devid %llu) from %llu to %llu",
			rcu_str_deref(device->name), device->devid,
			old_size, new_size);
out_finish:
	btrfs_exclop_finish(fs_info);
out_free:
	kfree(vol_args);
out_drop:
	mnt_drop_write_file(file);
	return ret;
}
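/*
 * Common helper for the snapshot/subvolume creation ioctls: validate the
 * name, then either create a plain subvolume or snapshot the subvolume the
 * passed-in fd refers to (which must live on the same filesystem and either
 * be owned by the caller or require a capable caller).
 */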
static noinline int __btrfs_ioctl_snap_create(struct file *file,
				struct user_namespace *mnt_userns,
				const char *name, unsigned long fd, int subvol,
				bool readonly,
				struct btrfs_qgroup_inherit *inherit)
{
	int namelen;
	int ret = 0;

	if (!S_ISDIR(file_inode(file)->i_mode))
		return -ENOTDIR;

	ret = mnt_want_write_file(file);
	if (ret)
		goto out;

	namelen = strlen(name);
	if (strchr(name, '/')) {
		ret = -EINVAL;
		goto out_drop_write;
	}

	if (name[0] == '.' &&
	   (namelen == 1 || (name[1] == '.' && namelen == 2))) {
		ret = -EEXIST;
		goto out_drop_write;
	}

	if (subvol) {
		ret = btrfs_mksubvol(&file->f_path, mnt_userns, name,
				     namelen, NULL, readonly, inherit);
	} else {
		struct fd src = fdget(fd);
		struct inode *src_inode;

		if (!src.file) {
			ret = -EINVAL;
			goto out_drop_write;
		}

		src_inode = file_inode(src.file);
		if (src_inode->i_sb != file_inode(file)->i_sb) {
			btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
				   "Snapshot src from another FS");
			ret = -EXDEV;
		} else if (!inode_owner_or_capable(mnt_userns, src_inode)) {
			/*
			 * Subvolume creation is not restricted, but snapshots
			 * are limited to own subvolumes only
			 */
			ret = -EPERM;
		} else {
			ret = btrfs_mksnapshot(&file->f_path, mnt_userns,
					       name, namelen,
					       BTRFS_I(src_inode)->root,
					       readonly, inherit);
		}
		fdput(src);
	}
out_drop_write:
	mnt_drop_write_file(file);
out:
	return ret;
}
static noinline int btrfs_ioctl_snap_create(struct file *file,
					    void __user *arg, int subvol)
{
	struct btrfs_ioctl_vol_args *vol_args;
	int ret;

	if (!S_ISDIR(file_inode(file)->i_mode))
		return -ENOTDIR;

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args))
		return PTR_ERR(vol_args);
	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';

	ret = __btrfs_ioctl_snap_create(file, file_mnt_user_ns(file),
					vol_args->name, vol_args->fd, subvol,
					false, NULL);

	kfree(vol_args);
	return ret;
}
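/*
 * v2 of the snapshot/subvolume creation ioctl: adds the read-only flag and an
 * optional qgroup inheritance descriptor, whose size and counters are bounds
 * checked against vol_args->size before use.
 */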
static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
					       void __user *arg, int subvol)
{
	struct btrfs_ioctl_vol_args_v2 *vol_args;
	int ret;
	bool readonly = false;
	struct btrfs_qgroup_inherit *inherit = NULL;

	if (!S_ISDIR(file_inode(file)->i_mode))
		return -ENOTDIR;

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args))
		return PTR_ERR(vol_args);
	vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';

	if (vol_args->flags & ~BTRFS_SUBVOL_CREATE_ARGS_MASK) {
		ret = -EOPNOTSUPP;
		goto free_args;
	}

	if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
		readonly = true;
	if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
		u64 nums;

		if (vol_args->size < sizeof(*inherit) ||
		    vol_args->size > PAGE_SIZE) {
			ret = -EINVAL;
			goto free_args;
		}
		inherit = memdup_user(vol_args->qgroup_inherit, vol_args->size);
		if (IS_ERR(inherit)) {
			ret = PTR_ERR(inherit);
			goto free_args;
		}

		if (inherit->num_qgroups > PAGE_SIZE ||
		    inherit->num_ref_copies > PAGE_SIZE ||
		    inherit->num_excl_copies > PAGE_SIZE) {
			ret = -EINVAL;
			goto free_inherit;
		}

		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
		       2 * inherit->num_excl_copies;
		if (vol_args->size != struct_size(inherit, qgroups, nums)) {
			ret = -EINVAL;
			goto free_inherit;
		}
	}

	ret = __btrfs_ioctl_snap_create(file, file_mnt_user_ns(file),
					vol_args->name, vol_args->fd, subvol,
					readonly, inherit);
	if (ret)
		goto free_inherit;
free_inherit:
	kfree(inherit);
free_args:
	kfree(vol_args);
	return ret;
}
static noinline int btrfs_ioctl_subvol_getflags(struct inode *inode,
						void __user *arg)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	u64 flags = 0;

	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID)
		return -EINVAL;

	down_read(&fs_info->subvol_sem);
	if (btrfs_root_readonly(root))
		flags |= BTRFS_SUBVOL_RDONLY;
	up_read(&fs_info->subvol_sem);

	if (copy_to_user(arg, &flags, sizeof(flags)))
		ret = -EFAULT;

	return ret;
}
2305 static noinline
int btrfs_ioctl_subvol_setflags(struct file
*file
,
2308 struct inode
*inode
= file_inode(file
);
2309 struct btrfs_fs_info
*fs_info
= btrfs_sb(inode
->i_sb
);
2310 struct btrfs_root
*root
= BTRFS_I(inode
)->root
;
2311 struct btrfs_trans_handle
*trans
;
2316 if (!inode_owner_or_capable(file_mnt_user_ns(file
), inode
))
2319 ret
= mnt_want_write_file(file
);
2323 if (btrfs_ino(BTRFS_I(inode
)) != BTRFS_FIRST_FREE_OBJECTID
) {
2325 goto out_drop_write
;
2328 if (copy_from_user(&flags
, arg
, sizeof(flags
))) {
2330 goto out_drop_write
;
2333 if (flags
& ~BTRFS_SUBVOL_RDONLY
) {
2335 goto out_drop_write
;
2338 down_write(&fs_info
->subvol_sem
);
2341 if (!!(flags
& BTRFS_SUBVOL_RDONLY
) == btrfs_root_readonly(root
))
2344 root_flags
= btrfs_root_flags(&root
->root_item
);
2345 if (flags
& BTRFS_SUBVOL_RDONLY
) {
2346 btrfs_set_root_flags(&root
->root_item
,
2347 root_flags
| BTRFS_ROOT_SUBVOL_RDONLY
);
2350 * Block RO -> RW transition if this subvolume is involved in
2353 spin_lock(&root
->root_item_lock
);
2354 if (root
->send_in_progress
== 0) {
2355 btrfs_set_root_flags(&root
->root_item
,
2356 root_flags
& ~BTRFS_ROOT_SUBVOL_RDONLY
);
2357 spin_unlock(&root
->root_item_lock
);
2359 spin_unlock(&root
->root_item_lock
);
2361 "Attempt to set subvolume %llu read-write during send",
2362 root
->root_key
.objectid
);
2368 trans
= btrfs_start_transaction(root
, 1);
2369 if (IS_ERR(trans
)) {
2370 ret
= PTR_ERR(trans
);
2374 ret
= btrfs_update_root(trans
, fs_info
->tree_root
,
2375 &root
->root_key
, &root
->root_item
);
2377 btrfs_end_transaction(trans
);
2381 ret
= btrfs_commit_transaction(trans
);
2385 btrfs_set_root_flags(&root
->root_item
, root_flags
);
2387 up_write(&fs_info
->subvol_sem
);
2389 mnt_drop_write_file(file
);
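/* Check whether @key falls within the [min, max] range described by @sk. */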
static noinline int key_in_sk(struct btrfs_key *key,
			      struct btrfs_ioctl_search_key *sk)
{
	struct btrfs_key test;
	int ret;

	test.objectid = sk->min_objectid;
	test.type = sk->min_type;
	test.offset = sk->min_offset;

	ret = btrfs_comp_cpu_keys(key, &test);
	if (ret < 0)
		return 0;

	test.objectid = sk->max_objectid;
	test.type = sk->max_type;
	test.offset = sk->max_offset;

	ret = btrfs_comp_cpu_keys(key, &test);
	if (ret > 0)
		return 0;
	return 1;
}
2418 static noinline
int copy_to_sk(struct btrfs_path
*path
,
2419 struct btrfs_key
*key
,
2420 struct btrfs_ioctl_search_key
*sk
,
2423 unsigned long *sk_offset
,
2427 struct extent_buffer
*leaf
;
2428 struct btrfs_ioctl_search_header sh
;
2429 struct btrfs_key test
;
2430 unsigned long item_off
;
2431 unsigned long item_len
;
2437 leaf
= path
->nodes
[0];
2438 slot
= path
->slots
[0];
2439 nritems
= btrfs_header_nritems(leaf
);
2441 if (btrfs_header_generation(leaf
) > sk
->max_transid
) {
2445 found_transid
= btrfs_header_generation(leaf
);
2447 for (i
= slot
; i
< nritems
; i
++) {
2448 item_off
= btrfs_item_ptr_offset(leaf
, i
);
2449 item_len
= btrfs_item_size(leaf
, i
);
2451 btrfs_item_key_to_cpu(leaf
, key
, i
);
2452 if (!key_in_sk(key
, sk
))
2455 if (sizeof(sh
) + item_len
> *buf_size
) {
2462 * return one empty item back for v1, which does not
2466 *buf_size
= sizeof(sh
) + item_len
;
2471 if (sizeof(sh
) + item_len
+ *sk_offset
> *buf_size
) {
2476 sh
.objectid
= key
->objectid
;
2477 sh
.offset
= key
->offset
;
2478 sh
.type
= key
->type
;
2480 sh
.transid
= found_transid
;
2483 * Copy search result header. If we fault then loop again so we
2484 * can fault in the pages and -EFAULT there if there's a
2485 * problem. Otherwise we'll fault and then copy the buffer in
2486 * properly this next time through
2488 if (copy_to_user_nofault(ubuf
+ *sk_offset
, &sh
, sizeof(sh
))) {
2493 *sk_offset
+= sizeof(sh
);
2496 char __user
*up
= ubuf
+ *sk_offset
;
2498 * Copy the item, same behavior as above, but reset the
2499 * * sk_offset so we copy the full thing again.
2501 if (read_extent_buffer_to_user_nofault(leaf
, up
,
2502 item_off
, item_len
)) {
2504 *sk_offset
-= sizeof(sh
);
2508 *sk_offset
+= item_len
;
2512 if (ret
) /* -EOVERFLOW from above */
2515 if (*num_found
>= sk
->nr_items
) {
2522 test
.objectid
= sk
->max_objectid
;
2523 test
.type
= sk
->max_type
;
2524 test
.offset
= sk
->max_offset
;
2525 if (btrfs_comp_cpu_keys(key
, &test
) >= 0)
2527 else if (key
->offset
< (u64
)-1)
2529 else if (key
->type
< (u8
)-1) {
2532 } else if (key
->objectid
< (u64
)-1) {
2540 * 0: all items from this leaf copied, continue with next
2541 * 1: * more items can be copied, but unused buffer is too small
2542 * * all items were found
2543 * Either way, it will stops the loop which iterates to the next
2545 * -EOVERFLOW: item was to large for buffer
2546 * -EFAULT: could not copy extent buffer back to userspace
2551 static noinline
int search_ioctl(struct inode
*inode
,
2552 struct btrfs_ioctl_search_key
*sk
,
2556 struct btrfs_fs_info
*info
= btrfs_sb(inode
->i_sb
);
2557 struct btrfs_root
*root
;
2558 struct btrfs_key key
;
2559 struct btrfs_path
*path
;
2562 unsigned long sk_offset
= 0;
2564 if (*buf_size
< sizeof(struct btrfs_ioctl_search_header
)) {
2565 *buf_size
= sizeof(struct btrfs_ioctl_search_header
);
2569 path
= btrfs_alloc_path();
2573 if (sk
->tree_id
== 0) {
2574 /* search the root of the inode that was passed */
2575 root
= btrfs_grab_root(BTRFS_I(inode
)->root
);
2577 root
= btrfs_get_fs_root(info
, sk
->tree_id
, true);
2579 btrfs_free_path(path
);
2580 return PTR_ERR(root
);
2584 key
.objectid
= sk
->min_objectid
;
2585 key
.type
= sk
->min_type
;
2586 key
.offset
= sk
->min_offset
;
2591 * Ensure that the whole user buffer is faulted in at sub-page
2592 * granularity, otherwise the loop may live-lock.
2594 if (fault_in_subpage_writeable(ubuf
+ sk_offset
,
2595 *buf_size
- sk_offset
))
2598 ret
= btrfs_search_forward(root
, &key
, path
, sk
->min_transid
);
2604 ret
= copy_to_sk(path
, &key
, sk
, buf_size
, ubuf
,
2605 &sk_offset
, &num_found
);
2606 btrfs_release_path(path
);
2614 sk
->nr_items
= num_found
;
2615 btrfs_put_root(root
);
2616 btrfs_free_path(path
);
static noinline int btrfs_ioctl_tree_search(struct inode *inode,
					    void __user *argp)
{
	struct btrfs_ioctl_search_args __user *uargs = argp;
	struct btrfs_ioctl_search_key sk;
	int ret;
	size_t buf_size;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&sk, &uargs->key, sizeof(sk)))
		return -EFAULT;

	buf_size = sizeof(uargs->buf);

	ret = search_ioctl(inode, &sk, &buf_size, uargs->buf);

	/*
	 * In the original implementation an overflow is handled by returning a
	 * search header with a len of zero, so reset ret.
	 */
	if (ret == -EOVERFLOW)
		ret = 0;

	if (ret == 0 && copy_to_user(&uargs->key, &sk, sizeof(sk)))
		ret = -EFAULT;

	return ret;
}
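/*
 * v2 of the tree search ioctl: the result buffer size is supplied by user
 * space (capped below) instead of being fixed by the ioctl structure.
 */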
static noinline int btrfs_ioctl_tree_search_v2(struct inode *inode,
					       void __user *argp)
{
	struct btrfs_ioctl_search_args_v2 __user *uarg = argp;
	struct btrfs_ioctl_search_args_v2 args;
	int ret;
	size_t buf_size;
	const size_t buf_limit = SZ_16M;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* copy search header and buffer size */
	if (copy_from_user(&args, uarg, sizeof(args)))
		return -EFAULT;

	buf_size = args.buf_size;

	/* limit result size to 16MB */
	if (buf_size > buf_limit)
		buf_size = buf_limit;

	ret = search_ioctl(inode, &args.key, &buf_size,
			   (char __user *)(&uarg->buf[0]));
	if (ret == 0 && copy_to_user(&uarg->key, &args.key, sizeof(args.key)))
		ret = -EFAULT;
	else if (ret == -EOVERFLOW &&
		 copy_to_user(&uarg->buf_size, &buf_size, sizeof(buf_size)))
		ret = -EFAULT;

	return ret;
}
2684 * Search INODE_REFs to identify path name of 'dirid' directory
2685 * in a 'tree_id' tree. and sets path name to 'name'.
2687 static noinline
int btrfs_search_path_in_tree(struct btrfs_fs_info
*info
,
2688 u64 tree_id
, u64 dirid
, char *name
)
2690 struct btrfs_root
*root
;
2691 struct btrfs_key key
;
2697 struct btrfs_inode_ref
*iref
;
2698 struct extent_buffer
*l
;
2699 struct btrfs_path
*path
;
2701 if (dirid
== BTRFS_FIRST_FREE_OBJECTID
) {
2706 path
= btrfs_alloc_path();
2710 ptr
= &name
[BTRFS_INO_LOOKUP_PATH_MAX
- 1];
2712 root
= btrfs_get_fs_root(info
, tree_id
, true);
2714 ret
= PTR_ERR(root
);
2719 key
.objectid
= dirid
;
2720 key
.type
= BTRFS_INODE_REF_KEY
;
2721 key
.offset
= (u64
)-1;
2724 ret
= btrfs_search_backwards(root
, &key
, path
);
2733 slot
= path
->slots
[0];
2735 iref
= btrfs_item_ptr(l
, slot
, struct btrfs_inode_ref
);
2736 len
= btrfs_inode_ref_name_len(l
, iref
);
2738 total_len
+= len
+ 1;
2740 ret
= -ENAMETOOLONG
;
2745 read_extent_buffer(l
, ptr
, (unsigned long)(iref
+ 1), len
);
2747 if (key
.offset
== BTRFS_FIRST_FREE_OBJECTID
)
2750 btrfs_release_path(path
);
2751 key
.objectid
= key
.offset
;
2752 key
.offset
= (u64
)-1;
2753 dirid
= key
.objectid
;
2755 memmove(name
, ptr
, total_len
);
2756 name
[total_len
] = '\0';
2759 btrfs_put_root(root
);
2760 btrfs_free_path(path
);
2764 static int btrfs_search_path_in_tree_user(struct user_namespace
*mnt_userns
,
2765 struct inode
*inode
,
2766 struct btrfs_ioctl_ino_lookup_user_args
*args
)
2768 struct btrfs_fs_info
*fs_info
= BTRFS_I(inode
)->root
->fs_info
;
2769 struct super_block
*sb
= inode
->i_sb
;
2770 struct btrfs_key upper_limit
= BTRFS_I(inode
)->location
;
2771 u64 treeid
= BTRFS_I(inode
)->root
->root_key
.objectid
;
2772 u64 dirid
= args
->dirid
;
2773 unsigned long item_off
;
2774 unsigned long item_len
;
2775 struct btrfs_inode_ref
*iref
;
2776 struct btrfs_root_ref
*rref
;
2777 struct btrfs_root
*root
= NULL
;
2778 struct btrfs_path
*path
;
2779 struct btrfs_key key
, key2
;
2780 struct extent_buffer
*leaf
;
2781 struct inode
*temp_inode
;
2788 path
= btrfs_alloc_path();
2793 * If the bottom subvolume does not exist directly under upper_limit,
2794 * construct the path in from the bottom up.
2796 if (dirid
!= upper_limit
.objectid
) {
2797 ptr
= &args
->path
[BTRFS_INO_LOOKUP_USER_PATH_MAX
- 1];
2799 root
= btrfs_get_fs_root(fs_info
, treeid
, true);
2801 ret
= PTR_ERR(root
);
2805 key
.objectid
= dirid
;
2806 key
.type
= BTRFS_INODE_REF_KEY
;
2807 key
.offset
= (u64
)-1;
2809 ret
= btrfs_search_backwards(root
, &key
, path
);
2817 leaf
= path
->nodes
[0];
2818 slot
= path
->slots
[0];
2820 iref
= btrfs_item_ptr(leaf
, slot
, struct btrfs_inode_ref
);
2821 len
= btrfs_inode_ref_name_len(leaf
, iref
);
2823 total_len
+= len
+ 1;
2824 if (ptr
< args
->path
) {
2825 ret
= -ENAMETOOLONG
;
2830 read_extent_buffer(leaf
, ptr
,
2831 (unsigned long)(iref
+ 1), len
);
2833 /* Check the read+exec permission of this directory */
2834 ret
= btrfs_previous_item(root
, path
, dirid
,
2835 BTRFS_INODE_ITEM_KEY
);
2838 } else if (ret
> 0) {
2843 leaf
= path
->nodes
[0];
2844 slot
= path
->slots
[0];
2845 btrfs_item_key_to_cpu(leaf
, &key2
, slot
);
2846 if (key2
.objectid
!= dirid
) {
2851 temp_inode
= btrfs_iget(sb
, key2
.objectid
, root
);
2852 if (IS_ERR(temp_inode
)) {
2853 ret
= PTR_ERR(temp_inode
);
2856 ret
= inode_permission(mnt_userns
, temp_inode
,
2857 MAY_READ
| MAY_EXEC
);
2864 if (key
.offset
== upper_limit
.objectid
)
2866 if (key
.objectid
== BTRFS_FIRST_FREE_OBJECTID
) {
2871 btrfs_release_path(path
);
2872 key
.objectid
= key
.offset
;
2873 key
.offset
= (u64
)-1;
2874 dirid
= key
.objectid
;
2877 memmove(args
->path
, ptr
, total_len
);
2878 args
->path
[total_len
] = '\0';
2879 btrfs_put_root(root
);
2881 btrfs_release_path(path
);
2884 /* Get the bottom subvolume's name from ROOT_REF */
2885 key
.objectid
= treeid
;
2886 key
.type
= BTRFS_ROOT_REF_KEY
;
2887 key
.offset
= args
->treeid
;
2888 ret
= btrfs_search_slot(NULL
, fs_info
->tree_root
, &key
, path
, 0, 0);
2891 } else if (ret
> 0) {
2896 leaf
= path
->nodes
[0];
2897 slot
= path
->slots
[0];
2898 btrfs_item_key_to_cpu(leaf
, &key
, slot
);
2900 item_off
= btrfs_item_ptr_offset(leaf
, slot
);
2901 item_len
= btrfs_item_size(leaf
, slot
);
2902 /* Check if dirid in ROOT_REF corresponds to passed dirid */
2903 rref
= btrfs_item_ptr(leaf
, slot
, struct btrfs_root_ref
);
2904 if (args
->dirid
!= btrfs_root_ref_dirid(leaf
, rref
)) {
2909 /* Copy subvolume's name */
2910 item_off
+= sizeof(struct btrfs_root_ref
);
2911 item_len
-= sizeof(struct btrfs_root_ref
);
2912 read_extent_buffer(leaf
, args
->name
, item_off
, item_len
);
2913 args
->name
[item_len
] = 0;
2916 btrfs_put_root(root
);
2918 btrfs_free_path(path
);
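/*
 * Handler for BTRFS_IOC_INO_LOOKUP: resolve the path of an inode number
 * inside a given tree. The full lookup still requires CAP_SYS_ADMIN.
 */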
static noinline int btrfs_ioctl_ino_lookup(struct btrfs_root *root,
					   void __user *argp)
{
	struct btrfs_ioctl_ino_lookup_args *args;
	int ret = 0;

	args = memdup_user(argp, sizeof(*args));
	if (IS_ERR(args))
		return PTR_ERR(args);

	/*
	 * Unprivileged query to obtain the containing subvolume root id. The
	 * path is reset so it's consistent with btrfs_search_path_in_tree.
	 */
	if (args->treeid == 0)
		args->treeid = root->root_key.objectid;

	if (args->objectid == BTRFS_FIRST_FREE_OBJECTID) {
		args->name[0] = 0;
		goto out;
	}

	if (!capable(CAP_SYS_ADMIN)) {
		ret = -EPERM;
		goto out;
	}

	ret = btrfs_search_path_in_tree(root->fs_info,
					args->treeid, args->objectid,
					args->name);

out:
	if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
		ret = -EFAULT;

	kfree(args);
	return ret;
}
/*
 * Version of ino_lookup ioctl (unprivileged)
 *
 * The main differences from ino_lookup ioctl are:
 *
 *   1. Read + Exec permission will be checked using inode_permission() during
 *      path construction. -EACCES will be returned in case of failure.
 *   2. Path construction will be stopped at the inode number which corresponds
 *      to the fd with which this ioctl is called. If constructed path does not
 *      exist under fd's inode, -EACCES will be returned.
 *   3. The name of bottom subvolume is also searched and filled.
 */
static int btrfs_ioctl_ino_lookup_user(struct file *file, void __user *argp)
{
	struct btrfs_ioctl_ino_lookup_user_args *args;
	struct inode *inode;
	int ret;

	args = memdup_user(argp, sizeof(*args));
	if (IS_ERR(args))
		return PTR_ERR(args);

	inode = file_inode(file);

	if (args->dirid == BTRFS_FIRST_FREE_OBJECTID &&
	    BTRFS_I(inode)->location.objectid != BTRFS_FIRST_FREE_OBJECTID) {
		/*
		 * The subvolume does not exist under fd with which this is
		 * called
		 */
		kfree(args);
		return -EACCES;
	}

	ret = btrfs_search_path_in_tree_user(file_mnt_user_ns(file), inode, args);

	if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
		ret = -EFAULT;

	kfree(args);
	return ret;
}
3004 /* Get the subvolume information in BTRFS_ROOT_ITEM and BTRFS_ROOT_BACKREF */
3005 static int btrfs_ioctl_get_subvol_info(struct inode
*inode
, void __user
*argp
)
3007 struct btrfs_ioctl_get_subvol_info_args
*subvol_info
;
3008 struct btrfs_fs_info
*fs_info
;
3009 struct btrfs_root
*root
;
3010 struct btrfs_path
*path
;
3011 struct btrfs_key key
;
3012 struct btrfs_root_item
*root_item
;
3013 struct btrfs_root_ref
*rref
;
3014 struct extent_buffer
*leaf
;
3015 unsigned long item_off
;
3016 unsigned long item_len
;
3020 path
= btrfs_alloc_path();
3024 subvol_info
= kzalloc(sizeof(*subvol_info
), GFP_KERNEL
);
3026 btrfs_free_path(path
);
3030 fs_info
= BTRFS_I(inode
)->root
->fs_info
;
3032 /* Get root_item of inode's subvolume */
3033 key
.objectid
= BTRFS_I(inode
)->root
->root_key
.objectid
;
3034 root
= btrfs_get_fs_root(fs_info
, key
.objectid
, true);
3036 ret
= PTR_ERR(root
);
3039 root_item
= &root
->root_item
;
3041 subvol_info
->treeid
= key
.objectid
;
3043 subvol_info
->generation
= btrfs_root_generation(root_item
);
3044 subvol_info
->flags
= btrfs_root_flags(root_item
);
3046 memcpy(subvol_info
->uuid
, root_item
->uuid
, BTRFS_UUID_SIZE
);
3047 memcpy(subvol_info
->parent_uuid
, root_item
->parent_uuid
,
3049 memcpy(subvol_info
->received_uuid
, root_item
->received_uuid
,
3052 subvol_info
->ctransid
= btrfs_root_ctransid(root_item
);
3053 subvol_info
->ctime
.sec
= btrfs_stack_timespec_sec(&root_item
->ctime
);
3054 subvol_info
->ctime
.nsec
= btrfs_stack_timespec_nsec(&root_item
->ctime
);
3056 subvol_info
->otransid
= btrfs_root_otransid(root_item
);
3057 subvol_info
->otime
.sec
= btrfs_stack_timespec_sec(&root_item
->otime
);
3058 subvol_info
->otime
.nsec
= btrfs_stack_timespec_nsec(&root_item
->otime
);
3060 subvol_info
->stransid
= btrfs_root_stransid(root_item
);
3061 subvol_info
->stime
.sec
= btrfs_stack_timespec_sec(&root_item
->stime
);
3062 subvol_info
->stime
.nsec
= btrfs_stack_timespec_nsec(&root_item
->stime
);
3064 subvol_info
->rtransid
= btrfs_root_rtransid(root_item
);
3065 subvol_info
->rtime
.sec
= btrfs_stack_timespec_sec(&root_item
->rtime
);
3066 subvol_info
->rtime
.nsec
= btrfs_stack_timespec_nsec(&root_item
->rtime
);
3068 if (key
.objectid
!= BTRFS_FS_TREE_OBJECTID
) {
3069 /* Search root tree for ROOT_BACKREF of this subvolume */
3070 key
.type
= BTRFS_ROOT_BACKREF_KEY
;
3072 ret
= btrfs_search_slot(NULL
, fs_info
->tree_root
, &key
, path
, 0, 0);
3075 } else if (path
->slots
[0] >=
3076 btrfs_header_nritems(path
->nodes
[0])) {
3077 ret
= btrfs_next_leaf(fs_info
->tree_root
, path
);
3080 } else if (ret
> 0) {
3086 leaf
= path
->nodes
[0];
3087 slot
= path
->slots
[0];
3088 btrfs_item_key_to_cpu(leaf
, &key
, slot
);
3089 if (key
.objectid
== subvol_info
->treeid
&&
3090 key
.type
== BTRFS_ROOT_BACKREF_KEY
) {
3091 subvol_info
->parent_id
= key
.offset
;
3093 rref
= btrfs_item_ptr(leaf
, slot
, struct btrfs_root_ref
);
3094 subvol_info
->dirid
= btrfs_root_ref_dirid(leaf
, rref
);
3096 item_off
= btrfs_item_ptr_offset(leaf
, slot
)
3097 + sizeof(struct btrfs_root_ref
);
3098 item_len
= btrfs_item_size(leaf
, slot
)
3099 - sizeof(struct btrfs_root_ref
);
3100 read_extent_buffer(leaf
, subvol_info
->name
,
3101 item_off
, item_len
);
3108 if (copy_to_user(argp
, subvol_info
, sizeof(*subvol_info
)))
3112 btrfs_put_root(root
);
3114 btrfs_free_path(path
);
3120 * Return ROOT_REF information of the subvolume containing this inode
3121 * except the subvolume name.
3123 static int btrfs_ioctl_get_subvol_rootref(struct btrfs_root
*root
,
3126 struct btrfs_ioctl_get_subvol_rootref_args
*rootrefs
;
3127 struct btrfs_root_ref
*rref
;
3128 struct btrfs_path
*path
;
3129 struct btrfs_key key
;
3130 struct extent_buffer
*leaf
;
3136 path
= btrfs_alloc_path();
3140 rootrefs
= memdup_user(argp
, sizeof(*rootrefs
));
3141 if (IS_ERR(rootrefs
)) {
3142 btrfs_free_path(path
);
3143 return PTR_ERR(rootrefs
);
3146 objectid
= root
->root_key
.objectid
;
3147 key
.objectid
= objectid
;
3148 key
.type
= BTRFS_ROOT_REF_KEY
;
3149 key
.offset
= rootrefs
->min_treeid
;
3152 root
= root
->fs_info
->tree_root
;
3153 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
3156 } else if (path
->slots
[0] >=
3157 btrfs_header_nritems(path
->nodes
[0])) {
3158 ret
= btrfs_next_leaf(root
, path
);
3161 } else if (ret
> 0) {
3167 leaf
= path
->nodes
[0];
3168 slot
= path
->slots
[0];
3170 btrfs_item_key_to_cpu(leaf
, &key
, slot
);
3171 if (key
.objectid
!= objectid
|| key
.type
!= BTRFS_ROOT_REF_KEY
) {
3176 if (found
== BTRFS_MAX_ROOTREF_BUFFER_NUM
) {
3181 rref
= btrfs_item_ptr(leaf
, slot
, struct btrfs_root_ref
);
3182 rootrefs
->rootref
[found
].treeid
= key
.offset
;
3183 rootrefs
->rootref
[found
].dirid
=
3184 btrfs_root_ref_dirid(leaf
, rref
);
3187 ret
= btrfs_next_item(root
, path
);
3190 } else if (ret
> 0) {
3197 if (!ret
|| ret
== -EOVERFLOW
) {
3198 rootrefs
->num_items
= found
;
3199 /* update min_treeid for next search */
3201 rootrefs
->min_treeid
=
3202 rootrefs
->rootref
[found
- 1].treeid
+ 1;
3203 if (copy_to_user(argp
, rootrefs
, sizeof(*rootrefs
)))
3208 btrfs_free_path(path
);
3213 static noinline
int btrfs_ioctl_snap_destroy(struct file
*file
,
3217 struct dentry
*parent
= file
->f_path
.dentry
;
3218 struct btrfs_fs_info
*fs_info
= btrfs_sb(parent
->d_sb
);
3219 struct dentry
*dentry
;
3220 struct inode
*dir
= d_inode(parent
);
3221 struct inode
*inode
;
3222 struct btrfs_root
*root
= BTRFS_I(dir
)->root
;
3223 struct btrfs_root
*dest
= NULL
;
3224 struct btrfs_ioctl_vol_args
*vol_args
= NULL
;
3225 struct btrfs_ioctl_vol_args_v2
*vol_args2
= NULL
;
3226 struct user_namespace
*mnt_userns
= file_mnt_user_ns(file
);
3227 char *subvol_name
, *subvol_name_ptr
= NULL
;
3230 bool destroy_parent
= false;
3232 /* We don't support snapshots with extent tree v2 yet. */
3233 if (btrfs_fs_incompat(fs_info
, EXTENT_TREE_V2
)) {
3235 "extent tree v2 doesn't support snapshot deletion yet");
3240 vol_args2
= memdup_user(arg
, sizeof(*vol_args2
));
3241 if (IS_ERR(vol_args2
))
3242 return PTR_ERR(vol_args2
);
3244 if (vol_args2
->flags
& ~BTRFS_SUBVOL_DELETE_ARGS_MASK
) {
3250 * If SPEC_BY_ID is not set, we are looking for the subvolume by
3251 * name, same as v1 currently does.
3253 if (!(vol_args2
->flags
& BTRFS_SUBVOL_SPEC_BY_ID
)) {
3254 vol_args2
->name
[BTRFS_SUBVOL_NAME_MAX
] = 0;
3255 subvol_name
= vol_args2
->name
;
3257 err
= mnt_want_write_file(file
);
3261 struct inode
*old_dir
;
3263 if (vol_args2
->subvolid
< BTRFS_FIRST_FREE_OBJECTID
) {
3268 err
= mnt_want_write_file(file
);
3272 dentry
= btrfs_get_dentry(fs_info
->sb
,
3273 BTRFS_FIRST_FREE_OBJECTID
,
3274 vol_args2
->subvolid
, 0, 0);
3275 if (IS_ERR(dentry
)) {
3276 err
= PTR_ERR(dentry
);
3277 goto out_drop_write
;
3281 * Change the default parent since the subvolume being
3282 * deleted can be outside of the current mount point.
3284 parent
= btrfs_get_parent(dentry
);
3287 * At this point dentry->d_name can point to '/' if the
3288 * subvolume we want to destroy is outsite of the
3289 * current mount point, so we need to release the
3290 * current dentry and execute the lookup to return a new
3291 * one with ->d_name pointing to the
3292 * <mount point>/subvol_name.
3295 if (IS_ERR(parent
)) {
3296 err
= PTR_ERR(parent
);
3297 goto out_drop_write
;
3300 dir
= d_inode(parent
);
3303 * If v2 was used with SPEC_BY_ID, a new parent was
3304 * allocated since the subvolume can be outside of the
3305 * current mount point. Later on we need to release this
3306 * new parent dentry.
3308 destroy_parent
= true;
3311 * On idmapped mounts, deletion via subvolid is
3312 * restricted to subvolumes that are immediate
3313 * ancestors of the inode referenced by the file
3314 * descriptor in the ioctl. Otherwise the idmapping
3315 * could potentially be abused to delete subvolumes
3316 * anywhere in the filesystem the user wouldn't be able
3317 * to delete without an idmapped mount.
3319 if (old_dir
!= dir
&& mnt_userns
!= &init_user_ns
) {
3324 subvol_name_ptr
= btrfs_get_subvol_name_from_objectid(
3325 fs_info
, vol_args2
->subvolid
);
3326 if (IS_ERR(subvol_name_ptr
)) {
3327 err
= PTR_ERR(subvol_name_ptr
);
3330 /* subvol_name_ptr is already nul terminated */
3331 subvol_name
= (char *)kbasename(subvol_name_ptr
);
3334 vol_args
= memdup_user(arg
, sizeof(*vol_args
));
3335 if (IS_ERR(vol_args
))
3336 return PTR_ERR(vol_args
);
3338 vol_args
->name
[BTRFS_PATH_NAME_MAX
] = 0;
3339 subvol_name
= vol_args
->name
;
3341 err
= mnt_want_write_file(file
);
3346 subvol_namelen
= strlen(subvol_name
);
3348 if (strchr(subvol_name
, '/') ||
3349 strncmp(subvol_name
, "..", subvol_namelen
) == 0) {
3351 goto free_subvol_name
;
3354 if (!S_ISDIR(dir
->i_mode
)) {
3356 goto free_subvol_name
;
3359 err
= down_write_killable_nested(&dir
->i_rwsem
, I_MUTEX_PARENT
);
3361 goto free_subvol_name
;
3362 dentry
= lookup_one(mnt_userns
, subvol_name
, parent
, subvol_namelen
);
3363 if (IS_ERR(dentry
)) {
3364 err
= PTR_ERR(dentry
);
3365 goto out_unlock_dir
;
3368 if (d_really_is_negative(dentry
)) {
3373 inode
= d_inode(dentry
);
3374 dest
= BTRFS_I(inode
)->root
;
3375 if (!capable(CAP_SYS_ADMIN
)) {
3377 * Regular user. Only allow this with a special mount
3378 * option, when the user has write+exec access to the
3379 * subvol root, and when rmdir(2) would have been
3382 * Note that this is _not_ check that the subvol is
3383 * empty or doesn't contain data that we wouldn't
3384 * otherwise be able to delete.
3386 * Users who want to delete empty subvols should try
3390 if (!btrfs_test_opt(fs_info
, USER_SUBVOL_RM_ALLOWED
))
3394 * Do not allow deletion if the parent dir is the same
3395 * as the dir to be deleted. That means the ioctl
3396 * must be called on the dentry referencing the root
3397 * of the subvol, not a random directory contained
3404 err
= inode_permission(mnt_userns
, inode
, MAY_WRITE
| MAY_EXEC
);
3409 /* check if subvolume may be deleted by a user */
3410 err
= btrfs_may_delete(mnt_userns
, dir
, dentry
, 1);
3414 if (btrfs_ino(BTRFS_I(inode
)) != BTRFS_FIRST_FREE_OBJECTID
) {
3419 btrfs_inode_lock(inode
, 0);
3420 err
= btrfs_delete_subvolume(dir
, dentry
);
3421 btrfs_inode_unlock(inode
, 0);
3423 d_delete_notify(dir
, dentry
);
3428 btrfs_inode_unlock(dir
, 0);
3430 kfree(subvol_name_ptr
);
3435 mnt_drop_write_file(file
);
3442 static int btrfs_ioctl_defrag(struct file
*file
, void __user
*argp
)
3444 struct inode
*inode
= file_inode(file
);
3445 struct btrfs_root
*root
= BTRFS_I(inode
)->root
;
3446 struct btrfs_ioctl_defrag_range_args range
= {0};
3449 ret
= mnt_want_write_file(file
);
3453 if (btrfs_root_readonly(root
)) {
3458 switch (inode
->i_mode
& S_IFMT
) {
3460 if (!capable(CAP_SYS_ADMIN
)) {
3464 ret
= btrfs_defrag_root(root
);
3468 * Note that this does not check the file descriptor for write
3469 * access. This prevents defragmenting executables that are
3470 * running and allows defrag on files open in read-only mode.
3472 if (!capable(CAP_SYS_ADMIN
) &&
3473 inode_permission(&init_user_ns
, inode
, MAY_WRITE
)) {
3479 if (copy_from_user(&range
, argp
, sizeof(range
))) {
3483 /* compression requires us to start the IO */
3484 if ((range
.flags
& BTRFS_DEFRAG_RANGE_COMPRESS
)) {
3485 range
.flags
|= BTRFS_DEFRAG_RANGE_START_IO
;
3486 range
.extent_thresh
= (u32
)-1;
3489 /* the rest are all set to zero by kzalloc */
3490 range
.len
= (u64
)-1;
3492 ret
= btrfs_defrag_file(file_inode(file
), &file
->f_ra
,
3493 &range
, BTRFS_OLDEST_GENERATION
, 0);
3501 mnt_drop_write_file(file
);
static long btrfs_ioctl_add_dev(struct btrfs_fs_info *fs_info, void __user *arg)
{
	struct btrfs_ioctl_vol_args *vol_args;
	bool restore_op = false;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
		btrfs_err(fs_info, "device add not supported on extent tree v2 yet");
		return -EINVAL;
	}

	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_DEV_ADD)) {
		if (!btrfs_exclop_start_try_lock(fs_info, BTRFS_EXCLOP_DEV_ADD))
			return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;

		/*
		 * We can do the device add because we have a paused balance,
		 * change the exclusive op type and remember we should bring
		 * back the paused balance
		 */
		fs_info->exclusive_operation = BTRFS_EXCLOP_DEV_ADD;
		btrfs_exclop_start_unlock(fs_info);
		restore_op = true;
	}

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args)) {
		ret = PTR_ERR(vol_args);
		goto out;
	}

	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
	ret = btrfs_init_new_device(fs_info, vol_args->name);

	if (!ret)
		btrfs_info(fs_info, "disk added %s", vol_args->name);

	kfree(vol_args);
out:
	if (restore_op)
		btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED);
	else
		btrfs_exclop_finish(fs_info);
	return ret;
}
3554 static long btrfs_ioctl_rm_dev_v2(struct file
*file
, void __user
*arg
)
3556 BTRFS_DEV_LOOKUP_ARGS(args
);
3557 struct inode
*inode
= file_inode(file
);
3558 struct btrfs_fs_info
*fs_info
= btrfs_sb(inode
->i_sb
);
3559 struct btrfs_ioctl_vol_args_v2
*vol_args
;
3560 struct block_device
*bdev
= NULL
;
3563 bool cancel
= false;
3565 if (!capable(CAP_SYS_ADMIN
))
3568 vol_args
= memdup_user(arg
, sizeof(*vol_args
));
3569 if (IS_ERR(vol_args
))
3570 return PTR_ERR(vol_args
);
3572 if (vol_args
->flags
& ~BTRFS_DEVICE_REMOVE_ARGS_MASK
) {
3577 vol_args
->name
[BTRFS_SUBVOL_NAME_MAX
] = '\0';
3578 if (vol_args
->flags
& BTRFS_DEVICE_SPEC_BY_ID
) {
3579 args
.devid
= vol_args
->devid
;
3580 } else if (!strcmp("cancel", vol_args
->name
)) {
3583 ret
= btrfs_get_dev_args_from_path(fs_info
, &args
, vol_args
->name
);
3588 ret
= mnt_want_write_file(file
);
3592 ret
= exclop_start_or_cancel_reloc(fs_info
, BTRFS_EXCLOP_DEV_REMOVE
,
3597 /* Exclusive operation is now claimed */
3598 ret
= btrfs_rm_device(fs_info
, &args
, &bdev
, &mode
);
3600 btrfs_exclop_finish(fs_info
);
3603 if (vol_args
->flags
& BTRFS_DEVICE_SPEC_BY_ID
)
3604 btrfs_info(fs_info
, "device deleted: id %llu",
3607 btrfs_info(fs_info
, "device deleted: %s",
3611 mnt_drop_write_file(file
);
3613 blkdev_put(bdev
, mode
);
3615 btrfs_put_dev_args_from_path(&args
);
3620 static long btrfs_ioctl_rm_dev(struct file
*file
, void __user
*arg
)
3622 BTRFS_DEV_LOOKUP_ARGS(args
);
3623 struct inode
*inode
= file_inode(file
);
3624 struct btrfs_fs_info
*fs_info
= btrfs_sb(inode
->i_sb
);
3625 struct btrfs_ioctl_vol_args
*vol_args
;
3626 struct block_device
*bdev
= NULL
;
3629 bool cancel
= false;
3631 if (!capable(CAP_SYS_ADMIN
))
3634 vol_args
= memdup_user(arg
, sizeof(*vol_args
));
3635 if (IS_ERR(vol_args
))
3636 return PTR_ERR(vol_args
);
3638 vol_args
->name
[BTRFS_PATH_NAME_MAX
] = '\0';
3639 if (!strcmp("cancel", vol_args
->name
)) {
3642 ret
= btrfs_get_dev_args_from_path(fs_info
, &args
, vol_args
->name
);
3647 ret
= mnt_want_write_file(file
);
3651 ret
= exclop_start_or_cancel_reloc(fs_info
, BTRFS_EXCLOP_DEV_REMOVE
,
3654 ret
= btrfs_rm_device(fs_info
, &args
, &bdev
, &mode
);
3656 btrfs_info(fs_info
, "disk deleted %s", vol_args
->name
);
3657 btrfs_exclop_finish(fs_info
);
3660 mnt_drop_write_file(file
);
3662 blkdev_put(bdev
, mode
);
3664 btrfs_put_dev_args_from_path(&args
);
3669 static long btrfs_ioctl_fs_info(struct btrfs_fs_info
*fs_info
,
3672 struct btrfs_ioctl_fs_info_args
*fi_args
;
3673 struct btrfs_device
*device
;
3674 struct btrfs_fs_devices
*fs_devices
= fs_info
->fs_devices
;
3678 fi_args
= memdup_user(arg
, sizeof(*fi_args
));
3679 if (IS_ERR(fi_args
))
3680 return PTR_ERR(fi_args
);
3682 flags_in
= fi_args
->flags
;
3683 memset(fi_args
, 0, sizeof(*fi_args
));
3686 fi_args
->num_devices
= fs_devices
->num_devices
;
3688 list_for_each_entry_rcu(device
, &fs_devices
->devices
, dev_list
) {
3689 if (device
->devid
> fi_args
->max_id
)
3690 fi_args
->max_id
= device
->devid
;
3694 memcpy(&fi_args
->fsid
, fs_devices
->fsid
, sizeof(fi_args
->fsid
));
3695 fi_args
->nodesize
= fs_info
->nodesize
;
3696 fi_args
->sectorsize
= fs_info
->sectorsize
;
3697 fi_args
->clone_alignment
= fs_info
->sectorsize
;
3699 if (flags_in
& BTRFS_FS_INFO_FLAG_CSUM_INFO
) {
3700 fi_args
->csum_type
= btrfs_super_csum_type(fs_info
->super_copy
);
3701 fi_args
->csum_size
= btrfs_super_csum_size(fs_info
->super_copy
);
3702 fi_args
->flags
|= BTRFS_FS_INFO_FLAG_CSUM_INFO
;
3705 if (flags_in
& BTRFS_FS_INFO_FLAG_GENERATION
) {
3706 fi_args
->generation
= fs_info
->generation
;
3707 fi_args
->flags
|= BTRFS_FS_INFO_FLAG_GENERATION
;
3710 if (flags_in
& BTRFS_FS_INFO_FLAG_METADATA_UUID
) {
3711 memcpy(&fi_args
->metadata_uuid
, fs_devices
->metadata_uuid
,
3712 sizeof(fi_args
->metadata_uuid
));
3713 fi_args
->flags
|= BTRFS_FS_INFO_FLAG_METADATA_UUID
;
3716 if (copy_to_user(arg
, fi_args
, sizeof(*fi_args
)))
3723 static long btrfs_ioctl_dev_info(struct btrfs_fs_info
*fs_info
,
3726 BTRFS_DEV_LOOKUP_ARGS(args
);
3727 struct btrfs_ioctl_dev_info_args
*di_args
;
3728 struct btrfs_device
*dev
;
3731 di_args
= memdup_user(arg
, sizeof(*di_args
));
3732 if (IS_ERR(di_args
))
3733 return PTR_ERR(di_args
);
3735 args
.devid
= di_args
->devid
;
3736 if (!btrfs_is_empty_uuid(di_args
->uuid
))
3737 args
.uuid
= di_args
->uuid
;
3740 dev
= btrfs_find_device(fs_info
->fs_devices
, &args
);
3746 di_args
->devid
= dev
->devid
;
3747 di_args
->bytes_used
= btrfs_device_get_bytes_used(dev
);
3748 di_args
->total_bytes
= btrfs_device_get_total_bytes(dev
);
3749 memcpy(di_args
->uuid
, dev
->uuid
, sizeof(di_args
->uuid
));
3751 strncpy(di_args
->path
, rcu_str_deref(dev
->name
),
3752 sizeof(di_args
->path
) - 1);
3753 di_args
->path
[sizeof(di_args
->path
) - 1] = 0;
3755 di_args
->path
[0] = '\0';
3760 if (ret
== 0 && copy_to_user(arg
, di_args
, sizeof(*di_args
)))
3767 static long btrfs_ioctl_default_subvol(struct file
*file
, void __user
*argp
)
3769 struct inode
*inode
= file_inode(file
);
3770 struct btrfs_fs_info
*fs_info
= btrfs_sb(inode
->i_sb
);
3771 struct btrfs_root
*root
= BTRFS_I(inode
)->root
;
3772 struct btrfs_root
*new_root
;
3773 struct btrfs_dir_item
*di
;
3774 struct btrfs_trans_handle
*trans
;
3775 struct btrfs_path
*path
= NULL
;
3776 struct btrfs_disk_key disk_key
;
3781 if (!capable(CAP_SYS_ADMIN
))
3784 ret
= mnt_want_write_file(file
);
3788 if (copy_from_user(&objectid
, argp
, sizeof(objectid
))) {
3794 objectid
= BTRFS_FS_TREE_OBJECTID
;
3796 new_root
= btrfs_get_fs_root(fs_info
, objectid
, true);
3797 if (IS_ERR(new_root
)) {
3798 ret
= PTR_ERR(new_root
);
3801 if (!is_fstree(new_root
->root_key
.objectid
)) {
3806 path
= btrfs_alloc_path();
3812 trans
= btrfs_start_transaction(root
, 1);
3813 if (IS_ERR(trans
)) {
3814 ret
= PTR_ERR(trans
);
3818 dir_id
= btrfs_super_root_dir(fs_info
->super_copy
);
3819 di
= btrfs_lookup_dir_item(trans
, fs_info
->tree_root
, path
,
3820 dir_id
, "default", 7, 1);
3821 if (IS_ERR_OR_NULL(di
)) {
3822 btrfs_release_path(path
);
3823 btrfs_end_transaction(trans
);
3825 "Umm, you don't have the default diritem, this isn't going to work");
3830 btrfs_cpu_key_to_disk(&disk_key
, &new_root
->root_key
);
3831 btrfs_set_dir_item_key(path
->nodes
[0], di
, &disk_key
);
3832 btrfs_mark_buffer_dirty(path
->nodes
[0]);
3833 btrfs_release_path(path
);
3835 btrfs_set_fs_incompat(fs_info
, DEFAULT_SUBVOL
);
3836 btrfs_end_transaction(trans
);
3838 btrfs_put_root(new_root
);
3839 btrfs_free_path(path
);
3841 mnt_drop_write_file(file
);
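/* Sum up the size and usage of every block group on one space-info list. */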
static void get_block_group_info(struct list_head *groups_list,
				 struct btrfs_ioctl_space_info *space)
{
	struct btrfs_block_group *block_group;

	space->total_bytes = 0;
	space->used_bytes = 0;
	space->flags = 0;
	list_for_each_entry(block_group, groups_list, list) {
		space->flags = block_group->flags;
		space->total_bytes += block_group->length;
		space->used_bytes += block_group->used;
	}
}
3860 static long btrfs_ioctl_space_info(struct btrfs_fs_info
*fs_info
,
3863 struct btrfs_ioctl_space_args space_args
;
3864 struct btrfs_ioctl_space_info space
;
3865 struct btrfs_ioctl_space_info
*dest
;
3866 struct btrfs_ioctl_space_info
*dest_orig
;
3867 struct btrfs_ioctl_space_info __user
*user_dest
;
3868 struct btrfs_space_info
*info
;
3869 static const u64 types
[] = {
3870 BTRFS_BLOCK_GROUP_DATA
,
3871 BTRFS_BLOCK_GROUP_SYSTEM
,
3872 BTRFS_BLOCK_GROUP_METADATA
,
3873 BTRFS_BLOCK_GROUP_DATA
| BTRFS_BLOCK_GROUP_METADATA
3881 if (copy_from_user(&space_args
,
3882 (struct btrfs_ioctl_space_args __user
*)arg
,
3883 sizeof(space_args
)))
3886 for (i
= 0; i
< num_types
; i
++) {
3887 struct btrfs_space_info
*tmp
;
3890 list_for_each_entry(tmp
, &fs_info
->space_info
, list
) {
3891 if (tmp
->flags
== types
[i
]) {
3900 down_read(&info
->groups_sem
);
3901 for (c
= 0; c
< BTRFS_NR_RAID_TYPES
; c
++) {
3902 if (!list_empty(&info
->block_groups
[c
]))
3905 up_read(&info
->groups_sem
);
3909 * Global block reserve, exported as a space_info
3913 /* space_slots == 0 means they are asking for a count */
3914 if (space_args
.space_slots
== 0) {
3915 space_args
.total_spaces
= slot_count
;
3919 slot_count
= min_t(u64
, space_args
.space_slots
, slot_count
);
3921 alloc_size
= sizeof(*dest
) * slot_count
;
3923 /* we generally have at most 6 or so space infos, one for each raid
3924 * level. So, a whole page should be more than enough for everyone
3926 if (alloc_size
> PAGE_SIZE
)
3929 space_args
.total_spaces
= 0;
3930 dest
= kmalloc(alloc_size
, GFP_KERNEL
);
3935 /* now we have a buffer to copy into */
3936 for (i
= 0; i
< num_types
; i
++) {
3937 struct btrfs_space_info
*tmp
;
3943 list_for_each_entry(tmp
, &fs_info
->space_info
, list
) {
3944 if (tmp
->flags
== types
[i
]) {
3952 down_read(&info
->groups_sem
);
3953 for (c
= 0; c
< BTRFS_NR_RAID_TYPES
; c
++) {
3954 if (!list_empty(&info
->block_groups
[c
])) {
3955 get_block_group_info(&info
->block_groups
[c
],
3957 memcpy(dest
, &space
, sizeof(space
));
3959 space_args
.total_spaces
++;
3965 up_read(&info
->groups_sem
);
3969 * Add global block reserve
3972 struct btrfs_block_rsv
*block_rsv
= &fs_info
->global_block_rsv
;
3974 spin_lock(&block_rsv
->lock
);
3975 space
.total_bytes
= block_rsv
->size
;
3976 space
.used_bytes
= block_rsv
->size
- block_rsv
->reserved
;
3977 spin_unlock(&block_rsv
->lock
);
3978 space
.flags
= BTRFS_SPACE_INFO_GLOBAL_RSV
;
3979 memcpy(dest
, &space
, sizeof(space
));
3980 space_args
.total_spaces
++;
3983 user_dest
= (struct btrfs_ioctl_space_info __user
*)
3984 (arg
+ sizeof(struct btrfs_ioctl_space_args
));
3986 if (copy_to_user(user_dest
, dest_orig
, alloc_size
))
3991 if (ret
== 0 && copy_to_user(arg
, &space_args
, sizeof(space_args
)))
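/*
 * Start an asynchronous transaction commit and report its transid, so that a
 * later WAIT_SYNC ioctl can wait for exactly this commit to finish.
 */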
static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root,
					    void __user *argp)
{
	struct btrfs_trans_handle *trans;
	u64 transid;

	trans = btrfs_attach_transaction_barrier(root);
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) != -ENOENT)
			return PTR_ERR(trans);

		/* No running transaction, don't bother */
		transid = root->fs_info->last_trans_committed;
		goto out;
	}
	transid = trans->transid;
	btrfs_commit_transaction_async(trans);
out:
	if (argp)
		if (copy_to_user(argp, &transid, sizeof(transid)))
			return -EFAULT;
	return 0;
}

static noinline long btrfs_ioctl_wait_sync(struct btrfs_fs_info *fs_info,
					   void __user *argp)
{
	u64 transid;

	if (argp) {
		if (copy_from_user(&transid, argp, sizeof(transid)))
			return -EFAULT;
	} else {
		transid = 0;  /* current trans */
	}
	return btrfs_wait_for_commit(fs_info, transid);
}
4035 static long btrfs_ioctl_scrub(struct file
*file
, void __user
*arg
)
4037 struct btrfs_fs_info
*fs_info
= btrfs_sb(file_inode(file
)->i_sb
);
4038 struct btrfs_ioctl_scrub_args
*sa
;
4041 if (!capable(CAP_SYS_ADMIN
))
4044 if (btrfs_fs_incompat(fs_info
, EXTENT_TREE_V2
)) {
4045 btrfs_err(fs_info
, "scrub is not supported on extent tree v2 yet");
4049 sa
= memdup_user(arg
, sizeof(*sa
));
4053 if (!(sa
->flags
& BTRFS_SCRUB_READONLY
)) {
4054 ret
= mnt_want_write_file(file
);
4059 ret
= btrfs_scrub_dev(fs_info
, sa
->devid
, sa
->start
, sa
->end
,
4060 &sa
->progress
, sa
->flags
& BTRFS_SCRUB_READONLY
,
4064 * Copy scrub args to user space even if btrfs_scrub_dev() returned an
4065 * error. This is important as it allows user space to know how much
4066 * progress scrub has done. For example, if scrub is canceled we get
4067 * -ECANCELED from btrfs_scrub_dev() and return that error back to user
4068 * space. Later user space can inspect the progress from the structure
4069 * btrfs_ioctl_scrub_args and resume scrub from where it left off
4070 * previously (btrfs-progs does this).
4071 * If we fail to copy the btrfs_ioctl_scrub_args structure to user space
4072 * then return -EFAULT to signal the structure was not copied or it may
4073 * be corrupt and unreliable due to a partial copy.
4075 if (copy_to_user(arg
, sa
, sizeof(*sa
)))
4078 if (!(sa
->flags
& BTRFS_SCRUB_READONLY
))
4079 mnt_drop_write_file(file
);
4085 static long btrfs_ioctl_scrub_cancel(struct btrfs_fs_info
*fs_info
)
4087 if (!capable(CAP_SYS_ADMIN
))
4090 return btrfs_scrub_cancel(fs_info
);
4093 static long btrfs_ioctl_scrub_progress(struct btrfs_fs_info
*fs_info
,
4096 struct btrfs_ioctl_scrub_args
*sa
;
4099 if (!capable(CAP_SYS_ADMIN
))
4102 sa
= memdup_user(arg
, sizeof(*sa
));
4106 ret
= btrfs_scrub_progress(fs_info
, sa
->devid
, &sa
->progress
);
4108 if (ret
== 0 && copy_to_user(arg
, sa
, sizeof(*sa
)))
4115 static long btrfs_ioctl_get_dev_stats(struct btrfs_fs_info
*fs_info
,
4118 struct btrfs_ioctl_get_dev_stats
*sa
;
4121 sa
= memdup_user(arg
, sizeof(*sa
));
4125 if ((sa
->flags
& BTRFS_DEV_STATS_RESET
) && !capable(CAP_SYS_ADMIN
)) {
4130 ret
= btrfs_get_dev_stats(fs_info
, sa
);
4132 if (ret
== 0 && copy_to_user(arg
, sa
, sizeof(*sa
)))
4139 static long btrfs_ioctl_dev_replace(struct btrfs_fs_info
*fs_info
,
4142 struct btrfs_ioctl_dev_replace_args
*p
;
4145 if (!capable(CAP_SYS_ADMIN
))
4148 if (btrfs_fs_incompat(fs_info
, EXTENT_TREE_V2
)) {
4149 btrfs_err(fs_info
, "device replace not supported on extent tree v2 yet");
4153 p
= memdup_user(arg
, sizeof(*p
));
4158 case BTRFS_IOCTL_DEV_REPLACE_CMD_START
:
4159 if (sb_rdonly(fs_info
->sb
)) {
4163 if (!btrfs_exclop_start(fs_info
, BTRFS_EXCLOP_DEV_REPLACE
)) {
4164 ret
= BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS
;
4166 ret
= btrfs_dev_replace_by_ioctl(fs_info
, p
);
4167 btrfs_exclop_finish(fs_info
);
4170 case BTRFS_IOCTL_DEV_REPLACE_CMD_STATUS
:
4171 btrfs_dev_replace_status(fs_info
, p
);
4174 case BTRFS_IOCTL_DEV_REPLACE_CMD_CANCEL
:
4175 p
->result
= btrfs_dev_replace_cancel(fs_info
);
4183 if ((ret
== 0 || ret
== -ECANCELED
) && copy_to_user(arg
, p
, sizeof(*p
)))
4190 static long btrfs_ioctl_ino_to_path(struct btrfs_root
*root
, void __user
*arg
)
4196 struct btrfs_ioctl_ino_path_args
*ipa
= NULL
;
4197 struct inode_fs_paths
*ipath
= NULL
;
4198 struct btrfs_path
*path
;
4200 if (!capable(CAP_DAC_READ_SEARCH
))
4203 path
= btrfs_alloc_path();
4209 ipa
= memdup_user(arg
, sizeof(*ipa
));
4216 size
= min_t(u32
, ipa
->size
, 4096);
4217 ipath
= init_ipath(size
, root
, path
);
4218 if (IS_ERR(ipath
)) {
4219 ret
= PTR_ERR(ipath
);
4224 ret
= paths_from_inode(ipa
->inum
, ipath
);
4228 for (i
= 0; i
< ipath
->fspath
->elem_cnt
; ++i
) {
4229 rel_ptr
= ipath
->fspath
->val
[i
] -
4230 (u64
)(unsigned long)ipath
->fspath
->val
;
4231 ipath
->fspath
->val
[i
] = rel_ptr
;
4234 ret
= copy_to_user((void __user
*)(unsigned long)ipa
->fspath
,
4235 ipath
->fspath
, size
);
4242 btrfs_free_path(path
);
4249 static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info
*fs_info
,
4250 void __user
*arg
, int version
)
4254 struct btrfs_ioctl_logical_ino_args
*loi
;
4255 struct btrfs_data_container
*inodes
= NULL
;
4256 struct btrfs_path
*path
= NULL
;
4259 if (!capable(CAP_SYS_ADMIN
))
4262 loi
= memdup_user(arg
, sizeof(*loi
));
4264 return PTR_ERR(loi
);
4267 ignore_offset
= false;
4268 size
= min_t(u32
, loi
->size
, SZ_64K
);
4270 /* All reserved bits must be 0 for now */
4271 if (memchr_inv(loi
->reserved
, 0, sizeof(loi
->reserved
))) {
4275 /* Only accept flags we have defined so far */
4276 if (loi
->flags
& ~(BTRFS_LOGICAL_INO_ARGS_IGNORE_OFFSET
)) {
4280 ignore_offset
= loi
->flags
& BTRFS_LOGICAL_INO_ARGS_IGNORE_OFFSET
;
4281 size
= min_t(u32
, loi
->size
, SZ_16M
);
4284 path
= btrfs_alloc_path();
4290 inodes
= init_data_container(size
);
4291 if (IS_ERR(inodes
)) {
4292 ret
= PTR_ERR(inodes
);
4297 ret
= iterate_inodes_from_logical(loi
->logical
, fs_info
, path
,
4298 inodes
, ignore_offset
);
4304 ret
= copy_to_user((void __user
*)(unsigned long)loi
->inodes
, inodes
,
4310 btrfs_free_path(path
);
4318 void btrfs_update_ioctl_balance_args(struct btrfs_fs_info
*fs_info
,
4319 struct btrfs_ioctl_balance_args
*bargs
)
4321 struct btrfs_balance_control
*bctl
= fs_info
->balance_ctl
;
4323 bargs
->flags
= bctl
->flags
;
4325 if (test_bit(BTRFS_FS_BALANCE_RUNNING
, &fs_info
->flags
))
4326 bargs
->state
|= BTRFS_BALANCE_STATE_RUNNING
;
4327 if (atomic_read(&fs_info
->balance_pause_req
))
4328 bargs
->state
|= BTRFS_BALANCE_STATE_PAUSE_REQ
;
4329 if (atomic_read(&fs_info
->balance_cancel_req
))
4330 bargs
->state
|= BTRFS_BALANCE_STATE_CANCEL_REQ
;
4332 memcpy(&bargs
->data
, &bctl
->data
, sizeof(bargs
->data
));
4333 memcpy(&bargs
->meta
, &bctl
->meta
, sizeof(bargs
->meta
));
4334 memcpy(&bargs
->sys
, &bctl
->sys
, sizeof(bargs
->sys
));
4336 spin_lock(&fs_info
->balance_lock
);
4337 memcpy(&bargs
->stat
, &bctl
->stat
, sizeof(bargs
->stat
));
4338 spin_unlock(&fs_info
->balance_lock
);
4342 * Try to acquire fs_info::balance_mutex as well as set BTRFS_EXLCOP_BALANCE as
4345 * @fs_info: the filesystem
4346 * @excl_acquired: ptr to boolean value which is set to false in case balance
4349 * Return 0 on success in which case both fs_info::balance is acquired as well
4350 * as exclusive ops are blocked. In case of failure return an error code.
4352 static int btrfs_try_lock_balance(struct btrfs_fs_info
*fs_info
, bool *excl_acquired
)
4357 * Exclusive operation is locked. Three possibilities:
4358 * (1) some other op is running
4359 * (2) balance is running
4360 * (3) balance is paused -- special case (think resume)
4363 if (btrfs_exclop_start(fs_info
, BTRFS_EXCLOP_BALANCE
)) {
4364 *excl_acquired
= true;
4365 mutex_lock(&fs_info
->balance_mutex
);
4369 mutex_lock(&fs_info
->balance_mutex
);
4370 if (fs_info
->balance_ctl
) {
4371 /* This is either (2) or (3) */
4372 if (test_bit(BTRFS_FS_BALANCE_RUNNING
, &fs_info
->flags
)) {
4378 mutex_unlock(&fs_info
->balance_mutex
);
4380 * Lock released to allow other waiters to
4381 * continue, we'll reexamine the status again.
4383 mutex_lock(&fs_info
->balance_mutex
);
4385 if (fs_info
->balance_ctl
&&
4386 !test_bit(BTRFS_FS_BALANCE_RUNNING
, &fs_info
->flags
)) {
4388 *excl_acquired
= false;
4394 ret
= BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS
;
4398 mutex_unlock(&fs_info
->balance_mutex
);
4402 mutex_unlock(&fs_info
->balance_mutex
);
4403 *excl_acquired
= false;
4407 static long btrfs_ioctl_balance(struct file
*file
, void __user
*arg
)
4409 struct btrfs_root
*root
= BTRFS_I(file_inode(file
))->root
;
4410 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
4411 struct btrfs_ioctl_balance_args
*bargs
;
4412 struct btrfs_balance_control
*bctl
;
4413 bool need_unlock
= true;
4416 if (!capable(CAP_SYS_ADMIN
))
4419 ret
= mnt_want_write_file(file
);
4423 bargs
= memdup_user(arg
, sizeof(*bargs
));
4424 if (IS_ERR(bargs
)) {
4425 ret
= PTR_ERR(bargs
);
4430 ret
= btrfs_try_lock_balance(fs_info
, &need_unlock
);
4434 lockdep_assert_held(&fs_info
->balance_mutex
);
4436 if (bargs
->flags
& BTRFS_BALANCE_RESUME
) {
4437 if (!fs_info
->balance_ctl
) {
4442 bctl
= fs_info
->balance_ctl
;
4443 spin_lock(&fs_info
->balance_lock
);
4444 bctl
->flags
|= BTRFS_BALANCE_RESUME
;
4445 spin_unlock(&fs_info
->balance_lock
);
4446 btrfs_exclop_balance(fs_info
, BTRFS_EXCLOP_BALANCE
);
4451 if (bargs
->flags
& ~(BTRFS_BALANCE_ARGS_MASK
| BTRFS_BALANCE_TYPE_MASK
)) {
4456 if (fs_info
->balance_ctl
) {
4461 bctl
= kzalloc(sizeof(*bctl
), GFP_KERNEL
);
4467 memcpy(&bctl
->data
, &bargs
->data
, sizeof(bctl
->data
));
4468 memcpy(&bctl
->meta
, &bargs
->meta
, sizeof(bctl
->meta
));
4469 memcpy(&bctl
->sys
, &bargs
->sys
, sizeof(bctl
->sys
));
4471 bctl
->flags
= bargs
->flags
;
4474 * Ownership of bctl and exclusive operation goes to btrfs_balance.
4475 * bctl is freed in reset_balance_state, or, if restriper was paused
4476 * all the way until unmount, in free_fs_info. The flag should be
4477 * cleared after reset_balance_state.
4479 need_unlock
= false;
4481 ret
= btrfs_balance(fs_info
, bctl
, bargs
);
4484 if (ret
== 0 || ret
== -ECANCELED
) {
4485 if (copy_to_user(arg
, bargs
, sizeof(*bargs
)))
4491 mutex_unlock(&fs_info
->balance_mutex
);
4493 btrfs_exclop_finish(fs_info
);
4495 mnt_drop_write_file(file
);
static long btrfs_ioctl_balance_ctl(struct btrfs_fs_info *fs_info, int cmd)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case BTRFS_BALANCE_CTL_PAUSE:
		return btrfs_pause_balance(fs_info);
	case BTRFS_BALANCE_CTL_CANCEL:
		return btrfs_cancel_balance(fs_info);
	}

	return -EINVAL;
}

static long btrfs_ioctl_balance_progress(struct btrfs_fs_info *fs_info,
					 void __user *arg)
{
	struct btrfs_ioctl_balance_args *bargs;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		ret = -ENOTCONN;
		goto out;
	}

	bargs = kzalloc(sizeof(*bargs), GFP_KERNEL);
	if (!bargs) {
		ret = -ENOMEM;
		goto out;
	}

	btrfs_update_ioctl_balance_args(fs_info, bargs);

	if (copy_to_user(arg, bargs, sizeof(*bargs)))
		ret = -EFAULT;

	kfree(bargs);
out:
	mutex_unlock(&fs_info->balance_mutex);
	return ret;
}
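/* Enable or disable quotas on the filesystem (BTRFS_IOC_QUOTA_CTL). */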
static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ioctl_quota_ctl_args *sa;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa)) {
		ret = PTR_ERR(sa);
		goto drop_write;
	}

	down_write(&fs_info->subvol_sem);

	switch (sa->cmd) {
	case BTRFS_QUOTA_CTL_ENABLE:
		ret = btrfs_quota_enable(fs_info);
		break;
	case BTRFS_QUOTA_CTL_DISABLE:
		ret = btrfs_quota_disable(fs_info);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	kfree(sa);
	up_write(&fs_info->subvol_sem);
drop_write:
	mnt_drop_write_file(file);
	return ret;
}
4588 static long btrfs_ioctl_qgroup_assign(struct file
*file
, void __user
*arg
)
4590 struct inode
*inode
= file_inode(file
);
4591 struct btrfs_fs_info
*fs_info
= btrfs_sb(inode
->i_sb
);
4592 struct btrfs_root
*root
= BTRFS_I(inode
)->root
;
4593 struct btrfs_ioctl_qgroup_assign_args
*sa
;
4594 struct btrfs_trans_handle
*trans
;
4598 if (!capable(CAP_SYS_ADMIN
))
4601 ret
= mnt_want_write_file(file
);
4605 sa
= memdup_user(arg
, sizeof(*sa
));
4611 trans
= btrfs_join_transaction(root
);
4612 if (IS_ERR(trans
)) {
4613 ret
= PTR_ERR(trans
);
4618 ret
= btrfs_add_qgroup_relation(trans
, sa
->src
, sa
->dst
);
4620 ret
= btrfs_del_qgroup_relation(trans
, sa
->src
, sa
->dst
);
4623 /* update qgroup status and info */
4624 err
= btrfs_run_qgroups(trans
);
4626 btrfs_handle_fs_error(fs_info
, err
,
4627 "failed to update qgroup status and info");
4628 err
= btrfs_end_transaction(trans
);
4635 mnt_drop_write_file(file
);
4639 static long btrfs_ioctl_qgroup_create(struct file
*file
, void __user
*arg
)
4641 struct inode
*inode
= file_inode(file
);
4642 struct btrfs_root
*root
= BTRFS_I(inode
)->root
;
4643 struct btrfs_ioctl_qgroup_create_args
*sa
;
4644 struct btrfs_trans_handle
*trans
;
4648 if (!capable(CAP_SYS_ADMIN
))
4651 ret
= mnt_want_write_file(file
);
4655 sa
= memdup_user(arg
, sizeof(*sa
));
4661 if (!sa
->qgroupid
) {
4666 trans
= btrfs_join_transaction(root
);
4667 if (IS_ERR(trans
)) {
4668 ret
= PTR_ERR(trans
);
4673 ret
= btrfs_create_qgroup(trans
, sa
->qgroupid
);
4675 ret
= btrfs_remove_qgroup(trans
, sa
->qgroupid
);
4678 err
= btrfs_end_transaction(trans
);
4685 mnt_drop_write_file(file
);
4689 static long btrfs_ioctl_qgroup_limit(struct file
*file
, void __user
*arg
)
4691 struct inode
*inode
= file_inode(file
);
4692 struct btrfs_root
*root
= BTRFS_I(inode
)->root
;
4693 struct btrfs_ioctl_qgroup_limit_args
*sa
;
4694 struct btrfs_trans_handle
*trans
;
4699 if (!capable(CAP_SYS_ADMIN
))
4702 ret
= mnt_want_write_file(file
);
4706 sa
= memdup_user(arg
, sizeof(*sa
));
4712 trans
= btrfs_join_transaction(root
);
4713 if (IS_ERR(trans
)) {
4714 ret
= PTR_ERR(trans
);
4718 qgroupid
= sa
->qgroupid
;
4720 /* take the current subvol as qgroup */
4721 qgroupid
= root
->root_key
.objectid
;
4724 ret
= btrfs_limit_qgroup(trans
, qgroupid
, &sa
->lim
);
4726 err
= btrfs_end_transaction(trans
);
4733 mnt_drop_write_file(file
);
4737 static long btrfs_ioctl_quota_rescan(struct file
*file
, void __user
*arg
)
4739 struct inode
*inode
= file_inode(file
);
4740 struct btrfs_fs_info
*fs_info
= btrfs_sb(inode
->i_sb
);
4741 struct btrfs_ioctl_quota_rescan_args
*qsa
;
4744 if (!capable(CAP_SYS_ADMIN
))
4747 ret
= mnt_want_write_file(file
);
4751 qsa
= memdup_user(arg
, sizeof(*qsa
));
4762 ret
= btrfs_qgroup_rescan(fs_info
);
4767 mnt_drop_write_file(file
);
static long btrfs_ioctl_quota_rescan_status(struct btrfs_fs_info *fs_info,
					    void __user *arg)
{
	struct btrfs_ioctl_quota_rescan_args qsa = {0};

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
		qsa.flags = 1;
		qsa.progress = fs_info->qgroup_rescan_progress.objectid;
	}

	if (copy_to_user(arg, &qsa, sizeof(qsa)))
		return -EFAULT;

	return 0;
}

static long btrfs_ioctl_quota_rescan_wait(struct btrfs_fs_info *fs_info,
					  void __user *arg)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return btrfs_qgroup_wait_for_completion(fs_info, true);
}
4799 static long _btrfs_ioctl_set_received_subvol(struct file
*file
,
4800 struct user_namespace
*mnt_userns
,
4801 struct btrfs_ioctl_received_subvol_args
*sa
)
4803 struct inode
*inode
= file_inode(file
);
4804 struct btrfs_fs_info
*fs_info
= btrfs_sb(inode
->i_sb
);
4805 struct btrfs_root
*root
= BTRFS_I(inode
)->root
;
4806 struct btrfs_root_item
*root_item
= &root
->root_item
;
4807 struct btrfs_trans_handle
*trans
;
4808 struct timespec64 ct
= current_time(inode
);
4810 int received_uuid_changed
;
4812 if (!inode_owner_or_capable(mnt_userns
, inode
))
4815 ret
= mnt_want_write_file(file
);
4819 down_write(&fs_info
->subvol_sem
);
4821 if (btrfs_ino(BTRFS_I(inode
)) != BTRFS_FIRST_FREE_OBJECTID
) {
4826 if (btrfs_root_readonly(root
)) {
4833 * 2 - uuid items (received uuid + subvol uuid)
4835 trans
= btrfs_start_transaction(root
, 3);
4836 if (IS_ERR(trans
)) {
4837 ret
= PTR_ERR(trans
);
4842 sa
->rtransid
= trans
->transid
;
4843 sa
->rtime
.sec
= ct
.tv_sec
;
4844 sa
->rtime
.nsec
= ct
.tv_nsec
;
4846 received_uuid_changed
= memcmp(root_item
->received_uuid
, sa
->uuid
,
4848 if (received_uuid_changed
&&
4849 !btrfs_is_empty_uuid(root_item
->received_uuid
)) {
4850 ret
= btrfs_uuid_tree_remove(trans
, root_item
->received_uuid
,
4851 BTRFS_UUID_KEY_RECEIVED_SUBVOL
,
4852 root
->root_key
.objectid
);
4853 if (ret
&& ret
!= -ENOENT
) {
4854 btrfs_abort_transaction(trans
, ret
);
4855 btrfs_end_transaction(trans
);
4859 memcpy(root_item
->received_uuid
, sa
->uuid
, BTRFS_UUID_SIZE
);
4860 btrfs_set_root_stransid(root_item
, sa
->stransid
);
4861 btrfs_set_root_rtransid(root_item
, sa
->rtransid
);
4862 btrfs_set_stack_timespec_sec(&root_item
->stime
, sa
->stime
.sec
);
4863 btrfs_set_stack_timespec_nsec(&root_item
->stime
, sa
->stime
.nsec
);
4864 btrfs_set_stack_timespec_sec(&root_item
->rtime
, sa
->rtime
.sec
);
4865 btrfs_set_stack_timespec_nsec(&root_item
->rtime
, sa
->rtime
.nsec
);
4867 ret
= btrfs_update_root(trans
, fs_info
->tree_root
,
4868 &root
->root_key
, &root
->root_item
);
4870 btrfs_end_transaction(trans
);
4873 if (received_uuid_changed
&& !btrfs_is_empty_uuid(sa
->uuid
)) {
4874 ret
= btrfs_uuid_tree_add(trans
, sa
->uuid
,
4875 BTRFS_UUID_KEY_RECEIVED_SUBVOL
,
4876 root
->root_key
.objectid
);
4877 if (ret
< 0 && ret
!= -EEXIST
) {
4878 btrfs_abort_transaction(trans
, ret
);
4879 btrfs_end_transaction(trans
);
4883 ret
= btrfs_commit_transaction(trans
);
4885 up_write(&fs_info
->subvol_sem
);
4886 mnt_drop_write_file(file
);
4891 static long btrfs_ioctl_set_received_subvol_32(struct file
*file
,
4894 struct btrfs_ioctl_received_subvol_args_32
*args32
= NULL
;
4895 struct btrfs_ioctl_received_subvol_args
*args64
= NULL
;
4898 args32
= memdup_user(arg
, sizeof(*args32
));
4900 return PTR_ERR(args32
);
4902 args64
= kmalloc(sizeof(*args64
), GFP_KERNEL
);
4908 memcpy(args64
->uuid
, args32
->uuid
, BTRFS_UUID_SIZE
);
4909 args64
->stransid
= args32
->stransid
;
4910 args64
->rtransid
= args32
->rtransid
;
4911 args64
->stime
.sec
= args32
->stime
.sec
;
4912 args64
->stime
.nsec
= args32
->stime
.nsec
;
4913 args64
->rtime
.sec
= args32
->rtime
.sec
;
4914 args64
->rtime
.nsec
= args32
->rtime
.nsec
;
4915 args64
->flags
= args32
->flags
;
4917 ret
= _btrfs_ioctl_set_received_subvol(file
, file_mnt_user_ns(file
), args64
);
4921 memcpy(args32
->uuid
, args64
->uuid
, BTRFS_UUID_SIZE
);
4922 args32
->stransid
= args64
->stransid
;
4923 args32
->rtransid
= args64
->rtransid
;
4924 args32
->stime
.sec
= args64
->stime
.sec
;
4925 args32
->stime
.nsec
= args64
->stime
.nsec
;
4926 args32
->rtime
.sec
= args64
->rtime
.sec
;
4927 args32
->rtime
.nsec
= args64
->rtime
.nsec
;
4928 args32
->flags
= args64
->flags
;
4930 ret
= copy_to_user(arg
, args32
, sizeof(*args32
));
4941 static long btrfs_ioctl_set_received_subvol(struct file
*file
,
4944 struct btrfs_ioctl_received_subvol_args
*sa
= NULL
;
4947 sa
= memdup_user(arg
, sizeof(*sa
));
4951 ret
= _btrfs_ioctl_set_received_subvol(file
, file_mnt_user_ns(file
), sa
);
4956 ret
= copy_to_user(arg
, sa
, sizeof(*sa
));
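/* Copy the filesystem label out to user space. */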
static int btrfs_ioctl_get_fslabel(struct btrfs_fs_info *fs_info,
				   void __user *arg)
{
	size_t len;
	int ret;
	char label[BTRFS_LABEL_SIZE];

	spin_lock(&fs_info->super_lock);
	memcpy(label, fs_info->super_copy->label, BTRFS_LABEL_SIZE);
	spin_unlock(&fs_info->super_lock);

	len = strnlen(label, BTRFS_LABEL_SIZE);

	if (len == BTRFS_LABEL_SIZE) {
		btrfs_warn(fs_info,
			   "label is too long, return the first %zu bytes",
			   --len);
	}

	ret = copy_to_user(arg, label, len);

	return ret ? -EFAULT : 0;
}
static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_super_block *super_block = fs_info->super_copy;
	struct btrfs_trans_handle *trans;
	char label[BTRFS_LABEL_SIZE];
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(label, arg, sizeof(label)))
		return -EFAULT;

	if (strnlen(label, BTRFS_LABEL_SIZE) == BTRFS_LABEL_SIZE) {
		btrfs_err(fs_info,
			  "unable to set label with more than %d bytes",
			  BTRFS_LABEL_SIZE - 1);
		return -EINVAL;
	}

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_unlock;
	}

	spin_lock(&fs_info->super_lock);
	strcpy(super_block->label, label);
	spin_unlock(&fs_info->super_lock);
	ret = btrfs_commit_transaction(trans);

out_unlock:
	mnt_drop_write_file(file);
	return ret;
}
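/*
 * Feature flag ioctls: report the feature bits this kernel supports and the
 * bits currently set in the on-disk superblock.
 */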
#define INIT_FEATURE_FLAGS(suffix) \
	{ .compat_flags = BTRFS_FEATURE_COMPAT_##suffix, \
	  .compat_ro_flags = BTRFS_FEATURE_COMPAT_RO_##suffix, \
	  .incompat_flags = BTRFS_FEATURE_INCOMPAT_##suffix }

int btrfs_ioctl_get_supported_features(void __user *arg)
{
	static const struct btrfs_ioctl_feature_flags features[3] = {
		INIT_FEATURE_FLAGS(SUPP),
		INIT_FEATURE_FLAGS(SAFE_SET),
		INIT_FEATURE_FLAGS(SAFE_CLEAR)
	};

	if (copy_to_user(arg, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}
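/*
 * Illustrative userspace sketch, not part of this file: the three array
 * slots returned by BTRFS_IOC_GET_SUPPORTED_FEATURES are, in order, the
 * bits this kernel knows about, the bits that may be set while mounted,
 * and the bits that may be cleared while mounted.
 *
 *	struct btrfs_ioctl_feature_flags feat[3];
 *
 *	if (ioctl(mntfd, BTRFS_IOC_GET_SUPPORTED_FEATURES, feat) == 0)
 *		printf("supported incompat: 0x%llx\n",
 *		       (unsigned long long)feat[0].incompat_flags);
 */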
static int btrfs_ioctl_get_features(struct btrfs_fs_info *fs_info,
				    void __user *arg)
{
	struct btrfs_super_block *super_block = fs_info->super_copy;
	struct btrfs_ioctl_feature_flags features;

	features.compat_flags = btrfs_super_compat_flags(super_block);
	features.compat_ro_flags = btrfs_super_compat_ro_flags(super_block);
	features.incompat_flags = btrfs_super_incompat_flags(super_block);

	if (copy_to_user(arg, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}
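/*
 * Illustrative userspace sketch, not part of this file: unlike the
 * "supported" query above, BTRFS_IOC_GET_FEATURES reports the feature bits
 * currently enabled in this filesystem's superblock.
 *
 *	struct btrfs_ioctl_feature_flags cur;
 *
 *	if (ioctl(mntfd, BTRFS_IOC_GET_FEATURES, &cur) == 0)
 *		printf("compat_ro: 0x%llx\n",
 *		       (unsigned long long)cur.compat_ro_flags);
 */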
static int check_feature_bits(struct btrfs_fs_info *fs_info,
			      enum btrfs_feature_set set,
			      u64 change_mask, u64 flags, u64 supported_flags,
			      u64 safe_set, u64 safe_clear)
{
	const char *type = btrfs_feature_set_name(set);
	char *names;
	u64 disallowed, unsupported;
	u64 set_mask = flags & change_mask;
	u64 clear_mask = ~flags & change_mask;

	unsupported = set_mask & ~supported_flags;
	if (unsupported) {
		names = btrfs_printable_features(set, unsupported);
		if (names) {
			btrfs_warn(fs_info,
				   "this kernel does not support the %s feature bit%s",
				   names, strchr(names, ',') ? "s" : "");
			kfree(names);
		} else
			btrfs_warn(fs_info,
				   "this kernel does not support %s bits 0x%llx",
				   type, unsupported);
		return -EOPNOTSUPP;
	}

	disallowed = set_mask & ~safe_set;
	if (disallowed) {
		names = btrfs_printable_features(set, disallowed);
		if (names) {
			btrfs_warn(fs_info,
				   "can't set the %s feature bit%s while mounted",
				   names, strchr(names, ',') ? "s" : "");
			kfree(names);
		} else
			btrfs_warn(fs_info,
				   "can't set %s bits 0x%llx while mounted",
				   type, disallowed);
		return -EPERM;
	}

	disallowed = clear_mask & ~safe_clear;
	if (disallowed) {
		names = btrfs_printable_features(set, disallowed);
		if (names) {
			btrfs_warn(fs_info,
				   "can't clear the %s feature bit%s while mounted",
				   names, strchr(names, ',') ? "s" : "");
			kfree(names);
		} else
			btrfs_warn(fs_info,
				   "can't clear %s bits 0x%llx while mounted",
				   type, disallowed);
		return -EPERM;
	}

	return 0;
}
#define check_feature(fs_info, change_mask, flags, mask_base)		\
check_feature_bits(fs_info, FEAT_##mask_base, change_mask, flags,	\
		   BTRFS_FEATURE_ ## mask_base ## _SUPP,		\
		   BTRFS_FEATURE_ ## mask_base ## _SAFE_SET,		\
		   BTRFS_FEATURE_ ## mask_base ## _SAFE_CLEAR)
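/*
 * For reference, a call such as
 *
 *	ret = check_feature(fs_info, flags[0].compat_ro_flags,
 *			    flags[1].compat_ro_flags, COMPAT_RO);
 *
 * expands to
 *
 *	ret = check_feature_bits(fs_info, FEAT_COMPAT_RO,
 *				 flags[0].compat_ro_flags,
 *				 flags[1].compat_ro_flags,
 *				 BTRFS_FEATURE_COMPAT_RO_SUPP,
 *				 BTRFS_FEATURE_COMPAT_RO_SAFE_SET,
 *				 BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR);
 */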
static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_super_block *super_block = fs_info->super_copy;
	struct btrfs_ioctl_feature_flags flags[2];
	struct btrfs_trans_handle *trans;
	u64 newflags;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(flags, arg, sizeof(flags)))
		return -EFAULT;

	/* Nothing to do */
	if (!flags[0].compat_flags && !flags[0].compat_ro_flags &&
	    !flags[0].incompat_flags)
		return 0;

	ret = check_feature(fs_info, flags[0].compat_flags,
			    flags[1].compat_flags, COMPAT);
	if (ret)
		return ret;

	ret = check_feature(fs_info, flags[0].compat_ro_flags,
			    flags[1].compat_ro_flags, COMPAT_RO);
	if (ret)
		return ret;

	ret = check_feature(fs_info, flags[0].incompat_flags,
			    flags[1].incompat_flags, INCOMPAT);
	if (ret)
		return ret;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_drop_write;
	}

	/* flags[0] selects the bits to change, flags[1] their new values. */
	spin_lock(&fs_info->super_lock);
	newflags = btrfs_super_compat_flags(super_block);
	newflags |= flags[0].compat_flags & flags[1].compat_flags;
	newflags &= ~(flags[0].compat_flags & ~flags[1].compat_flags);
	btrfs_set_super_compat_flags(super_block, newflags);

	newflags = btrfs_super_compat_ro_flags(super_block);
	newflags |= flags[0].compat_ro_flags & flags[1].compat_ro_flags;
	newflags &= ~(flags[0].compat_ro_flags & ~flags[1].compat_ro_flags);
	btrfs_set_super_compat_ro_flags(super_block, newflags);

	newflags = btrfs_super_incompat_flags(super_block);
	newflags |= flags[0].incompat_flags & flags[1].incompat_flags;
	newflags &= ~(flags[0].incompat_flags & ~flags[1].incompat_flags);
	btrfs_set_super_incompat_flags(super_block, newflags);
	spin_unlock(&fs_info->super_lock);

	ret = btrfs_commit_transaction(trans);
out_drop_write:
	mnt_drop_write_file(file);

	return ret;
}
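/*
 * Illustrative userspace sketch, not part of this file: for
 * BTRFS_IOC_SET_FEATURES, element [0] is the mask of bits the caller wants
 * to change and element [1] holds their new values; bits outside the mask
 * are left untouched. FREE_SPACE_TREE is used here only as an example of a
 * compat_ro flag; whether it is accepted while mounted depends on the
 * SAFE_SET mask checked above.
 *
 *	struct btrfs_ioctl_feature_flags set[2] = { 0 };
 *
 *	set[0].compat_ro_flags = BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE;
 *	set[1].compat_ro_flags = BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE;
 *	if (ioctl(mntfd, BTRFS_IOC_SET_FEATURES, set) < 0)
 *		perror("BTRFS_IOC_SET_FEATURES");
 */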
static int _btrfs_ioctl_send(struct inode *inode, void __user *argp, bool compat)
{
	struct btrfs_ioctl_send_args *arg;
	int ret;

	if (compat) {
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
		struct btrfs_ioctl_send_args_32 args32;

		ret = copy_from_user(&args32, argp, sizeof(args32));
		if (ret)
			return -EFAULT;
		arg = kzalloc(sizeof(*arg), GFP_KERNEL);
		if (!arg)
			return -ENOMEM;
		arg->send_fd = args32.send_fd;
		arg->clone_sources_count = args32.clone_sources_count;
		arg->clone_sources = compat_ptr(args32.clone_sources);
		arg->parent_root = args32.parent_root;
		arg->flags = args32.flags;
		memcpy(arg->reserved, args32.reserved,
		       sizeof(args32.reserved));
#else
		return -ENOTTY;
#endif
	} else {
		arg = memdup_user(argp, sizeof(*arg));
		if (IS_ERR(arg))
			return PTR_ERR(arg);
	}
	ret = btrfs_ioctl_send(inode, arg);
	kfree(arg);
	return ret;
}
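/*
 * Illustrative userspace sketch, not part of this file: a send-side tool
 * hands in a descriptor that will receive the stream (commonly the write
 * end of a pipe) and issues the ioctl on the subvolume to send;
 * "stream_fd" and "subvol_fd" are hypothetical.
 *
 *	struct btrfs_ioctl_send_args send_args = { 0 };
 *
 *	send_args.send_fd = stream_fd;
 *	if (ioctl(subvol_fd, BTRFS_IOC_SEND, &send_args) < 0)
 *		perror("BTRFS_IOC_SEND");
 */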
static int btrfs_ioctl_encoded_read(struct file *file, void __user *argp,
				    bool compat)
{
	struct btrfs_ioctl_encoded_io_args args = { 0 };
	size_t copy_end_kernel = offsetofend(struct btrfs_ioctl_encoded_io_args,
					     flags);
	size_t copy_end;
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	loff_t pos;
	struct kiocb kiocb;
	ssize_t ret;

	if (!capable(CAP_SYS_ADMIN)) {
		ret = -EPERM;
		goto out_acct;
	}

	if (compat) {
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
		struct btrfs_ioctl_encoded_io_args_32 args32;

		copy_end = offsetofend(struct btrfs_ioctl_encoded_io_args_32,
				       flags);
		if (copy_from_user(&args32, argp, copy_end)) {
			ret = -EFAULT;
			goto out_acct;
		}
		args.iov = compat_ptr(args32.iov);
		args.iovcnt = args32.iovcnt;
		args.offset = args32.offset;
		args.flags = args32.flags;
#else
		return -ENOTTY;
#endif
	} else {
		copy_end = copy_end_kernel;
		if (copy_from_user(&args, argp, copy_end)) {
			ret = -EFAULT;
			goto out_acct;
		}
	}
	if (args.flags != 0) {
		ret = -EINVAL;
		goto out_acct;
	}

	ret = import_iovec(READ, args.iov, args.iovcnt, ARRAY_SIZE(iovstack),
			   &iov, &iter);
	if (ret < 0)
		goto out_acct;

	if (iov_iter_count(&iter) == 0) {
		ret = 0;
		goto out_iov;
	}
	pos = args.offset;
	ret = rw_verify_area(READ, file, &pos, args.len);
	if (ret < 0)
		goto out_iov;

	init_sync_kiocb(&kiocb, file);
	kiocb.ki_pos = pos;

	ret = btrfs_encoded_read(&kiocb, &iter, &args);
	if (ret >= 0) {
		fsnotify_access(file);
		if (copy_to_user(argp + copy_end,
				(char *)&args + copy_end_kernel,
				sizeof(args) - copy_end_kernel))
			ret = -EFAULT;
	}

out_iov:
	kfree(iov);
out_acct:
	if (ret > 0)
		add_rchar(current, ret);
	inc_syscr(current);
	return ret;
}
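/*
 * Illustrative userspace sketch, not part of this file: encoded reads
 * require CAP_SYS_ADMIN. The caller supplies the iovec array and file
 * offset; on success the fields after "flags" (len, unencoded_len,
 * unencoded_offset, compression, ...) are copied back to describe the
 * extent that was returned. "fd" and "buf" are hypothetical.
 *
 *	struct btrfs_ioctl_encoded_io_args enc = { 0 };
 *	struct iovec enc_iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *
 *	enc.iov = &enc_iov;
 *	enc.iovcnt = 1;
 *	enc.offset = 0;
 *	if (ioctl(fd, BTRFS_IOC_ENCODED_READ, &enc) < 0)
 *		perror("BTRFS_IOC_ENCODED_READ");
 */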
static int btrfs_ioctl_encoded_write(struct file *file, void __user *argp, bool compat)
{
	struct btrfs_ioctl_encoded_io_args args;
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	loff_t pos;
	struct kiocb kiocb;
	ssize_t ret;

	if (!capable(CAP_SYS_ADMIN)) {
		ret = -EPERM;
		goto out_acct;
	}

	if (!(file->f_mode & FMODE_WRITE)) {
		ret = -EBADF;
		goto out_acct;
	}

	if (compat) {
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
		struct btrfs_ioctl_encoded_io_args_32 args32;

		if (copy_from_user(&args32, argp, sizeof(args32))) {
			ret = -EFAULT;
			goto out_acct;
		}
		args.iov = compat_ptr(args32.iov);
		args.iovcnt = args32.iovcnt;
		args.offset = args32.offset;
		args.flags = args32.flags;
		args.len = args32.len;
		args.unencoded_len = args32.unencoded_len;
		args.unencoded_offset = args32.unencoded_offset;
		args.compression = args32.compression;
		args.encryption = args32.encryption;
		memcpy(args.reserved, args32.reserved, sizeof(args.reserved));
#else
		return -ENOTTY;
#endif
	} else {
		if (copy_from_user(&args, argp, sizeof(args))) {
			ret = -EFAULT;
			goto out_acct;
		}
	}

	ret = -EINVAL;
	if (args.flags != 0)
		goto out_acct;
	if (memchr_inv(args.reserved, 0, sizeof(args.reserved)))
		goto out_acct;
	if (args.compression == BTRFS_ENCODED_IO_COMPRESSION_NONE &&
	    args.encryption == BTRFS_ENCODED_IO_ENCRYPTION_NONE)
		goto out_acct;
	if (args.compression >= BTRFS_ENCODED_IO_COMPRESSION_TYPES ||
	    args.encryption >= BTRFS_ENCODED_IO_ENCRYPTION_TYPES)
		goto out_acct;
	if (args.unencoded_offset > args.unencoded_len)
		goto out_acct;
	if (args.len > args.unencoded_len - args.unencoded_offset)
		goto out_acct;

	ret = import_iovec(WRITE, args.iov, args.iovcnt, ARRAY_SIZE(iovstack),
			   &iov, &iter);
	if (ret < 0)
		goto out_acct;

	file_start_write(file);

	if (iov_iter_count(&iter) == 0) {
		ret = 0;
		goto out_end_write;
	}
	pos = args.offset;
	ret = rw_verify_area(WRITE, file, &pos, args.len);
	if (ret < 0)
		goto out_end_write;

	init_sync_kiocb(&kiocb, file);
	ret = kiocb_set_rw_flags(&kiocb, 0);
	if (ret)
		goto out_end_write;
	kiocb.ki_pos = pos;

	ret = btrfs_do_write_iter(&kiocb, &iter, &args);
	if (ret > 0)
		fsnotify_modify(file);

out_end_write:
	file_end_write(file);
	kfree(iov);
out_acct:
	if (ret > 0)
		add_wchar(current, ret);
	inc_syscw(current);
	return ret;
}
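/*
 * For reference, the validation above requires an encoded write to satisfy
 * (sketch, not part of this file):
 *
 *	compression != NONE || encryption != NONE
 *	compression <  BTRFS_ENCODED_IO_COMPRESSION_TYPES
 *	encryption  <  BTRFS_ENCODED_IO_ENCRYPTION_TYPES
 *	unencoded_offset <= unencoded_len
 *	len <= unencoded_len - unencoded_offset
 *
 * so, for example, a compressed extent whose decoded size is 128K of which
 * the file references only the first 64K would pass unencoded_len = 131072,
 * unencoded_offset = 0 and len = 65536.
 */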
long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case FS_IOC_GETVERSION:
		return btrfs_ioctl_getversion(inode, argp);
	case FS_IOC_GETFSLABEL:
		return btrfs_ioctl_get_fslabel(fs_info, argp);
	case FS_IOC_SETFSLABEL:
		return btrfs_ioctl_set_fslabel(file, argp);
	case FITRIM:
		return btrfs_ioctl_fitrim(fs_info, argp);
	case BTRFS_IOC_SNAP_CREATE:
		return btrfs_ioctl_snap_create(file, argp, 0);
	case BTRFS_IOC_SNAP_CREATE_V2:
		return btrfs_ioctl_snap_create_v2(file, argp, 0);
	case BTRFS_IOC_SUBVOL_CREATE:
		return btrfs_ioctl_snap_create(file, argp, 1);
	case BTRFS_IOC_SUBVOL_CREATE_V2:
		return btrfs_ioctl_snap_create_v2(file, argp, 1);
	case BTRFS_IOC_SNAP_DESTROY:
		return btrfs_ioctl_snap_destroy(file, argp, false);
	case BTRFS_IOC_SNAP_DESTROY_V2:
		return btrfs_ioctl_snap_destroy(file, argp, true);
	case BTRFS_IOC_SUBVOL_GETFLAGS:
		return btrfs_ioctl_subvol_getflags(inode, argp);
	case BTRFS_IOC_SUBVOL_SETFLAGS:
		return btrfs_ioctl_subvol_setflags(file, argp);
	case BTRFS_IOC_DEFAULT_SUBVOL:
		return btrfs_ioctl_default_subvol(file, argp);
	case BTRFS_IOC_DEFRAG:
		return btrfs_ioctl_defrag(file, NULL);
	case BTRFS_IOC_DEFRAG_RANGE:
		return btrfs_ioctl_defrag(file, argp);
	case BTRFS_IOC_RESIZE:
		return btrfs_ioctl_resize(file, argp);
	case BTRFS_IOC_ADD_DEV:
		return btrfs_ioctl_add_dev(fs_info, argp);
	case BTRFS_IOC_RM_DEV:
		return btrfs_ioctl_rm_dev(file, argp);
	case BTRFS_IOC_RM_DEV_V2:
		return btrfs_ioctl_rm_dev_v2(file, argp);
	case BTRFS_IOC_FS_INFO:
		return btrfs_ioctl_fs_info(fs_info, argp);
	case BTRFS_IOC_DEV_INFO:
		return btrfs_ioctl_dev_info(fs_info, argp);
	case BTRFS_IOC_TREE_SEARCH:
		return btrfs_ioctl_tree_search(inode, argp);
	case BTRFS_IOC_TREE_SEARCH_V2:
		return btrfs_ioctl_tree_search_v2(inode, argp);
	case BTRFS_IOC_INO_LOOKUP:
		return btrfs_ioctl_ino_lookup(root, argp);
	case BTRFS_IOC_INO_PATHS:
		return btrfs_ioctl_ino_to_path(root, argp);
	case BTRFS_IOC_LOGICAL_INO:
		return btrfs_ioctl_logical_to_ino(fs_info, argp, 1);
	case BTRFS_IOC_LOGICAL_INO_V2:
		return btrfs_ioctl_logical_to_ino(fs_info, argp, 2);
	case BTRFS_IOC_SPACE_INFO:
		return btrfs_ioctl_space_info(fs_info, argp);
	case BTRFS_IOC_SYNC: {
		int ret;

		ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false);
		if (ret)
			return ret;
		ret = btrfs_sync_fs(inode->i_sb, 1);
		/*
		 * The transaction thread may want to do more work,
		 * namely it pokes the cleaner kthread that will start
		 * processing uncleaned subvols.
		 */
		wake_up_process(fs_info->transaction_kthread);
		return ret;
	}
	case BTRFS_IOC_START_SYNC:
		return btrfs_ioctl_start_sync(root, argp);
	case BTRFS_IOC_WAIT_SYNC:
		return btrfs_ioctl_wait_sync(fs_info, argp);
	case BTRFS_IOC_SCRUB:
		return btrfs_ioctl_scrub(file, argp);
	case BTRFS_IOC_SCRUB_CANCEL:
		return btrfs_ioctl_scrub_cancel(fs_info);
	case BTRFS_IOC_SCRUB_PROGRESS:
		return btrfs_ioctl_scrub_progress(fs_info, argp);
	case BTRFS_IOC_BALANCE_V2:
		return btrfs_ioctl_balance(file, argp);
	case BTRFS_IOC_BALANCE_CTL:
		return btrfs_ioctl_balance_ctl(fs_info, arg);
	case BTRFS_IOC_BALANCE_PROGRESS:
		return btrfs_ioctl_balance_progress(fs_info, argp);
	case BTRFS_IOC_SET_RECEIVED_SUBVOL:
		return btrfs_ioctl_set_received_subvol(file, argp);
#ifdef CONFIG_64BIT
	case BTRFS_IOC_SET_RECEIVED_SUBVOL_32:
		return btrfs_ioctl_set_received_subvol_32(file, argp);
#endif
	case BTRFS_IOC_SEND:
		return _btrfs_ioctl_send(inode, argp, false);
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
	case BTRFS_IOC_SEND_32:
		return _btrfs_ioctl_send(inode, argp, true);
#endif
	case BTRFS_IOC_GET_DEV_STATS:
		return btrfs_ioctl_get_dev_stats(fs_info, argp);
	case BTRFS_IOC_QUOTA_CTL:
		return btrfs_ioctl_quota_ctl(file, argp);
	case BTRFS_IOC_QGROUP_ASSIGN:
		return btrfs_ioctl_qgroup_assign(file, argp);
	case BTRFS_IOC_QGROUP_CREATE:
		return btrfs_ioctl_qgroup_create(file, argp);
	case BTRFS_IOC_QGROUP_LIMIT:
		return btrfs_ioctl_qgroup_limit(file, argp);
	case BTRFS_IOC_QUOTA_RESCAN:
		return btrfs_ioctl_quota_rescan(file, argp);
	case BTRFS_IOC_QUOTA_RESCAN_STATUS:
		return btrfs_ioctl_quota_rescan_status(fs_info, argp);
	case BTRFS_IOC_QUOTA_RESCAN_WAIT:
		return btrfs_ioctl_quota_rescan_wait(fs_info, argp);
	case BTRFS_IOC_DEV_REPLACE:
		return btrfs_ioctl_dev_replace(fs_info, argp);
	case BTRFS_IOC_GET_SUPPORTED_FEATURES:
		return btrfs_ioctl_get_supported_features(argp);
	case BTRFS_IOC_GET_FEATURES:
		return btrfs_ioctl_get_features(fs_info, argp);
	case BTRFS_IOC_SET_FEATURES:
		return btrfs_ioctl_set_features(file, argp);
	case BTRFS_IOC_GET_SUBVOL_INFO:
		return btrfs_ioctl_get_subvol_info(inode, argp);
	case BTRFS_IOC_GET_SUBVOL_ROOTREF:
		return btrfs_ioctl_get_subvol_rootref(root, argp);
	case BTRFS_IOC_INO_LOOKUP_USER:
		return btrfs_ioctl_ino_lookup_user(file, argp);
	case FS_IOC_ENABLE_VERITY:
		return fsverity_ioctl_enable(file, (const void __user *)argp);
	case FS_IOC_MEASURE_VERITY:
		return fsverity_ioctl_measure(file, argp);
	case BTRFS_IOC_ENCODED_READ:
		return btrfs_ioctl_encoded_read(file, argp, false);
	case BTRFS_IOC_ENCODED_WRITE:
		return btrfs_ioctl_encoded_write(file, argp, false);
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
	case BTRFS_IOC_ENCODED_READ_32:
		return btrfs_ioctl_encoded_read(file, argp, true);
	case BTRFS_IOC_ENCODED_WRITE_32:
		return btrfs_ioctl_encoded_write(file, argp, true);
#endif
	}

	return -ENOTTY;
}
#ifdef CONFIG_COMPAT
long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	/*
	 * These all access 32-bit values anyway so no further
	 * handling is necessary.
	 */
	switch (cmd) {
	case FS_IOC32_GETVERSION:
		cmd = FS_IOC_GETVERSION;
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif