1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
8 #include <sys/sendfile.h>
12 #include "alloc-util.h"
13 #include "btrfs-util.h"
14 #include "chattr-util.h"
16 #include "dirent-util.h"
22 #include "missing_syscall.h"
23 #include "mkdir-label.h"
24 #include "mountpoint-util.h"
25 #include "nulstr-util.h"
27 #include "selinux-util.h"
28 #include "signal-util.h"
29 #include "stat-util.h"
30 #include "stdio-util.h"
31 #include "string-util.h"
33 #include "sync-util.h"
34 #include "time-util.h"
35 #include "tmpfile-util.h"
36 #include "umask-util.h"
37 #include "user-util.h"
38 #include "xattr-util.h"
40 #define COPY_BUFFER_SIZE (16U*1024U)
42 /* A safety net for descending recursively into file system trees to copy. On Linux PATH_MAX is 4096, which means the
43 * deepest valid path one can build is around 2048, which we hence use as a safety net here, to not spin endlessly in
44 * case of bind mount cycles and suchlike. */
45 #define COPY_DEPTH_MAX 2048U
47 static ssize_t
try_copy_file_range(
48 int fd_in
, loff_t
*off_in
,
49 int fd_out
, loff_t
*off_out
,
59 r
= copy_file_range(fd_in
, off_in
, fd_out
, off_out
, len
, flags
);
61 have
= r
>= 0 || errno
!= ENOSYS
;
/* Return values of fd_is_nonblock_pipe() below. NOTE(review): the first two enumerators are reconstructed
 * from their uses at the splice() pipe checks — verify against the original file. */
enum {
        FD_IS_NO_PIPE,
        FD_IS_BLOCKING_PIPE,
        FD_IS_NONBLOCKING_PIPE,
};
74 static int fd_is_nonblock_pipe(int fd
) {
78 /* Checks whether the specified file descriptor refers to a pipe, and if so if O_NONBLOCK is set. */
80 if (fstat(fd
, &st
) < 0)
83 if (!S_ISFIFO(st
.st_mode
))
86 flags
= fcntl(fd
, F_GETFL
);
90 return FLAGS_SET(flags
, O_NONBLOCK
) ? FD_IS_NONBLOCKING_PIPE
: FD_IS_BLOCKING_PIPE
;
93 static int look_for_signals(CopyFlags copy_flags
) {
96 if ((copy_flags
& (COPY_SIGINT
|COPY_SIGTERM
)) == 0)
99 r
= pop_pending_signal(copy_flags
& COPY_SIGINT
? SIGINT
: 0,
100 copy_flags
& COPY_SIGTERM
? SIGTERM
: 0);
104 return log_debug_errno(SYNTHETIC_ERRNO(EINTR
),
105 "Got %s, cancelling copy operation.", signal_to_string(r
));
110 static int create_hole(int fd
, off_t size
) {
114 offset
= lseek(fd
, 0, SEEK_CUR
);
118 end
= lseek(fd
, 0, SEEK_END
);
122 /* If we're not at the end of the target file, try to punch a hole in the existing space using fallocate(). */
125 fallocate(fd
, FALLOC_FL_PUNCH_HOLE
| FALLOC_FL_KEEP_SIZE
, offset
, MIN(size
, end
- offset
)) < 0 &&
126 !ERRNO_IS_NOT_SUPPORTED(errno
))
129 if (end
- offset
>= size
) {
130 /* If we've created the full hole, set the file pointer to the end of the hole we created and exit. */
131 if (lseek(fd
, offset
+ size
, SEEK_SET
) < 0)
137 /* If we haven't created the full hole, use ftruncate() to grow the file (and the hole) to the
138 * required size and move the file pointer to the end of the file. */
140 size
-= end
- offset
;
142 if (ftruncate(fd
, end
+ size
) < 0)
145 if (lseek(fd
, 0, SEEK_END
) < 0)
154 CopyFlags copy_flags
,
156 size_t *ret_remains_size
,
157 copy_progress_bytes_t progress
,
160 bool try_cfr
= true, try_sendfile
= true, try_splice
= true, copied_something
= false;
161 int r
, nonblock_pipe
= -1;
162 size_t m
= SSIZE_MAX
; /* that is the maximum that sendfile and c_f_r accept */
167 /* Tries to copy bytes from the file descriptor 'fdf' to 'fdt' in the smartest possible way. Copies a maximum
168 * of 'max_bytes', which may be specified as UINT64_MAX, in which no maximum is applied. Returns negative on
169 * error, zero if EOF is hit before the bytes limit is hit and positive otherwise. If the copy fails for some
170 * reason but we read but didn't yet write some data an ret_remains/ret_remains_size is not NULL, then it will
171 * be initialized with an allocated buffer containing this "remaining" data. Note that these two parameters are
172 * initialized with a valid buffer only on failure and only if there's actually data already read. Otherwise
173 * these parameters if non-NULL are set to NULL. */
177 if (ret_remains_size
)
178 *ret_remains_size
= 0;
180 /* Try btrfs reflinks first. This only works on regular, seekable files, hence let's check the file offsets of
181 * source and destination first. */
182 if ((copy_flags
& COPY_REFLINK
)) {
185 foffset
= lseek(fdf
, 0, SEEK_CUR
);
189 toffset
= lseek(fdt
, 0, SEEK_CUR
);
192 if (foffset
== 0 && toffset
== 0 && max_bytes
== UINT64_MAX
)
193 r
= btrfs_reflink(fdf
, fdt
); /* full file reflink */
195 r
= btrfs_clone_range(fdf
, foffset
, fdt
, toffset
, max_bytes
== UINT64_MAX
? 0 : max_bytes
); /* partial reflink */
199 /* This worked, yay! Now — to be fully correct — let's adjust the file pointers */
200 if (max_bytes
== UINT64_MAX
) {
202 /* We cloned to the end of the source file, let's position the read
203 * pointer there, and query it at the same time. */
204 t
= lseek(fdf
, 0, SEEK_END
);
210 /* Let's adjust the destination file write pointer by the same number
212 t
= lseek(fdt
, toffset
+ (t
- foffset
), SEEK_SET
);
216 return 0; /* we copied the whole thing, hence hit EOF, return 0 */
218 t
= lseek(fdf
, foffset
+ max_bytes
, SEEK_SET
);
222 t
= lseek(fdt
, toffset
+ max_bytes
, SEEK_SET
);
226 return 1; /* we copied only some number of bytes, which worked, but this means we didn't hit EOF, return 1 */
237 return 1; /* return > 0 if we hit the max_bytes limit */
239 r
= look_for_signals(copy_flags
);
243 if (max_bytes
!= UINT64_MAX
&& m
> max_bytes
)
246 if (copy_flags
& COPY_HOLES
) {
249 c
= lseek(fdf
, 0, SEEK_CUR
);
253 /* To see if we're in a hole, we search for the next data offset. */
254 e
= lseek(fdf
, c
, SEEK_DATA
);
255 if (e
< 0 && errno
== ENXIO
)
256 /* If errno == ENXIO, that means we've reached the final hole of the file and
257 * that hole isn't followed by more data. */
258 e
= lseek(fdf
, 0, SEEK_END
);
262 /* If we're in a hole (current offset is not a data offset), create a hole of the
263 * same size in the target file. */
265 r
= create_hole(fdt
, e
- c
);
270 c
= e
; /* Set c to the start of the data segment. */
272 /* After copying a potential hole, find the end of the data segment by looking for
273 * the next hole. If we get ENXIO, we're at EOF. */
274 e
= lseek(fdf
, c
, SEEK_HOLE
);
281 /* SEEK_HOLE modifies the file offset so we need to move back to the initial offset. */
282 if (lseek(fdf
, c
, SEEK_SET
) < 0)
285 /* Make sure we're not copying more than the current data segment. */
286 m
= MIN(m
, (size_t) e
- c
);
289 /* First try copy_file_range(), unless we already tried */
291 n
= try_copy_file_range(fdf
, NULL
, fdt
, NULL
, m
, 0u);
293 if (!IN_SET(n
, -EINVAL
, -ENOSYS
, -EXDEV
, -EBADF
))
297 /* use fallback below */
298 } else if (n
== 0) { /* likely EOF */
300 if (copied_something
)
303 /* So, we hit EOF immediately, without having copied a single byte. This
304 * could indicate two things: the file is actually empty, or we are on some
305 * virtual file system such as procfs/sysfs where the syscall actually
306 * doesn't work but doesn't return an error. Try to handle that, by falling
307 * back to simple read()s in case we encounter empty files.
309 * See: https://lwn.net/Articles/846403/ */
310 try_cfr
= try_sendfile
= try_splice
= false;
316 /* First try sendfile(), unless we already tried */
318 n
= sendfile(fdt
, fdf
, NULL
, m
);
320 if (!IN_SET(errno
, EINVAL
, ENOSYS
))
323 try_sendfile
= false;
324 /* use fallback below */
325 } else if (n
== 0) { /* likely EOF */
327 if (copied_something
)
330 try_sendfile
= try_splice
= false; /* same logic as above for copy_file_range() */
336 /* Then try splice, unless we already tried. */
339 /* splice()'s asynchronous I/O support is a bit weird. When it encounters a pipe file
340 * descriptor, then it will ignore its O_NONBLOCK flag and instead only honour the
341 * SPLICE_F_NONBLOCK flag specified in its flag parameter. Let's hide this behaviour
342 * here, and check if either of the specified fds are a pipe, and if so, let's pass
343 * the flag automatically, depending on O_NONBLOCK being set.
345 * Here's a twist though: when we use it to move data between two pipes of which one
346 * has O_NONBLOCK set and the other has not, then we have no individual control over
347 * O_NONBLOCK behaviour. Hence in that case we can't use splice() and still guarantee
348 * systematic O_NONBLOCK behaviour, hence don't. */
350 if (nonblock_pipe
< 0) {
353 /* Check if either of these fds is a pipe, and if so non-blocking or not */
354 a
= fd_is_nonblock_pipe(fdf
);
358 b
= fd_is_nonblock_pipe(fdt
);
362 if ((a
== FD_IS_NO_PIPE
&& b
== FD_IS_NO_PIPE
) ||
363 (a
== FD_IS_BLOCKING_PIPE
&& b
== FD_IS_NONBLOCKING_PIPE
) ||
364 (a
== FD_IS_NONBLOCKING_PIPE
&& b
== FD_IS_BLOCKING_PIPE
))
366 /* splice() only works if one of the fds is a pipe. If neither is,
367 * let's skip this step right-away. As mentioned above, if one of the
368 * two fds refers to a blocking pipe and the other to a non-blocking
369 * pipe, we can't use splice() either, hence don't try either. This
370 * hence means we can only use splice() if either only one of the two
371 * fds is a pipe, or if both are pipes with the same nonblocking flag
376 nonblock_pipe
= a
== FD_IS_NONBLOCKING_PIPE
|| b
== FD_IS_NONBLOCKING_PIPE
;
381 n
= splice(fdf
, NULL
, fdt
, NULL
, m
, nonblock_pipe
? SPLICE_F_NONBLOCK
: 0);
383 if (!IN_SET(errno
, EINVAL
, ENOSYS
))
387 /* use fallback below */
388 } else if (n
== 0) { /* likely EOF */
390 if (copied_something
)
393 try_splice
= false; /* same logic as above for copy_file_range() + sendfile() */
399 /* As a fallback just copy bits by hand */
401 uint8_t buf
[MIN(m
, COPY_BUFFER_SIZE
)], *p
= buf
;
404 n
= read(fdf
, buf
, sizeof buf
);
407 if (n
== 0) /* EOF */
414 k
= write(fdt
, p
, z
);
428 if (ret_remains_size
)
429 *ret_remains_size
= z
;
442 r
= progress(n
, userdata
);
447 if (max_bytes
!= UINT64_MAX
) {
448 assert(max_bytes
>= (uint64_t) n
);
452 /* sendfile accepts at most SSIZE_MAX-offset bytes to copy, so reduce our maximum by the
453 * amount we already copied, but don't go below our copy buffer size, unless we are close the
454 * limit of bytes we are allowed to copy. */
455 m
= MAX(MIN(COPY_BUFFER_SIZE
, max_bytes
), m
- n
);
457 copied_something
= true;
460 return 0; /* return 0 if we hit EOF earlier than the size limit */
463 static int fd_copy_symlink(
466 const struct stat
*st
,
471 CopyFlags copy_flags
) {
473 _cleanup_free_
char *target
= NULL
;
480 r
= readlinkat_malloc(df
, from
, &target
);
484 if (copy_flags
& COPY_MAC_CREATE
) {
485 r
= mac_selinux_create_file_prepare_at(dt
, to
, S_IFLNK
);
489 r
= RET_NERRNO(symlinkat(target
, dt
, to
));
490 if (copy_flags
& COPY_MAC_CREATE
)
491 mac_selinux_create_file_clear();
493 if (FLAGS_SET(copy_flags
, COPY_GRACEFUL_WARN
) && (ERRNO_IS_PRIVILEGE(r
) || ERRNO_IS_NOT_SUPPORTED(r
))) {
494 log_notice_errno(r
, "Failed to copy symlink '%s', ignoring: %m", from
);
502 uid_is_valid(override_uid
) ? override_uid
: st
->st_uid
,
503 gid_is_valid(override_gid
) ? override_gid
: st
->st_gid
,
504 AT_SYMLINK_NOFOLLOW
) < 0)
507 (void) copy_xattr(df
, from
, dt
, to
, copy_flags
);
508 (void) utimensat(dt
, to
, (struct timespec
[]) { st
->st_atim
, st
->st_mtim
}, AT_SYMLINK_NOFOLLOW
);
/* Encapsulates the database we store potential hardlink targets in */
typedef struct HardlinkContext {
        int dir_fd;    /* An fd to the directory we use as lookup table. Never AT_FDCWD. Lazily created, when
                        * we add the first entry. */

        /* These two fields are used to create the hardlink repository directory above — via
         * mkdirat(parent_fd, subdir) — and are kept so that we can automatically remove the directory again
         * when we are done. */
        int parent_fd; /* Possibly AT_FDCWD */
        char *subdir;  /* Random child name under parent_fd; allocated by hardlink_context_setup() */
} HardlinkContext;
524 static int hardlink_context_setup(
528 CopyFlags copy_flags
) {
530 _cleanup_close_
int dt_copy
= -EBADF
;
534 assert(c
->dir_fd
< 0 && c
->dir_fd
!= AT_FDCWD
);
535 assert(c
->parent_fd
< 0);
538 /* If hardlink recreation is requested we have to maintain a database of inodes that are potential
539 * hardlink sources. Given that generally disk sizes have to be assumed to be larger than what fits
540 * into physical RAM we cannot maintain that database in dynamic memory alone. Here we opt to
541 * maintain it on disk, to simplify things: inside the destination directory we'll maintain a
542 * temporary directory consisting of hardlinks of every inode we copied that might be subject of
543 * hardlinks. We can then use that as hardlink source later on. Yes, this means additional disk IO
544 * but thankfully Linux is optimized for this kind of thing. If this ever becomes a performance
545 * bottleneck we can certainly place an in-memory hash table in front of this, but for the beginning,
546 * let's keep things simple, and just use the disk as lookup table for inodes.
548 * Note that this should have zero performance impact as long as .n_link of all files copied remains
549 * <= 0, because in that case we will not actually allocate the hardlink inode lookup table directory
550 * on disk (we do so lazily, when the first candidate with .n_link > 1 is seen). This means, in the
551 * common case where hardlinks are not used at all or only for few files the fact that we store the
552 * table on disk shouldn't matter perfomance-wise. */
554 if (!FLAGS_SET(copy_flags
, COPY_HARDLINKS
))
562 dt_copy
= fcntl(dt
, F_DUPFD_CLOEXEC
, 3);
567 r
= tempfn_random_child(to
, "hardlink", &c
->subdir
);
571 c
->parent_fd
= TAKE_FD(dt_copy
);
573 /* We don't actually create the directory we keep the table in here, that's done on-demand when the
574 * first entry is added, using hardlink_context_realize() below. */
578 static int hardlink_context_realize(HardlinkContext
*c
) {
582 if (c
->dir_fd
>= 0) /* Already realized */
585 if (c
->parent_fd
< 0 && c
->parent_fd
!= AT_FDCWD
) /* Not configured */
590 c
->dir_fd
= open_mkdir_at(c
->parent_fd
, c
->subdir
, O_EXCL
|O_CLOEXEC
, 0700);
597 static void hardlink_context_destroy(HardlinkContext
*c
) {
602 /* Automatically remove the hardlink lookup table directory again after we are done. This is used via
603 * _cleanup_() so that we really delete this, even on failure. */
605 if (c
->dir_fd
>= 0) {
606 r
= rm_rf_children(TAKE_FD(c
->dir_fd
), REMOVE_PHYSICAL
, NULL
); /* consumes dir_fd in all cases, even on failure */
608 log_debug_errno(r
, "Failed to remove hardlink store (%s) contents, ignoring: %m", c
->subdir
);
610 assert(c
->parent_fd
>= 0 || c
->parent_fd
== AT_FDCWD
);
613 if (unlinkat(c
->parent_fd
, c
->subdir
, AT_REMOVEDIR
) < 0)
614 log_debug_errno(errno
, "Failed to remove hardlink store (%s) directory, ignoring: %m", c
->subdir
);
617 assert_cc(AT_FDCWD
< 0);
618 c
->parent_fd
= safe_close(c
->parent_fd
);
620 c
->subdir
= mfree(c
->subdir
);
623 static int try_hardlink(
625 const struct stat
*st
,
629 char dev_ino
[DECIMAL_STR_MAX(dev_t
)*2 + DECIMAL_STR_MAX(uint64_t) + 4];
632 assert(dt
>= 0 || dt
== AT_FDCWD
);
635 if (!c
) /* No temporary hardlink directory, don't bother */
638 if (st
->st_nlink
<= 1) /* Source not hardlinked, don't bother */
641 if (c
->dir_fd
< 0) /* not yet realized, hence empty */
644 xsprintf(dev_ino
, "%u:%u:%" PRIu64
, major(st
->st_dev
), minor(st
->st_dev
), (uint64_t) st
->st_ino
);
645 if (linkat(c
->dir_fd
, dev_ino
, dt
, to
, 0) < 0) {
646 if (errno
!= ENOENT
) /* doesn't exist in store yet */
647 log_debug_errno(errno
, "Failed to hardlink %s to %s, ignoring: %m", dev_ino
, to
);
654 static int memorize_hardlink(
656 const struct stat
*st
,
660 char dev_ino
[DECIMAL_STR_MAX(dev_t
)*2 + DECIMAL_STR_MAX(uint64_t) + 4];
664 assert(dt
>= 0 || dt
== AT_FDCWD
);
667 if (!c
) /* No temporary hardlink directory, don't bother */
670 if (st
->st_nlink
<= 1) /* Source not hardlinked, don't bother */
673 r
= hardlink_context_realize(c
); /* Create the hardlink store lazily */
677 xsprintf(dev_ino
, "%u:%u:%" PRIu64
, major(st
->st_dev
), minor(st
->st_dev
), (uint64_t) st
->st_ino
);
678 if (linkat(dt
, to
, c
->dir_fd
, dev_ino
, 0) < 0) {
679 log_debug_errno(errno
, "Failed to hardlink %s to %s, ignoring: %m", to
, dev_ino
);
686 static int fd_copy_tree_generic(
689 const struct stat
*st
,
692 dev_t original_device
,
696 CopyFlags copy_flags
,
698 HardlinkContext
*hardlink_context
,
699 const char *display_path
,
700 copy_progress_path_t progress_path
,
701 copy_progress_bytes_t progress_bytes
,
704 static int fd_copy_regular(
707 const struct stat
*st
,
712 CopyFlags copy_flags
,
713 HardlinkContext
*hardlink_context
,
714 copy_progress_bytes_t progress
,
717 _cleanup_close_
int fdf
= -EBADF
, fdt
= -EBADF
;
724 r
= try_hardlink(hardlink_context
, st
, dt
, to
);
727 if (r
> 0) /* worked! */
730 fdf
= openat(df
, from
, O_RDONLY
|O_CLOEXEC
|O_NOCTTY
|O_NOFOLLOW
);
734 if (copy_flags
& COPY_MAC_CREATE
) {
735 r
= mac_selinux_create_file_prepare_at(dt
, to
, S_IFREG
);
739 fdt
= openat(dt
, to
, O_WRONLY
|O_CREAT
|O_EXCL
|O_CLOEXEC
|O_NOCTTY
|O_NOFOLLOW
, st
->st_mode
& 07777);
740 if (copy_flags
& COPY_MAC_CREATE
)
741 mac_selinux_create_file_clear();
745 r
= copy_bytes_full(fdf
, fdt
, UINT64_MAX
, copy_flags
, NULL
, NULL
, progress
, userdata
);
750 uid_is_valid(override_uid
) ? override_uid
: st
->st_uid
,
751 gid_is_valid(override_gid
) ? override_gid
: st
->st_gid
) < 0)
754 if (fchmod(fdt
, st
->st_mode
& 07777) < 0)
757 (void) futimens(fdt
, (struct timespec
[]) { st
->st_atim
, st
->st_mtim
});
758 (void) copy_xattr(fdf
, NULL
, fdt
, NULL
, copy_flags
);
760 if (copy_flags
& COPY_FSYNC
) {
761 if (fsync(fdt
) < 0) {
767 q
= close_nointr(TAKE_FD(fdt
)); /* even if this fails, the fd is now invalidated */
773 (void) memorize_hardlink(hardlink_context
, st
, dt
, to
);
777 (void) unlinkat(dt
, to
, 0);
781 static int fd_copy_fifo(
784 const struct stat
*st
,
789 CopyFlags copy_flags
,
790 HardlinkContext
*hardlink_context
) {
797 r
= try_hardlink(hardlink_context
, st
, dt
, to
);
800 if (r
> 0) /* worked! */
803 if (copy_flags
& COPY_MAC_CREATE
) {
804 r
= mac_selinux_create_file_prepare_at(dt
, to
, S_IFIFO
);
808 r
= RET_NERRNO(mkfifoat(dt
, to
, st
->st_mode
& 07777));
809 if (copy_flags
& COPY_MAC_CREATE
)
810 mac_selinux_create_file_clear();
812 if (FLAGS_SET(copy_flags
, COPY_GRACEFUL_WARN
) && (ERRNO_IS_PRIVILEGE(r
) || ERRNO_IS_NOT_SUPPORTED(r
))) {
813 log_notice_errno(r
, "Failed to copy fifo '%s', ignoring: %m", from
);
821 uid_is_valid(override_uid
) ? override_uid
: st
->st_uid
,
822 gid_is_valid(override_gid
) ? override_gid
: st
->st_gid
,
823 AT_SYMLINK_NOFOLLOW
) < 0)
826 if (fchmodat(dt
, to
, st
->st_mode
& 07777, 0) < 0)
829 (void) utimensat(dt
, to
, (struct timespec
[]) { st
->st_atim
, st
->st_mtim
}, AT_SYMLINK_NOFOLLOW
);
831 (void) memorize_hardlink(hardlink_context
, st
, dt
, to
);
835 static int fd_copy_node(
838 const struct stat
*st
,
843 CopyFlags copy_flags
,
844 HardlinkContext
*hardlink_context
) {
851 r
= try_hardlink(hardlink_context
, st
, dt
, to
);
854 if (r
> 0) /* worked! */
857 if (copy_flags
& COPY_MAC_CREATE
) {
858 r
= mac_selinux_create_file_prepare_at(dt
, to
, st
->st_mode
& S_IFMT
);
862 r
= RET_NERRNO(mknodat(dt
, to
, st
->st_mode
, st
->st_rdev
));
863 if (copy_flags
& COPY_MAC_CREATE
)
864 mac_selinux_create_file_clear();
866 if (FLAGS_SET(copy_flags
, COPY_GRACEFUL_WARN
) && (ERRNO_IS_PRIVILEGE(r
) || ERRNO_IS_NOT_SUPPORTED(r
))) {
867 log_notice_errno(r
, "Failed to copy node '%s', ignoring: %m", from
);
875 uid_is_valid(override_uid
) ? override_uid
: st
->st_uid
,
876 gid_is_valid(override_gid
) ? override_gid
: st
->st_gid
,
877 AT_SYMLINK_NOFOLLOW
) < 0)
880 if (fchmodat(dt
, to
, st
->st_mode
& 07777, 0) < 0)
883 (void) utimensat(dt
, to
, (struct timespec
[]) { st
->st_atim
, st
->st_mtim
}, AT_SYMLINK_NOFOLLOW
);
885 (void) memorize_hardlink(hardlink_context
, st
, dt
, to
);
889 static int fd_copy_directory(
892 const struct stat
*st
,
895 dev_t original_device
,
899 CopyFlags copy_flags
,
901 HardlinkContext
*hardlink_context
,
902 const char *display_path
,
903 copy_progress_path_t progress_path
,
904 copy_progress_bytes_t progress_bytes
,
907 _cleanup_(hardlink_context_destroy
) HardlinkContext our_hardlink_context
= {
912 _cleanup_close_
int fdf
= -EBADF
, fdt
= -EBADF
;
913 _cleanup_closedir_
DIR *d
= NULL
;
914 bool exists
, created
;
921 return -ENAMETOOLONG
;
924 fdf
= openat(df
, from
, O_RDONLY
|O_DIRECTORY
|O_CLOEXEC
|O_NOCTTY
|O_NOFOLLOW
);
926 fdf
= fcntl(df
, F_DUPFD_CLOEXEC
, 3);
930 if (!hardlink_context
) {
931 /* If recreating hardlinks is requested let's set up a context for that now. */
932 r
= hardlink_context_setup(&our_hardlink_context
, dt
, to
, copy_flags
);
935 if (r
> 0) /* It's enabled and allocated, let's now use the same context for all recursive
936 * invocations from here down */
937 hardlink_context
= &our_hardlink_context
;
940 d
= take_fdopendir(&fdf
);
945 if (copy_flags
& COPY_MERGE_EMPTY
) {
946 r
= dir_is_empty_at(dt
, to
, /* ignore_hidden_or_backup= */ false);
947 if (r
< 0 && r
!= -ENOENT
)
956 if (copy_flags
& COPY_MAC_CREATE
)
957 r
= mkdirat_label(dt
, to
, st
->st_mode
& 07777);
959 r
= mkdirat(dt
, to
, st
->st_mode
& 07777);
962 else if (errno
== EEXIST
&& (copy_flags
& COPY_MERGE
))
968 fdt
= openat(dt
, to
, O_RDONLY
|O_DIRECTORY
|O_CLOEXEC
|O_NOCTTY
|O_NOFOLLOW
);
974 if (PTR_TO_INT(hashmap_get(denylist
, st
)) == DENY_CONTENTS
) {
975 log_debug("%s is in the denylist, not recursing", from
);
979 FOREACH_DIRENT_ALL(de
, d
, return -errno
) {
980 const char *child_display_path
= NULL
;
981 _cleanup_free_
char *dp
= NULL
;
985 if (dot_or_dot_dot(de
->d_name
))
988 r
= look_for_signals(copy_flags
);
992 if (fstatat(dirfd(d
), de
->d_name
, &buf
, AT_SYMLINK_NOFOLLOW
) < 0) {
999 child_display_path
= dp
= path_join(display_path
, de
->d_name
);
1001 child_display_path
= de
->d_name
;
1003 r
= progress_path(child_display_path
, &buf
, userdata
);
1008 if (PTR_TO_INT(hashmap_get(denylist
, &buf
)) == DENY_INODE
) {
1009 log_debug("%s/%s is in the denylist, ignoring", from
, de
->d_name
);
1013 if (S_ISDIR(buf
.st_mode
)) {
1015 * Don't descend into directories on other file systems, if this is requested. We do a simple
1016 * .st_dev check here, which basically comes for free. Note that we do this check only on
1017 * directories, not other kind of file system objects, for two reason:
1019 * • The kernel's overlayfs pseudo file system that overlays multiple real file systems
1020 * propagates the .st_dev field of the file system a file originates from all the way up
1021 * through the stack to stat(). It doesn't do that for directories however. This means that
1022 * comparing .st_dev on non-directories suggests that they all are mount points. To avoid
1023 * confusion we hence avoid relying on this check for regular files.
1025 * • The main reason we do this check at all is to protect ourselves from bind mount cycles,
1026 * where we really want to avoid descending down in all eternity. However the .st_dev check
1027 * is usually not sufficient for this protection anyway, as bind mount cycles from the same
1028 * file system onto itself can't be detected that way. (Note we also do a recursion depth
1029 * check, which is probably the better protection in this regard, which is why
1030 * COPY_SAME_MOUNT is optional).
1033 if (FLAGS_SET(copy_flags
, COPY_SAME_MOUNT
)) {
1034 if (buf
.st_dev
!= original_device
)
1037 r
= fd_is_mount_point(dirfd(d
), de
->d_name
, 0);
1045 q
= fd_copy_tree_generic(dirfd(d
), de
->d_name
, &buf
, fdt
, de
->d_name
, original_device
,
1046 depth_left
-1, override_uid
, override_gid
, copy_flags
, denylist
,
1047 hardlink_context
, child_display_path
, progress_path
, progress_bytes
,
1050 if (q
== -EINTR
) /* Propagate SIGINT/SIGTERM up instantly */
1052 if (q
== -EEXIST
&& (copy_flags
& COPY_MERGE
))
1061 uid_is_valid(override_uid
) ? override_uid
: st
->st_uid
,
1062 gid_is_valid(override_gid
) ? override_gid
: st
->st_gid
) < 0)
1065 if (fchmod(fdt
, st
->st_mode
& 07777) < 0)
1068 (void) copy_xattr(dirfd(d
), NULL
, fdt
, NULL
, copy_flags
);
1069 (void) futimens(fdt
, (struct timespec
[]) { st
->st_atim
, st
->st_mtim
});
1072 if (copy_flags
& COPY_FSYNC_FULL
) {
1080 static int fd_copy_leaf(
1083 const struct stat
*st
,
1088 CopyFlags copy_flags
,
1089 HardlinkContext
*hardlink_context
,
1090 const char *display_path
,
1091 copy_progress_bytes_t progress_bytes
,
1095 if (S_ISREG(st
->st_mode
))
1096 r
= fd_copy_regular(df
, from
, st
, dt
, to
, override_uid
, override_gid
, copy_flags
, hardlink_context
, progress_bytes
, userdata
);
1097 else if (S_ISLNK(st
->st_mode
))
1098 r
= fd_copy_symlink(df
, from
, st
, dt
, to
, override_uid
, override_gid
, copy_flags
);
1099 else if (S_ISFIFO(st
->st_mode
))
1100 r
= fd_copy_fifo(df
, from
, st
, dt
, to
, override_uid
, override_gid
, copy_flags
, hardlink_context
);
1101 else if (S_ISBLK(st
->st_mode
) || S_ISCHR(st
->st_mode
) || S_ISSOCK(st
->st_mode
))
1102 r
= fd_copy_node(df
, from
, st
, dt
, to
, override_uid
, override_gid
, copy_flags
, hardlink_context
);
1109 static int fd_copy_tree_generic(
1112 const struct stat
*st
,
1115 dev_t original_device
,
1116 unsigned depth_left
,
1119 CopyFlags copy_flags
,
1121 HardlinkContext
*hardlink_context
,
1122 const char *display_path
,
1123 copy_progress_path_t progress_path
,
1124 copy_progress_bytes_t progress_bytes
,
1128 if (S_ISDIR(st
->st_mode
))
1129 return fd_copy_directory(df
, from
, st
, dt
, to
, original_device
, depth_left
-1, override_uid
,
1130 override_gid
, copy_flags
, denylist
, hardlink_context
, display_path
,
1131 progress_path
, progress_bytes
, userdata
);
1133 DenyType t
= PTR_TO_INT(hashmap_get(denylist
, st
));
1134 if (t
== DENY_INODE
) {
1135 log_debug("%s is in the denylist, ignoring", from
);
1137 } else if (t
== DENY_CONTENTS
)
1138 log_debug("%s is configured to have its contents excluded, but is not a directory", from
);
1140 r
= fd_copy_leaf(df
, from
, st
, dt
, to
, override_uid
, override_gid
, copy_flags
, hardlink_context
, display_path
, progress_bytes
, userdata
);
1141 /* We just tried to copy a leaf node of the tree. If it failed because the node already exists *and* the COPY_REPLACE flag has been provided, we should unlink the node and re-copy. */
1142 if (r
== -EEXIST
&& (copy_flags
& COPY_REPLACE
)) {
1143 /* This codepath is us trying to address an error to copy, if the unlink fails, lets just return the original error. */
1144 if (unlinkat(dt
, to
, 0) < 0)
1147 r
= fd_copy_leaf(df
, from
, st
, dt
, to
, override_uid
, override_gid
, copy_flags
, hardlink_context
, display_path
, progress_bytes
, userdata
);
1153 int copy_tree_at_full(
1160 CopyFlags copy_flags
,
1162 copy_progress_path_t progress_path
,
1163 copy_progress_bytes_t progress_bytes
,
1172 if (fstatat(fdf
, from
, &st
, AT_SYMLINK_NOFOLLOW
) < 0)
1175 r
= fd_copy_tree_generic(fdf
, from
, &st
, fdt
, to
, st
.st_dev
, COPY_DEPTH_MAX
, override_uid
,
1176 override_gid
, copy_flags
, denylist
, NULL
, NULL
, progress_path
,
1177 progress_bytes
, userdata
);
1181 if (S_ISDIR(st
.st_mode
) && (copy_flags
& COPY_SYNCFS
)) {
1182 /* If the top-level inode is a directory run syncfs() now. */
1183 r
= syncfs_path(fdt
, to
);
1186 } else if ((copy_flags
& (COPY_FSYNC_FULL
|COPY_SYNCFS
)) != 0) {
1187 /* fsync() the parent dir of what we just copied if COPY_FSYNC_FULL is set. Also do this in
1188 * case COPY_SYNCFS is set but the top-level inode wasn't actually a directory. We do this so that
1189 * COPY_SYNCFS provides reasonable synchronization semantics on any kind of inode: when the
1190 * copy operation is done the whole inode — regardless of its type — and all its children
1191 * will be synchronized to disk. */
1192 r
= fsync_parent_at(fdt
, to
);
1200 static int sync_dir_by_flags(const char *path
, CopyFlags copy_flags
) {
1202 if (copy_flags
& COPY_SYNCFS
)
1203 return syncfs_path(AT_FDCWD
, path
);
1204 if (copy_flags
& COPY_FSYNC_FULL
)
1205 return fsync_parent_at(AT_FDCWD
, path
);
1210 int copy_directory_fd_full(
1213 CopyFlags copy_flags
,
1214 copy_progress_path_t progress_path
,
1215 copy_progress_bytes_t progress_bytes
,
1224 if (fstat(dirfd
, &st
) < 0)
1227 r
= stat_verify_directory(&st
);
1231 r
= fd_copy_directory(
1237 UID_INVALID
, GID_INVALID
,
1246 r
= sync_dir_by_flags(to
, copy_flags
);
1253 int copy_directory_full(
1256 CopyFlags copy_flags
,
1257 copy_progress_path_t progress_path
,
1258 copy_progress_bytes_t progress_bytes
,
1267 if (lstat(from
, &st
) < 0)
1270 r
= stat_verify_directory(&st
);
1274 r
= fd_copy_directory(
1280 UID_INVALID
, GID_INVALID
,
1289 r
= sync_dir_by_flags(to
, copy_flags
);
1296 int copy_file_fd_at_full(
1300 CopyFlags copy_flags
,
1301 copy_progress_bytes_t progress_bytes
,
1304 _cleanup_close_
int fdf
= -EBADF
;
1308 assert(dir_fdf
>= 0 || dir_fdf
== AT_FDCWD
);
1312 fdf
= openat(dir_fdf
, from
, O_RDONLY
|O_CLOEXEC
|O_NOCTTY
);
1316 r
= fd_verify_regular(fdf
);
1320 if (fstat(fdt
, &st
) < 0)
1323 r
= copy_bytes_full(fdf
, fdt
, UINT64_MAX
, copy_flags
, NULL
, NULL
, progress_bytes
, userdata
);
1327 /* Make sure to copy file attributes only over if target is a regular
1328 * file (so that copying a file to /dev/null won't alter the access
1329 * mode/ownership of that device node...) */
1330 if (S_ISREG(st
.st_mode
)) {
1331 (void) copy_times(fdf
, fdt
, copy_flags
);
1332 (void) copy_xattr(fdf
, NULL
, fdt
, NULL
, copy_flags
);
1335 if (copy_flags
& COPY_FSYNC_FULL
) {
1336 r
= fsync_full(fdt
);
1339 } else if (copy_flags
& COPY_FSYNC
) {
1347 int copy_file_at_full(
1354 unsigned chattr_flags
,
1355 unsigned chattr_mask
,
1356 CopyFlags copy_flags
,
1357 copy_progress_bytes_t progress_bytes
,
1360 _cleanup_close_
int fdf
= -EBADF
, fdt
= -EBADF
;
1364 assert(dir_fdf
>= 0 || dir_fdf
== AT_FDCWD
);
1365 assert(dir_fdt
>= 0 || dir_fdt
== AT_FDCWD
);
1369 fdf
= openat(dir_fdf
, from
, O_RDONLY
|O_CLOEXEC
|O_NOCTTY
);
1373 if (fstat(fdf
, &st
) < 0)
1376 r
= stat_verify_regular(&st
);
1381 if (copy_flags
& COPY_MAC_CREATE
) {
1382 r
= mac_selinux_create_file_prepare_at(dir_fdt
, to
, S_IFREG
);
1386 fdt
= openat(dir_fdt
, to
, flags
|O_WRONLY
|O_CREAT
|O_CLOEXEC
|O_NOCTTY
,
1387 mode
!= MODE_INVALID
? mode
: st
.st_mode
);
1388 if (copy_flags
& COPY_MAC_CREATE
)
1389 mac_selinux_create_file_clear();
1394 if (!FLAGS_SET(flags
, O_EXCL
)) { /* if O_EXCL was used we created the thing as regular file, no need to check again */
1395 r
= fd_verify_regular(fdt
);
1400 if (chattr_mask
!= 0)
1401 (void) chattr_fd(fdt
, chattr_flags
, chattr_mask
& CHATTR_EARLY_FL
, NULL
);
1403 r
= copy_bytes_full(fdf
, fdt
, UINT64_MAX
, copy_flags
, NULL
, NULL
, progress_bytes
, userdata
);
1407 (void) copy_times(fdf
, fdt
, copy_flags
);
1408 (void) copy_xattr(fdf
, NULL
, fdt
, NULL
, copy_flags
);
1410 if (chattr_mask
!= 0)
1411 (void) chattr_fd(fdt
, chattr_flags
, chattr_mask
& ~CHATTR_EARLY_FL
, NULL
);
1413 if (copy_flags
& (COPY_FSYNC
|COPY_FSYNC_FULL
)) {
1414 if (fsync(fdt
) < 0) {
1420 r
= close_nointr(TAKE_FD(fdt
)); /* even if this fails, the fd is now invalidated */
1424 if (copy_flags
& COPY_FSYNC_FULL
) {
1425 r
= fsync_parent_at(dir_fdt
, to
);
1433 /* Only unlink if we definitely are the ones who created the file */
1434 if (FLAGS_SET(flags
, O_EXCL
))
1435 (void) unlinkat(dir_fdt
, to
, 0);
1440 int copy_file_atomic_at_full(
1446 unsigned chattr_flags
,
1447 unsigned chattr_mask
,
1448 CopyFlags copy_flags
,
1449 copy_progress_bytes_t progress_bytes
,
1452 _cleanup_(unlink_and_freep
) char *t
= NULL
;
1453 _cleanup_close_
int fdt
= -EBADF
;
1459 if (copy_flags
& COPY_MAC_CREATE
) {
1460 r
= mac_selinux_create_file_prepare_at(dir_fdt
, to
, S_IFREG
);
1464 fdt
= open_tmpfile_linkable_at(dir_fdt
, to
, O_WRONLY
|O_CLOEXEC
, &t
);
1465 if (copy_flags
& COPY_MAC_CREATE
)
1466 mac_selinux_create_file_clear();
1470 if (chattr_mask
!= 0)
1471 (void) chattr_fd(fdt
, chattr_flags
, chattr_mask
& CHATTR_EARLY_FL
, NULL
);
1473 r
= copy_file_fd_at_full(dir_fdf
, from
, fdt
, copy_flags
, progress_bytes
, userdata
);
1477 if (fchmod(fdt
, mode
) < 0)
1480 if ((copy_flags
& (COPY_FSYNC
|COPY_FSYNC_FULL
))) {
1486 r
= link_tmpfile_at(fdt
, dir_fdt
, t
, to
, copy_flags
& COPY_REPLACE
);
1492 if (chattr_mask
!= 0)
1493 (void) chattr_fd(fdt
, chattr_flags
, chattr_mask
& ~CHATTR_EARLY_FL
, NULL
);
1495 r
= close_nointr(TAKE_FD(fdt
)); /* even if this fails, the fd is now invalidated */
1499 if (copy_flags
& COPY_FSYNC_FULL
) {
1500 /* Sync the parent directory */
1501 r
= fsync_parent_at(dir_fdt
, to
);
1509 (void) unlinkat(dir_fdt
, to
, 0);
1513 int copy_times(int fdf
, int fdt
, CopyFlags flags
) {
1519 if (fstat(fdf
, &st
) < 0)
1522 if (futimens(fdt
, (struct timespec
[2]) { st
.st_atim
, st
.st_mtim
}) < 0)
1525 if (FLAGS_SET(flags
, COPY_CRTIME
)) {
1528 if (fd_getcrtime(fdf
, &crtime
) >= 0)
1529 (void) fd_setcrtime(fdt
, crtime
);
int copy_access(int fdf, int fdt) {
        struct stat st;

        assert(fdf >= 0);
        assert(fdt >= 0);

        /* Copies just the access mode (and not the ownership) from fdf to fdt */

        if (fstat(fdf, &st) < 0)
                return -errno;

        return RET_NERRNO(fchmod(fdt, st.st_mode & 07777));
}
int copy_rights_with_fallback(int fdf, int fdt, const char *patht) {
        struct stat st;

        assert(fdf >= 0);
        assert(fdt >= 0);

        /* Copies both access mode and ownership from fdf to fdt */

        if (fstat(fdf, &st) < 0)
                return -errno;

        return fchmod_and_chown_with_fallback(fdt, patht, st.st_mode & 07777, st.st_uid, st.st_gid);
}
1563 int copy_xattr(int df
, const char *from
, int dt
, const char *to
, CopyFlags copy_flags
) {
1564 _cleanup_free_
char *names
= NULL
;
1567 r
= listxattr_at_malloc(df
, from
, 0, &names
);
1571 NULSTR_FOREACH(p
, names
) {
1572 _cleanup_free_
char *value
= NULL
;
1574 if (!FLAGS_SET(copy_flags
, COPY_ALL_XATTRS
) && !startswith(p
, "user."))
1577 r
= getxattr_at_malloc(df
, from
, p
, 0, &value
);
1579 continue; /* gone by now */
1583 if (xsetxattr(dt
, to
, p
, value
, r
, 0) < 0)