copy/xfs_copy.c (thirdparty/xfsprogs-dev.git)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6
7 #include "libxfs.h"
8 #include <sys/stat.h>
9 #include <sys/wait.h>
10 #include <pthread.h>
11 #include <signal.h>
12 #include <stdarg.h>
13 #include "xfs_copy.h"
14 #include "libxlog.h"
15 #include "libfrog/platform.h"
16
17 #define rounddown(x, y) (((x)/(y))*(y))
18 #define uuid_equal(s,d) (platform_uuid_compare((s),(d)) == 0)
19
20 extern int platform_check_ismounted(char *, char *, struct stat *, int);
21
22 static char *logfile_name;
23 static FILE *logerr;
24 static char LOGFILE_NAME[] = "/var/tmp/xfs_copy.log.XXXXXX";
25
26 static char *source_name;
27 static int source_fd;
28
29 static unsigned int source_blocksize; /* source filesystem blocksize */
30 static unsigned int source_sectorsize; /* source disk sectorsize */
31
32 static xfs_agblock_t first_agbno;
33
34 static uint64_t barcount[11];
35
36 static unsigned int num_targets;
37 static target_control *target;
38
39 static wbuf w_buf;
40 static wbuf btree_buf;
41
42 static unsigned int kids;
43
44 static thread_control glob_masks;
45 static thread_args *targ;
46
47 static pthread_mutex_t mainwait;
48
49 #define ACTIVE 1
50 #define INACTIVE 2
51
52 xfs_off_t write_log_trailer(int fd, wbuf *w, xfs_mount_t *mp);
53 xfs_off_t write_log_header(int fd, wbuf *w, xfs_mount_t *mp);
54 static int format_logs(struct xfs_mount *);
55
56 /* general purpose message reporting routine */
57
58 #define OUT 0x01 /* use stdout stream */
59 #define ERR 0x02 /* use stderr stream */
60 #define LOG 0x04 /* use logerr stream */
61 #define PRE 0x08 /* append strerror string */
62 #define LAST 0x10 /* final message we print */
63
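/*
 * Build a signal set containing only @addset and apply it with
 * sigprocmask(@newset), where @newset is SIG_BLOCK or SIG_UNBLOCK;
 * used to mask SIGCHLD around write dispatch.
 */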
64 static void
65 signal_maskfunc(int addset, int newset)
66 {
67 sigset_t set;
68
69 sigemptyset(&set);
70 sigaddset(&set, addset);
71 sigprocmask(newset, &set, NULL);
72 }
73
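/*
 * Emit a printf-style message to the streams selected by @flags (LOG,
 * OUT, ERR).  PRE appends strerror(@code) and, combined with LAST,
 * points the user at the log file.  If writing to the log file fails,
 * the message is forced to stderr and the calling thread is terminated.
 */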
74 static void
75 do_message(int flags, int code, const char *fmt, ...)
76 {
77 va_list ap;
78 int eek = 0;
79
80 if (flags & LOG) {
81 va_start(ap, fmt);
82 if (vfprintf(logerr, fmt, ap) <= 0)
83 eek = 1;
84 va_end(ap);
85 }
86 if (eek)
87 flags |= ERR; /* failed, force stderr */
88 if (flags & ERR) {
89 va_start(ap, fmt);
90 vfprintf(stderr, fmt, ap);
91 va_end(ap);
92 } else if (flags & OUT) {
93 va_start(ap, fmt);
94 vfprintf(stdout, fmt, ap);
95 va_end(ap);
96 }
97
98 if (flags & PRE) {
99 do_message(flags & ~PRE, 0, ": %s\n", strerror(code));
100 if (flags & LAST)
101 fprintf(stderr,
102 _("Check logfile \"%s\" for more details\n"),
103 logfile_name);
104 }
105
106 /* logfile is broken, force a write to stderr */
107 if (eek) {
108 fprintf(stderr, _("%s: could not write to logfile \"%s\".\n"),
109 progname, logfile_name);
110 fprintf(stderr,
111 _("Aborting XFS copy -- logfile error -- reason: %s\n"),
112 strerror(errno));
113 rcu_unregister_thread();
114 pthread_exit(NULL);
115 }
116 }
117
118 #define do_out(args...) do_message(OUT|LOG, 0, ## args)
119 #define do_log(args...) do_message(ERR|LOG, 0, ## args)
120 #define do_warn(args...) do_message(LOG, 0, ## args)
121 #define do_error(e,s) do_message(ERR|LOG|PRE, e, s)
122 #define do_fatal(e,s) do_message(ERR|LOG|PRE|LAST, e, s)
123 #define do_vfatal(e,s,args...) do_message(ERR|LOG|PRE|LAST, e, s, ## args)
124 #define die_perror() \
125 do { \
126 do_message(ERR|LOG|PRE|LAST, errno, \
127 _("Aborting XFS copy - reason")); \
128 exit(1); \
129 } while (0)
130
131 /* workaround craziness in the xlog routines */
132 int xlog_recover_do_trans(struct xlog *log, struct xlog_recover *t, int p)
133 {
134 return 0;
135 }
136
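/*
 * Flush every still-active target and report any targets that did not
 * complete; exits with an error if at least one copy failed.
 */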
137 static void
138 check_errors(void)
139 {
140 int i, first_error = 0;
141
142 for (i = 0; i < num_targets; i++) {
143 if (target[i].state != INACTIVE) {
144 if (platform_flush_device(target[i].fd, 0)) {
145 target[i].error = errno;
146 target[i].state = INACTIVE;
147 target[i].err_type = 2;
148 }
149 }
150
151 if (target[i].state == INACTIVE) {
152 if (first_error == 0) {
153 first_error++;
154 do_log(
155 _("THE FOLLOWING COPIES FAILED TO COMPLETE\n"));
156 }
157 do_log(" %s -- ", target[i].name);
158 switch (target[i].err_type) {
159 case 0:
160 do_log(_("write error"));
161 break;
162 case 1:
163 do_log(_("lseek error"));
164 break;
165 case 2:
166 do_log(_("flush error"));
167 break;
168 default:
169 do_log(_("unknown error type %d"),
170 target[i].err_type);
171 break;
172 }
173 do_log(_(" at offset %lld\n"), target[i].position);
174 }
175 }
176 if (first_error == 0) {
177 fprintf(stdout, _("All copies completed.\n"));
178 fflush(NULL);
179 } else {
180 fprintf(stderr, _("See \"%s\" for more details.\n"),
181 logfile_name);
182 exit(1);
183 }
184 }
185
186 /*
187 * don't have to worry about alignment and mins because those
188 * are taken care of when the buffer's read in
189 */
190 static int
191 do_write(
192 thread_args *args,
193 wbuf *buf)
194 {
195 int res;
196 int error = 0;
197
198 if (!buf)
199 buf = &w_buf;
200
201 if (target[args->id].position != buf->position) {
202 if (lseek(args->fd, buf->position, SEEK_SET) < 0) {
203 error = target[args->id].err_type = 1;
204 } else {
205 target[args->id].position = buf->position;
206 }
207 }
208
209 if ((res = write(target[args->id].fd, buf->data,
210 buf->length)) == buf->length) {
211 target[args->id].position += res;
212 } else {
213 error = 2;
214 }
215
216 if (error) {
217 target[args->id].error = errno;
218 target[args->id].position = buf->position;
219 }
220 return error;
221 }
222
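/*
 * Per-target worker thread.  Each iteration blocks on its private wait
 * mutex until the main thread releases it, writes the shared w_buf to
 * this thread's target, then decrements glob_masks.num_working and
 * unlocks mainwait when it is the last worker to finish.  On a write
 * error the target is marked INACTIVE and the thread exits.
 */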
223 static void *
224 begin_reader(void *arg)
225 {
226 thread_args *args = arg;
227
228 rcu_register_thread();
229 for (;;) {
230 pthread_mutex_lock(&args->wait);
231 if (do_write(args, NULL))
232 goto handle_error;
233 pthread_mutex_lock(&glob_masks.mutex);
234 if (--glob_masks.num_working == 0)
235 pthread_mutex_unlock(&mainwait);
236 pthread_mutex_unlock(&glob_masks.mutex);
237 }
238 /* NOTREACHED */
239
240 handle_error:
241 /* error will be logged by primary thread */
242
243 pthread_mutex_lock(&glob_masks.mutex);
244 target[args->id].state = INACTIVE;
245 if (--glob_masks.num_working == 0)
246 pthread_mutex_unlock(&mainwait);
247 pthread_mutex_unlock(&glob_masks.mutex);
248 rcu_unregister_thread();
249 pthread_exit(NULL);
250 return NULL;
251 }
252
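/*
 * SIGCHLD handler: reap the terminated child, find the matching target
 * and report how it failed.  If no workers remain, the whole copy is
 * aborted; a child that died without recording an I/O error is treated
 * as fatal.
 */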
253 static void
254 handler(int sig)
255 {
256 pid_t pid;
257 int status, i;
258
259 pid = wait(&status);
260
261 kids--;
262
263 for (i = 0; i < num_targets; i++) {
264 if (target[i].pid == pid) {
265 if (target[i].state == INACTIVE) {
266 /* thread got an I/O error */
267
268 if (target[i].err_type == 0) {
269 do_warn(
270 _("%s: write error on target %d \"%s\" at offset %lld\n"),
271 progname, i, target[i].name,
272 target[i].position);
273 } else {
274 do_warn(
275 _("%s: lseek error on target %d \"%s\" at offset %lld\n"),
276 progname, i, target[i].name,
277 target[i].position);
278 }
279
280 do_vfatal(target[i].error,
281 _("Aborting target %d - reason"), i);
282
283 if (kids == 0) {
284 do_log(
285 _("Aborting XFS copy - no more targets.\n"));
286 check_errors();
287 pthread_exit(NULL);
288 }
289
290 signal(SIGCHLD, handler);
291 return;
292 } else {
293 /* the thread died unexpectedly without recording an error; log it */
294
295 do_warn(
296 _("%s: thread %d died unexpectedly, target \"%s\" incomplete\n"),
297 progname, i, target[i].name);
298 do_warn(_("%s: offset was probably %lld\n"),
299 progname, target[i].position);
300 do_fatal(target[i].error,
301 _("Aborting XFS copy - reason"));
302 pthread_exit(NULL);
303 }
304 }
305 }
306
307 /* unknown child -- something very wrong */
308
309 do_warn(_("%s: Unknown child died (should never happen!)\n"), progname);
310 die_perror();
311 pthread_exit(NULL);
312 signal(SIGCHLD, handler);
313 }
314
315 static void
316 usage(void)
317 {
318 fprintf(stderr,
319 _("Usage: %s [-bdV] [-L logfile] source target [target ...]\n"),
320 progname);
321 exit(1);
322 }
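/*
 * Example invocations (illustrative only, device names are made up):
 *
 *   xfs_copy /dev/sdb1 /dev/sdc1 /images/backup.img
 *   xfs_copy -d -L /tmp/copy.log /dev/sdb1 /dev/sdc1
 *
 * -d duplicates the source filesystem UUID onto the targets instead of
 * generating new ones, -b selects buffered rather than direct I/O for
 * regular-file targets, and -L overrides the default log file created
 * under /var/tmp.
 */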
323
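/*
 * Crude progress bar: init_bar() records the block count at each 10%
 * step of the estimated copy size, and bump_bar() prints the next
 * milestone(s) whenever numblocks crosses them.
 */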
324 static void
325 init_bar(uint64_t source_blocks)
326 {
327 int i;
328
329 for (i = 0; i < 11; i++)
330 barcount[i] = (source_blocks/10)*i;
331 }
332
333 static int
334 bump_bar(int tenths, uint64_t numblocks)
335 {
336 static char *bar[11] = {
337 " 0% ",
338 " ... 10% ",
339 " ... 20% ",
340 " ... 30% ",
341 " ... 40% ",
342 " ... 50% ",
343 " ... 60% ",
344 " ... 70% ",
345 " ... 80% ",
346 " ... 90% ",
347 " ... 100%\n\n",
348 };
349
350 if (tenths > 10) {
351 printf("%s", bar[10]);
352 fflush(stdout);
353 } else {
354 while (tenths < 10 && numblocks > barcount[tenths]) {
355 printf("%s", bar[tenths]);
356 fflush(stdout);
357 tenths++;
358 }
359 }
360 return tenths;
361 }
362
363 static xfs_off_t source_position = -1;
364
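/*
 * Set up an I/O buffer: allocate @data_size bytes aligned to
 * @data_align, halving the size until memalign() succeeds, and give up
 * if it drops below @min_io_size.
 */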
365 static wbuf *
366 wbuf_init(wbuf *buf, int data_size, int data_align, int min_io_size, int id)
367 {
368 ASSERT(data_size % BBSIZE == 0);
369 while ((buf->data = memalign(data_align, data_size)) == NULL) {
370 data_size >>= 1;
371 if (data_size < min_io_size)
372 return NULL;
373 }
374 ASSERT(min_io_size % BBSIZE == 0);
375 buf->data_align = data_align;
376 buf->min_io_size = min_io_size;
377 buf->size = data_size;
378 buf->id = id;
379 return buf;
380 }
381
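/*
 * Read buf->length bytes of the source at buf->position.  The position
 * is rounded down and the length rounded up to min_io_size so the read
 * stays suitable for direct I/O; a short read is accepted only when it
 * ends exactly at the end of the filesystem's data area.
 */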
382 static void
383 read_wbuf(int fd, wbuf *buf, xfs_mount_t *mp)
384 {
385 int res = 0;
386 xfs_off_t lres = 0;
387 xfs_off_t newpos;
388 size_t diff;
389
390 newpos = rounddown(buf->position, (xfs_off_t) buf->min_io_size);
391
392 if (newpos != buf->position) {
393 diff = buf->position - newpos;
394 buf->position = newpos;
395
396 buf->length += diff;
397 }
398
399 if (source_position != buf->position) {
400 lres = lseek(fd, buf->position, SEEK_SET);
401 if (lres < 0LL) {
402 do_warn(_("%s: lseek failure at offset %lld\n"),
403 progname, source_position);
404 die_perror();
405 }
406 source_position = buf->position;
407 }
408
409 ASSERT(source_position % source_sectorsize == 0);
410
411 /* round up length for direct I/O if necessary */
412
413 if (buf->length % buf->min_io_size != 0)
414 buf->length = roundup(buf->length, buf->min_io_size);
415
416 if (buf->length > buf->size) {
417 do_warn(_("assert error: buf->length = %d, buf->size = %d\n"),
418 buf->length, buf->size);
419 exit(1);
420 }
421
422 if ((res = read(fd, buf->data, buf->length)) < 0) {
423 do_warn(_("%s: read failure at offset %lld\n"),
424 progname, source_position);
425 die_perror();
426 }
427
428 if (res < buf->length &&
429 source_position + res == mp->m_sb.sb_dblocks * source_blocksize)
430 res = buf->length;
431 else
432 ASSERT(res == buf->length);
433 source_position += res;
434 buf->length = res;
435 }
436
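/*
 * Read the header blocks of AG @agno (everything before first_agbno)
 * into @buf and point ag->xfs_sb/xfs_agf/xfs_agi/xfs_agfl at the copies
 * inside the buffer, checking the superblock, AGF and AGI magic numbers.
 */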
437 static void
438 read_ag_header(int fd, xfs_agnumber_t agno, wbuf *buf, ag_header_t *ag,
439 xfs_mount_t *mp, int blocksize, int sectorsize)
440 {
441 xfs_daddr_t off;
442 int length;
443 xfs_off_t newpos;
444 size_t diff;
445
446 /* initial settings */
447
448 diff = 0;
449 off = XFS_AG_DADDR(mp, agno, XFS_SB_DADDR);
450 buf->position = (xfs_off_t) off * (xfs_off_t) BBSIZE;
451 length = buf->length = first_agbno * blocksize;
452 if (length == 0) {
453 do_log(_("ag header buffer invalid!\n"));
454 exit(1);
455 }
456
457 /* handle alignment stuff */
458
459 newpos = rounddown(buf->position, (xfs_off_t) buf->min_io_size);
460 if (newpos != buf->position) {
461 diff = buf->position - newpos;
462 buf->position = newpos;
463 buf->length += diff;
464 }
465
466 /* round up length for direct I/O if necessary */
467
468 if (buf->length % buf->min_io_size != 0)
469 buf->length = roundup(buf->length, buf->min_io_size);
470
471 read_wbuf(fd, buf, mp);
472 ASSERT(buf->length >= length);
473
474 ag->xfs_sb = (struct xfs_dsb *) (buf->data + diff);
475 ASSERT(be32_to_cpu(ag->xfs_sb->sb_magicnum) == XFS_SB_MAGIC);
476 ag->xfs_agf = (xfs_agf_t *) (buf->data + diff + sectorsize);
477 ASSERT(be32_to_cpu(ag->xfs_agf->agf_magicnum) == XFS_AGF_MAGIC);
478 ag->xfs_agi = (xfs_agi_t *) (buf->data + diff + 2 * sectorsize);
479 ASSERT(be32_to_cpu(ag->xfs_agi->agi_magicnum) == XFS_AGI_MAGIC);
480 ag->xfs_agfl = (struct xfs_agfl *) (buf->data + diff + 3 * sectorsize);
481 }
482
483
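/*
 * Fan the current w_buf out to every active target: count the active
 * threads, wake them all, then block on mainwait (with SIGCHLD
 * temporarily unblocked) until the last one has finished its write.
 */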
484 static void
485 write_wbuf(void)
486 {
487 int i;
488 int badness = 0;
489
490 /* verify target threads */
491 for (i = 0; i < num_targets; i++)
492 if (target[i].state != INACTIVE)
493 glob_masks.num_working++;
494
495 /* release target threads */
496 for (i = 0; i < num_targets; i++)
497 if (target[i].state != INACTIVE)
498 pthread_mutex_unlock(&targ[i].wait); /* wake up */
499 else
500 badness++;
501
502 /*
503 * If all the targets are inactive then there won't be any io
504 * threads left to release mainwait. We're screwed, so bail out.
505 */
506 if (badness == num_targets) {
507 check_errors();
508 exit(1);
509 }
510
511 signal_maskfunc(SIGCHLD, SIG_UNBLOCK);
512 pthread_mutex_lock(&mainwait);
513 signal_maskfunc(SIGCHLD, SIG_BLOCK);
514 }
515
516 static void
517 sb_update_uuid(
518 struct xfs_mount *mp,
519 ag_header_t *ag_hdr, /* AG hdr to update for this copy */
520 thread_args *tcarg) /* Args for this thread, with UUID */
521 {
522 /*
523 * If this filesystem has CRCs, the original UUID is stamped into
524 * all metadata. If we don't have an existing meta_uuid field in the
525 * original filesystem and we are changing the UUID in this copy,
526 * we must copy the original sb_uuid to the sb_meta_uuid slot and set
527 * the incompat flag for the feature on this copy.
528 */
529 if (xfs_has_crc(mp) && !xfs_has_metauuid(mp) &&
530 !uuid_equal(&tcarg->uuid, &mp->m_sb.sb_uuid)) {
531 uint32_t feat;
532
533 feat = be32_to_cpu(ag_hdr->xfs_sb->sb_features_incompat);
534 feat |= XFS_SB_FEAT_INCOMPAT_META_UUID;
535 ag_hdr->xfs_sb->sb_features_incompat = cpu_to_be32(feat);
536 platform_uuid_copy(&ag_hdr->xfs_sb->sb_meta_uuid,
537 &mp->m_sb.sb_uuid);
538 }
539
540 /* Copy the (possibly new) fs-identifier UUID into sb_uuid */
541 platform_uuid_copy(&ag_hdr->xfs_sb->sb_uuid, &tcarg->uuid);
542
543 /* We may have changed the UUID, so update the superblock CRC */
544 if (xfs_has_crc(mp))
545 xfs_update_cksum((char *)ag_hdr->xfs_sb, mp->m_sb.sb_sectsize,
546 XFS_SB_CRC_OFF);
547 }
548
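/*
 * Overall flow: open and sanity-check the source, mount it read-only
 * through libxfs, open the targets and start one writer thread per
 * target, then walk each AG's by-block free space btree and copy the
 * in-use extents to all targets.  Finally the log is cleared or
 * reformatted (skipped for -d) and the superblocks are rewritten,
 * clearing the in-progress flag that was set on AG 0 at the start.
 */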
549 int
550 main(int argc, char **argv)
551 {
552 int i, j;
553 int logfd;
554 int howfar = 0;
555 int open_flags;
556 xfs_off_t pos;
557 size_t length;
558 int c;
559 uint64_t size, sizeb;
560 uint64_t numblocks = 0;
561 int wblocks = 0;
562 int num_threads = 0;
563 struct dioattr d;
564 int wbuf_size;
565 int wbuf_align;
566 int wbuf_miniosize;
567 int source_is_file = 0;
568 int buffered_output = 0;
569 int duplicate = 0;
570 uint btree_levels, current_level;
571 ag_header_t ag_hdr;
572 xfs_mount_t *mp;
573 xfs_mount_t mbuf;
574 struct xlog xlog;
575 struct xfs_buf *sbp;
576 xfs_sb_t *sb;
577 xfs_agnumber_t num_ags, agno;
578 xfs_agblock_t bno;
579 xfs_daddr_t begin, next_begin, ag_begin, new_begin, ag_end;
580 struct xfs_btree_block *block;
581 xfs_alloc_ptr_t *ptr;
582 xfs_alloc_rec_t *rec_ptr;
583 extern char *optarg;
584 extern int optind;
585 struct libxfs_init xargs;
586 thread_args *tcarg;
587 struct stat statbuf;
588 int error;
589
590 progname = basename(argv[0]);
591
592 setlocale(LC_ALL, "");
593 bindtextdomain(PACKAGE, LOCALEDIR);
594 textdomain(PACKAGE);
595
596 while ((c = getopt(argc, argv, "bdL:V")) != EOF) {
597 switch (c) {
598 case 'b':
599 buffered_output = 1;
600 break;
601 case 'd':
602 duplicate = 1;
603 break;
604 case 'L':
605 logfile_name = optarg;
606 break;
607 case 'V':
608 printf(_("%s version %s\n"), progname, VERSION);
609 exit(0);
610 default:
611 usage();
612 }
613 }
614
615 if (argc - optind < 2)
616 usage();
617
618 if (logfile_name) {
619 logfd = open(logfile_name, O_CREAT|O_WRONLY|O_EXCL, 0600);
620 } else {
621 logfile_name = LOGFILE_NAME;
622 logfd = mkstemp(logfile_name);
623 }
624
625 if (logfd < 0) {
626 fprintf(stderr, _("%s: couldn't open log file \"%s\"\n"),
627 progname, logfile_name);
628 perror(_("Aborting XFS copy - reason"));
629 exit(1);
630 }
631
632 if ((logerr = fdopen(logfd, "w")) == NULL) {
633 fprintf(stderr, _("%s: couldn't set up logfile stream\n"),
634 progname);
635 perror(_("Aborting XFS copy - reason"));
636 exit(1);
637 }
638
639 source_name = argv[optind];
640 source_fd = -1;
641 optind++;
642
643 num_targets = argc - optind;
644 if ((target = malloc(sizeof(target_control) * num_targets)) == NULL) {
645 do_log(_("Couldn't allocate target array\n"));
646 die_perror();
647 }
648 for (i = 0; optind < argc; i++, optind++) {
649 target[i].name = argv[optind];
650 target[i].fd = -1;
651 target[i].position = -1;
652 target[i].state = INACTIVE;
653 target[i].error = 0;
654 target[i].err_type = 0;
655 }
656
657 /* open up source -- is it a file? */
658
659 open_flags = O_RDONLY;
660
661 if ((source_fd = open(source_name, open_flags)) < 0) {
662 do_log(_("%s: couldn't open source \"%s\"\n"),
663 progname, source_name);
664 die_perror();
665 }
666
667 if (fstat(source_fd, &statbuf) < 0) {
668 do_log(_("%s: couldn't stat source \"%s\"\n"),
669 progname, source_name);
670 die_perror();
671 }
672
673 if (S_ISREG(statbuf.st_mode))
674 source_is_file = 1;
675
676 if (source_is_file && platform_test_xfs_fd(source_fd)) {
677 if (fcntl(source_fd, F_SETFL, open_flags | O_DIRECT) < 0) {
678 do_log(_("%s: Cannot set direct I/O flag on \"%s\".\n"),
679 progname, source_name);
680 die_perror();
681 }
682 if (xfsctl(source_name, source_fd, XFS_IOC_DIOINFO, &d) < 0) {
683 do_log(_("%s: xfsctl on file \"%s\" failed.\n"),
684 progname, source_name);
685 die_perror();
686 }
687
688 wbuf_align = d.d_mem;
689 wbuf_size = min(d.d_maxiosz, 1 * 1024 * 1024);
690 wbuf_miniosize = d.d_miniosz;
691 } else {
692 /* set arbitrary I/O params, miniosize at least 1 disk block */
693
694 wbuf_align = getpagesize();
695 wbuf_size = 1 * 1024 * 1024;
696 wbuf_miniosize = -1; /* set after mounting source fs */
697 }
698
699 if (!source_is_file) {
700 /*
701 * check to make sure a filesystem isn't mounted
702 * on the device
703 */
704 if (platform_check_ismounted(source_name, NULL, &statbuf, 0)) {
705 do_log(
706 _("%s: Warning -- a filesystem is mounted on the source device.\n"),
707 progname);
708 do_log(
709 _("\t\tGenerated copies may be corrupt unless the source is\n"));
710 do_log(
711 _("\t\tunmounted or mounted read-only. Copy proceeding...\n"));
712 }
713 }
714
715 /* prepare the libxfs_init structure */
716
717 memset(&xargs, 0, sizeof(xargs));
718 xargs.isdirect = LIBXFS_DIRECT;
719 xargs.isreadonly = LIBXFS_ISREADONLY;
720
721 xargs.dname = source_name;
722 xargs.disfile = source_is_file;
723
724 if (!libxfs_init(&xargs)) {
725 do_log(_("%s: couldn't initialize XFS library\n"
726 "%s: Aborting.\n"), progname, progname);
727 exit(1);
728 }
729
730 memset(&mbuf, 0, sizeof(xfs_mount_t));
731
732 /* We don't yet know the sector size, so read maximal size */
733 libxfs_buftarg_init(&mbuf, &xargs);
734 error = -libxfs_buf_read_uncached(mbuf.m_ddev_targp, XFS_SB_DADDR,
735 1 << (XFS_MAX_SECTORSIZE_LOG - BBSHIFT), 0, &sbp, NULL);
736 if (error) {
737 do_log(_("%s: couldn't read superblock, error=%d\n"),
738 progname, error);
739 exit(1);
740 }
741
742 sb = &mbuf.m_sb;
743 libxfs_sb_from_disk(sb, sbp->b_addr);
744
745 /* Do it again, now with proper length and verifier */
746 libxfs_buf_relse(sbp);
747
748 error = -libxfs_buf_read_uncached(mbuf.m_ddev_targp, XFS_SB_DADDR,
749 1 << (sb->sb_sectlog - BBSHIFT), 0, &sbp,
750 &xfs_sb_buf_ops);
751 if (error) {
752 do_log(_("%s: couldn't read superblock, error=%d\n"),
753 progname, error);
754 exit(1);
755 }
756 libxfs_buf_relse(sbp);
757
758 mp = libxfs_mount(&mbuf, sb, &xargs, 0);
759 if (mp == NULL) {
760 do_log(_("%s: %s filesystem failed to initialize\n"
761 "%s: Aborting.\n"), progname, source_name, progname);
762 exit(1);
763 } else if (mp->m_sb.sb_inprogress) {
764 do_log(_("%s %s filesystem failed to initialize\n"
765 "%s: Aborting.\n"), progname, source_name, progname);
766 exit(1);
767 } else if (mp->m_sb.sb_logstart == 0) {
768 do_log(_("%s: %s has an external log.\n%s: Aborting.\n"),
769 progname, source_name, progname);
770 exit(1);
771 } else if (mp->m_sb.sb_rextents != 0) {
772 do_log(_("%s: %s has a real-time section.\n"
773 "%s: Aborting.\n"), progname, source_name, progname);
774 exit(1);
775 }
776
777
778 /*
779 * Set up the mount pointer to access the log and check whether the log
780 * is clean. Fail on a dirty or corrupt log in non-duplicate mode
781 * because the log is formatted as part of the copy and we don't want to
782 * destroy data. We also need the current log cycle to format v5
783 * superblock logs correctly.
784 */
785 memset(&xlog, 0, sizeof(struct xlog));
786 mp->m_log = &xlog;
787 c = xlog_is_dirty(mp, mp->m_log);
788 if (!duplicate) {
789 if (c == 1) {
790 do_log(_(
791 "Error: source filesystem log is dirty. Mount the filesystem to replay the\n"
792 "log, unmount and retry xfs_copy.\n"));
793 exit(1);
794 } else if (c < 0) {
795 do_log(_(
796 "Error: could not determine the log head or tail of the source filesystem.\n"
797 "Mount the filesystem to replay the log or run xfs_repair.\n"));
798 exit(1);
799 }
800 }
801
802 source_blocksize = mp->m_sb.sb_blocksize;
803 source_sectorsize = mp->m_sb.sb_sectsize;
804
805 if (wbuf_miniosize == -1)
806 wbuf_miniosize = source_sectorsize;
807
808 ASSERT(source_blocksize % source_sectorsize == 0);
809 ASSERT(source_sectorsize % BBSIZE == 0);
810
811 if (source_blocksize < source_sectorsize) {
812 do_log(_("Error: filesystem block size is smaller than the"
813 " disk sectorsize.\nAborting XFS copy now.\n"));
814 exit(1);
815 }
816
817 first_agbno = XFS_AGFL_BLOCK(mp) + 1;
818
819 /* now open targets */
820
821 open_flags = O_RDWR;
822
823 for (i = 0; i < num_targets; i++) {
824 int write_last_block = 0;
825
826 if (stat(target[i].name, &statbuf) < 0) {
827 /* ok, assume it's a file and create it */
828
829 do_out(_("Creating file %s\n"), target[i].name);
830
831 open_flags |= O_CREAT;
832 if (!buffered_output)
833 open_flags |= O_DIRECT;
834 write_last_block = 1;
835 } else if (S_ISREG(statbuf.st_mode)) {
836 open_flags |= O_TRUNC;
837 if (!buffered_output)
838 open_flags |= O_DIRECT;
839 write_last_block = 1;
840 } else {
841 /*
842 * check to make sure a filesystem isn't mounted
843 * on the device
844 */
845 if (platform_check_ismounted(target[i].name,
846 NULL, &statbuf, 0)) {
847 do_log(_("%s: a filesystem is mounted "
848 "on target device \"%s\".\n"
849 "%s cannot copy to mounted filesystems."
850 " Aborting\n"),
851 progname, target[i].name, progname);
852 exit(1);
853 }
854 }
855
856 target[i].fd = open(target[i].name, open_flags, 0644);
857 if (target[i].fd < 0) {
858 do_log(_("%s: couldn't open target \"%s\"\n"),
859 progname, target[i].name);
860 die_perror();
861 }
862
863 if (write_last_block) {
864 /* ensure regular files are correctly sized */
865
866 if (ftruncate(target[i].fd, mp->m_sb.sb_dblocks *
867 source_blocksize)) {
868 do_log(_("%s: cannot grow data section.\n"),
869 progname);
870 die_perror();
871 }
872 if (platform_test_xfs_fd(target[i].fd)) {
873 if (xfsctl(target[i].name, target[i].fd,
874 XFS_IOC_DIOINFO, &d) < 0) {
875 do_log(
876 _("%s: xfsctl on \"%s\" failed.\n"),
877 progname, target[i].name);
878 die_perror();
879 } else {
880 wbuf_align = max(wbuf_align, d.d_mem);
881 wbuf_size = min(d.d_maxiosz, wbuf_size);
882 wbuf_miniosize = max(d.d_miniosz,
883 wbuf_miniosize);
884 }
885 }
886 } else {
887 char *lb[XFS_MAX_SECTORSIZE] = { NULL };
888 off64_t off;
889
890 /* ensure device files are sufficiently large */
891
892 off = mp->m_sb.sb_dblocks * source_blocksize;
893 off -= sizeof(lb);
894 if (pwrite(target[i].fd, lb, sizeof(lb), off) < 0) {
895 do_log(_("%s: failed to write last block\n"),
896 progname);
897 do_log(_("\tIs target \"%s\" too small?\n"),
898 target[i].name);
899 die_perror();
900 }
901 }
902 }
903
904 /* initialize locks and bufs */
905
906 if (pthread_mutex_init(&glob_masks.mutex, NULL) != 0) {
907 do_log(_("Couldn't initialize global thread mask\n"));
908 die_perror();
909 }
910 glob_masks.num_working = 0;
911
912 if (wbuf_init(&w_buf, wbuf_size, wbuf_align,
913 wbuf_miniosize, 0) == NULL) {
914 do_log(_("Error initializing wbuf 0\n"));
915 die_perror();
916 }
917
918 wblocks = wbuf_size / BBSIZE;
919
920 if (wbuf_init(&btree_buf, max(source_blocksize, wbuf_miniosize),
921 wbuf_align, wbuf_miniosize, 1) == NULL) {
922 do_log(_("Error initializing btree buf 1\n"));
923 die_perror();
924 }
925
926 if (pthread_mutex_init(&mainwait, NULL) != 0) {
927 do_log(_("Error creating main wait mutex.\n"));
928 die_perror();
929 exit(1);
930 }
931 /* need to start out blocking */
932 pthread_mutex_lock(&mainwait);
933
934 /* set up sigchild signal handler */
935
936 signal(SIGCHLD, handler);
937 signal_maskfunc(SIGCHLD, SIG_BLOCK);
938
939 /* make children */
940
941 if ((targ = malloc(num_targets * sizeof(thread_args))) == NULL) {
942 do_log(_("Couldn't malloc space for thread args\n"));
943 die_perror();
944 exit(1);
945 }
946
947 for (i = 0, tcarg = targ; i < num_targets; i++, tcarg++) {
948 if (!duplicate)
949 platform_uuid_generate(&tcarg->uuid);
950 else
951 platform_uuid_copy(&tcarg->uuid, &mp->m_sb.sb_uuid);
952
953 if (pthread_mutex_init(&tcarg->wait, NULL) != 0) {
954 do_log(_("Error creating thread mutex %d\n"), i);
955 die_perror();
956 exit(1);
957 }
958 /* need to start out blocking */
959 pthread_mutex_lock(&tcarg->wait);
960 }
961
962 for (i = 0, tcarg = targ; i < num_targets; i++, tcarg++) {
963 tcarg->id = i;
964 tcarg->fd = target[i].fd;
965
966 target[i].state = ACTIVE;
967 num_threads++;
968
969 if (pthread_create(&target[i].pid, NULL,
970 begin_reader, (void *)tcarg)) {
971 do_log(_("Error creating thread for target %d\n"), i);
972 die_perror();
973 }
974 }
975
976 ASSERT(num_targets == num_threads);
977
978 /* set up statistics */
979
980 num_ags = mp->m_sb.sb_agcount;
981
982 init_bar(mp->m_sb.sb_blocksize / BBSIZE
983 * ((uint64_t)mp->m_sb.sb_dblocks
984 - (uint64_t)mp->m_sb.sb_fdblocks + 10 * num_ags));
985
986 kids = num_targets;
987
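/*
 * Main copy loop: for each AG, copy the AG header, descend the by-block
 * (BNO) free space btree to its leftmost leaf, and then copy every
 * in-use range, i.e. the gaps between consecutive free extents, plus
 * whatever follows the last free extent up to the end of the AG.
 */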
988 for (agno = 0; agno < num_ags && kids > 0; agno++) {
989 /* read in first blocks of the ag */
990
991 read_ag_header(source_fd, agno, &w_buf, &ag_hdr, mp,
992 source_blocksize, source_sectorsize);
993
994 /* set the in_progress bit for the first AG */
995
996 if (agno == 0)
997 ag_hdr.xfs_sb->sb_inprogress = 1;
998
999 /* save what we need (agf) in the btree buffer */
1000
1001 memmove(btree_buf.data, ag_hdr.xfs_agf, source_sectorsize);
1002 ag_hdr.xfs_agf = (xfs_agf_t *) btree_buf.data;
1003 btree_buf.length = source_blocksize;
1004
1005 /* write the ag header out */
1006
1007 write_wbuf();
1008
1009 /* traverse btree until we get to the leftmost leaf node */
1010
1011 bno = be32_to_cpu(ag_hdr.xfs_agf->agf_roots[XFS_BTNUM_BNOi]);
1012 current_level = 0;
1013 btree_levels = be32_to_cpu(ag_hdr.xfs_agf->
1014 agf_levels[XFS_BTNUM_BNOi]);
1015
1016 ag_end = XFS_AGB_TO_DADDR(mp, agno,
1017 be32_to_cpu(ag_hdr.xfs_agf->agf_length) - 1)
1018 + source_blocksize / BBSIZE;
1019
1020 for (;;) {
1021 /* none of this touches the w_buf buffer */
1022
1023 if (current_level >= btree_levels) {
1024 do_log(
1025 _("Error: current level %d >= btree levels %d\n"),
1026 current_level, btree_levels);
1027 exit(1);
1028 }
1029
1030 current_level++;
1031
1032 btree_buf.position = pos = (xfs_off_t)
1033 XFS_AGB_TO_DADDR(mp,agno,bno) << BBSHIFT;
1034 btree_buf.length = source_blocksize;
1035
1036 read_wbuf(source_fd, &btree_buf, mp);
1037 block = (struct xfs_btree_block *)
1038 ((char *)btree_buf.data +
1039 pos - btree_buf.position);
1040
1041 if (be32_to_cpu(block->bb_magic) !=
1042 (xfs_has_crc(mp) ?
1043 XFS_ABTB_CRC_MAGIC : XFS_ABTB_MAGIC)) {
1044 do_log(_("Bad btree magic 0x%x\n"),
1045 be32_to_cpu(block->bb_magic));
1046 exit(1);
1047 }
1048
1049 if (be16_to_cpu(block->bb_level) == 0)
1050 break;
1051
1052 ptr = XFS_ALLOC_PTR_ADDR(mp, block, 1,
1053 mp->m_alloc_mxr[1]);
1054 bno = be32_to_cpu(ptr[0]);
1055 }
1056
1057 /* align first data copy but don't overwrite ag header */
1058
1059 pos = w_buf.position >> BBSHIFT;
1060 length = w_buf.length >> BBSHIFT;
1061 next_begin = pos + length;
1062 ag_begin = next_begin;
1063
1064 ASSERT(w_buf.position % source_sectorsize == 0);
1065
1066 /* handle the rest of the ag */
1067
1068 for (;;) {
1069 if (be16_to_cpu(block->bb_level) != 0) {
1070 do_log(
1071 _("WARNING: source filesystem inconsistent.\n"));
1072 do_log(
1073 _(" A leaf btree rec isn't a leaf. Aborting now.\n"));
1074 exit(1);
1075 }
1076
1077 rec_ptr = XFS_ALLOC_REC_ADDR(mp, block, 1);
1078 for (i = 0; i < be16_to_cpu(block->bb_numrecs);
1079 i++, rec_ptr++) {
1080 /* calculate in daddr's */
1081
1082 begin = next_begin;
1083
1084 /*
1085 * protect against pathological case of a
1086 * hole right after the ag header in a
1087 * mis-aligned case
1088 */
1089
1090 if (begin < ag_begin)
1091 begin = ag_begin;
1092
1093 /*
1094 * round size up to ensure we copy a
1095 * range bigger than required
1096 */
1097
1098 sizeb = XFS_AGB_TO_DADDR(mp, agno,
1099 be32_to_cpu(rec_ptr->ar_startblock)) -
1100 begin;
1101 size = roundup(sizeb << BBSHIFT, wbuf_miniosize);
1102 if (size > 0) {
1103 /* copy extent */
1104
1105 w_buf.position = (xfs_off_t)
1106 begin << BBSHIFT;
1107
1108 while (size > 0) {
1109 /*
1110 * let lower layer do alignment
1111 */
1112 if (size > w_buf.size) {
1113 w_buf.length = w_buf.size;
1114 size -= w_buf.size;
1115 sizeb -= wblocks;
1116 numblocks += wblocks;
1117 } else {
1118 w_buf.length = size;
1119 numblocks += sizeb;
1120 size = 0;
1121 }
1122
1123 read_wbuf(source_fd, &w_buf, mp);
1124 write_wbuf();
1125
1126 w_buf.position += w_buf.length;
1127
1128 howfar = bump_bar(
1129 howfar, numblocks);
1130 }
1131 }
1132
1133 /* round next starting point down */
1134
1135 new_begin = XFS_AGB_TO_DADDR(mp, agno,
1136 be32_to_cpu(rec_ptr->ar_startblock) +
1137 be32_to_cpu(rec_ptr->ar_blockcount));
1138 next_begin = rounddown(new_begin,
1139 w_buf.min_io_size >> BBSHIFT);
1140 }
1141
1142 if (be32_to_cpu(block->bb_u.s.bb_rightsib) == NULLAGBLOCK)
1143 break;
1144
1145 /* read in next btree record block */
1146
1147 btree_buf.position = pos = (xfs_off_t)
1148 XFS_AGB_TO_DADDR(mp, agno, be32_to_cpu(
1149 block->bb_u.s.bb_rightsib)) << BBSHIFT;
1150 btree_buf.length = source_blocksize;
1151
1152 /* let read_wbuf handle alignment */
1153
1154 read_wbuf(source_fd, &btree_buf, mp);
1155
1156 block = (struct xfs_btree_block *)
1157 ((char *) btree_buf.data +
1158 pos - btree_buf.position);
1159
1160 ASSERT(be32_to_cpu(block->bb_magic) == XFS_ABTB_MAGIC ||
1161 be32_to_cpu(block->bb_magic) == XFS_ABTB_CRC_MAGIC);
1162 }
1163
1164 /*
1165 * write out range of used blocks after last range
1166 * of free blocks in AG
1167 */
1168 if (next_begin < ag_end) {
1169 begin = next_begin;
1170
1171 sizeb = ag_end - begin;
1172 size = roundup(sizeb << BBSHIFT, wbuf_miniosize);
1173
1174 if (size > 0) {
1175 /* copy extent */
1176
1177 w_buf.position = (xfs_off_t) begin << BBSHIFT;
1178
1179 while (size > 0) {
1180 /*
1181 * let lower layer do alignment
1182 */
1183 if (size > w_buf.size) {
1184 w_buf.length = w_buf.size;
1185 size -= w_buf.size;
1186 sizeb -= wblocks;
1187 numblocks += wblocks;
1188 } else {
1189 w_buf.length = size;
1190 numblocks += sizeb;
1191 size = 0;
1192 }
1193
1194 read_wbuf(source_fd, &w_buf, mp);
1195 write_wbuf();
1196
1197 w_buf.position += w_buf.length;
1198
1199 howfar = bump_bar(howfar, numblocks);
1200 }
1201 }
1202 }
1203 }
1204
1205 if (kids > 0) {
1206 if (!duplicate)
1207 /* write a clean log using the specified UUID */
1208 format_logs(mp);
1209 else
1210 num_ags = 1;
1211
1212 /* reread and rewrite superblocks (UUID and in-progress) */
1213 /* [backwards, so inprogress bit only updated when done] */
1214
1215 for (i = num_ags - 1; i >= 0; i--) {
1216 read_ag_header(source_fd, i, &w_buf, &ag_hdr, mp,
1217 source_blocksize, source_sectorsize);
1218 if (i == 0)
1219 ag_hdr.xfs_sb->sb_inprogress = 0;
1220
1221 /* do each thread in turn, each has its own UUID */
1222
1223 for (j = 0, tcarg = targ; j < num_targets; j++) {
1224 sb_update_uuid(mp, &ag_hdr, tcarg);
1225 do_write(tcarg, NULL);
1226 tcarg++;
1227 }
1228 }
1229
1230 bump_bar(100, 0);
1231 }
1232
1233 check_errors();
1234 libxfs_umount(mp);
1235 libxfs_destroy(&xargs);
1236
1237 return 0;
1238 }
1239
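/*
 * Callback handed to libxfs_log_header(): returns the next position in
 * the log buffer, flushing the buffer to the owning target and starting
 * over at the beginning once it is full.
 */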
1240 static char *
1241 next_log_chunk(char *p, int offset, void *private)
1242 {
1243 wbuf *buf = (wbuf *)private;
1244
1245 if (buf->length < (int)(p - buf->data) + offset) {
1246 /* need to flush this one, then start afresh */
1247
1248 do_write(buf->owner, NULL);
1249 memset(buf->data, 0, buf->length);
1250 return buf->data;
1251 }
1252 return p + offset;
1253 }
1254
1255 /*
1256 * Writes a log header at the start of the log (with the real
1257 * filesystem UUID embedded into it), and writes to all targets.
1258 *
1259 * Returns the next buffer-length-aligned disk address.
1260 */
1261 xfs_off_t
1262 write_log_header(int fd, wbuf *buf, xfs_mount_t *mp)
1263 {
1264 char *p = buf->data;
1265 xfs_off_t logstart;
1266 int offset;
1267
1268 logstart = XFS_FSB_TO_DADDR(mp, mp->m_sb.sb_logstart) << BBSHIFT;
1269 buf->position = rounddown(logstart, (xfs_off_t)buf->length);
1270
1271 memset(p, 0, buf->size);
1272 if (logstart % buf->length) { /* unaligned */
1273 read_wbuf(fd, buf, mp);
1274 offset = logstart - buf->position;
1275 p += offset;
1276 memset(p, 0, buf->length - offset);
1277 }
1278
1279 offset = libxfs_log_header(p, &buf->owner->uuid,
1280 xfs_has_logv2(mp) ? 2 : 1,
1281 mp->m_sb.sb_logsunit, XLOG_FMT, NULLCOMMITLSN,
1282 NULLCOMMITLSN, next_log_chunk, buf);
1283 do_write(buf->owner, NULL);
1284
1285 return roundup(logstart + offset, buf->length);
1286 }
1287
1288 /*
1289 * May do an aligned read of the last buffer in the log (& zero
1290 * the start of that buffer). Returns the disk address at the
1291 * end of last aligned buffer in the log.
1292 */
1293 xfs_off_t
1294 write_log_trailer(int fd, wbuf *buf, xfs_mount_t *mp)
1295 {
1296 xfs_off_t logend;
1297 int offset;
1298
1299 logend = XFS_FSB_TO_DADDR(mp, mp->m_sb.sb_logstart) << BBSHIFT;
1300 logend += XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks);
1301
1302 buf->position = rounddown(logend, (xfs_off_t)buf->length);
1303
1304 if (logend % buf->length) { /* unaligned */
1305 read_wbuf(fd, buf, mp);
1306 offset = (int)(logend - buf->position);
1307 memset(buf->data, 0, offset);
1308 do_write(buf->owner, NULL);
1309 }
1310
1311 return buf->position;
1312 }
1313
1314 /*
1315 * Clear a log by writing a record at the head, the tail and zeroing everything
1316 * in between.
1317 */
1318 static void
1319 clear_log(
1320 struct xfs_mount *mp,
1321 thread_args *tcarg)
1322 {
1323 xfs_off_t pos;
1324 xfs_off_t end_pos;
1325
1326 w_buf.owner = tcarg;
1327 w_buf.length = rounddown(w_buf.size, w_buf.min_io_size);
1328 pos = write_log_header(source_fd, &w_buf, mp);
1329 end_pos = write_log_trailer(source_fd, &w_buf, mp);
1330 w_buf.position = pos;
1331 memset(w_buf.data, 0, w_buf.length);
1332
1333 while (w_buf.position < end_pos) {
1334 do_write(tcarg, NULL);
1335 w_buf.position += w_buf.length;
1336 }
1337 }
1338
1339 /*
1340 * Format the log to a particular cycle number. This is required for version 5
1341 * superblock filesystems to provide metadata LSN validity guarantees.
1342 */
1343 static void
1344 format_log(
1345 struct xfs_mount *mp,
1346 thread_args *tcarg,
1347 wbuf *buf)
1348 {
1349 int logstart;
1350 int length;
1351 int cycle = XLOG_INIT_CYCLE;
1352
1353 buf->owner = tcarg;
1354 buf->length = buf->size;
1355 buf->position = XFS_FSB_TO_DADDR(mp, mp->m_sb.sb_logstart) << BBSHIFT;
1356
1357 logstart = XFS_FSB_TO_BB(mp, mp->m_sb.sb_logstart);
1358 length = XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
1359
1360 /*
1361 * Bump the cycle number on v5 superblock filesystems to guarantee that
1362 * all existing metadata LSNs are valid (behind the current LSN) on the
1363 * target fs.
1364 */
1365 if (xfs_has_crc(mp))
1366 cycle = mp->m_log->l_curr_cycle + 1;
1367
1368 /*
1369 * Format the entire log into the memory buffer and write it out. If the
1370 * write fails, mark the target inactive so the failure is reported.
1371 */
1372 libxfs_log_clear(NULL, buf->data, logstart, length, &buf->owner->uuid,
1373 xfs_has_logv2(mp) ? 2 : 1,
1374 mp->m_sb.sb_logsunit, XLOG_FMT, cycle, true);
1375 if (do_write(buf->owner, buf))
1376 target[tcarg->id].state = INACTIVE;
1377 }
1378
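/*
 * Give every target a clean log.  V5 (CRC-enabled) filesystems get a
 * fully formatted log at a bumped cycle number so existing metadata
 * LSNs remain valid; older filesystems just get fresh head and tail
 * records with the rest of the log zeroed.
 */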
1379 static int
1380 format_logs(
1381 struct xfs_mount *mp)
1382 {
1383 thread_args *tcarg;
1384 int i;
1385 wbuf logbuf;
1386 int logsize;
1387
1388 if (xfs_has_crc(mp)) {
1389 logsize = XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks);
1390 if (!wbuf_init(&logbuf, logsize, w_buf.data_align,
1391 w_buf.min_io_size, w_buf.id))
1392 return -ENOMEM;
1393 }
1394
1395 for (i = 0, tcarg = targ; i < num_targets; i++) {
1396 if (xfs_has_crc(mp))
1397 format_log(mp, tcarg, &logbuf);
1398 else
1399 clear_log(mp, tcarg);
1400 tcarg++;
1401 }
1402
1403 if (xfs_has_crc(mp))
1404 free(logbuf.data);
1405
1406 return 0;
1407 }