xfsprogs: Add new sb_meta_uuid field, update userspace tools to manipulate it
copy/xfs_copy.c
1 /*
2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18
19 #include <xfs/libxfs.h>
20 #include <sys/stat.h>
21 #include <sys/wait.h>
22 #include <pthread.h>
23 #include <signal.h>
24 #include <stdarg.h>
25 #include "xfs_copy.h"
26
27 #define rounddown(x, y) (((x)/(y))*(y))
28 #define uuid_equal(s,d) (platform_uuid_compare((s),(d)) == 0)
29
30 extern int platform_check_ismounted(char *, char *, struct stat64 *, int);
31
32 int logfd;
33 char *logfile_name;
34 FILE *logerr;
35 char LOGFILE_NAME[] = "/var/tmp/xfs_copy.log.XXXXXX";
36
37 char *source_name;
38 int source_fd;
39
40 unsigned int source_blocksize; /* source filesystem blocksize */
41 unsigned int source_sectorsize; /* source disk sectorsize */
42
43 xfs_agblock_t first_agbno;
44
45 __uint64_t barcount[11];
46
47 unsigned int num_targets;
48 target_control *target;
49
50 wbuf w_buf;
51 wbuf btree_buf;
52
53 pid_t parent_pid;
54 unsigned int kids;
55
56 thread_control glob_masks;
57 thread_args *targ;
58
59 pthread_mutex_t mainwait;
60
61 #define ACTIVE 1
62 #define INACTIVE 2
63
64 xfs_off_t write_log_trailer(int fd, wbuf *w, xfs_mount_t *mp);
65 xfs_off_t write_log_header(int fd, wbuf *w, xfs_mount_t *mp);
66
67 /* general purpose message reporting routine */
68
69 #define OUT 0x01 /* use stdout stream */
70 #define ERR 0x02 /* use stderr stream */
71 #define LOG 0x04 /* use logerr stream */
72 #define PRE 0x08 /* append strerror string */
73 #define LAST 0x10 /* final message we print */
74
75 void
76 do_message(int flags, int code, const char *fmt, ...)
77 {
78 va_list ap;
79 int eek = 0;
80
81 if (flags & LOG) {
82 va_start(ap, fmt);
83 if (vfprintf(logerr, fmt, ap) <= 0)
84 eek = 1;
85 va_end(ap);
86 }
87 if (eek)
88 flags |= ERR; /* failed, force stderr */
89 if (flags & ERR) {
90 va_start(ap, fmt);
91 vfprintf(stderr, fmt, ap);
92 va_end(ap);
93 } else if (flags & OUT) {
94 va_start(ap, fmt);
95 vfprintf(stdout, fmt, ap);
96 va_end(ap);
97 }
98
99 if (flags & PRE) {
100 do_message(flags & ~PRE, 0, ": %s\n", strerror(code));
101 if (flags & LAST)
102 fprintf(stderr,
103 _("Check logfile \"%s\" for more details\n"),
104 logfile_name);
105 }
106
107 /* logfile is broken, force a write to stderr */
108 if (eek) {
109 fprintf(stderr, _("%s: could not write to logfile \"%s\".\n"),
110 progname, logfile_name);
111 fprintf(stderr,
112 _("Aborting XFS copy -- logfile error -- reason: %s\n"),
113 strerror(errno));
114 pthread_exit(NULL);
115 }
116 }
117
118 #define do_out(args...) do_message(OUT|LOG, 0, ## args)
119 #define do_log(args...) do_message(ERR|LOG, 0, ## args)
120 #define do_warn(args...) do_message(LOG, 0, ## args)
121 #define do_error(e,s) do_message(ERR|LOG|PRE, e, s)
122 #define do_fatal(e,s) do_message(ERR|LOG|PRE|LAST, e, s)
123 #define do_vfatal(e,s,args...) do_message(ERR|LOG|PRE|LAST, e, s, ## args)
124 #define die_perror() \
125 do { \
126 do_message(ERR|LOG|PRE|LAST, errno, \
127 _("Aborting XFS copy - reason")); \
128 exit(1); \
129 } while (0)
130
131 void
132 check_errors(void)
133 {
134 int i, first_error = 0;
135
136 for (i = 0; i < num_targets; i++) {
137 if (target[i].state == INACTIVE) {
138 if (first_error == 0) {
139 first_error++;
140 do_log(
141 _("THE FOLLOWING COPIES FAILED TO COMPLETE\n"));
142 }
143 do_log(" %s -- ", target[i].name);
144 if (target[i].err_type == 0)
145 do_log(_("write error"));
146 else
147 do_log(_("lseek64 error"));
148 do_log(_(" at offset %lld\n"), target[i].position);
149 }
150 }
151 if (first_error == 0) {
152 fprintf(stdout, _("All copies completed.\n"));
153 fflush(NULL);
154 } else {
155 fprintf(stderr, _("See \"%s\" for more details.\n"),
156 logfile_name);
157 exit(1);
158 }
159 }
160
161 /*
162 * don't have to worry about alignment and mins because those
163 * are taken care of when the buffer's read in
164 */
165 int
166 do_write(thread_args *args)
167 {
168 int res, error = 0;
169
170 if (target[args->id].position != w_buf.position) {
171 if (lseek64(args->fd, w_buf.position, SEEK_SET) < 0) {
172 error = target[args->id].err_type = 1;
173 } else {
174 target[args->id].position = w_buf.position;
175 }
176 }
177
178 if ((res = write(target[args->id].fd, w_buf.data,
179 w_buf.length)) == w_buf.length) {
180 target[args->id].position += res;
181 } else {
182 error = 2;
183 }
184
185 if (error) {
186 target[args->id].error = errno;
187 target[args->id].position = w_buf.position;
188 }
189 return error;
190 }
191
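/*
 * Writer thread body (despite the name, these threads only write; the
 * main thread does all the reading).  Each target gets one thread that
 * loops forever: it blocks on its per-thread mutex (args->wait) until
 * the main thread has filled w_buf and released it via write_wbuf(),
 * writes the buffer to its own target, then decrements
 * glob_masks.num_working; the last writer to finish unlocks the main
 * thread's "mainwait" mutex.  On an I/O error the thread marks its
 * target INACTIVE and exits, and the main thread logs the failure from
 * the SIGCHLD handler.
 */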
192 void *
193 begin_reader(void *arg)
194 {
195 thread_args *args = arg;
196
197 for (;;) {
198 pthread_mutex_lock(&args->wait);
199 if (do_write(args))
200 goto handle_error;
201 pthread_mutex_lock(&glob_masks.mutex);
202 if (--glob_masks.num_working == 0)
203 pthread_mutex_unlock(&mainwait);
204 pthread_mutex_unlock(&glob_masks.mutex);
205 }
206 /* NOTREACHED */
207
208 handle_error:
209 /* error will be logged by primary thread */
210
211 pthread_mutex_lock(&glob_masks.mutex);
212 target[args->id].state = INACTIVE;
213 if (--glob_masks.num_working == 0)
214 pthread_mutex_unlock(&mainwait);
215 pthread_mutex_unlock(&glob_masks.mutex);
216 pthread_exit(NULL);
217 return NULL;
218 }
219
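/*
 * SIGCHLD handler: a writer thread has terminated.  Locate the target it
 * belonged to; if the thread marked itself INACTIVE it hit an I/O error,
 * which is logged here, and the copy continues with the remaining
 * targets (aborting only when none are left).  Any other termination is
 * treated as fatal.
 */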
220 void
221 handler(int sig)
222 {
223 pid_t pid;
224 int status, i;
225
226 pid = wait(&status);
227
228 kids--;
229
230 for (i = 0; i < num_targets; i++) {
231 if (target[i].pid == pid) {
232 if (target[i].state == INACTIVE) {
233 /* thread got an I/O error */
234
235 if (target[i].err_type == 0) {
236 do_warn(
237 _("%s: write error on target %d \"%s\" at offset %lld\n"),
238 progname, i, target[i].name,
239 target[i].position);
240 } else {
241 do_warn(
242 _("%s: lseek64 error on target %d \"%s\" at offset %lld\n"),
243 progname, i, target[i].name,
244 target[i].position);
245 }
246
247 do_vfatal(target[i].error,
248 _("Aborting target %d - reason"), i);
249
250 if (kids == 0) {
251 do_log(
252 _("Aborting XFS copy - no more targets.\n"));
253 check_errors();
254 pthread_exit(NULL);
255 }
256
257 signal(SIGCHLD, handler);
258 return;
259 } else {
260 /* it just croaked it bigtime, log it */
261
262 do_warn(
263 _("%s: thread %d died unexpectedly, target \"%s\" incomplete\n"),
264 progname, i, target[i].name);
265 do_warn(_("%s: offset was probably %lld\n"),
266 progname, target[i].position);
267 do_fatal(target[i].error,
268 _("Aborting XFS copy - reason"));
269 pthread_exit(NULL);
270 }
271 }
272 }
273
274 /* unknown child -- something very wrong */
275
276 do_warn(_("%s: Unknown child died (should never happen!)\n"), progname);
277 die_perror();
278 pthread_exit(NULL);
279 signal(SIGCHLD, handler);
280 }
281
282 void
283 usage(void)
284 {
285 fprintf(stderr,
286 _("Usage: %s [-bdV] [-L logfile] source target [target ...]\n"),
287 progname);
288 exit(1);
289 }
290
291 void
292 init_bar(__uint64_t source_blocks)
293 {
294 int i;
295
296 for (i = 0; i < 11; i++)
297 barcount[i] = (source_blocks/10)*i;
298 }
299
300 int
301 bump_bar(int tenths, __uint64_t numblocks)
302 {
303 static char *bar[11] = {
304 " 0% ",
305 " ... 10% ",
306 " ... 20% ",
307 " ... 30% ",
308 " ... 40% ",
309 " ... 50% ",
310 " ... 60% ",
311 " ... 70% ",
312 " ... 80% ",
313 " ... 90% ",
314 " ... 100%\n\n",
315 };
316
317 if (tenths > 10) {
318 printf("%s", bar[10]);
319 fflush(stdout);
320 } else {
321 while (tenths < 10 && numblocks > barcount[tenths]) {
322 printf("%s", bar[tenths]);
323 fflush(stdout);
324 tenths++;
325 }
326 }
327 return tenths;
328 }
329
330 static xfs_off_t source_position = -1;
331
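/*
 * Allocate an aligned buffer suitable for direct I/O.  If memalign()
 * cannot satisfy the requested size, keep halving it, giving up (and
 * returning NULL) once it would drop below the device's minimum I/O
 * size.
 */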
332 wbuf *
333 wbuf_init(wbuf *buf, int data_size, int data_align, int min_io_size, int id)
334 {
335 ASSERT(data_size % BBSIZE == 0);
336 while ((buf->data = memalign(data_align, data_size)) == NULL) {
337 data_size >>= 1;
338 if (data_size < min_io_size)
339 return NULL;
340 }
341 ASSERT(min_io_size % BBSIZE == 0);
342 buf->min_io_size = min_io_size;
343 buf->size = data_size;
344 buf->id = id;
345 return buf;
346 }
347
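/*
 * Read buf->length bytes from the source at buf->position, honoring
 * direct I/O constraints: the position is rounded down and the length
 * rounded up to min_io_size, so the caller may get back a little more
 * data (starting slightly earlier) than requested.  A short read is only
 * tolerated when it lands exactly on the end of the data device.
 */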
348 void
349 read_wbuf(int fd, wbuf *buf, xfs_mount_t *mp)
350 {
351 int res = 0;
352 xfs_off_t lres = 0;
353 xfs_off_t newpos;
354 size_t diff;
355
356 newpos = rounddown(buf->position, (xfs_off_t) buf->min_io_size);
357
358 if (newpos != buf->position) {
359 diff = buf->position - newpos;
360 buf->position = newpos;
361
362 buf->length += diff;
363 }
364
365 if (source_position != buf->position) {
366 lres = lseek64(fd, buf->position, SEEK_SET);
367 if (lres < 0LL) {
368 do_warn(_("%s: lseek64 failure at offset %lld\n"),
369 progname, source_position);
370 die_perror();
371 }
372 source_position = buf->position;
373 }
374
375 ASSERT(source_position % source_sectorsize == 0);
376
377 /* round up length for direct I/O if necessary */
378
379 if (buf->length % buf->min_io_size != 0)
380 buf->length = roundup(buf->length, buf->min_io_size);
381
382 if (buf->length > buf->size) {
383 do_warn(_("assert error: buf->length = %d, buf->size = %d\n"),
384 buf->length, buf->size);
385 exit(1);
386 }
387
388 if ((res = read(fd, buf->data, buf->length)) < 0) {
389 do_warn(_("%s: read failure at offset %lld\n"),
390 progname, source_position);
391 die_perror();
392 }
393
394 if (res < buf->length &&
395 source_position + res == mp->m_sb.sb_dblocks * source_blocksize)
396 res = buf->length;
397 else
398 ASSERT(res == buf->length);
399 source_position += res;
400 buf->length = res;
401 }
402
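/*
 * Read the header area of an allocation group (superblock, AGF, AGI and
 * AGFL, one sector each) into buf and point the ag_header fields at the
 * individual structures, allowing for any alignment padding that
 * read_wbuf() placed in front of the requested position.
 */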
403 void
404 read_ag_header(int fd, xfs_agnumber_t agno, wbuf *buf, ag_header_t *ag,
405 xfs_mount_t *mp, int blocksize, int sectorsize)
406 {
407 xfs_daddr_t off;
408 int length;
409 xfs_off_t newpos;
410 size_t diff;
411
412 /* initial settings */
413
414 diff = 0;
415 off = XFS_AG_DADDR(mp, agno, XFS_SB_DADDR);
416 buf->position = (xfs_off_t) off * (xfs_off_t) BBSIZE;
417 length = buf->length = first_agbno * blocksize;
418 if (length == 0) {
419 do_log(_("ag header buffer invalid!\n"));
420 exit(1);
421 }
422
423 /* handle alignment stuff */
424
425 newpos = rounddown(buf->position, (xfs_off_t) buf->min_io_size);
426 if (newpos != buf->position) {
427 diff = buf->position - newpos;
428 buf->position = newpos;
429 buf->length += diff;
430 }
431
432 /* round up length for direct I/O if necessary */
433
434 if (buf->length % buf->min_io_size != 0)
435 buf->length = roundup(buf->length, buf->min_io_size);
436
437 read_wbuf(fd, buf, mp);
438 ASSERT(buf->length >= length);
439
440 ag->xfs_sb = (xfs_dsb_t *) (buf->data + diff);
441 ASSERT(be32_to_cpu(ag->xfs_sb->sb_magicnum) == XFS_SB_MAGIC);
442 ag->xfs_agf = (xfs_agf_t *) (buf->data + diff + sectorsize);
443 ASSERT(be32_to_cpu(ag->xfs_agf->agf_magicnum) == XFS_AGF_MAGIC);
444 ag->xfs_agi = (xfs_agi_t *) (buf->data + diff + 2 * sectorsize);
445 ASSERT(be32_to_cpu(ag->xfs_agi->agi_magicnum) == XFS_AGI_MAGIC);
446 ag->xfs_agfl = (xfs_agfl_t *) (buf->data + diff + 3 * sectorsize);
447 }
448
449
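/*
 * Broadcast the current contents of w_buf to every still-active target:
 * count the active writers into glob_masks.num_working, wake each one by
 * unlocking its per-thread mutex, then sleep on "mainwait" until the
 * last writer signals completion.  SIGCHLD is unblocked only while we
 * wait, so a dying writer is reaped at a safe point.
 */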
450 void
451 write_wbuf(void)
452 {
453 int i;
454
455 /* verify target threads */
456 for (i = 0; i < num_targets; i++)
457 if (target[i].state != INACTIVE)
458 glob_masks.num_working++;
459
460 /* release target threads */
461 for (i = 0; i < num_targets; i++)
462 if (target[i].state != INACTIVE)
463 pthread_mutex_unlock(&targ[i].wait); /* wake up */
464
465 sigrelse(SIGCHLD);
466 pthread_mutex_lock(&mainwait);
467 sighold(SIGCHLD);
468 }
469
470 void
471 sb_update_uuid(
472 xfs_sb_t *sb,
473 ag_header_t *ag_hdr,
474 thread_args *tcarg)
475 {
476 /*
477 * If this filesystem has CRCs, the original UUID is stamped into
478 * all metadata. If we are changing the UUID in the copy, we need
479 * to copy the original UUID into the meta_uuid slot and set the
480 * incompat flag if that hasn't already been done.
481 */
482 if (!uuid_equal(&tcarg->uuid, &ag_hdr->xfs_sb->sb_uuid) &&
483 xfs_sb_version_hascrc(sb) && !xfs_sb_version_hasmetauuid(sb)) {
484 __be32 feat;
485
486 feat = be32_to_cpu(ag_hdr->xfs_sb->sb_features_incompat);
487 feat |= XFS_SB_FEAT_INCOMPAT_META_UUID;
488 ag_hdr->xfs_sb->sb_features_incompat = cpu_to_be32(feat);
489 platform_uuid_copy(&ag_hdr->xfs_sb->sb_meta_uuid,
490 &ag_hdr->xfs_sb->sb_uuid);
491 }
492
493 platform_uuid_copy(&ag_hdr->xfs_sb->sb_uuid, &tcarg->uuid);
494
495 /* We may have changed the UUID, so update the superblock CRC */
496 if (xfs_sb_version_hascrc(sb))
497 xfs_update_cksum((char *)ag_hdr->xfs_sb, sb->sb_sectsize,
498 XFS_SB_CRC_OFF);
499 }
500
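/*
 * Overall flow of the copy: parse options and open the log file, open
 * and sanity-check the source filesystem through libxfs, open every
 * target (growing regular files to the full filesystem size, or probing
 * the last block of target devices), and start one writer thread per
 * target.  For each allocation group, walk the by-block (BNO) free space
 * btree; the gaps between free extents are the in-use ranges, and only
 * those are read from the source and fanned out to the targets.
 * Finally, a clean log is written with a fresh per-target UUID (skipped
 * with -d), and the superblocks are rewritten from the last AG back to
 * the first, so clearing the in-progress flag in AG 0 is the final step.
 */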
501 int
502 main(int argc, char **argv)
503 {
504 int i, j;
505 int howfar = 0;
506 int open_flags;
507 xfs_off_t pos, end_pos;
508 size_t length;
509 int c;
510 __uint64_t size, sizeb;
511 __uint64_t numblocks = 0;
512 int wblocks = 0;
513 int num_threads = 0;
514 struct dioattr d;
515 int wbuf_size;
516 int wbuf_align;
517 int wbuf_miniosize;
518 int source_is_file = 0;
519 int buffered_output = 0;
520 int duplicate = 0;
521 uint btree_levels, current_level;
522 ag_header_t ag_hdr;
523 xfs_mount_t *mp;
524 xfs_mount_t mbuf;
525 xfs_buf_t *sbp;
526 xfs_sb_t *sb;
527 xfs_agnumber_t num_ags, agno;
528 xfs_agblock_t bno;
529 xfs_daddr_t begin, next_begin, ag_begin, new_begin, ag_end;
530 struct xfs_btree_block *block;
531 xfs_alloc_ptr_t *ptr;
532 xfs_alloc_rec_t *rec_ptr;
533 extern char *optarg;
534 extern int optind;
535 libxfs_init_t xargs;
536 thread_args *tcarg;
537 struct stat64 statbuf;
538
539 progname = basename(argv[0]);
540
541 setlocale(LC_ALL, "");
542 bindtextdomain(PACKAGE, LOCALEDIR);
543 textdomain(PACKAGE);
544
545 while ((c = getopt(argc, argv, "bdL:V")) != EOF) {
546 switch (c) {
547 case 'b':
548 buffered_output = 1;
549 break;
550 case 'd':
551 duplicate = 1;
552 break;
553 case 'L':
554 logfile_name = optarg;
555 break;
556 case 'V':
557 printf(_("%s version %s\n"), progname, VERSION);
558 exit(0);
559 case '?':
560 usage();
561 }
562 }
563
564 if (argc - optind < 2)
565 usage();
566
567 if (logfile_name) {
568 logfd = open(logfile_name, O_CREAT|O_WRONLY|O_EXCL, 0600);
569 } else {
570 logfile_name = LOGFILE_NAME;
571 logfd = mkstemp(logfile_name);
572 }
573
574 if (logfd < 0) {
575 fprintf(stderr, _("%s: couldn't open log file \"%s\"\n"),
576 progname, logfile_name);
577 perror(_("Aborting XFS copy - reason"));
578 exit(1);
579 }
580
581 if ((logerr = fdopen(logfd, "w")) == NULL) {
582 fprintf(stderr, _("%s: couldn't set up logfile stream\n"),
583 progname);
584 perror(_("Aborting XFS copy - reason"));
585 exit(1);
586 }
587
588 source_name = argv[optind];
589 source_fd = -1;
590 optind++;
591
592 num_targets = argc - optind;
593 if ((target = malloc(sizeof(target_control) * num_targets)) == NULL) {
594 do_log(_("Couldn't allocate target array\n"));
595 die_perror();
596 }
597 for (i = 0; optind < argc; i++, optind++) {
598 target[i].name = argv[optind];
599 target[i].fd = -1;
600 target[i].position = -1;
601 target[i].state = INACTIVE;
602 target[i].error = 0;
603 target[i].err_type = 0;
604 }
605
606 parent_pid = getpid();
607
608 /* open up source -- is it a file? */
609
610 open_flags = O_RDONLY;
611
612 if ((source_fd = open(source_name, open_flags)) < 0) {
613 do_log(_("%s: couldn't open source \"%s\"\n"),
614 progname, source_name);
615 die_perror();
616 }
617
618 if (fstat64(source_fd, &statbuf) < 0) {
619 do_log(_("%s: couldn't stat source \"%s\"\n"),
620 progname, source_name);
621 die_perror();
622 }
623
624 if (S_ISREG(statbuf.st_mode))
625 source_is_file = 1;
626
627 if (source_is_file && platform_test_xfs_fd(source_fd)) {
628 if (fcntl(source_fd, F_SETFL, open_flags | O_DIRECT) < 0) {
629 do_log(_("%s: Cannot set direct I/O flag on \"%s\".\n"),
630 progname, source_name);
631 die_perror();
632 }
633 if (xfsctl(source_name, source_fd, XFS_IOC_DIOINFO, &d) < 0) {
634 do_log(_("%s: xfsctl on file \"%s\" failed.\n"),
635 progname, source_name);
636 die_perror();
637 }
638
639 wbuf_align = d.d_mem;
640 wbuf_size = MIN(d.d_maxiosz, 1 * 1024 * 1024);
641 wbuf_miniosize = d.d_miniosz;
642 } else {
643 /* set arbitrary I/O params, miniosize at least 1 disk block */
644
645 wbuf_align = getpagesize();
646 wbuf_size = 1 * 1024 * 1024;
647 wbuf_miniosize = -1; /* set after mounting source fs */
648 }
649
650 if (!source_is_file) {
651 /*
652 * check to make sure a filesystem isn't mounted
653 * on the device
654 */
655 if (platform_check_ismounted(source_name, NULL, &statbuf, 0)) {
656 do_log(
657 _("%s: Warning -- a filesystem is mounted on the source device.\n"),
658 progname);
659 do_log(
660 _("\t\tGenerated copies may be corrupt unless the source is\n"));
661 do_log(
662 _("\t\tunmounted or mounted read-only. Copy proceeding...\n"));
663 }
664 }
665
666 /* prepare the libxfs_init structure */
667
668 memset(&xargs, 0, sizeof(xargs));
669 xargs.isdirect = LIBXFS_DIRECT;
670 xargs.isreadonly = LIBXFS_ISREADONLY;
671
672 if (source_is_file) {
673 xargs.dname = source_name;
674 xargs.disfile = 1;
675 } else
676 xargs.volname = source_name;
677
678 if (!libxfs_init(&xargs)) {
679 do_log(_("%s: couldn't initialize XFS library\n"
680 "%s: Aborting.\n"), progname, progname);
681 exit(1);
682 }
683
684 /* prepare the mount structure */
685
686 memset(&mbuf, 0, sizeof(xfs_mount_t));
687 libxfs_buftarg_init(&mbuf, xargs.ddev, xargs.logdev, xargs.rtdev);
688 sbp = libxfs_readbuf(mbuf.m_ddev_targp, XFS_SB_DADDR, 1, 0,
689 &xfs_sb_buf_ops);
690 sb = &mbuf.m_sb;
691 libxfs_sb_from_disk(sb, XFS_BUF_TO_SBP(sbp));
692
693 mp = libxfs_mount(&mbuf, sb, xargs.ddev, xargs.logdev, xargs.rtdev, 0);
694 if (mp == NULL) {
695 do_log(_("%s: %s filesystem failed to initialize\n"
696 "%s: Aborting.\n"), progname, source_name, progname);
697 exit(1);
698 } else if (mp->m_sb.sb_inprogress) {
699 do_log(_("%s: %s filesystem failed to initialize\n"
700 "%s: Aborting.\n"), progname, source_name, progname);
701 exit(1);
702 } else if (mp->m_sb.sb_logstart == 0) {
703 do_log(_("%s: %s has an external log.\n%s: Aborting.\n"),
704 progname, source_name, progname);
705 exit(1);
706 } else if (mp->m_sb.sb_rextents != 0) {
707 do_log(_("%s: %s has a real-time section.\n"
708 "%s: Aborting.\n"), progname, source_name, progname);
709 exit(1);
710 }
711
712 source_blocksize = mp->m_sb.sb_blocksize;
713 source_sectorsize = mp->m_sb.sb_sectsize;
714
715 if (wbuf_miniosize == -1)
716 wbuf_miniosize = source_sectorsize;
717
718 ASSERT(source_blocksize % source_sectorsize == 0);
719 ASSERT(source_sectorsize % BBSIZE == 0);
720
721 if (source_blocksize < source_sectorsize) {
722 do_log(_("Error: filesystem block size is smaller than the"
723 " disk sectorsize.\nAborting XFS copy now.\n"));
724 exit(1);
725 }
726
727 first_agbno = XFS_AGFL_BLOCK(mp) + 1;
728
729 /* now open targets */
730
731 open_flags = O_RDWR;
732
733 for (i = 0; i < num_targets; i++) {
734 int write_last_block = 0;
735
736 if (stat64(target[i].name, &statbuf) < 0) {
737 /* ok, assume it's a file and create it */
738
739 do_out(_("Creating file %s\n"), target[i].name);
740
741 open_flags |= O_CREAT;
742 if (!buffered_output)
743 open_flags |= O_DIRECT;
744 write_last_block = 1;
745 } else if (S_ISREG(statbuf.st_mode)) {
746 open_flags |= O_TRUNC;
747 if (!buffered_output)
748 open_flags |= O_DIRECT;
749 write_last_block = 1;
750 } else {
751 /*
752 * check to make sure a filesystem isn't mounted
753 * on the device
754 */
755 if (platform_check_ismounted(target[i].name,
756 NULL, &statbuf, 0)) {
757 do_log(_("%s: a filesystem is mounted "
758 "on target device \"%s\".\n"
759 "%s cannot copy to mounted filesystems."
760 " Aborting\n"),
761 progname, target[i].name, progname);
762 exit(1);
763 }
764 }
765
766 target[i].fd = open(target[i].name, open_flags, 0644);
767 if (target[i].fd < 0) {
768 do_log(_("%s: couldn't open target \"%s\"\n"),
769 progname, target[i].name);
770 die_perror();
771 }
772
773 if (write_last_block) {
774 /* ensure regular files are correctly sized */
775
776 if (ftruncate64(target[i].fd, mp->m_sb.sb_dblocks *
777 source_blocksize)) {
778 do_log(_("%s: cannot grow data section.\n"),
779 progname);
780 die_perror();
781 }
782 if (platform_test_xfs_fd(target[i].fd)) {
783 if (xfsctl(target[i].name, target[i].fd,
784 XFS_IOC_DIOINFO, &d) < 0) {
785 do_log(
786 _("%s: xfsctl on \"%s\" failed.\n"),
787 progname, target[i].name);
788 die_perror();
789 } else {
790 wbuf_align = MAX(wbuf_align, d.d_mem);
791 wbuf_size = MIN(d.d_maxiosz, wbuf_size);
792 wbuf_miniosize = MAX(d.d_miniosz,
793 wbuf_miniosize);
794 }
795 }
796 } else {
797 char lb[XFS_MAX_SECTORSIZE] = { 0 };
798 off64_t off;
799
800 /* ensure device files are sufficiently large */
801
802 off = mp->m_sb.sb_dblocks * source_blocksize;
803 off -= sizeof(lb);
804 if (pwrite64(target[i].fd, lb, sizeof(lb), off) < 0) {
805 do_log(_("%s: failed to write last block\n"),
806 progname);
807 do_log(_("\tIs target \"%s\" too small?\n"),
808 target[i].name);
809 die_perror();
810 }
811 }
812 }
813
814 /* initialize locks and bufs */
815
816 if (pthread_mutex_init(&glob_masks.mutex, NULL) != 0) {
817 do_log(_("Couldn't initialize global thread mask\n"));
818 die_perror();
819 }
820 glob_masks.num_working = 0;
821
822 if (wbuf_init(&w_buf, wbuf_size, wbuf_align,
823 wbuf_miniosize, 0) == NULL) {
824 do_log(_("Error initializing wbuf 0\n"));
825 die_perror();
826 }
827
828 wblocks = wbuf_size / BBSIZE;
829
830 if (wbuf_init(&btree_buf, MAX(source_blocksize, wbuf_miniosize),
831 wbuf_align, wbuf_miniosize, 1) == NULL) {
832 do_log(_("Error initializing btree buf 1\n"));
833 die_perror();
834 }
835
836 if (pthread_mutex_init(&mainwait, NULL) != 0) {
837 do_log(_("Error creating first semaphore.\n"));
838 die_perror();
839 exit(1);
840 }
841 /* need to start out blocking */
842 pthread_mutex_lock(&mainwait);
843
844 /* set up sigchild signal handler */
845
846 signal(SIGCHLD, handler);
847 sighold(SIGCHLD);
848
849 /* make children */
850
851 if ((targ = malloc(num_targets * sizeof(thread_args))) == NULL) {
852 do_log(_("Couldn't malloc space for thread args\n"));
853 die_perror();
854 exit(1);
855 }
856
857 for (i = 0, tcarg = targ; i < num_targets; i++, tcarg++) {
858 if (!duplicate)
859 platform_uuid_generate(&tcarg->uuid);
860 else
861 platform_uuid_copy(&tcarg->uuid, &mp->m_sb.sb_uuid);
862
863 if (pthread_mutex_init(&tcarg->wait, NULL) != 0) {
864 do_log(_("Error creating thread mutex %d\n"), i);
865 die_perror();
866 exit(1);
867 }
868 /* need to start out blocking */
869 pthread_mutex_lock(&tcarg->wait);
870 }
871
872 for (i = 0, tcarg = targ; i < num_targets; i++, tcarg++) {
873 tcarg->id = i;
874 tcarg->fd = target[i].fd;
875
876 target[i].state = ACTIVE;
877 num_threads++;
878
879 if (pthread_create(&target[i].pid, NULL,
880 begin_reader, (void *)tcarg)) {
881 do_log(_("Error creating thread for target %d\n"), i);
882 die_perror();
883 }
884 }
885
886 ASSERT(num_targets == num_threads);
887
888 /* set up statistics */
889
890 num_ags = mp->m_sb.sb_agcount;
891
892 init_bar(mp->m_sb.sb_blocksize / BBSIZE
893 * ((__uint64_t)mp->m_sb.sb_dblocks
894 - (__uint64_t)mp->m_sb.sb_fdblocks + 10 * num_ags));
895
896 kids = num_targets;
897
898 for (agno = 0; agno < num_ags && kids > 0; agno++) {
899 /* read in first blocks of the ag */
900
901 read_ag_header(source_fd, agno, &w_buf, &ag_hdr, mp,
902 source_blocksize, source_sectorsize);
903
904 /* set the in_progress bit for the first AG */
905
906 if (agno == 0)
907 ag_hdr.xfs_sb->sb_inprogress = 1;
908
909 /* save what we need (agf) in the btree buffer */
910
911 memmove(btree_buf.data, ag_hdr.xfs_agf, source_sectorsize);
912 ag_hdr.xfs_agf = (xfs_agf_t *) btree_buf.data;
913 btree_buf.length = source_blocksize;
914
915 /* write the ag header out */
916
917 write_wbuf();
918
919 /* traverse btree until we get to the leftmost leaf node */
920
921 bno = be32_to_cpu(ag_hdr.xfs_agf->agf_roots[XFS_BTNUM_BNOi]);
922 current_level = 0;
923 btree_levels = be32_to_cpu(ag_hdr.xfs_agf->
924 agf_levels[XFS_BTNUM_BNOi]);
925
926 ag_end = XFS_AGB_TO_DADDR(mp, agno,
927 be32_to_cpu(ag_hdr.xfs_agf->agf_length) - 1)
928 + source_blocksize / BBSIZE;
929
930 for (;;) {
931 /* none of this touches the w_buf buffer */
932
933 if (current_level >= btree_levels) {
934 do_log(
935 _("Error: current level %d >= btree levels %d\n"),
936 current_level, btree_levels);
937 exit(1);
938 }
939
940 current_level++;
941
942 btree_buf.position = pos = (xfs_off_t)
943 XFS_AGB_TO_DADDR(mp, agno, bno) << BBSHIFT;
944 btree_buf.length = source_blocksize;
945
946 read_wbuf(source_fd, &btree_buf, mp);
947 block = (struct xfs_btree_block *)
948 ((char *)btree_buf.data +
949 pos - btree_buf.position);
950
951 if (be32_to_cpu(block->bb_magic) !=
952 (xfs_sb_version_hascrc(&mp->m_sb) ?
953 XFS_ABTB_CRC_MAGIC : XFS_ABTB_MAGIC)) {
954 do_log(_("Bad btree magic 0x%x\n"),
955 be32_to_cpu(block->bb_magic));
956 exit(1);
957 }
958
959 if (be16_to_cpu(block->bb_level) == 0)
960 break;
961
962 ptr = XFS_ALLOC_PTR_ADDR(mp, block, 1,
963 mp->m_alloc_mxr[1]);
964 bno = be32_to_cpu(ptr[0]);
965 }
966
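/*
 * We are now at the leftmost leaf of this AG's by-block free space
 * btree.  Its records list the free extents in ascending block order,
 * so everything between the end of one free extent and the start of
 * the next is allocated space that must be copied.
 */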
967 /* align first data copy but don't overwrite ag header */
968
969 pos = w_buf.position >> BBSHIFT;
970 length = w_buf.length >> BBSHIFT;
971 next_begin = pos + length;
972 ag_begin = next_begin;
973
974 ASSERT(w_buf.position % source_sectorsize == 0);
975
976 /* handle the rest of the ag */
977
978 for (;;) {
979 if (be16_to_cpu(block->bb_level) != 0) {
980 do_log(
981 _("WARNING: source filesystem inconsistent.\n"));
982 do_log(
983 _(" A leaf btree rec isn't a leaf. Aborting now.\n"));
984 exit(1);
985 }
986
987 rec_ptr = XFS_ALLOC_REC_ADDR(mp, block, 1);
988 for (i = 0; i < be16_to_cpu(block->bb_numrecs);
989 i++, rec_ptr++) {
990 /* calculate in daddr's */
991
992 begin = next_begin;
993
994 /*
995 * protect against pathological case of a
996 * hole right after the ag header in a
997 * mis-aligned case
998 */
999
1000 if (begin < ag_begin)
1001 begin = ag_begin;
1002
1003 /*
1004 * round size up to ensure we copy a
1005 * range bigger than required
1006 */
1007
1008 sizeb = XFS_AGB_TO_DADDR(mp, agno,
1009 be32_to_cpu(rec_ptr->ar_startblock)) -
1010 begin;
1011 size = roundup(sizeb << BBSHIFT, wbuf_miniosize);
1012 if (size > 0) {
1013 /* copy extent */
1014
1015 w_buf.position = (xfs_off_t)
1016 begin << BBSHIFT;
1017
1018 while (size > 0) {
1019 /*
1020 * let lower layer do alignment
1021 */
1022 if (size > w_buf.size) {
1023 w_buf.length = w_buf.size;
1024 size -= w_buf.size;
1025 sizeb -= wblocks;
1026 numblocks += wblocks;
1027 } else {
1028 w_buf.length = size;
1029 numblocks += sizeb;
1030 size = 0;
1031 }
1032
1033 read_wbuf(source_fd, &w_buf, mp);
1034 write_wbuf();
1035
1036 w_buf.position += w_buf.length;
1037
1038 howfar = bump_bar(
1039 howfar, numblocks);
1040 }
1041 }
1042
1043 /* round next starting point down */
1044
1045 new_begin = XFS_AGB_TO_DADDR(mp, agno,
1046 be32_to_cpu(rec_ptr->ar_startblock) +
1047 be32_to_cpu(rec_ptr->ar_blockcount));
1048 next_begin = rounddown(new_begin,
1049 w_buf.min_io_size >> BBSHIFT);
1050 }
1051
1052 if (be32_to_cpu(block->bb_u.s.bb_rightsib) == NULLAGBLOCK)
1053 break;
1054
1055 /* read in next btree record block */
1056
1057 btree_buf.position = pos = (xfs_off_t)
1058 XFS_AGB_TO_DADDR(mp, agno, be32_to_cpu(
1059 block->bb_u.s.bb_rightsib)) << BBSHIFT;
1060 btree_buf.length = source_blocksize;
1061
1062 /* let read_wbuf handle alignment */
1063
1064 read_wbuf(source_fd, &btree_buf, mp);
1065
1066 block = (struct xfs_btree_block *)
1067 ((char *) btree_buf.data +
1068 pos - btree_buf.position);
1069
1070 ASSERT(be32_to_cpu(block->bb_magic) == (xfs_sb_version_hascrc(&mp->m_sb) ? XFS_ABTB_CRC_MAGIC : XFS_ABTB_MAGIC));
1071 }
1072
1073 /*
1074 * write out range of used blocks after last range
1075 * of free blocks in AG
1076 */
1077 if (next_begin < ag_end) {
1078 begin = next_begin;
1079
1080 sizeb = ag_end - begin;
1081 size = roundup(sizeb << BBSHIFT, wbuf_miniosize);
1082
1083 if (size > 0) {
1084 /* copy extent */
1085
1086 w_buf.position = (xfs_off_t) begin << BBSHIFT;
1087
1088 while (size > 0) {
1089 /*
1090 * let lower layer do alignment
1091 */
1092 if (size > w_buf.size) {
1093 w_buf.length = w_buf.size;
1094 size -= w_buf.size;
1095 sizeb -= wblocks;
1096 numblocks += wblocks;
1097 } else {
1098 w_buf.length = size;
1099 numblocks += sizeb;
1100 size = 0;
1101 }
1102
1103 read_wbuf(source_fd, &w_buf, mp);
1104 write_wbuf();
1105
1106 w_buf.position += w_buf.length;
1107
1108 howfar = bump_bar(howfar, numblocks);
1109 }
1110 }
1111 }
1112 }
1113
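/*
 * Data copy complete.  If at least one target survived, finish up:
 * unless -d (duplicate) was used, stamp a clean log on each target with
 * that target's new UUID, then re-read and re-write the AG superblocks
 * in reverse order so that clearing sb_inprogress in AG 0 is the very
 * last write to reach the targets.
 */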
1114 if (kids > 0) {
1115 if (!duplicate) {
1116
1117 /* write a clean log using the specified UUID */
1118 for (j = 0, tcarg = targ; j < num_targets; j++) {
1119 w_buf.owner = tcarg;
1120 w_buf.length = rounddown(w_buf.size,
1121 w_buf.min_io_size);
1122 pos = write_log_header(
1123 source_fd, &w_buf, mp);
1124 end_pos = write_log_trailer(
1125 source_fd, &w_buf, mp);
1126 w_buf.position = pos;
1127 memset(w_buf.data, 0, w_buf.length);
1128
1129 while (w_buf.position < end_pos) {
1130 do_write(tcarg);
1131 w_buf.position += w_buf.length;
1132 }
1133 tcarg++;
1134 }
1135 } else {
1136 num_ags = 1;
1137 }
1138
1139 /* reread and rewrite superblocks (UUID and in-progress) */
1140 /* [backwards, so inprogress bit only updated when done] */
1141
1142 for (i = num_ags - 1; i >= 0; i--) {
1143 read_ag_header(source_fd, i, &w_buf, &ag_hdr, mp,
1144 source_blocksize, source_sectorsize);
1145 if (i == 0)
1146 ag_hdr.xfs_sb->sb_inprogress = 0;
1147
1148 /* do each thread in turn, each has its own UUID */
1149
1150 for (j = 0, tcarg = targ; j < num_targets; j++) {
1151 sb_update_uuid(sb, &ag_hdr, tcarg);
1152 do_write(tcarg);
1153 tcarg++;
1154 }
1155 }
1156
1157 bump_bar(100, 0);
1158 }
1159
1160 check_errors();
1161 return 0;
1162 }
1163
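/*
 * Callback handed to libxfs_log_header(): it supplies the next chunk of
 * buffer space while log records are being formatted.  When the current
 * wbuf fills up, flush it to the owning target and start over at the
 * beginning of the buffer.
 */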
1164 xfs_caddr_t
1165 next_log_chunk(xfs_caddr_t p, int offset, void *private)
1166 {
1167 wbuf *buf = (wbuf *)private;
1168
1169 if (buf->length < (int)(p - buf->data) + offset) {
1170 /* need to flush this one, then start afresh */
1171
1172 do_write(buf->owner);
1173 memset(buf->data, 0, buf->length);
1174 return buf->data;
1175 }
1176 return p + offset;
1177 }
1178
1179 /*
1180 * Writes a log header at the start of the log (with the real
1181 * filesystem UUID embedded into it), and writes to all targets.
1182 *
1183 * Returns the next buffer-length-aligned disk address.
1184 */
1185 xfs_off_t
1186 write_log_header(int fd, wbuf *buf, xfs_mount_t *mp)
1187 {
1188 xfs_caddr_t p = buf->data;
1189 xfs_off_t logstart;
1190 int offset;
1191
1192 logstart = XFS_FSB_TO_DADDR(mp, mp->m_sb.sb_logstart) << BBSHIFT;
1193 buf->position = rounddown(logstart, (xfs_off_t)buf->length);
1194
1195 memset(p, 0, buf->size);
1196 if (logstart % buf->length) { /* unaligned */
1197 read_wbuf(fd, buf, mp);
1198 offset = logstart - buf->position;
1199 p += offset;
1200 memset(p, 0, buf->length - offset);
1201 }
1202
1203 offset = libxfs_log_header(p, &buf->owner->uuid,
1204 xfs_sb_version_haslogv2(&mp->m_sb) ? 2 : 1,
1205 mp->m_sb.sb_logsunit, XLOG_FMT,
1206 next_log_chunk, buf);
1207 do_write(buf->owner);
1208
1209 return roundup(logstart + offset, buf->length);
1210 }
1211
1212 /*
1213 * May do an aligned read of the last buffer in the log (& zero
1214 * the start of that buffer). Returns the disk address at the
1215 * end of last aligned buffer in the log.
1216 */
1217 xfs_off_t
1218 write_log_trailer(int fd, wbuf *buf, xfs_mount_t *mp)
1219 {
1220 xfs_off_t logend;
1221 int offset;
1222
1223 logend = XFS_FSB_TO_DADDR(mp, mp->m_sb.sb_logstart) << BBSHIFT;
1224 logend += XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks);
1225
1226 buf->position = rounddown(logend, (xfs_off_t)buf->length);
1227
1228 if (logend % buf->length) { /* unaligned */
1229 read_wbuf(fd, buf, mp);
1230 offset = (int)(logend - buf->position);
1231 memset(buf->data, 0, offset);
1232 do_write(buf->owner);
1233 }
1234
1235 return buf->position;
1236 }