copy/xfs_copy.c (from thirdparty/xfsprogs-dev.git)
Fix endian bug in xfs_copy, dealing with fragmented freespace (multi-level btrees).
1 /*
2 * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it would be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
11 *
12 * Further, this software is distributed without any warranty that it is
13 * free of the rightful claim of any third person regarding infringement
14 * or the like. Any license provided herein, whether implied or
15 * otherwise, applies only to this software file. Patent licenses, if
16 * any, provided herein do not apply to combinations of this program with
17 * other software, or any other product whatsoever.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write the Free Software Foundation, Inc., 59
21 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
22 *
23 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
24 * Mountain View, CA 94043, or:
25 *
26 * http://www.sgi.com
27 *
28 * For further information regarding this notice, see:
29 *
30 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
31 */
32
33 #include <xfs/libxfs.h>
34 #include <sys/stat.h>
35 #include <sys/wait.h>
36 #include <pthread.h>
37 #include <signal.h>
38 #include <stdarg.h>
39 #include "xfs_copy.h"
40
41 #define rounddown(x, y) (((x)/(y))*(y))
42
43 extern int platform_check_ismounted(char *, char *, struct stat64 *, int);
44
45 int logfd;
46 char *logfile_name;
47 FILE *logerr;
48 char LOGFILE_NAME[] = "/var/tmp/xfs_copy.log.XXXXXX";
49
50 char *source_name;
51 int source_fd;
52
53 unsigned int source_blocksize; /* source filesystem blocksize */
54 unsigned int source_sectorsize; /* source disk sectorsize */
55
56 xfs_agblock_t first_agbno;
57
58 __uint64_t barcount[11];
59
60 unsigned int num_targets;
61 target_control *target;
62
63 wbuf w_buf;
64 wbuf btree_buf;
65
66 pid_t parent_pid;
67 unsigned int kids;
68
69 thread_control glob_masks;
70 thread_args *targ;
71
72 pthread_mutex_t mainwait;
73
74 #define ACTIVE 1
75 #define INACTIVE 2
76
77 xfs_off_t write_log_trailer(int fd, wbuf *w, xfs_mount_t *mp);
78 xfs_off_t write_log_header(int fd, wbuf *w, xfs_mount_t *mp);
79
80 /* general purpose message reporting routine */
81
82 #define OUT 0x01 /* use stdout stream */
83 #define ERR 0x02 /* use stderr stream */
84 #define LOG 0x04 /* use logerr stream */
85 #define PRE 0x08 /* append strerror string */
86 #define LAST 0x10 /* final message we print */
87
88 void
89 do_message(int flags, int code, const char *fmt, ...)
90 {
91 va_list ap;
92 int eek = 0;
93
94 va_start(ap, fmt);
95 if (flags & LOG)
96 if (vfprintf(logerr, fmt, ap) <= 0)
97 eek = 1;
98 if (eek)
99 flags |= ERR; /* failed, force stderr */
100 if (flags & ERR)
101 vfprintf(stderr, fmt, ap);
102 else if (flags & OUT)
103 vfprintf(stdout, fmt, ap);
104 va_end(ap);
105
106 if (flags & PRE) {
107 do_message(flags & ~PRE, 0, ": %s\n", strerror(code));
108 if (flags & LAST)
109 fprintf(stderr,
110 _("Check logfile \"%s\" for more details\n"),
111 logfile_name);
112 }
113
114 /* logfile is broken, force a write to stderr */
115 if (eek) {
116 fprintf(stderr, _("%s: could not write to logfile \"%s\".\n"),
117 progname, logfile_name);
118 fprintf(stderr,
119 _("Aborting XFS copy -- logfile error -- reason: %s\n"),
120 strerror(errno));
121 pthread_exit(NULL);
122 }
123 }
124
125 #define do_out(args...) do_message(OUT|LOG, 0, ## args)
126 #define do_log(args...) do_message(ERR|LOG, 0, ## args)
127 #define do_warn(args...) do_message(LOG, 0, ## args)
128 #define do_error(e,s) do_message(ERR|LOG|PRE, e, s)
129 #define do_fatal(e,s) do_message(ERR|LOG|PRE|LAST, e, s)
130 #define do_vfatal(e,s,args...) do_message(ERR|LOG|PRE|LAST, e, s, ## args)
131 #define die_perror() \
132 do { \
133 do_message(ERR|LOG|PRE|LAST, errno, \
134 _("Aborting XFS copy - reason")); \
135 exit(1); \
136 } while (0)
137
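/*
 * Illustrative note (editorial, not in the original source): the macros
 * above only pre-combine flag bits for do_message().  For example
 *
 *	do_fatal(errno, _("Aborting XFS copy - reason"));
 *
 * becomes do_message(ERR|LOG|PRE|LAST, errno, ...), which writes the
 * message to the logfile and to stderr, recurses once with PRE cleared
 * to append ": <strerror(errno)>\n", and, because LAST is set, tells
 * the user to check the logfile for details.
 */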
138 void
139 check_errors(void)
140 {
141 int i, first_error = 0;
142
143 for (i = 0; i < num_targets; i++) {
144 if (target[i].state == INACTIVE) {
145 if (first_error == 0) {
146 first_error++;
147 do_log(
148 _("THE FOLLOWING COPIES FAILED TO COMPLETE\n"));
149 }
150 do_log(" %s -- ", target[i].name);
151 if (target[i].err_type == 0)
152 do_log(_("write error"));
153 else
154 do_log(_("lseek64 error"));
155 do_log(_(" at offset %lld\n"), target[i].position);
156 }
157 }
158 if (first_error == 0) {
159 fprintf(stdout, _("All copies completed.\n"));
160 fflush(NULL);
161 } else {
162 fprintf(stderr, _("See \"%s\" for more details.\n"),
163 logfile_name);
164 exit(1);
165 }
166 }
167
168 /*
169 * don't have to worry about alignment and mins because those
170 * are taken care of when the buffer's read in
171 */
172 int
173 do_write(thread_args *args)
174 {
175 int res, error = 0;
176
177 if (target[args->id].position != w_buf.position) {
178 if (lseek64(args->fd, w_buf.position, SEEK_SET) < 0) {
179 error = target[args->id].err_type = 1;
180 } else {
181 target[args->id].position = w_buf.position;
182 }
183 }
184
185 if ((res = write(target[args->id].fd, w_buf.data,
186 w_buf.length)) == w_buf.length) {
187 target[args->id].position += res;
188 } else {
189 error = 2;
190 }
191
192 if (error) {
193 target[args->id].error = errno;
194 target[args->id].position = w_buf.position;
195 }
196 return error;
197 }
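/*
 * Editorial note: the error values above feed check_errors() later on --
 * err_type stays 0 for a failed or short write() (reported as "write
 * error") and becomes 1 when the lseek64() fails (reported as "lseek64
 * error"); target[].position is rewound to w_buf.position so the
 * failing offset can be logged.
 */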
198
199 void *
200 begin_reader(void *arg)
201 {
202 thread_args *args = arg;
203
204 for (;;) {
205 pthread_mutex_lock(&args->wait);
206 if (do_write(args))
207 goto handle_error;
208 pthread_mutex_lock(&glob_masks.mutex);
209 if (--glob_masks.num_working == 0)
210 pthread_mutex_unlock(&mainwait);
211 pthread_mutex_unlock(&glob_masks.mutex);
212 }
213 /* NOTREACHED */
214
215 handle_error:
216 /* error will be logged by primary thread */
217
218 pthread_mutex_lock(&glob_masks.mutex);
219 target[args->id].state = INACTIVE;
220 if (--glob_masks.num_working == 0)
221 pthread_mutex_unlock(&mainwait);
222 pthread_mutex_unlock(&glob_masks.mutex);
223 pthread_exit(NULL);
224 return NULL;
225 }
226
227 void
228 killall(void)
229 {
230 int i;
231
232 /* only the parent gets to kill things */
233
234 if (getpid() != parent_pid)
235 return;
236
237 for (i = 0; i < num_targets; i++) {
238 if (target[i].state == ACTIVE) {
239 /* kill off target threads */
240 pthread_kill(target[i].pid, SIGKILL);
241 pthread_mutex_unlock(&targ[i].wait);
242 }
243 }
244 }
245
246 void
247 handler()
248 {
249 pid_t pid = getpid();
250 int status, i;
251
252 pid = wait(&status);
253
254 kids--;
255
256 for (i = 0; i < num_targets; i++) {
257 if (target[i].pid == pid) {
258 if (target[i].state == INACTIVE) {
259 /* thread got an I/O error */
260
261 if (target[i].err_type == 0) {
262 do_warn(
263 _("%s: write error on target %d \"%s\" at offset %lld\n"),
264 progname, i, target[i].name,
265 target[i].position);
266 } else {
267 do_warn(
268 _("%s: lseek64 error on target %d \"%s\" at offset %lld\n"),
269 progname, i, target[i].name,
270 target[i].position);
271 }
272
273 do_vfatal(target[i].error,
274 _("Aborting target %d - reason"), i);
275
276 if (kids == 0) {
277 do_log(
278 _("Aborting XFS copy - no more targets.\n"));
279 check_errors();
280 pthread_exit(NULL);
281 }
282
283 signal(SIGCHLD, handler);
284 return;
285 } else {
286 /* it just croaked it bigtime, log it */
287
288 do_warn(
289 _("%s: thread %d died unexpectedly, target \"%s\" incomplete\n"),
290 progname, i, target[i].name);
291 do_warn(_("%s: offset was probably %lld\n"),
292 progname, target[i].position);
293 do_fatal(target[i].error,
294 _("Aborting XFS copy - reason"));
295 pthread_exit(NULL);
296 }
297 }
298 }
299
300 /* unknown child -- something very wrong */
301
302 do_warn(_("%s: Unknown child died (should never happen!)\n"), progname);
303 die_perror();
304 pthread_exit(NULL);
305 signal(SIGCHLD, handler);
306 }
307
308 void
309 usage(void)
310 {
311 fprintf(stderr,
312 _("Usage: %s [-bdV] [-L logfile] source target [target ...]\n"),
313 progname);
314 exit(1);
315 }
316
317 void
318 init_bar(__uint64_t source_blocks)
319 {
320 int i;
321
322 for (i = 0; i < 11; i++)
323 barcount[i] = (source_blocks/10)*i;
324 }
325
326 int
327 bump_bar(int tenths, __uint64_t numblocks)
328 {
329 static char *bar[11] = {
330 " 0% ",
331 " ... 10% ",
332 " ... 20% ",
333 " ... 30% ",
334 " ... 40% ",
335 " ... 50% ",
336 " ... 60% ",
337 " ... 70% ",
338 " ... 80% ",
339 " ... 90% ",
340 " ... 100%\n\n",
341 };
342
343 if (tenths > 10) {
344 printf("%s", bar[10]);
345 fflush(stdout);
346 } else {
347 while (tenths < 10 && numblocks > barcount[tenths]) {
348 printf("%s", bar[tenths]);
349 fflush(stdout);
350 tenths++;
351 }
352 }
353 return tenths;
354 }
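/*
 * Worked example (editorial): after init_bar(1000), barcount[] holds
 * {0, 100, 200, ..., 1000}.  The copy loop calls
 *
 *	howfar = bump_bar(howfar, numblocks);
 *
 * after each chunk, so " 0% " appears once blocks start moving and one
 * more " ... N0% " marker is printed each time numblocks passes another
 * tenth of the source size; the final bump_bar(100, 0) forces the
 * closing " ... 100%" line.
 */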
355
356 static xfs_off_t source_position = -1;
357
358 wbuf *
359 wbuf_init(wbuf *buf, int data_size, int data_align, int min_io_size, int id)
360 {
361 buf->id = id;
362 if ((buf->data = memalign(data_align, data_size)) == NULL)
363 return NULL;
364 ASSERT(min_io_size % BBSIZE == 0);
365 buf->min_io_size = min_io_size;
366 buf->size = MAX(data_size, 2*min_io_size);
367 return buf;
368 }
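/*
 * Editorial note: data_align and min_io_size normally come from
 * XFS_IOC_DIOINFO (d_mem and d_miniosz) on the source and targets, so
 * buffers handed to read()/write() under O_DIRECT are memory-aligned
 * and their lengths get rounded to whole multiples of the device's
 * minimum I/O size; when XFS_IOC_DIOINFO isn't available for the
 * source, main() starts from 16 KiB alignment and the filesystem
 * sector size instead.
 */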
369
370 void
371 read_wbuf(int fd, wbuf *buf, xfs_mount_t *mp)
372 {
373 int res = 0;
374 xfs_off_t lres = 0;
375 xfs_off_t newpos;
376 size_t diff;
377
378 newpos = rounddown(buf->position, (xfs_off_t) buf->min_io_size);
379
380 if (newpos != buf->position) {
381 diff = buf->position - newpos;
382 buf->position = newpos;
383
384 buf->length += diff;
385 }
386
387 if (source_position != buf->position) {
388 lres = lseek64(fd, buf->position, SEEK_SET);
389 if (lres < 0LL) {
390 do_warn(_("%s: lseek64 failure at offset %lld\n"),
391 progname, source_position);
392 die_perror();
393 }
394 source_position = buf->position;
395 }
396
397 ASSERT(source_position % source_sectorsize == 0);
398
399 /* round up length for direct I/O if necessary */
400
401 if (buf->length % buf->min_io_size != 0)
402 buf->length = roundup(buf->length, buf->min_io_size);
403
404 if (buf->length > buf->size) {
405 do_warn(_("assert error: buf->length = %d, buf->size = %d\n"),
406 buf->length, buf->size);
407 killall();
408 abort();
409 }
410
411 if ((res = read(fd, buf->data, buf->length)) < 0) {
412 do_warn(_("%s: read failure at offset %lld\n"),
413 progname, source_position);
414 die_perror();
415 }
416
417 if (res < buf->length &&
418 source_position + res == mp->m_sb.sb_dblocks * source_blocksize)
419 res = buf->length;
420 else
421 ASSERT(res == buf->length);
422 source_position += res;
423 buf->length = res;
424 }
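/*
 * Worked example (editorial): with min_io_size == 512, a request for
 * 4096 bytes at offset 1030 is turned into a direct-I/O-safe read --
 *
 *	position: rounddown(1030, 512)           -> 1024
 *	length:   4096 + (1030 - 1024) = 4102,
 *	          roundup(4102, 512)             -> 4608
 *
 * so the caller's data begins at buf->data + 6 and the extra bytes at
 * either end are simply ignored.
 */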
425
426 void
427 read_ag_header(int fd, xfs_agnumber_t agno, wbuf *buf, ag_header_t *ag,
428 xfs_mount_t *mp, int blocksize, int sectorsize)
429 {
430 xfs_daddr_t off;
431 int length;
432 xfs_off_t newpos;
433 size_t diff;
434
435 /* initial settings */
436
437 diff = 0;
438 off = XFS_AG_DADDR(mp, agno, XFS_SB_DADDR);
439 buf->position = (xfs_off_t) off * (xfs_off_t) BBSIZE;
440 length = buf->length = first_agbno * blocksize;
441
442 /* handle alignment stuff */
443
444 newpos = rounddown(buf->position, (xfs_off_t) buf->min_io_size);
445 if (newpos != buf->position) {
446 diff = buf->position - newpos;
447 buf->position = newpos;
448 buf->length += diff;
449 }
450
451 /* round up length for direct I/O if necessary */
452
453 if (buf->length % buf->min_io_size != 0)
454 buf->length = roundup(buf->length, buf->min_io_size);
455
456 ASSERT(length != 0);
457 read_wbuf(fd, buf, mp);
458 ASSERT(buf->length >= length);
459
460 ag->xfs_sb = (xfs_sb_t *) (buf->data + diff);
461 ASSERT(INT_GET(ag->xfs_sb->sb_magicnum, ARCH_CONVERT)==XFS_SB_MAGIC);
462 ag->xfs_agf = (xfs_agf_t *) (buf->data + diff + sectorsize);
463 ASSERT(INT_GET(ag->xfs_agf->agf_magicnum, ARCH_CONVERT)==XFS_AGF_MAGIC);
464 ag->xfs_agi = (xfs_agi_t *) (buf->data + diff + 2*sectorsize);
465 ASSERT(INT_GET(ag->xfs_agi->agi_magicnum, ARCH_CONVERT)==XFS_AGI_MAGIC);
466 ag->xfs_agfl = (xfs_agfl_t *) (buf->data + diff + 3*sectorsize);
467 }
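/*
 * Layout being decoded above (editorial note): the first sectors of
 * every allocation group hold, in order, the superblock (XFS_SB_MAGIC),
 * the AGF free-space header (XFS_AGF_MAGIC), the AGI inode header
 * (XFS_AGI_MAGIC) and the AGFL free list, which is why the pointers are
 * taken at buf->data + diff + 0, 1, 2 and 3 sector sizes respectively.
 */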
468
469
470 void
471 write_wbuf(void)
472 {
473 int i;
474
475 /* verify target threads */
476 for (i = 0; i < num_targets; i++)
477 if (target[i].state != INACTIVE)
478 glob_masks.num_working++;
479
480 /* release target threads */
481 for (i = 0; i < num_targets; i++)
482 if (target[i].state != INACTIVE)
483 pthread_mutex_unlock(&targ[i].wait); /* wake up */
484
485 sigrelse(SIGCHLD);
486 pthread_mutex_lock(&mainwait);
487 sighold(SIGCHLD);
488 }
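/*
 * Editorial note on the handshake above: the main thread counts the
 * still-active targets into glob_masks.num_working, wakes each writer
 * thread by unlocking its targ[i].wait mutex, and then blocks on
 * mainwait; every writer (begin_reader) performs one do_write() of the
 * shared w_buf and decrements num_working, and the last one to finish
 * unlocks mainwait so the main thread can refill w_buf for the next
 * chunk.
 */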
489
490
491 int
492 main(int argc, char **argv)
493 {
494 int i, j;
495 int howfar = 0;
496 int open_flags;
497 xfs_off_t pos, end_pos;
498 size_t length;
499 int c, size, sizeb, first_residue, tmp_residue;
500 __uint64_t numblocks = 0;
501 int wblocks = 0;
502 int num_threads = 0;
503 struct dioattr d;
504 int wbuf_size;
505 int wbuf_align;
506 int wbuf_miniosize;
507 int source_is_file = 0;
508 int buffered_output = 0;
509 int duplicate_uuids = 0;
510 uint btree_levels, current_level;
511 ag_header_t ag_hdr;
512 xfs_mount_t *mp;
513 xfs_mount_t mbuf;
514 xfs_buf_t *sbp;
515 xfs_sb_t *sb;
516 xfs_agnumber_t num_ags, agno;
517 xfs_agblock_t bno;
518 xfs_daddr_t begin, next_begin, ag_begin, new_begin, ag_end;
519 xfs_alloc_block_t *block;
520 xfs_alloc_ptr_t *ptr;
521 xfs_alloc_rec_t *rec_ptr;
522 extern char *optarg;
523 extern int optind;
524 libxfs_init_t xargs;
525 thread_args *tcarg;
526 struct stat64 statbuf;
527
528 progname = basename(argv[0]);
529
530 setlocale(LC_ALL, "");
531 bindtextdomain(PACKAGE, LOCALEDIR);
532 textdomain(PACKAGE);
533
534 while ((c = getopt(argc, argv, "bdL:V")) != EOF) {
535 switch (c) {
536 case 'b':
537 buffered_output = 1;
538 break;
539 case 'd':
540 duplicate_uuids = 1;
541 break;
542 case 'L':
543 logfile_name = optarg;
544 break;
545 case 'V':
546 printf(_("%s version %s\n"), progname, VERSION);
547 exit(0);
548 case '?':
549 usage();
550 }
551 }
552
553 if (argc - optind < 2)
554 usage();
555
556 if (logfile_name) {
557 logfd = open(logfile_name, O_CREAT|O_WRONLY|O_EXCL, 0600);
558 } else {
559 logfile_name = LOGFILE_NAME;
560 logfd = mkstemp(logfile_name);
561 }
562
563 if (logfd < 0) {
564 fprintf(stderr, _("%s: couldn't open log file \"%s\"\n"),
565 progname, logfile_name);
566 perror(_("Aborting XFS copy - reason"));
567 exit(1);
568 }
569
570 if ((logerr = fdopen(logfd, "w")) == NULL) {
571 fprintf(stderr, _("%s: couldn't set up logfile stream\n"),
572 progname);
573 perror(_("Aborting XFS copy - reason"));
574 exit(1);
575 }
576
577 source_name = argv[optind];
578 source_fd = -1;
579 optind++;
580
581 num_targets = argc - optind;
582 if ((target = malloc(sizeof(target_control) * num_targets)) == NULL) {
583 do_log(_("Couldn't allocate target array\n"));
584 die_perror();
585 }
586 for (i = 0; optind < argc; i++, optind++) {
587 target[i].name = argv[optind];
588 target[i].fd = -1;
589 target[i].position = -1;
590 target[i].state = INACTIVE;
591 target[i].error = 0;
592 target[i].err_type = 0;
593 }
594
595 parent_pid = getpid();
596
597 if (atexit(killall)) {
598 do_log(_("%s: couldn't register atexit function.\n"), progname);
599 die_perror();
600 }
601
602 /* open up source -- is it a file? */
603
604 open_flags = O_RDONLY;
605
606 if ((source_fd = open(source_name, open_flags)) < 0) {
607 do_log(_("%s: couldn't open source \"%s\"\n"),
608 progname, source_name);
609 die_perror();
610 }
611
612 if (fstat64(source_fd, &statbuf) < 0) {
613 do_log(_("%s: couldn't stat source \"%s\"\n"),
614 progname, source_name);
615 die_perror();
616 }
617
618 if (S_ISREG(statbuf.st_mode))
619 source_is_file = 1;
620
621 if (source_is_file && platform_test_xfs_fd(source_fd)) {
622 if (fcntl(source_fd, F_SETFL, open_flags | O_DIRECT) < 0) {
623 do_log(_("%s: Cannot set direct I/O flag on \"%s\".\n"),
624 progname, source_name);
625 die_perror();
626 }
627 if (xfsctl(source_name, source_fd, XFS_IOC_DIOINFO, &d) < 0) {
628 do_log(_("%s: xfsctl on file \"%s\" failed.\n"),
629 progname, source_name);
630 die_perror();
631 }
632
633 wbuf_align = d.d_mem;
634 wbuf_size = d.d_maxiosz;
635 wbuf_miniosize = d.d_miniosz;
636 } else {
637 /* set arbitrary I/O params, miniosize at least 1 disk block */
638
639 wbuf_align = 4096*4;
640 wbuf_size = 1024 * 4000;
641 wbuf_miniosize = -1; /* set after mounting source fs */
642 }
643
644 if (!source_is_file) {
645 /*
646 * check to make sure a filesystem isn't mounted
647 * on the device
648 */
649 if (platform_check_ismounted(source_name, NULL, &statbuf, 0)) {
650 do_log(
651 _("%s: Warning -- a filesystem is mounted on the source device.\n"),
652 progname);
653 do_log(
654 _("\t\tGenerated copies may be corrupt unless the source is\n"));
655 do_log(
656 _("\t\tunmounted or mounted read-only. Copy proceeding...\n"));
657 }
658 }
659
660 /* prepare the libxfs_init structure */
661
662 memset(&xargs, 0, sizeof(xargs));
663 xargs.notvolmsg = "oh no %s";
664 xargs.isreadonly = LIBXFS_ISREADONLY;
665 xargs.notvolok = 1;
666
667 if (source_is_file) {
668 xargs.dname = source_name;
669 xargs.disfile = 1;
670 } else
671 xargs.volname = source_name;
672
673 if (!libxfs_init(&xargs)) {
674 do_log(_("%s: couldn't initialize XFS library\n"
675 "%s: Aborting.\n"), progname, progname);
676 exit(1);
677 }
678
679 /* prepare the mount structure */
680
681 sbp = libxfs_readbuf(xargs.ddev, XFS_SB_DADDR, 1, 0);
682 memset(&mbuf, 0, sizeof(xfs_mount_t));
683 sb = &mbuf.m_sb;
684 libxfs_xlate_sb(XFS_BUF_PTR(sbp), sb, 1, ARCH_CONVERT, XFS_SB_ALL_BITS);
685
686 mp = libxfs_mount(&mbuf, sb, xargs.ddev, xargs.logdev, xargs.rtdev, 1);
687 if (mp == NULL) {
688 do_log(_("%s: %s filesystem failed to initialize\n"
689 "%s: Aborting.\n"), progname, source_name, progname);
690 exit(1);
691 } else if (mp->m_sb.sb_inprogress) {
692 do_log(_("%s: %s filesystem failed to initialize\n"
693 "%s: Aborting.\n"), progname, source_name, progname);
694 exit(1);
695 } else if (mp->m_sb.sb_logstart == 0) {
696 do_log(_("%s: %s has an external log.\n%s: Aborting.\n"),
697 progname, source_name, progname);
698 exit(1);
699 } else if (mp->m_sb.sb_rextents != 0) {
700 do_log(_("%s: %s has a real-time section.\n"
701 "%s: Aborting.\n"), progname, source_name, progname);
702 exit(1);
703 }
704
705 source_blocksize = mp->m_sb.sb_blocksize;
706 source_sectorsize = mp->m_sb.sb_sectsize;
707
708 if (wbuf_miniosize == -1)
709 wbuf_miniosize = source_sectorsize;
710
711 ASSERT(source_blocksize % source_sectorsize == 0);
712 ASSERT(source_sectorsize % BBSIZE == 0);
713
714 if (source_blocksize > source_sectorsize) {
715 /* get number of leftover sectors in last block of ag header */
716
717 tmp_residue = ((XFS_AGFL_DADDR(mp) + 1) * source_sectorsize)
718 % source_blocksize;
719 first_residue = (tmp_residue == 0) ? 0 :
720 source_blocksize - tmp_residue;
721 ASSERT(first_residue % source_sectorsize == 0);
722 } else if (source_blocksize == source_sectorsize) {
723 first_residue = 0;
724 } else {
725 do_log(_("Error: filesystem block size is smaller than the"
726 " disk sectorsize.\nAborting XFS copy now.\n"));
727 exit(1);
728 }
729
730 first_agbno = (((XFS_AGFL_DADDR(mp) + 1) * source_sectorsize)
731 + first_residue) / source_blocksize;
732 ASSERT(first_agbno != 0);
733 ASSERT( ((((XFS_AGFL_DADDR(mp) + 1) * source_sectorsize)
734 + first_residue) % source_blocksize) == 0);
735
736 /* now open targets */
737
738 open_flags = O_RDWR;
739
740 for (i = 0; i < num_targets; i++) {
741 int write_last_block = 0;
742
743 if (stat64(target[i].name, &statbuf) < 0) {
744 /* ok, assume it's a file and create it */
745
746 do_out(_("Creating file %s\n"), target[i].name);
747
748 open_flags |= O_CREAT;
749 if (!buffered_output)
750 open_flags |= O_DIRECT;
751 write_last_block = 1;
752 } else if (S_ISREG(statbuf.st_mode)) {
753 open_flags |= O_TRUNC;
754 if (!buffered_output)
755 open_flags |= O_DIRECT;
756 write_last_block = 1;
757 } else {
758 /*
759 * check to make sure a filesystem isn't mounted
760 * on the device
761 */
762 if (platform_check_ismounted(target[i].name,
763 NULL, &statbuf, 0)) {
764 do_log(_("%s: a filesystem is mounted "
765 "on target device \"%s\".\n"
766 "%s cannot copy to mounted filesystems."
767 " Aborting\n"),
768 progname, target[i].name, progname);
769 exit(1);
770 }
771 }
772
773 target[i].fd = open(target[i].name, open_flags, 0644);
774 if (target[i].fd < 0) {
775 do_log(_("%s: couldn't open target \"%s\"\n"),
776 progname, target[i].name);
777 die_perror();
778 }
779
780 if (write_last_block) {
781 /* ensure regular files are correctly sized */
782
783 if (ftruncate64(target[i].fd, mp->m_sb.sb_dblocks *
784 source_blocksize)) {
785 do_log(_("%s: cannot grow data section.\n"),
786 progname);
787 die_perror();
788 }
789 if (platform_test_xfs_fd(target[i].fd)) {
790 if (xfsctl(target[i].name, target[i].fd,
791 XFS_IOC_DIOINFO, &d) < 0) {
792 do_log(
793 _("%s: xfsctl on \"%s\" failed.\n"),
794 progname, target[i].name);
795 die_perror();
796 } else {
797 wbuf_align = MAX(wbuf_align, d.d_mem);
798 wbuf_size = MIN(d.d_maxiosz, wbuf_size);
799 wbuf_miniosize = MAX(d.d_miniosz,
800 wbuf_miniosize);
801 }
802 }
803 } else {
804 char lb[XFS_MAX_SECTORSIZE] = { 0 };
805 off64_t off;
806
807 /* ensure device files are sufficiently large */
808
809 off = mp->m_sb.sb_dblocks * source_blocksize;
810 off -= sizeof(lb);
811 if (pwrite64(target[i].fd, lb, sizeof(lb), off) < 0) {
812 do_log(_("%s: failed to write last block\n"),
813 progname);
814 do_log(_("\tIs target \"%s\" too small?\n"),
815 target[i].name);
816 die_perror();
817 }
818 }
819 }
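/*
 * Editorial note: the two sizing strategies above are intentional --
 * regular-file targets are ftruncate64()d out to the full data-section
 * size, while block-device targets get a zero-filled buffer pwritten so
 * that it ends exactly at sb_dblocks * blocksize; if that write fails,
 * the device is almost certainly smaller than the source, hence the
 * "Is target too small?" diagnostic.
 */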
820
821 /* initialize locks and bufs */
822
823 if (pthread_mutex_init(&glob_masks.mutex, NULL) != 0) {
824 do_log(_("Couldn't initialize global thread mask\n"));
825 die_perror();
826 }
827 glob_masks.num_working = 0;
828
829 if (wbuf_init(&w_buf, wbuf_size, wbuf_align,
830 wbuf_miniosize, 0) == NULL) {
831 do_log(_("Error initializing wbuf 0\n"));
832 die_perror();
833 }
834
835 wblocks = wbuf_size / BBSIZE;
836
837 if (wbuf_init(&btree_buf, MAX(source_blocksize, wbuf_miniosize),
838 wbuf_align, wbuf_miniosize, 1) == NULL) {
839 do_log(_("Error initializing btree buf 1\n"));
840 die_perror();
841 }
842
843 if (pthread_mutex_init(&mainwait,NULL) != 0) {
844 do_log(_("Error creating first semaphore.\n"));
845 die_perror();
846 exit(1);
847 }
848 /* need to start out blocking */
849 pthread_mutex_lock(&mainwait);
850
851 /* set up sigchild signal handler */
852
853 signal(SIGCHLD, handler);
854 sighold(SIGCHLD);
855
856 /* make children */
857
858 if ((targ = malloc(num_targets * sizeof(thread_args))) == NULL) {
859 do_log(_("Couldn't malloc space for thread args\n"));
860 die_perror();
861 exit(1);
862 }
863
864 for (i = 0, tcarg = targ; i < num_targets; i++, tcarg++) {
865 if (!duplicate_uuids)
866 uuid_generate(tcarg->uuid);
867 else
868 uuid_copy(tcarg->uuid, mp->m_sb.sb_uuid);
869
870 if (pthread_mutex_init(&tcarg->wait, NULL) != 0) {
871 do_log(_("Error creating thread mutex %d\n"), i);
872 die_perror();
873 exit(1);
874 }
875 /* need to start out blocking */
876 pthread_mutex_lock(&tcarg->wait);
877 }
878
879 for (i = 0, tcarg = targ; i < num_targets; i++, tcarg++) {
880 tcarg->id = i;
881 tcarg->fd = target[i].fd;
882
883 target[i].state = ACTIVE;
884 num_threads++;
885
886 if (pthread_create(&target[i].pid, NULL,
887 begin_reader, (void *)tcarg)) {
888 do_log(_("Error creating thread for target %d\n"), i);
889 die_perror();
890 }
891 }
892
893 ASSERT(num_targets == num_threads);
894
895 /* set up statistics */
896
897 num_ags = mp->m_sb.sb_agcount;
898
899 init_bar(mp->m_sb.sb_blocksize / BBSIZE
900 * ((__uint64_t)mp->m_sb.sb_dblocks
901 - (__uint64_t)mp->m_sb.sb_fdblocks + 10 * num_ags));
902
903 kids = num_targets;
904 block = (xfs_alloc_block_t *) btree_buf.data;
905
906 for (agno = 0; agno < num_ags && kids > 0; agno++) {
907 /* read in first blocks of the ag */
908
909 read_ag_header(source_fd, agno, &w_buf, &ag_hdr, mp,
910 source_blocksize, source_sectorsize);
911
912 /* set the in_progress bit for the first AG */
913
914 if (agno == 0)
915 INT_SET(ag_hdr.xfs_sb->sb_inprogress, ARCH_CONVERT, 1);
916
917 /* save what we need (agf) in the btree buffer */
918
919 bcopy(ag_hdr.xfs_agf, btree_buf.data, source_sectorsize);
920 ag_hdr.xfs_agf = (xfs_agf_t *) btree_buf.data;
921 btree_buf.length = source_blocksize;
922
923 /* write the ag header out */
924
925 write_wbuf();
926
927 /* traverse btree until we get to the leftmost leaf node */
928
929 bno = INT_GET(ag_hdr.xfs_agf->agf_roots[XFS_BTNUM_BNOi],
930 ARCH_CONVERT);
931 current_level = 0;
932 btree_levels = INT_GET(
933 ag_hdr.xfs_agf->agf_levels[XFS_BTNUM_BNOi],
934 ARCH_CONVERT);
935
936 ag_end = XFS_AGB_TO_DADDR(mp, agno,
937 INT_GET(ag_hdr.xfs_agf->agf_length,ARCH_CONVERT) - 1)
938 + source_blocksize/BBSIZE;
939
940 for (;;) {
941 /* none of this touches the w_buf buffer */
942
943 ASSERT(current_level < btree_levels);
944
945 current_level++;
946
947 btree_buf.position = pos = (xfs_off_t)
948 XFS_AGB_TO_DADDR(mp,agno,bno) << BBSHIFT;
949 btree_buf.length = source_blocksize;
950
951 read_wbuf(source_fd, &btree_buf, mp);
952 block = (xfs_alloc_block_t *) ((char *) btree_buf.data
953 + pos - btree_buf.position);
954
955 ASSERT(INT_GET(block->bb_magic,ARCH_CONVERT) ==
956 XFS_ABTB_MAGIC);
957
958 if (INT_GET(block->bb_level,ARCH_CONVERT) == 0)
959 break;
960
961 ptr = XFS_BTREE_PTR_ADDR(source_blocksize, xfs_alloc,
962 block, 1, mp->m_alloc_mxr[1]);
963
964 bno = INT_GET(ptr[0], ARCH_CONVERT);
965 }
966
967 /* align first data copy but don't overwrite ag header */
968
969 pos = w_buf.position >> BBSHIFT;
970 length = w_buf.length >> BBSHIFT;
971 next_begin = pos + length;
972 ag_begin = next_begin;
973
974 ASSERT(w_buf.position % source_sectorsize == 0);
975
976 /* handle the rest of the ag */
977
978 for (;;) {
979 if (INT_GET(block->bb_level,ARCH_CONVERT) != 0) {
980 do_log(
981 _("WARNING: source filesystem inconsistent.\n"));
982 do_log(
983 _(" A leaf btree rec isn't a leaf. Aborting now.\n"));
984 exit(1);
985 }
986
987 rec_ptr = XFS_BTREE_REC_ADDR(source_blocksize,
988 xfs_alloc, block, 1, mp->m_alloc_mxr[0]);
989
990 for (i = 0;
991 i < INT_GET(block->bb_numrecs,ARCH_CONVERT);
992 i++, rec_ptr++) {
993 /* calculate in daddr's */
994
995 begin = next_begin;
996
997 /*
998 * protect against pathological case of a
999 * hole right after the ag header in a
1000 * mis-aligned case
1001 */
1002
1003 if (begin < ag_begin)
1004 begin = ag_begin;
1005
1006 /*
1007 * round size up to ensure we copy a
1008 * range bigger than required
1009 */
1010
1011 sizeb = XFS_AGB_TO_DADDR(mp, agno,
1012 INT_GET(rec_ptr->ar_startblock,
1013 ARCH_CONVERT)) - begin;
1014 size = roundup(sizeb <<BBSHIFT, wbuf_miniosize);
1015 if (size > 0) {
1016 /* copy extent */
1017
1018 w_buf.position = (xfs_off_t)
1019 begin << BBSHIFT;
1020
1021 while (size > 0) {
1022 /*
1023 * let lower layer do alignment
1024 */
1025 if (size > w_buf.size) {
1026 w_buf.length = w_buf.size;
1027 size -= w_buf.size;
1028 sizeb -= wblocks;
1029 numblocks += wblocks;
1030 } else {
1031 w_buf.length = size;
1032 numblocks += sizeb;
1033 size = 0;
1034 }
1035
1036 read_wbuf(source_fd, &w_buf, mp);
1037 write_wbuf();
1038
1039 w_buf.position += w_buf.length;
1040
1041 howfar = bump_bar(
1042 howfar, numblocks);
1043 }
1044 }
1045
1046 /* round next starting point down */
1047
1048 new_begin = XFS_AGB_TO_DADDR(mp, agno,
1049 INT_GET(rec_ptr->ar_startblock,
1050 ARCH_CONVERT) +
1051 INT_GET(rec_ptr->ar_blockcount,
1052 ARCH_CONVERT));
1053 next_begin = rounddown(new_begin,
1054 w_buf.min_io_size >> BBSHIFT);
1055 }
1056
1057 if (INT_GET(block->bb_rightsib,ARCH_CONVERT) ==
1058 NULLAGBLOCK)
1059 break;
1060
1061 /* read in next btree record block */
1062
1063 btree_buf.position = pos = (xfs_off_t)
1064 XFS_AGB_TO_DADDR(mp, agno,
1065 INT_GET(block->bb_rightsib,
1066 ARCH_CONVERT)) << BBSHIFT;
1067 btree_buf.length = source_blocksize;
1068
1069 /* let read_wbuf handle alignment */
1070
1071 read_wbuf(source_fd, &btree_buf, mp);
1072
1073 block = (xfs_alloc_block_t *) ((char *) btree_buf.data
1074 + pos - btree_buf.position);
1075
1076 ASSERT(INT_GET(block->bb_magic,ARCH_CONVERT) ==
1077 XFS_ABTB_MAGIC);
1078 }
1079
1080 /*
1081 * write out range of used blocks after last range
1082 * of free blocks in AG
1083 */
1084 if (next_begin < ag_end) {
1085 begin = next_begin;
1086
1087 sizeb = ag_end - begin;
1088 size = roundup(sizeb << BBSHIFT, wbuf_miniosize);
1089
1090 if (size > 0) {
1091 /* copy extent */
1092
1093 w_buf.position = (xfs_off_t) begin << BBSHIFT;
1094
1095 while (size > 0) {
1096 /*
1097 * let lower layer do alignment
1098 */
1099 if (size > w_buf.size) {
1100 w_buf.length = w_buf.size;
1101 size -= w_buf.size;
1102 sizeb -= wblocks;
1103 numblocks += wblocks;
1104 } else {
1105 w_buf.length = size;
1106 numblocks += sizeb;
1107 size = 0;
1108 }
1109
1110 read_wbuf(source_fd, &w_buf, mp);
1111 write_wbuf();
1112
1113 w_buf.position += w_buf.length;
1114
1115 howfar = bump_bar(howfar, numblocks);
1116 }
1117 }
1118 }
1119 }
1120
1121 if (kids > 0) {
1122 /* write a clean log using the specified UUID */
1123
1124 for (j = 0, tcarg = targ; j < num_targets; j++) {
1125 w_buf.owner = tcarg;
1126 w_buf.length = rounddown(w_buf.size, w_buf.min_io_size);
1127
1128 pos = write_log_header(source_fd, &w_buf, mp);
1129 end_pos = write_log_trailer(source_fd, &w_buf, mp);
1130
1131 w_buf.position = pos;
1132 memset(w_buf.data, 0, w_buf.length);
1133
1134 while (w_buf.position < end_pos) {
1135 do_write(tcarg);
1136 w_buf.position += w_buf.length;
1137 }
1138 tcarg++;
1139 }
1140
1141 /* reread and rewrite superblocks (UUID and in-progress) */
1142 /* [backwards, so inprogress bit only updated when done] */
1143
1144 if (duplicate_uuids)
1145 num_ags = 1;
1146 for (i = num_ags - 1; i >= 0; i--) {
1147 read_ag_header(source_fd, i, &w_buf, &ag_hdr, mp,
1148 source_blocksize, source_sectorsize);
1149 if (i == 0)
1150 ag_hdr.xfs_sb->sb_inprogress = 0;
1151
1152 /* do each thread in turn, each has its own UUID */
1153
1154 for (j = 0, tcarg = targ; j < num_targets; j++) {
1155 uuid_copy(ag_hdr.xfs_sb->sb_uuid, tcarg->uuid);
1156 do_write(tcarg);
1157 tcarg++;
1158 }
1159 }
1160
1161 bump_bar(100, 0);
1162 }
1163
1164 check_errors();
1165 killall();
1166 pthread_exit(NULL);
1167 /*NOTREACHED*/
1168 return 0;
1169 }
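/*
 * Editorial sketch of the copy loop in main() above (simplified):
 *
 *	for each allocation group:
 *		copy the AG header (setting sb_inprogress in AG 0);
 *		walk the by-block-number (BNO) free-space btree down to
 *		its leftmost leaf;
 *		for each free-extent record, left to right:
 *			copy the used range between the previous free
 *			extent and this one;
 *		copy the used space after the last free extent, up to
 *		the end of the AG;
 *
 * so only allocated blocks (plus alignment slack) are ever read or
 * written, which is why copying a mostly empty filesystem is fast.
 */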
1170
1171 xfs_caddr_t
1172 next_log_chunk(xfs_caddr_t p, int offset, void *private)
1173 {
1174 wbuf *buf = (wbuf *)private;
1175
1176 if (buf->length < (int)(p - buf->data) + offset) {
1177 /* need to flush this one, then start afresh */
1178
1179 do_write(buf->owner);
1180 memset(buf->data, 0, buf->length);
1181 return buf->data;
1182 }
1183 return p + offset;
1184 }
1185
1186 /*
1187 * Writes a log header at the start of the log (with the real
1188 * filesystem UUID embedded into it), and writes to all targets.
1189 *
1190 * Returns the next buffer-length-aligned disk address.
1191 */
1192 xfs_off_t
1193 write_log_header(int fd, wbuf *buf, xfs_mount_t *mp)
1194 {
1195 xfs_caddr_t p = buf->data;
1196 xfs_off_t logstart;
1197 int offset;
1198
1199 logstart = XFS_FSB_TO_DADDR(mp, mp->m_sb.sb_logstart) << BBSHIFT;
1200 buf->position = rounddown(logstart, (xfs_off_t)buf->length);
1201
1202 memset(p, 0, buf->size);
1203 if (logstart % buf->length) { /* unaligned */
1204 read_wbuf(fd, buf, mp);
1205 offset = logstart - buf->position;
1206 p += offset;
1207 memset(p, 0, buf->length - offset);
1208 }
1209
1210 offset = libxfs_log_header(p, &buf->owner->uuid,
1211 XFS_SB_VERSION_HASLOGV2(&mp->m_sb) ? 2 : 1,
1212 mp->m_sb.sb_logsunit, XLOG_FMT,
1213 next_log_chunk, buf);
1214 do_write(buf->owner);
1215
1216 return logstart + roundup(offset, buf->length);
1217 }
1218
1219 /*
1220 * May do an aligned read of the last buffer in the log (& zero
1221 * the start of that buffer). Returns the disk address at the
1222 * end of last aligned buffer in the log.
1223 */
1224 xfs_off_t
1225 write_log_trailer(int fd, wbuf *buf, xfs_mount_t *mp)
1226 {
1227 xfs_off_t logend;
1228 int offset;
1229
1230 logend = XFS_FSB_TO_DADDR(mp, mp->m_sb.sb_logstart) << BBSHIFT;
1231 logend += XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks);
1232
1233 buf->position = rounddown(logend, (xfs_off_t)buf->length);
1234
1235 if (logend % buf->length) { /* unaligned */
1236 read_wbuf(fd, buf, mp);
1237 offset = (int)(logend - buf->position);
1238 memset(buf->data, 0, offset);
1239 do_write(buf->owner);
1240 }
1241
1242 return buf->position;
1243 }
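/*
 * Editorial summary: main() gives each copy a clean log by calling
 * write_log_header(), which stamps a log header carrying that target's
 * UUID over the start of the log and returns the first buffer-aligned
 * offset past it, then write_log_trailer(), which zeroes the in-log
 * portion of the final buffer and returns where that buffer begins, and
 * finally writing zero-filled w_buf-sized chunks over everything in
 * between.
 */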